Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/Kconfig | 16
-rw-r--r--  drivers/net/Makefile | 5
-rw-r--r--  drivers/net/arcnet/Kconfig | 2
-rw-r--r--  drivers/net/arcnet/arcdevice.h | 3
-rw-r--r--  drivers/net/arcnet/arcnet.c | 11
-rw-r--r--  drivers/net/arcnet/com20020-isa.c | 1
-rw-r--r--  drivers/net/bareudp.c | 19
-rw-r--r--  drivers/net/bonding/bond_main.c | 60
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 3
-rw-r--r--  drivers/net/bonding/bond_options.c | 10
-rw-r--r--  drivers/net/bonding/bond_procfs.c | 2
-rw-r--r--  drivers/net/bonding/bond_sysfs.c | 25
-rw-r--r--  drivers/net/bonding/bond_sysfs_slave.c | 2
-rw-r--r--  drivers/net/caif/caif_virtio.c | 9
-rw-r--r--  drivers/net/can/Kconfig | 5
-rw-r--r--  drivers/net/can/cc770/Kconfig | 1
-rw-r--r--  drivers/net/can/dev/dev.c | 4
-rw-r--r--  drivers/net/can/kvaser_pciefd.c | 135
-rw-r--r--  drivers/net/can/m_can/m_can.c | 169
-rw-r--r--  drivers/net/can/m_can/m_can.h | 2
-rw-r--r--  drivers/net/can/m_can/m_can_pci.c | 2
-rw-r--r--  drivers/net/can/m_can/m_can_platform.c | 2
-rw-r--r--  drivers/net/can/m_can/tcan4x5x-core.c | 15
-rw-r--r--  drivers/net/can/mscan/mscan.c | 6
-rw-r--r--  drivers/net/can/peak_canfd/peak_canfd.c | 2
-rw-r--r--  drivers/net/can/rcar/rcar_canfd.c | 41
-rw-r--r--  drivers/net/can/sja1000/Kconfig | 1
-rw-r--r--  drivers/net/can/sja1000/plx_pci.c | 3
-rw-r--r--  drivers/net/can/spi/hi311x.c | 7
-rw-r--r--  drivers/net/can/spi/mcp251x.c | 11
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c | 103
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c | 2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c | 2
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c | 5
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c | 165
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c | 129
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c | 29
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c | 55
-rw-r--r--  drivers/net/can/spi/mcp251xfd/mcp251xfd.h | 61
-rw-r--r--  drivers/net/can/usb/Kconfig | 3
-rw-r--r--  drivers/net/can/usb/etas_es58x/es58x_devlink.c | 2
-rw-r--r--  drivers/net/can/usb/gs_usb.c | 7
-rw-r--r--  drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c | 12
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.c | 2
-rw-r--r--  drivers/net/can/usb/peak_usb/pcan_usb_core.h | 2
-rw-r--r--  drivers/net/can/vcan.c | 2
-rw-r--r--  drivers/net/can/vxcan.c | 2
-rw-r--r--  drivers/net/can/xilinx_can.c | 2
-rw-r--r--  drivers/net/dsa/Kconfig | 3
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 211
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 12
-rw-r--r--  drivers/net/dsa/bcm_sf2.c | 53
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek.h | 8
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c | 2
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h | 2
-rw-r--r--  drivers/net/dsa/hirschmann/hellcreek_ptp.c | 25
-rw-r--r--  drivers/net/dsa/lan9303-core.c | 61
-rw-r--r--  drivers/net/dsa/lan9303_i2c.c | 2
-rw-r--r--  drivers/net/dsa/lan9303_mdio.c | 8
-rw-r--r--  drivers/net/dsa/lantiq_gswip.c | 162
-rw-r--r--  drivers/net/dsa/microchip/Kconfig | 2
-rw-r--r--  drivers/net/dsa/microchip/Makefile | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz8.h | 9
-rw-r--r--  drivers/net/dsa/microchip/ksz8795.c | 251
-rw-r--r--  drivers/net/dsa/microchip/ksz8795_reg.h | 10
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.c | 67
-rw-r--r--  drivers/net/dsa/microchip/ksz9477.h | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_i2c.c | 4
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_reg.h | 11
-rw-r--r--  drivers/net/dsa/microchip/ksz9477_tc_flower.c | 3
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 265
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.h | 24
-rw-r--r--  drivers/net/dsa/microchip/ksz_dcb.c | 819
-rw-r--r--  drivers/net/dsa/microchip/ksz_dcb.h | 23
-rw-r--r--  drivers/net/dsa/microchip/ksz_ptp.c | 7
-rw-r--r--  drivers/net/dsa/microchip/ksz_ptp.h | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz_spi.c | 8
-rw-r--r--  drivers/net/dsa/microchip/lan937x_main.c | 32
-rw-r--r--  drivers/net/dsa/microchip/lan937x_reg.h | 5
-rw-r--r--  drivers/net/dsa/mt7530-mdio.c | 28
-rw-r--r--  drivers/net/dsa/mt7530.c | 588
-rw-r--r--  drivers/net/dsa/mt7530.h | 294
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 159
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 6
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.c | 89
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1.h | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global1_atu.c | 3
-rw-r--r--  drivers/net/dsa/mv88e6xxx/hwtstamp.c | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/hwtstamp.h | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/trace.h | 4
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 240
-rw-r--r--  drivers/net/dsa/ocelot/felix.h | 9
-rw-r--r--  drivers/net/dsa/ocelot/felix_vsc9959.c | 115
-rw-r--r--  drivers/net/dsa/ocelot/ocelot_ext.c | 54
-rw-r--r--  drivers/net/dsa/ocelot/seville_vsc9953.c | 60
-rw-r--r--  drivers/net/dsa/qca/ar9331.c | 39
-rw-r--r--  drivers/net/dsa/qca/qca8k-8xxx.c | 51
-rw-r--r--  drivers/net/dsa/qca/qca8k-common.c | 118
-rw-r--r--  drivers/net/dsa/qca/qca8k-leds.c | 12
-rw-r--r--  drivers/net/dsa/qca/qca8k.h | 1
-rw-r--r--  drivers/net/dsa/realtek/realtek.h | 2
-rw-r--r--  drivers/net/dsa/realtek/rtl8365mb.c | 32
-rw-r--r--  drivers/net/dsa/realtek/rtl8366rb.c | 392
-rw-r--r--  drivers/net/dsa/realtek/rtl83xx.c | 8
-rw-r--r--  drivers/net/dsa/rzn1_a5psw.c | 47
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_flower.c | 3
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 47
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.c | 2
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_ptp.h | 2
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx-core.c | 1004
-rw-r--r--  drivers/net/dsa/vitesse-vsc73xx.h | 64
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x.c | 26
-rw-r--r--  drivers/net/dsa/xrs700x/xrs700x_i2c.c | 4
-rw-r--r--  drivers/net/ethernet/3com/3c515.c | 3
-rw-r--r--  drivers/net/ethernet/3com/3c589_cs.c | 2
-rw-r--r--  drivers/net/ethernet/3com/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/8390/Kconfig | 6
-rw-r--r--  drivers/net/ethernet/8390/etherh.c | 2
-rw-r--r--  drivers/net/ethernet/8390/ne2k-pci.c | 11
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 2
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 8
-rw-r--r--  drivers/net/ethernet/adi/adin1110.c | 2
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 2
-rw-r--r--  drivers/net/ethernet/alteon/acenic.c | 2
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.c | 11
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_com.h | 6
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.c | 37
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_eth_com.h | 2
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.c | 39
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_netdev.h | 1
-rw-r--r--  drivers/net/ethernet/amazon/ena/ena_regs_defs.h | 1
-rw-r--r--  drivers/net/ethernet/amd/7990.c | 1
-rw-r--r--  drivers/net/ethernet/amd/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/amd/a2065.c | 1
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 7
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.h | 1
-rw-r--r--  drivers/net/ethernet/amd/ariadne.c | 1
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c | 1
-rw-r--r--  drivers/net/ethernet/amd/hplance.c | 1
-rw-r--r--  drivers/net/ethernet/amd/lance.c | 1
-rw-r--r--  drivers/net/ethernet/amd/mvme147.c | 1
-rw-r--r--  drivers/net/ethernet/amd/nmclan_cs.c | 2
-rw-r--r--  drivers/net/ethernet/amd/pds_core/core.h | 3
-rw-r--r--  drivers/net/ethernet/amd/pds_core/devlink.c | 3
-rw-r--r--  drivers/net/ethernet/amd/sun3lance.c | 1
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-platform.c | 8
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 9
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | 2
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c | 2
-rw-r--r--  drivers/net/ethernet/arc/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/arc/Makefile | 1
-rw-r--r--  drivers/net/ethernet/arc/emac_arc.c | 88
-rw-r--r--  drivers/net/ethernet/atheros/ag71xx.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 4
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/asp2/bcmasp.c | 1
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1396
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 135
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c | 13
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 286
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 491
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c | 163
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h | 45
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 173
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 17
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c | 35
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 3
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 14
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 34
-rw-r--r--  drivers/net/ethernet/brocade/bna/bna_types.h | 2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 13
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 10
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 127
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 6
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c | 13
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 5
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 33
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c | 67
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c | 1
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/Makefile | 2
-rw-r--r--  drivers/net/ethernet/cirrus/mac89x0.c | 1
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 25
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 14
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.c | 20
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.h | 5
-rw-r--r--  drivers/net/ethernet/cortina/gemini.c | 70
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 5
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/engleder/tsnep_main.c | 2
-rw-r--r--  drivers/net/ethernet/faraday/ftmac100.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 78
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.h | 20
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 12
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c | 16
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 44
-rw-r--r--  drivers/net/ethernet/freescale/fec_ptp.c | 17
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c | 17
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_muram.c | 1
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/fujitsu/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/fungible/funeth/Makefile | 2
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_main.c | 2
-rw-r--r--  drivers/net/ethernet/fungible/funeth/funeth_trace.h | 6
-rw-r--r--  drivers/net/ethernet/google/gve/Makefile | 2
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h | 151
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c | 453
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h | 153
-rw-r--r--  drivers/net/ethernet/google/gve/gve_dqo.h | 6
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c | 234
-rw-r--r--  drivers/net/ethernet/google/gve/gve_flow_rule.c | 298
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 714
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c | 138
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c | 148
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c | 36
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx_dqo.c | 64
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/Makefile | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 17
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c | 30
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h | 24
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_trace.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 646
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 643
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c | 44
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 433
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 36
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 184
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h | 102
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 64
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h | 58
-rw-r--r--  drivers/net/ethernet/huawei/hinic/hinic_main.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.c | 14
-rw-r--r--  drivers/net/ethernet/ibm/emac/mal.h | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 28
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 22
-rw-r--r--  drivers/net/ethernet/intel/Makefile | 3
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 9
-rw-r--r--  drivers/net/ethernet/intel/e1000/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 17
-rw-r--r--  drivers/net/ethernet/intel/e1000e/Makefile | 7
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 64
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 55
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 175
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 3
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/Makefile | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 31
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 28
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 253
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ddp.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 36
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 31
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 485
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 1010
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_trace.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 92
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 88
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 5
-rw-r--r--  drivers/net/ethernet/intel/iavf/Makefile | 5
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf.h | 2
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_common.c | 253
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 140
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 55
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_prototype.h | 7
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_trace.h | 6
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.c | 553
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_txrx.h | 146
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_type.h | 90
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 17
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink.c (renamed from drivers/net/ethernet/intel/ice/ice_devlink.c) | 696
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink.h (renamed from drivers/net/ethernet/intel/ice/ice_devlink.h) | 0
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink_port.c | 487
-rw-r--r--  drivers/net/ethernet/intel/ice/devlink/devlink_port.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 77
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adapter.c | 112
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adapter.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 97
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_base.c | 75
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_cgu_regs.h | 77
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 219
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 32
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.c | 30
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_controlq.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.c | 249
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ddp.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_devids.h | 22
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.c | 462
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch.h | 31
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch_br.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_eswitch_br.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 463
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.h | 29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 142
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.c | 111
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fdir.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_fw_update.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hwmon.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.c | 53
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lag.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h | 320
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 114
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 39
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 403
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.c | 123
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_nvm.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_protocol_type.h | 55
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.c | 384
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_consts.h | 402
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 3245
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 295
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.c | 157
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_repr.h | 25
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sbq_cmd.h | 10
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.c | 37
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sched.h | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.c | 76
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_sriov.h | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.c | 960
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_switch.h | 28
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.c | 132
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_tc_lib.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_trace.h | 30
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.c | 60
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 122
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 88
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vf_lib.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl.c | 14
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c | 11
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.c | 202
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_xsk.h | 14
-rw-r--r--  drivers/net/ethernet/intel/idpf/Kconfig | 26
-rw-r--r--  drivers/net/ethernet/intel/idpf/Makefile | 3
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf.h | 11
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 176
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c | 139
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c | 306
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c | 1462
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.h | 737
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c | 178
-rw-r--r--  drivers/net/ethernet/intel/idpf/virtchnl2.h | 24
-rw-r--r--  drivers/net/ethernet/intel/igb/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 70
-rw-r--r--  drivers/net/ethernet/intel/igbvf/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 9
-rw-r--r--  drivers/net/ethernet/intel/igc/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 72
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_defines.h | 6
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ethtool.c | 32
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 219
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_ptp.c | 57
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.c | 76
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_tsn.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/Makefile | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 22
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 56
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 5
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 11
-rw-r--r--  drivers/net/ethernet/intel/libeth/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/intel/libeth/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/libeth/rx.c | 259
-rw-r--r--  drivers/net/ethernet/intel/libie/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/intel/libie/Makefile | 6
-rw-r--r--  drivers/net/ethernet/intel/libie/rx.c | 123
-rw-r--r--  drivers/net/ethernet/jme.c | 12
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 7
-rw-r--r--  drivers/net/ethernet/lantiq_xrx200.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 25
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 27
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/npc.h | 8
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.c | 17
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rpm.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 68
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 29
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 365
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 16
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c | 33
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 9
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c | 69
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 21
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 68
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h | 55
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 21
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/qos.c | 87
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_flower.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_hw.c | 83
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_main.c | 6
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_rxtx.c | 15
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/skge.c | 4
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 5
-rw-r--r--  drivers/net/ethernet/mediatek/Kconfig | 10
-rw-r--r--  drivers/net/ethernet/mediatek/Makefile | 1
-rw-r--r--  drivers/net/ethernet/mediatek/airoha_eth.c | 2731
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 486
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 48
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.h | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 21
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_star_emac.c | 7
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 61
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 78
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 74
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/channels.c | 83
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/channels.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/dim.h | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.c | 100
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/params.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c | 190
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 56
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dim.c | 95
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 358
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 578
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 239
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 552
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 29
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 153
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c | 254
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c | 30
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 80
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wc.c | 434
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c | 50
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.c | 57
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_env.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_linecards.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_thermal.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/item.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/minimal.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 862
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci_hw.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/port.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 33
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c | 77
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 56
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 12
-rw-r--r--  drivers/net/ethernet/meta/Kconfig | 33
-rw-r--r--  drivers/net/ethernet/meta/Makefile | 6
-rw-r--r--  drivers/net/ethernet/meta/fbnic/Makefile | 19
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic.h | 144
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_csr.h | 838
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_devlink.c | 88
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h | 5
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw.c | 791
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_fw.h | 124
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_irq.c | 208
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_mac.c | 666
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_mac.h | 86
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_netdev.c | 488
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_netdev.h | 63
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_pci.c | 564
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_phylink.c | 161
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_rpc.c | 651
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_rpc.h | 189
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_tlv.c | 529
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_tlv.h | 175
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 1913
-rw-r--r--  drivers/net/ethernet/meta/fbnic/fbnic_txrx.h | 127
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_common.c | 44
-rw-r--r--  drivers/net/ethernet/micrel/ks8851_spi.c | 4
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600-regmap.c | 10
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600.c | 7
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600_hw.h | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ethtool.c | 67
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.c | 61
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_main.h | 28
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ptp.c | 4
-rw-r--r--  drivers/net/ethernet/microchip/lan743x_ptp.h | 1
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c | 10
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.c | 18
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_main.h | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_port.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c | 14
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/Makefile | 3
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_main.c | 3
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_main.h | 25
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h | 68
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c | 235
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_packet.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_port.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c | 88
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c | 125
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_ag_api.h | 2
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_api.c | 16
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_api.h | 2
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_api_client.h | 4
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c | 2
-rw-r--r--  drivers/net/ethernet/microchip/vcap/vcap_api_private.h | 2
-rw-r--r--  drivers/net/ethernet/microsoft/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c | 11
-rw-r--r--  drivers/net/ethernet/microsoft/mana/hw_channel.c | 17
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 166
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 27
-rw-r--r--  drivers/net/ethernet/microsoft/mana/shm_channel.c | 13
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 279
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_fdma.c | 3
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_flower.c | 7
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vcap.c | 1
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_vsc7514.c | 4
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 8
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/devlink_param.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 27
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 10
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfd3/xsk.c | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_devlink.c | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 7
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c | 41
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 2
-rw-r--r--  drivers/net/ethernet/ni/nixge.c | 2
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 2
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic.h | 7
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c | 8
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_debugfs.c | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.c | 129
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_dev.h | 12
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_ethtool.c | 13
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_if.h | 237
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 161
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.h | 12
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_main.c | 2
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 110
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_devlink.c | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 14
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_filter.c | 138
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.h | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 2
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_debug.c | 6
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 16
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.h | 3
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169_main.c | 35
-rw-r--r--  drivers/net/ethernet/renesas/Kconfig | 11
-rw-r--r--  drivers/net/ethernet/renesas/Makefile | 2
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 15
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 535
-rw-r--r--  drivers/net/ethernet/renesas/rswitch.c | 6
-rw-r--r--  drivers/net/ethernet/renesas/rtsn.c | 1389
-rw-r--r--  drivers/net/ethernet/renesas/rtsn.h | 464
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 2
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 2
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ef100_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/efx_common.c | 12
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c | 6
-rw-r--r--  drivers/net/ethernet/sfc/ethtool_common.c | 168
-rw-r--r--  drivers/net/ethernet/sfc/ethtool_common.h | 12
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/falcon/falcon.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/falcon/nic.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_filters.c | 135
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_filters.h | 8
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 28
-rw-r--r--  drivers/net/ethernet/sfc/ptp.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/ptp.h | 5
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.c | 64
-rw-r--r--  drivers/net/ethernet/sfc/rx_common.h | 8
-rw-r--r--  drivers/net/ethernet/sfc/siena/efx_common.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/siena/ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/siena/ptp.c | 2
-rw-r--r--  drivers/net/ethernet/sfc/siena/ptp.h | 4
-rw-r--r--  drivers/net/ethernet/sfc/tc.c | 12
-rw-r--r--  drivers/net/ethernet/sis/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 6
-rw-r--r--  drivers/net/ethernet/smsc/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc9194.c | 1
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 4
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 31
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 64
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c | 93
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 114
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 259
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 20
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 13
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 20
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 185
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 60
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h | 17
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 10
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 30
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 113
-rw-r--r--  drivers/net/ethernet/sun/cassini.c | 3
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 16
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-common.c | 7
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-net.c | 2
-rw-r--r--  drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c | 5
-rw-r--r--  drivers/net/ethernet/tehuti/Kconfig | 15
-rw-r--r--  drivers/net/ethernet/tehuti/Makefile | 3
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r--  drivers/net/ethernet/tehuti/tn40.c | 1850
-rw-r--r--  drivers/net/ethernet/tehuti/tn40.h | 233
-rw-r--r--  drivers/net/ethernet/tehuti/tn40_mdio.c | 142
-rw-r--r--  drivers/net/ethernet/tehuti/tn40_phy.c | 76
-rw-r--r--  drivers/net/ethernet/tehuti/tn40_regs.h | 245
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 19
-rw-r--r--  drivers/net/ethernet/ti/Makefile | 22
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 15
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 715
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.h | 15
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-qos.c | 19
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c | 107
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.h | 11
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 6
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.c | 3
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.h | 2
-rw-r--r--  drivers/net/ethernet/ti/icssg/icss_iep.c | 92
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_classifier.c | 121
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_common.c | 1284
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_config.c | 351
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_config.h | 82
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_ethtool.c | 108
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c | 4
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth.c | 1469
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth.h | 130
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c | 1230
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_queues.c | 2
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_stats.c | 3
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_switchdev.c | 477
-rw-r--r--  drivers/net/ethernet/ti/icssg/icssg_switchdev.h | 13
-rw-r--r--  drivers/net/ethernet/ti/k3-cppi-desc-pool.c | 46
-rw-r--r--  drivers/net/ethernet/ti/k3-cppi-desc-pool.h | 6
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 4
-rw-r--r--  drivers/net/ethernet/via/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c | 4
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_ethtool.c | 39
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.c | 37
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_hw.h | 2
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.c | 136
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_lib.h | 3
-rw-r--r--  drivers/net/ethernet/wangxun/libwx/wx_type.h | 79
-rw-r--r--  drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/wangxun/ngbe/ngbe_main.c | 5
-rw-r--r--  drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c | 8
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/Makefile | 1
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c | 445
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c | 643
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h | 20
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c | 124
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h | 2
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_main.c | 58
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c | 2
-rw-r--r--  drivers/net/ethernet/wangxun/txgbe/txgbe_type.h | 148
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet.h | 21
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 36
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 23
-rw-r--r--  drivers/net/ethernet/xircom/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/xircom/xirc2ps_cs.c | 4
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 4
-rw-r--r--  drivers/net/fddi/defxx.c | 2
-rw-r--r--  drivers/net/fjes/fjes_main.c | 7
-rw-r--r--  drivers/net/fjes/fjes_trace.h | 12
-rw-r--r--  drivers/net/geneve.c | 56
-rw-r--r--  drivers/net/gtp.c | 868
-rw-r--r--  drivers/net/hamradio/Kconfig | 6
-rw-r--r--  drivers/net/hamradio/baycom_epp.c | 1
-rw-r--r--  drivers/net/hamradio/baycom_par.c | 1
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 4
-rw-r--r--  drivers/net/hyperv/netvsc_trace.h | 8
-rw-r--r--  drivers/net/ipa/data/ipa_data-v3.1.c | 5
-rw-r--r--  drivers/net/ipa/data/ipa_data-v3.5.1.c | 5
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.11.c | 5
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.2.c | 5
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.5.c | 5
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.7.c | 5
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.9.c | 5
-rw-r--r--  drivers/net/ipa/data/ipa_data-v5.0.c | 5
-rw-r--r--  drivers/net/ipa/data/ipa_data-v5.5.c | 5
-rw-r--r--  drivers/net/ipa/gsi.c | 30
-rw-r--r--  drivers/net/ipa/gsi.h | 12
-rw-r--r--  drivers/net/ipa/gsi_private.h | 7
-rw-r--r--  drivers/net/ipa/gsi_reg.c | 6
-rw-r--r--  drivers/net/ipa/gsi_trans.c | 12
-rw-r--r--  drivers/net/ipa/gsi_trans.h | 9
-rw-r--r--  drivers/net/ipa/ipa.h | 15
-rw-r--r--  drivers/net/ipa/ipa_cmd.c | 13
-rw-r--r--  drivers/net/ipa/ipa_cmd.h | 18
-rw-r--r--  drivers/net/ipa/ipa_data.h | 4
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c | 19
-rw-r--r--  drivers/net/ipa/ipa_endpoint.h | 10
-rw-r--r--  drivers/net/ipa/ipa_gsi.c | 7
-rw-r--r--  drivers/net/ipa/ipa_interrupt.c | 54
-rw-r--r--  drivers/net/ipa/ipa_interrupt.h | 6
-rw-r--r--  drivers/net/ipa/ipa_main.c | 43
-rw-r--r--  drivers/net/ipa/ipa_mem.c | 15
-rw-r--r--  drivers/net/ipa/ipa_mem.h | 4
-rw-r--r--  drivers/net/ipa/ipa_modem.c | 14
-rw-r--r--  drivers/net/ipa/ipa_modem.h | 5
-rw-r--r--  drivers/net/ipa/ipa_power.c | 27
-rw-r--r--  drivers/net/ipa/ipa_power.h | 19
-rw-r--r--  drivers/net/ipa/ipa_qmi.c | 10
-rw-r--r--  drivers/net/ipa/ipa_qmi.h | 4
-rw-r--r--  drivers/net/ipa/ipa_qmi_msg.c | 3
-rw-r--r--  drivers/net/ipa/ipa_qmi_msg.h | 3
-rw-r--r--  drivers/net/ipa/ipa_reg.c | 4
-rw-r--r--  drivers/net/ipa/ipa_reg.h | 6
-rw-r--r--  drivers/net/ipa/ipa_resource.c | 3
-rw-r--r--  drivers/net/ipa/ipa_smp2p.c | 10
-rw-r--r--  drivers/net/ipa/ipa_sysfs.c | 7
-rw-r--r--  drivers/net/ipa/ipa_sysfs.h | 4
-rw-r--r--  drivers/net/ipa/ipa_table.c | 27
-rw-r--r--  drivers/net/ipa/ipa_table.h | 7
-rw-r--r--  drivers/net/ipa/ipa_uc.c | 10
-rw-r--r--  drivers/net/ipa/ipa_uc.h | 3
-rw-r--r--  drivers/net/ipa/ipa_version.h | 22
-rw-r--r--  drivers/net/ipa/reg.h | 8
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v3.1.c | 8
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v3.5.1.c | 8
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v4.0.c | 8
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v4.11.c | 8
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v4.5.c | 8
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v4.9.c | 8
-rw-r--r--  drivers/net/ipa/reg/gsi_reg-v5.0.c | 8
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v3.1.c | 20
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v3.5.1.c | 20
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.11.c | 20
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.2.c | 6
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.5.c | 20
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.7.c | 20
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.9.c | 20
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v5.0.c | 6
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v5.5.c | 6
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 4
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 1
-rw-r--r--  drivers/net/loopback.c | 5
-rw-r--r--  drivers/net/macsec.c | 2
-rw-r--r--  drivers/net/macvlan.c | 4
-rw-r--r--  drivers/net/mctp/mctp-i2c.c | 45
-rw-r--r--  drivers/net/mdio/mdio-gpio.c | 3
-rw-r--r--  drivers/net/mdio/mdio-mscc-miim.c | 8
-rw-r--r--  drivers/net/net_failover.c | 2
-rw-r--r--  drivers/net/netconsole.c | 9
-rw-r--r--  drivers/net/netdevsim/ethtool.c | 13
-rw-r--r--  drivers/net/netdevsim/netdev.c | 338
-rw-r--r--  drivers/net/netdevsim/netdevsim.h | 10
-rw-r--r--  drivers/net/netkit.c | 30
-rw-r--r--  drivers/net/ntb_netdev.c | 6
-rw-r--r--  drivers/net/pcs/Kconfig | 6
-rw-r--r--  drivers/net/pcs/Makefile | 3
-rw-r--r--  drivers/net/pcs/pcs-lynx.c | 5
-rw-r--r--  drivers/net/pcs/pcs-rzn1-miic.c | 28
-rw-r--r--  drivers/net/pcs/pcs-xpcs-plat.c | 460
-rw-r--r--  drivers/net/pcs/pcs-xpcs.c | 361
-rw-r--r--  drivers/net/pcs/pcs-xpcs.h | 7
-rw-r--r--  drivers/net/pfcp.c | 301
-rw-r--r--  drivers/net/phy/Kconfig | 5
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/air_en8811h.c | 1090
-rw-r--r--  drivers/net/phy/aquantia/Makefile | 2
-rw-r--r--  drivers/net/phy/aquantia/aquantia.h | 84
-rw-r--r--  drivers/net/phy/aquantia/aquantia_firmware.c | 4
-rw-r--r--  drivers/net/phy/aquantia/aquantia_leds.c | 150
-rw-r--r--  drivers/net/phy/aquantia/aquantia_main.c | 174
-rw-r--r--  drivers/net/phy/bcm-phy-lib.c | 115
-rw-r--r--  drivers/net/phy/bcm-phy-lib.h | 4
-rw-r--r--  drivers/net/phy/bcm-phy-ptp.c | 5
-rw-r--r--  drivers/net/phy/broadcom.c | 403
-rw-r--r--  drivers/net/phy/dp83640.c | 4
-rw-r--r--  drivers/net/phy/dp83822.c | 37
-rw-r--r--  drivers/net/phy/dp83td510.c | 264
-rw-r--r--  drivers/net/phy/dp83tg720.c | 38
-rw-r--r--  drivers/net/phy/marvell-88q2xxx.c | 119
-rw-r--r--  drivers/net/phy/marvell.c | 397
-rw-r--r--  drivers/net/phy/mdio_bus.c | 4
-rw-r--r--  drivers/net/phy/mdio_device.c | 4
-rw-r--r--  drivers/net/phy/mediatek-ge.c | 3
-rw-r--r--  drivers/net/phy/micrel.c | 726
-rw-r--r--  drivers/net/phy/microchip.c | 126
-rw-r--r--  drivers/net/phy/microchip_t1.c | 2
-rw-r--r--  drivers/net/phy/mscc/mscc_ptp.c | 5
-rw-r--r--  drivers/net/phy/mxl-gpy.c | 58
-rw-r--r--  drivers/net/phy/nxp-c45-tja11xx.c | 5
-rw-r--r--  drivers/net/phy/phy-core.c | 4
-rw-r--r--  drivers/net/phy/phy.c | 2
-rw-r--r--  drivers/net/phy/phy_device.c | 13
-rw-r--r--  drivers/net/phy/phylink.c | 50
-rw-r--r--  drivers/net/phy/qcom/at803x.c | 3
-rw-r--r--  drivers/net/phy/realtek.c | 446
-rw-r--r--  drivers/net/phy/sfp-bus.c | 5
-rw-r--r--  drivers/net/phy/sfp.c | 30
-rw-r--r--  drivers/net/phy/vitesse.c | 14
-rw-r--r--  drivers/net/phy/xilinx_gmii2rgmii.c | 7
-rw-r--r--  drivers/net/plip/plip.c | 1
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 17
-rw-r--r--  drivers/net/pse-pd/Kconfig | 23
-rw-r--r--  drivers/net/pse-pd/Makefile | 2
-rw-r--r--  drivers/net/pse-pd/pd692x0.c | 1538
-rw-r--r--  drivers/net/pse-pd/pse_core.c | 698
-rw-r--r--  drivers/net/pse-pd/pse_regulator.c | 49
-rw-r--r--  drivers/net/pse-pd/tps23881.c | 823
-rw-r--r--  drivers/net/slip/slip.c | 2
-rw-r--r--  drivers/net/tap.c | 7
-rw-r--r--  drivers/net/team/Makefile | 1
-rw-r--r--  drivers/net/team/team_core.c (renamed from drivers/net/team/team.c) | 65
-rw-r--r--  drivers/net/team/team_nl.c | 59
-rw-r--r--  drivers/net/team/team_nl.h | 29
-rw-r--r--  drivers/net/tun.c | 12
-rw-r--r--  drivers/net/usb/aqc111.c | 10
-rw-r--r--  drivers/net/usb/asix_devices.c | 2
-rw-r--r--  drivers/net/usb/ax88179_178a.c | 59
-rw-r--r--  drivers/net/usb/cdc_ncm.c | 49
-rw-r--r--drivers/net/usb/ipheth.c20
-rw-r--r--drivers/net/usb/lan78xx.c56
-rw-r--r--drivers/net/usb/qmi_wwan.c16
-rw-r--r--drivers/net/usb/r8152.c27
-rw-r--r--drivers/net/usb/rtl8150.c3
-rw-r--r--drivers/net/usb/smsc75xx.c17
-rw-r--r--drivers/net/usb/smsc95xx.c26
-rw-r--r--drivers/net/usb/sr9700.c21
-rw-r--r--drivers/net/usb/usbnet.c3
-rw-r--r--drivers/net/veth.c1
-rw-r--r--drivers/net/virtio_net.c2437
-rw-r--r--drivers/net/vmxnet3/Makefile2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_defs.h61
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c221
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h33
-rw-r--r--drivers/net/vrf.c64
-rw-r--r--drivers/net/vsockmon.c2
-rw-r--r--drivers/net/vxlan/vxlan_core.c37
-rw-r--r--drivers/net/wan/Kconfig2
-rw-r--r--drivers/net/wan/fsl_qmc_hdlc.c37
-rw-r--r--drivers/net/wireguard/allowedips.c4
-rw-r--r--drivers/net/wireguard/main.c2
-rw-r--r--drivers/net/wireguard/queueing.h4
-rw-r--r--drivers/net/wireguard/send.c2
-rw-r--r--drivers/net/wireless/admtek/adm8211.c2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c16
-rw-r--r--drivers/net/wireless/ath/ath.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig7
-rw-r--r--drivers/net/wireless/ath/ath10k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath10k/ahb.c18
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c84
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h12
-rw-r--r--drivers/net/wireless/ath/ath10k/debugfs_sta.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.c90
-rw-r--r--drivers/net/wireless/ath/ath10k/leds.h34
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c48
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h6
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c23
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c13
-rw-r--r--drivers/net/wireless/ath/ath10k/targaddrs.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/trace.h64
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-ops.h32
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi-tlv.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c80
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h35
-rw-r--r--drivers/net/wireless/ath/ath11k/Kconfig1
-rw-r--r--drivers/net/wireless/ath/ath11k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath11k/ahb.c78
-rw-r--r--drivers/net/wireless/ath/ath11k/ce.h6
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c176
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h17
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c10
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c12
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c107
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c22
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/hal.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/hif.h14
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c414
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.c29
-rw-r--r--drivers/net/wireless/ath/ath11k/mhi.h5
-rw-r--r--drivers/net/wireless/ath/ath11k/p2p.c149
-rw-r--r--drivers/net/wireless/ath/ath11k/p2p.h22
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c44
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.h1
-rw-r--r--drivers/net/wireless/ath/ath11k/pcic.c34
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c24
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c18
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/thermal.c2
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h44
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c106
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h78
-rw-r--r--drivers/net/wireless/ath/ath12k/Kconfig10
-rw-r--r--drivers/net/wireless/ath/ath12k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.c396
-rw-r--r--drivers/net/wireless/ath/ath12k/acpi.h76
-rw-r--r--drivers/net/wireless/ath/ath12k/ce.h6
-rw-r--r--drivers/net/wireless/ath/ath12k/core.c304
-rw-r--r--drivers/net/wireless/ath/ath12k/core.h165
-rw-r--r--drivers/net/wireless/ath/ath12k/debug.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.c107
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs.h34
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c1540
-rw-r--r--drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h567
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.c198
-rw-r--r--drivers/net/wireless/ath/ath12k/dp.h17
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_mon.c46
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.c405
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_rx.h9
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.c243
-rw-r--r--drivers/net/wireless/ath/ath12k/dp_tx.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.c5
-rw-r--r--drivers/net/wireless/ath/ath12k/hal.h23
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_desc.h73
-rw-r--r--drivers/net/wireless/ath/ath12k/hal_tx.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/hif.h23
-rw-r--r--drivers/net/wireless/ath/ath12k/htc.c10
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.c41
-rw-r--r--drivers/net/wireless/ath/ath12k/hw.h14
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.c1976
-rw-r--r--drivers/net/wireless/ath/ath12k/mac.h9
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.c103
-rw-r--r--drivers/net/wireless/ath/ath12k/mhi.h5
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.c3
-rw-r--r--drivers/net/wireless/ath/ath12k/p2p.h1
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.c85
-rw-r--r--drivers/net/wireless/ath/ath12k/pci.h3
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.c117
-rw-r--r--drivers/net/wireless/ath/ath12k/qmi.h4
-rw-r--r--drivers/net/wireless/ath/ath12k/reg.c54
-rw-r--r--drivers/net/wireless/ath/ath12k/trace.h16
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.c980
-rw-r--r--drivers/net/wireless/ath/ath12k/wmi.h733
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.c1026
-rw-r--r--drivers/net/wireless/ath/ath12k/wow.h62
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h2
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_mbox.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_pipe.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c20
-rw-r--r--drivers/net/wireless/ath/ath6kl/trace.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c6
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c10
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c32
-rw-r--r--drivers/net/wireless/ath/trace.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c6
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c25
-rw-r--r--drivers/net/wireless/ath/wil6210/fw.h1
-rw-r--r--drivers/net/wireless/ath/wil6210/fw_inc.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c21
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c19
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h4
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.c58
-rw-r--r--drivers/net/wireless/atmel/at76c50x-usb.h2
-rw-r--r--drivers/net/wireless/broadcom/b43/main.c2
-rw-r--r--drivers/net/wireless/broadcom/b43/sysfs.c13
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/sysfs.c16
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/Makefile6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/Makefile6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c20
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c13
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/Makefile6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c8
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c5
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c29
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c24
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c4
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile2
-rw-r--r--drivers/net/wireless/intel/ipw2x00/libipw_tx.c3
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945.c2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c4
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/ax210.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/bz.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/sc.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/Makefile4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c (renamed from drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c)448
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c37
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/alive.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/binding.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/coex.h69
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/config.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h159
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h25
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h117
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/offload.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h93
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/scan.h49
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/debugfs.c3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.c142
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/regulatory.h29
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.c23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c394
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-fh.h34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-modparams.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c70
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c118
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h (renamed from drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h)17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.c448
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h657
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/coex.c112
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/constants.h21
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c344
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c136
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c240
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c268
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c170
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/link.c849
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c40
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c789
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c554
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c61
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h361
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/offloading.c8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c150
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c145
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c154
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c67
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c608
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.c139
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/sta.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tdls.c34
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c433
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c110
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/time-event.c218
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tt.c66
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c47
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c33
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c114
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/internal.h295
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c20
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c13
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c320
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c1188
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/tx.c1234
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.c1900
-rw-r--r--drivers/net/wireless/intel/iwlwifi/queue/tx.h191
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/tests/devinfo.c26
-rw-r--r--drivers/net/wireless/intersil/p54/fwio.c6
-rw-r--r--drivers/net/wireless/intersil/p54/main.c2
-rw-r--r--drivers/net/wireless/intersil/p54/p54pci.c8
-rw-r--r--drivers/net/wireless/intersil/p54/p54spi.c10
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/main.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sdio.c4
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c108
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/dma.h9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h16
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/dma.c48
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/dma.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c85
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h22
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c80
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h45
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c9
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h47
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c10
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c166
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/soc.c1
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c68
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c178
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c121
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c24
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mac.c156
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/main.c1077
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.c985
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mcu.h67
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h31
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x.h116
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_core.c111
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_mac.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt792x_usb.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/dma.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mac.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/main.c36
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mcu.c76
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mmio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h4
-rw-r--r--drivers/net/wireless/mediatek/mt76/pci.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c17
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c3
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/main.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c29
-rw-r--r--drivers/net/wireless/microchip/wilc1000/fw.h13
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c7
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c72
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h10
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c150
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c17
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c57
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.c2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/mac.h2
-rw-r--r--drivers/net/wireless/purelifi/plfxlc/usb.c4
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/bus.h2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c16
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c12
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c6
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c6
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00.h12
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2x00mac.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c2
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188e.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c)18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8188f.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c)33
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192c.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c)67
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192e.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c)18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8192f.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c)18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8710b.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c)18
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8723a.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c)45
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/8723b.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c)41
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/Makefile6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/core.c (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c)84
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/regs.h (renamed from drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h)0
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Kconfig16
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/Makefile2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.c5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/cam.h6
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c19
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/efuse.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c195
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/def.h (renamed from drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h)0
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c1061
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h79
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c370
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h49
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c1225
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h24
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c856
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h111
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/reg.h (renamed from drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h)162
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c359
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c516
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h405
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c1072
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h91
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c375
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h37
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c1170
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c918
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h59
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c375
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h5
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c34
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c517
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h433
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile13
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c120
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c63
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c1212
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h22
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c10
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h9
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c3123
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h32
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c240
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h11
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c395
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c1675
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h29
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c372
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h60
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c45
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c39
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/wifi.h45
-rw-r--r--drivers/net/wireless/realtek/rtw88/Kconfig22
-rw-r--r--drivers/net/wireless/realtek/rtw88/Makefile9
-rw-r--r--drivers/net/wireless/realtek/rtw88/coex.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c14
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac.c20
-rw-r--r--drivers/net/wireless/realtek/rtw88/mac80211.c4
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c18
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h5
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.c22
-rw-r--r--drivers/net/wireless/realtek/rtw88/pci.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.c2110
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b.h102
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c902
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h14
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723cs.c34
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.c674
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723d.h269
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.c721
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8723x.h518
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c1
-rw-r--r--drivers/net/wireless/realtek/rtw88/rx.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/usb.c31
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig19
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile18
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.c47
-rw-r--r--drivers/net/wireless/realtek/rtw89/acpi.h21
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c200
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h71
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.c27
-rw-r--r--drivers/net/wireless/realtek/rtw89/chan.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c1991
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h108
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c163
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h419
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c45
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c523
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h507
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c174
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h18
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c67
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac_be.c25
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c239
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h37
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c128
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h17
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy_be.c18
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c3
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h63
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c346
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851b.c9
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8851be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c8
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ae.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.c1880
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b.h122
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.c2053
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_common.h388
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c21
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852be.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt.h13
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c4019
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h22
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c490
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h38
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c.c20
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c32
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852c_table.c2706
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852ce.c24
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a.c166
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c17
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8922ae.c2
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c8
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h4
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.c106
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h5
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.c733
-rw-r--r--drivers/net/wireless/realtek/rtw89/wow.h87
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c3
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c31
-rw-r--r--drivers/net/wireless/silabs/wfx/bus_sdio.c1
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.c2
-rw-r--r--drivers/net/wireless/silabs/wfx/sta.h2
-rw-r--r--drivers/net/wireless/st/cw1200/sta.c2
-rw-r--r--drivers/net/wireless/st/cw1200/sta.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/acx.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/cmd.h2
-rw-r--r--drivers/net/wireless/ti/wl1251/main.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c20
-rw-r--r--drivers/net/wireless/ti/wl1251/wl12xx_80211.h1
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c71
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c13
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h62
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c16
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c120
-rw-r--r--drivers/net/wireless/ti/wlcore/sysfs.c11
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/wl12xx_80211.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h10
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.c144
-rw-r--r--drivers/net/wireless/virtual/mac80211_hwsim.h8
-rw-r--r--drivers/net/wireless/virtual/virt_wifi.c20
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.c2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_mac.h2
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c10
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c5
-rw-r--r--drivers/net/wwan/mhi_wwan_mbim.c19
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.c20
-rw-r--r--drivers/net/wwan/t7xx/t7xx_netdev.h2
-rw-r--r--drivers/net/wwan/t7xx/t7xx_pci.c1
-rw-r--r--drivers/net/xen-netback/common.h5
-rw-r--r--drivers/net/xen-netback/interface.c4
-rw-r--r--drivers/net/xen-netback/netback.c12
-rw-r--r--drivers/net/xen-netfront.c2
1666 files changed, 118525 insertions, 40206 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 8ca0bc223b30..9920b3a68ed1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -290,6 +290,19 @@ config GTP
To compile this drivers as a module, choose M here: the module
will be called gtp.
+config PFCP
+ tristate "Packet Forwarding Control Protocol (PFCP)"
+ depends on INET
+ select NET_UDP_TUNNEL
+ help
+ This allows one to create PFCP virtual interfaces that can be used to
+ set up software and hardware offload of PFCP packets.
+ Note that this module does not support the PFCP protocol in kernel
+ space; there is no support for parsing any PFCP messages.
+
+ To compile this driver as a module, choose M here: the module
+ will be called pfcp.
+
config AMT
tristate "Automatic Multicast Tunneling (AMT)"
depends on INET && IP_MULTICAST
@@ -507,7 +520,7 @@ source "drivers/net/ipa/Kconfig"
config NET_SB1000
tristate "General Instruments Surfboard 1000"
- depends on PNP
+ depends on ISA && PNP
help
This is a driver for the General Instrument (also known as
NextLevel) SURFboard 1000 internal
@@ -627,6 +640,7 @@ config NETDEVSIM
depends on PSAMPLE || PSAMPLE=n
depends on PTP_1588_CLOCK_MOCK || PTP_1588_CLOCK_MOCK=n
select NET_DEVLINK
+ select PAGE_POOL
help
This driver is a developer testing tool and software model that can
be used to test various control path networking APIs, especially
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 7cab36f94782..13743d0e83b5 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_GENEVE) += geneve.o
obj-$(CONFIG_BAREUDP) += bareudp.o
obj-$(CONFIG_GTP) += gtp.o
obj-$(CONFIG_NLMON) += nlmon.o
+obj-$(CONFIG_PFCP) += pfcp.o
obj-$(CONFIG_NET_VRF) += vrf.o
obj-$(CONFIG_VSOCKMON) += vsockmon.o
obj-$(CONFIG_MHI_NET) += mhi_net.o
@@ -48,7 +49,9 @@ obj-$(CONFIG_MHI_NET) += mhi_net.o
obj-$(CONFIG_ARCNET) += arcnet/
obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_CAN) += can/
-obj-$(CONFIG_NET_DSA) += dsa/
+ifdef CONFIG_NET_DSA
+obj-y += dsa/
+endif
obj-$(CONFIG_ETHERNET) += ethernet/
obj-$(CONFIG_FDDI) += fddi/
obj-$(CONFIG_HIPPI) += hippi/
diff --git a/drivers/net/arcnet/Kconfig b/drivers/net/arcnet/Kconfig
index a51b9dab6d3a..d1d07a1d4fbc 100644
--- a/drivers/net/arcnet/Kconfig
+++ b/drivers/net/arcnet/Kconfig
@@ -4,7 +4,7 @@
#
menuconfig ARCNET
- depends on NETDEVICES && (ISA || PCI || PCMCIA)
+ depends on NETDEVICES && (ISA || PCI || PCMCIA) && HAS_IOPORT
tristate "ARCnet support"
help
If you have a network card of this type, say Y and check out the
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index b54275389f8a..bee60b377d7c 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -16,6 +16,7 @@
#ifdef __KERNEL__
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
/*
* RECON_THRESHOLD is the maximum number of RECON messages to receive
@@ -268,7 +269,7 @@ struct arcnet_local {
struct net_device *dev;
int reply_status;
- struct tasklet_struct reply_tasklet;
+ struct work_struct reply_work;
/*
* Buffer management: an ARCnet card has 4 x 512-byte buffers, each of
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 166bfc3c8e6c..530c15d6a5eb 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -54,6 +54,7 @@
#include <linux/errqueue.h>
#include <linux/leds.h>
+#include <linux/workqueue.h>
#include "arcdevice.h"
#include "com9026.h"
@@ -424,9 +425,9 @@ out:
rtnl_unlock();
}
-static void arcnet_reply_tasklet(struct tasklet_struct *t)
+static void arcnet_reply_work(struct work_struct *t)
{
- struct arcnet_local *lp = from_tasklet(lp, t, reply_tasklet);
+ struct arcnet_local *lp = from_work(lp, t, reply_work);
struct sk_buff *ackskb, *skb;
struct sock_exterr_skb *serr;
@@ -527,7 +528,7 @@ int arcnet_open(struct net_device *dev)
arc_cont(D_PROTO, "\n");
}
- tasklet_setup(&lp->reply_tasklet, arcnet_reply_tasklet);
+ INIT_WORK(&lp->reply_work, arcnet_reply_work);
arc_printk(D_INIT, dev, "arcnet_open: resetting card.\n");
@@ -620,7 +621,7 @@ int arcnet_close(struct net_device *dev)
netif_stop_queue(dev);
netif_carrier_off(dev);
- tasklet_kill(&lp->reply_tasklet);
+ cancel_work_sync(&lp->reply_work);
/* flush TX and disable RX */
lp->hw.intmask(dev, 0);
@@ -984,7 +985,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
->ack_tx(dev, ackstatus);
}
lp->reply_status = ackstatus;
- tasklet_hi_schedule(&lp->reply_tasklet);
+ queue_work(system_bh_highpri_wq, &lp->reply_work);
}
if (lp->cur_tx != -1)
release_arcbuf(dev, lp->cur_tx);
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index 293a621e654c..fef2ac2852a8 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -137,6 +137,7 @@ module_param(backplane, int, 0);
module_param(clockp, int, 0);
module_param(clockm, int, 0);
+MODULE_DESCRIPTION("ARCnet COM20020 chipset ISA driver");
MODULE_LICENSE("GPL");
static struct net_device *my_dev;
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 339db6e4a1d5..d5c56ca91b77 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -61,6 +61,7 @@ struct bareudp_dev {
static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct metadata_dst *tun_dst = NULL;
+ IP_TUNNEL_DECLARE_FLAGS(key) = { };
struct bareudp_dev *bareudp;
unsigned short family;
unsigned int len;
@@ -137,7 +138,10 @@ static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
bareudp->dev->stats.rx_dropped++;
goto drop;
}
- tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
+
+ __set_bit(IP_TUNNEL_KEY_BIT, key);
+
+ tun_dst = udp_tun_rx_dst(skb, family, key, 0, 0);
if (!tun_dst) {
bareudp->dev->stats.rx_dropped++;
goto drop;
@@ -285,10 +289,10 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct bareudp_dev *bareudp,
const struct ip_tunnel_info *info)
{
+ bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct socket *sock = rcu_dereference(bareudp->sock);
- bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
const struct ip_tunnel_key *key = &info->key;
struct rtable *rt;
__be16 sport, df;
@@ -316,7 +320,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
ttl = key->ttl;
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ?
+ htons(IP_DF) : 0;
skb_scrub_packet(skb, xnet);
err = -ENOSPC;
@@ -338,7 +343,8 @@ static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
tos, ttl, df, sport, bareudp->port,
!net_eq(bareudp->net, dev_net(bareudp->dev)),
- !(info->key.tun_flags & TUNNEL_CSUM));
+ !test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags));
return 0;
free_dst:
@@ -350,10 +356,10 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct bareudp_dev *bareudp,
const struct ip_tunnel_info *info)
{
+ bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
struct socket *sock = rcu_dereference(bareudp->sock);
- bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
const struct ip_tunnel_key *key = &info->key;
struct dst_entry *dst = NULL;
struct in6_addr saddr, daddr;
@@ -402,7 +408,8 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
&saddr, &daddr, prio, ttl,
info->key.label, sport, bareudp->port,
- !(info->key.tun_flags & TUNNEL_CSUM));
+ !test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags));
return 0;
free_dst:
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2c5ed0a7cb18..f74bacf071fc 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -582,7 +582,6 @@ static void bond_ipsec_del_sa_all(struct bonding *bond)
} else {
slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
}
- ipsec->xs->xso.real_dev = NULL;
}
spin_unlock_bh(&bond->ipsec_lock);
rcu_read_unlock();
@@ -599,34 +598,30 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
struct net_device *real_dev;
struct slave *curr_active;
struct bonding *bond;
- int err;
+ bool ok = false;
bond = netdev_priv(bond_dev);
rcu_read_lock();
curr_active = rcu_dereference(bond->curr_active_slave);
+ if (!curr_active)
+ goto out;
real_dev = curr_active->dev;
- if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
- err = false;
+ if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
goto out;
- }
- if (!xs->xso.real_dev) {
- err = false;
+ if (!xs->xso.real_dev)
goto out;
- }
if (!real_dev->xfrmdev_ops ||
!real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
- netif_is_bond_master(real_dev)) {
- err = false;
+ netif_is_bond_master(real_dev))
goto out;
- }
- err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+ ok = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
out:
rcu_read_unlock();
- return err;
+ return ok;
}
static const struct xfrmdev_ops bond_xfrmdev_ops = {
@@ -1121,13 +1116,10 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
return bestslave;
}
+/* must be called in RCU critical section or with RTNL held */
static bool bond_should_notify_peers(struct bonding *bond)
{
- struct slave *slave;
-
- rcu_read_lock();
- slave = rcu_dereference(bond->curr_active_slave);
- rcu_read_unlock();
+ struct slave *slave = rcu_dereference_rtnl(bond->curr_active_slave);
if (!slave || !bond->send_peer_notif ||
bond->send_peer_notif %
@@ -3014,8 +3006,8 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
tags = NULL;
/* Find out through which dev should the packet go */
- rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
- RTO_ONLINK, 0);
+ rt = ip_route_output(dev_net(bond->dev), targets[i], 0, 0, 0,
+ RT_SCOPE_LINK);
if (IS_ERR(rt)) {
/* there's no route to target - try to send arp
* probe to generate any traffic (arp_validate=0)
@@ -4710,7 +4702,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
}
}
- bond_dev->mtu = new_mtu;
+ WRITE_ONCE(bond_dev->mtu, new_mtu);
return 0;
@@ -5245,7 +5237,7 @@ static inline int bond_slave_override(struct bonding *bond,
/* Find out if any slaves have the same mapping as this skb. */
bond_for_each_slave_rcu(bond, slave, iter) {
- if (slave->queue_id == skb_get_queue_mapping(skb)) {
+ if (READ_ONCE(slave->queue_id) == skb_get_queue_mapping(skb)) {
if (bond_slave_is_up(slave) &&
slave->link == BOND_LINK_UP) {
bond_dev_queue_xmit(bond, skb, slave->dev);
@@ -5755,10 +5747,10 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
}
static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct ethtool_ts_info ts_info;
+ struct kernel_ethtool_ts_info ts_info;
struct net_device *real_dev;
bool sw_tx_support = false;
struct list_head *iter;
@@ -5773,6 +5765,9 @@ static int bond_ethtool_get_ts_info(struct net_device *bond_dev,
if (real_dev) {
ret = ethtool_get_ts_info_by_layer(real_dev, info);
} else {
+ info->phc_index = -1;
+ info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
/* Check if all slaves support software tx timestamping */
rcu_read_lock();
bond_for_each_slave_rcu(bond, slave, iter) {
@@ -5933,7 +5928,7 @@ static void bond_uninit(struct net_device *bond_dev)
bond_set_slave_arr(bond, NULL, NULL);
- list_del(&bond->bond_list);
+ list_del_rcu(&bond->bond_list);
bond_debug_unregister(bond);
}
@@ -6347,7 +6342,7 @@ static int bond_init(struct net_device *bond_dev)
spin_lock_init(&bond->stats_lock);
netdev_lockdep_set_classes(bond_dev);
- list_add_tail(&bond->bond_list, &bn->dev_list);
+ list_add_tail_rcu(&bond->bond_list, &bn->dev_list);
bond_prepare_sysfs_group(bond);
@@ -6477,16 +6472,16 @@ static int __init bonding_init(void)
if (res)
goto out;
+ bond_create_debugfs();
+
res = register_pernet_subsys(&bond_net_ops);
if (res)
- goto out;
+ goto err_net_ops;
res = bond_netlink_init();
if (res)
goto err_link;
- bond_create_debugfs();
-
for (i = 0; i < max_bonds; i++) {
res = bond_create(&init_net, NULL);
if (res)
@@ -6501,10 +6496,11 @@ static int __init bonding_init(void)
out:
return res;
err:
- bond_destroy_debugfs();
bond_netlink_fini();
err_link:
unregister_pernet_subsys(&bond_net_ops);
+err_net_ops:
+ bond_destroy_debugfs();
goto out;
}
@@ -6513,11 +6509,11 @@ static void __exit bonding_exit(void)
{
unregister_netdevice_notifier(&bond_netdev_notifier);
- bond_destroy_debugfs();
-
bond_netlink_fini();
unregister_pernet_subsys(&bond_net_ops);
+ bond_destroy_debugfs();
+
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Make sure we don't have an imbalance on our netpoll blocking */
WARN_ON(atomic_read(&netpoll_block_tx));
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 29b4c3d1b9b6..2a6a424806aa 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -51,7 +51,8 @@ static int bond_fill_slave_info(struct sk_buff *skb,
slave_dev->addr_len, slave->perm_hwaddr))
goto nla_put_failure;
- if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID, slave->queue_id))
+ if (nla_put_u16(skb, IFLA_BOND_SLAVE_QUEUE_ID,
+ READ_ONCE(slave->queue_id)))
goto nla_put_failure;
if (nla_put_s32(skb, IFLA_BOND_SLAVE_PRIO, slave->prio))
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 4cdbc7e084f4..95d59a18c022 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -936,7 +936,7 @@ static int bond_option_active_slave_set(struct bonding *bond,
/* check to see if we are clearing active */
if (!slave_dev) {
netdev_dbg(bond->dev, "Clearing current active slave\n");
- RCU_INIT_POINTER(bond->curr_active_slave, NULL);
+ bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
} else {
struct slave *old_active = rtnl_dereference(bond->curr_active_slave);
@@ -1214,9 +1214,9 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
__be32 target;
if (newval->string) {
- if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
- netdev_err(bond->dev, "invalid ARP target %pI4 specified\n",
- &target);
+ if (strlen(newval->string) < 1 ||
+ !in4_pton(newval->string + 1, -1, (u8 *)&target, -1, NULL)) {
+ netdev_err(bond->dev, "invalid ARP target specified\n");
return ret;
}
if (newval->string[0] == '+')
@@ -1589,7 +1589,7 @@ static int bond_option_queue_id_set(struct bonding *bond,
goto err_no_cmd;
/* Actually set the qids for the slave */
- update_slave->queue_id = qid;
+ WRITE_ONCE(update_slave->queue_id, qid);
out:
return ret;
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 43be458422b3..7edf72ec816a 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -209,7 +209,7 @@ static void bond_info_show_slave(struct seq_file *seq,
seq_printf(seq, "Permanent HW addr: %*phC\n",
slave->dev->addr_len, slave->perm_hwaddr);
- seq_printf(seq, "Slave queue ID: %d\n", slave->queue_id);
+ seq_printf(seq, "Slave queue ID: %d\n", READ_ONCE(slave->queue_id));
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
const struct port *port = &SLAVE_AD_INFO(slave)->port;
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 2805135a7205..1e13bb170515 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -37,12 +37,12 @@ static ssize_t bonding_show_bonds(const struct class *cls,
{
const struct bond_net *bn =
container_of_const(attr, struct bond_net, class_attr_bonding_masters);
- int res = 0;
struct bonding *bond;
+ int res = 0;
- rtnl_lock();
+ rcu_read_lock();
- list_for_each_entry(bond, &bn->dev_list, bond_list) {
+ list_for_each_entry_rcu(bond, &bn->dev_list, bond_list) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
@@ -55,7 +55,7 @@ static ssize_t bonding_show_bonds(const struct class *cls,
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
- rtnl_unlock();
+ rcu_read_unlock();
return res;
}
@@ -170,10 +170,9 @@ static ssize_t bonding_show_slaves(struct device *d,
struct slave *slave;
int res = 0;
- if (!rtnl_trylock())
- return restart_syscall();
+ rcu_read_lock();
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
@@ -184,7 +183,7 @@ static ssize_t bonding_show_slaves(struct device *d,
res += sysfs_emit_at(buf, res, "%s ", slave->dev->name);
}
- rtnl_unlock();
+ rcu_read_unlock();
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -626,10 +625,9 @@ static ssize_t bonding_show_queue_id(struct device *d,
struct slave *slave;
int res = 0;
- if (!rtnl_trylock())
- return restart_syscall();
+ rcu_read_lock();
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
@@ -638,12 +636,13 @@ static ssize_t bonding_show_queue_id(struct device *d,
break;
}
res += sysfs_emit_at(buf, res, "%s:%d ",
- slave->dev->name, slave->queue_id);
+ slave->dev->name,
+ READ_ONCE(slave->queue_id));
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
- rtnl_unlock();
+ rcu_read_unlock();
return res;
}
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 313866f2c0e4..36d0e8440b5b 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -53,7 +53,7 @@ static SLAVE_ATTR_RO(perm_hwaddr);
static ssize_t queue_id_show(struct slave *slave, char *buf)
{
- return sysfs_emit(buf, "%d\n", slave->queue_id);
+ return sysfs_emit(buf, "%d\n", READ_ONCE(slave->queue_id));
}
static SLAVE_ATTR_RO(queue_id);
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index 0b0f234b0b50..7fea00c7ca8a 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -646,9 +646,7 @@ static inline void debugfs_init(struct cfv_info *cfv)
/* Setup CAIF for a virtio device */
static int cfv_probe(struct virtio_device *vdev)
{
- vq_callback_t *vq_cbs = cfv_release_cb;
vrh_callback_t *vrh_cbs = cfv_recv;
- const char *names = "output";
const char *cfv_netdev_name = "cfvrt";
struct net_device *netdev;
struct cfv_info *cfv;
@@ -675,9 +673,11 @@ static int cfv_probe(struct virtio_device *vdev)
goto err;
/* Get the TX virtio ring. This is a "guest side vring". */
- err = virtio_find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, NULL);
- if (err)
+ cfv->vq_tx = virtio_find_single_vq(vdev, cfv_release_cb, "output");
+ if (IS_ERR(cfv->vq_tx)) {
+ err = PTR_ERR(cfv->vq_tx);
goto err;
+ }
/* Get the CAIF configuration from virtio config space, if available */
if (vdev->config->get) {
@@ -782,7 +782,6 @@ static struct virtio_driver caif_virtio_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = cfv_probe,
.remove = cfv_remove,
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 2e31db55d927..7f9b60a42d29 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -187,9 +187,8 @@ config CAN_SLCAN
slcand) can be found in the can-utils at the linux-can project, see
https://github.com/linux-can/can-utils for details.
- The slcan driver supports up to 10 CAN netdevices by default which
- can be changed by the 'maxdev=xx' module option. This driver can
- also be built as a module. If so, the module will be called slcan.
+ This driver can also be built as a module. If so, the module
+ will be called slcan.
config CAN_SUN4I
tristate "Allwinner A10 CAN controller"
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
index 9ef1359319f0..467ef19de1c1 100644
--- a/drivers/net/can/cc770/Kconfig
+++ b/drivers/net/can/cc770/Kconfig
@@ -7,6 +7,7 @@ if CAN_CC770
config CAN_CC770_ISA
tristate "ISA Bus based legacy CC770 driver"
+ depends on ISA
help
This driver adds legacy support for CC770 and AN82527 chips
connected to the ISA bus using I/O port, memory mapped or
diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
index 3a3be5cdfc1f..87828f953073 100644
--- a/drivers/net/can/dev/dev.c
+++ b/drivers/net/can/dev/dev.c
@@ -338,7 +338,7 @@ int can_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
EXPORT_SYMBOL_GPL(can_change_mtu);
@@ -376,7 +376,7 @@ EXPORT_SYMBOL(can_eth_ioctl_hwts);
* supporting hardware timestamps
*/
int can_ethtool_op_get_ts_info_hwts(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index 7b5028b67cd5..a60d9efd5f8d 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -29,10 +29,10 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices");
#define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U
#define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL
#define KVASER_PCIEFD_DMA_COUNT 2U
-
#define KVASER_PCIEFD_DMA_SIZE (4U * 1024U)
#define KVASER_PCIEFD_VENDOR 0x1a07
+
/* Altera based devices */
#define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d
#define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e
@@ -550,7 +550,7 @@ static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can)
spin_unlock_irqrestore(&can->lock, irq);
}
-static void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
+static inline void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can)
{
u32 msk;
@@ -711,17 +711,17 @@ static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can)
static int kvaser_pciefd_open(struct net_device *netdev)
{
- int err;
+ int ret;
struct kvaser_pciefd_can *can = netdev_priv(netdev);
- err = open_candev(netdev);
- if (err)
- return err;
+ ret = open_candev(netdev);
+ if (ret)
+ return ret;
- err = kvaser_pciefd_bus_on(can);
- if (err) {
+ ret = kvaser_pciefd_bus_on(can);
+ if (ret) {
close_candev(netdev);
- return err;
+ return ret;
}
return 0;
@@ -1032,15 +1032,15 @@ static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie)
int i;
for (i = 0; i < pcie->nr_channels; i++) {
- int err = register_candev(pcie->can[i]->can.dev);
+ int ret = register_candev(pcie->can[i]->can.dev);
- if (err) {
+ if (ret) {
int j;
/* Unregister all successfully registered devices. */
for (j = 0; j < i; j++)
unregister_candev(pcie->can[j]->can.dev);
- return err;
+ return ret;
}
}
@@ -1619,7 +1619,7 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos,
/* Position does not point to the end of the packet,
* corrupted packet size?
*/
- if ((*start_pos + size) != pos)
+ if (unlikely((*start_pos + size) != pos))
return -EIO;
/* Point to the next packet header, if any */
@@ -1640,31 +1640,24 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf)
return res;
}
-static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
+static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie)
{
u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0)
kvaser_pciefd_read_buffer(pcie, 0);
- /* Reset DMA buffer 0 */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- }
- if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
+ if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1)
kvaser_pciefd_read_buffer(pcie, 1);
- /* Reset DMA buffer 1 */
- iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
- KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- }
- if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
- irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
- irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
- irq & KVASER_PCIEFD_SRB_IRQ_DUF1)
+ if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DOF1 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DUF0 ||
+ irq & KVASER_PCIEFD_SRB_IRQ_DUF1))
dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq);
iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
+ return irq;
}
static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can)
@@ -1691,27 +1684,33 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev)
{
struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev;
const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask;
- u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+ u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie));
+ u32 srb_irq = 0;
int i;
- if (!(board_irq & irq_mask->all))
+ if (!(pci_irq & irq_mask->all))
return IRQ_NONE;
- if (board_irq & irq_mask->kcan_rx0)
- kvaser_pciefd_receive_irq(pcie);
+ if (pci_irq & irq_mask->kcan_rx0)
+ srb_irq = kvaser_pciefd_receive_irq(pcie);
for (i = 0; i < pcie->nr_channels; i++) {
- if (!pcie->can[i]) {
- dev_err(&pcie->pci->dev,
- "IRQ mask points to unallocated controller\n");
- break;
- }
-
- /* Check that mask matches channel (i) IRQ mask */
- if (board_irq & irq_mask->kcan_tx[i])
+ if (pci_irq & irq_mask->kcan_tx[i])
kvaser_pciefd_transmit_irq(pcie->can[i]);
}
+ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) {
+ /* Reset DMA buffer 0, may trigger new interrupt */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0,
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+ }
+
+ if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) {
+ /* Reset DMA buffer 1, may trigger new interrupt */
+ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
+ KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
+ }
+
return IRQ_HANDLED;
}
@@ -1733,7 +1732,7 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie)
static int kvaser_pciefd_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- int err;
+ int ret;
struct kvaser_pciefd *pcie;
const struct kvaser_pciefd_irq_mask *irq_mask;
void __iomem *irq_en_base;
@@ -1747,39 +1746,52 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data;
irq_mask = pcie->driver_data->irq_mask;
- err = pci_enable_device(pdev);
- if (err)
- return err;
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
- err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
- if (err)
+ ret = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME);
+ if (ret)
goto err_disable_pci;
pcie->reg_base = pci_iomap(pdev, 0, 0);
if (!pcie->reg_base) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_release_regions;
}
- err = kvaser_pciefd_setup_board(pcie);
- if (err)
+ ret = kvaser_pciefd_setup_board(pcie);
+ if (ret)
goto err_pci_iounmap;
- err = kvaser_pciefd_setup_dma(pcie);
- if (err)
+ ret = kvaser_pciefd_setup_dma(pcie);
+ if (ret)
goto err_pci_iounmap;
pci_set_master(pdev);
- err = kvaser_pciefd_setup_can_ctrls(pcie);
- if (err)
+ ret = kvaser_pciefd_setup_can_ctrls(pcie);
+ if (ret)
goto err_teardown_can_ctrls;
- err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
- IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
- if (err)
+ ret = pci_alloc_irq_vectors(pcie->pci, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(&pcie->pci->dev, "Failed to allocate IRQ vectors.\n");
goto err_teardown_can_ctrls;
+ }
+
+ ret = pci_irq_vector(pcie->pci, 0);
+ if (ret < 0)
+ goto err_pci_free_irq_vectors;
+
+ pcie->pci->irq = ret;
+ ret = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler,
+ IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie);
+ if (ret) {
+ dev_err(&pcie->pci->dev, "Failed to request IRQ %d\n", pcie->pci->irq);
+ goto err_pci_free_irq_vectors;
+ }
iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG);
@@ -1797,8 +1809,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev,
iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1,
KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG);
- err = kvaser_pciefd_reg_candev(pcie);
- if (err)
+ ret = kvaser_pciefd_reg_candev(pcie);
+ if (ret)
goto err_free_irq;
return 0;
@@ -1808,6 +1820,9 @@ err_free_irq:
iowrite32(0, irq_en_base);
free_irq(pcie->pci->irq, pcie);
+err_pci_free_irq_vectors:
+ pci_free_irq_vectors(pcie->pci);
+
err_teardown_can_ctrls:
kvaser_pciefd_teardown_can_ctrls(pcie);
iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG);
@@ -1822,7 +1837,7 @@ err_release_regions:
err_disable_pci:
pci_disable_device(pdev);
- return err;
+ return ret;
}
static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie)
@@ -1853,7 +1868,7 @@ static void kvaser_pciefd_remove(struct pci_dev *pdev)
iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie));
free_irq(pcie->pci->irq, pcie);
-
+ pci_free_irq_vectors(pcie->pci);
pci_iounmap(pdev, pcie->reg_base);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 14b231c4d7ec..7f63f866083e 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -379,38 +379,72 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val)
return cdev->ops->read_fifo(cdev, addr_offset, val, 1);
}
-static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable)
-{
- u32 cccr = m_can_read(cdev, M_CAN_CCCR);
- u32 timeout = 10;
- u32 val = 0;
-
- /* Clear the Clock stop request if it was set */
- if (cccr & CCCR_CSR)
- cccr &= ~CCCR_CSR;
-
- if (enable) {
- /* enable m_can configuration */
- m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT);
- udelay(5);
- /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
- m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
- } else {
- m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
+static int m_can_cccr_update_bits(struct m_can_classdev *cdev, u32 mask, u32 val)
+{
+ u32 val_before = m_can_read(cdev, M_CAN_CCCR);
+ u32 val_after = (val_before & ~mask) | val;
+ size_t tries = 10;
+
+ if (!(mask & CCCR_INIT) && !(val_before & CCCR_INIT)) {
+ dev_err(cdev->dev,
+ "refusing to configure device when in normal mode\n");
+ return -EBUSY;
}
- /* there's a delay for module initialization */
- if (enable)
- val = CCCR_INIT | CCCR_CCE;
+ /* The chip should be in standby mode when changing the CCCR register,
+ * and some chips set the CSR and CSA bits when in standby. Furthermore,
+ * the CSR and CSA bits should be written as zeros, even when they
+ * read back as ones.
+ */
+ val_after &= ~(CCCR_CSR | CCCR_CSA);
- while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
- if (timeout == 0) {
- netdev_warn(cdev->net, "Failed to init module\n");
- return;
- }
- timeout--;
- udelay(1);
+ while (tries--) {
+ u32 val_read;
+
+ /* Write the desired value on each try, as setting some bits in
+ * the CCCR register requires other bits to be set first. E.g.
+ * setting the NISO bit requires setting the CCE bit first.
+ */
+ m_can_write(cdev, M_CAN_CCCR, val_after);
+
+ val_read = m_can_read(cdev, M_CAN_CCCR) & ~(CCCR_CSR | CCCR_CSA);
+
+ if (val_read == val_after)
+ return 0;
+
+ usleep_range(1, 5);
}
+
+ return -ETIMEDOUT;
+}
+
+static int m_can_config_enable(struct m_can_classdev *cdev)
+{
+ int err;
+
+ /* CCCR_INIT must be set in order to set CCCR_CCE, but access to
+ * configuration registers should only be enabled when in standby mode,
+ * where CCCR_INIT is always set.
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_CCE, CCCR_CCE);
+ if (err)
+ netdev_err(cdev->net, "failed to enable configuration mode\n");
+
+ return err;
+}
+
+static int m_can_config_disable(struct m_can_classdev *cdev)
+{
+ int err;
+
+ /* Only clear CCCR_CCE, since CCCR_INIT cannot be cleared while in
+ * standby mode
+ */
+ err = m_can_cccr_update_bits(cdev, CCCR_CCE, 0);
+ if (err)
+ netdev_err(cdev->net, "failed to disable configuration registers\n");
+
+ return err;
}
static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts)
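
For reference, m_can_cccr_update_bits() is a masked read-modify-write with verification and a bounded retry, re-writing the full value on each pass. A standalone simulation of that loop against a fake register; the bit positions, names, and register behaviour below are demo assumptions, not taken from the hardware manual:

#include <stdint.h>
#include <stdio.h>

#define DEMO_INIT	(1u << 0)	/* standby/init request */
#define DEMO_CCE	(1u << 1)	/* configuration change enable */
#define DEMO_CSA	(1u << 3)	/* clock stop ack */
#define DEMO_CSR	(1u << 4)	/* clock stop request */

static uint32_t fake_cccr = DEMO_INIT | DEMO_CSR;	/* chip in standby */

static int demo_update_bits(uint32_t mask, uint32_t val)
{
	uint32_t after = (fake_cccr & ~mask) | val;
	int tries = 10;

	/* CSR/CSA are written as zero even if they read back as one. */
	after &= ~(DEMO_CSR | DEMO_CSA);

	while (tries--) {
		fake_cccr = after | DEMO_CSR;	/* chip re-asserts CSR */
		if ((fake_cccr & ~(DEMO_CSR | DEMO_CSA)) == after)
			return 0;
	}
	return -1;	/* stands in for -ETIMEDOUT */
}

int main(void)
{
	printf("enable config: %d, CCCR=0x%02x\n",
	       demo_update_bits(DEMO_CCE, DEMO_CCE), fake_cccr);
	return 0;
}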
@@ -1403,7 +1437,9 @@ static int m_can_chip_config(struct net_device *dev)
interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF |
IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F);
- m_can_config_endisable(cdev, true);
+ err = m_can_config_enable(cdev);
+ if (err)
+ return err;
/* RX Buffer/FIFO Element Size 64 bytes data field */
m_can_write(cdev, M_CAN_RXESC,
@@ -1521,7 +1557,9 @@ static int m_can_chip_config(struct net_device *dev)
FIELD_PREP(TSCC_TCP_MASK, 0xf) |
FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL));
- m_can_config_endisable(cdev, false);
+ err = m_can_config_disable(cdev);
+ if (err)
+ return err;
if (cdev->ops->init)
cdev->ops->init(cdev);
@@ -1550,7 +1588,11 @@ static int m_can_start(struct net_device *dev)
cdev->tx_fifo_putidx = FIELD_GET(TXFQS_TFQPI_MASK,
m_can_read(cdev, M_CAN_TXFQS));
- return 0;
+ ret = m_can_cccr_update_bits(cdev, CCCR_INIT, 0);
+ if (ret)
+ netdev_err(dev, "failed to enter normal mode\n");
+
+ return ret;
}
static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
@@ -1599,43 +1641,37 @@ static int m_can_check_core_release(struct m_can_classdev *cdev)
}
/* Selectable Non ISO support only in version 3.2.x
- * This function checks if the bit is writable.
+ * Return 1 if the bit is writable, 0 if it is not, or negative on error.
*/
-static bool m_can_niso_supported(struct m_can_classdev *cdev)
+static int m_can_niso_supported(struct m_can_classdev *cdev)
{
- u32 cccr_reg, cccr_poll = 0;
- int niso_timeout = -ETIMEDOUT;
- int i;
+ int ret, niso;
- m_can_config_endisable(cdev, true);
- cccr_reg = m_can_read(cdev, M_CAN_CCCR);
- cccr_reg |= CCCR_NISO;
- m_can_write(cdev, M_CAN_CCCR, cccr_reg);
+ ret = m_can_config_enable(cdev);
+ if (ret)
+ return ret;
- for (i = 0; i <= 10; i++) {
- cccr_poll = m_can_read(cdev, M_CAN_CCCR);
- if (cccr_poll == cccr_reg) {
- niso_timeout = 0;
- break;
- }
+ /* First try to set the NISO bit. */
+ niso = m_can_cccr_update_bits(cdev, CCCR_NISO, CCCR_NISO);
- usleep_range(1, 5);
+ /* Then clear it again. */
+ ret = m_can_cccr_update_bits(cdev, CCCR_NISO, 0);
+ if (ret) {
+ dev_err(cdev->dev, "failed to revert the NON-ISO bit in CCCR\n");
+ return ret;
}
- /* Clear NISO */
- cccr_reg &= ~(CCCR_NISO);
- m_can_write(cdev, M_CAN_CCCR, cccr_reg);
-
- m_can_config_endisable(cdev, false);
+ ret = m_can_config_disable(cdev);
+ if (ret)
+ return ret;
- /* return false if time out (-ETIMEDOUT), else return true */
- return !niso_timeout;
+ return niso == 0;
}
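
The rewritten probe keeps the old detection idea: NISO support is inferred from whether the bit can be set at all, and the bit is reverted afterwards. A toy simulation of that writability test, assuming a register whose NISO bit happens to be read-only:

#include <stdint.h>
#include <stdio.h>

#define DEMO_NISO	(1u << 15)

static uint32_t fake_reg;
static const uint32_t rw_mask = ~DEMO_NISO;	/* NISO not implemented here */

static void reg_write(uint32_t val)
{
	fake_reg = val & rw_mask;	/* read-only bits never latch */
}

static int bit_writable(uint32_t bit)
{
	int writable;

	reg_write(fake_reg | bit);	/* first try to set the bit */
	writable = !!(fake_reg & bit);	/* did it stick? */
	reg_write(fake_reg & ~bit);	/* then clear it again */
	return writable;
}

int main(void)
{
	printf("NISO writable: %d\n", bit_writable(DEMO_NISO));
	return 0;
}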
static int m_can_dev_setup(struct m_can_classdev *cdev)
{
struct net_device *dev = cdev->net;
- int m_can_version, err;
+ int m_can_version, err, niso;
m_can_version = m_can_check_core_release(cdev);
/* return if unsupported version */
@@ -1684,9 +1720,11 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
cdev->can.bittiming_const = &m_can_bittiming_const_31X;
cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X;
- cdev->can.ctrlmode_supported |=
- (m_can_niso_supported(cdev) ?
- CAN_CTRLMODE_FD_NON_ISO : 0);
+ niso = m_can_niso_supported(cdev);
+ if (niso < 0)
+ return niso;
+ if (niso)
+ cdev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO;
break;
default:
dev_err(cdev->dev, "Unsupported version number: %2d",
@@ -1694,21 +1732,26 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
return -EINVAL;
}
- if (cdev->ops->init)
- cdev->ops->init(cdev);
-
- return 0;
+ /* Forcing standby mode should be redundant, as the chip should be in
+ * standby after a reset. Write the INIT bit anyway, in case the
+ * chip was configured by an earlier boot stage.
+ */
+ return m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
}
static void m_can_stop(struct net_device *dev)
{
struct m_can_classdev *cdev = netdev_priv(dev);
+ int ret;
/* disable all interrupts */
m_can_disable_all_interrupts(cdev);
/* Set init mode to disengage from the network */
- m_can_config_endisable(cdev, true);
+ ret = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT);
+ if (ret)
+ netdev_err(dev, "failed to enter standby mode: %pe\n",
+ ERR_PTR(ret));
/* set the state as STOPPED */
cdev->can.state = CAN_STATE_STOPPED;
diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h
index 3a9edc292593..92b2bd8628e6 100644
--- a/drivers/net/can/m_can/m_can.h
+++ b/drivers/net/can/m_can/m_can.h
@@ -91,7 +91,7 @@ struct m_can_classdev {
ktime_t irq_timer_wait;
- struct m_can_ops *ops;
+ const struct m_can_ops *ops;
int version;
u32 irqstatus;
diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c
index 45400de4163d..d72fe771dfc7 100644
--- a/drivers/net/can/m_can/m_can_pci.c
+++ b/drivers/net/can/m_can/m_can_pci.c
@@ -77,7 +77,7 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
return 0;
}
-static struct m_can_ops m_can_pci_ops = {
+static const struct m_can_ops m_can_pci_ops = {
.read_reg = iomap_read_reg,
.write_reg = iomap_write_reg,
.write_fifo = iomap_write_fifo,
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index df0367124b4c..983ab80260dd 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -68,7 +68,7 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
return 0;
}
-static struct m_can_ops m_can_plat_ops = {
+static const struct m_can_ops m_can_plat_ops = {
.read_reg = iomap_read_reg,
.write_reg = iomap_write_reg,
.write_fifo = iomap_write_fifo,
diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c
index a42600dac70d..2f73bf3abad8 100644
--- a/drivers/net/can/m_can/tcan4x5x-core.c
+++ b/drivers/net/can/m_can/tcan4x5x-core.c
@@ -357,7 +357,7 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
return 0;
}
-static struct m_can_ops tcan4x5x_ops = {
+static const struct m_can_ops tcan4x5x_ops = {
.init = tcan4x5x_init,
.read_reg = tcan4x5x_read_reg,
.write_reg = tcan4x5x_write_reg,
@@ -453,10 +453,17 @@ static int tcan4x5x_can_probe(struct spi_device *spi)
goto out_power;
}
- ret = tcan4x5x_init(mcan_class);
+ tcan4x5x_check_wake(priv);
+
+ ret = tcan4x5x_write_tcan_reg(mcan_class, TCAN4X5X_INT_EN, 0);
if (ret) {
- dev_err(&spi->dev, "tcan initialization failed %pe\n",
- ERR_PTR(ret));
+ dev_err(&spi->dev, "Disabling interrupts failed %pe\n", ERR_PTR(ret));
+ goto out_power;
+ }
+
+ ret = tcan4x5x_clear_interrupts(mcan_class);
+ if (ret) {
+ dev_err(&spi->dev, "Clearing interrupts failed %pe\n", ERR_PTR(ret));
goto out_power;
}
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index a6829cdc0e81..8c2a7bc64d3d 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -34,12 +34,6 @@ static const struct can_bittiming_const mscan_bittiming_const = {
.brp_inc = 1,
};
-struct mscan_state {
- u8 mode;
- u8 canrier;
- u8 cantier;
-};
-
static enum can_state state_map[] = {
CAN_STATE_ERROR_ACTIVE,
CAN_STATE_ERROR_WARNING,
diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c
index 31c9c127e24b..b50005397463 100644
--- a/drivers/net/can/peak_canfd/peak_canfd.c
+++ b/drivers/net/can/peak_canfd/peak_canfd.c
@@ -777,7 +777,7 @@ static const struct net_device_ops peak_canfd_netdev_ops = {
};
static int peak_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index b82842718735..c919668bbe7a 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -508,12 +508,6 @@
*/
#define RCANFD_CFFIFO_IDX 0
-/* fCAN clock select register settings */
-enum rcar_canfd_fcanclk {
- RCANFD_CANFDCLK = 0, /* CANFD clock */
- RCANFD_EXTCLK, /* Externally input clock */
-};
-
struct rcar_canfd_global;
struct rcar_canfd_hw_info {
@@ -545,8 +539,8 @@ struct rcar_canfd_global {
struct platform_device *pdev; /* Respective platform device */
struct clk *clkp; /* Peripheral clock */
struct clk *can_clk; /* fCAN clock */
- enum rcar_canfd_fcanclk fcan; /* CANFD or Ext clock */
unsigned long channels_mask; /* Enabled channels mask */
+ bool extclk; /* true: external clock, false: CANFD clock */
bool fdmode; /* CAN FD or Classical CAN only mode */
struct reset_control *rstc1;
struct reset_control *rstc2;
@@ -633,28 +627,28 @@ static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg)
static inline u32 rcar_canfd_read(void __iomem *base, u32 offset)
{
- return readl(base + (offset));
+ return readl(base + offset);
}
static inline void rcar_canfd_write(void __iomem *base, u32 offset, u32 val)
{
- writel(val, base + (offset));
+ writel(val, base + offset);
}
static void rcar_canfd_set_bit(void __iomem *base, u32 reg, u32 val)
{
- rcar_canfd_update(val, val, base + (reg));
+ rcar_canfd_update(val, val, base + reg);
}
static void rcar_canfd_clear_bit(void __iomem *base, u32 reg, u32 val)
{
- rcar_canfd_update(val, 0, base + (reg));
+ rcar_canfd_update(val, 0, base + reg);
}
static void rcar_canfd_update_bit(void __iomem *base, u32 reg,
u32 mask, u32 val)
{
- rcar_canfd_update(mask, val, base + (reg));
+ rcar_canfd_update(mask, val, base + reg);
}
static void rcar_canfd_get_data(struct rcar_canfd_channel *priv,
@@ -665,7 +659,7 @@ static void rcar_canfd_get_data(struct rcar_canfd_channel *priv,
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
*((u32 *)cf->data + i) =
- rcar_canfd_read(priv->base, off + (i * sizeof(u32)));
+ rcar_canfd_read(priv->base, off + i * sizeof(u32));
}
static void rcar_canfd_put_data(struct rcar_canfd_channel *priv,
@@ -675,7 +669,7 @@ static void rcar_canfd_put_data(struct rcar_canfd_channel *priv,
lwords = DIV_ROUND_UP(cf->len, sizeof(u32));
for (i = 0; i < lwords; i++)
- rcar_canfd_write(priv->base, off + (i * sizeof(u32)),
+ rcar_canfd_write(priv->base, off + i * sizeof(u32),
*((u32 *)cf->data + i));
}
@@ -777,7 +771,7 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv)
cfg |= RCANFD_GCFG_CMPOC;
/* Set External Clock if selected */
- if (gpriv->fcan != RCANFD_CANFDCLK)
+ if (gpriv->extclk)
cfg |= RCANFD_GCFG_DCS;
rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg);
@@ -1941,16 +1935,12 @@ static int rcar_canfd_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(gpriv->can_clk),
"cannot get canfd clock\n");
- gpriv->fcan = RCANFD_CANFDCLK;
-
+ /* CANFD clock may be further divided within the IP */
+ fcan_freq = clk_get_rate(gpriv->can_clk) / info->postdiv;
} else {
- gpriv->fcan = RCANFD_EXTCLK;
+ fcan_freq = clk_get_rate(gpriv->can_clk);
+ gpriv->extclk = true;
}
- fcan_freq = clk_get_rate(gpriv->can_clk);
-
- if (gpriv->fcan == RCANFD_CANFDCLK)
- /* CANFD clock is further divided by (1/2) within the IP */
- fcan_freq /= info->postdiv;
addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(addr)) {
@@ -2059,8 +2049,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, gpriv);
- dev_info(dev, "global operational state (clk %d, fdmode %d)\n",
- gpriv->fcan, gpriv->fdmode);
+ dev_info(dev, "global operational state (%s clk, %s mode)\n",
+ gpriv->extclk ? "ext" : "canfd",
+ gpriv->fdmode ? "fd" : "classical");
return 0;
fail_channel:
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 4b2f9cb17fc3..01168db4c106 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -87,6 +87,7 @@ config CAN_PLX_PCI
config CAN_SJA1000_ISA
tristate "ISA Bus based legacy SJA1000 driver"
+ depends on ISA
help
This driver adds legacy support for SJA1000 chips connected to
the ISA bus using I/O port, memory mapped or indirect access.
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 5de1ebb0c6f0..67e5316c6372 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -122,7 +122,6 @@ struct plx_pci_card {
#define TEWS_PCI_VENDOR_ID 0x1498
#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
-#define CTI_PCI_VENDOR_ID 0x12c4
#define CTI_PCI_DEVICE_ID_CRG001 0x0900
#define MOXA_PCI_VENDOR_ID 0x1393
@@ -358,7 +357,7 @@ static const struct pci_device_id plx_pci_tbl[] = {
{
/* Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card */
PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
- CTI_PCI_VENDOR_ID, CTI_PCI_DEVICE_ID_CRG001,
+ PCI_SUBVENDOR_ID_CONNECT_TECH, CTI_PCI_DEVICE_ID_CRG001,
0, 0,
(kernel_ulong_t)&plx_pci_card_info_cti
},
diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
index e1b8533a602e..148d974ebb21 100644
--- a/drivers/net/can/spi/hi311x.c
+++ b/drivers/net/can/spi/hi311x.c
@@ -830,7 +830,6 @@ static int hi3110_can_probe(struct spi_device *spi)
struct device *dev = &spi->dev;
struct net_device *net;
struct hi3110_priv *priv;
- const void *match;
struct clk *clk;
u32 freq;
int ret;
@@ -874,11 +873,7 @@ static int hi3110_can_probe(struct spi_device *spi)
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING;
- match = device_get_match_data(dev);
- if (match)
- priv->model = (enum hi3110_model)(uintptr_t)match;
- else
- priv->model = spi_get_device_id(spi)->driver_data;
+ priv->model = (enum hi3110_model)(uintptr_t)spi_get_device_match_data(spi);
priv->net = net;
priv->clk = clk;
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index 79c4bab5f724..3b8736ff0345 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -28,7 +28,6 @@
#include <linux/device.h>
#include <linux/ethtool.h>
#include <linux/freezer.h>
-#include <linux/gpio.h>
#include <linux/gpio/driver.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -482,9 +481,9 @@ static int mcp251x_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
if (mcp251x_gpio_is_input(offset))
- return GPIOF_DIR_IN;
+ return GPIO_LINE_DIRECTION_IN;
- return GPIOF_DIR_OUT;
+ return GPIO_LINE_DIRECTION_OUT;
}
static int mcp251x_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -1301,7 +1300,6 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
static int mcp251x_can_probe(struct spi_device *spi)
{
- const void *match = device_get_match_data(&spi->dev);
struct net_device *net;
struct mcp251x_priv *priv;
struct clk *clk;
@@ -1339,10 +1337,7 @@ static int mcp251x_can_probe(struct spi_device *spi)
priv->can.clock.freq = freq / 2;
priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
- if (match)
- priv->model = (enum mcp251x_model)(uintptr_t)match;
- else
- priv->model = spi_get_device_id(spi)->driver_data;
+ priv->model = (enum mcp251x_model)(uintptr_t)spi_get_device_match_data(spi);
priv->net = net;
priv->clk = clk;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
index 1d9057dc44f2..3e7526274e34 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
@@ -744,6 +744,7 @@ static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv,
mcp251xfd_chip_interrupts_disable(priv);
mcp251xfd_chip_rx_int_disable(priv);
+ mcp251xfd_timestamp_stop(priv);
mcp251xfd_chip_sleep(priv);
}
@@ -763,6 +764,8 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
if (err)
goto out_chip_stop;
+ mcp251xfd_timestamp_start(priv);
+
err = mcp251xfd_set_bittiming(priv);
if (err)
goto out_chip_stop;
@@ -791,7 +794,7 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv)
return 0;
- out_chip_stop:
+out_chip_stop:
mcp251xfd_dump(priv);
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
@@ -867,18 +870,18 @@ static int mcp251xfd_get_berr_counter(const struct net_device *ndev,
static struct sk_buff *
mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv,
- struct can_frame **cf, u32 *timestamp)
+ struct can_frame **cf, u32 *ts_raw)
{
struct sk_buff *skb;
int err;
- err = mcp251xfd_get_timestamp(priv, timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, ts_raw);
if (err)
return NULL;
skb = alloc_can_err_skb(priv->ndev, cf);
if (skb)
- mcp251xfd_skb_set_timestamp(priv, skb, *timestamp);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, *ts_raw);
return skb;
}
@@ -889,7 +892,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
struct mcp251xfd_rx_ring *ring;
struct sk_buff *skb;
struct can_frame *cf;
- u32 timestamp, rxovif;
+ u32 ts_raw, rxovif;
int err, i;
stats->rx_over_errors++;
@@ -924,14 +927,14 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv)
return err;
}
- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
if (!skb)
return 0;
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -948,12 +951,12 @@ static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv)
static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
{
struct net_device_stats *stats = &priv->ndev->stats;
- u32 bdiag1, timestamp;
+ u32 bdiag1, ts_raw;
struct sk_buff *skb;
struct can_frame *cf = NULL;
int err;
- err = mcp251xfd_get_timestamp(priv, &timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
if (err)
return err;
@@ -1035,8 +1038,8 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv)
if (!cf)
return 0;
- mcp251xfd_skb_set_timestamp(priv, skb, timestamp);
- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, ts_raw);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -1049,7 +1052,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
struct sk_buff *skb;
struct can_frame *cf = NULL;
enum can_state new_state, rx_state, tx_state;
- u32 trec, timestamp;
+ u32 trec, ts_raw;
int err;
err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec);
@@ -1079,7 +1082,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
/* The skb allocation might fail, but can_change_state()
* handles cf == NULL.
*/
- skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp);
+ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw);
can_change_state(priv->ndev, cf, tx_state, rx_state);
if (new_state == CAN_STATE_BUS_OFF) {
@@ -1110,7 +1113,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv)
cf->data[7] = bec.rxerr;
}
- err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
+ err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw);
if (err)
stats->rx_fifo_errors++;
@@ -1135,7 +1138,7 @@ mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode)
return 0;
}
- /* According to MCP2517FD errata DS80000792B 1., during a TX
+ /* According to MCP2517FD errata DS80000792C 1., during a TX
* MAB underflow, the controller will transition to Restricted
* Operation Mode or Listen Only Mode (depending on SERR2LOM).
*
@@ -1180,7 +1183,7 @@ static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
/* TX MAB underflow
*
- * According to MCP2517FD Errata DS80000792B 1. a TX MAB
+ * According to MCP2517FD Errata DS80000792C 1. a TX MAB
* underflow is indicated by SERRIF and MODIF.
*
* In addition to the effects mentioned in the Errata, there
@@ -1224,7 +1227,7 @@ static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv)
/* RX MAB overflow
*
- * According to MCP2517FD Errata DS80000792B 1. a RX MAB
+ * According to MCP2517FD Errata DS80000792C 1. a RX MAB
* overflow is indicated by SERRIF.
*
* In addition to the effects mentioned in the Errata, (most
@@ -1331,7 +1334,8 @@ mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode)
return err;
/* Errata Reference:
- * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2.
+ * mcp2517fd: DS80000789C 3., mcp2518fd: DS80000792E 2.,
+ * mcp251863: DS80000984A 2.
*
* ECC single error correction does not work in all cases:
*
@@ -1576,7 +1580,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id)
handled = IRQ_HANDLED;
} while (1);
- out_fail:
+out_fail:
can_rx_offload_threaded_irq_finish(&priv->offload);
netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n",
@@ -1610,19 +1614,29 @@ static int mcp251xfd_open(struct net_device *ndev)
if (err)
goto out_mcp251xfd_ring_free;
+ mcp251xfd_timestamp_init(priv);
+
err = mcp251xfd_chip_start(priv);
if (err)
goto out_transceiver_disable;
- mcp251xfd_timestamp_init(priv);
clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
can_rx_offload_enable(&priv->offload);
+ priv->wq = alloc_ordered_workqueue("%s-mcp251xfd_wq",
+ WQ_FREEZABLE | WQ_MEM_RECLAIM,
+ dev_name(&spi->dev));
+ if (!priv->wq) {
+ err = -ENOMEM;
+ goto out_can_rx_offload_disable;
+ }
+ INIT_WORK(&priv->tx_work, mcp251xfd_tx_obj_write_sync);
+
err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq,
IRQF_SHARED | IRQF_ONESHOT,
dev_name(&spi->dev), priv);
if (err)
- goto out_can_rx_offload_disable;
+ goto out_destroy_workqueue;
err = mcp251xfd_chip_interrupts_enable(priv);
if (err)
@@ -1632,20 +1646,21 @@ static int mcp251xfd_open(struct net_device *ndev)
return 0;
- out_free_irq:
+out_free_irq:
free_irq(spi->irq, priv);
- out_can_rx_offload_disable:
+out_destroy_workqueue:
+ destroy_workqueue(priv->wq);
+out_can_rx_offload_disable:
can_rx_offload_disable(&priv->offload);
set_bit(MCP251XFD_FLAGS_DOWN, priv->flags);
- mcp251xfd_timestamp_stop(priv);
- out_transceiver_disable:
+out_transceiver_disable:
mcp251xfd_transceiver_disable(priv);
- out_mcp251xfd_ring_free:
+out_mcp251xfd_ring_free:
mcp251xfd_ring_free(priv);
- out_pm_runtime_put:
+out_pm_runtime_put:
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
pm_runtime_put(ndev->dev.parent);
- out_close_candev:
+out_close_candev:
close_candev(ndev);
return err;
@@ -1661,8 +1676,8 @@ static int mcp251xfd_stop(struct net_device *ndev)
hrtimer_cancel(&priv->tx_irq_timer);
mcp251xfd_chip_interrupts_disable(priv);
free_irq(ndev->irq, priv);
+ destroy_workqueue(priv->wq);
can_rx_offload_disable(&priv->offload);
- mcp251xfd_timestamp_stop(priv);
mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED);
mcp251xfd_transceiver_disable(priv);
mcp251xfd_ring_free(priv);
@@ -1808,9 +1823,9 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id,
*effective_speed_hz_slow = xfer[0].effective_speed_hz;
*effective_speed_hz_fast = xfer[1].effective_speed_hz;
- out_kfree_buf_tx:
+out_kfree_buf_tx:
kfree(buf_tx);
- out_kfree_buf_rx:
+out_kfree_buf_rx:
kfree(buf_rx);
return err;
@@ -1924,13 +1939,13 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv)
return 0;
- out_unregister_candev:
+out_unregister_candev:
unregister_candev(ndev);
- out_chip_sleep:
+out_chip_sleep:
mcp251xfd_chip_sleep(priv);
- out_runtime_disable:
+out_runtime_disable:
pm_runtime_disable(ndev->dev.parent);
- out_runtime_put_noidle:
+out_runtime_put_noidle:
pm_runtime_put_noidle(ndev->dev.parent);
mcp251xfd_clks_and_vdd_disable(priv);
@@ -1989,7 +2004,6 @@ MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table);
static int mcp251xfd_probe(struct spi_device *spi)
{
- const void *match;
struct net_device *ndev;
struct mcp251xfd_priv *priv;
struct gpio_desc *rx_int;
@@ -2081,16 +2095,11 @@ static int mcp251xfd_probe(struct spi_device *spi)
priv->pll_enable = pll_enable;
priv->reg_vdd = reg_vdd;
priv->reg_xceiver = reg_xceiver;
-
- match = device_get_match_data(&spi->dev);
- if (match)
- priv->devtype_data = *(struct mcp251xfd_devtype_data *)match;
- else
- priv->devtype_data = *(struct mcp251xfd_devtype_data *)
- spi_get_device_id(spi)->driver_data;
+ priv->devtype_data = *(struct mcp251xfd_devtype_data *)spi_get_device_match_data(spi);
/* Errata Reference:
- * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4.
+ * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789E 4.,
+ * mcp251863: DS80000984A 4.
*
* The SPI can write corrupted data to the RAM at fast SPI
* speeds:
@@ -2150,9 +2159,9 @@ static int mcp251xfd_probe(struct spi_device *spi)
return 0;
- out_can_rx_offload_del:
+out_can_rx_offload_del:
can_rx_offload_del(&priv->offload);
- out_free_candev:
+out_free_candev:
spi->max_speed_hz = priv->spi_max_speed_hz_orig;
free_candev(ndev);
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
index 004eaf96262b..050321345304 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
@@ -94,7 +94,7 @@ static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv,
kfree(buf);
}
- out:
+out:
mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg);
}
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
index 92b7bc7f14b9..65150e762007 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
@@ -397,7 +397,7 @@ mcp251xfd_regmap_crc_read(void *context,
return err;
}
- out:
+out:
memcpy(val_buf, buf_rx->data, val_len);
return 0;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
index bfe4caa0c99d..7bd2bcb5cf87 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
@@ -206,6 +206,7 @@ mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr)
int i, j;
mcp251xfd_for_each_rx_ring(priv, rx_ring, i) {
+ rx_ring->last_valid = timecounter_read(&priv->tc);
rx_ring->head = 0;
rx_ring->tail = 0;
rx_ring->base = *base;
@@ -485,6 +486,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags);
}
+ tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) -
+ ilog2(tx_ring->obj_num);
tx_ring->obj_size = tx_obj_size;
rem = priv->rx_obj_num;
@@ -507,6 +510,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv)
}
rx_ring->obj_num = rx_obj_num;
+ rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) -
+ ilog2(rx_obj_num);
rx_ring->obj_size = rx_obj_size;
priv->rx[i] = rx_ring;
}
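
obj_num_shift_to_u8 records how far an index must be shifted to sit in the top bits of a u8, so that a head-minus-tail subtraction wraps at the FIFO size instead of at 256. A standalone check of that arithmetic, with made-up index values (obj_num must be a power of two, as it is for these rings):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int obj_num = 16;
	const unsigned int shift = 8 - __builtin_ctz(obj_num);	/* ilog2() for powers of two */
	uint8_t chip_head = 2, tail = 14;	/* head has wrapped past tail */
	uint8_t len;

	/* First shift to full u8, subtract, then shift back down. */
	len = (uint8_t)((chip_head << shift) - (tail << shift)) >> shift;
	printf("len = %u (modular result: %u)\n",
	       (unsigned int)len, (chip_head - tail) & (obj_num - 1));
	return 0;
}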
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
index ced8d9c81f8c..fe897f3e4c12 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
@@ -16,23 +16,14 @@
#include "mcp251xfd.h"
-static inline int
-mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
- const struct mcp251xfd_rx_ring *ring,
- u8 *rx_head, bool *fifo_empty)
+static inline bool mcp251xfd_rx_fifo_sta_empty(const u32 fifo_sta)
{
- u32 fifo_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
- &fifo_sta);
- if (err)
- return err;
-
- *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
- *fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+ return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+}
- return 0;
+static inline bool mcp251xfd_rx_fifo_sta_full(const u32 fifo_sta)
+{
+ return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF;
}
static inline int
@@ -80,29 +71,49 @@ mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
}
static int
-mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
- struct mcp251xfd_rx_ring *ring)
+mcp251xfd_get_rx_len(const struct mcp251xfd_priv *priv,
+ const struct mcp251xfd_rx_ring *ring,
+ u8 *len_p)
{
- u32 new_head;
- u8 chip_rx_head;
- bool fifo_empty;
+ const u8 shift = ring->obj_num_shift_to_u8;
+ u8 chip_head, tail, len;
+ u32 fifo_sta;
int err;
- err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head,
- &fifo_empty);
- if (err || fifo_empty)
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
+ &fifo_sta);
+ if (err)
+ return err;
+
+ if (mcp251xfd_rx_fifo_sta_empty(fifo_sta)) {
+ *len_p = 0;
+ return 0;
+ }
+
+ if (mcp251xfd_rx_fifo_sta_full(fifo_sta)) {
+ *len_p = ring->obj_num;
+ return 0;
+ }
+
+ chip_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ err = mcp251xfd_check_rx_tail(priv, ring);
+ if (err)
return err;
+ tail = mcp251xfd_get_rx_tail(ring);
- /* chip_rx_head, is the next RX-Object filled by the HW.
- * The new RX head must be >= the old head.
+ /* First shift to full u8. The subtraction works on signed
+ * values, which keeps the difference steady across the u8
+ * overflow. The right shift acts on len, which is a u8.
*/
- new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
- if (new_head <= ring->head)
- new_head += ring->obj_num;
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(chip_head));
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(tail));
+ BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(len));
- ring->head = new_head;
+ len = (chip_head << shift) - (tail << shift);
+ *len_p = len >> shift;
- return mcp251xfd_check_rx_tail(priv, ring);
+ return 0;
}
static void
@@ -148,8 +159,6 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
memcpy(cfd->data, hw_rx_obj->data, cfd->len);
-
- mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
}
static int
@@ -160,8 +169,26 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
struct net_device_stats *stats = &priv->ndev->stats;
struct sk_buff *skb;
struct canfd_frame *cfd;
+ u64 timestamp;
int err;
+ /* According to mcp2518fd erratum DS80000789E 6. the FIFOCI
+ * bits of a FIFOSTA register (here the RX FIFO head index)
+ * might be corrupted, and we might process past the RX FIFO's
+ * head into old CAN frames.
+ *
+ * Compare the timestamp of currently processed CAN frame with
+ * last valid frame received. Abort with -EBADMSG if an old
+ * CAN frame is detected.
+ */
+ timestamp = timecounter_cyc2time(&priv->tc, hw_rx_obj->ts);
+ if (timestamp <= ring->last_valid) {
+ stats->rx_fifo_errors++;
+
+ return -EBADMSG;
+ }
+ ring->last_valid = timestamp;
+
if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
skb = alloc_canfd_skb(priv->ndev, &cfd);
else
@@ -172,6 +199,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
return 0;
}
+ mcp251xfd_skb_set_timestamp(skb, timestamp);
mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts);
if (err)
@@ -198,51 +226,80 @@ mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
}
static int
+mcp251xfd_handle_rxif_ring_uinc(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_rx_ring *ring,
+ u8 len)
+{
+ int offset;
+ int err;
+
+ if (!len)
+ return 0;
+
+ ring->head += len;
+
+ /* Increment the RX FIFO tail pointer 'len' times in a
+ * single SPI message.
+ *
+ * Note:
+ * Calculate offset, so that the SPI transfer ends on
+ * the last message of the uinc_xfer array, which has
+ * "cs_change == 0", to properly deactivate the chip
+ * select.
+ */
+ offset = ARRAY_SIZE(ring->uinc_xfer) - len;
+ err = spi_sync_transfer(priv->spi,
+ ring->uinc_xfer + offset, len);
+ if (err)
+ return err;
+
+ ring->tail += len;
+
+ return 0;
+}
+
+static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
struct mcp251xfd_rx_ring *ring)
{
struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
- u8 rx_tail, len;
+ u8 rx_tail, len, l;
int err, i;
- err = mcp251xfd_rx_ring_update(priv, ring);
+ err = mcp251xfd_get_rx_len(priv, ring, &len);
if (err)
return err;
- while ((len = mcp251xfd_get_rx_linear_len(ring))) {
- int offset;
-
+ while ((l = mcp251xfd_get_rx_linear_len(ring, len))) {
rx_tail = mcp251xfd_get_rx_tail(ring);
err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
- rx_tail, len);
+ rx_tail, l);
if (err)
return err;
- for (i = 0; i < len; i++) {
+ for (i = 0; i < l; i++) {
err = mcp251xfd_handle_rxif_one(priv, ring,
(void *)hw_rx_obj +
i * ring->obj_size);
- if (err)
+
+ /* -EBADMSG means we're affected by mcp2518fd
+ * erratum DS80000789E 6., i.e. the timestamp
+ * in the RX object is older than the last
+ * valid received CAN frame. Don't process any
+ * further and mark processed frames as good.
+ */
+ if (err == -EBADMSG)
+ return mcp251xfd_handle_rxif_ring_uinc(priv, ring, i);
+ else if (err)
return err;
}
- /* Increment the RX FIFO tail pointer 'len' times in a
- * single SPI message.
- *
- * Note:
- * Calculate offset, so that the SPI transfer ends on
- * the last message of the uinc_xfer array, which has
- * "cs_change == 0", to properly deactivate the chip
- * select.
- */
- offset = ARRAY_SIZE(ring->uinc_xfer) - len;
- err = spi_sync_transfer(priv->spi,
- ring->uinc_xfer + offset, len);
+ err = mcp251xfd_handle_rxif_ring_uinc(priv, ring, l);
if (err)
return err;
- ring->tail += len;
+ len -= l;
}
return 0;
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
index e5bd57b65aaf..f732556d233a 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2019, 2020, 2021 Pengutronix,
+// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
// Based on:
@@ -16,6 +16,11 @@
#include "mcp251xfd.h"
+static inline bool mcp251xfd_tx_fifo_sta_full(u32 fifo_sta)
+{
+ return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);
+}
+
static inline int
mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv,
u8 *tef_tail)
@@ -56,60 +61,43 @@ static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv)
}
static int
-mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq)
-{
- const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- u32 tef_sta;
- int err;
-
- err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta);
- if (err)
- return err;
-
- if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) {
- netdev_err(priv->ndev,
- "Transmit Event FIFO buffer overflow.\n");
- return -ENOBUFS;
- }
-
- netdev_info(priv->ndev,
- "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n",
- tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ?
- "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
- "not empty" : "empty",
- seq, priv->tef->tail, priv->tef->head, tx_ring->head);
-
- /* The Sequence Number in the TEF doesn't match our tef_tail. */
- return -EAGAIN;
-}
-
-static int
mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
const struct mcp251xfd_hw_tef_obj *hw_tef_obj,
unsigned int *frame_len_ptr)
{
struct net_device_stats *stats = &priv->ndev->stats;
+ u32 seq, tef_tail_masked, tef_tail;
struct sk_buff *skb;
- u32 seq, seq_masked, tef_tail_masked, tef_tail;
- seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK,
+ /* Use the MCP2517FD mask on the MCP2518FD, too. We only
+ * compare 7 bits, this is enough to detect old TEF objects.
+ */
+ seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK,
hw_tef_obj->flags);
-
- /* Use the MCP2517FD mask on the MCP2518FD, too. We only
- * compare 7 bits, this should be enough to detect
- * net-yet-completed, i.e. old TEF objects.
- */
- seq_masked = seq &
- field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
tef_tail_masked = priv->tef->tail &
field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK);
- if (seq_masked != tef_tail_masked)
- return mcp251xfd_handle_tefif_recover(priv, seq);
+
+ /* According to mcp2518fd erratum DS80000789E 6. the FIFOCI
+ * bits of a FIFOSTA register (here the TX FIFO tail index)
+ * might be corrupted, and we might process past the TEF FIFO's
+ * head into old CAN frames.
+ *
+ * Compare the sequence number of the currently processed CAN
+ * frame with the expected sequence number. Abort with
+ * -EBADMSG if an old CAN frame is detected.
+ */
+ if (seq != tef_tail_masked) {
+ netdev_dbg(priv->ndev, "%s: chip=0x%02x ring=0x%02x\n", __func__,
+ seq, tef_tail_masked);
+ stats->tx_fifo_errors++;
+
+ return -EBADMSG;
+ }
tef_tail = mcp251xfd_get_tef_tail(priv);
skb = priv->can.echo_skb[tef_tail];
if (skb)
- mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts);
+ mcp251xfd_skb_set_timestamp_raw(priv, skb, hw_tef_obj->ts);
stats->tx_bytes +=
can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload,
tef_tail, hw_tef_obj->ts,
@@ -120,28 +108,44 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv,
return 0;
}
-static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv)
+static int
+mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p)
{
const struct mcp251xfd_tx_ring *tx_ring = priv->tx;
- unsigned int new_head;
- u8 chip_tx_tail;
+ const u8 shift = tx_ring->obj_num_shift_to_u8;
+ u8 chip_tx_tail, tail, len;
+ u32 fifo_sta;
int err;
- err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail);
+ err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr),
+ &fifo_sta);
if (err)
return err;
- /* chip_tx_tail, is the next TX-Object send by the HW.
- * The new TEF head must be >= the old head, ...
+ if (mcp251xfd_tx_fifo_sta_full(fifo_sta)) {
+ *len_p = tx_ring->obj_num;
+ return 0;
+ }
+
+ chip_tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
+
+ err = mcp251xfd_check_tef_tail(priv);
+ if (err)
+ return err;
+ tail = mcp251xfd_get_tef_tail(priv);
+
+ /* First shift to full u8. The subtraction works on signed
+ * values, which keeps the difference steady across the u8
+ * overflow. The right shift acts on len, which is a u8.
*/
- new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail;
- if (new_head <= priv->tef->head)
- new_head += tx_ring->obj_num;
+ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail));
+ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail));
+ BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len));
- /* ... but it cannot exceed the TX head. */
- priv->tef->head = min(new_head, tx_ring->head);
+ len = (chip_tx_tail << shift) - (tail << shift);
+ *len_p = len >> shift;
- return mcp251xfd_check_tef_tail(priv);
+ return 0;
}
static inline int
@@ -182,13 +186,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
u8 tef_tail, len, l;
int err, i;
- err = mcp251xfd_tef_ring_update(priv);
+ err = mcp251xfd_get_tef_len(priv, &len);
if (err)
return err;
tef_tail = mcp251xfd_get_tef_tail(priv);
- len = mcp251xfd_get_tef_len(priv);
- l = mcp251xfd_get_tef_linear_len(priv);
+ l = mcp251xfd_get_tef_linear_len(priv, len);
err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l);
if (err)
return err;
@@ -203,12 +206,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
unsigned int frame_len = 0;
err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len);
- /* -EAGAIN means the Sequence Number in the TEF
- * doesn't match our tef_tail. This can happen if we
- * read the TEF objects too early. Leave loop let the
- * interrupt handler call us again.
+ /* -EBADMSG means we're affected by mcp2518fd erratum
+ * DS80000789E 6., i.e. the Sequence Number in the TEF
+ * doesn't match our tef_tail. Don't process any
+ * further and mark processed frames as good.
*/
- if (err == -EAGAIN)
+ if (err == -EBADMSG)
goto out_netif_wake_queue;
if (err)
return err;
@@ -216,13 +219,15 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv)
total_frame_len += frame_len;
}
- out_netif_wake_queue:
+out_netif_wake_queue:
len = i; /* number of handled good TEFs */
if (len) {
struct mcp251xfd_tef_ring *ring = priv->tef;
struct mcp251xfd_tx_ring *tx_ring = priv->tx;
int offset;
+ ring->head += len;
+
/* Increment the TEF FIFO tail pointer 'len' times in
* a single SPI message.
*
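
The workaround above hinges on the 7-bit sequence number carried in each TEF object matching the driver's free-running tail. A standalone sketch of the masked extract-and-compare; the field position below is a demo assumption, in the driver it comes from MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK:

#include <stdint.h>
#include <stdio.h>

#define DEMO_SEQ_MASK	(0x7fu << 9)	/* 7-bit seq field, demo placement */

/* FIELD_GET()-style extraction: mask, then shift down by the mask's LSB. */
static uint32_t field_get(uint32_t mask, uint32_t reg)
{
	return (reg & mask) >> __builtin_ctz(mask);
}

/* field_mask()-style helper: the mask shifted down to bit 0 (0x7f here). */
static uint32_t field_mask(uint32_t mask)
{
	return mask >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t flags = (42u << 9) | 0x5;	/* TEF object flags, seq = 42 */
	uint32_t tef_tail = 128 + 42;		/* free-running tail, already wrapped */
	uint32_t seq = field_get(DEMO_SEQ_MASK, flags);
	uint32_t tail_masked = tef_tail & field_mask(DEMO_SEQ_MASK);

	printf("seq=%u tail_masked=%u -> %s\n", seq, tail_masked,
	       seq == tail_masked ? "in sync" : "-EBADMSG");
	return 0;
}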
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
index 712e09186987..202ca0d24d03 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
@@ -2,7 +2,7 @@
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
-// Copyright (c) 2021 Pengutronix,
+// Copyright (c) 2021, 2023 Pengutronix,
// Marc Kleine-Budde <kernel@pengutronix.de>
//
@@ -11,20 +11,20 @@
#include "mcp251xfd.h"
-static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc)
+static u64 mcp251xfd_timestamp_raw_read(const struct cyclecounter *cc)
{
const struct mcp251xfd_priv *priv;
- u32 timestamp = 0;
+ u32 ts_raw = 0;
int err;
priv = container_of(cc, struct mcp251xfd_priv, cc);
- err = mcp251xfd_get_timestamp(priv, &timestamp);
+ err = mcp251xfd_get_timestamp_raw(priv, &ts_raw);
if (err)
netdev_err(priv->ndev,
"Error %d while reading timestamp. HW timestamps may be inaccurate.",
err);
- return timestamp;
+ return ts_raw;
}
static void mcp251xfd_timestamp_work(struct work_struct *work)
@@ -39,28 +39,21 @@ static void mcp251xfd_timestamp_work(struct work_struct *work)
MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
}
-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
- struct sk_buff *skb, u32 timestamp)
-{
- struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
- u64 ns;
-
- ns = timecounter_cyc2time(&priv->tc, timestamp);
- hwtstamps->hwtstamp = ns_to_ktime(ns);
-}
-
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv)
{
struct cyclecounter *cc = &priv->cc;
- cc->read = mcp251xfd_timestamp_read;
+ cc->read = mcp251xfd_timestamp_raw_read;
cc->mask = CYCLECOUNTER_MASK(32);
cc->shift = 1;
cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift);
- timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
-
INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work);
+}
+
+void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv)
+{
+ timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns());
schedule_delayed_work(&priv->timestamp,
MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ);
}
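
Splitting mcp251xfd_timestamp_init() from the new mcp251xfd_timestamp_start() keeps the cyclecounter parameters set up once while the timecounter is re-armed on every chip start. The mult/shift pair is what turns raw TBC ticks into nanoseconds; a standalone check of that conversion, assuming the 40 MHz controller clock these chips commonly run at and the shift of 1 from the init function (clocksource_hz2mult() additionally rounds, which the demo skips):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL

int main(void)
{
	const uint32_t freq = 40000000;	/* 40 MHz, board assumption */
	const uint32_t shift = 1;
	uint64_t mult = (NSEC_PER_SEC << shift) / freq;	/* 50 */
	uint32_t ts_raw = 80;		/* raw TBC counter reading */
	uint64_t ns = ((uint64_t)ts_raw * mult) >> shift;

	printf("mult=%llu: %u ticks = %llu ns\n",
	       (unsigned long long)mult, ts_raw, (unsigned long long)ns);
	return 0;
}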
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
index 160528d3cc26..b1de8052a45c 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
@@ -131,6 +131,39 @@ mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
tx_obj->xfer[0].len = len;
}
+static void mcp251xfd_tx_failure_drop(const struct mcp251xfd_priv *priv,
+ struct mcp251xfd_tx_ring *tx_ring,
+ int err)
+{
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int frame_len = 0;
+ u8 tx_head;
+
+ tx_ring->head--;
+ stats->tx_dropped++;
+ tx_head = mcp251xfd_get_tx_head(tx_ring);
+ can_free_echo_skb(ndev, tx_head, &frame_len);
+ netdev_completed_queue(ndev, 1, frame_len);
+ netif_wake_queue(ndev);
+
+ if (net_ratelimit())
+ netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+}
+
+void mcp251xfd_tx_obj_write_sync(struct work_struct *work)
+{
+ struct mcp251xfd_priv *priv = container_of(work, struct mcp251xfd_priv,
+ tx_work);
+ struct mcp251xfd_tx_obj *tx_obj = priv->tx_work_obj;
+ struct mcp251xfd_tx_ring *tx_ring = priv->tx;
+ int err;
+
+ err = spi_sync(priv->spi, &tx_obj->msg);
+ if (err)
+ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
+}
+
static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
struct mcp251xfd_tx_obj *tx_obj)
{
@@ -162,6 +195,11 @@ static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
return false;
}
+static bool mcp251xfd_work_busy(struct work_struct *work)
+{
+ return work_busy(work);
+}
+
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
@@ -175,7 +213,8 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
if (can_dev_dropped_skb(ndev, skb))
return NETDEV_TX_OK;
- if (mcp251xfd_tx_busy(priv, tx_ring))
+ if (mcp251xfd_tx_busy(priv, tx_ring) ||
+ mcp251xfd_work_busy(&priv->tx_work))
return NETDEV_TX_BUSY;
tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
@@ -193,13 +232,13 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
netdev_sent_queue(priv->ndev, frame_len);
err = mcp251xfd_tx_obj_write(priv, tx_obj);
- if (err)
- goto out_err;
-
- return NETDEV_TX_OK;
-
- out_err:
- netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);
+ if (err == -EBUSY) {
+ netif_stop_queue(ndev);
+ priv->tx_work_obj = tx_obj;
+ queue_work(priv->wq, &priv->tx_work);
+ } else if (err) {
+ mcp251xfd_tx_failure_drop(priv, tx_ring, err);
+ }
return NETDEV_TX_OK;
}
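
The new error handling above parks the prepared transfer and retries it from a worker, where spi_sync() may sleep. A minimal sketch of that stop-queue-and-defer pattern (separate from the patch; struct example_priv and the example_* helpers are placeholders):

#include <linux/netdevice.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>

struct example_priv {
	struct spi_device *spi;
	struct spi_message *parked_msg;
	struct workqueue_struct *wq;
	struct work_struct tx_work;
};

/* Placeholder helpers, assumed to exist elsewhere. */
int example_submit_async(struct example_priv *priv, struct sk_buff *skb);
void example_tx_drop(struct example_priv *priv);

static void example_tx_work(struct work_struct *work)
{
	struct example_priv *priv = container_of(work, struct example_priv,
						 tx_work);

	/* Process context: a blocking retry is allowed here. */
	if (spi_sync(priv->spi, priv->parked_msg))
		example_tx_drop(priv);	/* undo queue accounting */
}

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *ndev)
{
	struct example_priv *priv = netdev_priv(ndev);
	int err;

	err = example_submit_async(priv, skb);
	if (err == -EBUSY) {
		/* Controller busy: pause the stack and defer. */
		netif_stop_queue(ndev);
		queue_work(priv->wq, &priv->tx_work);
	} else if (err) {
		example_tx_drop(priv);
	}

	return NETDEV_TX_OK;
}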
diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
index 24510b3b8020..dcbbd2b2fae8 100644
--- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
+++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h
@@ -2,7 +2,7 @@
*
* mcp251xfd - Microchip MCP251xFD Family CAN controller driver
*
- * Copyright (c) 2019, 2020, 2021 Pengutronix,
+ * Copyright (c) 2019, 2020, 2021, 2023 Pengutronix,
* Marc Kleine-Budde <kernel@pengutronix.de>
* Copyright (c) 2019 Martin Sperl <kernel@martin.sperl.org>
*/
@@ -524,6 +524,7 @@ struct mcp251xfd_tef_ring {
/* u8 obj_num equals tx_ring->obj_num */
/* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */
+ /* u8 obj_num_shift_to_u8 equals tx_ring->obj_num_shift_to_u8 */
union mcp251xfd_write_reg_buf irq_enable_buf;
struct spi_transfer irq_enable_xfer;
@@ -542,6 +543,7 @@ struct mcp251xfd_tx_ring {
u8 nr;
u8 fifo_nr;
u8 obj_num;
+ u8 obj_num_shift_to_u8;
u8 obj_size;
struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX];
@@ -552,10 +554,14 @@ struct mcp251xfd_rx_ring {
unsigned int head;
unsigned int tail;
+ /* timestamp of the last valid received CAN frame */
+ u64 last_valid;
+
u16 base;
u8 nr;
u8 fifo_nr;
u8 obj_num;
+ u8 obj_num_shift_to_u8;
u8 obj_size;
union mcp251xfd_write_reg_buf irq_enable_buf;
@@ -633,6 +639,10 @@ struct mcp251xfd_priv {
struct mcp251xfd_rx_ring *rx[MCP251XFD_FIFO_RX_NUM];
struct mcp251xfd_tx_ring tx[MCP251XFD_FIFO_TX_NUM];
+ struct workqueue_struct *wq;
+ struct work_struct tx_work;
+ struct mcp251xfd_tx_obj *tx_work_obj;
+
DECLARE_BITMAP(flags, __MCP251XFD_FLAGS_SIZE__);
u8 rx_ring_num;
@@ -805,10 +815,27 @@ mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv,
return data;
}
-static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv,
- u32 *timestamp)
+static inline int mcp251xfd_get_timestamp_raw(const struct mcp251xfd_priv *priv,
+ u32 *ts_raw)
+{
+ return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, ts_raw);
+}
+
+static inline void mcp251xfd_skb_set_timestamp(struct sk_buff *skb, u64 ns)
{
- return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp);
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static inline
+void mcp251xfd_skb_set_timestamp_raw(const struct mcp251xfd_priv *priv,
+ struct sk_buff *skb, u32 ts_raw)
+{
+ u64 ns;
+
+ ns = timecounter_cyc2time(&priv->tc, ts_raw);
+ mcp251xfd_skb_set_timestamp(skb, ns);
}
static inline u16 mcp251xfd_get_tef_obj_addr(u8 n)
@@ -857,17 +884,8 @@ static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv)
return priv->tef->tail & (priv->tx->obj_num - 1);
}
-static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv)
-{
- return priv->tef->head - priv->tef->tail;
-}
-
-static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv)
+static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv, u8 len)
{
- u8 len;
-
- len = mcp251xfd_get_tef_len(priv);
-
return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv));
}
@@ -910,18 +928,9 @@ static inline u8 mcp251xfd_get_rx_tail(const struct mcp251xfd_rx_ring *ring)
return ring->tail & (ring->obj_num - 1);
}
-static inline u8 mcp251xfd_get_rx_len(const struct mcp251xfd_rx_ring *ring)
-{
- return ring->head - ring->tail;
-}
-
static inline u8
-mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring)
+mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring, u8 len)
{
- u8 len;
-
- len = mcp251xfd_get_rx_len(ring);
-
return min_t(u8, len, ring->obj_num - mcp251xfd_get_rx_tail(ring));
}
@@ -947,11 +956,11 @@ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv);
int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv);
int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv);
int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv);
-void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv,
- struct sk_buff *skb, u32 timestamp);
void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv);
+void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv);
void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv);
+void mcp251xfd_tx_obj_write_sync(struct work_struct *work);
netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
struct net_device *ndev);
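
The reworked *_linear_len() helpers above take the pending length and cap it at the distance from the tail to the end of the power-of-two ring, so a single burst never crosses the wrap point. A generic sketch (separate from the patch):

#include <linux/minmax.h>
#include <linux/types.h>

/* head and tail are free-running counters; obj_num is a power of two. */
static inline u8 example_ring_linear_len(u8 head, u8 tail, u8 obj_num)
{
	u8 len = head - tail;			/* pending objects */
	u8 tail_index = tail & (obj_num - 1);	/* position in the ring */

	return min_t(u8, len, obj_num - tail_index);
}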
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bd58c636d465..3e1fba12c0c3 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -91,6 +91,7 @@ config CAN_KVASER_USB
- Kvaser Leaf Light R v2
- Kvaser Mini PCI Express HS
- Kvaser Mini PCI Express 2xHS
+ - Kvaser Mini PCIe 1xCAN
- Kvaser USBcan Light 2xHS
- Kvaser USBcan II HS/HS
- Kvaser USBcan II HS/LS
@@ -111,12 +112,14 @@ config CAN_KVASER_USB
- Kvaser USBcan Light 4xHS
- Kvaser USBcan Pro 2xHS v2
- Kvaser USBcan Pro 4xHS
+ - Kvaser USBcan Pro 5xCAN
- Kvaser USBcan Pro 5xHS
- Kvaser U100
- Kvaser U100P
- Kvaser U100S
- ATI Memorator Pro 2xHS v2
- ATI USBcan Pro 2xHS v2
+ - Vining 800
If unsure, say N.
diff --git a/drivers/net/can/usb/etas_es58x/es58x_devlink.c b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
index 635edeb8f68c..eee20839d96f 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_devlink.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_devlink.c
@@ -215,7 +215,7 @@ static int es58x_devlink_info_get(struct devlink *devlink,
struct es58x_sw_version *fw_ver = &es58x_dev->firmware_version;
struct es58x_sw_version *bl_ver = &es58x_dev->bootloader_version;
struct es58x_hw_revision *hw_rev = &es58x_dev->hardware_revision;
- char buf[max(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))];
+ char buf[MAX(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))];
int ret = 0;
if (es58x_sw_version_is_valid(fw_ver)) {
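
The max() -> MAX() change above matters because the lower-case max() expands to a statement expression, which is not an integer constant expression and would turn the buffer into a variable-length array; MAX() from <linux/minmax.h> is a plain conditional expression. A small sketch (separate from the patch):

#include <linux/minmax.h>
#include <linux/string.h>

static void example_format_version(char *out, size_t outlen)
{
	/* Compile-time constant size: big enough for either format. */
	char buf[MAX(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))];

	strscpy(buf, "12.34.56", sizeof(buf));
	strscpy(out, buf, outlen);
}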
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 65c962f76898..bc86e9b329fd 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -40,6 +40,9 @@
#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0
#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8
+#define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0
+#define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30
+
#define GS_USB_ENDPOINT_IN 1
#define GS_USB_ENDPOINT_OUT 2
@@ -1145,7 +1148,7 @@ static int gs_usb_set_phys_id(struct net_device *netdev,
}
static int gs_usb_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct gs_can *dev = netdev_priv(netdev);
@@ -1530,6 +1533,8 @@ static const struct usb_device_id gs_usb_table[] = {
USB_CES_CANEXT_FD_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID,
USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_XYLANTA_SAINT3_VENDOR_ID,
+ USB_XYLANTA_SAINT3_PRODUCT_ID, 0) },
{} /* Terminating entry */
};
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
index 8faf8a462c05..daa34b532aa8 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
@@ -89,6 +89,9 @@
#define USB_HYBRID_CANLIN_PRODUCT_ID 0x0115
#define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 0x0116
#define USB_LEAF_V3_PRODUCT_ID 0x0117
+#define USB_VINING_800_PRODUCT_ID 0x0119
+#define USB_USBCAN_PRO_5XCAN_PRODUCT_ID 0x011A
+#define USB_MINI_PCIE_1XCAN_PRODUCT_ID 0x011B
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = {
.quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP,
@@ -125,6 +128,7 @@ static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_liste
static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = {
.quirks = 0,
+ .family = KVASER_LEAF,
.ops = &kvaser_usb_leaf_dev_ops,
};
@@ -238,6 +242,12 @@ static const struct usb_device_id kvaser_usb_table[] = {
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_V3_PRODUCT_ID),
.driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_VINING_800_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5XCAN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
+ { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_1XCAN_PRODUCT_ID),
+ .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra },
{ }
};
MODULE_DEVICE_TABLE(usb, kvaser_usb_table);
@@ -294,7 +304,7 @@ int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd,
}
usb_free_urb(urb);
- return 0;
+ return err;
}
int kvaser_usb_can_rx_over_error(struct net_device *netdev)
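
The return-value fix above stops the helper from masking usb_submit_urb() failures. A sketch of the corrected async-submit shape (separate from the patch; the anchor and the urb setup are illustrative):

#include <linux/usb.h>

static int example_send_cmd_async(struct usb_anchor *anchor)
{
	struct urb *urb;
	int err;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	/* ... fill the urb (endpoint, buffer, completion) ... */

	usb_anchor_urb(urb, anchor);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err)
		usb_unanchor_urb(urb);

	usb_free_urb(urb);	/* drop the local reference */
	return err;		/* previously hardcoded to 0 */
}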
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 1efa39e134f4..3d68fef46ded 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -897,7 +897,7 @@ int peak_usb_set_eeprom(struct net_device *netdev,
return 0;
}
-int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index f6cf84bb718f..abab00930b9d 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -145,7 +145,7 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv);
int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
-int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info);
+int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info);
/* common 32-bit CAN channel ID ethtool management */
int peak_usb_get_eeprom_len(struct net_device *netdev);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 285635c23443..f67e85807100 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -140,7 +140,7 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu)
!can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
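
dev->mtu can be read locklessly from fast paths, so the store above is annotated to prevent load/store tearing; readers pair it with READ_ONCE(). The identical vxcan change below follows the same pattern. A sketch of a paired reader (separate from the patch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool example_skb_fits_mtu(const struct net_device *dev,
				 const struct sk_buff *skb)
{
	/* Pairs with WRITE_ONCE(dev->mtu, new_mtu) on the writer side. */
	return skb->len <= READ_ONCE(dev->mtu);
}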
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index f7fabba707ea..9e1b7d41005f 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -135,7 +135,7 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
!can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index fae0120473f8..d944911d7f05 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -6,7 +6,7 @@
* Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy
*
* Description:
- * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
+ * This driver is developed for the AXI CAN IP, AXI CANFD IP, and the Zynq CANPS and CANFD PS controllers.
*/
#include <linux/bitfield.h>
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index 3092b391031a..2d10b4d6cfbb 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -102,6 +102,7 @@ config NET_DSA_SMSC_LAN9303
tristate
select NET_DSA_TAG_LAN9303
select REGMAP
+ imply SMSC_PHY
help
This enables support for the Microchip LAN9303/LAN9354 3 port ethernet
switch chips.
@@ -126,7 +127,7 @@ config NET_DSA_SMSC_LAN9303_MDIO
config NET_DSA_VITESSE_VSC73XX
tristate
- select NET_DSA_TAG_NONE
+ select NET_DSA_TAG_VSC73XX_8021Q
select FIXED_PHY
select VITESSE_PHY
select GPIOLIB
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index b2eeff04f4c8..0783fc121bbb 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1266,95 +1266,70 @@ static void b53_adjust_63xx_rgmii(struct dsa_switch *ds, int port,
phy_modes(interface));
}
-static void b53_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static void b53_adjust_531x5_rgmii(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
{
struct b53_device *dev = ds->priv;
- struct ethtool_keee *p = &dev->ports[port].eee;
- u8 rgmii_ctrl = 0, reg = 0, off;
- bool tx_pause = false;
- bool rx_pause = false;
-
- if (!phy_is_pseudo_fixed_link(phydev))
- return;
+ u8 rgmii_ctrl = 0, off;
- /* Enable flow control on BCM5301x's CPU port */
- if (is5301x(dev) && dsa_is_cpu_port(ds, port))
- tx_pause = rx_pause = true;
+ if (port == dev->imp_port)
+ off = B53_RGMII_CTRL_IMP;
+ else
+ off = B53_RGMII_CTRL_P(port);
- if (phydev->pause) {
- if (phydev->asym_pause)
- tx_pause = true;
- rx_pause = true;
- }
+	/* Configure the port RGMII clock delay: DLL disabled and
+	 * tx_clk aligned timing (restoring the reset defaults)
+	 */
+ b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
+ rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
+ RGMII_CTRL_TIMING_SEL);
- b53_force_port_config(dev, port, phydev->speed, phydev->duplex,
- tx_pause, rx_pause);
- b53_force_link(dev, port, phydev->link);
+ /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
+ * sure that we enable the port TX clock internal delay to
+ * account for this internal delay that is inserted, otherwise
+ * the switch won't be able to receive correctly.
+ *
+	 * PHY_INTERFACE_MODE_RGMII means that we are not introducing
+	 * any delay on either transmission or reception, so the
+	 * BCM53125 must also be configured accordingly to account for
+	 * the lack of delay and introduce one itself.
+ *
+ * The BCM53125 switch has its RX clock and TX clock control
+ * swapped, hence the reason why we modify the TX clock path in
+ * the "RGMII" case
+ */
+ if (interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
+ if (interface == PHY_INTERFACE_MODE_RGMII)
+ rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
+ rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
+ b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
- if (is63xx(dev) && port >= B53_63XX_RGMII0)
- b53_adjust_63xx_rgmii(ds, port, phydev->interface);
+ dev_info(ds->dev, "Configured port %d for %s\n", port,
+ phy_modes(interface));
+}
- if (is531x5(dev) && phy_interface_is_rgmii(phydev)) {
- if (port == dev->imp_port)
- off = B53_RGMII_CTRL_IMP;
- else
- off = B53_RGMII_CTRL_P(port);
+static void b53_adjust_5325_mii(struct dsa_switch *ds, int port)
+{
+ struct b53_device *dev = ds->priv;
+ u8 reg = 0;
- /* Configure the port RGMII clock delay by DLL disabled and
- * tx_clk aligned timing (restoring to reset defaults)
- */
- b53_read8(dev, B53_CTRL_PAGE, off, &rgmii_ctrl);
- rgmii_ctrl &= ~(RGMII_CTRL_DLL_RXC | RGMII_CTRL_DLL_TXC |
- RGMII_CTRL_TIMING_SEL);
-
- /* PHY_INTERFACE_MODE_RGMII_TXID means TX internal delay, make
- * sure that we enable the port TX clock internal delay to
- * account for this internal delay that is inserted, otherwise
- * the switch won't be able to receive correctly.
- *
- * PHY_INTERFACE_MODE_RGMII means that we are not introducing
- * any delay neither on transmission nor reception, so the
- * BCM53125 must also be configured accordingly to account for
- * the lack of delay and introduce
- *
- * The BCM53125 switch has its RX clock and TX clock control
- * swapped, hence the reason why we modify the TX clock path in
- * the "RGMII" case
- */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
- rgmii_ctrl |= RGMII_CTRL_DLL_TXC;
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
- rgmii_ctrl |= RGMII_CTRL_DLL_TXC | RGMII_CTRL_DLL_RXC;
- rgmii_ctrl |= RGMII_CTRL_TIMING_SEL;
- b53_write8(dev, B53_CTRL_PAGE, off, rgmii_ctrl);
-
- dev_info(ds->dev, "Configured port %d for %s\n", port,
- phy_modes(phydev->interface));
- }
+ b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ &reg);
- /* configure MII port if necessary */
- if (is5325(dev)) {
+ /* reverse mii needs to be enabled */
+ if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
+ b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
+ reg | PORT_OVERRIDE_RV_MII_25);
b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
&reg);
- /* reverse mii needs to be enabled */
if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
- b53_write8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
- reg | PORT_OVERRIDE_RV_MII_25);
- b53_read8(dev, B53_CTRL_PAGE, B53_PORT_OVERRIDE_CTRL,
- &reg);
-
- if (!(reg & PORT_OVERRIDE_RV_MII_25)) {
- dev_err(ds->dev,
- "Failed to enable reverse MII mode\n");
- return;
- }
+ dev_err(ds->dev,
+ "Failed to enable reverse MII mode\n");
+ return;
}
}
-
- /* Re-negotiate EEE if it was enabled already */
- p->eee_enabled = b53_eee_init(ds, port, phydev);
}
void b53_port_event(struct dsa_switch *ds, int port)
@@ -1408,30 +1383,48 @@ static void b53_phylink_get_caps(struct dsa_switch *ds, int port,
dev->ops->phylink_get_caps(dev, port, config);
}
-static struct phylink_pcs *b53_phylink_mac_select_pcs(struct dsa_switch *ds,
- int port,
+static struct phylink_pcs *b53_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
- struct b53_device *dev = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct b53_device *dev = dp->ds->priv;
if (!dev->ops->phylink_mac_select_pcs)
return NULL;
- return dev->ops->phylink_mac_select_pcs(dev, port, interface);
+ return dev->ops->phylink_mac_select_pcs(dev, dp->index, interface);
}
-void b53_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state)
+static void b53_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ phy_interface_t interface = state->interface;
+ struct dsa_switch *ds = dp->ds;
+ struct b53_device *dev = ds->priv;
+ int port = dp->index;
+
+ if (is63xx(dev) && port >= B53_63XX_RGMII0)
+ b53_adjust_63xx_rgmii(ds, port, interface);
+
+ if (mode == MLO_AN_FIXED) {
+ if (is531x5(dev) && phy_interface_mode_is_rgmii(interface))
+ b53_adjust_531x5_rgmii(ds, port, interface);
+
+ /* configure MII port if necessary */
+ if (is5325(dev))
+ b53_adjust_5325_mii(ds, port);
+ }
}
-EXPORT_SYMBOL(b53_phylink_mac_config);
-void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface)
+static void b53_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
{
- struct b53_device *dev = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct b53_device *dev = dp->ds->priv;
+ int port = dp->index;
if (mode == MLO_AN_PHY)
return;
@@ -1445,24 +1438,31 @@ void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
dev->ops->serdes_link_set)
dev->ops->serdes_link_set(dev, port, mode, interface, false);
}
-EXPORT_SYMBOL(b53_phylink_mac_link_down);
-void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev,
- int speed, int duplex,
- bool tx_pause, bool rx_pause)
+static void b53_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
+ unsigned int mode,
+ phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct dsa_switch *ds = dp->ds;
struct b53_device *dev = ds->priv;
+ struct ethtool_keee *p = &dev->ports[dp->index].eee;
+ int port = dp->index;
- if (is63xx(dev) && port >= B53_63XX_RGMII0)
- b53_adjust_63xx_rgmii(ds, port, interface);
-
- if (mode == MLO_AN_PHY)
+ if (mode == MLO_AN_PHY) {
+ /* Re-negotiate EEE if it was enabled already */
+ p->eee_enabled = b53_eee_init(ds, port, phydev);
return;
+ }
if (mode == MLO_AN_FIXED) {
+ /* Force flow control on BCM5301x's CPU port */
+ if (is5301x(dev) && dsa_is_cpu_port(ds, port))
+ tx_pause = rx_pause = true;
+
b53_force_port_config(dev, port, speed, duplex,
tx_pause, rx_pause);
b53_force_link(dev, port, true);
@@ -1473,7 +1473,6 @@ void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
dev->ops->serdes_link_set)
dev->ops->serdes_link_set(dev, port, mode, interface, true);
}
-EXPORT_SYMBOL(b53_phylink_mac_link_up);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack)
@@ -2257,6 +2256,9 @@ static int b53_change_mtu(struct dsa_switch *ds, int port, int mtu)
if (is5325(dev) || is5365(dev))
return -EOPNOTSUPP;
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
enable_jumbo = (mtu >= JMS_MIN_SIZE);
allow_10_100 = (dev->chip_id == BCM583XX_DEVICE_ID);
@@ -2268,6 +2270,13 @@ static int b53_get_max_mtu(struct dsa_switch *ds, int port)
return JMS_MAX_SIZE;
}
+static const struct phylink_mac_ops b53_phylink_mac_ops = {
+ .mac_select_pcs = b53_phylink_mac_select_pcs,
+ .mac_config = b53_phylink_mac_config,
+ .mac_link_down = b53_phylink_mac_link_down,
+ .mac_link_up = b53_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = b53_setup,
@@ -2278,12 +2287,7 @@ static const struct dsa_switch_ops b53_switch_ops = {
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.phy_read = b53_phy_read16,
.phy_write = b53_phy_write16,
- .adjust_link = b53_adjust_link,
.phylink_get_caps = b53_phylink_get_caps,
- .phylink_mac_select_pcs = b53_phylink_mac_select_pcs,
- .phylink_mac_config = b53_phylink_mac_config,
- .phylink_mac_link_down = b53_phylink_mac_link_down,
- .phylink_mac_link_up = b53_phylink_mac_link_up,
.port_enable = b53_enable_port,
.port_disable = b53_disable_port,
.get_mac_eee = b53_get_mac_eee,
@@ -2726,6 +2730,7 @@ struct b53_device *b53_switch_alloc(struct device *base,
dev->priv = priv;
dev->ops = ops;
ds->ops = &b53_switch_ops;
+ ds->phylink_mac_ops = &b53_phylink_mac_ops;
dev->vlan_enabled = true;
	/* Let DSA handle the case where multiple bridges span the same switch
* device and different VLAN awareness settings are requested, which
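
The conversion above moves the MAC operations off dsa_switch_ops into a dedicated phylink_mac_ops table whose callbacks receive a phylink_config pointer; dsa_phylink_to_port() recovers the dsa_port. A minimal sketch of the shape (separate from the patch; the example_* names are placeholders):

#include <linux/phylink.h>
#include <net/dsa.h>

static void example_mac_config(struct phylink_config *config,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct dsa_port *dp = dsa_phylink_to_port(config);

	/* program dp->ds->priv for port dp->index and state->interface */
}

static void example_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
}

static void example_mac_link_up(struct phylink_config *config,
				struct phy_device *phydev,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
}

static const struct phylink_mac_ops example_phylink_mac_ops = {
	.mac_config	= example_mac_config,
	.mac_link_down	= example_mac_link_down,
	.mac_link_up	= example_mac_link_up,
};

/* At probe time, before dsa_register_switch():
 *	ds->phylink_mac_ops = &example_phylink_mac_ops;
 */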
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index c13a907947f1..05141176daf5 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -352,18 +352,6 @@ int b53_br_flags(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack);
int b53_setup_devlink_resources(struct dsa_switch *ds);
void b53_port_event(struct dsa_switch *ds, int port);
-void b53_phylink_mac_config(struct dsa_switch *ds, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
-void b53_phylink_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface);
-void b53_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev,
- int speed, int duplex,
- bool tx_pause, bool rx_pause);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
struct netlink_ext_ack *extack);
int b53_vlan_add(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index bc77ee9e6d0a..0e663ec0c12a 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -675,8 +675,10 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
of_remove_property(child, prop);
phydev = of_phy_find_device(child);
- if (phydev)
+ if (phydev) {
phy_device_remove(phydev);
+ phy_device_free(phydev);
+ }
}
err = mdiobus_register(priv->user_mii_bus);
@@ -740,16 +742,19 @@ static void bcm_sf2_sw_get_caps(struct dsa_switch *ds, int port,
MAC_10 | MAC_100 | MAC_1000;
}
-static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
u32 id_mode_dis = 0, port_mode;
+ struct bcm_sf2_priv *priv;
u32 reg_rgmii_ctrl;
u32 reg;
- if (port == core_readl(priv, CORE_IMP0_PRT_ID))
+ priv = bcm_sf2_to_priv(dp->ds);
+
+ if (dp->index == core_readl(priv, CORE_IMP0_PRT_ID))
return;
switch (state->interface) {
@@ -770,7 +775,7 @@ static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
return;
}
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, dp->index);
/* Clear id_mode_dis bit, and the existing port mode, let
* RGMII_MODE_EN bet set by mac_link_{up,down}
@@ -809,13 +814,16 @@ static void bcm_sf2_sw_mac_link_set(struct dsa_switch *ds, int port,
reg_writel(priv, reg, reg_rgmii_ctrl);
}
-static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct bcm_sf2_priv *priv;
+ int port = dp->index;
u32 reg, offset;
+ priv = bcm_sf2_to_priv(dp->ds);
if (priv->wol_ports_mask & BIT(port))
return;
@@ -824,23 +832,26 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
reg &= ~LINK_STS;
core_writel(priv, reg, offset);
- bcm_sf2_sw_mac_link_set(ds, port, interface, false);
+ bcm_sf2_sw_mac_link_set(dp->ds, port, interface, false);
}
-static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
+static void bcm_sf2_sw_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- struct ethtool_keee *p = &priv->dev->ports[port].eee;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct bcm_sf2_priv *priv;
u32 reg_rgmii_ctrl = 0;
+ struct ethtool_keee *p;
+ int port = dp->index;
u32 reg, offset;
- bcm_sf2_sw_mac_link_set(ds, port, interface, true);
+ bcm_sf2_sw_mac_link_set(dp->ds, port, interface, true);
+ priv = bcm_sf2_to_priv(dp->ds);
offset = bcm_sf2_port_override_offset(priv, port);
if (phy_interface_mode_is_rgmii(interface) ||
@@ -886,8 +897,10 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
core_writel(priv, reg, offset);
- if (mode == MLO_AN_PHY && phydev)
- p->eee_enabled = b53_eee_init(ds, port, phydev);
+ if (mode == MLO_AN_PHY && phydev) {
+ p = &priv->dev->ports[port].eee;
+ p->eee_enabled = b53_eee_init(dp->ds, port, phydev);
+ }
}
static void bcm_sf2_sw_fixed_state(struct dsa_switch *ds, int port,
@@ -1196,6 +1209,12 @@ static int bcm_sf2_sw_get_sset_count(struct dsa_switch *ds, int port,
return cnt;
}
+static const struct phylink_mac_ops bcm_sf2_phylink_mac_ops = {
+ .mac_config = bcm_sf2_sw_mac_config,
+ .mac_link_down = bcm_sf2_sw_mac_link_down,
+ .mac_link_up = bcm_sf2_sw_mac_link_up,
+};
+
static const struct dsa_switch_ops bcm_sf2_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = bcm_sf2_sw_setup,
@@ -1206,9 +1225,6 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.get_ethtool_phy_stats = b53_get_ethtool_phy_stats,
.get_phy_flags = bcm_sf2_sw_get_phy_flags,
.phylink_get_caps = bcm_sf2_sw_get_caps,
- .phylink_mac_config = bcm_sf2_sw_mac_config,
- .phylink_mac_link_down = bcm_sf2_sw_mac_link_down,
- .phylink_mac_link_up = bcm_sf2_sw_mac_link_up,
.phylink_fixed_state = bcm_sf2_sw_fixed_state,
.suspend = bcm_sf2_sw_suspend,
.resume = bcm_sf2_sw_resume,
@@ -1399,6 +1415,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
priv->dev = dev;
ds = dev->ds;
ds->ops = &bcm_sf2_ops;
+ ds->phylink_mac_ops = &bcm_sf2_phylink_mac_ops;
/* Advertise the 8 egress queues */
ds->num_tx_queues = SF2_NUM_EGRESS_QUEUES;
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index 6874cb9dc361..9c2ed2ba79da 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -12,14 +12,16 @@
#include <linux/bitmap.h>
#include <linux/bitops.h>
+#include <linux/container_of.h>
#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
#include <linux/leds.h>
+#include <linux/mutex.h>
#include <linux/platform_data/hirschmann-hellcreek.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/timecounter.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include <net/dsa.h>
#include <net/pkt_sched.h>
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
index bd7aacc71a63..ca2500aba96f 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c
@@ -16,7 +16,7 @@
#include "hellcreek_ptp.h"
int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct hellcreek *hellcreek = ds->priv;
diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
index 71af77efb28b..7d88da2134f2 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
+++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h
@@ -48,7 +48,7 @@ void hellcreek_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb);
int hellcreek_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp);
diff --git a/drivers/net/dsa/hirschmann/hellcreek_ptp.c b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
index 5249a1c2a80b..bfe21f9f7dcd 100644
--- a/drivers/net/dsa/hirschmann/hellcreek_ptp.c
+++ b/drivers/net/dsa/hirschmann/hellcreek_ptp.c
@@ -27,7 +27,8 @@ void hellcreek_ptp_write(struct hellcreek *hellcreek, u16 data,
}
/* Get nanoseconds from PTP clock */
-static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek)
+static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek,
+ struct ptp_system_timestamp *sts)
{
u16 nsl, nsh;
@@ -45,16 +46,19 @@ static u64 hellcreek_ptp_clock_read(struct hellcreek *hellcreek)
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
nsh = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ ptp_read_system_prets(sts);
nsl = hellcreek_ptp_read(hellcreek, PR_SS_SYNC_DATA_C);
+ ptp_read_system_postts(sts);
return (u64)nsl | ((u64)nsh << 16);
}
-static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek)
+static u64 __hellcreek_ptp_gettime(struct hellcreek *hellcreek,
+ struct ptp_system_timestamp *sts)
{
u64 ns;
- ns = hellcreek_ptp_clock_read(hellcreek);
+ ns = hellcreek_ptp_clock_read(hellcreek, sts);
if (ns < hellcreek->last_ts)
hellcreek->seconds++;
hellcreek->last_ts = ns;
@@ -72,7 +76,7 @@ u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns)
{
u64 s;
- __hellcreek_ptp_gettime(hellcreek);
+ __hellcreek_ptp_gettime(hellcreek, NULL);
if (hellcreek->last_ts > ns)
s = hellcreek->seconds * NSEC_PER_SEC;
else
@@ -81,14 +85,15 @@ u64 hellcreek_ptp_gettime_seconds(struct hellcreek *hellcreek, u64 ns)
return s;
}
-static int hellcreek_ptp_gettime(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
+static int hellcreek_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct hellcreek *hellcreek = ptp_to_hellcreek(ptp);
u64 ns;
mutex_lock(&hellcreek->ptp_lock);
- ns = __hellcreek_ptp_gettime(hellcreek);
+ ns = __hellcreek_ptp_gettime(hellcreek, sts);
mutex_unlock(&hellcreek->ptp_lock);
*ts = ns_to_timespec64(ns);
@@ -184,7 +189,7 @@ static int hellcreek_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
if (abs(delta) > MAX_SLOW_OFFSET_ADJ) {
struct timespec64 now, then = ns_to_timespec64(delta);
- hellcreek_ptp_gettime(ptp, &now);
+ hellcreek_ptp_gettimex(ptp, &now, NULL);
now = timespec64_add(now, then);
hellcreek_ptp_settime(ptp, &now);
@@ -233,7 +238,7 @@ static void hellcreek_ptp_overflow_check(struct work_struct *work)
hellcreek = dw_overflow_to_hellcreek(dw);
mutex_lock(&hellcreek->ptp_lock);
- __hellcreek_ptp_gettime(hellcreek);
+ __hellcreek_ptp_gettime(hellcreek, NULL);
mutex_unlock(&hellcreek->ptp_lock);
schedule_delayed_work(&hellcreek->overflow_work,
@@ -409,7 +414,7 @@ int hellcreek_ptp_setup(struct hellcreek *hellcreek)
hellcreek->ptp_clock_info.pps = 0;
hellcreek->ptp_clock_info.adjfine = hellcreek_ptp_adjfine;
hellcreek->ptp_clock_info.adjtime = hellcreek_ptp_adjtime;
- hellcreek->ptp_clock_info.gettime64 = hellcreek_ptp_gettime;
+ hellcreek->ptp_clock_info.gettimex64 = hellcreek_ptp_gettimex;
hellcreek->ptp_clock_info.settime64 = hellcreek_ptp_settime;
hellcreek->ptp_clock_info.enable = hellcreek_ptp_enable;
hellcreek->ptp_clock_info.do_aux_work = hellcreek_hwtstamp_work;
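
Switching from gettime64 to gettimex64 above lets the driver bracket the latching device read with system timestamps, so user space can correlate PHC and system time precisely. A sketch (separate from the patch; example_hw_read_ns() is a placeholder):

#include <linux/ptp_clock_kernel.h>

u64 example_hw_read_ns(struct ptp_clock_info *ptp);	/* placeholder */

static int example_gettimex64(struct ptp_clock_info *ptp,
			      struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	u64 ns;

	ptp_read_system_prets(sts);	/* both helpers are NULL-safe */
	ns = example_hw_read_ns(ptp);
	ptp_read_system_postts(sts);

	*ts = ns_to_timespec64(ns);
	return 0;
}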
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index fcb20eac332a..268949939636 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1007,15 +1007,14 @@ static const struct lan9303_mib_desc lan9303_mib[] = {
static void lan9303_get_strings(struct dsa_switch *ds, int port,
u32 stringset, uint8_t *data)
{
+ u8 *buf = data;
unsigned int u;
if (stringset != ETH_SS_STATS)
return;
- for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++) {
- strncpy(data + u * ETH_GSTRING_LEN, lan9303_mib[u].name,
- ETH_GSTRING_LEN);
- }
+ for (u = 0; u < ARRAY_SIZE(lan9303_mib); u++)
+ ethtool_puts(&buf, lan9303_mib[u].name);
}
static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
@@ -1048,31 +1047,31 @@ static int lan9303_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(lan9303_mib);
}
-static int lan9303_phy_read(struct dsa_switch *ds, int phy, int regnum)
+static int lan9303_phy_read(struct dsa_switch *ds, int port, int regnum)
{
struct lan9303 *chip = ds->priv;
int phy_base = chip->phy_addr_base;
- if (phy == phy_base)
+ if (port == 0)
return lan9303_virt_phy_reg_read(chip, regnum);
- if (phy > phy_base + 2)
+ if (port > 2)
return -ENODEV;
- return chip->ops->phy_read(chip, phy, regnum);
+ return chip->ops->phy_read(chip, phy_base + port, regnum);
}
-static int lan9303_phy_write(struct dsa_switch *ds, int phy, int regnum,
+static int lan9303_phy_write(struct dsa_switch *ds, int port, int regnum,
u16 val)
{
struct lan9303 *chip = ds->priv;
int phy_base = chip->phy_addr_base;
- if (phy == phy_base)
+ if (port == 0)
return lan9303_virt_phy_reg_write(chip, regnum, val);
- if (phy > phy_base + 2)
+ if (port > 2)
return -ENODEV;
- return chip->ops->phy_write(chip, phy, regnum, val);
+ return chip->ops->phy_write(chip, phy_base + port, regnum, val);
}
static int lan9303_port_enable(struct dsa_switch *ds, int port,
@@ -1100,7 +1099,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
vlan_vid_del(dsa_port_to_conduit(dp), htons(ETH_P_8021Q), port);
lan9303_disable_processing_port(chip, port);
- lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
+ lan9303_phy_write(ds, port, MII_BMCR, BMCR_PDOWN);
}
static int lan9303_port_bridge_join(struct dsa_switch *ds, int port,
@@ -1293,14 +1292,29 @@ static void lan9303_phylink_get_caps(struct dsa_switch *ds, int port,
}
}
-static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void lan9303_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void lan9303_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+}
+
+static void lan9303_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause,
+ int speed, int duplex, bool tx_pause,
bool rx_pause)
{
- struct lan9303 *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct lan9303 *chip = dp->ds->priv;
+ struct dsa_switch *ds = dp->ds;
+ int port = dp->index;
u32 ctl;
u32 reg;
@@ -1330,6 +1344,12 @@ static void lan9303_phylink_mac_link_up(struct dsa_switch *ds, int port,
regmap_write(chip->regmap, flow_ctl_reg[port], reg);
}
+static const struct phylink_mac_ops lan9303_phylink_mac_ops = {
+ .mac_config = lan9303_phylink_mac_config,
+ .mac_link_down = lan9303_phylink_mac_link_down,
+ .mac_link_up = lan9303_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops lan9303_switch_ops = {
.get_tag_protocol = lan9303_get_tag_protocol,
.setup = lan9303_setup,
@@ -1337,7 +1357,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
.phy_read = lan9303_phy_read,
.phy_write = lan9303_phy_write,
.phylink_get_caps = lan9303_phylink_get_caps,
- .phylink_mac_link_up = lan9303_phylink_mac_link_up,
.get_ethtool_stats = lan9303_get_ethtool_stats,
.get_sset_count = lan9303_get_sset_count,
.port_enable = lan9303_port_enable,
@@ -1355,8 +1374,6 @@ static const struct dsa_switch_ops lan9303_switch_ops = {
static int lan9303_register_switch(struct lan9303 *chip)
{
- int base;
-
chip->ds = devm_kzalloc(chip->dev, sizeof(*chip->ds), GFP_KERNEL);
if (!chip->ds)
return -ENOMEM;
@@ -1365,8 +1382,8 @@ static int lan9303_register_switch(struct lan9303 *chip)
chip->ds->num_ports = LAN9303_NUM_PORTS;
chip->ds->priv = chip;
chip->ds->ops = &lan9303_switch_ops;
- base = chip->phy_addr_base;
- chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1 + base, base);
+ chip->ds->phylink_mac_ops = &lan9303_phylink_mac_ops;
+ chip->ds->phys_mii_mask = GENMASK(LAN9303_NUM_PORTS - 1, 0);
return dsa_register_switch(chip->ds);
}
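
The get_strings conversion above uses ethtool_puts(), which copies one name into the current ETH_GSTRING_LEN slot and advances the cursor, replacing the manual strncpy() indexing. A sketch (separate from the patch):

#include <linux/ethtool.h>

static void example_get_strings(u8 *data)
{
	static const char * const names[] = { "rx_ok", "tx_ok" };
	u8 *buf = data;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(names); i++)
		ethtool_puts(&buf, names[i]);	/* advances buf itself */
}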
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index bbbec322bc4f..c62d27cdc117 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -89,7 +89,7 @@ static void lan9303_i2c_shutdown(struct i2c_client *client)
/*-------------------------------------------------------------------------*/
static const struct i2c_device_id lan9303_i2c_id[] = {
- { "lan9303", 0 },
+ { "lan9303" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(i2c, lan9303_i2c_id);
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index 167a86f39f27..0ac4857e5ee8 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -58,19 +58,19 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val)
return 0;
}
-static int lan9303_mdio_phy_write(struct lan9303 *chip, int phy, int reg,
+static int lan9303_mdio_phy_write(struct lan9303 *chip, int addr, int reg,
u16 val)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
- return mdiobus_write_nested(sw_dev->device->bus, phy, reg, val);
+ return mdiobus_write_nested(sw_dev->device->bus, addr, reg, val);
}
-static int lan9303_mdio_phy_read(struct lan9303 *chip, int phy, int reg)
+static int lan9303_mdio_phy_read(struct lan9303 *chip, int addr, int reg)
{
struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev);
- return mdiobus_read_nested(sw_dev->device->bus, phy, reg);
+ return mdiobus_read_nested(sw_dev->device->bus, addr, reg);
}
static const struct lan9303_phy_ops lan9303_mdio_phy_ops = {
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index de48b194048f..fcd4505f4925 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -236,7 +236,9 @@
#define GSWIP_TABLE_ACTIVE_VLAN 0x01
#define GSWIP_TABLE_VLAN_MAPPING 0x02
#define GSWIP_TABLE_MAC_BRIDGE 0x0b
-#define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static not, aging entry */
+#define GSWIP_TABLE_MAC_BRIDGE_KEY3_FID GENMASK(5, 0) /* Filtering identifier */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT GENMASK(7, 4) /* Port on learned entries */
+#define GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC BIT(0) /* Static, non-aging entry */
#define XRX200_GPHY_FW_ALIGN (16 * 1024)
@@ -653,14 +655,8 @@ static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add)
struct gswip_pce_table_entry vlan_active = {0,};
struct gswip_pce_table_entry vlan_mapping = {0,};
unsigned int cpu_port = priv->hw_info->cpu_port;
- unsigned int max_ports = priv->hw_info->max_ports;
int err;
- if (port >= max_ports) {
- dev_err(priv->dev, "single port for %i supported\n", port);
- return -EIO;
- }
-
vlan_active.index = port + 1;
vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN;
vlan_active.key[0] = 0; /* vid */
@@ -695,13 +691,18 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
struct gswip_priv *priv = ds->priv;
int err;
- if (!dsa_is_user_port(ds, port))
- return 0;
-
if (!dsa_is_cpu_port(ds, port)) {
+ u32 mdio_phy = 0;
+
err = gswip_add_single_port_br(priv, port, true);
if (err)
return err;
+
+ if (phydev)
+ mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
+
+ gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
+ GSWIP_MDIO_PHYp(port));
}
/* RMON Counter Enable for port */
@@ -714,16 +715,6 @@ static int gswip_port_enable(struct dsa_switch *ds, int port,
gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN,
GSWIP_SDMA_PCTRLp(port));
- if (!dsa_is_cpu_port(ds, port)) {
- u32 mdio_phy = 0;
-
- if (phydev)
- mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK;
-
- gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy,
- GSWIP_MDIO_PHYp(port));
- }
-
return 0;
}
@@ -731,9 +722,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port)
{
struct gswip_priv *priv = ds->priv;
- if (!dsa_is_user_port(ds, port))
- return;
-
gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0,
GSWIP_FDMA_PCTRLp(port));
gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0,
@@ -792,7 +780,7 @@ static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
}
if (vlan_filtering) {
- /* Use port based VLAN tag */
+ /* Use tag based VLAN */
gswip_switch_mask(priv,
GSWIP_PCE_VCTRL_VSR,
GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
@@ -801,7 +789,7 @@ static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port,
gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0,
GSWIP_PCE_PCTRL_0p(port));
} else {
- /* Use port based VLAN tag */
+ /* Use port based VLAN */
gswip_switch_mask(priv,
GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR |
GSWIP_PCE_VCTRL_VEMR,
@@ -836,7 +824,7 @@ static int gswip_setup(struct dsa_switch *ds)
err = gswip_pce_load_microcode(priv);
if (err) {
- dev_err(priv->dev, "writing PCE microcode failed, %i", err);
+ dev_err(priv->dev, "writing PCE microcode failed, %i\n", err);
return err;
}
@@ -898,8 +886,6 @@ static int gswip_setup(struct dsa_switch *ds)
ds->mtu_enforcement_ingress = true;
- gswip_port_enable(ds, cpu_port, NULL);
-
ds->configure_vlan_while_not_filtering = false;
return 0;
@@ -1314,10 +1300,11 @@ static void gswip_port_fast_age(struct dsa_switch *ds, int port)
if (!mac_bridge.valid)
continue;
- if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC)
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC)
continue;
- if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port)
+ if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
+ mac_bridge.val[0]))
continue;
mac_bridge.valid = false;
@@ -1383,7 +1370,8 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
}
if (fid == -1) {
- dev_err(priv->dev, "Port not part of a bridge\n");
+ dev_err(priv->dev, "no FID found for bridge %s\n",
+ bridge->name);
return -EINVAL;
}
@@ -1392,9 +1380,9 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port,
mac_bridge.key[0] = addr[5] | (addr[4] << 8);
mac_bridge.key[1] = addr[3] | (addr[2] << 8);
mac_bridge.key[2] = addr[1] | (addr[0] << 8);
- mac_bridge.key[3] = fid;
+ mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid);
mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */
- mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC;
+ mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC;
mac_bridge.valid = add;
err = gswip_pce_table_entry_write(priv, &mac_bridge);
@@ -1423,7 +1411,7 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
{
struct gswip_priv *priv = ds->priv;
struct gswip_pce_table_entry mac_bridge = {0,};
- unsigned char addr[6];
+ unsigned char addr[ETH_ALEN];
int i;
int err;
@@ -1448,14 +1436,15 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port,
addr[2] = (mac_bridge.key[1] >> 8) & 0xff;
addr[1] = mac_bridge.key[2] & 0xff;
addr[0] = (mac_bridge.key[2] >> 8) & 0xff;
- if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) {
+ if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) {
if (mac_bridge.val[0] & BIT(port)) {
err = cb(addr, 0, true, data);
if (err)
return err;
}
} else {
- if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) {
+ if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT,
+ mac_bridge.val[0])) {
err = cb(addr, 0, false, data);
if (err)
return err;
@@ -1474,12 +1463,11 @@ static int gswip_port_max_mtu(struct dsa_switch *ds, int port)
static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct gswip_priv *priv = ds->priv;
- int cpu_port = priv->hw_info->cpu_port;
/* CPU port always has maximum mtu of user ports, so use it to set
* switch frame size, including 8 byte special header.
*/
- if (port == cpu_port) {
+ if (dsa_is_cpu_port(ds, port)) {
new_mtu += 8;
gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN,
GSWIP_MAC_FLEN);
@@ -1516,6 +1504,7 @@ static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port,
case 2:
case 3:
case 4:
+ case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
@@ -1547,6 +1536,7 @@ static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port,
case 2:
case 3:
case 4:
+ case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
break;
@@ -1670,11 +1660,13 @@ static void gswip_port_set_pause(struct gswip_priv *priv, int port,
mdio_phy, GSWIP_MDIO_PHYp(port));
}
-static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
+static void gswip_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct gswip_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
u32 miicfg = 0;
miicfg |= GSWIP_MII_CFG_LDCLKDIS;
@@ -1700,7 +1692,7 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
miicfg |= GSWIP_MII_CFG_MODE_GMII;
break;
default:
- dev_err(ds->dev,
+ dev_err(dp->ds->dev,
"Unsupported interface: %d\n", state->interface);
return;
}
@@ -1726,28 +1718,32 @@ static void gswip_phylink_mac_config(struct dsa_switch *ds, int port,
}
}
-static void gswip_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void gswip_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct gswip_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
gswip_mii_mask_cfg(priv, GSWIP_MII_CFG_EN, 0, port);
- if (!dsa_is_cpu_port(ds, port))
+ if (!dsa_port_is_cpu(dp))
gswip_port_set_link(priv, port, false);
}
-static void gswip_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void gswip_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct gswip_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct gswip_priv *priv = dp->ds->priv;
+ int port = dp->index;
- if (!dsa_is_cpu_port(ds, port)) {
+ if (!dsa_port_is_cpu(dp)) {
gswip_port_set_link(priv, port, true);
gswip_port_set_speed(priv, port, speed, interface);
gswip_port_set_duplex(priv, port, duplex);
@@ -1784,7 +1780,7 @@ static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table,
err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL,
GSWIP_BM_RAM_CTRL_BAS);
if (err) {
- dev_err(priv->dev, "timeout while reading table: %u, index: %u",
+ dev_err(priv->dev, "timeout while reading table: %u, index: %u\n",
table, index);
return 0;
}
@@ -1824,6 +1820,12 @@ static int gswip_get_sset_count(struct dsa_switch *ds, int port, int sset)
return ARRAY_SIZE(gswip_rmon_cnt);
}
+static const struct phylink_mac_ops gswip_phylink_mac_ops = {
+ .mac_config = gswip_phylink_mac_config,
+ .mac_link_down = gswip_phylink_mac_link_down,
+ .mac_link_up = gswip_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.get_tag_protocol = gswip_get_tag_protocol,
.setup = gswip_setup,
@@ -1842,9 +1844,6 @@ static const struct dsa_switch_ops gswip_xrx200_switch_ops = {
.port_change_mtu = gswip_port_change_mtu,
.port_max_mtu = gswip_port_max_mtu,
.phylink_get_caps = gswip_xrx200_phylink_get_caps,
- .phylink_mac_config = gswip_phylink_mac_config,
- .phylink_mac_link_down = gswip_phylink_mac_link_down,
- .phylink_mac_link_up = gswip_phylink_mac_link_up,
.get_strings = gswip_get_strings,
.get_ethtool_stats = gswip_get_ethtool_stats,
.get_sset_count = gswip_get_sset_count,
@@ -1868,9 +1867,6 @@ static const struct dsa_switch_ops gswip_xrx300_switch_ops = {
.port_change_mtu = gswip_port_change_mtu,
.port_max_mtu = gswip_port_max_mtu,
.phylink_get_caps = gswip_xrx300_phylink_get_caps,
- .phylink_mac_config = gswip_phylink_mac_config,
- .phylink_mac_link_down = gswip_phylink_mac_link_down,
- .phylink_mac_link_up = gswip_phylink_mac_link_up,
.get_strings = gswip_get_strings,
.get_ethtool_stats = gswip_get_ethtool_stats,
.get_sset_count = gswip_get_sset_count,
@@ -1923,11 +1919,9 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
msleep(200);
ret = request_firmware(&fw, gphy_fw->fw_name, dev);
- if (ret) {
- dev_err(dev, "failed to load firmware: %s, error: %i\n",
- gphy_fw->fw_name, ret);
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to load firmware: %s\n",
+ gphy_fw->fw_name);
/* GPHY cores need the firmware code in a persistent and contiguous
* memory area with a 16 kB boundary aligned start address.
@@ -1940,9 +1934,9 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph
dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN);
memcpy(fw_addr, fw->data, fw->size);
} else {
- dev_err(dev, "failed to alloc firmware memory\n");
release_firmware(fw);
- return -ENOMEM;
+ return dev_err_probe(dev, -ENOMEM,
+ "failed to alloc firmware memory\n");
}
release_firmware(fw);
@@ -1969,8 +1963,8 @@ static int gswip_gphy_fw_probe(struct gswip_priv *priv,
gphy_fw->clk_gate = devm_clk_get(dev, gphyname);
if (IS_ERR(gphy_fw->clk_gate)) {
- dev_err(dev, "Failed to lookup gate clock\n");
- return PTR_ERR(gphy_fw->clk_gate);
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->clk_gate),
+ "Failed to lookup gate clock\n");
}
ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset);
@@ -1990,8 +1984,8 @@ static int gswip_gphy_fw_probe(struct gswip_priv *priv,
gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name;
break;
default:
- dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode);
- return -EINVAL;
+ return dev_err_probe(dev, -EINVAL, "Unknown GPHY mode %d\n",
+ gphy_mode);
}
gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
@@ -2013,7 +2007,7 @@ static void gswip_gphy_fw_remove(struct gswip_priv *priv,
ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0);
if (ret)
- dev_err(priv->dev, "can not reset GPHY FW pointer");
+ dev_err(priv->dev, "can not reset GPHY FW pointer\n");
clk_disable_unprepare(gphy_fw->clk_gate);
@@ -2042,8 +2036,9 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data;
break;
default:
- dev_err(dev, "unknown GSWIP version: 0x%x", version);
- return -ENOENT;
+ return dev_err_probe(dev, -ENOENT,
+ "unknown GSWIP version: 0x%x\n",
+ version);
}
}
@@ -2051,10 +2046,9 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv,
if (match && match->data)
priv->gphy_fw_name_cfg = match->data;
- if (!priv->gphy_fw_name_cfg) {
- dev_err(dev, "GPHY compatible type not supported");
- return -ENOENT;
- }
+ if (!priv->gphy_fw_name_cfg)
+ return dev_err_probe(dev, -ENOENT,
+ "GPHY compatible type not supported\n");
priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np);
if (!priv->num_gphy_fw)
@@ -2136,6 +2130,7 @@ static int gswip_probe(struct platform_device *pdev)
priv->ds->num_ports = priv->hw_info->max_ports;
priv->ds->priv = priv;
priv->ds->ops = priv->hw_info->ops;
+ priv->ds->phylink_mac_ops = &gswip_phylink_mac_ops;
priv->dev = dev;
mutex_init(&priv->pce_table_lock);
version = gswip_switch_r(priv, GSWIP_VERSION);
@@ -2154,8 +2149,8 @@ static int gswip_probe(struct platform_device *pdev)
return -EINVAL;
break;
default:
- dev_err(dev, "unknown GSWIP version: 0x%x", version);
- return -ENOENT;
+ return dev_err_probe(dev, -ENOENT,
+ "unknown GSWIP version: 0x%x\n", version);
}
/* bring up the mdio bus */
@@ -2163,28 +2158,27 @@ static int gswip_probe(struct platform_device *pdev)
if (gphy_fw_np) {
err = gswip_gphy_fw_list(priv, gphy_fw_np, version);
of_node_put(gphy_fw_np);
- if (err) {
- dev_err(dev, "gphy fw probe failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err,
+ "gphy fw probe failed\n");
}
/* bring up the mdio bus */
err = gswip_mdio(priv);
if (err) {
- dev_err(dev, "mdio probe failed\n");
+ dev_err_probe(dev, err, "mdio probe failed\n");
goto gphy_fw_remove;
}
err = dsa_register_switch(priv->ds);
if (err) {
- dev_err(dev, "dsa switch register failed: %i\n", err);
+ dev_err_probe(dev, err, "dsa switch registration failed\n");
goto gphy_fw_remove;
}
if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) {
- dev_err(dev, "wrong CPU port defined, HW only supports port: %i",
- priv->hw_info->cpu_port);
- err = -EINVAL;
+ err = dev_err_probe(dev, -EINVAL,
+ "wrong CPU port defined, HW only supports port: %i\n",
+ priv->hw_info->cpu_port);
goto disable_switch;
}
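
Among the gswip changes above, the open-coded GENMASK(7, 4) shifts become named bitfield accesses. A sketch of the <linux/bitfield.h> pattern (separate from the patch; the EXAMPLE_* field is illustrative):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_VAL0_PORT	GENMASK(7, 4)	/* port on learned entries */

static u16 example_set_port(u16 val0, unsigned int port)
{
	val0 &= ~EXAMPLE_VAL0_PORT;
	return val0 | FIELD_PREP(EXAMPLE_VAL0_PORT, port);
}

static unsigned int example_get_port(u16 val0)
{
	return FIELD_GET(EXAMPLE_VAL0_PORT, val0);
}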
diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig
index 394ca8678d2b..c1b906c05a02 100644
--- a/drivers/net/dsa/microchip/Kconfig
+++ b/drivers/net/dsa/microchip/Kconfig
@@ -4,6 +4,8 @@ menuconfig NET_DSA_MICROCHIP_KSZ_COMMON
depends on NET_DSA
select NET_DSA_TAG_KSZ
select NET_DSA_TAG_NONE
+ select NET_IEEE8021Q_HELPERS
+ select DCB
help
This driver adds support for Microchip KSZ9477 series switch and
KSZ8795/KSZ88x3 switch chips.
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile
index 49459a50dbc8..1cfba1ec9355 100644
--- a/drivers/net/dsa/microchip/Makefile
+++ b/drivers/net/dsa/microchip/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_switch.o
-ksz_switch-objs := ksz_common.o
+ksz_switch-objs := ksz_common.o ksz_dcb.o
ksz_switch-objs += ksz9477.o ksz9477_acl.o ksz9477_tc_flower.o
ksz_switch-objs += ksz8795.o
ksz_switch-objs += lan937x_main.o
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 1a5225264e6a..ae43077e76c3 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -19,8 +19,6 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port);
void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port);
int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
-int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
- u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
void ksz8_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
@@ -56,9 +54,10 @@ int ksz8_reset_switch(struct ksz_device *dev);
int ksz8_switch_init(struct ksz_device *dev);
void ksz8_switch_exit(struct ksz_device *dev);
int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu);
-void ksz8_phylink_mac_link_up(struct ksz_device *dev, int port,
- unsigned int mode, phy_interface_t interface,
- struct phy_device *phydev, int speed, int duplex,
+void ksz8_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
bool tx_pause, bool rx_pause);
+int ksz8_all_queues_split(struct ksz_device *dev, int queues);
#endif
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index 14923535ca7e..d27b9c36d73f 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -127,37 +127,71 @@ int ksz8_change_mtu(struct ksz_device *dev, int port, int mtu)
return -EOPNOTSUPP;
}
-static void ksz8795_set_prio_queue(struct ksz_device *dev, int port, int queue)
+static int ksz8_port_queue_split(struct ksz_device *dev, int port, int queues)
{
- u8 hi, lo;
+ u8 mask_4q, mask_2q;
+ u8 reg_4q, reg_2q;
+ u8 data_4q = 0;
+ u8 data_2q = 0;
+ int ret;
- /* Number of queues can only be 1, 2, or 4. */
- switch (queue) {
- case 4:
- case 3:
- queue = PORT_QUEUE_SPLIT_4;
- break;
- case 2:
- queue = PORT_QUEUE_SPLIT_2;
- break;
- default:
- queue = PORT_QUEUE_SPLIT_1;
+ if (ksz_is_ksz88x3(dev)) {
+ mask_4q = KSZ8873_PORT_4QUEUE_SPLIT_EN;
+ mask_2q = KSZ8873_PORT_2QUEUE_SPLIT_EN;
+ reg_4q = REG_PORT_CTRL_0;
+ reg_2q = REG_PORT_CTRL_2;
+
+ /* KSZ8795 family switches have Weighted Fair Queueing (WFQ)
+ * enabled by default. Enable it for KSZ8873 family switches
+ * too. The KSZ8873 family defaults to strict priority, which
+ * should only be enabled explicitly via TC_SETUP_QDISC_ETS, not
+ * unconditionally.
+ */
+ ret = ksz_rmw8(dev, REG_SW_CTRL_3, WEIGHTED_FAIR_QUEUE_ENABLE,
+ WEIGHTED_FAIR_QUEUE_ENABLE);
+ if (ret)
+ return ret;
+ } else {
+ mask_4q = KSZ8795_PORT_4QUEUE_SPLIT_EN;
+ mask_2q = KSZ8795_PORT_2QUEUE_SPLIT_EN;
+ reg_4q = REG_PORT_CTRL_13;
+ reg_2q = REG_PORT_CTRL_0;
+
+ /* TODO: this is legacy from the initial KSZ8795 driver and should
+ * be moved to an appropriate place in the future.
+ */
+ ret = ksz_rmw8(dev, REG_SW_CTRL_19,
+ SW_OUT_RATE_LIMIT_QUEUE_BASED,
+ SW_OUT_RATE_LIMIT_QUEUE_BASED);
+ if (ret)
+ return ret;
}
- ksz_pread8(dev, port, REG_PORT_CTRL_0, &lo);
- ksz_pread8(dev, port, P_DROP_TAG_CTRL, &hi);
- lo &= ~PORT_QUEUE_SPLIT_L;
- if (queue & PORT_QUEUE_SPLIT_2)
- lo |= PORT_QUEUE_SPLIT_L;
- hi &= ~PORT_QUEUE_SPLIT_H;
- if (queue & PORT_QUEUE_SPLIT_4)
- hi |= PORT_QUEUE_SPLIT_H;
- ksz_pwrite8(dev, port, REG_PORT_CTRL_0, lo);
- ksz_pwrite8(dev, port, P_DROP_TAG_CTRL, hi);
-
- /* Default is port based for egress rate limit. */
- if (queue != PORT_QUEUE_SPLIT_1)
- ksz_cfg(dev, REG_SW_CTRL_19, SW_OUT_RATE_LIMIT_QUEUE_BASED,
- true);
+
+ if (queues == 4)
+ data_4q = mask_4q;
+ else if (queues == 2)
+ data_2q = mask_2q;
+
+ ret = ksz_prmw8(dev, port, reg_4q, mask_4q, data_4q);
+ if (ret)
+ return ret;
+
+ return ksz_prmw8(dev, port, reg_2q, mask_2q, data_2q);
+}
+
+int ksz8_all_queues_split(struct ksz_device *dev, int queues)
+{
+ struct dsa_switch *ds = dev->ds;
+ const struct dsa_port *dp;
+
+ dsa_switch_for_each_port(dp, ds) {
+ int ret = ksz8_port_queue_split(dev, dp->index, queues);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
void ksz8_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt)
@@ -385,39 +419,39 @@ static int ksz8_valid_dyn_entry(struct ksz_device *dev, u8 *data)
int timeout = 100;
const u32 *masks;
const u16 *regs;
+ int ret;
masks = dev->info->masks;
regs = dev->info->regs;
do {
- ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
+ ret = ksz_read8(dev, regs[REG_IND_DATA_CHECK], data);
+ if (ret)
+ return ret;
+
timeout--;
} while ((*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) && timeout);
/* Entry is not ready for accessing. */
- if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY]) {
- return -EAGAIN;
- /* Entry is ready for accessing. */
- } else {
- ksz_read8(dev, regs[REG_IND_DATA_8], data);
+ if (*data & masks[DYNAMIC_MAC_TABLE_NOT_READY])
+ return -ETIMEDOUT;
- /* There is no valid entry in the table. */
- if (*data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY])
- return -ENXIO;
- }
- return 0;
+ /* Entry is ready for accessing. */
+ return ksz_read8(dev, regs[REG_IND_DATA_8], data);
}
-int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
- u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries)
+static int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
+ u8 *fid, u8 *src_port, u16 *entries)
{
u32 data_hi, data_lo;
const u8 *shifts;
const u32 *masks;
const u16 *regs;
u16 ctrl_addr;
+ u64 buf = 0;
u8 data;
- int rc;
+ int cnt;
+ int ret;
shifts = dev->info->shifts;
masks = dev->info->masks;
@@ -426,49 +460,50 @@ int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
ctrl_addr = IND_ACC_TABLE(TABLE_DYNAMIC_MAC | TABLE_READ) | addr;
mutex_lock(&dev->alu_mutex);
- ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ ret = ksz_write16(dev, regs[REG_IND_CTRL_0], ctrl_addr);
+ if (ret)
+ goto unlock_alu;
+
+ ret = ksz8_valid_dyn_entry(dev, &data);
+ if (ret)
+ goto unlock_alu;
- rc = ksz8_valid_dyn_entry(dev, &data);
- if (rc == -EAGAIN) {
- if (addr == 0)
- *entries = 0;
- } else if (rc == -ENXIO) {
+ if (data & masks[DYNAMIC_MAC_TABLE_MAC_EMPTY]) {
*entries = 0;
- /* At least one valid entry in the table. */
- } else {
- u64 buf = 0;
- int cnt;
-
- ksz_read64(dev, regs[REG_IND_DATA_HI], &buf);
- data_hi = (u32)(buf >> 32);
- data_lo = (u32)buf;
-
- /* Check out how many valid entry in the table. */
- cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H];
- cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H];
- cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >>
- shifts[DYNAMIC_MAC_ENTRIES];
- *entries = cnt + 1;
-
- *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >>
- shifts[DYNAMIC_MAC_FID];
- *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >>
- shifts[DYNAMIC_MAC_SRC_PORT];
- *timestamp = (data_hi & masks[DYNAMIC_MAC_TABLE_TIMESTAMP]) >>
- shifts[DYNAMIC_MAC_TIMESTAMP];
-
- mac_addr[5] = (u8)data_lo;
- mac_addr[4] = (u8)(data_lo >> 8);
- mac_addr[3] = (u8)(data_lo >> 16);
- mac_addr[2] = (u8)(data_lo >> 24);
-
- mac_addr[1] = (u8)data_hi;
- mac_addr[0] = (u8)(data_hi >> 8);
- rc = 0;
+ goto unlock_alu;
}
+
+ ret = ksz_read64(dev, regs[REG_IND_DATA_HI], &buf);
+ if (ret)
+ goto unlock_alu;
+
+ data_hi = (u32)(buf >> 32);
+ data_lo = (u32)buf;
+
+ /* Check how many valid entries are in the table. */
+ cnt = data & masks[DYNAMIC_MAC_TABLE_ENTRIES_H];
+ cnt <<= shifts[DYNAMIC_MAC_ENTRIES_H];
+ cnt |= (data_hi & masks[DYNAMIC_MAC_TABLE_ENTRIES]) >>
+ shifts[DYNAMIC_MAC_ENTRIES];
+ *entries = cnt + 1;
+
+ *fid = (data_hi & masks[DYNAMIC_MAC_TABLE_FID]) >>
+ shifts[DYNAMIC_MAC_FID];
+ *src_port = (data_hi & masks[DYNAMIC_MAC_TABLE_SRC_PORT]) >>
+ shifts[DYNAMIC_MAC_SRC_PORT];
+
+ mac_addr[5] = (u8)data_lo;
+ mac_addr[4] = (u8)(data_lo >> 8);
+ mac_addr[3] = (u8)(data_lo >> 16);
+ mac_addr[2] = (u8)(data_lo >> 24);
+
+ mac_addr[1] = (u8)data_hi;
+ mac_addr[0] = (u8)(data_hi >> 8);
+
+unlock_alu:
mutex_unlock(&dev->alu_mutex);
- return rc;
+ return ret;
}
static int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
@@ -1193,28 +1228,28 @@ void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port)
int ksz8_fdb_dump(struct ksz_device *dev, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
- int ret = 0;
- u16 i = 0;
- u16 entries = 0;
- u8 timestamp = 0;
- u8 fid;
- u8 src_port;
u8 mac[ETH_ALEN];
+ u8 src_port, fid;
+ u16 entries = 0;
+ int ret, i;
- do {
+ for (i = 0; i < KSZ8_DYN_MAC_ENTRIES; i++) {
ret = ksz8_r_dyn_mac_table(dev, i, mac, &fid, &src_port,
- &timestamp, &entries);
- if (!ret && port == src_port) {
+ &entries);
+ if (ret)
+ return ret;
+
+ if (i >= entries)
+ return 0;
+
+ if (port == src_port) {
ret = cb(mac, fid, false, data);
if (ret)
- break;
+ return ret;
}
- i++;
- } while (i < entries);
- if (i >= entries)
- ret = 0;
+ }
- return ret;
+ return 0;
}
static int ksz8_add_sta_mac(struct ksz_device *dev, int port,
@@ -1512,6 +1547,7 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct dsa_switch *ds = dev->ds;
const u32 *masks;
+ int queues;
u8 member;
masks = dev->info->masks;
@@ -1519,19 +1555,20 @@ void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
- if (!ksz_is_ksz88x3(dev))
- ksz8795_set_prio_queue(dev, port, 4);
+ /* For KSZ88x3, enable only one queue by default; otherwise we won't
+ * be able to get rid of PCP priorities on Port 2.
+ */
+ if (ksz_is_ksz88x3(dev))
+ queues = 1;
+ else
+ queues = dev->info->num_tx_queues;
- /* disable DiffServ priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_ENABLE, false);
+ ksz8_port_queue_split(dev, port, queues);
/* replace priority */
ksz_port_cfg(dev, port, P_802_1P_CTRL,
masks[PORT_802_1P_REMAPPING], false);
- /* enable 802.1p priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true);
-
if (cpu_port)
member = dsa_user_ports(ds);
else
@@ -1701,11 +1738,15 @@ static void ksz8_cpu_port_link_up(struct ksz_device *dev, int speed, int duplex,
SW_10_MBIT, ctrl);
}
-void ksz8_phylink_mac_link_up(struct ksz_device *dev, int port,
- unsigned int mode, phy_interface_t interface,
- struct phy_device *phydev, int speed, int duplex,
+void ksz8_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
bool tx_pause, bool rx_pause)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ int port = dp->index;
+
/* If the port is the CPU port, apply special handling. Only the CPU
* port is configured via global registers.
*/
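
The reworked ksz8_fdb_dump() above now propagates read errors and stops once `entries` is reached. For context, DSA drives it through a callback of type dsa_fdb_dump_cb_t; a hypothetical counting callback illustrating the contract:

	#include <net/dsa.h>

	/* Hypothetical callback matching dsa_fdb_dump_cb_t */
	static int example_fdb_count_cb(const unsigned char *addr, u16 vid,
					bool is_static, void *data)
	{
		unsigned int *count = data;

		(*count)++;
		return 0;	/* non-zero would abort the dump */
	}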
diff --git a/drivers/net/dsa/microchip/ksz8795_reg.h b/drivers/net/dsa/microchip/ksz8795_reg.h
index 7c9341ef73b0..69566a5d9cda 100644
--- a/drivers/net/dsa/microchip/ksz8795_reg.h
+++ b/drivers/net/dsa/microchip/ksz8795_reg.h
@@ -124,7 +124,8 @@
#define PORT_BASED_PRIO_3 3
#define PORT_INSERT_TAG BIT(2)
#define PORT_REMOVE_TAG BIT(1)
-#define PORT_QUEUE_SPLIT_L BIT(0)
+#define KSZ8795_PORT_2QUEUE_SPLIT_EN BIT(0)
+#define KSZ8873_PORT_4QUEUE_SPLIT_EN BIT(0)
#define REG_PORT_1_CTRL_1 0x11
#define REG_PORT_2_CTRL_1 0x21
@@ -143,6 +144,7 @@
#define REG_PORT_4_CTRL_2 0x42
#define REG_PORT_5_CTRL_2 0x52
+#define KSZ8873_PORT_2QUEUE_SPLIT_EN BIT(7)
#define PORT_INGRESS_FILTER BIT(6)
#define PORT_DISCARD_NON_VID BIT(5)
#define PORT_FORCE_FLOW_CTRL BIT(4)
@@ -463,10 +465,7 @@
#define REG_PORT_4_CTRL_13 0xE1
#define REG_PORT_5_CTRL_13 0xF1
-#define PORT_QUEUE_SPLIT_H BIT(1)
-#define PORT_QUEUE_SPLIT_1 0
-#define PORT_QUEUE_SPLIT_2 1
-#define PORT_QUEUE_SPLIT_4 2
+#define KSZ8795_PORT_4QUEUE_SPLIT_EN BIT(1)
#define PORT_DROP_TAG BIT(0)
#define REG_PORT_1_CTRL_14 0xB2
@@ -794,5 +793,6 @@
#define TAIL_TAG_LOOKUP BIT(7)
#define FID_ENTRIES 128
+#define KSZ8_DYN_MAC_ENTRIES 1024
#endif
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index 7f745628c84d..425e20daf1e9 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -355,10 +355,8 @@ int ksz9477_reset_switch(struct ksz_device *dev)
SPI_AUTO_EDGE_DETECTION, 0);
/* default configuration */
- ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8);
- data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING |
- SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE;
- ksz_write8(dev, REG_SW_LUE_CTRL_1, data8);
+ ksz_write8(dev, REG_SW_LUE_CTRL_1,
+ SW_AGING_ENABLE | SW_LINK_AUTO_AGING | SW_SRC_ADDR_FILTER);
/* disable interrupts */
ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK);
@@ -429,6 +427,57 @@ void ksz9477_freeze_mib(struct ksz_device *dev, int port, bool freeze)
mutex_unlock(&p->mib.cnt_mutex);
}
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col)
+{
+ u32 pmavbc;
+ u8 status;
+ u16 pqm;
+ int ret;
+
+ ret = ksz_pread8(dev, port, REG_PORT_STATUS_0, &status);
+ if (ret)
+ return ret;
+ if (FIELD_GET(PORT_INTF_SPEED_MASK, status) != PORT_INTF_SPEED_NONE &&
+ !(status & PORT_INTF_FULL_DUPLEX)) {
+ /* Errata DS80000754 recommends monitoring potential faults in
+ * half-duplex mode. The switch might no longer be able to
+ * communicate in these states.
+ * If you see this message, please read the errata sheet for more information:
+ * https://ww1.microchip.com/downloads/aemDocuments/documents/UNG/ProductDocuments/Errata/KSZ9477S-Errata-DS80000754.pdf
+ * To work around this issue, half-duplex mode should be avoided.
+ * A software reset could be implemented to recover from this state.
+ */
+ dev_warn_once(dev->dev,
+ "Half-duplex detected on port %d, transmission halt may occur\n",
+ port);
+ if (tx_late_col != 0) {
+ /* Transmission halt with late collisions */
+ dev_crit_once(dev->dev,
+ "TX late collisions detected, transmission may be halted on port %d\n",
+ port);
+ }
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &status);
+ if (ret)
+ return ret;
+ if (status & SW_VLAN_ENABLE) {
+ ret = ksz_pread16(dev, port, REG_PORT_QM_TX_CNT_0__4, &pqm);
+ if (ret)
+ return ret;
+ ret = ksz_read32(dev, REG_PMAVBC, &pmavbc);
+ if (ret)
+ return ret;
+ if ((FIELD_GET(PMAVBC_MASK, pmavbc) <= PMAVBC_MIN) ||
+ (FIELD_GET(PORT_QM_TX_CNT_M, pqm) >= PORT_QM_TX_CNT_MAX)) {
+ /* Transmission halt with Half-Duplex and VLAN */
+ dev_crit_once(dev->dev,
+ "resources out of limits, transmission may be halted\n");
+ }
+ }
+ }
+ return ret;
+}
+
void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
{
struct ksz_port_mib *mib = &dev->ports[port].mib;
@@ -1158,18 +1207,12 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable broadcast storm limit */
ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true);
- /* disable DiffServ priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false);
-
/* replace priority */
ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING,
false);
ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4,
MTI_PVID_REPLACE, false);
- /* enable 802.1p priority */
- ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
-
/* force flow control for non-PHY ports only */
ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
@@ -1305,6 +1348,10 @@ int ksz9477_setup(struct dsa_switch *ds)
/* Enable REG_SW_MTU__2 reg by setting SW_JUMBO_PACKET */
ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_JUMBO_PACKET, true);
+ /* Use collision based back pressure mode. */
+ ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE,
+ SW_BACK_PRESSURE_COLLISION);
+
/* Now we can configure default MTU value */
ret = regmap_update_bits(ksz_regmap_16(dev), REG_SW_MTU__2, REG_SW_MTU_MASK,
VLAN_ETH_FRAME_LEN + ETH_FCS_LEN);
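
Note the back-pressure setup above relies on SW_BACK_PRESSURE_COLLISION being 0: assuming ksz_cfg(dev, reg, bits, set) sets or clears `bits` in `reg`, passing 0 clears SW_BACK_PRESSURE and selects collision-based rather than carrier-based back pressure. An equivalent, more explicit spelling:

	/* clear SW_BACK_PRESSURE: collision-based back pressure */
	ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_BACK_PRESSURE, false);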
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index ce1e656b800b..239a281da10b 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -36,6 +36,8 @@ int ksz9477_port_mirror_add(struct ksz_device *dev, int port,
bool ingress, struct netlink_ext_ack *extack);
void ksz9477_port_mirror_del(struct ksz_device *dev, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+int ksz9477_errata_monitor(struct ksz_device *dev, int port,
+ u64 tx_late_col);
void ksz9477_get_caps(struct ksz_device *dev, int port,
struct phylink_config *config);
int ksz9477_fdb_dump(struct ksz_device *dev, int port,
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 82bebee4615c..7d7560f23a73 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -72,8 +72,8 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c)
}
static const struct i2c_device_id ksz9477_i2c_id[] = {
- { "ksz9477-switch", 0 },
- {},
+ { "ksz9477-switch" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id);
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index f3a205ee483f..d5354c600ea1 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -247,6 +247,7 @@
#define REG_SW_MAC_CTRL_1 0x0331
#define SW_BACK_PRESSURE BIT(5)
+#define SW_BACK_PRESSURE_COLLISION 0
#define FAIR_FLOW_CTRL BIT(4)
#define NO_EXC_COLLISION_DROP BIT(3)
#define SW_JUMBO_PACKET BIT(2)
@@ -842,8 +843,8 @@
#define REG_PORT_STATUS_0 0x0030
-#define PORT_INTF_SPEED_M 0x3
-#define PORT_INTF_SPEED_S 3
+#define PORT_INTF_SPEED_MASK GENMASK(4, 3)
+#define PORT_INTF_SPEED_NONE GENMASK(1, 0)
#define PORT_INTF_FULL_DUPLEX BIT(2)
#define PORT_TX_FLOW_CTRL BIT(1)
#define PORT_RX_FLOW_CTRL BIT(0)
@@ -1167,6 +1168,11 @@
#define PORT_RMII_CLK_SEL BIT(7)
#define PORT_MII_SEL_EDGE BIT(5)
+#define REG_PMAVBC 0x03AC
+
+#define PMAVBC_MASK GENMASK(26, 16)
+#define PMAVBC_MIN 0x580
+
/* 4 - MAC */
#define REG_PORT_MAC_CTRL_0 0x0400
@@ -1494,6 +1500,7 @@
#define PORT_QM_TX_CNT_USED_S 0
#define PORT_QM_TX_CNT_M (BIT(11) - 1)
+#define PORT_QM_TX_CNT_MAX 0x200
#define REG_PORT_QM_TX_CNT_1__4 0x0A14
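
The register header now expresses these fields as GENMASK() masks decoded with FIELD_GET() instead of hand-rolled mask/shift pairs. A sketch of how the errata monitor consumes them, assuming the definitions above:

	#include <linux/bitfield.h>

	/* true when the link is up and operating in half duplex */
	static bool example_half_duplex(u8 status)
	{
		return FIELD_GET(PORT_INTF_SPEED_MASK, status) !=
			       PORT_INTF_SPEED_NONE &&
		       !(status & PORT_INTF_FULL_DUPLEX);
	}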
diff --git a/drivers/net/dsa/microchip/ksz9477_tc_flower.c b/drivers/net/dsa/microchip/ksz9477_tc_flower.c
index 8b2f5be667e0..ca7830ab168a 100644
--- a/drivers/net/dsa/microchip/ksz9477_tc_flower.c
+++ b/drivers/net/dsa/microchip/ksz9477_tc_flower.c
@@ -124,6 +124,9 @@ static int ksz9477_flower_parse_key(struct ksz_device *dev, int port,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) ||
flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
ret = ksz9477_flower_parse_key_l2(dev, port, extack, rule,
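
The flow_rule_match_has_control_flags() helper returns true, and fills in the extack message itself, when the rule matches on control flags (such as fragmentation bits) that the driver cannot evaluate. A sketch of the common rejection pattern:

	#include <net/flow_offload.h>

	static int example_parse(struct flow_rule *rule,
				 struct netlink_ext_ack *extack)
	{
		/* helper sets the extack message on failure */
		if (flow_rule_match_has_control_flags(rule, extack))
			return -EOPNOTSUPP;

		return 0;
	}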
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 2b510f150dd8..1491099528be 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -24,10 +24,12 @@
#include <linux/of_net.h>
#include <linux/micrel_phy.h>
#include <net/dsa.h>
+#include <net/ieee8021q.h>
#include <net/pkt_cls.h>
#include <net/switchdev.h>
#include "ksz_common.h"
+#include "ksz_dcb.h"
#include "ksz_ptp.h"
#include "ksz8.h"
#include "ksz9477.h"
@@ -253,6 +255,28 @@ static const struct ksz_drive_strength ksz8830_drive_strengths[] = {
{ KSZ8873_DRIVE_STRENGTH_16MA, 16000 },
};
+static void ksz8830_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+static void ksz_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state);
+static void ksz_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface);
+
+static const struct phylink_mac_ops ksz8830_phylink_mac_ops = {
+ .mac_config = ksz8830_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz8_phylink_mac_link_up,
+};
+
+static const struct phylink_mac_ops ksz8_phylink_mac_ops = {
+ .mac_config = ksz_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz8_phylink_mac_link_up,
+};
+
static const struct ksz_dev_ops ksz8_dev_ops = {
.setup = ksz8_setup,
.get_port_addr = ksz8_get_port_addr,
@@ -277,7 +301,6 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.mirror_add = ksz8_port_mirror_add,
.mirror_del = ksz8_port_mirror_del,
.get_caps = ksz8_get_caps,
- .phylink_mac_link_up = ksz8_phylink_mac_link_up,
.config_cpu_port = ksz8_config_cpu_port,
.enable_stp_addr = ksz8_enable_stp_addr,
.reset = ksz8_reset_switch,
@@ -286,13 +309,19 @@ static const struct ksz_dev_ops ksz8_dev_ops = {
.change_mtu = ksz8_change_mtu,
};
-static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause,
+ int speed, int duplex, bool tx_pause,
bool rx_pause);
+static const struct phylink_mac_ops ksz9477_phylink_mac_ops = {
+ .mac_config = ksz_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz9477_phylink_mac_link_up,
+};
+
static const struct ksz_dev_ops ksz9477_dev_ops = {
.setup = ksz9477_setup,
.get_port_addr = ksz9477_get_port_addr,
@@ -319,7 +348,6 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.mdb_add = ksz9477_mdb_add,
.mdb_del = ksz9477_mdb_del,
.change_mtu = ksz9477_change_mtu,
- .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.get_wol = ksz9477_get_wol,
.set_wol = ksz9477_set_wol,
.wol_pre_shutdown = ksz9477_wol_pre_shutdown,
@@ -331,6 +359,12 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.exit = ksz9477_switch_exit,
};
+static const struct phylink_mac_ops lan937x_phylink_mac_ops = {
+ .mac_config = ksz_phylink_mac_config,
+ .mac_link_down = ksz_phylink_mac_link_down,
+ .mac_link_up = ksz9477_phylink_mac_link_up,
+};
+
static const struct ksz_dev_ops lan937x_dev_ops = {
.setup = lan937x_setup,
.teardown = lan937x_teardown,
@@ -359,7 +393,6 @@ static const struct ksz_dev_ops lan937x_dev_ops = {
.mdb_add = ksz9477_mdb_add,
.mdb_del = ksz9477_mdb_del,
.change_mtu = lan937x_change_mtu,
- .phylink_mac_link_up = ksz9477_phylink_mac_link_up,
.config_cpu_port = lan937x_config_cpu_port,
.tc_cbs_set_cinc = lan937x_tc_cbs_set_cinc,
.enable_stp_addr = ksz9477_enable_stp_addr,
@@ -1194,9 +1227,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 3, /* total port count */
.port_nirqs = 3,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1223,7 +1257,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
+ .num_ipms = 4,
.ops = &ksz8_dev_ops,
+ .phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1262,7 +1298,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
+ .num_ipms = 4,
.ops = &ksz8_dev_ops,
+ .phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1287,7 +1325,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total cpu and user ports */
.num_tx_queues = 4,
+ .num_ipms = 4,
.ops = &ksz8_dev_ops,
+ .phylink_mac_ops = &ksz8_phylink_mac_ops,
.ksz87xx_eee_link_erratum = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1312,7 +1352,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.cpu_ports = 0x4, /* can be configured as cpu port */
.port_cnt = 3,
.num_tx_queues = 4,
+ .num_ipms = 4,
.ops = &ksz8_dev_ops,
+ .phylink_mac_ops = &ksz8830_phylink_mac_ops,
.mib_names = ksz88xx_mib_names,
.mib_cnt = ARRAY_SIZE(ksz88xx_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1336,9 +1378,11 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 7, /* total physical port count */
.port_nirqs = 4,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1370,7 +1414,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 6, /* total physical port count */
.port_nirqs = 2,
.num_tx_queues = 4,
+ .num_ipms = 8,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1402,7 +1449,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 7, /* total physical port count */
.port_nirqs = 2,
.num_tx_queues = 4,
+ .num_ipms = 8,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1432,7 +1482,9 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 3, /* total port count */
.port_nirqs = 2,
.num_tx_queues = 4,
+ .num_ipms = 8,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1458,9 +1510,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 3, /* total port count */
.port_nirqs = 3,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1486,9 +1539,11 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 7, /* total port count */
.port_nirqs = 3,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
+ .phylink_mac_ops = &ksz9477_phylink_mac_ops,
+ .phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1519,8 +1574,8 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 7, /* total physical port count */
.port_nirqs = 3,
.num_tx_queues = 4,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -1551,9 +1606,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 5, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1578,9 +1634,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 6, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1605,9 +1662,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 8, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1636,9 +1694,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 5, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1667,9 +1726,10 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.port_cnt = 8, /* total physical port count */
.port_nirqs = 6,
.num_tx_queues = 8,
+ .num_ipms = 8,
.tc_cbs_supported = true,
- .tc_ets_supported = true,
.ops = &lan937x_dev_ops,
+ .phylink_mac_ops = &lan937x_phylink_mac_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
.reg_mib_cnt = MIB_COUNTER_NUM,
@@ -1764,6 +1824,7 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
struct rtnl_link_stats64 *stats;
struct ksz_stats_raw *raw;
struct ksz_port_mib *mib;
+ int ret;
mib = &dev->ports[port].mib;
stats = &mib->stats64;
@@ -1805,6 +1866,12 @@ void ksz_r_mib_stats64(struct ksz_device *dev, int port)
pstats->rx_pause_frames = raw->rx_pause;
spin_unlock(&mib->stats64_lock);
+
+ if (dev->info->phy_errata_9477) {
+ ret = ksz9477_errata_monitor(dev, port, raw->tx_late_col);
+ if (ret)
+ dev_err(dev->dev, "Failed to monitor transmission halt\n");
+ }
}
void ksz88xx_r_mib_stats64(struct ksz_device *dev, int port)
@@ -2129,7 +2196,7 @@ static void ksz_irq_bus_sync_unlock(struct irq_data *d)
struct ksz_device *dev = kirq->dev;
int ret;
- ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
+ ret = ksz_write8(dev, kirq->reg_mask, kirq->masked);
if (ret)
dev_err(dev->dev, "failed to change IRQ mask\n");
@@ -2304,6 +2371,7 @@ static int ksz_setup(struct dsa_switch *ds)
ksz_init_mib_timer(dev);
ds->configure_vlan_while_not_filtering = false;
+ ds->dscp_prio_mapping_is_global = true;
if (dev->dev_ops->setup) {
ret = dev->dev_ops->setup(ds);
@@ -2347,6 +2415,10 @@ static int ksz_setup(struct dsa_switch *ds)
goto out_ptp_clock_unregister;
}
+ ret = ksz_dcb_init(dev);
+ if (ret)
+ goto out_ptp_clock_unregister;
+
/* start switch */
regmap_update_bits(ksz_regmap_8(dev), regs[S_START_CTRL],
SW_START, SW_START);
@@ -2506,7 +2578,11 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
if (!port)
return MICREL_KSZ8_P1_ERRATA;
break;
+ case KSZ8567_CHIP_ID:
case KSZ9477_CHIP_ID:
+ case KSZ9567_CHIP_ID:
+ case KSZ9896_CHIP_ID:
+ case KSZ9897_CHIP_ID:
/* KSZ9477 Errata DS80000754C
*
* Module 4: Energy Efficient Ethernet (EEE) feature select must
@@ -2516,6 +2592,13 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
* controls. If not disabled, the PHY ports can auto-negotiate
* to enable EEE, and this feature can cause link drops when
* linked to another device supporting EEE.
+ *
+ * The same item appears in the errata for the KSZ9567, KSZ9896,
+ * and KSZ9897.
+ *
+ * A similar item appears in the errata for the KSZ8567, but
+ * provides an alternative workaround. For now, use the simple
+ * workaround of disabling the EEE feature for this device too.
*/
return MICREL_NO_EEE;
}
@@ -2523,14 +2606,15 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
return 0;
}
-static void ksz_mac_link_down(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface)
+static void ksz_phylink_mac_link_down(struct phylink_config *config,
+ unsigned int mode,
+ phy_interface_t interface)
{
- struct ksz_device *dev = ds->priv;
- struct ksz_port *p = &dev->ports[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
/* Read all MIB counters when the link is going down. */
- p->read = true;
+ dev->ports[dp->index].read = true;
/* timer started */
if (dev->mib_read_interval)
schedule_delayed_work(&dev->mib_read, 0);
@@ -2660,9 +2744,33 @@ static int ksz_port_mdb_del(struct dsa_switch *ds, int port,
return dev->dev_ops->mdb_del(dev, port, mdb, db);
}
+static int ksz9477_set_default_prio_queue_mapping(struct ksz_device *dev,
+ int port)
+{
+ u32 queue_map = 0;
+ int ipm;
+
+ for (ipm = 0; ipm < dev->info->num_ipms; ipm++) {
+ int queue;
+
+ /* Traffic Type (TT) corresponds to the Internal Priority
+ * Map (IPM) in the switch. Traffic Class (TC) corresponds
+ * to the queue in the switch.
+ */
+ queue = ieee8021q_tt_to_tc(ipm, dev->info->num_tx_queues);
+ if (queue < 0)
+ return queue;
+
+ queue_map |= queue << (ipm * KSZ9477_PORT_TC_MAP_S);
+ }
+
+ return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map);
+}
+
static int ksz_port_setup(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
+ int ret;
if (!dsa_is_user_port(ds, port))
return 0;
@@ -2670,11 +2778,17 @@ static int ksz_port_setup(struct dsa_switch *ds, int port)
/* setup user port */
dev->dev_ops->port_setup(dev, port, false);
+ if (!is_ksz8(dev)) {
+ ret = ksz9477_set_default_prio_queue_mapping(dev, port);
+ if (ret)
+ return ret;
+ }
+
/* port_stp_state_set() will be called after to enable the port so
* there is no need to do anything.
*/
- return 0;
+ return ksz_dcb_init_port(dev, port);
}
void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
@@ -3013,7 +3127,8 @@ static void ksz_set_xmii(struct ksz_device *dev, int port,
/* On KSZ9893, disable RGMII in-band status support */
if (dev->chip_id == KSZ9893_CHIP_ID ||
dev->chip_id == KSZ8563_CHIP_ID ||
- dev->chip_id == KSZ9563_CHIP_ID)
+ dev->chip_id == KSZ9563_CHIP_ID ||
+ is_lan937x(dev))
data8 &= ~P_MII_MAC_MODE;
break;
default:
@@ -3050,7 +3165,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
else
interface = PHY_INTERFACE_MODE_MII;
} else if (val == bitval[P_RMII_SEL]) {
- interface = PHY_INTERFACE_MODE_RGMII;
+ interface = PHY_INTERFACE_MODE_RMII;
} else {
interface = PHY_INTERFACE_MODE_RGMII;
if (data8 & P_RGMII_ID_EG_ENABLE)
@@ -3065,16 +3180,23 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
return interface;
}
-static void ksz_phylink_mac_config(struct dsa_switch *ds, int port,
+static void ksz8830_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+
+ dev->ports[dp->index].manual_flow = !(state->pause & MLO_PAUSE_AN);
+}
+
+static void ksz_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct ksz_device *dev = ds->priv;
-
- if (ksz_is_ksz88x3(dev)) {
- dev->ports[port].manual_flow = !(state->pause & MLO_PAUSE_AN);
- return;
- }
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ int port = dp->index;
/* Internal PHYs */
if (dev->info->internal_phy[port])
@@ -3087,9 +3209,6 @@ static void ksz_phylink_mac_config(struct dsa_switch *ds, int port,
ksz_set_xmii(dev, port, state->interface);
- if (dev->dev_ops->phylink_mac_config)
- dev->dev_ops->phylink_mac_config(dev, port, mode, state);
-
if (dev->dev_ops->setup_rgmii_delay)
dev->dev_ops->setup_rgmii_delay(dev, port);
}
@@ -3187,13 +3306,16 @@ static void ksz_duplex_flowctrl(struct ksz_device *dev, int port, int duplex,
ksz_prmw8(dev, port, regs[P_XMII_CTRL_0], mask, val);
}
-static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
+static void ksz9477_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause,
+ int speed, int duplex, bool tx_pause,
bool rx_pause)
{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ksz_device *dev = dp->ds->priv;
+ int port = dp->index;
struct ksz_port *p;
p = &dev->ports[port];
@@ -3209,18 +3331,6 @@ static void ksz9477_phylink_mac_link_up(struct ksz_device *dev, int port,
ksz_duplex_flowctrl(dev, port, duplex, tx_pause, rx_pause);
}
-static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode,
- phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause, bool rx_pause)
-{
- struct ksz_device *dev = ds->priv;
-
- dev->dev_ops->phylink_mac_link_up(dev, port, mode, interface, phydev,
- speed, duplex, tx_pause, rx_pause);
-}
-
static int ksz_switch_detect(struct ksz_device *dev)
{
u8 id1, id2, id4;
@@ -3522,7 +3632,7 @@ static int ksz_tc_ets_add(struct ksz_device *dev, int port,
for (tc_prio = 0; tc_prio < ARRAY_SIZE(p->priomap); tc_prio++) {
int queue;
- if (tc_prio > KSZ9477_MAX_TC_PRIO)
+ if (tc_prio >= dev->info->num_ipms)
break;
queue = ksz_ets_band_to_queue(p, p->priomap[tc_prio]);
@@ -3534,8 +3644,7 @@ static int ksz_tc_ets_add(struct ksz_device *dev, int port,
static int ksz_tc_ets_del(struct ksz_device *dev, int port)
{
- int ret, queue, tc_prio, s;
- u32 queue_map = 0;
+ int ret, queue;
/* To restore the default chip configuration, set all queues to use the
* WRR scheduler with a weight of 1.
@@ -3547,31 +3656,10 @@ static int ksz_tc_ets_del(struct ksz_device *dev, int port)
return ret;
}
- switch (dev->info->num_tx_queues) {
- case 2:
- s = 2;
- break;
- case 4:
- s = 1;
- break;
- case 8:
- s = 0;
- break;
- default:
- return -EINVAL;
- }
-
/* Revert the queue mapping for TC-priority to its default setting on
* the chip.
*/
- for (tc_prio = 0; tc_prio <= KSZ9477_MAX_TC_PRIO; tc_prio++) {
- int queue;
-
- queue = tc_prio >> s;
- queue_map |= queue << (tc_prio * KSZ9477_PORT_TC_MAP_S);
- }
-
- return ksz_pwrite32(dev, port, KSZ9477_PORT_MRI_TC_MAP__4, queue_map);
+ return ksz9477_set_default_prio_queue_mapping(dev, port);
}
static int ksz_tc_ets_validate(struct ksz_device *dev, int port,
@@ -3616,7 +3704,7 @@ static int ksz_tc_setup_qdisc_ets(struct dsa_switch *ds, int port,
struct ksz_device *dev = ds->priv;
int ret;
- if (!dev->info->tc_ets_supported)
+ if (is_ksz8(dev))
return -EOPNOTSUPP;
if (qopt->parent != TC_H_ROOT) {
@@ -3687,6 +3775,11 @@ static int ksz_port_set_mac_address(struct dsa_switch *ds, int port,
return -EBUSY;
}
+ /* Initialize the variable, as the code that fills in the settings
+ * may not be executed.
+ */
+ wol.wolopts = 0;
+
ksz_get_wol(ds, dp->index, &wol);
if (wol.wolopts & WAKE_MAGIC) {
dev_err(ds->dev,
@@ -3841,6 +3934,13 @@ static int ksz_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr,
return -EOPNOTSUPP;
}
+ /* KSZ9477 can only perform HSR offloading for up to two ports */
+ if (hweight8(dev->hsr_ports) >= 2) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than two ports - using software HSR");
+ return -EOPNOTSUPP;
+ }
+
/* Self MAC address filtering, to avoid frames traversing
* the HSR ring more than once.
*/
@@ -3881,9 +3981,6 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.phy_read = ksz_phy_read16,
.phy_write = ksz_phy_write16,
.phylink_get_caps = ksz_phylink_get_caps,
- .phylink_mac_config = ksz_phylink_mac_config,
- .phylink_mac_link_up = ksz_phylink_mac_link_up,
- .phylink_mac_link_down = ksz_mac_link_down,
.port_setup = ksz_port_setup,
.set_ageing_time = ksz_set_ageing_time,
.get_strings = ksz_get_strings,
@@ -3925,6 +4022,13 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.port_setup_tc = ksz_setup_tc,
.get_mac_eee = ksz_get_mac_eee,
.set_mac_eee = ksz_set_mac_eee,
+ .port_get_default_prio = ksz_port_get_default_prio,
+ .port_set_default_prio = ksz_port_set_default_prio,
+ .port_get_dscp_prio = ksz_port_get_dscp_prio,
+ .port_add_dscp_prio = ksz_port_add_dscp_prio,
+ .port_del_dscp_prio = ksz_port_del_dscp_prio,
+ .port_get_apptrust = ksz_port_get_apptrust,
+ .port_set_apptrust = ksz_port_set_apptrust,
};
struct ksz_device *ksz_switch_alloc(struct device *base, void *priv)
@@ -4328,6 +4432,9 @@ int ksz_switch_register(struct ksz_device *dev)
/* set the real number of ports */
dev->ds->num_ports = dev->info->port_cnt;
+ /* set the phylink ops */
+ dev->ds->phylink_mac_ops = dev->info->phylink_mac_ops;
+
/* Host port interface will be self detected, or specifically set in
* device tree.
*/
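
With this change the phylink callbacks move from dsa_switch_ops to per-chip phylink_mac_ops selected via ds->phylink_mac_ops, and each callback recovers its port with dsa_phylink_to_port(). A minimal sketch of the wiring, with a hypothetical mac_config (a real driver also supplies mac_link_down/mac_link_up, as the KSZ variants above do):

	#include <linux/phylink.h>
	#include <net/dsa.h>

	static void example_mac_config(struct phylink_config *config,
				       unsigned int mode,
				       const struct phylink_link_state *state)
	{
		struct dsa_port *dp = dsa_phylink_to_port(config);

		/* dp->ds->priv and dp->index identify switch and port */
	}

	static const struct phylink_mac_ops example_phylink_mac_ops = {
		.mac_config	= example_mac_config,
	};

	/* assigned once, before dsa_register_switch():
	 *	ds->phylink_mac_ops = &example_phylink_mac_ops;
	 */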
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 40c11b0d6b62..5f0a628b9849 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -19,9 +19,14 @@
#include "ksz_ptp.h"
#define KSZ_MAX_NUM_PORTS 8
+/* all KSZ switches count ports from 1 */
+#define KSZ_PORT_1 0
+#define KSZ_PORT_2 1
+#define KSZ_PORT_4 3
struct ksz_device;
struct ksz_port;
+struct phylink_mac_ops;
enum ksz_regmap_width {
KSZ_REGMAP_8,
@@ -58,9 +63,11 @@ struct ksz_chip_data {
int port_cnt;
u8 port_nirqs;
u8 num_tx_queues;
+ u8 num_ipms; /* number of Internal Priority Maps */
bool tc_cbs_supported;
- bool tc_ets_supported;
const struct ksz_dev_ops *ops;
+ const struct phylink_mac_ops *phylink_mac_ops;
+ bool phy_errata_9477;
bool ksz87xx_eee_link_erratum;
const struct ksz_mib_names *mib_names;
int mib_cnt;
@@ -349,9 +356,6 @@ struct ksz_dev_ops {
int (*change_mtu)(struct ksz_device *dev, int port, int mtu);
void (*freeze_mib)(struct ksz_device *dev, int port, bool freeze);
void (*port_init_cnt)(struct ksz_device *dev, int port);
- void (*phylink_mac_config)(struct ksz_device *dev, int port,
- unsigned int mode,
- const struct phylink_link_state *state);
void (*phylink_mac_link_up)(struct ksz_device *dev, int port,
unsigned int mode,
phy_interface_t interface,
@@ -620,6 +624,11 @@ static inline bool ksz_is_ksz88x3(struct ksz_device *dev)
return dev->chip_id == KSZ8830_CHIP_ID;
}
+static inline bool is_ksz8(struct ksz_device *dev)
+{
+ return ksz_is_ksz87xx(dev) || ksz_is_ksz88x3(dev);
+}
+
static inline int is_lan937x(struct ksz_device *dev)
{
return dev->chip_id == LAN9370_CHIP_ID ||
@@ -629,6 +638,12 @@ static inline int is_lan937x(struct ksz_device *dev)
dev->chip_id == LAN9374_CHIP_ID;
}
+static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port)
+{
+ return (dev->chip_id == LAN9371_CHIP_ID ||
+ dev->chip_id == LAN9372_CHIP_ID) && port == KSZ_PORT_4;
+}
+
/* STP State Defines */
#define PORT_TX_ENABLE BIT(2)
#define PORT_RX_ENABLE BIT(1)
@@ -722,7 +737,6 @@ static inline int is_lan937x(struct ksz_device *dev)
#define KSZ9477_PORT_MRI_TC_MAP__4 0x0808
#define KSZ9477_PORT_TC_MAP_S 4
-#define KSZ9477_MAX_TC_PRIO 7
/* CBS related registers */
#define REG_PORT_MTI_QUEUE_INDEX__4 0x0900
diff --git a/drivers/net/dsa/microchip/ksz_dcb.c b/drivers/net/dsa/microchip/ksz_dcb.c
new file mode 100644
index 000000000000..086bc9b3cf53
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_dcb.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2024 Pengutronix, Oleksij Rempel <kernel@pengutronix.de>
+
+#include <linux/dsa/ksz_common.h>
+#include <net/dsa.h>
+#include <net/dscp.h>
+#include <net/ieee8021q.h>
+
+#include "ksz_common.h"
+#include "ksz_dcb.h"
+#include "ksz8.h"
+
+#define KSZ8_REG_PORT_1_CTRL_0 0x10
+#define KSZ8_PORT_DIFFSERV_ENABLE BIT(6)
+#define KSZ8_PORT_802_1P_ENABLE BIT(5)
+#define KSZ8_PORT_BASED_PRIO_M GENMASK(4, 3)
+
+#define KSZ88X3_REG_TOS_DSCP_CTRL 0x60
+#define KSZ8765_REG_TOS_DSCP_CTRL 0x90
+
+#define KSZ9477_REG_SW_MAC_TOS_CTRL 0x033e
+#define KSZ9477_SW_TOS_DSCP_REMAP BIT(0)
+#define KSZ9477_SW_TOS_DSCP_DEFAULT_PRIO_M GENMASK(5, 3)
+
+#define KSZ9477_REG_DIFFSERV_PRIO_MAP 0x0340
+
+#define KSZ9477_REG_PORT_MRI_PRIO_CTRL 0x0801
+#define KSZ9477_PORT_HIGHEST_PRIO BIT(7)
+#define KSZ9477_PORT_OR_PRIO BIT(6)
+#define KSZ9477_PORT_MAC_PRIO_ENABLE BIT(4)
+#define KSZ9477_PORT_VLAN_PRIO_ENABLE BIT(3)
+#define KSZ9477_PORT_802_1P_PRIO_ENABLE BIT(2)
+#define KSZ9477_PORT_DIFFSERV_PRIO_ENABLE BIT(1)
+#define KSZ9477_PORT_ACL_PRIO_ENABLE BIT(0)
+
+#define KSZ9477_REG_PORT_MRI_MAC_CTRL 0x0802
+#define KSZ9477_PORT_BASED_PRIO_M GENMASK(2, 0)
+
+struct ksz_apptrust_map {
+ u8 apptrust;
+ u8 bit;
+};
+
+static const struct ksz_apptrust_map ksz8_apptrust_map_to_bit[] = {
+ { DCB_APP_SEL_PCP, KSZ8_PORT_802_1P_ENABLE },
+ { IEEE_8021QAZ_APP_SEL_DSCP, KSZ8_PORT_DIFFSERV_ENABLE },
+};
+
+static const struct ksz_apptrust_map ksz9477_apptrust_map_to_bit[] = {
+ { DCB_APP_SEL_PCP, KSZ9477_PORT_802_1P_PRIO_ENABLE },
+ { IEEE_8021QAZ_APP_SEL_DSCP, KSZ9477_PORT_DIFFSERV_PRIO_ENABLE },
+};
+
+/* ksz_supported_apptrust[] - Supported apptrust selectors and Priority Order
+ * of Internal Priority Map (IPM) sources.
+ *
+ * This array defines the apptrust selectors supported by the hardware, where
+ * the index within the array indicates the priority of the selector - lower
+ * indices correspond to higher priority. This fixed priority scheme is due to
+ * the hardware's design, which does not support configurable priority among
+ * different priority sources.
+ *
+ * The priority sources, including Tail Tag, ACL, VLAN PCP and DSCP are ordered
+ * by the hardware's fixed logic, as detailed below. The order reflects a
+ * non-configurable precedence where certain types of priority information
+ * override others:
+ *
+ * 1. Tail Tag - Highest priority, overrides ACL, VLAN PCP, and DSCP priorities.
+ * 2. ACL - Overrides VLAN PCP and DSCP priorities.
+ * 3. VLAN PCP - Overrides DSCP priority.
+ * 4. DSCP - Lowest priority, does not override any other priority source.
+ *
+ * In this context, the array's lower index (higher priority) for
+ * 'DCB_APP_SEL_PCP' suggests its relative priority over
+ * 'IEEE_8021QAZ_APP_SEL_DSCP' within the system's fixed priority scheme.
+ *
+ * DCB_APP_SEL_PCP - Priority Code Point selector
+ * IEEE_8021QAZ_APP_SEL_DSCP - Differentiated Services Code Point selector
+ */
+static const u8 ksz_supported_apptrust[] = {
+ DCB_APP_SEL_PCP,
+ IEEE_8021QAZ_APP_SEL_DSCP,
+};
+
+static const char * const ksz_supported_apptrust_variants[] = {
+ "empty", "dscp", "pcp", "dscp pcp"
+};
+
+static void ksz_get_default_port_prio_reg(struct ksz_device *dev, int *reg,
+ u8 *mask, int *shift)
+{
+ if (is_ksz8(dev)) {
+ *reg = KSZ8_REG_PORT_1_CTRL_0;
+ *mask = KSZ8_PORT_BASED_PRIO_M;
+ *shift = __bf_shf(KSZ8_PORT_BASED_PRIO_M);
+ } else {
+ *reg = KSZ9477_REG_PORT_MRI_MAC_CTRL;
+ *mask = KSZ9477_PORT_BASED_PRIO_M;
+ *shift = __bf_shf(KSZ9477_PORT_BASED_PRIO_M);
+ }
+}
+
+/**
+ * ksz_get_dscp_prio_reg - Retrieves the DSCP-to-priority-mapping register
+ * @dev: Pointer to the KSZ switch device structure
+ * @reg: Pointer to the register address to be set
+ * @per_reg: Pointer to the number of DSCP values per register
+ * @mask: Pointer to the mask to be set
+ *
+ * This function retrieves the DSCP to priority mapping register, the number of
+ * DSCP values per register, and the mask to be set.
+ */
+static void ksz_get_dscp_prio_reg(struct ksz_device *dev, int *reg,
+ int *per_reg, u8 *mask)
+{
+ if (ksz_is_ksz87xx(dev)) {
+ *reg = KSZ8765_REG_TOS_DSCP_CTRL;
+ *per_reg = 4;
+ *mask = GENMASK(1, 0);
+ } else if (ksz_is_ksz88x3(dev)) {
+ *reg = KSZ88X3_REG_TOS_DSCP_CTRL;
+ *per_reg = 4;
+ *mask = GENMASK(1, 0);
+ } else {
+ *reg = KSZ9477_REG_DIFFSERV_PRIO_MAP;
+ *per_reg = 2;
+ *mask = GENMASK(2, 0);
+ }
+}
+
+/**
+ * ksz_get_apptrust_map_and_reg - Retrieves the apptrust map and register
+ * @dev: Pointer to the KSZ switch device structure
+ * @map: Pointer to the apptrust map to be set
+ * @reg: Pointer to the register address to be set
+ * @mask: Pointer to the mask to be set
+ *
+ * This function retrieves the apptrust map and register address for the
+ * apptrust configuration.
+ */
+static void ksz_get_apptrust_map_and_reg(struct ksz_device *dev,
+ const struct ksz_apptrust_map **map,
+ int *reg, u8 *mask)
+{
+ if (is_ksz8(dev)) {
+ *map = ksz8_apptrust_map_to_bit;
+ *reg = KSZ8_REG_PORT_1_CTRL_0;
+ *mask = KSZ8_PORT_DIFFSERV_ENABLE | KSZ8_PORT_802_1P_ENABLE;
+ } else {
+ *map = ksz9477_apptrust_map_to_bit;
+ *reg = KSZ9477_REG_PORT_MRI_PRIO_CTRL;
+ *mask = KSZ9477_PORT_802_1P_PRIO_ENABLE |
+ KSZ9477_PORT_DIFFSERV_PRIO_ENABLE;
+ }
+}
+
+/**
+ * ksz_port_get_default_prio - Retrieves the default priority for a port on a
+ * KSZ switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number from which to get the default priority
+ *
+ * This function fetches the default priority for the specified port on a KSZ
+ * switch.
+ *
+ * Return: The default priority of the port on success, or a negative error
+ * code on failure.
+ */
+int ksz_port_get_default_prio(struct dsa_switch *ds, int port)
+{
+ struct ksz_device *dev = ds->priv;
+ int ret, reg, shift;
+ u8 data, mask;
+
+ ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift);
+
+ ret = ksz_pread8(dev, port, reg, &data);
+ if (ret)
+ return ret;
+
+ return (data & mask) >> shift;
+}
+
+/**
+ * ksz88x3_port_set_default_prio_quirks - Quirks for default priority
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to set the default priority
+ * @prio: Priority value to set
+ *
+ * This function implements quirks for setting the default priority on KSZ88x3
+ * devices. On Port 2, no priority providers other than PCP work,
+ * so configuring a default priority on Port 2 is not possible.
+ * On Port 1, it is not possible to configure port priority if PCP
+ * apptrust on Port 2 is disabled. Since we disable multiple queues on the
+ * switch to disable PCP on Port 2, we need to ensure that the default priority
+ * configuration on Port 1 is in agreement with the configuration on Port 2.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_port_set_default_prio_quirks(struct ksz_device *dev, int port,
+ u8 prio)
+{
+ if (!prio)
+ return 0;
+
+ if (port == KSZ_PORT_2) {
+ dev_err(dev->dev, "Port priority configuration is not working on Port 2\n");
+ return -EINVAL;
+ } else if (port == KSZ_PORT_1) {
+ u8 port2_data;
+ int ret;
+
+ ret = ksz_pread8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0,
+ &port2_data);
+ if (ret)
+ return ret;
+
+ if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) {
+ dev_err(dev->dev, "Not possible to configure port priority on Port 1 if PCP apptrust on Port 2 is disabled\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ksz_port_set_default_prio - Sets the default priority for a port on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to set the default priority
+ * @prio: Priority value to set
+ *
+ * This function sets the default priority for the specified port on a KSZ
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio)
+{
+ struct ksz_device *dev = ds->priv;
+ int reg, shift, ret;
+ u8 mask;
+
+ if (prio >= dev->info->num_ipms)
+ return -EINVAL;
+
+ if (ksz_is_ksz88x3(dev)) {
+ ret = ksz88x3_port_set_default_prio_quirks(dev, port, prio);
+ if (ret)
+ return ret;
+ }
+
+ ksz_get_default_port_prio_reg(dev, &reg, &mask, &shift);
+
+ return ksz_prmw8(dev, port, reg, mask, (prio << shift) & mask);
+}
+
+/**
+ * ksz_port_get_dscp_prio - Retrieves the priority for a DSCP value on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to get the priority
+ * @dscp: DSCP value for which to get the priority
+ *
+ * This function fetches the priority value from the switch's global DSCP-to-priority
+ * mapping table for the specified DSCP value.
+ *
+ * Return: The priority value for the DSCP on success, or a negative error
+ * code on failure.
+ */
+int ksz_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp)
+{
+ struct ksz_device *dev = ds->priv;
+ int reg, per_reg, ret, shift;
+ u8 data, mask;
+
+ ksz_get_dscp_prio_reg(dev, &reg, &per_reg, &mask);
+
+ /* On KSZ9xxx, first check whether global DSCP remapping is
+ * enabled at all.
+ */
+ if (!is_ksz8(dev)) {
+ ret = ksz_read8(dev, KSZ9477_REG_SW_MAC_TOS_CTRL, &data);
+ if (ret)
+ return ret;
+
+ /* If DSCP remapping is disabled, DSCP bits 3-5 are used as
+ * Internal Priority Map (IPM)
+ */
+ if (!(data & KSZ9477_SW_TOS_DSCP_REMAP))
+ return FIELD_GET(KSZ9477_SW_TOS_DSCP_DEFAULT_PRIO_M,
+ dscp);
+ }
+
+ /* In case DSCP remapping is enabled, read the priority from the
+ * global DSCP-to-priority mapping table.
+ */
+ reg += dscp / per_reg;
+ ret = ksz_read8(dev, reg, &data);
+ if (ret)
+ return ret;
+
+ shift = (dscp % per_reg) * (8 / per_reg);
+
+ return (data >> shift) & mask;
+}
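
Worked numbers for the shift/mask arithmetic above, assuming the KSZ9477 layout (per_reg = 2, mask = GENMASK(2, 0)):

	/* For dscp = 5 on KSZ9477:
	 *   register offset: 5 / 2 = 2, so the field lives in
	 *                    KSZ9477_REG_DIFFSERV_PRIO_MAP + 2
	 *   field shift:     (5 % 2) * (8 / 2) = 4
	 * The 3-bit IPM for DSCP 5 therefore occupies bits 6:4 of
	 * that register.
	 */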
+
+/**
+ * ksz_set_global_dscp_entry - Sets the global DSCP-to-priority mapping entry
+ * @dev: Pointer to the KSZ switch device structure
+ * @dscp: DSCP value for which to set the priority
+ * @ipm: Priority value to set
+ *
+ * This function sets the global DSCP-to-priority mapping entry for the
+ * specified DSCP value.
+ *
+ * Return: 0 on success, or a negative error code on failure.
+ */
+static int ksz_set_global_dscp_entry(struct ksz_device *dev, u8 dscp, u8 ipm)
+{
+ int reg, per_reg, shift;
+ u8 mask;
+
+ ksz_get_dscp_prio_reg(dev, &reg, &per_reg, &mask);
+
+ shift = (dscp % per_reg) * (8 / per_reg);
+
+ return ksz_rmw8(dev, reg + (dscp / per_reg), mask << shift,
+ ipm << shift);
+}
+
+/**
+ * ksz_init_global_dscp_map - Initializes the global DSCP-to-priority mapping
+ * @dev: Pointer to the KSZ switch device structure
+ *
+ * This function initializes the global DSCP-to-priority mapping table for the
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz_init_global_dscp_map(struct ksz_device *dev)
+{
+ int ret, dscp;
+
+ /* On KSZ9xxx variants, DSCP remapping is disabled by default.
+ * Enable it to get predictable and reproducible behavior across
+ * different devices.
+ */
+ if (!is_ksz8(dev)) {
+ ret = ksz_rmw8(dev, KSZ9477_REG_SW_MAC_TOS_CTRL,
+ KSZ9477_SW_TOS_DSCP_REMAP,
+ KSZ9477_SW_TOS_DSCP_REMAP);
+ if (ret)
+ return ret;
+ }
+
+ for (dscp = 0; dscp < DSCP_MAX; dscp++) {
+ int ipm, tt;
+
+ /* Map DSCP to Traffic Type, which is corresponding to the
+ * Internal Priority Map (IPM) in the switch.
+ */
+ if (!is_ksz8(dev)) {
+ ipm = ietf_dscp_to_ieee8021q_tt(dscp);
+ } else {
+ /* On KSZ8xxx variants we do not have IPM to queue
+ * remapping table. We need to convert DSCP to Traffic
+ * Type and then to queue.
+ */
+ tt = ietf_dscp_to_ieee8021q_tt(dscp);
+ if (tt < 0)
+ return tt;
+
+ ipm = ieee8021q_tt_to_tc(tt, dev->info->num_tx_queues);
+ }
+
+ if (ipm < 0)
+ return ipm;
+
+ ret = ksz_set_global_dscp_entry(dev, dscp, ipm);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
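
A sketch of the two-step KSZ8xxx mapping described above, relying only on the helpers already used by this file; for example, DSCP 46 (EF) is expected to resolve to the voice traffic type, with the final queue depending on num_tx_queues:

	#include <net/ieee8021q.h>

	static int example_dscp_to_queue(u8 dscp, unsigned int num_tx_queues)
	{
		int tt = ietf_dscp_to_ieee8021q_tt(dscp);

		if (tt < 0)
			return tt;

		return ieee8021q_tt_to_tc(tt, num_tx_queues);
	}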
+
+/**
+ * ksz_port_add_dscp_prio - Adds a DSCP-to-priority mapping entry for a port on
+ * a KSZ switch.
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to add the DSCP-to-priority mapping entry
+ * @dscp: DSCP value for which to add the priority
+ * @prio: Priority value to set
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (prio >= dev->info->num_ipms)
+ return -ERANGE;
+
+ return ksz_set_global_dscp_entry(dev, dscp, prio);
+}
+
+/**
+ * ksz_port_del_dscp_prio - Deletes a DSCP-to-priority mapping entry for a port
+ * on a KSZ switch.
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to delete the DSCP-to-priority mapping entry
+ * @dscp: DSCP value for which to delete the priority
+ * @prio: Priority value to delete
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio)
+{
+ struct ksz_device *dev = ds->priv;
+ int ipm;
+
+ if (ksz_port_get_dscp_prio(ds, port, dscp) != prio)
+ return 0;
+
+ if (is_ksz8(dev)) {
+ ipm = ieee8021q_tt_to_tc(IEEE8021Q_TT_BE,
+ dev->info->num_tx_queues);
+ if (ipm < 0)
+ return ipm;
+ } else {
+ ipm = IEEE8021Q_TT_BE;
+ }
+
+ return ksz_set_global_dscp_entry(dev, dscp, ipm);
+}
+
+/**
+ * ksz_apptrust_error - Prints an error message for an invalid apptrust selector
+ * @dev: Pointer to the KSZ switch device structure
+ *
+ * This function prints an error message when an invalid apptrust selector is
+ * provided.
+ */
+static void ksz_apptrust_error(struct ksz_device *dev)
+{
+ char supported_apptrust_variants[64];
+ int i;
+
+ supported_apptrust_variants[0] = '\0';
+ for (i = 0; i < ARRAY_SIZE(ksz_supported_apptrust_variants); i++) {
+ if (i > 0)
+ strlcat(supported_apptrust_variants, ", ",
+ sizeof(supported_apptrust_variants));
+ strlcat(supported_apptrust_variants,
+ ksz_supported_apptrust_variants[i],
+ sizeof(supported_apptrust_variants));
+ }
+
+ dev_err(dev->dev, "Invalid apptrust selector or priority order. Supported: %s\n",
+ supported_apptrust_variants);
+}
+
+/**
+ * ksz_port_set_apptrust_validate - Validates the apptrust selectors
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to set the apptrust selectors
+ * @sel: Array of apptrust selectors to validate
+ * @nsel: Number of apptrust selectors in the array
+ *
+ * This function validates the apptrust selectors provided and ensures that
+ * they are in the correct order.
+ *
+ * This family of switches supports two apptrust selectors: DCB_APP_SEL_PCP and
+ * IEEE_8021QAZ_APP_SEL_DSCP. The priority order of the selectors is fixed and
+ * cannot be changed. The order is as follows:
+ * 1. DCB_APP_SEL_PCP - Priority Code Point selector (highest priority)
+ * 2. IEEE_8021QAZ_APP_SEL_DSCP - Differentiated Services Code Point selector
+ * (lowest priority)
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz_port_set_apptrust_validate(struct ksz_device *dev, int port,
+ const u8 *sel, int nsel)
+{
+ int i, j, found;
+ int j_prev = 0;
+
+ /* Iterate through the requested selectors */
+ for (i = 0; i < nsel; i++) {
+ found = 0;
+
+ /* Check if the current selector is supported by the hardware */
+ for (j = 0; j < ARRAY_SIZE(ksz_supported_apptrust); j++) {
+ if (sel[i] != ksz_supported_apptrust[j])
+ continue;
+
+ found = 1;
+
+ /* Ensure that no higher priority selector (lower index)
+ * precedes a lower priority one
+ */
+ if (i > 0 && j <= j_prev)
+ goto err_sel_not_valid;
+
+ j_prev = j;
+ break;
+ }
+
+ if (!found)
+ goto err_sel_not_valid;
+ }
+
+ return 0;
+
+err_sel_not_valid:
+ ksz_apptrust_error(dev);
+
+ return -EINVAL;
+}
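
Illustrative selector arrays against the validation above, assuming the two supported selectors:

	/* Matches the fixed hardware precedence (PCP over DSCP): accepted */
	static const u8 ok[]  = { DCB_APP_SEL_PCP, IEEE_8021QAZ_APP_SEL_DSCP };

	/* Requests DSCP above PCP: rejected with -EINVAL */
	static const u8 bad[] = { IEEE_8021QAZ_APP_SEL_DSCP, DCB_APP_SEL_PCP };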
+
+/**
+ * ksz88x3_port1_apptrust_quirk - Quirk for apptrust configuration on Port 1
+ * of KSZ88x3 devices
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to set the apptrust selectors
+ * @reg: Register address for the apptrust configuration
+ * @port1_data: Data to set for the apptrust configuration
+ *
+ * This function implements a quirk for apptrust configuration on Port 1 of
+ * KSZ88x3 devices. It ensures that apptrust configuration on Port 1 is not
+ * possible if PCP apptrust on Port 2 is disabled. This is because Port 2
+ * seems to be permanently hardwired to PCP classification, so Port 1 must
+ * always be configured in agreement with Port 2.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_port1_apptrust_quirk(struct ksz_device *dev, int port,
+ int reg, u8 port1_data)
+{
+ u8 port2_data;
+ int ret;
+
+ /* If no apptrust is requested for Port 1, no need to care about Port 2
+ * configuration.
+ */
+ if (!(port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE)))
+ return 0;
+
+ /* We got a request to enable some form of apptrust on Port 1. To make
+ * this possible, we need to enable multiple queues on the switch. If we
+ * enable multiqueue support, PCP classification on Port 2 will be
+ * automatically activated by the HW.
+ */
+ ret = ksz_pread8(dev, KSZ_PORT_2, reg, &port2_data);
+ if (ret)
+ return ret;
+
+ /* If the KSZ8_PORT_802_1P_ENABLE bit is set on Port 2, the driver has
+ * shown interest in PCP classification on Port 2. In this case,
+ * multiqueue support is enabled and we can enable any apptrust on
+ * Port 1.
+ * If the KSZ8_PORT_802_1P_ENABLE bit is not set on Port 2, PCP
+ * classification on Port 2 is still active, but the driver has disabled
+ * multiqueue support, making frame prioritization inactive for all
+ * ports. In this case, we can't enable any apptrust on Port 1.
+ */
+ if (!(port2_data & KSZ8_PORT_802_1P_ENABLE)) {
+ dev_err(dev->dev, "Not possible to enable any apptrust on Port 1 if PCP apptrust on Port 2 is disabled\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
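+/* Summary of the Port 1 quirk above (illustrative, based on the description):
+ *
+ *   Port 1 request      Port 2 KSZ8_PORT_802_1P_ENABLE   result
+ *   no apptrust         don't care                       0
+ *   PCP and/or DSCP     set                              0
+ *   PCP and/or DSCP     cleared                          -EINVAL
+ */
+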
+/**
+ * ksz88x3_port2_apptrust_quirk - Quirk for apptrust configuration on Port 2
+ * of KSZ88x3 devices
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to set the apptrust selectors
+ * @reg: Register address for the apptrust configuration
+ * @port2_data: Data to set for the apptrust configuration
+ *
+ * This function implements a quirk for apptrust configuration on Port 2 of
+ * KSZ88x3 devices. It rejects DSCP apptrust, which does not work on Port 2,
+ * and only allows PCP on Port 2 to be disabled when no other port depends on
+ * it, since the only way to disable PCP on Port 2 is to disable multiple
+ * queues on the switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_port2_apptrust_quirk(struct ksz_device *dev, int port,
+ int reg, u8 port2_data)
+{
+ struct dsa_switch *ds = dev->ds;
+ u8 port1_data;
+ int ret;
+
+ /* First validate Port 2 configuration. DiffServ/DSCP is not working
+ * on this port.
+ */
+ if (port2_data & KSZ8_PORT_DIFFSERV_ENABLE) {
+ dev_err(dev->dev, "DSCP apptrust is not working on Port 2\n");
+ return -EINVAL;
+ }
+
+ /* If PCP support is requested, we need to enable all queues on the
+ * switch to make PCP priority working on Port 2.
+ */
+ if (port2_data & KSZ8_PORT_802_1P_ENABLE)
+ return ksz8_all_queues_split(dev, dev->info->num_tx_queues);
+
+ /* We got a request to disable PCP priority on Port 2.
+ * Now, we need to compare the Port 2 configuration with the Port 1
+ * configuration.
+ */
+ ret = ksz_pread8(dev, KSZ_PORT_1, reg, &port1_data);
+ if (ret)
+ return ret;
+
+ /* If Port 1 has any apptrust enabled, we can't disable multiple queues
+ * on the switch, so we can't disable PCP on Port 2.
+ */
+ if (port1_data & (KSZ8_PORT_802_1P_ENABLE | KSZ8_PORT_DIFFSERV_ENABLE)) {
+ dev_err(dev->dev, "Not possible to disable PCP on Port 2 if any apptrust is enabled on Port 1\n");
+ return -EINVAL;
+ }
+
+ /* Now we need to ensure that the default priority on Port 1 is set to
+ * 0, otherwise we can't disable multiqueue support on the switch.
+ */
+ ret = ksz_port_get_default_prio(ds, KSZ_PORT_1);
+ if (ret < 0) {
+ return ret;
+ } else if (ret) {
+ dev_err(dev->dev, "Not possible to disable PCP on Port 2 if non zero default priority is set on Port 1\n");
+ return -EINVAL;
+ }
+
+ /* Port 1 has no apptrust or default priority set, and we got a request
+ * to disable PCP on Port 2, so we can disable multiqueue support to
+ * disable PCP on Port 2.
+ */
+ return ksz8_all_queues_split(dev, 1);
+}
+
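+/* Summary of the Port 2 quirk above (illustrative, based on the description):
+ *
+ *   Port 2 request   Port 1 state                     action
+ *   DSCP             don't care                       -EINVAL (not working)
+ *   PCP              don't care                       split all TX queues
+ *   none             apptrust or default prio set     -EINVAL
+ *   none             no apptrust, default prio 0      single queue, PCP off
+ */
+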
+/**
+ * ksz88x3_port_apptrust_quirk - Quirk for apptrust configuration on KSZ88x3
+ * devices
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to set the apptrust selectors
+ * @reg: Register address for the apptrust configuration
+ * @data: Data to set for the apptrust configuration
+ *
+ * This function implements a quirk for apptrust configuration on KSZ88x3
+ * devices. It ensures that apptrust configuration on Port 1 and
+ * Port 2 is done in agreement with each other.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+static int ksz88x3_port_apptrust_quirk(struct ksz_device *dev, int port,
+ int reg, u8 data)
+{
+ if (port == KSZ_PORT_1)
+ return ksz88x3_port1_apptrust_quirk(dev, port, reg, data);
+ else if (port == KSZ_PORT_2)
+ return ksz88x3_port2_apptrust_quirk(dev, port, reg, data);
+
+ return 0;
+}
+
+/**
+ * ksz_port_set_apptrust - Sets the apptrust selectors for a port on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to set the apptrust selectors
+ * @sel: Array of apptrust selectors to set
+ * @nsel: Number of apptrust selectors in the array
+ *
+ * This function sets the apptrust selectors for the specified port on a KSZ
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_set_apptrust(struct dsa_switch *ds, int port,
+ const u8 *sel, int nsel)
+{
+ const struct ksz_apptrust_map *map;
+ struct ksz_device *dev = ds->priv;
+ int reg, i, ret;
+ u8 data = 0;
+ u8 mask;
+
+ ret = ksz_port_set_apptrust_validate(dev, port, sel, nsel);
+ if (ret)
+ return ret;
+
+ ksz_get_apptrust_map_and_reg(dev, &map, &reg, &mask);
+
+ for (i = 0; i < nsel; i++) {
+ int j;
+
+ for (j = 0; j < ARRAY_SIZE(ksz_supported_apptrust); j++) {
+ if (sel[i] != ksz_supported_apptrust[j])
+ continue;
+
+ data |= map[j].bit;
+ break;
+ }
+ }
+
+ if (ksz_is_ksz88x3(dev)) {
+ ret = ksz88x3_port_apptrust_quirk(dev, port, reg, data);
+ if (ret)
+ return ret;
+ }
+
+ return ksz_prmw8(dev, port, reg, mask, data);
+}
+
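+/* Usage sketch (illustrative; the selector arrays are hypothetical examples,
+ * not defined by this driver):
+ *
+ *   static const u8 pcp_only[] = { DCB_APP_SEL_PCP };
+ *   static const u8 pcp_dscp[] = { DCB_APP_SEL_PCP,
+ *                                  IEEE_8021QAZ_APP_SEL_DSCP };
+ *
+ *   err = ksz_port_set_apptrust(ds, port, pcp_only, ARRAY_SIZE(pcp_only));
+ *   err = ksz_port_set_apptrust(ds, port, pcp_dscp, ARRAY_SIZE(pcp_dscp));
+ *
+ * Passing nsel = 0 requests that all selectors be disabled, subject to the
+ * KSZ88x3 port quirks above.
+ */
+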
+/**
+ * ksz_port_get_apptrust - Retrieves the apptrust selectors for a port on a KSZ
+ * switch
+ * @ds: Pointer to the DSA switch structure
+ * @port: Port number for which to get the apptrust selectors
+ * @sel: Array to store the apptrust selectors
+ * @nsel: Number of apptrust selectors in the array
+ *
+ * This function fetches the apptrust selectors for the specified port on a KSZ
+ * switch.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_port_get_apptrust(struct dsa_switch *ds, int port, u8 *sel, int *nsel)
+{
+ const struct ksz_apptrust_map *map;
+ struct ksz_device *dev = ds->priv;
+ int reg, i, ret;
+ u8 data;
+ u8 mask;
+
+ ksz_get_apptrust_map_and_reg(dev, &map, &reg, &mask);
+
+ ret = ksz_pread8(dev, port, reg, &data);
+ if (ret)
+ return ret;
+
+ *nsel = 0;
+ for (i = 0; i < ARRAY_SIZE(ksz_supported_apptrust); i++) {
+ if (data & map[i].bit)
+ sel[(*nsel)++] = ksz_supported_apptrust[i];
+ }
+
+ return 0;
+}
+
+/**
+ * ksz_dcb_init_port - Initializes the DCB configuration for a port on a KSZ switch
+ * @dev: Pointer to the KSZ switch device structure
+ * @port: Port number for which to initialize the DCB configuration
+ *
+ * This function initializes the DCB configuration for the specified port on a
+ * KSZ switch: the default priority is set to Best Effort, and PCP is enabled
+ * as the only apptrust selector.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_dcb_init_port(struct ksz_device *dev, int port)
+{
+ const u8 ksz_default_apptrust[] = { DCB_APP_SEL_PCP };
+ int ret, ipm;
+
+ if (is_ksz8(dev)) {
+ ipm = ieee8021q_tt_to_tc(IEEE8021Q_TT_BE,
+ dev->info->num_tx_queues);
+ if (ipm < 0)
+ return ipm;
+ } else {
+ ipm = IEEE8021Q_TT_BE;
+ }
+
+ /* Set the default priority for the port to Best Effort */
+ ret = ksz_port_set_default_prio(dev->ds, port, ipm);
+ if (ret)
+ return ret;
+
+ return ksz_port_set_apptrust(dev->ds, port, ksz_default_apptrust,
+ ARRAY_SIZE(ksz_default_apptrust));
+}
+
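+/* Note (assumption based on the IEEE 802.1Q helpers in net/ieee8021q.h):
+ * on KSZ8 parts the Best Effort traffic type is first translated into a
+ * traffic class matching the number of TX queues via ieee8021q_tt_to_tc(),
+ * while newer parts consume the IEEE8021Q_TT_BE value directly.
+ */
+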
+/**
+ * ksz_dcb_init - Initializes the DCB configuration for a KSZ switch
+ * @dev: Pointer to the KSZ switch device structure
+ *
+ * This function initializes the DCB configuration for a KSZ switch. The global
+ * DSCP-to-priority mapping table is initialized.
+ *
+ * Return: 0 on success, or a negative error code on failure
+ */
+int ksz_dcb_init(struct ksz_device *dev)
+{
+ int ret;
+
+ ret = ksz_init_global_dscp_map(dev);
+ if (ret)
+ return ret;
+
+ /* Enable 802.1p priority control on Port 2 during switch initialization.
+ * This setup is critical for the apptrust functionality on Port 1, which
+ * relies on the priority settings of Port 2. Note: Port 1 is naturally
+ * configured before Port 2, necessitating this configuration order.
+ */
+ if (ksz_is_ksz88x3(dev))
+ return ksz_prmw8(dev, KSZ_PORT_2, KSZ8_REG_PORT_1_CTRL_0,
+ KSZ8_PORT_802_1P_ENABLE,
+ KSZ8_PORT_802_1P_ENABLE);
+
+ return 0;
+}
diff --git a/drivers/net/dsa/microchip/ksz_dcb.h b/drivers/net/dsa/microchip/ksz_dcb.h
new file mode 100644
index 000000000000..e2065223ba90
--- /dev/null
+++ b/drivers/net/dsa/microchip/ksz_dcb.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024 Pengutronix, Oleksij Rempel <kernel@pengutronix.de> */
+
+#ifndef __KSZ_DCB_H
+#define __KSZ_DCB_H
+
+#include <net/dsa.h>
+
+#include "ksz_common.h"
+
+int ksz_port_get_default_prio(struct dsa_switch *ds, int port);
+int ksz_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio);
+int ksz_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp);
+int ksz_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio);
+int ksz_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio);
+int ksz_port_set_apptrust(struct dsa_switch *ds, int port,
+ const unsigned char *sel,
+ int nsel);
+int ksz_port_get_apptrust(struct dsa_switch *ds, int port, u8 *sel, int *nsel);
+int ksz_dcb_init_port(struct ksz_device *dev, int port);
+int ksz_dcb_init(struct ksz_device *dev);
+
+#endif /* __KSZ_DCB_H */
diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c
index 1fe105913c75..050f17c43ef6 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.c
+++ b/drivers/net/dsa/microchip/ksz_ptp.c
@@ -266,7 +266,6 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev)
struct ksz_port *prt;
struct dsa_port *dp;
bool tag_en = false;
- int ret;
dsa_switch_for_each_user_port(dp, dev->ds) {
prt = &dev->ports[dp->index];
@@ -277,9 +276,7 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev)
}
if (tag_en) {
- ret = ptp_schedule_worker(ptp_data->clock, 0);
- if (ret)
- return ret;
+ ptp_schedule_worker(ptp_data->clock, 0);
} else {
ptp_cancel_worker_sync(ptp_data->clock);
}
@@ -293,7 +290,7 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev)
/* The function reports the capabilities of the timestamping feature when
* requested through the ethtool -T <interface> utility
*/
-int ksz_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts)
+int ksz_get_ts_info(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_info *ts)
{
struct ksz_device *dev = ds->priv;
struct ksz_ptp_data *ptp_data;
diff --git a/drivers/net/dsa/microchip/ksz_ptp.h b/drivers/net/dsa/microchip/ksz_ptp.h
index 0ca8ca4f804e..2f1783c0d723 100644
--- a/drivers/net/dsa/microchip/ksz_ptp.h
+++ b/drivers/net/dsa/microchip/ksz_ptp.h
@@ -38,7 +38,7 @@ int ksz_ptp_clock_register(struct dsa_switch *ds);
void ksz_ptp_clock_unregister(struct dsa_switch *ds);
int ksz_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *ts);
+ struct kernel_ethtool_ts_info *ts);
int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr);
int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr);
void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb);
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index c8166fb440ab..8e8d83213b04 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -222,7 +222,6 @@ MODULE_DEVICE_TABLE(spi, ksz_spi_ids);
static struct spi_driver ksz_spi_driver = {
.driver = {
.name = "ksz-switch",
- .owner = THIS_MODULE,
.of_match_table = ksz_dt_ids,
},
.id_table = ksz_spi_ids,
@@ -233,13 +232,6 @@ static struct spi_driver ksz_spi_driver = {
module_spi_driver(ksz_spi_driver);
-MODULE_ALIAS("spi:ksz9477");
-MODULE_ALIAS("spi:ksz9896");
-MODULE_ALIAS("spi:ksz9897");
-MODULE_ALIAS("spi:ksz9893");
-MODULE_ALIAS("spi:ksz9563");
-MODULE_ALIAS("spi:ksz8563");
-MODULE_ALIAS("spi:ksz9567");
MODULE_ALIAS("spi:lan937x");
MODULE_AUTHOR("Tristram Ha <Tristram.Ha@microchip.com>");
MODULE_DESCRIPTION("Microchip ksz Series Switch SPI Driver");
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index b479a628b1ae..824d9309a3d3 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -55,6 +55,9 @@ static int lan937x_vphy_ind_addr_wr(struct ksz_device *dev, int addr, int reg)
u16 addr_base = REG_PORT_T1_PHY_CTRL_BASE;
u16 temp;
+ if (is_lan937x_tx_phy(dev, addr))
+ addr_base = REG_PORT_TX_PHY_CTRL_BASE;
+
/* get register address based on the logical port */
temp = PORT_CTRL_ADDR(addr, (addr_base + (reg << 2)));
@@ -320,6 +323,9 @@ void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
/* MII/RMII/RGMII ports */
config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_100HD | MAC_10 | MAC_1000FD;
+ } else if (is_lan937x_tx_phy(dev, port)) {
+ config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_100HD | MAC_10;
}
}
@@ -370,23 +376,33 @@ int lan937x_setup(struct dsa_switch *ds)
ds->vlan_filtering_is_global = true;
/* Enable aggressive back off for half duplex & UNH mode */
- lan937x_cfg(dev, REG_SW_MAC_CTRL_0,
- (SW_PAUSE_UNH_MODE | SW_NEW_BACKOFF | SW_AGGR_BACKOFF),
- true);
+ ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_0, (SW_PAUSE_UNH_MODE |
+ SW_NEW_BACKOFF |
+ SW_AGGR_BACKOFF), true);
+ if (ret < 0)
+ return ret;
/* If NO_EXC_COLLISION_DROP bit is set, the switch will not drop
* packets when 16 or more collisions occur
*/
- lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true);
+ ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true);
+ if (ret < 0)
+ return ret;
/* enable global MIB counter freeze function */
- lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+ ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true);
+ if (ret < 0)
+ return ret;
/* disable CLK125 & CLK25, 1: disable, 0: enable */
- lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
- (SW_CLK125_ENB | SW_CLK25_ENB), true);
+ ret = lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1,
+ (SW_CLK125_ENB | SW_CLK25_ENB), true);
+ if (ret < 0)
+ return ret;
- return 0;
+ /* Disable global VPHY support. Related to CPU interface only? */
+ return ksz_rmw32(dev, REG_SW_CFG_STRAP_OVR, SW_VPHY_DISABLE,
+ SW_VPHY_DISABLE);
}
void lan937x_teardown(struct dsa_switch *ds)
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
index 45b606b6429f..2f22a9d01de3 100644
--- a/drivers/net/dsa/microchip/lan937x_reg.h
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -37,6 +37,10 @@
#define SW_CLK125_ENB BIT(1)
#define SW_CLK25_ENB BIT(0)
+/* 2 - PHY Control */
+#define REG_SW_CFG_STRAP_OVR 0x0214
+#define SW_VPHY_DISABLE BIT(31)
+
/* 3 - Operation Control */
#define REG_SW_OPERATION 0x0300
@@ -147,6 +151,7 @@
/* 1 - Phy */
#define REG_PORT_T1_PHY_CTRL_BASE 0x0100
+#define REG_PORT_TX_PHY_CTRL_BASE 0x0280
/* 3 - xMII */
#define PORT_SGMII_SEL BIT(7)
diff --git a/drivers/net/dsa/mt7530-mdio.c b/drivers/net/dsa/mt7530-mdio.c
index fa3ee85a99c1..51df42ccdbe6 100644
--- a/drivers/net/dsa/mt7530-mdio.c
+++ b/drivers/net/dsa/mt7530-mdio.c
@@ -18,7 +18,8 @@
static int
mt7530_regmap_write(void *context, unsigned int reg, unsigned int val)
{
- struct mii_bus *bus = context;
+ struct mt7530_priv *priv = context;
+ struct mii_bus *bus = priv->bus;
u16 page, r, lo, hi;
int ret;
@@ -27,36 +28,35 @@ mt7530_regmap_write(void *context, unsigned int reg, unsigned int val)
lo = val & 0xffff;
hi = val >> 16;
- /* MT7530 uses 31 as the pseudo port */
- ret = bus->write(bus, 0x1f, 0x1f, page);
+ ret = bus->write(bus, priv->mdiodev->addr, 0x1f, page);
if (ret < 0)
return ret;
- ret = bus->write(bus, 0x1f, r, lo);
+ ret = bus->write(bus, priv->mdiodev->addr, r, lo);
if (ret < 0)
return ret;
- ret = bus->write(bus, 0x1f, 0x10, hi);
+ ret = bus->write(bus, priv->mdiodev->addr, 0x10, hi);
return ret;
}
static int
mt7530_regmap_read(void *context, unsigned int reg, unsigned int *val)
{
- struct mii_bus *bus = context;
+ struct mt7530_priv *priv = context;
+ struct mii_bus *bus = priv->bus;
u16 page, r, lo, hi;
int ret;
page = (reg >> 6) & 0x3ff;
r = (reg >> 2) & 0xf;
- /* MT7530 uses 31 as the pseudo port */
- ret = bus->write(bus, 0x1f, 0x1f, page);
+ ret = bus->write(bus, priv->mdiodev->addr, 0x1f, page);
if (ret < 0)
return ret;
- lo = bus->read(bus, 0x1f, r);
- hi = bus->read(bus, 0x1f, 0x10);
+ lo = bus->read(bus, priv->mdiodev->addr, r);
+ hi = bus->read(bus, priv->mdiodev->addr, 0x10);
*val = (hi << 16) | (lo & 0xffff);
@@ -107,8 +107,7 @@ mt7531_create_sgmii(struct mt7530_priv *priv)
mt7531_pcs_config[i]->unlock = mt7530_mdio_regmap_unlock;
mt7531_pcs_config[i]->lock_arg = &priv->bus->mdio_lock;
- regmap = devm_regmap_init(priv->dev,
- &mt7530_regmap_bus, priv->bus,
+ regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus, priv,
mt7531_pcs_config[i]);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
@@ -153,6 +152,7 @@ mt7530_probe(struct mdio_device *mdiodev)
priv->bus = mdiodev->bus;
priv->dev = &mdiodev->dev;
+ priv->mdiodev = mdiodev;
ret = mt7530_probe_common(priv);
if (ret)
@@ -203,8 +203,8 @@ mt7530_probe(struct mdio_device *mdiodev)
regmap_config->reg_stride = 4;
regmap_config->max_register = MT7530_CREV;
regmap_config->disable_locking = true;
- priv->regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus,
- priv->bus, regmap_config);
+ priv->regmap = devm_regmap_init(priv->dev, &mt7530_regmap_bus, priv,
+ regmap_config);
if (IS_ERR(priv->regmap))
return PTR_ERR(priv->regmap);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 8090390edaf9..ec18e68bf3a8 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -74,108 +74,94 @@ static const struct mt7530_mib_desc mt7530_mib[] = {
MIB_DESC(1, 0xb8, "RxArlDrop"),
};
-/* Since phy_device has not yet been created and
- * phy_{read,write}_mmd_indirect is not available, we provide our own
- * core_{read,write}_mmd_indirect with core_{clear,write,set} wrappers
- * to complete this function.
- */
-static int
-core_read_mmd_indirect(struct mt7530_priv *priv, int prtad, int devad)
+static void
+mt7530_mutex_lock(struct mt7530_priv *priv)
+{
+ if (priv->bus)
+ mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
+}
+
+static void
+mt7530_mutex_unlock(struct mt7530_priv *priv)
+{
+ if (priv->bus)
+ mutex_unlock(&priv->bus->mdio_lock);
+}
+
+static void
+core_write(struct mt7530_priv *priv, u32 reg, u32 val)
{
struct mii_bus *bus = priv->bus;
- int value, ret;
+ int ret;
+
+ mt7530_mutex_lock(priv);
/* Write the desired MMD Devad */
- ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2);
if (ret < 0)
goto err;
/* Write the desired MMD register address */
- ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, reg);
if (ret < 0)
goto err;
/* Select the Function : DATA with no post increment */
- ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR);
if (ret < 0)
goto err;
- /* Read the content of the MMD's selected register */
- value = bus->read(bus, 0, MII_MMD_DATA);
-
- return value;
+ /* Write the data into MMD's selected register */
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, val);
err:
- dev_err(&bus->dev, "failed to read mmd register\n");
+ if (ret < 0)
+ dev_err(&bus->dev, "failed to write mmd register\n");
- return ret;
+ mt7530_mutex_unlock(priv);
}
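+
+/* For reference: the accesses above follow the standard Clause 22 indirect
+ * MMD sequence:
+ *   1. MII_MMD_CTRL <- MDIO_MMD_VEND2                       (address mode)
+ *   2. MII_MMD_DATA <- reg                                  (register address)
+ *   3. MII_MMD_CTRL <- MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR (data mode)
+ *   4. MII_MMD_DATA <- val                                  (data)
+ */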
-static int
-core_write_mmd_indirect(struct mt7530_priv *priv, int prtad,
- int devad, u32 data)
+static void
+core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
{
struct mii_bus *bus = priv->bus;
+ u32 val;
int ret;
+ mt7530_mutex_lock(priv);
+
/* Write the desired MMD Devad */
- ret = bus->write(bus, 0, MII_MMD_CTRL, devad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2);
if (ret < 0)
goto err;
/* Write the desired MMD register address */
- ret = bus->write(bus, 0, MII_MMD_DATA, prtad);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, reg);
if (ret < 0)
goto err;
/* Select the Function : DATA with no post increment */
- ret = bus->write(bus, 0, MII_MMD_CTRL, (devad | MII_MMD_CTRL_NOINCR));
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_CTRL, MDIO_MMD_VEND2 | MII_MMD_CTRL_NOINCR);
if (ret < 0)
goto err;
+ /* Read the content of the MMD's selected register */
+ val = bus->read(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA);
+ val &= ~mask;
+ val |= set;
/* Write the data into MMD's selected register */
- ret = bus->write(bus, 0, MII_MMD_DATA, data);
+ ret = bus->write(bus, MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MII_MMD_DATA, val);
err:
if (ret < 0)
- dev_err(&bus->dev,
- "failed to write mmd register\n");
- return ret;
-}
-
-static void
-mt7530_mutex_lock(struct mt7530_priv *priv)
-{
- if (priv->bus)
- mutex_lock_nested(&priv->bus->mdio_lock, MDIO_MUTEX_NESTED);
-}
-
-static void
-mt7530_mutex_unlock(struct mt7530_priv *priv)
-{
- if (priv->bus)
- mutex_unlock(&priv->bus->mdio_lock);
-}
-
-static void
-core_write(struct mt7530_priv *priv, u32 reg, u32 val)
-{
- mt7530_mutex_lock(priv);
-
- core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
-
- mt7530_mutex_unlock(priv);
-}
-
-static void
-core_rmw(struct mt7530_priv *priv, u32 reg, u32 mask, u32 set)
-{
- u32 val;
-
- mt7530_mutex_lock(priv);
-
- val = core_read_mmd_indirect(priv, reg, MDIO_MMD_VEND2);
- val &= ~mask;
- val |= set;
- core_write_mmd_indirect(priv, reg, MDIO_MMD_VEND2, val);
+ dev_err(&bus->dev, "failed to write mmd register\n");
mt7530_mutex_unlock(priv);
}
@@ -431,23 +417,23 @@ mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface)
mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(1));
- xtal = mt7530_read(priv, MT7530_MHWTRAP) & HWTRAP_XTAL_MASK;
+ xtal = mt7530_read(priv, MT753X_MTRAP) & MT7530_XTAL_MASK;
- if (xtal == HWTRAP_XTAL_25MHZ)
+ if (xtal == MT7530_XTAL_25MHZ)
ssc_delta = 0x57;
else
ssc_delta = 0x87;
if (priv->id == ID_MT7621) {
/* PLL frequency: 125MHz: 1.0GBit */
- if (xtal == HWTRAP_XTAL_40MHZ)
+ if (xtal == MT7530_XTAL_40MHZ)
ncpo1 = 0x0640;
- if (xtal == HWTRAP_XTAL_25MHZ)
+ if (xtal == MT7530_XTAL_25MHZ)
ncpo1 = 0x0a00;
} else { /* PLL frequency: 250MHz: 2.0Gbit */
- if (xtal == HWTRAP_XTAL_40MHZ)
+ if (xtal == MT7530_XTAL_40MHZ)
ncpo1 = 0x0c80;
- if (xtal == HWTRAP_XTAL_25MHZ)
+ if (xtal == MT7530_XTAL_25MHZ)
ncpo1 = 0x1400;
}
@@ -470,19 +456,20 @@ mt7530_setup_port6(struct dsa_switch *ds, phy_interface_t interface)
static void
mt7531_pll_setup(struct mt7530_priv *priv)
{
+ enum mt7531_xtal_fsel xtal;
u32 top_sig;
u32 hwstrap;
- u32 xtal;
u32 val;
val = mt7530_read(priv, MT7531_CREV);
top_sig = mt7530_read(priv, MT7531_TOP_SIG_SR);
- hwstrap = mt7530_read(priv, MT7531_HWTRAP);
+ hwstrap = mt7530_read(priv, MT753X_TRAP);
if ((val & CHIP_REV_M) > 0)
- xtal = (top_sig & PAD_MCM_SMI_EN) ? HWTRAP_XTAL_FSEL_40MHZ :
- HWTRAP_XTAL_FSEL_25MHZ;
+ xtal = (top_sig & PAD_MCM_SMI_EN) ? MT7531_XTAL_FSEL_40MHZ :
+ MT7531_XTAL_FSEL_25MHZ;
else
- xtal = hwstrap & HWTRAP_XTAL_FSEL_MASK;
+ xtal = (hwstrap & MT7531_XTAL25) ? MT7531_XTAL_FSEL_25MHZ :
+ MT7531_XTAL_FSEL_40MHZ;
/* Step 1 : Disable MT7531 COREPLL */
val = mt7530_read(priv, MT7531_PLLGP_EN);
@@ -511,13 +498,13 @@ mt7531_pll_setup(struct mt7530_priv *priv)
usleep_range(25, 35);
switch (xtal) {
- case HWTRAP_XTAL_FSEL_25MHZ:
+ case MT7531_XTAL_FSEL_25MHZ:
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val &= ~RG_COREPLL_SDM_PCW_M;
val |= 0x140000 << RG_COREPLL_SDM_PCW_S;
mt7530_write(priv, MT7531_PLLGP_CR0, val);
break;
- case HWTRAP_XTAL_FSEL_40MHZ:
+ case MT7531_XTAL_FSEL_40MHZ:
val = mt7530_read(priv, MT7531_PLLGP_CR0);
val &= ~RG_COREPLL_SDM_PCW_M;
val |= 0x190000 << RG_COREPLL_SDM_PCW_S;
@@ -871,19 +858,15 @@ mt7530_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
return 0;
}
-static const char *p5_intf_modes(unsigned int p5_interface)
+static const char *mt7530_p5_mode_str(unsigned int mode)
{
- switch (p5_interface) {
- case P5_DISABLED:
- return "DISABLED";
- case P5_INTF_SEL_PHY_P0:
- return "PHY P0";
- case P5_INTF_SEL_PHY_P4:
- return "PHY P4";
- case P5_INTF_SEL_GMAC5:
- return "GMAC5";
+ switch (mode) {
+ case MUX_PHY_P0:
+ return "MUX PHY P0";
+ case MUX_PHY_P4:
+ return "MUX PHY P4";
default:
- return "unknown";
+ return "GMAC5";
}
}
@@ -895,34 +878,31 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
mutex_lock(&priv->reg_mutex);
- val = mt7530_read(priv, MT7530_MHWTRAP);
+ val = mt7530_read(priv, MT753X_MTRAP);
- val |= MHWTRAP_MANUAL | MHWTRAP_P5_MAC_SEL | MHWTRAP_P5_DIS;
- val &= ~MHWTRAP_P5_RGMII_MODE & ~MHWTRAP_PHY0_SEL;
+ val &= ~MT7530_P5_PHY0_SEL & ~MT7530_P5_MAC_SEL & ~MT7530_P5_RGMII_MODE;
- switch (priv->p5_intf_sel) {
- case P5_INTF_SEL_PHY_P0:
- /* MT7530_P5_MODE_GPHY_P0: 2nd GMAC -> P5 -> P0 */
- val |= MHWTRAP_PHY0_SEL;
+ switch (priv->p5_mode) {
+ /* MUX_PHY_P0: P0 -> P5 -> SoC MAC */
+ case MUX_PHY_P0:
+ val |= MT7530_P5_PHY0_SEL;
fallthrough;
- case P5_INTF_SEL_PHY_P4:
- /* MT7530_P5_MODE_GPHY_P4: 2nd GMAC -> P5 -> P4 */
- val &= ~MHWTRAP_P5_MAC_SEL & ~MHWTRAP_P5_DIS;
+ /* MUX_PHY_P4: P4 -> P5 -> SoC MAC */
+ case MUX_PHY_P4:
/* Setup the MAC by default for the cpu port */
- mt7530_write(priv, MT7530_PMCR_P(5), 0x56300);
- break;
- case P5_INTF_SEL_GMAC5:
- /* MT7530_P5_MODE_GMAC: P5 -> External phy or 2nd GMAC */
- val &= ~MHWTRAP_P5_DIS;
+ mt7530_write(priv, MT753X_PMCR_P(5), 0x56300);
break;
+
+ /* GMAC5: P5 -> SoC MAC or external PHY */
default:
+ val |= MT7530_P5_MAC_SEL;
break;
}
/* Setup RGMII settings */
if (phy_interface_mode_is_rgmii(interface)) {
- val |= MHWTRAP_P5_RGMII_MODE;
+ val |= MT7530_P5_RGMII_MODE;
/* P5 RGMII RX Clock Control: delay setting for 1000M */
mt7530_write(priv, MT7530_P5RGMIIRXCR, CSR_RGMII_EDGE_ALIGN);
@@ -942,10 +922,10 @@ static void mt7530_setup_port5(struct dsa_switch *ds, phy_interface_t interface)
P5_IO_CLK_DRV(1) | P5_IO_DATA_DRV(1));
}
- mt7530_write(priv, MT7530_MHWTRAP, val);
+ mt7530_write(priv, MT753X_MTRAP, val);
- dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, intf_sel=%s, phy-mode=%s\n",
- val, p5_intf_modes(priv->p5_intf_sel), phy_modes(interface));
+ dev_dbg(ds->dev, "Setup P5, HWTRAP=0x%x, mode=%s, phy-mode=%s\n", val,
+ mt7530_p5_mode_str(priv->p5_mode), phy_modes(interface));
mutex_unlock(&priv->reg_mutex);
}
@@ -1125,42 +1105,34 @@ mt753x_trap_frames(struct mt7530_priv *priv)
* VLAN-untagged.
*/
mt7530_rmw(priv, MT753X_BPC,
- MT753X_PAE_BPDU_FR | MT753X_PAE_EG_TAG_MASK |
- MT753X_PAE_PORT_FW_MASK | MT753X_BPDU_EG_TAG_MASK |
- MT753X_BPDU_PORT_FW_MASK,
- MT753X_PAE_BPDU_FR |
- MT753X_PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_PAE_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ PAE_BPDU_FR | PAE_EG_TAG_MASK | PAE_PORT_FW_MASK |
+ BPDU_EG_TAG_MASK | BPDU_PORT_FW_MASK,
+ PAE_BPDU_FR | PAE_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ PAE_PORT_FW(TO_CPU_FW_CPU_ONLY) |
+ BPDU_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ TO_CPU_FW_CPU_ONLY);
/* Trap frames with :01 and :02 MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
mt7530_rmw(priv, MT753X_RGAC1,
- MT753X_R02_BPDU_FR | MT753X_R02_EG_TAG_MASK |
- MT753X_R02_PORT_FW_MASK | MT753X_R01_BPDU_FR |
- MT753X_R01_EG_TAG_MASK | MT753X_R01_PORT_FW_MASK,
- MT753X_R02_BPDU_FR |
- MT753X_R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R02_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R01_BPDU_FR |
- MT753X_R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ R02_BPDU_FR | R02_EG_TAG_MASK | R02_PORT_FW_MASK |
+ R01_BPDU_FR | R01_EG_TAG_MASK | R01_PORT_FW_MASK,
+ R02_BPDU_FR | R02_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ R02_PORT_FW(TO_CPU_FW_CPU_ONLY) | R01_BPDU_FR |
+ R01_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ TO_CPU_FW_CPU_ONLY);
/* Trap frames with :03 and :0E MAC DAs to the CPU port(s) and egress
* them VLAN-untagged.
*/
mt7530_rmw(priv, MT753X_RGAC2,
- MT753X_R0E_BPDU_FR | MT753X_R0E_EG_TAG_MASK |
- MT753X_R0E_PORT_FW_MASK | MT753X_R03_BPDU_FR |
- MT753X_R03_EG_TAG_MASK | MT753X_R03_PORT_FW_MASK,
- MT753X_R0E_BPDU_FR |
- MT753X_R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_R0E_PORT_FW(MT753X_BPDU_CPU_ONLY) |
- MT753X_R03_BPDU_FR |
- MT753X_R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
- MT753X_BPDU_CPU_ONLY);
+ R0E_BPDU_FR | R0E_EG_TAG_MASK | R0E_PORT_FW_MASK |
+ R03_BPDU_FR | R03_EG_TAG_MASK | R03_PORT_FW_MASK,
+ R0E_BPDU_FR | R0E_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ R0E_PORT_FW(TO_CPU_FW_CPU_ONLY) | R03_BPDU_FR |
+ R03_EG_TAG(MT7530_VLAN_EG_UNTAGGED) |
+ TO_CPU_FW_CPU_ONLY);
}
static void
@@ -1173,7 +1145,7 @@ mt753x_cpu_port_enable(struct dsa_switch *ds, int port)
PORT_SPEC_TAG);
/* Enable flooding on the CPU port */
- mt7530_set(priv, MT7530_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
+ mt7530_set(priv, MT753X_MFC, BC_FFP(BIT(port)) | UNM_FFP(BIT(port)) |
UNU_FFP(BIT(port)));
/* Add the CPU port to the CPU port bitmap for MT7531 and the switch on
@@ -1218,6 +1190,14 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
mutex_unlock(&priv->reg_mutex);
+ if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
+ return 0;
+
+ if (port == 5)
+ mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS);
+ else if (port == 6)
+ mt7530_clear(priv, MT753X_MTRAP, MT7530_P6_DIS);
+
return 0;
}
@@ -1236,6 +1216,15 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
PCR_MATRIX_CLR);
mutex_unlock(&priv->reg_mutex);
+
+ if (priv->id != ID_MT7530 && priv->id != ID_MT7621)
+ return;
+
+ /* Do not set MT7530_P5_DIS when port 5 is being used for PHY muxing. */
+ if (port == 5 && priv->p5_mode == GMAC5)
+ mt7530_set(priv, MT753X_MTRAP, MT7530_P5_DIS);
+ else if (port == 6)
+ mt7530_set(priv, MT753X_MTRAP, MT7530_P6_DIS);
}
static int
@@ -1313,13 +1302,62 @@ mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state)
FID_PST(FID_BRIDGED, stp_state));
}
+static void mt7530_update_port_member(struct mt7530_priv *priv, int port,
+ const struct net_device *bridge_dev,
+ bool join) __must_hold(&priv->reg_mutex)
+{
+ struct dsa_port *dp = dsa_to_port(priv->ds, port), *other_dp;
+ struct mt7530_port *p = &priv->ports[port], *other_p;
+ struct dsa_port *cpu_dp = dp->cpu_dp;
+ u32 port_bitmap = BIT(cpu_dp->index);
+ int other_port;
+ bool isolated;
+
+ dsa_switch_for_each_user_port(other_dp, priv->ds) {
+ other_port = other_dp->index;
+ other_p = &priv->ports[other_port];
+
+ if (dp == other_dp)
+ continue;
+
+ /* Add/remove this port to/from the port matrix of the other
+ * ports in the same bridge. If the port is disabled, the port
+ * matrix is kept but not applied until the port becomes
+ * enabled.
+ */
+ if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev))
+ continue;
+
+ isolated = p->isolated && other_p->isolated;
+
+ if (join && !isolated) {
+ other_p->pm |= PCR_MATRIX(BIT(port));
+ port_bitmap |= BIT(other_port);
+ } else {
+ other_p->pm &= ~PCR_MATRIX(BIT(port));
+ }
+
+ if (other_p->enable)
+ mt7530_rmw(priv, MT7530_PCR_P(other_port),
+ PCR_MATRIX_MASK, other_p->pm);
+ }
+
+ /* Add/remove all the other ports to/from this port's matrix. For !join
+ * (leaving the bridge), only the CPU port will remain in this port's
+ * matrix.
+ */
+ p->pm = PCR_MATRIX(port_bitmap);
+ if (priv->ports[port].enable)
+ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, p->pm);
+}
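+
+/* Illustrative example (not from this change): with user ports 0-3 in one
+ * bridge and BR_ISOLATED set on ports 0 and 1, the resulting port matrices
+ * allow 0<->{CPU,2,3} and 1<->{CPU,2,3} but not 0<->1, since isolation only
+ * takes effect between two ports that are both isolated.
+ */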
+
static int
mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
- BR_BCAST_FLOOD))
+ BR_BCAST_FLOOD | BR_ISOLATED))
return -EINVAL;
return 0;
@@ -1337,17 +1375,28 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port,
flags.val & BR_LEARNING ? 0 : SA_DIS);
if (flags.mask & BR_FLOOD)
- mt7530_rmw(priv, MT7530_MFC, UNU_FFP(BIT(port)),
+ mt7530_rmw(priv, MT753X_MFC, UNU_FFP(BIT(port)),
flags.val & BR_FLOOD ? UNU_FFP(BIT(port)) : 0);
if (flags.mask & BR_MCAST_FLOOD)
- mt7530_rmw(priv, MT7530_MFC, UNM_FFP(BIT(port)),
+ mt7530_rmw(priv, MT753X_MFC, UNM_FFP(BIT(port)),
flags.val & BR_MCAST_FLOOD ? UNM_FFP(BIT(port)) : 0);
if (flags.mask & BR_BCAST_FLOOD)
- mt7530_rmw(priv, MT7530_MFC, BC_FFP(BIT(port)),
+ mt7530_rmw(priv, MT753X_MFC, BC_FFP(BIT(port)),
flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0);
+ if (flags.mask & BR_ISOLATED) {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
+
+ priv->ports[port].isolated = !!(flags.val & BR_ISOLATED);
+
+ mutex_lock(&priv->reg_mutex);
+ mt7530_update_port_member(priv, port, bridge_dev, true);
+ mutex_unlock(&priv->reg_mutex);
+ }
+
return 0;
}
@@ -1356,39 +1405,11 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port,
struct dsa_bridge bridge, bool *tx_fwd_offload,
struct netlink_ext_ack *extack)
{
- struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
- struct dsa_port *cpu_dp = dp->cpu_dp;
- u32 port_bitmap = BIT(cpu_dp->index);
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
- dsa_switch_for_each_user_port(other_dp, ds) {
- int other_port = other_dp->index;
-
- if (dp == other_dp)
- continue;
-
- /* Add this port to the port matrix of the other ports in the
- * same bridge. If the port is disabled, port matrix is kept
- * and not being setup until the port becomes enabled.
- */
- if (!dsa_port_offloads_bridge(other_dp, &bridge))
- continue;
-
- if (priv->ports[other_port].enable)
- mt7530_set(priv, MT7530_PCR_P(other_port),
- PCR_MATRIX(BIT(port)));
- priv->ports[other_port].pm |= PCR_MATRIX(BIT(port));
-
- port_bitmap |= BIT(other_port);
- }
-
- /* Add the all other ports to this port matrix. */
- if (priv->ports[port].enable)
- mt7530_rmw(priv, MT7530_PCR_P(port),
- PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap));
- priv->ports[port].pm |= PCR_MATRIX(port_bitmap);
+ mt7530_update_port_member(priv, port, bridge.dev, true);
/* Set to fallback mode for independent VLAN learning */
mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
@@ -1423,7 +1444,7 @@ mt7530_port_set_vlan_unaware(struct dsa_switch *ds, int port)
mt7530_rmw(priv, MT7530_PPBV1_P(port), G0_PORT_VID_MASK,
G0_PORT_VID_DEF);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ for (i = 0; i < priv->ds->num_ports; i++) {
if (dsa_is_user_port(ds, i) &&
dsa_port_is_vlan_filtering(dsa_to_port(ds, i))) {
all_user_ports_removed = false;
@@ -1489,38 +1510,11 @@ static void
mt7530_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
- struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
- struct dsa_port *cpu_dp = dp->cpu_dp;
struct mt7530_priv *priv = ds->priv;
mutex_lock(&priv->reg_mutex);
- dsa_switch_for_each_user_port(other_dp, ds) {
- int other_port = other_dp->index;
-
- if (dp == other_dp)
- continue;
-
- /* Remove this port from the port matrix of the other ports
- * in the same bridge. If the port is disabled, port matrix
- * is kept and not being setup until the port becomes enabled.
- */
- if (!dsa_port_offloads_bridge(other_dp, &bridge))
- continue;
-
- if (priv->ports[other_port].enable)
- mt7530_clear(priv, MT7530_PCR_P(other_port),
- PCR_MATRIX(BIT(port)));
- priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port));
- }
-
- /* Set the cpu port to be the only one in the port matrix of
- * this port.
- */
- if (priv->ports[port].enable)
- mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK,
- PCR_MATRIX(BIT(cpu_dp->index)));
- priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index));
+ mt7530_update_port_member(priv, port, bridge.dev, false);
/* When a port is removed from the bridge, the port would be set up
* back to the default as is at initial boot which is a VLAN-unaware
@@ -1881,20 +1875,6 @@ mt7530_port_vlan_del(struct dsa_switch *ds, int port,
return 0;
}
-static int mt753x_mirror_port_get(unsigned int id, u32 val)
-{
- return (id == ID_MT7531 || id == ID_MT7988) ?
- MT7531_MIRROR_PORT_GET(val) :
- MIRROR_PORT(val);
-}
-
-static int mt753x_mirror_port_set(unsigned int id, u32 val)
-{
- return (id == ID_MT7531 || id == ID_MT7988) ?
- MT7531_MIRROR_PORT_SET(val) :
- MIRROR_PORT(val);
-}
-
static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror,
bool ingress, struct netlink_ext_ack *extack)
@@ -1910,14 +1890,14 @@ static int mt753x_port_mirror_add(struct dsa_switch *ds, int port,
val = mt7530_read(priv, MT753X_MIRROR_REG(priv->id));
/* MT7530 only supports one monitor port */
- monitor_port = mt753x_mirror_port_get(priv->id, val);
+ monitor_port = MT753X_MIRROR_PORT_GET(priv->id, val);
if (val & MT753X_MIRROR_EN(priv->id) &&
monitor_port != mirror->to_local_port)
return -EEXIST;
val |= MT753X_MIRROR_EN(priv->id);
- val &= ~MT753X_MIRROR_MASK(priv->id);
- val |= mt753x_mirror_port_set(priv->id, mirror->to_local_port);
+ val &= ~MT753X_MIRROR_PORT_MASK(priv->id);
+ val |= MT753X_MIRROR_PORT_SET(priv->id, mirror->to_local_port);
mt7530_write(priv, MT753X_MIRROR_REG(priv->id), val);
val = mt7530_read(priv, MT7530_PCR_P(port));
@@ -2405,7 +2385,7 @@ mt7530_setup(struct dsa_switch *ds)
}
/* Wait for the MT7530 to become stable */
- INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP);
ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
20, 1000000);
if (ret < 0) {
@@ -2420,7 +2400,7 @@ mt7530_setup(struct dsa_switch *ds)
return -ENODEV;
}
- if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_20MHZ) {
+ if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_20MHZ) {
dev_err(priv->dev,
"MT7530 with a 20MHz XTAL is not supported!\n");
return -EINVAL;
@@ -2440,13 +2420,13 @@ mt7530_setup(struct dsa_switch *ds)
mt7530_rmw(priv, MT7530_TRGMII_RD(i),
RD_TAP_MASK, RD_TAP(16));
- /* Enable port 6 */
- val = mt7530_read(priv, MT7530_MHWTRAP);
- val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS;
- val |= MHWTRAP_MANUAL;
- mt7530_write(priv, MT7530_MHWTRAP, val);
+ /* Allow modifying the trap and directly access PHY registers via the
+ * MDIO bus the switch is on.
+ */
+ mt7530_rmw(priv, MT753X_MTRAP, MT7530_CHG_TRAP |
+ MT7530_PHY_INDIRECT_ACCESS, MT7530_CHG_TRAP);
- if ((val & HWTRAP_XTAL_MASK) == HWTRAP_XTAL_40MHZ)
+ if ((val & MT7530_XTAL_MASK) == MT7530_XTAL_40MHZ)
mt7530_pll_setup(priv);
mt753x_trap_frames(priv);
@@ -2454,12 +2434,12 @@ mt7530_setup(struct dsa_switch *ds)
/* Enable and reset MIB counters */
mt7530_mib_reset(ds);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ for (i = 0; i < priv->ds->num_ports; i++) {
/* Clear link settings and enable force mode to force link down
* on all ports until they're enabled later.
*/
- mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
- PMCR_FORCE_MODE, PMCR_FORCE_MODE);
+ mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
+ MT7530_FORCE_MODE, MT7530_FORCE_MODE);
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -2490,13 +2470,11 @@ mt7530_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Setup port 5 */
- if (!dsa_is_unused_port(ds, 5)) {
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
- } else {
+ /* Check for PHY muxing on port 5 */
+ if (dsa_is_unused_port(ds, 5)) {
/* Scan the ethernet nodes. Look for GMAC1, lookup the used PHY.
- * Set priv->p5_intf_sel to the appropriate value if PHY muxing
- * is detected.
+ * Set priv->p5_mode to the appropriate value if PHY muxing is
+ * detected.
*/
for_each_child_of_node(dn, mac_np) {
if (!of_device_is_compatible(mac_np,
@@ -2511,7 +2489,8 @@ mt7530_setup(struct dsa_switch *ds)
if (!phy_node)
continue;
- if (phy_node->parent == priv->dev->of_node->parent) {
+ if (phy_node->parent == priv->dev->of_node->parent ||
+ phy_node->parent->parent == priv->dev->of_node) {
ret = of_get_phy_mode(mac_np, &interface);
if (ret && ret != -ENODEV) {
of_node_put(mac_np);
@@ -2520,18 +2499,20 @@ mt7530_setup(struct dsa_switch *ds)
}
id = of_mdio_parse_addr(ds->dev, phy_node);
if (id == 0)
- priv->p5_intf_sel = P5_INTF_SEL_PHY_P0;
+ priv->p5_mode = MUX_PHY_P0;
if (id == 4)
- priv->p5_intf_sel = P5_INTF_SEL_PHY_P4;
+ priv->p5_mode = MUX_PHY_P4;
}
of_node_put(mac_np);
of_node_put(phy_node);
break;
}
- if (priv->p5_intf_sel == P5_INTF_SEL_PHY_P0 ||
- priv->p5_intf_sel == P5_INTF_SEL_PHY_P4)
+ if (priv->p5_mode == MUX_PHY_P0 ||
+ priv->p5_mode == MUX_PHY_P4) {
+ mt7530_clear(priv, MT753X_MTRAP, MT7530_P5_DIS);
mt7530_setup_port5(ds, interface);
+ }
}
#ifdef CONFIG_GPIOLIB
@@ -2562,15 +2543,15 @@ mt7531_setup_common(struct dsa_switch *ds)
mt7530_mib_reset(ds);
/* Disable flooding on all ports */
- mt7530_clear(priv, MT7530_MFC, BC_FFP_MASK | UNM_FFP_MASK |
+ mt7530_clear(priv, MT753X_MFC, BC_FFP_MASK | UNM_FFP_MASK |
UNU_FFP_MASK);
- for (i = 0; i < MT7530_NUM_PORTS; i++) {
+ for (i = 0; i < priv->ds->num_ports; i++) {
/* Clear link settings and enable force mode to force link down
* on all ports until they're enabled later.
*/
- mt7530_rmw(priv, MT7530_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
- MT7531_FORCE_MODE, MT7531_FORCE_MODE);
+ mt7530_rmw(priv, MT753X_PMCR_P(i), PMCR_LINK_SETTINGS_MASK |
+ MT7531_FORCE_MODE_MASK, MT7531_FORCE_MODE_MASK);
/* Disable forwarding by default on all ports */
mt7530_rmw(priv, MT7530_PCR_P(i), PCR_MATRIX_MASK,
@@ -2629,7 +2610,7 @@ mt7531_setup(struct dsa_switch *ds)
}
/* Wait for the MT7530 to become stable */
- INIT_MT7530_DUMMY_POLL(&p, priv, MT7530_HWTRAP);
+ INIT_MT7530_DUMMY_POLL(&p, priv, MT753X_TRAP);
ret = readx_poll_timeout(_mt7530_read, &p, val, val != 0,
20, 1000000);
if (ret < 0) {
@@ -2652,8 +2633,8 @@ mt7531_setup(struct dsa_switch *ds)
priv->p5_sgmii = !!(val & PAD_DUAL_SGMII_EN);
/* Force link down on all ports before internal reset */
- for (i = 0; i < MT7530_NUM_PORTS; i++)
- mt7530_write(priv, MT7530_PMCR_P(i), MT7531_FORCE_LNK);
+ for (i = 0; i < priv->ds->num_ports; i++)
+ mt7530_write(priv, MT753X_PMCR_P(i), MT7531_FORCE_MODE_LNK);
/* Reset the switch through internal reset */
mt7530_write(priv, MT7530_SYS_CTRL, SYS_CTRL_SW_RST | SYS_CTRL_REG_RST);
@@ -2661,16 +2642,16 @@ mt7531_setup(struct dsa_switch *ds)
if (!priv->p5_sgmii) {
mt7531_pll_setup(priv);
} else {
- /* Let ds->user_mii_bus be able to access external phy. */
+ /* Unlike MT7531BE, the GPIO 6-12 pins are not used for RGMII on
+ * MT7531AE. Set the GPIO 11-12 pins to function as MDC and MDIO
+ * to expose the MDIO bus of the switch.
+ */
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO11_RG_RXD2_MASK,
MT7531_EXT_P_MDC_11);
mt7530_rmw(priv, MT7531_GPIO_MODE1, MT7531_GPIO12_RG_RXD3_MASK,
MT7531_EXT_P_MDIO_12);
}
- if (!dsa_is_unused_port(ds, 5))
- priv->p5_intf_sel = P5_INTF_SEL_GMAC5;
-
mt7530_rmw(priv, MT7531_GPIO_MODE0, MT7531_GPIO0_MASK,
MT7531_GPIO0_INTERRUPT);
@@ -2679,21 +2660,26 @@ mt7531_setup(struct dsa_switch *ds)
* phy_[read,write]_mmd_indirect is called, we provide our own
* mt7531_ind_mmd_phy_[read,write] to complete this function.
*/
- val = mt7531_ind_c45_phy_read(priv, MT753X_CTRL_PHY_ADDR,
+ val = mt7531_ind_c45_phy_read(priv,
+ MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
MDIO_MMD_VEND2, CORE_PLL_GROUP4);
val |= MT7531_RG_SYSPLL_DMY2 | MT7531_PHY_PLL_BYPASS_MODE;
val &= ~MT7531_PHY_PLL_OFF;
- mt7531_ind_c45_phy_write(priv, MT753X_CTRL_PHY_ADDR, MDIO_MMD_VEND2,
- CORE_PLL_GROUP4, val);
+ mt7531_ind_c45_phy_write(priv,
+ MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr),
+ MDIO_MMD_VEND2, CORE_PLL_GROUP4, val);
/* Disable EEE advertisement on the switch PHYs. */
- for (i = MT753X_CTRL_PHY_ADDR;
- i < MT753X_CTRL_PHY_ADDR + MT7530_NUM_PHYS; i++) {
+ for (i = MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr);
+ i < MT753X_CTRL_PHY_ADDR(priv->mdiodev->addr) + MT7530_NUM_PHYS;
+ i++) {
mt7531_ind_c45_phy_write(priv, i, MDIO_MMD_AN, MDIO_AN_EEE_ADV,
0);
}
- mt7531_setup_common(ds);
+ ret = mt7531_setup_common(ds);
+ if (ret)
+ return ret;
/* Setup VLAN ID 0 for VLAN-unaware bridges */
ret = mt7530_setup_vlan0(priv);
@@ -2709,6 +2695,8 @@ mt7531_setup(struct dsa_switch *ds)
static void mt7530_mac_port_get_caps(struct dsa_switch *ds, int port,
struct phylink_config *config)
{
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
+
switch (port) {
/* Ports which are connected to switch PHYs. There is no MII pinout. */
case 0 ... 4:
@@ -2740,6 +2728,8 @@ static void mt7531_mac_port_get_caps(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
+
switch (port) {
/* Ports which are connected to switch PHYs. There is no MII pinout. */
case 0 ... 4:
@@ -2779,14 +2769,17 @@ static void mt7988_mac_port_get_caps(struct dsa_switch *ds, int port,
case 0 ... 3:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
+
+ config->mac_capabilities |= MAC_10 | MAC_100 | MAC_1000FD;
break;
/* Port 6 is connected to SoC's XGMII MAC. There is no MII pinout. */
case 6:
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
config->supported_interfaces);
- config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10000FD;
+
+ config->mac_capabilities |= MAC_10000FD;
+ break;
}
}
@@ -2802,7 +2795,7 @@ mt7530_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
mt7530_setup_port6(priv->ds, interface);
}
-static void mt7531_rgmii_setup(struct mt7530_priv *priv, u32 port,
+static void mt7531_rgmii_setup(struct mt7530_priv *priv,
phy_interface_t interface,
struct phy_device *phydev)
{
@@ -2853,62 +2846,70 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
if (phy_interface_mode_is_rgmii(interface)) {
dp = dsa_to_port(ds, port);
phydev = dp->user->phydev;
- mt7531_rgmii_setup(priv, port, interface, phydev);
+ mt7531_rgmii_setup(priv, interface, phydev);
}
}
static struct phylink_pcs *
-mt753x_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+mt753x_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
switch (interface) {
case PHY_INTERFACE_MODE_TRGMII:
- return &priv->pcs[port].pcs;
+ return &priv->pcs[dp->index].pcs;
case PHY_INTERFACE_MODE_SGMII:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- return priv->ports[port].sgmii_pcs;
+ return priv->ports[dp->index].sgmii_pcs;
default:
return NULL;
}
}
static void
-mt753x_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+mt753x_phylink_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct dsa_switch *ds = dp->ds;
+ struct mt7530_priv *priv;
+ int port = dp->index;
+
+ priv = ds->priv;
if ((port == 5 || port == 6) && priv->info->mac_port_config)
priv->info->mac_port_config(ds, port, mode, state->interface);
/* Are we connected to external phy */
if (port == 5 && dsa_is_user_port(ds, 5))
- mt7530_set(priv, MT7530_PMCR_P(port), PMCR_EXT_PHY);
+ mt7530_set(priv, MT753X_PMCR_P(port), PMCR_EXT_PHY);
}
-static void mt753x_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void mt753x_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
- mt7530_clear(priv, MT7530_PMCR_P(port), PMCR_LINK_SETTINGS_MASK);
+ mt7530_clear(priv, MT753X_PMCR_P(dp->index), PMCR_LINK_SETTINGS_MASK);
}
-static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void mt753x_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct mt7530_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mt7530_priv *priv = dp->ds->priv;
u32 mcr;
- mcr = PMCR_RX_EN | PMCR_TX_EN | PMCR_FORCE_LNK;
+ mcr = PMCR_MAC_RX_EN | PMCR_MAC_TX_EN | PMCR_FORCE_LNK;
switch (speed) {
case SPEED_1000:
@@ -2923,9 +2924,9 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
if (duplex == DUPLEX_FULL) {
mcr |= PMCR_FORCE_FDX;
if (tx_pause)
- mcr |= PMCR_TX_FC_EN;
+ mcr |= PMCR_FORCE_TX_FC_EN;
if (rx_pause)
- mcr |= PMCR_RX_FC_EN;
+ mcr |= PMCR_FORCE_RX_FC_EN;
}
if (mode == MLO_AN_PHY && phydev && phy_init_eee(phydev, false) >= 0) {
@@ -2940,7 +2941,7 @@ static void mt753x_phylink_mac_link_up(struct dsa_switch *ds, int port,
}
}
- mt7530_set(priv, MT7530_PMCR_P(port), mcr);
+ mt7530_set(priv, MT753X_PMCR_P(dp->index), mcr);
}
static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
@@ -2948,9 +2949,7 @@ static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
- /* This switch only supports full-duplex at 1Gbps */
- config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
- MAC_10 | MAC_100 | MAC_1000FD;
+ config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
priv->info->mac_port_get_caps(ds, port, config);
}
@@ -3038,6 +3037,8 @@ mt753x_setup(struct dsa_switch *ds)
ret = mt7530_setup_mdio(priv);
if (ret && priv->irq)
mt7530_free_irq_common(priv);
+ if (ret)
+ return ret;
/* Initialise the PCS devices */
for (i = 0; i < priv->ds->num_ports; i++) {
@@ -3060,10 +3061,10 @@ static int mt753x_get_mac_eee(struct dsa_switch *ds, int port,
struct ethtool_keee *e)
{
struct mt7530_priv *priv = ds->priv;
- u32 eeecr = mt7530_read(priv, MT7530_PMEEECR_P(port));
+ u32 eeecr = mt7530_read(priv, MT753X_PMEEECR_P(port));
e->tx_lpi_enabled = !(eeecr & LPI_MODE_EN);
- e->tx_lpi_timer = GET_LPI_THRESH(eeecr);
+ e->tx_lpi_timer = LPI_THRESH_GET(eeecr);
return 0;
}
@@ -3077,11 +3078,11 @@ static int mt753x_set_mac_eee(struct dsa_switch *ds, int port,
if (e->tx_lpi_timer > 0xFFF)
return -EINVAL;
- set = SET_LPI_THRESH(e->tx_lpi_timer);
+ set = LPI_THRESH_SET(e->tx_lpi_timer);
if (!e->tx_lpi_enabled)
/* Force LPI Mode without a delay */
set |= LPI_MODE_EN;
- mt7530_rmw(priv, MT7530_PMEEECR_P(port), mask, set);
+ mt7530_rmw(priv, MT753X_PMEEECR_P(port), mask, set);
return 0;
}
@@ -3110,10 +3111,12 @@ mt753x_conduit_state_change(struct dsa_switch *ds,
else
priv->active_cpu_ports &= ~mask;
- if (priv->active_cpu_ports)
- val = CPU_EN | CPU_PORT(__ffs(priv->active_cpu_ports));
+ if (priv->active_cpu_ports) {
+ val = MT7530_CPU_EN |
+ MT7530_CPU_PORT(__ffs(priv->active_cpu_ports));
+ }
- mt7530_rmw(priv, MT7530_MFC, CPU_EN | CPU_PORT_MASK, val);
+ mt7530_rmw(priv, MT753X_MFC, MT7530_CPU_EN | MT7530_CPU_PORT_MASK, val);
}
static int mt7988_setup(struct dsa_switch *ds)
@@ -3160,16 +3163,19 @@ const struct dsa_switch_ops mt7530_switch_ops = {
.port_mirror_add = mt753x_port_mirror_add,
.port_mirror_del = mt753x_port_mirror_del,
.phylink_get_caps = mt753x_phylink_get_caps,
- .phylink_mac_select_pcs = mt753x_phylink_mac_select_pcs,
- .phylink_mac_config = mt753x_phylink_mac_config,
- .phylink_mac_link_down = mt753x_phylink_mac_link_down,
- .phylink_mac_link_up = mt753x_phylink_mac_link_up,
.get_mac_eee = mt753x_get_mac_eee,
.set_mac_eee = mt753x_set_mac_eee,
.conduit_state_change = mt753x_conduit_state_change,
};
EXPORT_SYMBOL_GPL(mt7530_switch_ops);
+static const struct phylink_mac_ops mt753x_phylink_mac_ops = {
+ .mac_select_pcs = mt753x_phylink_mac_select_pcs,
+ .mac_config = mt753x_phylink_mac_config,
+ .mac_link_down = mt753x_phylink_mac_link_down,
+ .mac_link_up = mt753x_phylink_mac_link_up,
+};
+
const struct mt753x_info mt753x_table[] = {
[ID_MT7621] = {
.id = ID_MT7621,
@@ -3236,17 +3242,11 @@ mt7530_probe_common(struct mt7530_priv *priv)
if (!priv->info)
return -EINVAL;
- /* Sanity check if these required device operations are filled
- * properly.
- */
- if (!priv->info->sw_setup || !priv->info->phy_read_c22 ||
- !priv->info->phy_write_c22 || !priv->info->mac_port_get_caps)
- return -EINVAL;
-
priv->id = priv->info->id;
priv->dev = dev;
priv->ds->priv = priv;
priv->ds->ops = &mt7530_switch_ops;
+ priv->ds->phylink_mac_ops = &mt753x_phylink_mac_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(dev, priv);
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index a08053390b28..28592123070b 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -36,78 +36,97 @@ enum mt753x_id {
#define MT753X_AGC 0xc
#define LOCAL_EN BIT(7)
-/* Registers to mac forward control for unknown frames */
-#define MT7530_MFC 0x10
-#define BC_FFP(x) (((x) & 0xff) << 24)
-#define BC_FFP_MASK BC_FFP(~0)
-#define UNM_FFP(x) (((x) & 0xff) << 16)
-#define UNM_FFP_MASK UNM_FFP(~0)
-#define UNU_FFP(x) (((x) & 0xff) << 8)
-#define UNU_FFP_MASK UNU_FFP(~0)
-#define CPU_EN BIT(7)
-#define CPU_PORT_MASK GENMASK(6, 4)
-#define CPU_PORT(x) FIELD_PREP(CPU_PORT_MASK, x)
-#define MIRROR_EN BIT(3)
-#define MIRROR_PORT(x) ((x) & 0x7)
-#define MIRROR_MASK 0x7
-
-/* Registers for CPU forward control */
+/* Register for MAC forward control */
+#define MT753X_MFC 0x10
+#define BC_FFP_MASK GENMASK(31, 24)
+#define BC_FFP(x) FIELD_PREP(BC_FFP_MASK, x)
+#define UNM_FFP_MASK GENMASK(23, 16)
+#define UNM_FFP(x) FIELD_PREP(UNM_FFP_MASK, x)
+#define UNU_FFP_MASK GENMASK(15, 8)
+#define UNU_FFP(x) FIELD_PREP(UNU_FFP_MASK, x)
+#define MT7530_CPU_EN BIT(7)
+#define MT7530_CPU_PORT_MASK GENMASK(6, 4)
+#define MT7530_CPU_PORT(x) FIELD_PREP(MT7530_CPU_PORT_MASK, x)
+#define MT7530_MIRROR_EN BIT(3)
+#define MT7530_MIRROR_PORT_MASK GENMASK(2, 0)
+#define MT7530_MIRROR_PORT_GET(x) FIELD_GET(MT7530_MIRROR_PORT_MASK, x)
+#define MT7530_MIRROR_PORT_SET(x) FIELD_PREP(MT7530_MIRROR_PORT_MASK, x)
+#define MT7531_QRY_FFP_MASK GENMASK(7, 0)
+#define MT7531_QRY_FFP(x) FIELD_PREP(MT7531_QRY_FFP_MASK, x)
+
+/* Register for CPU forward control */
#define MT7531_CFC 0x4
#define MT7531_MIRROR_EN BIT(19)
-#define MT7531_MIRROR_MASK (MIRROR_MASK << 16)
-#define MT7531_MIRROR_PORT_GET(x) (((x) >> 16) & MIRROR_MASK)
-#define MT7531_MIRROR_PORT_SET(x) (((x) & MIRROR_MASK) << 16)
+#define MT7531_MIRROR_PORT_MASK GENMASK(18, 16)
+#define MT7531_MIRROR_PORT_GET(x) FIELD_GET(MT7531_MIRROR_PORT_MASK, x)
+#define MT7531_MIRROR_PORT_SET(x) FIELD_PREP(MT7531_MIRROR_PORT_MASK, x)
#define MT7531_CPU_PMAP_MASK GENMASK(7, 0)
#define MT7531_CPU_PMAP(x) FIELD_PREP(MT7531_CPU_PMAP_MASK, x)
-#define MT753X_MIRROR_REG(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
- MT7531_CFC : MT7530_MFC)
-#define MT753X_MIRROR_EN(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
- MT7531_MIRROR_EN : MIRROR_EN)
-#define MT753X_MIRROR_MASK(id) ((((id) == ID_MT7531) || ((id) == ID_MT7988)) ? \
- MT7531_MIRROR_MASK : MIRROR_MASK)
+#define MT753X_MIRROR_REG(id) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_CFC : MT753X_MFC)
-/* Registers for BPDU and PAE frame control*/
+#define MT753X_MIRROR_EN(id) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_MIRROR_EN : MT7530_MIRROR_EN)
+
+#define MT753X_MIRROR_PORT_MASK(id) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_MIRROR_PORT_MASK : \
+ MT7530_MIRROR_PORT_MASK)
+
+#define MT753X_MIRROR_PORT_GET(id, val) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_MIRROR_PORT_GET(val) : \
+ MT7530_MIRROR_PORT_GET(val))
+
+#define MT753X_MIRROR_PORT_SET(id, val) ((id == ID_MT7531 || \
+ id == ID_MT7988) ? \
+ MT7531_MIRROR_PORT_SET(val) : \
+ MT7530_MIRROR_PORT_SET(val))
+
+/* Register for BPDU and PAE frame control */
#define MT753X_BPC 0x24
-#define MT753X_PAE_BPDU_FR BIT(25)
-#define MT753X_PAE_EG_TAG_MASK GENMASK(24, 22)
-#define MT753X_PAE_EG_TAG(x) FIELD_PREP(MT753X_PAE_EG_TAG_MASK, x)
-#define MT753X_PAE_PORT_FW_MASK GENMASK(18, 16)
-#define MT753X_PAE_PORT_FW(x) FIELD_PREP(MT753X_PAE_PORT_FW_MASK, x)
-#define MT753X_BPDU_EG_TAG_MASK GENMASK(8, 6)
-#define MT753X_BPDU_EG_TAG(x) FIELD_PREP(MT753X_BPDU_EG_TAG_MASK, x)
-#define MT753X_BPDU_PORT_FW_MASK GENMASK(2, 0)
-
-/* Register for :01 and :02 MAC DA frame control */
+#define PAE_BPDU_FR BIT(25)
+#define PAE_EG_TAG_MASK GENMASK(24, 22)
+#define PAE_EG_TAG(x) FIELD_PREP(PAE_EG_TAG_MASK, x)
+#define PAE_PORT_FW_MASK GENMASK(18, 16)
+#define PAE_PORT_FW(x) FIELD_PREP(PAE_PORT_FW_MASK, x)
+#define BPDU_EG_TAG_MASK GENMASK(8, 6)
+#define BPDU_EG_TAG(x) FIELD_PREP(BPDU_EG_TAG_MASK, x)
+#define BPDU_PORT_FW_MASK GENMASK(2, 0)
+
+/* Register for 01-80-C2-00-00-[01,02] MAC DA frame control */
#define MT753X_RGAC1 0x28
-#define MT753X_R02_BPDU_FR BIT(25)
-#define MT753X_R02_EG_TAG_MASK GENMASK(24, 22)
-#define MT753X_R02_EG_TAG(x) FIELD_PREP(MT753X_R02_EG_TAG_MASK, x)
-#define MT753X_R02_PORT_FW_MASK GENMASK(18, 16)
-#define MT753X_R02_PORT_FW(x) FIELD_PREP(MT753X_R02_PORT_FW_MASK, x)
-#define MT753X_R01_BPDU_FR BIT(9)
-#define MT753X_R01_EG_TAG_MASK GENMASK(8, 6)
-#define MT753X_R01_EG_TAG(x) FIELD_PREP(MT753X_R01_EG_TAG_MASK, x)
-#define MT753X_R01_PORT_FW_MASK GENMASK(2, 0)
-
-/* Register for :03 and :0E MAC DA frame control */
+#define R02_BPDU_FR BIT(25)
+#define R02_EG_TAG_MASK GENMASK(24, 22)
+#define R02_EG_TAG(x) FIELD_PREP(R02_EG_TAG_MASK, x)
+#define R02_PORT_FW_MASK GENMASK(18, 16)
+#define R02_PORT_FW(x) FIELD_PREP(R02_PORT_FW_MASK, x)
+#define R01_BPDU_FR BIT(9)
+#define R01_EG_TAG_MASK GENMASK(8, 6)
+#define R01_EG_TAG(x) FIELD_PREP(R01_EG_TAG_MASK, x)
+#define R01_PORT_FW_MASK GENMASK(2, 0)
+
+/* Register for 01-80-C2-00-00-[03,0E] MAC DA frame control */
#define MT753X_RGAC2 0x2c
-#define MT753X_R0E_BPDU_FR BIT(25)
-#define MT753X_R0E_EG_TAG_MASK GENMASK(24, 22)
-#define MT753X_R0E_EG_TAG(x) FIELD_PREP(MT753X_R0E_EG_TAG_MASK, x)
-#define MT753X_R0E_PORT_FW_MASK GENMASK(18, 16)
-#define MT753X_R0E_PORT_FW(x) FIELD_PREP(MT753X_R0E_PORT_FW_MASK, x)
-#define MT753X_R03_BPDU_FR BIT(9)
-#define MT753X_R03_EG_TAG_MASK GENMASK(8, 6)
-#define MT753X_R03_EG_TAG(x) FIELD_PREP(MT753X_R03_EG_TAG_MASK, x)
-#define MT753X_R03_PORT_FW_MASK GENMASK(2, 0)
-
-enum mt753x_bpdu_port_fw {
- MT753X_BPDU_FOLLOW_MFC,
- MT753X_BPDU_CPU_EXCLUDE = 4,
- MT753X_BPDU_CPU_INCLUDE = 5,
- MT753X_BPDU_CPU_ONLY = 6,
- MT753X_BPDU_DROP = 7,
+#define R0E_BPDU_FR BIT(25)
+#define R0E_EG_TAG_MASK GENMASK(24, 22)
+#define R0E_EG_TAG(x) FIELD_PREP(R0E_EG_TAG_MASK, x)
+#define R0E_PORT_FW_MASK GENMASK(18, 16)
+#define R0E_PORT_FW(x) FIELD_PREP(R0E_PORT_FW_MASK, x)
+#define R03_BPDU_FR BIT(9)
+#define R03_EG_TAG_MASK GENMASK(8, 6)
+#define R03_EG_TAG(x) FIELD_PREP(R03_EG_TAG_MASK, x)
+#define R03_PORT_FW_MASK GENMASK(2, 0)
+
+enum mt753x_to_cpu_fw {
+ TO_CPU_FW_SYSTEM_DEFAULT,
+ TO_CPU_FW_CPU_EXCLUDE = 4,
+ TO_CPU_FW_CPU_INCLUDE = 5,
+ TO_CPU_FW_CPU_ONLY = 6,
+ TO_CPU_FW_DROP = 7,
};
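As an aside, a sketch of how the renamed helpers compose (not code from the patch; the function name is made up): building a MT753X_BPC value that traps 802.1X PAE frames to the CPU port(s) only, with PAE_BPDU_FR keeping the trap active regardless of the port's STP state.

static u32 mt753x_bpc_trap_pae(u32 bpc)
{
	/* Route PAE frames to the CPU port(s) exclusively */
	bpc &= ~PAE_PORT_FW_MASK;
	bpc |= PAE_PORT_FW(TO_CPU_FW_CPU_ONLY);
	/* Keep trapping even when the port is not in forwarding state */
	bpc |= PAE_BPDU_FR;
	return bpc;
}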
/* Registers for address table access */
@@ -304,48 +323,55 @@ enum mt7530_vlan_port_acc_frm {
#define G0_PORT_VID_DEF G0_PORT_VID(0)
/* Register for port MAC control register */
-#define MT7530_PMCR_P(x) (0x3000 + ((x) * 0x100))
-#define PMCR_IFG_XMIT(x) (((x) & 0x3) << 18)
+#define MT753X_PMCR_P(x) (0x3000 + ((x) * 0x100))
+#define PMCR_IFG_XMIT_MASK GENMASK(19, 18)
+#define PMCR_IFG_XMIT(x) FIELD_PREP(PMCR_IFG_XMIT_MASK, x)
#define PMCR_EXT_PHY BIT(17)
#define PMCR_MAC_MODE BIT(16)
-#define PMCR_FORCE_MODE BIT(15)
-#define PMCR_TX_EN BIT(14)
-#define PMCR_RX_EN BIT(13)
+#define MT7530_FORCE_MODE BIT(15)
+#define PMCR_MAC_TX_EN BIT(14)
+#define PMCR_MAC_RX_EN BIT(13)
#define PMCR_BACKOFF_EN BIT(9)
#define PMCR_BACKPR_EN BIT(8)
#define PMCR_FORCE_EEE1G BIT(7)
#define PMCR_FORCE_EEE100 BIT(6)
-#define PMCR_TX_FC_EN BIT(5)
-#define PMCR_RX_FC_EN BIT(4)
+#define PMCR_FORCE_RX_FC_EN BIT(5)
+#define PMCR_FORCE_TX_FC_EN BIT(4)
#define PMCR_FORCE_SPEED_1000 BIT(3)
#define PMCR_FORCE_SPEED_100 BIT(2)
#define PMCR_FORCE_FDX BIT(1)
#define PMCR_FORCE_LNK BIT(0)
-#define PMCR_SPEED_MASK (PMCR_FORCE_SPEED_100 | \
- PMCR_FORCE_SPEED_1000)
-#define MT7531_FORCE_LNK BIT(31)
-#define MT7531_FORCE_SPD BIT(30)
-#define MT7531_FORCE_DPX BIT(29)
-#define MT7531_FORCE_RX_FC BIT(28)
-#define MT7531_FORCE_TX_FC BIT(27)
-#define MT7531_FORCE_MODE (MT7531_FORCE_LNK | \
- MT7531_FORCE_SPD | \
- MT7531_FORCE_DPX | \
- MT7531_FORCE_RX_FC | \
- MT7531_FORCE_TX_FC)
-#define PMCR_LINK_SETTINGS_MASK (PMCR_TX_EN | PMCR_FORCE_SPEED_1000 | \
- PMCR_RX_EN | PMCR_FORCE_SPEED_100 | \
- PMCR_TX_FC_EN | PMCR_RX_FC_EN | \
- PMCR_FORCE_FDX | PMCR_FORCE_LNK | \
- PMCR_FORCE_EEE1G | PMCR_FORCE_EEE100)
-
-#define MT7530_PMEEECR_P(x) (0x3004 + (x) * 0x100)
-#define WAKEUP_TIME_1000(x) (((x) & 0xFF) << 24)
-#define WAKEUP_TIME_100(x) (((x) & 0xFF) << 16)
+#define MT7531_FORCE_MODE_LNK BIT(31)
+#define MT7531_FORCE_MODE_SPD BIT(30)
+#define MT7531_FORCE_MODE_DPX BIT(29)
+#define MT7531_FORCE_MODE_RX_FC BIT(28)
+#define MT7531_FORCE_MODE_TX_FC BIT(27)
+#define MT7531_FORCE_MODE_EEE100 BIT(26)
+#define MT7531_FORCE_MODE_EEE1G BIT(25)
+#define MT7531_FORCE_MODE_MASK (MT7531_FORCE_MODE_LNK | \
+ MT7531_FORCE_MODE_SPD | \
+ MT7531_FORCE_MODE_DPX | \
+ MT7531_FORCE_MODE_RX_FC | \
+ MT7531_FORCE_MODE_TX_FC | \
+ MT7531_FORCE_MODE_EEE100 | \
+ MT7531_FORCE_MODE_EEE1G)
+#define PMCR_LINK_SETTINGS_MASK (PMCR_MAC_TX_EN | PMCR_MAC_RX_EN | \
+ PMCR_FORCE_EEE1G | \
+ PMCR_FORCE_EEE100 | \
+ PMCR_FORCE_RX_FC_EN | \
+ PMCR_FORCE_TX_FC_EN | \
+ PMCR_FORCE_SPEED_1000 | \
+ PMCR_FORCE_SPEED_100 | \
+ PMCR_FORCE_FDX | PMCR_FORCE_LNK)
+
+#define MT753X_PMEEECR_P(x) (0x3004 + (x) * 0x100)
+#define WAKEUP_TIME_1000_MASK GENMASK(31, 24)
+#define WAKEUP_TIME_1000(x) FIELD_PREP(WAKEUP_TIME_1000_MASK, x)
+#define WAKEUP_TIME_100_MASK GENMASK(23, 16)
+#define WAKEUP_TIME_100(x) FIELD_PREP(WAKEUP_TIME_100_MASK, x)
#define LPI_THRESH_MASK GENMASK(15, 4)
-#define LPI_THRESH_SHT 4
-#define SET_LPI_THRESH(x) (((x) << LPI_THRESH_SHT) & LPI_THRESH_MASK)
-#define GET_LPI_THRESH(x) (((x) & LPI_THRESH_MASK) >> LPI_THRESH_SHT)
+#define LPI_THRESH_GET(x) FIELD_GET(LPI_THRESH_MASK, x)
+#define LPI_THRESH_SET(x) FIELD_PREP(LPI_THRESH_MASK, x)
#define LPI_MODE_EN BIT(0)
#define MT7530_PMSR_P(x) (0x3008 + (x) * 0x100)
@@ -470,32 +496,30 @@ enum mt7531_clk_skew {
MT7531_CLK_SKEW_REVERSE = 3,
};
-/* Register for hw trap status */
-#define MT7530_HWTRAP 0x7800
-#define HWTRAP_XTAL_MASK (BIT(10) | BIT(9))
-#define HWTRAP_XTAL_25MHZ (BIT(10) | BIT(9))
-#define HWTRAP_XTAL_40MHZ (BIT(10))
-#define HWTRAP_XTAL_20MHZ (BIT(9))
-
-#define MT7531_HWTRAP 0x7800
-#define HWTRAP_XTAL_FSEL_MASK BIT(7)
-#define HWTRAP_XTAL_FSEL_25MHZ BIT(7)
-#define HWTRAP_XTAL_FSEL_40MHZ 0
-/* Unique fields of (M)HWSTRAP for MT7531 */
-#define XTAL_FSEL_S 7
-#define XTAL_FSEL_M BIT(7)
-#define PHY_EN BIT(6)
-#define CHG_STRAP BIT(8)
-
-/* Register for hw trap modification */
-#define MT7530_MHWTRAP 0x7804
-#define MHWTRAP_PHY0_SEL BIT(20)
-#define MHWTRAP_MANUAL BIT(16)
-#define MHWTRAP_P5_MAC_SEL BIT(13)
-#define MHWTRAP_P6_DIS BIT(8)
-#define MHWTRAP_P5_RGMII_MODE BIT(7)
-#define MHWTRAP_P5_DIS BIT(6)
-#define MHWTRAP_PHY_ACCESS BIT(5)
+/* Register for trap status */
+#define MT753X_TRAP 0x7800
+#define MT7530_XTAL_MASK (BIT(10) | BIT(9))
+#define MT7530_XTAL_25MHZ (BIT(10) | BIT(9))
+#define MT7530_XTAL_40MHZ BIT(10)
+#define MT7530_XTAL_20MHZ BIT(9)
+#define MT7531_XTAL25 BIT(7)
+
+/* Register for trap modification */
+#define MT753X_MTRAP 0x7804
+#define MT7530_P5_PHY0_SEL BIT(20)
+#define MT7530_CHG_TRAP BIT(16)
+#define MT7530_P5_MAC_SEL BIT(13)
+#define MT7530_P6_DIS BIT(8)
+#define MT7530_P5_RGMII_MODE BIT(7)
+#define MT7530_P5_DIS BIT(6)
+#define MT7530_PHY_INDIRECT_ACCESS BIT(5)
+#define MT7531_CHG_STRAP BIT(8)
+#define MT7531_PHY_EN BIT(6)
+
+enum mt7531_xtal_fsel {
+ MT7531_XTAL_FSEL_25MHZ,
+ MT7531_XTAL_FSEL_40MHZ,
+};
/* Register for TOP signal control */
#define MT7530_TOP_SIG_CTRL 0x7808
@@ -629,7 +653,7 @@ enum mt7531_clk_skew {
#define MT7531_PHY_PLL_OFF BIT(5)
#define MT7531_PHY_PLL_BYPASS_MODE BIT(4)
-#define MT753X_CTRL_PHY_ADDR 0
+#define MT753X_CTRL_PHY_ADDR(addr) ((addr + 1) & 0x1f)
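A quick worked example of the new parameterised macro (illustrative only, compilable as plain C): the control PHY is now addressed relative to the switch's own MDIO address, one above it and wrapped within the 5-bit address space.

#include <stdio.h>

#define MT753X_CTRL_PHY_ADDR(addr)	(((addr) + 1) & 0x1f)

int main(void)
{
	/* Base 0 -> 1, base 7 -> 8, base 0x1f wraps around to 0 */
	printf("%d %d %d\n", MT753X_CTRL_PHY_ADDR(0),
	       MT753X_CTRL_PHY_ADDR(7), MT753X_CTRL_PHY_ADDR(0x1f));
	return 0;
}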
#define CORE_PLL_GROUP5 0x404
#define RG_LCDDS_PCW_NCPO1(x) ((x) & 0xffff)
@@ -697,17 +721,17 @@ struct mt7530_fdb {
*/
struct mt7530_port {
bool enable;
+ bool isolated;
u32 pm;
u16 pvid;
struct phylink_pcs *sgmii_pcs;
};
-/* Port 5 interface select definitions */
-enum p5_interface_select {
- P5_DISABLED,
- P5_INTF_SEL_PHY_P0,
- P5_INTF_SEL_PHY_P4,
- P5_INTF_SEL_GMAC5,
+/* Port 5 mode definitions of the MT7530 switch */
+enum mt7530_p5_mode {
+ GMAC5,
+ MUX_PHY_P0,
+ MUX_PHY_P4,
};
struct mt7530_priv;
@@ -720,15 +744,14 @@ struct mt753x_pcs {
/* struct mt753x_info - This is the main data structure for holding the specific
* part for each supported device
+ * @id: Holding the identifier to a switch model
+ * @pcs_ops: Holding the pointer to the MAC PCS operations structure
* @sw_setup: Holding the handler to a device initialization
* @phy_read_c22: Holding the way reading PHY port using C22
* @phy_write_c22: Holding the way writing PHY port using C22
* @phy_read_c45: Holding the way reading PHY port using C45
* @phy_write_c45: Holding the way writing PHY port using C45
- * @phy_mode_supported: Check if the PHY type is being supported on a certain
- * port
- * @mac_port_validate: Holding the way to set addition validate type for a
- * certan MAC port
+ * @mac_port_get_caps: Holding the handler that provides MAC capabilities
* @mac_port_config: Holding the way setting up the PHY attribute to a
* certain MAC port
*/
@@ -747,9 +770,6 @@ struct mt753x_info {
int regnum, u16 val);
void (*mac_port_get_caps)(struct dsa_switch *ds, int port,
struct phylink_config *config);
- void (*mac_port_validate)(struct dsa_switch *ds, int port,
- phy_interface_t interface,
- unsigned long *supported);
void (*mac_port_config)(struct dsa_switch *ds, int port,
unsigned int mode,
phy_interface_t interface);
@@ -770,7 +790,7 @@ struct mt753x_info {
* @ports: Holding the state among ports
* @reg_mutex: The lock for protecting among process accessing
* registers
- * @p5_intf_sel: Holding the current port 5 interface select
+ * @p5_mode: Holding the current mode of port 5 of the MT7530 switch
* @p5_sgmii: Flag for distinguishing if port 5 of the MT7531 switch
* has got SGMII
* @irq: IRQ number of the switch
@@ -778,6 +798,7 @@ struct mt753x_info {
* @irq_enable: IRQ enable bits, synced to SYS_INT_EN
* @create_sgmii: Pointer to function creating SGMII PCS instance(s)
* @active_cpu_ports: Holding the active CPU ports
+ * @mdiodev: The pointer to the MDIO device structure
*/
struct mt7530_priv {
struct device *dev;
@@ -791,7 +812,7 @@ struct mt7530_priv {
const struct mt753x_info *info;
unsigned int id;
bool mcm;
- enum p5_interface_select p5_intf_sel;
+ enum mt7530_p5_mode p5_mode;
bool p5_sgmii;
u8 mirror_rx;
u8 mirror_tx;
@@ -804,6 +825,7 @@ struct mt7530_priv {
u32 irq_enable;
int (*create_sgmii)(struct mt7530_priv *priv);
u8 active_cpu_ports;
+ struct mdio_device *mdiodev;
};
struct mt7530_hw_vlan_entry {
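To illustrate the unified per-chip mirror accessors introduced above (a sketch, not code from the patch; the helper name is made up):

/* Fold a new mirror destination into the current MFC/CFC value "val"
 * for switch model "id", using MT753X_MIRROR_PORT_MASK/_SET.
 */
static u32 mt753x_set_mirror_dest(unsigned int id, u32 val, u8 port)
{
	val &= ~MT753X_MIRROR_PORT_MASK(id);
	val |= MT753X_MIRROR_PORT_SET(id, port);
	return val;
}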
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 14daf432f30b..5b4e2ce5470d 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -131,8 +131,8 @@ struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
{
struct mv88e6xxx_mdio_bus *mdio_bus;
- mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
- list);
+ mdio_bus = list_first_entry_or_null(&chip->mdios,
+ struct mv88e6xxx_mdio_bus, list);
if (!mdio_bus)
return NULL;
@@ -637,12 +637,12 @@ static void mv88e6351_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
MAC_1000FD;
}
-static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
+static int mv88e63xx_get_port_serdes_cmode(struct mv88e6xxx_chip *chip, int port)
{
u16 reg, val;
int err;
- err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &reg);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
if (err)
return err;
@@ -651,16 +651,16 @@ static int mv88e6352_get_port4_serdes_cmode(struct mv88e6xxx_chip *chip)
return 0xf;
val = reg & ~MV88E6XXX_PORT_STS_PHY_DETECT;
- err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, val);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, val);
if (err)
return err;
- err = mv88e6xxx_port_read(chip, 4, MV88E6XXX_PORT_STS, &val);
+ err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &val);
if (err)
return err;
/* Restore PHY_DETECT value */
- err = mv88e6xxx_port_write(chip, 4, MV88E6XXX_PORT_STS, reg);
+ err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_STS, reg);
if (err)
return err;
@@ -688,7 +688,30 @@ static void mv88e6352_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
if (err <= 0)
return;
- cmode = mv88e6352_get_port4_serdes_cmode(chip);
+ cmode = mv88e63xx_get_port_serdes_cmode(chip, port);
+ if (cmode < 0)
+ dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
+ port);
+ else
+ mv88e6xxx_translate_cmode(cmode, supported);
+ }
+}
+
+static void mv88e632x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
+ struct phylink_config *config)
+{
+ unsigned long *supported = config->supported_interfaces;
+ int cmode;
+
+ /* Translate the default cmode */
+ mv88e6xxx_translate_cmode(chip->ports[port].cmode, supported);
+
+ config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 |
+ MAC_1000FD;
+
+ /* Port 0/1 are serdes only ports */
+ if (port == 0 || port == 1) {
+ cmode = mv88e63xx_get_port_serdes_cmode(chip, port);
if (cmode < 0)
dev_err(chip->dev, "p%d: failed to read serdes cmode\n",
port);
@@ -838,24 +861,27 @@ static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
}
}
-static struct phylink_pcs *mv88e6xxx_mac_select_pcs(struct dsa_switch *ds,
- int port,
- phy_interface_t interface)
+static struct phylink_pcs *
+mv88e6xxx_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
struct phylink_pcs *pcs = ERR_PTR(-EOPNOTSUPP);
if (chip->info->ops->pcs_ops)
- pcs = chip->info->ops->pcs_ops->pcs_select(chip, port,
+ pcs = chip->info->ops->pcs_ops->pcs_select(chip, dp->index,
interface);
return pcs;
}
-static int mv88e6xxx_mac_prepare(struct dsa_switch *ds, int port,
+static int mv88e6xxx_mac_prepare(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ int port = dp->index;
int err = 0;
/* In inband mode, the link may come up at any time while the link
@@ -874,11 +900,13 @@ static int mv88e6xxx_mac_prepare(struct dsa_switch *ds, int port,
return err;
}
-static void mv88e6xxx_mac_config(struct dsa_switch *ds, int port,
+static void mv88e6xxx_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ int port = dp->index;
int err = 0;
mv88e6xxx_reg_lock(chip);
@@ -894,13 +922,15 @@ err_unlock:
mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
- dev_err(ds->dev, "p%d: failed to configure MAC/PCS\n", port);
+ dev_err(chip->dev, "p%d: failed to configure MAC/PCS\n", port);
}
-static int mv88e6xxx_mac_finish(struct dsa_switch *ds, int port,
+static int mv88e6xxx_mac_finish(struct phylink_config *config,
unsigned int mode, phy_interface_t interface)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
+ int port = dp->index;
int err = 0;
/* Undo the forced down state above after completing configuration
@@ -924,12 +954,14 @@ static int mv88e6xxx_mac_finish(struct dsa_switch *ds, int port,
return err;
}
-static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
+static void mv88e6xxx_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
const struct mv88e6xxx_ops *ops;
+ int port = dp->index;
int err = 0;
ops = chip->info->ops;
@@ -952,14 +984,16 @@ static void mv88e6xxx_mac_link_down(struct dsa_switch *ds, int port,
"p%d: failed to force MAC link down\n", port);
}
-static void mv88e6xxx_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
+static void mv88e6xxx_mac_link_up(struct phylink_config *config,
struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct mv88e6xxx_chip *chip = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct mv88e6xxx_chip *chip = dp->ds->priv;
const struct mv88e6xxx_ops *ops;
+ int port = dp->index;
int err = 0;
ops = chip->info->ops;
@@ -985,7 +1019,7 @@ error:
mv88e6xxx_reg_unlock(chip);
if (err && err != -EOPNOTSUPP)
- dev_err(ds->dev,
+ dev_err(chip->dev,
"p%d: failed to configure MAC link up\n", port);
}
@@ -3123,6 +3157,7 @@ static int mv88e6xxx_software_reset(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
{
struct gpio_desc *gpiod = chip->reset;
+ int err;
/* If there is a GPIO connected to the reset pin, toggle it */
if (gpiod) {
@@ -3131,17 +3166,26 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
* mid-byte, causing the first EEPROM read after the reset
* from the wrong location resulting in the switch booting
* to wrong mode and inoperable.
+ * For this reason, switch families with EEPROM support
+ * generally wait for EEPROM loads to complete in their
+ * pre- and post-reset handlers.
*/
- if (chip->info->ops->get_eeprom)
- mv88e6xxx_g2_eeprom_wait(chip);
+ if (chip->info->ops->hardware_reset_pre) {
+ err = chip->info->ops->hardware_reset_pre(chip);
+ if (err)
+ dev_err(chip->dev, "pre-reset error: %d\n", err);
+ }
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
- if (chip->info->ops->get_eeprom)
- mv88e6xxx_g2_eeprom_wait(chip);
+ if (chip->info->ops->hardware_reset_post) {
+ err = chip->info->ops->hardware_reset_post(chip);
+ if (err)
+ dev_err(chip->dev, "post-reset error: %d\n", err);
+ }
}
}
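The new hooks generalise the old open-coded get_eeprom check; chips whose EEPROM state is polled through Global 2 simply point both hooks at the existing wait routine, as the ops tables below show. A condensed sketch (struct name hypothetical):

static const struct mv88e6xxx_ops example_ops = {
	/* ... */
	.hardware_reset_pre  = mv88e6xxx_g2_eeprom_wait,
	.hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
	/* ... */
};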
@@ -3582,7 +3626,8 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
mv88e6xxx_reg_lock(chip);
if (chip->info->ops->port_set_jumbo_size)
ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu);
- else if (chip->info->ops->set_max_frame_size)
+ else if (chip->info->ops->set_max_frame_size &&
+ dsa_is_cpu_port(ds, port))
ret = chip->info->ops->set_max_frame_size(chip, new_mtu);
mv88e6xxx_reg_unlock(chip);
@@ -4371,6 +4416,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4561,6 +4608,8 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4661,6 +4710,8 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4755,6 +4806,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4813,6 +4866,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4869,6 +4924,8 @@ static const struct mv88e6xxx_ops mv88e6191_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4928,6 +4985,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -4981,6 +5040,8 @@ static const struct mv88e6xxx_ops mv88e6250_ops = {
.watchdog_ops = &mv88e6250_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6250_g1_wait_eeprom_done_prereset,
+ .hardware_reset_post = mv88e6xxx_g1_wait_eeprom_done,
.reset = mv88e6250_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
@@ -5028,6 +5089,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5087,13 +5150,15 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e632x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
@@ -5133,13 +5198,15 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.set_egress_port = mv88e6095_g1_set_egress_port,
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6352_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
- .phylink_get_caps = mv88e6185_phylink_get_caps,
+ .phylink_get_caps = mv88e632x_phylink_get_caps,
};
static const struct mv88e6xxx_ops mv88e6341_ops = {
@@ -5183,6 +5250,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5338,6 +5407,8 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.watchdog_ops = &mv88e6097_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6352_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5400,6 +5471,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5462,6 +5535,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.watchdog_ops = &mv88e6390_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -5527,6 +5602,8 @@ static const struct mv88e6xxx_ops mv88e6393x_ops = {
.watchdog_ops = &mv88e6393x_watchdog_ops,
.mgmt_rsvd2cpu = mv88e6393x_port_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
+ .hardware_reset_pre = mv88e6xxx_g2_eeprom_wait,
+ .hardware_reset_post = mv88e6xxx_g2_eeprom_wait,
.reset = mv88e6352_g1_reset,
.rmu_disable = mv88e6390_g1_rmu_disable,
.atu_get_hash = mv88e6165_g1_atu_get_hash,
@@ -6970,6 +7047,15 @@ static int mv88e6xxx_crosschip_lag_leave(struct dsa_switch *ds, int sw_index,
return err_sync ? : err_pvt;
}
+static const struct phylink_mac_ops mv88e6xxx_phylink_mac_ops = {
+ .mac_select_pcs = mv88e6xxx_mac_select_pcs,
+ .mac_prepare = mv88e6xxx_mac_prepare,
+ .mac_config = mv88e6xxx_mac_config,
+ .mac_finish = mv88e6xxx_mac_finish,
+ .mac_link_down = mv88e6xxx_mac_link_down,
+ .mac_link_up = mv88e6xxx_mac_link_up,
+};
+
static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_tag_protocol = mv88e6xxx_get_tag_protocol,
.change_tag_protocol = mv88e6xxx_change_tag_protocol,
@@ -6978,12 +7064,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_setup = mv88e6xxx_port_setup,
.port_teardown = mv88e6xxx_port_teardown,
.phylink_get_caps = mv88e6xxx_get_caps,
- .phylink_mac_select_pcs = mv88e6xxx_mac_select_pcs,
- .phylink_mac_prepare = mv88e6xxx_mac_prepare,
- .phylink_mac_config = mv88e6xxx_mac_config,
- .phylink_mac_finish = mv88e6xxx_mac_finish,
- .phylink_mac_link_down = mv88e6xxx_mac_link_down,
- .phylink_mac_link_up = mv88e6xxx_mac_link_up,
.get_strings = mv88e6xxx_get_strings,
.get_ethtool_stats = mv88e6xxx_get_ethtool_stats,
.get_eth_mac_stats = mv88e6xxx_get_eth_mac_stats,
@@ -7052,6 +7132,7 @@ static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
ds->priv = chip;
ds->dev = dev;
ds->ops = &mv88e6xxx_switch_ops;
+ ds->phylink_mac_ops = &mv88e6xxx_phylink_mac_ops;
ds->ageing_time_min = chip->info->age_time_coeff;
ds->ageing_time_max = chip->info->age_time_coeff * U8_MAX;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 85eb293381a7..c34caf9815c5 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -487,6 +487,12 @@ struct mv88e6xxx_ops {
int (*ppu_enable)(struct mv88e6xxx_chip *chip);
int (*ppu_disable)(struct mv88e6xxx_chip *chip);
+ /* Additional handlers to run before and after hard reset, to make sure
+ * that the switch and EEPROM are in a good state.
+ */
+ int (*hardware_reset_pre)(struct mv88e6xxx_chip *chip);
+ int (*hardware_reset_post)(struct mv88e6xxx_chip *chip);
+
/* Switch Software Reset */
int (*reset)(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 49444a72ff09..9820cd596757 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -75,6 +75,95 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
+static int mv88e6250_g1_eeprom_reload(struct mv88e6xxx_chip *chip)
+{
+ /* MV88E6185_G1_CTL1_RELOAD_EEPROM is also valid for 88E6250 */
+ int bit = __bf_shf(MV88E6185_G1_CTL1_RELOAD_EEPROM);
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &val);
+ if (err)
+ return err;
+
+ val |= MV88E6185_G1_CTL1_RELOAD_EEPROM;
+
+ err = mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, val);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_CTL1, bit, 0);
+}
+
+/* Returns 0 when done, -EBUSY when waiting, other negative codes on error */
+static int mv88e6xxx_g1_is_eeprom_done(struct mv88e6xxx_chip *chip)
+{
+ u16 val;
+ int err;
+
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
+ if (err < 0) {
+ dev_err(chip->dev, "Error reading status");
+ return err;
+ }
+
+ /* If the switch is still resetting, it may not
+ * respond on the bus, and so the MDIO read returns
+ * 0xffff. Differentiate between that and the EEPROM
+ * being done, which is signalled by bit 0 being set.
+ */
+ if (val == 0xffff || !(val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE)))
+ return -EBUSY;
+
+ return 0;
+}
+
+/* As the EEInt (EEPROM done) flag clears on read of the status register, this
+ * function must be called directly after a hard reset or EEPROM ReLoad request,
+ * or the done condition may have been missed.
+ */
+int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
+{
+ const unsigned long timeout = jiffies + 1 * HZ;
+ int ret;
+
+ /* Wait up to 1 second for the switch to finish reading the
+ * EEPROM.
+ */
+ while (time_before(jiffies, timeout)) {
+ ret = mv88e6xxx_g1_is_eeprom_done(chip);
+ if (ret != -EBUSY)
+ return ret;
+ }
+
+ dev_err(chip->dev, "Timeout waiting for EEPROM done");
+ return -ETIMEDOUT;
+}
+
+int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip)
+{
+ int ret;
+
+ ret = mv88e6xxx_g1_is_eeprom_done(chip);
+ if (ret != -EBUSY)
+ return ret;
+
+ /* Pre-reset, we don't know the state of the switch - when
+ * mv88e6xxx_g1_is_eeprom_done() returns -EBUSY, that may be because
+ * the switch is actually busy reading the EEPROM, or because
+ * MV88E6XXX_G1_STS_IRQ_EEPROM_DONE has been cleared by an unrelated
+ * status register read already.
+ *
+ * To account for the latter case, trigger another EEPROM reload for
+ * another chance at seeing the done flag.
+ */
+ ret = mv88e6250_g1_eeprom_reload(chip);
+ if (ret)
+ return ret;
+
+ return mv88e6xxx_g1_wait_eeprom_done(chip);
+}
+
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index 1095261f5b49..3dbb7a1b8fe1 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -282,6 +282,8 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
+int mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
+int mv88e6250_g1_wait_eeprom_done_prereset(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index ce3b3690c3c0..c47f068f56b3 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -457,7 +457,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
trace_mv88e6xxx_atu_full_violation(chip->dev, spid,
entry.portvec, entry.mac,
fid);
- chip->ports[spid].atu_full_violation++;
+ if (spid < ARRAY_SIZE(chip->ports))
+ chip->ports[spid].atu_full_violation++;
}
return IRQ_HANDLED;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
index 331b4ca089ff..49e6e1355142 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -64,7 +64,7 @@ static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr,
#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40)
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
const struct mv88e6xxx_ptp_ops *ptp_ops;
struct mv88e6xxx_chip *chip;
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
index cf7fb6d660b1..85acc758e3eb 100644
--- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -121,7 +121,7 @@ void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
struct sk_buff *skb);
int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip);
@@ -157,7 +157,7 @@ static inline void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
}
static inline int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
return -EOPNOTSUPP;
}
diff --git a/drivers/net/dsa/mv88e6xxx/trace.h b/drivers/net/dsa/mv88e6xxx/trace.h
index f59ca04768e7..5bd015b2b97a 100644
--- a/drivers/net/dsa/mv88e6xxx/trace.h
+++ b/drivers/net/dsa/mv88e6xxx/trace.h
@@ -28,7 +28,7 @@ DECLARE_EVENT_CLASS(mv88e6xxx_atu_violation,
),
TP_fast_assign(
- __assign_str(name, dev_name(dev));
+ __assign_str(name);
__entry->spid = spid;
__entry->portvec = portvec;
memcpy(__entry->addr, addr, ETH_ALEN);
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(mv88e6xxx_vtu_violation,
),
TP_fast_assign(
- __assign_str(name, dev_name(dev));
+ __assign_str(name);
__entry->spid = spid;
__entry->vid = vid;
),
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 61e95487732d..4a705f7333f4 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -61,11 +61,46 @@ static int felix_cpu_port_for_conduit(struct dsa_switch *ds,
return cpu_dp->index;
}
+/**
+ * felix_update_tag_8021q_rx_rule - Update VCAP ES0 tag_8021q rule after
+ * vlan_filtering change
+ * @outer_tagging_rule: Pointer to VCAP filter on which the update is performed
+ * @vlan_filtering: Current bridge VLAN filtering setting
+ *
+ * Source port identification for tag_8021q is done using VCAP ES0 rules on the
+ * CPU port(s). The ES0 tag B (inner tag from the packet) can be configured as
+ * either:
+ * - push_inner_tag=0: the inner tag is never pushed into the frame
+ * (and we lose info about the classified VLAN). This is
+ * good when the classified VLAN is a discardable quantity
+ * for the software RX path: it is either set to
+ * OCELOT_STANDALONE_PVID, or to
+ * ocelot_vlan_unaware_pvid(bridge).
+ * - push_inner_tag=1: the inner tag is always pushed. This is good when the
+ * classified VLAN is not a discardable quantity (the port
+ * is under a VLAN-aware bridge, and software needs to
+ * continue processing the packet in the same VLAN as the
+ * hardware).
+ * The point is that what is good for a VLAN-unaware port is not good for a
+ * VLAN-aware port, and vice versa. Thus, the RX tagging rules must be kept in
+ * sync with the VLAN filtering state of the port.
+ */
+static void
+felix_update_tag_8021q_rx_rule(struct ocelot_vcap_filter *outer_tagging_rule,
+ bool vlan_filtering)
+{
+ if (vlan_filtering)
+ outer_tagging_rule->action.push_inner_tag = OCELOT_ES0_TAG;
+ else
+ outer_tagging_rule->action.push_inner_tag = OCELOT_NO_ES0_TAG;
+}
+
/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
* the tagger can perform RX source port identification.
*/
static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
- int upstream, u16 vid)
+ int upstream, u16 vid,
+ bool vlan_filtering)
{
struct ocelot_vcap_filter *outer_tagging_rule;
struct ocelot *ocelot = ds->priv;
@@ -96,6 +131,14 @@ static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port,
outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD;
outer_tagging_rule->action.tag_a_vid_sel = 1;
outer_tagging_rule->action.vid_a_val = vid;
+ felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering);
+ outer_tagging_rule->action.tag_b_tpid_sel = OCELOT_TAG_TPID_SEL_8021Q;
+ /* Leave TAG_B_VID_SEL at 0 (Classified VID + VID_B_VAL). Since we also
+ * leave VID_B_VAL at 0, this makes ES0 tag B (the inner tag) equal to
+ * the classified VID, which we need to see in the DSA tagger's receive
+ * path. Note: the inner tag is only visible in the packet when pushed
+ * (push_inner_tag == OCELOT_ES0_TAG).
+ */
err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL);
if (err)
@@ -227,6 +270,7 @@ static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid)
static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
u16 flags)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct dsa_port *cpu_dp;
int err;
@@ -234,11 +278,12 @@ static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
* membership, which we aren't. So we don't need to add any VCAP filter
* for the CPU port.
*/
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
- err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
+ err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid,
+ dsa_port_is_vlan_filtering(dp));
if (err)
return err;
}
@@ -258,10 +303,11 @@ add_tx_failed:
static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct dsa_port *cpu_dp;
int err;
- if (!dsa_is_user_port(ds, port))
+ if (!dsa_port_is_user(dp))
return 0;
dsa_switch_for_each_cpu_port(cpu_dp, ds) {
@@ -278,11 +324,41 @@ static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
del_tx_failed:
dsa_switch_for_each_cpu_port(cpu_dp, ds)
- felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid);
+ felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid,
+ dsa_port_is_vlan_filtering(dp));
return err;
}
+static int felix_update_tag_8021q_rx_rules(struct dsa_switch *ds, int port,
+ bool vlan_filtering)
+{
+ struct ocelot_vcap_filter *outer_tagging_rule;
+ struct ocelot_vcap_block *block_vcap_es0;
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ unsigned long cookie;
+ int err;
+
+ block_vcap_es0 = &ocelot->block[VCAP_ES0];
+
+ dsa_switch_for_each_cpu_port(cpu_dp, ds) {
+ cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port,
+ cpu_dp->index);
+
+ outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0,
+ cookie, false);
+
+ felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering);
+
+ err = ocelot_vcap_filter_replace(ocelot, outer_tagging_rule);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
static int felix_trap_get_cpu_port(struct dsa_switch *ds,
const struct ocelot_vcap_filter *trap)
{
@@ -528,7 +604,19 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds)
* so we need to be careful that there are no extra frames to be
* dequeued over MMIO, since we would never know to discard them.
*/
+ ocelot_lock_xtr_grp_bh(ocelot, 0);
ocelot_drain_cpu_queue(ocelot, 0);
+ ocelot_unlock_xtr_grp_bh(ocelot, 0);
+
+ /* Problem: when using push_inner_tag=1 for ES0 tag B, we lose info
+ * about whether the received packets were VLAN-tagged on the wire,
+ * since they are always tagged on egress towards the CPU port.
+ *
+ * Since using push_inner_tag=1 is unavoidable for VLAN-aware bridges,
+ * we must work around the fallout by untagging in software to make
+ * untagged reception work more or less as expected.
+ */
+ ds->untag_vlan_aware_bridge_pvid = true;
return 0;
}
@@ -554,6 +642,8 @@ static void felix_tag_8021q_teardown(struct dsa_switch *ds)
ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);
dsa_tag_8021q_unregister(ds);
+
+ ds->untag_vlan_aware_bridge_pvid = false;
}
static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
@@ -1008,8 +1098,23 @@ static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
+ bool using_tag_8021q;
+ struct felix *felix;
+ int err;
+
+ err = ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
+ if (err)
+ return err;
+
+ felix = ocelot_to_felix(ocelot);
+ using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q;
+ if (using_tag_8021q) {
+ err = felix_update_tag_8021q_rx_rules(ds, port, enabled);
+ if (err)
+ return err;
+ }
- return ocelot_port_vlan_filtering(ocelot, port, enabled, extack);
+ return 0;
}
static int felix_vlan_add(struct dsa_switch *ds, int port,
@@ -1050,24 +1155,32 @@ static void felix_phylink_get_caps(struct dsa_switch *ds, int port,
config->supported_interfaces);
}
-static void felix_phylink_mac_config(struct dsa_switch *ds, int port,
+static void felix_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
+ int port = dp->index;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
if (felix->info->phylink_mac_config)
felix->info->phylink_mac_config(ocelot, port, mode, state);
}
-static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
- int port,
- phy_interface_t iface)
+static struct phylink_pcs *
+felix_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t iface)
{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
struct phylink_pcs *pcs = NULL;
+ int port = dp->index;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
if (felix->pcs && felix->pcs[port])
pcs = felix->pcs[port];
@@ -1075,11 +1188,13 @@ static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds,
return pcs;
}
-static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void felix_phylink_mac_link_down(struct phylink_config *config,
unsigned int link_an_mode,
phy_interface_t interface)
{
- struct ocelot *ocelot = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
+ int port = dp->index;
struct felix *felix;
felix = ocelot_to_felix(ocelot);
@@ -1088,15 +1203,19 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port,
felix->info->quirks);
}
-static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void felix_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int link_an_mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct ocelot *ocelot = ds->priv;
- struct felix *felix = ocelot_to_felix(ocelot);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ocelot *ocelot = dp->ds->priv;
+ int port = dp->index;
+ struct felix *felix;
+
+ felix = ocelot_to_felix(ocelot);
ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode,
interface, speed, duplex, tx_pause, rx_pause,
@@ -1220,7 +1339,7 @@ static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset)
}
static int felix_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ocelot *ocelot = ds->priv;
@@ -1504,6 +1623,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
int port = xmit_work->dp->index;
int retries = 10;
+ ocelot_lock_inj_grp(ocelot, 0);
+
do {
if (ocelot_can_inject(ocelot, 0))
break;
@@ -1512,6 +1633,7 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
} while (--retries);
if (!retries) {
+ ocelot_unlock_inj_grp(ocelot, 0);
dev_err(ocelot->dev, "port %d failed to inject skb\n",
port);
ocelot_port_purge_txtstamp_skb(ocelot, port, skb);
@@ -1521,6 +1643,8 @@ static void felix_port_deferred_xmit(struct kthread_work *work)
ocelot_port_inject_frame(ocelot, port, 0, rew_op, skb);
+ ocelot_unlock_inj_grp(ocelot, 0);
+
consume_skb(skb);
kfree(xmit_work);
}
@@ -1583,6 +1707,15 @@ static int felix_setup(struct dsa_switch *ds)
felix_port_qos_map_init(ocelot, dp->index);
}
+ if (felix->info->request_irq) {
+ err = felix->info->request_irq(ocelot);
+ if (err) {
+ dev_err(ocelot->dev, "Failed to request IRQ: %pe\n",
+ ERR_PTR(err));
+ goto out_deinit_ports;
+ }
+ }
+
err = ocelot_devlink_sb_register(ocelot);
if (err)
goto out_deinit_ports;
@@ -1671,6 +1804,8 @@ static bool felix_check_xtr_pkt(struct ocelot *ocelot)
if (!felix->info->quirk_no_xtr_irq)
return false;
+ ocelot_lock_xtr_grp(ocelot, grp);
+
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
unsigned int type;
@@ -1707,6 +1842,8 @@ out:
ocelot_drain_cpu_queue(ocelot, 0);
}
+ ocelot_unlock_xtr_grp(ocelot, grp);
+
return true;
}
@@ -2083,7 +2220,14 @@ static void felix_get_mm_stats(struct dsa_switch *ds, int port,
ocelot_port_get_mm_stats(ocelot, port, stats);
}
-const struct dsa_switch_ops felix_switch_ops = {
+static const struct phylink_mac_ops felix_phylink_mac_ops = {
+ .mac_select_pcs = felix_phylink_mac_select_pcs,
+ .mac_config = felix_phylink_mac_config,
+ .mac_link_down = felix_phylink_mac_link_down,
+ .mac_link_up = felix_phylink_mac_link_up,
+};
+
+static const struct dsa_switch_ops felix_switch_ops = {
.get_tag_protocol = felix_get_tag_protocol,
.change_tag_protocol = felix_change_tag_protocol,
.connect_tag_protocol = felix_connect_tag_protocol,
@@ -2104,10 +2248,6 @@ const struct dsa_switch_ops felix_switch_ops = {
.get_sset_count = felix_get_sset_count,
.get_ts_info = felix_get_ts_info,
.phylink_get_caps = felix_phylink_get_caps,
- .phylink_mac_config = felix_phylink_mac_config,
- .phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
- .phylink_mac_link_down = felix_phylink_mac_link_down,
- .phylink_mac_link_up = felix_phylink_mac_link_up,
.port_enable = felix_port_enable,
.port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
@@ -2166,7 +2306,53 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_set_host_flood = felix_port_set_host_flood,
.port_change_conduit = felix_port_change_conduit,
};
-EXPORT_SYMBOL_GPL(felix_switch_ops);
+
+int felix_register_switch(struct device *dev, resource_size_t switch_base,
+ int num_flooding_pgids, bool ptp,
+ bool mm_supported,
+ enum dsa_tag_protocol init_tag_proto,
+ const struct felix_info *info)
+{
+ struct dsa_switch *ds;
+ struct ocelot *ocelot;
+ struct felix *felix;
+ int err;
+
+ felix = devm_kzalloc(dev, sizeof(*felix), GFP_KERNEL);
+ if (!felix)
+ return -ENOMEM;
+
+ ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ if (!ds)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, felix);
+
+ ocelot = &felix->ocelot;
+ ocelot->dev = dev;
+ ocelot->num_flooding_pgids = num_flooding_pgids;
+ ocelot->ptp = ptp;
+ ocelot->mm_supported = mm_supported;
+
+ felix->info = info;
+ felix->switch_base = switch_base;
+ felix->ds = ds;
+ felix->tag_proto = init_tag_proto;
+
+ ds->dev = dev;
+ ds->num_ports = info->num_ports;
+ ds->num_tx_queues = OCELOT_NUM_TC;
+ ds->ops = &felix_switch_ops;
+ ds->phylink_mac_ops = &felix_phylink_mac_ops;
+ ds->priv = ocelot;
+
+ err = dsa_register_switch(ds);
+ if (err)
+ dev_err_probe(dev, err, "Failed to register DSA switch\n");
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(felix_register_switch);
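For illustration, a probe routine now reduces to a single call (device and info names hypothetical; the converted vsc9959, vsc7512 and seville probes later in this patch are the real instances):

static int example_probe(struct platform_device *pdev)
{
	/* No PCI switch base, one flooding PGID, no PTP,
	 * no MAC Merge support.
	 */
	return felix_register_switch(&pdev->dev, 0, 1, false, false,
				     DSA_TAG_PROTO_OCELOT,
				     &example_felix_info);
}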
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
{
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index dbf5872fe367..211991f494e3 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -32,7 +32,6 @@ struct felix_info {
const u32 *port_modes;
int num_mact_rows;
int num_ports;
- int num_tx_queues;
struct vcap_props *vcap;
u16 vcap_pol_base;
u16 vcap_pol_max;
@@ -64,6 +63,7 @@ struct felix_info {
const struct phylink_link_state *state);
int (*configure_serdes)(struct ocelot *ocelot, int port,
struct device_node *portnp);
+ int (*request_irq)(struct ocelot *ocelot);
};
/* Methods for initializing the hardware resources specific to a tagging
@@ -82,8 +82,6 @@ struct felix_tag_proto_ops {
struct netlink_ext_ack *extack);
};
-extern const struct dsa_switch_ops felix_switch_ops;
-
/* DSA glue / front-end for struct ocelot */
struct felix {
struct dsa_switch *ds;
@@ -99,6 +97,11 @@ struct felix {
unsigned long host_flood_mc_mask;
};
+int felix_register_switch(struct device *dev, resource_size_t switch_base,
+ int num_flooding_pgids, bool ptp,
+ bool mm_supported,
+ enum dsa_tag_protocol init_tag_proto,
+ const struct felix_info *info);
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port);
int felix_netdev_to_port(struct net_device *dev);
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 3c5509e75a54..ba37a566da39 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -1755,6 +1755,9 @@ static int vsc9959_stream_identify(struct flow_cls_offload *f,
BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS)))
return -EOPNOTSUPP;
+ if (flow_rule_match_has_control_flags(rule, f->common.extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
@@ -2602,6 +2605,28 @@ set:
}
}
+/* The INTB interrupt is shared between PTP TX timestamp availability
+ * notification and MAC Merge status change on each port.
+ */
+static irqreturn_t vsc9959_irq_handler(int irq, void *data)
+{
+ struct ocelot *ocelot = data;
+
+ ocelot_get_txtstamp(ocelot);
+ ocelot_mm_irq(ocelot);
+
+ return IRQ_HANDLED;
+}
+
+static int vsc9959_request_irq(struct ocelot *ocelot)
+{
+ struct pci_dev *pdev = to_pci_dev(ocelot->dev);
+
+ return devm_request_threaded_irq(ocelot->dev, pdev->irq, NULL,
+ &vsc9959_irq_handler, IRQF_ONESHOT,
+ "felix-intb", ocelot);
+}
+
static const struct ocelot_ops vsc9959_ops = {
.reset = vsc9959_reset,
.wm_enc = vsc9959_wm_enc,
@@ -2633,7 +2658,6 @@ static const struct felix_info felix_info_vsc9959 = {
.vcap_pol_max2 = 0,
.num_mact_rows = 2048,
.num_ports = VSC9959_NUM_PORTS,
- .num_tx_queues = OCELOT_NUM_TC,
.quirks = FELIX_MAC_QUIRKS,
.quirk_no_xtr_irq = true,
.ptp_caps = &vsc9959_ptp_caps,
@@ -2642,98 +2666,36 @@ static const struct felix_info felix_info_vsc9959 = {
.port_modes = vsc9959_port_modes,
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
+ .request_irq = vsc9959_request_irq,
};
-/* The INTB interrupt is shared between for PTP TX timestamp availability
- * notification and MAC Merge status change on each port.
- */
-static irqreturn_t felix_irq_handler(int irq, void *data)
-{
- struct ocelot *ocelot = (struct ocelot *)data;
-
- ocelot_get_txtstamp(ocelot);
- ocelot_mm_irq(ocelot);
-
- return IRQ_HANDLED;
-}
-
static int felix_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
- struct dsa_switch *ds;
- struct ocelot *ocelot;
- struct felix *felix;
+ struct device *dev = &pdev->dev;
+ resource_size_t switch_base;
int err;
- if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) {
- dev_info(&pdev->dev, "device is disabled, skipping\n");
- return -ENODEV;
- }
-
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "device enable failed\n");
- goto err_pci_enable;
- }
-
- felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
- if (!felix) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate driver memory\n");
- goto err_alloc_felix;
+ dev_err(dev, "device enable failed: %pe\n", ERR_PTR(err));
+ return err;
}
- pci_set_drvdata(pdev, felix);
- ocelot = &felix->ocelot;
- ocelot->dev = &pdev->dev;
- ocelot->num_flooding_pgids = OCELOT_NUM_TC;
- felix->info = &felix_info_vsc9959;
- felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
-
pci_set_master(pdev);
- err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL,
- &felix_irq_handler, IRQF_ONESHOT,
- "felix-intb", ocelot);
- if (err) {
- dev_err(&pdev->dev, "Failed to request irq\n");
- goto err_alloc_irq;
- }
-
- ocelot->ptp = 1;
- ocelot->mm_supported = true;
+ switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
- ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
- if (!ds) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
- goto err_alloc_ds;
- }
-
- ds->dev = &pdev->dev;
- ds->num_ports = felix->info->num_ports;
- ds->num_tx_queues = felix->info->num_tx_queues;
- ds->ops = &felix_switch_ops;
- ds->priv = ocelot;
- felix->ds = ds;
- felix->tag_proto = DSA_TAG_PROTO_OCELOT;
-
- err = dsa_register_switch(ds);
- if (err) {
- dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n");
- goto err_register_ds;
- }
+ err = felix_register_switch(dev, switch_base, OCELOT_NUM_TC,
+ true, true, DSA_TAG_PROTO_OCELOT,
+ &felix_info_vsc9959);
+ if (err)
+ goto out_disable;
return 0;
-err_register_ds:
- kfree(ds);
-err_alloc_ds:
-err_alloc_irq:
- kfree(felix);
-err_alloc_felix:
+out_disable:
pci_disable_device(pdev);
-err_pci_enable:
return err;
}
@@ -2746,9 +2708,6 @@ static void felix_pci_remove(struct pci_dev *pdev)
dsa_unregister_switch(felix->ds);
- kfree(felix->ds);
- kfree(felix);
-
pci_disable_device(pdev);
}
diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c
index 22187d831c4b..5632a7248cd4 100644
--- a/drivers/net/dsa/ocelot/ocelot_ext.c
+++ b/drivers/net/dsa/ocelot/ocelot_ext.c
@@ -57,7 +57,6 @@ static const struct felix_info vsc7512_info = {
.vcap = vsc7514_vcap_props,
.num_mact_rows = 1024,
.num_ports = VSC7514_NUM_PORTS,
- .num_tx_queues = OCELOT_NUM_TC,
.port_modes = vsc7512_port_modes,
.phylink_mac_config = ocelot_phylink_mac_config,
.configure_serdes = ocelot_port_configure_serdes,
@@ -65,54 +64,8 @@ static const struct felix_info vsc7512_info = {
static int ocelot_ext_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct dsa_switch *ds;
- struct ocelot *ocelot;
- struct felix *felix;
- int err;
-
- felix = kzalloc(sizeof(*felix), GFP_KERNEL);
- if (!felix)
- return -ENOMEM;
-
- dev_set_drvdata(dev, felix);
-
- ocelot = &felix->ocelot;
- ocelot->dev = dev;
-
- ocelot->num_flooding_pgids = 1;
-
- felix->info = &vsc7512_info;
-
- ds = kzalloc(sizeof(*ds), GFP_KERNEL);
- if (!ds) {
- err = -ENOMEM;
- dev_err_probe(dev, err, "Failed to allocate DSA switch\n");
- goto err_free_felix;
- }
-
- ds->dev = dev;
- ds->num_ports = felix->info->num_ports;
- ds->num_tx_queues = felix->info->num_tx_queues;
-
- ds->ops = &felix_switch_ops;
- ds->priv = ocelot;
- felix->ds = ds;
- felix->tag_proto = DSA_TAG_PROTO_OCELOT;
-
- err = dsa_register_switch(ds);
- if (err) {
- dev_err_probe(dev, err, "Failed to register DSA switch\n");
- goto err_free_ds;
- }
-
- return 0;
-
-err_free_ds:
- kfree(ds);
-err_free_felix:
- kfree(felix);
- return err;
+ return felix_register_switch(&pdev->dev, 0, 1, false, false,
+ DSA_TAG_PROTO_OCELOT, &vsc7512_info);
}
static void ocelot_ext_remove(struct platform_device *pdev)
@@ -123,9 +76,6 @@ static void ocelot_ext_remove(struct platform_device *pdev)
return;
dsa_unregister_switch(felix->ds);
-
- kfree(felix->ds);
- kfree(felix);
}
static void ocelot_ext_shutdown(struct platform_device *pdev)
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index 049930da0521..70782649c395 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -963,7 +963,6 @@ static const struct felix_info seville_info_vsc9953 = {
.quirks = FELIX_MAC_QUIRKS,
.num_mact_rows = 2048,
.num_ports = VSC9953_NUM_PORTS,
- .num_tx_queues = OCELOT_NUM_TC,
.mdio_bus_alloc = vsc9953_mdio_bus_alloc,
.mdio_bus_free = vsc9953_mdio_bus_free,
.port_modes = vsc9953_port_modes,
@@ -971,62 +970,18 @@ static const struct felix_info seville_info_vsc9953 = {
static int seville_probe(struct platform_device *pdev)
{
- struct dsa_switch *ds;
- struct ocelot *ocelot;
+ struct device *dev = &pdev->dev;
struct resource *res;
- struct felix *felix;
- int err;
-
- felix = kzalloc(sizeof(struct felix), GFP_KERNEL);
- if (!felix) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate driver memory\n");
- goto err_alloc_felix;
- }
-
- platform_set_drvdata(pdev, felix);
-
- ocelot = &felix->ocelot;
- ocelot->dev = &pdev->dev;
- ocelot->num_flooding_pgids = 1;
- felix->info = &seville_info_vsc9953;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- err = -EINVAL;
- dev_err(&pdev->dev, "Invalid resource\n");
- goto err_alloc_felix;
- }
- felix->switch_base = res->start;
-
- ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL);
- if (!ds) {
- err = -ENOMEM;
- dev_err(&pdev->dev, "Failed to allocate DSA switch\n");
- goto err_alloc_ds;
- }
-
- ds->dev = &pdev->dev;
- ds->num_ports = felix->info->num_ports;
- ds->ops = &felix_switch_ops;
- ds->priv = ocelot;
- felix->ds = ds;
- felix->tag_proto = DSA_TAG_PROTO_SEVILLE;
-
- err = dsa_register_switch(ds);
- if (err) {
- dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err);
- goto err_register_ds;
+ dev_err(dev, "Invalid resource\n");
+ return -EINVAL;
}
- return 0;
-
-err_register_ds:
- kfree(ds);
-err_alloc_ds:
-err_alloc_felix:
- kfree(felix);
- return err;
+ return felix_register_switch(dev, res->start, 1, false, false,
+ DSA_TAG_PROTO_SEVILLE,
+ &seville_info_vsc9953);
}
static void seville_remove(struct platform_device *pdev)
@@ -1037,9 +992,6 @@ static void seville_remove(struct platform_device *pdev)
return;
dsa_unregister_switch(felix->ds);
-
- kfree(felix->ds);
- kfree(felix);
}
static void seville_shutdown(struct platform_device *pdev)
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 8d9d271ac3af..e9f2c67bc15f 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -523,28 +523,30 @@ static void ar9331_sw_phylink_get_caps(struct dsa_switch *ds, int port,
}
}
-static void ar9331_sw_phylink_mac_config(struct dsa_switch *ds, int port,
+static void ar9331_sw_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct ar9331_sw_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ar9331_sw_priv *priv = dp->ds->priv;
struct regmap *regmap = priv->regmap;
int ret;
- ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
+ ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(dp->index),
AR9331_SW_PORT_STATUS_LINK_EN |
AR9331_SW_PORT_STATUS_FLOW_LINK_EN, 0);
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
}
-static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void ar9331_sw_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct ar9331_sw_priv *priv = ds->priv;
- struct ar9331_sw_port *p = &priv->port[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ar9331_sw_priv *priv = dp->ds->priv;
struct regmap *regmap = priv->regmap;
+ int port = dp->index;
int ret;
ret = regmap_update_bits(regmap, AR9331_SW_REG_PORT_STATUS(port),
@@ -552,23 +554,24 @@ static void ar9331_sw_phylink_mac_link_down(struct dsa_switch *ds, int port,
if (ret)
dev_err_ratelimited(priv->dev, "%s: %i\n", __func__, ret);
- cancel_delayed_work_sync(&p->mib_read);
+ cancel_delayed_work_sync(&priv->port[port].mib_read);
}
-static void ar9331_sw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void ar9331_sw_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct ar9331_sw_priv *priv = ds->priv;
- struct ar9331_sw_port *p = &priv->port[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct ar9331_sw_priv *priv = dp->ds->priv;
struct regmap *regmap = priv->regmap;
+ int port = dp->index;
u32 val;
int ret;
- schedule_delayed_work(&p->mib_read, 0);
+ schedule_delayed_work(&priv->port[port].mib_read, 0);
val = AR9331_SW_PORT_STATUS_MAC_MASK;
switch (speed) {
@@ -684,14 +687,17 @@ static void ar9331_get_pause_stats(struct dsa_switch *ds, int port,
spin_unlock(&p->stats_lock);
}
+static const struct phylink_mac_ops ar9331_phylink_mac_ops = {
+ .mac_config = ar9331_sw_phylink_mac_config,
+ .mac_link_down = ar9331_sw_phylink_mac_link_down,
+ .mac_link_up = ar9331_sw_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops ar9331_sw_ops = {
.get_tag_protocol = ar9331_sw_get_tag_protocol,
.setup = ar9331_sw_setup,
.port_disable = ar9331_sw_port_disable,
.phylink_get_caps = ar9331_sw_phylink_get_caps,
- .phylink_mac_config = ar9331_sw_phylink_mac_config,
- .phylink_mac_link_down = ar9331_sw_phylink_mac_link_down,
- .phylink_mac_link_up = ar9331_sw_phylink_mac_link_up,
.get_stats64 = ar9331_get_stats64,
.get_pause_stats = ar9331_get_pause_stats,
};
@@ -1015,7 +1021,7 @@ static const struct regmap_config ar9331_mdio_regmap_config = {
.cache_type = REGCACHE_MAPLE,
};
-static struct regmap_bus ar9331_sw_bus = {
+static const struct regmap_bus ar9331_sw_bus = {
.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
.read = ar9331_mdio_read,
@@ -1059,6 +1065,7 @@ static int ar9331_sw_probe(struct mdio_device *mdiodev)
ds->priv = priv;
priv->ops = ar9331_sw_ops;
ds->ops = &priv->ops;
+ ds->phylink_mac_ops = &ar9331_phylink_mac_ops;
dev_set_drvdata(&mdiodev->dev, priv);
for (i = 0; i < ARRAY_SIZE(priv->port); i++) {
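
The ar9331 hunks above are the template for the phylink conversions that follow (qca8k, rtl8365mb, rtl8366rb, a5psw, sja1105, vsc73xx): the MAC callbacks move out of dsa_switch_ops into a phylink_mac_ops structure, take a phylink_config instead of (ds, port), and mac_link_up() gains a reordered signature with phydev preceding mode. A condensed sketch of the pattern, with hypothetical foo_* names:

	/* phylink hands each callback the phylink_config embedded in the
	 * dsa_port, so port index and driver state are recovered with
	 * dsa_phylink_to_port() rather than passed in.
	 */
	static void foo_mac_config(struct phylink_config *config,
				   unsigned int mode,
				   const struct phylink_link_state *state)
	{
		/* Nothing to do for fixed setups; several drivers in this
		 * series add exactly such an empty stub.
		 */
	}

	static void foo_mac_link_down(struct phylink_config *config,
				      unsigned int mode,
				      phy_interface_t interface)
	{
		struct dsa_port *dp = dsa_phylink_to_port(config);
		struct foo_priv *priv = dp->ds->priv;

		foo_port_stop(priv, dp->index);		/* hypothetical */
	}

	static void foo_mac_link_up(struct phylink_config *config,
				    struct phy_device *phydev,
				    unsigned int mode,
				    phy_interface_t interface, int speed,
				    int duplex, bool tx_pause, bool rx_pause)
	{
		struct dsa_port *dp = dsa_phylink_to_port(config);
		struct foo_priv *priv = dp->ds->priv;

		foo_port_start(priv, dp->index, speed, duplex); /* hypothetical */
	}

	static const struct phylink_mac_ops foo_phylink_mac_ops = {
		.mac_config	= foo_mac_config,
		.mac_link_down	= foo_mac_link_down,
		.mac_link_up	= foo_mac_link_up,
	};

	/* wired up once at probe time:
	 *	ds->phylink_mac_ops = &foo_phylink_mac_ops;
	 */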
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index dab66c0c6f64..f8d8c70642c4 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -565,7 +565,7 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_
return qca8k_regmap_update_bits_mii(priv, reg, mask, write_val);
}
-static struct regmap_config qca8k_regmap_config = {
+static const struct regmap_config qca8k_regmap_config = {
.reg_bits = 16,
.val_bits = 32,
.reg_stride = 4,
@@ -1283,11 +1283,13 @@ qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_inde
}
static struct phylink_pcs *
-qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+qca8k_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
- struct qca8k_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct qca8k_priv *priv = dp->ds->priv;
struct phylink_pcs *pcs = NULL;
+ int port = dp->index;
switch (interface) {
case PHY_INTERFACE_MODE_SGMII:
@@ -1311,13 +1313,18 @@ qca8k_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
}
static void
-qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
+qca8k_phylink_mac_config(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state)
{
- struct qca8k_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct dsa_switch *ds = dp->ds;
+ struct qca8k_priv *priv;
+ int port = dp->index;
int cpu_port_index;
u32 reg;
+ priv = ds->priv;
+
switch (port) {
case 0: /* 1st CPU port */
if (state->interface != PHY_INTERFACE_MODE_RGMII &&
@@ -1426,20 +1433,24 @@ static void qca8k_phylink_get_caps(struct dsa_switch *ds, int port,
}
static void
-qca8k_phylink_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+qca8k_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
- struct qca8k_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct qca8k_priv *priv = dp->ds->priv;
- qca8k_port_set_status(priv, port, 0);
+ qca8k_port_set_status(priv, dp->index, 0);
}
static void
-qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface, struct phy_device *phydev,
- int speed, int duplex, bool tx_pause, bool rx_pause)
+qca8k_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
{
- struct qca8k_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct qca8k_priv *priv = dp->ds->priv;
+ int port = dp->index;
u32 reg;
if (phylink_autoneg_inband(mode)) {
@@ -1463,10 +1474,10 @@ qca8k_phylink_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
if (duplex == DUPLEX_FULL)
reg |= QCA8K_PORT_STATUS_DUPLEX;
- if (rx_pause || dsa_is_cpu_port(ds, port))
+ if (rx_pause || dsa_port_is_cpu(dp))
reg |= QCA8K_PORT_STATUS_RXFLOW;
- if (tx_pause || dsa_is_cpu_port(ds, port))
+ if (tx_pause || dsa_port_is_cpu(dp))
reg |= QCA8K_PORT_STATUS_TXFLOW;
}
@@ -1991,6 +2002,13 @@ qca8k_setup(struct dsa_switch *ds)
return 0;
}
+static const struct phylink_mac_ops qca8k_phylink_mac_ops = {
+ .mac_select_pcs = qca8k_phylink_mac_select_pcs,
+ .mac_config = qca8k_phylink_mac_config,
+ .mac_link_down = qca8k_phylink_mac_link_down,
+ .mac_link_up = qca8k_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops qca8k_switch_ops = {
.get_tag_protocol = qca8k_get_tag_protocol,
.setup = qca8k_setup,
@@ -2021,10 +2039,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
.port_vlan_add = qca8k_port_vlan_add,
.port_vlan_del = qca8k_port_vlan_del,
.phylink_get_caps = qca8k_phylink_get_caps,
- .phylink_mac_select_pcs = qca8k_phylink_mac_select_pcs,
- .phylink_mac_config = qca8k_phylink_mac_config,
- .phylink_mac_link_down = qca8k_phylink_mac_link_down,
- .phylink_mac_link_up = qca8k_phylink_mac_link_up,
.get_phy_flags = qca8k_get_phy_flags,
.port_lag_join = qca8k_port_lag_join,
.port_lag_leave = qca8k_port_lag_leave,
@@ -2091,6 +2105,7 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
priv->ds->num_ports = QCA8K_NUM_PORTS;
priv->ds->priv = priv;
priv->ds->ops = &qca8k_switch_ops;
+ priv->ds->phylink_mac_ops = &qca8k_phylink_mac_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index 7f80035c5441..560c74c4ac3d 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -614,11 +614,57 @@ void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
qca8k_port_configure_learning(ds, port, learning);
}
+static int qca8k_update_port_member(struct qca8k_priv *priv, int port,
+ const struct net_device *bridge_dev,
+ bool join)
+{
+ bool isolated = !!(priv->port_isolated_map & BIT(port)), other_isolated;
+ struct dsa_port *dp = dsa_to_port(priv->ds, port), *other_dp;
+ u32 port_mask = BIT(dp->cpu_dp->index);
+ int i, ret;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (i == port)
+ continue;
+ if (dsa_is_cpu_port(priv->ds, i))
+ continue;
+
+ other_dp = dsa_to_port(priv->ds, i);
+ if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev))
+ continue;
+
+ other_isolated = !!(priv->port_isolated_map & BIT(i));
+
+ /* Add/remove this port to/from the portvlan mask of the other
+ * ports in the bridge
+ */
+ if (join && !(isolated && other_isolated)) {
+ port_mask |= BIT(i);
+ ret = regmap_set_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ } else {
+ ret = regmap_clear_bits(priv->regmap,
+ QCA8K_PORT_LOOKUP_CTRL(i),
+ BIT(port));
+ }
+
+ if (ret)
+ return ret;
+ }
+
+ /* Add/remove all other ports to/from this port's portvlan mask */
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
+ QCA8K_PORT_LOOKUP_MEMBER, port_mask);
+
+ return ret;
+}
+
int qca8k_port_pre_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
- if (flags.mask & ~BR_LEARNING)
+ if (flags.mask & ~(BR_LEARNING | BR_ISOLATED))
return -EINVAL;
return 0;
@@ -628,6 +674,7 @@ int qca8k_port_bridge_flags(struct dsa_switch *ds, int port,
struct switchdev_brport_flags flags,
struct netlink_ext_ack *extack)
{
+ struct qca8k_priv *priv = ds->priv;
int ret;
if (flags.mask & BR_LEARNING) {
@@ -637,6 +684,20 @@ int qca8k_port_bridge_flags(struct dsa_switch *ds, int port,
return ret;
}
+ if (flags.mask & BR_ISOLATED) {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp);
+
+ if (flags.val & BR_ISOLATED)
+ priv->port_isolated_map |= BIT(port);
+ else
+ priv->port_isolated_map &= ~BIT(port);
+
+ ret = qca8k_update_port_member(priv, port, bridge_dev, true);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -646,62 +707,21 @@ int qca8k_port_bridge_join(struct dsa_switch *ds, int port,
struct netlink_ext_ack *extack)
{
struct qca8k_priv *priv = ds->priv;
- int port_mask, cpu_port;
- int i, ret;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
- port_mask = BIT(cpu_port);
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
- continue;
- /* Add this port to the portvlan mask of the other ports
- * in the bridge
- */
- ret = regmap_set_bits(priv->regmap,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- if (ret)
- return ret;
- if (i != port)
- port_mask |= BIT(i);
- }
- /* Add all other ports to this ports portvlan mask */
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, port_mask);
-
- return ret;
+ return qca8k_update_port_member(priv, port, bridge.dev, true);
}
void qca8k_port_bridge_leave(struct dsa_switch *ds, int port,
struct dsa_bridge bridge)
{
struct qca8k_priv *priv = ds->priv;
- int cpu_port, i;
-
- cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
-
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
- if (dsa_is_cpu_port(ds, i))
- continue;
- if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge))
- continue;
- /* Remove this port to the portvlan mask of the other ports
- * in the bridge
- */
- regmap_clear_bits(priv->regmap,
- QCA8K_PORT_LOOKUP_CTRL(i),
- BIT(port));
- }
+ int err;
- /* Set the cpu port to be the only one in the portvlan mask of
- * this port
- */
- qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
+ err = qca8k_update_port_member(priv, port, bridge.dev, false);
+ if (err)
+ dev_err(priv->dev,
+ "Failed to update switch config for bridge leave: %d\n",
+ err);
}
void qca8k_port_fast_age(struct dsa_switch *ds, int port)
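
The new qca8k_update_port_member() centralizes what bridge join, bridge leave and BR_ISOLATED toggles all need: recomputing the portvlan mask of every bridge member against this port. Note that qca8k_port_bridge_flags() re-runs the helper with join=true, so flipping BR_ISOLATED at runtime reprograms the masks in place. The decisive rule is the condition on the join path; a hedged distillation:

	/* Sketch of the per-pair membership rule used above: two bridged
	 * user ports keep forwarding to each other unless BOTH carry
	 * BR_ISOLATED; on leave (join == false) membership is always torn
	 * down. The CPU port is unconditionally in this port's mask.
	 */
	static bool qca8k_pair_is_member(bool join, bool isolated,
					 bool other_isolated)
	{
		return join && !(isolated && other_isolated);
	}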
diff --git a/drivers/net/dsa/qca/qca8k-leds.c b/drivers/net/dsa/qca/qca8k-leds.c
index 811ebeeff4ed..43ac68052baf 100644
--- a/drivers/net/dsa/qca/qca8k-leds.c
+++ b/drivers/net/dsa/qca/qca8k-leds.c
@@ -431,8 +431,11 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
init_data.devicename = kasprintf(GFP_KERNEL, "%s:0%d",
priv->internal_mdio_bus->id,
port_num);
- if (!init_data.devicename)
+ if (!init_data.devicename) {
+ fwnode_handle_put(led);
+ fwnode_handle_put(leds);
return -ENOMEM;
+ }
ret = devm_led_classdev_register_ext(priv->dev, &port_led->cdev, &init_data);
if (ret)
@@ -441,6 +444,7 @@ qca8k_parse_port_leds(struct qca8k_priv *priv, struct fwnode_handle *port, int p
kfree(init_data.devicename);
}
+ fwnode_handle_put(leds);
return 0;
}
@@ -471,9 +475,13 @@ qca8k_setup_led_ctrl(struct qca8k_priv *priv)
* the correct port for LED setup.
*/
ret = qca8k_parse_port_leds(priv, port, qca8k_port_to_phy(port_num));
- if (ret)
+ if (ret) {
+ fwnode_handle_put(port);
+ fwnode_handle_put(ports);
return ret;
+ }
}
+ fwnode_handle_put(ports);
return 0;
}
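
The qca8k-leds hunks above all plug the same leak: the fwnode iteration helpers take a reference on each node they hand out, so every early return must drop both the current child and its parent. A condensed sketch of the corrected shape, with hypothetical names:

	static int foo_setup_leds(struct device *dev,
				  struct fwnode_handle *ports)
	{
		struct fwnode_handle *port;
		int ret;

		fwnode_for_each_child_node(ports, port) {
			ret = foo_parse_port_leds(dev, port); /* hypothetical */
			if (ret) {
				/* drop the current child's reference... */
				fwnode_handle_put(port);
				/* ...and the parent's, before bailing out */
				fwnode_handle_put(ports);
				return ret;
			}
		}

		/* the iterator released each child; only the parent remains */
		fwnode_handle_put(ports);
		return 0;
	}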
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index 2184d8d2d5a9..3664a2e2f1f6 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -451,6 +451,7 @@ struct qca8k_priv {
* Bit 1: port enabled. Bit 0: port disabled.
*/
u8 port_enabled_map;
+ u8 port_isolated_map;
struct qca8k_ports_config ports_config;
struct regmap *regmap;
struct mii_bus *bus;
diff --git a/drivers/net/dsa/realtek/realtek.h b/drivers/net/dsa/realtek/realtek.h
index e0b1aa01337b..a1b2e0b529d5 100644
--- a/drivers/net/dsa/realtek/realtek.h
+++ b/drivers/net/dsa/realtek/realtek.h
@@ -17,6 +17,7 @@
#define REALTEK_HW_STOP_DELAY 25 /* msecs */
#define REALTEK_HW_START_DELAY 100 /* msecs */
+struct phylink_mac_ops;
struct realtek_ops;
struct dentry;
struct inode;
@@ -117,6 +118,7 @@ struct realtek_ops {
struct realtek_variant {
const struct dsa_switch_ops *ds_ops;
const struct realtek_ops *ops;
+ const struct phylink_mac_ops *phylink_mac_ops;
unsigned int clk_delay;
u8 cmd_read;
u8 cmd_write;
diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
index 12665a8a3412..b9674f68b756 100644
--- a/drivers/net/dsa/realtek/rtl8365mb.c
+++ b/drivers/net/dsa/realtek/rtl8365mb.c
@@ -1048,11 +1048,13 @@ static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port,
phy_interface_set_rgmii(config->supported_interfaces);
}
-static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
+static void rtl8365mb_phylink_mac_config(struct phylink_config *config,
unsigned int mode,
const struct phylink_link_state *state)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
+ u8 port = dp->index;
int ret;
if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
@@ -1076,13 +1078,15 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
*/
}
-static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void rtl8365mb_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
+ u8 port = dp->index;
int ret;
mb = priv->chip_data;
@@ -1101,16 +1105,18 @@ static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
}
}
-static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void rtl8365mb_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause,
+ int speed, int duplex, bool tx_pause,
bool rx_pause)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
struct rtl8365mb_port *p;
struct rtl8365mb *mb;
+ u8 port = dp->index;
int ret;
mb = priv->chip_data;
@@ -2106,15 +2112,18 @@ static int rtl8365mb_detect(struct realtek_priv *priv)
return 0;
}
+static const struct phylink_mac_ops rtl8365mb_phylink_mac_ops = {
+ .mac_config = rtl8365mb_phylink_mac_config,
+ .mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .mac_link_up = rtl8365mb_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops rtl8365mb_switch_ops = {
.get_tag_protocol = rtl8365mb_get_tag_protocol,
.change_tag_protocol = rtl8365mb_change_tag_protocol,
.setup = rtl8365mb_setup,
.teardown = rtl8365mb_teardown,
.phylink_get_caps = rtl8365mb_phylink_get_caps,
- .phylink_mac_config = rtl8365mb_phylink_mac_config,
- .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
- .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
.port_stp_state_set = rtl8365mb_port_stp_state_set,
.get_strings = rtl8365mb_get_strings,
.get_ethtool_stats = rtl8365mb_get_ethtool_stats,
@@ -2136,6 +2145,7 @@ static const struct realtek_ops rtl8365mb_ops = {
const struct realtek_variant rtl8365mb_variant = {
.ds_ops = &rtl8365mb_switch_ops,
.ops = &rtl8365mb_ops,
+ .phylink_mac_ops = &rtl8365mb_phylink_mac_ops,
.clk_delay = 10,
.cmd_read = 0xb9,
.cmd_write = 0xb8,
diff --git a/drivers/net/dsa/realtek/rtl8366rb.c b/drivers/net/dsa/realtek/rtl8366rb.c
index e10ae94cf771..9e821b42e5f3 100644
--- a/drivers/net/dsa/realtek/rtl8366rb.c
+++ b/drivers/net/dsa/realtek/rtl8366rb.c
@@ -176,6 +176,7 @@
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
/* LED control registers */
+/* The LED blink rate is global; it is used by all triggers in all groups. */
#define RTL8366RB_LED_BLINKRATE_REG 0x0430
#define RTL8366RB_LED_BLINKRATE_MASK 0x0007
#define RTL8366RB_LED_BLINKRATE_28MS 0x0000
@@ -185,27 +186,27 @@
#define RTL8366RB_LED_BLINKRATE_222MS 0x0004
#define RTL8366RB_LED_BLINKRATE_446MS 0x0005
+/* LED trigger event for each group */
#define RTL8366RB_LED_CTRL_REG 0x0431
-#define RTL8366RB_LED_OFF 0x0
-#define RTL8366RB_LED_DUP_COL 0x1
-#define RTL8366RB_LED_LINK_ACT 0x2
-#define RTL8366RB_LED_SPD1000 0x3
-#define RTL8366RB_LED_SPD100 0x4
-#define RTL8366RB_LED_SPD10 0x5
-#define RTL8366RB_LED_SPD1000_ACT 0x6
-#define RTL8366RB_LED_SPD100_ACT 0x7
-#define RTL8366RB_LED_SPD10_ACT 0x8
-#define RTL8366RB_LED_SPD100_10_ACT 0x9
-#define RTL8366RB_LED_FIBER 0xa
-#define RTL8366RB_LED_AN_FAULT 0xb
-#define RTL8366RB_LED_LINK_RX 0xc
-#define RTL8366RB_LED_LINK_TX 0xd
-#define RTL8366RB_LED_MASTER 0xe
-#define RTL8366RB_LED_FORCE 0xf
+#define RTL8366RB_LED_CTRL_OFFSET(led_group) \
+ (4 * (led_group))
+#define RTL8366RB_LED_CTRL_MASK(led_group) \
+ (0xf << RTL8366RB_LED_CTRL_OFFSET(led_group))
+
+/* The RTL8366RB_LED_X_X registers are used to manually set the LED state only
+ * when the corresponding LED group in RTL8366RB_LED_CTRL_REG is
+ * RTL8366RB_LEDGROUP_FORCE. Otherwise, they are ignored.
+ */
#define RTL8366RB_LED_0_1_CTRL_REG 0x0432
-#define RTL8366RB_LED_1_OFFSET 6
#define RTL8366RB_LED_2_3_CTRL_REG 0x0433
-#define RTL8366RB_LED_3_OFFSET 6
+#define RTL8366RB_LED_X_X_CTRL_REG(led_group) \
+ ((led_group) <= 1 ? \
+ RTL8366RB_LED_0_1_CTRL_REG : \
+ RTL8366RB_LED_2_3_CTRL_REG)
+#define RTL8366RB_LED_0_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_1_CTRL_MASK GENMASK(11, 6)
+#define RTL8366RB_LED_2_X_CTRL_MASK GENMASK(5, 0)
+#define RTL8366RB_LED_X_3_CTRL_MASK GENMASK(11, 6)
#define RTL8366RB_MIB_COUNT 33
#define RTL8366RB_GLOBAL_MIB_COUNT 1
@@ -349,14 +350,44 @@
#define RTL8366RB_GREEN_FEATURE_TX BIT(0)
#define RTL8366RB_GREEN_FEATURE_RX BIT(2)
+enum rtl8366_ledgroup_mode {
+ RTL8366RB_LEDGROUP_OFF = 0x0,
+ RTL8366RB_LEDGROUP_DUP_COL = 0x1,
+ RTL8366RB_LEDGROUP_LINK_ACT = 0x2,
+ RTL8366RB_LEDGROUP_SPD1000 = 0x3,
+ RTL8366RB_LEDGROUP_SPD100 = 0x4,
+ RTL8366RB_LEDGROUP_SPD10 = 0x5,
+ RTL8366RB_LEDGROUP_SPD1000_ACT = 0x6,
+ RTL8366RB_LEDGROUP_SPD100_ACT = 0x7,
+ RTL8366RB_LEDGROUP_SPD10_ACT = 0x8,
+ RTL8366RB_LEDGROUP_SPD100_10_ACT = 0x9,
+ RTL8366RB_LEDGROUP_FIBER = 0xa,
+ RTL8366RB_LEDGROUP_AN_FAULT = 0xb,
+ RTL8366RB_LEDGROUP_LINK_RX = 0xc,
+ RTL8366RB_LEDGROUP_LINK_TX = 0xd,
+ RTL8366RB_LEDGROUP_MASTER = 0xe,
+ RTL8366RB_LEDGROUP_FORCE = 0xf,
+
+ __RTL8366RB_LEDGROUP_MODE_MAX
+};
+
+struct rtl8366rb_led {
+ u8 port_num;
+ u8 led_group;
+ struct realtek_priv *priv;
+ struct led_classdev cdev;
+};
+
/**
* struct rtl8366rb - RTL8366RB-specific data
* @max_mtu: per-port max MTU setting
* @pvid_enabled: if PVID is set for respective port
+ * @leds: per-port and per-ledgroup led info
*/
struct rtl8366rb {
unsigned int max_mtu[RTL8366RB_NUM_PORTS];
bool pvid_enabled[RTL8366RB_NUM_PORTS];
+ struct rtl8366rb_led leds[RTL8366RB_NUM_PORTS][RTL8366RB_NUM_LEDGROUPS];
};
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
@@ -799,6 +830,217 @@ static int rtl8366rb_jam_table(const struct rtl8366rb_jam_tbl_entry *jam_table,
return 0;
}
+static int rb8366rb_set_ledgroup_mode(struct realtek_priv *priv,
+ u8 led_group,
+ enum rtl8366_ledgroup_mode mode)
+{
+ int ret;
+ u32 val;
+
+ val = mode << RTL8366RB_LED_CTRL_OFFSET(led_group);
+
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_CTRL_REG,
+ RTL8366RB_LED_CTRL_MASK(led_group),
+ val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static inline u32 rtl8366rb_led_group_port_mask(u8 led_group, u8 port)
+{
+ switch (led_group) {
+ case 0:
+ return FIELD_PREP(RTL8366RB_LED_0_X_CTRL_MASK, BIT(port));
+ case 1:
+ return FIELD_PREP(RTL8366RB_LED_X_1_CTRL_MASK, BIT(port));
+ case 2:
+ return FIELD_PREP(RTL8366RB_LED_2_X_CTRL_MASK, BIT(port));
+ case 3:
+ return FIELD_PREP(RTL8366RB_LED_X_3_CTRL_MASK, BIT(port));
+ default:
+ return 0;
+ }
+}
+
+static int rb8366rb_get_port_led(struct rtl8366rb_led *led)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+ u32 val;
+
+ ret = regmap_read(priv->map, RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ &val);
+ if (ret) {
+ dev_err(priv->dev, "error reading LED on port %d group %d\n",
+ port_num, led_group);
+ return ret;
+ }
+
+ return !!(val & rtl8366rb_led_group_port_mask(led_group, port_num));
+}
+
+static int rb8366rb_set_port_led(struct rtl8366rb_led *led, bool enable)
+{
+ struct realtek_priv *priv = led->priv;
+ u8 led_group = led->led_group;
+ u8 port_num = led->port_num;
+ int ret;
+
+ ret = regmap_update_bits(priv->map,
+ RTL8366RB_LED_X_X_CTRL_REG(led_group),
+ rtl8366rb_led_group_port_mask(led_group,
+ port_num),
+ enable ? 0xffff : 0);
+ if (ret) {
+ dev_err(priv->dev, "error updating LED on port %d group %d\n",
+ port_num, led_group);
+ return ret;
+ }
+
+ /* Change the LED group to manually controlled LEDs if required */
+ ret = rb8366rb_set_ledgroup_mode(priv, led_group,
+ RTL8366RB_LEDGROUP_FORCE);
+
+ if (ret) {
+ dev_err(priv->dev, "error updating LED GROUP group %d\n",
+ led_group);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+rtl8366rb_cled_brightness_set_blocking(struct led_classdev *ldev,
+ enum led_brightness brightness)
+{
+ struct rtl8366rb_led *led = container_of(ldev, struct rtl8366rb_led,
+ cdev);
+
+ return rb8366rb_set_port_led(led, brightness == LED_ON);
+}
+
+static int rtl8366rb_setup_led(struct realtek_priv *priv, struct dsa_port *dp,
+ struct fwnode_handle *led_fwnode)
+{
+ struct rtl8366rb *rb = priv->chip_data;
+ struct led_init_data init_data = { };
+ enum led_default_state state;
+ struct rtl8366rb_led *led;
+ u32 led_group;
+ int ret;
+
+ ret = fwnode_property_read_u32(led_fwnode, "reg", &led_group);
+ if (ret)
+ return ret;
+
+ if (led_group >= RTL8366RB_NUM_LEDGROUPS) {
+ dev_warn(priv->dev, "Invalid LED reg %d defined for port %d",
+ led_group, dp->index);
+ return -EINVAL;
+ }
+
+ led = &rb->leds[dp->index][led_group];
+ led->port_num = dp->index;
+ led->led_group = led_group;
+ led->priv = priv;
+
+ state = led_init_default_state_get(led_fwnode);
+ switch (state) {
+ case LEDS_DEFSTATE_ON:
+ led->cdev.brightness = 1;
+ rb8366rb_set_port_led(led, 1);
+ break;
+ case LEDS_DEFSTATE_KEEP:
+ led->cdev.brightness =
+ rb8366rb_get_port_led(led);
+ break;
+ case LEDS_DEFSTATE_OFF:
+ default:
+ led->cdev.brightness = 0;
+ rb8366rb_set_port_led(led, 0);
+ }
+
+ led->cdev.max_brightness = 1;
+ led->cdev.brightness_set_blocking =
+ rtl8366rb_cled_brightness_set_blocking;
+ init_data.fwnode = led_fwnode;
+ init_data.devname_mandatory = true;
+
+ init_data.devicename = kasprintf(GFP_KERNEL, "Realtek-%d:0%d:%d",
+ dp->ds->index, dp->index, led_group);
+ if (!init_data.devicename)
+ return -ENOMEM;
+
+ ret = devm_led_classdev_register_ext(priv->dev, &led->cdev, &init_data);
+ if (ret) {
+ dev_warn(priv->dev, "Failed to init LED %d for port %d",
+ led_group, dp->index);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtl8366rb_setup_all_leds_off(struct realtek_priv *priv)
+{
+ int ret = 0;
+ int i;
+
+ regmap_update_bits(priv->map,
+ RTL8366RB_INTERRUPT_CONTROL_REG,
+ RTL8366RB_P4_RGMII_LED,
+ 0);
+
+ for (i = 0; i < RTL8366RB_NUM_LEDGROUPS; i++) {
+ ret = rb8366rb_set_ledgroup_mode(priv, i,
+ RTL8366RB_LEDGROUP_OFF);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int rtl8366rb_setup_leds(struct realtek_priv *priv)
+{
+ struct device_node *leds_np, *led_np;
+ struct dsa_switch *ds = &priv->ds;
+ struct dsa_port *dp;
+ int ret = 0;
+
+ dsa_switch_for_each_port(dp, ds) {
+ if (!dp->dn)
+ continue;
+
+ leds_np = of_get_child_by_name(dp->dn, "leds");
+ if (!leds_np) {
+ dev_dbg(priv->dev, "No leds defined for port %d",
+ dp->index);
+ continue;
+ }
+
+ for_each_child_of_node(leds_np, led_np) {
+ ret = rtl8366rb_setup_led(priv, dp,
+ of_fwnode_handle(led_np));
+ if (ret) {
+ of_node_put(led_np);
+ break;
+ }
+ }
+
+ of_node_put(leds_np);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
static int rtl8366rb_setup(struct dsa_switch *ds)
{
struct realtek_priv *priv = ds->priv;
@@ -807,7 +1049,6 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
u32 chip_ver = 0;
u32 chip_id = 0;
int jam_size;
- u32 val;
int ret;
int i;
@@ -987,7 +1228,9 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Set blinking, TODO: make this configurable */
+ /* Set blinking, used by all LED groups using HW triggers.
+ * TODO: make this configurable
+ */
ret = regmap_update_bits(priv->map, RTL8366RB_LED_BLINKRATE_REG,
RTL8366RB_LED_BLINKRATE_MASK,
RTL8366RB_LED_BLINKRATE_56MS);
@@ -995,32 +1238,17 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return ret;
/* Set up LED activity:
- * Each port has 4 LEDs, we configure all ports to the same
- * behaviour (no individual config) but we can set up each
- * LED separately.
+ * Each port has 4 LEDs on fixed groups. Each group shares the same
+ * hardware trigger across all ports. LEDs can only be individually
+ * controlled by setting the LED group to fixed mode and using the
+ * driver to toggle the LEDs on/off.
*/
if (priv->leds_disabled) {
- /* Turn everything off */
- regmap_update_bits(priv->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x0FFF, 0);
- regmap_update_bits(priv->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x0FFF, 0);
- regmap_update_bits(priv->map,
- RTL8366RB_INTERRUPT_CONTROL_REG,
- RTL8366RB_P4_RGMII_LED,
- 0);
- val = RTL8366RB_LED_OFF;
+ ret = rtl8366rb_setup_all_leds_off(priv);
+ if (ret)
+ return ret;
} else {
- /* TODO: make this configurable per LED */
- val = RTL8366RB_LED_FORCE;
- }
- for (i = 0; i < 4; i++) {
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_CTRL_REG,
- 0xf << (i * 4),
- val << (i * 4));
+ ret = rtl8366rb_setup_leds(priv);
if (ret)
return ret;
}
@@ -1077,11 +1305,19 @@ static void rtl8366rb_phylink_get_caps(struct dsa_switch *ds, int port,
}
static void
-rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
- phy_interface_t interface, struct phy_device *phydev,
+rtl8366rb_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void
+rtl8366rb_mac_link_up(struct phylink_config *config, struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
int speed, int duplex, bool tx_pause, bool rx_pause)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
+ int port = dp->index;
unsigned int val;
int ret;
@@ -1147,10 +1383,12 @@ rtl8366rb_mac_link_up(struct dsa_switch *ds, int port, unsigned int mode,
}
static void
-rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
+rtl8366rb_mac_link_down(struct phylink_config *config, unsigned int mode,
phy_interface_t interface)
{
- struct realtek_priv *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct realtek_priv *priv = dp->ds->priv;
+ int port = dp->index;
int ret;
if (port != priv->cpu_port)
@@ -1167,52 +1405,6 @@ rtl8366rb_mac_link_down(struct dsa_switch *ds, int port, unsigned int mode,
}
}
-static void rb8366rb_set_port_led(struct realtek_priv *priv,
- int port, bool enable)
-{
- u16 val = enable ? 0x3f : 0;
- int ret;
-
- if (priv->leds_disabled)
- return;
-
- switch (port) {
- case 0:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x3F, val);
- break;
- case 1:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_0_1_CTRL_REG,
- 0x3F << RTL8366RB_LED_1_OFFSET,
- val << RTL8366RB_LED_1_OFFSET);
- break;
- case 2:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x3F, val);
- break;
- case 3:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_LED_2_3_CTRL_REG,
- 0x3F << RTL8366RB_LED_3_OFFSET,
- val << RTL8366RB_LED_3_OFFSET);
- break;
- case 4:
- ret = regmap_update_bits(priv->map,
- RTL8366RB_INTERRUPT_CONTROL_REG,
- RTL8366RB_P4_RGMII_LED,
- enable ? RTL8366RB_P4_RGMII_LED : 0);
- break;
- default:
- dev_err(priv->dev, "no LED for port %d\n", port);
- return;
- }
- if (ret)
- dev_err(priv->dev, "error updating LED on port %d\n", port);
-}
-
static int
rtl8366rb_port_enable(struct dsa_switch *ds, int port,
struct phy_device *phy)
@@ -1226,7 +1418,6 @@ rtl8366rb_port_enable(struct dsa_switch *ds, int port,
if (ret)
return ret;
- rb8366rb_set_port_led(priv, port, true);
return 0;
}
@@ -1241,8 +1432,6 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
BIT(port));
if (ret)
return;
-
- rb8366rb_set_port_led(priv, port, false);
}
static int
@@ -1849,12 +2038,16 @@ static int rtl8366rb_detect(struct realtek_priv *priv)
return 0;
}
+static const struct phylink_mac_ops rtl8366rb_phylink_mac_ops = {
+ .mac_config = rtl8366rb_mac_config,
+ .mac_link_down = rtl8366rb_mac_link_down,
+ .mac_link_up = rtl8366rb_mac_link_up,
+};
+
static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_tag_protocol = rtl8366_get_tag_protocol,
.setup = rtl8366rb_setup,
.phylink_get_caps = rtl8366rb_phylink_get_caps,
- .phylink_mac_link_up = rtl8366rb_mac_link_up,
- .phylink_mac_link_down = rtl8366rb_mac_link_down,
.get_strings = rtl8366_get_strings,
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
@@ -1892,6 +2085,7 @@ static const struct realtek_ops rtl8366rb_ops = {
const struct realtek_variant rtl8366rb_variant = {
.ds_ops = &rtl8366rb_switch_ops,
.ops = &rtl8366rb_ops,
+ .phylink_mac_ops = &rtl8366rb_phylink_mac_ops,
.clk_delay = 10,
.cmd_read = 0xa9,
.cmd_write = 0xa8,
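
The rtl8366rb LED rework above replaces sixteen per-mode defines with a group/offset scheme: RTL8366RB_LED_CTRL_REG packs one 4-bit trigger mode per LED group, and the two LED_X_X registers hold six on/off bits per group (groups 0 and 2 in bits 5:0, groups 1 and 3 in bits 11:6). For orientation, a worked example using only the macros introduced above; it assumes a realtek_priv *priv in scope and the values are computed by hand:

	/* Force group 2 to manual control: the group occupies bits 11:8 */
	regmap_update_bits(priv->map, RTL8366RB_LED_CTRL_REG,
			   RTL8366RB_LED_CTRL_MASK(2),		/* 0x0f00 */
			   RTL8366RB_LEDGROUP_FORCE <<
				RTL8366RB_LED_CTRL_OFFSET(2));	/* 0x0f00 */

	/* Light port 3's LED in group 1: group 1 lives in the upper field
	 * of RTL8366RB_LED_0_1_CTRL_REG, so this sets bit 9 (BIT(3) << 6).
	 */
	regmap_update_bits(priv->map, RTL8366RB_LED_X_X_CTRL_REG(1),
			   FIELD_PREP(RTL8366RB_LED_X_1_CTRL_MASK, BIT(3)),
			   FIELD_PREP(RTL8366RB_LED_X_1_CTRL_MASK, BIT(3)));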
diff --git a/drivers/net/dsa/realtek/rtl83xx.c b/drivers/net/dsa/realtek/rtl83xx.c
index d2e876805393..35709a1756ae 100644
--- a/drivers/net/dsa/realtek/rtl83xx.c
+++ b/drivers/net/dsa/realtek/rtl83xx.c
@@ -236,6 +236,7 @@ int rtl83xx_register_switch(struct realtek_priv *priv)
ds->priv = priv;
ds->dev = priv->dev;
ds->ops = priv->variant->ds_ops;
+ ds->phylink_mac_ops = priv->variant->phylink_mac_ops;
ds->num_ports = priv->num_ports;
ret = dsa_register_switch(ds);
@@ -290,16 +291,13 @@ EXPORT_SYMBOL_NS_GPL(rtl83xx_shutdown, REALTEK_DSA);
* rtl83xx_remove() - Cleanup a realtek switch driver
* @priv: realtek_priv pointer
*
- * If a method is provided, this function asserts the hard reset of the switch
- * in order to avoid leaking traffic when the driver is gone.
+ * Placeholder for common cleanup procedures.
*
- * Context: Might sleep if priv->gdev->chip->can_sleep.
+ * Context: Any
* Return: nothing
*/
void rtl83xx_remove(struct realtek_priv *priv)
{
- /* leave the device reset asserted */
- rtl83xx_reset_assert(priv);
}
EXPORT_SYMBOL_NS_GPL(rtl83xx_remove, REALTEK_DSA);
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 10092ea85e46..92e032972b34 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -239,23 +239,31 @@ static void a5psw_phylink_get_caps(struct dsa_switch *ds, int port,
}
static struct phylink_pcs *
-a5psw_phylink_mac_select_pcs(struct dsa_switch *ds, int port,
+a5psw_phylink_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
- struct dsa_port *dp = dsa_to_port(ds, port);
- struct a5psw *a5psw = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct a5psw *a5psw = dp->ds->priv;
- if (!dsa_port_is_cpu(dp) && a5psw->pcs[port])
- return a5psw->pcs[port];
+ if (dsa_port_is_cpu(dp))
+ return NULL;
- return NULL;
+ return a5psw->pcs[dp->index];
+}
+
+static void a5psw_phylink_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
}
-static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
+static void a5psw_phylink_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- struct a5psw *a5psw = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct a5psw *a5psw = dp->ds->priv;
+ int port = dp->index;
u32 cmd_cfg;
cmd_cfg = a5psw_reg_readl(a5psw, A5PSW_CMD_CFG(port));
@@ -263,15 +271,17 @@ static void a5psw_phylink_mac_link_down(struct dsa_switch *ds, int port,
a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
}
-static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
+static void a5psw_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev, int speed,
- int duplex, bool tx_pause, bool rx_pause)
+ int speed, int duplex, bool tx_pause,
+ bool rx_pause)
{
u32 cmd_cfg = A5PSW_CMD_CFG_RX_ENA | A5PSW_CMD_CFG_TX_ENA |
A5PSW_CMD_CFG_TX_CRC_APPEND;
- struct a5psw *a5psw = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct a5psw *a5psw = dp->ds->priv;
if (speed == SPEED_1000)
cmd_cfg |= A5PSW_CMD_CFG_ETH_SPEED;
@@ -284,7 +294,7 @@ static void a5psw_phylink_mac_link_up(struct dsa_switch *ds, int port,
if (!rx_pause)
cmd_cfg &= ~A5PSW_CMD_CFG_PAUSE_IGNORE;
- a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(port), cmd_cfg);
+ a5psw_reg_writel(a5psw, A5PSW_CMD_CFG(dp->index), cmd_cfg);
}
static int a5psw_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
@@ -992,15 +1002,19 @@ static int a5psw_setup(struct dsa_switch *ds)
return 0;
}
+static const struct phylink_mac_ops a5psw_phylink_mac_ops = {
+ .mac_select_pcs = a5psw_phylink_mac_select_pcs,
+ .mac_config = a5psw_phylink_mac_config,
+ .mac_link_down = a5psw_phylink_mac_link_down,
+ .mac_link_up = a5psw_phylink_mac_link_up,
+};
+
static const struct dsa_switch_ops a5psw_switch_ops = {
.get_tag_protocol = a5psw_get_tag_protocol,
.setup = a5psw_setup,
.port_disable = a5psw_port_disable,
.port_enable = a5psw_port_enable,
.phylink_get_caps = a5psw_phylink_get_caps,
- .phylink_mac_select_pcs = a5psw_phylink_mac_select_pcs,
- .phylink_mac_link_down = a5psw_phylink_mac_link_down,
- .phylink_mac_link_up = a5psw_phylink_mac_link_up,
.port_change_mtu = a5psw_port_change_mtu,
.port_max_mtu = a5psw_port_max_mtu,
.get_sset_count = a5psw_get_sset_count,
@@ -1252,6 +1266,7 @@ static int a5psw_probe(struct platform_device *pdev)
ds->dev = dev;
ds->num_ports = A5PSW_PORTS_NUM;
ds->ops = &a5psw_switch_ops;
+ ds->phylink_mac_ops = &a5psw_phylink_mac_ops;
ds->priv = a5psw;
ret = dsa_register_switch(ds);
diff --git a/drivers/net/dsa/sja1105/sja1105_flower.c b/drivers/net/dsa/sja1105/sja1105_flower.c
index 9e8ca182c722..05d8ed3121e7 100644
--- a/drivers/net/dsa/sja1105/sja1105_flower.c
+++ b/drivers/net/dsa/sja1105/sja1105_flower.c
@@ -214,6 +214,9 @@ static int sja1105_flower_parse_key(struct sja1105_private *priv,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 6646f7fb0f90..c7282ce3d11c 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1358,10 +1358,11 @@ static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
}
static struct phylink_pcs *
-sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface)
+sja1105_mac_select_pcs(struct phylink_config *config, phy_interface_t iface)
{
- struct sja1105_private *priv = ds->priv;
- struct dw_xpcs *xpcs = priv->xpcs[port];
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct sja1105_private *priv = dp->ds->priv;
+ struct dw_xpcs *xpcs = priv->xpcs[dp->index];
if (xpcs)
return &xpcs->pcs;
@@ -1369,21 +1370,31 @@ sja1105_mac_select_pcs(struct dsa_switch *ds, int port, phy_interface_t iface)
return NULL;
}
-static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
+static void sja1105_mac_config(struct phylink_config *config,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void sja1105_mac_link_down(struct phylink_config *config,
unsigned int mode,
phy_interface_t interface)
{
- sja1105_inhibit_tx(ds->priv, BIT(port), true);
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+
+ sja1105_inhibit_tx(dp->ds->priv, BIT(dp->index), true);
}
-static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
+static void sja1105_mac_link_up(struct phylink_config *config,
+ struct phy_device *phydev,
unsigned int mode,
phy_interface_t interface,
- struct phy_device *phydev,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct sja1105_private *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct sja1105_private *priv = dp->ds->priv;
+ int port = dp->index;
sja1105_adjust_port_config(priv, port, speed);
@@ -2122,14 +2133,13 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port,
if (rc)
return rc;
- rc = dsa_tag_8021q_bridge_join(ds, port, bridge);
+ rc = dsa_tag_8021q_bridge_join(ds, port, bridge, tx_fwd_offload,
+ extack);
if (rc) {
sja1105_bridge_member(ds, port, bridge, false);
return rc;
}
- *tx_fwd_offload = true;
-
return 0;
}
@@ -3156,8 +3166,7 @@ static int sja1105_setup(struct dsa_switch *ds)
ds->vlan_filtering_is_global = true;
ds->untag_bridge_pvid = true;
ds->fdb_isolation = true;
- /* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */
- ds->max_num_bridges = 7;
+ ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
/* Advertise the 8 egress queues */
ds->num_tx_queues = SJA1105_NUM_TC;
@@ -3198,6 +3207,13 @@ static void sja1105_teardown(struct dsa_switch *ds)
sja1105_static_config_free(&priv->static_config);
}
+static const struct phylink_mac_ops sja1105_phylink_mac_ops = {
+ .mac_select_pcs = sja1105_mac_select_pcs,
+ .mac_config = sja1105_mac_config,
+ .mac_link_up = sja1105_mac_link_up,
+ .mac_link_down = sja1105_mac_link_down,
+};
+
static const struct dsa_switch_ops sja1105_switch_ops = {
.get_tag_protocol = sja1105_get_tag_protocol,
.connect_tag_protocol = sja1105_connect_tag_protocol,
@@ -3207,9 +3223,6 @@ static const struct dsa_switch_ops sja1105_switch_ops = {
.port_change_mtu = sja1105_change_mtu,
.port_max_mtu = sja1105_get_max_mtu,
.phylink_get_caps = sja1105_phylink_get_caps,
- .phylink_mac_select_pcs = sja1105_mac_select_pcs,
- .phylink_mac_link_up = sja1105_mac_link_up,
- .phylink_mac_link_down = sja1105_mac_link_down,
.get_strings = sja1105_get_strings,
.get_ethtool_stats = sja1105_get_ethtool_stats,
.get_sset_count = sja1105_get_sset_count,
@@ -3375,6 +3388,7 @@ static int sja1105_probe(struct spi_device *spi)
ds->dev = dev;
ds->num_ports = priv->info->num_ports;
ds->ops = &sja1105_switch_ops;
+ ds->phylink_mac_ops = &sja1105_phylink_mac_ops;
ds->priv = priv;
priv->ds = ds;
@@ -3456,7 +3470,6 @@ MODULE_DEVICE_TABLE(spi, sja1105_spi_ids);
static struct spi_driver sja1105_driver = {
.driver = {
.name = "sja1105",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(sja1105_dt_ids),
},
.id_table = sja1105_spi_ids,
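
Two sja1105 details above deserve a note. First, tx_fwd_offload becomes a parameter of dsa_tag_8021q_bridge_join() instead of an output flag set by the driver. Second, the hardcoded 7 gives way to a shared constant; to the best of my knowledge, the core's definition is essentially the dropped driver comment turned into a macro:

	/* include/linux/dsa/8021q.h (paraphrased): the VBID field of a
	 * tag_8021q VLAN is 3 bits wide and the value 0 is reserved, so
	 * at most 7 bridges can be distinguished.
	 */
	#define DSA_TAG_8021Q_MAX_NUM_BRIDGES	7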
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c
index a7d41e781398..a1f4ca6ad888 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.c
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.c
@@ -111,7 +111,7 @@ int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr)
}
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct sja1105_private *priv = ds->priv;
struct sja1105_ptp_data *ptp_data = &priv->ptp_data;
diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h
index 416461ee95d2..8add2bd5f728 100644
--- a/drivers/net/dsa/sja1105/sja1105_ptp.h
+++ b/drivers/net/dsa/sja1105/sja1105_ptp.h
@@ -101,7 +101,7 @@ void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd,
enum packing_op op);
int sja1105_get_ts_info(struct dsa_switch *ds, int port,
- struct ethtool_ts_info *ts);
+ struct kernel_ethtool_ts_info *ts);
void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot,
struct sk_buff *clone);
diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c
index ae70eac3be28..e3f95d2cc2c1 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-core.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-core.c
@@ -17,13 +17,16 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
+#include <linux/iopoll.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/bitops.h>
#include <linux/if_bridge.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/driver.h>
+#include <linux/dsa/8021q.h>
#include <linux/random.h>
#include <net/dsa.h>
@@ -37,6 +40,10 @@
#define VSC73XX_BLOCK_ARBITER 0x5 /* Only subblock 0 */
#define VSC73XX_BLOCK_SYSTEM 0x7 /* Only subblock 0 */
+/* MII Block subblock */
+#define VSC73XX_BLOCK_MII_INTERNAL 0x0 /* Internal MDIO subblock */
+#define VSC73XX_BLOCK_MII_EXTERNAL 0x1 /* External MDIO subblock */
+
#define CPU_PORT 6 /* CPU port */
/* MAC Block registers */
@@ -61,6 +68,8 @@
#define VSC73XX_CAT_DROP 0x6e
#define VSC73XX_CAT_PR_MISC_L2 0x6f
#define VSC73XX_CAT_PR_USR_PRIO 0x75
+#define VSC73XX_CAT_VLAN_MISC 0x79
+#define VSC73XX_CAT_PORT_VLAN 0x7a
#define VSC73XX_Q_MISC_CONF 0xdf
/* MAC_CFG register bits */
@@ -121,6 +130,17 @@
#define VSC73XX_ADVPORTM_IO_LOOPBACK BIT(1)
#define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0)
+/* TXUPDCFG transmit modify setup bits */
+#define VSC73XX_TXUPDCFG_DSCP_REWR_MODE GENMASK(20, 19)
+#define VSC73XX_TXUPDCFG_DSCP_REWR_ENA BIT(18)
+#define VSC73XX_TXUPDCFG_TX_INT_TO_USRPRIO_ENA BIT(17)
+#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID GENMASK(15, 4)
+#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA BIT(3)
+#define VSC73XX_TXUPDCFG_TX_UPDATE_CRC_CPU_ENA BIT(1)
+#define VSC73XX_TXUPDCFG_TX_INSERT_TAG BIT(0)
+
+#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_SHIFT 4
+
/* CAT_DROP categorizer frame dropping register bits */
#define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA BIT(6)
#define VSC73XX_CAT_DROP_FWD_CTRL_ENA BIT(4)
@@ -134,6 +154,15 @@
#define VSC73XX_Q_MISC_CONF_EARLY_TX_512 (1 << 1)
#define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE BIT(0)
+/* CAT_VLAN_MISC categorizer VLAN miscellaneous bits */
+#define VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA BIT(8)
+#define VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA BIT(7)
+
+/* CAT_PORT_VLAN categorizer port VLAN */
+#define VSC73XX_CAT_PORT_VLAN_VLAN_CFI BIT(15)
+#define VSC73XX_CAT_PORT_VLAN_VLAN_USR_PRIO GENMASK(14, 12)
+#define VSC73XX_CAT_PORT_VLAN_VLAN_VID GENMASK(11, 0)
+
/* Frame analyzer block 2 registers */
#define VSC73XX_STORMLIMIT 0x02
#define VSC73XX_ADVLEARN 0x03
@@ -163,6 +192,10 @@
#define VSC73XX_AGENCTRL 0xf0
#define VSC73XX_CAPRST 0xff
+#define VSC73XX_SRCMASKS_CPU_COPY BIT(27)
+#define VSC73XX_SRCMASKS_MIRROR BIT(26)
+#define VSC73XX_SRCMASKS_PORTS_MASK GENMASK(7, 0)
+
#define VSC73XX_MACACCESS_CPU_COPY BIT(14)
#define VSC73XX_MACACCESS_FWD_KILL BIT(13)
#define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12)
@@ -184,7 +217,8 @@
#define VSC73XX_VLANACCESS_VLAN_MIRROR BIT(29)
#define VSC73XX_VLANACCESS_VLAN_SRC_CHECK BIT(28)
#define VSC73XX_VLANACCESS_VLAN_PORT_MASK GENMASK(9, 2)
-#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(2, 0)
+#define VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT 2
+#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(1, 0)
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE 0
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY 1
#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY 2
@@ -195,6 +229,8 @@
#define VSC73XX_MII_CMD 0x1
#define VSC73XX_MII_DATA 0x2
+#define VSC73XX_MII_STAT_BUSY BIT(3)
+
/* Arbiter block 5 registers */
#define VSC73XX_ARBEMPTY 0x0c
#define VSC73XX_ARBDISC 0x0e
@@ -268,6 +304,10 @@
#define IS_7398(a) ((a)->chipid == VSC73XX_CHIPID_ID_7398)
#define IS_739X(a) (IS_7395(a) || IS_7398(a))
+#define VSC73XX_POLL_SLEEP_US 1000
+#define VSC73XX_MDIO_POLL_SLEEP_US 5
+#define VSC73XX_POLL_TIMEOUT_US 10000
+
struct vsc73xx_counter {
u8 counter;
const char *name;
@@ -339,6 +379,17 @@ static const struct vsc73xx_counter vsc73xx_tx_counters[] = {
{ 29, "TxQoSClass3" }, /* non-standard counter */
};
+struct vsc73xx_vlan_summary {
+ size_t num_tagged;
+ size_t num_untagged;
+};
+
+enum vsc73xx_port_vlan_conf {
+ VSC73XX_VLAN_FILTER,
+ VSC73XX_VLAN_FILTER_UNTAG_ALL,
+ VSC73XX_VLAN_IGNORE,
+};
+
int vsc73xx_is_addr_valid(u8 block, u8 subblock)
{
switch (block) {
@@ -483,6 +534,22 @@ static int vsc73xx_detect(struct vsc73xx *vsc)
return 0;
}
+static int vsc73xx_mdio_busy_check(struct vsc73xx *vsc)
+{
+ int ret, err;
+ u32 val;
+
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 || !(val & VSC73XX_MII_STAT_BUSY),
+ VSC73XX_MDIO_POLL_SLEEP_US,
+ VSC73XX_POLL_TIMEOUT_US, false, vsc,
+ VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
+ VSC73XX_MII_STAT, &val);
+ if (ret)
+ return ret;
+ return err;
+}
+
static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
{
struct vsc73xx *vsc = ds->priv;
@@ -490,12 +557,20 @@ static int vsc73xx_phy_read(struct dsa_switch *ds, int phy, int regnum)
u32 val;
int ret;
+ ret = vsc73xx_mdio_busy_check(vsc);
+ if (ret)
+ return ret;
+
/* Setting bit 26 means "read" */
cmd = BIT(26) | (phy << 21) | (regnum << 16);
ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
if (ret)
return ret;
- msleep(2);
+
+ ret = vsc73xx_mdio_busy_check(vsc);
+ if (ret)
+ return ret;
+
ret = vsc73xx_read(vsc, VSC73XX_BLOCK_MII, 0, 2, &val);
if (ret)
return ret;
@@ -519,18 +594,11 @@ static int vsc73xx_phy_write(struct dsa_switch *ds, int phy, int regnum,
u32 cmd;
int ret;
- /* It was found through tedious experiments that this router
- * chip really hates to have it's PHYs reset. They
- * never recover if that happens: autonegotiation stops
- * working after a reset. Just filter out this command.
- * (Resetting the whole chip is OK.)
- */
- if (regnum == 0 && (val & BIT(15))) {
- dev_info(vsc->dev, "reset PHY - disallowed\n");
- return 0;
- }
+ ret = vsc73xx_mdio_busy_check(vsc);
+ if (ret)
+ return ret;
- cmd = (phy << 21) | (regnum << 16);
+ cmd = (phy << 21) | (regnum << 16) | val;
ret = vsc73xx_write(vsc, VSC73XX_BLOCK_MII, 0, 1, cmd);
if (ret)
return ret;
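
The vsc73xx MDIO accessors above trade a fixed msleep(2) for polling the BUSY bit. Since the diff carries no commentary, here is an annotated restatement of the read_poll_timeout() idiom it relies on; the arguments after the bool are forwarded verbatim to the polled accessor:

	int ret, err;
	u32 val;

	ret = read_poll_timeout(vsc73xx_read, err,  /* op; err holds its result */
				err < 0 ||	    /* stop on a read failure... */
				!(val & VSC73XX_MII_STAT_BUSY), /* ...or BUSY clear */
				VSC73XX_MDIO_POLL_SLEEP_US, /* sleep between polls */
				VSC73XX_POLL_TIMEOUT_US,    /* then give up */
				false,		    /* no sleep before first read */
				vsc, VSC73XX_BLOCK_MII, VSC73XX_BLOCK_MII_INTERNAL,
				VSC73XX_MII_STAT, &val);
	if (ret)	/* -ETIMEDOUT: the condition never became true */
		return ret;
	return err;	/* propagate the accessor's error, 0 on success */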
@@ -553,16 +621,103 @@ static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds,
* cannot access the tag. (See "Internal frame header" section
* 3.9.1 in the manual.)
*/
- return DSA_TAG_PROTO_NONE;
+ return DSA_TAG_PROTO_VSC73XX_8021Q;
+}
+
+static int vsc73xx_wait_for_vlan_table_cmd(struct vsc73xx *vsc)
+{
+ int ret, err;
+ u32 val;
+
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 ||
+ ((val & VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK) ==
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE),
+ VSC73XX_POLL_SLEEP_US, VSC73XX_POLL_TIMEOUT_US,
+ false, vsc, VSC73XX_BLOCK_ANALYZER,
+ 0, VSC73XX_VLANACCESS, &val);
+ if (ret)
+ return ret;
+ return err;
+}
+
+static int
+vsc73xx_read_vlan_table_entry(struct vsc73xx *vsc, u16 vid, u8 *portmap)
+{
+ u32 val;
+ int ret;
+
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANTIDX, vid);
+
+ ret = vsc73xx_wait_for_vlan_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY);
+
+ ret = vsc73xx_wait_for_vlan_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS, &val);
+ *portmap = (val & VSC73XX_VLANACCESS_VLAN_PORT_MASK) >>
+ VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT;
+
+ return 0;
+}
+
+static int
+vsc73xx_write_vlan_table_entry(struct vsc73xx *vsc, u16 vid, u8 portmap)
+{
+ int ret;
+
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANTIDX, vid);
+
+ ret = vsc73xx_wait_for_vlan_table_cmd(vsc);
+ if (ret)
+ return ret;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK |
+ VSC73XX_VLANACCESS_VLAN_SRC_CHECK |
+ VSC73XX_VLANACCESS_VLAN_PORT_MASK,
+ VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY |
+ VSC73XX_VLANACCESS_VLAN_SRC_CHECK |
+ (portmap << VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT));
+
+ return vsc73xx_wait_for_vlan_table_cmd(vsc);
+}
+
+static int
+vsc73xx_update_vlan_table(struct vsc73xx *vsc, int port, u16 vid, bool set)
+{
+ u8 portmap;
+ int ret;
+
+ ret = vsc73xx_read_vlan_table_entry(vsc, vid, &portmap);
+ if (ret)
+ return ret;
+
+ if (set)
+ portmap |= BIT(port);
+ else
+ portmap &= ~BIT(port);
+
+ return vsc73xx_write_vlan_table_entry(vsc, vid, portmap);
}
static int vsc73xx_setup(struct dsa_switch *ds)
{
struct vsc73xx *vsc = ds->priv;
- int i;
+ int i, ret;
dev_info(vsc->dev, "set up the switch\n");
+ ds->untag_bridge_pvid = true;
+ ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES;
+
/* Issue RESET */
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET,
VSC73XX_GLORESET_MASTER_RESET);
@@ -590,7 +745,7 @@ static int vsc73xx_setup(struct dsa_switch *ds)
VSC73XX_MACACCESS,
VSC73XX_MACACCESS_CMD_CLEAR_TABLE);
- /* Clear VLAN table */
+ /* Set VLAN table to default values */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0,
VSC73XX_VLANACCESS,
VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE);
@@ -619,9 +774,9 @@ static int vsc73xx_setup(struct dsa_switch *ds)
vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY,
VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS |
VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS);
- /* Enable reception of frames on all ports */
- vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK,
- 0x5f);
+ /* Ingress VLAN reception mask (table 145) */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANMASK,
+ 0xff);
/* IP multicast flood mask (table 144) */
vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK,
0xff);
@@ -634,7 +789,24 @@ static int vsc73xx_setup(struct dsa_switch *ds)
udelay(4);
- return 0;
+ /* Clear VLAN table */
+ for (i = 0; i < VLAN_N_VID; i++)
+ vsc73xx_write_vlan_table_entry(vsc, i, 0);
+
+ INIT_LIST_HEAD(&vsc->vlans);
+
+ rtnl_lock();
+ ret = dsa_tag_8021q_register(ds, htons(ETH_P_8021Q));
+ rtnl_unlock();
+
+ return ret;
+}
+
+static void vsc73xx_teardown(struct dsa_switch *ds)
+{
+ rtnl_lock();
+ dsa_tag_8021q_unregister(ds);
+ rtnl_unlock();
}
static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
@@ -713,22 +885,123 @@ static void vsc73xx_init_port(struct vsc73xx *vsc, int port)
port, VSC73XX_C_RX0, 0);
}
-static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
- int port, struct phy_device *phydev,
- u32 initval)
+static void vsc73xx_reset_port(struct vsc73xx *vsc, int port, u32 initval)
{
- u32 val = initval;
+ int ret, err;
+ u32 val;
+
+ /* Disable RX on this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RX_EN, 0);
+
+ /* Discard packets */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBDISC, BIT(port), BIT(port));
+
+ /* Wait until queue is empty */
+ ret = read_poll_timeout(vsc73xx_read, err,
+ err < 0 || (val & BIT(port)),
+ VSC73XX_POLL_SLEEP_US,
+ VSC73XX_POLL_TIMEOUT_US, false,
+ vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBEMPTY, &val);
+ if (ret)
+ dev_err(vsc->dev,
+ "timeout waiting for block arbiter\n");
+ else if (err < 0)
+ dev_err(vsc->dev, "error reading arbiter\n");
+
+ /* Put this port into reset */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
+ VSC73XX_MAC_CFG_RESET | initval);
+}
+
+static void vsc73xx_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct vsc73xx *vsc = dp->ds->priv;
+ int port = dp->index;
+
+ /* Special handling of the CPU-facing port */
+ if (port == CPU_PORT) {
+ /* Other ports are already initialized but not this one */
+ vsc73xx_init_port(vsc, CPU_PORT);
+ /* Select the external port for this interface (EXT_PORT)
+ * Enable the GMII GTX external clock
+ * Use double data rate (DDR mode)
+ */
+ vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
+ CPU_PORT,
+ VSC73XX_ADVPORTM,
+ VSC73XX_ADVPORTM_EXT_PORT |
+ VSC73XX_ADVPORTM_ENA_GTX |
+ VSC73XX_ADVPORTM_DDR_MODE);
+ }
+}
+
+static void vsc73xx_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct vsc73xx *vsc = dp->ds->priv;
+ int port = dp->index;
+
+ /* This routine is described in the datasheet (below ARBDISC register
+ * description)
+ */
+ vsc73xx_reset_port(vsc, port, 0);
+
+ /* Allow backward dropping of frames from this port */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_SBACKWDROP, BIT(port), BIT(port));
+}
+
+static void vsc73xx_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct vsc73xx *vsc = dp->ds->priv;
+ int port = dp->index;
+ u32 val;
u8 seed;
- /* Reset this port FIXME: break out subroutine */
- val |= VSC73XX_MAC_CFG_RESET;
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
+ if (speed == SPEED_1000)
+ val = VSC73XX_MAC_CFG_GIGA_MODE | VSC73XX_MAC_CFG_TX_IPG_1000M;
+ else
+ val = VSC73XX_MAC_CFG_TX_IPG_100_10M;
+
+ if (phy_interface_mode_is_rgmii(interface))
+ val |= VSC73XX_MAC_CFG_CLK_SEL_1000M;
+ else
+ val |= VSC73XX_MAC_CFG_CLK_SEL_EXT;
+
+ if (duplex == DUPLEX_FULL)
+ val |= VSC73XX_MAC_CFG_FDX;
+ else
+ /* In the datasheet description ("Port Mode Procedure" in 5.6.2),
+ * this bit is configured only for half duplex.
+ */
+ val |= VSC73XX_MAC_CFG_WEXC_DIS;
+
+ /* This routine is described in the datasheet (below the ARBDISC
+ * register description)
+ */
+ vsc73xx_reset_port(vsc, port, val);
/* Seed the port randomness with randomness */
get_random_bytes(&seed, 1);
val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET;
val |= VSC73XX_MAC_CFG_SEED_LOAD;
- val |= VSC73XX_MAC_CFG_WEXC_DIS;
+
+ /* These bits only affect the MTU. The kernel takes care of the MTU,
+ * so enable the +8 byte frame length unconditionally.
+ */
+ val |= VSC73XX_MAC_CFG_VLAN_AWR | VSC73XX_MAC_CFG_VLAN_DBLAWR;
+
vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val);
/* Flow control for the PHY facing ports:
@@ -741,6 +1014,10 @@ static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
VSC73XX_FCCONF_FLOW_CTRL_OBEY |
0xff);
+ /* Accept packets again */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
+ VSC73XX_ARBDISC, BIT(port), 0);
+
/* Disallow backward dropping of frames from this port */
vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
VSC73XX_SBACKWDROP, BIT(port), 0);
@@ -753,125 +1030,255 @@ static void vsc73xx_adjust_enable_port(struct vsc73xx *vsc,
VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN);
}
-static void vsc73xx_adjust_link(struct dsa_switch *ds, int port,
- struct phy_device *phydev)
+static bool vsc73xx_tag_8021q_active(struct dsa_port *dp)
{
- struct vsc73xx *vsc = ds->priv;
- u32 val;
+ return !dsa_port_is_vlan_filtering(dp);
+}
- /* Special handling of the CPU-facing port */
- if (port == CPU_PORT) {
- /* Other ports are already initialized but not this one */
- vsc73xx_init_port(vsc, CPU_PORT);
- /* Select the external port for this interface (EXT_PORT)
- * Enable the GMII GTX external clock
- * Use double data rate (DDR mode)
- */
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC,
- CPU_PORT,
- VSC73XX_ADVPORTM,
- VSC73XX_ADVPORTM_EXT_PORT |
- VSC73XX_ADVPORTM_ENA_GTX |
- VSC73XX_ADVPORTM_DDR_MODE);
+static struct vsc73xx_bridge_vlan *
+vsc73xx_bridge_vlan_find(struct vsc73xx *vsc, u16 vid)
+{
+ struct vsc73xx_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &vsc->vlans, list)
+ if (vlan->vid == vid)
+ return vlan;
+
+ return NULL;
+}
+
+static void
+vsc73xx_bridge_vlan_remove_port(struct vsc73xx_bridge_vlan *vsc73xx_vlan,
+ int port)
+{
+ vsc73xx_vlan->portmask &= ~BIT(port);
+
+ if (vsc73xx_vlan->portmask)
+ return;
+
+ list_del(&vsc73xx_vlan->list);
+ kfree(vsc73xx_vlan);
+}
+
+static void vsc73xx_bridge_vlan_summary(struct vsc73xx *vsc, int port,
+ struct vsc73xx_vlan_summary *summary,
+ u16 ignored_vid)
+{
+ size_t num_tagged = 0, num_untagged = 0;
+ struct vsc73xx_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &vsc->vlans, list) {
+ if (!(vlan->portmask & BIT(port)) || vlan->vid == ignored_vid)
+ continue;
+
+ if (vlan->untagged & BIT(port))
+ num_untagged++;
+ else
+ num_tagged++;
}
- /* This is the MAC confiuration that always need to happen
- * after a PHY or the CPU port comes up or down.
- */
- if (!phydev->link) {
- int maxloop = 10;
-
- dev_dbg(vsc->dev, "port %d: went down\n",
- port);
-
- /* Disable RX on this port */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
- VSC73XX_MAC_CFG,
- VSC73XX_MAC_CFG_RX_EN, 0);
-
- /* Discard packets */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBDISC, BIT(port), BIT(port));
-
- /* Wait until queue is empty */
- vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBEMPTY, &val);
- while (!(val & BIT(port))) {
- msleep(1);
- vsc73xx_read(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBEMPTY, &val);
- if (--maxloop == 0) {
- dev_err(vsc->dev,
- "timeout waiting for block arbiter\n");
- /* Continue anyway */
- break;
- }
- }
+ summary->num_untagged = num_untagged;
+ summary->num_tagged = num_tagged;
+}
- /* Put this port into reset */
- vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG,
- VSC73XX_MAC_CFG_RESET);
+static u16 vsc73xx_find_first_vlan_untagged(struct vsc73xx *vsc, int port)
+{
+ struct vsc73xx_bridge_vlan *vlan;
- /* Accept packets again */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_ARBDISC, BIT(port), 0);
+ list_for_each_entry(vlan, &vsc->vlans, list)
+ if ((vlan->portmask & BIT(port)) &&
+ (vlan->untagged & BIT(port)))
+ return vlan->vid;
- /* Allow backward dropping of frames from this port */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0,
- VSC73XX_SBACKWDROP, BIT(port), BIT(port));
+ return VLAN_N_VID;
+}
- /* Receive mask (disable forwarding) */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
- VSC73XX_RECVMASK, BIT(port), 0);
+static int vsc73xx_set_vlan_conf(struct vsc73xx *vsc, int port,
+ enum vsc73xx_port_vlan_conf port_vlan_conf)
+{
+ u32 val = 0;
+ int ret;
- return;
+ if (port_vlan_conf == VSC73XX_VLAN_IGNORE)
+ val = VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA |
+ VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_CAT_VLAN_MISC,
+ VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA |
+ VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA, val);
+ if (ret)
+ return ret;
+
+ val = (port_vlan_conf == VSC73XX_VLAN_FILTER) ?
+ VSC73XX_TXUPDCFG_TX_INSERT_TAG : 0;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_TXUPDCFG,
+ VSC73XX_TXUPDCFG_TX_INSERT_TAG, val);
+}
+
+/**
+ * vsc73xx_vlan_commit_conf - Update VLAN configuration of a port
+ * @vsc: Switch private data structure
+ * @port: Port index on which to operate
+ *
+ * Update the VLAN behavior of a port to make sure that when it is under
+ * a VLAN filtering bridge, the port is either filtering with tag
+ * preservation, or filtering with all VLANs egress-untagged. Otherwise,
+ * the port ignores VLAN tags from packets and applies the port-based
+ * VID.
+ *
+ * Must be called when changes are made to:
+ * - the bridge VLAN filtering state of the port
+ * - the number or attributes of VLANs from the bridge VLAN table,
+ * while the port is currently VLAN-aware
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int vsc73xx_vlan_commit_conf(struct vsc73xx *vsc, int port)
+{
+ enum vsc73xx_port_vlan_conf port_vlan_conf = VSC73XX_VLAN_IGNORE;
+ struct dsa_port *dp = dsa_to_port(vsc->ds, port);
+
+ if (port == CPU_PORT) {
+ port_vlan_conf = VSC73XX_VLAN_FILTER;
+ } else if (dsa_port_is_vlan_filtering(dp)) {
+ struct vsc73xx_vlan_summary summary;
+
+ port_vlan_conf = VSC73XX_VLAN_FILTER;
+
+ vsc73xx_bridge_vlan_summary(vsc, port, &summary, VLAN_N_VID);
+ if (summary.num_tagged == 0)
+ port_vlan_conf = VSC73XX_VLAN_FILTER_UNTAG_ALL;
}
- /* Figure out what speed was negotiated */
- if (phydev->speed == SPEED_1000) {
- dev_dbg(vsc->dev, "port %d: 1000 Mbit mode full duplex\n",
- port);
+ return vsc73xx_set_vlan_conf(vsc, port, port_vlan_conf);
+}
+
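Condensing the branches above into a pure function makes the three-way mode choice easier to see; this is a restatement for illustration, not driver code:

    static enum vsc73xx_port_vlan_conf
    vsc73xx_pick_vlan_conf(bool is_cpu_port, bool vlan_filtering,
                           size_t num_tagged)
    {
        if (is_cpu_port)
            return VSC73XX_VLAN_FILTER;     /* CPU port always filters */
        if (!vlan_filtering)
            return VSC73XX_VLAN_IGNORE;     /* tag_8021q handles the port */
        return num_tagged ? VSC73XX_VLAN_FILTER
                          : VSC73XX_VLAN_FILTER_UNTAG_ALL;
    }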
+static int
+vsc73xx_vlan_change_untagged(struct vsc73xx *vsc, int port, u16 vid, bool set)
+{
+ u32 val = 0;
- /* Set up default for internal port or external RGMII */
- if (phydev->interface == PHY_INTERFACE_MODE_RGMII)
- val = VSC73XX_MAC_CFG_1000M_F_RGMII;
- else
- val = VSC73XX_MAC_CFG_1000M_F_PHY;
- vsc73xx_adjust_enable_port(vsc, port, phydev, val);
- } else if (phydev->speed == SPEED_100) {
- if (phydev->duplex == DUPLEX_FULL) {
- val = VSC73XX_MAC_CFG_100_10M_F_PHY;
- dev_dbg(vsc->dev,
- "port %d: 100 Mbit full duplex mode\n",
- port);
- } else {
- val = VSC73XX_MAC_CFG_100_10M_H_PHY;
- dev_dbg(vsc->dev,
- "port %d: 100 Mbit half duplex mode\n",
- port);
- }
- vsc73xx_adjust_enable_port(vsc, port, phydev, val);
- } else if (phydev->speed == SPEED_10) {
- if (phydev->duplex == DUPLEX_FULL) {
- val = VSC73XX_MAC_CFG_100_10M_F_PHY;
- dev_dbg(vsc->dev,
- "port %d: 10 Mbit full duplex mode\n",
- port);
- } else {
- val = VSC73XX_MAC_CFG_100_10M_H_PHY;
- dev_dbg(vsc->dev,
- "port %d: 10 Mbit half duplex mode\n",
- port);
- }
- vsc73xx_adjust_enable_port(vsc, port, phydev, val);
- } else {
- dev_err(vsc->dev,
- "could not adjust link: unknown speed\n");
+ if (set)
+ val = VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA |
+ ((vid << VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_SHIFT) &
+ VSC73XX_TXUPDCFG_TX_UNTAGGED_VID);
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_TXUPDCFG,
+ VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA |
+ VSC73XX_TXUPDCFG_TX_UNTAGGED_VID, val);
+}
+
+/**
+ * vsc73xx_vlan_commit_untagged - Update native VLAN of a port
+ * @vsc: Switch private data structure
+ * @port: Port index on which to operate
+ *
+ * Update the native VLAN of a port (the one VLAN which is transmitted
+ * as egress-untagged on a trunk port) when the port is in VLAN filtering
+ * mode and exactly one untagged VID is configured.
+ * In other cases there is no need to configure it, because the switch
+ * can untag all VLANs on the port.
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int vsc73xx_vlan_commit_untagged(struct vsc73xx *vsc, int port)
+{
+ struct dsa_port *dp = dsa_to_port(vsc->ds, port);
+ struct vsc73xx_vlan_summary summary;
+ u16 vid = 0;
+ bool valid;
+
+ if (!dsa_port_is_vlan_filtering(dp))
+ /* The port is configured to untag all VLANs in this case.
+ * There is no need to commit an untagged config change.
+ */
+ return 0;
+
+ vsc73xx_bridge_vlan_summary(vsc, port, &summary, VLAN_N_VID);
+
+ if (summary.num_untagged > 1)
+ /* The port must untag all VLANs in this case.
+ * There is no need to commit an untagged config change.
+ */
+ return 0;
+
+ valid = (summary.num_untagged == 1);
+ if (valid)
+ vid = vsc73xx_find_first_vlan_untagged(vsc, port);
+
+ return vsc73xx_vlan_change_untagged(vsc, port, vid, valid);
+}
+
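A worked example of that rule, under a hypothetical membership:

    /* Hypothetical: port 2 is a member of VID 10 (untagged) and
     * VID 20 (tagged). num_untagged == 1, so VID 10 is written to
     * VSC73XX_TXUPDCFG as the egress-untagged VID. If VID 30 were
     * added untagged as well, the summary would report two untagged
     * VLANs, the conf commit would switch the port to untag-all mode,
     * and the per-port untagged VID would be left disabled.
     */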
+static int
+vsc73xx_vlan_change_pvid(struct vsc73xx *vsc, int port, u16 vid, bool set)
+{
+ u32 val = 0;
+ int ret;
+
+ val = set ? 0 : VSC73XX_CAT_DROP_UNTAGGED_ENA;
+
+ ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_CAT_DROP,
+ VSC73XX_CAT_DROP_UNTAGGED_ENA, val);
+ if (!set || ret)
+ return ret;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port,
+ VSC73XX_CAT_PORT_VLAN,
+ VSC73XX_CAT_PORT_VLAN_VLAN_VID,
+ vid & VSC73XX_CAT_PORT_VLAN_VLAN_VID);
+}
+
+/**
+ * vsc73xx_vlan_commit_pvid - Update port-based default VLAN of a port
+ * @vsc: Switch private data structure
+ * @port: Port index on which to operate
+ *
+ * Update the PVID of a port so that it follows either the bridge PVID
+ * configuration, when the bridge is currently VLAN-aware, or the PVID
+ * from tag_8021q, when the port is standalone or under a VLAN-unaware
+ * bridge. A port with no PVID drops all untagged and VID 0 tagged
+ * traffic.
+ *
+ * Must be called when changes are made to:
+ * - the bridge VLAN filtering state of the port
+ * - the number or attributes of VLANs from the bridge VLAN table,
+ * while the port is currently VLAN-aware
+ *
+ * Return: 0 on success, or negative errno on error.
+ */
+static int vsc73xx_vlan_commit_pvid(struct vsc73xx *vsc, int port)
+{
+ struct vsc73xx_portinfo *portinfo = &vsc->portinfo[port];
+ bool valid = portinfo->pvid_tag_8021q_configured;
+ struct dsa_port *dp = dsa_to_port(vsc->ds, port);
+ u16 vid = portinfo->pvid_tag_8021q;
+
+ if (dsa_port_is_vlan_filtering(dp)) {
+ vid = portinfo->pvid_vlan_filtering;
+ valid = portinfo->pvid_vlan_filtering_configured;
}
- /* Enable port (forwarding) in the receieve mask */
- vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
- VSC73XX_RECVMASK, BIT(port), BIT(port));
+ return vsc73xx_vlan_change_pvid(vsc, port, vid, valid);
+}
+
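A short worked example, with hypothetical numbers, of how the PVID source flips with the filtering state:

    /* Hypothetical: while standalone, the port uses its tag_8021q PVID
     * (say 3073, from the reserved range). Once it joins a VLAN-aware
     * bridge where VID 100 was added as pvid, pvid_vlan_filtering == 100
     * is programmed instead. If the active source has no valid PVID,
     * VSC73XX_CAT_DROP_UNTAGGED_ENA stays set and untagged ingress
     * traffic is dropped, as the kernel-doc above states.
     */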
+static int vsc73xx_vlan_commit_settings(struct vsc73xx *vsc, int port)
+{
+ int ret;
+
+ ret = vsc73xx_vlan_commit_untagged(vsc, port);
+ if (ret)
+ return ret;
+
+ ret = vsc73xx_vlan_commit_pvid(vsc, port);
+ if (ret)
+ return ret;
+
+ return vsc73xx_vlan_commit_conf(vsc, port);
}
static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
@@ -882,7 +1289,7 @@ static int vsc73xx_port_enable(struct dsa_switch *ds, int port,
dev_info(vsc->dev, "enable port %d\n", port);
vsc73xx_init_port(vsc, port);
- return 0;
+ return vsc73xx_vlan_commit_settings(vsc, port);
}
static void vsc73xx_port_disable(struct dsa_switch *ds, int port)
@@ -1053,20 +1460,333 @@ static void vsc73xx_phylink_get_caps(struct dsa_switch *dsa, int port,
config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000;
}
+static int
+vsc73xx_port_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering, struct netlink_ext_ack *extack)
+{
+ struct vsc73xx *vsc = ds->priv;
+
+ /* The commit to hardware below is required because vsc73xx uses
+ * tag_8021q. When vlan_filtering is disabled, tag_8021q uses
+ * pvid/untagged VLANs for source port identification. The values
+ * configured for VLANs and for the pvid/untagged states are stored
+ * in the portinfo structure. When vlan_filtering is enabled, the
+ * pvid/untagged values are restored from the portinfo structure.
+ * The analogous routine runs when vlan_filtering is disabled, but
+ * then the values used for tag_8021q are restored instead.
+ */
+
+ return vsc73xx_vlan_commit_settings(vsc, port);
+}
+
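In other words, the toggle itself writes nothing new; it merely re-commits whichever of the two stored state sets is now active. Schematically:

    /* vlan_filtering 0 -> 1: commit re-applies pvid_vlan_filtering and
     *                        the bridge untagged state from portinfo.
     * vlan_filtering 1 -> 0: commit re-applies pvid_tag_8021q and the
     *                        untag-all behaviour that tag_8021q needs.
     */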
+static int vsc73xx_port_vlan_add(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan,
+ struct netlink_ext_ack *extack)
+{
+ bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct vsc73xx_bridge_vlan *vsc73xx_vlan;
+ struct vsc73xx_vlan_summary summary;
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+ bool commit_to_hardware;
+ int ret = 0;
+
+ /* Be sure to deny alterations to the configuration done by tag_8021q.
+ */
+ if (vid_is_dsa_8021q(vlan->vid)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Range 3072-4095 reserved for dsa_8021q operation");
+ return -EBUSY;
+ }
+
+ /* The processed vlan->vid is excluded from the search because the VLAN
+ * can be re-added with a different set of flags, so it's easiest to
+ * ignore its old flags from the VLAN database software copy.
+ */
+ vsc73xx_bridge_vlan_summary(vsc, port, &summary, vlan->vid);
+
+ /* VSC73XX allows only three untagged states: none, one or all */
+ if ((untagged && summary.num_tagged > 0 && summary.num_untagged > 0) ||
+ (!untagged && summary.num_untagged > 1)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port can have none, one, or all VLANs untagged");
+ return -EBUSY;
+ }
+
+ vsc73xx_vlan = vsc73xx_bridge_vlan_find(vsc, vlan->vid);
+
+ if (!vsc73xx_vlan) {
+ vsc73xx_vlan = kzalloc(sizeof(*vsc73xx_vlan), GFP_KERNEL);
+ if (!vsc73xx_vlan)
+ return -ENOMEM;
+
+ vsc73xx_vlan->vid = vlan->vid;
+
+ list_add_tail(&vsc73xx_vlan->list, &vsc->vlans);
+ }
+
+ vsc73xx_vlan->portmask |= BIT(port);
+
+ /* The CPU port must always be tagged because source port
+ * identification is based on tag_8021q.
+ */
+ if (port == CPU_PORT)
+ goto update_vlan_table;
+
+ if (untagged)
+ vsc73xx_vlan->untagged |= BIT(port);
+ else
+ vsc73xx_vlan->untagged &= ~BIT(port);
+
+ portinfo = &vsc->portinfo[port];
+
+ if (pvid) {
+ portinfo->pvid_vlan_filtering_configured = true;
+ portinfo->pvid_vlan_filtering = vlan->vid;
+ } else if (portinfo->pvid_vlan_filtering_configured &&
+ portinfo->pvid_vlan_filtering == vlan->vid) {
+ portinfo->pvid_vlan_filtering_configured = false;
+ }
+
+ commit_to_hardware = !vsc73xx_tag_8021q_active(dp);
+ if (commit_to_hardware) {
+ ret = vsc73xx_vlan_commit_settings(vsc, port);
+ if (ret)
+ goto err;
+ }
+
+update_vlan_table:
+ ret = vsc73xx_update_vlan_table(vsc, port, vlan->vid, true);
+ if (!ret)
+ return 0;
+err:
+ vsc73xx_bridge_vlan_remove_port(vsc73xx_vlan, port);
+ return ret;
+}
+
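The vid_is_dsa_8021q() guard at the top of vsc73xx_port_vlan_add() keeps the bridge away from the VIDs that tag_8021q allocates for source port identification. Conceptually it is only a range check; a sketch, with the 3072-4095 bounds taken from the extack message above:

    /* Illustrative only; the real helper lives in the DSA core. */
    static bool example_vid_is_reserved_for_dsa_8021q(u16 vid)
    {
        return vid >= 3072 && vid <= 4095;
    }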
+static int vsc73xx_port_vlan_del(struct dsa_switch *ds, int port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ struct vsc73xx_bridge_vlan *vsc73xx_vlan;
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+ bool commit_to_hardware;
+ int ret;
+
+ ret = vsc73xx_update_vlan_table(vsc, port, vlan->vid, false);
+ if (ret)
+ return ret;
+
+ portinfo = &vsc->portinfo[port];
+
+ if (portinfo->pvid_vlan_filtering_configured &&
+ portinfo->pvid_vlan_filtering == vlan->vid)
+ portinfo->pvid_vlan_filtering_configured = false;
+
+ vsc73xx_vlan = vsc73xx_bridge_vlan_find(vsc, vlan->vid);
+
+ if (vsc73xx_vlan)
+ vsc73xx_bridge_vlan_remove_port(vsc73xx_vlan, port);
+
+ commit_to_hardware = !vsc73xx_tag_8021q_active(dsa_to_port(ds, port));
+
+ if (commit_to_hardware)
+ return vsc73xx_vlan_commit_settings(vsc, port);
+
+ return 0;
+}
+
+static int vsc73xx_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
+ u16 flags)
+{
+ bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+ bool commit_to_hardware;
+ int ret;
+
+ portinfo = &vsc->portinfo[port];
+
+ if (pvid) {
+ portinfo->pvid_tag_8021q_configured = true;
+ portinfo->pvid_tag_8021q = vid;
+ }
+
+ commit_to_hardware = vsc73xx_tag_8021q_active(dsa_to_port(ds, port));
+ if (commit_to_hardware) {
+ ret = vsc73xx_vlan_commit_settings(vsc, port);
+ if (ret)
+ return ret;
+ }
+
+ return vsc73xx_update_vlan_table(vsc, port, vid, true);
+}
+
+static int vsc73xx_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
+{
+ struct vsc73xx_portinfo *portinfo;
+ struct vsc73xx *vsc = ds->priv;
+
+ portinfo = &vsc->portinfo[port];
+
+ if (portinfo->pvid_tag_8021q_configured &&
+ portinfo->pvid_tag_8021q == vid) {
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ bool commit_to_hardware;
+ int err;
+
+ portinfo->pvid_tag_8021q_configured = false;
+
+ commit_to_hardware = vsc73xx_tag_8021q_active(dp);
+ if (commit_to_hardware) {
+ err = vsc73xx_vlan_commit_settings(vsc, port);
+ if (err)
+ return err;
+ }
+ }
+
+ return vsc73xx_update_vlan_table(vsc, port, vid, false);
+}
+
+static int vsc73xx_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & ~BR_LEARNING)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vsc73xx_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ if (flags.mask & BR_LEARNING) {
+ u32 val = flags.val & BR_LEARNING ? BIT(port) : 0;
+ struct vsc73xx *vsc = ds->priv;
+
+ return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_LEARNMASK, BIT(port), val);
+ }
+
+ return 0;
+}
+
+static void vsc73xx_refresh_fwd_map(struct dsa_switch *ds, int port, u8 state)
+{
+ struct dsa_port *other_dp, *dp = dsa_to_port(ds, port);
+ struct vsc73xx *vsc = ds->priv;
+ u16 mask;
+
+ if (state != BR_STATE_FORWARDING) {
+ /* Ports that aren't in the forwarding state must not
+ * forward packets anywhere.
+ */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + port,
+ VSC73XX_SRCMASKS_PORTS_MASK, 0);
+
+ dsa_switch_for_each_available_port(other_dp, ds) {
+ if (other_dp == dp)
+ continue;
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + other_dp->index,
+ BIT(port), 0);
+ }
+
+ return;
+ }
+
+ /* Forwarding ports must forward to the CPU and to other ports
+ * in the same bridge
+ */
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + CPU_PORT, BIT(port), BIT(port));
+
+ mask = BIT(CPU_PORT);
+
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ int other_port = other_dp->index;
+
+ if (port == other_port || !dsa_port_bridge_same(dp, other_dp) ||
+ other_dp->stp_state != BR_STATE_FORWARDING)
+ continue;
+
+ mask |= BIT(other_port);
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + other_port,
+ BIT(port), BIT(port));
+ }
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_SRCMASKS + port,
+ VSC73XX_SRCMASKS_PORTS_MASK, mask);
+}
+
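To make the mask arithmetic concrete, consider a hypothetical layout: ports 0 and 1 in the same bridge and both forwarding, port 2 blocking, CPU port 6. The per-source-port rows then end up as:

    /*   VSC73XX_SRCMASKS + 0 = BIT(6) | BIT(1)   CPU plus bridge peer
     *   VSC73XX_SRCMASKS + 1 = BIT(6) | BIT(0)
     *   VSC73XX_SRCMASKS + 2 = 0                 forwards nowhere
     *   VSC73XX_SRCMASKS + 6 accumulates BIT(0) | BIT(1), so the CPU
     *   can reach both forwarding ports.
     *
     * Each row describes where traffic sourced at that port may go.
     */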
+/* FIXME: STP frames aren't forwarded at the moment. BPDU frames are
+ * forwarded only from and to the PI/SI interface. For more info, see
+ * chapter 2.7.1 (CPU Forwarding) in the datasheet.
+ * This function is required for tag_8021q operations.
+ */
+static void vsc73xx_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct vsc73xx *vsc = ds->priv;
+ u32 val = 0;
+
+ if (state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING)
+ val = dp->learning ? BIT(port) : 0;
+
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_LEARNMASK, BIT(port), val);
+
+ val = (state == BR_STATE_BLOCKING || state == BR_STATE_DISABLED) ?
+ 0 : BIT(port);
+ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0,
+ VSC73XX_RECVMASK, BIT(port), val);
+
+ /* The CPU port should always forward packets while user ports are
+ * forwarding, so its forwarding map is configured only from the
+ * other ports.
+ */
+ if (port != CPU_PORT)
+ vsc73xx_refresh_fwd_map(ds, port, state);
+}
+
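The learn and receive mask updates above follow the usual STP semantics; summarized as a table (a restatement, not driver code):

    /* STP state    LEARNMASK bit     RECVMASK bit
     * DISABLED     0                 0
     * BLOCKING     0                 0
     * LISTENING    0                 1
     * LEARNING     dp->learning      1
     * FORWARDING   dp->learning      1
     */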
+static const struct phylink_mac_ops vsc73xx_phylink_mac_ops = {
+ .mac_config = vsc73xx_mac_config,
+ .mac_link_down = vsc73xx_mac_link_down,
+ .mac_link_up = vsc73xx_mac_link_up,
+};
+
static const struct dsa_switch_ops vsc73xx_ds_ops = {
.get_tag_protocol = vsc73xx_get_tag_protocol,
.setup = vsc73xx_setup,
+ .teardown = vsc73xx_teardown,
.phy_read = vsc73xx_phy_read,
.phy_write = vsc73xx_phy_write,
- .adjust_link = vsc73xx_adjust_link,
.get_strings = vsc73xx_get_strings,
.get_ethtool_stats = vsc73xx_get_ethtool_stats,
.get_sset_count = vsc73xx_get_sset_count,
.port_enable = vsc73xx_port_enable,
.port_disable = vsc73xx_port_disable,
+ .port_pre_bridge_flags = vsc73xx_port_pre_bridge_flags,
+ .port_bridge_flags = vsc73xx_port_bridge_flags,
+ .port_bridge_join = dsa_tag_8021q_bridge_join,
+ .port_bridge_leave = dsa_tag_8021q_bridge_leave,
.port_change_mtu = vsc73xx_change_mtu,
.port_max_mtu = vsc73xx_get_max_mtu,
+ .port_stp_state_set = vsc73xx_port_stp_state_set,
+ .port_vlan_filtering = vsc73xx_port_vlan_filtering,
+ .port_vlan_add = vsc73xx_port_vlan_add,
+ .port_vlan_del = vsc73xx_port_vlan_del,
.phylink_get_caps = vsc73xx_phylink_get_caps,
+ .tag_8021q_vlan_add = vsc73xx_tag_8021q_vlan_add,
+ .tag_8021q_vlan_del = vsc73xx_tag_8021q_vlan_del,
};
static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset)
@@ -1195,26 +1915,16 @@ int vsc73xx_probe(struct vsc73xx *vsc)
vsc->addr[0], vsc->addr[1], vsc->addr[2],
vsc->addr[3], vsc->addr[4], vsc->addr[5]);
- /* The VSC7395 switch chips have 5+1 ports which means 5
- * ordinary ports and a sixth CPU port facing the processor
- * with an RGMII interface. These ports are numbered 0..4
- * and 6, so they leave a "hole" in the port map for port 5,
- * which is invalid.
- *
- * The VSC7398 has 8 ports, port 7 is again the CPU port.
- *
- * We allocate 8 ports and avoid access to the nonexistant
- * ports.
- */
vsc->ds = devm_kzalloc(dev, sizeof(*vsc->ds), GFP_KERNEL);
if (!vsc->ds)
return -ENOMEM;
vsc->ds->dev = dev;
- vsc->ds->num_ports = 8;
+ vsc->ds->num_ports = VSC73XX_MAX_NUM_PORTS;
vsc->ds->priv = vsc;
vsc->ds->ops = &vsc73xx_ds_ops;
+ vsc->ds->phylink_mac_ops = &vsc73xx_phylink_mac_ops;
ret = dsa_register_switch(vsc->ds);
if (ret) {
dev_err(dev, "unable to register switch (%d)\n", ret);
diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h
index 30b1f0a36566..3ca579acc798 100644
--- a/drivers/net/dsa/vitesse-vsc73xx.h
+++ b/drivers/net/dsa/vitesse-vsc73xx.h
@@ -3,8 +3,48 @@
#include <linux/etherdevice.h>
#include <linux/gpio/driver.h>
+/* The VSC7395 switch chips have 5+1 ports which means 5 ordinary ports and
+ * a sixth CPU port facing the processor with an RGMII interface. These ports
+ * are numbered 0..4 and 6, so they leave a "hole" in the port map for port 5,
+ * which is invalid.
+ *
+ * The VSC7398 has 8 ports, port 7 is again the CPU port.
+ *
+ * We allocate 8 ports and avoid access to the nonexistent ports.
+ */
+#define VSC73XX_MAX_NUM_PORTS 8
+
+/**
+ * struct vsc73xx_portinfo - port data structure: stored per-port VLAN state
+ * @pvid_vlan_filtering: pvid vlan number used in vlan filtering mode
+ * @pvid_tag_8021q: pvid vlan number used in tag_8021q mode
+ * @pvid_vlan_filtering_configured: informs if the port has a pvid configured
+ * in vlan filtering mode
+ * @pvid_tag_8021q_configured: informs if the port has a pvid configured in
+ * tag_8021q mode
+ */
+struct vsc73xx_portinfo {
+ u16 pvid_vlan_filtering;
+ u16 pvid_tag_8021q;
+ bool pvid_vlan_filtering_configured;
+ bool pvid_tag_8021q_configured;
+};
+
/**
- * struct vsc73xx - VSC73xx state container
+ * struct vsc73xx - VSC73xx state container: main data structure
+ * @dev: The device pointer
+ * @reset: The descriptor for the GPIO line tied to the reset pin
+ * @ds: Pointer to the DSA core structure
+ * @gc: Main structure of the GPIO controller
+ * @chipid: Storage for the Chip ID value read from the CHIPID register of the
+ * switch
+ * @addr: MAC address used in flow control frames
+ * @ops: Structure with hardware-dependent operations
+ * @priv: Pointer to the configuration interface structure
+ * @portinfo: Storage table of portinfo structures
+ * @vlans: List of configured VLANs. Contains the port mask and untagged
+ * status of every VLAN configured by a port VLAN operation. It doesn't
+ * cover tag_8021q VLANs.
*/
struct vsc73xx {
struct device *dev;
@@ -15,8 +55,15 @@ struct vsc73xx {
u8 addr[ETH_ALEN];
const struct vsc73xx_ops *ops;
void *priv;
+ struct vsc73xx_portinfo portinfo[VSC73XX_MAX_NUM_PORTS];
+ struct list_head vlans;
};
+/**
+ * struct vsc73xx_ops - VSC73xx methods container
+ * @read: Method for register reading over the hardware-dependent interface
+ * @write: Method for register writing over the hardware-dependent interface
+ */
struct vsc73xx_ops {
int (*read)(struct vsc73xx *vsc, u8 block, u8 subblock, u8 reg,
u32 *val);
@@ -24,6 +71,21 @@ struct vsc73xx_ops {
u32 val);
};
+/**
+ * struct vsc73xx_bridge_vlan - VSC73xx driver structure that keeps a copy
+ * of the VLAN database
+ * @vid: VLAN number
+ * @portmask: each bit represents one port
+ * @untagged: each bit represents one port configured with @vid untagged
+ * @list: list structure
+ */
+struct vsc73xx_bridge_vlan {
+ u16 vid;
+ u8 portmask;
+ u8 untagged;
+ struct list_head list;
+};
+
int vsc73xx_is_addr_valid(u8 block, u8 subblock);
int vsc73xx_probe(struct vsc73xx *vsc);
void vsc73xx_remove(struct vsc73xx *vsc);
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 96db032b478f..de3b768f2ff9 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -466,13 +466,25 @@ static void xrs700x_phylink_get_caps(struct dsa_switch *ds, int port,
}
}
-static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
- unsigned int mode, phy_interface_t interface,
+static void xrs700x_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void xrs700x_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+}
+
+static void xrs700x_mac_link_up(struct phylink_config *config,
struct phy_device *phydev,
+ unsigned int mode, phy_interface_t interface,
int speed, int duplex,
bool tx_pause, bool rx_pause)
{
- struct xrs700x *priv = ds->priv;
+ struct dsa_port *dp = dsa_phylink_to_port(config);
+ struct xrs700x *priv = dp->ds->priv;
+ int port = dp->index;
unsigned int val;
switch (speed) {
@@ -699,13 +711,18 @@ static int xrs700x_hsr_leave(struct dsa_switch *ds, int port,
return 0;
}
+static const struct phylink_mac_ops xrs700x_phylink_mac_ops = {
+ .mac_config = xrs700x_mac_config,
+ .mac_link_down = xrs700x_mac_link_down,
+ .mac_link_up = xrs700x_mac_link_up,
+};
+
static const struct dsa_switch_ops xrs700x_ops = {
.get_tag_protocol = xrs700x_get_tag_protocol,
.setup = xrs700x_setup,
.teardown = xrs700x_teardown,
.port_stp_state_set = xrs700x_port_stp_state_set,
.phylink_get_caps = xrs700x_phylink_get_caps,
- .phylink_mac_link_up = xrs700x_mac_link_up,
.get_strings = xrs700x_get_strings,
.get_sset_count = xrs700x_get_sset_count,
.get_ethtool_stats = xrs700x_get_ethtool_stats,
@@ -763,6 +780,7 @@ struct xrs700x *xrs700x_switch_alloc(struct device *base, void *devpriv)
INIT_DELAYED_WORK(&priv->mib_work, xrs700x_mib_work);
ds->ops = &xrs700x_ops;
+ ds->phylink_mac_ops = &xrs700x_phylink_mac_ops;
ds->priv = priv;
priv->dev = base;
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index c1179d7311f7..9b731dea78c1 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -127,8 +127,8 @@ static void xrs700x_i2c_shutdown(struct i2c_client *i2c)
}
static const struct i2c_device_id xrs700x_i2c_id[] = {
- { "xrs700x-switch", 0 },
- {},
+ { "xrs700x-switch" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, xrs700x_i2c_id);
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index ba3e7aa1a28f..4725a8cfd695 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -31,9 +31,6 @@
Setting to > 1512 effectively disables this feature. */
static int rx_copybreak = 200;
-/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
-static const int mtu = 1500;
-
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 5267e9dcd87e..be58dac0502a 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -502,7 +502,7 @@ static int el3_config(struct net_device *dev, struct ifmap *map)
{
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 3) {
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
tc589_set_xcvr(dev, dev->if_port);
} else {
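This WRITE_ONCE() conversion, repeated below for dev->if_port and dev->mtu across many drivers, pairs the writer with lockless READ_ONCE() readers of the same net_device field so the data race is annotated as intentional (for KCSAN and the compiler). A minimal sketch of the pairing, with illustrative context:

    /* writer side, e.g. in an ndo/config callback */
    WRITE_ONCE(dev->mtu, new_mtu);

    /* reader side, e.g. a datapath reading without a lock */
    unsigned int mtu = READ_ONCE(dev->mtu);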
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 706bd59bf645..1fbab79e2be4 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -44,7 +44,7 @@ config 3C515
config PCMCIA_3C574
tristate "3Com 3c574 PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a 3Com 3c574 or compatible PCMCIA
(PC-card) Fast Ethernet card to your computer.
@@ -54,7 +54,7 @@ config PCMCIA_3C574
config PCMCIA_3C589
tristate "3Com 3c589 PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a 3Com 3c589 or compatible PCMCIA
(PC-card) Ethernet card to your computer.
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index a4130e643342..345f250781c6 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_8390
config PCMCIA_AXNET
tristate "Asix AX88190 PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach an Asix AX88190-based PCMCIA
(PC-card) Fast Ethernet card to your computer. These cards are
@@ -117,7 +117,7 @@ config NE2000
config NE2K_PCI
tristate "PCI NE2000 and clones support (see help)"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
help
This driver is for NE2000 compatible PCI cards. It will not work
@@ -146,7 +146,7 @@ config APNE
config PCMCIA_PCNET
tristate "NE2000 compatible PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
select CRC32
help
Say Y here if you intend to attach an NE2000 compatible PCMCIA
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 05d39ecb97ff..e876fe52399a 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -258,7 +258,7 @@ static int etherh_set_config(struct net_device *dev, struct ifmap *map)
* media type, turn off automedia detection.
*/
dev->flags &= ~IFF_AUTOMEDIA;
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
break;
default:
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 65f56a98c0a0..1a34da07c0db 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -186,17 +186,6 @@ static void ne2k_pci_block_output(struct net_device *dev, const int count,
static const struct ethtool_ops ne2k_pci_ethtool_ops;
-
-/* There is no room in the standard 8390 structure for extra info we need,
- * so we build a meta/outer-wrapper structure..
- */
-struct ne2k_pci_card {
- struct net_device *dev;
- struct pci_dev *pci_dev;
-};
-
-
-
/* NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet
* buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes
* 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 9bd5e991f1e5..780fb4afb6af 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -994,7 +994,7 @@ static int set_config(struct net_device *dev, struct ifmap *map)
return -EOPNOTSUPP;
else if ((map->port < 1) || (map->port > 2))
return -EINVAL;
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
NS8390_init(dev, 1);
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 6a19b5393ed1..0baac25db4f8 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -122,6 +122,7 @@ source "drivers/net/ethernet/litex/Kconfig"
source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mediatek/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
+source "drivers/net/ethernet/meta/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
source "drivers/net/ethernet/mscc/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 0d872d4efcd1..c03203439c0e 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -59,6 +59,7 @@ obj-$(CONFIG_NET_VENDOR_LITEX) += litex/
obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
+obj-$(CONFIG_NET_VENDOR_META) += meta/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 857361c74f5d..e1b8794b14c9 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -441,14 +441,6 @@ enum rx_desc_bits {
};
/* Completion queue entry. */
-struct short_rx_done_desc {
- __le32 status; /* Low 16 bits is length. */
-};
-struct basic_rx_done_desc {
- __le32 status; /* Low 16 bits is length. */
- __le16 vlanid;
- __le16 status2;
-};
struct csum_rx_done_desc {
__le32 status; /* Low 16 bits is length. */
__le16 csum; /* Partial checksum */
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 8b4ef5121308..0713f1e2c7f3 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -11,10 +11,10 @@
#include <linux/crc8.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
+#include <linux/gpio/consumer.h>
#include <linux/if_bridge.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
-#include <linux/gpio.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 3d9220f9c9fe..b325e0cef120 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3852,7 +3852,7 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
et131x_disable_txrx(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
et131x_adapter_memory_free(adapter);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index eafef84fe3be..3d8ac63132fb 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2539,7 +2539,7 @@ static int ace_change_mtu(struct net_device *dev, int new_mtu)
struct ace_regs __iomem *regs = ap->regs;
writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (new_mtu > ACE_STD_MTU) {
if (!(ap->jumbo)) {
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 1c8763be0e4b..3c112c18ae6a 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -788,7 +788,7 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
return -EBUSY;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 2d8a66ea82fa..713a595370bf 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -312,7 +312,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
struct ena_com_io_sq *io_sq)
{
size_t size;
- int dev_node = 0;
memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
@@ -325,12 +324,9 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size = io_sq->desc_entry_size * io_sq->q_depth;
if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->desc_addr.virt_addr) {
io_sq->desc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size,
@@ -354,10 +350,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
- dev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, dev_node);
if (!io_sq->bounce_buf_ctrl.base_buffer)
io_sq->bounce_buf_ctrl.base_buffer =
devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
@@ -397,7 +390,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
struct ena_com_io_cq *io_cq)
{
size_t size;
- int prev_node = 0;
memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
@@ -409,11 +401,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
- prev_node = dev_to_node(ena_dev->dmadev);
- set_dev_node(ena_dev->dmadev, ctx->numa_node);
io_cq->cdesc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
- set_dev_node(ena_dev->dmadev, prev_node);
if (!io_cq->cdesc_addr.virt_addr) {
io_cq->cdesc_addr.virt_addr =
dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index fea57eb8e58b..924f03f5a6c7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -47,7 +47,7 @@
/* ENA adaptive interrupt moderation settings */
#define ENA_INTR_INITIAL_TX_INTERVAL_USECS 64
-#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 0
+#define ENA_INTR_INITIAL_RX_INTERVAL_USECS 20
#define ENA_DEFAULT_INTR_DELAY_RESOLUTION 1
#define ENA_HASH_KEY_SIZE 40
@@ -305,6 +305,8 @@ struct ena_com_dev {
u16 stats_func; /* Selected function for extended statistic dump */
u16 stats_queue; /* Selected queue for extended statistic dump */
+ u32 ena_min_poll_delay_us;
+
struct ena_com_mmio_read mmio_read;
struct ena_rss rss;
@@ -325,8 +327,6 @@ struct ena_com_dev {
struct ena_intr_moder_entry *intr_moder_tbl;
struct ena_com_llq_info llq_info;
-
- u32 ena_min_poll_delay_us;
};
struct ena_com_dev_get_features_ctx {
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 933e619b3a31..4c6e07aa4bbb 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -229,30 +229,43 @@ static struct ena_eth_io_rx_cdesc_base *
idx * io_cq->cdesc_entry_size_in_bytes);
}
-static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
- u16 *first_cdesc_idx)
+static int ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+ u16 *first_cdesc_idx,
+ u16 *num_descs)
{
+ u16 count = io_cq->cur_rx_pkt_cdesc_count, head_masked;
struct ena_eth_io_rx_cdesc_base *cdesc;
- u16 count = 0, head_masked;
u32 last = 0;
do {
+ u32 status;
+
cdesc = ena_com_get_next_rx_cdesc(io_cq);
if (!cdesc)
break;
+ status = READ_ONCE(cdesc->status);
ena_com_cq_inc_head(io_cq);
+ if (unlikely((status & ENA_ETH_IO_RX_CDESC_BASE_FIRST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_FIRST_SHIFT && count != 0)) {
+ struct ena_com_dev *dev = ena_com_io_cq_to_ena_dev(io_cq);
+
+ netdev_err(dev->net_device,
+ "First bit is on in descriptor #%d on q_id: %d, req_id: %u\n",
+ count, io_cq->qid, cdesc->req_id);
+ return -EFAULT;
+ }
count++;
- last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
- ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ last = (status & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
} while (!last);
if (last) {
*first_cdesc_idx = io_cq->cur_rx_pkt_cdesc_start_idx;
- count += io_cq->cur_rx_pkt_cdesc_count;
head_masked = io_cq->head & (io_cq->q_depth - 1);
+ *num_descs = count;
io_cq->cur_rx_pkt_cdesc_count = 0;
io_cq->cur_rx_pkt_cdesc_start_idx = head_masked;
@@ -260,11 +273,11 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
"ENA q_id: %d packets were completed. first desc idx %u descs# %d\n",
io_cq->qid, *first_cdesc_idx, count);
} else {
- io_cq->cur_rx_pkt_cdesc_count += count;
- count = 0;
+ io_cq->cur_rx_pkt_cdesc_count = count;
+ *num_descs = 0;
}
- return count;
+ return 0;
}
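The reworked function accumulates descriptors for a multi-buffer packet across calls and now rejects a malformed stream in which a FIRST-marked descriptor shows up mid-packet. The completion protocol it assumes, in sketch form:

    /* Sketch of the FIRST/LAST completion-descriptor protocol as this
     * function consumes it; the names follow the masks used above.
     *
     *   [FIRST] [ ... ] [LAST]   <- one received packet
     *
     * - FIRST set while count != 0 -> stream is malformed, -EFAULT is
     *   returned, and the caller resets the device with
     *   ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED.
     * - LAST not yet seen          -> the partial count is parked in
     *   cur_rx_pkt_cdesc_count and *num_descs is reported as 0.
     * - LAST seen                  -> *num_descs holds the full count
     *   and the accumulator is cleared.
     */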
static int ena_com_create_meta(struct ena_com_io_sq *io_sq,
@@ -539,10 +552,14 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
u16 cdesc_idx = 0;
u16 nb_hw_desc;
u16 i = 0;
+ int rc;
WARN(io_cq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
- nb_hw_desc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx);
+ rc = ena_com_cdesc_rx_pkt_get(io_cq, &cdesc_idx, &nb_hw_desc);
+ if (unlikely(rc != 0))
+ return -EFAULT;
+
if (nb_hw_desc == 0) {
ena_rx_ctx->descs = nb_hw_desc;
return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 72b019758caa..449bc4960ccc 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -47,7 +47,7 @@ struct ena_com_rx_ctx {
bool frag;
u32 hash;
u16 descs;
- int max_bufs;
+ u16 max_bufs;
u8 pkt_offset;
};
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 0cb6cc1cef56..b24cc3f05248 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -49,6 +49,7 @@ static const struct ena_stats ena_stats_global_strings[] = {
ENA_STAT_GLOBAL_ENTRY(interface_up),
ENA_STAT_GLOBAL_ENTRY(interface_down),
ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
+ ENA_STAT_GLOBAL_ENTRY(reset_fail),
};
static const struct ena_stats ena_stats_eni_strings[] = {
@@ -459,10 +460,18 @@ static void ena_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
struct ena_adapter *adapter = netdev_priv(dev);
-
- strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strscpy(info->bus_info, pci_name(adapter->pdev),
- sizeof(info->bus_info));
+ ssize_t ret = 0;
+
+ ret = strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ if (ret < 0)
+ netif_dbg(adapter, drv, dev,
+ "module name will be truncated, status = %zd\n", ret);
+
+ ret = strscpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+ if (ret < 0)
+ netif_dbg(adapter, drv, dev,
+ "bus info will be truncated, status = %zd\n", ret);
}
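strscpy() returns the number of characters copied, or -E2BIG when the source had to be truncated; the destination is NUL-terminated either way. A minimal standalone sketch, with illustrative buffer contents:

    char buf[8];                 /* illustrative destination */
    ssize_t len;

    len = strscpy(buf, "a fairly long source string", sizeof(buf));
    if (len < 0)                 /* -E2BIG: truncated but NUL-terminated */
        pr_debug("string was truncated\n");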
static void ena_get_ringparam(struct net_device *netdev,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index be5acfa41ee0..184b6e6cbed4 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
-static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
+static int ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
@@ -104,7 +104,7 @@ static int ena_change_mtu(struct net_device *dev, int new_mtu)
if (!ret) {
netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu);
update_rx_ring_mtu(adapter, new_mtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
} else {
netif_err(adapter, drv, dev, "Failed to set MTU to %d\n",
new_mtu);
@@ -1347,6 +1347,8 @@ error:
if (rc == -ENOSPC) {
ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
+ } else if (rc == -EFAULT) {
+ ena_reset_device(adapter, ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED);
} else {
ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
&rx_ring->syncp);
@@ -2701,6 +2703,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
{
struct device *dev = &pdev->dev;
struct ena_admin_host_info *host_info;
+ ssize_t ret;
int rc;
/* Allocate only the host info */
@@ -2715,11 +2718,19 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
host_info->bdf = pci_dev_id(pdev);
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
- strscpy(host_info->kernel_ver_str, utsname()->version,
- sizeof(host_info->kernel_ver_str) - 1);
+ ret = strscpy(host_info->kernel_ver_str, utsname()->version,
+ sizeof(host_info->kernel_ver_str));
+ if (ret < 0)
+ dev_dbg(dev,
+ "kernel version string will be truncated, status = %zd\n", ret);
+
host_info->os_dist = 0;
- strscpy(host_info->os_dist_str, utsname()->release,
- sizeof(host_info->os_dist_str));
+ ret = strscpy(host_info->os_dist_str, utsname()->release,
+ sizeof(host_info->os_dist_str));
+ if (ret < 0)
+ dev_dbg(dev,
+ "OS distribution string will be truncated, status = %zd\n", ret);
+
host_info->driver_version =
(DRV_MODULE_GEN_MAJOR) |
(DRV_MODULE_GEN_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
@@ -3235,14 +3246,15 @@ err_disable_msix:
return rc;
}
-static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
+static int ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
struct net_device *netdev = adapter->netdev;
struct ena_com_dev *ena_dev = adapter->ena_dev;
bool dev_up;
+ int rc = 0;
if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
- return;
+ return 0;
netif_carrier_off(netdev);
@@ -3260,7 +3272,7 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
* and device is up, ena_down() already reset the device.
*/
if (!(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags) && dev_up))
- ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
+ rc = ena_com_dev_reset(adapter->ena_dev, adapter->reset_reason);
ena_free_mgmnt_irq(adapter);
@@ -3279,6 +3291,8 @@ static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
+
+ return rc;
}
static int ena_restore_device(struct ena_adapter *adapter)
@@ -3355,14 +3369,17 @@ err:
static void ena_fw_reset_device(struct work_struct *work)
{
+ int rc = 0;
+
struct ena_adapter *adapter =
container_of(work, struct ena_adapter, reset_task);
rtnl_lock();
if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
- ena_destroy_device(adapter, false);
- ena_restore_device(adapter);
+ rc |= ena_destroy_device(adapter, false);
+ rc |= ena_restore_device(adapter);
+ adapter->dev_stats.reset_fail += !!rc;
dev_err(&adapter->pdev->dev, "Device reset completed successfully\n");
}
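Both legs of the reset now return a status, and the new reset_fail counter (added to struct ena_stats_dev below) counts a reset as failed if either leg fails. The OR-accumulation only preserves truthiness, which is all that is used here; a sketch with hypothetical step names:

    int rc = 0;

    rc |= step_destroy();        /* each returns 0 or -errno */
    rc |= step_restore();        /* the OR of two errnos is not a valid
                                  * errno, but it is reliably nonzero */
    stats->reset_fail += !!rc;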
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 6d2cc20210cc..d59509747d1a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -290,6 +290,7 @@ struct ena_stats_dev {
u64 admin_q_pause;
u64 rx_drops;
u64 tx_drops;
+ u64 reset_fail;
};
enum ena_flags_t {
diff --git a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
index 2c3d6a77ea79..a2efebafd686 100644
--- a/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
+++ b/drivers/net/ethernet/amazon/ena/ena_regs_defs.h
@@ -22,6 +22,7 @@ enum ena_regs_reset_reason_types {
ENA_REGS_RESET_GENERIC = 13,
ENA_REGS_RESET_MISS_INTERRUPT = 14,
ENA_REGS_RESET_SUSPECTED_POLL_STARVATION = 15,
+ ENA_REGS_RESET_RX_DESCRIPTOR_MALFORMED = 16,
};
/* ena_registers offsets */
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index ef512cf89abf..27792a52b6cf 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -667,4 +667,5 @@ void lance_poll(struct net_device *dev)
EXPORT_SYMBOL_GPL(lance_poll);
#endif
+MODULE_DESCRIPTION("LANCE Ethernet IC generic routines");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index f8cc8925161c..b39c6f3e1eda 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -56,7 +56,7 @@ config LANCE
config PCNET32
tristate "AMD PCnet32 PCI support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
select MII
help
@@ -122,7 +122,7 @@ config MVME147_NET
config PCMCIA_NMCLAN
tristate "New Media PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a New Media Ethernet or LiveWire
PCMCIA (PC-card) Ethernet card to your computer.
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 68983b717145..1ca26a8c40eb 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -781,4 +781,5 @@ static void __exit a2065_cleanup_module(void)
module_init(a2065_init_module);
module_exit(a2065_cleanup_module);
+MODULE_DESCRIPTION("Commodore A2065 Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index ea6cfc2095e1..f64f96fa17cf 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1520,9 +1520,9 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
if (!netif_running(dev)) {
/* new_mtu will be used
- * when device starts netxt time
+ * when device starts next time
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -1531,7 +1531,7 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
/* stop the chip */
writel(RUN, lp->mmio + CMD0);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
err = amd8111e_restart(dev);
spin_unlock_irq(&lp->lock);
@@ -1796,7 +1796,6 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
lp = netdev_priv(dev);
lp->pci_dev = pdev;
lp->amd8111e_net_dev = dev;
- lp->pm_cap = pdev->pm_cap;
spin_lock_init(&lp->lock);
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 9d570adb295b..305232f5476d 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -764,7 +764,6 @@ struct amd8111e_priv{
u32 ext_phy_id;
struct amd8111e_link_config link_config;
- int pm_cap;
struct net_device *next;
int mii;
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 38153e633231..fa201da567ed 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -790,4 +790,5 @@ static void __exit ariadne_cleanup_module(void)
module_init(ariadne_init_module);
module_exit(ariadne_cleanup_module);
+MODULE_DESCRIPTION("Ariadne Ethernet Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 751454d305c6..8c8cc7d0f42d 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -79,6 +79,7 @@ static int lance_debug = 1;
#endif
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)");
+MODULE_DESCRIPTION("Atari LANCE Ethernet driver");
MODULE_LICENSE("GPL");
/* Print debug messages on probing? */
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 055fda11c572..df42294530cb 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -234,4 +234,5 @@ static void __exit hplance_cleanup_module(void)
module_init(hplance_init_module);
module_exit(hplance_cleanup_module);
+MODULE_DESCRIPTION("HP300 on-board LANCE Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 6cf38180cc01..b1e6620ad41d 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -385,6 +385,7 @@ static void __exit lance_cleanup_module(void)
}
module_exit(lance_cleanup_module);
#endif /* MODULE */
+MODULE_DESCRIPTION("AMD LANCE/PCnet Ethernet driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 410c7b67eba4..c156566c0906 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -178,6 +178,7 @@ static int m147lance_close(struct net_device *dev)
return 0;
}
+MODULE_DESCRIPTION("MVME147 LANCE Ethernet driver");
MODULE_LICENSE("GPL");
static struct net_device *dev_mvme147_lance;
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 0dd391c84c13..37054a670407 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -760,7 +760,7 @@ static int mace_config(struct net_device *dev, struct ifmap *map)
{
if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
if (map->port <= 2) {
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
} else
return -EINVAL;
diff --git a/drivers/net/ethernet/amd/pds_core/core.h b/drivers/net/ethernet/amd/pds_core/core.h
index a3e17a0c187a..14522d6d5f86 100644
--- a/drivers/net/ethernet/amd/pds_core/core.h
+++ b/drivers/net/ethernet/amd/pds_core/core.h
@@ -256,7 +256,8 @@ int pdsc_dl_flash_update(struct devlink *dl,
int pdsc_dl_enable_get(struct devlink *dl, u32 id,
struct devlink_param_gset_ctx *ctx);
int pdsc_dl_enable_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx);
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack);
int pdsc_dl_enable_validate(struct devlink *dl, u32 id,
union devlink_param_value val,
struct netlink_ext_ack *extack);
diff --git a/drivers/net/ethernet/amd/pds_core/devlink.c b/drivers/net/ethernet/amd/pds_core/devlink.c
index 54864f27c87a..2681889162a2 100644
--- a/drivers/net/ethernet/amd/pds_core/devlink.c
+++ b/drivers/net/ethernet/amd/pds_core/devlink.c
@@ -37,7 +37,8 @@ int pdsc_dl_enable_get(struct devlink *dl, u32 id,
}
int pdsc_dl_enable_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct pdsc *pdsc = devlink_priv(dl);
struct pdsc_viftype *vt_entry;
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 246f34c43765..c60df4a21158 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -74,6 +74,7 @@ static int lance_debug = 1;
#endif
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)");
+MODULE_DESCRIPTION("Sun3/Sun3x on-board LANCE Ethernet driver");
MODULE_LICENSE("GPL");
#define DPRINTK(n,a) \
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 6b73648b3779..c4a4e316683f 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -2070,7 +2070,7 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
return ret;
pdata->rx_buf_size = ret;
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
xgbe_restart_dev(pdata);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 58e7e88aae5b..21407a26f806 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -577,7 +577,7 @@ static int xgbe_set_rxfh(struct net_device *netdev,
}
static int xgbe_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct xgbe_prv_data *pdata = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index f409d7bd1f1e..c5e5fac49779 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -170,7 +170,7 @@ static int xgbe_config_irqs(struct xgbe_prv_data *pdata)
goto out;
ret = pci_alloc_irq_vectors(pdata->pcidev, 1, 1,
- PCI_IRQ_LEGACY | PCI_IRQ_MSI);
+ PCI_IRQ_INTX | PCI_IRQ_MSI);
if (ret < 0) {
dev_info(pdata->dev, "single IRQ enablement failed\n");
return ret;
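PCI_IRQ_LEGACY is renamed to PCI_IRQ_INTX here; the flag's behavior is unchanged, the new name just matches the PCIe spec's term for pin-based interrupts. A minimal sketch of the renamed flag in a typical MSI-with-INTx-fallback allocation (the demo_* name is illustrative):

#include <linux/pci.h>

static int demo_setup_irq(struct pci_dev *pdev)
{
        int nvec;

        /* Prefer MSI; fall back to a single INTx line. */
        nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_INTX);
        if (nvec < 0)
                return nvec;

        return pci_irq_vector(pdev, 0); /* Linux IRQ number of vector 0 */
}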
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
index 9131020d06af..7912b3b45148 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c
@@ -538,7 +538,6 @@ static const struct xgbe_version_data xgbe_v1 = {
.tx_tstamp_workaround = 1,
};
-#ifdef CONFIG_ACPI
static const struct acpi_device_id xgbe_acpi_match[] = {
{ .id = "AMDI8001",
.driver_data = (kernel_ulong_t)&xgbe_v1 },
@@ -546,9 +545,7 @@ static const struct acpi_device_id xgbe_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, xgbe_acpi_match);
-#endif
-#ifdef CONFIG_OF
static const struct of_device_id xgbe_of_match[] = {
{ .compatible = "amd,xgbe-seattle-v1a",
.data = &xgbe_v1 },
@@ -556,7 +553,6 @@ static const struct of_device_id xgbe_of_match[] = {
};
MODULE_DEVICE_TABLE(of, xgbe_of_match);
-#endif
static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
xgbe_platform_suspend, xgbe_platform_resume);
@@ -564,12 +560,8 @@ static SIMPLE_DEV_PM_OPS(xgbe_platform_pm_ops,
static struct platform_driver xgbe_driver = {
.driver = {
.name = XGBE_DRV_NAME,
-#ifdef CONFIG_ACPI
.acpi_match_table = xgbe_acpi_match,
-#endif
-#ifdef CONFIG_OF
.of_match_table = xgbe_of_match,
-#endif
.pm = &xgbe_platform_pm_ops,
},
.probe = xgbe_platform_probe,
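Dropping the CONFIG_ACPI/CONFIG_OF guards works because the match-table types compile unconditionally and the .acpi_match_table/.of_match_table members always exist in struct device_driver; an unused table only costs a few bytes of data. A sketch of the unguarded pattern (the compatible string and names are hypothetical):

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>

static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,demo-device" },
        { /* sentinel */ }
};

static struct platform_driver demo_driver = {
        .driver = {
                .name = "demo",
                /* No #ifdef CONFIG_OF needed around this member. */
                .of_match_table = demo_of_match,
        },
};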
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 44900026d11b..4af9d89d5f88 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1530,7 +1530,7 @@ static int xgene_change_mtu(struct net_device *ndev, int new_mtu)
frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600;
xgene_enet_close(ndev);
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
pdata->mac_ops->set_framesize(pdata, frame_size);
xgene_enet_open(ndev);
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
index 8ebcc68e807f..f6a96931c89a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/Makefile
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -8,7 +8,7 @@
obj-$(CONFIG_AQTION) += atlantic.o
-ccflags-y += -I$(srctree)/$(src)
+ccflags-y += -I$(src)
atlantic-objs := aq_main.o \
aq_nic.o \
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index 7e9c74b141ef..fc2b325f34e7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -17,7 +17,7 @@
#define AQ_CFG_IS_POLLING_DEF 0U
-#define AQ_CFG_FORCE_LEGACY_INT 0U
+#define AQ_CFG_FORCE_INTX 0U
#define AQ_CFG_INTERRUPT_MODERATION_OFF 0
#define AQ_CFG_INTERRUPT_MODERATION_ON 1
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a2606ee3b0a5..d0aecd1d7357 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -652,7 +652,7 @@ static int aq_ethtool_set_wol(struct net_device *ndev,
}
static int aq_ethtool_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index dbd284660135..f010bda61c96 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -104,7 +104,7 @@ struct aq_stats_s {
};
#define AQ_HW_IRQ_INVALID 0U
-#define AQ_HW_IRQ_LEGACY 1U
+#define AQ_HW_IRQ_INTX 1U
#define AQ_HW_IRQ_MSI 2U
#define AQ_HW_IRQ_MSIX 3U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index 0b2a52199914..c1d1673c5749 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -146,7 +146,7 @@ static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
if (err < 0)
goto err_exit;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
err_exit:
return err;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index d6d6d5d37ff3..fe0e3e2a8117 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -127,7 +127,7 @@ void aq_nic_cfg_start(struct aq_nic_s *self)
cfg->irq_type = aq_pci_func_get_irq_type(self);
- if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
+ if ((cfg->irq_type == AQ_HW_IRQ_INTX) ||
(cfg->aq_hw_caps->vecs == 1U) ||
(cfg->vecs == 1U)) {
cfg->is_rss = 0U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index baa5f8cc31f2..43c71f6b314f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -200,7 +200,7 @@ unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
if (self->pdev->msi_enabled)
return AQ_HW_IRQ_MSI;
- return AQ_HW_IRQ_LEGACY;
+ return AQ_HW_IRQ_INTX;
}
static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
@@ -298,11 +298,8 @@ static int aq_pci_probe(struct pci_dev *pdev,
numvecs += AQ_HW_SERVICE_IRQS;
/*enable interrupts */
-#if !AQ_CFG_FORCE_LEGACY_INT
- err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
- PCI_IRQ_MSIX | PCI_IRQ_MSI |
- PCI_IRQ_LEGACY);
-
+#if !AQ_CFG_FORCE_INTX
+ err = pci_alloc_irq_vectors(self->pdev, 1, numvecs, PCI_IRQ_ALL_TYPES);
if (err < 0)
goto err_hwinit;
numvecs = err;
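The open-coded PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY mask collapses to PCI_IRQ_ALL_TYPES, which is defined as exactly that combination (with INTX as the new spelling of LEGACY); pci_alloc_irq_vectors() still tries MSI-X first, then MSI, then INTx. A one-function sketch, with demo_* as an illustrative name:

#include <linux/pci.h>

static int demo_alloc_any_irq(struct pci_dev *pdev, unsigned int want)
{
        /* Returns the number of vectors allocated, or a negative errno. */
        return pci_alloc_irq_vectors(pdev, 1, want, PCI_IRQ_ALL_TYPES);
}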
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 9dfd68f0fda9..8de2cdd09213 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -352,7 +352,7 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
- [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_INTX] = { 0x20000080U, 0x20000080U },
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 54e70f07b573..56c46266bb0a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -562,7 +562,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
- [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_INTX] = { 0x20000080U, 0x20000080U },
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index 220400a633f5..b0ed572e88c6 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -534,7 +534,7 @@ static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl2_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
- [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U },
+ [AQ_HW_IRQ_INTX] = { 0x20000080U, 0x20000080U },
[AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U },
[AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U },
};
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 0a67612af228..0d400a7d8d91 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -23,16 +23,6 @@ config ARC_EMAC_CORE
select PHYLIB
select CRC32
-config ARC_EMAC
- tristate "ARC EMAC support"
- select ARC_EMAC_CORE
- depends on OF_IRQ
- depends on ARC || COMPILE_TEST
- help
- On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
- non-standard on-chip ethernet device ARC EMAC 10/100 is used.
- Say Y here if you have such a board. If unsure, say N.
-
config EMAC_ROCKCHIP
tristate "Rockchip EMAC support"
select ARC_EMAC_CORE
diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile
index d63ada577c8e..23586eefec44 100644
--- a/drivers/net/ethernet/arc/Makefile
+++ b/drivers/net/ethernet/arc/Makefile
@@ -5,5 +5,4 @@
arc_emac-objs := emac_main.o emac_mdio.o
obj-$(CONFIG_ARC_EMAC_CORE) += arc_emac.o
-obj-$(CONFIG_ARC_EMAC) += emac_arc.o
obj-$(CONFIG_EMAC_ROCKCHIP) += emac_rockchip.o
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
deleted file mode 100644
index a3afddb23ee8..000000000000
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ /dev/null
@@ -1,88 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/**
- * DOC: emac_arc.c - ARC EMAC specific glue layer
- *
- * Copyright (C) 2014 Romain Perier
- *
- * Romain Perier <romain.perier@gmail.com>
- */
-
-#include <linux/etherdevice.h>
-#include <linux/module.h>
-#include <linux/of_net.h>
-#include <linux/platform_device.h>
-
-#include "emac.h"
-
-#define DRV_NAME "emac_arc"
-
-static int emac_arc_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct arc_emac_priv *priv;
- phy_interface_t interface;
- struct net_device *ndev;
- int err;
-
- if (!dev->of_node)
- return -ENODEV;
-
- ndev = alloc_etherdev(sizeof(struct arc_emac_priv));
- if (!ndev)
- return -ENOMEM;
- platform_set_drvdata(pdev, ndev);
- SET_NETDEV_DEV(ndev, dev);
-
- priv = netdev_priv(ndev);
- priv->drv_name = DRV_NAME;
-
- err = of_get_phy_mode(dev->of_node, &interface);
- if (err) {
- if (err == -ENODEV)
- interface = PHY_INTERFACE_MODE_MII;
- else
- goto out_netdev;
- }
-
- priv->clk = devm_clk_get(dev, "hclk");
- if (IS_ERR(priv->clk)) {
- dev_err(dev, "failed to retrieve host clock from device tree\n");
- err = -EINVAL;
- goto out_netdev;
- }
-
- err = arc_emac_probe(ndev, interface);
-out_netdev:
- if (err)
- free_netdev(ndev);
- return err;
-}
-
-static void emac_arc_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
-
- arc_emac_remove(ndev);
- free_netdev(ndev);
-}
-
-static const struct of_device_id emac_arc_dt_ids[] = {
- { .compatible = "snps,arc-emac" },
- { /* Sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, emac_arc_dt_ids);
-
-static struct platform_driver emac_arc_driver = {
- .probe = emac_arc_probe,
- .remove_new = emac_arc_remove,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = emac_arc_dt_ids,
- },
-};
-
-module_platform_driver(emac_arc_driver);
-
-MODULE_AUTHOR("Romain Perier <romain.perier@gmail.com>");
-MODULE_DESCRIPTION("ARC EMAC platform driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 0f2f400b5bc4..a38be924cdaa 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1788,7 +1788,7 @@ static int ag71xx_change_mtu(struct net_device *ndev, int new_mtu)
{
struct ag71xx *ag = netdev_priv(ndev);
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
ag71xx_wr(ag, AG71XX_REG_MAC_MFL,
ag71xx_max_frame_len(ndev->mtu));
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 49bb9a8f00e6..ad6d6abd885f 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -901,7 +901,7 @@ static int alx_init_intr(struct alx_priv *alx)
int ret;
ret = pci_alloc_irq_vectors(alx->hw.pdev, 1, 1,
- PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ PCI_IRQ_MSI | PCI_IRQ_INTX);
if (ret < 0)
return ret;
@@ -1176,7 +1176,7 @@ static int alx_change_mtu(struct net_device *netdev, int mtu)
struct alx_priv *alx = netdev_priv(netdev);
int max_frame = ALX_MAX_FRAME_LEN(mtu);
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
alx->hw.mtu = mtu;
alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE);
netdev_update_features(netdev);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 46cdc32b4e31..c571614b1d50 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -561,7 +561,7 @@ static int atl1c_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.max_frame_size = new_mtu;
atl1c_set_rxbufsize(adapter, netdev);
atl1c_down(adapter);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 5f2a6fcba967..9b778b34b67e 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -428,7 +428,7 @@ static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
if (netif_running(netdev)) {
while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
msleep(1);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.max_frame_size = new_mtu;
adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3;
atl1e_down(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index a9014d7932db..3afd3627ce48 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2687,7 +2687,7 @@ static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
adapter->rx_buffer_len = (max_frame + 7) & ~7;
adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev)) {
atl1_down(adapter);
atl1_up(adapter);
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bcfc9488125b..fa9a4919f25d 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -905,7 +905,7 @@ static int atl2_change_mtu(struct net_device *netdev, int new_mtu)
struct atl2_hw *hw = &adapter->hw;
/* set MTU */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
hw->max_frame_size = new_mtu;
ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ETH_HLEN +
VLAN_HLEN + ETH_FCS_LEN);
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index a806dadc4196..20c6529ec135 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -1380,6 +1380,7 @@ static int bcmasp_probe(struct platform_device *pdev)
dev_err(dev, "Cannot create eth interface %d\n", i);
bcmasp_remove_intfs(priv);
of_node_put(intf_node);
+ ret = -ENOMEM;
goto of_put_exit;
}
list_add_tail(&intf->list, &priv->intfs);
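The added `ret = -ENOMEM` fixes a classic error-path bug: jumping to a shared exit label without setting the return code makes the function return whatever an earlier step left behind, often 0 for "success". A sketch of the bug class, assuming a kzalloc() failure as the trigger:

#include <linux/errno.h>
#include <linux/slab.h>

static int demo_probe(void)
{
        void *obj;
        int ret = 0;

        obj = kzalloc(16, GFP_KERNEL);
        if (!obj) {
                ret = -ENOMEM;  /* the fix: don't reach 'out' with ret == 0 */
                goto out;
        }

        kfree(obj);
out:
        return ret;
}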
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 1be6d14030bc..e5809ad5eb82 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1042,13 +1042,13 @@ static int b44_change_mtu(struct net_device *dev, int new_mtu)
/* We'll just catch it later when the
* device is up'd.
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
spin_lock_irq(&bp->lock);
b44_halt(bp);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
spin_unlock_irq(&bp->lock);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3196c4dea076..3c0e3b9828be 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1652,7 +1652,7 @@ static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index b65b8592ad75..6ec773e61182 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -7912,7 +7912,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnx2 *bp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
false);
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e2a4e1088b7f..9580ab83d387 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1262,7 +1262,7 @@ enum {
struct bnx2x_fw_stats_req {
struct stats_query_header hdr;
- struct stats_query_entry query[FP_SB_MAX_E1x+
+ struct stats_query_entry query[FP_SB_MAX_E2 +
BNX2X_FIRST_QUEUE_QUERY_IDX];
};
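Sizing the query array with FP_SB_MAX_E1x under-allocates on E2 chips, which support more fastpath status blocks; the fix sizes the shared request structure for the largest chip variant. A hedged sketch of the sizing rule with a compile-time guard (the DEMO_* constants are illustrative, not the real bnx2x values):

#include <linux/build_bug.h>

#define DEMO_FP_SB_MAX_E1X      16
#define DEMO_FP_SB_MAX_E2       18
#define DEMO_FIRST_QUERY_IDX    3

struct demo_stats_req {
        /* Must hold the worst case across all supported chips. */
        int query[DEMO_FP_SB_MAX_E2 + DEMO_FIRST_QUERY_IDX];
};

/* Catch a regression at compile time if E1x ever outgrows E2. */
static_assert(DEMO_FP_SB_MAX_E2 >= DEMO_FP_SB_MAX_E1X);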
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index c9b6acd8c892..a8e07e51418f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4902,7 +4902,7 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
* because the actual alloc size is
* only updated as part of load
*/
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!bnx2x_mtu_allows_gro(new_mtu))
dev->features &= ~NETIF_F_GRO_HW;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 58956ed8f531..c7b56a5e5425 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3634,7 +3634,7 @@ static int bnx2x_set_channels(struct net_device *dev,
}
static int bnx2x_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct bnx2x *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2c2ee79c4d77..04a623b3eee2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -76,7 +76,7 @@
NETIF_MSG_TX_ERR)
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme network driver");
#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
@@ -137,6 +137,7 @@ static const struct {
[NETXTREME_E_VF_HV] = { "Broadcom NetXtreme-E Virtual Function for Hyper-V" },
[NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" },
[NETXTREME_E_P5_VF_HV] = { "Broadcom BCM5750X NetXtreme-E Virtual Function for Hyper-V" },
+ [NETXTREME_E_P7_VF] = { "Broadcom BCM5760X Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
@@ -211,6 +212,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF },
{ PCI_VDEVICE(BROADCOM, 0x1808), .driver_data = NETXTREME_E_P5_VF_HV },
{ PCI_VDEVICE(BROADCOM, 0x1809), .driver_data = NETXTREME_E_P5_VF_HV },
+ { PCI_VDEVICE(BROADCOM, 0x1819), .driver_data = NETXTREME_E_P7_VF },
{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
{ 0 }
@@ -294,7 +296,7 @@ static bool bnxt_vf_pciid(enum board_idx idx)
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
idx == NETXTREME_S_VF || idx == NETXTREME_C_VF_HV ||
idx == NETXTREME_E_VF_HV || idx == NETXTREME_E_P5_VF ||
- idx == NETXTREME_E_P5_VF_HV);
+ idx == NETXTREME_E_P5_VF_HV || idx == NETXTREME_E_P7_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -454,8 +456,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t mapping;
unsigned int length, pad = 0;
u32 len, free_size, vlan_tag_flags, cfa_action, flags;
- u16 prod, last_frag;
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct pci_dev *pdev = bp->pdev;
+ u16 prod, last_frag, txts_prod;
struct bnxt_tx_ring_info *txr;
struct bnxt_sw_tx_bd *tx_buf;
__le32 lflags = 0;
@@ -507,23 +510,29 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
}
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
- struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp &&
+ ptp->tx_tstamp_en) {
+ if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) {
+ lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+ tx_buf->is_ts_pkt = 1;
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ } else if (!skb_is_gso(skb)) {
+ u16 seq_id, hdr_off;
- if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) &&
- atomic_dec_if_positive(&ptp->tx_avail) >= 0) {
- if (!bnxt_ptp_parse(skb, &ptp->tx_seqid,
- &ptp->tx_hdr_off)) {
+ if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) &&
+ !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) {
if (vlan_tag_flags)
- ptp->tx_hdr_off += VLAN_HLEN;
+ hdr_off += VLAN_HLEN;
lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP);
+ tx_buf->is_ts_pkt = 1;
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- } else {
- atomic_inc(&bp->ptp_cfg->tx_avail);
+
+ ptp->txts_req[txts_prod].tx_seqid = seq_id;
+ ptp->txts_req[txts_prod].tx_hdr_off = hdr_off;
+ tx_buf->txts_prod = txts_prod;
}
}
}
-
if (unlikely(skb->no_fcs))
lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);
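The xmit path above stops tracking a single in-flight timestamp via the old tx_avail atomic; instead each timestamped packet claims a slot in a small txts_req array through bnxt_ptp_get_txts_prod(), letting several PTP packets be outstanding at once. A hedged sketch of such a slot producer (the names, the fixed size, and the lock-free style are assumptions for illustration; the real driver serializes differently):

#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_TXTS_SLOTS 4       /* power of two so u16 wraparound is safe */

struct demo_txts_req {
        u16 seq_id;
        u16 hdr_off;
};

struct demo_ptp {
        struct demo_txts_req req[DEMO_TXTS_SLOTS];
        u16 prod;       /* next slot to hand out */
        u16 cons;       /* next slot to complete */
};

/* Claim a slot; fails when every slot holds an unfinished request. */
static int demo_get_txts_prod(struct demo_ptp *ptp, u16 *prod)
{
        if ((u16)(ptp->prod - ptp->cons) >= DEMO_TXTS_SLOTS)
                return -ENOSPC;

        *prod = ptp->prod++ % DEMO_TXTS_SLOTS;
        return 0;
}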
@@ -730,9 +739,6 @@ tx_done:
return NETDEV_TX_OK;
tx_dma_error:
- if (BNXT_TX_PTP_IS_SET(lflags))
- atomic_inc(&bp->ptp_cfg->tx_avail);
-
last_frag = i;
/* start back at beginning and unmap skb */
@@ -754,6 +760,13 @@ tx_dma_error:
tx_free:
dev_kfree_skb_any(skb);
tx_kick_pending:
+ if (BNXT_TX_PTP_IS_SET(lflags)) {
+ txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
+ atomic64_inc(&bp->ptp_cfg->stats.ts_err);
+ if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ /* set SKB to err so PTP worker will clean up */
+ ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO);
+ }
if (txr->kick_pending)
bnxt_txr_db_kick(bp, txr, txr->tx_prod);
txr->tx_buf_ring[txr->tx_prod].skb = NULL;
@@ -761,7 +774,8 @@ tx_kick_pending:
return NETDEV_TX_OK;
}
-static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
+/* Returns true if some remaining TX packets not processed. */
+static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
int budget)
{
struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
@@ -770,24 +784,33 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
unsigned int tx_bytes = 0;
u16 cons = txr->tx_cons;
int tx_pkts = 0;
+ bool rc = false;
while (RING_TX(bp, cons) != hw_cons) {
struct bnxt_sw_tx_bd *tx_buf;
struct sk_buff *skb;
+ bool is_ts_pkt;
int j, last;
tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
- cons = NEXT_TX(cons);
skb = tx_buf->skb;
- tx_buf->skb = NULL;
if (unlikely(!skb)) {
bnxt_sched_reset_txr(bp, txr, cons);
- return;
+ return rc;
}
+ is_ts_pkt = tx_buf->is_ts_pkt;
+ if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) {
+ rc = true;
+ break;
+ }
+
+ cons = NEXT_TX(cons);
tx_pkts++;
tx_bytes += skb->len;
+ tx_buf->skb = NULL;
+ tx_buf->is_ts_pkt = 0;
if (tx_buf->is_push) {
tx_buf->is_push = 0;
@@ -807,13 +830,11 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
skb_frag_size(&skb_shinfo(skb)->frags[j]),
DMA_TO_DEVICE);
}
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
+ if (unlikely(is_ts_pkt)) {
if (BNXT_CHIP_P5(bp)) {
/* PTP worker takes ownership of the skb */
- if (!bnxt_get_tx_ts_p5(bp, skb))
- skb = NULL;
- else
- atomic_inc(&bp->ptp_cfg->tx_avail);
+ bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod);
+ skb = NULL;
}
}
@@ -828,18 +849,22 @@ next_tx_int:
__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
bnxt_tx_avail(bp, txr), bp->tx_wake_thresh,
READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING);
+
+ return rc;
}
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
struct bnxt_tx_ring_info *txr;
+ bool more = false;
int i;
bnxt_for_each_napi_tx(i, bnapi, txr) {
if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons))
- __bnxt_tx_int(bp, txr, budget);
+ more |= __bnxt_tx_int(bp, txr, budget);
}
- bnapi->events &= ~BNXT_TX_CMP_EVENT;
+ if (!more)
+ bnapi->events &= ~BNXT_TX_CMP_EVENT;
}
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
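__bnxt_tx_int() now reports whether it stopped early (it breaks on a timestamp packet whose completion has not arrived), and bnxt_tx_int() only clears BNXT_TX_CMP_EVENT once every ring has drained, so the next NAPI poll resumes the leftover work. A sketch of that contract (the demo_* names and the readiness check are illustrative):

#include <linux/bits.h>
#include <linux/types.h>

#define DEMO_TX_EVENT   BIT(0)

static bool demo_defer(int idx)
{
        return false;   /* placeholder: e.g. timestamp not ready yet */
}

/* Returns true if some completions were left for a later poll. */
static bool demo_tx_int(int *done, int pending)
{
        while (*done < pending) {
                if (demo_defer(*done))
                        return true;
                (*done)++;
        }
        return false;
}

static void demo_poll(unsigned long *events, int *done, int pending)
{
        if (!demo_tx_int(done, pending))
                *events &= ~DEMO_TX_EVENT;      /* fully drained */
}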
@@ -1296,9 +1321,9 @@ static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
return RX_AGG_CMP_VALID(agg, *raw_cons);
}
-static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
- unsigned int len,
- dma_addr_t mapping)
+static struct sk_buff *bnxt_copy_data(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
{
struct bnxt *bp = bnapi->bp;
struct pci_dev *pdev = bp->pdev;
@@ -1318,6 +1343,39 @@ static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
bp->rx_dir);
skb_put(skb, len);
+
+ return skb;
+}
+
+static struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ return bnxt_copy_data(bnapi, data, len, mapping);
+}
+
+static struct sk_buff *bnxt_copy_xdp(struct bnxt_napi *bnapi,
+ struct xdp_buff *xdp,
+ unsigned int len,
+ dma_addr_t mapping)
+{
+ unsigned int metasize = 0;
+ u8 *data = xdp->data;
+ struct sk_buff *skb;
+
+ len = xdp->data_end - xdp->data_meta;
+ metasize = xdp->data - xdp->data_meta;
+ data = xdp->data_meta;
+
+ skb = bnxt_copy_data(bnapi, data, len, mapping);
+ if (!skb)
+ return skb;
+
+ if (metasize) {
+ skb_metadata_set(skb, metasize);
+ __skb_pull(skb, metasize);
+ }
+
return skb;
}
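bnxt_copy_xdp() preserves XDP metadata when building the skb: the program writes bytes into [data_meta, data), the copy starts at data_meta, and skb_metadata_set()/__skb_pull() record the region and move skb->data back to the packet start. A compact sketch of that handoff, assuming the skb payload was copied starting at xdp->data_meta:

#include <linux/skbuff.h>
#include <net/xdp.h>

static void demo_attach_meta(struct sk_buff *skb, struct xdp_buff *xdp)
{
        unsigned int metasize = xdp->data - xdp->data_meta;

        if (metasize) {
                skb_metadata_set(skb, metasize);
                __skb_pull(skb, metasize);      /* skb->data -> packet start */
        }
}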
@@ -1778,7 +1836,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
if (!skb) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
} else {
@@ -1788,7 +1846,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
new_data = __bnxt_alloc_rx_frag(bp, &new_mapping, GFP_ATOMIC);
if (!new_data) {
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
@@ -1804,7 +1862,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (!skb) {
skb_free_frag(data);
bnxt_abort_tpa(cpr, idx, agg_bufs);
- cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
skb_reserve(skb, bp->rx_offset);
@@ -1815,7 +1873,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
skb = bnxt_rx_agg_pages_skb(bp, cpr, skb, idx, agg_bufs, true);
if (!skb) {
/* Page reuse already handled by bnxt_rx_pages(). */
- cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
return NULL;
}
}
@@ -2073,7 +2131,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rc = -EIO;
if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
- bnapi->cp_ring.sw_stats.rx.rx_buf_errors++;
+ bnapi->cp_ring.sw_stats->rx.rx_buf_errors++;
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
!(bp->fw_cap & BNXT_FW_CAP_RING_MONITOR)) {
netdev_warn_once(bp->dev, "RX buffer error %x\n",
@@ -2101,14 +2159,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
if (xdp_active) {
- if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
+ if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
rc = 1;
goto next_rx;
}
}
if (len <= bp->rx_copy_thresh) {
- skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+ if (!xdp_active)
+ skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
+ else
+ skb = bnxt_copy_xdp(bnapi, &xdp, len, dma_addr);
bnxt_reuse_rx_data(rxr, cons, data);
if (!skb) {
if (agg_bufs) {
@@ -2186,7 +2247,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
} else {
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
if (dev->features & NETIF_F_RXCSUM)
- bnapi->cp_ring.sw_stats.rx.rx_l4_csum_errors++;
+ bnapi->cp_ring.sw_stats->rx.rx_l4_csum_errors++;
}
}
@@ -2223,7 +2284,7 @@ next_rx_no_prod_no_len:
return rc;
oom_next_rx:
- cpr->bnapi->cp_ring.sw_stats.rx.rx_oom_discards += 1;
+ cpr->sw_stats->rx.rx_oom_discards += 1;
rc = -ENOMEM;
goto next_rx;
}
@@ -2272,7 +2333,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
}
rc = bnxt_rx_pkt(bp, cpr, raw_cons, event);
if (rc && rc != -EBUSY)
- cpr->bnapi->cp_ring.sw_stats.rx.rx_netpoll_discards += 1;
+ cpr->sw_stats->rx.rx_netpoll_discards += 1;
return rc;
}
@@ -2481,6 +2542,9 @@ static bool bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2)
}
return false;
}
+ case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED:
+ netdev_warn(bp->dev, "Speed change not supported with dual rate transceivers on this board\n");
+ break;
default:
netdev_err(bp->dev, "FW reported unknown error type %u\n",
err_type);
@@ -2866,6 +2930,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
cpr->has_more_work = 1;
break;
}
+ } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) {
+ bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp);
} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
if (likely(budget))
@@ -2897,8 +2963,10 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
}
- if (event & BNXT_REDIRECT_EVENT)
+ if (event & BNXT_REDIRECT_EVENT) {
xdp_do_flush();
+ event &= ~BNXT_REDIRECT_EVENT;
+ }
if (event & BNXT_TX_EVENT) {
struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
@@ -2908,6 +2976,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
wmb();
bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
+ event &= ~BNXT_TX_EVENT;
}
cpr->cp_raw_cons = raw_cons;
@@ -2925,13 +2994,14 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi,
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ bnapi->events &= ~BNXT_RX_EVENT;
}
if (bnapi->events & BNXT_AGG_EVENT) {
struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnapi->events &= ~BNXT_AGG_EVENT;
}
- bnapi->events &= BNXT_TX_CMP_EVENT;
}
static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
@@ -3268,37 +3338,12 @@ static void bnxt_free_tx_skbs(struct bnxt *bp)
}
}
-static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
struct pci_dev *pdev = bp->pdev;
- struct bnxt_tpa_idx_map *map;
- int i, max_idx, max_agg_idx;
+ int i, max_idx;
max_idx = bp->rx_nr_pages * RX_DESC_CNT;
- max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
- if (!rxr->rx_tpa)
- goto skip_rx_tpa_free;
-
- for (i = 0; i < bp->max_tpa; i++) {
- struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
- u8 *data = tpa_info->data;
-
- if (!data)
- continue;
-
- dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
- bp->rx_buf_use_size, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
-
- tpa_info->data = NULL;
-
- skb_free_frag(data);
- }
-
-skip_rx_tpa_free:
- if (!rxr->rx_buf_ring)
- goto skip_rx_buf_free;
for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i];
@@ -3318,12 +3363,15 @@ skip_rx_tpa_free:
skb_free_frag(data);
}
}
+}
-skip_rx_buf_free:
- if (!rxr->rx_agg_ring)
- goto skip_rx_agg_free;
+static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ int i, max_idx;
+
+ max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
- for (i = 0; i < max_agg_idx; i++) {
+ for (i = 0; i < max_idx; i++) {
struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i];
struct page *page = rx_agg_buf->page;
@@ -3335,6 +3383,45 @@ skip_rx_buf_free:
page_pool_recycle_direct(rxr->page_pool, page);
}
+}
+
+static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+ struct pci_dev *pdev = bp->pdev;
+ struct bnxt_tpa_idx_map *map;
+ int i;
+
+ if (!rxr->rx_tpa)
+ goto skip_rx_tpa_free;
+
+ for (i = 0; i < bp->max_tpa; i++) {
+ struct bnxt_tpa_info *tpa_info = &rxr->rx_tpa[i];
+ u8 *data = tpa_info->data;
+
+ if (!data)
+ continue;
+
+ dma_unmap_single_attrs(&pdev->dev, tpa_info->mapping,
+ bp->rx_buf_use_size, bp->rx_dir,
+ DMA_ATTR_WEAK_ORDERING);
+
+ tpa_info->data = NULL;
+
+ skb_free_frag(data);
+ }
+
+skip_rx_tpa_free:
+ if (!rxr->rx_buf_ring)
+ goto skip_rx_buf_free;
+
+ bnxt_free_one_rx_ring(bp, rxr);
+
+skip_rx_buf_free:
+ if (!rxr->rx_agg_ring)
+ goto skip_rx_agg_free;
+
+ bnxt_free_one_rx_agg_ring(bp, rxr);
skip_rx_agg_free:
map = rxr->rx_tpa_idx_map;
@@ -3551,14 +3638,15 @@ static void bnxt_free_rx_rings(struct bnxt *bp)
}
static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
- struct bnxt_rx_ring_info *rxr)
+ struct bnxt_rx_ring_info *rxr,
+ int numa_node)
{
struct page_pool_params pp = { 0 };
pp.pool_size = bp->rx_agg_ring_size;
if (BNXT_RX_PAGE_MODE(bp))
pp.pool_size += bp->rx_ring_size;
- pp.nid = dev_to_node(&bp->pdev->dev);
+ pp.nid = numa_node;
pp.napi = &rxr->bnapi->napi;
pp.netdev = bp->dev;
pp.dev = &bp->pdev->dev;
@@ -3578,7 +3666,8 @@ static int bnxt_alloc_rx_page_pool(struct bnxt *bp,
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
- int i, rc = 0, agg_rings = 0;
+ int numa_node = dev_to_node(&bp->pdev->dev);
+ int i, rc = 0, agg_rings = 0, cpu;
if (!bp->rx_ring)
return -ENOMEM;
@@ -3589,10 +3678,15 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
struct bnxt_ring_struct *ring;
+ int cpu_node;
ring = &rxr->rx_ring_struct;
- rc = bnxt_alloc_rx_page_pool(bp, rxr);
+ cpu = cpumask_local_spread(i, numa_node);
+ cpu_node = cpu_to_node(cpu);
+ netdev_dbg(bp->dev, "Allocating page pool for rx_ring[%d] on numa_node: %d\n",
+ i, cpu_node);
+ rc = bnxt_alloc_rx_page_pool(bp, rxr, cpu_node);
if (rc)
return rc;
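Instead of placing every page pool on the device's NUMA node, each RX ring is now spread across that node's CPUs with cpumask_local_spread() and the pool allocates from the chosen CPU's node, keeping buffers local to the core that will process them. A sketch of the node selection (the demo_* name is illustrative):

#include <linux/cpumask.h>
#include <linux/topology.h>

static int demo_ring_node(int ring, int dev_node)
{
        /* Pick the ring's CPU near the device, then use that CPU's node. */
        int cpu = cpumask_local_spread(ring, dev_node);

        return cpu_to_node(cpu);
}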
@@ -3851,13 +3945,12 @@ static int bnxt_alloc_cp_sub_ring(struct bnxt *bp,
static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
- int i, j, rc, ulp_base_vec, ulp_msix;
+ int i, j, rc, ulp_msix;
int tcs = bp->num_tc;
if (!tcs)
tcs = 1;
ulp_msix = bnxt_get_ulp_msix_num(bp);
- ulp_base_vec = bnxt_get_ulp_msix_base(bp);
for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr, *cpr2;
@@ -3876,10 +3969,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
if (rc)
return rc;
- if (ulp_msix && i >= ulp_base_vec)
- ring->map_idx = i + ulp_msix;
- else
- ring->map_idx = i;
+ ring->map_idx = ulp_msix + i;
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
continue;
@@ -3909,6 +3999,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
if (rc)
return rc;
cpr2->bnapi = bnapi;
+ cpr2->sw_stats = cpr->sw_stats;
cpr2->cp_idx = k;
if (!k && rx) {
bp->rx_ring[i].rx_cpr = cpr2;
@@ -3927,6 +4018,63 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
return 0;
}
+static void bnxt_init_rx_ring_struct(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_desc_ring;
+ rmem->dma_arr = rxr->rx_desc_mapping;
+ rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_buf_ring;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->nr_pages = bp->rx_agg_nr_pages;
+ rmem->page_size = HW_RXBD_RING_SIZE;
+ rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
+ rmem->dma_arr = rxr->rx_agg_desc_mapping;
+ rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
+ rmem->vmem = (void **)&rxr->rx_agg_ring;
+}
+
+static void bnxt_reset_rx_ring_struct(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_mem_info *rmem;
+ struct bnxt_ring_struct *ring;
+ int i;
+
+ rxr->page_pool->p.napi = NULL;
+ rxr->page_pool = NULL;
+ memset(&rxr->xdp_rxq, 0, sizeof(struct xdp_rxq_info));
+
+ ring = &rxr->rx_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->pg_tbl = NULL;
+ rmem->pg_tbl_map = 0;
+ for (i = 0; i < rmem->nr_pages; i++) {
+ rmem->pg_arr[i] = NULL;
+ rmem->dma_arr[i] = 0;
+ }
+ *rmem->vmem = NULL;
+
+ ring = &rxr->rx_agg_ring_struct;
+ rmem = &ring->ring_mem;
+ rmem->pg_tbl = NULL;
+ rmem->pg_tbl_map = 0;
+ for (i = 0; i < rmem->nr_pages; i++) {
+ rmem->pg_arr[i] = NULL;
+ rmem->dma_arr[i] = 0;
+ }
+ *rmem->vmem = NULL;
+}
+
static void bnxt_init_ring_struct(struct bnxt *bp)
{
int i, j;
@@ -4009,37 +4157,55 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
}
}
-static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
{
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
- struct net_device *dev = bp->dev;
u32 prod;
int i;
prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) {
- netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
+ netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_nr, i, bp->rx_ring_size);
break;
}
prod = NEXT_RX(prod);
}
rxr->rx_prod = prod;
+}
- if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
- return 0;
+static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ int ring_nr)
+{
+ u32 prod;
+ int i;
prod = rxr->rx_agg_prod;
for (i = 0; i < bp->rx_agg_ring_size; i++) {
if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) {
- netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
+ netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n",
ring_nr, i, bp->rx_ring_size);
break;
}
prod = NEXT_RX_AGG(prod);
}
rxr->rx_agg_prod = prod;
+}
+
+static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr];
+ int i;
+
+ bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr);
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return 0;
+
+ bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr);
if (rxr->rx_tpa) {
dma_addr_t mapping;
@@ -4058,9 +4224,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr)
return 0;
}
-static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
{
- struct bnxt_rx_ring_info *rxr;
struct bnxt_ring_struct *ring;
u32 type;
@@ -4070,28 +4236,43 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
if (NET_IP_ALIGN == 2)
type |= RX_BD_FLAGS_SOP;
- rxr = &bp->rx_ring[ring_nr];
ring = &rxr->rx_ring_struct;
bnxt_init_rxbd_pages(ring, type);
-
- netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
- &rxr->bnapi->napi);
-
- if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
- bpf_prog_add(bp->xdp_prog, 1);
- rxr->xdp_prog = bp->xdp_prog;
- }
ring->fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring;
+ u32 type;
ring = &rxr->rx_agg_ring_struct;
ring->fw_ring_id = INVALID_HW_RING_ID;
-
if ((bp->flags & BNXT_FLAG_AGG_RINGS)) {
type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
bnxt_init_rxbd_pages(ring, type);
}
+}
+
+static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
+{
+ struct bnxt_rx_ring_info *rxr;
+
+ rxr = &bp->rx_ring[ring_nr];
+ bnxt_init_one_rx_ring_rxbd(bp, rxr);
+
+ netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX,
+ &rxr->bnapi->napi);
+
+ if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
+ bpf_prog_add(bp->xdp_prog, 1);
+ rxr->xdp_prog = bp->xdp_prog;
+ }
+
+ bnxt_init_one_rx_agg_ring_rxbd(bp, rxr);
return bnxt_alloc_one_rx_ring(bp, ring_nr);
}
@@ -4233,6 +4414,7 @@ static void bnxt_init_vnics(struct bnxt *bp)
int j;
vnic->fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->vnic_id = i;
for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
@@ -4749,6 +4931,9 @@ static void bnxt_free_ring_stats(struct bnxt *bp)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
bnxt_free_stats_mem(bp, &cpr->stats);
+
+ kfree(cpr->sw_stats);
+ cpr->sw_stats = NULL;
}
}
@@ -4763,6 +4948,10 @@ static int bnxt_alloc_stats(struct bnxt *bp)
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ cpr->sw_stats = kzalloc(sizeof(*cpr->sw_stats), GFP_KERNEL);
+ if (!cpr->sw_stats)
+ return -ENOMEM;
+
cpr->stats.len = size;
rc = bnxt_alloc_stats_mem(bp, &cpr->stats, !i);
if (rc)
@@ -4867,7 +5056,7 @@ void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr)
list_del_init(&fltr->list);
}
-void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
+static void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all)
{
struct bnxt_filter_base *usr_fltr, *tmp;
@@ -5780,8 +5969,25 @@ void bnxt_fill_ipv6_mask(__be32 mask[4])
static void
bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp,
struct hwrm_cfa_ntuple_filter_alloc_input *req,
- u16 rxq)
+ struct bnxt_ntuple_filter *fltr)
{
+ u16 rxq = fltr->base.rxq;
+
+ if (fltr->base.flags & BNXT_ACT_RSS_CTX) {
+ struct ethtool_rxfh_context *ctx;
+ struct bnxt_rss_ctx *rss_ctx;
+ struct bnxt_vnic_info *vnic;
+
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx,
+ fltr->base.fw_vnic_id);
+ if (ctx) {
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+ vnic = &rss_ctx->vnic;
+
+ req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
+ }
+ return;
+ }
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
struct bnxt_vnic_info *vnic;
u32 enables;
@@ -5822,7 +6028,7 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req->flags =
cpu_to_le32(CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DROP);
} else if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) {
- bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr->base.rxq);
+ bnxt_cfg_rfs_ring_tbl_idx(bp, req, fltr);
} else {
vnic = &bp->vnic_info[fltr->base.rxq + 1];
req->dst_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -5930,9 +6136,9 @@ static void bnxt_hwrm_vnic_update_tunl_tpa(struct bnxt *bp,
req->tnl_tpa_en_bitmap = cpu_to_le32(tunl_tpa_bmap);
}
-static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
+int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u32 tpa_flags)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX;
struct hwrm_vnic_tpa_cfg_input *req;
int rc;
@@ -6027,16 +6233,19 @@ static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp)
entries = HW_HASH_INDEX_SIZE;
bp->rss_indir_tbl_entries = entries;
- bp->rss_indir_tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl),
- GFP_KERNEL);
+ bp->rss_indir_tbl =
+ kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL);
if (!bp->rss_indir_tbl)
return -ENOMEM;
+
return 0;
}
-static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
+ struct ethtool_rxfh_context *rss_ctx)
{
u16 max_rings, max_entries, pad, i;
+ u32 *rss_indir_tbl;
if (!bp->rx_nr_rings)
return;
@@ -6047,18 +6256,22 @@ static void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp)
max_rings = bp->rx_nr_rings;
max_entries = bnxt_get_rxfh_indir_size(bp->dev);
+ if (rss_ctx)
+ rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx);
+ else
+ rss_indir_tbl = &bp->rss_indir_tbl[0];
for (i = 0; i < max_entries; i++)
- bp->rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
+ rss_indir_tbl[i] = ethtool_rxfh_indir_default(i, max_rings);
pad = bp->rss_indir_tbl_entries - max_entries;
if (pad)
- memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
+ memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl));
}
static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
{
- u16 i, tbl_size, max_ring = 0;
+ u32 i, tbl_size, max_ring = 0;
if (!bp->rss_indir_tbl)
return 0;
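The default indirection fill above now writes into either the driver's own table or an ethtool RSS context's table, but the spread itself is unchanged: entry i targets ring i modulo the ring count, which is exactly what ethtool_rxfh_indir_default() computes. A sketch of that fill loop over a caller-supplied table:

#include <linux/ethtool.h>

static void demo_fill_indir(u32 *tbl, unsigned int entries,
                            unsigned int n_rings)
{
        unsigned int i;

        for (i = 0; i < entries; i++)
                tbl[i] = ethtool_rxfh_indir_default(i, n_rings);
}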
@@ -6109,6 +6322,8 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp,
if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG)
j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings);
+ else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG)
+ j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i];
else
j = bp->rss_indir_tbl[i];
rxr = &bp->rx_ring[j];
@@ -6146,9 +6361,9 @@ __bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct hwrm_vnic_rss_cfg_input *req,
req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
}
-static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
+static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ bool set_rss)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input *req;
int rc;
@@ -6166,9 +6381,9 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
return hwrm_req_send(bp, req);
}
-static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
+static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, bool set_rss)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input *req;
dma_addr_t ring_tbl_map;
u32 i, nr_ctxs;
@@ -6221,9 +6436,8 @@ static void bnxt_hwrm_update_rss_hash_cfg(struct bnxt *bp)
hwrm_req_drop(bp, req);
}
-static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
+static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_plcmodes_cfg_input *req;
int rc;
@@ -6248,7 +6462,8 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
return hwrm_req_send(bp, req);
}
-static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_free_input *req;
@@ -6257,10 +6472,10 @@ static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
return;
req->rss_cos_lb_ctx_id =
- cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
+ cpu_to_le16(vnic->fw_rss_cos_lb_ctx[ctx_idx]);
hwrm_req_send(bp, req);
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}
static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
@@ -6272,13 +6487,14 @@ static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
- bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
+ bnxt_hwrm_vnic_ctx_free_one(bp, vnic, j);
}
}
bp->rsscos_nr_ctxs = 0;
}
-static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic, u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp;
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req;
@@ -6291,7 +6507,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
resp = hwrm_req_hold(bp, req);
rc = hwrm_req_send(bp, req);
if (!rc)
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
+ vnic->fw_rss_cos_lb_ctx[ctx_idx] =
le16_to_cpu(resp->rss_cos_lb_ctx_id);
hwrm_req_drop(bp, req);
@@ -6305,10 +6521,9 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
}
-int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
struct bnxt_vnic_info *vnic0 = &bp->vnic_info[BNXT_VNIC_DEFAULT];
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_cfg_input *req;
unsigned int ring = 0, grp_idx;
u16 def_vlan = 0;
@@ -6356,8 +6571,8 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
ring = 0;
else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
- ring = vnic_id - 1;
- else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
+ ring = vnic->vnic_id - 1;
+ else if ((vnic->vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
ring = bp->rx_nr_rings - 1;
grp_idx = bp->rx_ring[ring].bnapi->index;
@@ -6373,25 +6588,25 @@ vnic_mru:
#endif
if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
- if (!vnic_id && bnxt_ulp_registered(bp->edev))
+ if (vnic->vnic_id == BNXT_VNIC_DEFAULT && bnxt_ulp_registered(bp->edev))
req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
return hwrm_req_send(bp, req);
}
-static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
+static void bnxt_hwrm_vnic_free_one(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
{
- if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
+ if (vnic->fw_vnic_id != INVALID_HW_RING_ID) {
struct hwrm_vnic_free_input *req;
if (hwrm_req_init(bp, req, HWRM_VNIC_FREE))
return;
- req->vnic_id =
- cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
+ req->vnic_id = cpu_to_le32(vnic->fw_vnic_id);
hwrm_req_send(bp, req);
- bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
+ vnic->fw_vnic_id = INVALID_HW_RING_ID;
}
}
@@ -6400,15 +6615,14 @@ static void bnxt_hwrm_vnic_free(struct bnxt *bp)
u16 i;
for (i = 0; i < bp->nr_vnics; i++)
- bnxt_hwrm_vnic_free_one(bp, i);
+ bnxt_hwrm_vnic_free_one(bp, &bp->vnic_info[i]);
}
-static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
- unsigned int start_rx_ring_idx,
- unsigned int nr_rings)
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ unsigned int start_rx_ring_idx,
+ unsigned int nr_rings)
{
unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_alloc_output *resp;
struct hwrm_vnic_alloc_input *req;
int rc;
@@ -6434,7 +6648,7 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
vnic_no_ring_grps:
for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
- if (vnic_id == BNXT_VNIC_DEFAULT)
+ if (vnic->vnic_id == BNXT_VNIC_DEFAULT)
req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
resp = hwrm_req_hold(bp, req);
@@ -6596,6 +6810,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
switch (ring_type) {
case HWRM_RING_ALLOC_TX: {
struct bnxt_tx_ring_info *txr;
+ u16 flags = 0;
txr = container_of(ring, struct bnxt_tx_ring_info,
tx_ring_struct);
@@ -6609,6 +6824,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
if (bp->flags & BNXT_FLAG_TX_COAL_CMPL)
req->cmpl_coal_cnt =
RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64;
+ if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg)
+ flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE;
+ req->flags = cpu_to_le16(flags);
break;
}
case HWRM_RING_ALLOC_RX:
@@ -6782,6 +7000,48 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
bnxt_set_db_mask(bp, db, ring_type);
}
+static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ struct bnxt_napi *bnapi = rxr->bnapi;
+ u32 type = HWRM_RING_ALLOC_RX;
+ u32 map_idx = bnapi->index;
+ int rc;
+
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
+ bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
+static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 type = HWRM_RING_ALLOC_AGG;
+ u32 grp_idx = ring->grp_idx;
+ u32 map_idx;
+ int rc;
+
+ map_idx = grp_idx + bp->rx_nr_rings;
+ rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ if (rc)
+ return rc;
+
+ bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
+ ring->fw_ring_id);
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
+
+ return 0;
+}
+
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS);
@@ -6847,24 +7107,21 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
}
- type = HWRM_RING_ALLOC_RX;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- struct bnxt_napi *bnapi = rxr->bnapi;
- u32 map_idx = bnapi->index;
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
if (rc)
goto err_out;
- bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
/* If we have agg rings, post agg buffers first. */
if (!agg_rings)
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
- bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr;
+ struct bnxt_napi *bnapi = rxr->bnapi;
u32 type2 = HWRM_RING_ALLOC_CMPL;
+ struct bnxt_ring_struct *ring;
+ u32 map_idx = bnapi->index;
ring = &cpr2->cp_ring_struct;
ring->handle = BNXT_SET_NQ_HDL(cpr2);
@@ -6878,23 +7135,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
}
if (agg_rings) {
- type = HWRM_RING_ALLOC_AGG;
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring =
- &rxr->rx_agg_ring_struct;
- u32 grp_idx = ring->grp_idx;
- u32 map_idx = grp_idx + bp->rx_nr_rings;
-
- rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
+ rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]);
if (rc)
goto err_out;
-
- bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
- ring->fw_ring_id);
- bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
- bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
- bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
}
}
err_out:
@@ -6934,6 +7178,50 @@ exit:
return 0;
}
+static void bnxt_hwrm_rx_ring_free(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 cmpl_ring_id;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ hwrm_ring_free_send_msg(bp, ring,
+ RING_FREE_REQ_RING_TYPE_RX,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID;
+}
+
+static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ bool close_path)
+{
+ struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
+ u32 grp_idx = rxr->bnapi->index;
+ u32 type, cmpl_ring_id;
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ type = RING_FREE_REQ_RING_TYPE_RX_AGG;
+ else
+ type = RING_FREE_REQ_RING_TYPE_RX;
+
+ if (ring->fw_ring_id == INVALID_HW_RING_ID)
+ return;
+
+ cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
+ hwrm_ring_free_send_msg(bp, ring, type,
+ close_path ? cmpl_ring_id :
+ INVALID_HW_RING_ID);
+ ring->fw_ring_id = INVALID_HW_RING_ID;
+ bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID;
+}
+
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
u32 type;
@@ -6958,42 +7246,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
- u32 grp_idx = rxr->bnapi->index;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
-
- hwrm_ring_free_send_msg(bp, ring,
- RING_FREE_REQ_RING_TYPE_RX,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[grp_idx].rx_fw_ring_id =
- INVALID_HW_RING_ID;
- }
- }
-
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- type = RING_FREE_REQ_RING_TYPE_RX_AGG;
- else
- type = RING_FREE_REQ_RING_TYPE_RX;
- for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
- struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
- u32 grp_idx = rxr->bnapi->index;
-
- if (ring->fw_ring_id != INVALID_HW_RING_ID) {
- u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
-
- hwrm_ring_free_send_msg(bp, ring, type,
- close_path ? cmpl_ring_id :
- INVALID_HW_RING_ID);
- ring->fw_ring_id = INVALID_HW_RING_ID;
- bp->grp_info[grp_idx].agg_fw_ring_id =
- INVALID_HW_RING_ID;
- }
+ bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path);
+ bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path);
}
/* The completion rings are about to be freed. After that the
@@ -7266,17 +7520,7 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, struct bnxt_hw_rings *hwr)
int bnxt_nq_rings_in_use(struct bnxt *bp)
{
- int cp = bp->cp_nr_rings;
- int ulp_msix, ulp_base;
-
- ulp_msix = bnxt_get_ulp_msix_num(bp);
- if (ulp_msix) {
- ulp_base = bnxt_get_ulp_msix_base(bp);
- cp += ulp_msix;
- if ((ulp_base + ulp_msix) > cp)
- cp = ulp_base + ulp_msix;
- }
- return cp;
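+ /* Illustrative note (added for clarity, not part of the patch): with
+ * cp_nr_rings == 8 and 2 MSI-X set aside for the RoCE aux device, 10
+ * NQs are reported as in use.
+ */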
+ return bp->cp_nr_rings + bnxt_get_ulp_msix_num(bp);
}
static int bnxt_cp_rings_in_use(struct bnxt *bp)
@@ -7292,16 +7536,7 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp)
static int bnxt_get_func_stat_ctxs(struct bnxt *bp)
{
- int ulp_stat = bnxt_get_ulp_stat_ctxs(bp);
- int cp = bp->cp_nr_rings;
-
- if (!ulp_stat)
- return cp;
-
- if (bnxt_nq_rings_in_use(bp) > cp + bnxt_get_ulp_msix_num(bp))
- return bnxt_get_ulp_msix_base(bp) + ulp_stat;
-
- return cp + ulp_stat;
+ return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp);
}
static int bnxt_get_total_rss_ctxs(struct bnxt *bp, struct bnxt_hw_rings *hwr)
@@ -7333,7 +7568,7 @@ static void bnxt_check_rss_tbl_no_rmgr(struct bnxt *bp)
if (hw_resc->resv_rx_rings != bp->rx_nr_rings) {
hw_resc->resv_rx_rings = bp->rx_nr_rings;
if (!netif_is_rxfh_configured(bp->dev))
- bnxt_set_dflt_rss_indir_tbl(bp);
+ bnxt_set_dflt_rss_indir_tbl(bp, NULL);
}
}
@@ -7341,7 +7576,7 @@ static int bnxt_get_total_vnics(struct bnxt *bp, int rx_rings)
{
if (bp->flags & BNXT_FLAG_RFS) {
if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
- return 2;
+ return 2 + bp->num_rss_ctx;
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
return rx_rings + 1;
}
@@ -7356,19 +7591,20 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp)
int rx = bp->rx_nr_rings, stat;
int vnic, grp = rx;
- if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
- bp->hwrm_spec_code >= 0x10601)
- return true;
-
/* Old firmware does not need RX ring reservations but we still
* need to set up a default RSS map when needed. With new firmware
* we go through RX ring reservations first and then set up the
* RSS map for the successfully reserved RX rings when needed.
*/
- if (!BNXT_NEW_RM(bp)) {
+ if (!BNXT_NEW_RM(bp))
bnxt_check_rss_tbl_no_rmgr(bp);
+
+ if (hw_resc->resv_tx_rings != bp->tx_nr_rings &&
+ bp->hwrm_spec_code >= 0x10601)
+ return true;
+
+ if (!BNXT_NEW_RM(bp))
return false;
- }
vnic = bnxt_get_total_vnics(bp, rx);
@@ -7409,17 +7645,32 @@ static bool bnxt_rings_ok(struct bnxt *bp, struct bnxt_hw_rings *hwr)
hwr->stat && (hwr->cp_p5 || !(bp->flags & BNXT_FLAG_CHIP_P5_PLUS));
}
+static int bnxt_get_avail_msix(struct bnxt *bp, int num);
+
static int __bnxt_reserve_rings(struct bnxt *bp)
{
struct bnxt_hw_rings hwr = {0};
- int rx_rings, rc;
+ int rx_rings, old_rx_rings, rc;
+ int cp = bp->cp_nr_rings;
+ int ulp_msix = 0;
bool sh = false;
int tx_cp;
if (!bnxt_need_reserve_rings(bp))
return 0;
- hwr.cp = bnxt_nq_rings_in_use(bp);
+ if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
+ ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
+ if (!ulp_msix)
+ bnxt_set_ulp_stat_ctxs(bp, 0);
+
+ if (ulp_msix > bp->ulp_num_msix_want)
+ ulp_msix = bp->ulp_num_msix_want;
+ hwr.cp = cp + ulp_msix;
+ } else {
+ hwr.cp = bnxt_nq_rings_in_use(bp);
+ }
+
hwr.tx = bp->tx_nr_rings;
hwr.rx = bp->rx_nr_rings;
if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -7434,6 +7685,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
hwr.grp = bp->rx_nr_rings;
hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
hwr.stat = bnxt_get_func_stat_ctxs(bp);
+ old_rx_rings = bp->hw_resc.resv_rx_rings;
rc = bnxt_hwrm_reserve_rings(bp, &hwr);
if (rc)
@@ -7488,8 +7740,22 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (!bnxt_rings_ok(bp, &hwr))
return -ENOMEM;
- if (!netif_is_rxfh_configured(bp->dev))
- bnxt_set_dflt_rss_indir_tbl(bp);
+ if (old_rx_rings != bp->hw_resc.resv_rx_rings &&
+ !netif_is_rxfh_configured(bp->dev))
+ bnxt_set_dflt_rss_indir_tbl(bp, NULL);
+
+ if (!bnxt_ulp_registered(bp->edev) && BNXT_NEW_RM(bp)) {
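+ /* Trim the ULP's MSI-X and stat context shares down to what firmware
+ * actually reserved (illustrative note added for clarity: if 10 IRQs
+ * were granted and the netdev uses 8, at most 2 remain for the ULP).
+ */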
+ int resv_msix, resv_ctx, ulp_ctxs;
+ struct bnxt_hw_resc *hw_resc;
+
+ hw_resc = &bp->hw_resc;
+ resv_msix = hw_resc->resv_irqs - bp->cp_nr_rings;
+ ulp_msix = min_t(int, resv_msix, ulp_msix);
+ bnxt_set_ulp_msix_num(bp, ulp_msix);
+ resv_ctx = hw_resc->resv_stat_ctxs - bp->cp_nr_rings;
+ ulp_ctxs = min(resv_ctx, bnxt_get_ulp_stat_ctxs(bp));
+ bnxt_set_ulp_stat_ctxs(bp, ulp_ctxs);
+ }
return rc;
}
@@ -8744,7 +9010,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
u8 flags;
int rc;
- if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) {
+ if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) {
rc = -ENODEV;
goto no_ptp;
}
@@ -8760,7 +9026,8 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
goto exit;
flags = resp->flags;
- if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
+ if (BNXT_CHIP_P5_AND_MINUS(bp) &&
+ !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) {
rc = -ENODEV;
goto exit;
}
@@ -8773,10 +9040,13 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
ptp->bp = bp;
bp->ptp_cfg = ptp;
}
- if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) {
+
+ if (flags &
+ (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK |
+ PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) {
ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower);
ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper);
- } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+ } else if (BNXT_CHIP_P5(bp)) {
ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER;
ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER;
} else {
@@ -8858,6 +9128,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS;
if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED)
bp->flags |= BNXT_FLAG_UDP_GSO_CAP;
+ if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -8908,6 +9180,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
#endif
}
+ bp->tso_max_segs = le16_to_cpu(resp->max_tso_segs);
hwrm_func_qcaps_exit:
hwrm_req_drop(bp, req);
@@ -9668,7 +9941,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
else if (BNXT_NO_FW_ACCESS(bp))
return 0;
for (i = 0; i < bp->nr_vnics; i++) {
- rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
+ rc = bnxt_hwrm_vnic_set_tpa(bp, &bp->vnic_info[i], tpa_flags);
if (rc) {
netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
i, rc);
@@ -9683,7 +9956,7 @@ static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
int i;
for (i = 0; i < bp->nr_vnics; i++)
- bnxt_hwrm_vnic_set_rss(bp, i, false);
+ bnxt_hwrm_vnic_set_rss(bp, &bp->vnic_info[i], false);
}
static void bnxt_clear_vnic(struct bnxt *bp)
@@ -9761,28 +10034,27 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
return hwrm_req_send(bp, req);
}
-static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+static int __bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
- struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
int rc;
if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
goto skip_rss_ctx;
/* allocate context for vnic */
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 0);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
bp->rsscos_nr_ctxs++;
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, 1);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
bp->rsscos_nr_ctxs++;
@@ -9790,26 +10062,26 @@ static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
skip_rss_ctx:
/* configure default vnic, ring grp */
- rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
/* Enable RSS hashing on vnic */
- rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
+ rc = bnxt_hwrm_vnic_set_rss(bp, vnic, true);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
goto vnic_setup_err;
}
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
}
}
@@ -9817,16 +10089,33 @@ vnic_setup_err:
return rc;
}
-static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
+int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
+{
+ int rc;
+
+ rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic, true);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
+ vnic->vnic_id, rc);
+ return rc;
+ }
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
+ if (rc)
+ netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
+ vnic->vnic_id, rc);
+ return rc;
+}
+
+int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
int rc, i, nr_ctxs;
nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings);
for (i = 0; i < nr_ctxs; i++) {
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic, i);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
- vnic_id, i, rc);
+ vnic->vnic_id, i, rc);
break;
}
bp->rsscos_nr_ctxs++;
@@ -9834,63 +10123,57 @@ static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
if (i < nr_ctxs)
return -ENOMEM;
- rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
- vnic_id, rc);
- return rc;
- }
- rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
- if (rc) {
- netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
- vnic_id, rc);
+ rc = bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic);
+ if (rc)
return rc;
- }
+
if (bp->flags & BNXT_FLAG_AGG_RINGS) {
- rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_set_hds(bp, vnic);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
}
}
return rc;
}
-static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
+static int bnxt_setup_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
- return __bnxt_setup_vnic_p5(bp, vnic_id);
+ return __bnxt_setup_vnic_p5(bp, vnic);
else
- return __bnxt_setup_vnic(bp, vnic_id);
+ return __bnxt_setup_vnic(bp, vnic);
}
-static int bnxt_alloc_and_setup_vnic(struct bnxt *bp, u16 vnic_id,
+static int bnxt_alloc_and_setup_vnic(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic,
u16 start_rx_ring_idx, int rx_rings)
{
int rc;
- rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, start_rx_ring_idx, rx_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, start_rx_ring_idx, rx_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
- vnic_id, rc);
+ vnic->vnic_id, rc);
return rc;
}
- return bnxt_setup_vnic(bp, vnic_id);
+ return bnxt_setup_vnic(bp, vnic);
}
static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic;
int i, rc = 0;
- if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
- return bnxt_alloc_and_setup_vnic(bp, BNXT_VNIC_NTUPLE, 0,
- bp->rx_nr_rings);
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
+ vnic = &bp->vnic_info[BNXT_VNIC_NTUPLE];
+ return bnxt_alloc_and_setup_vnic(bp, vnic, 0, bp->rx_nr_rings);
+ }
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
return 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
- struct bnxt_vnic_info *vnic;
u16 vnic_id = i + 1;
u16 ring_id = i;
@@ -9901,12 +10184,82 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
vnic->flags |= BNXT_VNIC_RFS_FLAG;
if (bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
- if (bnxt_alloc_and_setup_vnic(bp, vnic_id, ring_id, 1))
+ if (bnxt_alloc_and_setup_vnic(bp, &bp->vnic_info[vnic_id], ring_id, 1))
break;
}
return rc;
}
+void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
+ bool all)
+{
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+ struct bnxt_filter_base *usr_fltr, *tmp;
+ struct bnxt_ntuple_filter *ntp_fltr;
+ int i;
+
+ if (netif_running(bp->dev)) {
+ bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic);
+ for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) {
+ if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID)
+ bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i);
+ }
+ }
+ if (!all)
+ return;
+
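+ /* Note added for clarity: past this point the context is fully
+ * retired: its ntuple filters and DMA'd RSS table are dropped. The
+ * !all path above keeps them so the context can be re-created by
+ * bnxt_hwrm_realloc_rss_ctx_vnic() on the next open.
+ */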
+ list_for_each_entry_safe(usr_fltr, tmp, &bp->usr_fltr_list, list) {
+ if ((usr_fltr->flags & BNXT_ACT_RSS_CTX) &&
+ usr_fltr->fw_vnic_id == rss_ctx->index) {
+ ntp_fltr = container_of(usr_fltr,
+ struct bnxt_ntuple_filter,
+ base);
+ bnxt_hwrm_cfa_ntuple_filter_free(bp, ntp_fltr);
+ bnxt_del_ntp_filter(bp, ntp_fltr);
+ bnxt_del_one_usr_fltr(bp, usr_fltr);
+ }
+ }
+
+ if (vnic->rss_table)
+ dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size,
+ vnic->rss_table,
+ vnic->rss_table_dma_addr);
+ bp->num_rss_ctx--;
+}
+
+static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp)
+{
+ bool set_tpa = !!(bp->flags & BNXT_FLAG_TPA);
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+ struct bnxt_vnic_info *vnic = &rss_ctx->vnic;
+
+ if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) ||
+ bnxt_hwrm_vnic_set_tpa(bp, vnic, set_tpa) ||
+ __bnxt_setup_vnic_p5(bp, vnic)) {
+ netdev_err(bp->dev, "Failed to restore RSS ctx %d\n",
+ rss_ctx->index);
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ ethtool_rxfh_context_lost(bp->dev, rss_ctx->index);
+ }
+ }
+}
+
+static void bnxt_clear_rss_ctxs(struct bnxt *bp)
+{
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
+
+ xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) {
+ struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_del_one_rss_ctx(bp, rss_ctx, false);
+ }
+}
+
/* Allow PF, trusted VFs and VFs with default VLAN to be in promiscuous mode */
static bool bnxt_promisc_ok(struct bnxt *bp)
{
@@ -9919,16 +10272,17 @@ static bool bnxt_promisc_ok(struct bnxt *bp)
static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
{
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[1];
int rc = 0;
- rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, bp->rx_nr_rings - 1, 1);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
return rc;
}
- rc = bnxt_hwrm_vnic_cfg(bp, 1);
+ rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
rc);
@@ -9971,7 +10325,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rx_nr_rings--;
/* default vnic 0 */
- rc = bnxt_hwrm_vnic_alloc(bp, BNXT_VNIC_DEFAULT, 0, rx_nr_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, rx_nr_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
goto err_out;
@@ -9980,7 +10334,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
if (BNXT_VF(bp))
bnxt_hwrm_func_qcfg(bp);
- rc = bnxt_setup_vnic(bp, BNXT_VNIC_DEFAULT);
+ rc = bnxt_setup_vnic(bp, vnic);
if (rc)
goto err_out;
if (bp->rss_cap & BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA)
@@ -10295,19 +10649,10 @@ unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp)
return bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_func_stat_ctxs(bp);
}
-int bnxt_get_avail_msix(struct bnxt *bp, int num)
+static int bnxt_get_avail_msix(struct bnxt *bp, int num)
{
- int max_cp = bnxt_get_max_func_cp_rings(bp);
int max_irq = bnxt_get_max_func_irqs(bp);
int total_req = bp->cp_nr_rings + num;
- int max_idx, avail_msix;
-
- max_idx = bp->total_irqs;
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
- max_idx = min_t(int, bp->total_irqs, max_cp);
- avail_msix = max_idx - bp->cp_nr_rings;
- if (!BNXT_NEW_RM(bp) || avail_msix >= num)
- return avail_msix;
if (max_irq < total_req) {
num = max_irq - bp->cp_nr_rings;
@@ -10434,13 +10779,23 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
{
bool irq_cleared = false;
int tcs = bp->num_tc;
+ int irqs_required;
int rc;
if (!bnxt_need_reserve_rings(bp))
return 0;
- if (irq_re_init && BNXT_NEW_RM(bp) &&
- bnxt_get_num_msix(bp) != bp->total_irqs) {
+ if (BNXT_NEW_RM(bp) && !bnxt_ulp_registered(bp->edev)) {
+ int ulp_msix = bnxt_get_avail_msix(bp, bp->ulp_num_msix_want);
+
+ if (ulp_msix > bp->ulp_num_msix_want)
+ ulp_msix = bp->ulp_num_msix_want;
+ irqs_required = ulp_msix + bp->cp_nr_rings;
+ } else {
+ irqs_required = bnxt_get_num_msix(bp);
+ }
+
+ if (irq_re_init && BNXT_NEW_RM(bp) && irqs_required != bp->total_irqs) {
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
irq_cleared = true;
@@ -10622,9 +10977,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
cpr = &bnapi->cp_ring;
if (bnapi->tx_fault)
- cpr->sw_stats.tx.tx_resets++;
+ cpr->sw_stats->tx.tx_resets++;
if (bnapi->in_reset)
- cpr->sw_stats.rx.rx_resets++;
+ cpr->sw_stats->rx.rx_resets++;
napi_disable(&bnapi->napi);
if (bnapi->rx_ring)
cancel_work_sync(&cpr->dim.work);
@@ -11359,7 +11714,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
if (fw_reset) {
set_bit(BNXT_STATE_FW_RESET_DET, &bp->state);
if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
- bnxt_ulp_stop(bp);
+ bnxt_ulp_irq_stop(bp);
bnxt_free_ctx_mem(bp);
bnxt_dcb_free(bp);
rc = bnxt_fw_init_one(bp);
@@ -11666,6 +12021,46 @@ static void bnxt_cfg_usr_fltrs(struct bnxt *bp)
bnxt_cfg_one_usr_fltr(bp, usr_fltr);
}
+static int bnxt_set_xps_mapping(struct bnxt *bp)
+{
+ int numa_node = dev_to_node(&bp->pdev->dev);
+ unsigned int q_idx, map_idx, cpu, i;
+ const struct cpumask *cpu_mask_ptr;
+ int nr_cpus = num_online_cpus();
+ cpumask_t *q_map;
+ int rc = 0;
+
+ q_map = kcalloc(bp->tx_nr_rings_per_tc, sizeof(*q_map), GFP_KERNEL);
+ if (!q_map)
+ return -ENOMEM;
+
+ /* Create CPU mask for all TX queues across MQPRIO traffic classes.
+ * Each TC has the same number of TX queues. The nth TX queue for each
+ * TC will have the same CPU mask.
+ */
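+ /* Example (assumed values, added for illustration): with 8 online
+ * CPUs and tx_nr_rings_per_tc == 4, map_idx cycles 0,1,2,3,0,1,2,3,
+ * so queue 0 gets CPUs {0,4}, queue 1 gets CPUs {1,5}, and so on.
+ */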
+ for (i = 0; i < nr_cpus; i++) {
+ map_idx = i % bp->tx_nr_rings_per_tc;
+ cpu = cpumask_local_spread(i, numa_node);
+ cpu_mask_ptr = get_cpu_mask(cpu);
+ cpumask_or(&q_map[map_idx], &q_map[map_idx], cpu_mask_ptr);
+ }
+
+ /* Register CPU mask for each TX queue except the ones marked for XDP */
+ for (q_idx = 0; q_idx < bp->dev->real_num_tx_queues; q_idx++) {
+ map_idx = q_idx % bp->tx_nr_rings_per_tc;
+ rc = netif_set_xps_queue(bp->dev, &q_map[map_idx], q_idx);
+ if (rc) {
+ netdev_warn(bp->dev, "Error setting XPS for q:%d\n",
+ q_idx);
+ break;
+ }
+ }
+
+ kfree(q_map);
+
+ return rc;
+}
+
static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
@@ -11728,8 +12123,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
}
- if (irq_re_init)
+ if (irq_re_init) {
udp_tunnel_nic_reset_ntf(bp->dev);
+ rc = bnxt_set_xps_mapping(bp);
+ if (rc)
+ netdev_warn(bp->dev, "failed to set xps mapping\n");
+ }
if (bp->tx_nr_rings_xdp < num_possible_cpus()) {
if (!static_key_enabled(&bnxt_xdp_locking_key))
@@ -11750,10 +12149,12 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
- if (bp->ptp_cfg)
- atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
+ if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
+ WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS);
bnxt_ptp_init_rtc(bp, true);
bnxt_ptp_cfg_tstamp_filters(bp);
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+ bnxt_hwrm_realloc_rss_ctx_vnic(bp);
bnxt_cfg_usr_fltrs(bp);
return 0;
@@ -11868,10 +12269,9 @@ static int bnxt_open(struct net_device *dev)
bnxt_hwrm_if_change(bp, false);
} else {
if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) {
- if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
- bnxt_ulp_start(bp, 0);
- bnxt_reenable_sriov(bp);
- }
+ if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
+ bnxt_queue_sp_work(bp,
+ BNXT_RESTART_ULP_SP_EVENT);
}
}
@@ -11902,6 +12302,8 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
while (bnxt_drv_busy(bp))
msleep(20);
+ if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp))
+ bnxt_clear_rss_ctxs(bp);
/* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
@@ -12101,8 +12503,8 @@ static void bnxt_get_ring_stats(struct bnxt *bp,
stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts);
stats->rx_dropped +=
- cpr->sw_stats.rx.rx_netpoll_discards +
- cpr->sw_stats.rx.rx_oom_discards;
+ cpr->sw_stats->rx.rx_netpoll_discards +
+ cpr->sw_stats->rx.rx_oom_discards;
}
}
@@ -12169,7 +12571,7 @@ static void bnxt_get_one_ring_err_stats(struct bnxt *bp,
struct bnxt_total_ring_err_stats *stats,
struct bnxt_cp_ring_info *cpr)
{
- struct bnxt_sw_stats *sw_stats = &cpr->sw_stats;
+ struct bnxt_sw_stats *sw_stats = cpr->sw_stats;
u64 *hw_stats = cpr->stats.sw_stats;
stats->rx_total_l4_csum_errors += sw_stats->rx.rx_l4_csum_errors;
@@ -12399,33 +12801,26 @@ static bool bnxt_rfs_supported(struct bnxt *bp)
}
/* If runtime conditions support RFS */
-static bool bnxt_rfs_capable(struct bnxt *bp)
+bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx)
{
struct bnxt_hw_rings hwr = {0};
int max_vnics, max_rss_ctxs;
- hwr.rss_ctx = 1;
- if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) {
- /* 2 VNICS: default + Ntuple */
- hwr.vnic = 2;
- hwr.rss_ctx = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
- hwr.vnic;
- goto check_reserve_vnic;
- }
- if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
+ if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ !BNXT_SUPPORTS_NTUPLE_VNIC(bp))
return bnxt_rfs_supported(bp);
+
if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp) ||
    !bp->rx_nr_rings)
return false;
- hwr.vnic = 1 + bp->rx_nr_rings;
-check_reserve_vnic:
+ hwr.grp = bp->rx_nr_rings;
+ hwr.vnic = bnxt_get_total_vnics(bp, bp->rx_nr_rings);
+ if (new_rss_ctx)
+ hwr.vnic++;
+ hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
max_vnics = bnxt_get_max_func_vnics(bp);
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
- if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
- !(bp->rss_cap & BNXT_RSS_CAP_NEW_RSS_CAP))
- hwr.rss_ctx = hwr.vnic;
-
if (hwr.vnic > max_vnics || hwr.rss_ctx > max_rss_ctxs) {
if (bp->rx_nr_rings > 1)
netdev_warn(bp->dev,
@@ -12437,7 +12832,11 @@ check_reserve_vnic:
if (!BNXT_NEW_RM(bp))
return true;
- if (hwr.vnic == bp->hw_resc.resv_vnics &&
+ /* Do not reduce VNIC and RSS ctx reservations. There is a FW
+ * issue that will mess up the default VNIC if we reduce the
+ * reservations.
+ */
+ if (hwr.vnic <= bp->hw_resc.resv_vnics &&
hwr.rss_ctx <= bp->hw_resc.resv_rsscos_ctxs)
return true;
@@ -12459,7 +12858,7 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
netdev_features_t vlan_features;
- if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
+ if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp, false))
features &= ~NETIF_F_NTUPLE;
if ((bp->flags & BNXT_FLAG_NO_AGG_RINGS) || bp->xdp_prog)
@@ -12851,17 +13250,8 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
if (!silent)
bnxt_dbg_dump_states(bp);
if (netif_running(bp->dev)) {
- int rc;
-
- if (silent) {
- bnxt_close_nic(bp, false, false);
- bnxt_open_nic(bp, false, false);
- } else {
- bnxt_ulp_stop(bp);
- bnxt_close_nic(bp, true, false);
- rc = bnxt_open_nic(bp, true, false);
- bnxt_ulp_start(bp, rc);
- }
+ bnxt_close_nic(bp, !silent, false);
+ bnxt_open_nic(bp, !silent, false);
}
}
@@ -13019,7 +13409,7 @@ static void bnxt_rx_ring_reset(struct bnxt *bp)
rxr->bnapi->in_reset = false;
bnxt_alloc_one_rx_ring(bp, i);
cpr = &rxr->bnapi->cp_ring;
- cpr->sw_stats.rx.rx_resets++;
+ cpr->sw_stats->rx.rx_resets++;
if (bp->flags & BNXT_FLAG_AGG_RINGS)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
@@ -13041,7 +13431,6 @@ static void bnxt_fw_fatal_close(struct bnxt *bp)
static void bnxt_fw_reset_close(struct bnxt *bp)
{
- bnxt_ulp_stop(bp);
/* When firmware is in fatal state, quiesce device and disable
* bus master to prevent any potential bad DMAs before freeing
* kernel memory.
@@ -13122,6 +13511,7 @@ void bnxt_fw_exception(struct bnxt *bp)
{
netdev_warn(bp->dev, "Detected firmware fatal condition, initiating reset\n");
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ bnxt_ulp_stop(bp);
bnxt_rtnl_lock_sp(bp);
bnxt_force_fw_reset(bp);
bnxt_rtnl_unlock_sp(bp);
@@ -13153,6 +13543,7 @@ static int bnxt_get_registered_vfs(struct bnxt *bp)
void bnxt_fw_reset(struct bnxt *bp)
{
+ bnxt_ulp_stop(bp);
bnxt_rtnl_lock_sp(bp);
if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
@@ -13231,7 +13622,7 @@ static void bnxt_chk_missed_irq(struct bnxt *bp)
bnxt_dbg_hwrm_ring_info_get(bp,
DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
fw_ring_id, &val[0], &val[1]);
- cpr->sw_stats.cmn.missed_irqs++;
+ cpr->sw_stats->cmn.missed_irqs++;
}
}
}
@@ -13277,6 +13668,12 @@ static void bnxt_fw_echo_reply(struct bnxt *bp)
hwrm_req_send(bp, req);
}
+static void bnxt_ulp_restart(struct bnxt *bp)
+{
+ bnxt_ulp_stop(bp);
+ bnxt_ulp_start(bp, 0);
+}
+
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -13288,6 +13685,11 @@ static void bnxt_sp_task(struct work_struct *work)
return;
}
+ if (test_and_clear_bit(BNXT_RESTART_ULP_SP_EVENT, &bp->sp_event)) {
+ bnxt_ulp_restart(bp);
+ bnxt_reenable_sriov(bp);
+ }
+
if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
bnxt_cfg_rx_mode(bp);
@@ -13416,8 +13818,8 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
return -ENOMEM;
hwr.stat = hwr.cp;
if (BNXT_NEW_RM(bp)) {
- hwr.cp += bnxt_get_ulp_msix_num(bp);
- hwr.stat += bnxt_get_ulp_stat_ctxs(bp);
+ hwr.cp += bnxt_get_ulp_msix_num_in_use(bp);
+ hwr.stat += bnxt_get_ulp_stat_ctxs_in_use(bp);
hwr.grp = rx;
hwr.rss_ctx = bnxt_get_total_rss_ctxs(bp, &hwr);
}
@@ -13600,7 +14002,7 @@ static void bnxt_set_dflt_rfs(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_RFS;
if (bnxt_rfs_supported(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
- if (bnxt_rfs_capable(bp)) {
+ if (bnxt_rfs_capable(bp, false)) {
bp->flags |= BNXT_FLAG_RFS;
dev->features |= NETIF_F_NTUPLE;
}
@@ -13744,10 +14146,8 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp)
static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
{
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
- bnxt_ulp_start(bp, rc);
+ if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF)
bnxt_dl_health_fw_status_update(bp, false);
- }
bp->fw_reset_state = 0;
dev_close(bp->dev);
}
@@ -13778,7 +14178,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bp->fw_reset_state = 0;
netdev_err(bp->dev, "Firmware reset aborted, bnxt_get_registered_vfs() returns %d\n",
n);
- return;
+ goto ulp_start;
}
bnxt_queue_fw_reset_work(bp, HZ / 10);
return;
@@ -13788,7 +14188,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
bnxt_fw_reset_abort(bp, rc);
rtnl_unlock();
- return;
+ goto ulp_start;
}
bnxt_fw_reset_close(bp);
if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) {
@@ -13881,7 +14281,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
netdev_err(bp->dev, "bnxt_open() failed during FW reset\n");
bnxt_fw_reset_abort(bp, rc);
rtnl_unlock();
- return;
+ goto ulp_start;
}
if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) &&
@@ -13893,10 +14293,6 @@ static void bnxt_fw_reset_task(struct work_struct *work)
/* Make sure fw_reset_state is 0 before clearing the flag */
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
- bnxt_ulp_start(bp, 0);
- bnxt_reenable_sriov(bp);
- bnxt_vf_reps_alloc(bp);
- bnxt_vf_reps_open(bp);
bnxt_ptp_reapply_pps(bp);
clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
@@ -13904,6 +14300,12 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_dl_health_fw_status_update(bp, true);
}
rtnl_unlock();
+ bnxt_ulp_start(bp, 0);
+ bnxt_reenable_sriov(bp);
+ rtnl_lock();
+ bnxt_vf_reps_alloc(bp);
+ bnxt_vf_reps_open(bp);
+ rtnl_unlock();
break;
}
return;
@@ -13919,6 +14321,8 @@ fw_reset_abort:
rtnl_lock();
bnxt_fw_reset_abort(bp, rc);
rtnl_unlock();
+ulp_start:
+ bnxt_ulp_start(bp, rc);
}
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
@@ -14043,7 +14447,7 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
if (netif_running(dev))
bnxt_close_nic(bp, true, false);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
bnxt_set_ring_params(bp);
if (netif_running(dev))
@@ -14453,12 +14857,9 @@ static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
u16 mode;
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
mode = nla_get_u16(attr);
if (mode == bp->br_mode)
break;
@@ -14542,7 +14943,7 @@ static void bnxt_get_queue_stats_rx(struct net_device *dev, int i,
stats->bytes += BNXT_GET_RING_STATS64(sw, rx_mcast_bytes);
stats->bytes += BNXT_GET_RING_STATS64(sw, rx_bcast_bytes);
- stats->alloc_fail = cpr->sw_stats.rx.rx_oom_discards;
+ stats->alloc_fail = cpr->sw_stats->rx.rx_oom_discards;
}
static void bnxt_get_queue_stats_tx(struct net_device *dev, int i,
@@ -14586,6 +14987,236 @@ static const struct netdev_stat_ops bnxt_stat_ops = {
.get_base_stats = bnxt_get_base_stats,
};
+static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
+{
+ u16 mem_size;
+
+ rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
+ mem_size = rxr->rx_agg_bmap_size / 8;
+ rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
+ if (!rxr->rx_agg_bmap)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt_rx_ring_info *rxr, *clone;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ring_struct *ring;
+ int rc;
+
+ rxr = &bp->rx_ring[idx];
+ clone = qmem;
+ memcpy(clone, rxr, sizeof(*rxr));
+ bnxt_init_rx_ring_struct(bp, clone);
+ bnxt_reset_rx_ring_struct(bp, clone);
+
+ clone->rx_prod = 0;
+ clone->rx_agg_prod = 0;
+ clone->rx_sw_agg_prod = 0;
+ clone->rx_next_cons = 0;
+
+ rc = bnxt_alloc_rx_page_pool(bp, clone, rxr->page_pool->p.nid);
+ if (rc)
+ return rc;
+
+ rc = xdp_rxq_info_reg(&clone->xdp_rxq, bp->dev, idx, 0);
+ if (rc < 0)
+ goto err_page_pool_destroy;
+
+ rc = xdp_rxq_info_reg_mem_model(&clone->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ clone->page_pool);
+ if (rc)
+ goto err_rxq_info_unreg;
+
+ ring = &clone->rx_ring_struct;
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_ring;
+
+ if (bp->flags & BNXT_FLAG_AGG_RINGS) {
+ ring = &clone->rx_agg_ring_struct;
+ rc = bnxt_alloc_ring(bp, &ring->ring_mem);
+ if (rc)
+ goto err_free_rx_agg_ring;
+
+ rc = bnxt_alloc_rx_agg_bmap(bp, clone);
+ if (rc)
+ goto err_free_rx_agg_ring;
+ }
+
+ bnxt_init_one_rx_ring_rxbd(bp, clone);
+ bnxt_init_one_rx_agg_ring_rxbd(bp, clone);
+
+ bnxt_alloc_one_rx_ring_skb(bp, clone, idx);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_alloc_one_rx_ring_page(bp, clone, idx);
+
+ return 0;
+
+err_free_rx_agg_ring:
+ bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem);
+err_free_rx_ring:
+ bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem);
+err_rxq_info_unreg:
+ xdp_rxq_info_unreg(&clone->xdp_rxq);
+err_page_pool_destroy:
+ clone->page_pool->p.napi = NULL;
+ page_pool_destroy(clone->page_pool);
+ clone->page_pool = NULL;
+ return rc;
+}
+
+static void bnxt_queue_mem_free(struct net_device *dev, void *qmem)
+{
+ struct bnxt_rx_ring_info *rxr = qmem;
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ring_struct *ring;
+
+ bnxt_free_one_rx_ring(bp, rxr);
+ bnxt_free_one_rx_agg_ring(bp, rxr);
+
+ xdp_rxq_info_unreg(&rxr->xdp_rxq);
+
+ page_pool_destroy(rxr->page_pool);
+ rxr->page_pool = NULL;
+
+ ring = &rxr->rx_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ ring = &rxr->rx_agg_ring_struct;
+ bnxt_free_ring(bp, &ring->ring_mem);
+
+ kfree(rxr->rx_agg_bmap);
+ rxr->rx_agg_bmap = NULL;
+}
+
+static void bnxt_copy_rx_ring(struct bnxt *bp,
+ struct bnxt_rx_ring_info *dst,
+ struct bnxt_rx_ring_info *src)
+{
+ struct bnxt_ring_mem_info *dst_rmem, *src_rmem;
+ struct bnxt_ring_struct *dst_ring, *src_ring;
+ int i;
+
+ dst_ring = &dst->rx_ring_struct;
+ dst_rmem = &dst_ring->ring_mem;
+ src_ring = &src->rx_ring_struct;
+ src_rmem = &src_ring->ring_mem;
+
+ WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+ WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+ WARN_ON(dst_rmem->flags != src_rmem->flags);
+ WARN_ON(dst_rmem->depth != src_rmem->depth);
+ WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+ WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+
+ dst_rmem->pg_tbl = src_rmem->pg_tbl;
+ dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+ *dst_rmem->vmem = *src_rmem->vmem;
+ for (i = 0; i < dst_rmem->nr_pages; i++) {
+ dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+ dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+ }
+
+ if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
+ return;
+
+ dst_ring = &dst->rx_agg_ring_struct;
+ dst_rmem = &dst_ring->ring_mem;
+ src_ring = &src->rx_agg_ring_struct;
+ src_rmem = &src_ring->ring_mem;
+
+ WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages);
+ WARN_ON(dst_rmem->page_size != src_rmem->page_size);
+ WARN_ON(dst_rmem->flags != src_rmem->flags);
+ WARN_ON(dst_rmem->depth != src_rmem->depth);
+ WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size);
+ WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem);
+ WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size);
+
+ dst_rmem->pg_tbl = src_rmem->pg_tbl;
+ dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map;
+ *dst_rmem->vmem = *src_rmem->vmem;
+ for (i = 0; i < dst_rmem->nr_pages; i++) {
+ dst_rmem->pg_arr[i] = src_rmem->pg_arr[i];
+ dst_rmem->dma_arr[i] = src_rmem->dma_arr[i];
+ }
+
+ dst->rx_agg_bmap = src->rx_agg_bmap;
+}
+
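+ /* Sketch of the expected core flow (note added for clarity, not part
+ * of the patch): ndo_queue_mem_alloc() builds a clone, ndo_queue_stop()
+ * parks the old ring state in qmem, ndo_queue_start() adopts the
+ * clone's freshly filled buffers, and ndo_queue_mem_free() releases the
+ * parked state, so only the targeted RX queue is disturbed.
+ */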
+static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rx_ring_info *rxr, *clone;
+ struct bnxt_cp_ring_info *cpr;
+ int rc;
+
+ rxr = &bp->rx_ring[idx];
+ clone = qmem;
+
+ rxr->rx_prod = clone->rx_prod;
+ rxr->rx_agg_prod = clone->rx_agg_prod;
+ rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod;
+ rxr->rx_next_cons = clone->rx_next_cons;
+ rxr->page_pool = clone->page_pool;
+ rxr->xdp_rxq = clone->xdp_rxq;
+
+ bnxt_copy_rx_ring(bp, rxr, clone);
+
+ rc = bnxt_hwrm_rx_ring_alloc(bp, rxr);
+ if (rc)
+ return rc;
+ rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr);
+ if (rc)
+ goto err_free_hwrm_rx_ring;
+
+ bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
+ if (bp->flags & BNXT_FLAG_AGG_RINGS)
+ bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+
+ napi_enable(&rxr->bnapi->napi);
+
+ cpr = &rxr->bnapi->cp_ring;
+ cpr->sw_stats->rx.rx_resets++;
+
+ return 0;
+
+err_free_hwrm_rx_ring:
+ bnxt_hwrm_rx_ring_free(bp, rxr, false);
+ return rc;
+}
+
+static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rx_ring_info *rxr;
+
+ rxr = &bp->rx_ring[idx];
+ napi_disable(&rxr->bnapi->napi);
+ bnxt_hwrm_rx_ring_free(bp, rxr, false);
+ bnxt_hwrm_rx_agg_ring_free(bp, rxr, false);
+ rxr->rx_next_cons = 0;
+ page_pool_disable_direct_recycling(rxr->page_pool);
+
+ memcpy(qmem, rxr, sizeof(*rxr));
+ bnxt_init_rx_ring_struct(bp, qmem);
+
+ return 0;
+}
+
+static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info),
+ .ndo_queue_mem_alloc = bnxt_queue_mem_alloc,
+ .ndo_queue_mem_free = bnxt_queue_mem_free,
+ .ndo_queue_start = bnxt_queue_start,
+ .ndo_queue_stop = bnxt_queue_stop,
+};
+
static void bnxt_remove_one(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
@@ -14594,12 +15225,16 @@ static void bnxt_remove_one(struct pci_dev *pdev)
if (BNXT_PF(bp))
bnxt_sriov_disable(bp);
- bnxt_rdma_aux_device_uninit(bp);
+ bnxt_rdma_aux_device_del(bp);
bnxt_ptp_clear(bp);
unregister_netdev(dev);
+
+ bnxt_rdma_aux_device_uninit(bp);
+
bnxt_free_l2_filters(bp, true);
bnxt_free_ntp_fltrs(bp, true);
+ WARN_ON(bp->num_rss_ctx);
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
/* Flush any pending tasks */
cancel_work_sync(&bp->sp_task);
@@ -14688,8 +15323,9 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_rx = hw_resc->max_rx_rings;
*max_cp = bnxt_get_max_func_cp_rings_for_en(bp);
max_irq = min_t(int, bnxt_get_max_func_irqs(bp) -
- bnxt_get_ulp_msix_num(bp),
- hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp));
+ bnxt_get_ulp_msix_num_in_use(bp),
+ hw_resc->max_stat_ctxs -
+ bnxt_get_ulp_stat_ctxs_in_use(bp));
if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS))
*max_cp = min_t(int, *max_cp, max_irq);
max_ring_grps = hw_resc->max_hw_ring_grps;
@@ -14785,6 +15421,7 @@ static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
int dflt_rings, max_rx_rings, max_tx_rings, rc;
+ int avail_msix;
if (!bnxt_can_reserve_rings(bp))
return 0;
@@ -14812,6 +15449,14 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
+ avail_msix = bnxt_get_max_func_irqs(bp) - bp->cp_nr_rings;
+ if (avail_msix >= BNXT_MIN_ROCE_CP_RINGS) {
+ int ulp_num_msix = min(avail_msix, bp->ulp_num_msix_want);
+
+ bnxt_set_ulp_msix_num(bp, ulp_num_msix);
+ bnxt_set_dflt_ulp_stat_ctxs(bp);
+ }
+
rc = __bnxt_reserve_rings(bp);
if (rc && rc != -ENODEV)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
@@ -15036,6 +15681,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->stat_ops = &bnxt_stat_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
+ dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops;
pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp);
@@ -15114,6 +15760,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->priv_flags |= IFF_UNICAST_FLT;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
+ if (bp->tso_max_segs)
+ netif_set_tso_max_segs(dev, bp->tso_max_segs);
dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_RX_SG;
@@ -15161,6 +15809,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
+ bnxt_rdma_aux_device_init(bp);
rc = bnxt_set_dflt_rings(bp, true);
if (rc) {
if (BNXT_VF(bp) && rc == -ENODEV) {
@@ -15211,13 +15860,16 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&bp->usr_fltr_list);
+ if (BNXT_SUPPORTS_NTUPLE_VNIC(bp))
+ bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX;
+
rc = register_netdev(dev);
if (rc)
goto init_err_cleanup;
bnxt_dl_fw_reporters_create(bp);
- bnxt_rdma_aux_device_init(bp);
+ bnxt_rdma_aux_device_add(bp);
bnxt_print_device_info(bp);
@@ -15225,6 +15877,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
init_err_cleanup:
+ bnxt_rdma_aux_device_uninit(bp);
bnxt_dl_unregister(bp);
init_err_dl:
bnxt_shutdown_tc(bp);
@@ -15285,8 +15938,9 @@ static int bnxt_suspend(struct device *device)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rtnl_lock();
bnxt_ulp_stop(bp);
+
+ rtnl_lock();
if (netif_running(dev)) {
netif_device_detach(dev);
rc = bnxt_close(dev);
@@ -15341,10 +15995,10 @@ static int bnxt_resume(struct device *device)
}
resume_exit:
+ rtnl_unlock();
bnxt_ulp_start(bp, rc);
if (!rc)
bnxt_reenable_sriov(bp);
- rtnl_unlock();
return rc;
}
@@ -15374,11 +16028,11 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
netdev_info(netdev, "PCI I/O error detected\n");
+ bnxt_ulp_stop(bp);
+
rtnl_lock();
netif_device_detach(netdev);
- bnxt_ulp_stop(bp);
-
if (test_and_set_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
netdev_err(bp->dev, "Firmware reset already in progress\n");
abort = true;
@@ -15430,6 +16084,10 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
netdev_info(bp->dev, "PCI Slot Reset\n");
+ if (!(bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
+ test_bit(BNXT_STATE_PCI_CHANNEL_IO_FROZEN, &bp->state))
+ msleep(900);
+
rtnl_lock();
if (pci_enable_device(pdev)) {
@@ -15510,13 +16168,13 @@ static void bnxt_io_resume(struct pci_dev *pdev)
if (!err && netif_running(netdev))
err = bnxt_open(netdev);
- bnxt_ulp_start(bp, err);
- if (!err) {
- bnxt_reenable_sriov(bp);
+ if (!err)
netif_device_attach(netdev);
- }
rtnl_unlock();
+ bnxt_ulp_start(bp, err);
+ if (!err)
+ bnxt_reenable_sriov(bp);
}
static const struct pci_error_handlers bnxt_err_handler = {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index dd849e715c9b..059a6f81c1a8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -181,6 +181,32 @@ struct tx_cmp {
#define TX_CMP_SQ_CONS_IDX(txcmp) \
(le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK)
+struct tx_ts_cmp {
+ __le32 tx_ts_cmp_flags_type;
+ #define TX_TS_CMP_FLAGS_ERROR (1 << 6)
+ #define TX_TS_CMP_FLAGS_TS_TYPE (1 << 7)
+ #define TX_TS_CMP_FLAGS_TS_TYPE_PM (0 << 7)
+ #define TX_TS_CMP_FLAGS_TS_TYPE_PA (1 << 7)
+ #define TX_TS_CMP_FLAGS_TS_FALLBACK (1 << 8)
+ #define TX_TS_CMP_TS_SUB_NS (0xf << 12)
+ #define TX_TS_CMP_TS_NS_MID (0xffff << 16)
+ #define TX_TS_CMP_TS_NS_MID_SFT 16
+ u32 tx_ts_cmp_opaque;
+ __le32 tx_ts_cmp_errors_v;
+ #define TX_TS_CMP_V (1 << 0)
+ #define TX_TS_CMP_TS_INVALID_ERR (1 << 10)
+ __le32 tx_ts_cmp_ts_ns_lo;
+};
+
+#define BNXT_GET_TX_TS_48B_NS(tscmp) \
+ (le32_to_cpu((tscmp)->tx_ts_cmp_ts_ns_lo) | \
+ ((u64)(le32_to_cpu((tscmp)->tx_ts_cmp_flags_type) & \
+ TX_TS_CMP_TS_NS_MID) << TX_TS_CMP_TS_NS_MID_SFT))
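+/* Example (illustrative, not from the patch): with tx_ts_cmp_ts_ns_lo
+ * = 0x89abcdef and bits 16..31 of tx_ts_cmp_flags_type = 0x4567, the
+ * macro yields the 48-bit timestamp 0x456789abcdef nanoseconds.
+ */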
+
+#define BNXT_TX_TS_ERR(tscmp) \
+ (((tscmp)->tx_ts_cmp_flags_type & cpu_to_le32(TX_TS_CMP_FLAGS_ERROR)) &&\
+ ((tscmp)->tx_ts_cmp_errors_v & cpu_to_le32(TX_TS_CMP_TS_INVALID_ERR)))
+
struct rx_cmp {
__le32 rx_cmp_len_flags_type;
#define RX_CMP_CMP_TYPE (0x3f << 0)
@@ -848,11 +874,14 @@ struct bnxt_sw_tx_bd {
DEFINE_DMA_UNMAP_ADDR(mapping);
DEFINE_DMA_UNMAP_LEN(len);
struct page *page;
- u8 is_gso;
+ u8 is_ts_pkt;
u8 is_push;
u8 action;
unsigned short nr_frags;
- u16 rx_prod;
+ union {
+ u16 rx_prod;
+ u16 txts_prod;
+ };
};
struct bnxt_sw_rx_bd {
@@ -1152,7 +1181,7 @@ struct bnxt_cp_ring_info {
struct bnxt_stats_mem stats;
u32 hw_stats_ctx_id;
- struct bnxt_sw_stats sw_stats;
+ struct bnxt_sw_stats *sw_stats;
struct bnxt_ring_struct cp_ring_struct;
@@ -1256,8 +1285,19 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_UCAST_FLAG 8
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
#define BNXT_VNIC_NTUPLE_FLAG 0x20
+#define BNXT_VNIC_RSSCTX_FLAG 0x40
+ struct ethtool_rxfh_context *rss_ctx;
+ u32 vnic_id;
};
+struct bnxt_rss_ctx {
+ struct bnxt_vnic_info vnic;
+ u8 index;
+};
+
+#define BNXT_MAX_ETH_RSS_CTX 32
+#define BNXT_VNIC_ID_INVALID 0xffffffff
+
struct bnxt_hw_rings {
int tx;
int rx;
@@ -1360,6 +1400,7 @@ struct bnxt_filter_base {
#define BNXT_ACT_RING_DST 2
#define BNXT_ACT_FUNC_DST 4
#define BNXT_ACT_NO_AGING 8
+#define BNXT_ACT_RSS_CTX 0x10
u16 sw_id;
u16 rxq;
u16 fw_vnic_id;
@@ -1419,6 +1460,57 @@ struct bnxt_l2_filter {
atomic_t refcnt;
};
+/* Compat version of hwrm_port_phy_qcfg_output capped at 96 bytes. The
+ * first 95 bytes are identical to hwrm_port_phy_qcfg_output in bnxt_hsi.h.
+ * The last valid byte in the compat version is different.
+ */
+struct hwrm_port_phy_qcfg_output_compat {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 link;
+ u8 active_fec_signal_mode;
+ __le16 link_speed;
+ u8 duplex_cfg;
+ u8 pause;
+ __le16 support_speeds;
+ __le16 force_link_speed;
+ u8 auto_mode;
+ u8 auto_pause;
+ __le16 auto_link_speed;
+ __le16 auto_link_speed_mask;
+ u8 wirespeed;
+ u8 lpbk;
+ u8 force_pause;
+ u8 module_status;
+ __le32 preemphasis;
+ u8 phy_maj;
+ u8 phy_min;
+ u8 phy_bld;
+ u8 phy_type;
+ u8 media_type;
+ u8 xcvr_pkg_type;
+ u8 eee_config_phy_addr;
+ u8 parallel_detect;
+ __le16 link_partner_adv_speeds;
+ u8 link_partner_adv_auto_mode;
+ u8 link_partner_adv_pause;
+ __le16 adv_eee_link_speed_mask;
+ __le16 link_partner_adv_eee_link_speed_mask;
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ __le16 fec_cfg;
+ u8 duplex_state;
+ u8 option_flags;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le16 support_pam4_speeds;
+ __le16 force_pam4_link_speed;
+ __le16 auto_pam4_link_speed_mask;
+ u8 link_partner_pam4_adv_speeds;
+ u8 valid;
+};
+
struct bnxt_link_info {
u8 phy_type;
u8 media_type;
@@ -1998,6 +2090,7 @@ enum board_idx {
NETXTREME_E_VF_HV,
NETXTREME_E_P5_VF,
NETXTREME_E_P5_VF_HV,
+ NETXTREME_E_P7_VF,
};
struct bnxt {
@@ -2170,9 +2263,17 @@ struct bnxt {
(BNXT_CHIP_NUM_58700((bp)->chip_num) && \
!BNXT_CHIP_TYPE_NITRO_A0(bp)))
+/* Chip class phase 3.x */
+#define BNXT_CHIP_P3(bp) \
+ (BNXT_CHIP_NUM_57X0X((bp)->chip_num) || \
+ BNXT_CHIP_TYPE_NITRO_A0(bp))
+
#define BNXT_CHIP_P4_PLUS(bp) \
(BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp))
+#define BNXT_CHIP_P5_AND_MINUS(bp) \
+ (BNXT_CHIP_P3(bp) || BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp))
+
struct bnxt_aux_priv *aux_priv;
struct bnxt_en_dev *edev;
@@ -2227,8 +2328,9 @@ struct bnxt {
/* grp_info indexed by completion ring index */
struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info;
+ u32 num_rss_ctx;
int nr_vnics;
- u16 *rss_indir_tbl;
+ u32 *rss_indir_tbl;
u16 rss_indir_tbl_entries;
u32 rss_hash_cfg;
u32 rss_hash_delta;
@@ -2241,12 +2343,14 @@ struct bnxt {
#define BNXT_RSS_CAP_AH_V6_RSS_CAP BIT(5)
#define BNXT_RSS_CAP_ESP_V4_RSS_CAP BIT(6)
#define BNXT_RSS_CAP_ESP_V6_RSS_CAP BIT(7)
+#define BNXT_RSS_CAP_MULTI_RSS_CTX BIT(8)
u8 rss_hash_key[HW_HASH_KEY_SIZE];
u8 rss_hash_key_valid:1;
u8 rss_hash_key_updated:1;
u16 max_mtu;
+ u16 tso_max_segs;
u8 max_tc;
u8 max_lltc; /* lossless TCs */
struct bnxt_queue_info q_info[BNXT_MAX_QUEUE];
@@ -2284,6 +2388,7 @@ struct bnxt {
struct bnxt_irq *irq_tbl;
int total_irqs;
+ int ulp_num_msix_want;
u8 mac_addr[ETH_ALEN];
#ifdef CONFIG_BNXT_DCB
@@ -2311,6 +2416,7 @@ struct bnxt {
#define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16)
#define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17)
#define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18)
+ #define BNXT_FW_CAP_TX_TS_CMP BIT_ULL(19)
#define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20)
#define BNXT_FW_CAP_HOT_RESET BIT_ULL(21)
#define BNXT_FW_CAP_PTP_RTC BIT_ULL(22)
@@ -2340,6 +2446,10 @@ struct bnxt {
#define BNXT_SUPPORTS_NTUPLE_VNIC(bp) \
(BNXT_PF(bp) && ((bp)->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V3))
+#define BNXT_SUPPORTS_MULTI_RSS_CTX(bp) \
+ (BNXT_PF(bp) && BNXT_SUPPORTS_NTUPLE_VNIC(bp) && \
+ ((bp)->rss_cap & BNXT_RSS_CAP_MULTI_RSS_CTX))
+
u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u16 hwrm_cmd_kong_seq;
@@ -2416,6 +2526,7 @@ struct bnxt {
#define BNXT_LINK_CFG_CHANGE_SP_EVENT 21
#define BNXT_THERMAL_THRESHOLD_SP_EVENT 22
#define BNXT_FW_ECHO_REQUEST_SP_EVENT 23
+#define BNXT_RESTART_ULP_SP_EVENT 24
struct delayed_work fw_reset_task;
int fw_reset_state;
@@ -2679,7 +2790,6 @@ void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_insert_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
void bnxt_del_one_usr_fltr(struct bnxt *bp, struct bnxt_filter_base *fltr);
-void bnxt_clear_usr_fltrs(struct bnxt *bp, bool all);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
@@ -2693,9 +2803,16 @@ int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr);
int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_ntuple_filter *fltr);
+int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ u32 tpa_flags);
void bnxt_fill_ipv6_mask(__be32 mask[4]);
+void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp,
+ struct ethtool_rxfh_context *rss_ctx);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
-int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
+int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic,
+ unsigned int start_rx_ring_idx,
+ unsigned int nr_rings);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
@@ -2705,7 +2822,6 @@ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp);
-int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
@@ -2721,6 +2837,10 @@ int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
int bnxt_hwrm_func_qcaps(struct bnxt *bp);
int bnxt_hwrm_fw_set_time(struct bnxt *);
+int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic);
+void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx,
+ bool all);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
@@ -2728,6 +2848,7 @@ void bnxt_reenable_sriov(struct bnxt *bp);
void bnxt_close_nic(struct bnxt *, bool, bool);
void bnxt_get_ring_err_stats(struct bnxt *bp,
struct bnxt_total_ring_err_stats *stats);
+bool bnxt_rfs_capable(struct bnxt *bp, bool new_rss_ctx);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
u32 *reg_buf);
void bnxt_fw_exception(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index ae4529c043f0..4cb0fabf977e 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -437,18 +437,20 @@ static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
+ bnxt_ulp_stop(bp);
rtnl_lock();
if (bnxt_sriov_cfg(bp)) {
NL_SET_ERR_MSG_MOD(extack,
"reload is unsupported while VFs are allocated or being configured");
rtnl_unlock();
+ bnxt_ulp_start(bp, 0);
return -EOPNOTSUPP;
}
if (bp->dev->reg_state == NETREG_UNREGISTERED) {
rtnl_unlock();
+ bnxt_ulp_start(bp, 0);
return -ENODEV;
}
- bnxt_ulp_stop(bp);
if (netif_running(bp->dev))
bnxt_close_nic(bp, true, true);
bnxt_vf_reps_free(bp);
@@ -516,7 +518,6 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
bnxt_vf_reps_alloc(bp);
if (netif_running(bp->dev))
rc = bnxt_open_nic(bp, true, true);
- bnxt_ulp_start(bp, rc);
if (!rc) {
bnxt_reenable_sriov(bp);
bnxt_ptp_reapply_pps(bp);
@@ -570,6 +571,8 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti
dev_close(bp->dev);
}
rtnl_unlock();
+ if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
+ bnxt_ulp_start(bp, rc);
return rc;
}
@@ -1096,7 +1099,8 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
}
static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
struct hwrm_nvm_set_variable_input *req;
@@ -1145,7 +1149,8 @@ static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
}
static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = bnxt_get_bp_from_dl(dl);
int rc;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 1d240a27455a..4cf9bf8b01b0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -631,13 +631,13 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
buf[j] = sw_stats[k];
skip_tpa_ring_stats:
- sw = (u64 *)&cpr->sw_stats.rx;
+ sw = (u64 *)&cpr->sw_stats->rx;
if (is_rx_ring(bp, i)) {
for (k = 0; k < NUM_RING_RX_SW_STATS; j++, k++)
buf[j] = sw[k];
}
- sw = (u64 *)&cpr->sw_stats.cmn;
+ sw = (u64 *)&cpr->sw_stats->cmn;
for (k = 0; k < NUM_RING_CMN_SW_STATS; j++, k++)
buf[j] = sw[k];
}
@@ -968,7 +968,6 @@ static int bnxt_set_channels(struct net_device *dev,
return -EINVAL;
}
- bnxt_clear_usr_fltrs(bp, true);
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
@@ -1205,6 +1204,35 @@ fltr_err:
return rc;
}
+static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp,
+ u32 index)
+{
+ struct ethtool_rxfh_context *ctx;
+
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx, index);
+ if (!ctx)
+ return NULL;
+ return ethtool_rxfh_context_priv(ctx);
+}
+
+static int bnxt_alloc_vnic_rss_table(struct bnxt *bp,
+ struct bnxt_vnic_info *vnic)
+{
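+ /* One coherent DMA buffer backs both the RSS indirection table and
+ * the 40-byte Toeplitz hash key; the key starts at the cache-aligned
+ * offset 'size' (descriptive note added for clarity).
+ */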
+ int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5);
+
+ vnic->rss_table_size = size + HW_HASH_KEY_SIZE;
+ vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev,
+ vnic->rss_table_size,
+ &vnic->rss_table_dma_addr,
+ GFP_KERNEL);
+ if (!vnic->rss_table)
+ return -ENOMEM;
+
+ vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
+ vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
+ return 0;
+}
+
static int bnxt_add_l2_cls_rule(struct bnxt *bp,
struct ethtool_rx_flow_spec *fs)
{
@@ -1280,22 +1308,24 @@ static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec *ip_spec,
}
static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
- struct ethtool_rx_flow_spec *fs)
+ struct ethtool_rxnfc *cmd)
{
- u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
- u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
struct bnxt_ntuple_filter *new_fltr, *fltr;
+ u32 flow_type = fs->flow_type & 0xff;
struct bnxt_l2_filter *l2_fltr;
struct bnxt_flow_masks *fmasks;
- u32 flow_type = fs->flow_type;
struct flow_keys *fkeys;
- u32 idx;
+ u32 idx, ring;
int rc;
+ u8 vf;
if (!bp->vnic_info)
return -EAGAIN;
- if ((flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
+ vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ if ((fs->flow_type & (FLOW_MAC_EXT | FLOW_EXT)) || vf)
return -EOPNOTSUPP;
if (flow_type == IP_USER_FLOW) {
@@ -1403,6 +1433,19 @@ static int bnxt_add_ntuple_cls_rule(struct bnxt *bp,
rcu_read_unlock();
new_fltr->base.flags = BNXT_ACT_NO_AGING;
+ if (fs->flow_type & FLOW_RSS) {
+ struct bnxt_rss_ctx *rss_ctx;
+
+ new_fltr->base.fw_vnic_id = 0;
+ new_fltr->base.flags |= BNXT_ACT_RSS_CTX;
+ rss_ctx = bnxt_get_rss_ctx_from_index(bp, cmd->rss_context);
+ if (rss_ctx) {
+ new_fltr->base.fw_vnic_id = rss_ctx->index;
+ } else {
+ rc = -EINVAL;
+ goto ntuple_err;
+ }
+ }
if (fs->ring_cookie == RX_CLS_FLOW_DISC)
new_fltr->base.flags |= BNXT_ACT_DROP;
else
@@ -1444,12 +1487,12 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
flow_type == IPV6_USER_FLOW) &&
!(bp->fw_cap & BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO))
return -EOPNOTSUPP;
- if (flow_type & (FLOW_MAC_EXT | FLOW_RSS))
+ if (flow_type & FLOW_MAC_EXT)
return -EINVAL;
flow_type &= ~FLOW_EXT;
if (fs->ring_cookie == RX_CLS_FLOW_DISC && flow_type != ETHER_FLOW)
- return bnxt_add_ntuple_cls_rule(bp, fs);
+ return bnxt_add_ntuple_cls_rule(bp, cmd);
ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
@@ -1463,7 +1506,7 @@ static int bnxt_srxclsrlins(struct bnxt *bp, struct ethtool_rxnfc *cmd)
if (flow_type == ETHER_FLOW)
rc = bnxt_add_l2_cls_rule(bp, fs);
else
- rc = bnxt_add_ntuple_cls_rule(bp, fs);
+ rc = bnxt_add_ntuple_cls_rule(bp, cmd);
return rc;
}
@@ -1754,7 +1797,9 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
static int bnxt_get_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh)
{
+ struct bnxt_rss_ctx *rss_ctx = NULL;
struct bnxt *bp = netdev_priv(dev);
+ u32 *indir_tbl = bp->rss_indir_tbl;
struct bnxt_vnic_info *vnic;
u32 i, tbl_size;
@@ -1764,10 +1809,21 @@ static int bnxt_get_rxfh(struct net_device *dev,
return 0;
vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT];
- if (rxfh->indir && bp->rss_indir_tbl) {
+ if (rxfh->rss_context) {
+ struct ethtool_rxfh_context *ctx;
+
+ ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context);
+ if (!ctx)
+ return -EINVAL;
+ indir_tbl = ethtool_rxfh_context_indir(ctx);
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+ vnic = &rss_ctx->vnic;
+ }
+
+ if (rxfh->indir && indir_tbl) {
tbl_size = bnxt_get_rxfh_indir_size(dev);
for (i = 0; i < tbl_size; i++)
- rxfh->indir[i] = bp->rss_indir_tbl[i];
+ rxfh->indir[i] = indir_tbl[i];
}
if (rxfh->key && vnic->rss_hash_key)
@@ -1776,6 +1832,159 @@ static int bnxt_get_rxfh(struct net_device *dev,
return 0;
}
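+/* Apply key and/or indirection-table updates either to an additional RSS
+ * context (rss_ctx != NULL) or to the default RSS configuration; any
+ * unused tail entries of the indirection table are zeroed.
+ */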
+static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context *ctx,
+ struct bnxt_rss_ctx *rss_ctx,
+ const struct ethtool_rxfh_param *rxfh)
+{
+ if (rxfh->key) {
+ if (rss_ctx) {
+ memcpy(rss_ctx->vnic.rss_hash_key, rxfh->key,
+ HW_HASH_KEY_SIZE);
+ } else {
+ memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
+ bp->rss_hash_key_updated = true;
+ }
+ }
+ if (rxfh->indir) {
+ u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev);
+ u32 *indir_tbl = bp->rss_indir_tbl;
+
+ if (rss_ctx)
+ indir_tbl = ethtool_rxfh_context_indir(ctx);
+ for (i = 0; i < tbl_size; i++)
+ indir_tbl[i] = rxfh->indir[i];
+ pad = bp->rss_indir_tbl_entries - tbl_size;
+ if (pad)
+ memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl));
+ }
+}
+
+static int bnxt_rxfh_context_check(struct bnxt *bp,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS hash function not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) {
+ NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!netif_running(bp->dev)) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to set RSS contexts when interface is down");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int bnxt_create_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+ struct bnxt_vnic_info *vnic;
+ int rc;
+
+ rc = bnxt_rxfh_context_check(bp, rxfh, extack);
+ if (rc)
+ return rc;
+
+ if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u",
+ BNXT_MAX_ETH_RSS_CTX);
+ return -EINVAL;
+ }
+
+ if (!bnxt_rfs_capable(bp, true)) {
+ NL_SET_ERR_MSG_MOD(extack, "Out hardware resources");
+ return -ENOMEM;
+ }
+
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bp->num_rss_ctx++;
+
+ vnic = &rss_ctx->vnic;
+ vnic->rss_ctx = ctx;
+ vnic->flags |= BNXT_VNIC_RSSCTX_FLAG;
+ vnic->vnic_id = BNXT_VNIC_ID_INVALID;
+ rc = bnxt_alloc_vnic_rss_table(bp, vnic);
+ if (rc)
+ goto out;
+
+ /* Populate defaults in the context */
+ bnxt_set_dflt_rss_indir_tbl(bp, ctx);
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+ memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE);
+ memcpy(ethtool_rxfh_context_key(ctx),
+ bp->rss_hash_key, HW_HASH_KEY_SIZE);
+
+ rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to allocate VNIC");
+ goto out;
+ }
+
+ rc = bnxt_hwrm_vnic_set_tpa(bp, vnic, bp->flags & BNXT_FLAG_TPA);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
+ goto out;
+ }
+ bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
+
+ rc = __bnxt_setup_vnic_p5(bp, vnic);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA");
+ goto out;
+ }
+
+ rss_ctx->index = rxfh->rss_context;
+ return 0;
+out:
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ return rc;
+}
+
+static int bnxt_modify_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+ int rc;
+
+ rc = bnxt_rxfh_context_check(bp, rxfh, extack);
+ if (rc)
+ return rc;
+
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_modify_rss(bp, ctx, rss_ctx, rxfh);
+
+ return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic);
+}
+
+static int bnxt_remove_rxfh_context(struct net_device *dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_rss_ctx *rss_ctx;
+
+ rss_ctx = ethtool_rxfh_context_priv(ctx);
+
+ bnxt_del_one_rss_ctx(bp, rss_ctx, true);
+ return 0;
+}
+
static int bnxt_set_rxfh(struct net_device *dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
@@ -1786,21 +1995,8 @@ static int bnxt_set_rxfh(struct net_device *dev,
if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->key) {
- memcpy(bp->rss_hash_key, rxfh->key, HW_HASH_KEY_SIZE);
- bp->rss_hash_key_updated = true;
- }
-
- if (rxfh->indir) {
- u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(dev);
+ bnxt_modify_rss(bp, NULL, NULL, rxfh);
- for (i = 0; i < tbl_size; i++)
- bp->rss_indir_tbl[i] = rxfh->indir[i];
- pad = bp->rss_indir_tbl_entries - tbl_size;
- if (pad)
- memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16));
- }
- bnxt_clear_usr_fltrs(bp, false);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -4641,6 +4837,14 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!bp->num_tests || !BNXT_PF(bp))
return;
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE &&
+ bnxt_ulp_registered(bp->edev)) {
+ etest->flags |= ETH_TEST_FL_FAILED;
+ netdev_warn(dev, "Offline tests cannot be run with RoCE driver loaded\n");
+ return;
+ }
+
memset(buf, 0, sizeof(u64) * bp->num_tests);
if (!netif_running(dev)) {
etest->flags |= ETH_TEST_FL_FAILED;
@@ -4671,7 +4875,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (!offline) {
bnxt_run_fw_tests(bp, test_mask, &test_results);
} else {
- bnxt_ulp_stop(bp);
bnxt_close_nic(bp, true, false);
bnxt_run_fw_tests(bp, test_mask, &test_results);
@@ -4682,7 +4885,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
if (rc) {
bnxt_hwrm_mac_loopback(bp, false);
etest->flags |= ETH_TEST_FL_FAILED;
- bnxt_ulp_start(bp, rc);
return;
}
if (bnxt_run_loopback(bp))
@@ -4709,7 +4911,6 @@ static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
bnxt_hwrm_phy_loopback(bp, false, false);
bnxt_half_close_nic(bp);
rc = bnxt_open_nic(bp, true, true);
- bnxt_ulp_start(bp, rc);
}
if (rc || bnxt_test_irq(bp)) {
buf[BNXT_IRQ_TEST_IDX] = 1;
@@ -4829,7 +5030,7 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
}
static int bnxt_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_ptp_cfg *ptp;
@@ -5049,6 +5250,19 @@ static void bnxt_get_rmon_stats(struct net_device *dev,
*ranges = bnxt_rmon_ranges;
}
+static void bnxt_get_ptp_stats(struct net_device *dev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+
+ if (ptp) {
+ ts_stats->pkts = ptp->stats.ts_pkts;
+ ts_stats->lost = ptp->stats.ts_lost;
+ ts_stats->err = atomic64_read(&ptp->stats.ts_err);
+ }
+}
+
static void bnxt_get_link_ext_stats(struct net_device *dev,
struct ethtool_link_ext_stats *stats)
{
@@ -5071,6 +5285,10 @@ void bnxt_ethtool_free(struct bnxt *bp)
const struct ethtool_ops bnxt_ethtool_ops = {
.cap_link_lanes_supported = 1,
+ .cap_rss_ctx_supported = 1,
+ .rxfh_max_num_contexts = BNXT_MAX_ETH_RSS_CTX + 1,
+ .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5,
+ .rxfh_priv_size = sizeof(struct bnxt_rss_ctx),
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
ETHTOOL_COALESCE_USECS_IRQ |
@@ -5108,6 +5326,9 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_rxfh_key_size = bnxt_get_rxfh_key_size,
.get_rxfh = bnxt_get_rxfh,
.set_rxfh = bnxt_set_rxfh,
+ .create_rxfh_context = bnxt_create_rxfh_context,
+ .modify_rxfh_context = bnxt_modify_rxfh_context,
+ .remove_rxfh_context = bnxt_remove_rxfh_context,
.flash_device = bnxt_flash_device,
.get_eeprom_len = bnxt_get_eeprom_len,
.get_eeprom = bnxt_get_eeprom,
@@ -5131,4 +5352,5 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_eth_mac_stats = bnxt_get_eth_mac_stats,
.get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats,
.get_rmon_stats = bnxt_get_rmon_stats,
+ .get_ts_stats = bnxt_get_ptp_stats,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index e957abd704db..f219709f9563 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -2,7 +2,7 @@
*
* Copyright (c) 2014-2016 Broadcom Corporation
* Copyright (c) 2014-2018 Broadcom Limited
- * Copyright (c) 2018-2023 Broadcom Inc.
+ * Copyright (c) 2018-2024 Broadcom Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -468,6 +468,10 @@ struct cmd_nums {
#define HWRM_TF_GLOBAL_CFG_GET 0x2fdUL
#define HWRM_TF_IF_TBL_SET 0x2feUL
#define HWRM_TF_IF_TBL_GET 0x2ffUL
+ #define HWRM_TF_RESC_USAGE_SET 0x300UL
+ #define HWRM_TF_RESC_USAGE_QUERY 0x301UL
+ #define HWRM_TF_TBL_TYPE_ALLOC 0x302UL
+ #define HWRM_TF_TBL_TYPE_FREE 0x303UL
#define HWRM_TFC_TBL_SCOPE_QCAPS 0x380UL
#define HWRM_TFC_TBL_SCOPE_ID_ALLOC 0x381UL
#define HWRM_TFC_TBL_SCOPE_CONFIG 0x382UL
@@ -495,7 +499,12 @@ struct cmd_nums {
#define HWRM_TFC_IF_TBL_SET 0x398UL
#define HWRM_TFC_IF_TBL_GET 0x399UL
#define HWRM_TFC_TBL_SCOPE_CONFIG_GET 0x39aUL
+ #define HWRM_TFC_RESC_USAGE_QUERY 0x39bUL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCAPS 0x39cUL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_CFG 0x39dUL
+ #define HWRM_QUEUE_PFCWD_TIMEOUT_QCFG 0x39eUL
#define HWRM_SV 0x400UL
+ #define HWRM_DBG_LOG_BUFFER_FLUSH 0xff0fUL
#define HWRM_DBG_READ_DIRECT 0xff10UL
#define HWRM_DBG_READ_INDIRECT 0xff11UL
#define HWRM_DBG_WRITE_DIRECT 0xff12UL
@@ -604,8 +613,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 3
-#define HWRM_VERSION_RSVD 15
-#define HWRM_VERSION_STR "1.10.3.15"
+#define HWRM_VERSION_RSVD 44
+#define HWRM_VERSION_STR "1.10.3.44"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -659,6 +668,7 @@ struct hwrm_ver_get_output {
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TFLIB_SUPPORTED 0x2000UL
#define VER_GET_RESP_DEV_CAPS_CFG_CFA_TRUFLOW_SUPPORTED 0x4000UL
#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_BOOT_CAPABLE 0x8000UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SECURE_SOC_CAPABLE 0x10000UL
u8 roce_fw_maj_8b;
u8 roce_fw_min_8b;
u8 roce_fw_bld_8b;
@@ -838,7 +848,9 @@ struct hwrm_async_event_cmpl {
#define ASYNC_EVENT_CMPL_EVENT_ID_HW_DOORBELL_RECOVERY_READ_ERROR 0x49UL
#define ASYNC_EVENT_CMPL_EVENT_ID_CTX_ERROR 0x4aUL
#define ASYNC_EVENT_CMPL_EVENT_ID_UDCC_SESSION_CHANGE 0x4bUL
- #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER 0x4cUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_PEER_MMAP_CHANGE 0x4dUL
+ #define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID 0x4eUL
#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL
#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL
#define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1321,15 +1333,16 @@ struct hwrm_async_event_cmpl_error_report_base {
u8 timestamp_lo;
__le16 timestamp_hi;
__le32 event_data1;
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
- #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_RESERVED 0x0UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_PAUSE_STORM 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL 0x2UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_NVM 0x3UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DOORBELL_DROP_THRESHOLD 0x4UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_THERMAL_THRESHOLD 0x5UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
};
/* hwrm_async_event_cmpl_error_report_pause_storm (size:128b/16B) */
@@ -1478,6 +1491,30 @@ struct hwrm_async_event_cmpl_error_report_thermal {
#define ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_THERMAL_EVENT_DATA1_TRANSITION_DIR_INCREASING
};
+/* hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported (size:128b/16B) */
+struct hwrm_async_event_cmpl_error_report_dual_data_rate_not_supported {
+ __le16 type;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_MASK 0x3fUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT 0x2eUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_TYPE_HWRM_ASYNC_EVENT
+ __le16 event_id;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT 0x45UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_ID_ERROR_REPORT
+ __le32 event_data2;
+ u8 opaque_v;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_V 0x1UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_MASK 0xfeUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_MASK 0xffUL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_SFT 0
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED 0x6UL
+ #define ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_LAST ASYNC_EVENT_CMPL_ERROR_REPORT_DUAL_DATA_RATE_NOT_SUPPORTED_EVENT_DATA1_ERROR_TYPE_DUAL_DATA_RATE_NOT_SUPPORTED
+};
+
/* hwrm_func_reset_input (size:192b/24B) */
struct hwrm_func_reset_input {
__le16 req_type;
@@ -1781,6 +1818,12 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT2_ROCE_VF_RESOURCE_MGMT_SUPPORTED 0x100000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_UDCC_SUPPORTED 0x200000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT2_TIMED_TX_SO_TXTIME_SUPPORTED 0x400000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_SW_MAX_RESOURCE_LIMITS_SUPPORTED 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_INGRESS_NIC_FLOW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_LPBK_STATS_SUPPORTED 0x2000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_TF_EGRESS_NIC_FLOW_SUPPORTED 0x4000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_MULTI_LOSSLESS_QUEUES_SUPPORTED 0x8000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT2_PEER_MMAP_SUPPORTED 0x10000000UL
__le16 tunnel_disable_flag;
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_VXLAN 0x1UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_NGE 0x2UL
@@ -1791,13 +1834,11 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_MPLS 0x40UL
#define FUNC_QCAPS_RESP_TUNNEL_DISABLE_FLAG_DISABLE_PPPOE 0x80UL
__le16 xid_partition_cap;
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_TKC 0x1UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_KTLS_RKC 0x2UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_TKC 0x4UL
- #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_QUIC_RKC 0x8UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_TX_CK 0x1UL
+ #define FUNC_QCAPS_RESP_XID_PARTITION_CAP_RX_CK 0x2UL
u8 device_serial_number[8];
__le16 ctxs_per_partition;
- u8 unused_2[2];
+ __le16 max_tso_segs;
__le32 roce_vf_max_av;
__le32 roce_vf_max_cq;
__le32 roce_vf_max_mrw;
@@ -1844,6 +1885,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED 0x1000UL
#define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT 0x2000UL
#define FUNC_QCFG_RESP_FLAGS_ENABLE_RDMA_SRIOV 0x4000UL
+ #define FUNC_QCFG_RESP_FLAGS_ROCE_VNIC_ID_VALID 0x8000UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
@@ -1955,7 +1997,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_DB_PAGE_SIZE_2MB 0x9UL
#define FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB 0xaUL
#define FUNC_QCFG_RESP_DB_PAGE_SIZE_LAST FUNC_QCFG_RESP_DB_PAGE_SIZE_4MB
- u8 unused_2[2];
+ __le16 roce_vnic_id;
__le32 partition_min_bw;
#define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_PARTITION_MIN_BW_BW_VALUE_SFT 0
@@ -2003,6 +2045,8 @@ struct hwrm_func_qcfg_output {
__le32 roce_max_srq_per_vf;
__le32 roce_max_gid_per_vf;
__le16 xid_partition_cfg;
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_QCFG_RESP_XID_PARTITION_CFG_RX_CK 0x2UL
u8 unused_7;
u8 valid;
};
@@ -2229,10 +2273,8 @@ struct hwrm_func_cfg_input {
__le32 roce_max_srq_per_vf;
__le32 roce_max_gid_per_vf;
__le16 xid_partition_cfg;
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_TKC 0x1UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_KTLS_RKC 0x2UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_TKC 0x4UL
- #define FUNC_CFG_REQ_XID_PARTITION_CFG_QUIC_RKC 0x8UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_TX_CK 0x1UL
+ #define FUNC_CFG_REQ_XID_PARTITION_CFG_RX_CK 0x2UL
__le16 unused_2;
};
@@ -2416,6 +2458,8 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_FLAGS_RSS_STRICT_HASH_TYPE_SUPPORT 0x100UL
#define FUNC_DRV_RGTR_REQ_FLAGS_NPAR_1_2_SUPPORT 0x200UL
#define FUNC_DRV_RGTR_REQ_FLAGS_ASYM_QUEUE_CFG_SUPPORT 0x400UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_INGRESS_NIC_FLOW_MODE 0x800UL
+ #define FUNC_DRV_RGTR_REQ_FLAGS_TF_EGRESS_NIC_FLOW_MODE 0x1000UL
__le32 enables;
#define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL
#define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL
@@ -3627,28 +3671,33 @@ struct hwrm_func_backing_store_cfg_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_CFG_V2_REQ_TYPE_INVALID
__le16 instance;
__le32 flags;
#define FUNC_BACKING_STORE_CFG_V2_REQ_FLAGS_PREBOOT_MODE 0x1UL
@@ -3707,17 +3756,22 @@ struct hwrm_func_backing_store_qcfg_v2_input {
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_FP_TQM_RING 0x6UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MRAV 0xeUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RKC 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RX_CK 0x14UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_MP_TQM_RING 0x15UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_QUIC_RKC 0x1bUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_XID_PARTITION_TABLE 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID 0xffffUL
#define FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_REQ_TYPE_INVALID
__le16 instance;
@@ -3731,24 +3785,29 @@ struct hwrm_func_backing_store_qcfg_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCFG_V2_RESP_TYPE_INVALID
__le16 instance;
__le32 flags;
__le64 page_dir;
@@ -3832,28 +3891,33 @@ struct hwrm_func_backing_store_qcaps_v2_input {
__le16 target_id;
__le64 resp_addr;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_KTLS_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_REQ_TYPE_INVALID
u8 rsvd[6];
};
@@ -3864,28 +3928,33 @@ struct hwrm_func_backing_store_qcaps_v2_output {
__le16 seq_id;
__le16 resp_len;
__le16 type;
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_TKC 0x13UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_KTLS_RKC 0x14UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_TKC 0x1aUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QUIC_RKC 0x1bUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
- #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_QP 0x0UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ 0x1UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ 0x2UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_VNIC 0x3UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_STAT 0x4UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SP_TQM_RING 0x5UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_FP_TQM_RING 0x6UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MRAV 0xeUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TIM 0xfUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TX_CK 0x13UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RX_CK 0x14UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_MP_TQM_RING 0x15UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SQ_DB_SHADOW 0x16UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RQ_DB_SHADOW 0x17UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRQ_DB_SHADOW 0x18UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CQ_DB_SHADOW 0x19UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_TBL_SCOPE 0x1cUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_XID_PARTITION 0x1dUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT_TRACE 0x1eUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_SRT2_TRACE 0x1fUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT_TRACE 0x20UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_CRT2_TRACE 0x21UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_RIGP0_TRACE 0x22UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_L2_HWRM_TRACE 0x23UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_ROCE_HWRM_TRACE 0x24UL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID 0xffffUL
+ #define FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_LAST FUNC_BACKING_STORE_QCAPS_V2_RESP_TYPE_INVALID
__le16 entry_size;
__le32 flags;
#define FUNC_BACKING_STORE_QCAPS_V2_RESP_FLAGS_ENABLE_CTX_KIND_INIT 0x1UL
@@ -3990,6 +4059,7 @@ struct hwrm_func_drv_if_change_output {
__le32 flags;
#define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL
#define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL
+ #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_CAPS_CHANGE 0x4UL
u8 unused_0[3];
u8 valid;
};
@@ -4151,7 +4221,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
- #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_800GB_PAM4_112
__le16 auto_link_speeds2_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
@@ -4166,6 +4237,7 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_800GB_PAM4_112 0x2000UL
u8 unused_2[6];
};
@@ -4241,6 +4313,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_800GB 0x1f40UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
u8 duplex_cfg;
@@ -4400,7 +4473,13 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASECR8 0x38UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASESR8 0x39UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASELR8 0x3aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEER8 0x3bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEFR8 0x3cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8 0x3dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_800G_BASEDR8
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -4472,7 +4551,11 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
#define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
- #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPDD (0x18UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP112 (0x1eUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFPDD (0x1fUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP (0x20UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_LAST PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_CSFP
__le16 fec_cfg;
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
@@ -4994,33 +5077,43 @@ struct hwrm_port_qstats_ext_output {
u8 valid;
};
-/* hwrm_port_lpbk_qstats_input (size:128b/16B) */
+/* hwrm_port_lpbk_qstats_input (size:256b/32B) */
struct hwrm_port_lpbk_qstats_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ __le16 lpbk_stat_size;
+ u8 flags;
+ #define PORT_LPBK_QSTATS_REQ_FLAGS_COUNTER_MASK 0x1UL
+ u8 unused_0[5];
+ __le64 lpbk_stat_host_addr;
};
-/* hwrm_port_lpbk_qstats_output (size:768b/96B) */
+/* hwrm_port_lpbk_qstats_output (size:128b/16B) */
struct hwrm_port_lpbk_qstats_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
+ __le16 lpbk_stat_size;
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* port_lpbk_stats (size:640b/80B) */
+struct port_lpbk_stats {
__le64 lpbk_ucast_frames;
__le64 lpbk_mcast_frames;
__le64 lpbk_bcast_frames;
__le64 lpbk_ucast_bytes;
__le64 lpbk_mcast_bytes;
__le64 lpbk_bcast_bytes;
- __le64 tx_stat_discard;
- __le64 tx_stat_error;
- __le64 rx_stat_discard;
- __le64 rx_stat_error;
- u8 unused_0[7];
- u8 valid;
+ __le64 lpbk_tx_discards;
+ __le64 lpbk_tx_errors;
+ __le64 lpbk_rx_discards;
+ __le64 lpbk_rx_errors;
};
/* hwrm_port_ecn_qstats_input (size:256b/32B) */
@@ -5085,13 +5178,15 @@ struct hwrm_port_clr_stats_output {
u8 valid;
};
-/* hwrm_port_lpbk_clr_stats_input (size:128b/16B) */
+/* hwrm_port_lpbk_clr_stats_input (size:192b/24B) */
struct hwrm_port_lpbk_clr_stats_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
+ __le16 port_id;
+ u8 unused_0[6];
};
/* hwrm_port_lpbk_clr_stats_output (size:128b/16B) */
@@ -5232,10 +5327,11 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
__le16 flags2;
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
- #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_REMOTE_LPBK_UNSUPPORTED 0x10UL
u8 internal_port_cnt;
u8 unused_0;
__le16 supported_speeds2_force_mode;
@@ -7380,7 +7476,7 @@ struct hwrm_cfa_l2_filter_free_output {
u8 valid;
};
-/* hwrm_cfa_l2_filter_cfg_input (size:320b/40B) */
+/* hwrm_cfa_l2_filter_cfg_input (size:384b/48B) */
struct hwrm_cfa_l2_filter_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -7388,23 +7484,33 @@ struct hwrm_cfa_l2_filter_cfg_input {
__le16 target_id;
__le64 resp_addr;
__le32 flags;
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
- #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX 0x0UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX 0x1UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_MASK 0xcUL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_SFT 2
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_NO_ROCE_L2 (0x0UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_L2 (0x1UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE (0x2UL << 2)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_TRAFFIC_ROCE
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_MASK 0x30UL
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_SFT 4
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_NO_UPDATE (0x0UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_BYPASS_LKUP (0x1UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP (0x2UL << 4)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_REMAP_OP_ENABLE_LKUP
__le32 enables;
#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
#define CFA_L2_FILTER_CFG_REQ_ENABLES_NEW_MIRROR_VNIC_ID 0x2UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_PROF_FUNC 0x4UL
+ #define CFA_L2_FILTER_CFG_REQ_ENABLES_L2_CONTEXT_ID 0x8UL
__le64 l2_filter_id;
__le32 dst_id;
__le32 new_mirror_vnic_id;
+ __le32 prof_func;
+ __le32 l2_context_id;
};
/* hwrm_cfa_l2_filter_cfg_output (size:128b/16B) */
@@ -8455,18 +8561,26 @@ struct hwrm_tunnel_dst_port_query_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
u8 tunnel_next_proto;
u8 unused_0[6];
};
@@ -8503,18 +8617,26 @@ struct hwrm_tunnel_dst_port_alloc_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
u8 tunnel_next_proto;
__be16 tunnel_dst_port_val;
u8 unused_0[4];
@@ -8554,18 +8676,26 @@ struct hwrm_tunnel_dst_port_free_input {
__le16 target_id;
__le64 resp_addr;
u8 tunnel_type;
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
- #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_CUSTOM_GRE 0xdUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ECPRI 0xeUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_SRV6 0xfUL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE 0x10UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GRE 0x11UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR 0x12UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES01 0x13UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES02 0x14UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES03 0x15UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES04 0x16UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES05 0x17UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES06 0x18UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07 0x19UL
+ #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_ULP_DYN_UPAR_RES07
u8 tunnel_next_proto;
__le16 tunnel_dst_port_id;
u8 unused_0[4];
@@ -8860,7 +8990,7 @@ struct hwrm_stat_generic_qstats_output {
u8 valid;
};
-/* generic_sw_hw_stats (size:1408b/176B) */
+/* generic_sw_hw_stats (size:1472b/184B) */
struct generic_sw_hw_stats {
__le64 pcie_statistics_tx_tlp;
__le64 pcie_statistics_rx_tlp;
@@ -8884,6 +9014,7 @@ struct generic_sw_hw_stats {
__le64 hw_db_recov_dbs_dropped;
__le64 hw_db_recov_drops_serviced;
__le64 hw_db_recov_dbs_recovered;
+ __le64 hw_db_recov_oo_drop_count;
};
/* hwrm_fw_reset_input (size:192b/24B) */
@@ -9019,6 +9150,7 @@ struct hwrm_struct_hdr {
#define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
#define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
#define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL
+ #define STRUCT_HDR_STRUCT_ID_PEER_MMAP 0x429UL
#define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL
#define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
#define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL
@@ -9668,6 +9800,9 @@ struct hwrm_dbg_coredump_initiate_input {
__le16 instance;
__le16 unused_0;
u8 seg_flags;
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_LIVE_DATA 0x1UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_CRASH_DATA 0x2UL
+ #define DBG_COREDUMP_INITIATE_REQ_SEG_FLAGS_COLLECT_CTX_L1_CACHE 0x4UL
u8 unused_1[7];
};
@@ -10343,13 +10478,13 @@ struct hwrm_selftest_irq_output {
/* dbc_dbc (size:64b/8B) */
struct dbc_dbc {
- u32 index;
+ __le32 index;
#define DBC_DBC_INDEX_MASK 0xffffffUL
#define DBC_DBC_INDEX_SFT 0
#define DBC_DBC_EPOCH 0x1000000UL
#define DBC_DBC_TOGGLE_MASK 0x6000000UL
#define DBC_DBC_TOGGLE_SFT 25
- u32 type_path_xid;
+ __le32 type_path_xid;
#define DBC_DBC_XID_MASK 0xfffffUL
#define DBC_DBC_XID_SFT 0
#define DBC_DBC_PATH_MASK 0x3000000UL
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
index 1df3d56cc4b5..d2fd2d04ed47 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c
@@ -680,7 +680,7 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx)
req_type);
else if (rc && rc != HWRM_ERR_CODE_PF_UNAVAILABLE)
hwrm_err(bp, ctx, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
- req_type, token->seq_id, rc);
+ req_type, le16_to_cpu(ctx->req->seq_id), rc);
rc = __hwrm_to_stderr(rc);
exit:
if (token)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index cc07660330f5..37d42423459c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -109,7 +109,8 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp)
spin_unlock_bh(&ptp->ptp_lock);
}
-static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
+static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts,
+ u32 txts_tmo, int slot)
{
struct hwrm_port_ts_query_output *resp;
struct hwrm_port_ts_query_input *req;
@@ -122,10 +123,16 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts)
req->flags = cpu_to_le32(flags);
if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) ==
PORT_TS_QUERY_REQ_FLAGS_PATH_TX) {
+ struct bnxt_ptp_tx_req *txts_req = &bp->ptp_cfg->txts_req[slot];
+ u32 tmo_us = txts_tmo * 1000;
+
req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES);
- req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid);
- req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off);
- req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT);
+ req->ptp_seq_id = cpu_to_le32(txts_req->tx_seqid);
+ req->ptp_hdr_offset = cpu_to_le16(txts_req->tx_hdr_off);
+ if (!tmo_us)
+ tmo_us = BNXT_PTP_QTS_TIMEOUT;
+ tmo_us = min(tmo_us, BNXT_PTP_QTS_MAX_TMO_US);
+ req->ts_req_timeout = cpu_to_le16(tmo_us);
}
resp = hwrm_req_hold(bp, req);
@@ -650,6 +657,14 @@ static int bnxt_map_ptp_regs(struct bnxt *bp)
(ptp->refclk_regs[i] & BNXT_GRC_OFFSET_MASK);
return 0;
}
+ if (bp->flags & BNXT_FLAG_CHIP_P7) {
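+		/* On P7 the refclock registers are presumably in the
+		 * direct-access GRC range, so no mapping window is
+		 * programmed; registers with base bits set are rejected.
+		 */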
+ for (i = 0; i < 2; i++) {
+ if (reg_arr[i] & BNXT_GRC_BASE_MASK)
+ return -EINVAL;
+ ptp->refclk_mapped_regs[i] = reg_arr[i];
+ }
+ return 0;
+ }
return -ENODEV;
}
@@ -668,29 +683,44 @@ static u64 bnxt_cc_read(const struct cyclecounter *cc)
return ns;
}
-static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb)
+static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
struct skb_shared_hwtstamps timestamp;
+ struct bnxt_ptp_tx_req *txts_req;
+ unsigned long now = jiffies;
u64 ts = 0, ns = 0;
+ u32 tmo = 0;
int rc;
- rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts);
+ txts_req = &ptp->txts_req[slot];
+ /* make sure bnxt_get_tx_ts_p5() has updated abs_txts_tmo */
+ smp_rmb();
+ if (!time_after_eq(now, txts_req->abs_txts_tmo))
+ tmo = jiffies_to_msecs(txts_req->abs_txts_tmo - now);
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts,
+ tmo, slot);
if (!rc) {
memset(&timestamp, 0, sizeof(timestamp));
spin_lock_bh(&ptp->ptp_lock);
ns = timecounter_cyc2time(&ptp->tc, ts);
spin_unlock_bh(&ptp->ptp_lock);
timestamp.hwtstamp = ns_to_ktime(ns);
- skb_tstamp_tx(ptp->tx_skb, &timestamp);
+ skb_tstamp_tx(txts_req->tx_skb, &timestamp);
+ ptp->stats.ts_pkts++;
} else {
+ if (!time_after_eq(jiffies, txts_req->abs_txts_tmo))
+ return -EAGAIN;
+
+ ptp->stats.ts_lost++;
netdev_warn_once(bp->dev,
"TS query for TX timer failed rc = %x\n", rc);
}
- dev_kfree_skb_any(ptp->tx_skb);
- ptp->tx_skb = NULL;
- atomic_inc(&ptp->tx_avail);
+ dev_kfree_skb_any(txts_req->tx_skb);
+ txts_req->tx_skb = NULL;
+
+ return 0;
}
static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
@@ -699,12 +729,30 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
ptp_info);
unsigned long now = jiffies;
struct bnxt *bp = ptp->bp;
+ u16 cons = ptp->txts_cons;
+ u32 num_requests;
+ int rc = 0;
+
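+	/* Walk the outstanding TX timestamp slots in ring order: an
+	 * IS_ERR skb presumably marks an aborted request whose slot is
+	 * simply reclaimed, an empty slot ends the scan, and -EAGAIN
+	 * (query not yet timed out) leaves the slot for the next run.
+	 */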
+ num_requests = BNXT_MAX_TX_TS - READ_ONCE(ptp->tx_avail);
+ while (num_requests--) {
+ if (IS_ERR(ptp->txts_req[cons].tx_skb))
+ goto next_slot;
+ if (!ptp->txts_req[cons].tx_skb)
+ break;
+ rc = bnxt_stamp_tx_skb(bp, cons);
+ if (rc == -EAGAIN)
+ break;
+next_slot:
+ BNXT_PTP_INC_TX_AVAIL(ptp);
+ cons = NEXT_TXTS(cons);
+ }
+ ptp->txts_cons = cons;
- if (ptp->tx_skb)
- bnxt_stamp_tx_skb(bp, ptp->tx_skb);
-
- if (!time_after_eq(now, ptp->next_period))
+ if (!time_after_eq(now, ptp->next_period)) {
+ if (rc == -EAGAIN)
+ return 0;
return ptp->next_period - now;
+ }
bnxt_ptp_get_current_time(bp);
ptp->next_period = now + HZ;
@@ -714,20 +762,37 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info)
spin_unlock_bh(&ptp->ptp_lock);
ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD;
}
+ if (rc == -EAGAIN)
+ return 0;
return HZ;
}
-int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb)
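+/* Claim a producer slot in the small ring of outstanding TX timestamp
+ * requests; ptp_tx_lock serializes concurrent claimers, and -ENOSPC is
+ * counted as a ts_err since the packet cannot be timestamped.
+ */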
+int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod)
+{
+ spin_lock_bh(&ptp->ptp_tx_lock);
+ if (ptp->tx_avail) {
+ *prod = ptp->txts_prod;
+ ptp->txts_prod = NEXT_TXTS(*prod);
+ ptp->tx_avail--;
+ spin_unlock_bh(&ptp->ptp_tx_lock);
+ return 0;
+ }
+ spin_unlock_bh(&ptp->ptp_tx_lock);
+ atomic64_inc(&ptp->stats.ts_err);
+ return -ENOSPC;
+}
+
+void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ struct bnxt_ptp_tx_req *txts_req;
- if (ptp->tx_skb) {
- netdev_err(bp->dev, "deferring skb:one SKB is still outstanding\n");
- return -EBUSY;
- }
- ptp->tx_skb = skb;
+ txts_req = &ptp->txts_req[prod];
+ txts_req->abs_txts_tmo = jiffies + msecs_to_jiffies(ptp->txts_tmo);
+ /* make sure abs_txts_tmo is written first */
+ smp_wmb();
+ txts_req->tx_skb = skb;
ptp_schedule_worker(ptp->ptp_clock, 0);
- return 0;
}
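
The smp_wmb() here pairs with the smp_rmb() at the top of bnxt_stamp_tx_skb(): the worker treats a non-NULL tx_skb as "request ready", so abs_txts_tmo must be visible before the skb pointer is published. Condensed pairing, illustrative only (the READ_ONCE() is added here for clarity; the hunk above tests the pointer directly):

    /* producer side (bnxt_get_tx_ts_p5) */
    req->abs_txts_tmo = jiffies + msecs_to_jiffies(tmo_ms);
    smp_wmb();                      /* order: timeout before skb pointer */
    req->tx_skb = skb;              /* non-NULL skb marks the slot ready */

    /* consumer side (PTP aux worker -> bnxt_stamp_tx_skb) */
    if (READ_ONCE(req->tx_skb)) {
            smp_rmb();              /* pairs with the smp_wmb() above */
            /* abs_txts_tmo now reads as the value written first */
    }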
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
@@ -746,6 +811,38 @@ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts)
return 0;
}
+void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct tx_ts_cmp *tscmp)
+{
+ struct skb_shared_hwtstamps timestamp = {};
+ struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ u32 opaque = tscmp->tx_ts_cmp_opaque;
+ struct bnxt_tx_ring_info *txr;
+ struct bnxt_sw_tx_bd *tx_buf;
+ u64 ts, ns;
+ u16 cons;
+
+ txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
+ ts = BNXT_GET_TX_TS_48B_NS(tscmp);
+ cons = TX_OPAQUE_IDX(opaque);
+ tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)];
+ if (tx_buf->is_ts_pkt) {
+ if (BNXT_TX_TS_ERR(tscmp)) {
+ netdev_err(bp->dev,
+ "timestamp completion error 0x%x 0x%x\n",
+ le32_to_cpu(tscmp->tx_ts_cmp_flags_type),
+ le32_to_cpu(tscmp->tx_ts_cmp_errors_v));
+ } else {
+ spin_lock_bh(&ptp->ptp_lock);
+ ns = timecounter_cyc2time(&ptp->tc, ts);
+ spin_unlock_bh(&ptp->ptp_lock);
+ timestamp.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(tx_buf->skb, &timestamp);
+ }
+ tx_buf->is_ts_pkt = 0;
+ }
+}
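
bnxt_tx_ts_cmp() recovers the TX ring and buffer index from the 32-bit opaque handle echoed back in the completion. A hedged sketch of that style of handle packing; the field widths are illustrative, and the real TX_OPAQUE_RING()/TX_OPAQUE_IDX() macros derive theirs from the ring geometry:

    #define OPQ_IDX_BITS    16
    #define OPQ_IDX_MASK    ((1u << OPQ_IDX_BITS) - 1)

    static inline u32 opq_pack(u32 ring, u32 idx)
    {
            return (ring << OPQ_IDX_BITS) | (idx & OPQ_IDX_MASK);
    }

    static inline u32 opq_ring(u32 opaque) { return opaque >> OPQ_IDX_BITS; }
    static inline u32 opq_idx(u32 opaque)  { return opaque & OPQ_IDX_MASK;  }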
+
static const struct ptp_clock_info bnxt_ptp_caps = {
.owner = THIS_MODULE,
.name = "bnxt clock",
@@ -891,7 +988,8 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg)
if (rc)
return rc;
} else {
- rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, &ns);
+ rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME,
+ &ns, 0, 0);
if (rc)
return rc;
}
@@ -931,8 +1029,9 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
bnxt_ptp_free(bp);
- atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS);
+ WRITE_ONCE(ptp->tx_avail, BNXT_MAX_TX_TS);
spin_lock_init(&ptp->ptp_lock);
+ spin_lock_init(&ptp->ptp_tx_lock);
if (BNXT_PTP_USE_RTC(bp)) {
bnxt_ptp_timecounter_init(bp, false);
@@ -958,13 +1057,19 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg)
rc = err;
goto out;
}
- if (BNXT_CHIP_P5(bp)) {
+
+ ptp->stats.ts_pkts = 0;
+ ptp->stats.ts_lost = 0;
+ atomic64_set(&ptp->stats.ts_err, 0);
+
+ if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
spin_lock_bh(&ptp->ptp_lock);
bnxt_refclk_read(bp, NULL, &ptp->current_time);
WRITE_ONCE(ptp->old_time, ptp->current_time);
spin_unlock_bh(&ptp->ptp_lock);
ptp_schedule_worker(ptp->ptp_clock, 0);
}
+ ptp->txts_tmo = BNXT_PTP_DFLT_TX_TMO;
return 0;
out:
@@ -976,6 +1081,7 @@ out:
void bnxt_ptp_clear(struct bnxt *bp)
{
struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;
+ int i;
if (!ptp)
return;
@@ -987,9 +1093,12 @@ void bnxt_ptp_clear(struct bnxt *bp)
kfree(ptp->ptp_info.pin_config);
ptp->ptp_info.pin_config = NULL;
- if (ptp->tx_skb) {
- dev_kfree_skb_any(ptp->tx_skb);
- ptp->tx_skb = NULL;
+ for (i = 0; i < BNXT_MAX_TX_TS; i++) {
+ if (ptp->txts_req[i].tx_skb) {
+ dev_kfree_skb_any(ptp->txts_req[i].tx_skb);
+ ptp->txts_req[i].tx_skb = NULL;
+ }
}
+
bnxt_unmap_ptp_regs(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index fce8dc39a7d0..a9a2f9a18c9c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -22,7 +22,9 @@
#define BNXT_LO_TIMER_MASK 0x0000ffffffffUL
#define BNXT_HI_TIMER_MASK 0xffff00000000UL
+#define BNXT_PTP_DFLT_TX_TMO 1000 /* ms */
#define BNXT_PTP_QTS_TIMEOUT 1000
+#define BNXT_PTP_QTS_MAX_TMO_US 65535U
#define BNXT_PTP_QTS_TX_ENABLES (PORT_TS_QUERY_REQ_ENABLES_PTP_SEQ_ID | \
PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \
PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET)
@@ -77,6 +79,22 @@ struct bnxt_pps {
struct pps_pin pins[BNXT_MAX_TSIO_PINS];
};
+struct bnxt_ptp_stats {
+ u64 ts_pkts;
+ u64 ts_lost;
+ atomic64_t ts_err;
+};
+
+#define BNXT_MAX_TX_TS 4
+#define NEXT_TXTS(idx) (((idx) + 1) & (BNXT_MAX_TX_TS - 1))
+
+struct bnxt_ptp_tx_req {
+ struct sk_buff *tx_skb;
+ u16 tx_seqid;
+ u16 tx_hdr_off;
+ unsigned long abs_txts_tmo;
+};
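
NEXT_TXTS() wraps the producer and consumer indices with a mask rather than a modulo, which is equivalent only because BNXT_MAX_TX_TS is a power of two. A build-time guard along these lines (not part of the patch) would catch a future non-power-of-two depth:

    BUILD_BUG_ON(BNXT_MAX_TX_TS & (BNXT_MAX_TX_TS - 1));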
+
struct bnxt_ptp_cfg {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
@@ -85,7 +103,8 @@ struct bnxt_ptp_cfg {
struct bnxt_pps pps_info;
/* serialize timecounter access */
spinlock_t ptp_lock;
- struct sk_buff *tx_skb;
+ /* serialize ts tx request queuing */
+ spinlock_t ptp_tx_lock;
u64 current_time;
u64 old_time;
unsigned long next_period;
@@ -94,11 +113,10 @@ struct bnxt_ptp_cfg {
/* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */
#define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ)
- u16 tx_seqid;
- u16 tx_hdr_off;
+ struct bnxt_ptp_tx_req txts_req[BNXT_MAX_TX_TS];
+
struct bnxt *bp;
- atomic_t tx_avail;
-#define BNXT_MAX_TX_TS 1
+ u32 tx_avail;
u16 rxctl;
#define BNXT_PTP_MSG_SYNC (1 << 0)
#define BNXT_PTP_MSG_DELAY_REQ (1 << 1)
@@ -120,6 +138,11 @@ struct bnxt_ptp_cfg {
u32 refclk_regs[2];
u32 refclk_mapped_regs[2];
+ u32 txts_tmo;
+ u16 txts_prod;
+ u16 txts_cons;
+
+ struct bnxt_ptp_stats stats;
};
#if BITS_PER_LONG == 32
@@ -134,6 +157,13 @@ do { \
((dst) = READ_ONCE(src))
#endif
+#define BNXT_PTP_INC_TX_AVAIL(ptp) \
+do { \
+ spin_lock_bh(&(ptp)->ptp_tx_lock); \
+ (ptp)->tx_avail++; \
+ spin_unlock_bh(&(ptp)->ptp_tx_lock); \
+} while (0)
+
int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off);
void bnxt_ptp_update_current_time(struct bnxt *bp);
void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2);
@@ -141,8 +171,11 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp);
void bnxt_ptp_reapply_pps(struct bnxt *bp);
int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr);
int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr);
-int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb);
+int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod);
+void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod);
int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts);
+void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi,
+ struct tx_ts_cmp *tscmp);
void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns);
int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg);
int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 175192ebaa77..22898d3d088b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -950,8 +950,11 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
struct hwrm_fwd_resp_input *req;
int rc;
- if (BNXT_FWD_RESP_SIZE_ERR(msg_size))
+ if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) {
+ netdev_warn_once(bp->dev, "HWRM fwd response too big (%d bytes)\n",
+ msg_size);
return -EINVAL;
+ }
rc = hwrm_req_init(bp, req, HWRM_FWD_RESP);
if (!rc) {
@@ -1085,7 +1088,7 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
rc = bnxt_hwrm_exec_fwd_resp(
bp, vf, sizeof(struct hwrm_port_phy_qcfg_input));
} else {
- struct hwrm_port_phy_qcfg_output phy_qcfg_resp = {0};
+ struct hwrm_port_phy_qcfg_output_compat phy_qcfg_resp = {};
struct hwrm_port_phy_qcfg_input *phy_qcfg_req;
phy_qcfg_req =
@@ -1096,6 +1099,11 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
mutex_unlock(&bp->link_lock);
phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp));
phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id;
+ /* New SPEEDS2 fields are beyond the legacy structure, so
+ * clear the SPEEDS2_SUPPORTED flag.
+ */
+ phy_qcfg_resp.option_flags &=
+ ~PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED;
phy_qcfg_resp.valid = 1;
if (vf->flags & BNXT_VF_LINK_UP) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 273c9ba48f09..d2ca90407cce 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -370,6 +370,7 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
struct bnxt_tc_flow *flow)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd);
+ struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
struct flow_dissector *dissector = rule->match.dissector;
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
@@ -380,6 +381,9 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 195c02dc0683..b9e7d3e7b15d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -31,21 +31,74 @@ static DEFINE_IDA(bnxt_aux_dev_ids);
static void bnxt_fill_msix_vecs(struct bnxt *bp, struct bnxt_msix_entry *ent)
{
struct bnxt_en_dev *edev = bp->edev;
- int num_msix, idx, i;
+ int num_msix, i;
if (!edev->ulp_tbl->msix_requested) {
netdev_warn(bp->dev, "Requested MSI-X vectors insufficient\n");
return;
}
num_msix = edev->ulp_tbl->msix_requested;
- idx = edev->ulp_tbl->msix_base;
for (i = 0; i < num_msix; i++) {
- ent[i].vector = bp->irq_tbl[idx + i].vector;
- ent[i].ring_idx = idx + i;
+ ent[i].vector = bp->irq_tbl[i].vector;
+ ent[i].ring_idx = i;
if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
ent[i].db_offset = bp->db_offset;
else
- ent[i].db_offset = (idx + i) * 0x80;
+ ent[i].db_offset = i * 0x80;
+ }
+}
+
+int bnxt_get_ulp_msix_num(struct bnxt *bp)
+{
+ if (bp->edev)
+ return bp->edev->ulp_num_msix_vec;
+ return 0;
+}
+
+void bnxt_set_ulp_msix_num(struct bnxt *bp, int num)
+{
+ if (bp->edev)
+ bp->edev->ulp_num_msix_vec = num;
+}
+
+int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev))
+ return bp->edev->ulp_num_msix_vec;
+ return 0;
+}
+
+int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
+{
+ if (bp->edev)
+ return bp->edev->ulp_num_ctxs;
+ return 0;
+}
+
+void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ulp_ctx)
+{
+ if (bp->edev)
+ bp->edev->ulp_num_ctxs = num_ulp_ctx;
+}
+
+int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp)
+{
+ if (bnxt_ulp_registered(bp->edev))
+ return bp->edev->ulp_num_ctxs;
+ return 0;
+}
+
+void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp)
+{
+ if (bp->edev) {
+ bp->edev->ulp_num_ctxs = BNXT_MIN_ROCE_STAT_CTXS;
+ /* Reserve one additional stat_ctx for PF0 (except
+ * on 1-port NICs) as it also creates one stat_ctx
+ * for PF1 in case of RoCE bonding.
+ */
+ if (BNXT_PF(bp) && !bp->pf.port_id &&
+ bp->port_count > 1)
+ bp->edev->ulp_num_ctxs++;
}
}
@@ -57,25 +110,36 @@ int bnxt_register_dev(struct bnxt_en_dev *edev,
struct bnxt *bp = netdev_priv(dev);
unsigned int max_stat_ctxs;
struct bnxt_ulp *ulp;
+ int rc = 0;
+ rtnl_lock();
+ mutex_lock(&edev->en_dev_lock);
+ if (!bp->irq_tbl) {
+ rc = -ENODEV;
+ goto exit;
+ }
max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp);
if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS ||
- bp->cp_nr_rings == max_stat_ctxs)
- return -ENOMEM;
+ bp->cp_nr_rings == max_stat_ctxs) {
+ rc = -ENOMEM;
+ goto exit;
+ }
ulp = edev->ulp_tbl;
- if (!ulp)
- return -ENOMEM;
-
ulp->handle = handle;
rcu_assign_pointer(ulp->ulp_ops, ulp_ops);
if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_hwrm_vnic_cfg(bp, 0);
+ bnxt_hwrm_vnic_cfg(bp, &bp->vnic_info[BNXT_VNIC_DEFAULT]);
+
+ edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
bnxt_fill_msix_vecs(bp, bp->edev->msix_entries);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
- return 0;
+exit:
+ mutex_unlock(&edev->en_dev_lock);
+ rtnl_unlock();
+ return rc;
}
EXPORT_SYMBOL(bnxt_register_dev);
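
Registration now runs under both RTNL and the new en_dev_lock, taken in that order and dropped in reverse, with every failure path funneled through the single exit label. Condensed shape of the function above, with do_registration() as a hypothetical stand-in for the body:

    rtnl_lock();
    mutex_lock(&edev->en_dev_lock);
    rc = do_registration(bp);           /* hypothetical stand-in */
    mutex_unlock(&edev->en_dev_lock);
    rtnl_unlock();
    return rc;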
@@ -87,8 +151,11 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
int i = 0;
ulp = edev->ulp_tbl;
+ rtnl_lock();
+ mutex_lock(&edev->en_dev_lock);
if (ulp->msix_requested)
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
+ edev->ulp_tbl->msix_requested = 0;
if (ulp->max_async_event_id)
bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
@@ -101,11 +168,13 @@ void bnxt_unregister_dev(struct bnxt_en_dev *edev)
msleep(100);
i++;
}
+ mutex_unlock(&edev->en_dev_lock);
+ rtnl_unlock();
return;
}
EXPORT_SYMBOL(bnxt_unregister_dev);
-int bnxt_get_ulp_msix_num(struct bnxt *bp)
+static int bnxt_set_dflt_ulp_msix(struct bnxt *bp)
{
u32 roce_msix = BNXT_VF(bp) ?
BNXT_MAX_VF_ROCE_MSIX : BNXT_MAX_ROCE_MSIX;
@@ -114,29 +183,6 @@ int bnxt_get_ulp_msix_num(struct bnxt *bp)
min_t(u32, roce_msix, num_online_cpus()) : 0);
}
-int bnxt_get_ulp_msix_base(struct bnxt *bp)
-{
- if (bnxt_ulp_registered(bp->edev)) {
- struct bnxt_en_dev *edev = bp->edev;
-
- if (edev->ulp_tbl->msix_requested)
- return edev->ulp_tbl->msix_base;
- }
- return 0;
-}
-
-int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
-{
- if (bnxt_ulp_registered(bp->edev)) {
- struct bnxt_en_dev *edev = bp->edev;
-
- if (edev->ulp_tbl->msix_requested)
- return BNXT_MIN_ROCE_STAT_CTXS;
- }
-
- return 0;
-}
-
int bnxt_send_msg(struct bnxt_en_dev *edev,
struct bnxt_fw_msg *fw_msg)
{
@@ -181,13 +227,19 @@ void bnxt_ulp_stop(struct bnxt *bp)
if (!edev)
return;
+ mutex_lock(&edev->en_dev_lock);
+ if (!bnxt_ulp_registered(edev)) {
+ mutex_unlock(&edev->en_dev_lock);
+ return;
+ }
+
edev->flags |= BNXT_EN_FLAG_ULP_STOPPED;
if (aux_priv) {
struct auxiliary_device *adev;
adev = &aux_priv->aux_dev;
if (adev->dev.driver) {
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
pm_message_t pm = {};
adrv = to_auxiliary_drv(adev->dev.driver);
@@ -195,6 +247,7 @@ void bnxt_ulp_stop(struct bnxt *bp)
adrv->suspend(adev, pm);
}
}
+ mutex_unlock(&edev->en_dev_lock);
}
void bnxt_ulp_start(struct bnxt *bp, int err)
@@ -210,6 +263,12 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
if (err)
return;
+ mutex_lock(&edev->en_dev_lock);
+ if (!bnxt_ulp_registered(edev)) {
+ mutex_unlock(&edev->en_dev_lock);
+ return;
+ }
+
if (edev->ulp_tbl->msix_requested)
bnxt_fill_msix_vecs(bp, edev->msix_entries);
@@ -218,14 +277,14 @@ void bnxt_ulp_start(struct bnxt *bp, int err)
adev = &aux_priv->aux_dev;
if (adev->dev.driver) {
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
adrv = to_auxiliary_drv(adev->dev.driver);
edev->en_state = bp->state;
adrv->resume(adev);
}
}
-
+ mutex_unlock(&edev->en_dev_lock);
}
void bnxt_ulp_irq_stop(struct bnxt *bp)
@@ -309,7 +368,6 @@ void bnxt_rdma_aux_device_uninit(struct bnxt *bp)
aux_priv = bp->aux_priv;
adev = &aux_priv->aux_dev;
- auxiliary_device_delete(adev);
auxiliary_device_uninit(adev);
}
@@ -327,6 +385,14 @@ static void bnxt_aux_dev_release(struct device *dev)
bp->aux_priv = NULL;
}
+void bnxt_rdma_aux_device_del(struct bnxt *bp)
+{
+ if (!bp->edev)
+ return;
+
+ auxiliary_device_delete(&bp->aux_priv->aux_dev);
+}
+
static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
{
edev->net = bp->dev;
@@ -334,6 +400,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->l2_db_size = bp->db_size;
edev->l2_db_size_nc = bp->db_size;
edev->l2_db_offset = bp->db_offset;
+ mutex_init(&edev->en_dev_lock);
if (bp->flags & BNXT_FLAG_ROCEV1_CAP)
edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP;
@@ -347,7 +414,23 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->pf_port_id = bp->pf.port_id;
edev->en_state = bp->state;
edev->bar0 = bp->bar0;
- edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
+}
+
+void bnxt_rdma_aux_device_add(struct bnxt *bp)
+{
+ struct auxiliary_device *aux_dev;
+ int rc;
+
+ if (!bp->edev)
+ return;
+
+ aux_dev = &bp->aux_priv->aux_dev;
+ rc = auxiliary_device_add(aux_dev);
+ if (rc) {
+ netdev_warn(bp->dev, "Failed to add auxiliary device for ROCE\n");
+ auxiliary_device_uninit(aux_dev);
+ bp->flags &= ~BNXT_FLAG_ROCE_CAP;
+ }
}
void bnxt_rdma_aux_device_init(struct bnxt *bp)
@@ -404,13 +487,7 @@ void bnxt_rdma_aux_device_init(struct bnxt *bp)
edev->ulp_tbl = ulp;
bp->edev = edev;
bnxt_set_edev_info(edev, bp);
-
- rc = auxiliary_device_add(aux_dev);
- if (rc) {
- netdev_warn(bp->dev,
- "Failed to add auxiliary device for ROCE\n");
- goto aux_dev_uninit;
- }
+ bp->ulp_num_msix_want = bnxt_set_dflt_ulp_msix(bp);
return;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index b9e73de14b57..4eafe6ec0abf 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -46,7 +46,6 @@ struct bnxt_ulp {
unsigned long *async_events_bmap;
u16 max_async_event_id;
u16 msix_requested;
- u16 msix_base;
atomic_t ref_count;
};
@@ -86,18 +85,28 @@ struct bnxt_en_dev {
* updated in resume.
*/
void __iomem *bar0;
+
+ u16 ulp_num_msix_vec;
+ u16 ulp_num_ctxs;
+
+ /* serialize ulp operations */
+ struct mutex en_dev_lock;
};
static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
{
- if (edev && edev->ulp_tbl)
+ if (edev && rcu_access_pointer(edev->ulp_tbl->ulp_ops))
return true;
return false;
}
int bnxt_get_ulp_msix_num(struct bnxt *bp);
-int bnxt_get_ulp_msix_base(struct bnxt *bp);
+int bnxt_get_ulp_msix_num_in_use(struct bnxt *bp);
+void bnxt_set_ulp_msix_num(struct bnxt *bp, int num);
int bnxt_get_ulp_stat_ctxs(struct bnxt *bp);
+void bnxt_set_ulp_stat_ctxs(struct bnxt *bp, int num_ctxs);
+int bnxt_get_ulp_stat_ctxs_in_use(struct bnxt *bp);
+void bnxt_set_dflt_ulp_stat_ctxs(struct bnxt *bp);
void bnxt_ulp_stop(struct bnxt *bp);
void bnxt_ulp_start(struct bnxt *bp, int err);
void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs);
@@ -105,6 +114,8 @@ void bnxt_ulp_irq_stop(struct bnxt *bp);
void bnxt_ulp_irq_restart(struct bnxt *bp, int err);
void bnxt_ulp_async_events(struct bnxt *bp, struct hwrm_async_event_cmpl *cmpl);
void bnxt_rdma_aux_device_uninit(struct bnxt *bp);
+void bnxt_rdma_aux_device_del(struct bnxt *bp);
+void bnxt_rdma_aux_device_add(struct bnxt *bp);
void bnxt_rdma_aux_device_init(struct bnxt *bp);
int bnxt_register_dev(struct bnxt_en_dev *edev, struct bnxt_ulp_ops *ulp_ops,
void *handle);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
index 4079538bc310..f88b641533fc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -197,7 +197,7 @@ void bnxt_xdp_buff_init(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir);
xdp_init_buff(xdp, buflen, &rxr->xdp_rxq);
- xdp_prepare_buff(xdp, data_ptr - offset, offset, len, false);
+ xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
}
void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
@@ -222,7 +222,7 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
* false - packet should be passed to the stack.
*/
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event)
{
struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
@@ -244,9 +244,9 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
txr = rxr->bnapi->tx_ring[0];
/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
- orig_data = xdp.data;
+ orig_data = xdp->data;
- act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ act = bpf_prog_run_xdp(xdp_prog, xdp);
tx_avail = bnxt_tx_avail(bp, txr);
/* If the tx ring is not full, we must not update the rx producer yet
@@ -255,10 +255,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail != bp->tx_ring_size)
*event &= ~BNXT_RX_EVENT;
- *len = xdp.data_end - xdp.data;
- if (orig_data != xdp.data) {
- offset = xdp.data - xdp.data_hard_start;
- *data_ptr = xdp.data_hard_start + offset;
+ *len = xdp->data_end - xdp->data;
+ if (orig_data != xdp->data) {
+ offset = xdp->data - xdp->data_hard_start;
+ *data_ptr = xdp->data_hard_start + offset;
}
switch (act) {
@@ -270,8 +270,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
mapping = rx_buf->mapping - bp->rx_dma_offset;
*event &= BNXT_TX_CMP_EVENT;
- if (unlikely(xdp_buff_has_frags(&xdp))) {
- struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
tx_needed += sinfo->nr_frags;
*event = BNXT_AGG_EVENT;
@@ -279,7 +279,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
if (tx_avail < tx_needed) {
trace_xdp_exception(bp->dev, xdp_prog, act);
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
@@ -289,7 +289,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
*event |= BNXT_TX_EVENT;
__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
- NEXT_RX(rxr->rx_prod), &xdp);
+ NEXT_RX(rxr->rx_prod), xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
case XDP_REDIRECT:
@@ -297,21 +297,16 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
* redirect is coming from a frame received by the
* bnxt_en driver.
*/
- rx_buf = &rxr->rx_buf_ring[cons];
- mapping = rx_buf->mapping - bp->rx_dma_offset;
- dma_unmap_page_attrs(&pdev->dev, mapping,
- BNXT_RX_PAGE_SIZE, bp->rx_dir,
- DMA_ATTR_WEAK_ORDERING);
/* if we are unable to allocate a new buffer, abort and reuse */
if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
return true;
}
- if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
+ if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
trace_xdp_exception(bp->dev, xdp_prog, act);
page_pool_recycle_direct(rxr->page_pool, page);
return true;
@@ -326,7 +321,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
trace_xdp_exception(bp->dev, xdp_prog, act);
fallthrough;
case XDP_DROP:
- bnxt_xdp_buff_frags_free(rxr, &xdp);
+ bnxt_xdp_buff_frags_free(rxr, xdp);
bnxt_reuse_rx_data(rxr, cons, page);
break;
}
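
bnxt_rx_xdp() now receives the xdp_buff by pointer. With the old by-value parameter, any bpf_xdp_adjust_head()/bpf_xdp_adjust_tail() performed by the program moved data/data_end only in the callee's stack copy, so the caller rebuilt the skb from stale offsets on XDP_PASS. Illustrative contrast, not driver code:

    static void run_by_value(struct bpf_prog *prog, struct xdp_buff xdp)
    {
            bpf_prog_run_xdp(prog, &xdp);   /* adjustments land in the copy */
    }

    static void run_by_ref(struct bpf_prog *prog, struct xdp_buff *xdp)
    {
            bpf_prog_run_xdp(prog, xdp);    /* caller sees new data/data_end */
    }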
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
index 5e412c5655ba..0122782400b8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -18,7 +18,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
struct xdp_buff *xdp);
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
- struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+ struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
unsigned int *len, u8 *event);
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 3d63177e7e52..c2b4188a1ef1 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -3682,7 +3682,8 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
#if defined(CONFIG_INET)
struct rtable *rt;
- rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
+ rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0,
+ RT_SCOPE_UNIVERSE);
if (!IS_ERR(rt)) {
*dst = &rt->dst;
return 0;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index 1248792d7fd4..0715ea5bf13e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -42,19 +42,15 @@ void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
- if (dev->phydev) {
+ if (dev->phydev)
phy_ethtool_get_wol(dev->phydev, wol);
- if (wol->supported)
- return;
- }
- if (!device_can_wakeup(kdev)) {
- wol->supported = 0;
- wol->wolopts = 0;
+ /* MAC is not wake-up capable, return what the PHY does */
+ if (!device_can_wakeup(kdev))
return;
- }
- wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
+ /* Overlay MAC capabilities with that of the PHY queried before */
+ wol->supported |= WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
memset(wol->sopass, 0, sizeof(wol->sopass));
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 62ff4381ac83..0ec5f01551f9 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4019,7 +4019,7 @@ static int tg3_power_up(struct tg3 *tp)
static int tg3_setup_phy(struct tg3 *, bool);
-static int tg3_power_down_prepare(struct tg3 *tp)
+static void tg3_power_down_prepare(struct tg3 *tp)
{
u32 misc_host_ctrl;
bool device_should_wake, do_low_power;
@@ -4263,7 +4263,7 @@ static int tg3_power_down_prepare(struct tg3 *tp)
tg3_ape_driver_state_change(tp, RESET_KIND_SHUTDOWN);
- return 0;
+ return;
}
static void tg3_power_down(struct tg3 *tp)
@@ -6141,7 +6141,7 @@ static void tg3_refclk_write(struct tg3 *tp, u64 newval)
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
static inline void tg3_full_unlock(struct tg3 *tp);
-static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
struct tg3 *tp = netdev_priv(dev);
@@ -14295,7 +14295,7 @@ static void tg3_set_rx_mode(struct net_device *dev)
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (new_mtu > ETH_DATA_LEN) {
if (tg3_flag(tp, 5780_CLASS)) {
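
The dev->mtu stores in this series are converted to WRITE_ONCE() because the MTU is read locklessly on fast paths; pairing the annotated store with READ_ONCE() on the reader keeps the compiler from tearing or caching the access. Minimal pairing, illustrative:

    WRITE_ONCE(dev->mtu, new_mtu);              /* writer: .ndo_change_mtu */

    unsigned int mtu = READ_ONCE(dev->mtu);     /* reader: datapath, no lock */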
@@ -18084,7 +18084,6 @@ static int tg3_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct tg3 *tp = netdev_priv(dev);
- int err = 0;
rtnl_lock();
@@ -18108,32 +18107,11 @@ static int tg3_suspend(struct device *device)
tg3_flag_clear(tp, INIT_COMPLETE);
tg3_full_unlock(tp);
- err = tg3_power_down_prepare(tp);
- if (err) {
- int err2;
-
- tg3_full_lock(tp, 0);
-
- tg3_flag_set(tp, INIT_COMPLETE);
- err2 = tg3_restart_hw(tp, true);
- if (err2)
- goto out;
-
- tg3_timer_start(tp);
-
- netif_device_attach(dev);
- tg3_netif_start(tp);
-
-out:
- tg3_full_unlock(tp);
-
- if (!err2)
- tg3_phy_start(tp);
- }
+ tg3_power_down_prepare(tp);
unlock:
rtnl_unlock();
- return err;
+ return 0;
}
static int tg3_resume(struct device *device)
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index a5ebd7110e07..986f43d27711 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -416,7 +416,7 @@ struct bna_ib {
/* Tx object */
/* Tx datapath control structure */
-#define BNA_Q_NAME_SIZE 16
+#define BNA_Q_NAME_SIZE (IFNAMSIZ + 6)
struct bna_tcb {
/* Fast path */
void **sw_qpt;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index c32174484a96..ece6f3b48327 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1534,8 +1534,9 @@ bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
for (i = 0; i < num_txqs; i++) {
vector_num = tx_info->tcb[i]->intr_vector;
- sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
- tx_id + tx_info->tcb[i]->id);
+ snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d",
+ bnad->netdev->name,
+ tx_id + tx_info->tcb[i]->id);
err = request_irq(bnad->msix_table[vector_num].vector,
(irq_handler_t)bnad_msix_tx, 0,
tx_info->tcb[i]->name,
@@ -1585,9 +1586,9 @@ bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
for (i = 0; i < num_rxps; i++) {
vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
- sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
- bnad->netdev->name,
- rx_id + rx_info->rx_ctrl[i].ccb->id);
+ snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE,
+ "%s CQ %d", bnad->netdev->name,
+ rx_id + rx_info->rx_ctrl[i].ccb->id);
err = request_irq(bnad->msix_table[vector_num].vector,
(irq_handler_t)bnad_msix_rx, 0,
rx_info->rx_ctrl[i].ccb->name,
@@ -3276,7 +3277,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
mutex_lock(&bnad->conf_mutex);
mtu = netdev->mtu;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
frame = BNAD_FRAME_SIZE(mtu);
new_frame = BNAD_FRAME_SIZE(new_mtu);
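
The sprintf() to snprintf() conversions in the MSI-X registration hunks above pair with widening BNA_Q_NAME_SIZE to IFNAMSIZ + 6 in bna_types.h: the queue name is now formatted with an explicit bound, so a maximal interface name can no longer overrun tcb->name. Hedged recap of the safe form:

    char name[IFNAMSIZ + 6];

    /* snprintf() NUL-terminates and truncates rather than overflowing,
     * unlike the sprintf() it replaces. */
    snprintf(name, sizeof(name), "%s TXQ %d", netdev->name, qid);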
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index aa5700ac9c00..ea71612f6b36 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -645,6 +645,10 @@
#define GEM_T2OFST_OFFSET 0 /* offset value */
#define GEM_T2OFST_SIZE 7
+/* Bitfields in queue pointer registers */
+#define MACB_QUEUE_DISABLE_OFFSET 0 /* disable queue */
+#define MACB_QUEUE_DISABLE_SIZE 1
+
/* Offset for screener type 2 compare values (T2CMPOFST).
* Note the offset is applied after the specified point,
* e.g. GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset
@@ -733,6 +737,7 @@
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
#define MACB_CAPS_MIIONRGMII 0x00000200
#define MACB_CAPS_NEED_TSUCLK 0x00000400
+#define MACB_CAPS_QUEUE_DISABLE 0x00000800
#define MACB_CAPS_PCS 0x01000000
#define MACB_CAPS_HIGH_SPEED 0x02000000
#define MACB_CAPS_CLK_HW_CHG 0x04000000
@@ -1163,7 +1168,7 @@ struct macb_ptp_info {
s32 (*get_ptp_max_adj)(void);
unsigned int (*get_tsu_rate)(struct macb *bp);
int (*get_ts_info)(struct net_device *dev,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int (*get_hwtst)(struct net_device *netdev,
struct kernel_hwtstamp_config *tstamp_config);
int (*set_hwtst)(struct net_device *netdev,
@@ -1254,6 +1259,8 @@ struct macb {
u32 (*macb_reg_readl)(struct macb *bp, int offset);
void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
+ struct macb_dma_desc *rx_ring_tieoff;
+ dma_addr_t rx_ring_tieoff_dma;
size_t rx_buffer_size;
unsigned int rx_ring_size;
@@ -1299,6 +1306,7 @@ struct macb {
unsigned int jumbo_max_len;
u32 wol;
+ u32 wolopts;
/* holds value of rx watermark value for pbuf_rxcutthru register */
u32 rx_watermark;
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 898debfd4db3..dcd3f54ed0cf 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -38,6 +38,7 @@
#include <linux/ptp_classify.h>
#include <linux/reset.h>
#include <linux/firmware/xlnx-zynqmp.h>
+#include <linux/inetdevice.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -84,8 +85,7 @@ struct sifive_fu540_macb_mgmt {
#define GEM_MTU_MIN_SIZE ETH_MIN_MTU
#define MACB_NETIF_LSO NETIF_F_TSO
-#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
-#define MACB_WOL_ENABLED (0x1 << 1)
+#define MACB_WOL_ENABLED BIT(0)
#define HS_SPEED_10000M 4
#define MACB_SERDES_RATE_10G 1
@@ -2477,6 +2477,12 @@ static void macb_free_consistent(struct macb *bp)
unsigned int q;
int size;
+ if (bp->rx_ring_tieoff) {
+ dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp),
+ bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
+ bp->rx_ring_tieoff = NULL;
+ }
+
bp->macbgem_ops.mog_free_rx_buffers(bp);
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -2568,6 +2574,16 @@ static int macb_alloc_consistent(struct macb *bp)
if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
goto out_err;
+ /* Required for tie off descriptor for PM cases */
+ if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
+ bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
+ macb_dma_desc_get_size(bp),
+ &bp->rx_ring_tieoff_dma,
+ GFP_KERNEL);
+ if (!bp->rx_ring_tieoff)
+ goto out_err;
+ }
+
return 0;
out_err:
@@ -2575,6 +2591,19 @@ out_err:
return -ENOMEM;
}
+static void macb_init_tieoff(struct macb *bp)
+{
+ struct macb_dma_desc *desc = bp->rx_ring_tieoff;
+
+ if (bp->caps & MACB_CAPS_QUEUE_DISABLE)
+ return;
+ /* Setup a wrapping descriptor with no free slots
+ * (WRAP and USED) to tie off/disable unused RX queues.
+ */
+ macb_set_addr(bp, desc, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED));
+ desc->ctrl = 0;
+}
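
On controllers without MACB_CAPS_QUEUE_DISABLE an RX queue cannot be switched off directly, so macb_init_tieoff() builds a single shared descriptor that is permanently "in use": RX_USED tells the DMA engine there is no free buffer, and RX_WRAP loops the one-entry ring back onto itself. Pointing an idle queue's RBQP at this descriptor (done in the macb_suspend() hunk further down) parks the queue immediately. Condensed, illustrative:

    /* one-entry ring that is always full: DMA fetches nothing and
     * wraps back to the same descriptor */
    macb_set_addr(bp, desc, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED));
    desc->ctrl = 0;
    queue_writel(queue, RBQP, lower_32_bits(tieoff_dma));   /* park queue */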
+
static void gem_init_rings(struct macb *bp)
{
struct macb_queue *queue;
@@ -2598,6 +2627,7 @@ static void gem_init_rings(struct macb *bp)
gem_rx_refill(queue);
}
+ macb_init_tieoff(bp);
}
static void macb_init_rings(struct macb *bp)
@@ -2615,6 +2645,8 @@ static void macb_init_rings(struct macb *bp)
bp->queues[0].tx_head = 0;
bp->queues[0].tx_tail = 0;
desc->ctrl |= MACB_BIT(TX_WRAP);
+
+ macb_init_tieoff(bp);
}
static void macb_reset_hw(struct macb *bp)
@@ -3022,7 +3054,7 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
if (netif_running(dev))
return -EBUSY;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -3246,13 +3278,11 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct macb *bp = netdev_priv(netdev);
- if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
- phylink_ethtool_get_wol(bp->phylink, wol);
- wol->supported |= WAKE_MAGIC;
+ phylink_ethtool_get_wol(bp->phylink, wol);
+ wol->supported |= (WAKE_MAGIC | WAKE_ARP);
- if (bp->wol & MACB_WOL_ENABLED)
- wol->wolopts |= WAKE_MAGIC;
- }
+ /* Add macb wolopts to phy wolopts */
+ wol->wolopts |= bp->wolopts;
}
static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -3262,22 +3292,15 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
/* Pass the order to phylink layer */
ret = phylink_ethtool_set_wol(bp->phylink, wol);
- /* Don't manage WoL on MAC if handled by the PHY
- * or if there's a failure in talking to the PHY
- */
- if (!ret || ret != -EOPNOTSUPP)
+ /* Don't manage WoL on MAC, if PHY set_wol() fails */
+ if (ret && ret != -EOPNOTSUPP)
return ret;
- if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
- (wol->wolopts & ~WAKE_MAGIC))
- return -EOPNOTSUPP;
-
- if (wol->wolopts & WAKE_MAGIC)
- bp->wol |= MACB_WOL_ENABLED;
- else
- bp->wol &= ~MACB_WOL_ENABLED;
+ bp->wolopts = (wol->wolopts & WAKE_MAGIC) ? WAKE_MAGIC : 0;
+ bp->wolopts |= (wol->wolopts & WAKE_ARP) ? WAKE_ARP : 0;
+ bp->wol = (wol->wolopts) ? MACB_WOL_ENABLED : 0;
- device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
+ device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
return 0;
}
@@ -3376,7 +3399,7 @@ static s32 gem_get_ptp_max_adj(void)
}
static int gem_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct macb *bp = netdev_priv(dev);
@@ -3417,7 +3440,7 @@ static struct macb_ptp_info gem_ptp_info = {
#endif
static int macb_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct macb *bp = netdev_priv(netdev);
@@ -4917,7 +4940,8 @@ static const struct macb_config sama7g5_emac_config = {
static const struct macb_config versal_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK,
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK |
+ MACB_CAPS_QUEUE_DISABLE,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = init_reset_optional,
@@ -5053,9 +5077,7 @@ static int macb_probe(struct platform_device *pdev)
bp->max_tx_length = GEM_MAX_TX_LEN;
bp->wol = 0;
- if (of_property_read_bool(np, "magic-packet"))
- bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
- device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
+ device_set_wakeup_capable(&pdev->dev, 1);
bp->usrio = macb_config->usrio;
@@ -5211,10 +5233,13 @@ static int __maybe_unused macb_suspend(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev);
+ struct in_ifaddr *ifa = NULL;
struct macb_queue *queue;
+ struct in_device *idev;
unsigned long flags;
unsigned int q;
int err;
+ u32 tmp;
if (!device_may_wakeup(&bp->dev->dev))
phy_exit(bp->sgmii_phy);
@@ -5223,18 +5248,54 @@ static int __maybe_unused macb_suspend(struct device *dev)
return 0;
if (bp->wol & MACB_WOL_ENABLED) {
+ /* Check for IP address in WOL ARP mode */
+ idev = __in_dev_get_rcu(bp->dev);
+ if (idev)
+ ifa = rcu_dereference(idev->ifa_list);
+ if ((bp->wolopts & WAKE_ARP) && !ifa) {
+ netdev_err(netdev, "IP address not assigned as required by WoL walk ARP\n");
+ return -EOPNOTSUPP;
+ }
spin_lock_irqsave(&bp->lock, flags);
- /* Flush all status bits */
- macb_writel(bp, TSR, -1);
- macb_writel(bp, RSR, -1);
+
+ /* Disable Tx and Rx engines before disabling the queues,
+ * this is mandatory as per the IP spec sheet
+ */
+ tmp = macb_readl(bp, NCR);
+ macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue) {
+ /* Disable RX queues */
+ if (bp->caps & MACB_CAPS_QUEUE_DISABLE) {
+ queue_writel(queue, RBQP, MACB_BIT(QUEUE_DISABLE));
+ } else {
+ /* Tie off RX queues */
+ queue_writel(queue, RBQP,
+ lower_32_bits(bp->rx_ring_tieoff_dma));
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ queue_writel(queue, RBQPH,
+ upper_32_bits(bp->rx_ring_tieoff_dma));
+#endif
+ }
/* Disable all interrupts */
queue_writel(queue, IDR, -1);
queue_readl(queue, ISR);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, -1);
}
+ /* Enable Receive engine */
+ macb_writel(bp, NCR, tmp | MACB_BIT(RE));
+ /* Flush all status bits */
+ macb_writel(bp, TSR, -1);
+ macb_writel(bp, RSR, -1);
+
+ tmp = (bp->wolopts & WAKE_MAGIC) ? MACB_BIT(MAG) : 0;
+ if (bp->wolopts & WAKE_ARP) {
+ tmp |= MACB_BIT(ARP);
+ /* write IP address into register */
+ tmp |= MACB_BFEXT(IP, be32_to_cpu(ifa->ifa_local));
+ }
+
/* Change interrupt handler and
* Enable WoL IRQ on queue 0
*/
@@ -5250,7 +5311,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
return err;
}
queue_writel(bp->queues, IER, GEM_BIT(WOL));
- gem_writel(bp, WOL, MACB_BIT(MAG));
+ gem_writel(bp, WOL, tmp);
} else {
err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
IRQF_SHARED, netdev->name, bp->queues);
@@ -5262,7 +5323,7 @@ static int __maybe_unused macb_suspend(struct device *dev)
return err;
}
queue_writel(bp->queues, IER, MACB_BIT(WOL));
- macb_writel(bp, WOL, MACB_BIT(MAG));
+ macb_writel(bp, WOL, tmp);
}
spin_unlock_irqrestore(&bp->lock, flags);
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 5e97f1e4e38e..a71b320fd030 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1358,7 +1358,7 @@ static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
/* Bring interface down, change mtu and bring interface back up */
xgmac_stop(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return xgmac_open(dev);
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index f38d31bfab1b..674c54831875 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -1262,7 +1262,7 @@ int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
lio->mtu = new_mtu;
WRITE_ONCE(sc->caller_is_done, true);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index d3e07b6ed5e1..5835965dbc32 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -2497,7 +2497,7 @@ ret_intrmod:
}
static int lio_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct lio *lio = GET_LIO(netdev);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 34f02a8ec2ca..1d79f6eaa41f 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -92,12 +92,6 @@ static int octeon_console_debug_enabled(u32 console)
/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
-struct oct_link_status_resp {
- u64 rh;
- struct oct_link_info link_info;
- u64 status;
-};
-
struct oct_timestamp_resp {
u64 rh;
u64 timestamp;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
index aa6c0dfb6f1c..989b4ddae342 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
@@ -218,7 +218,7 @@ lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
return -EIO;
}
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
@@ -272,13 +272,12 @@ lio_vf_rep_copy_packet(struct octeon_device *oct,
pg_info->page_offset;
memcpy(skb->data, va, MIN_SKB_SIZE);
skb_put(skb, MIN_SKB_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset + MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
}
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- pg_info->page,
- pg_info->page_offset + MIN_SKB_SIZE,
- len - MIN_SKB_SIZE,
- LIO_RXBUFFER_SZ);
} else {
struct octeon_skb_page_info *pg_info =
((struct octeon_skb_page_info *)(skb->cb));
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 0d6ee30affb9..eef12fdd246d 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -30,11 +30,6 @@
#include "cn23xx_pf_device.h"
#include "cn23xx_vf_device.h"
-struct niclist {
- struct list_head list;
- void *ptr;
-};
-
struct __dispatch {
struct list_head list;
struct octeon_recv_info *rinfo;
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 007d4b06819e..744f2434f7fa 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -649,7 +649,7 @@ static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
struct octeon_mgmt *p = netdev_priv(netdev);
int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
/* HW lifts the limit if the frame is VLAN tagged
* (+4 bytes per each tag, up to two tags)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 34125b8cd935..6a04d2530176 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -836,7 +836,7 @@ static int nicvf_set_pauseparam(struct net_device *dev,
}
static int nicvf_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct nicvf *nic = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index eff350e0bc2a..aebb9fef3f6e 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1589,7 +1589,7 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (!netif_running(netdev))
return 0;
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index a317feb8decb..608cc6af5af1 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -54,7 +54,7 @@ struct lmac {
bool link_up;
int lmacid; /* ID within BGX */
int lmacid_bd; /* ID on board */
- struct net_device netdev;
+ struct net_device *netdev;
struct phy_device *phydev;
unsigned int last_duplex;
unsigned int last_link;
@@ -590,10 +590,12 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
static void bgx_lmac_handler(struct net_device *netdev)
{
- struct lmac *lmac = container_of(netdev, struct lmac, netdev);
struct phy_device *phydev;
+ struct lmac *lmac, **priv;
int link_changed = 0;
+ priv = netdev_priv(netdev);
+ lmac = *priv;
phydev = lmac->phydev;
if (!phydev->link && lmac->last_link)
@@ -1116,7 +1118,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
}
lmac->phydev->dev_flags = 0;
- if (phy_connect_direct(&lmac->netdev, lmac->phydev,
+ if (phy_connect_direct(lmac->netdev, lmac->phydev,
bgx_lmac_handler,
phy_interface_mode(lmac->lmac_type)))
return -ENODEV;
@@ -1414,7 +1416,7 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
- SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
+ SET_NETDEV_DEV(bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
bgx->acpi_lmac_idx++; /* move to next LMAC */
@@ -1483,7 +1485,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
of_get_mac_address(node, bgx->lmac[lmac].mac);
- SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
+ SET_NETDEV_DEV(bgx->lmac[lmac].netdev, &bgx->pdev->dev);
bgx->lmac[lmac].lmacid = lmac;
phy_np = of_parse_phandle(node, "phy-handle", 0);
@@ -1644,6 +1646,23 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bgx_get_qlm_mode(bgx);
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ struct lmac *lmacp, **priv;
+
+ lmacp = &bgx->lmac[lmac];
+ lmacp->netdev = alloc_netdev_dummy(sizeof(struct lmac *));
+
+ if (!lmacp->netdev) {
+ for (int i = 0; i < lmac; i++)
+ free_netdev(bgx->lmac[i].netdev);
+ err = -ENOMEM;
+ goto err_enable;
+ }
+
+ priv = netdev_priv(lmacp->netdev);
+ *priv = lmacp;
+ }
+
err = bgx_init_phy(bgx);
if (err)
goto err_enable;
@@ -1683,8 +1702,10 @@ static void bgx_remove(struct pci_dev *pdev)
u8 lmac;
/* Disable all LMACs */
- for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
bgx_lmac_disable(bgx, lmac);
+ free_netdev(bgx->lmac[lmac].netdev);
+ }
pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
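
struct lmac previously embedded a full struct net_device purely so phylib had a device to attach to, and bgx_lmac_handler() recovered the lmac with container_of(). The patch switches to alloc_netdev_dummy(), so the netdev is heap-allocated and the back-pointer lives in its priv area instead. The pattern, condensed from the probe hunk above:

    struct lmac **priv;

    lmac->netdev = alloc_netdev_dummy(sizeof(struct lmac *));
    if (!lmac->netdev)
            return -ENOMEM;                 /* probe unwinds earlier netdevs */
    priv = netdev_priv(lmac->netdev);
    *priv = lmac;                           /* replaces the container_of() walk */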
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d2286adf09fe..7d7d3e0098df 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -844,7 +844,7 @@ static int t1_change_mtu(struct net_device *dev, int new_mtu)
return -EOPNOTSUPP;
if ((ret = mac->ops->set_mtu(mac, new_mtu)))
return ret;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 2236f1d35f2b..f92a3550e480 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2559,7 +2559,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu)))
return ret;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
init_port_mtus(adapter);
if (adapter->params.rev == 0 && offload_running(adapter))
t3_load_mtus(adapter, adapter->params.mtus,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 47eecde36285..3d091947ae00 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -1550,7 +1550,7 @@ out_free_fw:
return ret;
}
-static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info)
+static int get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *ts_info)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 786ceae34488..dd9e68465e69 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1244,7 +1244,8 @@ static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
* in the Compressed Filter Tuple.
*/
if (tp->vlan_shift >= 0 && fs->mask.ivlan)
- ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift;
+ ntuple |= (u64)(FT_VLAN_VLD_F |
+ fs->val.ivlan) << tp->vlan_shift;
if (tp->port_shift >= 0 && fs->mask.iport)
ntuple |= (u64)fs->val.iport << tp->port_shift;
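
The (u64) cast above fixes a classic C promotion bug: the left operand of << is promoted only as far as int/unsigned int, so with a large enough vlan_shift the high bits were lost (or the shift was undefined) before the result was widened into the 64-bit ntuple. Standalone illustration:

    #include <stdint.h>

    uint64_t bad(uint32_t val, unsigned int shift)
    {
            return val << shift;            /* computed in 32 bits, then widened */
    }

    uint64_t good(uint32_t val, unsigned int shift)
    {
            return (uint64_t)val << shift;  /* widened first: full 64-bit result */
    }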
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2eb33a727bba..2418645c8823 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3180,7 +3180,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid,
pi->viid_mirror, new_mtu, -1, -1, -1, -1, true);
if (!ret)
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 72ac4a34424b..69d045d769c4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -305,7 +305,7 @@ static void cxgb4_process_flow_match(struct net_device *dev,
fs->mask.iport = ~0;
}
-static int cxgb4_validate_flow_match(struct net_device *dev,
+static int cxgb4_validate_flow_match(struct netlink_ext_ack *extack,
struct flow_rule *rule)
{
struct flow_dissector *dissector = rule->match.dissector;
@@ -321,11 +321,15 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
BIT_ULL(FLOW_DISSECTOR_KEY_IP))) {
- netdev_warn(dev, "Unsupported key used: 0x%llx\n",
- dissector->used_keys);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "Unsupported key used: 0x%llx",
+ dissector->used_keys);
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -339,13 +343,15 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
struct flow_match_ip match;
if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
- netdev_err(dev, "IP Key supported only with IPv4/v6");
+ NL_SET_ERR_MSG_MOD(extack,
+ "IP Key supported only with IPv4/v6");
return -EINVAL;
}
flow_rule_match_ip(rule, &match);
if (match.mask->ttl) {
- netdev_warn(dev, "ttl match unsupported for offload");
+ NL_SET_ERR_MSG_MOD(extack,
+ "ttl match unsupported for offload");
return -EOPNOTSUPP;
}
}
@@ -576,7 +582,7 @@ static bool valid_l4_mask(u32 mask)
return hi && lo ? false : true;
}
-static bool valid_pedit_action(struct net_device *dev,
+static bool valid_pedit_action(struct netlink_ext_ack *extack,
const struct flow_action_entry *act,
u8 *natmode_flags)
{
@@ -595,8 +601,7 @@ static bool valid_pedit_action(struct net_device *dev,
case PEDIT_ETH_SMAC_47_16:
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -609,8 +614,7 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -629,8 +633,7 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DIP;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -638,8 +641,8 @@ static bool valid_pedit_action(struct net_device *dev,
switch (offset) {
case PEDIT_TCP_SPORT_DPORT:
if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported mask for TCP L4 ports");
return false;
}
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -648,8 +651,7 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
@@ -657,8 +659,8 @@ static bool valid_pedit_action(struct net_device *dev,
switch (offset) {
case PEDIT_UDP_SPORT_DPORT:
if (!valid_l4_mask(~mask)) {
- netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported mask for UDP L4 ports");
return false;
}
if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
@@ -667,13 +669,12 @@ static bool valid_pedit_action(struct net_device *dev,
*natmode_flags |= CXGB4_ACTION_NATMODE_DPORT;
break;
default:
- netdev_err(dev, "%s: Unsupported pedit field\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit field");
return false;
}
break;
default:
- netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported pedit type");
return false;
}
return true;
@@ -727,8 +728,7 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
* the provided output port is not valid
*/
if (!found) {
- netdev_err(dev, "%s: Out port invalid\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Out port invalid");
return -EINVAL;
}
act_redir = true;
@@ -745,21 +745,21 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_MANGLE:
if (proto != ETH_P_8021Q) {
- netdev_err(dev, "%s: Unsupported vlan proto\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported vlan proto");
return -EOPNOTSUPP;
}
break;
default:
- netdev_err(dev, "%s: Unsupported vlan action\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported vlan action");
return -EOPNOTSUPP;
}
act_vlan = true;
}
break;
case FLOW_ACTION_MANGLE: {
- bool pedit_valid = valid_pedit_action(dev, act,
+ bool pedit_valid = valid_pedit_action(extack, act,
&natmode_flags);
if (!pedit_valid)
@@ -771,14 +771,14 @@ int cxgb4_validate_flow_actions(struct net_device *dev,
/* Do nothing. cxgb4_set_filter will validate */
break;
default:
- netdev_err(dev, "%s: Unsupported action\n", __func__);
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
return -EOPNOTSUPP;
}
}
if ((act_pedit || act_vlan) && !act_redir) {
- netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
- __func__);
+ NL_SET_ERR_MSG_MOD(extack,
+ "pedit/vlan rewrite invalid without egress redirect");
return -EINVAL;
}
@@ -864,7 +864,7 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
if (cxgb4_validate_flow_actions(dev, &rule->action, extack, 0))
return -EOPNOTSUPP;
- if (cxgb4_validate_flow_match(dev, rule))
+ if (cxgb4_validate_flow_match(extack, rule))
return -EOPNOTSUPP;
cxgb4_process_flow_match(dev, rule, fs);
@@ -901,8 +901,7 @@ int cxgb4_flow_rule_replace(struct net_device *dev, struct flow_rule *rule,
init_completion(&ctx.completion);
ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
if (ret) {
- netdev_err(dev, "%s: filter creation err %d\n",
- __func__, ret);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "filter creation err %d", ret);
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 17faac715882..5c13bcb4550d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -406,7 +406,7 @@ free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
for (i = 0; i < nq; i++) {
struct sge_uld_txq *txq = &txq_info->uldtxq[i];
- if (txq && txq->q.desc) {
+ if (txq->q.desc) {
tasklet_kill(&txq->qresume_tsk);
t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
txq->q.cntxt_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 9ba0864592e8..2fbe0f059a0b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1169,7 +1169,7 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
-1, -1, -1, -1, true);
if (!ret)
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
index 6482728794dd..e8e460a92e0e 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/ch_ktls/chcr_ktls.c
@@ -10,6 +10,7 @@
#include <net/ipv6.h>
#include <linux/netdevice.h>
#include <crypto/aes.h>
+#include <linux/skbuff_ref.h>
#include "chcr_ktls.h"
static LIST_HEAD(uld_ctx_list);
diff --git a/drivers/net/ethernet/chelsio/libcxgb/Makefile b/drivers/net/ethernet/chelsio/libcxgb/Makefile
index aa79264e72ba..fbedc31674b3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/Makefile
+++ b/drivers/net/ethernet/chelsio/libcxgb/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-ccflags-y := -I $(srctree)/$(src)/../cxgb4
+ccflags-y := -I $(src)/../cxgb4
obj-$(CONFIG_CHELSIO_LIB) += libcxgb.o
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 887876f35f10..84b300fee2bb 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -554,6 +554,7 @@ static int set_mac_address(struct net_device *dev, void *addr)
return 0;
}
+MODULE_DESCRIPTION("Macintosh CS89x0-based Ethernet driver");
MODULE_LICENSE("GPL");
static void mac89x0_device_remove(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 241906697019..f2f1055880b2 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -599,7 +599,7 @@ static int enic_set_rxfh(struct net_device *netdev,
}
static int enic_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
@@ -608,6 +608,28 @@ static int enic_get_ts_info(struct net_device *netdev,
return 0;
}
+static void enic_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_MSIX:
+ channels->max_rx = ENIC_RQ_MAX;
+ channels->max_tx = ENIC_WQ_MAX;
+ channels->rx_count = enic->rq_count;
+ channels->tx_count = enic->wq_count;
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ case VNIC_DEV_INTR_MODE_INTX:
+ channels->max_combined = 1;
+ channels->combined_count = 1;
+ break;
+ default:
+ break;
+ }
+}
+
static const struct ethtool_ops enic_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
@@ -632,6 +654,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
.set_rxfh = enic_set_rxfh,
.get_link_ksettings = enic_get_ksettings,
.get_ts_info = enic_get_ts_info,
+ .get_channels = enic_get_channels,
};
void enic_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index d266a87297a5..5f26fc3ad655 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1117,18 +1117,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
if (port[IFLA_PORT_PROFILE]) {
+ if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_NAME;
memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
PORT_PROFILE_MAX);
}
if (port[IFLA_PORT_INSTANCE_UUID]) {
+ if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_INSTANCE;
memcpy(pp->instance_uuid,
nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
}
if (port[IFLA_PORT_HOST_UUID]) {
+ if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
+ memcpy(pp, &prev_pp, sizeof(*pp));
+ return -EINVAL;
+ }
pp->set |= ENIC_SET_HOST;
memcpy(pp->host_uuid,
nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
@@ -2027,7 +2039,7 @@ static int _enic_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (running) {
err = enic_open(netdev);
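
The added checks validate the netlink attribute payload length with
nla_len() before the memcpy() into fixed-size destination fields, and
roll pp back to prev_pp so a rejected request cannot leave a
half-updated port profile behind. The shape of the check, as a sketch:

    /* Sketch: reject an attribute whose payload is not exactly the
     * size the fixed destination buffer expects.  PORT_PROFILE_MAX is
     * the enic buffer size; attr is the parsed nlattr.
     */
    if (nla_len(attr) != PORT_PROFILE_MAX)
            return -EINVAL;
    memcpy(pp->name, nla_data(attr), PORT_PROFILE_MAX);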
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 12a83fa1302d..9f6089e81608 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -146,23 +146,19 @@ EXPORT_SYMBOL(vnic_dev_get_res);
static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
{
- /* The base address of the desc rings must be 512 byte aligned.
- * Descriptor count is aligned to groups of 32 descriptors. A
- * count of 0 means the maximum 4096 descriptors. Descriptor
- * size is aligned to 16 bytes.
- */
-
- unsigned int count_align = 32;
- unsigned int desc_align = 16;
- ring->base_align = 512;
+ /* Descriptor ring base address alignment in bytes */
+ ring->base_align = VNIC_DESC_BASE_ALIGN;
+ /* A count of 0 means the maximum descriptors */
if (desc_count == 0)
- desc_count = 4096;
+ desc_count = VNIC_DESC_MAX_COUNT;
- ring->desc_count = ALIGN(desc_count, count_align);
+ /* Descriptor count aligned in groups of VNIC_DESC_COUNT_ALIGN descriptors */
+ ring->desc_count = ALIGN(desc_count, VNIC_DESC_COUNT_ALIGN);
- ring->desc_size = ALIGN(desc_size, desc_align);
+ /* Descriptor size alignment in bytes */
+ ring->desc_size = ALIGN(desc_size, VNIC_DESC_SIZE_ALIGN);
ring->size = ring->desc_count * ring->desc_size;
ring->size_unaligned = ring->size + ring->base_align;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 6273794b923b..7fdd8c661c99 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -31,6 +31,11 @@ static inline void writeq(u64 val, void __iomem *reg)
#undef pr_fmt
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#define VNIC_DESC_SIZE_ALIGN 16
+#define VNIC_DESC_COUNT_ALIGN 32
+#define VNIC_DESC_BASE_ALIGN 512
+#define VNIC_DESC_MAX_COUNT 4096
+
enum vnic_dev_intr_mode {
VNIC_DEV_INTR_MODE_UNKNOWN,
VNIC_DEV_INTR_MODE_INTX,
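
With the named constants in place, the sizing arithmetic in
vnic_dev_desc_ring_size() can be checked by hand. A worked example as
a comment block (input values hypothetical):

    /* desc_count = 100, desc_size = 12:
     *   desc_count     = ALIGN(100, VNIC_DESC_COUNT_ALIGN) = 128
     *   desc_size      = ALIGN(12, VNIC_DESC_SIZE_ALIGN)   = 16
     *   size           = 128 * 16                          = 2048
     *   size_unaligned = 2048 + VNIC_DESC_BASE_ALIGN       = 2560
     * The extra VNIC_DESC_BASE_ALIGN bytes leave room to round the
     * ring base up to a 512-byte boundary; desc_count = 0 selects the
     * VNIC_DESC_MAX_COUNT (4096) maximum.
     */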
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 705c3eb19cd3..73e1c71c5092 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -79,7 +79,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT)
#define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \
- NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM)
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6)
/**
* struct gmac_queue_page - page buffer per-page info
@@ -287,13 +288,13 @@ static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx)
spin_unlock_irqrestore(&port->config_lock, flags);
}
-static void gmac_speed_set(struct net_device *netdev)
+static void gmac_adjust_link(struct net_device *netdev)
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
union gmac_status status, old_status;
- int pause_tx = 0;
- int pause_rx = 0;
+ bool pause_tx = false;
+ bool pause_rx = false;
status.bits32 = readl(port->gmac_base + GMAC_STATUS);
old_status.bits32 = status.bits32;
@@ -328,14 +329,9 @@ static void gmac_speed_set(struct net_device *netdev)
}
if (phydev->duplex == DUPLEX_FULL) {
- u16 lcladv = phy_read(phydev, MII_ADVERTISE);
- u16 rmtadv = phy_read(phydev, MII_LPA);
- u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
-
- if (cap & FLOW_CTRL_RX)
- pause_rx = 1;
- if (cap & FLOW_CTRL_TX)
- pause_tx = 1;
+ phy_get_pause(phydev, &pause_tx, &pause_rx);
+ netdev_dbg(netdev, "set negotiated pause params pause TX = %s, pause RX = %s\n",
+ pause_tx ? "ON" : "OFF", pause_rx ? "ON" : "OFF");
}
gmac_set_flow_control(netdev, pause_tx, pause_rx);
@@ -366,7 +362,7 @@ static int gmac_setup_phy(struct net_device *netdev)
phy = of_phy_get_and_connect(netdev,
dev->of_node,
- gmac_speed_set);
+ gmac_adjust_link);
if (!phy)
return -ENODEV;
netdev->phydev = phy;
@@ -1107,10 +1103,13 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
{
struct gemini_ethernet_port *port = netdev_priv(netdev);
struct gemini_ethernet *geth = port->geth;
+ unsigned long flags;
u32 val, mask;
netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
+ spin_lock_irqsave(&geth->irq_lock, flags);
+
mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
if (en)
@@ -1119,6 +1118,8 @@ static void gmac_tx_irq_enable(struct net_device *netdev,
val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
val = en ? val | mask : val & ~mask;
writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
}
static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
@@ -1143,13 +1144,25 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
skb_frag_t *skb_frag;
dma_addr_t mapping;
void *buffer;
+ u16 mss;
int ret;
- /* TODO: implement proper TSO using MTU in word3 */
word1 = skb->len;
word3 = SOF_BIT;
- if (skb->len >= ETH_FRAME_LEN) {
+ mss = skb_shinfo(skb)->gso_size;
+ if (mss) {
+ /* This means we are dealing with TCP and skb->len is the
+ * sum total of all the segments. The TSO will deal with
+ * chopping this up for us.
+ */
+ /* The accelerator needs the full frame size here */
+ mss += skb_tcp_all_headers(skb);
+ netdev_dbg(netdev, "segment offloading mss = %04x len=%04x\n",
+ mss, skb->len);
+ word1 |= TSS_MTU_ENABLE_BIT;
+ word3 |= mss;
+ } else if (skb->len >= ETH_FRAME_LEN) {
/* Hardware offloaded checksumming isn't working on frames
* bigger than 1514 bytes. A hypothesis about this is that the
* checksum buffer is only 1518 bytes, so when the frames get
@@ -1164,7 +1177,9 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
return ret;
}
word1 |= TSS_BYPASS_BIT;
- } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
int tcp = 0;
/* We do not switch off the checksumming on non TCP/UDP
@@ -1415,15 +1430,19 @@ static unsigned int gmac_rx(struct net_device *netdev, unsigned int budget)
union gmac_rxdesc_3 word3;
struct page *page = NULL;
unsigned int page_offs;
+ unsigned long flags;
unsigned short r, w;
union dma_rwptr rw;
dma_addr_t mapping;
int frag_nr = 0;
+ spin_lock_irqsave(&geth->irq_lock, flags);
rw.bits32 = readl(ptr_reg);
/* Reset interrupt as all packets until here are taken into account */
writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+ spin_unlock_irqrestore(&geth->irq_lock, flags);
+
r = rw.bits.rptr;
w = rw.bits.wptr;
@@ -1726,10 +1745,9 @@ static irqreturn_t gmac_irq(int irq, void *data)
gmac_update_hw_stats(netdev);
if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
+ spin_lock(&geth->irq_lock);
writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
-
- spin_lock(&geth->irq_lock);
u64_stats_update_begin(&port->ir_stats_syncp);
++port->stats.rx_fifo_errors;
u64_stats_update_end(&port->ir_stats_syncp);
@@ -1978,7 +1996,7 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
gmac_disable_tx_rx(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
CONFIG0_MAXLEN_MASK);
@@ -2108,6 +2126,19 @@ static void gmac_get_pauseparam(struct net_device *netdev,
pparam->autoneg = true;
}
+static int gmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pparam)
+{
+ struct phy_device *phydev = netdev->phydev;
+
+ if (!pparam->autoneg)
+ return -EOPNOTSUPP;
+
+ phy_set_asym_pause(phydev, pparam->rx_pause, pparam->tx_pause);
+
+ return 0;
+}
+
static void gmac_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *rp,
struct kernel_ethtool_ringparam *kernel_rp,
@@ -2228,6 +2259,7 @@ static const struct ethtool_ops gmac_351x_ethtool_ops = {
.set_link_ksettings = gmac_set_ksettings,
.nway_reset = gmac_nway_reset,
.get_pauseparam = gmac_get_pauseparam,
+ .set_pauseparam = gmac_set_pauseparam,
.get_ringparam = gmac_get_ringparam,
.set_ringparam = gmac_set_ringparam,
.get_coalesce = gmac_get_coalesce,
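
Two things are worth noting in the gemini changes: the adjust-link
callback now resolves pause from phylib instead of reading MII
registers by hand, and TSO is enabled by programming the per-segment
frame size into word3. A sketch of the pause part, which replaces the
open-coded mii_resolve_flowctrl_fdx() dance:

    /* phylib has already resolved flow control from the autoneg
     * result; phy_get_pause() just reports it.
     */
    bool tx_pause, rx_pause;

    phy_get_pause(phydev, &tx_pause, &rx_pause);
    gmac_set_flow_control(netdev, tx_pause, rx_pause);

For TSO, the accelerator expects the full on-wire segment size, so
gso_size (TCP payload per segment) is topped up with
skb_tcp_all_headers(skb) before being written into word3.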
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index aaf0eda96292..8af5ecec7d61 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -708,7 +708,7 @@ static int change_mtu(struct net_device *dev, int new_mtu)
{
if (netif_running(dev))
return -EBUSY;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index ad862ed7888a..a8596ebcdfd6 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4982,10 +4982,7 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
mode = nla_get_u16(attr);
if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
return -EOPNOTSUPP;
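
nla_for_each_nested_type() folds the type filter into the iterator
itself. A sketch of the equivalent open-coded and macro forms:

    /* Before: filter by hand inside the generic iterator. */
    nla_for_each_nested(attr, br_spec, rem) {
            if (nla_type(attr) != IFLA_BRIDGE_MODE)
                    continue;
            /* ... */
    }

    /* After: the iterator only yields IFLA_BRIDGE_MODE attributes. */
    nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
            u16 mode = nla_get_u16(attr);
            /* ... */
    }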
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index 65ec1abc9442..9aa286ba1f00 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -305,7 +305,7 @@ static void tsnep_ethtool_get_channels(struct net_device *netdev,
}
static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index 4b15af6b7122..44da335d66bd 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -1587,7 +1587,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
length = __le32_to_cpu(entry->desc_wb->properties) &
TSNEP_DESC_LENGTH_MASK;
xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
- xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(entry->xdp);
/* RX metadata with timestamps is in front of actual data,
* subtract metadata size to get length of actual data and
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 003bc9a45c65..1047c805054e 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1092,7 +1092,7 @@ static int ftmac100_change_mtu(struct net_device *netdev, int mtu)
}
iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR);
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return 0;
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index dcbc598b11c6..cfe6b57b1da0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -371,6 +371,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
void *type_data)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
+ int num_txqs_per_tc = dpaa_num_txqs_per_tc();
struct tc_mqprio_qopt *mqprio = type_data;
u8 num_tc;
int i;
@@ -398,12 +399,12 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
netdev_set_num_tc(net_dev, num_tc);
for (i = 0; i < num_tc; i++)
- netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM,
- i * DPAA_TC_TXQ_NUM);
+ netdev_set_tc_queue(net_dev, i, num_txqs_per_tc,
+ i * num_txqs_per_tc);
out:
priv->num_tc = num_tc ? : 1;
- netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+ netif_set_real_num_tx_queues(net_dev, priv->num_tc * num_txqs_per_tc);
return 0;
}
@@ -649,7 +650,7 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
fq->wq = 6;
break;
case FQ_TYPE_TX:
- switch (idx / DPAA_TC_TXQ_NUM) {
+ switch (idx / dpaa_num_txqs_per_tc()) {
case 0:
/* Low priority (best effort) */
fq->wq = 6;
@@ -667,8 +668,8 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx)
fq->wq = 0;
break;
default:
- WARN(1, "Too many TX FQs: more than %d!\n",
- DPAA_ETH_TXQ_NUM);
+ WARN(1, "Too many TX FQs: more than %zu!\n",
+ dpaa_max_num_txqs());
}
break;
default:
@@ -740,7 +741,8 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
port_fqs->rx_pcdq = &dpaa_fq[0];
- if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ))
+ if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list,
+ FQ_TYPE_TX_CONF_MQ))
goto fq_alloc_failed;
dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR);
@@ -755,7 +757,7 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list,
port_fqs->tx_defq = &dpaa_fq[0];
- if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX))
+ if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list, FQ_TYPE_TX))
goto fq_alloc_failed;
return 0;
@@ -931,14 +933,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv,
}
}
-static void dpaa_fq_setup(struct dpaa_priv *priv,
- const struct dpaa_fq_cbs *fq_cbs,
- struct fman_port *tx_port)
+static int dpaa_fq_setup(struct dpaa_priv *priv,
+ const struct dpaa_fq_cbs *fq_cbs,
+ struct fman_port *tx_port)
{
int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu;
const cpumask_t *affine_cpus = qman_affine_cpus();
- u16 channels[NR_CPUS];
struct dpaa_fq *fq;
+ u16 *channels;
+
+ channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
for_each_cpu_and(cpu, affine_cpus, cpu_online_mask)
channels[num_portals++] = qman_affine_channel(cpu);
@@ -965,11 +971,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
case FQ_TYPE_TX:
dpaa_setup_egress(priv, fq, tx_port,
&fq_cbs->egress_ern);
- /* If we have more Tx queues than the number of cores,
- * just ignore the extra ones.
- */
- if (egress_cnt < DPAA_ETH_TXQ_NUM)
- priv->egress_fqs[egress_cnt++] = &fq->fq_base;
+ priv->egress_fqs[egress_cnt++] = &fq->fq_base;
break;
case FQ_TYPE_TX_CONF_MQ:
priv->conf_fqs[conf_cnt++] = &fq->fq_base;
@@ -987,16 +989,9 @@ static void dpaa_fq_setup(struct dpaa_priv *priv,
}
}
- /* Make sure all CPUs receive a corresponding Tx queue. */
- while (egress_cnt < DPAA_ETH_TXQ_NUM) {
- list_for_each_entry(fq, &priv->dpaa_fq_list, list) {
- if (fq->fq_type != FQ_TYPE_TX)
- continue;
- priv->egress_fqs[egress_cnt++] = &fq->fq_base;
- if (egress_cnt == DPAA_ETH_TXQ_NUM)
- break;
- }
- }
+ kfree(channels);
+
+ return 0;
}
static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
@@ -1004,7 +999,7 @@ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv,
{
int i;
- for (i = 0; i < DPAA_ETH_TXQ_NUM; i++)
+ for (i = 0; i < dpaa_max_num_txqs(); i++)
if (priv->egress_fqs[i] == tx_fq)
return i;
@@ -2995,7 +2990,7 @@ static int dpaa_change_mtu(struct net_device *net_dev, int new_mtu)
if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu))
return -EINVAL;
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
return 0;
}
@@ -3324,7 +3319,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
/* Allocate this early, so we can store relevant information in
* the private area
*/
- net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM);
+ net_dev = alloc_etherdev_mq(sizeof(*priv), dpaa_max_num_txqs());
if (!net_dev) {
dev_err(dev, "alloc_etherdev_mq() failed\n");
return -ENOMEM;
@@ -3339,6 +3334,22 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT);
+ priv->egress_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+ sizeof(*priv->egress_fqs),
+ GFP_KERNEL);
+ if (!priv->egress_fqs) {
+ err = -ENOMEM;
+ goto free_netdev;
+ }
+
+ priv->conf_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(),
+ sizeof(*priv->conf_fqs),
+ GFP_KERNEL);
+ if (!priv->conf_fqs) {
+ err = -ENOMEM;
+ goto free_netdev;
+ }
+
mac_dev = dpaa_mac_dev_get(pdev);
if (IS_ERR(mac_dev)) {
netdev_err(net_dev, "dpaa_mac_dev_get() failed\n");
@@ -3416,7 +3427,9 @@ static int dpaa_eth_probe(struct platform_device *pdev)
*/
dpaa_eth_add_channel(priv->channel, &pdev->dev);
- dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+ err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
+ if (err)
+ goto free_dpaa_bps;
/* Create a congestion group for this netdev, with
* dynamically-allocated CGR ID.
@@ -3462,7 +3475,8 @@ static int dpaa_eth_probe(struct platform_device *pdev)
}
priv->num_tc = 1;
- netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM);
+ netif_set_real_num_tx_queues(net_dev,
+ priv->num_tc * dpaa_num_txqs_per_tc());
/* Initialize NAPI */
err = dpaa_napi_add(net_dev);
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index ac3c8ed57bbe..7ed659eb08de 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -18,10 +18,6 @@
/* Number of prioritised traffic classes */
#define DPAA_TC_NUM 4
-/* Number of Tx queues per traffic class */
-#define DPAA_TC_TXQ_NUM NR_CPUS
-/* Total number of Tx queues */
-#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
/* More detailed FQ types - used for fine-grained WQ assignments */
enum dpaa_fq_type {
@@ -142,8 +138,8 @@ struct dpaa_priv {
struct mac_device *mac_dev;
struct device *rx_dma_dev;
struct device *tx_dma_dev;
- struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM];
- struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM];
+ struct qman_fq **egress_fqs;
+ struct qman_fq **conf_fqs;
u16 channel;
struct list_head dpaa_fq_list;
@@ -185,4 +181,16 @@ extern const struct ethtool_ops dpaa_ethtool_ops;
/* from dpaa_eth_sysfs.c */
void dpaa_eth_sysfs_remove(struct device *dev);
void dpaa_eth_sysfs_init(struct device *dev);
+
+static inline size_t dpaa_num_txqs_per_tc(void)
+{
+ return num_possible_cpus();
+}
+
+/* Total number of Tx queues */
+static inline size_t dpaa_max_num_txqs(void)
+{
+ return DPAA_TC_NUM * dpaa_num_txqs_per_tc();
+}
+
#endif /* __DPAA_H */
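
The DPAA conversion replaces compile-time NR_CPUS sizing (which
inflates both the netdev private area and on-stack arrays on distro
kernels built with a large NR_CPUS) with runtime num_possible_cpus()
sizing. The general pattern, sketched:

    /* Size per-CPU scratch storage at runtime rather than with the
     * compile-time NR_CPUS upper bound.
     */
    u16 *channels;

    channels = kcalloc(num_possible_cpus(), sizeof(*channels), GFP_KERNEL);
    if (!channels)
            return -ENOMEM;
    /* ... fill channels[0 .. num_portals - 1] ... */
    kfree(channels);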
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index 4fee74c024bd..aad470e9caea 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -35,7 +35,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
u32 last_fqid = 0;
ssize_t bytes = 0;
char *str;
- int i = 0;
list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) {
switch (fq->fq_type) {
@@ -85,7 +84,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev,
prev = fq;
prevstr = str;
- i++;
}
if (prev) {
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
index 889f89df9930..6f0e58a2a58a 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_trace.h
@@ -57,7 +57,7 @@ DECLARE_EVENT_CLASS(dpaa_eth_fd,
__entry->fd_offset = qm_fd_get_offset(fd);
__entry->fd_length = qm_fd_get_length(fd);
__entry->fd_status = fd->status;
- __assign_str(name, netdev->name);
+ __assign_str(name);
),
/* This is what gets printed when the trace event is triggered */
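
The __assign_str() change is part of a tracepoint-infrastructure
cleanup: the source string is now taken from the matching __string()
declaration, so the second argument is dropped. Sketched usage with a
hypothetical event:

    TRACE_EVENT(example_fd,
            TP_PROTO(struct net_device *netdev),
            TP_ARGS(netdev),
            TP_STRUCT__entry(
                    __string(name, netdev->name)    /* source named once here */
            ),
            TP_fast_assign(
                    __assign_str(name);     /* reuses the __string() source */
            ),
            TP_printk("%s", __get_str(name))
    );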
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 5bd0b36d1feb..b0060cf96090 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -394,7 +394,7 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
}
static int dpaa_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct device *dev = net_dev->dev.parent;
struct device_node *mac_node = dev->of_node;
@@ -457,12 +457,16 @@ static int dpaa_set_coalesce(struct net_device *dev,
struct netlink_ext_ack *extack)
{
const cpumask_t *cpus = qman_affine_cpus();
- bool needs_revert[NR_CPUS] = {false};
struct qman_portal *portal;
u32 period, prev_period;
u8 thresh, prev_thresh;
+ bool *needs_revert;
int cpu, res;
+ needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL);
+ if (!needs_revert)
+ return -ENOMEM;
+
period = c->rx_coalesce_usecs;
thresh = c->rx_max_coalesced_frames;
@@ -485,6 +489,8 @@ static int dpaa_set_coalesce(struct net_device *dev,
needs_revert[cpu] = true;
}
+ kfree(needs_revert);
+
return 0;
revert_values:
@@ -498,6 +504,8 @@ revert_values:
qman_dqrr_set_ithresh(portal, prev_thresh);
}
+ kfree(needs_revert);
+
return res;
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
index 9b43fadb9b11..956767e0869c 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-trace.h
@@ -48,7 +48,7 @@ DECLARE_EVENT_CLASS(dpaa2_eth_fd,
__entry->fd_addr = dpaa2_fd_get_addr(fd);
__entry->fd_len = dpaa2_fd_get_len(fd);
__entry->fd_offset = dpaa2_fd_get_offset(fd);
- __assign_str(name, netdev->name);
+ __assign_str(name);
),
/* This is what gets printed when the trace event is
@@ -144,7 +144,7 @@ DECLARE_EVENT_CLASS(dpaa2_eth_buf,
__entry->dma_addr = dma_addr;
__entry->map_size = map_size;
__entry->bpid = bpid;
- __assign_str(name, netdev->name);
+ __assign_str(name);
),
/* This is what gets printed when the trace event is
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 888509cf1f21..6866807973da 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -2698,7 +2698,7 @@ static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
return err;
out:
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -2896,11 +2896,14 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
static int update_xps(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
- struct cpumask xps_mask;
- struct dpaa2_eth_fq *fq;
int i, num_queues, netdev_queues;
+ struct dpaa2_eth_fq *fq;
+ cpumask_var_t xps_mask;
int err = 0;
+ if (!alloc_cpumask_var(&xps_mask, GFP_KERNEL))
+ return -ENOMEM;
+
num_queues = dpaa2_eth_queue_count(priv);
netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
@@ -2910,16 +2913,17 @@ static int update_xps(struct dpaa2_eth_priv *priv)
for (i = 0; i < netdev_queues; i++) {
fq = &priv->fq[i % num_queues];
- cpumask_clear(&xps_mask);
- cpumask_set_cpu(fq->target_cpu, &xps_mask);
+ cpumask_clear(xps_mask);
+ cpumask_set_cpu(fq->target_cpu, xps_mask);
- err = netif_set_xps_queue(net_dev, &xps_mask, i);
+ err = netif_set_xps_queue(net_dev, xps_mask, i);
if (err) {
netdev_warn_once(net_dev, "Error setting XPS queue\n");
break;
}
}
+ free_cpumask_var(xps_mask);
return err;
}
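
A struct cpumask on the stack is NR_CPUS bits, which can run to
kilobytes on large configs; cpumask_var_t plus alloc_cpumask_var()
keeps it off the stack when CONFIG_CPUMASK_OFFSTACK is set (and
degenerates to a plain array otherwise). The pattern:

    cpumask_var_t mask;

    if (!alloc_cpumask_var(&mask, GFP_KERNEL))
            return -ENOMEM;

    cpumask_clear(mask);
    cpumask_set_cpu(cpu, mask);
    /* ... use mask ... */
    free_cpumask_var(mask);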
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index e80e9388c71f..7f476519b7ad 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -794,7 +794,7 @@ int dpaa2_phc_index = -1;
EXPORT_SYMBOL(dpaa2_phc_index);
static int dpaa2_eth_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
if (!dpaa2_ptp)
return ethtool_op_get_ts_info(dev, info);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
index b6a534a3e0b1..701a87370737 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c
@@ -33,6 +33,9 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls,
acl_h = &acl_key->match;
acl_m = &acl_key->mask;
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
struct flow_match_basic match;
@@ -548,6 +551,9 @@ static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index f3543a2df68d..a293b08f36d4 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -590,7 +590,7 @@ static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu)
return err;
}
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return 0;
}
@@ -2638,13 +2638,14 @@ static int dpaa2_switch_refill_bp(struct ethsw_core *ethsw)
static int dpaa2_switch_seed_bp(struct ethsw_core *ethsw)
{
- int *count, i;
+ int *count, ret, i;
for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
+ ret = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
count = &ethsw->buf_count;
- *count += dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
+ *count += ret;
- if (unlikely(*count < BUFS_PER_CMD))
+ if (unlikely(ret < BUFS_PER_CMD))
return -ENOMEM;
}
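
The seed_bp fix separates the per-call result of
dpaa2_switch_add_bufs() from the running buffer total: only the former
is meaningful against BUFS_PER_CMD, so a short seed is detected in the
iteration where it happens instead of against the accumulated count.
The corrected accounting, as a sketch:

    int ret, total = 0;

    for (i = 0; i < DPAA2_ETHSW_NUM_BUFS; i += BUFS_PER_CMD) {
            ret = dpaa2_switch_add_bufs(ethsw, ethsw->bpid);
            total += ret;
            if (ret < BUFS_PER_CMD)         /* this batch came up short */
                    return -ENOMEM;
    }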
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
index 051748b997f3..a466c2379146 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-xsk.c
@@ -55,7 +55,7 @@ static u32 dpaa2_xsk_run_xdp(struct dpaa2_eth_priv *priv,
xdp_set_data_meta_invalid(xdp_buff);
xdp_buff->rxq = &ch->xdp_rxq;
- xsk_buff_dma_sync_for_cpu(xdp_buff, ch->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xdp_buff);
xdp_act = bpf_prog_run_xdp(xdp_prog, xdp_buff);
/* xdp.data pointer may have changed */
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 9f07f4947b63..5c45f42232d3 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2769,7 +2769,7 @@ static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog,
if (priv->min_num_stack_tx_queues + num_xdp_tx_queues >
priv->num_tx_rings) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Reserving %d XDP TXQs does not leave a minimum of %d for stack (total %d)",
+ "Reserving %d XDP TXQs leaves under %d for stack (total %d)",
num_xdp_tx_queues,
priv->min_num_stack_tx_queues,
priv->num_tx_rings);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index f7753ea5b57e..5e684b23c5f5 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -841,7 +841,7 @@ static int enetc_set_coalesce(struct net_device *ndev,
}
static int enetc_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
int *phc_idx;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8bd213da8fb6..a923cb95cdc6 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1361,6 +1361,12 @@ fec_stop(struct net_device *ndev)
writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
}
+
+ if (fep->bufdesc_ex) {
+ val = readl(fep->hwp + FEC_ECNTRL);
+ val |= FEC_ECR_EN1588;
+ writel(val, fep->hwp + FEC_ECNTRL);
+ }
}
static void
@@ -2762,7 +2768,7 @@ static void fec_enet_get_regs(struct net_device *ndev,
}
static int fec_enet_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -3674,29 +3680,6 @@ fec_set_mac_address(struct net_device *ndev, void *p)
return 0;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * fec_poll_controller - FEC Poll controller function
- * @dev: The FEC network adapter
- *
- * Polled functionality used by netconsole and others in non interrupt mode
- *
- */
-static void fec_poll_controller(struct net_device *dev)
-{
- int i;
- struct fec_enet_private *fep = netdev_priv(dev);
-
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- if (fep->irq[i] > 0) {
- disable_irq(fep->irq[i]);
- fec_enet_interrupt(fep->irq[i], dev);
- enable_irq(fep->irq[i]);
- }
- }
-}
-#endif
-
static inline void fec_enet_set_netdev_features(struct net_device *netdev,
netdev_features_t features)
{
@@ -4003,9 +3986,6 @@ static const struct net_device_ops fec_netdev_ops = {
.ndo_tx_timeout = fec_timeout,
.ndo_set_mac_address = fec_set_mac_address,
.ndo_eth_ioctl = phy_do_ioctl_running,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = fec_poll_controller,
-#endif
.ndo_set_features = fec_set_features,
.ndo_bpf = fec_enet_bpf,
.ndo_xdp_xmit = fec_enet_xdp_xmit,
@@ -4156,6 +4136,14 @@ free_queue_mem:
return ret;
}
+static void fec_enet_deinit(struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+
+ netif_napi_del(&fep->napi);
+ fec_enet_free_queue(ndev);
+}
+
#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
@@ -4550,6 +4538,7 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
+ fec_enet_deinit(ndev);
failed_init:
fec_ptp_stop(pdev);
failed_reset:
@@ -4613,6 +4602,7 @@ fec_drv_remove(struct platform_device *pdev)
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
+ fec_enet_deinit(ndev);
free_netdev(ndev);
}
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 181d9bfbee22..2e4f3e1782a2 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -104,14 +104,13 @@ static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
struct timespec64 ts;
u64 ns;
- if (fep->pps_enable == enable)
- return 0;
-
- fep->pps_channel = DEFAULT_PPS_CHANNEL;
- fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
-
spin_lock_irqsave(&fep->tmreg_lock, flags);
+ if (fep->pps_enable == enable) {
+ spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+ return 0;
+ }
+
if (enable) {
/* clear capture or output compare interrupt status, if any.
*/
@@ -532,6 +531,9 @@ static int fec_ptp_enable(struct ptp_clock_info *ptp,
int ret = 0;
if (rq->type == PTP_CLK_REQ_PPS) {
+ fep->pps_channel = DEFAULT_PPS_CHANNEL;
+ fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
+
ret = fec_ptp_enable_pps(fep, on);
return ret;
@@ -773,6 +775,9 @@ void fec_ptp_stop(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ if (fep->pps_enable)
+ fec_ptp_enable_pps(fep, 0);
+
cancel_delayed_work_sync(&fep->time_keep);
hrtimer_cancel(&fep->perout_timer);
if (fep->ptp_clock)
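
The fec_ptp change moves the pps_enable test under tmreg_lock, turning
a racy check-then-act into an atomic test-and-set, and disables PPS in
fec_ptp_stop() so the hardware is quiesced before the clock goes away.
The locking shape, as a generic sketch (names hypothetical):

    spin_lock_irqsave(&lock, flags);
    if (state == requested) {               /* check ... */
            spin_unlock_irqrestore(&lock, flags);
            return 0;
    }
    state = requested;                      /* ... and act, atomically */
    /* reprogram hardware while still holding the lock */
    spin_unlock_irqrestore(&lock, flags);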
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 758535adc9ff..796e6f4e583d 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -267,7 +267,6 @@ struct memac_cfg {
bool reset_on_init;
bool pause_ignore;
bool promiscuous_mode_enable;
- struct fixed_phy_status *fixed_link;
u16 max_frame_length;
u16 pause_quanta;
u32 tx_ipg_length;
@@ -1067,7 +1066,6 @@ int memac_initialization(struct mac_device *mac_dev,
struct fman_mac_params *params)
{
int err;
- struct device_node *fixed;
struct phylink_pcs *pcs;
struct fman_mac *memac;
unsigned long capabilities;
@@ -1223,18 +1221,15 @@ int memac_initialization(struct mac_device *mac_dev,
memac->rgmii_no_half_duplex = true;
/* Most boards should use MLO_AN_INBAND, but existing boards don't have
- * a managed property. Default to MLO_AN_INBAND if nothing else is
- * specified. We need to be careful and not enable this if we have a
- * fixed link or if we are using MII or RGMII, since those
- * configurations modes don't use in-band autonegotiation.
+ * a managed property. Default to MLO_AN_INBAND rather than MLO_AN_PHY.
+ * Phylink will allow this to be overridden by a fixed link. We need to
+ * be careful and not enable this if we are using MII or RGMII, since
+ * those configurations modes don't use in-band autonegotiation.
*/
- fixed = of_get_child_by_name(mac_node, "fixed-link");
- if (!fixed && !of_property_read_bool(mac_node, "fixed-link") &&
- !of_property_read_bool(mac_node, "managed") &&
+ if (!of_property_read_bool(mac_node, "managed") &&
mac_dev->phy_if != PHY_INTERFACE_MODE_MII &&
!phy_interface_mode_is_rgmii(mac_dev->phy_if))
- mac_dev->phylink_config.ovr_an_inband = true;
- of_node_put(fixed);
+ mac_dev->phylink_config.default_an_inband = true;
err = memac_init(mac_dev->fman_mac);
if (err < 0)
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index f557d68e5b76..1ed245a2ee01 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -12,7 +12,6 @@
struct muram_info {
struct gen_pool *pool;
void __iomem *vbase;
- size_t size;
phys_addr_t pbase;
};
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a811238c018d..2baef59f741d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2026,7 +2026,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
stop_gfar(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (dev->flags & IFF_UP)
startup_gfar(dev);
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 7a15b9245698..f581402ad740 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -1448,7 +1448,7 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
}
static int gfar_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct gfar_private *priv = netdev_priv(dev);
struct platform_device *ptp_dev;
diff --git a/drivers/net/ethernet/fujitsu/Kconfig b/drivers/net/ethernet/fujitsu/Kconfig
index 0a1400cb410a..06a28bce5d27 100644
--- a/drivers/net/ethernet/fujitsu/Kconfig
+++ b/drivers/net/ethernet/fujitsu/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_FUJITSU
config PCMCIA_FMVJ18X
tristate "Fujitsu FMV-J18x PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
select CRC32
help
Say Y here if you intend to attach a Fujitsu FMV-J18x or compatible
diff --git a/drivers/net/ethernet/fungible/funeth/Makefile b/drivers/net/ethernet/fungible/funeth/Makefile
index 646d69595b4f..d51e4c2b4a1a 100644
--- a/drivers/net/ethernet/fungible/funeth/Makefile
+++ b/drivers/net/ethernet/fungible/funeth/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
-ccflags-y += -I$(srctree)/$(src)/../funcore -I$(srctree)/$(src)
+ccflags-y += -I$(src)/../funcore -I$(src)
obj-$(CONFIG_FUN_ETH) += funeth.o
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
index 4edd0adfc6c7..7f081e6e8c87 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c
@@ -1040,7 +1040,7 @@ static int fun_set_rxfh(struct net_device *netdev,
}
static int fun_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index df86770731ad..ac86179a0a81 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -927,7 +927,7 @@ static int fun_change_mtu(struct net_device *netdev, int new_mtu)
rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu);
if (!rc)
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return rc;
}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_trace.h b/drivers/net/ethernet/fungible/funeth/funeth_trace.h
index 9e58dfec19d5..b9985900f30b 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_trace.h
+++ b/drivers/net/ethernet/fungible/funeth/funeth_trace.h
@@ -32,7 +32,7 @@ TRACE_EVENT(funeth_tx,
__entry->len = len;
__entry->sqe_idx = sqe_idx;
__entry->ngle = ngle;
- __assign_str(devname, txq->netdev->name);
+ __assign_str(devname);
),
TP_printk("%s: Txq %u, SQE idx %u, len %u, num GLEs %u",
@@ -62,7 +62,7 @@ TRACE_EVENT(funeth_tx_free,
__entry->sqe_idx = sqe_idx;
__entry->num_sqes = num_sqes;
__entry->hw_head = hw_head;
- __assign_str(devname, txq->netdev->name);
+ __assign_str(devname);
),
TP_printk("%s: Txq %u, SQE idx %u, SQEs %u, HW head %u",
@@ -97,7 +97,7 @@ TRACE_EVENT(funeth_rx,
__entry->len = pkt_len;
__entry->hash = hash;
__entry->cls_vec = cls_vec;
- __assign_str(devname, rxq->netdev->name);
+ __assign_str(devname);
),
TP_printk("%s: Rxq %u, CQ head %u, RQEs %u, len %u, hash %u, CV %#x",
diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile
index b9a6be76531b..9ed07080b38a 100644
--- a/drivers/net/ethernet/google/gve/Makefile
+++ b/drivers/net/ethernet/google/gve/Makefile
@@ -1,4 +1,4 @@
# Makefile for the Google virtual Ethernet (gve) driver
obj-$(CONFIG_GVE) += gve.o
-gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o
+gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 4814c96d5fe7..84ac004d3953 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2021 Google, Inc.
+ * Copyright (C) 2015-2024 Google LLC
*/
#ifndef _GVE_H_
@@ -50,12 +50,21 @@
/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024
+/* Default minimum ring size */
+#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
+#define GVE_DEFAULT_MIN_RX_RING_SIZE 512
+
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
#define GVE_MAX_RX_BUFFER_SIZE 4096
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
+#define GVE_FLOW_RULES_CACHE_SIZE \
+ (GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule))
+#define GVE_FLOW_RULE_IDS_CACHE_SIZE \
+ (GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location))
+
#define GVE_XDP_ACTIONS 5
#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
@@ -63,7 +72,6 @@
#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
#define DQO_QPL_DEFAULT_TX_PAGES 512
-#define DQO_QPL_DEFAULT_RX_PAGES 2048
/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX 0x3FFFF
@@ -607,6 +615,7 @@ struct gve_notify_block {
struct gve_priv *priv;
struct gve_tx_ring *tx; /* tx rings on this block */
struct gve_rx_ring *rx; /* rx rings on this block */
+ u32 irq;
};
/* Tracks allowed and current queue settings */
@@ -621,11 +630,6 @@ struct gve_qpl_config {
unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};
-struct gve_options_dqo_rda {
- u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
- u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
-};
-
struct gve_irq_db {
__be32 index;
} ____cacheline_aligned;
@@ -639,28 +643,10 @@ struct gve_ptype_lut {
struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
-/* Parameters for allocating queue page lists */
-struct gve_qpls_alloc_cfg {
- struct gve_qpl_config *qpl_cfg;
- struct gve_queue_config *tx_cfg;
- struct gve_queue_config *rx_cfg;
-
- u16 num_xdp_queues;
- bool raw_addressing;
- bool is_gqi;
-
- /* Allocated resources are returned here */
- struct gve_queue_page_list *qpls;
-};
-
/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
struct gve_queue_config *qcfg;
- /* qpls and qpl_cfg must already be allocated */
- struct gve_queue_page_list *qpls;
- struct gve_qpl_config *qpl_cfg;
-
u16 ring_size;
u16 start_idx;
u16 num_rings;
@@ -676,10 +662,6 @@ struct gve_rx_alloc_rings_cfg {
struct gve_queue_config *qcfg;
struct gve_queue_config *qcfg_tx;
- /* qpls and qpl_cfg must already be allocated */
- struct gve_queue_page_list *qpls;
- struct gve_qpl_config *qpl_cfg;
-
u16 ring_size;
u16 packet_buffer_size;
bool raw_addressing;
@@ -701,11 +683,43 @@ enum gve_queue_format {
GVE_DQO_QPL_FORMAT = 0x4,
};
+struct gve_flow_spec {
+ __be32 src_ip[4];
+ __be32 dst_ip[4];
+ union {
+ struct {
+ __be16 src_port;
+ __be16 dst_port;
+ };
+ __be32 spi;
+ };
+ union {
+ u8 tos;
+ u8 tclass;
+ };
+};
+
+struct gve_flow_rule {
+ u32 location;
+ u16 flow_type;
+ u16 action;
+ struct gve_flow_spec key;
+ struct gve_flow_spec mask;
+};
+
+struct gve_flow_rules_cache {
+ bool rules_cache_synced; /* False if the driver's rules_cache is outdated */
+ struct gve_adminq_queried_flow_rule *rules_cache;
+ __be32 *rule_ids_cache;
+ /* The total number of queried rules that are stored in the caches */
+ u32 rules_cache_num;
+ u32 rule_ids_cache_num;
+};
+
struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
- struct gve_queue_page_list *qpls; /* array of num qpls */
struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
dma_addr_t irq_db_indices_bus;
@@ -718,9 +732,13 @@ struct gve_priv {
u16 num_event_counters;
u16 tx_desc_cnt; /* num desc per ring */
u16 rx_desc_cnt; /* num desc per ring */
+ u16 max_tx_desc_cnt;
+ u16 max_rx_desc_cnt;
+ u16 min_tx_desc_cnt;
+ u16 min_rx_desc_cnt;
+ bool modify_ring_size_enabled;
+ bool default_min_ring_size;
u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
- u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */
- u16 rx_data_slot_cnt; /* rx buffer length */
u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */
struct bpf_prog *xdp_prog; /* XDP BPF program */
@@ -730,7 +748,6 @@ struct gve_priv {
u16 num_xdp_queues;
struct gve_queue_config tx_cfg;
struct gve_queue_config rx_cfg;
- struct gve_qpl_config qpl_cfg; /* map used QPL ids */
u32 num_ntfy_blks; /* split between TX and RX so must be even */
struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
@@ -745,6 +762,7 @@ struct gve_priv {
union gve_adminq_command *adminq;
dma_addr_t adminq_bus_addr;
struct dma_pool *adminq_pool;
+ struct mutex adminq_lock; /* Protects adminq command execution */
u32 adminq_mask; /* masks prod_cnt to adminq size */
u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
@@ -764,6 +782,8 @@ struct gve_priv {
u32 adminq_report_link_speed_cnt;
u32 adminq_get_ptype_map_cnt;
u32 adminq_verify_driver_compatibility_cnt;
+ u32 adminq_query_flow_rules_cnt;
+ u32 adminq_cfg_flow_rule_cnt;
/* Global stats */
u32 interface_up_cnt; /* count of times interface turned up since last reset */
@@ -792,7 +812,6 @@ struct gve_priv {
u64 link_speed;
bool up_before_suspend; /* True if dev was up before suspend */
- struct gve_options_dqo_rda options_dqo_rda;
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
@@ -807,6 +826,11 @@ struct gve_priv {
u16 header_buf_size; /* device configured, header-split supported if non-zero */
bool header_split_enabled; /* True if the header split is enabled by the user */
+
+ u32 max_flow_rules;
+ u32 num_flow_rules;
+
+ struct gve_flow_rules_cache flow_rules_cache;
};
enum gve_service_task_flags_bit {
@@ -1027,7 +1051,6 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
-/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
{
return tx_cfg->max_queues + rx_qid;
@@ -1038,41 +1061,17 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
return gve_tx_qpl_id(priv, 0);
}
-/* Returns the index into priv->qpls where the first rx queue's QPL resides */
static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{
return gve_get_rx_qpl_id(tx_cfg, 0);
}
-/* Returns a pointer to the next available tx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
- int tx_qid)
-{
- /* QPL already in use */
- if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
- return NULL;
- set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
- return &cfg->qpls[tx_qid];
-}
-
-/* Returns a pointer to the next available rx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
- int rx_qid)
+static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
{
- int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);
- /* QPL already in use */
- if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
- return NULL;
- set_bit(id, cfg->qpl_cfg->qpl_id_map);
- return &cfg->qpls[id];
-}
-
-/* Unassigns the qpl with the given id */
-static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
-{
- clear_bit(id, qpl_cfg->qpl_id_map);
+ /* For DQO, page count should be more than ring size for
+ * out-of-order completions. Set it to twice the ring size.
+ */
+ return 2 * rx_desc_cnt;
}
/* Returns the correct dma direction for tx and rx qpls */
@@ -1115,6 +1114,12 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
+/* qpls */
+struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
+ u32 id, int pages);
+void gve_free_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@@ -1137,6 +1142,12 @@ bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
+int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx);
+void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings(struct gve_priv *priv);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
@@ -1150,9 +1161,21 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
+void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+int gve_adjust_config(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config);
+/* flow steering rule */
+int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
+int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs);
+int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
+int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd);
+int gve_flow_rules_reset(struct gve_priv *priv);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
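
GVE_FLOW_RULE_IDS_CACHE_SIZE above uses the null-pointer-inside-sizeof
idiom to take a member's size without an instance; sizeof() is an
unevaluated context, so no dereference occurs. The kernel also ships a
named form of the same trick:

    /* From include/linux/stddef.h: */
    #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

    /* Equivalent to the expression used in the macro above: */
    sizeof_field(struct gve_adminq_queried_flow_rule, location)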
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index ae12ac38e18b..c5bbc1b7524e 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -32,6 +32,8 @@ struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *desc
return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}
+#define GVE_DEVICE_OPTION_NO_MIN_RING_SIZE 8
+
static
void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_descriptor *device_descriptor,
@@ -41,7 +43,9 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
- struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_flow_steering **dev_op_flow_steering,
+ struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
@@ -165,6 +169,44 @@ void gve_parse_device_option(struct gve_priv *priv,
"Buffer Sizes");
*dev_op_buffer_sizes = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_MODIFY_RING:
+ if (option_length < GVE_DEVICE_OPTION_NO_MIN_RING_SIZE ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Modify Ring", (int)sizeof(**dev_op_modify_ring),
+ GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_modify_ring)) {
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT, "Modify Ring");
+ }
+
+ *dev_op_modify_ring = (void *)(option + 1);
+
+ /* device has not provided min ring size */
+ if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
+ priv->default_min_ring_size = true;
+ break;
+ case GVE_DEV_OPT_ID_FLOW_STEERING:
+ if (option_length < sizeof(**dev_op_flow_steering) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Flow Steering",
+ (int)sizeof(**dev_op_flow_steering),
+ GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_flow_steering))
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Flow Steering");
+ *dev_op_flow_steering = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -183,7 +225,9 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
- struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_flow_steering **dev_op_flow_steering,
+ struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
struct gve_device_option *dev_opt;
@@ -204,7 +248,8 @@ gve_process_device_options(struct gve_priv *priv,
gve_parse_device_option(priv, descriptor, dev_opt,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
- dev_op_dqo_qpl, dev_op_buffer_sizes);
+ dev_op_dqo_qpl, dev_op_buffer_sizes,
+ dev_op_flow_steering, dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -242,6 +287,8 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_report_stats_cnt = 0;
priv->adminq_report_link_speed_cnt = 0;
priv->adminq_get_ptype_map_cnt = 0;
+ priv->adminq_query_flow_rules_cnt = 0;
+ priv->adminq_cfg_flow_rule_cnt = 0;
/* Setup Admin queue with the device */
if (priv->pdev->revision < 0x1) {
@@ -258,6 +305,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
&priv->reg_bar0->adminq_base_address_lo);
iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
}
+ mutex_init(&priv->adminq_lock);
gve_set_admin_queue_ok(priv);
return 0;
}
@@ -434,6 +482,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
opcode = be32_to_cpu(READ_ONCE(cmd->opcode));
+ if (opcode == GVE_ADMINQ_EXTENDED_COMMAND)
+ opcode = be32_to_cpu(cmd->extended_command.inner_opcode);
switch (opcode) {
case GVE_ADMINQ_DESCRIBE_DEVICE:
@@ -478,6 +528,12 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
priv->adminq_verify_driver_compatibility_cnt++;
break;
+ case GVE_ADMINQ_QUERY_FLOW_RULES:
+ priv->adminq_query_flow_rules_cnt++;
+ break;
+ case GVE_ADMINQ_CONFIGURE_FLOW_RULE:
+ priv->adminq_cfg_flow_rule_cnt++;
+ break;
default:
dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
}
@@ -485,28 +541,58 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv,
return 0;
}
-/* This function is not threadsafe - the caller is responsible for any
- * necessary locks.
- * The caller is also responsible for making sure there are no commands
- * waiting to be executed.
- */
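+/* Issue a command and wait for its completion; concurrent callers are
+ * serialized by adminq_lock.
+ */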
static int gve_adminq_execute_cmd(struct gve_priv *priv,
union gve_adminq_command *cmd_orig)
{
u32 tail, head;
int err;
+ mutex_lock(&priv->adminq_lock);
tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
head = priv->adminq_prod_cnt;
- if (tail != head)
- // This is not a valid path
- return -EINVAL;
+ if (tail != head) {
+ err = -EINVAL;
+ goto out;
+ }
err = gve_adminq_issue_cmd(priv, cmd_orig);
if (err)
- return err;
+ goto out;
- return gve_adminq_kick_and_wait(priv);
+ err = gve_adminq_kick_and_wait(priv);
+
+out:
+ mutex_unlock(&priv->adminq_lock);
+ return err;
+}
+
+static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode,
+ size_t cmd_size, void *cmd_orig)
+{
+ union gve_adminq_command cmd;
+ dma_addr_t inner_cmd_bus;
+ void *inner_cmd;
+ int err;
+
+ inner_cmd = dma_alloc_coherent(&priv->pdev->dev, cmd_size,
+ &inner_cmd_bus, GFP_KERNEL);
+ if (!inner_cmd)
+ return -ENOMEM;
+
+ memcpy(inner_cmd, cmd_orig, cmd_size);
+
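+ /* The outer 64-byte command carries only the inner opcode, the inner
+ * command's length and the DMA address the device fetches it from.
+ */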
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_EXTENDED_COMMAND);
+ cmd.extended_command = (struct gve_adminq_extended_command) {
+ .inner_opcode = cpu_to_be32(opcode),
+ .inner_length = cpu_to_be32(cmd_size),
+ .inner_command_addr = cpu_to_be64(inner_cmd_bus),
+ };
+
+ err = gve_adminq_execute_cmd(priv, &cmd);
+
+ dma_free_coherent(&priv->pdev->dev, cmd_size, inner_cmd, inner_cmd_bus);
+ return err;
}
/* The device specifies that the management vector can either be the first irq
@@ -565,6 +651,7 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
cpu_to_be64(tx->q_resources_bus),
.tx_ring_addr = cpu_to_be64(tx->bus),
.ntfy_id = cpu_to_be32(tx->ntfy_id),
+ .tx_ring_size = cpu_to_be16(priv->tx_desc_cnt),
};
if (gve_is_gqi(priv)) {
@@ -573,24 +660,17 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
} else {
- u16 comp_ring_size;
u32 qpl_id = 0;
- if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ if (priv->queue_format == GVE_DQO_RDA_FORMAT)
qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
- comp_ring_size =
- priv->options_dqo_rda.tx_comp_ring_entries;
- } else {
+ else
qpl_id = tx->dqo.qpl->id;
- comp_ring_size = priv->tx_desc_cnt;
- }
cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_tx_queue.tx_ring_size =
- cpu_to_be16(priv->tx_desc_cnt);
cmd.create_tx_queue.tx_comp_ring_addr =
cpu_to_be64(tx->complq_bus_dqo);
cmd.create_tx_queue.tx_comp_ring_size =
- cpu_to_be16(comp_ring_size);
+ cpu_to_be16(priv->tx_desc_cnt);
}
return gve_adminq_issue_cmd(priv, &cmd);
@@ -610,63 +690,73 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que
return gve_adminq_kick_and_wait(priv);
}
-static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
+static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
+ union gve_adminq_command *cmd,
+ u32 queue_index)
{
struct gve_rx_ring *rx = &priv->rx[queue_index];
- union gve_adminq_command cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
- cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
+ cmd->create_rx_queue = (struct gve_adminq_create_rx_queue) {
.queue_id = cpu_to_be32(queue_index),
.ntfy_id = cpu_to_be32(rx->ntfy_id),
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
+ .rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
};
if (gve_is_gqi(priv)) {
u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
- cmd.create_rx_queue.rx_desc_ring_addr =
- cpu_to_be64(rx->desc.bus),
- cmd.create_rx_queue.rx_data_ring_addr =
- cpu_to_be64(rx->data.data_bus),
- cmd.create_rx_queue.index = cpu_to_be32(queue_index);
- cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
+ cmd->create_rx_queue.rx_desc_ring_addr =
+ cpu_to_be64(rx->desc.bus);
+ cmd->create_rx_queue.rx_data_ring_addr =
+ cpu_to_be64(rx->data.data_bus);
+ cmd->create_rx_queue.index = cpu_to_be32(queue_index);
+ cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+ cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else {
- u16 rx_buff_ring_entries;
u32 qpl_id = 0;
- if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ if (priv->queue_format == GVE_DQO_RDA_FORMAT)
qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
- rx_buff_ring_entries =
- priv->options_dqo_rda.rx_buff_ring_entries;
- } else {
+ else
qpl_id = rx->dqo.qpl->id;
- rx_buff_ring_entries = priv->rx_desc_cnt;
- }
- cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_rx_queue.rx_ring_size =
- cpu_to_be16(priv->rx_desc_cnt);
- cmd.create_rx_queue.rx_desc_ring_addr =
+ cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+ cmd->create_rx_queue.rx_desc_ring_addr =
cpu_to_be64(rx->dqo.complq.bus);
- cmd.create_rx_queue.rx_data_ring_addr =
+ cmd->create_rx_queue.rx_data_ring_addr =
cpu_to_be64(rx->dqo.bufq.bus);
- cmd.create_rx_queue.packet_buffer_size =
+ cmd->create_rx_queue.packet_buffer_size =
cpu_to_be16(priv->data_buffer_size_dqo);
- cmd.create_rx_queue.rx_buff_ring_size =
- cpu_to_be16(rx_buff_ring_entries);
- cmd.create_rx_queue.enable_rsc =
+ cmd->create_rx_queue.rx_buff_ring_size =
+ cpu_to_be16(priv->rx_desc_cnt);
+ cmd->create_rx_queue.enable_rsc =
!!(priv->dev->features & NETIF_F_LRO);
if (priv->header_split_enabled)
- cmd.create_rx_queue.header_buffer_size =
+ cmd->create_rx_queue.header_buffer_size =
cpu_to_be16(priv->header_buf_size);
}
+}
+static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
+
+ gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
return gve_adminq_issue_cmd(priv, &cmd);
}
+/* Unlike gve_adminq_create_rx_queue, this actually rings the doorbell */
+int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
+
+ gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
int err;
@@ -713,22 +803,31 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu
return gve_adminq_kick_and_wait(priv);
}
+static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
+ u32 queue_index)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
+ cmd->destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
+ .queue_id = cpu_to_be32(queue_index),
+ };
+}
+
static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
- int err;
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
- cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
- .queue_id = cpu_to_be32(queue_index),
- };
+ gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
+ return gve_adminq_issue_cmd(priv, &cmd);
+}
- err = gve_adminq_issue_cmd(priv, &cmd);
- if (err)
- return err;
+/* Unlike gve_adminq_destroy_rx_queue, this actually rings the doorbell */
+int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
- return 0;
+ gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
+ return gve_adminq_execute_cmd(priv, &cmd);
}
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
@@ -745,31 +844,17 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
return gve_adminq_kick_and_wait(priv);
}
-static int gve_set_desc_cnt(struct gve_priv *priv,
- struct gve_device_descriptor *descriptor)
-{
- priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
- priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
- return 0;
-}
-
-static int
-gve_set_desc_cnt_dqo(struct gve_priv *priv,
- const struct gve_device_descriptor *descriptor,
- const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
+static void gve_set_default_desc_cnt(struct gve_priv *priv,
+ const struct gve_device_descriptor *descriptor)
{
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
- if (priv->queue_format == GVE_DQO_QPL_FORMAT)
- return 0;
-
- priv->options_dqo_rda.tx_comp_ring_entries =
- be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
- priv->options_dqo_rda.rx_buff_ring_entries =
- be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
-
- return 0;
+ /* set default ranges */
+ priv->max_tx_desc_cnt = priv->tx_desc_cnt;
+ priv->max_rx_desc_cnt = priv->rx_desc_cnt;
+ priv->min_tx_desc_cnt = priv->tx_desc_cnt;
+ priv->min_rx_desc_cnt = priv->rx_desc_cnt;
}
static void gve_enable_supported_features(struct gve_priv *priv,
@@ -779,7 +864,11 @@ static void gve_enable_supported_features(struct gve_priv *priv,
const struct gve_device_option_dqo_qpl
*dev_op_dqo_qpl,
const struct gve_device_option_buffer_sizes
- *dev_op_buffer_sizes)
+ *dev_op_buffer_sizes,
+ const struct gve_device_option_flow_steering
+ *dev_op_flow_steering,
+ const struct gve_device_option_modify_ring
+ *dev_op_modify_ring)
{
/* Before control reaches this point, the page-size-capped max MTU from
* the gve_device_descriptor field has already been stored in
@@ -796,12 +885,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
if (dev_op_dqo_qpl) {
priv->tx_pages_per_qpl =
be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
- priv->rx_pages_per_qpl =
- be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl);
if (priv->tx_pages_per_qpl == 0)
priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
- if (priv->rx_pages_per_qpl == 0)
- priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
}
if (dev_op_buffer_sizes &&
@@ -814,12 +899,46 @@ static void gve_enable_supported_features(struct gve_priv *priv,
"BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
priv->max_rx_buffer_size, priv->header_buf_size);
}
+
+ /* Read and store ring size ranges given by device */
+ if (dev_op_modify_ring &&
+ (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
+ priv->modify_ring_size_enabled = true;
+
+ /* Do not overwrite the max ring sizes for DQO QPL; they are capped by a device limit */
+ if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
+ priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
+ priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
+ }
+ if (priv->default_min_ring_size) {
+ /* If device hasn't provided minimums, use default minimums */
+ priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
+ priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
+ } else {
+ priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
+ priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
+ }
+ }
+
+ if (dev_op_flow_steering &&
+ (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) {
+ if (dev_op_flow_steering->max_flow_rules) {
+ priv->max_flow_rules =
+ be32_to_cpu(dev_op_flow_steering->max_flow_rules);
+ priv->dev->hw_features |= NETIF_F_NTUPLE;
+ dev_info(&priv->pdev->dev,
+ "FLOW STEERING device option enabled with max rule limit of %u.\n",
+ priv->max_flow_rules);
+ }
+ }
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
+ struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
@@ -851,9 +970,10 @@ int gve_adminq_describe_device(struct gve_priv *priv)
err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
&dev_op_gqi_qpl, &dev_op_dqo_rda,
- &dev_op_jumbo_frames,
- &dev_op_dqo_qpl,
- &dev_op_buffer_sizes);
+ &dev_op_jumbo_frames, &dev_op_dqo_qpl,
+ &dev_op_buffer_sizes,
+ &dev_op_flow_steering,
+ &dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -888,15 +1008,13 @@ int gve_adminq_describe_device(struct gve_priv *priv)
dev_info(&priv->pdev->dev,
"Driver is running with GQI QPL queue format.\n");
}
- if (gve_is_gqi(priv)) {
- err = gve_set_desc_cnt(priv, descriptor);
- } else {
- /* DQO supports LRO. */
+
+ /* set default descriptor counts */
+ gve_set_default_desc_cnt(priv, descriptor);
+
+ /* DQO supports LRO. */
+ if (!gve_is_gqi(priv))
priv->dev->hw_features |= NETIF_F_LRO;
- err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
- }
- if (err)
- goto free_device_descriptor;
priv->max_registered_pages =
be64_to_cpu(descriptor->max_registered_pages);
@@ -912,18 +1030,12 @@ int gve_adminq_describe_device(struct gve_priv *priv)
mac = descriptor->mac;
dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
- priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
-
- if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
- dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
- priv->rx_data_slot_cnt);
- priv->rx_desc_cnt = priv->rx_data_slot_cnt;
- }
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
- dev_op_buffer_sizes);
+ dev_op_buffer_sizes, dev_op_flow_steering,
+ dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
@@ -1086,3 +1198,130 @@ err:
ptype_map_bus);
return err;
}
+
+static int
+gve_adminq_configure_flow_rule(struct gve_priv *priv,
+ struct gve_adminq_configure_flow_rule *flow_rule_cmd)
+{
+ int err = gve_adminq_execute_extended_cmd(priv,
+ GVE_ADMINQ_CONFIGURE_FLOW_RULE,
+ sizeof(struct gve_adminq_configure_flow_rule),
+ flow_rule_cmd);
+
+ if (err) {
+ dev_err(&priv->pdev->dev, "Timeout to configure the flow rule, trigger reset");
+ gve_reset(priv, true);
+ } else {
+ priv->flow_rules_cache.rules_cache_synced = false;
+ }
+
+ return err;
+}
+
+int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc)
+{
+ struct gve_adminq_configure_flow_rule flow_rule_cmd = {
+ .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_ADD),
+ .location = cpu_to_be32(loc),
+ .rule = *rule,
+ };
+
+ return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
+}
+
+int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc)
+{
+ struct gve_adminq_configure_flow_rule flow_rule_cmd = {
+ .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_DEL),
+ .location = cpu_to_be32(loc),
+ };
+
+ return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
+}
+
+int gve_adminq_reset_flow_rules(struct gve_priv *priv)
+{
+ struct gve_adminq_configure_flow_rule flow_rule_cmd = {
+ .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_RESET),
+ };
+
+ return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
+}
+
+/* In the DMA memory that the driver allocated for the flow rules query, the
+ * device first writes a struct gve_query_flow_rules_descriptor. After it, the
+ * device writes an array of rules or rule ids, with the count specified in
+ * the descriptor.
+ * For GVE_FLOW_RULE_QUERY_STATS, the device writes only the descriptor.
+ */
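+/* A sketch of the layout the length checks below enforce:
+ *
+ *   | gve_query_flow_rules_descriptor | num_queried_rules entries |
+ *
+ * total_length == sizeof(descriptor) + num_queried_rules * entry size
+ */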
+static int gve_adminq_process_flow_rules_query(struct gve_priv *priv, u16 query_opcode,
+ struct gve_query_flow_rules_descriptor *descriptor)
+{
+ struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
+ u32 num_queried_rules, total_memory_len, rule_info_len;
+ void *rule_info;
+
+ total_memory_len = be32_to_cpu(descriptor->total_length);
+ num_queried_rules = be32_to_cpu(descriptor->num_queried_rules);
+ rule_info = (void *)(descriptor + 1);
+
+ switch (query_opcode) {
+ case GVE_FLOW_RULE_QUERY_RULES:
+ rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rules_cache);
+ if (sizeof(*descriptor) + rule_info_len != total_memory_len) {
+ dev_err(&priv->dev->dev, "flow rules query is out of memory.\n");
+ return -ENOMEM;
+ }
+
+ memcpy(flow_rules_cache->rules_cache, rule_info, rule_info_len);
+ flow_rules_cache->rules_cache_num = num_queried_rules;
+ break;
+ case GVE_FLOW_RULE_QUERY_IDS:
+ rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rule_ids_cache);
+ if (sizeof(*descriptor) + rule_info_len != total_memory_len) {
+ dev_err(&priv->dev->dev, "flow rule ids query is out of memory.\n");
+ return -ENOMEM;
+ }
+
+ memcpy(flow_rules_cache->rule_ids_cache, rule_info, rule_info_len);
+ flow_rules_cache->rule_ids_cache_num = num_queried_rules;
+ break;
+ case GVE_FLOW_RULE_QUERY_STATS:
+ priv->num_flow_rules = be32_to_cpu(descriptor->num_flow_rules);
+ priv->max_flow_rules = be32_to_cpu(descriptor->max_flow_rules);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc)
+{
+ struct gve_query_flow_rules_descriptor *descriptor;
+ union gve_adminq_command cmd;
+ dma_addr_t descriptor_bus;
+ int err = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL, &descriptor_bus);
+ if (!descriptor)
+ return -ENOMEM;
+
+ cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_FLOW_RULES);
+ cmd.query_flow_rules = (struct gve_adminq_query_flow_rules) {
+ .opcode = cpu_to_be16(query_opcode),
+ .starting_rule_id = cpu_to_be32(starting_loc),
+ .available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE),
+ .rule_descriptor_addr = cpu_to_be64(descriptor_bus),
+ };
+ err = gve_adminq_execute_cmd(priv, &cmd);
+ if (err)
+ goto out;
+
+ err = gve_adminq_process_flow_rules_query(priv, query_opcode, descriptor);
+
+out:
+ dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
+ return err;
+}
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 5ac972e45ff8..ed1370c9b197 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -25,6 +25,19 @@ enum gve_adminq_opcodes {
GVE_ADMINQ_REPORT_LINK_SPEED = 0xD,
GVE_ADMINQ_GET_PTYPE_MAP = 0xE,
GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF,
+ GVE_ADMINQ_QUERY_FLOW_RULES = 0x10,
+
+ /* For commands that are larger than 56 bytes */
+ GVE_ADMINQ_EXTENDED_COMMAND = 0xFF,
+};
+
+/* A normal adminq command is restricted to a maximum of 56 bytes. Longer
+ * commands are wrapped in a GVE_ADMINQ_EXTENDED_COMMAND carrying an inner
+ * opcode from gve_adminq_extended_cmd_opcodes; the inner command itself is
+ * written to DMA memory allocated by the driver.
+ */
+enum gve_adminq_extended_cmd_opcodes {
+ GVE_ADMINQ_CONFIGURE_FLOW_RULE = 0x101,
};
/* Admin queue status codes */
@@ -103,8 +116,7 @@ static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
struct gve_device_option_dqo_rda {
__be32 supported_features_mask;
- __be16 tx_comp_ring_entries;
- __be16 rx_buff_ring_entries;
+ __be32 reserved;
};
static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
@@ -134,6 +146,24 @@ struct gve_device_option_buffer_sizes {
static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
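+/* If the device sends only the first GVE_DEVICE_OPTION_NO_MIN_RING_SIZE bytes
+ * of this option, it has not provided min ring sizes and the driver falls
+ * back to GVE_DEFAULT_MIN_TX_RING_SIZE/GVE_DEFAULT_MIN_RX_RING_SIZE.
+ */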
+struct gve_device_option_modify_ring {
+ __be32 supported_features_mask;
+ __be16 max_rx_ring_size;
+ __be16 max_tx_ring_size;
+ __be16 min_rx_ring_size;
+ __be16 min_tx_ring_size;
+};
+
+static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
+
+struct gve_device_option_flow_steering {
+ __be32 supported_features_mask;
+ __be32 reserved;
+ __be32 max_flow_rules;
+};
+
+static_assert(sizeof(struct gve_device_option_flow_steering) == 12);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -143,28 +173,34 @@ static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
* the device for read/write and data is copied from/to SKBs.
*/
enum gve_dev_opt_id {
- GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
- GVE_DEV_OPT_ID_GQI_RDA = 0x2,
- GVE_DEV_OPT_ID_GQI_QPL = 0x3,
- GVE_DEV_OPT_ID_DQO_RDA = 0x4,
- GVE_DEV_OPT_ID_DQO_QPL = 0x7,
- GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
- GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
+ GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
+ GVE_DEV_OPT_ID_GQI_RDA = 0x2,
+ GVE_DEV_OPT_ID_GQI_QPL = 0x3,
+ GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+ GVE_DEV_OPT_ID_MODIFY_RING = 0x6,
+ GVE_DEV_OPT_ID_DQO_QPL = 0x7,
+ GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
+ GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
+ GVE_DEV_OPT_ID_FLOW_STEERING = 0xb,
};
enum gve_dev_opt_req_feat_mask {
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0,
};
enum gve_sup_feature_mask {
- GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
- GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
+ GVE_SUP_MODIFY_RING_MASK = 1 << 0,
+ GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
+ GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
+ GVE_SUP_FLOW_STEERING_MASK = 1 << 5,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -196,6 +232,14 @@ enum gve_driver_capbility {
#define GVE_DRIVER_CAPABILITY_FLAGS3 0x0
#define GVE_DRIVER_CAPABILITY_FLAGS4 0x0
+struct gve_adminq_extended_command {
+ __be32 inner_opcode;
+ __be32 inner_length;
+ __be64 inner_command_addr;
+};
+
+static_assert(sizeof(struct gve_adminq_extended_command) == 16);
+
struct gve_driver_info {
u8 os_type; /* 0x01 = Linux */
u8 driver_major;
@@ -400,6 +444,71 @@ struct gve_adminq_get_ptype_map {
__be64 ptype_map_addr;
};
+/* Flow-steering related definitions */
+enum gve_adminq_flow_rule_cfg_opcode {
+ GVE_FLOW_RULE_CFG_ADD = 0,
+ GVE_FLOW_RULE_CFG_DEL = 1,
+ GVE_FLOW_RULE_CFG_RESET = 2,
+};
+
+enum gve_adminq_flow_rule_query_opcode {
+ GVE_FLOW_RULE_QUERY_RULES = 0,
+ GVE_FLOW_RULE_QUERY_IDS = 1,
+ GVE_FLOW_RULE_QUERY_STATS = 2,
+};
+
+enum gve_adminq_flow_type {
+ GVE_FLOW_TYPE_TCPV4,
+ GVE_FLOW_TYPE_UDPV4,
+ GVE_FLOW_TYPE_SCTPV4,
+ GVE_FLOW_TYPE_AHV4,
+ GVE_FLOW_TYPE_ESPV4,
+ GVE_FLOW_TYPE_TCPV6,
+ GVE_FLOW_TYPE_UDPV6,
+ GVE_FLOW_TYPE_SCTPV6,
+ GVE_FLOW_TYPE_AHV6,
+ GVE_FLOW_TYPE_ESPV6,
+};
+
+/* Flow-steering command */
+struct gve_adminq_flow_rule {
+ __be16 flow_type;
+ __be16 action; /* RX queue id */
+ struct gve_flow_spec key;
+ struct gve_flow_spec mask;
+};
+
+struct gve_adminq_configure_flow_rule {
+ __be16 opcode;
+ u8 padding[2];
+ struct gve_adminq_flow_rule rule;
+ __be32 location;
+};
+
+static_assert(sizeof(struct gve_adminq_configure_flow_rule) == 92);
+
+struct gve_query_flow_rules_descriptor {
+ __be32 num_flow_rules;
+ __be32 max_flow_rules;
+ __be32 num_queried_rules;
+ __be32 total_length;
+};
+
+struct gve_adminq_queried_flow_rule {
+ __be32 location;
+ struct gve_adminq_flow_rule flow_rule;
+};
+
+struct gve_adminq_query_flow_rules {
+ __be16 opcode;
+ u8 padding[2];
+ __be32 starting_rule_id;
+ __be64 available_length; /* The dma memory length that the driver allocated */
+ __be64 rule_descriptor_addr; /* The dma memory address */
+};
+
+static_assert(sizeof(struct gve_adminq_query_flow_rules) == 24);
+
union gve_adminq_command {
struct {
__be32 opcode;
@@ -420,6 +529,8 @@ union gve_adminq_command {
struct gve_adminq_get_ptype_map get_ptype_map;
struct gve_adminq_verify_driver_compatibility
verify_driver_compatibility;
+ struct gve_adminq_query_flow_rules query_flow_rules;
+ struct gve_adminq_extended_command extended_command;
};
};
u8 reserved[64];
@@ -439,7 +550,9 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
+int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl);
@@ -451,6 +564,10 @@ int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
u64 driver_info_len,
dma_addr_t driver_info_addr);
int gve_adminq_report_link_speed(struct gve_priv *priv);
+int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc);
+int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc);
+int gve_adminq_reset_flow_rules(struct gve_priv *priv);
+int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc);
struct gve_ptype_lut;
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index b81584829c40..e83773fb891f 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -44,6 +44,12 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx);
+int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx);
+void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_dqo(struct gve_priv *priv,
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 9aebfb843d9d..5a8b490ab3ad 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2021 Google, Inc.
+ * Copyright (C) 2015-2024 Google LLC
*/
#include <linux/rtnetlink.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"
+#include "gve_utils.h"
static void gve_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -73,7 +74,8 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
- "adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
+ "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt",
+ "adminq_query_flow_rules", "adminq_cfg_flow_rule",
};
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
@@ -89,42 +91,34 @@ static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct gve_priv *priv = netdev_priv(netdev);
- char *s = (char *)data;
+ u8 *s = data;
int num_tx_queues;
int i, j;
num_tx_queues = gve_num_tx_queues(priv);
switch (stringset) {
case ETH_SS_STATS:
- memcpy(s, *gve_gstrings_main_stats,
- sizeof(gve_gstrings_main_stats));
- s += sizeof(gve_gstrings_main_stats);
-
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
- snprintf(s, ETH_GSTRING_LEN,
- gve_gstrings_rx_stats[j], i);
- s += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(gve_gstrings_main_stats); i++)
+ ethtool_puts(&s, gve_gstrings_main_stats[i]);
- for (i = 0; i < num_tx_queues; i++) {
- for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
- snprintf(s, ETH_GSTRING_LEN,
- gve_gstrings_tx_stats[j], i);
- s += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ for (j = 0; j < NUM_GVE_RX_CNTS; j++)
+ ethtool_sprintf(&s, gve_gstrings_rx_stats[j],
+ i);
+
+ for (i = 0; i < num_tx_queues; i++)
+ for (j = 0; j < NUM_GVE_TX_CNTS; j++)
+ ethtool_sprintf(&s, gve_gstrings_tx_stats[j],
+ i);
+
+ for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
+ ethtool_puts(&s, gve_gstrings_adminq_stats[i]);
- memcpy(s, *gve_gstrings_adminq_stats,
- sizeof(gve_gstrings_adminq_stats));
- s += sizeof(gve_gstrings_adminq_stats);
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(s, *gve_gstrings_priv_flags,
- sizeof(gve_gstrings_priv_flags));
- s += sizeof(gve_gstrings_priv_flags);
+ for (i = 0; i < ARRAY_SIZE(gve_gstrings_priv_flags); i++)
+ ethtool_puts(&s, gve_gstrings_priv_flags[i]);
break;
default:
@@ -165,6 +159,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct stats *report_stats;
int *rx_qid_to_stats_idx;
int *tx_qid_to_stats_idx;
+ int num_stopped_rxqs = 0;
+ int num_stopped_txqs = 0;
struct gve_priv *priv;
bool skip_nic_stats;
unsigned int start;
@@ -181,12 +177,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
sizeof(int), GFP_KERNEL);
if (!rx_qid_to_stats_idx)
return;
+ for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
+ rx_qid_to_stats_idx[ring] = -1;
+ if (!gve_rx_was_added_to_block(priv, ring))
+ num_stopped_rxqs++;
+ }
tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
sizeof(int), GFP_KERNEL);
if (!tx_qid_to_stats_idx) {
kfree(rx_qid_to_stats_idx);
return;
}
+ for (ring = 0; ring < num_tx_queues; ring++) {
+ tx_qid_to_stats_idx[ring] = -1;
+ if (!gve_tx_was_added_to_block(priv, ring))
+ num_stopped_txqs++;
+ }
+
for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
@@ -260,7 +267,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* For rx cross-reporting stats, start from nic rx stats in report */
base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
- max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
+ /* The boundary between driver stats and NIC stats shifts if there are
+ * stopped queues.
+ */
+ base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
+ NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
+ max_stats_idx = NIC_RX_STATS_REPORT_NUM *
+ (priv->rx_cfg.num_queues - num_stopped_rxqs) +
base_stats_idx;
/* Preprocess the stats report for rx, map queue id to start index */
skip_nic_stats = false;
@@ -274,6 +287,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
skip_nic_stats = true;
break;
}
+ if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
+ net_err_ratelimited("Invalid rxq id in NIC stats\n");
+ continue;
+ }
rx_qid_to_stats_idx[queue_id] = stats_idx;
}
/* walk RX rings */
@@ -308,11 +325,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->rx_copybreak_pkt;
data[i++] = rx->rx_copied_pkt;
/* stats from NIC */
- if (skip_nic_stats) {
+ stats_idx = rx_qid_to_stats_idx[ring];
+ if (skip_nic_stats || stats_idx < 0) {
/* skip NIC rx stats */
i += NIC_RX_STATS_REPORT_NUM;
} else {
- stats_idx = rx_qid_to_stats_idx[ring];
for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
u64 value =
be64_to_cpu(report_stats[stats_idx + j].value);
@@ -338,7 +355,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* For tx cross-reporting stats, start from nic tx stats in report */
base_stats_idx = max_stats_idx;
- max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
+ max_stats_idx = NIC_TX_STATS_REPORT_NUM *
+ (num_tx_queues - num_stopped_txqs) +
max_stats_idx;
/* Preprocess the stats report for tx, map queue id to start index */
skip_nic_stats = false;
@@ -352,6 +370,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
skip_nic_stats = true;
break;
}
+ if (queue_id < 0 || queue_id >= num_tx_queues) {
+ net_err_ratelimited("Invalid txq id in NIC stats\n");
+ continue;
+ }
tx_qid_to_stats_idx[queue_id] = stats_idx;
}
/* walk TX rings */
@@ -383,11 +405,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = gve_tx_load_event_counter(priv, tx);
data[i++] = tx->dma_mapping_error;
/* stats from NIC */
- if (skip_nic_stats) {
+ stats_idx = tx_qid_to_stats_idx[ring];
+ if (skip_nic_stats || stats_idx < 0) {
/* skip NIC tx stats */
i += NIC_TX_STATS_REPORT_NUM;
} else {
- stats_idx = tx_qid_to_stats_idx[ring];
for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
u64 value =
be64_to_cpu(report_stats[stats_idx + j].value);
@@ -428,6 +450,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->adminq_set_driver_parameter_cnt;
data[i++] = priv->adminq_report_stats_cnt;
data[i++] = priv->adminq_report_link_speed_cnt;
+ data[i++] = priv->adminq_get_ptype_map_cnt;
+ data[i++] = priv->adminq_query_flow_rules_cnt;
+ data[i++] = priv->adminq_cfg_flow_rule_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -470,7 +495,7 @@ static int gve_set_channels(struct net_device *netdev,
return -EINVAL;
}
- if (!netif_carrier_ok(netdev)) {
+ if (!netif_running(netdev)) {
priv->tx_cfg.num_queues = new_tx;
priv->rx_cfg.num_queues = new_rx;
return 0;
@@ -489,8 +514,8 @@ static void gve_get_ringparam(struct net_device *netdev,
{
struct gve_priv *priv = netdev_priv(netdev);
- cmd->rx_max_pending = priv->rx_desc_cnt;
- cmd->tx_max_pending = priv->tx_desc_cnt;
+ cmd->rx_max_pending = priv->max_rx_desc_cnt;
+ cmd->tx_max_pending = priv->max_tx_desc_cnt;
cmd->rx_pending = priv->rx_desc_cnt;
cmd->tx_pending = priv->tx_desc_cnt;
@@ -502,20 +527,81 @@ static void gve_get_ringparam(struct net_device *netdev,
kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
+static int gve_adjust_ring_sizes(struct gve_priv *priv,
+ u16 new_tx_desc_cnt,
+ u16 new_rx_desc_cnt)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ int err;
+
+ /* get current queue configuration */
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+
+ /* copy over the new ring_size from ethtool */
+ tx_alloc_cfg.ring_size = new_tx_desc_cnt;
+ rx_alloc_cfg.ring_size = new_rx_desc_cnt;
+
+ if (netif_running(priv->dev)) {
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+ }
+
+ /* Record the new ring sizes for the next device bring-up */
+ priv->tx_desc_cnt = new_tx_desc_cnt;
+ priv->rx_desc_cnt = new_rx_desc_cnt;
+
+ return 0;
+}
+
+static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
+ u16 new_rx_desc_cnt)
+{
+ /* check for valid range */
+ if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
+ new_tx_desc_cnt > priv->max_tx_desc_cnt ||
+ new_rx_desc_cnt < priv->min_rx_desc_cnt ||
+ new_rx_desc_cnt > priv->max_rx_desc_cnt) {
+ dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
+ dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int gve_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *cmd,
struct kernel_ethtool_ringparam *kernel_cmd,
struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
+ u16 new_tx_cnt, new_rx_cnt;
+ int err;
+
+ err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
+ if (err)
+ return err;
- if (priv->tx_desc_cnt != cmd->tx_pending ||
- priv->rx_desc_cnt != cmd->rx_pending) {
- dev_info(&priv->pdev->dev, "Modify ring size is not supported.\n");
+ if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
+ return 0;
+
+ if (!priv->modify_ring_size_enabled) {
+ dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
return -EOPNOTSUPP;
}
- return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
+ new_tx_cnt = cmd->tx_pending;
+ new_rx_cnt = cmd->rx_pending;
+
+ if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
+ return -EINVAL;
+
+ return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
}
static int gve_user_reset(struct net_device *netdev, u32 *flags)
@@ -689,6 +775,69 @@ static int gve_set_coalesce(struct net_device *netdev,
return 0;
}
+static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ if (!(netdev->features & NETIF_F_NTUPLE))
+ return -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = gve_add_flow_rule(priv, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = gve_del_flow_rule(priv, cmd);
+ break;
+ case ETHTOOL_SRXFH:
+ err = -EOPNOTSUPP;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct gve_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->rx_cfg.num_queues;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_STATS, 0);
+ if (err)
+ return err;
+
+ cmd->rule_cnt = priv->num_flow_rules;
+ cmd->data = priv->max_flow_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = gve_get_flow_rule_entry(priv, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs);
+ break;
+ case ETHTOOL_GRXFH:
+ err = -EOPNOTSUPP;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
.supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
@@ -700,6 +849,8 @@ const struct ethtool_ops gve_ethtool_ops = {
.get_msglevel = gve_get_msglevel,
.set_channels = gve_set_channels,
.get_channels = gve_get_channels,
+ .set_rxnfc = gve_set_rxnfc,
+ .get_rxnfc = gve_get_rxnfc,
.get_link = ethtool_op_get_link,
.get_coalesce = gve_get_coalesce,
.set_coalesce = gve_set_coalesce,
@@ -710,5 +861,6 @@ const struct ethtool_ops gve_ethtool_ops = {
.set_tunable = gve_set_tunable,
.get_priv_flags = gve_get_priv_flags,
.set_priv_flags = gve_set_priv_flags,
- .get_link_ksettings = gve_get_link_ksettings
+ .get_link_ksettings = gve_get_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
diff --git a/drivers/net/ethernet/google/gve/gve_flow_rule.c b/drivers/net/ethernet/google/gve/gve_flow_rule.c
new file mode 100644
index 000000000000..0bb8cd1876a3
--- /dev/null
+++ b/drivers/net/ethernet/google/gve/gve_flow_rule.c
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Google virtual Ethernet (gve) driver
+ *
+ * Copyright (C) 2015-2024 Google LLC
+ */
+
+#include "gve.h"
+#include "gve_adminq.h"
+
+static int gve_fill_ethtool_flow_spec(struct ethtool_rx_flow_spec *fsp,
+ struct gve_adminq_queried_flow_rule *rule)
+{
+ struct gve_adminq_flow_rule *flow_rule = &rule->flow_rule;
+ static const u16 flow_type_lut[] = {
+ [GVE_FLOW_TYPE_TCPV4] = TCP_V4_FLOW,
+ [GVE_FLOW_TYPE_UDPV4] = UDP_V4_FLOW,
+ [GVE_FLOW_TYPE_SCTPV4] = SCTP_V4_FLOW,
+ [GVE_FLOW_TYPE_AHV4] = AH_V4_FLOW,
+ [GVE_FLOW_TYPE_ESPV4] = ESP_V4_FLOW,
+ [GVE_FLOW_TYPE_TCPV6] = TCP_V6_FLOW,
+ [GVE_FLOW_TYPE_UDPV6] = UDP_V6_FLOW,
+ [GVE_FLOW_TYPE_SCTPV6] = SCTP_V6_FLOW,
+ [GVE_FLOW_TYPE_AHV6] = AH_V6_FLOW,
+ [GVE_FLOW_TYPE_ESPV6] = ESP_V6_FLOW,
+ };
+
+ if (be16_to_cpu(flow_rule->flow_type) >= ARRAY_SIZE(flow_type_lut))
+ return -EINVAL;
+
+ fsp->flow_type = flow_type_lut[be16_to_cpu(flow_rule->flow_type)];
+
+ memset(&fsp->h_u, 0, sizeof(fsp->h_u));
+ memset(&fsp->h_ext, 0, sizeof(fsp->h_ext));
+ memset(&fsp->m_u, 0, sizeof(fsp->m_u));
+ memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
+
+ switch (fsp->flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ fsp->h_u.tcp_ip4_spec.ip4src = flow_rule->key.src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = flow_rule->key.dst_ip[0];
+ fsp->h_u.tcp_ip4_spec.psrc = flow_rule->key.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = flow_rule->key.dst_port;
+ fsp->h_u.tcp_ip4_spec.tos = flow_rule->key.tos;
+ fsp->m_u.tcp_ip4_spec.ip4src = flow_rule->mask.src_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4dst = flow_rule->mask.dst_ip[0];
+ fsp->m_u.tcp_ip4_spec.psrc = flow_rule->mask.src_port;
+ fsp->m_u.tcp_ip4_spec.pdst = flow_rule->mask.dst_port;
+ fsp->m_u.tcp_ip4_spec.tos = flow_rule->mask.tos;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ fsp->h_u.ah_ip4_spec.ip4src = flow_rule->key.src_ip[0];
+ fsp->h_u.ah_ip4_spec.ip4dst = flow_rule->key.dst_ip[0];
+ fsp->h_u.ah_ip4_spec.spi = flow_rule->key.spi;
+ fsp->h_u.ah_ip4_spec.tos = flow_rule->key.tos;
+ fsp->m_u.ah_ip4_spec.ip4src = flow_rule->mask.src_ip[0];
+ fsp->m_u.ah_ip4_spec.ip4dst = flow_rule->mask.dst_ip[0];
+ fsp->m_u.ah_ip4_spec.spi = flow_rule->mask.spi;
+ fsp->m_u.ah_ip4_spec.tos = flow_rule->mask.tos;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6src, &flow_rule->key.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, &flow_rule->key.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.tcp_ip6_spec.psrc = flow_rule->key.src_port;
+ fsp->h_u.tcp_ip6_spec.pdst = flow_rule->key.dst_port;
+ fsp->h_u.tcp_ip6_spec.tclass = flow_rule->key.tclass;
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6src, &flow_rule->mask.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, &flow_rule->mask.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.tcp_ip6_spec.psrc = flow_rule->mask.src_port;
+ fsp->m_u.tcp_ip6_spec.pdst = flow_rule->mask.dst_port;
+ fsp->m_u.tcp_ip6_spec.tclass = flow_rule->mask.tclass;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(fsp->h_u.ah_ip6_spec.ip6src, &flow_rule->key.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &flow_rule->key.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->h_u.ah_ip6_spec.spi = flow_rule->key.spi;
+ fsp->h_u.ah_ip6_spec.tclass = flow_rule->key.tclass;
+ memcpy(fsp->m_u.ah_ip6_spec.ip6src, &flow_rule->mask.src_ip,
+ sizeof(struct in6_addr));
+ memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &flow_rule->mask.dst_ip,
+ sizeof(struct in6_addr));
+ fsp->m_u.ah_ip6_spec.spi = flow_rule->mask.spi;
+ fsp->m_u.ah_ip6_spec.tclass = flow_rule->mask.tclass;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fsp->ring_cookie = be16_to_cpu(flow_rule->action);
+
+ return 0;
+}
+
+static int gve_generate_flow_rule(struct gve_priv *priv, struct ethtool_rx_flow_spec *fsp,
+ struct gve_adminq_flow_rule *rule)
+{
+ static const u16 flow_type_lut[] = {
+ [TCP_V4_FLOW] = GVE_FLOW_TYPE_TCPV4,
+ [UDP_V4_FLOW] = GVE_FLOW_TYPE_UDPV4,
+ [SCTP_V4_FLOW] = GVE_FLOW_TYPE_SCTPV4,
+ [AH_V4_FLOW] = GVE_FLOW_TYPE_AHV4,
+ [ESP_V4_FLOW] = GVE_FLOW_TYPE_ESPV4,
+ [TCP_V6_FLOW] = GVE_FLOW_TYPE_TCPV6,
+ [UDP_V6_FLOW] = GVE_FLOW_TYPE_UDPV6,
+ [SCTP_V6_FLOW] = GVE_FLOW_TYPE_SCTPV6,
+ [AH_V6_FLOW] = GVE_FLOW_TYPE_AHV6,
+ [ESP_V6_FLOW] = GVE_FLOW_TYPE_ESPV6,
+ };
+ u32 flow_type;
+
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+ return -EOPNOTSUPP;
+
+ if (fsp->ring_cookie >= priv->rx_cfg.num_queues)
+ return -EINVAL;
+
+ rule->action = cpu_to_be16(fsp->ring_cookie);
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+ if (!flow_type || flow_type >= ARRAY_SIZE(flow_type_lut))
+ return -EINVAL;
+
+ rule->flow_type = cpu_to_be16(flow_type_lut[flow_type]);
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ rule->key.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ rule->key.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+ rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+ rule->mask.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ rule->mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+ break;
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ rule->key.spi = fsp->h_u.ah_ip4_spec.spi;
+ rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+ rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+ rule->mask.spi = fsp->m_u.ah_ip4_spec.spi;
+ break;
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ memcpy(&rule->key.src_ip, fsp->h_u.tcp_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&rule->key.dst_ip, fsp->h_u.tcp_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ rule->key.src_port = fsp->h_u.tcp_ip6_spec.psrc;
+ rule->key.dst_port = fsp->h_u.tcp_ip6_spec.pdst;
+ memcpy(&rule->mask.src_ip, fsp->m_u.tcp_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&rule->mask.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ rule->mask.src_port = fsp->m_u.tcp_ip6_spec.psrc;
+ rule->mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst;
+ break;
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ memcpy(&rule->key.src_ip, fsp->h_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&rule->key.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ rule->key.spi = fsp->h_u.ah_ip6_spec.spi;
+ memcpy(&rule->mask.src_ip, fsp->m_u.usr_ip6_spec.ip6src,
+ sizeof(struct in6_addr));
+ memcpy(&rule->mask.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst,
+ sizeof(struct in6_addr));
+ rule->mask.spi = fsp->m_u.ah_ip6_spec.spi;
+ break;
+ default:
+ /* not doing un-parsed flow types */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd)
+{
+ struct gve_adminq_queried_flow_rule *rules_cache = priv->flow_rules_cache.rules_cache;
+ struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+ u32 *cache_num = &priv->flow_rules_cache.rules_cache_num;
+ struct gve_adminq_queried_flow_rule *rule = NULL;
+ int err = 0;
+ u32 i;
+
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ if (!priv->flow_rules_cache.rules_cache_synced ||
+ fsp->location < be32_to_cpu(rules_cache[0].location) ||
+ fsp->location > be32_to_cpu(rules_cache[*cache_num - 1].location)) {
+ err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_RULES, fsp->location);
+ if (err)
+ return err;
+
+ priv->flow_rules_cache.rules_cache_synced = true;
+ }
+
+ for (i = 0; i < *cache_num; i++) {
+ if (fsp->location == be32_to_cpu(rules_cache[i].location)) {
+ rule = &rules_cache[i];
+ break;
+ }
+ }
+
+ if (!rule)
+ return -EINVAL;
+
+ err = gve_fill_ethtool_flow_spec(fsp, rule);
+
+ return err;
+}
+
+int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ __be32 *rule_ids_cache = priv->flow_rules_cache.rule_ids_cache;
+ u32 *cache_num = &priv->flow_rules_cache.rule_ids_cache_num;
+ u32 starting_rule_id = 0;
+ u32 i = 0, j = 0;
+ int err = 0;
+
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
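+ /* Rule ids arrive in batches bounded by the adminq buffer; resume each
+ * query after the last id seen until the device returns an empty batch.
+ */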
+ do {
+ err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_IDS,
+ starting_rule_id);
+ if (err)
+ return err;
+
+ for (i = 0; i < *cache_num; i++) {
+ if (j >= cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ rule_locs[j++] = be32_to_cpu(rule_ids_cache[i]);
+ starting_rule_id = be32_to_cpu(rule_ids_cache[i]) + 1;
+ }
+ } while (*cache_num != 0);
+ cmd->data = priv->max_flow_rules;
+
+ return err;
+}
+
+int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct gve_adminq_flow_rule *rule = NULL;
+ int err;
+
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ rule = kvzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ err = gve_generate_flow_rule(priv, fsp, rule);
+ if (err)
+ goto out;
+
+ err = gve_adminq_add_flow_rule(priv, rule, fsp->location);
+
+out:
+ kvfree(rule);
+ if (err)
+ dev_err(&priv->pdev->dev, "Failed to add the flow rule: %u", fsp->location);
+
+ return err;
+}
+
+int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (!priv->max_flow_rules)
+ return -EOPNOTSUPP;
+
+ return gve_adminq_del_flow_rule(priv, fsp->location);
+}
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 166bd827a6d7..661566db68c8 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
*
- * Copyright (C) 2015-2021 Google, Inc.
+ * Copyright (C) 2015-2024 Google LLC
*/
#include <linux/bpf.h>
@@ -9,6 +9,7 @@
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
@@ -16,6 +17,7 @@
#include <linux/workqueue.h>
#include <linux/utsname.h>
#include <linux/version.h>
+#include <net/netdev_queues.h>
#include <net/sch_generic.h>
#include <net/xdp_sock_drv.h>
#include "gve.h"
@@ -139,6 +141,49 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s)
}
}
+static int gve_alloc_flow_rule_caches(struct gve_priv *priv)
+{
+ struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
+ int err = 0;
+
+ if (!priv->max_flow_rules)
+ return 0;
+
+ flow_rules_cache->rules_cache =
+ kvcalloc(GVE_FLOW_RULES_CACHE_SIZE, sizeof(*flow_rules_cache->rules_cache),
+ GFP_KERNEL);
+ if (!flow_rules_cache->rules_cache) {
+ dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n");
+ return -ENOMEM;
+ }
+
+ flow_rules_cache->rule_ids_cache =
+ kvcalloc(GVE_FLOW_RULE_IDS_CACHE_SIZE, sizeof(*flow_rules_cache->rule_ids_cache),
+ GFP_KERNEL);
+ if (!flow_rules_cache->rule_ids_cache) {
+ dev_err(&priv->pdev->dev, "Cannot alloc flow rule ids cache\n");
+ err = -ENOMEM;
+ goto free_rules_cache;
+ }
+
+ return 0;
+
+free_rules_cache:
+ kvfree(flow_rules_cache->rules_cache);
+ flow_rules_cache->rules_cache = NULL;
+ return err;
+}
+
+static void gve_free_flow_rule_caches(struct gve_priv *priv)
+{
+ struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
+
+ kvfree(flow_rules_cache->rule_ids_cache);
+ flow_rules_cache->rule_ids_cache = NULL;
+ kvfree(flow_rules_cache->rules_cache);
+ flow_rules_cache->rules_cache = NULL;
+}
+
static int gve_alloc_counter_array(struct gve_priv *priv)
{
priv->counter_array =
@@ -253,6 +298,18 @@ static irqreturn_t gve_intr_dqo(int irq, void *arg)
return IRQ_HANDLED;
}
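+/* Return true if the current cpu is in this irq's effective affinity mask;
+ * a missing mask is treated as a match.
+ */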
+static int gve_is_napi_on_home_cpu(struct gve_priv *priv, u32 irq)
+{
+ int cpu_curr = smp_processor_id();
+ const struct cpumask *aff_mask;
+
+ aff_mask = irq_get_effective_affinity_mask(irq);
+ if (unlikely(!aff_mask))
+ return 1;
+
+ return cpumask_test_cpu(cpu_curr, aff_mask);
+}
+
int gve_napi_poll(struct napi_struct *napi, int budget)
{
struct gve_notify_block *block;
@@ -322,8 +379,21 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
reschedule |= work_done == budget;
}
- if (reschedule)
- return budget;
+ if (reschedule) {
+ /* Reschedule by returning budget only if already on the correct
+ * cpu.
+ */
+ if (likely(gve_is_napi_on_home_cpu(priv, block->irq)))
+ return budget;
+
+ /* If we are not on the cpu with which this queue's irq has
+ * affinity, avoid rescheduling napi; arm the irq instead so that
+ * napi eventually gets rescheduled onto the right cpu.
+ */
+ if (work_done == budget)
+ work_done--;
+ }
if (likely(napi_complete_done(napi, work_done))) {
/* Enable interrupts again.
@@ -428,6 +498,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
"Failed to receive msix vector %d\n", i);
goto abort_with_some_ntfy_blocks;
}
+ block->irq = priv->msix_vectors[msix_idx].vector;
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
get_cpu_mask(i % active_cpus));
block->irq_db_index = &priv->irq_db_indices[i].index;
@@ -441,6 +512,7 @@ abort_with_some_ntfy_blocks:
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
NULL);
free_irq(priv->msix_vectors[msix_idx].vector, block);
+ block->irq = 0;
}
kvfree(priv->ntfy_blocks);
priv->ntfy_blocks = NULL;
@@ -474,6 +546,7 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
NULL);
free_irq(priv->msix_vectors[msix_idx].vector, block);
+ block->irq = 0;
}
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
kvfree(priv->ntfy_blocks);
@@ -491,9 +564,12 @@ static int gve_setup_device_resources(struct gve_priv *priv)
{
int err;
- err = gve_alloc_counter_array(priv);
+ err = gve_alloc_flow_rule_caches(priv);
if (err)
return err;
+ err = gve_alloc_counter_array(priv);
+ if (err)
+ goto abort_with_flow_rule_caches;
err = gve_alloc_notify_blocks(priv);
if (err)
goto abort_with_counter;
@@ -545,6 +621,8 @@ abort_with_ntfy_blocks:
gve_free_notify_blocks(priv);
abort_with_counter:
gve_free_counter_array(priv);
+abort_with_flow_rule_caches:
+ gve_free_flow_rule_caches(priv);
return err;
}
@@ -557,6 +635,12 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
/* Tell device its resources are being freed */
if (gve_get_device_resources_ok(priv)) {
+ err = gve_flow_rules_reset(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev,
+ "Failed to reset flow rules: err=%d\n", err);
+ gve_trigger_reset(priv);
+ }
/* detach the stats report */
err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD);
if (err) {
@@ -576,43 +660,43 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
kvfree(priv->ptype_lut_dqo);
priv->ptype_lut_dqo = NULL;
+ gve_free_flow_rule_caches(priv);
gve_free_counter_array(priv);
gve_free_notify_blocks(priv);
gve_free_stats_report(priv);
gve_clear_device_resources_ok(priv);
}
-static int gve_unregister_qpl(struct gve_priv *priv, u32 i)
+static int gve_unregister_qpl(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl)
{
int err;
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+ if (!qpl)
+ return 0;
+
+ err = gve_adminq_unregister_page_list(priv, qpl->id);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to unregister queue page list %d\n",
- priv->qpls[i].id);
+ qpl->id);
return err;
}
- priv->num_registered_pages -= priv->qpls[i].num_entries;
+ priv->num_registered_pages -= qpl->num_entries;
return 0;
}
-static int gve_register_qpl(struct gve_priv *priv, u32 i)
+static int gve_register_qpl(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl)
{
- int num_rx_qpls;
int pages;
int err;
- /* Rx QPLs succeed Tx QPLs in the priv->qpls array. */
- num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
- if (i >= gve_rx_start_qpl_id(&priv->tx_cfg) + num_rx_qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot register nonexisting QPL at index %d\n", i);
- return -EINVAL;
- }
+ if (!qpl)
+ return 0;
- pages = priv->qpls[i].num_entries;
+ pages = qpl->num_entries;
if (pages + priv->num_registered_pages > priv->max_registered_pages) {
netif_err(priv, drv, priv->dev,
@@ -622,14 +706,11 @@ static int gve_register_qpl(struct gve_priv *priv, u32 i)
return -EINVAL;
}
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+ err = gve_adminq_register_page_list(priv, qpl);
if (err) {
netif_err(priv, drv, priv->dev,
"failed to register queue page list %d\n",
- priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean
- * up
- */
+ qpl->id);
return err;
}
@@ -637,6 +718,26 @@ static int gve_register_qpl(struct gve_priv *priv, u32 i)
return 0;
}
+static struct gve_queue_page_list *gve_tx_get_qpl(struct gve_priv *priv, int idx)
+{
+ struct gve_tx_ring *tx = &priv->tx[idx];
+
+ if (gve_is_gqi(priv))
+ return tx->tx_fifo.qpl;
+ else
+ return tx->dqo.qpl;
+}
+
+static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+
+ if (gve_is_gqi(priv))
+ return rx->data.qpl;
+ else
+ return rx->dqo.qpl;
+}
+
static int gve_register_xdp_qpls(struct gve_priv *priv)
{
int start_id;
@@ -645,7 +746,7 @@ static int gve_register_xdp_qpls(struct gve_priv *priv)
start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_register_qpl(priv, i);
+ err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean up */
if (err)
return err;
@@ -656,7 +757,6 @@ static int gve_register_xdp_qpls(struct gve_priv *priv)
static int gve_register_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
- int start_id;
int err;
int i;
@@ -665,15 +765,13 @@ static int gve_register_qpls(struct gve_priv *priv)
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
- err = gve_register_qpl(priv, i);
+ err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
if (err)
return err;
}
- /* there might be a gap between the tx and rx qpl ids */
- start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
for (i = 0; i < num_rx_qpls; i++) {
- err = gve_register_qpl(priv, start_id + i);
+ err = gve_register_qpl(priv, gve_rx_get_qpl(priv, i));
if (err)
return err;
}
@@ -689,7 +787,7 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv)
start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_unregister_qpl(priv, i);
+ err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
@@ -700,7 +798,6 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv)
static int gve_unregister_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
- int start_id;
int err;
int i;
@@ -709,15 +806,14 @@ static int gve_unregister_qpls(struct gve_priv *priv)
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
- err = gve_unregister_qpl(priv, i);
+ err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
}
- start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
for (i = 0; i < num_rx_qpls; i++) {
- err = gve_unregister_qpl(priv, start_id + i);
+ err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
@@ -828,8 +924,6 @@ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
{
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
- cfg->qpls = priv->qpls;
- cfg->qpl_cfg = &priv->qpl_cfg;
cfg->ring_size = priv->tx_desc_cnt;
cfg->start_idx = 0;
cfg->num_rings = gve_num_tx_queues(priv);
@@ -886,9 +980,9 @@ static int gve_alloc_xdp_rings(struct gve_priv *priv)
return 0;
}
-static int gve_alloc_rings(struct gve_priv *priv,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+static int gve_queues_mem_alloc(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
@@ -974,9 +1068,9 @@ static void gve_free_xdp_rings(struct gve_priv *priv)
}
}
-static void gve_free_rings(struct gve_priv *priv,
- struct gve_tx_alloc_rings_cfg *tx_cfg,
- struct gve_rx_alloc_rings_cfg *rx_cfg)
+static void gve_queues_mem_free(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_cfg)
{
if (gve_is_gqi(priv)) {
gve_tx_free_rings_gqi(priv, tx_cfg);
@@ -1005,35 +1099,41 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
return 0;
}
-static int gve_alloc_queue_page_list(struct gve_priv *priv,
- struct gve_queue_page_list *qpl,
- u32 id, int pages)
+struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
+ u32 id, int pages)
{
+ struct gve_queue_page_list *qpl;
int err;
int i;
+ qpl = kvzalloc(sizeof(*qpl), GFP_KERNEL);
+ if (!qpl)
+ return NULL;
+
qpl->id = id;
qpl->num_entries = 0;
qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
- /* caller handles clean up */
if (!qpl->pages)
- return -ENOMEM;
+ goto abort;
+
qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
- /* caller handles clean up */
if (!qpl->page_buses)
- return -ENOMEM;
+ goto abort;
for (i = 0; i < pages; i++) {
err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
gve_qpl_dma_dir(priv, id), GFP_KERNEL);
- /* caller handles clean up */
if (err)
- return -ENOMEM;
+ goto abort;
qpl->num_entries++;
}
- return 0;
+ return qpl;
+
+abort:
+ gve_free_queue_page_list(priv, qpl, id);
+ return NULL;
}
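
The allocator above owns its error handling: it hands back either a fully populated list or NULL, having already freed any partial state via the abort path. A minimal caller sketch, assuming qpl_id and qpl_page_cnt have been computed (this mirrors the ring-alloc paths later in this patch):

    struct gve_queue_page_list *qpl;

    qpl = gve_alloc_queue_page_list(priv, qpl_id, qpl_page_cnt);
    if (!qpl)
            return -ENOMEM; /* nothing to unwind; partial state already freed */

    /* ... use qpl->pages[] and qpl->page_buses[] ... */

    gve_free_queue_page_list(priv, qpl, qpl_id); /* NULL-safe teardown */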
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
@@ -1045,14 +1145,16 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
put_page(page);
}
-static void gve_free_queue_page_list(struct gve_priv *priv,
- struct gve_queue_page_list *qpl,
- int id)
+void gve_free_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ u32 id)
{
int i;
- if (!qpl->pages)
+ if (!qpl)
return;
+ if (!qpl->pages)
+ goto free_qpl;
if (!qpl->page_buses)
goto free_pages;
@@ -1065,120 +1167,8 @@ static void gve_free_queue_page_list(struct gve_priv *priv,
free_pages:
kvfree(qpl->pages);
qpl->pages = NULL;
-}
-
-static void gve_free_n_qpls(struct gve_priv *priv,
- struct gve_queue_page_list *qpls,
- int start_id,
- int num_qpls)
-{
- int i;
-
- for (i = start_id; i < start_id + num_qpls; i++)
- gve_free_queue_page_list(priv, &qpls[i], i);
-}
-
-static int gve_alloc_n_qpls(struct gve_priv *priv,
- struct gve_queue_page_list *qpls,
- int page_count,
- int start_id,
- int num_qpls)
-{
- int err;
- int i;
-
- for (i = start_id; i < start_id + num_qpls; i++) {
- err = gve_alloc_queue_page_list(priv, &qpls[i], i, page_count);
- if (err)
- goto free_qpls;
- }
-
- return 0;
-
-free_qpls:
- /* Must include the failing QPL too for gve_alloc_queue_page_list fails
- * without cleaning up.
- */
- gve_free_n_qpls(priv, qpls, start_id, i - start_id + 1);
- return err;
-}
-
-static int gve_alloc_qpls(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *cfg)
-{
- int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
- int rx_start_id, tx_num_qpls, rx_num_qpls;
- struct gve_queue_page_list *qpls;
- int page_count;
- int err;
-
- if (cfg->raw_addressing)
- return 0;
-
- qpls = kvcalloc(max_queues, sizeof(*qpls), GFP_KERNEL);
- if (!qpls)
- return -ENOMEM;
-
- cfg->qpl_cfg->qpl_map_size = BITS_TO_LONGS(max_queues) *
- sizeof(unsigned long) * BITS_PER_BYTE;
- cfg->qpl_cfg->qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
- sizeof(unsigned long), GFP_KERNEL);
- if (!cfg->qpl_cfg->qpl_id_map) {
- err = -ENOMEM;
- goto free_qpl_array;
- }
-
- /* Allocate TX QPLs */
- page_count = priv->tx_pages_per_qpl;
- tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues,
- gve_is_qpl(priv));
- err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls);
- if (err)
- goto free_qpl_map;
-
- /* Allocate RX QPLs */
- rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg);
- /* For GQI_QPL number of pages allocated have 1:1 relationship with
- * number of descriptors. For DQO, number of pages required are
- * more than descriptors (because of out of order completions).
- */
- page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
- rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
- err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
- if (err)
- goto free_tx_qpls;
-
- cfg->qpls = qpls;
- return 0;
-
-free_tx_qpls:
- gve_free_n_qpls(priv, qpls, 0, tx_num_qpls);
-free_qpl_map:
- kvfree(cfg->qpl_cfg->qpl_id_map);
- cfg->qpl_cfg->qpl_id_map = NULL;
-free_qpl_array:
- kvfree(qpls);
- return err;
-}
-
-static void gve_free_qpls(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *cfg)
-{
- int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
- struct gve_queue_page_list *qpls = cfg->qpls;
- int i;
-
- if (!qpls)
- return;
-
- kvfree(cfg->qpl_cfg->qpl_id_map);
- cfg->qpl_cfg->qpl_id_map = NULL;
-
- for (i = 0; i < max_queues; i++)
- gve_free_queue_page_list(priv, &qpls[i], i);
-
- kvfree(qpls);
- cfg->qpls = NULL;
+free_qpl:
+ kvfree(qpl);
}
/* Use this to schedule a reset when the device is capable of continuing
@@ -1282,18 +1272,6 @@ static void gve_drain_page_cache(struct gve_priv *priv)
page_frag_cache_drain(&priv->rx[i].page_cache);
}
-static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *cfg)
-{
- cfg->raw_addressing = !gve_is_qpl(priv);
- cfg->is_gqi = gve_is_gqi(priv);
- cfg->num_xdp_queues = priv->num_xdp_queues;
- cfg->qpl_cfg = &priv->qpl_cfg;
- cfg->tx_cfg = &priv->tx_cfg;
- cfg->rx_cfg = &priv->rx_cfg;
- cfg->qpls = priv->qpls;
-}
-
static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{
@@ -1301,8 +1279,6 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
cfg->qcfg_tx = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->enable_header_split = priv->header_split_enabled;
- cfg->qpls = priv->qpls;
- cfg->qpl_cfg = &priv->qpl_cfg;
cfg->ring_size = priv->rx_desc_cnt;
cfg->packet_buffer_size = gve_is_gqi(priv) ?
GVE_DEFAULT_RX_BUFFER_SIZE :
@@ -1310,90 +1286,56 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
cfg->rx = priv->rx;
}
-static void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
- gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
}
-static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
+static void gve_rx_start_ring(struct gve_priv *priv, int i)
{
- int i;
-
- for (i = 0; i < num_rings; i++) {
- if (gve_is_gqi(priv))
- gve_rx_start_ring_gqi(priv, i);
- else
- gve_rx_start_ring_dqo(priv, i);
- }
+ if (gve_is_gqi(priv))
+ gve_rx_start_ring_gqi(priv, i);
+ else
+ gve_rx_start_ring_dqo(priv, i);
}
-static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
+static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
{
int i;
- if (!priv->rx)
- return;
-
- for (i = 0; i < num_rings; i++) {
- if (gve_is_gqi(priv))
- gve_rx_stop_ring_gqi(priv, i);
- else
- gve_rx_stop_ring_dqo(priv, i);
- }
+ for (i = 0; i < num_rings; i++)
+ gve_rx_start_ring(priv, i);
}
-static void gve_queues_mem_free(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+static void gve_rx_stop_ring(struct gve_priv *priv, int i)
{
- gve_free_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
- gve_free_qpls(priv, qpls_alloc_cfg);
+ if (gve_is_gqi(priv))
+ gve_rx_stop_ring_gqi(priv, i);
+ else
+ gve_rx_stop_ring_dqo(priv, i);
}
-static int gve_queues_mem_alloc(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
{
- int err;
-
- err = gve_alloc_qpls(priv, qpls_alloc_cfg);
- if (err) {
- netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
- return err;
- }
- tx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
- rx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
- err = gve_alloc_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
- if (err) {
- netif_err(priv, drv, priv->dev, "Failed to alloc rings\n");
- goto free_qpls;
- }
+ int i;
- return 0;
+ if (!priv->rx)
+ return;
-free_qpls:
- gve_free_qpls(priv, qpls_alloc_cfg);
- return err;
+ for (i = 0; i < num_rings; i++)
+ gve_rx_stop_ring(priv, i);
}
static void gve_queues_mem_remove(struct gve_priv *priv)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- gve_queues_mem_free(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- priv->qpls = NULL;
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_queues_mem_free(priv, &tx_alloc_cfg, &rx_alloc_cfg);
priv->tx = NULL;
priv->rx = NULL;
}
@@ -1402,7 +1344,6 @@ static void gve_queues_mem_remove(struct gve_priv *priv)
* No memory is allocated. Passed-in memory is freed on errors.
*/
static int gve_queues_start(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
@@ -1410,12 +1351,10 @@ static int gve_queues_start(struct gve_priv *priv,
int err;
/* Record new resources into priv */
- priv->qpls = qpls_alloc_cfg->qpls;
priv->tx = tx_alloc_cfg->tx;
priv->rx = rx_alloc_cfg->rx;
/* Record new configs into priv */
- priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg;
priv->tx_cfg = *tx_alloc_cfg->qcfg;
priv->rx_cfg = *rx_alloc_cfg->qcfg;
priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
@@ -1483,23 +1422,19 @@ static int gve_open(struct net_device *dev)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(dev);
int err;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
- err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ err = gve_queues_mem_alloc(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
return err;
/* No need to free on error: ownership of resources is lost after
* calling gve_queues_start.
*/
- err = gve_queues_start(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ err = gve_queues_start(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
return err;
@@ -1558,11 +1493,8 @@ static int gve_close(struct net_device *dev)
static int gve_remove_xdp_queues(struct gve_priv *priv)
{
- int qpl_start_id;
int err;
- qpl_start_id = gve_xdp_tx_start_queue_id(priv);
-
err = gve_destroy_xdp_rings(priv);
if (err)
return err;
@@ -1574,27 +1506,19 @@ static int gve_remove_xdp_queues(struct gve_priv *priv)
gve_unreg_xdp_info(priv);
gve_free_xdp_rings(priv);
- gve_free_n_qpls(priv, priv->qpls, qpl_start_id, gve_num_xdp_qpls(priv));
priv->num_xdp_queues = 0;
return 0;
}
static int gve_add_xdp_queues(struct gve_priv *priv)
{
- int start_id;
int err;
priv->num_xdp_queues = priv->rx_cfg.num_queues;
- start_id = gve_xdp_tx_start_queue_id(priv);
- err = gve_alloc_n_qpls(priv, priv->qpls, priv->tx_pages_per_qpl,
- start_id, gve_num_xdp_qpls(priv));
- if (err)
- goto err;
-
err = gve_alloc_xdp_rings(priv);
if (err)
- goto free_xdp_qpls;
+ goto err;
err = gve_reg_xdp_info(priv, priv->dev);
if (err)
@@ -1612,8 +1536,6 @@ static int gve_add_xdp_queues(struct gve_priv *priv)
free_xdp_rings:
gve_free_xdp_rings(priv);
-free_xdp_qpls:
- gve_free_n_qpls(priv, priv->qpls, start_id, gve_num_xdp_qpls(priv));
err:
priv->num_xdp_queues = 0;
return err;
@@ -1644,7 +1566,7 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
u32 status;
old_prog = READ_ONCE(priv->xdp_prog);
- if (!netif_carrier_ok(priv->dev)) {
+ if (!netif_running(priv->dev)) {
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
@@ -1863,16 +1785,22 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
-static int gve_adjust_config(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+int gve_flow_rules_reset(struct gve_priv *priv)
+{
+ if (!priv->max_flow_rules)
+ return 0;
+
+ return gve_adminq_reset_flow_rules(priv);
+}
+
+int gve_adjust_config(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
	/* Allocate resources for the new configuration */
- err = gve_queues_mem_alloc(priv, qpls_alloc_cfg,
- tx_alloc_cfg, rx_alloc_cfg);
+ err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to alloc new queues");
@@ -1884,14 +1812,12 @@ static int gve_adjust_config(struct gve_priv *priv,
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to close old queues");
- gve_queues_mem_free(priv, qpls_alloc_cfg,
- tx_alloc_cfg, rx_alloc_cfg);
+ gve_queues_mem_free(priv, tx_alloc_cfg, rx_alloc_cfg);
return err;
}
/* Bring the device back up again with the new resources. */
- err = gve_queues_start(priv, qpls_alloc_cfg,
- tx_alloc_cfg, rx_alloc_cfg);
+ err = gve_queues_start(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n");
@@ -1911,32 +1837,18 @@ int gve_adjust_queues(struct gve_priv *priv,
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
- struct gve_qpl_config new_qpl_cfg;
int err;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
-
- /* qpl_cfg is not read-only, it contains a map that gets updated as
- * rings are allocated, which is why we cannot use the yet unreleased
- * one in priv.
- */
- qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
/* Relay the new config from ethtool */
- qpls_alloc_cfg.tx_cfg = &new_tx_config;
tx_alloc_cfg.qcfg = &new_tx_config;
rx_alloc_cfg.qcfg_tx = &new_tx_config;
- qpls_alloc_cfg.rx_cfg = &new_rx_config;
rx_alloc_cfg.qcfg = &new_rx_config;
tx_alloc_cfg.num_rings = new_tx_config.num_queues;
- if (netif_carrier_ok(priv->dev)) {
- err = gve_adjust_config(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ if (netif_running(priv->dev)) {
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
/* Set the config for the next up. */
@@ -1961,12 +1873,16 @@ static void gve_turndown(struct gve_priv *priv)
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ if (!gve_tx_was_added_to_block(priv, idx))
+ continue;
napi_disable(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ if (!gve_rx_was_added_to_block(priv, idx))
+ continue;
napi_disable(&block->napi);
}
@@ -1989,6 +1905,9 @@ static void gve_turnup(struct gve_priv *priv)
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ if (!gve_tx_was_added_to_block(priv, idx))
+ continue;
+
napi_enable(&block->napi);
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
@@ -1996,11 +1915,21 @@ static void gve_turnup(struct gve_priv *priv)
gve_set_itr_coalesce_usecs_dqo(priv, block,
priv->tx_coalesce_usecs);
}
+
+		/* Any descs written by the NIC before this barrier will be
+		 * handled by the one-off napi schedule below, whereas any
+		 * descs written after the barrier will generate interrupts.
+ */
+ mb();
+ napi_schedule(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ if (!gve_rx_was_added_to_block(priv, idx))
+ continue;
+
napi_enable(&block->napi);
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
@@ -2008,11 +1937,27 @@ static void gve_turnup(struct gve_priv *priv)
gve_set_itr_coalesce_usecs_dqo(priv, block,
priv->rx_coalesce_usecs);
}
+
+		/* Any descs written by the NIC before this barrier will be
+		 * handled by the one-off napi schedule below, whereas any
+		 * descs written after the barrier will generate interrupts.
+ */
+ mb();
+ napi_schedule(&block->napi);
}
gve_set_napi_enabled(priv);
}
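
The ordering in both loops is load-bearing: enable napi, unmask the interrupt (or set the ITR for DQO), then mb(), then one unconditional napi_schedule(). Condensed into a hypothetical helper for the GQI case (illustration only, not part of the patch):

    static void gve_turnup_block_gqi(struct gve_priv *priv,
                                     struct gve_notify_block *block)
    {
            napi_enable(&block->napi);
            iowrite32be(0, gve_irq_doorbell(priv, block)); /* unmask IRQ */
            mb();                   /* order against the NIC's descriptor writes */
            napi_schedule(&block->napi); /* drain descs posted while quiesced */
    }

Without the final schedule, descriptors the NIC completed while the queue was turned down would sit until an interrupt that may never arrive.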
+static void gve_turnup_and_check_status(struct gve_priv *priv)
+{
+ u32 status;
+
+ gve_turnup(priv);
+ status = ioread32be(&priv->reg_bar0->device_status);
+ gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
+}
+
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gve_notify_block *block;
@@ -2077,7 +2022,6 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
bool enable_hdr_split;
int err = 0;
@@ -2097,15 +2041,13 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
if (enable_hdr_split == priv->header_split_enabled)
return 0;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
rx_alloc_cfg.enable_header_split = enable_hdr_split;
rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
if (netif_running(priv->dev))
- err = gve_adjust_config(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
@@ -2115,35 +2057,30 @@ static int gve_set_features(struct net_device *netdev,
const netdev_features_t orig_features = netdev->features;
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(netdev);
- struct gve_qpl_config new_qpl_cfg;
int err;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- /* qpl_cfg is not read-only, it contains a map that gets updated as
- * rings are allocated, which is why we cannot use the yet unreleased
- * one in priv.
- */
- qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
netdev->features ^= NETIF_F_LRO;
- if (netif_carrier_ok(netdev)) {
- err = gve_adjust_config(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- if (err) {
- /* Revert the change on error. */
- netdev->features = orig_features;
- return err;
- }
+ if (netif_running(netdev)) {
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ goto revert_features;
}
}
+ if ((netdev->features & NETIF_F_NTUPLE) && !(features & NETIF_F_NTUPLE)) {
+ err = gve_flow_rules_reset(priv);
+ if (err)
+ goto revert_features;
+ }
return 0;
+
+revert_features:
+ netdev->features = orig_features;
+ return err;
}
static const struct net_device_ops gve_netdev_ops = {
@@ -2422,7 +2359,7 @@ err:
int gve_reset(struct gve_priv *priv, bool attempt_teardown)
{
- bool was_up = netif_carrier_ok(priv->dev);
+ bool was_up = netif_running(priv->dev);
int err;
dev_info(&priv->pdev->dev, "Performing reset\n");
@@ -2473,6 +2410,140 @@ static void gve_write_version(u8 __iomem *driver_version_register)
writeb('\n', driver_version_register);
}
+static int gve_rx_queue_stop(struct net_device *dev, void *per_q_mem, int idx)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_ring *gve_per_q_mem;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+
+ /* Destroying queue 0 while other queues exist is not supported in DQO */
+ if (!gve_is_gqi(priv) && idx == 0)
+ return -ERANGE;
+
+ /* Single-queue destruction requires quiescence on all queues */
+ gve_turndown(priv);
+
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_adminq_destroy_single_rx_queue(priv, idx);
+ if (err)
+ return err;
+
+ if (gve_is_qpl(priv)) {
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, idx));
+ if (err)
+ return err;
+ }
+
+ gve_rx_stop_ring(priv, idx);
+
+ /* Turn the unstopped queues back up */
+ gve_turnup_and_check_status(priv);
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ *gve_per_q_mem = priv->rx[idx];
+ memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
+ return 0;
+}
+
+static void gve_rx_queue_mem_free(struct net_device *dev, void *per_q_mem)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_alloc_rings_cfg cfg = {0};
+ struct gve_rx_ring *gve_per_q_mem;
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ gve_rx_get_curr_alloc_cfg(priv, &cfg);
+
+ if (gve_is_gqi(priv))
+ gve_rx_free_ring_gqi(priv, gve_per_q_mem, &cfg);
+ else
+ gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg);
+}
+
+static int gve_rx_queue_mem_alloc(struct net_device *dev, void *per_q_mem,
+ int idx)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_alloc_rings_cfg cfg = {0};
+ struct gve_rx_ring *gve_per_q_mem;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ gve_rx_get_curr_alloc_cfg(priv, &cfg);
+
+ if (gve_is_gqi(priv))
+ err = gve_rx_alloc_ring_gqi(priv, &cfg, gve_per_q_mem, idx);
+ else
+ err = gve_rx_alloc_ring_dqo(priv, &cfg, gve_per_q_mem, idx);
+
+ return err;
+}
+
+static int gve_rx_queue_start(struct net_device *dev, void *per_q_mem, int idx)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_ring *gve_per_q_mem;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ priv->rx[idx] = *gve_per_q_mem;
+
+ /* Single-queue creation requires quiescence on all queues */
+ gve_turndown(priv);
+
+ gve_rx_start_ring(priv, idx);
+
+ if (gve_is_qpl(priv)) {
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_register_qpl(priv, gve_rx_get_qpl(priv, idx));
+ if (err)
+ goto abort;
+ }
+
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_adminq_create_single_rx_queue(priv, idx);
+ if (err)
+ goto abort;
+
+ if (gve_is_gqi(priv))
+ gve_rx_write_doorbell(priv, &priv->rx[idx]);
+ else
+ gve_rx_post_buffers_dqo(&priv->rx[idx]);
+
+ /* Turn the unstopped queues back up */
+ gve_turnup_and_check_status(priv);
+ return 0;
+
+abort:
+ gve_rx_stop_ring(priv, idx);
+
+	/* All failures in this func result in a reset; by clearing the struct
+	 * at idx, we prevent a double free when that reset runs. The reset,
+	 * which needs the rtnl lock, will not run until this func returns and
+	 * its caller gives up the lock.
+ */
+ memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
+ return err;
+}
+
+static const struct netdev_queue_mgmt_ops gve_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct gve_rx_ring),
+ .ndo_queue_mem_alloc = gve_rx_queue_mem_alloc,
+ .ndo_queue_mem_free = gve_rx_queue_mem_free,
+ .ndo_queue_start = gve_rx_queue_start,
+ .ndo_queue_stop = gve_rx_queue_stop,
+};
+
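These ops give the core everything it needs to restart one RX queue without a full device reset. The flow sketched below is an assumption based on the op names; the core-side helper that drives it is not part of this patch, and error handling is elided:

    /* Hypothetical core-side restart of RX queue `idx` */
    void *new_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
    void *old_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);

    ops->ndo_queue_mem_alloc(dev, new_mem, idx); /* build new ring offline */
    ops->ndo_queue_stop(dev, old_mem, idx);      /* snapshot and stop old ring */
    ops->ndo_queue_start(dev, new_mem, idx);     /* swap the new ring in */
    ops->ndo_queue_mem_free(dev, old_mem);       /* release the old ring */

    kfree(old_mem);
    kfree(new_mem);
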
static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int max_tx_queues, max_rx_queues;
@@ -2527,6 +2598,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
dev->ethtool_ops = &gve_ethtool_ops;
dev->netdev_ops = &gve_netdev_ops;
+ dev->queue_mgmt_ops = &gve_queue_mgmt_ops;
/* Set default and supported features.
*
@@ -2628,7 +2700,7 @@ static void gve_shutdown(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct gve_priv *priv = netdev_priv(netdev);
- bool was_up = netif_carrier_ok(priv->dev);
+ bool was_up = netif_running(priv->dev);
rtnl_lock();
if (was_up && gve_close(priv->dev)) {
@@ -2646,7 +2718,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct gve_priv *priv = netdev_priv(netdev);
- bool was_up = netif_carrier_ok(priv->dev);
+ bool was_up = netif_running(priv->dev);
priv->suspend_cnt++;
rtnl_lock();
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 20f5a9e7fae9..acb73d4d0de6 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -30,6 +30,9 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
u32 slots = rx->mask + 1;
int i;
+ if (!rx->data.page_info)
+ return;
+
if (rx->data.raw_addressing) {
for (i = 0; i < slots; i++)
gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
@@ -38,8 +41,6 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
for (i = 0; i < slots; i++)
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
- rx->data.qpl = NULL;
for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
page_ref_sub(rx->qpl_copy_pool[i].page,
@@ -51,6 +52,41 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
rx->data.page_info = NULL;
}
+static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+ ctx->skb_head = NULL;
+ ctx->skb_tail = NULL;
+ ctx->total_size = 0;
+ ctx->frag_cnt = 0;
+ ctx->drop_pkt = false;
+}
+
+static void gve_rx_init_ring_state_gqi(struct gve_rx_ring *rx)
+{
+ rx->desc.seqno = 1;
+ rx->cnt = 0;
+ gve_rx_ctx_clear(&rx->ctx);
+}
+
+static void gve_rx_reset_ring_gqi(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ const u32 slots = priv->rx_desc_cnt;
+ size_t size;
+
+ /* Reset desc ring */
+ if (rx->desc.desc_ring) {
+ size = slots * sizeof(rx->desc.desc_ring[0]);
+ memset(rx->desc.desc_ring, 0, size);
+ }
+
+ /* Reset q_resources */
+ if (rx->q_resources)
+ memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+ gve_rx_init_ring_state_gqi(rx);
+}
+
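With reset split out of free, stopping a GQI RX queue scrubs mutable state but keeps the DMA allocations, which is what lets the queue management ops in gve_main.c swap rings without a device reset. The lifecycle, summarized from the functions in this patch:

    /* gve_rx_alloc_ring_gqi()  - allocate DMA memory, init ring state
     * gve_rx_start_ring_gqi()  - attach napi
     * gve_rx_stop_ring_gqi()   - detach napi, reset ring state in place
     * gve_rx_free_ring_gqi()   - release DMA memory (tolerates NULLs)
     */
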
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -60,34 +96,48 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
+ gve_rx_reset_ring_gqi(priv, idx);
}
-static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
- struct gve_rx_alloc_rings_cfg *cfg)
+void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
struct device *dev = &priv->pdev->dev;
u32 slots = rx->mask + 1;
int idx = rx->q_num;
size_t bytes;
+ u32 qpl_id;
- bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
- dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
- rx->desc.desc_ring = NULL;
+ if (rx->desc.desc_ring) {
+ bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
+ dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
+ rx->desc.desc_ring = NULL;
+ }
- dma_free_coherent(dev, sizeof(*rx->q_resources),
- rx->q_resources, rx->q_resources_bus);
- rx->q_resources = NULL;
+ if (rx->q_resources) {
+ dma_free_coherent(dev, sizeof(*rx->q_resources),
+ rx->q_resources, rx->q_resources_bus);
+ rx->q_resources = NULL;
+ }
gve_rx_unfill_pages(priv, rx, cfg);
- bytes = sizeof(*rx->data.data_ring) * slots;
- dma_free_coherent(dev, bytes, rx->data.data_ring,
- rx->data.data_bus);
- rx->data.data_ring = NULL;
+ if (rx->data.data_ring) {
+ bytes = sizeof(*rx->data.data_ring) * slots;
+ dma_free_coherent(dev, bytes, rx->data.data_ring,
+ rx->data.data_bus);
+ rx->data.data_ring = NULL;
+ }
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
+ if (rx->data.qpl) {
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, idx);
+ gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
+ rx->data.qpl = NULL;
+ }
+
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
@@ -144,14 +194,6 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
if (!rx->data.page_info)
return -ENOMEM;
- if (!rx->data.raw_addressing) {
- rx->data.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
- if (!rx->data.qpl) {
- kvfree(rx->data.page_info);
- rx->data.page_info = NULL;
- return -ENOMEM;
- }
- }
for (i = 0; i < slots; i++) {
if (!rx->data.raw_addressing) {
struct page *page = rx->data.qpl->pages[i];
@@ -204,9 +246,6 @@ alloc_err_qpl:
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
- rx->data.qpl = NULL;
-
return err;
alloc_err_rda:
@@ -217,15 +256,6 @@ alloc_err_rda:
return err;
}
-static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
-{
- ctx->skb_head = NULL;
- ctx->skb_tail = NULL;
- ctx->total_size = 0;
- ctx->frag_cnt = 0;
- ctx->drop_pkt = false;
-}
-
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -234,14 +264,16 @@ void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
gve_add_napi(priv, ntfy_idx, gve_napi_poll);
}
-static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
- struct gve_rx_alloc_rings_cfg *cfg,
- struct gve_rx_ring *rx,
- int idx)
+int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
struct device *hdev = &priv->pdev->dev;
- u32 slots = priv->rx_data_slot_cnt;
+ u32 slots = cfg->ring_size;
int filled_pages;
+ int qpl_page_cnt;
+ u32 qpl_id = 0;
size_t bytes;
int err;
@@ -274,10 +306,22 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
goto abort_with_slots;
}
+ if (!rx->data.raw_addressing) {
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+ qpl_page_cnt = cfg->ring_size;
+
+ rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
+ if (!rx->data.qpl) {
+ err = -ENOMEM;
+ goto abort_with_copy_pool;
+ }
+ }
+
filled_pages = gve_rx_prefill_pages(rx, cfg);
if (filled_pages < 0) {
err = -ENOMEM;
- goto abort_with_copy_pool;
+ goto abort_with_qpl;
}
rx->fill_cnt = filled_pages;
/* Ensure data ring slots (packet buffers) are visible. */
@@ -304,9 +348,8 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
err = -ENOMEM;
goto abort_with_q_resources;
}
- rx->cnt = 0;
rx->db_threshold = slots / 2;
- rx->desc.seqno = 1;
+ gve_rx_init_ring_state_gqi(rx);
rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
@@ -319,6 +362,11 @@ abort_with_q_resources:
rx->q_resources = NULL;
abort_filled:
gve_rx_unfill_pages(priv, rx, cfg);
+abort_with_qpl:
+ if (!rx->data.raw_addressing) {
+ gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
+ rx->data.qpl = NULL;
+ }
abort_with_copy_pool:
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
@@ -337,12 +385,6 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8e8071308aeb..1154c1d8f66f 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -178,7 +178,7 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
return err;
} else {
idx = rx->dqo.next_qpl_page_idx;
- if (idx >= priv->rx_pages_per_qpl) {
+ if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
net_err_ratelimited("%s: Out of QPL pages\n",
priv->dev->name);
return -ENOMEM;
@@ -211,6 +211,82 @@ static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
}
}
+static void gve_rx_init_ring_state_dqo(struct gve_rx_ring *rx,
+ const u32 buffer_queue_slots,
+ const u32 completion_queue_slots)
+{
+ int i;
+
+ /* Set buffer queue state */
+ rx->dqo.bufq.mask = buffer_queue_slots - 1;
+ rx->dqo.bufq.head = 0;
+ rx->dqo.bufq.tail = 0;
+
+ /* Set completion queue state */
+ rx->dqo.complq.num_free_slots = completion_queue_slots;
+ rx->dqo.complq.mask = completion_queue_slots - 1;
+ rx->dqo.complq.cur_gen_bit = 0;
+ rx->dqo.complq.head = 0;
+
+ /* Set RX SKB context */
+ rx->ctx.skb_head = NULL;
+ rx->ctx.skb_tail = NULL;
+
+ /* Set up linked list of buffer IDs */
+ if (rx->dqo.buf_states) {
+ for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
+ rx->dqo.buf_states[i].next = i + 1;
+ rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
+ }
+
+ rx->dqo.free_buf_states = 0;
+ rx->dqo.recycled_buf_states.head = -1;
+ rx->dqo.recycled_buf_states.tail = -1;
+ rx->dqo.used_buf_states.head = -1;
+ rx->dqo.used_buf_states.tail = -1;
+}
+
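The loop above threads the buffer states into an index-based singly linked free list, using -1 as the terminator so no pointers need to be stored. Popping an entry, as the DQO datapath does when it needs a buffer, looks roughly like this (a sketch; the driver's real helper may differ):

    s16 idx = rx->dqo.free_buf_states;
    struct gve_rx_buf_state_dqo *bs;

    if (idx == -1)
            return NULL;                    /* free list exhausted */
    bs = &rx->dqo.buf_states[idx];
    rx->dqo.free_buf_states = bs->next;     /* advance the list head */
    bs->next = -1;                          /* detach the node */
    return bs;
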
+static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ size_t size;
+ int i;
+
+ const u32 buffer_queue_slots = priv->rx_desc_cnt;
+ const u32 completion_queue_slots = priv->rx_desc_cnt;
+
+ /* Reset buffer queue */
+ if (rx->dqo.bufq.desc_ring) {
+ size = sizeof(rx->dqo.bufq.desc_ring[0]) *
+ buffer_queue_slots;
+ memset(rx->dqo.bufq.desc_ring, 0, size);
+ }
+
+ /* Reset completion queue */
+ if (rx->dqo.complq.desc_ring) {
+ size = sizeof(rx->dqo.complq.desc_ring[0]) *
+ completion_queue_slots;
+ memset(rx->dqo.complq.desc_ring, 0, size);
+ }
+
+ /* Reset q_resources */
+ if (rx->q_resources)
+ memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+ /* Reset buf states */
+ if (rx->dqo.buf_states) {
+ for (i = 0; i < rx->dqo.num_buf_states; i++) {
+ struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
+
+ if (bs->page_info.page)
+ gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
+ }
+ }
+
+ gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+ completion_queue_slots);
+}
+
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -220,16 +296,18 @@ void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
+ gve_rx_reset_ring_dqo(priv, idx);
}
-static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
- struct gve_rx_alloc_rings_cfg *cfg)
+void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
struct device *hdev = &priv->pdev->dev;
size_t completion_queue_slots;
size_t buffer_queue_slots;
int idx = rx->q_num;
size_t size;
+ u32 qpl_id;
int i;
completion_queue_slots = rx->dqo.complq.mask + 1;
@@ -247,8 +325,10 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
if (bs->page_info.page)
gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
}
+
if (rx->dqo.qpl) {
- gve_unassign_qpl(cfg->qpl_cfg, rx->dqo.qpl->id);
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+ gve_free_queue_page_list(priv, rx->dqo.qpl, qpl_id);
rx->dqo.qpl = NULL;
}
@@ -275,10 +355,10 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
-static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx,
+ const u32 buf_count)
{
struct device *hdev = &priv->pdev->dev;
- int buf_count = rx->dqo.bufq.mask + 1;
rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
&rx->dqo.hdr_bufs.addr, GFP_KERNEL);
@@ -296,17 +376,17 @@ void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx)
gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
}
-static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
- struct gve_rx_alloc_rings_cfg *cfg,
- struct gve_rx_ring *rx,
- int idx)
+int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
struct device *hdev = &priv->pdev->dev;
+ int qpl_page_cnt;
size_t size;
- int i;
+ u32 qpl_id;
- const u32 buffer_queue_slots = cfg->raw_addressing ?
- priv->options_dqo_rda.rx_buff_ring_entries : cfg->ring_size;
+ const u32 buffer_queue_slots = cfg->ring_size;
const u32 completion_queue_slots = cfg->ring_size;
netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
@@ -314,15 +394,10 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
memset(rx, 0, sizeof(*rx));
rx->gve = priv;
rx->q_num = idx;
- rx->dqo.bufq.mask = buffer_queue_slots - 1;
- rx->dqo.complq.num_free_slots = completion_queue_slots;
- rx->dqo.complq.mask = completion_queue_slots - 1;
- rx->ctx.skb_head = NULL;
- rx->ctx.skb_tail = NULL;
rx->dqo.num_buf_states = cfg->raw_addressing ?
min_t(s16, S16_MAX, buffer_queue_slots * 4) :
- priv->rx_pages_per_qpl;
+ gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
sizeof(rx->dqo.buf_states[0]),
GFP_KERNEL);
@@ -331,19 +406,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
/* Allocate header buffers for header-split */
if (cfg->enable_header_split)
- if (gve_rx_alloc_hdr_bufs(priv, rx))
+ if (gve_rx_alloc_hdr_bufs(priv, rx, buffer_queue_slots))
goto err;
- /* Set up linked list of buffer IDs */
- for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
- rx->dqo.buf_states[i].next = i + 1;
-
- rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
- rx->dqo.recycled_buf_states.head = -1;
- rx->dqo.recycled_buf_states.tail = -1;
- rx->dqo.used_buf_states.head = -1;
- rx->dqo.used_buf_states.tail = -1;
-
/* Allocate RX completion queue */
size = sizeof(rx->dqo.complq.desc_ring[0]) *
completion_queue_slots;
@@ -360,7 +425,11 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (!cfg->raw_addressing) {
- rx->dqo.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+ qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
+
+ rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
if (!rx->dqo.qpl)
goto err;
rx->dqo.next_qpl_page_idx = 0;
@@ -371,6 +440,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
if (!rx->q_resources)
goto err;
+ gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+ completion_queue_slots);
+
return 0;
err:
@@ -393,12 +465,6 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
int err;
int i;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
@@ -581,11 +647,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}
-static void gve_rx_free_skb(struct gve_rx_ring *rx)
+static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
{
if (!rx->ctx.skb_head)
return;
+ if (rx->ctx.skb_head == napi->skb)
+ napi->skb = NULL;
dev_kfree_skb_any(rx->ctx.skb_head);
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
@@ -884,7 +952,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
if (err < 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
if (err == -ENOMEM)
rx->rx_skb_alloc_fail++;
@@ -927,7 +995,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* gve_rx_complete_skb() will consume skb if successful */
if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
rx->rx_desc_err_dropped_pkt++;
u64_stats_update_end(&rx->statss);
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 4b9853adc113..e7fb7d6d283d 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -158,15 +158,16 @@ static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do)
{
struct gve_tx_buffer_state *info;
- u32 clean_end = tx->done + to_do;
u64 pkts = 0, bytes = 0;
size_t space_freed = 0;
u32 xsk_complete = 0;
u32 idx;
+ int i;
- for (; tx->done < clean_end; tx->done++) {
+ for (i = 0; i < to_do; i++) {
idx = tx->done & tx->mask;
info = &tx->info[idx];
+ tx->done++;
if (unlikely(!info->xdp.size))
continue;
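
Bounding the loop on an iteration count instead of on `tx->done < tx->done + to_do` also sidesteps u32 wraparound; whether that motivated the change is an inference, but the hazard is easy to demonstrate in isolation:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t done = UINT32_MAX - 1; /* completion counter near wrap */
            uint32_t to_do = 4;
            uint32_t clean_end = done + to_do; /* wraps around to 2 */
            int cleaned = 0;

            /* old form: "done < clean_end" is false from the start */
            for (uint32_t d = done; d < clean_end; d++)
                    cleaned++;
            printf("old loop cleaned %d of %d\n", cleaned, (int)to_do); /* 0 of 4 */

            /* new form: always walks exactly to_do entries */
            cleaned = 0;
            for (uint32_t i = 0; i < to_do; i++) {
                    done++;
                    cleaned++;
            }
            printf("new loop cleaned %d of %d\n", cleaned, (int)to_do); /* 4 of 4 */
            return 0;
    }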
@@ -216,6 +217,7 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes;
+ u32 qpl_id;
u32 slots;
slots = tx->mask + 1;
@@ -223,9 +225,12 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->q_resources, tx->q_resources_bus);
tx->q_resources = NULL;
- if (!tx->raw_addressing) {
- gve_tx_fifo_release(priv, &tx->tx_fifo);
- gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
+ if (tx->tx_fifo.qpl) {
+ if (tx->tx_fifo.base)
+ gve_tx_fifo_release(priv, &tx->tx_fifo);
+
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
tx->tx_fifo.qpl = NULL;
}
@@ -256,6 +261,8 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
int idx)
{
struct device *hdev = &priv->pdev->dev;
+ int qpl_page_cnt;
+ u32 qpl_id = 0;
size_t bytes;
/* Make sure everything is zeroed to start */
@@ -280,9 +287,14 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
tx->raw_addressing = cfg->raw_addressing;
tx->dev = hdev;
if (!tx->raw_addressing) {
- tx->tx_fifo.qpl = gve_assign_tx_qpl(cfg, idx);
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ qpl_page_cnt = priv->tx_pages_per_qpl;
+
+ tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
if (!tx->tx_fifo.qpl)
goto abort_with_desc;
+
/* map Tx FIFO */
if (gve_tx_fifo_init(priv, &tx->tx_fifo))
goto abort_with_qpl;
@@ -302,8 +314,10 @@ abort_with_fifo:
if (!tx->raw_addressing)
gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
- if (!tx->raw_addressing)
- gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
+ if (!tx->raw_addressing) {
+ gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
+ tx->tx_fifo.qpl = NULL;
+ }
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -320,12 +334,6 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index bc34b6cd3a3e..f879426cb552 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -209,6 +209,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes;
+ u32 qpl_id;
if (tx->q_resources) {
dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -237,7 +238,8 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dqo.tx_qpl_buf_next = NULL;
if (tx->dqo.qpl) {
- gve_unassign_qpl(cfg->qpl_cfg, tx->dqo.qpl->id);
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ gve_free_queue_page_list(priv, tx->dqo.qpl, qpl_id);
tx->dqo.qpl = NULL;
}
@@ -285,7 +287,9 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
{
struct device *hdev = &priv->pdev->dev;
int num_pending_packets;
+ int qpl_page_cnt;
size_t bytes;
+ u32 qpl_id;
int i;
memset(tx, 0, sizeof(*tx));
@@ -295,9 +299,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
/* Queue sizes must be a power of 2 */
tx->mask = cfg->ring_size - 1;
- tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
- priv->options_dqo_rda.tx_comp_ring_entries - 1 :
- tx->mask;
+ tx->dqo.complq_mask = tx->mask;
	/* The max number of pending packets determines the maximum number of
	 * descriptors which may be written to the completion queue.
@@ -354,7 +356,11 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (!cfg->raw_addressing) {
- tx->dqo.qpl = gve_assign_tx_qpl(cfg, idx);
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ qpl_page_cnt = priv->tx_pages_per_qpl;
+
+ tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
if (!tx->dqo.qpl)
goto err;
@@ -376,12 +382,6 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
int err = 0;
int i, j;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
@@ -555,28 +555,18 @@ static int gve_prep_tso(struct sk_buff *skb)
if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
return -1;
+ if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+ return -EINVAL;
+
/* Needed because we will modify header. */
err = skb_cow_head(skb, 0);
if (err < 0)
return err;
tcp = tcp_hdr(skb);
-
- /* Remove payload length from checksum. */
paylen = skb->len - skb_transport_offset(skb);
-
- switch (skb_shinfo(skb)->gso_type) {
- case SKB_GSO_TCPV4:
- case SKB_GSO_TCPV6:
- csum_replace_by_diff(&tcp->check,
- (__force __wsum)htonl(paylen));
-
- /* Compute length of segmentation header. */
- header_len = skb_tcp_all_headers(skb);
- break;
- default:
- return -EINVAL;
- }
+ csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
+ header_len = skb_tcp_all_headers(skb);
if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
return -EINVAL;
@@ -876,22 +866,42 @@ static bool gve_can_send_tso(const struct sk_buff *skb)
const int header_len = skb_tcp_all_headers(skb);
const int gso_size = shinfo->gso_size;
int cur_seg_num_bufs;
+ int prev_frag_size;
int cur_seg_size;
int i;
cur_seg_size = skb_headlen(skb) - header_len;
+ prev_frag_size = skb_headlen(skb);
cur_seg_num_bufs = cur_seg_size > 0;
for (i = 0; i < shinfo->nr_frags; i++) {
if (cur_seg_size >= gso_size) {
cur_seg_size %= gso_size;
cur_seg_num_bufs = cur_seg_size > 0;
+
+ if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO) {
+ int prev_frag_remain = prev_frag_size %
+ GVE_TX_MAX_BUF_SIZE_DQO;
+
+			/* If the last descriptor of the previous frag
+			 * holds fewer than cur_seg_size bytes, the segment
+			 * will span two descriptors in the previous frag.
+ * Since max gso size (9728) is less than
+ * GVE_TX_MAX_BUF_SIZE_DQO, it is impossible
+ * for the segment to span more than two
+ * descriptors.
+ */
+ if (prev_frag_remain &&
+ cur_seg_size > prev_frag_remain)
+ cur_seg_num_bufs++;
+ }
}
if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
return false;
- cur_seg_size += skb_frag_size(&shinfo->frags[i]);
+ prev_frag_size = skb_frag_size(&shinfo->frags[i]);
+ cur_seg_size += prev_frag_size;
}
return true;
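
A worked case makes the two-descriptor bound concrete. Assume GVE_TX_MAX_BUF_SIZE_DQO is 16384 (an illustrative value) and gso_size is the quoted maximum of 9728:

    int prev_frag_size   = 20000;   /* hardware splits it into 16384 + 3616 */
    int prev_frag_remain = prev_frag_size % 16384;  /* = 3616 */
    int cur_seg_size     = 9000;    /* segment bytes left over in that frag */
    int cur_seg_num_bufs = 1;

    /* The frag's last buffer holds only 3616 bytes, so the 9000-byte
     * leftover also reaches 5384 bytes into the 16384-byte buffer:
     * one segment, two descriptors. A third is impossible because
     * 9728 < 16384.
     */
    if (prev_frag_remain && cur_seg_size > prev_frag_remain)
            cur_seg_num_bufs++;     /* now 2 */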
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 8a713eed4465..fd32e15cadcb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1777,7 +1777,7 @@ static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
}
/* finally, set new mtu to netdevice */
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
out:
if (if_running) {
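
Moving the store to WRITE_ONCE() implies that ndev->mtu is read locklessly elsewhere, and readers are expected to use the matching accessor. A hypothetical reader-side check (not from this patch):

    unsigned int mtu = READ_ONCE(ndev->mtu); /* pairs with the WRITE_ONCE above */

    if (skb->len > mtu + ETH_HLEN)
            goto drop; /* frame exceeds the observed MTU */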
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
index e214bfaece1f..e8af26da1fc1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -3,7 +3,7 @@
# Makefile for the HISILICON network device drivers.
#
-ccflags-y += -I$(srctree)/$(src)
+ccflags-y += -I$(src)
ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3pf
ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3vf
ccflags-y += -I$(srctree)/drivers/net/ethernet/hisilicon/hns3/hns3_common
@@ -15,15 +15,14 @@ hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
-obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o hclge-common.o
-hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o \
- hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
+hclge-common-objs += hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
-obj-$(CONFIG_HNS3_HCLGE) += hclge.o
+hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o
+
+obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-common.o
hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_regs.o \
hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \
- hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o
-
hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index f19f1e1d1f9f..27dbe367f3d3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -104,6 +104,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_WOL_B,
HNAE3_DEV_SUPPORT_TM_FLUSH_B,
HNAE3_DEV_SUPPORT_VF_FAULT_B,
+ HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B,
};
#define hnae3_ae_dev_fd_supported(ae_dev) \
@@ -181,6 +182,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_vf_fault_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_VF_FAULT_B, (ae_dev)->caps)
+#define hnae3_ae_dev_gen_reg_dfx_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B, (hdev)->ae_dev->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -362,6 +366,15 @@ struct hnae3_vector_info {
#define HNAE3_FW_VERSION_BYTE0_SHIFT 0
#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0)
+#define HNAE3_SCC_VERSION_BYTE3_SHIFT 24
+#define HNAE3_SCC_VERSION_BYTE3_MASK GENMASK(31, 24)
+#define HNAE3_SCC_VERSION_BYTE2_SHIFT 16
+#define HNAE3_SCC_VERSION_BYTE2_MASK GENMASK(23, 16)
+#define HNAE3_SCC_VERSION_BYTE1_SHIFT 8
+#define HNAE3_SCC_VERSION_BYTE1_MASK GENMASK(15, 8)
+#define HNAE3_SCC_VERSION_BYTE0_SHIFT 0
+#define HNAE3_SCC_VERSION_BYTE0_MASK GENMASK(7, 0)
+
struct hnae3_ring_chain_node {
struct hnae3_ring_chain_node *next;
u32 tqp_index;
@@ -773,7 +786,7 @@ struct hnae3_ae_ops {
void (*get_rx_hwts)(struct hnae3_handle *handle, struct sk_buff *skb,
u32 nsec, u32 sec);
int (*get_ts_info)(struct hnae3_handle *handle,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
u32 *status_code);
void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs);
@@ -897,7 +910,7 @@ struct hnae3_handle {
struct hnae3_roce_private_info rinfo;
};
- u32 numa_node_mask; /* for multi-chip support */
+ nodemask_t numa_node_mask; /* for multi-chip support */
enum hnae3_port_base_vlan_state port_base_vlan_state;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index 652d71326231..4ad4e8ab2f1f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -48,6 +48,7 @@ void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
else
desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR);
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_reuse_desc);
static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
bool is_pf)
@@ -72,6 +73,7 @@ void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc,
if (is_read)
desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR);
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_setup_basic_desc);
int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw, bool en)
@@ -158,6 +160,7 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
{HCLGE_COMM_CAP_WOL_B, HNAE3_DEV_SUPPORT_WOL_B},
{HCLGE_COMM_CAP_TM_FLUSH_B, HNAE3_DEV_SUPPORT_TM_FLUSH_B},
{HCLGE_COMM_CAP_VF_FAULT_B, HNAE3_DEV_SUPPORT_VF_FAULT_B},
+ {HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B, HNAE3_DEV_SUPPORT_ERR_MOD_GEN_REG_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@@ -470,10 +473,14 @@ static int hclge_comm_cmd_check_result(struct hclge_comm_hw *hw,
int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
int num)
{
+ bool is_special = hclge_comm_is_special_opcode(le16_to_cpu(desc->opcode));
struct hclge_comm_cmq_ring *csq = &hw->cmq.csq;
int ret;
int ntc;
+ if (hw->cmq.ops.trace_cmd_send)
+ hw->cmq.ops.trace_cmd_send(hw, desc, num, is_special);
+
spin_lock_bh(&hw->cmq.csq.lock);
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hw->comm_state)) {
@@ -507,8 +514,12 @@ int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
spin_unlock_bh(&hw->cmq.csq.lock);
+ if (hw->cmq.ops.trace_cmd_get)
+ hw->cmq.ops.trace_cmd_get(hw, desc, num, is_special);
+
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_send);
static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw)
{
@@ -545,6 +556,7 @@ void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev,
hclge_comm_free_cmd_desc(&cmdq->csq);
hclge_comm_free_cmd_desc(&cmdq->crq);
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_uninit);
int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw)
{
@@ -583,6 +595,19 @@ err_csq:
hclge_comm_free_cmd_desc(&hw->cmq.csq);
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_queue_init);
+
+void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw,
+ const struct hclge_comm_cmq_ops *ops)
+{
+ struct hclge_comm_cmq *cmdq = &hw->cmq;
+
+ if (ops) {
+ cmdq->ops.trace_cmd_send = ops->trace_cmd_send;
+ cmdq->ops.trace_cmd_get = ops->trace_cmd_get;
+ }
+}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_init_ops);
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
u32 *fw_version, bool is_pf,
@@ -653,3 +678,8 @@ err_cmd_init:
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_cmd_init);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet PF/VF Common Library");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
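[Aside: the MODULE_* tags and EXPORT_SYMBOL_GPL() additions turn the common command code into a standalone module shared by the PF and VF drivers, so it can no longer call their private tracepoints directly; clients instead register hooks through hclge_comm_cmd_init_ops(). A sketch of the client side, with hypothetical trace wrappers (the real PF/VF wiring lives in later patches of the series, not shown in this hunk):]

    /* Hypothetical PF-side wrappers around private tracepoints. */
    static void example_trace_cmd_send(struct hclge_comm_hw *hw,
                                       struct hclge_desc *desc,
                                       int num, bool is_special)
    {
        trace_example_cmd_send(hw, desc, num, is_special); /* assumed */
    }

    static void example_trace_cmd_get(struct hclge_comm_hw *hw,
                                      struct hclge_desc *desc,
                                      int num, bool is_special)
    {
        trace_example_cmd_get(hw, desc, num, is_special); /* assumed */
    }

    static const struct hclge_comm_cmq_ops example_cmq_ops = {
        .trace_cmd_send = example_trace_cmd_send,
        .trace_cmd_get  = example_trace_cmd_get,
    };

    /* During init, after hclge_comm_cmd_queue_init() succeeds: */
    hclge_comm_cmd_init_ops(hw, &example_cmq_ops);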
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 552396518e08..2c2a2f1e0d7a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -91,6 +91,7 @@ enum hclge_opcode_type {
HCLGE_OPC_DFX_RCB_REG = 0x004D,
HCLGE_OPC_DFX_TQP_REG = 0x004E,
HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
+ HCLGE_OPC_DFX_GEN_REG = 0x7038,
HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
HCLGE_OPC_GET_QUEUE_ERR_VF = 0x0067,
@@ -246,6 +247,9 @@ enum hclge_opcode_type {
HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
+ /* SCC commands */
+ HCLGE_OPC_QUERY_SCC_VER = 0x1A84,
+
/* Mailbox command */
HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
@@ -353,6 +357,7 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_LANE_NUM_B = 27,
HCLGE_COMM_CAP_WOL_B = 28,
HCLGE_COMM_CAP_TM_FLUSH_B = 31,
+ HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B = 32,
};
enum HCLGE_COMM_API_CAP_BITS {
@@ -392,6 +397,11 @@ struct hclge_comm_query_version_cmd {
__le32 caps[HCLGE_COMM_QUERY_CAP_LENGTH]; /* capabilities of device */
};
+struct hclge_comm_query_scc_cmd {
+ __le32 scc_version;
+ u8 rsv[20];
+};
+
#define HCLGE_DESC_DATA_LEN 6
struct hclge_desc {
__le16 opcode;
@@ -423,11 +433,22 @@ enum hclge_comm_cmd_status {
HCLGE_COMM_ERR_CSQ_ERROR = -3,
};
+struct hclge_comm_hw;
+struct hclge_comm_cmq_ops {
+ void (*trace_cmd_send)(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int num, bool is_special);
+ void (*trace_cmd_get)(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int num, bool is_special);
+};
+
struct hclge_comm_cmq {
struct hclge_comm_cmq_ring csq;
struct hclge_comm_cmq_ring crq;
u16 tx_timeout;
enum hclge_comm_cmd_status last_status;
+ struct hclge_comm_cmq_ops ops;
};
struct hclge_comm_hw {
@@ -474,5 +495,6 @@ int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw);
int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw,
u32 *fw_version, bool is_pf,
unsigned long reset_pending);
-
+void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw,
+ const struct hclge_comm_cmq_ops *ops);
#endif
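[Aside: HCLGE_COMM_CAP_ERR_MOD_GEN_REG_B is the first capability numbered past 31, so it lands in the second __le32 word of the query-version response rather than the first. A minimal sketch of a lookup that resolves the word split transparently (the helper name is illustrative, not from the patch):]

    /* Test capability bit 'bit' across an array of nwords __le32 words. */
    static bool example_cap_set(const __le32 *caps, unsigned int nwords,
                                unsigned int bit)
    {
        if (bit >= nwords * 32)
            return false;

        return le32_to_cpu(caps[bit / 32]) & BIT(bit % 32);
    }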
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
index b4ae2160aff4..4e2bb6556b1c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c
@@ -62,6 +62,7 @@ int hclge_comm_rss_init_cfg(struct hnae3_handle *nic,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_rss_init_cfg);
void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
u16 *tc_valid, u16 *tc_size)
@@ -78,6 +79,7 @@ void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset,
tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0;
}
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_tc_info);
int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
u16 *tc_valid, u16 *tc_size)
@@ -113,6 +115,7 @@ int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset,
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_tc_mode);
int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
struct hclge_comm_hw *hw, const u8 *key,
@@ -143,6 +146,7 @@ int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_hash_key);
int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
struct hclge_comm_hw *hw,
@@ -185,11 +189,13 @@ int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev,
rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_tuple);
u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle)
{
return HCLGE_COMM_RSS_KEY_SIZE;
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_key_size);
int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg,
const u8 hfunc, u8 *hash_algo)
@@ -217,6 +223,7 @@ void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev,
for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++)
rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
}
+EXPORT_SYMBOL_GPL(hclge_comm_rss_indir_init_cfg);
int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
u8 *tuple_sets)
@@ -250,6 +257,7 @@ int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_tuple);
static void
hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req,
@@ -304,6 +312,7 @@ int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev,
}
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_indir_table);
int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw,
struct hclge_comm_rss_cfg *rss_cfg)
@@ -332,6 +341,7 @@ int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw,
"failed to configure rss input, ret = %d.\n", ret);
return ret;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_input_tuple);
void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
u8 *hfunc)
@@ -355,6 +365,7 @@ void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key,
if (key)
memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE);
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_hash_info);
void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
u32 *indir, u16 rss_ind_tbl_size)
@@ -367,6 +378,7 @@ void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg,
for (i = 0; i < rss_ind_tbl_size; i++)
indir[i] = rss_cfg->rss_indirection_tbl[i];
}
+EXPORT_SYMBOL_GPL(hclge_comm_get_rss_indir_tbl);
int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
const u8 *key)
@@ -408,6 +420,7 @@ int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_set_rss_algo_key);
static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
{
@@ -502,3 +515,4 @@ u64 hclge_comm_convert_rss_tuple(u8 tuple_sets)
return tuple_data;
}
+EXPORT_SYMBOL_GPL(hclge_comm_convert_rss_tuple);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
index 618f66d9586b..2b31188ff555 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c
@@ -26,6 +26,7 @@ u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
return buff;
}
+EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_stats);
int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
{
@@ -33,6 +34,7 @@ int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle)
return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE;
}
+EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_sset_count);
u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
{
@@ -56,6 +58,7 @@ u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
return buff;
}
+EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_strings);
int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
struct hclge_comm_hw *hw)
@@ -99,6 +102,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle,
return 0;
}
+EXPORT_SYMBOL_GPL(hclge_comm_tqps_update_stats);
void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle)
{
@@ -113,3 +117,4 @@ void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle)
memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
}
}
+EXPORT_SYMBOL_GPL(hclge_comm_reset_tqp_stats);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 19668a8d22f7..4cbc4d069a1f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2068,8 +2068,6 @@ static void hns3_tx_push_bd(struct hns3_enet_ring *ring, int num)
__iowrite64_copy(ring->tqp->mem_base, desc,
(sizeof(struct hns3_desc) * HNS3_MAX_PUSH_BD_NUM) /
HNS3_BYTES_PER_64BIT);
-
- io_stop_wc();
}
static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
@@ -2088,8 +2086,6 @@ static void hns3_tx_mem_doorbell(struct hns3_enet_ring *ring)
u64_stats_update_begin(&ring->syncp);
ring->stats.tx_mem_doorbell += ring->pending_buf;
u64_stats_update_end(&ring->syncp);
-
- io_stop_wc();
}
static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
@@ -2761,7 +2757,7 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
netdev_err(netdev, "failed to change MTU in hardware %d\n",
ret);
else
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return ret;
}
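[Aside: netdev->mtu can be read locklessly from the fast path, so the store is now annotated with WRITE_ONCE() to pair with READ_ONCE() on the reader side, part of a tree-wide net change in this cycle. The reader side, as a minimal sketch (the length check is illustrative):]

    /* Data path, no lock held; pairs with the WRITE_ONCE() above. */
    static bool example_fits_mtu(const struct net_device *netdev,
                                 const struct sk_buff *skb)
    {
        return skb->len <= READ_ONCE(netdev->mtu);
    }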
@@ -3539,6 +3535,9 @@ static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
ret = hns3_alloc_and_attach_buffer(ring, i);
if (ret)
goto out_buffer_fail;
+
+ if (!(i % HNS3_RESCHED_BD_NUM))
+ cond_resched();
}
return 0;
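[Aside: populating a large ring could otherwise monopolize the CPU for the whole allocation, so the loop now yields every HNS3_RESCHED_BD_NUM (1024) buffers. Note that !(i % N) also fires at i == 0, which is harmless: cond_resched() is a no-op when nothing else is runnable. The generic form of the pattern, with setup_one_item() as a hypothetical stand-in:]

    for (i = 0; i < nr_items; i++) {
        ret = setup_one_item(ctx, i);
        if (ret)
            return ret;

        /* Bounded voluntary preemption inside a long setup loop. */
        if (!(i % HNS3_RESCHED_BD_NUM))
            cond_resched();
    }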
@@ -5111,6 +5110,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
}
u64_stats_init(&priv->ring[i].syncp);
+ cond_resched();
}
return 0;
@@ -5724,6 +5724,9 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
struct net_device *netdev = handle->kinfo.netdev;
struct hns3_nic_priv *priv = netdev_priv(netdev);
+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ hns3_nic_net_stop(netdev);
+
if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
netdev_warn(netdev, "already uninitialized\n");
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index acd756b0c7c9..d36c4ed16d8d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -214,6 +214,8 @@ enum hns3_nic_state {
#define HNS3_CQ_MODE_EQE 1U
#define HNS3_CQ_MODE_CQE 0U
+#define HNS3_RESCHED_BD_NUM 1024
+
enum hns3_pkt_l2t_type {
HNS3_L2_TYPE_UNICAST,
HNS3_L2_TYPE_MULTICAST,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 941cb529d671..b1e988347347 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -2009,7 +2009,7 @@ static int hns3_set_tunable(struct net_device *netdev,
ETHTOOL_RING_USE_TX_PUSH)
static int hns3_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct hnae3_handle *handle = hns3_get_handle(netdev);
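[Aside: struct ethtool_ts_info became struct kernel_ethtool_ts_info tree-wide in this cycle; the fields a driver fills are unchanged. A minimal sketch of a get_ts_info() under the new type (the capability flags shown are illustrative, not the hns3 values):]

    static int example_get_ts_info(struct net_device *netdev,
                                   struct kernel_ethtool_ts_info *info)
    {
        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_RX_SOFTWARE |
                                SOF_TIMESTAMPING_SOFTWARE;
        info->phc_index = -1;   /* no PHC in this sketch */
        return 0;
    }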
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
index b8a1ecb4b8fb..3362b8d14d4f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -84,7 +84,7 @@ TRACE_EVENT(hns3_tx_desc,
__entry->desc_dma = ring->desc_dma_addr,
memcpy(__entry->desc, &ring->desc[cur_ntu],
sizeof(struct hns3_desc));
- __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ __assign_str(devname);
),
TP_printk(
@@ -117,7 +117,7 @@ TRACE_EVENT(hns3_rx_desc,
__entry->buf_dma = ring->desc_cb[ring->next_to_clean].dma;
memcpy(__entry->desc, &ring->desc[ring->next_to_clean],
sizeof(struct hns3_desc));
- __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ __assign_str(devname);
),
TP_printk(
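[Aside: __assign_str() lost its second argument in v6.10: the source recorded by __string() in TP_STRUCT__entry is reused at assignment time, so repeating it there was redundant and a source of mismatches. The resulting shape, as a hedged sketch:]

    TRACE_EVENT(example_tx,
        TP_PROTO(struct net_device *ndev),
        TP_ARGS(ndev),

        TP_STRUCT__entry(
            __string(devname, ndev->name)   /* source captured here */
        ),

        TP_fast_assign(
            __assign_str(devname);          /* no second argument */
        ),

        TP_printk("%s", __get_str(devname))
    );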
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 9ec471ced3d6..debf143e9940 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -18,6 +18,646 @@ static const char * const hclge_mac_state_str[] = {
static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
+static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
+ {false, "Reserved"},
+ {true, "BP_CPU_STATE"},
+ {true, "DFX_MSIX_INFO_NIC_0"},
+ {true, "DFX_MSIX_INFO_NIC_1"},
+ {true, "DFX_MSIX_INFO_NIC_2"},
+ {true, "DFX_MSIX_INFO_NIC_3"},
+
+ {true, "DFX_MSIX_INFO_ROC_0"},
+ {true, "DFX_MSIX_INFO_ROC_1"},
+ {true, "DFX_MSIX_INFO_ROC_2"},
+ {true, "DFX_MSIX_INFO_ROC_3"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
+ {false, "Reserved"},
+ {true, "SSU_ETS_PORT_STATUS"},
+ {true, "SSU_ETS_TCG_STATUS"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {true, "SSU_BP_STATUS_0"},
+
+ {true, "SSU_BP_STATUS_1"},
+ {true, "SSU_BP_STATUS_2"},
+ {true, "SSU_BP_STATUS_3"},
+ {true, "SSU_BP_STATUS_4"},
+ {true, "SSU_BP_STATUS_5"},
+ {true, "SSU_MAC_TX_PFC_IND"},
+
+ {true, "MAC_SSU_RX_PFC_IND"},
+ {true, "BTMP_AGEING_ST_B0"},
+ {true, "BTMP_AGEING_ST_B1"},
+ {true, "BTMP_AGEING_ST_B2"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "FULL_DROP_NUM"},
+ {true, "PART_DROP_NUM"},
+ {true, "PPP_KEY_DROP_NUM"},
+ {true, "PPP_RLT_DROP_NUM"},
+ {true, "LO_PRI_UNICAST_RLT_DROP_NUM"},
+ {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"},
+
+ {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"},
+ {true, "NCSI_PACKET_CURR_BUFFER_CNT"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK0"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK1"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK2"},
+ {true, "SSU_MB_RD_RLT_DROP_CNT"},
+
+ {true, "SSU_PPP_MAC_KEY_NUM_L"},
+ {true, "SSU_PPP_MAC_KEY_NUM_H"},
+ {true, "SSU_PPP_HOST_KEY_NUM_L"},
+ {true, "SSU_PPP_HOST_KEY_NUM_H"},
+ {true, "PPP_SSU_MAC_RLT_NUM_L"},
+ {true, "PPP_SSU_MAC_RLT_NUM_H"},
+
+ {true, "PPP_SSU_HOST_RLT_NUM_L"},
+ {true, "PPP_SSU_HOST_RLT_NUM_H"},
+ {true, "NCSI_RX_PACKET_IN_CNT_L"},
+ {true, "NCSI_RX_PACKET_IN_CNT_H"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_L"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_H"},
+
+ {true, "SSU_KEY_DROP_NUM"},
+ {true, "MB_UNCOPY_NUM"},
+ {true, "RX_OQ_DROP_PKT_CNT"},
+ {true, "TX_OQ_DROP_PKT_CNT"},
+ {true, "BANK_UNBALANCE_DROP_CNT"},
+ {true, "BANK_UNBALANCE_RX_DROP_CNT"},
+
+ {true, "NIC_L2_ERR_DROP_PKT_CNT"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT"},
+ {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "RX_OQ_GLB_DROP_PKT_CNT"},
+ {false, "Reserved"},
+
+ {true, "LO_PRI_UNICAST_CUR_CNT"},
+ {true, "HI_PRI_MULTICAST_CUR_CNT"},
+ {true, "LO_PRI_MULTICAST_CUR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
+ {true, "prt_id"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_0"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_1"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_2"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_3"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_4"},
+
+ {true, "PACKET_TC_CURR_BUFFER_CNT_5"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_6"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_7"},
+ {true, "PACKET_CURR_BUFFER_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "RX_PACKET_IN_CNT_L"},
+ {true, "RX_PACKET_IN_CNT_H"},
+ {true, "RX_PACKET_OUT_CNT_L"},
+ {true, "RX_PACKET_OUT_CNT_H"},
+ {true, "TX_PACKET_IN_CNT_L"},
+ {true, "TX_PACKET_IN_CNT_H"},
+
+ {true, "TX_PACKET_OUT_CNT_L"},
+ {true, "TX_PACKET_OUT_CNT_H"},
+ {true, "ROC_RX_PACKET_IN_CNT_L"},
+ {true, "ROC_RX_PACKET_IN_CNT_H"},
+ {true, "ROC_TX_PACKET_OUT_CNT_L"},
+ {true, "ROC_TX_PACKET_OUT_CNT_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_0_L"},
+ {true, "RX_PACKET_TC_IN_CNT_0_H"},
+ {true, "RX_PACKET_TC_IN_CNT_1_L"},
+ {true, "RX_PACKET_TC_IN_CNT_1_H"},
+ {true, "RX_PACKET_TC_IN_CNT_2_L"},
+ {true, "RX_PACKET_TC_IN_CNT_2_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_3_L"},
+ {true, "RX_PACKET_TC_IN_CNT_3_H"},
+ {true, "RX_PACKET_TC_IN_CNT_4_L"},
+ {true, "RX_PACKET_TC_IN_CNT_4_H"},
+ {true, "RX_PACKET_TC_IN_CNT_5_L"},
+ {true, "RX_PACKET_TC_IN_CNT_5_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_6_L"},
+ {true, "RX_PACKET_TC_IN_CNT_6_H"},
+ {true, "RX_PACKET_TC_IN_CNT_7_L"},
+ {true, "RX_PACKET_TC_IN_CNT_7_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_7_H"},
+ {true, "TX_PACKET_TC_IN_CNT_0_L"},
+ {true, "TX_PACKET_TC_IN_CNT_0_H"},
+ {true, "TX_PACKET_TC_IN_CNT_1_L"},
+ {true, "TX_PACKET_TC_IN_CNT_1_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_2_L"},
+ {true, "TX_PACKET_TC_IN_CNT_2_H"},
+ {true, "TX_PACKET_TC_IN_CNT_3_L"},
+ {true, "TX_PACKET_TC_IN_CNT_3_H"},
+ {true, "TX_PACKET_TC_IN_CNT_4_L"},
+ {true, "TX_PACKET_TC_IN_CNT_4_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_5_L"},
+ {true, "TX_PACKET_TC_IN_CNT_5_H"},
+ {true, "TX_PACKET_TC_IN_CNT_6_L"},
+ {true, "TX_PACKET_TC_IN_CNT_6_H"},
+ {true, "TX_PACKET_TC_IN_CNT_7_L"},
+ {true, "TX_PACKET_TC_IN_CNT_7_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_0_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_3_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_6_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
+ {true, "OQ_INDEX"},
+ {true, "QUEUE_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
+ {true, "prt_id"},
+ {true, "IGU_RX_ERR_PKT"},
+ {true, "IGU_RX_NO_SOF_PKT"},
+ {true, "EGU_TX_1588_SHORT_PKT"},
+ {true, "EGU_TX_1588_PKT"},
+ {true, "EGU_TX_ERR_PKT"},
+
+ {true, "IGU_RX_OUT_L2_PKT"},
+ {true, "IGU_RX_OUT_L3_PKT"},
+ {true, "IGU_RX_OUT_L4_PKT"},
+ {true, "IGU_RX_IN_L2_PKT"},
+ {true, "IGU_RX_IN_L3_PKT"},
+ {true, "IGU_RX_IN_L4_PKT"},
+
+ {true, "IGU_RX_EL3E_PKT"},
+ {true, "IGU_RX_EL4E_PKT"},
+ {true, "IGU_RX_L3E_PKT"},
+ {true, "IGU_RX_L4E_PKT"},
+ {true, "IGU_RX_ROCEE_PKT"},
+ {true, "IGU_RX_OUT_UDP0_PKT"},
+
+ {true, "IGU_RX_IN_UDP0_PKT"},
+ {true, "IGU_MC_CAR_DROP_PKT_L"},
+ {true, "IGU_MC_CAR_DROP_PKT_H"},
+ {true, "IGU_BC_CAR_DROP_PKT_L"},
+ {true, "IGU_BC_CAR_DROP_PKT_H"},
+ {false, "Reserved"},
+
+ {true, "IGU_RX_OVERSIZE_PKT_L"},
+ {true, "IGU_RX_OVERSIZE_PKT_H"},
+ {true, "IGU_RX_UNDERSIZE_PKT_L"},
+ {true, "IGU_RX_UNDERSIZE_PKT_H"},
+ {true, "IGU_RX_OUT_ALL_PKT_L"},
+ {true, "IGU_RX_OUT_ALL_PKT_H"},
+
+ {true, "IGU_TX_OUT_ALL_PKT_L"},
+ {true, "IGU_TX_OUT_ALL_PKT_H"},
+ {true, "IGU_RX_UNI_PKT_L"},
+ {true, "IGU_RX_UNI_PKT_H"},
+ {true, "IGU_RX_MULTI_PKT_L"},
+ {true, "IGU_RX_MULTI_PKT_H"},
+
+ {true, "IGU_RX_BROAD_PKT_L"},
+ {true, "IGU_RX_BROAD_PKT_H"},
+ {true, "EGU_TX_OUT_ALL_PKT_L"},
+ {true, "EGU_TX_OUT_ALL_PKT_H"},
+ {true, "EGU_TX_UNI_PKT_L"},
+ {true, "EGU_TX_UNI_PKT_H"},
+
+ {true, "EGU_TX_MULTI_PKT_L"},
+ {true, "EGU_TX_MULTI_PKT_H"},
+ {true, "EGU_TX_BROAD_PKT_L"},
+ {true, "EGU_TX_BROAD_PKT_H"},
+ {true, "IGU_TX_KEY_NUM_L"},
+ {true, "IGU_TX_KEY_NUM_H"},
+
+ {true, "IGU_RX_NON_TUN_PKT_L"},
+ {true, "IGU_RX_NON_TUN_PKT_H"},
+ {true, "IGU_RX_TUN_PKT_L"},
+ {true, "IGU_RX_TUN_PKT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
+ {true, "tc_queue_num"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "RPU_RX_PKT_DROP_CNT"},
+ {true, "BUF_WAIT_TIMEOUT"},
+ {true, "BUF_WAIT_TIMEOUT_QID"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
+ {false, "Reserved"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+
+ {true, "FIFO_DFX_ST5"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
+ {false, "Reserved"},
+ {true, "NCSI_EGU_TX_FIFO_STS"},
+ {true, "NCSI_PAUSE_STATUS"},
+ {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_CKS_ERR_CNT"},
+
+ {true, "NCSI_RX_CTRL_PKT_CNT"},
+ {true, "NCSI_RX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_PKT_CNT"},
+ {true, "NCSI_RX_FCS_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"},
+
+ {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_CNT"},
+ {true, "NCSI_TX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_PKT_CNT"},
+ {true, "NCSI_TX_PT_PKT_TRUNC_CNT"},
+
+ {true, "NCSI_TX_PT_PKT_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "NCSI_MAC_RX_OCTETS_OK"},
+ {true, "NCSI_MAC_RX_OCTETS_BAD"},
+ {true, "NCSI_MAC_RX_UC_PKTS"},
+ {true, "NCSI_MAC_RX_MC_PKTS"},
+ {true, "NCSI_MAC_RX_BC_PKTS"},
+ {true, "NCSI_MAC_RX_PKTS_64OCTETS"},
+
+ {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"},
+
+ {true, "NCSI_MAC_RX_FCS_ERRORS"},
+ {true, "NCSI_MAC_RX_LONG_ERRORS"},
+ {true, "NCSI_MAC_RX_JABBER_ERRORS"},
+ {true, "NCSI_MAC_RX_RUNT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_SHORT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_FILT_PKT_CNT"},
+
+ {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"},
+ {true, "NCSI_MAC_TX_OCTETS_OK"},
+ {true, "NCSI_MAC_TX_OCTETS_BAD"},
+ {true, "NCSI_MAC_TX_UC_PKTS"},
+ {true, "NCSI_MAC_TX_MC_PKTS"},
+ {true, "NCSI_MAC_TX_BC_PKTS"},
+
+ {true, "NCSI_MAC_TX_PKTS_64OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"},
+
+ {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"},
+ {true, "NCSI_MAC_TX_UNDERRUN"},
+ {true, "NCSI_MAC_TX_CRC_ERROR"},
+ {true, "NCSI_MAC_TX_PAUSE_FRAMES"},
+ {true, "NCSI_MAC_RX_PAD_PKTS"},
+ {true, "NCSI_MAC_RX_PAUSE_FRAMES"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
+ {false, "Reserved"},
+ {true, "LGE_IGU_AFIFO_DFX_0"},
+ {true, "LGE_IGU_AFIFO_DFX_1"},
+ {true, "LGE_IGU_AFIFO_DFX_2"},
+ {true, "LGE_IGU_AFIFO_DFX_3"},
+ {true, "LGE_IGU_AFIFO_DFX_4"},
+
+ {true, "LGE_IGU_AFIFO_DFX_5"},
+ {true, "LGE_IGU_AFIFO_DFX_6"},
+ {true, "LGE_IGU_AFIFO_DFX_7"},
+ {true, "LGE_EGU_AFIFO_DFX_0"},
+ {true, "LGE_EGU_AFIFO_DFX_1"},
+ {true, "LGE_EGU_AFIFO_DFX_2"},
+
+ {true, "LGE_EGU_AFIFO_DFX_3"},
+ {true, "LGE_EGU_AFIFO_DFX_4"},
+ {true, "LGE_EGU_AFIFO_DFX_5"},
+ {true, "LGE_EGU_AFIFO_DFX_6"},
+ {true, "LGE_EGU_AFIFO_DFX_7"},
+ {true, "CGE_IGU_AFIFO_DFX_0"},
+
+ {true, "CGE_IGU_AFIFO_DFX_1"},
+ {true, "CGE_EGU_AFIFO_DFX_0"},
+ {true, "CGE_EGU_AFIFO_DFX_1"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
+ {false, "Reserved"},
+ {true, "DROP_FROM_PRT_PKT_CNT"},
+ {true, "DROP_FROM_HOST_PKT_CNT"},
+ {true, "DROP_TX_VLAN_PROC_CNT"},
+ {true, "DROP_MNG_CNT"},
+ {true, "DROP_FD_CNT"},
+
+ {true, "DROP_NO_DST_CNT"},
+ {true, "DROP_MC_MBID_FULL_CNT"},
+ {true, "DROP_SC_FILTERED"},
+ {true, "PPP_MC_DROP_PKT_CNT"},
+ {true, "DROP_PT_CNT"},
+ {true, "DROP_MAC_ANTI_SPOOF_CNT"},
+
+ {true, "DROP_IG_VFV_CNT"},
+ {true, "DROP_IG_PRTV_CNT"},
+ {true, "DROP_CNM_PFC_PAUSE_CNT"},
+ {true, "DROP_TORUS_TC_CNT"},
+ {true, "DROP_TORUS_LPBK_CNT"},
+ {true, "PPP_HFS_STS"},
+
+ {true, "PPP_MC_RSLT_STS"},
+ {true, "PPP_P3U_STS"},
+ {true, "PPP_RSLT_DESCR_STS"},
+ {true, "PPP_UMV_STS_0"},
+ {true, "PPP_UMV_STS_1"},
+ {true, "PPP_VFV_STS"},
+
+ {true, "PPP_GRO_KEY_CNT"},
+ {true, "PPP_GRO_INFO_CNT"},
+ {true, "PPP_GRO_DROP_CNT"},
+ {true, "PPP_GRO_OUT_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_DATA_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"},
+
+ {true, "PPP_GRO_INFO_MATCH_CNT"},
+ {true, "PPP_GRO_FREE_ENTRY_CNT"},
+ {true, "PPP_GRO_INNER_DFX_SIGNAL"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "GET_RX_PKT_CNT_L"},
+ {true, "GET_RX_PKT_CNT_H"},
+ {true, "GET_TX_PKT_CNT_L"},
+ {true, "GET_TX_PKT_CNT_H"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_H"},
+
+ {true, "SEND_MC_FROM_PRT_CNT_L"},
+ {true, "SEND_MC_FROM_PRT_CNT_H"},
+ {true, "SEND_MC_FROM_HOST_CNT_L"},
+ {true, "SEND_MC_FROM_HOST_CNT_H"},
+ {true, "SSU_MC_RD_CNT_L"},
+ {true, "SSU_MC_RD_CNT_H"},
+
+ {true, "SSU_MC_DROP_CNT_L"},
+ {true, "SSU_MC_DROP_CNT_H"},
+ {true, "SSU_MC_RD_PKT_CNT_L"},
+ {true, "SSU_MC_RD_PKT_CNT_H"},
+ {true, "PPP_MC_2HOST_PKT_CNT_L"},
+ {true, "PPP_MC_2HOST_PKT_CNT_H"},
+
+ {true, "PPP_MC_2PRT_PKT_CNT_L"},
+ {true, "PPP_MC_2PRT_PKT_CNT_H"},
+ {true, "NTSNOS_PKT_CNT_L"},
+ {true, "NTSNOS_PKT_CNT_H"},
+ {true, "NTUP_PKT_CNT_L"},
+ {true, "NTUP_PKT_CNT_H"},
+
+ {true, "NTLCL_PKT_CNT_L"},
+ {true, "NTLCL_PKT_CNT_H"},
+ {true, "NTTGT_PKT_CNT_L"},
+ {true, "NTTGT_PKT_CNT_H"},
+ {true, "RTNS_PKT_CNT_L"},
+ {true, "RTNS_PKT_CNT_H"},
+
+ {true, "RTLPBK_PKT_CNT_L"},
+ {true, "RTLPBK_PKT_CNT_H"},
+ {true, "NR_PKT_CNT_L"},
+ {true, "NR_PKT_CNT_H"},
+ {true, "RR_PKT_CNT_L"},
+ {true, "RR_PKT_CNT_H"},
+
+ {true, "MNG_TBL_HIT_CNT_L"},
+ {true, "MNG_TBL_HIT_CNT_H"},
+ {true, "FD_TBL_HIT_CNT_L"},
+ {true, "FD_TBL_HIT_CNT_H"},
+ {true, "FD_LKUP_CNT_L"},
+ {true, "FD_LKUP_CNT_H"},
+
+ {true, "BC_HIT_CNT_L"},
+ {true, "BC_HIT_CNT_H"},
+ {true, "UM_TBL_UC_HIT_CNT_L"},
+ {true, "UM_TBL_UC_HIT_CNT_H"},
+ {true, "UM_TBL_MC_HIT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_CNT_H"},
+
+ {true, "UM_TBL_VMDQ1_HIT_CNT_L"},
+ {true, "UM_TBL_VMDQ1_HIT_CNT_H"},
+ {true, "MTA_TBL_HIT_CNT_L"},
+ {true, "MTA_TBL_HIT_CNT_H"},
+ {true, "FWD_BONDING_HIT_CNT_L"},
+ {true, "FWD_BONDING_HIT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_CNT_H"},
+ {true, "GET_TUNL_PKT_CNT_L"},
+ {true, "GET_TUNL_PKT_CNT_H"},
+ {true, "GET_BMC_PKT_CNT_L"},
+ {true, "GET_BMC_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_H"},
+ {true, "PPP_MC_2BMC_PKT_CNT_L"},
+ {true, "PPP_MC_2BMC_PKT_CNT_H"},
+ {true, "VLAN_MIRR_CNT_L"},
+ {true, "VLAN_MIRR_CNT_H"},
+
+ {true, "IG_MIRR_CNT_L"},
+ {true, "IG_MIRR_CNT_H"},
+ {true, "EG_MIRR_CNT_L"},
+ {true, "EG_MIRR_CNT_H"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_L"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_H"},
+
+ {true, "LAN_PAIR_CNT_L"},
+ {true, "LAN_PAIR_CNT_H"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_H"},
+ {true, "MTA_TBL_HIT_PKT_CNT_L"},
+ {true, "MTA_TBL_HIT_PKT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_PKT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_PKT_CNT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
+ {false, "Reserved"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "FSM_DFX_ST2"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+ {true, "FIFO_DFX_ST5"},
+ {true, "FIFO_DFX_ST6"},
+ {true, "FIFO_DFX_ST7"},
+
+ {true, "FIFO_DFX_ST8"},
+ {true, "FIFO_DFX_ST9"},
+ {true, "FIFO_DFX_ST10"},
+ {true, "FIFO_DFX_ST11"},
+ {true, "Q_CREDIT_VLD_0"},
+ {true, "Q_CREDIT_VLD_1"},
+
+ {true, "Q_CREDIT_VLD_2"},
+ {true, "Q_CREDIT_VLD_3"},
+ {true, "Q_CREDIT_VLD_4"},
+ {true, "Q_CREDIT_VLD_5"},
+ {true, "Q_CREDIT_VLD_6"},
+ {true, "Q_CREDIT_VLD_7"},
+
+ {true, "Q_CREDIT_VLD_8"},
+ {true, "Q_CREDIT_VLD_9"},
+ {true, "Q_CREDIT_VLD_10"},
+ {true, "Q_CREDIT_VLD_11"},
+ {true, "Q_CREDIT_VLD_12"},
+ {true, "Q_CREDIT_VLD_13"},
+
+ {true, "Q_CREDIT_VLD_14"},
+ {true, "Q_CREDIT_VLD_15"},
+ {true, "Q_CREDIT_VLD_16"},
+ {true, "Q_CREDIT_VLD_17"},
+ {true, "Q_CREDIT_VLD_18"},
+ {true, "Q_CREDIT_VLD_19"},
+
+ {true, "Q_CREDIT_VLD_20"},
+ {true, "Q_CREDIT_VLD_21"},
+ {true, "Q_CREDIT_VLD_22"},
+ {true, "Q_CREDIT_VLD_23"},
+ {true, "Q_CREDIT_VLD_24"},
+ {true, "Q_CREDIT_VLD_25"},
+
+ {true, "Q_CREDIT_VLD_26"},
+ {true, "Q_CREDIT_VLD_27"},
+ {true, "Q_CREDIT_VLD_28"},
+ {true, "Q_CREDIT_VLD_29"},
+ {true, "Q_CREDIT_VLD_30"},
+ {true, "Q_CREDIT_VLD_31"},
+
+ {true, "GRO_BD_SERR_CNT"},
+ {true, "GRO_CONTEXT_SERR_CNT"},
+ {true, "RX_STASH_CFG_SERR_CNT"},
+ {true, "AXI_RD_FBD_SERR_CNT"},
+ {true, "GRO_BD_MERR_CNT"},
+ {true, "GRO_CONTEXT_MERR_CNT"},
+
+ {true, "RX_STASH_CFG_MERR_CNT"},
+ {true, "AXI_RD_FBD_MERR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
+ {true, "q_num"},
+ {true, "RCB_CFG_RX_RING_TAIL"},
+ {true, "RCB_CFG_RX_RING_HEAD"},
+ {true, "RCB_CFG_RX_RING_FBDNUM"},
+ {true, "RCB_CFG_RX_RING_OFFSET"},
+ {true, "RCB_CFG_RX_RING_FBDOFFSET"},
+
+ {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"},
+ {true, "RCB_CFG_TX_RING_TAIL"},
+ {true, "RCB_CFG_TX_RING_HEAD"},
+ {true, "RCB_CFG_TX_RING_FBDNUM"},
+ {true, "RCB_CFG_TX_RING_OFFSET"},
+ {true, "RCB_CFG_TX_RING_EBDNUM"},
+};
+
static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dfx_msg = &hclge_dbg_bios_common_reg[0],
@@ -161,10 +801,8 @@ static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset,
return 0;
}
-static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
- struct hclge_desc *desc_src,
- int index, int bd_num,
- enum hclge_opcode_type cmd)
+int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src,
+ int index, int bd_num, enum hclge_opcode_type cmd)
{
struct hclge_desc *desc = desc_src;
int ret, i;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
index 724052928b88..2b998cbed826 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -99,646 +99,6 @@ struct hclge_dbg_status_dfx_info {
char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
};
-static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
- {false, "Reserved"},
- {true, "BP_CPU_STATE"},
- {true, "DFX_MSIX_INFO_NIC_0"},
- {true, "DFX_MSIX_INFO_NIC_1"},
- {true, "DFX_MSIX_INFO_NIC_2"},
- {true, "DFX_MSIX_INFO_NIC_3"},
-
- {true, "DFX_MSIX_INFO_ROC_0"},
- {true, "DFX_MSIX_INFO_ROC_1"},
- {true, "DFX_MSIX_INFO_ROC_2"},
- {true, "DFX_MSIX_INFO_ROC_3"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
- {false, "Reserved"},
- {true, "SSU_ETS_PORT_STATUS"},
- {true, "SSU_ETS_TCG_STATUS"},
- {false, "Reserved"},
- {false, "Reserved"},
- {true, "SSU_BP_STATUS_0"},
-
- {true, "SSU_BP_STATUS_1"},
- {true, "SSU_BP_STATUS_2"},
- {true, "SSU_BP_STATUS_3"},
- {true, "SSU_BP_STATUS_4"},
- {true, "SSU_BP_STATUS_5"},
- {true, "SSU_MAC_TX_PFC_IND"},
-
- {true, "MAC_SSU_RX_PFC_IND"},
- {true, "BTMP_AGEING_ST_B0"},
- {true, "BTMP_AGEING_ST_B1"},
- {true, "BTMP_AGEING_ST_B2"},
- {false, "Reserved"},
- {false, "Reserved"},
-
- {true, "FULL_DROP_NUM"},
- {true, "PART_DROP_NUM"},
- {true, "PPP_KEY_DROP_NUM"},
- {true, "PPP_RLT_DROP_NUM"},
- {true, "LO_PRI_UNICAST_RLT_DROP_NUM"},
- {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"},
-
- {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"},
- {true, "NCSI_PACKET_CURR_BUFFER_CNT"},
- {true, "BTMP_AGEING_RLS_CNT_BANK0"},
- {true, "BTMP_AGEING_RLS_CNT_BANK1"},
- {true, "BTMP_AGEING_RLS_CNT_BANK2"},
- {true, "SSU_MB_RD_RLT_DROP_CNT"},
-
- {true, "SSU_PPP_MAC_KEY_NUM_L"},
- {true, "SSU_PPP_MAC_KEY_NUM_H"},
- {true, "SSU_PPP_HOST_KEY_NUM_L"},
- {true, "SSU_PPP_HOST_KEY_NUM_H"},
- {true, "PPP_SSU_MAC_RLT_NUM_L"},
- {true, "PPP_SSU_MAC_RLT_NUM_H"},
-
- {true, "PPP_SSU_HOST_RLT_NUM_L"},
- {true, "PPP_SSU_HOST_RLT_NUM_H"},
- {true, "NCSI_RX_PACKET_IN_CNT_L"},
- {true, "NCSI_RX_PACKET_IN_CNT_H"},
- {true, "NCSI_TX_PACKET_OUT_CNT_L"},
- {true, "NCSI_TX_PACKET_OUT_CNT_H"},
-
- {true, "SSU_KEY_DROP_NUM"},
- {true, "MB_UNCOPY_NUM"},
- {true, "RX_OQ_DROP_PKT_CNT"},
- {true, "TX_OQ_DROP_PKT_CNT"},
- {true, "BANK_UNBALANCE_DROP_CNT"},
- {true, "BANK_UNBALANCE_RX_DROP_CNT"},
-
- {true, "NIC_L2_ERR_DROP_PKT_CNT"},
- {true, "ROC_L2_ERR_DROP_PKT_CNT"},
- {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"},
- {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"},
- {true, "RX_OQ_GLB_DROP_PKT_CNT"},
- {false, "Reserved"},
-
- {true, "LO_PRI_UNICAST_CUR_CNT"},
- {true, "HI_PRI_MULTICAST_CUR_CNT"},
- {true, "LO_PRI_MULTICAST_CUR_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
- {true, "prt_id"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_0"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_1"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_2"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_3"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_4"},
-
- {true, "PACKET_TC_CURR_BUFFER_CNT_5"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_6"},
- {true, "PACKET_TC_CURR_BUFFER_CNT_7"},
- {true, "PACKET_CURR_BUFFER_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
-
- {true, "RX_PACKET_IN_CNT_L"},
- {true, "RX_PACKET_IN_CNT_H"},
- {true, "RX_PACKET_OUT_CNT_L"},
- {true, "RX_PACKET_OUT_CNT_H"},
- {true, "TX_PACKET_IN_CNT_L"},
- {true, "TX_PACKET_IN_CNT_H"},
-
- {true, "TX_PACKET_OUT_CNT_L"},
- {true, "TX_PACKET_OUT_CNT_H"},
- {true, "ROC_RX_PACKET_IN_CNT_L"},
- {true, "ROC_RX_PACKET_IN_CNT_H"},
- {true, "ROC_TX_PACKET_OUT_CNT_L"},
- {true, "ROC_TX_PACKET_OUT_CNT_H"},
-
- {true, "RX_PACKET_TC_IN_CNT_0_L"},
- {true, "RX_PACKET_TC_IN_CNT_0_H"},
- {true, "RX_PACKET_TC_IN_CNT_1_L"},
- {true, "RX_PACKET_TC_IN_CNT_1_H"},
- {true, "RX_PACKET_TC_IN_CNT_2_L"},
- {true, "RX_PACKET_TC_IN_CNT_2_H"},
-
- {true, "RX_PACKET_TC_IN_CNT_3_L"},
- {true, "RX_PACKET_TC_IN_CNT_3_H"},
- {true, "RX_PACKET_TC_IN_CNT_4_L"},
- {true, "RX_PACKET_TC_IN_CNT_4_H"},
- {true, "RX_PACKET_TC_IN_CNT_5_L"},
- {true, "RX_PACKET_TC_IN_CNT_5_H"},
-
- {true, "RX_PACKET_TC_IN_CNT_6_L"},
- {true, "RX_PACKET_TC_IN_CNT_6_H"},
- {true, "RX_PACKET_TC_IN_CNT_7_L"},
- {true, "RX_PACKET_TC_IN_CNT_7_H"},
- {true, "RX_PACKET_TC_OUT_CNT_0_L"},
- {true, "RX_PACKET_TC_OUT_CNT_0_H"},
-
- {true, "RX_PACKET_TC_OUT_CNT_1_L"},
- {true, "RX_PACKET_TC_OUT_CNT_1_H"},
- {true, "RX_PACKET_TC_OUT_CNT_2_L"},
- {true, "RX_PACKET_TC_OUT_CNT_2_H"},
- {true, "RX_PACKET_TC_OUT_CNT_3_L"},
- {true, "RX_PACKET_TC_OUT_CNT_3_H"},
-
- {true, "RX_PACKET_TC_OUT_CNT_4_L"},
- {true, "RX_PACKET_TC_OUT_CNT_4_H"},
- {true, "RX_PACKET_TC_OUT_CNT_5_L"},
- {true, "RX_PACKET_TC_OUT_CNT_5_H"},
- {true, "RX_PACKET_TC_OUT_CNT_6_L"},
- {true, "RX_PACKET_TC_OUT_CNT_6_H"},
-
- {true, "RX_PACKET_TC_OUT_CNT_7_L"},
- {true, "RX_PACKET_TC_OUT_CNT_7_H"},
- {true, "TX_PACKET_TC_IN_CNT_0_L"},
- {true, "TX_PACKET_TC_IN_CNT_0_H"},
- {true, "TX_PACKET_TC_IN_CNT_1_L"},
- {true, "TX_PACKET_TC_IN_CNT_1_H"},
-
- {true, "TX_PACKET_TC_IN_CNT_2_L"},
- {true, "TX_PACKET_TC_IN_CNT_2_H"},
- {true, "TX_PACKET_TC_IN_CNT_3_L"},
- {true, "TX_PACKET_TC_IN_CNT_3_H"},
- {true, "TX_PACKET_TC_IN_CNT_4_L"},
- {true, "TX_PACKET_TC_IN_CNT_4_H"},
-
- {true, "TX_PACKET_TC_IN_CNT_5_L"},
- {true, "TX_PACKET_TC_IN_CNT_5_H"},
- {true, "TX_PACKET_TC_IN_CNT_6_L"},
- {true, "TX_PACKET_TC_IN_CNT_6_H"},
- {true, "TX_PACKET_TC_IN_CNT_7_L"},
- {true, "TX_PACKET_TC_IN_CNT_7_H"},
-
- {true, "TX_PACKET_TC_OUT_CNT_0_L"},
- {true, "TX_PACKET_TC_OUT_CNT_0_H"},
- {true, "TX_PACKET_TC_OUT_CNT_1_L"},
- {true, "TX_PACKET_TC_OUT_CNT_1_H"},
- {true, "TX_PACKET_TC_OUT_CNT_2_L"},
- {true, "TX_PACKET_TC_OUT_CNT_2_H"},
-
- {true, "TX_PACKET_TC_OUT_CNT_3_L"},
- {true, "TX_PACKET_TC_OUT_CNT_3_H"},
- {true, "TX_PACKET_TC_OUT_CNT_4_L"},
- {true, "TX_PACKET_TC_OUT_CNT_4_H"},
- {true, "TX_PACKET_TC_OUT_CNT_5_L"},
- {true, "TX_PACKET_TC_OUT_CNT_5_H"},
-
- {true, "TX_PACKET_TC_OUT_CNT_6_L"},
- {true, "TX_PACKET_TC_OUT_CNT_6_H"},
- {true, "TX_PACKET_TC_OUT_CNT_7_L"},
- {true, "TX_PACKET_TC_OUT_CNT_7_H"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
- {true, "OQ_INDEX"},
- {true, "QUEUE_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
- {true, "prt_id"},
- {true, "IGU_RX_ERR_PKT"},
- {true, "IGU_RX_NO_SOF_PKT"},
- {true, "EGU_TX_1588_SHORT_PKT"},
- {true, "EGU_TX_1588_PKT"},
- {true, "EGU_TX_ERR_PKT"},
-
- {true, "IGU_RX_OUT_L2_PKT"},
- {true, "IGU_RX_OUT_L3_PKT"},
- {true, "IGU_RX_OUT_L4_PKT"},
- {true, "IGU_RX_IN_L2_PKT"},
- {true, "IGU_RX_IN_L3_PKT"},
- {true, "IGU_RX_IN_L4_PKT"},
-
- {true, "IGU_RX_EL3E_PKT"},
- {true, "IGU_RX_EL4E_PKT"},
- {true, "IGU_RX_L3E_PKT"},
- {true, "IGU_RX_L4E_PKT"},
- {true, "IGU_RX_ROCEE_PKT"},
- {true, "IGU_RX_OUT_UDP0_PKT"},
-
- {true, "IGU_RX_IN_UDP0_PKT"},
- {true, "IGU_MC_CAR_DROP_PKT_L"},
- {true, "IGU_MC_CAR_DROP_PKT_H"},
- {true, "IGU_BC_CAR_DROP_PKT_L"},
- {true, "IGU_BC_CAR_DROP_PKT_H"},
- {false, "Reserved"},
-
- {true, "IGU_RX_OVERSIZE_PKT_L"},
- {true, "IGU_RX_OVERSIZE_PKT_H"},
- {true, "IGU_RX_UNDERSIZE_PKT_L"},
- {true, "IGU_RX_UNDERSIZE_PKT_H"},
- {true, "IGU_RX_OUT_ALL_PKT_L"},
- {true, "IGU_RX_OUT_ALL_PKT_H"},
-
- {true, "IGU_TX_OUT_ALL_PKT_L"},
- {true, "IGU_TX_OUT_ALL_PKT_H"},
- {true, "IGU_RX_UNI_PKT_L"},
- {true, "IGU_RX_UNI_PKT_H"},
- {true, "IGU_RX_MULTI_PKT_L"},
- {true, "IGU_RX_MULTI_PKT_H"},
-
- {true, "IGU_RX_BROAD_PKT_L"},
- {true, "IGU_RX_BROAD_PKT_H"},
- {true, "EGU_TX_OUT_ALL_PKT_L"},
- {true, "EGU_TX_OUT_ALL_PKT_H"},
- {true, "EGU_TX_UNI_PKT_L"},
- {true, "EGU_TX_UNI_PKT_H"},
-
- {true, "EGU_TX_MULTI_PKT_L"},
- {true, "EGU_TX_MULTI_PKT_H"},
- {true, "EGU_TX_BROAD_PKT_L"},
- {true, "EGU_TX_BROAD_PKT_H"},
- {true, "IGU_TX_KEY_NUM_L"},
- {true, "IGU_TX_KEY_NUM_H"},
-
- {true, "IGU_RX_NON_TUN_PKT_L"},
- {true, "IGU_RX_NON_TUN_PKT_H"},
- {true, "IGU_RX_TUN_PKT_L"},
- {true, "IGU_RX_TUN_PKT_H"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
- {true, "tc_queue_num"},
- {true, "FSM_DFX_ST0"},
- {true, "FSM_DFX_ST1"},
- {true, "RPU_RX_PKT_DROP_CNT"},
- {true, "BUF_WAIT_TIMEOUT"},
- {true, "BUF_WAIT_TIMEOUT_QID"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
- {false, "Reserved"},
- {true, "FIFO_DFX_ST0"},
- {true, "FIFO_DFX_ST1"},
- {true, "FIFO_DFX_ST2"},
- {true, "FIFO_DFX_ST3"},
- {true, "FIFO_DFX_ST4"},
-
- {true, "FIFO_DFX_ST5"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
- {false, "Reserved"},
- {true, "NCSI_EGU_TX_FIFO_STS"},
- {true, "NCSI_PAUSE_STATUS"},
- {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"},
- {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"},
- {true, "NCSI_RX_CTRL_CKS_ERR_CNT"},
-
- {true, "NCSI_RX_CTRL_PKT_CNT"},
- {true, "NCSI_RX_PT_DMAC_ERR_CNT"},
- {true, "NCSI_RX_PT_SMAC_ERR_CNT"},
- {true, "NCSI_RX_PT_PKT_CNT"},
- {true, "NCSI_RX_FCS_ERR_CNT"},
- {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"},
-
- {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"},
- {true, "NCSI_TX_CTRL_PKT_CNT"},
- {true, "NCSI_TX_PT_DMAC_ERR_CNT"},
- {true, "NCSI_TX_PT_SMAC_ERR_CNT"},
- {true, "NCSI_TX_PT_PKT_CNT"},
- {true, "NCSI_TX_PT_PKT_TRUNC_CNT"},
-
- {true, "NCSI_TX_PT_PKT_ERR_CNT"},
- {true, "NCSI_TX_CTRL_PKT_ERR_CNT"},
- {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"},
- {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
-
- {true, "NCSI_MAC_RX_OCTETS_OK"},
- {true, "NCSI_MAC_RX_OCTETS_BAD"},
- {true, "NCSI_MAC_RX_UC_PKTS"},
- {true, "NCSI_MAC_RX_MC_PKTS"},
- {true, "NCSI_MAC_RX_BC_PKTS"},
- {true, "NCSI_MAC_RX_PKTS_64OCTETS"},
-
- {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"},
- {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"},
-
- {true, "NCSI_MAC_RX_FCS_ERRORS"},
- {true, "NCSI_MAC_RX_LONG_ERRORS"},
- {true, "NCSI_MAC_RX_JABBER_ERRORS"},
- {true, "NCSI_MAC_RX_RUNT_ERR_CNT"},
- {true, "NCSI_MAC_RX_SHORT_ERR_CNT"},
- {true, "NCSI_MAC_RX_FILT_PKT_CNT"},
-
- {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"},
- {true, "NCSI_MAC_TX_OCTETS_OK"},
- {true, "NCSI_MAC_TX_OCTETS_BAD"},
- {true, "NCSI_MAC_TX_UC_PKTS"},
- {true, "NCSI_MAC_TX_MC_PKTS"},
- {true, "NCSI_MAC_TX_BC_PKTS"},
-
- {true, "NCSI_MAC_TX_PKTS_64OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"},
- {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"},
-
- {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"},
- {true, "NCSI_MAC_TX_UNDERRUN"},
- {true, "NCSI_MAC_TX_CRC_ERROR"},
- {true, "NCSI_MAC_TX_PAUSE_FRAMES"},
- {true, "NCSI_MAC_RX_PAD_PKTS"},
- {true, "NCSI_MAC_RX_PAUSE_FRAMES"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
- {false, "Reserved"},
- {true, "LGE_IGU_AFIFO_DFX_0"},
- {true, "LGE_IGU_AFIFO_DFX_1"},
- {true, "LGE_IGU_AFIFO_DFX_2"},
- {true, "LGE_IGU_AFIFO_DFX_3"},
- {true, "LGE_IGU_AFIFO_DFX_4"},
-
- {true, "LGE_IGU_AFIFO_DFX_5"},
- {true, "LGE_IGU_AFIFO_DFX_6"},
- {true, "LGE_IGU_AFIFO_DFX_7"},
- {true, "LGE_EGU_AFIFO_DFX_0"},
- {true, "LGE_EGU_AFIFO_DFX_1"},
- {true, "LGE_EGU_AFIFO_DFX_2"},
-
- {true, "LGE_EGU_AFIFO_DFX_3"},
- {true, "LGE_EGU_AFIFO_DFX_4"},
- {true, "LGE_EGU_AFIFO_DFX_5"},
- {true, "LGE_EGU_AFIFO_DFX_6"},
- {true, "LGE_EGU_AFIFO_DFX_7"},
- {true, "CGE_IGU_AFIFO_DFX_0"},
-
- {true, "CGE_IGU_AFIFO_DFX_1"},
- {true, "CGE_EGU_AFIFO_DFX_0"},
- {true, "CGE_EGU_AFIFO_DFX_1"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
- {false, "Reserved"},
- {true, "DROP_FROM_PRT_PKT_CNT"},
- {true, "DROP_FROM_HOST_PKT_CNT"},
- {true, "DROP_TX_VLAN_PROC_CNT"},
- {true, "DROP_MNG_CNT"},
- {true, "DROP_FD_CNT"},
-
- {true, "DROP_NO_DST_CNT"},
- {true, "DROP_MC_MBID_FULL_CNT"},
- {true, "DROP_SC_FILTERED"},
- {true, "PPP_MC_DROP_PKT_CNT"},
- {true, "DROP_PT_CNT"},
- {true, "DROP_MAC_ANTI_SPOOF_CNT"},
-
- {true, "DROP_IG_VFV_CNT"},
- {true, "DROP_IG_PRTV_CNT"},
- {true, "DROP_CNM_PFC_PAUSE_CNT"},
- {true, "DROP_TORUS_TC_CNT"},
- {true, "DROP_TORUS_LPBK_CNT"},
- {true, "PPP_HFS_STS"},
-
- {true, "PPP_MC_RSLT_STS"},
- {true, "PPP_P3U_STS"},
- {true, "PPP_RSLT_DESCR_STS"},
- {true, "PPP_UMV_STS_0"},
- {true, "PPP_UMV_STS_1"},
- {true, "PPP_VFV_STS"},
-
- {true, "PPP_GRO_KEY_CNT"},
- {true, "PPP_GRO_INFO_CNT"},
- {true, "PPP_GRO_DROP_CNT"},
- {true, "PPP_GRO_OUT_CNT"},
- {true, "PPP_GRO_KEY_MATCH_DATA_CNT"},
- {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"},
-
- {true, "PPP_GRO_INFO_MATCH_CNT"},
- {true, "PPP_GRO_FREE_ENTRY_CNT"},
- {true, "PPP_GRO_INNER_DFX_SIGNAL"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-
- {true, "GET_RX_PKT_CNT_L"},
- {true, "GET_RX_PKT_CNT_H"},
- {true, "GET_TX_PKT_CNT_L"},
- {true, "GET_TX_PKT_CNT_H"},
- {true, "SEND_UC_PRT2HOST_PKT_CNT_L"},
- {true, "SEND_UC_PRT2HOST_PKT_CNT_H"},
-
- {true, "SEND_UC_PRT2PRT_PKT_CNT_L"},
- {true, "SEND_UC_PRT2PRT_PKT_CNT_H"},
- {true, "SEND_UC_HOST2HOST_PKT_CNT_L"},
- {true, "SEND_UC_HOST2HOST_PKT_CNT_H"},
- {true, "SEND_UC_HOST2PRT_PKT_CNT_L"},
- {true, "SEND_UC_HOST2PRT_PKT_CNT_H"},
-
- {true, "SEND_MC_FROM_PRT_CNT_L"},
- {true, "SEND_MC_FROM_PRT_CNT_H"},
- {true, "SEND_MC_FROM_HOST_CNT_L"},
- {true, "SEND_MC_FROM_HOST_CNT_H"},
- {true, "SSU_MC_RD_CNT_L"},
- {true, "SSU_MC_RD_CNT_H"},
-
- {true, "SSU_MC_DROP_CNT_L"},
- {true, "SSU_MC_DROP_CNT_H"},
- {true, "SSU_MC_RD_PKT_CNT_L"},
- {true, "SSU_MC_RD_PKT_CNT_H"},
- {true, "PPP_MC_2HOST_PKT_CNT_L"},
- {true, "PPP_MC_2HOST_PKT_CNT_H"},
-
- {true, "PPP_MC_2PRT_PKT_CNT_L"},
- {true, "PPP_MC_2PRT_PKT_CNT_H"},
- {true, "NTSNOS_PKT_CNT_L"},
- {true, "NTSNOS_PKT_CNT_H"},
- {true, "NTUP_PKT_CNT_L"},
- {true, "NTUP_PKT_CNT_H"},
-
- {true, "NTLCL_PKT_CNT_L"},
- {true, "NTLCL_PKT_CNT_H"},
- {true, "NTTGT_PKT_CNT_L"},
- {true, "NTTGT_PKT_CNT_H"},
- {true, "RTNS_PKT_CNT_L"},
- {true, "RTNS_PKT_CNT_H"},
-
- {true, "RTLPBK_PKT_CNT_L"},
- {true, "RTLPBK_PKT_CNT_H"},
- {true, "NR_PKT_CNT_L"},
- {true, "NR_PKT_CNT_H"},
- {true, "RR_PKT_CNT_L"},
- {true, "RR_PKT_CNT_H"},
-
- {true, "MNG_TBL_HIT_CNT_L"},
- {true, "MNG_TBL_HIT_CNT_H"},
- {true, "FD_TBL_HIT_CNT_L"},
- {true, "FD_TBL_HIT_CNT_H"},
- {true, "FD_LKUP_CNT_L"},
- {true, "FD_LKUP_CNT_H"},
-
- {true, "BC_HIT_CNT_L"},
- {true, "BC_HIT_CNT_H"},
- {true, "UM_TBL_UC_HIT_CNT_L"},
- {true, "UM_TBL_UC_HIT_CNT_H"},
- {true, "UM_TBL_MC_HIT_CNT_L"},
- {true, "UM_TBL_MC_HIT_CNT_H"},
-
- {true, "UM_TBL_VMDQ1_HIT_CNT_L"},
- {true, "UM_TBL_VMDQ1_HIT_CNT_H"},
- {true, "MTA_TBL_HIT_CNT_L"},
- {true, "MTA_TBL_HIT_CNT_H"},
- {true, "FWD_BONDING_HIT_CNT_L"},
- {true, "FWD_BONDING_HIT_CNT_H"},
-
- {true, "PROMIS_TBL_HIT_CNT_L"},
- {true, "PROMIS_TBL_HIT_CNT_H"},
- {true, "GET_TUNL_PKT_CNT_L"},
- {true, "GET_TUNL_PKT_CNT_H"},
- {true, "GET_BMC_PKT_CNT_L"},
- {true, "GET_BMC_PKT_CNT_H"},
-
- {true, "SEND_UC_PRT2BMC_PKT_CNT_L"},
- {true, "SEND_UC_PRT2BMC_PKT_CNT_H"},
- {true, "SEND_UC_HOST2BMC_PKT_CNT_L"},
- {true, "SEND_UC_HOST2BMC_PKT_CNT_H"},
- {true, "SEND_UC_BMC2HOST_PKT_CNT_L"},
- {true, "SEND_UC_BMC2HOST_PKT_CNT_H"},
-
- {true, "SEND_UC_BMC2PRT_PKT_CNT_L"},
- {true, "SEND_UC_BMC2PRT_PKT_CNT_H"},
- {true, "PPP_MC_2BMC_PKT_CNT_L"},
- {true, "PPP_MC_2BMC_PKT_CNT_H"},
- {true, "VLAN_MIRR_CNT_L"},
- {true, "VLAN_MIRR_CNT_H"},
-
- {true, "IG_MIRR_CNT_L"},
- {true, "IG_MIRR_CNT_H"},
- {true, "EG_MIRR_CNT_L"},
- {true, "EG_MIRR_CNT_H"},
- {true, "RX_DEFAULT_HOST_HIT_CNT_L"},
- {true, "RX_DEFAULT_HOST_HIT_CNT_H"},
-
- {true, "LAN_PAIR_CNT_L"},
- {true, "LAN_PAIR_CNT_H"},
- {true, "UM_TBL_MC_HIT_PKT_CNT_L"},
- {true, "UM_TBL_MC_HIT_PKT_CNT_H"},
- {true, "MTA_TBL_HIT_PKT_CNT_L"},
- {true, "MTA_TBL_HIT_PKT_CNT_H"},
-
- {true, "PROMIS_TBL_HIT_PKT_CNT_L"},
- {true, "PROMIS_TBL_HIT_PKT_CNT_H"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
- {false, "Reserved"},
- {true, "FSM_DFX_ST0"},
- {true, "FSM_DFX_ST1"},
- {true, "FSM_DFX_ST2"},
- {true, "FIFO_DFX_ST0"},
- {true, "FIFO_DFX_ST1"},
-
- {true, "FIFO_DFX_ST2"},
- {true, "FIFO_DFX_ST3"},
- {true, "FIFO_DFX_ST4"},
- {true, "FIFO_DFX_ST5"},
- {true, "FIFO_DFX_ST6"},
- {true, "FIFO_DFX_ST7"},
-
- {true, "FIFO_DFX_ST8"},
- {true, "FIFO_DFX_ST9"},
- {true, "FIFO_DFX_ST10"},
- {true, "FIFO_DFX_ST11"},
- {true, "Q_CREDIT_VLD_0"},
- {true, "Q_CREDIT_VLD_1"},
-
- {true, "Q_CREDIT_VLD_2"},
- {true, "Q_CREDIT_VLD_3"},
- {true, "Q_CREDIT_VLD_4"},
- {true, "Q_CREDIT_VLD_5"},
- {true, "Q_CREDIT_VLD_6"},
- {true, "Q_CREDIT_VLD_7"},
-
- {true, "Q_CREDIT_VLD_8"},
- {true, "Q_CREDIT_VLD_9"},
- {true, "Q_CREDIT_VLD_10"},
- {true, "Q_CREDIT_VLD_11"},
- {true, "Q_CREDIT_VLD_12"},
- {true, "Q_CREDIT_VLD_13"},
-
- {true, "Q_CREDIT_VLD_14"},
- {true, "Q_CREDIT_VLD_15"},
- {true, "Q_CREDIT_VLD_16"},
- {true, "Q_CREDIT_VLD_17"},
- {true, "Q_CREDIT_VLD_18"},
- {true, "Q_CREDIT_VLD_19"},
-
- {true, "Q_CREDIT_VLD_20"},
- {true, "Q_CREDIT_VLD_21"},
- {true, "Q_CREDIT_VLD_22"},
- {true, "Q_CREDIT_VLD_23"},
- {true, "Q_CREDIT_VLD_24"},
- {true, "Q_CREDIT_VLD_25"},
-
- {true, "Q_CREDIT_VLD_26"},
- {true, "Q_CREDIT_VLD_27"},
- {true, "Q_CREDIT_VLD_28"},
- {true, "Q_CREDIT_VLD_29"},
- {true, "Q_CREDIT_VLD_30"},
- {true, "Q_CREDIT_VLD_31"},
-
- {true, "GRO_BD_SERR_CNT"},
- {true, "GRO_CONTEXT_SERR_CNT"},
- {true, "RX_STASH_CFG_SERR_CNT"},
- {true, "AXI_RD_FBD_SERR_CNT"},
- {true, "GRO_BD_MERR_CNT"},
- {true, "GRO_CONTEXT_MERR_CNT"},
-
- {true, "RX_STASH_CFG_MERR_CNT"},
- {true, "AXI_RD_FBD_MERR_CNT"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
- {false, "Reserved"},
-};
-
-static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
- {true, "q_num"},
- {true, "RCB_CFG_RX_RING_TAIL"},
- {true, "RCB_CFG_RX_RING_HEAD"},
- {true, "RCB_CFG_RX_RING_FBDNUM"},
- {true, "RCB_CFG_RX_RING_OFFSET"},
- {true, "RCB_CFG_RX_RING_FBDOFFSET"},
-
- {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"},
- {true, "RCB_CFG_TX_RING_TAIL"},
- {true, "RCB_CFG_TX_RING_HEAD"},
- {true, "RCB_CFG_TX_RING_FBDNUM"},
- {true, "RCB_CFG_TX_RING_OFFSET"},
- {true, "RCB_CFG_TX_RING_EBDNUM"},
-};
-
#define HCLGE_DBG_INFO_LEN 256
#define HCLGE_DBG_VLAN_FLTR_INFO_LEN 256
#define HCLGE_DBG_VLAN_OFFLOAD_INFO_LEN 512
@@ -771,4 +131,7 @@ struct hclge_dbg_vlan_cfg {
u8 pri_only2;
};
+int hclge_dbg_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc_src,
+ int index, int bd_num, enum hclge_opcode_type cmd);
+
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
index 9a939c0b217f..a1571c108678 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -5,6 +5,34 @@
#include "hclge_devlink.h"
+static int hclge_devlink_scc_info_get(struct devlink *devlink,
+ struct devlink_info_req *req)
+{
+ struct hclge_devlink_priv *priv = devlink_priv(devlink);
+ char scc_version[HCLGE_DEVLINK_FW_SCC_LEN];
+ struct hclge_dev *hdev = priv->hdev;
+ u32 scc_version_tmp;
+ int ret;
+
+ ret = hclge_query_scc_version(hdev, &scc_version_tmp);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get scc version, ret = %d\n", ret);
+ return ret;
+ }
+
+ snprintf(scc_version, sizeof(scc_version), "%lu.%lu.%lu.%lu",
+ hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE3_MASK,
+ HNAE3_SCC_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE2_MASK,
+ HNAE3_SCC_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE1_MASK,
+ HNAE3_SCC_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(scc_version_tmp, HNAE3_SCC_VERSION_BYTE0_MASK,
+ HNAE3_SCC_VERSION_BYTE0_SHIFT));
+ return devlink_info_version_running_put(req, "fw.scc", scc_version);
+}
+
static int hclge_devlink_info_get(struct devlink *devlink,
struct devlink_info_req *req,
struct netlink_ext_ack *extack)
@@ -13,6 +41,7 @@ static int hclge_devlink_info_get(struct devlink *devlink,
struct hclge_devlink_priv *priv = devlink_priv(devlink);
char version_str[HCLGE_DEVLINK_FW_STRING_LEN];
struct hclge_dev *hdev = priv->hdev;
+ int ret;
snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu",
hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
@@ -24,9 +53,18 @@ static int hclge_devlink_info_get(struct devlink *devlink,
hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
HNAE3_FW_VERSION_BYTE0_SHIFT));
- return devlink_info_version_running_put(req,
- DEVLINK_INFO_VERSION_GENERIC_FW,
- version_str);
+ ret = devlink_info_version_running_put(req,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ version_str);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to set running version of fw\n");
+ return ret;
+ }
+
+ if (hdev->pdev->revision > HNAE3_DEVICE_VERSION_V2)
+ ret = hclge_devlink_scc_info_get(devlink, req);
+
+ return ret;
}
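[Aside: with this change, devlink dev info on a V3 or newer device reports an extra running version, "fw.scc", alongside the generic "fw" entry; on V2 and older hardware the SCC query is skipped and only "fw" is shown.]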
static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
index 918be04507a5..148effa5ea89 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h
@@ -6,6 +6,8 @@
#include "hclge_main.h"
+#define HCLGE_DEVLINK_FW_SCC_LEN 32
+
struct hclge_devlink_priv {
struct hclge_dev *hdev;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index d63e114f93d0..cc7f46c0b35f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1198,6 +1198,425 @@ static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
}
};
+static const struct hclge_mod_reg_info hclge_ssu_reg_0_info[] = {
+ {
+ .reg_name = "SSU_BP_STATUS_0~5",
+ .reg_offset_group = { 5, 6, 7, 8, 9, 10},
+ .group_size = 6
+ }, {
+ .reg_name = "LO_PRI_UNICAST_CUR_CNT",
+ .reg_offset_group = {54},
+ .group_size = 1
+ }, {
+ .reg_name = "HI/LO_PRI_MULTICAST_CUR_CNT",
+ .reg_offset_group = {55, 56},
+ .group_size = 2
+ }, {
+ .reg_name = "SSU_MB_RD_RLT_DROP_CNT",
+ .reg_offset_group = {29},
+ .group_size = 1
+ }, {
+ .reg_name = "SSU_PPP_MAC_KEY_NUM",
+ .reg_offset_group = {31, 30},
+ .group_size = 2
+ }, {
+ .reg_name = "SSU_PPP_HOST_KEY_NUM",
+ .reg_offset_group = {33, 32},
+ .group_size = 2
+ }, {
+ .reg_name = "PPP_SSU_MAC/HOST_RLT_NUM",
+ .reg_offset_group = {35, 34, 37, 36},
+ .group_size = 4
+ }, {
+ .reg_name = "FULL/PART_DROP_NUM",
+ .reg_offset_group = {18, 19},
+ .group_size = 2
+ }, {
+ .reg_name = "PPP_KEY/RLT_DROP_NUM",
+ .reg_offset_group = {20, 21},
+ .group_size = 2
+ }, {
+ .reg_name = "NIC/ROC_L2_ERR_DROP_PKT_CNT",
+ .reg_offset_group = {48, 49},
+ .group_size = 2
+ }, {
+ .reg_name = "NIC/ROC_L2_ERR_DROP_PKT_CNT_RX",
+ .reg_offset_group = {50, 51},
+ .group_size = 2
+ },
+};
+
+static const struct hclge_mod_reg_info hclge_ssu_reg_1_info[] = {
+ {
+ .reg_name = "RX_PACKET_IN/OUT_CNT",
+ .reg_offset_group = {13, 12, 15, 14},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_IN/OUT_CNT",
+ .reg_offset_group = {17, 16, 19, 18},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC0_IN/OUT_CNT",
+ .reg_offset_group = {25, 24, 41, 40},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC1_IN/OUT_CNT",
+ .reg_offset_group = {27, 26, 43, 42},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC2_IN/OUT_CNT",
+ .reg_offset_group = {29, 28, 45, 44},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC3_IN/OUT_CNT",
+ .reg_offset_group = {31, 30, 47, 46},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC4_IN/OUT_CNT",
+ .reg_offset_group = {33, 32, 49, 48},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC5_IN/OUT_CNT",
+ .reg_offset_group = {35, 34, 51, 50},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC6_IN/OUT_CNT",
+ .reg_offset_group = {37, 36, 53, 52},
+ .group_size = 4
+ }, {
+ .reg_name = "RX_PACKET_TC7_IN/OUT_CNT",
+ .reg_offset_group = {39, 38, 55, 54},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC0_IN/OUT_CNT",
+ .reg_offset_group = {57, 56, 73, 72},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC1_IN/OUT_CNT",
+ .reg_offset_group = {59, 58, 75, 74},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC2_IN/OUT_CNT",
+ .reg_offset_group = {61, 60, 77, 76},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC3_IN/OUT_CNT",
+ .reg_offset_group = {63, 62, 79, 78},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC4_IN/OUT_CNT",
+ .reg_offset_group = {65, 64, 81, 80},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC5_IN/OUT_CNT",
+ .reg_offset_group = {67, 66, 83, 82},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC6_IN/OUT_CNT",
+ .reg_offset_group = {69, 68, 85, 84},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PACKET_TC7_IN/OUT_CNT",
+ .reg_offset_group = {71, 70, 87, 86},
+ .group_size = 4
+ }, {
+ .reg_name = "PACKET_TC0~3_CURR_BUFFER_CNT",
+ .reg_offset_group = {1, 2, 3, 4},
+ .group_size = 4
+ }, {
+ .reg_name = "PACKET_TC4~7_CURR_BUFFER_CNT",
+ .reg_offset_group = {5, 6, 7, 8},
+ .group_size = 4
+ }, {
+ .reg_name = "ROC_RX_PACKET_IN_CNT",
+ .reg_offset_group = {21, 20},
+ .group_size = 2
+ }, {
+ .reg_name = "ROC_TX_PACKET_OUT_CNT",
+ .reg_offset_group = {23, 22},
+ .group_size = 2
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_rpu_reg_0_info[] = {
+ {
+ .reg_name = "RPU_FSM_DFX_ST0/ST1_TNL",
+ .has_suffix = true,
+ .reg_offset_group = {1, 2},
+ .group_size = 2
+ }, {
+ .reg_name = "RPU_RX_PKT_DROP_CNT_TNL",
+ .has_suffix = true,
+ .reg_offset_group = {3},
+ .group_size = 1
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_rpu_reg_1_info[] = {
+ {
+ .reg_name = "FIFO_DFX_ST0_1_2_4",
+ .reg_offset_group = {1, 2, 3, 5},
+ .group_size = 4
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_igu_egu_reg_info[] = {
+ {
+ .reg_name = "IGU_RX_ERR_PKT",
+ .reg_offset_group = {1},
+ .group_size = 1
+ }, {
+ .reg_name = "IGU_RX_OUT_ALL_PKT",
+ .reg_offset_group = {29, 28},
+ .group_size = 2
+ }, {
+ .reg_name = "EGU_TX_OUT_ALL_PKT",
+ .reg_offset_group = {39, 38},
+ .group_size = 2
+ }, {
+ .reg_name = "EGU_TX_ERR_PKT",
+ .reg_offset_group = {5},
+ .group_size = 1
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_gen_reg_info_tnl[] = {
+ {
+ .reg_name = "SSU2RPU_TNL_WR_PKT_CNT_TNL",
+ .has_suffix = true,
+ .reg_offset_group = {1},
+ .group_size = 1
+ }, {
+ .reg_name = "RPU2HST_TNL_WR_PKT_CNT_TNL",
+ .has_suffix = true,
+ .reg_offset_group = {12},
+ .group_size = 1
+ }
+};
+
+static const struct hclge_mod_reg_info hclge_gen_reg_info[] = {
+ {
+ .reg_name = "SSU_OVERSIZE_DROP_CNT",
+ .reg_offset_group = {12},
+ .group_size = 1
+ }, {
+ .reg_name = "ROCE_RX_BYPASS_5NS_DROP_NUM",
+ .reg_offset_group = {13},
+ .group_size = 1
+ }, {
+ .reg_name = "RX_PKT_IN/OUT_ERR_CNT",
+ .reg_offset_group = {15, 14, 19, 18},
+ .group_size = 4
+ }, {
+ .reg_name = "TX_PKT_IN/OUT_ERR_CNT",
+ .reg_offset_group = {17, 16, 21, 20},
+ .group_size = 4
+ }, {
+ .reg_name = "ETS_TC_READY",
+ .reg_offset_group = {22},
+ .group_size = 1
+ }, {
+ .reg_name = "MIB_TX/RX_BAD_PKTS",
+ .reg_offset_group = {19, 18, 29, 28},
+ .group_size = 4
+ }, {
+ .reg_name = "MIB_TX/RX_GOOD_PKTS",
+ .reg_offset_group = {21, 20, 31, 30},
+ .group_size = 4
+ }, {
+ .reg_name = "MIB_TX/RX_TOTAL_PKTS",
+ .reg_offset_group = {23, 22, 33, 32},
+ .group_size = 4
+ }, {
+ .reg_name = "MIB_TX/RX_PAUSE_PKTS",
+ .reg_offset_group = {25, 24, 35, 34},
+ .group_size = 4
+ }, {
+ .reg_name = "MIB_TX_ERR_ALL_PKTS",
+ .reg_offset_group = {27, 26},
+ .group_size = 2
+ }, {
+ .reg_name = "MIB_RX_FCS_ERR_PKTS",
+ .reg_offset_group = {37, 36},
+ .group_size = 2
+ }, {
+ .reg_name = "IGU_EGU_AUTO_GATE_EN",
+ .reg_offset_group = {42},
+ .group_size = 1
+ }, {
+ .reg_name = "IGU_EGU_INT_SRC",
+ .reg_offset_group = {43},
+ .group_size = 1
+ }, {
+ .reg_name = "EGU_READY_NUM_CFG",
+ .reg_offset_group = {44},
+ .group_size = 1
+ }, {
+ .reg_name = "IGU_EGU_TNL_DFX",
+ .reg_offset_group = {45},
+ .group_size = 1
+ }, {
+ .reg_name = "TX_TNL_NOTE_PKT",
+ .reg_offset_group = {46},
+ .group_size = 1
+ }
+};
+
+static const struct hclge_mod_reg_common_msg hclge_ssu_reg_common_msg[] = {
+ {
+ .cmd = HCLGE_OPC_DFX_SSU_REG_0,
+ .result_regs = hclge_ssu_reg_0_info,
+ .bd_num = HCLGE_BD_NUM_SSU_REG_0,
+ .result_regs_size = ARRAY_SIZE(hclge_ssu_reg_0_info)
+ }, {
+ .cmd = HCLGE_OPC_DFX_SSU_REG_1,
+ .result_regs = hclge_ssu_reg_1_info,
+ .bd_num = HCLGE_BD_NUM_SSU_REG_1,
+ .result_regs_size = ARRAY_SIZE(hclge_ssu_reg_1_info)
+ }, {
+ .cmd = HCLGE_OPC_DFX_RPU_REG_0,
+ .result_regs = hclge_rpu_reg_0_info,
+ .bd_num = HCLGE_BD_NUM_RPU_REG_0,
+ .result_regs_size = ARRAY_SIZE(hclge_rpu_reg_0_info),
+ .need_para = true
+ }, {
+ .cmd = HCLGE_OPC_DFX_RPU_REG_1,
+ .result_regs = hclge_rpu_reg_1_info,
+ .bd_num = HCLGE_BD_NUM_RPU_REG_1,
+ .result_regs_size = ARRAY_SIZE(hclge_rpu_reg_1_info)
+ }, {
+ .cmd = HCLGE_OPC_DFX_IGU_EGU_REG,
+ .result_regs = hclge_igu_egu_reg_info,
+ .bd_num = HCLGE_BD_NUM_IGU_EGU_REG,
+ .result_regs_size = ARRAY_SIZE(hclge_igu_egu_reg_info)
+ }, {
+ .cmd = HCLGE_OPC_DFX_GEN_REG,
+ .result_regs = hclge_gen_reg_info_tnl,
+ .bd_num = HCLGE_BD_NUM_GEN_REG,
+ .result_regs_size = ARRAY_SIZE(hclge_gen_reg_info_tnl),
+ .need_para = true
+ }, {
+ .cmd = HCLGE_OPC_DFX_GEN_REG,
+ .result_regs = hclge_gen_reg_info,
+ .bd_num = HCLGE_BD_NUM_GEN_REG,
+ .result_regs_size = ARRAY_SIZE(hclge_gen_reg_info)
+ }
+};
+
+static int
+hclge_print_mod_reg_info(struct device *dev, struct hclge_desc *desc,
+ const struct hclge_mod_reg_info *reg_info, int size)
+{
+ int i, j, pos, actual_len;
+ u8 offset, bd_idx, index;
+ char *buf;
+
+ buf = kzalloc(HCLGE_MOD_REG_INFO_LEN_MAX, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < size; i++) {
+ actual_len = strlen(reg_info[i].reg_name) +
+ HCLGE_MOD_REG_EXTRA_LEN +
+ HCLGE_MOD_REG_VALUE_LEN * reg_info[i].group_size;
+ if (actual_len > HCLGE_MOD_REG_INFO_LEN_MAX) {
+ dev_info(dev, "length of reg(%s) is invalid, len=%d\n",
+ reg_info[i].reg_name, actual_len);
+ continue;
+ }
+
+ pos = scnprintf(buf, HCLGE_MOD_REG_INFO_LEN_MAX, "%s",
+ reg_info[i].reg_name);
+ if (reg_info[i].has_suffix)
+ pos += scnprintf(buf + pos,
+ HCLGE_MOD_REG_INFO_LEN_MAX - pos, "%u",
+ le32_to_cpu(desc->data[0]));
+ pos += scnprintf(buf + pos,
+ HCLGE_MOD_REG_INFO_LEN_MAX - pos,
+ ":");
+ for (j = 0; j < reg_info[i].group_size; j++) {
+ offset = reg_info[i].reg_offset_group[j];
+ index = offset % HCLGE_DESC_DATA_LEN;
+ bd_idx = offset / HCLGE_DESC_DATA_LEN;
+ pos += scnprintf(buf + pos,
+ HCLGE_MOD_REG_INFO_LEN_MAX - pos,
+ " %08x",
+ le32_to_cpu(desc[bd_idx].data[index]));
+ }
+ dev_info(dev, "%s\n", buf);
+ }
+
+ kfree(buf);
+ return 0;
+}
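/* Illustrative sketch (standalone C, not part of the change above): each
 * entry in reg_offset_group is a flat offset into the concatenated data
 * words of the command descriptors, and hclge_print_mod_reg_info() splits
 * it into a descriptor index plus a data-word index. This assumes
 * HCLGE_DESC_DATA_LEN is 6, as in the hns3 command headers.
 */
#include <stdio.h>

#define HCLGE_DESC_DATA_LEN 6 /* u32 data words per command descriptor */

static void locate_reg(unsigned int offset)
{
	unsigned int bd_idx = offset / HCLGE_DESC_DATA_LEN;
	unsigned int index = offset % HCLGE_DESC_DATA_LEN;

	printf("offset %2u -> desc[%u].data[%u]\n", offset, bd_idx, index);
}

int main(void)
{
	locate_reg(29);	/* SSU_MB_RD_RLT_DROP_CNT -> desc[4].data[5] */
	locate_reg(54);	/* LO_PRI_UNICAST_CUR_CNT -> desc[9].data[0] */
	return 0;
}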
+
+static bool hclge_err_mod_check_support_cmd(enum hclge_opcode_type opcode,
+ struct hclge_dev *hdev)
+{
+ if (opcode == HCLGE_OPC_DFX_GEN_REG &&
+ !hnae3_ae_dev_gen_reg_dfx_supported(hdev))
+ return false;
+ return true;
+}
+
+/* For each common msg, send the cmdq command to the IMP and print the
+ * resulting register info. If the command takes a parameter, loop over
+ * the parameter values and issue one request per value.
+ */
+static void
+hclge_query_reg_info(struct hclge_dev *hdev,
+ struct hclge_mod_reg_common_msg *msg, u32 loop_time,
+ u32 *loop_para)
+{
+ int desc_len, i, ret;
+
+ desc_len = msg->bd_num * sizeof(struct hclge_desc);
+ msg->desc = kzalloc(desc_len, GFP_KERNEL);
+ if (!msg->desc) {
+ dev_err(&hdev->pdev->dev, "failed to query reg info, ret=%d",
+ -ENOMEM);
+ return;
+ }
+
+ for (i = 0; i < loop_time; i++) {
+ ret = hclge_dbg_cmd_send(hdev, msg->desc, *loop_para,
+ msg->bd_num, msg->cmd);
+ loop_para++;
+ if (ret)
+ continue;
+ ret = hclge_print_mod_reg_info(&hdev->pdev->dev, msg->desc,
+ msg->result_regs,
+ msg->result_regs_size);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "failed to print mod reg info, ret=%d\n",
+ ret);
+ }
+
+ kfree(msg->desc);
+}
+
+static void hclge_query_reg_info_of_ssu(struct hclge_dev *hdev)
+{
+ u32 loop_para[HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE] = {0};
+ struct hclge_mod_reg_common_msg msg;
+ u8 i, j, num, loop_time;
+
+ num = ARRAY_SIZE(hclge_ssu_reg_common_msg);
+ for (i = 0; i < num; i++) {
+ msg = hclge_ssu_reg_common_msg[i];
+ if (!hclge_err_mod_check_support_cmd(msg.cmd, hdev))
+ continue;
+ loop_time = 1;
+ loop_para[0] = 0;
+ if (msg.need_para) {
+ loop_time = min(hdev->ae_dev->dev_specs.tnl_num,
+ HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE);
+ for (j = 0; j < loop_time; j++)
+ loop_para[j] = j + 1;
+ }
+ hclge_query_reg_info(hdev, &msg, loop_time, loop_para);
+ }
+}
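/* Illustrative sketch (standalone C, not part of the change above) of the
 * parameter fill performed by hclge_query_reg_info_of_ssu(): commands
 * without a parameter are sent once with para 0, while per-tunnel commands
 * are sent once per tunnel with paras 1..tnl_num, capped by the array
 * size. The tnl_num value below is an assumed example.
 */
#include <stdio.h>

#define PARA_MAX 8 /* mirrors HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE */

static unsigned int fill_paras(int need_para, unsigned int tnl_num,
			       unsigned int *paras)
{
	unsigned int loop_time = 1;
	unsigned int j;

	paras[0] = 0;
	if (need_para) {
		loop_time = tnl_num < PARA_MAX ? tnl_num : PARA_MAX;
		for (j = 0; j < loop_time; j++)
			paras[j] = j + 1; /* tunnel IDs are 1-based */
	}
	return loop_time;
}

int main(void)
{
	unsigned int paras[PARA_MAX], n, j;

	n = fill_paras(1, 3, paras); /* assumed tnl_num = 3 */
	for (j = 0; j < n; j++)
		printf("request %u: para %u\n", j, paras[j]);
	return 0;
}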
+
static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
{
.module_id = MODULE_NONE,
@@ -1210,7 +1629,8 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
.msg = "MODULE_GE"
}, {
.module_id = MODULE_IGU_EGU,
- .msg = "MODULE_IGU_EGU"
+ .msg = "MODULE_IGU_EGU",
+ .query_reg_info = hclge_query_reg_info_of_ssu
}, {
.module_id = MODULE_LGE,
.msg = "MODULE_LGE"
@@ -1231,7 +1651,8 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
.msg = "MODULE_RTC"
}, {
.module_id = MODULE_SSU,
- .msg = "MODULE_SSU"
+ .msg = "MODULE_SSU",
+ .query_reg_info = hclge_query_reg_info_of_ssu
}, {
.module_id = MODULE_TM,
.msg = "MODULE_TM"
@@ -2762,7 +3183,7 @@ void hclge_handle_occurred_error(struct hclge_dev *hdev)
}
static bool
-hclge_handle_error_type_reg_log(struct device *dev,
+hclge_handle_error_type_reg_log(struct hclge_dev *hdev,
struct hclge_mod_err_info *mod_info,
struct hclge_type_reg_err_info *type_reg_info)
{
@@ -2770,6 +3191,7 @@ hclge_handle_error_type_reg_log(struct device *dev,
#define HCLGE_ERR_TYPE_IS_RAS_OFFSET 7
u8 mod_id, total_module, type_id, total_type, i, is_ras;
+ struct device *dev = &hdev->pdev->dev;
u8 index_module = MODULE_NONE;
u8 index_type = NONE_ERROR;
bool cause_by_vf = false;
@@ -2810,6 +3232,9 @@ hclge_handle_error_type_reg_log(struct device *dev,
for (i = 0; i < type_reg_info->reg_num; i++)
dev_err(dev, "0x%08x\n", type_reg_info->hclge_reg[i]);
+ if (hclge_hw_module_id_st[index_module].query_reg_info)
+ hclge_hw_module_id_st[index_module].query_reg_info(hdev);
+
return cause_by_vf;
}
@@ -2850,7 +3275,7 @@ static void hclge_handle_error_module_log(struct hnae3_ae_dev *ae_dev,
type_reg_info = (struct hclge_type_reg_err_info *)
&buf[offset++];
- if (hclge_handle_error_type_reg_log(dev, mod_info,
+ if (hclge_handle_error_type_reg_log(hdev, mod_info,
type_reg_info))
cause_by_vf = true;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 68b738affa66..45a783a50643 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -5,6 +5,7 @@
#define __HCLGE_ERR_H
#include "hclge_main.h"
+#include "hclge_debugfs.h"
#include "hnae3.h"
#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10
@@ -115,6 +116,18 @@
#define HCLGE_REG_NUM_MAX 256
#define HCLGE_DESC_NO_DATA_LEN 8
+#define HCLGE_BD_NUM_SSU_REG_0 10
+#define HCLGE_BD_NUM_SSU_REG_1 15
+#define HCLGE_BD_NUM_RPU_REG_0 1
+#define HCLGE_BD_NUM_RPU_REG_1 2
+#define HCLGE_BD_NUM_IGU_EGU_REG 9
+#define HCLGE_BD_NUM_GEN_REG 8
+#define HCLGE_MOD_REG_INFO_LEN_MAX 256
+#define HCLGE_MOD_REG_EXTRA_LEN 11
+#define HCLGE_MOD_REG_VALUE_LEN 9
+#define HCLGE_MOD_REG_GROUP_MAX_SIZE 6
+#define HCLGE_MOD_MSG_PARA_ARRAY_MAX_SIZE 8
+
enum hclge_err_int_type {
HCLGE_ERR_INT_MSIX = 0,
HCLGE_ERR_INT_RAS_CE = 1,
@@ -191,6 +204,7 @@ struct hclge_hw_error {
struct hclge_hw_module_id {
enum hclge_mod_name_list module_id;
const char *msg;
+ void (*query_reg_info)(struct hclge_dev *hdev);
};
struct hclge_hw_type_id {
@@ -218,6 +232,28 @@ struct hclge_type_reg_err_info {
u32 hclge_reg[HCLGE_REG_NUM_MAX];
};
+struct hclge_mod_reg_info {
+ const char *reg_name;
+ bool has_suffix; /* append a numeric suffix (e.g. tunnel id) to the register name */
+ /* the positions of reg values in hclge_desc.data */
+ u8 reg_offset_group[HCLGE_MOD_REG_GROUP_MAX_SIZE];
+ u8 group_size;
+};
+
+/* This structure defines the cmdq command used to query the hardware
+ * module debug registers.
+ */
+struct hclge_mod_reg_common_msg {
+ enum hclge_opcode_type cmd;
+ struct hclge_desc *desc;
+ u8 bd_num; /* number of hclge_desc BDs used */
+ bool need_para; /* whether this cmdq command takes a parameter */
+
+ /* the regs to print */
+ const struct hclge_mod_reg_info *result_regs;
+ u16 result_regs_size;
+};
+
int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state);
int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ff6a2ed23ddb..6c33195a1168 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -27,6 +27,8 @@
#include "hclge_devlink.h"
#include "hclge_comm_cmd.h"
+#include "hclge_trace.h"
+
#define HCLGE_NAME "hclge"
#define HCLGE_BUF_SIZE_UNIT 256U
@@ -391,6 +393,48 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
return hclge_comm_cmd_send(&hw->hw, desc, num);
}
+static void hclge_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ trace_hclge_pf_cmd_send(hw, desc, 0, num);
+
+ if (!is_special) {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_cmd_send(hw, &desc[i], i, num);
+ } else {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_special_cmd_send(hw, (__le32 *)&desc[i],
+ i, num);
+ }
+}
+
+static void hclge_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+ return;
+
+ trace_hclge_pf_cmd_get(hw, desc, 0, num);
+
+ if (!is_special) {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_cmd_get(hw, &desc[i], i, num);
+ } else {
+ for (i = 1; i < num; i++)
+ trace_hclge_pf_special_cmd_get(hw, (__le32 *)&desc[i],
+ i, num);
+ }
+}
+
+static const struct hclge_comm_cmq_ops hclge_cmq_ops = {
+ .trace_cmd_send = hclge_trace_cmd_send,
+ .trace_cmd_get = hclge_trace_cmd_get,
+};
+
static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
{
#define HCLGE_MAC_CMD_NUM 21
@@ -1537,6 +1581,9 @@ static int hclge_configure(struct hclge_dev *hdev)
cfg.default_speed, ret);
return ret;
}
+ hdev->hw.mac.req_speed = hdev->hw.mac.speed;
+ hdev->hw.mac.req_autoneg = AUTONEG_ENABLE;
+ hdev->hw.mac.req_duplex = DUPLEX_FULL;
hclge_parse_link_mode(hdev, cfg.speed_ability);
@@ -1766,7 +1813,8 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
nic->pdev = hdev->pdev;
nic->ae_algo = &ae_algo;
- nic->numa_node_mask = hdev->numa_node_mask;
+ bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
+ MAX_NUMNODES);
nic->kinfo.io_base = hdev->hw.hw.io_base;
ret = hclge_knic_setup(vport, num_tqps,
@@ -2458,7 +2506,8 @@ static int hclge_init_roce_base_info(struct hclge_vport *vport)
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
- roce->numa_node_mask = nic->numa_node_mask;
+ bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
+ MAX_NUMNODES);
return 0;
}
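/* Sketch (not part of the change above): numa_node_mask grew from a u32
 * to a nodemask_t, a struct wrapping a MAX_NUMNODES-bit bitmap in its
 * .bits member, so copies go through the bitmap helpers rather than plain
 * integer assignment. The helper name is illustrative.
 */
static void example_copy_numa_mask(nodemask_t *dst, const nodemask_t *src)
{
	bitmap_copy(dst->bits, src->bits, MAX_NUMNODES);
}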
@@ -2604,8 +2653,17 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
+
+ if (ret)
+ return ret;
- return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
+ hdev->hw.mac.req_speed = speed;
+ hdev->hw.mac.req_duplex = duplex;
+
+ return 0;
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2907,17 +2965,20 @@ static int hclge_mac_init(struct hclge_dev *hdev)
if (!test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
- ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
- hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
- if (ret)
- return ret;
-
if (hdev->hw.mac.support_autoneg) {
ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
if (ret)
return ret;
}
+ if (!hdev->hw.mac.autoneg) {
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.req_speed,
+ hdev->hw.mac.req_duplex,
+ hdev->hw.mac.lane_num);
+ if (ret)
+ return ret;
+ }
+
mac->link = 0;
if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
@@ -3037,9 +3098,7 @@ static void hclge_push_link_status(struct hclge_dev *hdev)
static void hclge_update_link_status(struct hclge_dev *hdev)
{
- struct hnae3_handle *rhandle = &hdev->vport[0].roce;
struct hnae3_handle *handle = &hdev->vport[0].nic;
- struct hnae3_client *rclient = hdev->roce_client;
struct hnae3_client *client = hdev->nic_client;
int state;
int ret;
@@ -3063,8 +3122,15 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
client->ops->link_status_change(handle, state);
hclge_config_mac_tnl_int(hdev, state);
- if (rclient && rclient->ops->link_status_change)
- rclient->ops->link_status_change(rhandle, state);
+
+ if (test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state)) {
+ struct hnae3_handle *rhandle = &hdev->vport[0].roce;
+ struct hnae3_client *rclient = hdev->roce_client;
+
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle,
+ state);
+ }
hclge_push_link_status(hdev);
}
@@ -3342,9 +3408,9 @@ hclge_set_phy_link_ksettings(struct hnae3_handle *handle,
return ret;
}
- hdev->hw.mac.autoneg = cmd->base.autoneg;
- hdev->hw.mac.speed = cmd->base.speed;
- hdev->hw.mac.duplex = cmd->base.duplex;
+ hdev->hw.mac.req_autoneg = cmd->base.autoneg;
+ hdev->hw.mac.req_speed = cmd->base.speed;
+ hdev->hw.mac.req_duplex = cmd->base.duplex;
linkmode_copy(hdev->hw.mac.advertising, cmd->link_modes.advertising);
return 0;
@@ -3377,9 +3443,9 @@ static int hclge_tp_port_init(struct hclge_dev *hdev)
if (!hnae3_dev_phy_imp_supported(hdev))
return 0;
- cmd.base.autoneg = hdev->hw.mac.autoneg;
- cmd.base.speed = hdev->hw.mac.speed;
- cmd.base.duplex = hdev->hw.mac.duplex;
+ cmd.base.autoneg = hdev->hw.mac.req_autoneg;
+ cmd.base.speed = hdev->hw.mac.req_speed;
+ cmd.base.duplex = hdev->hw.mac.req_duplex;
linkmode_copy(cmd.link_modes.advertising, hdev->hw.mac.advertising);
return hclge_set_phy_link_ksettings(&hdev->vport->nic, &cmd);
@@ -7178,8 +7244,9 @@ static void hclge_get_cls_key_vlan(const struct flow_rule *flow,
}
}
-static void hclge_get_cls_key_ip(const struct flow_rule *flow,
- struct hclge_fd_rule *rule)
+static int hclge_get_cls_key_ip(const struct flow_rule *flow,
+ struct hclge_fd_rule *rule,
+ struct netlink_ext_ack *extack)
{
u16 addr_type = 0;
@@ -7188,6 +7255,9 @@ static void hclge_get_cls_key_ip(const struct flow_rule *flow,
flow_rule_match_control(flow, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags, extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -7216,6 +7286,8 @@ static void hclge_get_cls_key_ip(const struct flow_rule *flow,
rule->unused_tuple |= BIT(INNER_SRC_IP);
rule->unused_tuple |= BIT(INNER_DST_IP);
}
+
+ return 0;
}
static void hclge_get_cls_key_port(const struct flow_rule *flow,
@@ -7241,7 +7313,9 @@ static int hclge_parse_cls_flower(struct hclge_dev *hdev,
struct hclge_fd_rule *rule)
{
struct flow_rule *flow = flow_cls_offload_flow_rule(cls_flower);
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
struct flow_dissector *dissector = flow->match.dissector;
+ int ret;
if (dissector->used_keys &
~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
@@ -7259,7 +7333,11 @@ static int hclge_parse_cls_flower(struct hclge_dev *hdev,
hclge_get_cls_key_basic(flow, rule);
hclge_get_cls_key_mac(flow, rule);
hclge_get_cls_key_vlan(flow, rule);
- hclge_get_cls_key_ip(flow, rule);
+
+ ret = hclge_get_cls_key_ip(flow, rule, extack);
+ if (ret)
+ return ret;
+
hclge_get_cls_key_port(flow, rule);
return 0;
@@ -7952,8 +8030,7 @@ static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
/* Set the DOWN flag here to disable link updating */
set_bit(HCLGE_STATE_DOWN, &hdev->state);
- /* flush memory to make sure DOWN is seen by service task */
- smp_mb__before_atomic();
+ smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
hclge_flush_link_update(hdev);
}
}
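/* Generic sketch (not part of the change above): set_bit() is atomic but
 * does not imply a full memory barrier on every architecture, so the
 * barrier must sit after the bit operation for the DOWN flag to be
 * published before the flush that expects the service task to see it.
 */
static void mark_down_and_flush(struct hclge_dev *hdev)
{
	set_bit(HCLGE_STATE_DOWN, &hdev->state);
	smp_mb__after_atomic();	/* publish DOWN before flushing */
	hclge_flush_link_update(hdev);
}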
@@ -9906,6 +9983,7 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
static int hclge_init_vlan_filter(struct hclge_dev *hdev)
{
struct hclge_vport *vport;
+ bool enable = true;
int ret;
int i;
@@ -9925,8 +10003,12 @@ static int hclge_init_vlan_filter(struct hclge_dev *hdev)
vport->cur_vlan_fltr_en = true;
}
+ if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, hdev->ae_dev->caps) &&
+ !test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, hdev->ae_dev->caps))
+ enable = false;
+
return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
- HCLGE_FILTER_FE_INGRESS, true, 0);
+ HCLGE_FILTER_FE_INGRESS, enable, 0);
}
static int hclge_init_vlan_type(struct hclge_dev *hdev)
@@ -10839,6 +10921,24 @@ static u32 hclge_get_fw_version(struct hnae3_handle *handle)
return hdev->fw_version;
}
+int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version)
+{
+ struct hclge_comm_query_scc_cmd *resp;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_SCC_VER, 1);
+ resp = (struct hclge_comm_query_scc_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ *scc_version = le32_to_cpu(resp->scc_version);
+
+ return 0;
+}
+
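/* Hypothetical caller (sketch, not part of the change above) showing how
 * the new helper would typically be consumed; hclge_log_scc_version() is
 * an illustrative name, not an existing function.
 */
static void hclge_log_scc_version(struct hclge_dev *hdev)
{
	u32 scc_version;
	int ret;

	ret = hclge_query_scc_version(hdev, &scc_version);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query SCC version, ret = %d\n", ret);
		return;
	}

	dev_info(&hdev->pdev->dev, "SCC version: 0x%08x\n", scc_version);
}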
static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
{
struct phy_device *phydev = hdev->hw.mac.phydev;
@@ -11236,6 +11336,12 @@ clear_roce:
return ret;
}
+static bool hclge_uninit_need_wait(struct hclge_dev *hdev)
+{
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+}
+
static void hclge_uninit_client_instance(struct hnae3_client *client,
struct hnae3_ae_dev *ae_dev)
{
@@ -11244,7 +11350,7 @@ static void hclge_uninit_client_instance(struct hnae3_client *client,
if (hdev->roce_client) {
clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
- while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ while (hclge_uninit_need_wait(hdev))
msleep(HCLGE_WAIT_RESET_DONE);
hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
@@ -11350,7 +11456,7 @@ static void hclge_pci_uninit(struct hclge_dev *hdev)
pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_free_irq_vectors(pdev);
- pci_release_mem_regions(pdev);
+ pci_release_regions(pdev);
pci_disable_device(pdev);
}
@@ -11422,8 +11528,8 @@ static void hclge_reset_done(struct hnae3_ae_dev *ae_dev)
dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
hdev->reset_type = HNAE3_NONE_RESET;
- clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
+ if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ up(&hdev->reset_sem);
}
static void hclge_clear_resetting_state(struct hclge_dev *hdev)
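/* Generic sketch (not part of the change above) of the race that the
 * test_and_clear_bit() form closes: if two contexts could race to finish
 * the same reset, a plain clear_bit() + up() would release the semaphore
 * twice, while the atomic clear-and-test guarantees at most one up() per
 * down().
 */
static void reset_finish(unsigned long *state, struct semaphore *sem)
{
	if (test_and_clear_bit(HCLGE_STATE_RST_HANDLING, state))
		up(sem);	/* only the context that cleared the bit */
}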
@@ -11622,18 +11728,13 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto out;
- ret = hclge_devlink_init(hdev);
- if (ret)
- goto err_pci_uninit;
-
- devl_lock(hdev->devlink);
-
/* Firmware command queue initialize */
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
- goto err_devlink_uninit;
+ goto err_pci_uninit;
/* Firmware command initialize */
+ hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclge_cmq_ops);
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw, &hdev->fw_version,
true, hdev->reset_pending);
if (ret)
@@ -11759,7 +11860,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
ret = hclge_update_port_info(hdev);
if (ret)
- goto err_mdiobus_unreg;
+ goto err_ptp_uninit;
INIT_KFIFO(hdev->mac_tnl_log);
@@ -11799,6 +11900,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
dev_warn(&pdev->dev,
"failed to wake on lan init, ret = %d\n", ret);
+ ret = hclge_devlink_init(hdev);
+ if (ret)
+ goto err_ptp_uninit;
+
hclge_state_init(hdev);
hdev->last_reset_time = jiffies;
@@ -11806,10 +11911,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
HCLGE_DRIVER_NAME);
hclge_task_schedule(hdev, round_jiffies_relative(HZ));
-
- devl_unlock(hdev->devlink);
return 0;
+err_ptp_uninit:
+ hclge_ptp_uninit(hdev);
err_mdiobus_unreg:
if (hdev->hw.mac.phydev)
mdiobus_unregister(hdev->hw.mac.mdio_bus);
@@ -11819,9 +11924,6 @@ err_msi_uninit:
pci_free_irq_vectors(pdev);
err_cmd_uninit:
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
-err_devlink_uninit:
- devl_unlock(hdev->devlink);
- hclge_devlink_uninit(hdev);
err_pci_uninit:
pcim_iounmap(pdev, hdev->hw.hw.io_base);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index e821dd2f1528..b5178b0f88b3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -279,11 +279,14 @@ struct hclge_mac {
u8 media_type; /* port media type, e.g. fibre/copper/backplane */
u8 mac_addr[ETH_ALEN];
u8 autoneg;
+ u8 req_autoneg;
u8 duplex;
+ u8 req_duplex;
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
u8 lane_num;
u32 speed;
+ u32 req_speed;
u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
@@ -891,7 +894,7 @@ struct hclge_dev {
u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
u16 num_alloc_vport; /* Num vports this driver supports */
- u32 numa_node_mask;
+ nodemask_t numa_node_mask;
u16 rx_buf_len;
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
@@ -1169,4 +1172,5 @@ int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en);
int hclge_mac_update_stats(struct hclge_dev *hdev);
struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf);
int hclge_inform_vf_reset(struct hclge_vport *vport, u16 reset_type);
+int hclge_query_scc_version(struct hclge_dev *hdev, u32 *scc_version);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index d4a0e0be7a72..59c863306657 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -1077,12 +1077,13 @@ static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
hdev = param->vport->back;
cmd_func = hclge_mbx_ops_list[param->req->msg.code];
- if (cmd_func)
- ret = cmd_func(param);
- else
+ if (!cmd_func) {
dev_err(&hdev->pdev->dev,
"un-supported mailbox message, code = %u\n",
param->req->msg.code);
+ return;
+ }
+ ret = cmd_func(param);
/* PF driver should not reply IMP */
if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 85fb11de43a1..80079657afeb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -191,6 +191,9 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
+ hdev->hw.mac.req_speed = (u32)speed;
+ hdev->hw.mac.req_duplex = (u8)duplex;
+
ret = hclge_cfg_flowctrl(hdev);
if (ret)
netdev_err(netdev, "failed to configure flow control.\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
index 507d7ce26d83..5fff8ed388f8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c
@@ -378,7 +378,7 @@ int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr)
}
int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
index bbee74cd8404..63483636c074 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h
@@ -138,6 +138,6 @@ int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr);
int hclge_ptp_init(struct hclge_dev *hdev);
void hclge_ptp_uninit(struct hclge_dev *hdev);
int hclge_ptp_get_ts_info(struct hnae3_handle *handle,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
index f3cd5a376eca..7103cf04bffc 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -10,6 +10,7 @@
#include <linux/tracepoint.h>
+#define PF_DESC_LEN (sizeof(struct hclge_desc) / sizeof(u32))
#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
@@ -32,8 +33,8 @@ TRACE_EVENT(hclge_pf_mbx_get,
__entry->vfid = req->mbx_src_vfid;
__entry->code = req->msg.code;
__entry->subcode = req->msg.subcode;
- __assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
+ __assign_str(pciname);
+ __assign_str(devname);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_vf_to_pf_cmd));
),
@@ -63,8 +64,8 @@ TRACE_EVENT(hclge_pf_mbx_send,
TP_fast_assign(
__entry->vfid = req->dest_vfid;
__entry->code = le16_to_cpu(req->msg.code);
- __assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, hdev->vport[0].nic.kinfo.netdev->name);
+ __assign_str(pciname);
+ __assign_str(devname);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_pf_to_vf_cmd));
),
@@ -77,6 +78,99 @@ TRACE_EVENT(hclge_pf_mbx_send,
)
);
+DECLARE_EVENT_CLASS(hclge_pf_cmd_template,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num),
+
+ TP_STRUCT__entry(__field(u16, opcode)
+ __field(u16, flag)
+ __field(u16, retval)
+ __field(u16, rsv)
+ __field(int, index)
+ __field(int, num)
+ __string(pciname, pci_name(hw->cmq.csq.pdev))
+ __array(u32, data, HCLGE_DESC_DATA_LEN)),
+
+ TP_fast_assign(int i;
+ __entry->opcode = le16_to_cpu(desc->opcode);
+ __entry->flag = le16_to_cpu(desc->flag);
+ __entry->retval = le16_to_cpu(desc->retval);
+ __entry->rsv = le16_to_cpu(desc->rsv);
+ __entry->index = index;
+ __entry->num = num;
+ __assign_str(pciname);
+ for (i = 0; i < HCLGE_DESC_DATA_LEN; i++)
+ __entry->data[i] = le32_to_cpu(desc->data[i]);),
+
+ TP_printk("%s opcode:0x%04x %d-%d flag:0x%04x retval:0x%04x rsv:0x%04x data:%s",
+ __get_str(pciname), __entry->opcode,
+ __entry->index, __entry->num,
+ __entry->flag, __entry->retval, __entry->rsv,
+ __print_array(__entry->data,
+ HCLGE_DESC_DATA_LEN, sizeof(u32)))
+);
+
+DEFINE_EVENT(hclge_pf_cmd_template, hclge_pf_cmd_send,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num)
+);
+
+DEFINE_EVENT(hclge_pf_cmd_template, hclge_pf_cmd_get,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num)
+);
+
+DECLARE_EVENT_CLASS(hclge_pf_special_cmd_template,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ __le32 *data,
+ int index,
+ int num),
+ TP_ARGS(hw, data, index, num),
+
+ TP_STRUCT__entry(__field(int, index)
+ __field(int, num)
+ __string(pciname, pci_name(hw->cmq.csq.pdev))
+ __array(u32, data, PF_DESC_LEN)),
+
+ TP_fast_assign(int i;
+ __entry->index = index;
+ __entry->num = num;
+ __assign_str(pciname);
+ for (i = 0; i < PF_DESC_LEN; i++)
+ __entry->data[i] = le32_to_cpu(data[i]);
+ ),
+
+ TP_printk("%s %d-%d data:%s",
+ __get_str(pciname),
+ __entry->index, __entry->num,
+ __print_array(__entry->data,
+ PF_DESC_LEN, sizeof(u32)))
+);
+
+DEFINE_EVENT(hclge_pf_special_cmd_template, hclge_pf_special_cmd_send,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ __le32 *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num));
+
+DEFINE_EVENT(hclge_pf_special_cmd_template, hclge_pf_special_cmd_get,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ __le32 *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num)
+);
+
#endif /* _HCLGE_TRACE_H_ */
/* This must be outside ifdef _HCLGE_TRACE_H */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 0aa9beefd1c7..094a7c7b5592 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -11,6 +11,7 @@
#include "hnae3.h"
#include "hclgevf_devlink.h"
#include "hclge_comm_rss.h"
+#include "hclgevf_trace.h"
#define HCLGEVF_NAME "hclgevf"
@@ -47,6 +48,42 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclge_desc *desc, int num)
return hclge_comm_cmd_send(&hw->hw, desc, num);
}
+static void hclgevf_trace_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ trace_hclge_vf_cmd_send(hw, desc, 0, num);
+
+ if (is_special)
+ return;
+
+ for (i = 1; i < num; i++)
+ trace_hclge_vf_cmd_send(hw, &desc[i], i, num);
+}
+
+static void hclgevf_trace_cmd_get(struct hclge_comm_hw *hw, struct hclge_desc *desc,
+ int num, bool is_special)
+{
+ int i;
+
+ if (!HCLGE_COMM_SEND_SYNC(le16_to_cpu(desc->flag)))
+ return;
+
+ trace_hclge_vf_cmd_get(hw, desc, 0, num);
+
+ if (is_special)
+ return;
+
+ for (i = 1; i < num; i++)
+ trace_hclge_vf_cmd_get(hw, &desc[i], i, num);
+}
+
+static const struct hclge_comm_cmq_ops hclgevf_cmq_ops = {
+ .trace_cmd_send = hclgevf_trace_cmd_send,
+ .trace_cmd_get = hclgevf_trace_cmd_get,
+};
+
void hclgevf_arq_init(struct hclgevf_dev *hdev)
{
struct hclge_comm_cmq *cmdq = &hdev->hw.hw.cmq;
@@ -412,7 +449,8 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
nic->ae_algo = &ae_algovf;
nic->pdev = hdev->pdev;
- nic->numa_node_mask = hdev->numa_node_mask;
+ bitmap_copy(nic->numa_node_mask.bits, hdev->numa_node_mask.bits,
+ MAX_NUMNODES);
nic->flags |= HNAE3_SUPPORT_VF;
nic->kinfo.io_base = hdev->hw.hw.io_base;
@@ -1709,8 +1747,8 @@ static void hclgevf_reset_done(struct hnae3_ae_dev *ae_dev)
ret);
hdev->reset_type = HNAE3_NONE_RESET;
- clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
- up(&hdev->reset_sem);
+ if (test_and_clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ up(&hdev->reset_sem);
}
static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
@@ -2082,8 +2120,8 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
roce->pdev = nic->pdev;
roce->ae_algo = nic->ae_algo;
- roce->numa_node_mask = nic->numa_node_mask;
-
+ bitmap_copy(roce->numa_node_mask.bits, nic->numa_node_mask.bits,
+ MAX_NUMNODES);
return 0;
}
@@ -2180,8 +2218,7 @@ static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
} else {
set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
- /* flush memory to make sure DOWN is seen by service task */
- smp_mb__before_atomic();
+ smp_mb__after_atomic(); /* flush memory to make sure DOWN is seen by service task */
hclgevf_flush_link_update(hdev);
}
}
@@ -2796,6 +2833,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
}
hclgevf_arq_init(hdev);
+
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
&hdev->fw_version, false,
hdev->reset_pending);
@@ -2845,15 +2883,13 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
if (ret)
return ret;
- ret = hclgevf_devlink_init(hdev);
- if (ret)
- goto err_devlink_init;
-
ret = hclge_comm_cmd_queue_init(hdev->pdev, &hdev->hw.hw);
if (ret)
goto err_cmd_queue_init;
hclgevf_arq_init(hdev);
+
+ hclge_comm_cmd_init_ops(&hdev->hw.hw, &hclgevf_cmq_ops);
ret = hclge_comm_cmd_init(hdev->ae_dev, &hdev->hw.hw,
&hdev->fw_version, false,
hdev->reset_pending);
@@ -2941,6 +2977,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_init_rxd_adv_layout(hdev);
+ ret = hclgevf_devlink_init(hdev);
+ if (ret)
+ goto err_config;
+
set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
hdev->last_reset_time = jiffies;
@@ -2960,8 +3000,6 @@ err_misc_irq_init:
err_cmd_init:
hclge_comm_cmd_uninit(hdev->ae_dev, &hdev->hw.hw);
err_cmd_queue_init:
- hclgevf_devlink_uninit(hdev);
-err_devlink_init:
hclgevf_pci_uninit(hdev);
clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
return ret;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index a73f2bf3a56a..cccef3228461 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -236,7 +236,7 @@ struct hclgevf_dev {
u16 rss_size_max; /* HW defined max RSS task queue */
u16 num_alloc_vport; /* num vports this driver supports */
- u32 numa_node_mask;
+ nodemask_t numa_node_mask;
u16 rx_buf_len;
u16 num_tx_desc; /* desc num of per tx queue */
u16 num_rx_desc; /* desc num of per rx queue */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
index b259e95dd53c..66b084309c91 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -30,8 +30,8 @@ TRACE_EVENT(hclge_vf_mbx_get,
TP_fast_assign(
__entry->vfid = req->dest_vfid;
__entry->code = le16_to_cpu(req->msg.code);
- __assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, hdev->nic.kinfo.netdev->name);
+ __assign_str(pciname);
+ __assign_str(devname);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_pf_to_vf_cmd));
),
@@ -63,8 +63,8 @@ TRACE_EVENT(hclge_vf_mbx_send,
__entry->vfid = req->mbx_src_vfid;
__entry->code = req->msg.code;
__entry->subcode = req->msg.subcode;
- __assign_str(pciname, pci_name(hdev->pdev));
- __assign_str(devname, hdev->nic.kinfo.netdev->name);
+ __assign_str(pciname);
+ __assign_str(devname);
memcpy(__entry->mbx_data, req,
sizeof(struct hclge_mbx_vf_to_pf_cmd));
),
@@ -77,6 +77,56 @@ TRACE_EVENT(hclge_vf_mbx_send,
)
);
+DECLARE_EVENT_CLASS(hclge_vf_cmd_template,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+
+ TP_ARGS(hw, desc, index, num),
+
+ TP_STRUCT__entry(__field(u16, opcode)
+ __field(u16, flag)
+ __field(u16, retval)
+ __field(u16, rsv)
+ __field(int, index)
+ __field(int, num)
+ __string(pciname, pci_name(hw->cmq.csq.pdev))
+ __array(u32, data, HCLGE_DESC_DATA_LEN)),
+
+ TP_fast_assign(int i;
+ __entry->opcode = le16_to_cpu(desc->opcode);
+ __entry->flag = le16_to_cpu(desc->flag);
+ __entry->retval = le16_to_cpu(desc->retval);
+ __entry->rsv = le16_to_cpu(desc->rsv);
+ __entry->index = index;
+ __entry->num = num;
+ __assign_str(pciname);
+ for (i = 0; i < HCLGE_DESC_DATA_LEN; i++)
+ __entry->data[i] = le32_to_cpu(desc->data[i]);),
+
+ TP_printk("%s opcode:0x%04x %d-%d flag:0x%04x retval:0x%04x rsv:0x%04x data:%s",
+ __get_str(pciname), __entry->opcode,
+ __entry->index, __entry->num,
+ __entry->flag, __entry->retval, __entry->rsv,
+ __print_array(__entry->data,
+ HCLGE_DESC_DATA_LEN, sizeof(u32)))
+);
+
+DEFINE_EVENT(hclge_vf_cmd_template, hclge_vf_cmd_send,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num));
+
+DEFINE_EVENT(hclge_vf_cmd_template, hclge_vf_cmd_get,
+ TP_PROTO(struct hclge_comm_hw *hw,
+ struct hclge_desc *desc,
+ int index,
+ int num),
+ TP_ARGS(hw, desc, index, num));
+
#endif /* _HCLGEVF_TRACE_H_ */
/* This must be outside ifdef _HCLGEVF_TRACE_H */
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 499c657d37a9..890f213da8d1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -581,7 +581,7 @@ static int hinic_change_mtu(struct net_device *netdev, int new_mtu)
if (err)
netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n");
else
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return err;
}
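/* Generic sketch (not part of the change above): netdev->mtu may be read
 * without the writer's lock, so the store uses WRITE_ONCE() to prevent
 * compiler tearing and re-ordering; a lockless reader pairs it with
 * READ_ONCE(). Function names are illustrative.
 */
static void example_set_mtu(struct net_device *netdev, int new_mtu)
{
	WRITE_ONCE(netdev->mtu, new_mtu);	/* writer, under rtnl */
}

static unsigned int example_get_mtu(const struct net_device *netdev)
{
	return READ_ONCE(netdev->mtu);		/* lockless reader */
}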
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index e6e47b1842ea..a19d098f2e2b 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1098,7 +1098,7 @@ static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
/* This is to prevent starting RX channel in emac_rx_enable() */
set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags);
- dev->ndev->mtu = new_mtu;
+ WRITE_ONCE(dev->ndev->mtu, new_mtu);
emac_full_tx_reset(dev);
}
@@ -1130,7 +1130,7 @@ static int emac_change_mtu(struct net_device *ndev, int new_mtu)
}
if (!ret) {
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
dev->rx_skb_size = emac_rx_skb_size(new_mtu);
dev->rx_sync_size = emac_rx_sync_size(new_mtu);
}
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 2439f7e96e05..d92dd9c83031 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -605,9 +605,13 @@ static int mal_probe(struct platform_device *ofdev)
INIT_LIST_HEAD(&mal->list);
spin_lock_init(&mal->lock);
- init_dummy_netdev(&mal->dummy_dev);
+ mal->dummy_dev = alloc_netdev_dummy(0);
+ if (!mal->dummy_dev) {
+ err = -ENOMEM;
+ goto fail_unmap;
+ }
- netif_napi_add_weight(&mal->dummy_dev, &mal->napi, mal_poll,
+ netif_napi_add_weight(mal->dummy_dev, &mal->napi, mal_poll,
CONFIG_IBM_EMAC_POLL_WEIGHT);
/* Load power-on reset defaults */
@@ -637,7 +641,7 @@ static int mal_probe(struct platform_device *ofdev)
GFP_KERNEL);
if (mal->bd_virt == NULL) {
err = -ENOMEM;
- goto fail_unmap;
+ goto fail_dummy;
}
for (i = 0; i < mal->num_tx_chans; ++i)
@@ -703,6 +707,8 @@ static int mal_probe(struct platform_device *ofdev)
free_irq(mal->serr_irq, mal);
fail2:
dma_free_coherent(&ofdev->dev, bd_size, mal->bd_virt, mal->bd_dma);
+ fail_dummy:
+ free_netdev(mal->dummy_dev);
fail_unmap:
dcr_unmap(mal->dcr_host, 0x100);
fail:
@@ -734,6 +740,8 @@ static void mal_remove(struct platform_device *ofdev)
mal_reset(mal);
+ free_netdev(mal->dummy_dev);
+
dma_free_coherent(&ofdev->dev,
sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
diff --git a/drivers/net/ethernet/ibm/emac/mal.h b/drivers/net/ethernet/ibm/emac/mal.h
index d212373a72e7..e0ddc41186a2 100644
--- a/drivers/net/ethernet/ibm/emac/mal.h
+++ b/drivers/net/ethernet/ibm/emac/mal.h
@@ -205,7 +205,7 @@ struct mal_instance {
int index;
spinlock_t lock;
- struct net_device dummy_dev;
+ struct net_device *dummy_dev;
unsigned int features;
};
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b5aef0b29efe..4c9d9badd698 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1537,7 +1537,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
adapter->rx_buff_pool[i].active = 1;
if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
vio_cmo_set_dev_desired(viodev,
ibmveth_get_desired_dma
(viodev));
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 30c47b8470ad..23ebeb143987 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2371,7 +2371,7 @@ static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
else
ind_bufp->index = 0;
- return 0;
+ return rc;
}
static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
@@ -2424,7 +2424,9 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_dropped++;
tx_send_failed++;
ret = NETDEV_TX_OK;
- ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
goto out;
}
@@ -2439,8 +2441,10 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb_any(skb);
tx_send_failed++;
tx_dropped++;
- ibmvnic_tx_scrq_flush(adapter, tx_scrq);
ret = NETDEV_TX_OK;
+ lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
+ if (lpar_rc != H_SUCCESS)
+ goto tx_err;
goto out;
}
@@ -2478,6 +2482,18 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
(tx_pool->consumer_index + 1) % tx_pool->num_buffers;
tx_buff = &tx_pool->tx_buff[bufidx];
+
+ /* Sanity-check our free map to make sure it points to an index
+ * that is not occupied by another skb. If skb memory were leaked
+ * here, we would see congestion control kick in and halt tx.
+ */
+ if (unlikely(tx_buff->skb)) {
+ dev_warn_ratelimited(dev, "TX free map points to untracked skb (%s %d idx=%d)\n",
+ skb_is_gso(skb) ? "tso_pool" : "tx_pool",
+ queue_num, bufidx);
+ dev_kfree_skb_any(tx_buff->skb);
+ }
+
tx_buff->skb = skb;
tx_buff->index = bufidx;
tx_buff->pool_index = queue_num;
@@ -4057,6 +4073,12 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
adapter->num_active_tx_scrqs = 0;
}
+ /* Clean any remaining outstanding SKBs; we freed the IRQ,
+ * so we won't be hearing from them.
+ */
+ clean_tx_pools(adapter);
+
if (adapter->rx_scrq) {
for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
if (!adapter->rx_scrq[i])
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 639fbb12bd35..0375c7448a57 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -16,6 +16,9 @@ config NET_VENDOR_INTEL
if NET_VENDOR_INTEL
+source "drivers/net/ethernet/intel/libeth/Kconfig"
+source "drivers/net/ethernet/intel/libie/Kconfig"
+
config E100
tristate "Intel(R) PRO/100+ support"
depends on PCI
@@ -41,7 +44,7 @@ config E100
config E1000
tristate "Intel(R) PRO/1000 Gigabit Ethernet support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
help
This driver supports Intel(R) PRO/1000 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
@@ -225,6 +228,7 @@ config I40E
depends on PTP_1588_CLOCK_OPTIONAL
depends on PCI
select AUXILIARY_BUS
+ select LIBIE
select NET_DEVLINK
help
This driver supports Intel(R) Ethernet Controller XL710 Family of
@@ -253,6 +257,8 @@ config I40E_DCB
# so that CONFIG_IAVF symbol will always mirror the state of CONFIG_I40EVF
config IAVF
tristate
+ select LIBIE
+
config I40EVF
tristate "Intel(R) Ethernet Adaptive Virtual Function support"
select IAVF
@@ -283,6 +289,7 @@ config ICE
depends on GNSS || GNSS = n
select AUXILIARY_BUS
select DIMLIB
+ select LIBIE
select NET_DEVLINK
select PLDMFW
select DPLL
@@ -377,17 +384,6 @@ config IGC_LEDS
Optional support for controlling the NIC LED's with the netdev
LED trigger.
-config IDPF
- tristate "Intel(R) Infrastructure Data Path Function Support"
- depends on PCI_MSI
- select DIMLIB
- select PAGE_POOL
- select PAGE_POOL_STATS
- help
- This driver supports Intel(R) Infrastructure Data Path Function
- devices.
-
- To compile this driver as a module, choose M here. The module
- will be called idpf.
+source "drivers/net/ethernet/intel/idpf/Kconfig"
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/Makefile b/drivers/net/ethernet/intel/Makefile
index dacb481ee5b1..04c844ef4964 100644
--- a/drivers/net/ethernet/intel/Makefile
+++ b/drivers/net/ethernet/intel/Makefile
@@ -3,6 +3,9 @@
# Makefile for the Intel network device drivers.
#
+obj-$(CONFIG_LIBETH) += libeth/
+obj-$(CONFIG_LIBIE) += libie/
+
obj-$(CONFIG_E100) += e100.o
obj-$(CONFIG_E1000) += e1000/
obj-$(CONFIG_E1000E) += e1000e/
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 3fcb8daaa243..aa139b67a55b 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -161,7 +161,6 @@
#define FIRMWARE_D102E "e100/d102e_ucode.bin"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(FIRMWARE_D101M);
MODULE_FIRMWARE(FIRMWARE_D101S);
@@ -3037,7 +3036,7 @@ static int __e100_power_off(struct pci_dev *pdev, bool wake)
return 0;
}
-static int __maybe_unused e100_suspend(struct device *dev_d)
+static int e100_suspend(struct device *dev_d)
{
bool wake;
@@ -3046,7 +3045,7 @@ static int __maybe_unused e100_suspend(struct device *dev_d)
return 0;
}
-static int __maybe_unused e100_resume(struct device *dev_d)
+static int e100_resume(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct nic *nic = netdev_priv(netdev);
@@ -3163,7 +3162,7 @@ static const struct pci_error_handlers e100_err_handler = {
.resume = e100_io_resume,
};
-static SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(e100_pm_ops, e100_suspend, e100_resume);
static struct pci_driver e100_driver = {
.name = DRV_NAME,
@@ -3172,7 +3171,7 @@ static struct pci_driver e100_driver = {
.remove = e100_remove,
/* Power Management hooks */
- .driver.pm = &e100_pm_ops,
+ .driver.pm = pm_sleep_ptr(&e100_pm_ops),
.shutdown = e100_shutdown,
.err_handler = &e100_err_handler,
diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile
index 314c52d44b7c..79491dec47e1 100644
--- a/drivers/net/ethernet/intel/e1000/Makefile
+++ b/drivers/net/ethernet/intel/e1000/Makefile
@@ -7,4 +7,4 @@
obj-$(CONFIG_E1000) += e1000.o
-e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o
+e1000-y := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 1d1e93686af2..ab7ae418d294 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -149,8 +149,8 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
__be16 proto, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
-static int __maybe_unused e1000_suspend(struct device *dev);
-static int __maybe_unused e1000_resume(struct device *dev);
+static int e1000_suspend(struct device *dev);
+static int e1000_resume(struct device *dev);
static void e1000_shutdown(struct pci_dev *pdev);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -175,21 +175,18 @@ static const struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(e1000_pm_ops, e1000_suspend, e1000_resume);
static struct pci_driver e1000_driver = {
.name = e1000_driver_name,
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
- .driver = {
- .pm = &e1000_pm_ops,
- },
+ .driver.pm = pm_sleep_ptr(&e1000_pm_ops),
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");
@@ -3571,7 +3568,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
e1000_up(adapter);
@@ -5135,7 +5132,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
return 0;
}
-static int __maybe_unused e1000_suspend(struct device *dev)
+static int e1000_suspend(struct device *dev)
{
int retval;
struct pci_dev *pdev = to_pci_dev(dev);
@@ -5147,7 +5144,7 @@ static int __maybe_unused e1000_suspend(struct device *dev)
return retval;
}
-static int __maybe_unused e1000_resume(struct device *dev)
+static int e1000_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile
index 0baa15503c38..18f22b6374d5 100644
--- a/drivers/net/ethernet/intel/e1000e/Makefile
+++ b/drivers/net/ethernet/intel/e1000e/Makefile
@@ -10,7 +10,6 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_E1000E) += e1000e.o
-e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \
- mac.o manage.o nvm.o phy.o \
- param.o ethtool.o netdev.o ptp.o
-
+e1000e-y := 82571.o ich8lan.o 80003es2lan.o \
+ mac.o manage.o nvm.o phy.o \
+ param.o ethtool.o netdev.o ptp.o
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 23a58cada43a..5e2cfa73f889 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -679,8 +679,6 @@
/* PCI/PCI-X/PCI-EX Config space */
#define PCI_HEADER_TYPE_REGISTER 0x0E
-#define PCI_HEADER_TYPE_MULTIFUNC 0x80
-
#define PHY_REVISION_MASK 0xFFFFFFF0
#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */
#define MAX_PHY_MULTI_PAGE_REG 0xF
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index dc553c51d79a..9364bc2b4eb1 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -156,7 +156,7 @@ static int e1000_get_link_ksettings(struct net_device *netdev,
speed = adapter->link_speed;
cmd->base.duplex = adapter->link_duplex - 1;
}
- } else if (!pm_runtime_suspended(netdev->dev.parent)) {
+ } else {
u32 status = er32(STATUS);
if (status & E1000_STATUS_LU) {
@@ -274,16 +274,13 @@ static int e1000_set_link_ksettings(struct net_device *netdev,
ethtool_convert_link_mode_to_legacy_u32(&advertising,
cmd->link_modes.advertising);
- pm_runtime_get_sync(netdev->dev.parent);
-
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
*/
if (hw->phy.ops.check_reset_block &&
hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot change link characteristics when SoL/IDER is active.\n");
- ret_val = -EINVAL;
- goto out;
+ return -EINVAL;
}
/* MDI setting is only allowed when autoneg enabled because
@@ -291,16 +288,13 @@ static int e1000_set_link_ksettings(struct net_device *netdev,
* duplex is forced.
*/
if (cmd->base.eth_tp_mdix_ctrl) {
- if (hw->phy.media_type != e1000_media_type_copper) {
- ret_val = -EOPNOTSUPP;
- goto out;
- }
+ if (hw->phy.media_type != e1000_media_type_copper)
+ return -EOPNOTSUPP;
if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
(cmd->base.autoneg != AUTONEG_ENABLE)) {
e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
- ret_val = -EINVAL;
- goto out;
+ return -EINVAL;
}
}
@@ -347,7 +341,6 @@ static int e1000_set_link_ksettings(struct net_device *netdev,
}
out:
- pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
return ret_val;
}
@@ -383,8 +376,6 @@ static int e1000_set_pauseparam(struct net_device *netdev,
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
- pm_runtime_get_sync(netdev->dev.parent);
-
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
if (netif_running(adapter->netdev)) {
@@ -417,7 +408,6 @@ static int e1000_set_pauseparam(struct net_device *netdev,
}
out:
- pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
return retval;
}
@@ -448,8 +438,6 @@ static void e1000_get_regs(struct net_device *netdev,
u32 *regs_buff = p;
u16 phy_data;
- pm_runtime_get_sync(netdev->dev.parent);
-
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
regs->version = (1u << 24) |
@@ -495,8 +483,6 @@ static void e1000_get_regs(struct net_device *netdev,
e1e_rphy(hw, MII_STAT1000, &phy_data);
regs_buff[24] = (u32)phy_data; /* phy local receiver status */
regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
-
- pm_runtime_put_sync(netdev->dev.parent);
}
static int e1000_get_eeprom_len(struct net_device *netdev)
@@ -529,8 +515,6 @@ static int e1000_get_eeprom(struct net_device *netdev,
if (!eeprom_buff)
return -ENOMEM;
- pm_runtime_get_sync(netdev->dev.parent);
-
if (hw->nvm.type == e1000_nvm_eeprom_spi) {
ret_val = e1000_read_nvm(hw, first_word,
last_word - first_word + 1,
@@ -544,8 +528,6 @@ static int e1000_get_eeprom(struct net_device *netdev,
}
}
- pm_runtime_put_sync(netdev->dev.parent);
-
if (ret_val) {
/* a read error occurred, throw away the result */
memset(eeprom_buff, 0xff, sizeof(u16) *
@@ -595,8 +577,6 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
- pm_runtime_get_sync(netdev->dev.parent);
-
if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
@@ -637,7 +617,6 @@ static int e1000_set_eeprom(struct net_device *netdev,
ret_val = e1000e_update_nvm_checksum(hw);
out:
- pm_runtime_put_sync(netdev->dev.parent);
kfree(eeprom_buff);
return ret_val;
}
@@ -733,8 +712,6 @@ static int e1000_set_ringparam(struct net_device *netdev,
}
}
- pm_runtime_get_sync(netdev->dev.parent);
-
e1000e_down(adapter, true);
/* We can't just free everything and then setup again, because the
@@ -773,7 +750,6 @@ err_setup_rx:
e1000e_free_tx_resources(temp_tx);
err_setup:
e1000e_up(adapter);
- pm_runtime_put_sync(netdev->dev.parent);
free_temp:
vfree(temp_tx);
vfree(temp_rx);
@@ -1816,8 +1792,6 @@ static void e1000_diag_test(struct net_device *netdev,
u8 autoneg;
bool if_running = netif_running(netdev);
- pm_runtime_get_sync(netdev->dev.parent);
-
set_bit(__E1000_TESTING, &adapter->state);
if (!if_running) {
@@ -1903,8 +1877,6 @@ static void e1000_diag_test(struct net_device *netdev,
}
msleep_interruptible(4 * 1000);
-
- pm_runtime_put_sync(netdev->dev.parent);
}
static void e1000_get_wol(struct net_device *netdev,
@@ -2046,15 +2018,11 @@ static int e1000_set_coalesce(struct net_device *netdev,
adapter->itr_setting = adapter->itr & ~3;
}
- pm_runtime_get_sync(netdev->dev.parent);
-
if (adapter->itr_setting != 0)
e1000e_write_itr(adapter, adapter->itr);
else
e1000e_write_itr(adapter, 0);
- pm_runtime_put_sync(netdev->dev.parent);
-
return 0;
}
@@ -2068,9 +2036,7 @@ static int e1000_nway_reset(struct net_device *netdev)
if (!adapter->hw.mac.autoneg)
return -EINVAL;
- pm_runtime_get_sync(netdev->dev.parent);
e1000e_reinit_locked(adapter);
- pm_runtime_put_sync(netdev->dev.parent);
return 0;
}
@@ -2084,12 +2050,8 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
int i;
char *p = NULL;
- pm_runtime_get_sync(netdev->dev.parent);
-
dev_get_stats(netdev, &net_stats);
- pm_runtime_put_sync(netdev->dev.parent);
-
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
@@ -2146,9 +2108,7 @@ static int e1000_get_rxnfc(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
u32 mrqc;
- pm_runtime_get_sync(netdev->dev.parent);
mrqc = er32(MRQC);
- pm_runtime_put_sync(netdev->dev.parent);
if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
return 0;
@@ -2211,13 +2171,9 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
return -EOPNOTSUPP;
}
- pm_runtime_get_sync(netdev->dev.parent);
-
ret_val = hw->phy.ops.acquire(hw);
- if (ret_val) {
- pm_runtime_put_sync(netdev->dev.parent);
+ if (ret_val)
return -EBUSY;
- }
/* EEE Capability */
ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
@@ -2257,8 +2213,6 @@ release:
if (ret_val)
ret_val = -ENODATA;
- pm_runtime_put_sync(netdev->dev.parent);
-
return ret_val;
}
@@ -2299,21 +2253,17 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
- pm_runtime_get_sync(netdev->dev.parent);
-
/* reset the link */
if (netif_running(netdev))
e1000e_reinit_locked(adapter);
else
e1000e_reset(adapter);
- pm_runtime_put_sync(netdev->dev.parent);
-
return 0;
}
static int e1000e_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f9e94be36e97..ce227b56cf72 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1109,6 +1109,46 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
}
/**
+ * e1000e_force_smbus - Force interfaces to transition to SMBUS mode.
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC and the PHY to SMBUS mode. Assumes semaphore already
+ * acquired.
+ *
+ * Return: 0 on success, negative errno on failure.
+ **/
+static s32 e1000e_force_smbus(struct e1000_hw *hw)
+{
+ u16 smb_ctrl = 0;
+ u32 ctrl_ext;
+ s32 ret_val;
+
+ /* Switching PHY interface always returns MDI error
+ * so disable retry mechanism to avoid wasting time
+ */
+ e1000e_disable_phy_retry(hw);
+
+ /* Force SMBus mode in the PHY */
+ ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &smb_ctrl);
+ if (ret_val) {
+ e1000e_enable_phy_retry(hw);
+ return ret_val;
+ }
+
+ smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
+ e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, smb_ctrl);
+
+ e1000e_enable_phy_retry(hw);
+
+ /* Force SMBus mode in the MAC */
+ ctrl_ext = er32(CTRL_EXT);
+ ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, ctrl_ext);
+
+ return 0;
+}
+
+/**
* e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP
* @hw: pointer to the HW structure
* @to_sx: boolean indicating a system power state transition to Sx
@@ -1165,6 +1205,14 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
if (ret_val)
goto out;
+ if (hw->mac.type != e1000_pch_mtp) {
+ ret_val = e1000e_force_smbus(hw);
+ if (ret_val) {
+ e_dbg("Failed to force SMBUS: %d\n", ret_val);
+ goto release;
+ }
+ }
+
/* Si workaround for ULP entry flow on i127/rev6 h/w. Enable
* LPLU and disable Gig speed when entering ULP
*/
@@ -1225,6 +1273,13 @@ s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx)
}
release:
+ if (hw->mac.type == e1000_pch_mtp) {
+ ret_val = e1000e_force_smbus(hw);
+ if (ret_val)
+ e_dbg("Failed to force SMBUS over MTL system: %d\n",
+ ret_val);
+ }
+
hw->phy.ops.release(hw);
out:
if (ret_val)
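
A minimal usage sketch for the new helper (illustration only, not part of the patch): the kernel-doc above says e1000e_force_smbus() assumes the semaphore is already acquired, so a hypothetical caller brackets it with the acquire/release ops visible in the surrounding hunks. The name example_enter_smbus is invented.

	static s32 example_enter_smbus(struct e1000_hw *hw)
	{
		s32 ret_val;

		/* e1000e_force_smbus() expects the PHY semaphore to be held */
		ret_val = hw->phy.ops.acquire(hw);
		if (ret_val)
			return ret_val;

		ret_val = e1000e_force_smbus(hw);
		if (ret_val)
			e_dbg("Failed to force SMBUS: %d\n", ret_val);

		hw->phy.ops.release(hw);
		return ret_val;
	}
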
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3692fce20195..360ee26557f7 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6038,7 +6038,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
adapter->max_frame_size = max_frame;
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
pm_runtime_get_sync(netdev->dev.parent);
@@ -6363,49 +6363,49 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
ew32(EXTCNF_CTRL, mac_data);
- /* Enable the Dynamic Power Gating in the MAC */
- mac_data = er32(FEXTNVM7);
- mac_data |= BIT(22);
- ew32(FEXTNVM7, mac_data);
-
/* Disable disconnected cable conditioning for Power Gating */
mac_data = er32(DPGFR);
mac_data |= BIT(2);
ew32(DPGFR, mac_data);
- /* Don't wake from dynamic Power Gating with clock request */
- mac_data = er32(FEXTNVM12);
- mac_data |= BIT(12);
- ew32(FEXTNVM12, mac_data);
-
- /* Ungate PGCB clock */
- mac_data = er32(FEXTNVM9);
- mac_data &= ~BIT(28);
- ew32(FEXTNVM9, mac_data);
-
- /* Enable K1 off to enable mPHY Power Gating */
- mac_data = er32(FEXTNVM6);
- mac_data |= BIT(31);
- ew32(FEXTNVM6, mac_data);
-
- /* Enable mPHY power gating for any link and speed */
- mac_data = er32(FEXTNVM8);
- mac_data |= BIT(9);
- ew32(FEXTNVM8, mac_data);
-
/* Enable the Dynamic Clock Gating in the DMA and MAC */
mac_data = er32(CTRL_EXT);
mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN;
ew32(CTRL_EXT, mac_data);
-
- /* No MAC DPG gating SLP_S0 in modern standby
- * Switch the logic of the lanphypc to use PMC counter
- */
- mac_data = er32(FEXTNVM5);
- mac_data |= BIT(7);
- ew32(FEXTNVM5, mac_data);
}
+ /* Enable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data |= BIT(22);
+ ew32(FEXTNVM7, mac_data);
+
+ /* Don't wake from dynamic Power Gating with clock request */
+ mac_data = er32(FEXTNVM12);
+ mac_data |= BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Ungate PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data &= ~BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Enable K1 off to enable mPHY Power Gating */
+ mac_data = er32(FEXTNVM6);
+ mac_data |= BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Enable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data |= BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
+ /* No MAC DPG gating SLP_S0 in modern standby
+ * Switch the logic of the lanphypc to use PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data |= BIT(7);
+ ew32(FEXTNVM5, mac_data);
+
/* Disable the time synchronization clock */
mac_data = er32(FEXTNVM7);
mac_data |= BIT(31);
@@ -6498,33 +6498,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
} else {
/* Request driver unconfigure the device from S0ix */
- /* Disable the Dynamic Power Gating in the MAC */
- mac_data = er32(FEXTNVM7);
- mac_data &= 0xFFBFFFFF;
- ew32(FEXTNVM7, mac_data);
-
- /* Disable mPHY power gating for any link and speed */
- mac_data = er32(FEXTNVM8);
- mac_data &= ~BIT(9);
- ew32(FEXTNVM8, mac_data);
-
- /* Disable K1 off */
- mac_data = er32(FEXTNVM6);
- mac_data &= ~BIT(31);
- ew32(FEXTNVM6, mac_data);
-
- /* Disable Ungate PGCB clock */
- mac_data = er32(FEXTNVM9);
- mac_data |= BIT(28);
- ew32(FEXTNVM9, mac_data);
-
- /* Cancel not waking from dynamic
- * Power Gating with clock request
- */
- mac_data = er32(FEXTNVM12);
- mac_data &= ~BIT(12);
- ew32(FEXTNVM12, mac_data);
-
/* Cancel disable disconnected cable conditioning
* for Power Gating
*/
@@ -6537,13 +6510,6 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
mac_data &= 0xFFF7FFFF;
ew32(CTRL_EXT, mac_data);
- /* Revert the lanphypc logic to use the internal Gbe counter
- * and not the PMC counter
- */
- mac_data = er32(FEXTNVM5);
- mac_data &= 0xFFFFFF7F;
- ew32(FEXTNVM5, mac_data);
-
/* Enable the periodic inband message,
* Request PCIe clock in K1 page770_17[10:9] =01b
*/
@@ -6581,6 +6547,40 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
mac_data &= ~BIT(31);
mac_data |= BIT(0);
ew32(FEXTNVM7, mac_data);
+
+ /* Disable the Dynamic Power Gating in the MAC */
+ mac_data = er32(FEXTNVM7);
+ mac_data &= 0xFFBFFFFF;
+ ew32(FEXTNVM7, mac_data);
+
+ /* Disable mPHY power gating for any link and speed */
+ mac_data = er32(FEXTNVM8);
+ mac_data &= ~BIT(9);
+ ew32(FEXTNVM8, mac_data);
+
+ /* Disable K1 off */
+ mac_data = er32(FEXTNVM6);
+ mac_data &= ~BIT(31);
+ ew32(FEXTNVM6, mac_data);
+
+ /* Disable Ungate PGCB clock */
+ mac_data = er32(FEXTNVM9);
+ mac_data |= BIT(28);
+ ew32(FEXTNVM9, mac_data);
+
+ /* Cancel not waking from dynamic
+ * Power Gating with clock request
+ */
+ mac_data = er32(FEXTNVM12);
+ mac_data &= ~BIT(12);
+ ew32(FEXTNVM12, mac_data);
+
+ /* Revert the lanphypc logic to use the internal Gbe counter
+ * and not the PMC counter
+ */
+ mac_data = er32(FEXTNVM5);
+ mac_data &= 0xFFFFFF7F;
+ ew32(FEXTNVM5, mac_data);
}
static int e1000e_pm_freeze(struct device *dev)
@@ -6623,7 +6623,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, ctrl_ext, rctl, status, wufc;
int retval = 0;
- u16 smb_ctrl;
/* Runtime suspend should only enable wakeup for link changes */
if (runtime)
@@ -6697,23 +6696,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
if (retval)
return retval;
}
-
- /* Force SMBUS to allow WOL */
- /* Switching PHY interface always returns MDI error
- * so disable retry mechanism to avoid wasting time
- */
- e1000e_disable_phy_retry(hw);
-
- e1e_rphy(hw, CV_SMB_CTRL, &smb_ctrl);
- smb_ctrl |= CV_SMB_CTRL_FORCE_SMBUS;
- e1e_wphy(hw, CV_SMB_CTRL, smb_ctrl);
-
- e1000e_enable_phy_retry(hw);
-
- /* Force SMBus mode in MAC */
- ctrl_ext = er32(CTRL_EXT);
- ctrl_ext |= E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, ctrl_ext);
}
/* Ensure that the appropriate bits are set in LPI_CTRL
@@ -6968,13 +6950,13 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
-static __maybe_unused int e1000e_pm_prepare(struct device *dev)
+static int e1000e_pm_prepare(struct device *dev)
{
return pm_runtime_suspended(dev) &&
pm_suspend_via_firmware();
}
-static __maybe_unused int e1000e_pm_suspend(struct device *dev)
+static int e1000e_pm_suspend(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -6997,7 +6979,7 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev)
return rc;
}
-static __maybe_unused int e1000e_pm_resume(struct device *dev)
+static int e1000e_pm_resume(struct device *dev)
{
struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
struct e1000_adapter *adapter = netdev_priv(netdev);
@@ -7031,7 +7013,7 @@ static __maybe_unused int e1000e_pm_runtime_idle(struct device *dev)
return -EBUSY;
}
-static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
+static int e1000e_pm_runtime_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7050,7 +7032,7 @@ static __maybe_unused int e1000e_pm_runtime_resume(struct device *dev)
return rc;
}
-static __maybe_unused int e1000e_pm_runtime_suspend(struct device *dev)
+static int e1000e_pm_runtime_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7937,8 +7919,7 @@ static const struct pci_device_id e1000_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
-static const struct dev_pm_ops e1000_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
+static const struct dev_pm_ops e1000e_pm_ops = {
.prepare = e1000e_pm_prepare,
.suspend = e1000e_pm_suspend,
.resume = e1000e_pm_resume,
@@ -7946,9 +7927,8 @@ static const struct dev_pm_ops e1000_pm_ops = {
.thaw = e1000e_pm_thaw,
.poweroff = e1000e_pm_suspend,
.restore = e1000e_pm_resume,
-#endif
- SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
- e1000e_pm_runtime_idle)
+ RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
+ e1000e_pm_runtime_idle)
};
/* PCI Device API Driver */
@@ -7957,9 +7937,7 @@ static struct pci_driver e1000_driver = {
.id_table = e1000_pci_tbl,
.probe = e1000_probe,
.remove = e1000_remove,
- .driver = {
- .pm = &e1000_pm_ops,
- },
+ .driver.pm = pm_ptr(&e1000e_pm_ops),
.shutdown = e1000_shutdown,
.err_handler = &e1000_err_handler
};
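
A condensed sketch of the pattern these hunks adopt, assuming the generic helpers in linux/pm.h: RUNTIME_PM_OPS() and SYSTEM_SLEEP_PM_OPS() always compile their callbacks (which is why the __maybe_unused annotations and the CONFIG_PM_SLEEP #ifdef can go), while pm_ptr() resolves to NULL when CONFIG_PM is off so the table is discarded without preprocessor guards. The fm10k hunks below apply the same scheme with DEFINE_SIMPLE_DEV_PM_OPS()/pm_sleep_ptr() for a sleep-only driver. All example_* names are invented.

	#include <linux/pci.h>
	#include <linux/pm.h>

	/* Stubs standing in for real driver callbacks */
	static int example_suspend(struct device *dev) { return 0; }
	static int example_resume(struct device *dev) { return 0; }
	static int example_rt_suspend(struct device *dev) { return 0; }
	static int example_rt_resume(struct device *dev) { return 0; }
	static int example_rt_idle(struct device *dev) { return -EBUSY; }

	static const struct dev_pm_ops example_pm_ops = {
		SYSTEM_SLEEP_PM_OPS(example_suspend, example_resume)
		RUNTIME_PM_OPS(example_rt_suspend, example_rt_resume,
			       example_rt_idle)
	};

	static struct pci_driver example_driver = {
		/* pm_ptr() drops the reference entirely when CONFIG_PM=n */
		.driver.pm = pm_ptr(&example_pm_ops),
	};
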
@@ -7991,7 +7969,6 @@ static void __exit e1000_exit_module(void)
}
module_exit(e1000_exit_module);
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index bbcfd529399b..89d57dd911dc 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -124,7 +124,8 @@ static int e1000e_phc_get_syncdevicetime(ktime_t *device,
sys_cycles = er32(PLTSTMPH);
sys_cycles <<= 32;
sys_cycles |= er32(PLTSTMPL);
- *system = convert_art_to_tsc(sys_cycles);
+ system->cycles = sys_cycles;
+ system->cs_id = CSID_X86_ART;
return 0;
}
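
A sketch of the updated callback shape (not part of the patch), assuming struct system_counterval_t from linux/timekeeping.h and the clocksource IDs from linux/clocksource_ids.h: rather than converting ART cycles with convert_art_to_tsc(), the driver now hands the raw cycles plus a clocksource ID to the timekeeping core, which performs the conversion inside get_device_system_crosststamp(). Function and variable names here are invented.

	#include <linux/clocksource_ids.h>
	#include <linux/timekeeping.h>

	static int example_get_syncdevicetime(ktime_t *device,
					      struct system_counterval_t *system,
					      void *ctx)
	{
		u64 art_cycles = 0;	/* read from device registers in a real driver */

		*device = ns_to_ktime(0);	/* device clock snapshot, hardware-specific */

		/* Report raw ART cycles; the core converts them via cs_id */
		system->cycles = art_cycles;
		system->cs_id = CSID_X86_ART;
		return 0;
	}
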
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index fc373472e4e1..142f07ca8bc0 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -17,7 +17,6 @@ static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
"Copyright(c) 2013 - 2019 Intel Corporation.";
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index d748b98274e7..92de609b7218 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2342,7 +2342,7 @@ static int fm10k_handle_resume(struct fm10k_intfc *interface)
* suspend or hibernation. This function does not need to handle lower PCIe
* device state as the stack takes care of that for us.
**/
-static int __maybe_unused fm10k_resume(struct device *dev)
+static int fm10k_resume(struct device *dev)
{
struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
@@ -2369,7 +2369,7 @@ static int __maybe_unused fm10k_resume(struct device *dev)
* system suspend or hibernation. This function does not need to handle lower
* PCIe device state as the stack takes care of that for us.
**/
-static int __maybe_unused fm10k_suspend(struct device *dev)
+static int fm10k_suspend(struct device *dev)
{
struct fm10k_intfc *interface = dev_get_drvdata(dev);
struct net_device *netdev = interface->netdev;
@@ -2502,16 +2502,14 @@ static const struct pci_error_handlers fm10k_err_handler = {
.reset_done = fm10k_io_reset_done,
};
-static SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(fm10k_pm_ops, fm10k_suspend, fm10k_resume);
static struct pci_driver fm10k_driver = {
.name = fm10k_driver_name,
.id_table = fm10k_pci_tbl,
.probe = fm10k_probe,
.remove = fm10k_remove,
- .driver = {
- .pm = &fm10k_pm_ops,
- },
+ .driver.pm = pm_sleep_ptr(&fm10k_pm_ops),
.sriov_configure = fm10k_iov_configure,
.err_handler = &fm10k_err_handler
};
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index cad93f323bd5..9faa4339a76c 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -10,7 +10,7 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_I40E) += i40e.o
-i40e-objs := i40e_main.o \
+i40e-y := i40e_main.o \
i40e_ethtool.o \
i40e_adminq.o \
i40e_common.o \
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 2fbabcdb5bb5..d546567e0286 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -735,7 +735,7 @@ __i40e_pf_next_veb(struct i40e_pf *pf, int *idx)
_i++, _veb = __i40e_pf_next_veb(_pf, &_i))
/**
- * i40e_mac_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
+ * i40e_addr_to_hkey - Convert a 6-byte MAC Address to a u64 hash key
* @macaddr: the MAC Address as the base key
*
* Simply copies the address and returns it as a u64 for hashing
@@ -788,7 +788,6 @@ struct i40e_veb {
u16 stats_idx; /* index of VEB parent */
u8 enabled_tc;
u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */
- u16 flags;
u16 bw_limit;
u8 bw_max_quanta;
bool is_abs_credits;
@@ -1213,7 +1212,7 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi);
void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi);
int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi);
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
-struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 uplink_seid,
u16 downlink_seid, u8 enabled_tc);
void i40e_veb_release(struct i40e_veb *veb);
@@ -1237,8 +1236,8 @@ static inline void i40e_dbg_exit(void) {}
int i40e_lan_add_device(struct i40e_pf *pf);
int i40e_lan_del_device(struct i40e_pf *pf);
void i40e_client_subtask(struct i40e_pf *pf);
-void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi);
-void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset);
+void i40e_notify_client_of_l2_param_changes(struct i40e_pf *pf);
+void i40e_notify_client_of_netdev_close(struct i40e_pf *pf, bool reset);
void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs);
void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id);
void i40e_client_update_msix_info(struct i40e_pf *pf);
@@ -1374,6 +1373,17 @@ i40e_pf_get_vsi_by_seid(struct i40e_pf *pf, u16 seid)
}
/**
+ * i40e_pf_get_main_vsi - get pointer to main VSI
+ * @pf: pointer to a PF
+ *
+ * Return: pointer to main VSI or NULL if it does not exist
+ **/
+static inline struct i40e_vsi *i40e_pf_get_main_vsi(struct i40e_pf *pf)
+{
+ return (pf->lan_vsi != I40E_NO_VSI) ? pf->vsi[pf->lan_vsi] : NULL;
+}
+
+/**
* i40e_pf_get_veb_by_seid - find VEB by SEID
* @pf: pointer to a PF
* @seid: SEID of the VSI
@@ -1391,4 +1401,15 @@ i40e_pf_get_veb_by_seid(struct i40e_pf *pf, u16 seid)
return NULL;
}
+/**
+ * i40e_pf_get_main_veb - get pointer to main VEB
+ * @pf: pointer to a PF
+ *
+ * Return: pointer to main VEB or NULL if it does not exist
+ **/
+static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf)
+{
+ return (pf->lan_veb != I40E_NO_VEB) ? pf->veb[pf->lan_veb] : NULL;
+}
+
#endif /* _I40E_H_ */
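
A usage sketch for the new accessors (illustration only): both return NULL until the main VSI/VEB exists, so a call site that can run early should check the result before dereferencing. The helper name example_use_main_vsi is invented.

	static void example_use_main_vsi(struct i40e_pf *pf)
	{
		struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);

		/* NULL while the main LAN VSI has not been set up yet */
		if (!vsi)
			return;

		dev_info(&pf->pdev->dev, "main VSI seid: %u\n", vsi->seid);
	}
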
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index ee86d2c53079..55b5bb884d73 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -109,10 +109,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
-EFBIG, /* I40E_AQ_RC_EFBIG */
};
- /* aq_rc is invalid if AQ timed out */
- if (aq_ret == -EIO)
- return -EAGAIN;
-
if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
return -ERANGE;
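
A usage sketch (not in this patch): with the early timeout special case removed, an -EIO admin-queue return is now mapped through aq_rc like any other failure. A hypothetical caller converts both pieces of state at once, assuming the asq_last_status field of the hw admin-queue info:

	int status = i40e_aq_set_rss_key(hw, vsi_id, key);

	if (status)
		status = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
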
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index b32071ee84af..59263551c383 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -101,25 +101,26 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len)
/**
* i40e_notify_client_of_l2_param_changes - call the client notify callback
- * @vsi: the VSI with l2 param changes
+ * @pf: PF device pointer
*
- * If there is a client to this VSI, call the client
+ * If there is a client, call its callback
**/
-void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
+void i40e_notify_client_of_l2_param_changes(struct i40e_pf *pf)
{
- struct i40e_pf *pf = vsi->back;
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_client_instance *cdev = pf->cinst;
struct i40e_params params;
if (!cdev || !cdev->client)
return;
if (!cdev->client->ops || !cdev->client->ops->l2_param_change) {
- dev_dbg(&vsi->back->pdev->dev,
+ dev_dbg(&pf->pdev->dev,
"Cannot locate client instance l2_param_change routine\n");
return;
}
if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) {
- dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n");
+ dev_dbg(&pf->pdev->dev,
+ "Client is not open, abort l2 param change\n");
return;
}
memset(&params, 0, sizeof(params));
@@ -157,20 +158,19 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev)
/**
* i40e_notify_client_of_netdev_close - call the client close callback
- * @vsi: the VSI with netdev closed
+ * @pf: PF device pointer
* @reset: true when close called due to a reset pending
*
* If there is a client to this netdev, call the client with close
**/
-void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset)
+void i40e_notify_client_of_netdev_close(struct i40e_pf *pf, bool reset)
{
- struct i40e_pf *pf = vsi->back;
struct i40e_client_instance *cdev = pf->cinst;
if (!cdev || !cdev->client)
return;
if (!cdev->client->ops || !cdev->client->ops->close) {
- dev_dbg(&vsi->back->pdev->dev,
+ dev_dbg(&pf->pdev->dev,
"Cannot locate client instance close routine\n");
return;
}
@@ -333,9 +333,9 @@ static int i40e_register_auxiliary_dev(struct i40e_info *ldev, const char *name)
**/
static void i40e_client_add_instance(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_client_instance *cdev = NULL;
struct netdev_hw_addr *mac = NULL;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
if (!cdev)
@@ -399,9 +399,9 @@ void i40e_client_del_instance(struct i40e_pf *pf)
**/
void i40e_client_subtask(struct i40e_pf *pf)
{
- struct i40e_client *client;
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_client_instance *cdev;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_client *client;
int ret = 0;
if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state))
@@ -665,8 +665,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev,
bool is_vf, u32 vf_id,
u32 flag, u32 valid_flag)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(ldev->pf);
struct i40e_pf *pf = ldev->pf;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
struct i40e_vsi_context ctxt;
bool update = true;
int err;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index de6ca6295742..e8031f1a9b4f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -381,259 +381,6 @@ int i40e_aq_set_rss_key(struct i40e_hw *hw,
return i40e_aq_get_set_rss_key(hw, vsi_id, key, true);
}
-/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT i40e_ptype_lookup[ptype].known
- * THEN
- * Packet is unknown
- * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP
- * Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- * Use the enum i40e_rx_l2_ptype to decode the packet type
- * ENDIF
- */
-
-/* macro to make the table lines short, use explicit indexing with [PTYPE] */
-#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = { \
- 1, \
- I40E_RX_PTYPE_OUTER_##OUTER_IP, \
- I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \
- I40E_RX_PTYPE_##OUTER_FRAG, \
- I40E_RX_PTYPE_TUNNEL_##T, \
- I40E_RX_PTYPE_TUNNEL_END_##TE, \
- I40E_RX_PTYPE_##TEF, \
- I40E_RX_PTYPE_INNER_PROT_##I, \
- I40E_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros makes the table fit but are terse */
-#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG
-#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG
-#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC
-
-/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */
-struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = {
- /* L2 Packet types */
- I40E_PTT_UNUSED_ENTRY(0),
- I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
- I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT_UNUSED_ENTRY(4),
- I40E_PTT_UNUSED_ENTRY(5),
- I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT_UNUSED_ENTRY(8),
- I40E_PTT_UNUSED_ENTRY(9),
- I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-
- /* Non Tunneled IPv4 */
- I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(25),
- I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
- I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
- I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv4 */
- I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(32),
- I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv6 */
- I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(39),
- I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT */
- I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> IPv4 */
- I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(47),
- I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> IPv6 */
- I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(54),
- I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC */
- I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
- I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(62),
- I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
- I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(69),
- I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC/VLAN */
- I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
- I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(77),
- I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
- I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(84),
- I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* Non Tunneled IPv6 */
- I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(91),
- I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
- I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
- I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv4 */
- I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(98),
- I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv6 */
- I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(105),
- I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT */
- I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> IPv4 */
- I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(113),
- I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> IPv6 */
- I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(120),
- I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC */
- I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
- I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(128),
- I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
- I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(135),
- I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN */
- I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(143),
- I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
- I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- I40E_PTT_UNUSED_ENTRY(150),
- I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* unused entries */
- [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-};
-
/**
* i40e_init_shared_code - Initialize the shared code
* @hw: pointer to hardware structure
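
The table removed above encoded the decode workflow its leading comment spelled out (the driver now imports the shared lookup via the LIBIE module namespace added later in this patch). A minimal sketch of that workflow, reusing the just-deleted i40e names; the .known/.outer_ip fields follow the deleted macro layout and are quoted here only for illustration:

	static void example_decode_ptype(u8 ptype)
	{
		struct i40e_rx_ptype_decoded decoded = i40e_ptype_lookup[ptype];

		if (!decoded.known)
			return;	/* PTYPE unknown, nothing to decode */

		if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) {
			/* IP packet: tunnel and inner-protocol fields are valid */
		} else {
			/* non-IP: fall back to the L2 ptype enumeration */
		}
	}
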
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
index 2f53f0f53bc3..daa9f2c42f70 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c
@@ -407,8 +407,9 @@ static int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size,
**/
static int i40e_ddp_restore(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+ struct net_device *netdev = vsi->netdev;
struct i40e_ddp_old_profile_list *entry;
- struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
int status = 0;
if (!list_empty(&pf->ddp_old_prof)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index f9ba45f596c9..abf624d770e6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -53,6 +53,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
+ struct i40e_vsi *main_vsi;
int bytes_not_copied;
int buf_size = 256;
char *buf;
@@ -68,8 +69,8 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
if (!buf)
return -ENOSPC;
- len = snprintf(buf, buf_size, "%s: %s\n",
- pf->vsi[pf->lan_vsi]->netdev->name,
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
i40e_dbg_command_buf);
bytes_not_copied = copy_to_user(buffer, buf, len);
@@ -128,7 +129,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" state[%d] = %08lx\n",
i, vsi->state[i]);
- if (vsi == pf->vsi[pf->lan_vsi])
+ if (vsi->type == I40E_VSI_MAIN)
dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n",
pf->hw.mac.addr,
pf->hw.mac.port_addr);
@@ -786,7 +787,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
if (cnt == 0) {
/* default to PF VSI */
- vsi_seid = pf->vsi[pf->lan_vsi]->seid;
+ vsi = i40e_pf_get_main_vsi(pf);
+ vsi_seid = vsi->seid;
} else if (vsi_seid < 0) {
dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n",
vsi_seid);
@@ -867,7 +869,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, enabled_tc);
+ veb = i40e_veb_setup(pf, uplink_seid, vsi_seid, enabled_tc);
if (veb)
dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid);
else
@@ -1030,7 +1032,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
- vsi = pf->vsi[pf->lan_vsi];
+ vsi = i40e_pf_get_main_vsi(pf);
switch_id =
le16_to_cpu(vsi->info.switch_id) &
I40E_AQ_VSI_SW_ID_MASK;
@@ -1380,6 +1382,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
i40e_get_current_fd_count(pf));
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
+ /* Get main VSI */
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
@@ -1391,10 +1396,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
goto command_write_done;
}
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
- pf->hw.mac.addr,
- ETH_P_LLDP, 0,
- pf->vsi[pf->lan_vsi]->seid,
- 0, true, NULL, NULL);
+ pf->hw.mac.addr, ETH_P_LLDP, 0,
+ main_vsi->seid, 0, true, NULL,
+ NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"%s: Add Control Packet Filter AQ command failed =0x%x\n",
@@ -1409,10 +1413,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
int ret;
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
- pf->hw.mac.addr,
- ETH_P_LLDP, 0,
- pf->vsi[pf->lan_vsi]->seid,
- 0, false, NULL, NULL);
+ pf->hw.mac.addr, ETH_P_LLDP, 0,
+ main_vsi->seid, 0, false, NULL,
+ NULL);
if (ret) {
dev_info(&pf->pdev->dev,
"%s: Remove Control Packet Filter AQ command failed =0x%x\n",
@@ -1639,6 +1642,7 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
size_t count, loff_t *ppos)
{
struct i40e_pf *pf = filp->private_data;
+ struct i40e_vsi *main_vsi;
int bytes_not_copied;
int buf_size = 256;
char *buf;
@@ -1654,8 +1658,8 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer,
if (!buf)
return -ENOSPC;
- len = snprintf(buf, buf_size, "%s: %s\n",
- pf->vsi[pf->lan_vsi]->netdev->name,
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name,
i40e_dbg_netdev_ops_buf);
bytes_not_copied = copy_to_user(buffer, buf, len);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 42e7e6cdaa6d..1d0d2e526adb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -1241,7 +1241,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev,
i40e_partition_setting_complaint(pf);
return -EOPNOTSUPP;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
@@ -1710,7 +1710,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
return -EOPNOTSUPP;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED;
@@ -2029,7 +2029,7 @@ static void i40e_get_ringparam(struct net_device *netdev,
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_pf *pf = np->vsi->back;
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
ring->rx_max_pending = i40e_get_max_num_descriptors(pf);
ring->tx_max_pending = i40e_get_max_num_descriptors(pf);
@@ -2292,7 +2292,7 @@ static int i40e_get_stats_count(struct net_device *netdev)
struct i40e_pf *pf = vsi->back;
int stats_len;
- if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1)
+ if (vsi->type == I40E_VSI_MAIN && pf->hw.partition_id == 1)
stats_len = I40E_PF_STATS_LEN;
else
stats_len = I40E_VSI_STATS_LEN;
@@ -2422,17 +2422,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
}
rcu_read_unlock();
- if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+ if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1)
goto check_data_pointer;
- veb_stats = ((pf->lan_veb != I40E_NO_VEB) &&
- (pf->lan_veb < I40E_MAX_VEB) &&
- test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags));
+ veb = i40e_pf_get_main_veb(pf);
+ veb_stats = veb && test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags);
- if (veb_stats) {
- veb = pf->veb[pf->lan_veb];
+ if (veb_stats)
i40e_update_veb_stats(veb);
- }
/* If veb stats aren't enabled, pass NULL instead of the veb so that
* we initialize stats to zero and update the data pointer
@@ -2495,7 +2492,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data)
"rx", i);
}
- if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1)
+ if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1)
goto check_data_pointer;
i40e_add_stat_strings(&data, i40e_gstrings_veb_stats);
@@ -2549,7 +2546,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
}
static int i40e_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
@@ -2792,7 +2789,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
}
- if (vsi != pf->vsi[pf->lan_vsi])
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
/* NVM bit on means WoL disabled for the port */
@@ -3370,6 +3367,7 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
struct i40e_rx_flow_userdef userdef = {0};
struct i40e_fdir_filter *rule = NULL;
struct hlist_node *node2;
+ struct i40e_vsi *vsi;
u64 input_set;
u16 index;
@@ -3493,9 +3491,8 @@ no_input_set:
fsp->flow_type |= FLOW_EXT;
}
- if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) {
- struct i40e_vsi *vsi;
-
+ vsi = i40e_pf_get_main_vsi(pf);
+ if (rule->dest_vsi != vsi->id) {
vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi);
if (vsi && vsi->type == I40E_VSI_SRIOV) {
/* VFs are zero-indexed by the driver, but ethtool
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ffb9f9f15c52..cbcfada7b357 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -98,8 +98,8 @@ static int debug = -1;
module_param(debug, uint, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)");
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
+MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
static struct workqueue_struct *i40e_wq;
@@ -989,7 +989,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
ns->tx_dropped = es->tx_discards;
/* pull in a couple PF stats if this is the main vsi */
- if (vsi == pf->vsi[pf->lan_vsi]) {
+ if (vsi->type == I40E_VSI_MAIN) {
ns->rx_crc_errors = pf->stats.crc_errors;
ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
ns->rx_length_errors = pf->stats.rx_length_errors;
@@ -1234,7 +1234,7 @@ void i40e_update_stats(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- if (vsi == pf->vsi[pf->lan_vsi])
+ if (vsi->type == I40E_VSI_MAIN)
i40e_update_pf_stats(pf);
i40e_update_vsi_stats(vsi);
@@ -2475,12 +2475,12 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
**/
static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
int aq_ret;
if (vsi->type == I40E_VSI_MAIN &&
- pf->lan_veb != I40E_NO_VEB &&
+ i40e_pf_get_main_veb(pf) &&
!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) {
/* set defport ON for Main VSI instead of true promisc
* this way we will get all unicast/multicast and VLAN
@@ -2960,7 +2960,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
i40e_vsi_reinit_locked(vsi);
set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
@@ -4322,7 +4322,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
/* We do not have a way to disarm Queue causes while leaving
@@ -5472,7 +5472,7 @@ static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
**/
static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
u8 num_tc = vsi->mqprio_qopt.qopt.num_tc;
u8 enabled_tc = 1, i;
@@ -5489,13 +5489,14 @@ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf)
**/
static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
u8 i, enabled_tc = 1;
u8 num_tc = 0;
- struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
- if (i40e_is_tc_mqprio_enabled(pf))
- return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc;
+ if (i40e_is_tc_mqprio_enabled(pf)) {
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+
+ return vsi->mqprio_qopt.qopt.num_tc;
+ }
/* If neither MQPRIO nor DCB is enabled, then always use single TC */
if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags))
@@ -5503,7 +5504,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
/* SFP mode will be enabled for all TCs on port */
if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags))
- return i40e_dcb_get_num_tc(dcbcfg);
+ return i40e_dcb_get_num_tc(&pf->hw.local_dcbx_config);
/* MFP mode return count of enabled TCs for this PF */
if (pf->hw.func_caps.iscsi)
@@ -5916,6 +5917,28 @@ out:
}
/**
+ * i40e_vsi_reconfig_tc - Reconfigure VSI Tx Scheduler for stored TC map
+ * @vsi: VSI to be reconfigured
+ *
+ * This reconfigures a particular VSI for TCs that are mapped to the
+ * TC bitmap stored previously for the VSI.
+ *
+ * Context: It is expected that the VSI queues have been quiesced before
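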
+ * calling this function.
+ *
+ * Return: 0 on success, negative value on failure
+ **/
+static int i40e_vsi_reconfig_tc(struct i40e_vsi *vsi)
+{
+ u8 enabled_tc;
+
+ enabled_tc = vsi->tc_config.enabled_tc;
+ vsi->tc_config.enabled_tc = 0;
+
+ return i40e_vsi_config_tc(vsi, enabled_tc);
+}
+
+/**
* i40e_get_link_speed - Returns link speed for the interface
* @vsi: VSI to be configured
*
@@ -6477,6 +6500,7 @@ static inline int i40e_setup_hw_channel(struct i40e_pf *pf,
static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
struct i40e_channel *ch)
{
+ struct i40e_vsi *main_vsi;
u8 vsi_type;
u16 seid;
int ret;
@@ -6490,7 +6514,8 @@ static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi,
}
/* underlying switching element */
- seid = pf->vsi[pf->lan_vsi]->uplink_seid;
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ seid = main_vsi->uplink_seid;
/* create channel (VSI), configure TX rings */
ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type);
@@ -6808,7 +6833,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
/* - Enable all TCs for the LAN VSI
* - For all others keep them at TC0 for now
*/
- if (v == pf->lan_vsi)
+ if (vsi->type == I40E_VSI_MAIN)
tc_map = i40e_pf_get_tc_map(pf);
else
tc_map = I40E_DEFAULT_TRAFFIC_CLASS;
@@ -7047,7 +7072,9 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg)
/* Configure Rx Packet Buffers in HW */
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
- mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu;
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+
+ mfs_tc[i] = main_vsi->netdev->mtu;
mfs_tc[i] += I40E_PACKET_HDR_PAD;
}
@@ -8643,6 +8670,10 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -9113,7 +9144,7 @@ err_setup_rx:
i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
i40e_vsi_free_tx_resources(vsi);
- if (vsi == pf->vsi[pf->lan_vsi])
+ if (vsi->type == I40E_VSI_MAIN)
i40e_do_reset(pf, I40E_PF_RESET_FLAG, true);
return err;
@@ -9804,7 +9835,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
} else {
/* replay sideband filters */
- i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
+ i40e_fdir_filter_restore(i40e_pf_get_main_vsi(pf));
if (!disable_atr && !pf->fd_tcp4_filter_cnt)
clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state);
clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state);
@@ -9902,7 +9933,8 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
**/
static void i40e_link_event(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+ struct i40e_veb *veb = i40e_pf_get_main_veb(pf);
u8 new_link_speed, old_link_speed;
bool new_link, old_link;
int status;
@@ -9942,8 +9974,8 @@ static void i40e_link_event(struct i40e_pf *pf)
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
*/
- if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
- i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
+ if (veb)
+ i40e_veb_link_event(veb, new_link);
else
i40e_vsi_link_event(vsi, new_link);
@@ -10273,7 +10305,7 @@ static void i40e_verify_eeprom(struct i40e_pf *pf)
**/
static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_vsi_context ctxt;
int ret;
@@ -10309,7 +10341,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
**/
static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_vsi_context ctxt;
int ret;
@@ -10385,7 +10417,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (veb->uplink_seid == pf->mac_seid) {
/* Check that the LAN VSI has VEB owning flag set */
- ctl_vsi = pf->vsi[pf->lan_vsi];
+ ctl_vsi = i40e_pf_get_main_vsi(pf);
if (WARN_ON(ctl_vsi->veb_idx != veb->idx ||
!(ctl_vsi->flags & I40E_VSI_FLAG_VEB_OWNER))) {
@@ -10528,7 +10560,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
**/
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi;
+ struct i40e_vsi *main_vsi, *vsi;
/* quick workaround for an NVM issue that leaves a critical register
* uninitialized
@@ -10553,8 +10585,8 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
/* create a new VSI if none exists */
if (!vsi) {
- vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
- pf->vsi[pf->lan_vsi]->seid, 0);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, main_vsi->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags);
@@ -10833,7 +10865,7 @@ static int i40e_reset(struct i40e_pf *pf)
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf);
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
struct i40e_veb *veb;
int ret;
@@ -10842,7 +10874,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
is_recovery_mode_reported)
- i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev);
+ i40e_set_ethtool_ops(vsi->netdev);
if (test_bit(__I40E_DOWN, pf->state) &&
!test_bit(__I40E_RECOVERY_MODE, pf->state))
@@ -11138,6 +11170,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
ret = i40e_reset(pf);
if (!ret)
i40e_rebuild(pf, reinit, lock_acquired);
+ else
+ dev_err(&pf->pdev->dev, "%s: i40e_reset() FAILED", __func__);
}
/**
@@ -11266,7 +11300,7 @@ static void i40e_service_task(struct work_struct *work)
return;
if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) {
- i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]);
+ i40e_detect_recover_hung(pf);
i40e_sync_filters_subtask(pf);
i40e_reset_subtask(pf);
i40e_handle_mdd_event(pf);
@@ -11275,14 +11309,12 @@ static void i40e_service_task(struct work_struct *work)
i40e_fdir_reinit_subtask(pf);
if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) {
/* Client subtask will reopen next time through. */
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi],
- true);
+ i40e_notify_client_of_netdev_close(pf, true);
} else {
i40e_client_subtask(pf);
if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE,
pf->state))
- i40e_notify_client_of_l2_param_changes(
- pf->vsi[pf->lan_vsi]);
+ i40e_notify_client_of_l2_param_changes(pf);
}
i40e_sync_filters_subtask(pf);
} else {
@@ -11990,7 +12022,7 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
/* if not MSIX, give the one vector only to the LAN VSI */
if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags))
num_q_vectors = vsi->num_q_vectors;
- else if (vsi == pf->vsi[pf->lan_vsi])
+ else if (vsi->type == I40E_VSI_MAIN)
num_q_vectors = 1;
else
return -EINVAL;
@@ -12396,7 +12428,7 @@ void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut,
**/
static int i40e_pf_config_rss(struct i40e_pf *pf)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
u8 seed[I40E_HKEY_ARRAY_SIZE];
u8 *lut;
struct i40e_hw *hw = &pf->hw;
@@ -12468,7 +12500,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
**/
int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
{
- struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
int new_rss_size;
if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags))
@@ -13107,7 +13139,7 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
int rem;
/* Only for PF VSI for now */
- if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
/* Find the HW bridge for PF VSI */
@@ -13117,20 +13149,16 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- __u16 mode;
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
+ __u16 mode = nla_get_u16(attr);
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
- mode = nla_get_u16(attr);
if ((mode != BRIDGE_MODE_VEPA) &&
(mode != BRIDGE_MODE_VEB))
return -EINVAL;
/* Insert a new HW bridge */
if (!veb) {
- veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid,
vsi->tc_config.enabled_tc);
if (veb) {
veb->bridge_mode = mode;
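
The iterator adopted above is the generic netlink helper; its presumed in-tree definition (include/net/netlink.h) shows why the open-coded nla_type() filter could be dropped:

	/* Presumed definition, quoted for illustration: visit only nested
	 * attributes of the requested type.
	 */
	#define nla_for_each_nested_type(pos, type, nla, rem) \
		nla_for_each_nested(pos, nla, rem) \
			if (nla_type(pos) == type)
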
@@ -13179,7 +13207,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct i40e_veb *veb;
/* Only for PF VSI for now */
- if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
+ if (vsi->type != I40E_VSI_MAIN)
return -EOPNOTSUPP;
/* Find the HW bridge for the PF VSI */
@@ -13264,6 +13292,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
bool need_reset;
int i;
+ /* VSI shall be deleted in a moment, block loading new programs */
+ if (prog && test_bit(__I40E_IN_REMOVE, pf->state))
+ return -EINVAL;
+
/* Don't allow frames that span over multiple buffers */
if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) {
NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags");
@@ -13272,14 +13304,9 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
/* When turning XDP on->off/off->on we reset and rebuild the rings. */
need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog);
-
if (need_reset)
i40e_prep_for_reset(pf);
- /* VSI shall be deleted in a moment, just return EINVAL */
- if (test_bit(__I40E_IN_REMOVE, pf->state))
- return -EINVAL;
-
old_prog = xchg(&vsi->xdp_prog, prog);
if (need_reset) {
@@ -13761,9 +13788,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
* the end, which is 4 bytes long, so force truncation of the
* original name by IFNAMSIZ - 4
*/
- snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d",
- IFNAMSIZ - 4,
- pf->vsi[pf->lan_vsi]->netdev->name);
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+
+ snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4,
+ main_vsi->netdev->name);
eth_random_addr(mac_addr);
spin_lock_bh(&vsi->mac_filter_hash_lock);
@@ -14130,8 +14158,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
vsi->seid, vsi->uplink_seid);
return -ENODEV;
}
- if (vsi == pf->vsi[pf->lan_vsi] &&
- !test_bit(__I40E_DOWN, pf->state)) {
+ if (vsi->type == I40E_VSI_MAIN && !test_bit(__I40E_DOWN, pf->state)) {
dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
return -ENODEV;
}
@@ -14275,9 +14302,9 @@ vector_setup_out:
**/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
+ struct i40e_vsi *main_vsi;
u16 alloc_queue_pairs;
struct i40e_pf *pf;
- u8 enabled_tc;
int ret;
if (!vsi)
@@ -14309,10 +14336,10 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
/* Update the FW view of the VSI. Force a reset of TC and queue
* layout configurations.
*/
- enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
- pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
- pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
- i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ main_vsi->seid = pf->main_vsi_seid;
+ i40e_vsi_reconfig_tc(main_vsi);
+
if (vsi->type == I40E_VSI_MAIN)
i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
@@ -14386,13 +14413,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
}
if (vsi->uplink_seid == pf->mac_seid)
- veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
+ veb = i40e_veb_setup(pf, pf->mac_seid, vsi->seid,
vsi->tc_config.enabled_tc);
else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
- veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
+ veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid,
vsi->tc_config.enabled_tc);
if (veb) {
- if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
+ if (vsi->type != I40E_VSI_MAIN) {
dev_info(&vsi->back->pdev->dev,
"New VSI creation error, uplink seid of LAN VSI expected.\n");
return NULL;
@@ -14783,7 +14810,6 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
/**
* i40e_veb_setup - Set up a VEB
* @pf: board private structure
- * @flags: VEB setup flags
* @uplink_seid: the switch element to link to
* @vsi_seid: the initial VSI seid
* @enabled_tc: Enabled TC bit-map
@@ -14796,9 +14822,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
* Returns pointer to the successfully allocated VEB sw struct on
* success, otherwise returns NULL on failure.
**/
-struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
- u16 uplink_seid, u16 vsi_seid,
- u8 enabled_tc)
+struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 uplink_seid,
+ u16 vsi_seid, u8 enabled_tc)
{
struct i40e_vsi *vsi = NULL;
struct i40e_veb *veb;
@@ -14829,7 +14854,6 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
if (veb_idx < 0)
goto err_alloc;
veb = pf->veb[veb_idx];
- veb->flags = flags;
veb->uplink_seid = uplink_seid;
veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);
@@ -14881,7 +14905,8 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
/* Main VEB? */
if (uplink_seid != pf->mac_seid)
break;
- if (pf->lan_veb >= I40E_MAX_VEB) {
+ veb = i40e_pf_get_main_veb(pf);
+ if (!veb) {
int v;
/* find existing or else empty VEB */
@@ -14895,12 +14920,15 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
pf->lan_veb = v;
}
}
- if (pf->lan_veb >= I40E_MAX_VEB)
+
+ /* Try to get the main VEB again as pf->lan_veb may have changed */
+ veb = i40e_pf_get_main_veb(pf);
+ if (!veb)
break;
- pf->veb[pf->lan_veb]->seid = seid;
- pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
- pf->veb[pf->lan_veb]->pf = pf;
+ veb->seid = seid;
+ veb->uplink_seid = pf->mac_seid;
+ veb->pf = pf;
break;
case I40E_SWITCH_ELEMENT_TYPE_VSI:
if (num_reported != 1)
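The VEB lookups get the same accessor treatment as the VSI ones; judging from the pf->lan_veb >= I40E_MAX_VEB bound checks it replaces, i40e_pf_get_main_veb() plausibly reduces to this sketch (actual definition in i40e.h):

/* Hypothetical sketch, inferred from the replaced bound checks. */
static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf)
{
	return pf->lan_veb < I40E_MAX_VEB ? pf->veb[pf->lan_veb] : NULL;
}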
@@ -14998,6 +15026,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
**/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
{
+ struct i40e_vsi *main_vsi;
u16 flags = 0;
int ret;
@@ -15042,22 +15071,25 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
}
/* first time setup */
- if (pf->lan_vsi == I40E_NO_VSI || reinit) {
- struct i40e_vsi *vsi = NULL;
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ if (!main_vsi || reinit) {
+ struct i40e_veb *veb;
u16 uplink_seid;
/* Set up the PF VSI associated with the PF's main VSI
* that is already in the HW switch
*/
- if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb])
- uplink_seid = pf->veb[pf->lan_veb]->seid;
+ veb = i40e_pf_get_main_veb(pf);
+ if (veb)
+ uplink_seid = veb->seid;
else
uplink_seid = pf->mac_seid;
- if (pf->lan_vsi == I40E_NO_VSI)
- vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
+ if (!main_vsi)
+ main_vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN,
+ uplink_seid, 0);
else if (reinit)
- vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
- if (!vsi) {
+ main_vsi = i40e_vsi_reinit_setup(main_vsi);
+ if (!main_vsi) {
dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
i40e_cloud_filter_exit(pf);
i40e_fdir_teardown(pf);
@@ -15065,13 +15097,10 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
}
} else {
/* force a reset of TC and queue layout configurations */
- u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
-
- pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
- pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
- i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ main_vsi->seid = pf->main_vsi_seid;
+ i40e_vsi_reconfig_tc(main_vsi);
}
- i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);
+ i40e_vlan_stripping_disable(main_vsi);
i40e_fdir_sb_setup(pf);
@@ -15098,7 +15127,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui
rtnl_lock();
/* repopulate tunnel port filters */
- udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
+ udp_tunnel_nic_reset_ntf(main_vsi->netdev);
if (!lock_acquired)
rtnl_unlock();
@@ -15242,6 +15271,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
#define REMAIN(__x) (INFO_STRING_LEN - (__x))
static void i40e_print_features(struct i40e_pf *pf)
{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
char *buf;
int i;
@@ -15255,8 +15285,7 @@ static void i40e_print_features(struct i40e_pf *pf)
i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
- pf->hw.func_caps.num_vsis,
- pf->vsi[pf->lan_vsi]->num_queue_pairs);
+ pf->hw.func_caps.num_vsis, main_vsi->num_queue_pairs);
if (test_bit(I40E_FLAG_RSS_ENA, pf->flags))
i += scnprintf(&buf[i], REMAIN(i), " RSS");
if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags))
@@ -15920,7 +15949,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
- INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list);
+
+ vsi = i40e_pf_get_main_vsi(pf);
+ INIT_LIST_HEAD(&vsi->ch_list);
/* if FDIR VSI was set up, start it now */
vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR);
@@ -16223,7 +16254,7 @@ static void i40e_remove(struct pci_dev *pdev)
/* Client close must be called explicitly here because the timer
* has been stopped.
*/
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+ i40e_notify_client_of_netdev_close(pf, false);
i40e_fdir_teardown(pf);
@@ -16304,6 +16335,139 @@ unmap:
}
/**
+ * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
+ * using the mac_address_write admin q function
+ * @pf: pointer to i40e_pf struct
+ **/
+static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
+{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
+ struct i40e_hw *hw = &pf->hw;
+ u8 mac_addr[ETH_ALEN];
+ u16 flags = 0;
+ int ret;
+
+ /* Get current MAC address in case it's an LAA */
+ if (main_vsi && main_vsi->netdev) {
+ ether_addr_copy(mac_addr, main_vsi->netdev->dev_addr);
+ } else {
+ dev_err(&pf->pdev->dev,
+ "Failed to retrieve MAC address; using default\n");
+ ether_addr_copy(mac_addr, hw->mac.addr);
+ }
+
+ /* The FW expects the mac address write cmd to first be called with
+ * one of these flags before calling it again with the multicast
+ * enable flags.
+ */
+ flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
+
+ if (hw->func_caps.flex10_enable && hw->partition_id != 1)
+ flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
+
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
+ return;
+ }
+
+ flags = I40E_AQC_MC_MAG_EN
+ | I40E_AQC_WOL_PRESERVE_ON_PFR
+ | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
+ ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "Failed to enable Multicast Magic Packet wake up\n");
+}
+
+/**
+ * i40e_io_suspend - suspend all IO operations
+ * @pf: pointer to i40e_pf struct
+ *
+ * Return: 0
+ **/
+static int i40e_io_suspend(struct i40e_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+
+ set_bit(__I40E_DOWN, pf->state);
+
+ /* Ensure service task will not be running */
+ del_timer_sync(&pf->service_timer);
+ cancel_work_sync(&pf->service_task);
+
+ /* Client close must be called explicitly here because the timer
+ * has been stopped.
+ */
+ i40e_notify_client_of_netdev_close(pf, false);
+
+ if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
+ pf->wol_en)
+ i40e_enable_mc_magic_wake(pf);
+
+ /* Since we're going to destroy queues during the
+ * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
+ * whole section
+ */
+ rtnl_lock();
+
+ i40e_prep_for_reset(pf);
+
+ wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
+ wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+
+ /* Clear the interrupt scheme and release our IRQs so that the system
+ * can safely hibernate even when there are a large number of CPUs.
+ * Otherwise hibernation might fail when mapping all the vectors back
+ * to CPU0.
+ */
+ i40e_clear_interrupt_scheme(pf);
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+/**
+ * i40e_io_resume - resume IO operations
+ * @pf: pointer to i40e_pf struct
+ *
+ * Return: 0
+ **/
+static int i40e_io_resume(struct i40e_pf *pf)
+{
+ struct device *dev = &pf->pdev->dev;
+ int err;
+
+ /* We need to hold the RTNL lock prior to restoring interrupt schemes,
+ * since we're going to be restoring queues
+ */
+ rtnl_lock();
+
+ /* We cleared the interrupt scheme when we suspended, so we need to
+ * restore it now to resume device functionality.
+ */
+ err = i40e_restore_interrupt_scheme(pf);
+ if (err)
+ dev_err(dev, "Cannot restore interrupt scheme: %d\n", err);
+
+ clear_bit(__I40E_DOWN, pf->state);
+ i40e_reset_and_rebuild(pf, false, true);
+
+ rtnl_unlock();
+
+ /* Clear suspended state last after everything is recovered */
+ clear_bit(__I40E_SUSPENDED, pf->state);
+
+ /* Restart the service task */
+ mod_timer(&pf->service_timer,
+ round_jiffies(jiffies + pf->service_timer_period));
+
+ return 0;
+}
+
+/**
* i40e_pci_error_detected - warning that something funky happened in PCI land
* @pdev: PCI device information struct
* @error: the type of PCI error
@@ -16327,7 +16491,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
/* shutdown all operations */
if (!test_bit(__I40E_SUSPENDED, pf->state))
- i40e_prep_for_reset(pf);
+ i40e_io_suspend(pf);
/* Request a slot reset */
return PCI_ERS_RESULT_NEED_RESET;
@@ -16349,7 +16513,8 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
u32 reg;
dev_dbg(&pdev->dev, "%s\n", __func__);
- if (pci_enable_device_mem(pdev)) {
+ /* enable I/O and memory of the device */
+ if (pci_enable_device(pdev)) {
dev_info(&pdev->dev,
"Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
@@ -16412,54 +16577,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev)
if (test_bit(__I40E_SUSPENDED, pf->state))
return;
- i40e_handle_reset_warning(pf, false);
-}
-
-/**
- * i40e_enable_mc_magic_wake - enable multicast magic packet wake up
- * using the mac_address_write admin q function
- * @pf: pointer to i40e_pf struct
- **/
-static void i40e_enable_mc_magic_wake(struct i40e_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- u8 mac_addr[6];
- u16 flags = 0;
- int ret;
-
- /* Get current MAC address in case it's an LAA */
- if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) {
- ether_addr_copy(mac_addr,
- pf->vsi[pf->lan_vsi]->netdev->dev_addr);
- } else {
- dev_err(&pf->pdev->dev,
- "Failed to retrieve MAC address; using default\n");
- ether_addr_copy(mac_addr, hw->mac.addr);
- }
-
- /* The FW expects the mac address write cmd to first be called with
- * one of these flags before calling it again with the multicast
- * enable flags.
- */
- flags = I40E_AQC_WRITE_TYPE_LAA_WOL;
-
- if (hw->func_caps.flex10_enable && hw->partition_id != 1)
- flags = I40E_AQC_WRITE_TYPE_LAA_ONLY;
-
- ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up");
- return;
- }
-
- flags = I40E_AQC_MC_MAG_EN
- | I40E_AQC_WOL_PRESERVE_ON_PFR
- | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG;
- ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL);
- if (ret)
- dev_err(&pf->pdev->dev,
- "Failed to enable Multicast Magic Packet wake up\n");
+ i40e_io_resume(pf);
}
/**
@@ -16482,7 +16600,7 @@ static void i40e_shutdown(struct pci_dev *pdev)
/* Client close must be called explicitly here because the timer
* has been stopped.
*/
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
+ i40e_notify_client_of_netdev_close(pf, false);
if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
pf->wol_en)
@@ -16518,93 +16636,28 @@ static void i40e_shutdown(struct pci_dev *pdev)
* i40e_suspend - PM callback for moving to D3
* @dev: generic device information structure
**/
-static int __maybe_unused i40e_suspend(struct device *dev)
+static int i40e_suspend(struct device *dev)
{
struct i40e_pf *pf = dev_get_drvdata(dev);
- struct i40e_hw *hw = &pf->hw;
/* If we're already suspended, then there is nothing to do */
if (test_and_set_bit(__I40E_SUSPENDED, pf->state))
return 0;
-
- set_bit(__I40E_DOWN, pf->state);
-
- /* Ensure service task will not be running */
- del_timer_sync(&pf->service_timer);
- cancel_work_sync(&pf->service_task);
-
- /* Client close must be called explicitly here because the timer
- * has been stopped.
- */
- i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false);
-
- if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) &&
- pf->wol_en)
- i40e_enable_mc_magic_wake(pf);
-
- /* Since we're going to destroy queues during the
- * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
- * whole section
- */
- rtnl_lock();
-
- i40e_prep_for_reset(pf);
-
- wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
- wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
-
- /* Clear the interrupt scheme and release our IRQs so that the system
- * can safely hibernate even when there are a large number of CPUs.
- * Otherwise hibernation might fail when mapping all the vectors back
- * to CPU0.
- */
- i40e_clear_interrupt_scheme(pf);
-
- rtnl_unlock();
-
- return 0;
+ return i40e_io_suspend(pf);
}
/**
* i40e_resume - PM callback for waking up from D3
* @dev: generic device information structure
**/
-static int __maybe_unused i40e_resume(struct device *dev)
+static int i40e_resume(struct device *dev)
{
struct i40e_pf *pf = dev_get_drvdata(dev);
- int err;
/* If we're not suspended, then there is nothing to do */
if (!test_bit(__I40E_SUSPENDED, pf->state))
return 0;
-
- /* We need to hold the RTNL lock prior to restoring interrupt schemes,
- * since we're going to be restoring queues
- */
- rtnl_lock();
-
- /* We cleared the interrupt scheme when we suspended, so we need to
- * restore it now to resume device functionality.
- */
- err = i40e_restore_interrupt_scheme(pf);
- if (err) {
- dev_err(dev, "Cannot restore interrupt scheme: %d\n",
- err);
- }
-
- clear_bit(__I40E_DOWN, pf->state);
- i40e_reset_and_rebuild(pf, false, true);
-
- rtnl_unlock();
-
- /* Clear suspended state last after everything is recovered */
- clear_bit(__I40E_SUSPENDED, pf->state);
-
- /* Restart the service task */
- mod_timer(&pf->service_timer,
- round_jiffies(jiffies + pf->service_timer_period));
-
- return 0;
+ return i40e_io_resume(pf);
}
static const struct pci_error_handlers i40e_err_handler = {
@@ -16615,16 +16668,14 @@ static const struct pci_error_handlers i40e_err_handler = {
.resume = i40e_pci_error_resume,
};
-static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume);
static struct pci_driver i40e_driver = {
.name = i40e_driver_name,
.id_table = i40e_pci_tbl,
.probe = i40e_probe,
.remove = i40e_remove,
- .driver = {
- .pm = &i40e_pm_ops,
- },
+ .driver.pm = pm_sleep_ptr(&i40e_pm_ops),
.shutdown = i40e_shutdown,
.err_handler = &i40e_err_handler,
.sriov_configure = i40e_pci_sriov_configure,
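The PM-ops hunk above is the stock modern conversion: DEFINE_SIMPLE_DEV_PM_OPS() defines the ops unconditionally, pm_sleep_ptr() compiles the reference down to NULL when CONFIG_PM_SLEEP is disabled, and the then-dead callbacks are discarded by the compiler, which is why the __maybe_unused annotations could be dropped. The generic pattern, with placeholder names:

/* "foo" is a placeholder driver, not i40e code. */
static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	/* ... */
	.driver.pm = pm_sleep_ptr(&foo_pm_ops),
};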
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 605fd82f5d20..7f0936f4e05e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -734,37 +734,7 @@ int i40e_validate_nvm_checksum(struct i40e_hw *hw,
return ret_code;
}
-static int i40e_nvmupd_state_init(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_state_reading(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_state_writing(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *errno);
-static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno);
-static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno);
-static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno);
-static inline u8 i40e_nvmupd_get_module(u32 val)
+static u8 i40e_nvmupd_get_module(u32 val)
{
return (u8)(val & I40E_NVM_MOD_PNT_MASK);
}
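For orientation in the refactored dispatch below: cmd->config packs a module pointer in its low byte (masked here by I40E_NVM_MOD_PNT_MASK) alongside transaction bits extracted by i40e_nvmupd_get_transaction(). Composing one schematically (the shift name comes from i40e_type.h; treat the exact layout as an assumption):

/* "Single access" read transaction addressing module 0. */
u8 module = 0;	/* module pointer, low byte */
u32 config = (I40E_NVM_SA << I40E_NVM_TRANS_SHIFT) | module;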
@@ -799,121 +769,408 @@ static const char * const i40e_nvm_update_state_str[] = {
};
/**
- * i40e_nvmupd_command - Process an NVM update command
+ * i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command
- * @bytes: pointer to the data buffer
+ * @cmd: pointer to nvm update command buffer
* @perrno: pointer to return error code
*
- * Dispatches command depending on what update state is current
+ * Return one of the valid command types or I40E_NVMUPD_INVALID
**/
-int i40e_nvmupd_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+static enum i40e_nvmupd_cmd
+i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd,
+ int *perrno)
{
enum i40e_nvmupd_cmd upd_cmd;
- int status;
-
- /* assume success */
- *perrno = 0;
+ u8 module, transaction;
- /* early check for status command and debug msgs */
- upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
+ /* anything that doesn't match a recognized case is an error */
+ upd_cmd = I40E_NVMUPD_INVALID;
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
- i40e_nvm_update_state_str[upd_cmd],
- hw->nvmupd_state,
- hw->nvm_release_on_done, hw->nvm_wait_opcode,
- cmd->command, cmd->config, cmd->offset, cmd->data_size);
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
- if (upd_cmd == I40E_NVMUPD_INVALID) {
- *perrno = -EFAULT;
+ /* limits on data size */
+ if (cmd->data_size < 1 || cmd->data_size > I40E_NVMUPD_MAX_DATA) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_validate_command returns %d errno %d\n",
- upd_cmd, *perrno);
+ "%s data_size %d\n", __func__, cmd->data_size);
+ *perrno = -EFAULT;
+ return I40E_NVMUPD_INVALID;
}
- /* a status request returns immediately rather than
- * going into the state machine
- */
- if (upd_cmd == I40E_NVMUPD_STATUS) {
- if (!cmd->data_size) {
- *perrno = -EFAULT;
- return -EINVAL;
+ switch (cmd->command) {
+ case I40E_NVM_READ:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_READ_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_READ_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_READ_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_READ_SA;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0xf)
+ upd_cmd = I40E_NVMUPD_STATUS;
+ else if (module == 0)
+ upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+ break;
+ case I40E_NVM_AQE:
+ upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
+ break;
}
+ break;
- bytes[0] = hw->nvmupd_state;
-
- if (cmd->data_size >= 4) {
- bytes[1] = 0;
- *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ case I40E_NVM_WRITE:
+ switch (transaction) {
+ case I40E_NVM_CON:
+ upd_cmd = I40E_NVMUPD_WRITE_CON;
+ break;
+ case I40E_NVM_SNT:
+ upd_cmd = I40E_NVMUPD_WRITE_SNT;
+ break;
+ case I40E_NVM_LCB:
+ upd_cmd = I40E_NVMUPD_WRITE_LCB;
+ break;
+ case I40E_NVM_SA:
+ upd_cmd = I40E_NVMUPD_WRITE_SA;
+ break;
+ case I40E_NVM_ERA:
+ upd_cmd = I40E_NVMUPD_WRITE_ERA;
+ break;
+ case I40E_NVM_CSUM:
+ upd_cmd = I40E_NVMUPD_CSUM_CON;
+ break;
+ case (I40E_NVM_CSUM | I40E_NVM_SA):
+ upd_cmd = I40E_NVMUPD_CSUM_SA;
+ break;
+ case (I40E_NVM_CSUM | I40E_NVM_LCB):
+ upd_cmd = I40E_NVMUPD_CSUM_LCB;
+ break;
+ case I40E_NVM_EXEC:
+ if (module == 0)
+ upd_cmd = I40E_NVMUPD_EXEC_AQ;
+ break;
}
+ break;
+ }
- /* Clear error status on read */
- if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ return upd_cmd;
+}
- return 0;
+/**
+ * i40e_nvmupd_nvm_erase - Erase an NVM module
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ int status = 0;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s mod 0x%x off 0x%x len 0x%x\n",
+ __func__, module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s status %d aq %d\n",
+ __func__, status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
- /* Clear status even it is not read and log */
- if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_write - Write NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ u8 preservation_flags;
+ int status = 0;
+ bool last;
+
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction & I40E_NVM_LCB);
+ preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_update_nvm(hw, module, cmd->offset,
+ (u16)cmd->data_size, bytes, last,
+ preservation_flags, &cmd_details);
+ if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ "%s mod 0x%x off 0x%x len 0x%x\n",
+ __func__, module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s status %d aq %d\n",
+ __func__, status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
- /* Acquire lock to prevent race condition where adminq_task
- * can execute after i40e_nvmupd_nvm_read/write but before state
- * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
- *
- * During NVMUpdate, it is observed that lock could be held for
- * ~5ms for most commands. However lock is held for ~60ms for
- * NVMUPD_CSUM_LCB command.
- */
- mutex_lock(&hw->aq.arq_mutex);
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT:
- status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
- break;
+ return status;
+}
- case I40E_NVMUPD_STATE_READING:
- status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
- break;
+/**
+ * i40e_nvmupd_nvm_read - Read NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ u8 module, transaction;
+ int status;
+ bool last;
- case I40E_NVMUPD_STATE_WRITING:
- status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
- break;
+ transaction = i40e_nvmupd_get_transaction(cmd->config);
+ module = i40e_nvmupd_get_module(cmd->config);
+ last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
- case I40E_NVMUPD_STATE_INIT_WAIT:
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- /* if we need to stop waiting for an event, clear
- * the wait info and return before doing anything else
- */
- if (cmd->offset == 0xffff) {
- i40e_nvmupd_clear_wait_state(hw);
- status = 0;
- break;
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+ bytes, last, &cmd_details);
+ if (status) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s mod 0x%x off 0x%x len 0x%x\n",
+ __func__, module, cmd->offset, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s status %d aq %d\n",
+ __func__, status, hw->aq.asq_last_status);
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ }
+
+ return status;
+}
+
+/**
+ * i40e_nvmupd_exec_aq - Run an AQ command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ struct i40e_asq_cmd_details cmd_details;
+ struct i40e_aq_desc *aq_desc;
+ u32 buff_size = 0;
+ u8 *buff = NULL;
+ u32 aq_desc_len;
+ u32 aq_data_len;
+ int status;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ if (cmd->offset == 0xffff)
+ return 0;
+
+ memset(&cmd_details, 0, sizeof(cmd_details));
+ cmd_details.wb_desc = &hw->nvm_wb_desc;
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+
+ /* get the aq descriptor */
+ if (cmd->data_size < aq_desc_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
+ cmd->data_size, aq_desc_len);
+ *perrno = -EINVAL;
+ return -EINVAL;
+ }
+ aq_desc = (struct i40e_aq_desc *)bytes;
+
+ /* if data buffer needed, make sure it's ready */
+ aq_data_len = cmd->data_size - aq_desc_len;
+ buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
+ if (buff_size) {
+ if (!hw->nvm_buff.va) {
+ status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
+ hw->aq.asq_buf_size);
+ if (status)
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
+ status);
}
- status = -EBUSY;
- *perrno = -EBUSY;
- break;
+ if (hw->nvm_buff.va) {
+ buff = hw->nvm_buff.va;
+ memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ }
+ }
- default:
- /* invalid state, should never happen */
+ if (cmd->offset)
+ memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
+
+ /* and away we go! */
+ status = i40e_asq_send_command(hw, aq_desc, buff,
+ buff_size, &cmd_details);
+ if (status) {
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: no such state %d\n", hw->nvmupd_state);
- status = -EOPNOTSUPP;
- *perrno = -ESRCH;
- break;
+ "%s err %pe aq_err %s\n",
+ __func__, ERR_PTR(status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ return status;
+ }
+
+ /* should we wait for a follow-up event? */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
- mutex_unlock(&hw->aq.arq_mutex);
return status;
}
/**
+ * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+ int remainder;
+ u8 *buff;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
+
+ /* check offset range */
+ if (cmd->offset > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
+ __func__, cmd->offset, aq_total_len);
+ *perrno = -EINVAL;
+ return -EINVAL;
+ }
+
+ /* check copylength range */
+ if (cmd->data_size > (aq_total_len - cmd->offset)) {
+ int new_len = aq_total_len - cmd->offset;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, new_len);
+ cmd->data_size = new_len;
+ }
+
+ remainder = cmd->data_size;
+ if (cmd->offset < aq_desc_len) {
+ u32 len = aq_desc_len - cmd->offset;
+
+ len = min(len, cmd->data_size);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
+ __func__, cmd->offset, cmd->offset + len);
+
+ buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
+ memcpy(bytes, buff, len);
+
+ bytes += len;
+ remainder -= len;
+ buff = hw->nvm_buff.va;
+ } else {
+ buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ }
+
+ if (remainder > 0) {
+ int start_byte = buff - (u8 *)hw->nvm_buff.va;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
+ __func__, start_byte, start_byte + remainder);
+ memcpy(bytes, buff, remainder);
+ }
+
+ return 0;
+}
+
+/**
+ * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @perrno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
+{
+ u32 aq_total_len;
+ u32 aq_desc_len;
+
+ i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+
+ aq_desc_len = sizeof(struct i40e_aq_desc);
+ aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+
+ /* check copylength range */
+ if (cmd->data_size > aq_total_len) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "%s: copy length %d too big, trimming to %d\n",
+ __func__, cmd->data_size, aq_total_len);
+ cmd->data_size = aq_total_len;
+ }
+
+ memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
+
+ return 0;
+}
+
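Putting the helpers together, here is a sketch of how a caller-supplied command would flow through them (field values are illustrative; in practice the struct arrives from the NVMUpdate tool via ethtool):

/* "Read 512 bytes, single access": i40e_nvmupd_validate_command()
 * classifies this as I40E_NVMUPD_READ_SA, and from the INIT state
 * i40e_nvmupd_command() services it via i40e_nvmupd_nvm_read(),
 * acquiring and releasing the NVM resource around the read.
 */
struct i40e_nvm_access cmd = {
	.command   = I40E_NVM_READ,
	.config    = I40E_NVM_SA << I40E_NVM_TRANS_SHIFT,
	.offset    = 0,
	.data_size = 512,
};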
+/**
* i40e_nvmupd_state_init - Handle NVM update state Init
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -937,7 +1194,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
i40e_release_nvm(hw);
@@ -948,7 +1205,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno);
if (status)
@@ -962,7 +1219,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_erase(hw, cmd, perrno);
if (status) {
@@ -979,7 +1236,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
if (status) {
@@ -996,7 +1253,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
if (status) {
@@ -1012,7 +1269,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw,
status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
if (status) {
*perrno = i40e_aq_rc_to_posix(status,
- hw->aq.asq_last_status);
+ hw->aq.asq_last_status);
} else {
status = i40e_update_nvm_checksum(hw);
if (status) {
@@ -1185,7 +1442,7 @@ retry:
* so here we try to reacquire the semaphore then retry the write.
* We only do one retry, then give up.
*/
- if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) &&
+ if (status && hw->aq.asq_last_status == I40E_AQ_RC_EBUSY &&
!retry_attempt) {
u32 old_asq_status = hw->aq.asq_last_status;
int old_status = status;
@@ -1215,457 +1472,168 @@ retry:
}
/**
- * i40e_nvmupd_clear_wait_state - clear wait state on hw
- * @hw: pointer to the hardware structure
- **/
-void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
-{
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: clearing wait on opcode 0x%04x\n",
- hw->nvm_wait_opcode);
-
- if (hw->nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->nvm_release_on_done = false;
- }
- hw->nvm_wait_opcode = 0;
-
- if (hw->aq.arq_last_status) {
- hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
- return;
- }
-
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
-
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
-
- default:
- break;
- }
-}
-
-/**
- * i40e_nvmupd_check_wait_event - handle NVM update operation events
- * @hw: pointer to the hardware structure
- * @opcode: the event that just happened
- * @desc: AdminQ descriptor
- **/
-void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
- struct i40e_aq_desc *desc)
-{
- u32 aq_desc_len = sizeof(struct i40e_aq_desc);
-
- if (opcode == hw->nvm_wait_opcode) {
- memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
- i40e_nvmupd_clear_wait_state(hw);
- }
-}
-
-/**
- * i40e_nvmupd_validate_command - Validate given command
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @perrno: pointer to return error code
- *
- * Return one of the valid command types or I40E_NVMUPD_INVALID
- **/
-static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno)
-{
- enum i40e_nvmupd_cmd upd_cmd;
- u8 module, transaction;
-
- /* anything that doesn't match a recognized case is an error */
- upd_cmd = I40E_NVMUPD_INVALID;
-
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
-
- /* limits on data size */
- if ((cmd->data_size < 1) ||
- (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_validate_command data_size %d\n",
- cmd->data_size);
- *perrno = -EFAULT;
- return I40E_NVMUPD_INVALID;
- }
-
- switch (cmd->command) {
- case I40E_NVM_READ:
- switch (transaction) {
- case I40E_NVM_CON:
- upd_cmd = I40E_NVMUPD_READ_CON;
- break;
- case I40E_NVM_SNT:
- upd_cmd = I40E_NVMUPD_READ_SNT;
- break;
- case I40E_NVM_LCB:
- upd_cmd = I40E_NVMUPD_READ_LCB;
- break;
- case I40E_NVM_SA:
- upd_cmd = I40E_NVMUPD_READ_SA;
- break;
- case I40E_NVM_EXEC:
- if (module == 0xf)
- upd_cmd = I40E_NVMUPD_STATUS;
- else if (module == 0)
- upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
- break;
- case I40E_NVM_AQE:
- upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
- break;
- }
- break;
-
- case I40E_NVM_WRITE:
- switch (transaction) {
- case I40E_NVM_CON:
- upd_cmd = I40E_NVMUPD_WRITE_CON;
- break;
- case I40E_NVM_SNT:
- upd_cmd = I40E_NVMUPD_WRITE_SNT;
- break;
- case I40E_NVM_LCB:
- upd_cmd = I40E_NVMUPD_WRITE_LCB;
- break;
- case I40E_NVM_SA:
- upd_cmd = I40E_NVMUPD_WRITE_SA;
- break;
- case I40E_NVM_ERA:
- upd_cmd = I40E_NVMUPD_WRITE_ERA;
- break;
- case I40E_NVM_CSUM:
- upd_cmd = I40E_NVMUPD_CSUM_CON;
- break;
- case (I40E_NVM_CSUM|I40E_NVM_SA):
- upd_cmd = I40E_NVMUPD_CSUM_SA;
- break;
- case (I40E_NVM_CSUM|I40E_NVM_LCB):
- upd_cmd = I40E_NVMUPD_CSUM_LCB;
- break;
- case I40E_NVM_EXEC:
- if (module == 0)
- upd_cmd = I40E_NVMUPD_EXEC_AQ;
- break;
- }
- break;
- }
-
- return upd_cmd;
-}
-
-/**
- * i40e_nvmupd_exec_aq - Run an AQ command
+ * i40e_nvmupd_command - Process an NVM update command
* @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
+ * @cmd: pointer to nvm update command
* @bytes: pointer to the data buffer
* @perrno: pointer to return error code
*
- * cmd structure contains identifiers and data buffer
+ * Dispatches command depending on what update state is current
**/
-static int i40e_nvmupd_exec_aq(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+int i40e_nvmupd_command(struct i40e_hw *hw,
+ struct i40e_nvm_access *cmd,
+ u8 *bytes, int *perrno)
{
- struct i40e_asq_cmd_details cmd_details;
- struct i40e_aq_desc *aq_desc;
- u32 buff_size = 0;
- u8 *buff = NULL;
- u32 aq_desc_len;
- u32 aq_data_len;
+ enum i40e_nvmupd_cmd upd_cmd;
int status;
- i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
- if (cmd->offset == 0xffff)
- return 0;
+ /* assume success */
+ *perrno = 0;
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ /* early check for status command and debug msgs */
+ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
- aq_desc_len = sizeof(struct i40e_aq_desc);
- memset(&hw->nvm_wb_desc, 0, aq_desc_len);
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_nvm_update_state_str[upd_cmd],
+ hw->nvmupd_state,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
+ cmd->command, cmd->config, cmd->offset, cmd->data_size);
- /* get the aq descriptor */
- if (cmd->data_size < aq_desc_len) {
+ if (upd_cmd == I40E_NVMUPD_INVALID) {
+ *perrno = -EFAULT;
i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n",
- cmd->data_size, aq_desc_len);
- *perrno = -EINVAL;
- return -EINVAL;
+ "i40e_nvmupd_validate_command returns %d errno %d\n",
+ upd_cmd, *perrno);
}
- aq_desc = (struct i40e_aq_desc *)bytes;
- /* if data buffer needed, make sure it's ready */
- aq_data_len = cmd->data_size - aq_desc_len;
- buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen));
- if (buff_size) {
- if (!hw->nvm_buff.va) {
- status = i40e_allocate_virt_mem(hw, &hw->nvm_buff,
- hw->aq.asq_buf_size);
- if (status)
- i40e_debug(hw, I40E_DEBUG_NVM,
- "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n",
- status);
- }
-
- if (hw->nvm_buff.va) {
- buff = hw->nvm_buff.va;
- memcpy(buff, &bytes[aq_desc_len], aq_data_len);
+ /* a status request returns immediately rather than
+ * going into the state machine
+ */
+ if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return -EINVAL;
}
- }
-
- if (cmd->offset)
- memset(&hw->nvm_aq_event_desc, 0, aq_desc_len);
-
- /* and away we go! */
- status = i40e_asq_send_command(hw, aq_desc, buff,
- buff_size, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "%s err %pe aq_err %s\n",
- __func__, ERR_PTR(status),
- i40e_aq_str(hw, hw->aq.asq_last_status));
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
- return status;
- }
-
- /* should we wait for a followup event? */
- if (cmd->offset) {
- hw->nvm_wait_opcode = cmd->offset;
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
- }
-
- return status;
-}
-/**
- * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
- **/
-static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
-{
- u32 aq_total_len;
- u32 aq_desc_len;
- int remainder;
- u8 *buff;
-
- i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
-
- aq_desc_len = sizeof(struct i40e_aq_desc);
- aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen);
+ bytes[0] = hw->nvmupd_state;
- /* check offset range */
- if (cmd->offset > aq_total_len) {
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n",
- __func__, cmd->offset, aq_total_len);
- *perrno = -EINVAL;
- return -EINVAL;
- }
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
- /* check copylength range */
- if (cmd->data_size > (aq_total_len - cmd->offset)) {
- int new_len = aq_total_len - cmd->offset;
+ /* Clear error status on read */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR)
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n",
- __func__, cmd->data_size, new_len);
- cmd->data_size = new_len;
+ return 0;
}
- remainder = cmd->data_size;
- if (cmd->offset < aq_desc_len) {
- u32 len = aq_desc_len - cmd->offset;
-
- len = min(len, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n",
- __func__, cmd->offset, cmd->offset + len);
-
- buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset;
- memcpy(bytes, buff, len);
-
- bytes += len;
- remainder -= len;
- buff = hw->nvm_buff.va;
- } else {
- buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len);
+ /* Clear error status even if it was not read, and log the fact */
+ if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n");
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
}
- if (remainder > 0) {
- int start_byte = buff - (u8 *)hw->nvm_buff.va;
-
- i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n",
- __func__, start_byte, start_byte + remainder);
- memcpy(bytes, buff, remainder);
- }
+ /* Acquire lock to prevent race condition where adminq_task
+ * can execute after i40e_nvmupd_nvm_read/write but before state
+ * variables (nvm_wait_opcode, nvm_release_on_done) are updated.
+ *
+ * During NVMUpdate, the lock is typically held for ~5ms for most
+ * commands, but for as long as ~60ms for the NVMUPD_CSUM_LCB
+ * command.
+ */
+ mutex_lock(&hw->aq.arq_mutex);
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT:
+ status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno);
+ break;
- return 0;
-}
+ case I40E_NVMUPD_STATE_READING:
+ status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno);
+ break;
-/**
- * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
- **/
-static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
-{
- u32 aq_total_len;
- u32 aq_desc_len;
+ case I40E_NVMUPD_STATE_WRITING:
+ status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno);
+ break;
- i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__);
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_clear_wait_state(hw);
+ status = 0;
+ break;
+ }
- aq_desc_len = sizeof(struct i40e_aq_desc);
- aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen);
+ status = -EBUSY;
+ *perrno = -EBUSY;
+ break;
- /* check copylength range */
- if (cmd->data_size > aq_total_len) {
+ default:
+ /* invalid state, should never happen */
i40e_debug(hw, I40E_DEBUG_NVM,
- "%s: copy length %d too big, trimming to %d\n",
- __func__, cmd->data_size, aq_total_len);
- cmd->data_size = aq_total_len;
+ "NVMUPD: no such state %d\n", hw->nvmupd_state);
+ status = -EOPNOTSUPP;
+ *perrno = -ESRCH;
+ break;
}
- memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size);
-
- return 0;
+ mutex_unlock(&hw->aq.arq_mutex);
+ return status;
}
/**
- * i40e_nvmupd_nvm_read - Read NVM
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * cmd structure contains identifiers and data buffer
+ * i40e_nvmupd_clear_wait_state - clear wait state on hw
+ * @hw: pointer to the hardware structure
**/
-static int i40e_nvmupd_nvm_read(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw)
{
- struct i40e_asq_cmd_details cmd_details;
- u8 module, transaction;
- int status;
- bool last;
-
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
- last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
-
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n",
+ hw->nvm_wait_opcode);
- status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- bytes, last, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_read status %d aq %d\n",
- status, hw->aq.asq_last_status);
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
}
+ hw->nvm_wait_opcode = 0;
- return status;
-}
-
-/**
- * i40e_nvmupd_nvm_erase - Erase an NVM module
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @perrno: pointer to return error code
- *
- * module, offset, data_size and data are in cmd structure
- **/
-static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- int *perrno)
-{
- struct i40e_asq_cmd_details cmd_details;
- u8 module, transaction;
- int status = 0;
- bool last;
+ if (hw->aq.arq_last_status) {
+ hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR;
+ return;
+ }
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
- last = (transaction & I40E_NVM_LCB);
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
- status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
- last, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_erase status %d aq %d\n",
- status, hw->aq.asq_last_status);
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ default:
+ break;
}
-
- return status;
}
/**
- * i40e_nvmupd_nvm_write - Write NVM
- * @hw: pointer to hardware structure
- * @cmd: pointer to nvm update command buffer
- * @bytes: pointer to the data buffer
- * @perrno: pointer to return error code
- *
- * module, offset, data_size and data are in cmd structure
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ * @desc: AdminQ descriptor
**/
-static int i40e_nvmupd_nvm_write(struct i40e_hw *hw,
- struct i40e_nvm_access *cmd,
- u8 *bytes, int *perrno)
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode,
+ struct i40e_aq_desc *desc)
{
- struct i40e_asq_cmd_details cmd_details;
- u8 module, transaction;
- u8 preservation_flags;
- int status = 0;
- bool last;
-
- transaction = i40e_nvmupd_get_transaction(cmd->config);
- module = i40e_nvmupd_get_module(cmd->config);
- last = (transaction & I40E_NVM_LCB);
- preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config);
-
- memset(&cmd_details, 0, sizeof(cmd_details));
- cmd_details.wb_desc = &hw->nvm_wb_desc;
+ u32 aq_desc_len = sizeof(struct i40e_aq_desc);
- status = i40e_aq_update_nvm(hw, module, cmd->offset,
- (u16)cmd->data_size, bytes, last,
- preservation_flags, &cmd_details);
- if (status) {
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
- module, cmd->offset, cmd->data_size);
- i40e_debug(hw, I40E_DEBUG_NVM,
- "i40e_nvmupd_nvm_write status %d aq %d\n",
- status, hw->aq.asq_last_status);
- *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
+ if (opcode == hw->nvm_wait_opcode) {
+ memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len);
+ i40e_nvmupd_clear_wait_state(hw);
}
-
- return status;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index ce1f11b8ad65..5a0699ca7ce5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -371,13 +371,6 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
int i40e_set_mac_type(struct i40e_hw *hw);
-extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
-
-static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
-{
- return i40e_ptype_lookup[ptype];
-}
-
/**
* i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition
* @link_speed: the speed to convert
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e7ebcb09f23c..b72a4b5d76b9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1472,7 +1472,8 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf)
**/
void i40e_ptp_init(struct i40e_pf *pf)
{
- struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
+ struct net_device *netdev = vsi->netdev;
struct i40e_hw *hw = &pf->hw;
u32 pf_id;
long err;
@@ -1536,6 +1537,7 @@ void i40e_ptp_init(struct i40e_pf *pf)
**/
void i40e_ptp_stop(struct i40e_pf *pf)
{
+ struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf);
struct i40e_hw *hw = &pf->hw;
u32 regval;
@@ -1555,7 +1557,7 @@ void i40e_ptp_stop(struct i40e_pf *pf)
ptp_clock_unregister(pf->ptp_clock);
pf->ptp_clock = NULL;
dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__,
- pf->vsi[pf->lan_vsi]->netdev->name);
+ main_vsi->netdev->name);
}
if (i40e_is_ptp_pin_dev(&pf->hw)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
index 33b4e30f5e00..759f3d1c4c8f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
@@ -89,8 +89,8 @@ TRACE_EVENT(i40e_napi_poll,
__entry->tx_clean_complete = tx_clean_complete;
__entry->irq_num = q->irq_num;
__entry->curr_cpu = get_cpu();
- __assign_str(qname, q->name);
- __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV);
+ __assign_str(qname);
+ __assign_str(dev_name);
__assign_bitmask(irq_affinity, cpumask_bits(&q->affinity_mask),
nr_cpumask_bits);
),
@@ -132,7 +132,7 @@ DECLARE_EVENT_CLASS(
__entry->ring = ring;
__entry->desc = desc;
__entry->buf = buf;
- __assign_str(devname, ring->netdev->name);
+ __assign_str(devname);
),
TP_printk(
@@ -177,7 +177,7 @@ DECLARE_EVENT_CLASS(
__entry->ring = ring;
__entry->desc = desc;
__entry->xdp = xdp;
- __assign_str(devname, ring->netdev->name);
+ __assign_str(devname);
),
TP_printk(
@@ -219,7 +219,7 @@ DECLARE_EVENT_CLASS(
TP_fast_assign(
__entry->skb = skb;
__entry->ring = ring;
- __assign_str(devname, ring->netdev->name);
+ __assign_str(devname);
),
TP_printk(
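The i40e_trace.h hunks track the tree-wide tracing change where __assign_str() takes only the destination field and derives the source from the matching __string() declaration. Schematically (a sketch of the convention, not a full i40e event):

TP_STRUCT__entry(
	__string(devname, ring->netdev->name)
),
TP_fast_assign(
	__assign_str(devname);	/* source comes from __string() above */
),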
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 1a12b732818e..c006f716a3bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/bpf_trace.h>
+#include <linux/net/intel/libie/rx.h>
#include <linux/prefetch.h>
#include <linux/sctp.h>
#include <net/mpls.h>
@@ -23,7 +24,7 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
{
struct i40e_filter_program_desc *fdir_desc;
struct i40e_pf *pf = tx_ring->vsi->back;
- u32 flex_ptype, dtype_cmd;
+ u32 flex_ptype, dtype_cmd, vsi_id;
u16 i;
/* grab the next descriptor */
@@ -41,8 +42,8 @@ static void i40e_fdir(struct i40e_ring *tx_ring,
flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype);
/* Use LAN VSI Id if not programmed by user */
- flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK,
- fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id);
+ vsi_id = fdata->dest_vsi ? : i40e_pf_get_main_vsi(pf)->id;
+ flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK, vsi_id);
dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG;
@@ -860,13 +861,15 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
/**
* i40e_detect_recover_hung - Function to detect and recover hung_queues
- * @vsi: pointer to vsi struct with tx queues
+ * @pf: pointer to PF struct
*
- * VSI has netdev and netdev has TX queues. This function is to check each of
- * those TX queues if they are hung, trigger recovery by issuing SW interrupt.
+ * The LAN VSI has a netdev with TX queues. This function checks each of
+ * those TX queues and, if it is hung, triggers recovery by issuing a
+ * SW interrupt.
**/
-void i40e_detect_recover_hung(struct i40e_vsi *vsi)
+void i40e_detect_recover_hung(struct i40e_pf *pf)
{
+ struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf);
struct i40e_ring *tx_ring = NULL;
struct net_device *netdev;
unsigned int i;
@@ -1741,38 +1744,30 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
union i40e_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded;
+ struct libeth_rx_pt decoded;
u32 rx_error, rx_status;
bool ipv4, ipv6;
u8 ptype;
u64 qword;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
- rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
- rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
- decoded = decode_rx_desc_ptype(ptype);
-
skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword);
- /* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ decoded = libie_rx_pt_parse(ptype);
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
return;
+ rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword);
+ rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword);
+
/* did the hardware decode the packet and checksum? */
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* both known and outer_ip must be set for the below code to work */
- if (!(decoded.known && decoded.outer_ip))
- return;
-
- ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (ipv4 &&
(rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) |
@@ -1800,20 +1795,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
- if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1;
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case I40E_RX_PTYPE_INNER_PROT_TCP:
- case I40E_RX_PTYPE_INNER_PROT_UDP:
- case I40E_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- fallthrough;
- default:
- break;
- }
-
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
checksum_fail:
@@ -1821,29 +1806,6 @@ checksum_fail:
}
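The inner-protocol switch removed above is subsumed by libeth_rx_pt_has_checksum(): the parsed ptype already records whether the inner L4 protocol carries a checksum, so any packet that reaches the end of the function may be marked CHECKSUM_UNNECESSARY. A sketch of the gate, under the assumption that it combines the feature check with the parsed inner protocol (the authoritative version is in the libeth headers):

/* Assumed sketch of the libeth checksum gate used above. */
static inline bool example_rx_pt_has_checksum(const struct net_device *dev,
					      struct libeth_rx_pt pt)
{
	return pt.inner_prot != LIBETH_RX_PT_INNER_NONE &&
	       (dev->features & NETIF_F_RXCSUM);
}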
/**
- * i40e_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- **/
-static inline int i40e_ptype_to_htype(u8 ptype)
-{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
-
- if (!decoded.known)
- return PKT_HASH_TYPE_NONE;
-
- if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4)
- return PKT_HASH_TYPE_L4;
- else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3)
- return PKT_HASH_TYPE_L3;
- else
- return PKT_HASH_TYPE_L2;
-}
-
-/**
* i40e_rx_hash - set the hash value in the skb
* @ring: descriptor ring
* @rx_desc: specific descriptor
@@ -1855,17 +1817,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
struct sk_buff *skb,
u8 rx_ptype)
{
+ struct libeth_rx_pt decoded;
u32 hash;
const __le64 rss_mask =
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (!(ring->netdev->features & NETIF_F_RXHASH))
+ decoded = libie_rx_pt_parse(rx_ptype);
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
}
@@ -2144,9 +2108,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
*/
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- I40E_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, I40E_RX_HDR_SIZE);
if (unlikely(!skb))
return NULL;
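napi_alloc_skb() is the convenience wrapper that already supplies GFP_ATOMIC (and, on recent kernels, suppresses allocation-failure warnings for this path), so spelling the flags out through __napi_alloc_skb() bought nothing. Roughly, the relationship is (the exact flag set is an assumption here):

/* Sketch: the wrapper now used at the call sites is equivalent
 * to the old open-coded form.
 */
static inline struct sk_buff *example_napi_alloc(struct napi_struct *napi,
						 unsigned int len)
{
	return __napi_alloc_skb(napi, len, GFP_ATOMIC | __GFP_NOWARN);
}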
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 2cdc7de6301c..7c26c9a2bf65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -470,7 +470,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
-void i40e_detect_recover_hung(struct i40e_vsi *vsi);
+void i40e_detect_recover_hung(struct i40e_pf *pf);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index d9031499697e..28568e126850 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -745,94 +745,6 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks {
#define I40E_RXD_QW1_PTYPE_SHIFT 30
#define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT)
-/* Packet type non-ip values */
-enum i40e_rx_l2_ptype {
- I40E_RX_PTYPE_L2_RESERVED = 0,
- I40E_RX_PTYPE_L2_MAC_PAY2 = 1,
- I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- I40E_RX_PTYPE_L2_FIP_PAY2 = 3,
- I40E_RX_PTYPE_L2_OUI_PAY2 = 4,
- I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- I40E_RX_PTYPE_L2_LLDP_PAY2 = 6,
- I40E_RX_PTYPE_L2_ECP_PAY2 = 7,
- I40E_RX_PTYPE_L2_EVB_PAY2 = 8,
- I40E_RX_PTYPE_L2_QCN_PAY2 = 9,
- I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- I40E_RX_PTYPE_L2_ARP = 11,
- I40E_RX_PTYPE_L2_FCOE_PAY3 = 12,
- I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
- I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
- I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
- I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
- I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
-};
-
-struct i40e_rx_ptype_decoded {
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:1;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
-};
-
-enum i40e_rx_ptype_outer_ip {
- I40E_RX_PTYPE_OUTER_L2 = 0,
- I40E_RX_PTYPE_OUTER_IP = 1
-};
-
-enum i40e_rx_ptype_outer_ip_ver {
- I40E_RX_PTYPE_OUTER_NONE = 0,
- I40E_RX_PTYPE_OUTER_IPV4 = 0,
- I40E_RX_PTYPE_OUTER_IPV6 = 1
-};
-
-enum i40e_rx_ptype_outer_fragmented {
- I40E_RX_PTYPE_NOT_FRAG = 0,
- I40E_RX_PTYPE_FRAG = 1
-};
-
-enum i40e_rx_ptype_tunnel_type {
- I40E_RX_PTYPE_TUNNEL_NONE = 0,
- I40E_RX_PTYPE_TUNNEL_IP_IP = 1,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum i40e_rx_ptype_tunnel_end_prot {
- I40E_RX_PTYPE_TUNNEL_END_NONE = 0,
- I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum i40e_rx_ptype_inner_prot {
- I40E_RX_PTYPE_INNER_PROT_NONE = 0,
- I40E_RX_PTYPE_INNER_PROT_UDP = 1,
- I40E_RX_PTYPE_INNER_PROT_TCP = 2,
- I40E_RX_PTYPE_INNER_PROT_SCTP = 3,
- I40E_RX_PTYPE_INNER_PROT_ICMP = 4,
- I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5
-};
-
-enum i40e_rx_ptype_payload_layer {
- I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
#define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38
#define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
I40E_RXD_QW1_LENGTH_PBUF_SHIFT)
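With the per-driver decode tables gone, ptype decoding is a single call into libie, which keeps one canonical table for all Intel hardware sharing this descriptor layout. A minimal consumer, using only APIs that appear elsewhere in this patch:

#include <linux/net/intel/libie/rx.h>

/* Classify the outer IP version of a hardware ptype. */
static bool example_ptype_is_ipv4(u32 ptype)
{
	struct libeth_rx_pt pt = libie_rx_pt_parse(ptype);

	return libeth_rx_pt_get_ip_ver(pt) == LIBETH_RX_PT_OUTER_IPV4;
}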
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 232b65b9c8ea..662622f01e31 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -795,13 +795,13 @@ error_param:
static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
struct i40e_mac_filter *f = NULL;
+ struct i40e_vsi *main_vsi, *vsi;
struct i40e_pf *pf = vf->pf;
- struct i40e_vsi *vsi;
u64 max_tx_rate = 0;
int ret = 0;
- vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
- vf->vf_id);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, main_vsi->seid, vf->vf_id);
if (!vsi) {
dev_err(&pf->pdev->dev,
@@ -3322,8 +3322,9 @@ error_param:
static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
{
struct i40e_pf *pf = vf->pf;
- int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+ struct i40e_vsi *main_vsi;
int aq_ret = 0;
+ int abs_vf_id;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) ||
!test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) {
@@ -3331,8 +3332,9 @@ static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id,
- msg, msglen);
+ main_vsi = i40e_pf_get_main_vsi(pf);
+ abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id;
+ i40e_notify_client_of_vf_msg(main_vsi, abs_vf_id, msg, msglen);
error_param:
/* send the response to the VF */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 11500003af0d..4e885df789ef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -301,8 +301,7 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
if (unlikely(!skb))
goto out;
@@ -483,7 +482,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
bi = *i40e_rx_bi(rx_ring, next_to_process);
xsk_buff_set_size(bi, size);
- xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(bi);
if (!first)
first = bi;
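xsk_buff_dma_sync_for_cpu() dropped its pool argument around the same release: an XSK buffer can resolve its owning pool itself, so callers no longer need to thread rx_ring->xsk_pool through. The call-site shape is now simply:

/* AF_XDP Rx: size the buffer, then sync it for CPU access. */
xsk_buff_set_size(bi, size);
xsk_buff_dma_sync_for_cpu(bi);	/* pool derived from bi itself */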
diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile
index 2d154a4e2fd7..356ac9faa5bf 100644
--- a/drivers/net/ethernet/intel/iavf/Makefile
+++ b/drivers/net/ethernet/intel/iavf/Makefile
@@ -11,6 +11,5 @@ subdir-ccflags-y += -I$(src)
obj-$(CONFIG_IAVF) += iavf.o
-iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
- iavf_adv_rss.o \
- iavf_txrx.o iavf_common.o iavf_adminq.o
+iavf-y := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \
+ iavf_adv_rss.o iavf_txrx.o iavf_common.o iavf_adminq.o
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index db8188c7ac4b..23a6557fc3db 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -287,7 +287,7 @@ struct iavf_adapter {
#define IAVF_FLAG_RESET_PENDING BIT(4)
#define IAVF_FLAG_RESET_NEEDED BIT(5)
#define IAVF_FLAG_WB_ON_ITR_CAPABLE BIT(6)
-#define IAVF_FLAG_LEGACY_RX BIT(15)
+/* BIT(15) is free, was IAVF_FLAG_LEGACY_RX */
#define IAVF_FLAG_REINIT_ITR_NEEDED BIT(16)
#define IAVF_FLAG_QUEUES_DISABLED BIT(17)
#define IAVF_FLAG_SETUP_NETDEV_FEATURES BIT(18)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_common.c b/drivers/net/ethernet/intel/iavf/iavf_common.c
index 5a25233a89d5..aa751ce3425b 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_common.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_common.c
@@ -432,259 +432,6 @@ enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 vsi_id,
return iavf_aq_get_set_rss_key(hw, vsi_id, key, true);
}
-/* The iavf_ptype_lookup table is used to convert from the 8-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT iavf_ptype_lookup[ptype].known
- * THEN
- * Packet is unknown
- * ELSE IF iavf_ptype_lookup[ptype].outer_ip == IAVF_RX_PTYPE_OUTER_IP
- * Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- * Use the enum iavf_rx_l2_ptype to decode the packet type
- * ENDIF
- */
-
-/* macro to make the table lines short, use explicit indexing with [PTYPE] */
-#define IAVF_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = { \
- 1, \
- IAVF_RX_PTYPE_OUTER_##OUTER_IP, \
- IAVF_RX_PTYPE_OUTER_##OUTER_IP_VER, \
- IAVF_RX_PTYPE_##OUTER_FRAG, \
- IAVF_RX_PTYPE_TUNNEL_##T, \
- IAVF_RX_PTYPE_TUNNEL_END_##TE, \
- IAVF_RX_PTYPE_##TEF, \
- IAVF_RX_PTYPE_INNER_PROT_##I, \
- IAVF_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define IAVF_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros makes the table fit but are terse */
-#define IAVF_RX_PTYPE_NOF IAVF_RX_PTYPE_NOT_FRAG
-#define IAVF_RX_PTYPE_FRG IAVF_RX_PTYPE_FRAG
-#define IAVF_RX_PTYPE_INNER_PROT_TS IAVF_RX_PTYPE_INNER_PROT_TIMESYNC
-
-/* Lookup table mapping the 8-bit HW PTYPE to the bit field for decoding */
-struct iavf_rx_ptype_decoded iavf_ptype_lookup[BIT(8)] = {
- /* L2 Packet types */
- IAVF_PTT_UNUSED_ENTRY(0),
- IAVF_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2),
- IAVF_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT_UNUSED_ENTRY(4),
- IAVF_PTT_UNUSED_ENTRY(5),
- IAVF_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT_UNUSED_ENTRY(8),
- IAVF_PTT_UNUSED_ENTRY(9),
- IAVF_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2),
- IAVF_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE),
- IAVF_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(21, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3),
-
- /* Non Tunneled IPv4 */
- IAVF_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(25),
- IAVF_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4),
- IAVF_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4),
- IAVF_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv4 */
- IAVF_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(32),
- IAVF_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> IPv6 */
- IAVF_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(39),
- IAVF_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT */
- IAVF_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> IPv4 */
- IAVF_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(47),
- IAVF_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> IPv6 */
- IAVF_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(54),
- IAVF_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC */
- IAVF_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */
- IAVF_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(62),
- IAVF_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */
- IAVF_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(69),
- IAVF_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv4 --> GRE/NAT --> MAC/VLAN */
- IAVF_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */
- IAVF_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(77),
- IAVF_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */
- IAVF_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(84),
- IAVF_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* Non Tunneled IPv6 */
- IAVF_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- IAVF_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(91),
- IAVF_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
- IAVF_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
- IAVF_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv4 */
- IAVF_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(98),
- IAVF_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> IPv6 */
- IAVF_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(105),
- IAVF_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT */
- IAVF_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> IPv4 */
- IAVF_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(113),
- IAVF_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> IPv6 */
- IAVF_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(120),
- IAVF_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC */
- IAVF_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */
- IAVF_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(128),
- IAVF_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */
- IAVF_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(135),
- IAVF_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN */
- IAVF_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */
- IAVF_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3),
- IAVF_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3),
- IAVF_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(143),
- IAVF_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4),
- IAVF_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4),
- IAVF_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4),
-
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */
- IAVF_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3),
- IAVF_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3),
- IAVF_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4),
- IAVF_PTT_UNUSED_ENTRY(150),
- IAVF_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4),
- IAVF_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4),
- IAVF_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
- /* unused entries */
- [154 ... 255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-};
-
/**
* iavf_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 378c3e9ddf9d..52273f7eab2c 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -240,29 +240,6 @@ static const struct iavf_stats iavf_gstrings_stats[] = {
#define IAVF_QUEUE_STATS_LEN ARRAY_SIZE(iavf_gstrings_queue_stats)
-/* For now we have one and only one private flag and it is only defined
- * when we have support for the SKIP_CPU_SYNC DMA attribute. Instead
- * of leaving all this code sitting around empty we will strip it unless
- * our one private flag is actually available.
- */
-struct iavf_priv_flags {
- char flag_string[ETH_GSTRING_LEN];
- u32 flag;
- bool read_only;
-};
-
-#define IAVF_PRIV_FLAG(_name, _flag, _read_only) { \
- .flag_string = _name, \
- .flag = _flag, \
- .read_only = _read_only, \
-}
-
-static const struct iavf_priv_flags iavf_gstrings_priv_flags[] = {
- IAVF_PRIV_FLAG("legacy-rx", IAVF_FLAG_LEGACY_RX, 0),
-};
-
-#define IAVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(iavf_gstrings_priv_flags)
-
/**
* iavf_get_link_ksettings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
@@ -342,8 +319,6 @@ static int iavf_get_sset_count(struct net_device *netdev, int sset)
return IAVF_STATS_LEN +
(IAVF_QUEUE_STATS_LEN * 2 *
netdev->real_num_tx_queues);
- else if (sset == ETH_SS_PRIV_FLAGS)
- return IAVF_PRIV_FLAGS_STR_LEN;
else
return -EINVAL;
}
@@ -386,21 +361,6 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
}
/**
- * iavf_get_priv_flag_strings - Get private flag strings
- * @netdev: network interface device structure
- * @data: buffer for string data
- *
- * Builds the private flags string table
- **/
-static void iavf_get_priv_flag_strings(struct net_device *netdev, u8 *data)
-{
- unsigned int i;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++)
- ethtool_puts(&data, iavf_gstrings_priv_flags[i].flag_string);
-}
-
-/**
* iavf_get_stat_strings - Get stat strings
* @netdev: network interface device structure
* @data: buffer for string data
@@ -438,109 +398,12 @@ static void iavf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
case ETH_SS_STATS:
iavf_get_stat_strings(netdev, data);
break;
- case ETH_SS_PRIV_FLAGS:
- iavf_get_priv_flag_strings(netdev, data);
- break;
default:
break;
}
}
/**
- * iavf_get_priv_flags - report device private flags
- * @netdev: network interface device structure
- *
- * The get string set count and the string set should be matched for each
- * flag returned. Add new strings for each flag to the iavf_gstrings_priv_flags
- * array.
- *
- * Returns a u32 bitmap of flags.
- **/
-static u32 iavf_get_priv_flags(struct net_device *netdev)
-{
- struct iavf_adapter *adapter = netdev_priv(netdev);
- u32 i, ret_flags = 0;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
- const struct iavf_priv_flags *priv_flags;
-
- priv_flags = &iavf_gstrings_priv_flags[i];
-
- if (priv_flags->flag & adapter->flags)
- ret_flags |= BIT(i);
- }
-
- return ret_flags;
-}
-
-/**
- * iavf_set_priv_flags - set private flags
- * @netdev: network interface device structure
- * @flags: bit flags to be set
- **/
-static int iavf_set_priv_flags(struct net_device *netdev, u32 flags)
-{
- struct iavf_adapter *adapter = netdev_priv(netdev);
- u32 orig_flags, new_flags, changed_flags;
- int ret = 0;
- u32 i;
-
- orig_flags = READ_ONCE(adapter->flags);
- new_flags = orig_flags;
-
- for (i = 0; i < IAVF_PRIV_FLAGS_STR_LEN; i++) {
- const struct iavf_priv_flags *priv_flags;
-
- priv_flags = &iavf_gstrings_priv_flags[i];
-
- if (flags & BIT(i))
- new_flags |= priv_flags->flag;
- else
- new_flags &= ~(priv_flags->flag);
-
- if (priv_flags->read_only &&
- ((orig_flags ^ new_flags) & ~BIT(i)))
- return -EOPNOTSUPP;
- }
-
- /* Before we finalize any flag changes, any checks which we need to
- * perform to determine if the new flags will be supported should go
- * here...
- */
-
- /* Compare and exchange the new flags into place. If we failed, that
- * is if cmpxchg returns anything but the old value, this means
- * something else must have modified the flags variable since we
- * copied it. We'll just punt with an error and log something in the
- * message buffer.
- */
- if (cmpxchg(&adapter->flags, orig_flags, new_flags) != orig_flags) {
- dev_warn(&adapter->pdev->dev,
- "Unable to update adapter->flags as it was modified by another thread...\n");
- return -EAGAIN;
- }
-
- changed_flags = orig_flags ^ new_flags;
-
- /* Process any additional changes needed as a result of flag changes.
- * The changed_flags value reflects the list of bits that were changed
- * in the code above.
- */
-
- /* issue a reset to force legacy-rx change to take effect */
- if (changed_flags & IAVF_FLAG_LEGACY_RX) {
- if (netif_running(netdev)) {
- iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
- ret = iavf_wait_for_reset(adapter);
- if (ret)
- netdev_warn(netdev, "Changing private flags timeout or interrupted waiting for reset");
- }
- }
-
- return ret;
-}
-
-/**
* iavf_get_msglevel - Get debug message level
* @netdev: network interface device structure
*
@@ -585,7 +448,6 @@ static void iavf_get_drvinfo(struct net_device *netdev,
strscpy(drvinfo->driver, iavf_driver_name, 32);
strscpy(drvinfo->fw_version, "N/A", 4);
strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
}
/**
@@ -1995,8 +1857,6 @@ static const struct ethtool_ops iavf_ethtool_ops = {
.get_strings = iavf_get_strings,
.get_ethtool_stats = iavf_get_ethtool_stats,
.get_sset_count = iavf_get_sset_count,
- .get_priv_flags = iavf_get_priv_flags,
- .set_priv_flags = iavf_set_priv_flags,
.get_msglevel = iavf_get_msglevel,
.set_msglevel = iavf_set_msglevel,
.get_coalesce = iavf_get_coalesce,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 166832a4213a..ff11bafb3b4f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/net/intel/libie/rx.h>
+
#include "iavf.h"
#include "iavf_prototype.h"
/* All iavf tracepoints are defined by the include below, which must
@@ -43,8 +45,9 @@ static const struct pci_device_id iavf_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);
MODULE_ALIAS("i40evf");
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
+MODULE_IMPORT_NS(LIBETH);
+MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
static const struct net_device_ops iavf_netdev_ops;
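The two MODULE_IMPORT_NS() lines are required because libeth and libie export their symbols into named namespaces; without the import, modpost refuses to link the consumer. The mechanism, sketched from both sides (the exporter line is illustrative of the library side, not quoted from it):

/* Exporter side, in the library: */
EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, LIBETH);

/* Consumer side, in this driver: */
MODULE_IMPORT_NS(LIBETH);
MODULE_IMPORT_NS(LIBIE);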
@@ -714,40 +717,10 @@ static void iavf_configure_tx(struct iavf_adapter *adapter)
**/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
- unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
struct iavf_hw *hw = &adapter->hw;
- int i;
-
- /* Legacy Rx will always default to a 2048 buffer size. */
-#if (PAGE_SIZE < 8192)
- if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
- struct net_device *netdev = adapter->netdev;
-
- /* For jumbo frames on systems with 4K pages we have to use
- * an order 1 page, so we might as well increase the size
- * of our Rx buffer to make better use of the available space
- */
- rx_buf_len = IAVF_RXBUFFER_3072;
-
- /* We use a 1536 buffer size for configurations with
- * standard Ethernet mtu. On x86 this gives us enough room
- * for shared info and 192 bytes of padding.
- */
- if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
- (netdev->mtu <= ETH_DATA_LEN))
- rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
- }
-#endif
- for (i = 0; i < adapter->num_active_queues; i++) {
+ for (u32 i = 0; i < adapter->num_active_queues; i++)
adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
- adapter->rx_rings[i].rx_buf_len = rx_buf_len;
-
- if (adapter->flags & IAVF_FLAG_LEGACY_RX)
- clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
- else
- set_ring_build_skb_enabled(&adapter->rx_rings[i]);
- }
}
/**
@@ -1615,7 +1588,6 @@ static int iavf_alloc_queues(struct iavf_adapter *adapter)
rx_ring = &adapter->rx_rings[i];
rx_ring->queue_index = i;
rx_ring->netdev = adapter->netdev;
- rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
rx_ring->itr_setting = IAVF_ITR_RX_DEF;
}
@@ -2642,9 +2614,8 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
iavf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- /* MTU range: 68 - 9710 */
netdev->min_mtu = ETH_MIN_MTU;
- netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
+ netdev->max_mtu = LIBIE_MAX_MTU;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
@@ -3785,6 +3756,10 @@ static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
@@ -4324,7 +4299,7 @@ static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev)) {
iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
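WRITE_ONCE() on netdev->mtu pairs with lockless READ_ONCE() readers elsewhere in the stack: it guarantees the store is not torn and documents for KCSAN that the concurrent access is intentional. The reader half of the idiom (an illustrative sampler, not a specific core call site):

/* Illustrative lockless reader pairing with the WRITE_ONCE() above. */
static u32 example_sample_mtu(const struct net_device *dev)
{
	return READ_ONCE(dev->mtu);
}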
@@ -5051,7 +5026,7 @@ err_dma:
*
* Called when the system (VM) is entering sleep/suspend.
**/
-static int __maybe_unused iavf_suspend(struct device *dev_d)
+static int iavf_suspend(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct iavf_adapter *adapter = netdev_priv(netdev);
@@ -5079,7 +5054,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
*
* Called when the system (VM) is resumed from sleep/suspend.
**/
-static int __maybe_unused iavf_resume(struct device *dev_d)
+static int iavf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct iavf_adapter *adapter;
@@ -5266,14 +5241,14 @@ static void iavf_shutdown(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D3hot);
}
-static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
static struct pci_driver iavf_driver = {
.name = iavf_driver_name,
.id_table = iavf_pci_tbl,
.probe = iavf_probe,
.remove = iavf_remove,
- .driver.pm = &iavf_pm_ops,
+ .driver.pm = pm_sleep_ptr(&iavf_pm_ops),
.shutdown = iavf_shutdown,
};
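DEFINE_SIMPLE_DEV_PM_OPS() plus pm_sleep_ptr() is the modern replacement for SIMPLE_DEV_PM_OPS() with __maybe_unused callbacks: the ops struct stays referenced in every configuration, and pm_sleep_ptr() compiles the pointer down to NULL when CONFIG_PM_SLEEP is off, letting the compiler discard the callbacks. The shape of the pattern, with hypothetical names:

static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev) { return 0; }

static DEFINE_SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend,
				example_resume);

/* .driver.pm = pm_sleep_ptr(&example_pm_ops) resolves to the ops
 * when CONFIG_PM_SLEEP=y and to NULL otherwise, with no #ifdef.
 */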
diff --git a/drivers/net/ethernet/intel/iavf/iavf_prototype.h b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
index 4a48e6171405..48c3901381b4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_prototype.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_prototype.h
@@ -45,13 +45,6 @@ enum iavf_status iavf_aq_set_rss_lut(struct iavf_hw *hw, u16 seid,
enum iavf_status iavf_aq_set_rss_key(struct iavf_hw *hw, u16 seid,
struct iavf_aqc_get_set_rss_key_data *key);
-extern struct iavf_rx_ptype_decoded iavf_ptype_lookup[];
-
-static inline struct iavf_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype)
-{
- return iavf_ptype_lookup[ptype];
-}
-
void iavf_vf_parse_hw_config(struct iavf_hw *hw,
struct virtchnl_vf_resource *msg);
enum iavf_status iavf_aq_send_msg_to_pf(struct iavf_hw *hw,
diff --git a/drivers/net/ethernet/intel/iavf/iavf_trace.h b/drivers/net/ethernet/intel/iavf/iavf_trace.h
index 82fda6f5abf0..62212011c807 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_trace.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_trace.h
@@ -83,7 +83,7 @@ DECLARE_EVENT_CLASS(
__entry->ring = ring;
__entry->desc = desc;
__entry->buf = buf;
- __assign_str(devname, ring->netdev->name);
+ __assign_str(devname);
),
TP_printk(
@@ -128,7 +128,7 @@ DECLARE_EVENT_CLASS(
__entry->ring = ring;
__entry->desc = desc;
__entry->skb = skb;
- __assign_str(devname, ring->netdev->name);
+ __assign_str(devname);
),
TP_printk(
@@ -170,7 +170,7 @@ DECLARE_EVENT_CLASS(
TP_fast_assign(
__entry->skb = skb;
__entry->ring = ring;
- __assign_str(devname, ring->netdev->name);
+ __assign_str(devname);
),
TP_printk(
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
index b71484c87a84..26b424fd6718 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include <linux/bitfield.h>
+#include <linux/net/intel/libie/rx.h>
#include <linux/prefetch.h>
#include "iavf.h"
@@ -184,7 +185,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi)
* pending work.
*/
packets = tx_ring->stats.packets & INT_MAX;
- if (tx_ring->tx_stats.prev_pkt_ctr == packets) {
+ if (tx_ring->prev_pkt_ctr == packets) {
iavf_force_wb(vsi, tx_ring->q_vector);
continue;
}
@@ -193,7 +194,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi)
* to iavf_get_tx_pending()
*/
smp_rmb();
- tx_ring->tx_stats.prev_pkt_ctr =
+ tx_ring->prev_pkt_ctr =
iavf_get_tx_pending(tx_ring, true) ? packets : -1;
}
}
@@ -319,7 +320,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,
((j / WB_STRIDE) == 0) && (j > 0) &&
!test_bit(__IAVF_VSI_DOWN, vsi->state) &&
(IAVF_DESC_UNUSED(tx_ring) != tx_ring->count))
- tx_ring->arm_wb = true;
+ tx_ring->flags |= IAVF_TXR_FLAGS_ARM_WB;
}
/* notify netdev of completed buffers */
@@ -674,7 +675,7 @@ int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring)
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->tx_stats.prev_pkt_ctr = -1;
+ tx_ring->prev_pkt_ctr = -1;
return 0;
err:
@@ -689,11 +690,8 @@ err:
**/
static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
{
- unsigned long bi_size;
- u16 i;
-
/* ring already cleared, nothing to do */
- if (!rx_ring->rx_bi)
+ if (!rx_ring->rx_fqes)
return;
if (rx_ring->skb) {
@@ -701,41 +699,16 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
rx_ring->skb = NULL;
}
- /* Free all the Rx ring sk_buffs */
- for (i = 0; i < rx_ring->count; i++) {
- struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+ /* Free all the Rx ring buffers */
+ for (u32 i = rx_ring->next_to_clean; i != rx_ring->next_to_use; ) {
+ const struct libeth_fqe *rx_fqes = &rx_ring->rx_fqes[i];
- if (!rx_bi->page)
- continue;
+ page_pool_put_full_page(rx_ring->pp, rx_fqes->page, false);
- /* Invalidate cache lines that may have been written to by
- * device so that we avoid corrupting memory.
- */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->dma,
- rx_bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
-
- /* free resources associated with mapping */
- dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
- iavf_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE,
- IAVF_RX_DMA_ATTR);
-
- __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);
-
- rx_bi->page = NULL;
- rx_bi->page_offset = 0;
+ if (unlikely(++i == rx_ring->count))
+ i = 0;
}
- bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
- memset(rx_ring->rx_bi, 0, bi_size);
-
- /* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
-
- rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -748,15 +721,22 @@ static void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
**/
void iavf_free_rx_resources(struct iavf_ring *rx_ring)
{
+ struct libeth_fq fq = {
+ .fqes = rx_ring->rx_fqes,
+ .pp = rx_ring->pp,
+ };
+
iavf_clean_rx_ring(rx_ring);
- kfree(rx_ring->rx_bi);
- rx_ring->rx_bi = NULL;
if (rx_ring->desc) {
- dma_free_coherent(rx_ring->dev, rx_ring->size,
+ dma_free_coherent(rx_ring->pp->p.dev, rx_ring->size,
rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
+
+ libeth_rx_fq_destroy(&fq);
+ rx_ring->rx_fqes = NULL;
+ rx_ring->pp = NULL;
}
/**
@@ -767,38 +747,46 @@ void iavf_free_rx_resources(struct iavf_ring *rx_ring)
**/
int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
{
- struct device *dev = rx_ring->dev;
- int bi_size;
-
- /* warn if we are about to overwrite the pointer */
- WARN_ON(rx_ring->rx_bi);
- bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
- rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
- if (!rx_ring->rx_bi)
- goto err;
+ struct libeth_fq fq = {
+ .count = rx_ring->count,
+ .buf_len = LIBIE_MAX_RX_BUF_LEN,
+ .nid = NUMA_NO_NODE,
+ };
+ int ret;
+
+ ret = libeth_rx_fq_create(&fq, &rx_ring->q_vector->napi);
+ if (ret)
+ return ret;
+
+ rx_ring->pp = fq.pp;
+ rx_ring->rx_fqes = fq.fqes;
+ rx_ring->truesize = fq.truesize;
+ rx_ring->rx_buf_len = fq.buf_len;
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
- rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+ rx_ring->desc = dma_alloc_coherent(fq.pp->p.dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
- dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
+ dev_info(fq.pp->p.dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
rx_ring->size);
goto err;
}
- rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
return 0;
+
err:
- kfree(rx_ring->rx_bi);
- rx_ring->rx_bi = NULL;
+ libeth_rx_fq_destroy(&fq);
+ rx_ring->rx_fqes = NULL;
+ rx_ring->pp = NULL;
+
return -ENOMEM;
}
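The libeth fill-queue API bundles a page_pool and the per-descriptor software buffer array behind one request structure: the driver describes what it needs, libeth allocates both, and the results are copied into the ring. Condensed from the function above (no new APIs, just the lifecycle in one place):

struct libeth_fq fq = {
	.count	 = ring->count,
	.buf_len = LIBIE_MAX_RX_BUF_LEN,
	.nid	 = NUMA_NO_NODE,
};

if (libeth_rx_fq_create(&fq, &ring->q_vector->napi))
	return -ENOMEM;		/* page_pool and fqes allocated together */

ring->pp      = fq.pp;		/* per-queue page_pool */
ring->rx_fqes = fq.fqes;	/* per-descriptor buffer slots */
/* teardown: repopulate fq from the ring, then libeth_rx_fq_destroy(&fq) */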
@@ -811,9 +799,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
- /* update next to alloc since we have filled the ring */
- rx_ring->next_to_alloc = val;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -824,69 +809,6 @@ static void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
}
/**
- * iavf_rx_offset - Return expected offset into page to access data
- * @rx_ring: Ring we are requesting offset of
- *
- * Returns the offset value for ring into the data buffer.
- */
-static unsigned int iavf_rx_offset(struct iavf_ring *rx_ring)
-{
- return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0;
-}
-
-/**
- * iavf_alloc_mapped_page - recycle or make a new page
- * @rx_ring: ring to use
- * @bi: rx_buffer struct to modify
- *
- * Returns true if the page was successfully allocated or
- * reused.
- **/
-static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *bi)
-{
- struct page *page = bi->page;
- dma_addr_t dma;
-
- /* since we are recycling buffers we should seldom need to alloc */
- if (likely(page)) {
- rx_ring->rx_stats.page_reuse_count++;
- return true;
- }
-
- /* alloc new page for storage */
- page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
- if (unlikely(!page)) {
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- /* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0,
- iavf_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE,
- IAVF_RX_DMA_ATTR);
-
- /* if mapping failed free memory back to system since
- * there isn't much point in holding memory we can't use
- */
- if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_pages(page, iavf_rx_pg_order(rx_ring));
- rx_ring->rx_stats.alloc_page_failed++;
- return false;
- }
-
- bi->dma = dma;
- bi->page = page;
- bi->page_offset = iavf_rx_offset(rx_ring);
-
- /* initialize pagecnt_bias to 1 representing we fully own page */
- bi->pagecnt_bias = 1;
-
- return true;
-}
-
-/**
* iavf_receive_skb - Send a completed packet up the stack
* @rx_ring: rx ring in play
* @skb: packet to send up
@@ -916,38 +838,37 @@ static void iavf_receive_skb(struct iavf_ring *rx_ring,
**/
bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
{
+ const struct libeth_fq_fp fq = {
+ .pp = rx_ring->pp,
+ .fqes = rx_ring->rx_fqes,
+ .truesize = rx_ring->truesize,
+ .count = rx_ring->count,
+ };
u16 ntu = rx_ring->next_to_use;
union iavf_rx_desc *rx_desc;
- struct iavf_rx_buffer *bi;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
rx_desc = IAVF_RX_DESC(rx_ring, ntu);
- bi = &rx_ring->rx_bi[ntu];
do {
- if (!iavf_alloc_mapped_page(rx_ring, bi))
- goto no_buffers;
+ dma_addr_t addr;
- /* sync the buffer for use by the device */
- dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
- bi->page_offset,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
+ addr = libeth_rx_alloc(&fq, ntu);
+ if (addr == DMA_MAPPING_ERROR)
+ goto no_buffers;
/* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+ rx_desc->read.pkt_addr = cpu_to_le64(addr);
rx_desc++;
- bi++;
ntu++;
if (unlikely(ntu == rx_ring->count)) {
rx_desc = IAVF_RX_DESC(rx_ring, 0);
- bi = rx_ring->rx_bi;
ntu = 0;
}
@@ -966,6 +887,8 @@ no_buffers:
if (rx_ring->next_to_use != ntu)
iavf_release_rx_desc(rx_ring, ntu);
+ rx_ring->rx_stats.alloc_page_failed++;
+
/* make sure to come back via polling to try again after
* allocation failure
*/
@@ -982,38 +905,30 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi,
struct sk_buff *skb,
union iavf_rx_desc *rx_desc)
{
- struct iavf_rx_ptype_decoded decoded;
+ struct libeth_rx_pt decoded;
u32 rx_error, rx_status;
bool ipv4, ipv6;
u8 ptype;
u64 qword;
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
- rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
- rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
- decoded = decode_rx_desc_ptype(ptype);
-
skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = FIELD_GET(IAVF_RXD_QW1_PTYPE_MASK, qword);
- /* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ decoded = libie_rx_pt_parse(ptype);
+ if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded))
return;
+ rx_error = FIELD_GET(IAVF_RXD_QW1_ERROR_MASK, qword);
+ rx_status = FIELD_GET(IAVF_RXD_QW1_STATUS_MASK, qword);
+
/* did the hardware decode the packet and checksum? */
if (!(rx_status & BIT(IAVF_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* both known and outer_ip must be set for the below code to work */
- if (!(decoded.known && decoded.outer_ip))
- return;
-
- ipv4 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == IAVF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (ipv4 &&
(rx_error & (BIT(IAVF_RX_DESC_ERROR_IPE_SHIFT) |
@@ -1037,17 +952,7 @@ static void iavf_rx_checksum(struct iavf_vsi *vsi,
if (rx_error & BIT(IAVF_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case IAVF_RX_PTYPE_INNER_PROT_TCP:
- case IAVF_RX_PTYPE_INNER_PROT_UDP:
- case IAVF_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- fallthrough;
- default:
- break;
- }
-
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
checksum_fail:
@@ -1055,29 +960,6 @@ checksum_fail:
}
/**
- * iavf_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns a hash type to be used by skb_set_hash
- **/
-static int iavf_ptype_to_htype(u8 ptype)
-{
- struct iavf_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
-
- if (!decoded.known)
- return PKT_HASH_TYPE_NONE;
-
- if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4)
- return PKT_HASH_TYPE_L4;
- else if (decoded.outer_ip == IAVF_RX_PTYPE_OUTER_IP &&
- decoded.payload_layer == IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3)
- return PKT_HASH_TYPE_L3;
- else
- return PKT_HASH_TYPE_L2;
-}
-
-/**
* iavf_rx_hash - set the hash value in the skb
* @ring: descriptor ring
* @rx_desc: specific descriptor
@@ -1089,17 +971,19 @@ static void iavf_rx_hash(struct iavf_ring *ring,
struct sk_buff *skb,
u8 rx_ptype)
{
+ struct libeth_rx_pt decoded;
u32 hash;
const __le64 rss_mask =
cpu_to_le64((u64)IAVF_RX_DESC_FLTSTAT_RSS_HASH <<
IAVF_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (!(ring->netdev->features & NETIF_F_RXHASH))
+ decoded = libie_rx_pt_parse(rx_ptype);
+ if (!libeth_rx_pt_has_hash(ring->netdev, decoded))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, iavf_ptype_to_htype(rx_ptype));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
}
@@ -1152,95 +1036,9 @@ static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb)
}
/**
- * iavf_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
- * @old_buff: donor buffer to have page reused
- *
- * Synchronizes page for reuse by the adapter
- **/
-static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *old_buff)
-{
- struct iavf_rx_buffer *new_buff;
- u16 nta = rx_ring->next_to_alloc;
-
- new_buff = &rx_ring->rx_bi[nta];
-
- /* update, and store next to alloc */
- nta++;
- rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
-
- /* transfer page from old buffer to new buffer */
- new_buff->dma = old_buff->dma;
- new_buff->page = old_buff->page;
- new_buff->page_offset = old_buff->page_offset;
- new_buff->pagecnt_bias = old_buff->pagecnt_bias;
-}
-
-/**
- * iavf_can_reuse_rx_page - Determine if this page can be reused by
- * the adapter for another receive
- *
- * @rx_buffer: buffer containing the page
- *
- * If page is reusable, rx_buffer->page_offset is adjusted to point to
- * an unused region in the page.
- *
- * For small pages, @truesize will be a constant value, half the size
- * of the memory at page. We'll attempt to alternate between high and
- * low halves of the page, with one half ready for use by the hardware
- * and the other half being consumed by the stack. We use the page
- * ref count to determine whether the stack has finished consuming the
- * portion of this page that was passed up with a previous packet. If
- * the page ref count is >1, we'll assume the "other" half page is
- * still busy, and this page cannot be reused.
- *
- * For larger pages, @truesize will be the actual space used by the
- * received packet (adjusted upward to an even multiple of the cache
- * line size). This will advance through the page by the amount
- * actually consumed by the received packets while there is still
- * space for a buffer. Each region of larger pages will be used at
- * most once, after which the page will not be reused.
- *
- * In either case, if the page is reusable its refcount is increased.
- **/
-static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
-{
- unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
- struct page *page = rx_buffer->page;
-
- /* Is any reuse possible? */
- if (!dev_page_is_reusable(page))
- return false;
-
-#if (PAGE_SIZE < 8192)
- /* if we are only owner of page we can reuse it */
- if (unlikely((page_count(page) - pagecnt_bias) > 1))
- return false;
-#else
-#define IAVF_LAST_OFFSET \
- (SKB_WITH_OVERHEAD(PAGE_SIZE) - IAVF_RXBUFFER_2048)
- if (rx_buffer->page_offset > IAVF_LAST_OFFSET)
- return false;
-#endif
-
- /* If we have drained the page fragment pool we need to update
- * the pagecnt_bias and page count so that we fully restock the
- * number of references the driver holds.
- */
- if (unlikely(!pagecnt_bias)) {
- page_ref_add(page, USHRT_MAX);
- rx_buffer->pagecnt_bias = USHRT_MAX;
- }
-
- return true;
-}
-
-/**
* iavf_add_rx_frag - Add contents of Rx buffer to sk_buff
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: buffer containing page to add
* @skb: sk_buff to place the data into
+ * @rx_buffer: buffer containing page to add
* @size: packet length from rx_desc
*
* This function will add the data contained in rx_buffer->page to the skb.
@@ -1248,206 +1046,50 @@ static bool iavf_can_reuse_rx_page(struct iavf_rx_buffer *rx_buffer)
*
* The function will then update the page offset.
**/
-static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer,
- struct sk_buff *skb,
+static void iavf_add_rx_frag(struct sk_buff *skb,
+ const struct libeth_fqe *rx_buffer,
unsigned int size)
{
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
-#endif
-
- if (!size)
- return;
+ u32 hr = rx_buffer->page->pp->p.offset;
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
- rx_buffer->page_offset, size, truesize);
-
- /* page is being used so we must update the page offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
-}
-
-/**
- * iavf_get_rx_buffer - Fetch Rx buffer and synchronize data for use
- * @rx_ring: rx descriptor ring to transact packets on
- * @size: size of buffer to add to skb
- *
- * This function will pull an Rx buffer from the ring and synchronize it
- * for use by the CPU.
- */
-static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,
- const unsigned int size)
-{
- struct iavf_rx_buffer *rx_buffer;
-
- rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
- prefetchw(rx_buffer->page);
- if (!size)
- return rx_buffer;
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
-
- /* We have pulled a buffer for use, so decrement pagecnt_bias */
- rx_buffer->pagecnt_bias--;
-
- return rx_buffer;
-}
-
-/**
- * iavf_construct_skb - Allocate skb and populate it
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: rx buffer to pull data from
- * @size: size of buffer to add to skb
- *
- * This function allocates an skb. It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
- */
-static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer,
- unsigned int size)
-{
- void *va;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
-#endif
- unsigned int headlen;
- struct sk_buff *skb;
-
- if (!rx_buffer)
- return NULL;
- /* prefetch first cache line of first page */
- va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- net_prefetch(va);
-
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
- IAVF_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
- if (unlikely(!skb))
- return NULL;
-
- /* Determine available headroom for copy */
- headlen = size;
- if (headlen > IAVF_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, va, IAVF_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
-
- /* update all of the pointers */
- size -= headlen;
- if (size) {
- skb_add_rx_frag(skb, 0, rx_buffer->page,
- rx_buffer->page_offset + headlen,
- size, truesize);
-
- /* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
- } else {
- /* buffer is unused, reset bias back to rx_buffer */
- rx_buffer->pagecnt_bias++;
- }
-
- return skb;
+ rx_buffer->offset + hr, size, rx_buffer->truesize);
}
/**
* iavf_build_skb - Build skb around an existing buffer
- * @rx_ring: Rx descriptor ring to transact packets on
* @rx_buffer: Rx buffer to pull data from
* @size: size of buffer to add to skb
*
* This function builds an skb around an existing Rx buffer, taking care
* to set up the skb correctly and avoid any memcpy overhead.
*/
-static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer,
+static struct sk_buff *iavf_build_skb(const struct libeth_fqe *rx_buffer,
unsigned int size)
{
- void *va;
-#if (PAGE_SIZE < 8192)
- unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
-#else
- unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(IAVF_SKB_PAD + size);
-#endif
+ u32 hr = rx_buffer->page->pp->p.offset;
struct sk_buff *skb;
+ void *va;
- if (!rx_buffer || !size)
- return NULL;
/* prefetch first cache line of first page */
- va = page_address(rx_buffer->page) + rx_buffer->page_offset;
- net_prefetch(va);
+ va = page_address(rx_buffer->page) + rx_buffer->offset;
+ net_prefetch(va + hr);
/* build an skb around the page buffer */
- skb = napi_build_skb(va - IAVF_SKB_PAD, truesize);
+ skb = napi_build_skb(va, rx_buffer->truesize);
if (unlikely(!skb))
return NULL;
+ skb_mark_for_recycle(skb);
+
/* update pointers within the skb to store the data */
- skb_reserve(skb, IAVF_SKB_PAD);
+ skb_reserve(skb, hr);
__skb_put(skb, size);
- /* buffer is used by skb, update page_offset */
-#if (PAGE_SIZE < 8192)
- rx_buffer->page_offset ^= truesize;
-#else
- rx_buffer->page_offset += truesize;
-#endif
-
return skb;
}
/**
- * iavf_put_rx_buffer - Clean up used buffer and either recycle or free
- * @rx_ring: rx descriptor ring to transact packets on
- * @rx_buffer: rx buffer to pull data from
- *
- * This function will clean up the contents of the rx_buffer. It will
- * either recycle the buffer or unmap it and free the associated resources.
- */
-static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
- struct iavf_rx_buffer *rx_buffer)
-{
- if (!rx_buffer)
- return;
-
- if (iavf_can_reuse_rx_page(rx_buffer)) {
- /* hand second half of page back to the ring */
- iavf_reuse_rx_page(rx_ring, rx_buffer);
- rx_ring->rx_stats.page_reuse_count++;
- } else {
- /* we are not reusing the buffer so unmap it */
- dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
- iavf_rx_pg_size(rx_ring),
- DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
- __page_frag_cache_drain(rx_buffer->page,
- rx_buffer->pagecnt_bias);
- }
-
- /* clear contents of buffer_info */
- rx_buffer->page = NULL;
-}
-
-/**
* iavf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
@@ -1500,7 +1142,7 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
bool failure = false;
while (likely(total_rx_packets < (unsigned int)budget)) {
- struct iavf_rx_buffer *rx_buffer;
+ struct libeth_fqe *rx_buffer;
union iavf_rx_desc *rx_desc;
unsigned int size;
u16 vlan_tag = 0;
@@ -1535,28 +1177,27 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
size = FIELD_GET(IAVF_RXD_QW1_LENGTH_PBUF_MASK, qword);
iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);
- rx_buffer = iavf_get_rx_buffer(rx_ring, size);
+
+ rx_buffer = &rx_ring->rx_fqes[rx_ring->next_to_clean];
+ if (!libeth_rx_sync_for_cpu(rx_buffer, size))
+ goto skip_data;
/* retrieve a buffer from the ring */
if (skb)
- iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
- else if (ring_uses_build_skb(rx_ring))
- skb = iavf_build_skb(rx_ring, rx_buffer, size);
+ iavf_add_rx_frag(skb, rx_buffer, size);
else
- skb = iavf_construct_skb(rx_ring, rx_buffer, size);
+ skb = iavf_build_skb(rx_buffer, size);
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buff_failed++;
- if (rx_buffer && size)
- rx_buffer->pagecnt_bias++;
break;
}
- iavf_put_rx_buffer(rx_ring, rx_buffer);
+skip_data:
cleaned_count++;
- if (iavf_is_non_eop(rx_ring, rx_desc, skb))
+ if (iavf_is_non_eop(rx_ring, rx_desc, skb) || unlikely(!skb))
continue;
/* ERR_MASK will only have valid bits if EOP set, and
@@ -1743,8 +1384,8 @@ int iavf_napi_poll(struct napi_struct *napi, int budget)
clean_complete = false;
continue;
}
- arm_wb |= ring->arm_wb;
- ring->arm_wb = false;
+ arm_wb |= !!(ring->flags & IAVF_TXR_FLAGS_ARM_WB);
+ ring->flags &= ~IAVF_TXR_FLAGS_ARM_WB;
}
/* Handle case where we are called by netpoll with a budget of 0 */
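A minimal sketch of the Rx model the iavf_txrx.c hunks above converge on: buffers come from a page_pool, napi_build_skb() wraps the pool buffer in place, and skb_mark_for_recycle() returns the page to its pool when the skb is freed. demo_rx_one() and its arguments are illustrative, not part of the driver; hr stands for the pool headroom (pp->p.offset).

#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>

static struct sk_buff *demo_rx_one(struct page *page, u32 hr, u32 size,
				   u32 truesize)
{
	void *va = page_address(page);
	struct sk_buff *skb;

	/* wrap the pool buffer in place; truesize covers headroom,
	 * data and the shared info area
	 */
	skb = napi_build_skb(va, truesize);
	if (unlikely(!skb))
		return NULL;

	/* hand the page back to its page_pool when the skb is freed */
	skb_mark_for_recycle(skb);

	skb_reserve(skb, hr);	/* skip the pool-provided headroom */
	__skb_put(skb, size);	/* expose the received payload */

	return skb;
}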
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.h b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
index 10ba36602c0c..d7b5587aeb8e 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_txrx.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.h
@@ -80,79 +80,8 @@ enum iavf_dyn_idx_t {
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
BIT_ULL(IAVF_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))
-/* Supported Rx Buffer Sizes (a multiple of 128) */
-#define IAVF_RXBUFFER_256 256
-#define IAVF_RXBUFFER_1536 1536 /* 128B aligned standard Ethernet frame */
-#define IAVF_RXBUFFER_2048 2048
-#define IAVF_RXBUFFER_3072 3072 /* Used for large frames w/ padding */
-#define IAVF_MAX_RXBUFFER 9728 /* largest size for single descriptor */
-
-/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
- * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
- * this adds up to 512 bytes of extra data meaning the smallest allocation
- * we could have is 1K.
- * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
- * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
- */
-#define IAVF_RX_HDR_SIZE IAVF_RXBUFFER_256
-#define IAVF_PACKET_HDR_PAD (ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define iavf_rx_desc iavf_32byte_rx_desc
-#define IAVF_RX_DMA_ATTR \
- (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
-
-/* Attempt to maximize the headroom available for incoming frames. We
- * use a 2K buffer for receives and need 1536/1534 to store the data for
- * the frame. This leaves us with 512 bytes of room. From that we need
- * to deduct the space needed for the shared info and the padding needed
- * to IP align the frame.
- *
- * Note: For cache line sizes 256 or larger this value is going to end
- * up negative. In these cases we should fall back to the legacy
- * receive path.
- */
-#if (PAGE_SIZE < 8192)
-#define IAVF_2K_TOO_SMALL_WITH_PADDING \
-((NET_SKB_PAD + IAVF_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IAVF_RXBUFFER_2048))
-
-static inline int iavf_compute_pad(int rx_buf_len)
-{
- int page_size, pad_size;
-
- page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
- pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;
-
- return pad_size;
-}
-
-static inline int iavf_skb_pad(void)
-{
- int rx_buf_len;
-
- /* If a 2K buffer cannot handle a standard Ethernet frame then
- * optimize padding for a 3K buffer instead of a 1.5K buffer.
- *
- * For a 3K buffer we need to add enough padding to allow for
- * tailroom due to NET_IP_ALIGN possibly shifting us out of
- * cache-line alignment.
- */
- if (IAVF_2K_TOO_SMALL_WITH_PADDING)
- rx_buf_len = IAVF_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
- else
- rx_buf_len = IAVF_RXBUFFER_1536;
-
- /* if needed make room for NET_IP_ALIGN */
- rx_buf_len -= NET_IP_ALIGN;
-
- return iavf_compute_pad(rx_buf_len);
-}
-
-#define IAVF_SKB_PAD iavf_skb_pad()
-#else
-#define IAVF_2K_TOO_SMALL_WITH_PADDING false
-#define IAVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
-#endif
-
/**
* iavf_test_staterr - tests bits in Rx descriptor status and error fields
* @rx_desc: pointer to receive descriptor (in le64 format)
@@ -271,17 +200,6 @@ struct iavf_tx_buffer {
u32 tx_flags;
};
-struct iavf_rx_buffer {
- dma_addr_t dma;
- struct page *page;
-#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
- __u32 page_offset;
-#else
- __u16 page_offset;
-#endif
- __u16 pagecnt_bias;
-};
-
struct iavf_queue_stats {
u64 packets;
u64 bytes;
@@ -293,7 +211,6 @@ struct iavf_tx_queue_stats {
u64 tx_done_old;
u64 tx_linearize;
u64 tx_force_wb;
- int prev_pkt_ctr;
u64 tx_lost_interrupt;
};
@@ -301,14 +218,6 @@ struct iavf_rx_queue_stats {
u64 non_eop_descs;
u64 alloc_page_failed;
u64 alloc_buff_failed;
- u64 page_reuse_count;
- u64 realloc_count;
-};
-
-enum iavf_ring_state_t {
- __IAVF_TX_FDIR_INIT_DONE,
- __IAVF_TX_XPS_INIT_DONE,
- __IAVF_RING_STATE_NBITS /* must be last */
};
/* some useful defines for virtchannel interface, which
@@ -326,16 +235,19 @@ enum iavf_ring_state_t {
struct iavf_ring {
struct iavf_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
- struct device *dev; /* Used for DMA mapping */
+ union {
+ struct page_pool *pp; /* Used on Rx for buffer management */
+ struct device *dev; /* Used on Tx for DMA mapping */
+ };
struct net_device *netdev; /* netdev ring maps to */
union {
+ struct libeth_fqe *rx_fqes;
struct iavf_tx_buffer *tx_bi;
- struct iavf_rx_buffer *rx_bi;
};
- DECLARE_BITMAP(state, __IAVF_RING_STATE_NBITS);
- u16 queue_index; /* Queue number of ring */
- u8 dcb_tc; /* Traffic class of ring */
u8 __iomem *tail;
+ u32 truesize;
+
+ u16 queue_index; /* Queue number of ring */
/* high bit set means dynamic, use accessors routines to read/write.
* hardware only supports 2us resolution for the ITR registers.
@@ -345,23 +257,15 @@ struct iavf_ring {
u16 itr_setting;
u16 count; /* Number of descriptors */
- u16 reg_idx; /* HW register index of the ring */
- u16 rx_buf_len;
/* used in interrupt processing */
u16 next_to_use;
u16 next_to_clean;
- u8 atr_sample_rate;
- u8 atr_count;
-
- bool ring_active; /* is ring online or not */
- bool arm_wb; /* do something to arm write back */
- u8 packet_stride;
-
u16 flags;
#define IAVF_TXR_FLAGS_WB_ON_ITR BIT(0)
-#define IAVF_RXR_FLAGS_BUILD_SKB_ENABLED BIT(1)
+#define IAVF_TXR_FLAGS_ARM_WB BIT(1)
+/* BIT(2) is free */
#define IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1 BIT(3)
#define IAVF_TXR_FLAGS_VLAN_TAG_LOC_L2TAG2 BIT(4)
#define IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2 BIT(5)
@@ -374,6 +278,7 @@ struct iavf_ring {
struct iavf_rx_queue_stats rx_stats;
};
+ int prev_pkt_ctr; /* For Tx stall detection */
unsigned int size; /* length of descriptor ring in bytes */
dma_addr_t dma; /* physical address of ring */
@@ -381,7 +286,6 @@ struct iavf_ring {
struct iavf_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
- u16 next_to_alloc;
struct sk_buff *skb; /* When iavf_clean_rx_ring_irq() must
* return before it sees the EOP for
* the current packet, we save that skb
@@ -390,22 +294,9 @@ struct iavf_ring {
* iavf_clean_rx_ring_irq() is called
* for this ring.
*/
-} ____cacheline_internodealigned_in_smp;
-
-static inline bool ring_uses_build_skb(struct iavf_ring *ring)
-{
- return !!(ring->flags & IAVF_RXR_FLAGS_BUILD_SKB_ENABLED);
-}
-static inline void set_ring_build_skb_enabled(struct iavf_ring *ring)
-{
- ring->flags |= IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
-}
-
-static inline void clear_ring_build_skb_enabled(struct iavf_ring *ring)
-{
- ring->flags &= ~IAVF_RXR_FLAGS_BUILD_SKB_ENABLED;
-}
+ u32 rx_buf_len;
+} ____cacheline_internodealigned_in_smp;
#define IAVF_ITR_ADAPTIVE_MIN_INC 0x0002
#define IAVF_ITR_ADAPTIVE_MIN_USECS 0x0002
@@ -428,17 +319,6 @@ struct iavf_ring_container {
#define iavf_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-static inline unsigned int iavf_rx_pg_order(struct iavf_ring *ring)
-{
-#if (PAGE_SIZE < 8192)
- if (ring->rx_buf_len > (PAGE_SIZE / 2))
- return 1;
-#endif
- return 0;
-}
-
-#define iavf_rx_pg_size(_ring) (PAGE_SIZE << iavf_rx_pg_order(_ring))
-
bool iavf_alloc_rx_buffers(struct iavf_ring *rxr, u16 cleaned_count);
netdev_tx_t iavf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
int iavf_setup_tx_descriptors(struct iavf_ring *tx_ring);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_type.h b/drivers/net/ethernet/intel/iavf/iavf_type.h
index 2b6a207fa441..f6b09e57abce 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_type.h
+++ b/drivers/net/ethernet/intel/iavf/iavf_type.h
@@ -10,8 +10,6 @@
#include "iavf_adminq.h"
#include "iavf_devids.h"
-#define IAVF_RXQ_CTX_DBUFF_SHIFT 7
-
/* IAVF_MASK is a macro used on 32 bit registers */
#define IAVF_MASK(mask, shift) ((u32)(mask) << (shift))
@@ -327,94 +325,6 @@ enum iavf_rx_desc_error_l3l4e_fcoe_masks {
#define IAVF_RXD_QW1_PTYPE_SHIFT 30
#define IAVF_RXD_QW1_PTYPE_MASK (0xFFULL << IAVF_RXD_QW1_PTYPE_SHIFT)
-/* Packet type non-ip values */
-enum iavf_rx_l2_ptype {
- IAVF_RX_PTYPE_L2_RESERVED = 0,
- IAVF_RX_PTYPE_L2_MAC_PAY2 = 1,
- IAVF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- IAVF_RX_PTYPE_L2_FIP_PAY2 = 3,
- IAVF_RX_PTYPE_L2_OUI_PAY2 = 4,
- IAVF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- IAVF_RX_PTYPE_L2_LLDP_PAY2 = 6,
- IAVF_RX_PTYPE_L2_ECP_PAY2 = 7,
- IAVF_RX_PTYPE_L2_EVB_PAY2 = 8,
- IAVF_RX_PTYPE_L2_QCN_PAY2 = 9,
- IAVF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- IAVF_RX_PTYPE_L2_ARP = 11,
- IAVF_RX_PTYPE_L2_FCOE_PAY3 = 12,
- IAVF_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13,
- IAVF_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14,
- IAVF_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15,
- IAVF_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16,
- IAVF_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20,
- IAVF_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21,
- IAVF_RX_PTYPE_GRENAT4_MAC_PAY3 = 58,
- IAVF_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87,
- IAVF_RX_PTYPE_GRENAT6_MAC_PAY3 = 124,
- IAVF_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153
-};
-
-struct iavf_rx_ptype_decoded {
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:1;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
-};
-
-enum iavf_rx_ptype_outer_ip {
- IAVF_RX_PTYPE_OUTER_L2 = 0,
- IAVF_RX_PTYPE_OUTER_IP = 1
-};
-
-enum iavf_rx_ptype_outer_ip_ver {
- IAVF_RX_PTYPE_OUTER_NONE = 0,
- IAVF_RX_PTYPE_OUTER_IPV4 = 0,
- IAVF_RX_PTYPE_OUTER_IPV6 = 1
-};
-
-enum iavf_rx_ptype_outer_fragmented {
- IAVF_RX_PTYPE_NOT_FRAG = 0,
- IAVF_RX_PTYPE_FRAG = 1
-};
-
-enum iavf_rx_ptype_tunnel_type {
- IAVF_RX_PTYPE_TUNNEL_NONE = 0,
- IAVF_RX_PTYPE_TUNNEL_IP_IP = 1,
- IAVF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- IAVF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum iavf_rx_ptype_tunnel_end_prot {
- IAVF_RX_PTYPE_TUNNEL_END_NONE = 0,
- IAVF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- IAVF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum iavf_rx_ptype_inner_prot {
- IAVF_RX_PTYPE_INNER_PROT_NONE = 0,
- IAVF_RX_PTYPE_INNER_PROT_UDP = 1,
- IAVF_RX_PTYPE_INNER_PROT_TCP = 2,
- IAVF_RX_PTYPE_INNER_PROT_SCTP = 3,
- IAVF_RX_PTYPE_INNER_PROT_ICMP = 4,
- IAVF_RX_PTYPE_INNER_PROT_TIMESYNC = 5
-};
-
-enum iavf_rx_ptype_payload_layer {
- IAVF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- IAVF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
#define IAVF_RXD_QW1_LENGTH_PBUF_SHIFT 38
#define IAVF_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \
IAVF_RXD_QW1_LENGTH_PBUF_SHIFT)
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 22f2df7c460b..1e543f6a7c30 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */
+#include <linux/net/intel/libie/rx.h>
+
#include "iavf.h"
#include "iavf_prototype.h"
@@ -268,13 +270,13 @@ int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter)
void iavf_configure_queues(struct iavf_adapter *adapter)
{
struct virtchnl_vsi_queue_config_info *vqci;
- int i, max_frame = adapter->vf_res->max_mtu;
int pairs = adapter->num_active_queues;
struct virtchnl_queue_pair_info *vqpi;
+ u32 i, max_frame;
size_t len;
- if (max_frame > IAVF_MAX_RXBUFFER || !max_frame)
- max_frame = IAVF_MAX_RXBUFFER;
+ max_frame = LIBIE_MAX_RX_FRM_LEN(adapter->rx_rings->pp->p.offset);
+ max_frame = min_not_zero(adapter->vf_res->max_mtu, max_frame);
if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -288,11 +290,6 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
if (!vqci)
return;
- /* Limit maximum frame size when jumbo frames is not enabled */
- if (!(adapter->flags & IAVF_FLAG_LEGACY_RX) &&
- (adapter->netdev->mtu <= ETH_DATA_LEN))
- max_frame = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
-
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -309,9 +306,7 @@ void iavf_configure_queues(struct iavf_adapter *adapter)
vqpi->rxq.ring_len = adapter->rx_rings[i].count;
vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
vqpi->rxq.max_pkt_size = max_frame;
- vqpi->rxq.databuffer_size =
- ALIGN(adapter->rx_rings[i].rx_buf_len,
- BIT_ULL(IAVF_RXQ_CTX_DBUFF_SHIFT));
+ vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
if (CRC_OFFLOAD_ALLOWED(adapter))
vqpi->rxq.crc_disable = !!(adapter->netdev->features &
NETIF_F_RXFCS);
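A short worked example of the max_pkt_size clamp introduced above: min_not_zero() (include/linux/minmax.h) returns the smaller of its arguments while treating 0 as "no limit", so a PF that does not advertise max_mtu falls back to the libie bound. The helper name and the literal numbers are illustrative only.

#include <linux/minmax.h>
#include <linux/types.h>

/* 0 from the PF means "no MTU cap advertised", so it never wins */
static u32 demo_clamp_max_frame(u32 vf_max_mtu, u32 pool_bound)
{
	return min_not_zero(vf_max_mtu, pool_bound);
}

/* demo_clamp_max_frame(0, 9702) == 9702
 * demo_clamp_max_frame(1500, 9702) == 1500
 */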
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index cddd82d4ca0f..03500e28ac99 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -5,6 +5,7 @@
# Makefile for the Intel(R) Ethernet Connection E800 Series Linux Driver
#
+subdir-ccflags-y += -I$(src)
obj-$(CONFIG_ICE) += ice.o
ice-y := ice_main.o \
@@ -28,7 +29,8 @@ ice-y := ice_main.o \
ice_flex_pipe.o \
ice_flow.o \
ice_idc.o \
- ice_devlink.o \
+ devlink/devlink.o \
+ devlink/devlink_port.o \
ice_ddp.o \
ice_fw_update.o \
ice_lag.o \
@@ -36,7 +38,8 @@ ice-y := ice_main.o \
ice_repr.o \
ice_tc_lib.o \
ice_fwlog.o \
- ice_debugfs.o
+ ice_debugfs.o \
+ ice_adapter.o
ice-$(CONFIG_PCI_IOV) += \
ice_sriov.o \
ice_virtchnl.o \
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c
index b516e42b41f0..810a901d7afd 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c
@@ -5,13 +5,11 @@
#include "ice.h"
#include "ice_lib.h"
-#include "ice_devlink.h"
+#include "devlink.h"
#include "ice_eswitch.h"
#include "ice_fw_update.h"
#include "ice_dcb_lib.h"
-static int ice_active_port_option = -1;
-
/* context for devlink info version reporting */
struct ice_info_ctx {
char buf[128];
@@ -478,17 +476,17 @@ ice_devlink_reload_down(struct devlink *devlink, bool netns_change,
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
if (ice_is_eswitch_mode_switchdev(pf)) {
NL_SET_ERR_MSG_MOD(extack,
- "Go to legacy mode before doing reinit\n");
+ "Go to legacy mode before doing reinit");
return -EOPNOTSUPP;
}
if (ice_is_adq_active(pf)) {
NL_SET_ERR_MSG_MOD(extack,
- "Turn off ADQ before doing reinit\n");
+ "Turn off ADQ before doing reinit");
return -EOPNOTSUPP;
}
if (ice_has_vfs(pf)) {
NL_SET_ERR_MSG_MOD(extack,
- "Remove all VFs before doing reinit\n");
+ "Remove all VFs before doing reinit");
return -EOPNOTSUPP;
}
ice_devlink_reinit_down(pf);
@@ -526,248 +524,153 @@ ice_devlink_reload_empr_finish(struct ice_pf *pf,
}
/**
- * ice_devlink_port_opt_speed_str - convert speed to a string
- * @speed: speed value
- */
-static const char *ice_devlink_port_opt_speed_str(u8 speed)
-{
- switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
- case ICE_AQC_PORT_OPT_MAX_LANE_100M:
- return "0.1";
- case ICE_AQC_PORT_OPT_MAX_LANE_1G:
- return "1";
- case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
- return "2.5";
- case ICE_AQC_PORT_OPT_MAX_LANE_5G:
- return "5";
- case ICE_AQC_PORT_OPT_MAX_LANE_10G:
- return "10";
- case ICE_AQC_PORT_OPT_MAX_LANE_25G:
- return "25";
- case ICE_AQC_PORT_OPT_MAX_LANE_50G:
- return "50";
- case ICE_AQC_PORT_OPT_MAX_LANE_100G:
- return "100";
- }
-
- return "-";
-}
-
-#define ICE_PORT_OPT_DESC_LEN 50
-/**
- * ice_devlink_port_options_print - Print available port split options
- * @pf: the PF to print split port options
+ * ice_get_tx_topo_user_sel - Read user's choice from flash
+ * @pf: pointer to pf structure
+ * @layers: value read from flash will be saved here
+ *
+ * Reads user's preference for Tx Scheduler Topology Tree from PFA TLV.
*
- * Prints a table with available port split options and max port speeds
+ * Return: zero when read was successful, negative values otherwise.
*/
-static void ice_devlink_port_options_print(struct ice_pf *pf)
+static int ice_get_tx_topo_user_sel(struct ice_pf *pf, u8 *layers)
{
- u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
- struct ice_aqc_get_port_options_elem *options, *opt;
- struct device *dev = ice_pf_to_dev(pf);
- bool active_valid, pending_valid;
- char desc[ICE_PORT_OPT_DESC_LEN];
- const char *str;
- int status;
+ struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
+ struct ice_hw *hw = &pf->hw;
+ int err;
- options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
- sizeof(*options), GFP_KERNEL);
- if (!options)
- return;
+ err = ice_acquire_nvm(hw, ICE_RES_READ);
+ if (err)
+ return err;
- for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
- opt = options + i * ICE_AQC_PORT_OPT_MAX;
- options_count = ICE_AQC_PORT_OPT_MAX;
- active_valid = 0;
+ err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
+ sizeof(usr_sel), &usr_sel, true, true, NULL);
+ if (err)
+ goto exit_release_res;
- status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
- i, true, &active_idx,
- &active_valid, &pending_idx,
- &pending_valid);
- if (status) {
- dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
- i, status);
- goto err;
- }
- }
+ if (usr_sel.data & ICE_AQC_NVM_TX_TOPO_USER_SEL)
+ *layers = ICE_SCHED_5_LAYERS;
+ else
+ *layers = ICE_SCHED_9_LAYERS;
- dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
- dev_dbg(dev, "Status Split Quad 0 Quad 1\n");
- dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n");
+exit_release_res:
+ ice_release_nvm(hw);
- for (i = 0; i < options_count; i++) {
- cnt = 0;
+ return err;
+}
- if (i == ice_active_port_option)
- str = "Active";
- else if ((i == pending_idx) && pending_valid)
- str = "Pending";
- else
- str = "";
+/**
+ * ice_update_tx_topo_user_sel - Save user's preference in flash
+ * @pf: pointer to pf structure
+ * @layers: value to be saved in flash
+ *
+ * Variable "layers" defines user's preference about number of layers in Tx
+ * Scheduler Topology Tree. This choice should be stored in PFA TLV field
+ * and be picked up by driver, next time during init.
+ *
+ * Return: zero when save was successful, negative values otherwise.
+ */
+static int ice_update_tx_topo_user_sel(struct ice_pf *pf, int layers)
+{
+ struct ice_aqc_nvm_tx_topo_user_sel usr_sel = {};
+ struct ice_hw *hw = &pf->hw;
+ int err;
- cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
- "%-8s", str);
+ err = ice_acquire_nvm(hw, ICE_RES_WRITE);
+ if (err)
+ return err;
- cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
- "%-6u", options[i].pmd);
+ err = ice_aq_read_nvm(hw, ICE_AQC_NVM_TX_TOPO_MOD_ID, 0,
+ sizeof(usr_sel), &usr_sel, true, true, NULL);
+ if (err)
+ goto exit_release_res;
- for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
- speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
- str = ice_devlink_port_opt_speed_str(speed);
- cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
- "%3s ", str);
- }
+ if (layers == ICE_SCHED_5_LAYERS)
+ usr_sel.data |= ICE_AQC_NVM_TX_TOPO_USER_SEL;
+ else
+ usr_sel.data &= ~ICE_AQC_NVM_TX_TOPO_USER_SEL;
- dev_dbg(dev, "%s\n", desc);
- }
+ err = ice_write_one_nvm_block(pf, ICE_AQC_NVM_TX_TOPO_MOD_ID, 2,
+ sizeof(usr_sel.data), &usr_sel.data,
+ true, NULL, NULL);
+exit_release_res:
+ ice_release_nvm(hw);
-err:
- kfree(options);
+ return err;
}
/**
- * ice_devlink_aq_set_port_option - Send set port option admin queue command
- * @pf: the PF to print split port options
- * @option_idx: selected port option
- * @extack: extended netdev ack structure
+ * ice_devlink_tx_sched_layers_get - Get tx_scheduling_layers parameter
+ * @devlink: pointer to the devlink instance
+ * @id: the parameter ID to get
+ * @ctx: context to store the parameter value
*
- * Sends set port option admin queue command with selected port option and
- * calls NVM write activate.
+ * Return: zero on success and negative value on failure.
*/
-static int
-ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
- struct netlink_ext_ack *extack)
+static int ice_devlink_tx_sched_layers_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
{
- struct device *dev = ice_pf_to_dev(pf);
- int status;
-
- status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
- if (status) {
- dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
- status, pf->hw.adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
- return -EIO;
- }
-
- status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
- if (status) {
- dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
- status, pf->hw.adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
- return -EIO;
- }
-
- status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
- if (status) {
- dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
- status, pf->hw.adminq.sq_last_status);
- NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
- ice_release_nvm(&pf->hw);
- return -EIO;
- }
+ struct ice_pf *pf = devlink_priv(devlink);
+ int err;
- ice_release_nvm(&pf->hw);
+ err = ice_get_tx_topo_user_sel(pf, &ctx->val.vu8);
+ if (err)
+ return err;
- NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
return 0;
}
/**
- * ice_devlink_port_split - .port_split devlink handler
- * @devlink: devlink instance structure
- * @port: devlink port structure
- * @count: number of ports to split to
- * @extack: extended netdev ack structure
- *
- * Callback for the devlink .port_split operation.
- *
- * Unfortunately, the devlink expression of available options is limited
- * to just a number, so search for an FW port option which supports
- * the specified number. As there could be multiple FW port options with
- * the same port split count, allow switching between them. When the same
- * port split count request is issued again, switch to the next FW port
- * option with the same port split count.
+ * ice_devlink_tx_sched_layers_set - Set tx_scheduling_layers parameter
+ * @devlink: pointer to the devlink instance
+ * @id: the parameter ID to set
+ * @ctx: context to get the parameter value
+ * @extack: netlink extended ACK structure
*
- * Return: zero on success or an error code on failure.
+ * Return: zero on success and negative value on failure.
*/
-static int
-ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
- unsigned int count, struct netlink_ext_ack *extack)
+static int ice_devlink_tx_sched_layers_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
- struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
- u8 i, j, active_idx, pending_idx, new_option;
struct ice_pf *pf = devlink_priv(devlink);
- u8 option_count = ICE_AQC_PORT_OPT_MAX;
- struct device *dev = ice_pf_to_dev(pf);
- bool active_valid, pending_valid;
- int status;
-
- status = ice_aq_get_port_options(&pf->hw, options, &option_count,
- 0, true, &active_idx, &active_valid,
- &pending_idx, &pending_valid);
- if (status) {
- dev_dbg(dev, "Couldn't read port split options, err = %d\n",
- status);
- NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
- return -EIO;
- }
-
- new_option = ICE_AQC_PORT_OPT_MAX;
- active_idx = pending_valid ? pending_idx : active_idx;
- for (i = 1; i <= option_count; i++) {
- /* In order to allow switching between FW port options with
- * the same port split count, search for a new option starting
- * from the active/pending option (with array wrap around).
- */
- j = (active_idx + i) % option_count;
-
- if (count == options[j].pmd) {
- new_option = j;
- break;
- }
- }
-
- if (new_option == active_idx) {
- dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
- count);
- NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
- ice_devlink_port_options_print(pf);
- return -EINVAL;
- }
-
- if (new_option == ICE_AQC_PORT_OPT_MAX) {
- dev_dbg(dev, "request to split: count: %u not found\n", count);
- NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
- ice_devlink_port_options_print(pf);
- return -EINVAL;
- }
+ int err;
- status = ice_devlink_aq_set_port_option(pf, new_option, extack);
- if (status)
- return status;
+ err = ice_update_tx_topo_user_sel(pf, ctx->val.vu8);
+ if (err)
+ return err;
- ice_devlink_port_options_print(pf);
+ NL_SET_ERR_MSG_MOD(extack,
+ "Tx scheduling layers have been changed on this device. You must do the PCI slot powercycle for the change to take effect.");
return 0;
}
/**
- * ice_devlink_port_unsplit - .port_unsplit devlink handler
- * @devlink: devlink instance structure
- * @port: devlink port structure
- * @extack: extended netdev ack structure
+ * ice_devlink_tx_sched_layers_validate - Validate passed tx_scheduling_layers
+ * parameter value
+ * @devlink: unused pointer to devlink instance
+ * @id: the parameter ID to validate
+ * @val: value to validate
+ * @extack: netlink extended ACK structure
*
- * Callback for the devlink .port_unsplit operation.
- * Calls ice_devlink_port_split with split count set to 1.
- * There could be no FW option available with split count 1.
+ * Supported values are:
+ * - 5 - five layers Tx Scheduler Topology Tree
+ * - 9 - nine layers Tx Scheduler Topology Tree
*
- * Return: zero on success or an error code on failure.
+ * Return: zero when passed parameter value is supported. Negative value on
+ * error.
*/
-static int
-ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
- struct netlink_ext_ack *extack)
+static int ice_devlink_tx_sched_layers_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
{
- return ice_devlink_port_split(devlink, port, 1, extack);
+ if (val.vu8 != ICE_SCHED_5_LAYERS && val.vu8 != ICE_SCHED_9_LAYERS) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Wrong number of tx scheduler layers provided.");
+ return -EINVAL;
+ }
+
+ return 0;
}
/**
@@ -891,10 +794,8 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v
tc_node = pi->root->children[0];
mutex_lock(&pi->sched_lock);
- devl_lock(devlink);
for (i = 0; i < tc_node->num_children; i++)
ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf);
- devl_unlock(devlink);
mutex_unlock(&pi->sched_lock);
return 0;
@@ -1290,18 +1191,16 @@ static int ice_devlink_set_parent(struct devlink_rate *devlink_rate,
static int ice_devlink_reinit_up(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
- struct ice_vsi_cfg_params params;
int err;
err = ice_init_dev(pf);
if (err)
return err;
- params = ice_vsi_to_params(vsi);
- params.flags = ICE_VSI_FLAG_INIT;
+ vsi->flags = ICE_VSI_FLAG_INIT;
rtnl_lock();
- err = ice_vsi_cfg(vsi, &params);
+ err = ice_vsi_cfg(vsi);
rtnl_unlock();
if (err)
goto err_vsi_cfg;
@@ -1391,9 +1290,9 @@ ice_devlink_enable_roce_get(struct devlink *devlink, u32 id,
return 0;
}
-static int
-ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int ice_devlink_enable_roce_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
bool roce_ena = ctx->val.vbool;
@@ -1442,9 +1341,9 @@ ice_devlink_enable_iw_get(struct devlink *devlink, u32 id,
return 0;
}
-static int
-ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+static int ice_devlink_enable_iw_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct ice_pf *pf = devlink_priv(devlink);
bool iw_ena = ctx->val.vbool;
@@ -1482,7 +1381,132 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id,
return 0;
}
-static const struct devlink_param ice_devlink_params[] = {
+#define DEVLINK_LOCAL_FWD_DISABLED_STR "disabled"
+#define DEVLINK_LOCAL_FWD_ENABLED_STR "enabled"
+#define DEVLINK_LOCAL_FWD_PRIORITIZED_STR "prioritized"
+
+/**
+ * ice_devlink_local_fwd_mode_to_str - Get string for local_fwd mode.
+ * @mode: local forwarding for mode used in port_info struct.
+ *
+ * Return: Mode respective string or "Invalid".
+ */
+static const char *
+ice_devlink_local_fwd_mode_to_str(enum ice_local_fwd_mode mode)
+{
+ switch (mode) {
+ case ICE_LOCAL_FWD_MODE_ENABLED:
+ return DEVLINK_LOCAL_FWD_ENABLED_STR;
+ case ICE_LOCAL_FWD_MODE_PRIORITIZED:
+ return DEVLINK_LOCAL_FWD_PRIORITIZED_STR;
+ case ICE_LOCAL_FWD_MODE_DISABLED:
+ return DEVLINK_LOCAL_FWD_DISABLED_STR;
+ }
+
+ return "Invalid";
+}
+
+/**
+ * ice_devlink_local_fwd_str_to_mode - Get local_fwd mode from string name.
+ * @mode_str: local forwarding mode string.
+ *
+ * Return: Mode value or negative number if invalid.
+ */
+static int ice_devlink_local_fwd_str_to_mode(const char *mode_str)
+{
+ if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_ENABLED_STR))
+ return ICE_LOCAL_FWD_MODE_ENABLED;
+ else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_PRIORITIZED_STR))
+ return ICE_LOCAL_FWD_MODE_PRIORITIZED;
+ else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_DISABLED_STR))
+ return ICE_LOCAL_FWD_MODE_DISABLED;
+
+ return -EINVAL;
+}
+
+/**
+ * ice_devlink_local_fwd_get - Get local_fwd parameter.
+ * @devlink: Pointer to the devlink instance.
+ * @id: The parameter ID to get.
+ * @ctx: Context to store the parameter value.
+ *
+ * Return: Zero.
+ */
+static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct ice_port_info *pi;
+ const char *mode_str;
+
+ pi = pf->hw.port_info;
+ mode_str = ice_devlink_local_fwd_mode_to_str(pi->local_fwd_mode);
+ snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", mode_str);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_local_fwd_set - Set local_fwd parameter.
+ * @devlink: Pointer to the devlink instance.
+ * @id: The parameter ID to set.
+ * @ctx: Context to get the parameter value.
+ * @extack: Netlink extended ACK structure.
+ *
+ * Return: Zero.
+ */
+static int ice_devlink_local_fwd_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ int new_local_fwd_mode = ice_devlink_local_fwd_str_to_mode(ctx->val.vstr);
+ struct ice_pf *pf = devlink_priv(devlink);
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_port_info *pi;
+
+ pi = pf->hw.port_info;
+ if (pi->local_fwd_mode != new_local_fwd_mode) {
+ pi->local_fwd_mode = new_local_fwd_mode;
+ dev_info(dev, "Setting local_fwd to %s\n", ctx->val.vstr);
+ ice_schedule_reset(pf, ICE_RESET_CORER);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_local_fwd_validate - Validate passed local_fwd parameter value.
+ * @devlink: Unused pointer to devlink instance.
+ * @id: The parameter ID to validate.
+ * @val: Value to validate.
+ * @extack: Netlink extended ACK structure.
+ *
+ * Supported values are:
+ * "enabled" - local_fwd is enabled
+ * "disabled" - local_fwd is disabled
+ * "prioritized" - local_fwd traffic is prioritized in scheduling
+ *
+ * Return: Zero when passed parameter value is supported. Negative value on
+ * error.
+ */
+static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ if (ice_devlink_local_fwd_str_to_mode(val.vstr) < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "Error: Requested value is not supported.");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+enum ice_param_id {
+ ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
+ ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
+};
+
+static const struct devlink_param ice_dvl_rdma_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_RUNTIME),
ice_devlink_enable_roce_get,
ice_devlink_enable_roce_set,
@@ -1491,7 +1515,22 @@ static const struct devlink_param ice_devlink_params[] = {
ice_devlink_enable_iw_get,
ice_devlink_enable_iw_set,
ice_devlink_enable_iw_validate),
+};
+static const struct devlink_param ice_dvl_sched_params[] = {
+ DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS,
+ "tx_scheduling_layers",
+ DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_PERMANENT),
+ ice_devlink_tx_sched_layers_get,
+ ice_devlink_tx_sched_layers_set,
+ ice_devlink_tx_sched_layers_validate),
+ DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_LOCAL_FWD,
+ "local_forwarding", DEVLINK_PARAM_TYPE_STRING,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ice_devlink_local_fwd_get,
+ ice_devlink_local_fwd_set,
+ ice_devlink_local_fwd_validate),
};
static void ice_devlink_free(void *devlink_ptr)
@@ -1534,7 +1573,7 @@ void ice_devlink_register(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
- devlink_register(devlink);
+ devl_register(devlink);
}
/**
@@ -1545,197 +1584,38 @@ void ice_devlink_register(struct ice_pf *pf)
*/
void ice_devlink_unregister(struct ice_pf *pf)
{
- devlink_unregister(priv_to_devlink(pf));
-}
-
-/**
- * ice_devlink_set_switch_id - Set unique switch id based on pci dsn
- * @pf: the PF to create a devlink port for
- * @ppid: struct with switch id information
- */
-static void
-ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
-{
- struct pci_dev *pdev = pf->pdev;
- u64 id;
-
- id = pci_get_dsn(pdev);
-
- ppid->id_len = sizeof(id);
- put_unaligned_be64(id, &ppid->id);
+ devl_unregister(priv_to_devlink(pf));
}
int ice_devlink_register_params(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
-
- return devlink_params_register(devlink, ice_devlink_params,
- ARRAY_SIZE(ice_devlink_params));
-}
-
-void ice_devlink_unregister_params(struct ice_pf *pf)
-{
- devlink_params_unregister(priv_to_devlink(pf), ice_devlink_params,
- ARRAY_SIZE(ice_devlink_params));
-}
-
-/**
- * ice_devlink_set_port_split_options - Set port split options
- * @pf: the PF to set port split options
- * @attrs: devlink attributes
- *
- * Sets devlink port split options based on available FW port options
- */
-static void
-ice_devlink_set_port_split_options(struct ice_pf *pf,
- struct devlink_port_attrs *attrs)
-{
- struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
- u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
- bool active_valid, pending_valid;
+ struct ice_hw *hw = &pf->hw;
int status;
- status = ice_aq_get_port_options(&pf->hw, options, &option_count,
- 0, true, &active_idx, &active_valid,
- &pending_idx, &pending_valid);
- if (status) {
- dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
- status);
- return;
- }
-
- /* find the biggest available port split count */
- for (i = 0; i < option_count; i++)
- attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
-
- attrs->splittable = attrs->lanes ? 1 : 0;
- ice_active_port_option = active_idx;
-}
-
-static const struct devlink_port_ops ice_devlink_port_ops = {
- .port_split = ice_devlink_port_split,
- .port_unsplit = ice_devlink_port_unsplit,
-};
-
-/**
- * ice_devlink_create_pf_port - Create a devlink port for this PF
- * @pf: the PF to create a devlink port for
- *
- * Create and register a devlink_port for this PF.
- * This function has to be called under devl_lock.
- *
- * Return: zero on success or an error code on failure.
- */
-int ice_devlink_create_pf_port(struct ice_pf *pf)
-{
- struct devlink_port_attrs attrs = {};
- struct devlink_port *devlink_port;
- struct devlink *devlink;
- struct ice_vsi *vsi;
- struct device *dev;
- int err;
-
- devlink = priv_to_devlink(pf);
-
- dev = ice_pf_to_dev(pf);
-
- devlink_port = &pf->devlink_port;
-
- vsi = ice_get_main_vsi(pf);
- if (!vsi)
- return -EIO;
-
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pf->hw.bus.func;
-
- /* As FW supports only port split options for whole device,
- * set port split options only for first PF.
- */
- if (pf->hw.pf_id == 0)
- ice_devlink_set_port_split_options(pf, &attrs);
-
- ice_devlink_set_switch_id(pf, &attrs.switch_id);
-
- devlink_port_attrs_set(devlink_port, &attrs);
-
- err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
- &ice_devlink_port_ops);
- if (err) {
- dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
- pf->hw.pf_id, err);
- return err;
- }
+ status = devl_params_register(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
+ if (status)
+ return status;
- return 0;
-}
+ if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
+ status = devl_params_register(devlink, ice_dvl_sched_params,
+ ARRAY_SIZE(ice_dvl_sched_params));
-/**
- * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
- * @pf: the PF to cleanup
- *
- * Unregisters the devlink_port structure associated with this PF.
- * This function has to be called under devl_lock.
- */
-void ice_devlink_destroy_pf_port(struct ice_pf *pf)
-{
- devl_port_unregister(&pf->devlink_port);
+ return status;
}
-/**
- * ice_devlink_create_vf_port - Create a devlink port for this VF
- * @vf: the VF to create a port for
- *
- * Create and register a devlink_port for this VF.
- *
- * Return: zero on success or an error code on failure.
- */
-int ice_devlink_create_vf_port(struct ice_vf *vf)
+void ice_devlink_unregister_params(struct ice_pf *pf)
{
- struct devlink_port_attrs attrs = {};
- struct devlink_port *devlink_port;
- struct devlink *devlink;
- struct ice_vsi *vsi;
- struct device *dev;
- struct ice_pf *pf;
- int err;
-
- pf = vf->pf;
- dev = ice_pf_to_dev(pf);
- devlink_port = &vf->devlink_port;
-
- vsi = ice_get_vf_vsi(vf);
- if (!vsi)
- return -EINVAL;
-
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
- attrs.pci_vf.pf = pf->hw.bus.func;
- attrs.pci_vf.vf = vf->vf_id;
-
- ice_devlink_set_switch_id(pf, &attrs.switch_id);
-
- devlink_port_attrs_set(devlink_port, &attrs);
- devlink = priv_to_devlink(pf);
-
- err = devlink_port_register(devlink, devlink_port, vsi->idx);
- if (err) {
- dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
- vf->vf_id, err);
- return err;
- }
+ struct devlink *devlink = priv_to_devlink(pf);
+ struct ice_hw *hw = &pf->hw;
- return 0;
-}
+ devl_params_unregister(devlink, ice_dvl_rdma_params,
+ ARRAY_SIZE(ice_dvl_rdma_params));
-/**
- * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
- * @vf: the VF to cleanup
- *
- * Unregisters the devlink_port structure associated with this VF.
- */
-void ice_devlink_destroy_vf_port(struct ice_vf *vf)
-{
- devl_rate_leaf_destroy(&vf->devlink_port);
- devlink_port_unregister(&vf->devlink_port);
+ if (hw->func_caps.common_cap.tx_sched_topo_comp_mode_en)
+ devl_params_unregister(devlink, ice_dvl_sched_params,
+ ARRAY_SIZE(ice_dvl_sched_params));
}
#define ICE_DEVLINK_READ_BLK_SIZE (1024 * 1024)
@@ -1976,8 +1856,8 @@ void ice_devlink_init_regions(struct ice_pf *pf)
u64 nvm_size, sram_size;
nvm_size = pf->hw.flash.flash_size;
- pf->nvm_region = devlink_region_create(devlink, &ice_nvm_region_ops, 1,
- nvm_size);
+ pf->nvm_region = devl_region_create(devlink, &ice_nvm_region_ops, 1,
+ nvm_size);
if (IS_ERR(pf->nvm_region)) {
dev_err(dev, "failed to create NVM devlink region, err %ld\n",
PTR_ERR(pf->nvm_region));
@@ -1985,17 +1865,17 @@ void ice_devlink_init_regions(struct ice_pf *pf)
}
sram_size = pf->hw.flash.sr_words * 2u;
- pf->sram_region = devlink_region_create(devlink, &ice_sram_region_ops,
- 1, sram_size);
+ pf->sram_region = devl_region_create(devlink, &ice_sram_region_ops,
+ 1, sram_size);
if (IS_ERR(pf->sram_region)) {
dev_err(dev, "failed to create shadow-ram devlink region, err %ld\n",
PTR_ERR(pf->sram_region));
pf->sram_region = NULL;
}
- pf->devcaps_region = devlink_region_create(devlink,
- &ice_devcaps_region_ops, 10,
- ICE_AQ_MAX_BUF_LEN);
+ pf->devcaps_region = devl_region_create(devlink,
+ &ice_devcaps_region_ops, 10,
+ ICE_AQ_MAX_BUF_LEN);
if (IS_ERR(pf->devcaps_region)) {
dev_err(dev, "failed to create device-caps devlink region, err %ld\n",
PTR_ERR(pf->devcaps_region));
@@ -2012,11 +1892,11 @@ void ice_devlink_init_regions(struct ice_pf *pf)
void ice_devlink_destroy_regions(struct ice_pf *pf)
{
if (pf->nvm_region)
- devlink_region_destroy(pf->nvm_region);
+ devl_region_destroy(pf->nvm_region);
if (pf->sram_region)
- devlink_region_destroy(pf->sram_region);
+ devl_region_destroy(pf->sram_region);
if (pf->devcaps_region)
- devlink_region_destroy(pf->devcaps_region);
+ devl_region_destroy(pf->devcaps_region);
}
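The devlink_* to devl_* conversions above follow the devlink locking split: devl_register(), devl_params_register(), devl_region_create() and friends assume the caller already holds the devlink instance lock, while the devlink_* wrappers take it internally. A hedged sketch of the caller-locked pattern (demo_params and demo_setup are illustrative; ice itself runs these calls from paths where the lock is already held):

#include <net/devlink.h>

static const struct devlink_param demo_params[] = {
	/* DEVLINK_PARAM_GENERIC(...) / DEVLINK_PARAM_DRIVER(...) entries */
};

static int demo_setup(struct devlink *devlink)
{
	int err;

	devl_lock(devlink);
	err = devl_params_register(devlink, demo_params,
				   ARRAY_SIZE(demo_params));
	devl_unlock(devlink);

	return err;
}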
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/devlink/devlink.h
index d291c0e2e17b..d291c0e2e17b 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink.h
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
new file mode 100644
index 000000000000..62ef8e2fb5f1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c
@@ -0,0 +1,487 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024, Intel Corporation. */
+
+#include <linux/vmalloc.h>
+
+#include "ice.h"
+#include "devlink.h"
+
+static int ice_active_port_option = -1;
+
+/**
+ * ice_devlink_port_opt_speed_str - convert speed to a string
+ * @speed: speed value
+ */
+static const char *ice_devlink_port_opt_speed_str(u8 speed)
+{
+ switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
+ case ICE_AQC_PORT_OPT_MAX_LANE_100M:
+ return "0.1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_1G:
+ return "1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
+ return "2.5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_5G:
+ return "5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_10G:
+ return "10";
+ case ICE_AQC_PORT_OPT_MAX_LANE_25G:
+ return "25";
+ case ICE_AQC_PORT_OPT_MAX_LANE_50G:
+ return "50";
+ case ICE_AQC_PORT_OPT_MAX_LANE_100G:
+ return "100";
+ }
+
+ return "-";
+}
+
+#define ICE_PORT_OPT_DESC_LEN 50
+/**
+ * ice_devlink_port_options_print - Print available port split options
+ * @pf: the PF to print split port options
+ *
+ * Prints a table with available port split options and max port speeds
+ */
+static void ice_devlink_port_options_print(struct ice_pf *pf)
+{
+ u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
+ struct ice_aqc_get_port_options_elem *options, *opt;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ char desc[ICE_PORT_OPT_DESC_LEN];
+ const char *str;
+ int status;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
+ sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return;
+
+ for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
+ opt = options + i * ICE_AQC_PORT_OPT_MAX;
+ options_count = ICE_AQC_PORT_OPT_MAX;
+ active_valid = 0;
+
+ status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
+ i, true, &active_idx,
+ &active_valid, &pending_idx,
+ &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
+ i, status);
+ goto err;
+ }
+ }
+
+ dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
+ dev_dbg(dev, "Status Split Quad 0 Quad 1\n");
+ dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n");
+
+ for (i = 0; i < options_count; i++) {
+ cnt = 0;
+
+ if (i == ice_active_port_option)
+ str = "Active";
+ else if ((i == pending_idx) && pending_valid)
+ str = "Pending";
+ else
+ str = "";
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-8s", str);
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-6u", options[i].pmd);
+
+ for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
+ speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
+ str = ice_devlink_port_opt_speed_str(speed);
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%3s ", str);
+ }
+
+ dev_dbg(dev, "%s\n", desc);
+ }
+
+err:
+ kfree(options);
+}
+
+/**
+ * ice_devlink_aq_set_port_option - Send set port option admin queue command
+ * @pf: the PF to set the port option for
+ * @option_idx: selected port option
+ * @extack: extended netdev ack structure
+ *
+ * Sends set port option admin queue command with selected port option and
+ * calls NVM write activate.
+ */
+static int
+ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int status;
+
+ status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
+ if (status) {
+ dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
+ return -EIO;
+ }
+
+ status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EIO;
+ }
+
+ status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
+ if (status) {
+ dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
+ ice_release_nvm(&pf->hw);
+ return -EIO;
+ }
+
+ ice_release_nvm(&pf->hw);
+
+ NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
+ return 0;
+}
+
+/**
+ * ice_devlink_port_split - .port_split devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @count: number of ports to split to
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_split operation.
+ *
+ * Unfortunately, the devlink expression of available options is limited
+ * to just a number, so search for an FW port option which supports
+ * the specified number. As there could be multiple FW port options with
+ * the same port split count, allow switching between them. When the same
+ * port split count request is issued again, switch to the next FW port
+ * option with the same port split count.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
+ unsigned int count, struct netlink_ext_ack *extack)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, j, active_idx, pending_idx, new_option;
+ struct ice_pf *pf = devlink_priv(devlink);
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port split options, err = %d\n",
+ status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
+ return -EIO;
+ }
+
+ new_option = ICE_AQC_PORT_OPT_MAX;
+ active_idx = pending_valid ? pending_idx : active_idx;
+ for (i = 1; i <= option_count; i++) {
+ /* In order to allow switching between FW port options with
+ * the same port split count, search for a new option starting
+ * from the active/pending option (with array wrap around).
+ */
+ j = (active_idx + i) % option_count;
+
+ if (count == options[j].pmd) {
+ new_option = j;
+ break;
+ }
+ }
+
+ if (new_option == active_idx) {
+ dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
+ count);
+ NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ if (new_option == ICE_AQC_PORT_OPT_MAX) {
+ dev_dbg(dev, "request to split: count: %u not found\n", count);
+ NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ status = ice_devlink_aq_set_port_option(pf, new_option, extack);
+ if (status)
+ return status;
+
+ ice_devlink_port_options_print(pf);
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_unsplit - .port_unsplit devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_unsplit operation.
+ * Calls ice_devlink_port_split with split count set to 1.
+ * There could be no FW option available with split count 1.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ return ice_devlink_port_split(devlink, port, 1, extack);
+}
+
+/**
+ * ice_devlink_set_port_split_options - Set port split options
+ * @pf: the PF to set port split options
+ * @attrs: devlink attributes
+ *
+ * Sets devlink port split options based on available FW port options
+ */
+static void
+ice_devlink_set_port_split_options(struct ice_pf *pf,
+ struct devlink_port_attrs *attrs)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
+ status);
+ return;
+ }
+
+ /* find the biggest available port split count */
+ for (i = 0; i < option_count; i++)
+ attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
+
+ attrs->splittable = attrs->lanes ? 1 : 0;
+ ice_active_port_option = active_idx;
+}
+
+static const struct devlink_port_ops ice_devlink_port_ops = {
+ .port_split = ice_devlink_port_split,
+ .port_unsplit = ice_devlink_port_unsplit,
+};
+
+/**
+ * ice_devlink_set_switch_id - Set unique switch id based on pci dsn
+ * @pf: the PF to create a devlink port for
+ * @ppid: struct with switch id information
+ */
+static void
+ice_devlink_set_switch_id(struct ice_pf *pf, struct netdev_phys_item_id *ppid)
+{
+ struct pci_dev *pdev = pf->pdev;
+ u64 id;
+
+ id = pci_get_dsn(pdev);
+
+ ppid->id_len = sizeof(id);
+ put_unaligned_be64(id, &ppid->id);
+}
+
+/**
+ * ice_devlink_create_pf_port - Create a devlink port for this PF
+ * @pf: the PF to create a devlink port for
+ *
+ * Create and register a devlink_port for this PF.
+ * This function has to be called under devl_lock.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_pf_port(struct ice_pf *pf)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ int err;
+
+ devlink = priv_to_devlink(pf);
+
+ dev = ice_pf_to_dev(pf);
+
+ devlink_port = &pf->devlink_port;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EIO;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = pf->hw.pf_id;
+
+ /* As FW supports only port split options for whole device,
+ * set port split options only for first PF.
+ */
+ if (pf->hw.pf_id == 0)
+ ice_devlink_set_port_split_options(pf, &attrs);
+
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+
+ err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_port_ops);
+ if (err) {
+ dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
+ pf->hw.pf_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
+ * @pf: the PF to cleanup
+ *
+ * Unregisters the devlink_port structure associated with this PF.
+ * This function has to be called under devl_lock.
+ */
+void ice_devlink_destroy_pf_port(struct ice_pf *pf)
+{
+ devl_port_unregister(&pf->devlink_port);
+}
+
+/**
+ * ice_devlink_port_get_vf_fn_mac - .port_fn_hw_addr_get devlink handler
+ * @port: devlink port structure
+ * @hw_addr: MAC address of the port
+ * @hw_addr_len: length of MAC address
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_fn_hw_addr_get operation
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_devlink_port_get_vf_fn_mac(struct devlink_port *port,
+ u8 *hw_addr, int *hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_vf *vf = container_of(port, struct ice_vf, devlink_port);
+
+ ether_addr_copy(hw_addr, vf->dev_lan_addr);
+ *hw_addr_len = ETH_ALEN;
+
+ return 0;
+}
+
+/**
+ * ice_devlink_port_set_vf_fn_mac - .port_fn_hw_addr_set devlink handler
+ * @port: devlink port structure
+ * @hw_addr: MAC address of the port
+ * @hw_addr_len: length of MAC address
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_fn_hw_addr_set operation
+ * Return: zero on success or an error code on failure.
+ */
+static int ice_devlink_port_set_vf_fn_mac(struct devlink_port *port,
+ const u8 *hw_addr,
+ int hw_addr_len,
+ struct netlink_ext_ack *extack)
+{
+ struct devlink_port_attrs *attrs = &port->attrs;
+ struct devlink_port_pci_vf_attrs *pci_vf;
+ struct devlink *devlink = port->devlink;
+ struct ice_pf *pf;
+ u16 vf_id;
+
+ pf = devlink_priv(devlink);
+ pci_vf = &attrs->pci_vf;
+ vf_id = pci_vf->vf;
+
+ return __ice_set_vf_mac(pf, vf_id, hw_addr);
+}
+
+static const struct devlink_port_ops ice_devlink_vf_port_ops = {
+ .port_fn_hw_addr_get = ice_devlink_port_get_vf_fn_mac,
+ .port_fn_hw_addr_set = ice_devlink_port_set_vf_fn_mac,
+};
+
+/**
+ * ice_devlink_create_vf_port - Create a devlink port for this VF
+ * @vf: the VF to create a port for
+ *
+ * Create and register a devlink_port for this VF.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_vf_port(struct ice_vf *vf)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_pf *pf;
+ int err;
+
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ devlink_port = &vf->devlink_port;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi)
+ return -EINVAL;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
+ attrs.pci_vf.pf = pf->hw.pf_id;
+ attrs.pci_vf.vf = vf->vf_id;
+
+ ice_devlink_set_switch_id(pf, &attrs.switch_id);
+
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx,
+ &ice_devlink_vf_port_ops);
+ if (err) {
+ dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
+ * @vf: the VF to cleanup
+ *
+ * Unregisters the devlink_port structure associated with this VF.
+ */
+void ice_devlink_destroy_vf_port(struct ice_vf *vf)
+{
+ devl_rate_leaf_destroy(&vf->devlink_port);
+ devl_port_unregister(&vf->devlink_port);
+}
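
For readers less familiar with the container_of() idiom used in ice_devlink_port_get_vf_fn_mac() above: the VF is recovered from its embedded devlink_port by subtracting the member offset. A minimal userspace sketch of the same arithmetic (struct names here are illustrative, not the driver's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct port { int id; };

struct vf {
	int vf_id;
	struct port devlink_port;   /* embedded, like devlink_port in ice_vf */
};

int main(void)
{
	struct vf v = { .vf_id = 3 };
	struct port *p = &v.devlink_port;
	struct vf *back = container_of(p, struct vf, devlink_port);

	printf("vf_id = %d\n", back->vf_id);   /* prints 3 */
	return 0;
}

Because the devlink core hands the callback only the devlink_port pointer, this offset subtraction is what ties the generic port back to the driver's per-VF state.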
diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.h b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
new file mode 100644
index 000000000000..9223bcdb6444
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2024, Intel Corporation. */
+
+#ifndef _DEVLINK_PORT_H_
+#define _DEVLINK_PORT_H_
+
+int ice_devlink_create_pf_port(struct ice_pf *pf);
+void ice_devlink_destroy_pf_port(struct ice_pf *pf);
+int ice_devlink_create_vf_port(struct ice_vf *vf);
+void ice_devlink_destroy_vf_port(struct ice_vf *vf);
+
+#endif /* _DEVLINK_PORT_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 365c03d1c462..caaa10157909 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -77,6 +77,7 @@
#include "ice_gnss.h"
#include "ice_irq.h"
#include "ice_dpll.h"
+#include "ice_adapter.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -330,7 +331,6 @@ struct ice_vsi {
struct net_device *netdev;
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
- struct ice_port_info *port_info; /* back pointer to port_info */
struct ice_rx_ring **rx_rings; /* Rx ring array */
struct ice_tx_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
@@ -348,12 +348,9 @@ struct ice_vsi {
/* tell if only dynamic irq allocation is allowed */
bool irq_dyn_alloc;
- enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
- struct ice_vf *vf; /* VF associated with this VSI */
-
u16 num_gfltr;
u16 num_bfltr;
@@ -412,7 +409,6 @@ struct ice_vsi {
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
struct ice_tx_ring **xdp_rings; /* XDP ring array */
- unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -445,12 +441,18 @@ struct ice_vsi {
u8 old_numtc;
u16 old_ena_tc;
- struct ice_channel *ch;
-
/* setup back reference, to which aggregator node this VSI
* corresponds to
*/
struct ice_agg_node *agg_node;
+
+ struct_group_tagged(ice_vsi_cfg_params, params,
+ struct ice_port_info *port_info; /* back pointer to port_info */
+ struct ice_channel *ch; /* VSI's channel structure, may be NULL */
+ struct ice_vf *vf; /* VF associated with this VSI, may be NULL */
+ u32 flags; /* VSI flags used for rebuild and configuration */
+ enum ice_vsi_type type; /* the type of the VSI */
+ );
} ____cacheline_internodealigned_in_smp;
/* struct that defines an interrupt vector */
@@ -458,7 +460,7 @@ struct ice_q_vector {
struct ice_vsi *vsi;
u16 v_idx; /* index in the vsi->q_vector array. */
- u16 reg_idx;
+ u16 reg_idx; /* PF relative register index */
u8 num_ring_rx; /* total number of Rx rings in vector */
u8 num_ring_tx; /* total number of Tx rings in vector */
u8 wb_on_itr:1; /* if true, WB on ITR is enabled */
@@ -480,6 +482,7 @@ struct ice_q_vector {
char name[ICE_INT_NAME_STR_LEN];
u16 total_events; /* net_dim(): number of interrupts processed */
+ u16 vf_reg_idx; /* VF relative register index */
struct msi_map irq;
} ____cacheline_internodealigned_in_smp;
@@ -522,17 +525,10 @@ enum ice_misc_thread_tasks {
};
struct ice_eswitch {
- struct ice_vsi *control_vsi;
struct ice_vsi *uplink_vsi;
struct ice_esw_br_offloads *br_offloads;
struct xarray reprs;
bool is_running;
- /* struct to allow cp queues management optimization */
- struct {
- int to_reach;
- int value;
- bool is_reaching;
- } qs;
};
struct ice_agg_node {
@@ -544,6 +540,7 @@ struct ice_agg_node {
struct ice_pf {
struct pci_dev *pdev;
+ struct ice_adapter *adapter;
struct devlink_region *nvm_region;
struct devlink_region *sram_region;
@@ -749,21 +746,36 @@ static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
}
/**
- * ice_xsk_pool - get XSK buffer pool bound to a ring
+ * ice_get_xp_from_qid - get ZC XSK buffer pool bound to a queue ID
+ * @vsi: pointer to VSI
+ * @qid: index of a queue to look at XSK buff pool presence
+ *
+ * Return: A pointer to xsk_buff_pool structure if there is a buffer pool
+ * attached and configured as zero-copy, NULL otherwise.
+ */
+static inline struct xsk_buff_pool *ice_get_xp_from_qid(struct ice_vsi *vsi,
+ u16 qid)
+{
+ struct xsk_buff_pool *pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+
+ if (!ice_is_xdp_ena_vsi(vsi))
+ return NULL;
+
+ return (pool && pool->dev) ? pool : NULL;
+}
+
+/**
+ * ice_rx_xsk_pool - assign XSK buff pool to Rx ring
* @ring: Rx ring to use
*
- * Returns a pointer to xsk_buff_pool structure if there is a buffer pool
- * present, NULL otherwise.
+ * Sets XSK buff pool pointer on Rx ring.
*/
-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
+static inline void ice_rx_xsk_pool(struct ice_rx_ring *ring)
{
struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
- return NULL;
-
- return xsk_get_pool_from_qid(vsi->netdev, qid);
+ WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}
/**
@@ -788,12 +800,7 @@ static inline void ice_tx_xsk_pool(struct ice_vsi *vsi, u16 qid)
if (!ring)
return;
- if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps)) {
- ring->xsk_pool = NULL;
- return;
- }
-
- ring->xsk_pool = xsk_get_pool_from_qid(vsi->netdev, qid);
+ WRITE_ONCE(ring->xsk_pool, ice_get_xp_from_qid(vsi, qid));
}
/**
@@ -922,9 +929,17 @@ int ice_down(struct ice_vsi *vsi);
int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg_lan(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+
+enum ice_xdp_cfg {
+ ICE_XDP_CFG_FULL, /* Fully apply new config in .ndo_bpf() */
+ ICE_XDP_CFG_PART, /* Save/use part of config in VSI rebuild */
+};
+
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
-int ice_destroy_xdp_rings(struct ice_vsi *vsi);
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ enum ice_xdp_cfg cfg_type);
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type);
+void ice_map_xdp_rings(struct ice_vsi *vsi);
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
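
struct_group_tagged() (from linux/stddef.h) keeps the listed members directly addressable on struct ice_vsi while also exposing them as one named, copyable group, which is what lets rebuild code save and restore just the params block. A freestanding approximation of the mechanism, with illustrative names:

#include <stdio.h>
#include <string.h>

struct vsi_cfg_params { int type; unsigned int flags; };

struct vsi {
	int idx;
	union {
		struct { int type; unsigned int flags; };  /* direct access */
		struct vsi_cfg_params params;              /* grouped access */
	};
};

int main(void)
{
	struct vsi a = { .idx = 1, .params = { .type = 2, .flags = 0x4 } };
	struct vsi b = { .idx = 9 };

	/* Copy only the tagged group, leaving the rest of b untouched. */
	memcpy(&b.params, &a.params, sizeof(b.params));
	printf("b.idx=%d b.type=%d b.flags=%#x\n", b.idx, b.type, b.flags);
	return 0;
}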
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c
new file mode 100644
index 000000000000..ad84d8ad49a6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: Copyright Red Hat
+
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/xarray.h>
+#include "ice_adapter.h"
+
+static DEFINE_XARRAY(ice_adapters);
+static DEFINE_MUTEX(ice_adapters_mutex);
+
+/* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */
+#define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13)
+#define INDEX_FIELD_BUS GENMASK(12, 5)
+#define INDEX_FIELD_SLOT GENMASK(4, 0)
+
+static unsigned long ice_adapter_index(const struct pci_dev *pdev)
+{
+ unsigned int domain = pci_domain_nr(pdev->bus);
+
+ WARN_ON(domain > FIELD_MAX(INDEX_FIELD_DOMAIN));
+
+ return FIELD_PREP(INDEX_FIELD_DOMAIN, domain) |
+ FIELD_PREP(INDEX_FIELD_BUS, pdev->bus->number) |
+ FIELD_PREP(INDEX_FIELD_SLOT, PCI_SLOT(pdev->devfn));
+}
+
+static struct ice_adapter *ice_adapter_new(void)
+{
+ struct ice_adapter *adapter;
+
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter)
+ return NULL;
+
+ spin_lock_init(&adapter->ptp_gltsyn_time_lock);
+ refcount_set(&adapter->refcount, 1);
+
+ return adapter;
+}
+
+static void ice_adapter_free(struct ice_adapter *adapter)
+{
+ kfree(adapter);
+}
+
+/**
+ * ice_adapter_get - Get a shared ice_adapter structure.
+ * @pdev: Pointer to the pci_dev whose driver is getting the ice_adapter.
+ *
+ * Gets a pointer to a shared ice_adapter structure. Physical functions (PFs)
+ * of the same multi-function PCI device share one ice_adapter structure.
+ * The ice_adapter is reference-counted. The PF driver must use ice_adapter_put
+ * to release its reference.
+ *
+ * Context: Process, may sleep.
+ * Return: Pointer to ice_adapter on success.
+ * ERR_PTR() on error. -ENOMEM is the only possible error.
+ */
+struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev)
+{
+ unsigned long index = ice_adapter_index(pdev);
+ struct ice_adapter *adapter;
+ int err;
+
+ scoped_guard(mutex, &ice_adapters_mutex) {
+ err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL);
+ if (err == -EBUSY) {
+ adapter = xa_load(&ice_adapters, index);
+ refcount_inc(&adapter->refcount);
+ return adapter;
+ }
+ if (err)
+ return ERR_PTR(err);
+
+ adapter = ice_adapter_new();
+ if (!adapter)
+ return ERR_PTR(-ENOMEM);
+ xa_store(&ice_adapters, index, adapter, GFP_KERNEL);
+ }
+ return adapter;
+}
+
+/**
+ * ice_adapter_put - Release a reference to the shared ice_adapter structure.
+ * @pdev: Pointer to the pci_dev whose driver is releasing the ice_adapter.
+ *
+ * Releases the reference to ice_adapter previously obtained with
+ * ice_adapter_get.
+ *
+ * Context: Process, may sleep.
+ */
+void ice_adapter_put(const struct pci_dev *pdev)
+{
+ unsigned long index = ice_adapter_index(pdev);
+ struct ice_adapter *adapter;
+
+ scoped_guard(mutex, &ice_adapters_mutex) {
+ adapter = xa_load(&ice_adapters, index);
+ if (WARN_ON(!adapter))
+ return;
+ if (!refcount_dec_and_test(&adapter->refcount))
+ return;
+
+ WARN_ON(xa_erase(&ice_adapters, index) != adapter);
+ }
+ ice_adapter_free(adapter);
+}
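
ice_adapter_index() above packs domain, bus and slot into a single xarray key via FIELD_PREP(); since the PCI function number is left out of the key, all PFs of one multi-function device map to the same entry. The same bit layout in plain shifts, as a userspace sketch:

#include <stdio.h>

/* Bit layout from the patch: slot in bits 4:0, bus in 12:5, domain above. */
static unsigned long adapter_index(unsigned int domain, unsigned int bus,
				   unsigned int slot)
{
	return ((unsigned long)domain << 13) |
	       ((unsigned long)(bus & 0xff) << 5) |
	       (slot & 0x1f);
}

int main(void)
{
	/* PF0 and PF1 of one device differ only in the PCI function bits,
	 * which are not part of the key, so they share one ice_adapter. */
	printf("%#lx\n", adapter_index(0, 0x3b, 0x02));
	printf("%#lx\n", adapter_index(0, 0x3b, 0x02));
	return 0;
}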
diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.h b/drivers/net/ethernet/intel/ice/ice_adapter.h
new file mode 100644
index 000000000000..9d11014ec02f
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_adapter.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-FileCopyrightText: Copyright Red Hat */
+
+#ifndef _ICE_ADAPTER_H_
+#define _ICE_ADAPTER_H_
+
+#include <linux/spinlock_types.h>
+#include <linux/refcount_types.h>
+
+struct pci_dev;
+
+/**
+ * struct ice_adapter - PCI adapter resources shared across PFs
+ * @ptp_gltsyn_time_lock: Spinlock protecting access to the GLTSYN_TIME
+ * register of the PTP clock.
+ * @refcount: Reference count. struct ice_pf objects hold the references.
+ */
+struct ice_adapter {
+ /* For access to the GLTSYN_TIME register */
+ spinlock_t ptp_gltsyn_time_lock;
+
+ refcount_t refcount;
+};
+
+struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev);
+void ice_adapter_put(const struct pci_dev *pdev);
+
+#endif /* _ICE_ADAPTER_H_ */
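
The intended lifecycle is get-on-probe, put-on-remove: the first get allocates, later gets share, and the last put frees. A self-contained userspace model of that refcounting (a single slot stands in for the xarray plus mutex used by the driver):

#include <assert.h>
#include <stdlib.h>

struct adapter { int refcount; };
static struct adapter *slot;            /* stands in for the xarray entry */

static struct adapter *adapter_get(void)
{
	if (slot) {
		slot->refcount++;
		return slot;
	}
	slot = calloc(1, sizeof(*slot));
	if (slot)
		slot->refcount = 1;
	return slot;
}

static void adapter_put(void)
{
	assert(slot && slot->refcount > 0);
	if (--slot->refcount == 0) {
		free(slot);
		slot = NULL;
	}
}

int main(void)
{
	struct adapter *a = adapter_get();   /* PF0 probes: allocates */
	struct adapter *b = adapter_get();   /* PF1 probes: shares */
	assert(a == b && a->refcount == 2);
	adapter_put();                       /* PF1 removes */
	adapter_put();                       /* PF0 removes: frees */
	return 0;
}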
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 1f3e7a6903e5..66f02988d549 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -121,6 +121,8 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
+#define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085
+#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087
#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
#define ICE_AQC_BIT_ROCEV2_LAG 0x01
#define ICE_AQC_BIT_SRIOV_LAG 0x02
@@ -230,6 +232,13 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15)
};
+/* Loopback port parameter mode values. */
+enum ice_local_fwd_mode {
+ ICE_LOCAL_FWD_MODE_ENABLED = 0,
+ ICE_LOCAL_FWD_MODE_DISABLED = 1,
+ ICE_LOCAL_FWD_MODE_PRIORITIZED = 2,
+};
+
/* Set Port parameters, (direct, 0x0203) */
struct ice_aqc_set_port_params {
__le16 cmd_flags;
@@ -238,7 +247,9 @@ struct ice_aqc_set_port_params {
__le16 swid;
#define ICE_AQC_PORT_SWID_VALID BIT(15)
#define ICE_AQC_PORT_SWID_M 0xFF
- u8 reserved[10];
+ u8 local_fwd_mode;
+#define ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID BIT(2)
+ u8 reserved[9];
};
/* These resource type defines are used for all switch resource
@@ -264,6 +275,8 @@ struct ice_aqc_set_port_params {
#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7)
#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
+#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED BIT(14)
+#define ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL BIT(15)
#define ICE_AQC_RES_TYPE_FLAG_DEDICATED 0x00
@@ -808,6 +821,23 @@ struct ice_aqc_get_topo {
__le32 addr_low;
};
+/* Get/Set Tx Topology (indirect 0x0418/0x0417) */
+struct ice_aqc_get_set_tx_topo {
+ u8 set_flags;
+#define ICE_AQC_TX_TOPO_FLAGS_CORRER BIT(0)
+#define ICE_AQC_TX_TOPO_FLAGS_SRC_RAM BIT(1)
+#define ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW BIT(4)
+#define ICE_AQC_TX_TOPO_FLAGS_ISSUED BIT(5)
+
+ u8 get_flags;
+#define ICE_AQC_TX_TOPO_GET_RAM 2
+
+ __le16 reserved1;
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Update TSE (indirect 0x0403)
* Get TSE (indirect 0x0404)
* Add TSE (indirect 0x0401)
@@ -1440,6 +1470,55 @@ struct ice_aqc_get_sensor_reading_resp {
} data;
};
+/* DNL call command (indirect 0x0682)
+ * Struct is used for both command and response
+ */
+struct ice_aqc_dnl_call_command {
+ u8 ctx; /* Used in command, reserved in response */
+ u8 reserved;
+ __le16 activity_id;
+#define ICE_AQC_ACT_ID_DNL 0x1129
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_dnl_equa_param {
+ __le16 data_in;
+#define ICE_AQC_RX_EQU_SHIFT 8
+#define ICE_AQC_RX_EQU_PRE2 (0x10 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_PRE1 (0x11 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT)
+#define ICE_AQC_TX_EQU_PRE1 0x0
+#define ICE_AQC_TX_EQU_PRE3 0x3
+#define ICE_AQC_TX_EQU_ATTEN 0x4
+#define ICE_AQC_TX_EQU_POST1 0x8
+#define ICE_AQC_TX_EQU_PRE2 0xC
+ __le16 op_code_serdes_sel;
+#define ICE_AQC_OP_CODE_SHIFT 4
+#define ICE_AQC_OP_CODE_RX_EQU (0x9 << ICE_AQC_OP_CODE_SHIFT)
+#define ICE_AQC_OP_CODE_TX_EQU (0x10 << ICE_AQC_OP_CODE_SHIFT)
+ __le32 reserved[3];
+};
+
+struct ice_aqc_dnl_equa_respon {
+ /* Equalization value can be negative */
+ int val;
+ __le32 reserved[3];
+};
+
+/* DNL call command/response buffer (indirect 0x0682) */
+struct ice_aqc_dnl_call {
+ union {
+ struct ice_aqc_dnl_equa_param txrx_equa_reqs;
+ __le32 stores[4];
+ struct ice_aqc_dnl_equa_respon txrx_equa_resp;
+ } sto;
+};
+
struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
@@ -1664,6 +1743,15 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_START_POINT 0
+#define ICE_AQC_NVM_TX_TOPO_MOD_ID 0x14B
+
+struct ice_aqc_nvm_tx_topo_user_sel {
+ __le16 length;
+ u8 data;
+#define ICE_AQC_NVM_TX_TOPO_USER_SEL BIT(4)
+ u8 reserved;
+};
+
/* NVM Checksum Command (direct, 0x0706) */
struct ice_aqc_nvm_checksum {
u8 flags;
@@ -2534,8 +2622,10 @@ struct ice_aq_desc {
struct ice_aqc_get_link_status get_link_status;
struct ice_aqc_event_lan_overflow lan_overflow;
struct ice_aqc_get_link_topo get_link_topo;
+ struct ice_aqc_dnl_call_command dnl_call;
struct ice_aqc_i2c read_write_i2c;
struct ice_aqc_read_i2c_resp read_i2c_resp;
+ struct ice_aqc_get_set_tx_topo get_set_tx_topo;
} params;
};
@@ -2642,6 +2732,10 @@ enum ice_adminq_opc {
ice_aqc_opc_query_sched_res = 0x0412,
ice_aqc_opc_remove_rl_profiles = 0x0415,
+ /* tx topology commands */
+ ice_aqc_opc_set_tx_topo = 0x0417,
+ ice_aqc_opc_get_tx_topo = 0x0418,
+
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
ice_aqc_opc_set_phy_cfg = 0x0601,
@@ -2653,6 +2747,7 @@ enum ice_adminq_opc {
ice_aqc_opc_set_phy_rec_clk_out = 0x0630,
ice_aqc_opc_get_phy_rec_clk_out = 0x0631,
ice_aqc_opc_get_sensor_reading = 0x0632,
+ ice_aqc_opc_dnl_call = 0x0682,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index a545a7917e4f..f448d3a84564 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -121,7 +121,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->irq.index = -ENOENT;
if (vsi->type == ICE_VSI_VF) {
- q_vector->reg_idx = ice_calc_vf_reg_idx(vsi->vf, q_vector);
+ ice_calc_vf_reg_idx(vsi->vf, q_vector);
goto out;
} else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);
@@ -145,6 +145,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
skip_alloc:
q_vector->reg_idx = q_vector->irq.index;
+ q_vector->vf_reg_idx = q_vector->irq.index;
/* only set affinity_mask if the CPU is online */
if (cpu_online(v_idx))
@@ -264,30 +265,6 @@ static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8
}
/**
- * ice_eswitch_calc_txq_handle
- * @ring: pointer to ring which unique index is needed
- *
- * To correctly work with many netdevs ring->q_index of Tx rings on switchdev
- * VSI can repeat. Hardware ring setup requires unique q_index. Calculate it
- * here by finding index in vsi->tx_rings of this ring.
- *
- * Return ICE_INVAL_Q_INDEX when index wasn't found. Should never happen,
- * because VSI is get from ring->vsi, so it has to be present in this VSI.
- */
-static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
-{
- const struct ice_vsi *vsi = ring->vsi;
- int i;
-
- ice_for_each_txq(vsi, i) {
- if (vsi->tx_rings[i] == ring)
- return i;
- }
-
- return ICE_INVAL_Q_INDEX;
-}
-
-/**
* ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
* @ring: The Tx ring to configure
*
@@ -353,9 +330,6 @@ ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
- case ICE_VSI_SWITCHDEV_CTRL:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
- break;
default:
return;
}
@@ -479,6 +453,14 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
+ /* PF acts as uplink for switchdev; set flex descriptor with src_vsi
+ * metadata and flags to allow redirecting to PR netdev
+ */
+ if (ice_is_eswitch_mode_switchdev(vsi->back)) {
+ ring->flags |= ICE_RX_FLAGS_MULTIDEV;
+ rxdid = ICE_RXDID_FLEX_NIC_2;
+ }
+
/* Enable Flexible Descriptors in the queue context which
* allows this driver to select a specific receive descriptor format
* increasing context priority to pick up profile ID; default is 0x01;
@@ -531,6 +513,25 @@ static void ice_xsk_pool_fill_cb(struct ice_rx_ring *ring)
}
/**
+ * ice_get_frame_sz - calculate xdp_buff::frame_sz
+ * @rx_ring: the ring being configured
+ *
+ * Return frame size based on underlying PAGE_SIZE
+ */
+static unsigned int ice_get_frame_sz(struct ice_rx_ring *rx_ring)
+{
+ unsigned int frame_sz;
+
+#if (PAGE_SIZE >= 8192)
+ frame_sz = rx_ring->rx_buf_len;
+#else
+ frame_sz = ice_rx_pg_size(rx_ring) / 2;
+#endif
+
+ return frame_sz;
+}
+
+/**
* ice_vsi_cfg_rxq - Configure an Rx queue
* @ring: the ring being configured
*
@@ -554,7 +555,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return err;
}
- ring->xsk_pool = ice_xsk_pool(ring);
+ ice_rx_xsk_pool(ring);
if (ring->xsk_pool) {
xdp_rxq_info_unreg(&ring->xdp_rxq);
@@ -594,7 +595,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
}
}
- xdp_init_buff(&ring->xdp, ice_rx_pg_size(ring) / 2, &ring->xdp_rxq);
+ xdp_init_buff(&ring->xdp, ice_get_frame_sz(ring), &ring->xdp_rxq);
ring->xdp.data = NULL;
ring->xdp_ext.pkt_ctx = &ring->pkt_ctx;
err = ice_setup_rx_ctx(ring);
@@ -615,7 +616,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
return 0;
}
- ok = ice_alloc_rx_bufs_zc(ring, num_bufs);
+ ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs);
if (!ok) {
u16 pf_q = ring->vsi->rxq_map[ring->q_index];
@@ -860,6 +861,9 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
}
rx_rings_rem -= rx_rings_per_v;
}
+
+ if (ice_is_xdp_ena_vsi(vsi))
+ ice_map_xdp_rings(vsi);
}
/**
@@ -919,14 +923,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
/* Add unique software queue handle of the Tx queue per
* TC into the VSI Tx ring
*/
- if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
- ring->q_handle = ice_eswitch_calc_txq_handle(ring);
-
- if (ring->q_handle == ICE_INVAL_Q_INDEX)
- return -ENODEV;
- } else {
- ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
- }
+ ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
if (ch)
status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
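
ice_get_frame_sz() in the hunk above derives xdp_buff::frame_sz from page geometry: on 4K-page systems each page is split into two Rx buffers, while on larger pages the ring's buffer length is used directly. A compile-time sketch (RX_BUF_LEN is a made-up stand-in for rx_ring->rx_buf_len):

#include <stdio.h>

#define PAGE_SIZE  4096   /* assumed host page size for the sketch */
#define RX_BUF_LEN 3072   /* illustrative buffer length */

static unsigned int get_frame_sz(void)
{
#if (PAGE_SIZE >= 8192)
	return RX_BUF_LEN;        /* big pages: use the buffer length */
#else
	return PAGE_SIZE / 2;     /* 4K pages: page split into two buffers */
#endif
}

int main(void)
{
	printf("frame_sz = %u\n", get_frame_sz());   /* 2048 here */
	return 0;
}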
diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
index 57abd52386d0..10d9d74f3545 100644
--- a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
+++ b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h
@@ -23,7 +23,18 @@ union nac_cgu_dword9 {
u32 clk_synce0_amp : 2;
u32 one_pps_out_amp : 2;
u32 misc24 : 12;
- } field;
+ };
+ u32 val;
+};
+
+#define NAC_CGU_DWORD16_E825C 0x40
+union nac_cgu_dword16_e825c {
+ struct {
+ u32 synce_remndr : 6;
+ u32 synce_phlmt_en : 1;
+ u32 misc13 : 17;
+ u32 tspll_ck_refclkfreq : 8;
+ };
u32 val;
};
@@ -39,7 +50,7 @@ union nac_cgu_dword19 {
u32 japll_ndivratio : 4;
u32 japll_iref_ndivratio : 3;
u32 misc27 : 1;
- } field;
+ };
u32 val;
};
@@ -63,7 +74,23 @@ union nac_cgu_dword22 {
u32 fdpllclk_sel_div2 : 1;
u32 time1588clk_sel_div2 : 1;
u32 misc3 : 1;
- } field;
+ };
+ u32 val;
+};
+
+#define NAC_CGU_DWORD23_E825C 0x5C
+union nac_cgu_dword23_e825c {
+ struct {
+ u32 cgupll_fbdiv_intgr : 10;
+ u32 ux56pll_fbdiv_intgr : 10;
+ u32 misc20 : 4;
+ u32 ts_pll_enable : 1;
+ u32 time_sync_tspll_align_sel : 1;
+ u32 ext_synce_sel : 1;
+ u32 ref1588_ck_div : 4;
+ u32 time_ref_sel : 1;
+ };
u32 val;
};
@@ -77,7 +104,7 @@ union nac_cgu_dword24 {
u32 ext_synce_sel : 1;
u32 ref1588_ck_div : 4;
u32 time_ref_sel : 1;
- } field;
+ };
u32 val;
};
@@ -92,7 +119,7 @@ union tspll_cntr_bist_settings {
u32 i_plllock_cnt_6_0 : 7;
u32 i_plllock_cnt_10_7 : 4;
u32 reserved200 : 4;
- } field;
+ };
u32 val;
};
@@ -109,7 +136,45 @@ union tspll_ro_bwm_lf {
u32 afcdone_cri : 1;
u32 feedfwrdgain_cal_cri_7_0 : 8;
u32 m2fbdivmod_cri_7_0 : 8;
- } field;
+ };
+ u32 val;
+};
+
+#define TSPLL_RO_LOCK_E825C 0x3f0
+union tspll_ro_lock_e825c {
+ struct {
+ u32 bw_freqov_high_cri_7_0 : 8;
+ u32 bw_freqov_high_cri_9_8 : 2;
+ u32 reserved455 : 1;
+ u32 plllock_gain_tran_cri : 1;
+ u32 plllock_true_lock_cri : 1;
+ u32 pllunlock_flag_cri : 1;
+ u32 afcerr_cri : 1;
+ u32 afcdone_cri : 1;
+ u32 feedfwrdgain_cal_cri_7_0 : 8;
+ u32 reserved462 : 8;
+ };
+ u32 val;
+};
+
+#define TSPLL_BW_TDC_E825C 0x31c
+union tspll_bw_tdc_e825c {
+ struct {
+ u32 i_tdc_offset_lock_1_0 : 2;
+ u32 i_bbthresh1_2_0 : 3;
+ u32 i_bbthresh2_2_0 : 3;
+ u32 i_tdcsel_1_0 : 2;
+ u32 i_tdcovccorr_en_h : 1;
+ u32 i_divretimeren : 1;
+ u32 i_bw_ampmeas_window : 1;
+ u32 i_bw_lowerbound_2_0 : 3;
+ u32 i_bw_upperbound_2_0 : 3;
+ u32 i_bw_mode_1_0 : 2;
+ u32 i_ft_mode_sel_2_0 : 3;
+ u32 i_bwphase_4_0 : 5;
+ u32 i_plllock_sel_1_0 : 2;
+ u32 i_afc_divratio : 1;
+ };
u32 val;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index d9f6cc71d900..009716a12a26 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -160,10 +160,16 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E825C_SGMII:
hw->mac_type = ICE_MAC_GENERIC_3K_E825;
break;
- case ICE_DEV_ID_E830_BACKPLANE:
- case ICE_DEV_ID_E830_QSFP56:
- case ICE_DEV_ID_E830_SFP:
- case ICE_DEV_ID_E830_SFP_DD:
+ case ICE_DEV_ID_E830CC_BACKPLANE:
+ case ICE_DEV_ID_E830CC_QSFP56:
+ case ICE_DEV_ID_E830CC_SFP:
+ case ICE_DEV_ID_E830CC_SFP_DD:
+ case ICE_DEV_ID_E830C_BACKPLANE:
+ case ICE_DEV_ID_E830_XXV_BACKPLANE:
+ case ICE_DEV_ID_E830C_QSFP:
+ case ICE_DEV_ID_E830_XXV_QSFP:
+ case ICE_DEV_ID_E830C_SFP:
+ case ICE_DEV_ID_E830_XXV_SFP:
hw->mac_type = ICE_MAC_E830;
break;
default:
@@ -234,6 +240,30 @@ bool ice_is_e810t(struct ice_hw *hw)
}
/**
+ * ice_is_e822 - Check if a device is E822 family device
+ * @hw: pointer to the hardware structure
+ *
+ * Return: true if the device is E822 based, false if not.
+ */
+bool ice_is_e822(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E822C_BACKPLANE:
+ case ICE_DEV_ID_E822C_QSFP:
+ case ICE_DEV_ID_E822C_SFP:
+ case ICE_DEV_ID_E822C_10G_BASE_T:
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822L_BACKPLANE:
+ case ICE_DEV_ID_E822L_SFP:
+ case ICE_DEV_ID_E822L_10G_BASE_T:
+ case ICE_DEV_ID_E822L_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
* ice_is_e823
* @hw: pointer to the hardware structure
*
@@ -904,6 +934,9 @@ static int ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
sw->prof_res_bm_init = 0;
+ /* Initialize recipe count with default recipes read from NVM */
+ sw->recp_cnt = ICE_SW_LKUP_LAST;
+
status = ice_init_def_sw_recp(hw);
if (status) {
devm_kfree(ice_hw_to_dev(hw), hw->switch_info);
@@ -931,14 +964,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
}
recps = sw->recp_list;
for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
- struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
-
recps[i].root_rid = i;
- list_for_each_entry_safe(rg_entry, tmprg_entry,
- &recps[i].rg_list, l_entry) {
- list_del(&rg_entry->l_entry);
- devm_kfree(ice_hw_to_dev(hw), rg_entry);
- }
if (recps[i].adv_rule) {
struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
@@ -963,7 +989,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
devm_kfree(ice_hw_to_dev(hw), lst_itr);
}
}
- devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
}
ice_rm_all_sw_replay_rule_info(hw);
devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
@@ -1056,6 +1081,7 @@ int ice_init_hw(struct ice_hw *hw)
goto err_unroll_cqinit;
}
+ hw->port_info->local_fwd_mode = ICE_LOCAL_FWD_MODE_ENABLED;
/* set the back pointer to HW */
hw->port_info->hw = hw;
@@ -1142,6 +1168,8 @@ int ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
mutex_init(&hw->tnl_lock);
+ ice_init_chk_recipe_reuse_support(hw);
+
return 0;
err_unroll_fltr_mgmt_struct:
@@ -1465,8 +1493,9 @@ ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc,
* ice_sbq_rw_reg - Fill Sideband Queue command
* @hw: pointer to the HW struct
* @in: message info to be filled in descriptor
+ * @flags: control queue descriptor flags
*/
-int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags)
{
struct ice_sbq_cmd_desc desc = {0};
struct ice_sbq_msg_req msg = {0};
@@ -1490,7 +1519,7 @@ int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in)
*/
msg_len -= sizeof(msg.data);
- desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD);
+ desc.flags = cpu_to_le16(flags);
desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req);
desc.param0.cmd_len = cpu_to_le16(msg_len);
status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL);
@@ -1615,6 +1644,8 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
case ice_aqc_opc_set_port_params:
case ice_aqc_opc_get_vlan_mode_parameters:
case ice_aqc_opc_set_vlan_mode_parameters:
+ case ice_aqc_opc_set_tx_topo:
+ case ice_aqc_opc_get_tx_topo:
case ice_aqc_opc_add_recipe:
case ice_aqc_opc_recipe_to_profile:
case ice_aqc_opc_get_recipe:
@@ -2171,6 +2202,9 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
prefix, caps->sriov_lag);
break;
+ case ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE:
+ caps->tx_sched_topo_comp_mode_en = (number == 1);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
@@ -2277,8 +2311,13 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0);
info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0);
- info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
- info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+ if (!ice_is_e825c(hw)) {
+ info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number);
+ info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0);
+ } else {
+ info->clk_freq = ICE_TIME_REF_FREQ_156_250;
+ info->clk_src = ICE_CLK_SRC_TCXO;
+ }
if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) {
info->time_ref = (enum ice_time_ref_freq)info->clk_freq;
@@ -2552,6 +2591,34 @@ ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities.
+ */
+static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw,
+ struct ice_hw_dev_caps *dev_p,
+ struct ice_aqc_list_caps_elem *cap)
+{
+ dev_p->nac_topo.mode = le32_to_cpu(cap->number);
+ dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M;
+
+ dev_info(ice_hw_to_dev(hw),
+ "PF is configured in %s mode with IP instance ID %d\n",
+ (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ?
+ "primary" : "secondary", dev_p->nac_topo.id);
+
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n",
+ !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M));
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n",
+ !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M));
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n",
+ dev_p->nac_topo.id);
+}
+
+/**
* ice_parse_dev_caps - Parse device capabilities
* @hw: pointer to the HW struct
* @dev_p: pointer to device capabilities structure
@@ -2602,6 +2669,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
case ICE_AQC_CAPS_SENSOR_READING:
ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]);
break;
+ case ICE_AQC_CAPS_NAC_TOPOLOGY:
+ ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
default:
/* Don't list common capabilities as unknown */
if (!found)
@@ -2997,6 +3067,9 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
cmd->cmd_flags = cpu_to_le16(cmd_flags);
+ cmd->local_fwd_mode = pi->local_fwd_mode |
+ ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID;
+
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
@@ -3030,11 +3103,13 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw)
* Note: In the structure of [phy_type_low, phy_type_high], there should
* be one bit set, as this function will convert one PHY type to its
* speed.
- * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
- * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
+ *
+ * Return:
+ * * PHY speed for recognized PHY type
+ * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
+ * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned
*/
-static u16
-ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
+u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
@@ -3135,6 +3210,16 @@ ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
case ICE_PHY_TYPE_HIGH_100G_AUI2:
speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
break;
+ case ICE_PHY_TYPE_HIGH_200G_CR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_SR4:
+ case ICE_PHY_TYPE_HIGH_200G_FR4:
+ case ICE_PHY_TYPE_HIGH_200G_LR4:
+ case ICE_PHY_TYPE_HIGH_200G_DR4:
+ case ICE_PHY_TYPE_HIGH_200G_KR4_PAM4:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC:
+ case ICE_PHY_TYPE_HIGH_200G_AUI4:
+ speed_phy_type_high = ICE_AQ_LINK_SPEED_200GB;
+ break;
default:
speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
break;
@@ -3286,6 +3371,100 @@ int ice_update_link_info(struct ice_port_info *pi)
}
/**
+ * ice_aq_get_phy_equalization - read a serdes equalizer value from firmware
+ * @hw: pointer to the HW struct
+ * @data_in: serdes equalization parameter requested
+ * @op_code: serdes number and flag selecting Tx or Rx
+ * @serdes_num: serdes number
+ * @output: pointer to the caller-supplied buffer for the equalizer value
+ *
+ * Return: non-zero status on error and 0 on success.
+ */
+int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
+ u8 serdes_num, int *output)
+{
+ struct ice_aqc_dnl_call_command *cmd;
+ struct ice_aqc_dnl_call buf = {};
+ struct ice_aq_desc desc;
+ int err;
+
+ buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in);
+ buf.sto.txrx_equa_reqs.op_code_serdes_sel =
+ cpu_to_le16(op_code | (serdes_num & 0xF));
+ cmd = &desc.params.dnl_call;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF |
+ ICE_AQ_FLAG_RD |
+ ICE_AQ_FLAG_SI);
+ desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call));
+ cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL);
+
+ err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call),
+ NULL);
+ *output = err ? 0 : buf.sto.txrx_equa_resp.val;
+
+ return err;
+}
+
+#define FEC_REG_PORT(port) { \
+ FEC_CORR_LOW_REG_PORT##port, \
+ FEC_CORR_HIGH_REG_PORT##port, \
+ FEC_UNCORR_LOW_REG_PORT##port, \
+ FEC_UNCORR_HIGH_REG_PORT##port, \
+}
+
+static const u32 fec_reg[][ICE_FEC_MAX] = {
+ FEC_REG_PORT(0),
+ FEC_REG_PORT(1),
+ FEC_REG_PORT(2),
+ FEC_REG_PORT(3)
+};
+
+/**
+ * ice_aq_get_fec_stats - read FEC stats from the PHY
+ * @hw: pointer to the HW struct
+ * @pcs_quad: PCS quad of the requested serdes
+ * @pcs_port: PCS port number within that quad
+ * @fec_type: type of FEC stats to read
+ * @output: pointer to the caller-supplied buffer for the requested stats
+ *
+ * Return: non-zero status on error and 0 on success.
+ */
+int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ enum ice_fec_stats_types fec_type, u32 *output)
+{
+ u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI);
+ struct ice_sbq_msg_input msg = {};
+ u32 receiver_id, reg_offset;
+ int err;
+
+ if (pcs_port > 3)
+ return -EINVAL;
+
+ reg_offset = fec_reg[pcs_port][fec_type];
+
+ if (pcs_quad == 0)
+ receiver_id = FEC_RECEIVER_ID_PCS0;
+ else if (pcs_quad == 1)
+ receiver_id = FEC_RECEIVER_ID_PCS1;
+ else
+ return -EINVAL;
+
+ msg.msg_addr_low = lower_16_bits(reg_offset);
+ msg.msg_addr_high = receiver_id;
+ msg.opcode = ice_sbq_msg_rd;
+ msg.dest_dev = rmn_0;
+
+ err = ice_sbq_rw_reg(hw, &msg, flag);
+ if (err)
+ return err;
+
+ *output = msg.data;
+ return 0;
+}
+
+/**
* ice_cache_phy_user_req
* @pi: port information structure
* @cache_data: PHY logging data
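
FEC_REG_PORT(port) in ice_common.c stamps out one row of register offsets per PCS port, so ice_aq_get_fec_stats() reduces the lookup to a plain fec_reg[pcs_port][fec_type] indexing. The shape of that pattern, with made-up offsets:

#include <stdio.h>

enum fec_type {
	FEC_CORR_LOW, FEC_CORR_HIGH, FEC_UNCORR_LOW, FEC_UNCORR_HIGH, FEC_MAX
};

/* One row of register offsets per port; offsets here are illustrative. */
#define FEC_REG_PORT(base) \
	{ (base) + 0x2, (base) + 0x3, (base) + 0x4, (base) + 0x5 }

static const unsigned int fec_reg[][FEC_MAX] = {
	FEC_REG_PORT(0x00),	/* port 0 */
	FEC_REG_PORT(0x40),	/* port 1 */
	FEC_REG_PORT(0x48),	/* port 2 */
	FEC_REG_PORT(0x50),	/* port 3 */
};

int main(void)
{
	printf("port 2 uncorrected-low reg: %#x\n",
	       fec_reg[2][FEC_UNCORR_LOW]);
	return 0;
}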
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index ffb22c7ce28b..66f29bac783a 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -17,13 +17,34 @@
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
+#define FEC_REG_SHIFT 2
+#define FEC_RECV_ID_SHIFT 4
+#define FEC_CORR_LOW_REG_PORT0 (0x02 << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT0 (0x03 << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT0 (0x04 << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT0 (0x05 << FEC_REG_SHIFT)
+#define FEC_CORR_LOW_REG_PORT1 (0x42 << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT1 (0x43 << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT1 (0x44 << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT1 (0x45 << FEC_REG_SHIFT)
+#define FEC_CORR_LOW_REG_PORT2 (0x4A << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT2 (0x4B << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT2 (0x4C << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT2 (0x4D << FEC_REG_SHIFT)
+#define FEC_CORR_LOW_REG_PORT3 (0x52 << FEC_REG_SHIFT)
+#define FEC_CORR_HIGH_REG_PORT3 (0x53 << FEC_REG_SHIFT)
+#define FEC_UNCORR_LOW_REG_PORT3 (0x54 << FEC_REG_SHIFT)
+#define FEC_UNCORR_HIGH_REG_PORT3 (0x55 << FEC_REG_SHIFT)
+#define FEC_RECEIVER_ID_PCS0 (0x33 << FEC_RECV_ID_SHIFT)
+#define FEC_RECEIVER_ID_PCS1 (0x34 << FEC_RECV_ID_SHIFT)
+
int ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
int ice_check_reset(struct ice_hw *hw);
int ice_reset(struct ice_hw *hw, enum ice_reset_req req);
int ice_create_all_ctrlq(struct ice_hw *hw);
int ice_init_all_ctrlq(struct ice_hw *hw);
-void ice_shutdown_all_ctrlq(struct ice_hw *hw);
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading);
void ice_destroy_all_ctrlq(struct ice_hw *hw);
int
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
@@ -121,6 +142,11 @@ int
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
struct ice_port_info *pi);
bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps);
+int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code,
+ u8 serdes_num, int *output);
+int
+ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ enum ice_fec_stats_types fec_type, u32 *output);
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps);
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options);
@@ -201,7 +227,7 @@ int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
-int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
+int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag);
int
ice_aq_get_cgu_abilities(struct ice_hw *hw,
struct ice_aqc_get_cgu_abilities *abilities);
@@ -249,6 +275,7 @@ void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
bool ice_is_e810t(struct ice_hw *hw);
+bool ice_is_e822(struct ice_hw *hw);
bool ice_is_e823(struct ice_hw *hw);
bool ice_is_e825c(struct ice_hw *hw);
int
@@ -261,6 +288,7 @@ int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
bool ice_is_100m_speed_supported(struct ice_hw *hw);
+u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high);
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c
index ffe660f34992..ffaa6511c455 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.c
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.c
@@ -510,16 +510,19 @@ shutdown_sq_out:
*/
static bool ice_aq_ver_check(struct ice_hw *hw)
{
- if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
+ u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw);
+ u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw);
+
+ if (hw->api_maj_ver > exp_fw_api_ver_major) {
/* Major API version is newer than expected, don't load */
dev_warn(ice_hw_to_dev(hw),
"The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
return false;
- } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
- if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
+ } else if (hw->api_maj_ver == exp_fw_api_ver_major) {
+ if (hw->api_min_ver > (exp_fw_api_ver_minor + 2))
dev_info(ice_hw_to_dev(hw),
"The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
- else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
+ else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor)
dev_info(ice_hw_to_dev(hw),
"The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
} else {
@@ -684,10 +687,12 @@ struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw)
* ice_shutdown_ctrlq - shutdown routine for any control queue
* @hw: pointer to the hardware structure
* @q_type: specific Control queue type
+ * @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks.
*/
-static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
+static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type,
+ bool unloading)
{
struct ice_ctl_q_info *cq;
@@ -695,7 +700,7 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
case ICE_CTL_Q_ADMIN:
cq = &hw->adminq;
if (ice_check_sq_alive(hw, cq))
- ice_aq_q_shutdown(hw, true);
+ ice_aq_q_shutdown(hw, unloading);
break;
case ICE_CTL_Q_SB:
cq = &hw->sbq;
@@ -714,20 +719,21 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
/**
* ice_shutdown_all_ctrlq - shutdown routine for all control queues
* @hw: pointer to the hardware structure
+ * @unloading: is the driver unloading itself
*
* NOTE: this function does not destroy the control queue locks. The driver
* may call this at runtime to shutdown and later restart control queues, such
* as in response to a reset event.
*/
-void ice_shutdown_all_ctrlq(struct ice_hw *hw)
+void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading)
{
/* Shutdown FW admin queue */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading);
/* Shutdown PHY Sideband */
if (ice_is_sbq_supported(hw))
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading);
/* Shutdown PF-VF Mailbox */
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading);
}
/**
@@ -759,7 +765,7 @@ int ice_init_all_ctrlq(struct ice_hw *hw)
break;
ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n");
- ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
+ ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true);
msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);
@@ -840,7 +846,7 @@ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
/* shut down all the control queues first */
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, true);
ice_destroy_ctrlq_locks(&hw->adminq);
if (ice_is_sbq_supported(hw))
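
ice_aq_ver_check() now compares against per-MAC expected versions and tolerates a minor-version skew of up to 2 in either direction before printing an informational message. The comparison in isolation (a sketch; the older-major branch is abbreviated in the hunk above):

#include <stdio.h>

static const char *aq_ver_check(unsigned int maj, unsigned int min,
				unsigned int exp_maj, unsigned int exp_min)
{
	if (maj > exp_maj)
		return "refuse to load: NVM newer than expected";
	if (maj < exp_maj)
		return "warn: NVM older than expected";
	if (min > exp_min + 2)
		return "info: NVM somewhat newer than expected";
	if (min + 2 < exp_min)
		return "info: NVM somewhat older than expected";
	return "ok";
}

int main(void)
{
	printf("%s\n", aq_ver_check(1, 7, 1, 5));   /* ok: within +2 */
	printf("%s\n", aq_ver_check(1, 8, 1, 5));   /* info: newer */
	return 0;
}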
diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h
index 8f2fd1613a95..1d54b1cdb1c5 100644
--- a/drivers/net/ethernet/intel/ice/ice_controlq.h
+++ b/drivers/net/ethernet/intel/ice/ice_controlq.h
@@ -21,9 +21,18 @@
/* Defines that help manage the driver vs FW API checks.
* Take a look at ice_aq_ver_check in ice_controlq.c for actual usage.
*/
-#define EXP_FW_API_VER_BRANCH 0x00
-#define EXP_FW_API_VER_MAJOR 0x01
-#define EXP_FW_API_VER_MINOR 0x05
+#define EXP_FW_API_VER_MAJOR_E810 0x01
+#define EXP_FW_API_VER_MINOR_E810 0x05
+
+#define EXP_FW_API_VER_MAJOR_E830 0x01
+#define EXP_FW_API_VER_MINOR_E830 0x07
+
+#define EXP_FW_API_VER_MAJOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_MAJOR_E830 : \
+ EXP_FW_API_VER_MAJOR_E810)
+#define EXP_FW_API_VER_MINOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \
+ EXP_FW_API_VER_MINOR_E830 : \
+ EXP_FW_API_VER_MINOR_E810)
/* Different control queue types: These are mainly for SW consumption. */
enum ice_ctl_q {
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 6e20ee610022..a94e7072b570 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -3,7 +3,7 @@
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
@@ -291,7 +291,6 @@ static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)
switch (vsi->type) {
case ICE_VSI_CHNL:
- case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
if (ena)
ice_ena_vsi(vsi, locked);
@@ -776,8 +775,7 @@ void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
/* no need to proceed with remaining cfg if it is CHNL
* or switchdev VSI
*/
- if (vsi->type == ICE_VSI_CHNL ||
- vsi->type == ICE_VSI_SWITCHDEV_CTRL)
+ if (vsi->type == ICE_VSI_CHNL)
continue;
ice_vsi_map_rings_to_vectors(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index fc91c4d41186..f182179529b7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -4,6 +4,7 @@
#include "ice_common.h"
#include "ice.h"
#include "ice_ddp.h"
+#include "ice_sched.h"
/* For supporting double VLAN mode, it is necessary to enable or disable certain
* boost tcam entries. The metadata labels names that match the following
@@ -721,6 +722,12 @@ static bool ice_is_gtp_c_profile(u16 prof_idx)
}
}
+static bool ice_is_pfcp_profile(u16 prof_idx)
+{
+ return prof_idx >= ICE_PROFID_IPV4_PFCP_NODE &&
+ prof_idx <= ICE_PROFID_IPV6_PFCP_SESSION;
+}
+
/**
* ice_get_sw_prof_type - determine switch profile type
* @hw: pointer to the HW structure
@@ -738,6 +745,9 @@ static enum ice_prof_type ice_get_sw_prof_type(struct ice_hw *hw,
if (ice_is_gtp_u_profile(prof_idx))
return ICE_PROF_TUN_GTPU;
+ if (ice_is_pfcp_profile(prof_idx))
+ return ICE_PROF_TUN_PFCP;
+
for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
/* UDP tunnel will have UDP_OF protocol ID and VNI offset */
if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
@@ -1329,6 +1339,7 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
for (i = 0; i < count; i++) {
bool last = false;
+ int try_cnt = 0;
int status;
bh = (struct ice_buf_hdr *)(bufs + start + i);
@@ -1336,8 +1347,26 @@ ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
if (indicate_last)
last = ice_is_last_download_buffer(bh, i, count);
- status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
- &offset, &info, NULL);
+ while (1) {
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE,
+ last, &offset, &info,
+ NULL);
+ if (hw->adminq.sq_last_status != ICE_AQ_RC_ENOSEC &&
+ hw->adminq.sq_last_status != ICE_AQ_RC_EBADSIG)
+ break;
+
+ try_cnt++;
+
+ if (try_cnt == 5)
+ break;
+
+ msleep(20);
+ }
+
+ if (try_cnt)
+ dev_dbg(ice_hw_to_dev(hw),
+ "ice_aq_download_pkg number of retries: %d\n",
+ try_cnt);
/* Save AQ status from download package */
if (status) {
@@ -1424,14 +1453,14 @@ ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
goto exit;
}
- conf_idx = le32_to_cpu(seg->signed_seg_idx);
- start = le32_to_cpu(seg->signed_buf_start);
count = le32_to_cpu(seg->signed_buf_count);
-
state = ice_download_pkg_sig_seg(hw, seg);
- if (state)
+ if (state || !count)
goto exit;
+ conf_idx = le32_to_cpu(seg->signed_seg_idx);
+ start = le32_to_cpu(seg->signed_buf_start);
+
state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
count);
@@ -2263,3 +2292,211 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf,
return state;
}
+
+/**
+ * ice_get_set_tx_topo - get or set Tx topology
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @buf_size: buffer size
+ * @cd: pointer to command details structure or NULL
+ * @flags: pointer to descriptor flags
+ * @set: 0-get, 1-set topology
+ *
+ * The function will get or set Tx topology
+ *
+ * Return: zero when set was successful, negative values otherwise.
+ */
+static int
+ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
+ struct ice_sq_cd *cd, u8 *flags, bool set)
+{
+ struct ice_aqc_get_set_tx_topo *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ cmd = &desc.params.get_set_tx_topo;
+ if (set) {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
+ cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
+ /* requested to update a new topology, not a default topology */
+ if (buf)
+ cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
+ ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
+
+ if (ice_is_e825c(hw))
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ } else {
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
+ cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
+ }
+
+ if (!ice_is_e825c(hw))
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (status)
+ return status;
+ /* read the return flag values (first byte) for get operation */
+ if (!set && flags)
+ *flags = desc.params.get_set_tx_topo.set_flags;
+
+ return 0;
+}
+
+/**
+ * ice_cfg_tx_topo - Initialize new Tx topology if available
+ * @hw: pointer to the HW struct
+ * @buf: pointer to Tx topology buffer
+ * @len: buffer size
+ *
+ * The function will apply the new Tx topology from the package buffer
+ * if available.
+ *
+ * Return: zero when update was successful, negative values otherwise.
+ */
+int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ u8 *current_topo, *new_topo = NULL;
+ struct ice_run_time_cfg_seg *seg;
+ struct ice_buf_hdr *section;
+ struct ice_pkg_hdr *pkg_hdr;
+ enum ice_ddp_state state;
+ u16 offset, size = 0;
+ u32 reg = 0;
+ int status;
+ u8 flags;
+
+ if (!buf || !len)
+ return -EINVAL;
+
+	/* Does FW support the new Tx topology mode? */
+ if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
+ ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ current_topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!current_topo)
+ return -ENOMEM;
+
+ /* Get the current Tx topology */
+ status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
+ &flags, false);
+
+ kfree(current_topo);
+
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
+ return status;
+ }
+
+ /* Is the default topology already applied? */
+ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) {
+ ice_debug(hw, ICE_DBG_INIT, "Default topology already applied\n");
+ return -EEXIST;
+ }
+
+ /* Is the new topology already applied? */
+ if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
+ ice_debug(hw, ICE_DBG_INIT, "New topology already applied\n");
+ return -EEXIST;
+ }
+
+ /* Was a topology update already issued by another PF? */
+ if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
+ ice_debug(hw, ICE_DBG_INIT, "Tx topology update already issued by another PF\n");
+ /* Add a small delay before exiting */
+ msleep(2000);
+ return -EEXIST;
+ }
+
+ /* Change the topology from new to default (5 to 9) */
+ if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
+ hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
+ ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
+ goto update_topo;
+ }
+
+ pkg_hdr = (struct ice_pkg_hdr *)buf;
+ state = ice_verify_pkg(pkg_hdr, len);
+ if (state) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n",
+ state);
+ return -EIO;
+ }
+
+ /* Find runtime configuration segment */
+ seg = (struct ice_run_time_cfg_seg *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
+ if (!seg) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
+ return -EIO;
+ }
+
+ if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
+ seg->buf_table.buf_count);
+ return -EIO;
+ }
+
+ section = ice_pkg_val_buf(seg->buf_table.buf_array);
+ if (!section || le32_to_cpu(section->section_entry[0].type) !=
+ ICE_SID_TX_5_LAYER_TOPO) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
+ return -EIO;
+ }
+
+ size = le16_to_cpu(section->section_entry[0].size);
+ offset = le16_to_cpu(section->section_entry[0].offset);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
+ return -EIO;
+ }
+
+ /* Make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE) {
+ ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
+ return -EIO;
+ }
+
+ /* Get the new topology buffer */
+ new_topo = ((u8 *)section) + offset;
+
+update_topo:
+ /* Acquire the global lock to make sure the topology is set by
+ * only one PF at a time.
+ */
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
+ return status;
+ }
+
+ /* Check if reset was triggered already. */
+ reg = rd32(hw, GLGEN_RSTAT);
+ if (reg & GLGEN_RSTAT_DEVSTATE_M) {
+ /* Reset is in progress, re-init the HW again */
+ ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
+ ice_check_reset(hw);
+ return 0;
+ }
+
+ /* Set new topology */
+ status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
+ return status;
+ }
+
+ /* New topology is updated, delay 1 second before issuing the CORER */
+ msleep(1000);
+ ice_reset(hw, ICE_RESET_CORER);
+ /* CORER will clear the global lock, so no explicit call
+ * required for release.
+ */
+
+ return 0;
+}
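
The download-retry change in ice_dwnld_cfg_bufs_no_lock() retries a buffer only on the transient ENOSEC/EBADSIG admin-queue errors, bounded at five tries with a 20 ms sleep between attempts. The generic shape of that loop, as a userspace sketch with a stubbed operation:

#include <stdio.h>

#define EAGAIN_SEC (-1)   /* stand-in for ICE_AQ_RC_ENOSEC / ICE_AQ_RC_EBADSIG */

static int attempts;

static int download_buf(void)
{
	return ++attempts < 3 ? EAGAIN_SEC : 0;  /* transient failure twice */
}

int main(void)
{
	int try_cnt = 0, status;

	while (1) {
		status = download_buf();
		if (status != EAGAIN_SEC)   /* success or a hard error */
			break;
		if (++try_cnt == 5)         /* bounded: give up after 5 */
			break;
		/* the driver sleeps 20 ms here (msleep(20)) */
	}
	printf("status=%d retries=%d\n", status, try_cnt);
	return 0;
}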
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index ff66c2ffb1a2..622543f08b43 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -454,4 +454,6 @@ u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);
+int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
+
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 9dfae9bce758..34fd604132f5 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -16,14 +16,26 @@
#define ICE_DEV_ID_E823L_1GBE 0x124F
/* Intel(R) Ethernet Connection E823-L for QSFP */
#define ICE_DEV_ID_E823L_QSFP 0x151D
+/* Intel(R) Ethernet Controller E830-CC for backplane */
+#define ICE_DEV_ID_E830CC_BACKPLANE 0x12D1
+/* Intel(R) Ethernet Controller E830-CC for QSFP */
+#define ICE_DEV_ID_E830CC_QSFP56 0x12D2
+/* Intel(R) Ethernet Controller E830-CC for SFP */
+#define ICE_DEV_ID_E830CC_SFP 0x12D3
+/* Intel(R) Ethernet Controller E830-CC for SFP-DD */
+#define ICE_DEV_ID_E830CC_SFP_DD 0x12D4
/* Intel(R) Ethernet Controller E830-C for backplane */
-#define ICE_DEV_ID_E830_BACKPLANE 0x12D1
+#define ICE_DEV_ID_E830C_BACKPLANE 0x12D5
/* Intel(R) Ethernet Controller E830-C for QSFP */
-#define ICE_DEV_ID_E830_QSFP56 0x12D2
+#define ICE_DEV_ID_E830C_QSFP 0x12D8
/* Intel(R) Ethernet Controller E830-C for SFP */
-#define ICE_DEV_ID_E830_SFP 0x12D3
-/* Intel(R) Ethernet Controller E830-C for SFP-DD */
-#define ICE_DEV_ID_E830_SFP_DD 0x12D4
+#define ICE_DEV_ID_E830C_SFP 0x12DA
+/* Intel(R) Ethernet Controller E830-XXV for backplane */
+#define ICE_DEV_ID_E830_XXV_BACKPLANE 0x12DC
+/* Intel(R) Ethernet Controller E830-XXV for QSFP */
+#define ICE_DEV_ID_E830_XXV_QSFP 0x12DD
+/* Intel(R) Ethernet Controller E830-XXV for SFP */
+#define ICE_DEV_ID_E830_XXV_SFP 0x12DE
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 9069725c71b4..3cfa071e3718 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -7,89 +7,10 @@
#include "ice_eswitch_br.h"
#include "ice_fltr.h"
#include "ice_repr.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
#include "ice_tc_lib.h"
/**
- * ice_eswitch_del_sp_rules - delete adv rules added on PRs
- * @pf: pointer to the PF struct
- *
- * Delete all advanced rules that were used to forward packets with the
- * device's VSI index to the corresponding eswitch ctrl VSI queue.
- */
-static void ice_eswitch_del_sp_rules(struct ice_pf *pf)
-{
- struct ice_repr *repr;
- unsigned long id;
-
- xa_for_each(&pf->eswitch.reprs, id, repr) {
- if (repr->sp_rule.rid)
- ice_rem_adv_rule_by_id(&pf->hw, &repr->sp_rule);
- }
-}
-
-/**
- * ice_eswitch_add_sp_rule - add adv rule with device's VSI index
- * @pf: pointer to PF struct
- * @repr: pointer to the repr struct
- *
- * This function adds advanced rule that forwards packets with
- * device's VSI index to the corresponding eswitch ctrl VSI queue.
- */
-static int ice_eswitch_add_sp_rule(struct ice_pf *pf, struct ice_repr *repr)
-{
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
- struct ice_adv_rule_info rule_info = { 0 };
- struct ice_adv_lkup_elem *list;
- struct ice_hw *hw = &pf->hw;
- const u16 lkups_cnt = 1;
- int err;
-
- list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
- if (!list)
- return -ENOMEM;
-
- ice_rule_add_src_vsi_metadata(list);
-
- rule_info.sw_act.flag = ICE_FLTR_TX;
- rule_info.sw_act.vsi_handle = ctrl_vsi->idx;
- rule_info.sw_act.fltr_act = ICE_FWD_TO_Q;
- rule_info.sw_act.fwd_id.q_id = hw->func_caps.common_cap.rxq_first_id +
- ctrl_vsi->rxq_map[repr->q_id];
- rule_info.flags_info.act |= ICE_SINGLE_ACT_LB_ENABLE;
- rule_info.flags_info.act_valid = true;
- rule_info.tun_type = ICE_SW_TUN_AND_NON_TUN;
- rule_info.src_vsi = repr->src_vsi->idx;
-
- err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info,
- &repr->sp_rule);
- if (err)
- dev_err(ice_pf_to_dev(pf), "Unable to add slow-path rule for eswitch for PR %d",
- repr->id);
-
- kfree(list);
- return err;
-}
-
-static int
-ice_eswitch_add_sp_rules(struct ice_pf *pf)
-{
- struct ice_repr *repr;
- unsigned long id;
- int err;
-
- xa_for_each(&pf->eswitch.reprs, id, repr) {
- err = ice_eswitch_add_sp_rule(pf, repr);
- if (err) {
- ice_eswitch_del_sp_rules(pf);
- return err;
- }
- }
-
- return 0;
-}
-
-/**
* ice_eswitch_setup_env - configure eswitch HW filters
* @pf: pointer to PF struct
*
@@ -99,10 +20,13 @@ ice_eswitch_add_sp_rules(struct ice_pf *pf)
static int ice_eswitch_setup_env(struct ice_pf *pf)
{
struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
struct net_device *netdev = uplink_vsi->netdev;
+ bool if_running = netif_running(netdev);
struct ice_vsi_vlan_ops *vlan_ops;
- bool rule_added = false;
+
+ if (if_running && !test_and_set_bit(ICE_VSI_DOWN, uplink_vsi->state))
+ if (ice_down(uplink_vsi))
+ return -ENODEV;
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
@@ -112,98 +36,53 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
netif_addr_unlock_bh(netdev);
if (ice_vsi_add_vlan_zero(uplink_vsi))
+ goto err_vlan_zero;
+
+ if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
+ ICE_FLTR_RX))
goto err_def_rx;
- if (!ice_is_dflt_vsi_in_use(uplink_vsi->port_info)) {
- if (ice_set_dflt_vsi(uplink_vsi))
- goto err_def_rx;
- rule_added = true;
- }
+ if (ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, true,
+ ICE_FLTR_TX))
+ goto err_def_tx;
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
if (vlan_ops->dis_rx_filtering(uplink_vsi))
- goto err_dis_rx;
+ goto err_vlan_filtering;
if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_uplink;
- if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
- goto err_override_control;
-
if (ice_vsi_update_local_lb(uplink_vsi, true))
goto err_override_local_lb;
+ if (if_running && ice_up(uplink_vsi))
+ goto err_up;
+
return 0;
+err_up:
+ ice_vsi_update_local_lb(uplink_vsi, false);
err_override_local_lb:
- ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
-err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
vlan_ops->ena_rx_filtering(uplink_vsi);
-err_dis_rx:
- if (rule_added)
- ice_clear_dflt_vsi(uplink_vsi);
+err_vlan_filtering:
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_TX);
+err_def_tx:
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_RX);
err_def_rx:
+ ice_vsi_del_vlan_zero(uplink_vsi);
+err_vlan_zero:
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
- return -ENODEV;
-}
-
-/**
- * ice_eswitch_remap_rings_to_vectors - reconfigure rings of eswitch ctrl VSI
- * @eswitch: pointer to eswitch struct
- *
- * In eswitch number of allocated Tx/Rx rings is equal.
- *
- * This function fills q_vectors structures associated with representor and
- * move each ring pairs to port representor netdevs. Each port representor
- * will have dedicated 1 Tx/Rx ring pair, so number of rings pair is equal to
- * number of VFs.
- */
-static void ice_eswitch_remap_rings_to_vectors(struct ice_eswitch *eswitch)
-{
- struct ice_vsi *vsi = eswitch->control_vsi;
- unsigned long repr_id = 0;
- int q_id;
-
- ice_for_each_txq(vsi, q_id) {
- struct ice_q_vector *q_vector;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
- struct ice_repr *repr;
-
- repr = xa_find(&eswitch->reprs, &repr_id, U32_MAX,
- XA_PRESENT);
- if (!repr)
- break;
-
- repr_id += 1;
- repr->q_id = q_id;
- q_vector = repr->q_vector;
- tx_ring = vsi->tx_rings[q_id];
- rx_ring = vsi->rx_rings[q_id];
-
- q_vector->vsi = vsi;
- q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
-
- q_vector->num_ring_tx = 1;
- q_vector->tx.tx_ring = tx_ring;
- tx_ring->q_vector = q_vector;
- tx_ring->next = NULL;
- tx_ring->netdev = repr->netdev;
- /* In switchdev mode, from OS stack perspective, there is only
- * one queue for given netdev, so it needs to be indexed as 0.
- */
- tx_ring->q_index = 0;
+ if (if_running)
+ ice_up(uplink_vsi);
- q_vector->num_ring_rx = 1;
- q_vector->rx.rx_ring = rx_ring;
- rx_ring->q_vector = q_vector;
- rx_ring->next = NULL;
- rx_ring->netdev = repr->netdev;
- }
+ return -ENODEV;
}
/**
@@ -225,8 +104,6 @@ ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
repr->dst = NULL;
ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
ICE_FWD_TO_VSI);
-
- netif_napi_del(&repr->q_vector->napi);
}
/**
@@ -236,43 +113,64 @@ ice_eswitch_release_repr(struct ice_pf *pf, struct ice_repr *repr)
*/
static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr)
{
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
+ struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
struct ice_vsi *vsi = repr->src_vsi;
struct metadata_dst *dst;
- ice_remove_vsi_fltr(&pf->hw, vsi->idx);
repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
GFP_KERNEL);
if (!repr->dst)
- goto err_add_mac_fltr;
+ return -ENOMEM;
- if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof))
- goto err_dst_free;
+ netif_keep_dst(uplink_vsi->netdev);
- if (ice_vsi_add_vlan_zero(vsi))
- goto err_update_security;
+ dst = repr->dst;
+ dst->u.port_info.port_id = vsi->vsi_num;
+ dst->u.port_info.lower_dev = uplink_vsi->netdev;
- netif_napi_add(repr->netdev, &repr->q_vector->napi,
- ice_napi_poll);
+ return 0;
+}
- netif_keep_dst(repr->netdev);
+/**
+ * ice_eswitch_cfg_vsi - configure VSI to work in slow-path
+ * @vsi: VSI structure of representee
+ * @mac: representee MAC
+ *
+ * Return: 0 on success, non-zero on error.
+ */
+int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+ int err;
- dst = repr->dst;
- dst->u.port_info.port_id = vsi->vsi_num;
- dst->u.port_info.lower_dev = repr->netdev;
- ice_repr_set_traffic_vsi(repr, ctrl_vsi);
+ ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx);
+
+ err = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+ if (err)
+ goto err_update_security;
+
+ err = ice_vsi_add_vlan_zero(vsi);
+ if (err)
+ goto err_vlan_zero;
return 0;
-err_update_security:
+err_vlan_zero:
ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
-err_dst_free:
- metadata_dst_free(repr->dst);
- repr->dst = NULL;
-err_add_mac_fltr:
- ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI);
+err_update_security:
+ ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);
- return -ENODEV;
+ return err;
+}
+
+/**
+ * ice_eswitch_decfg_vsi - unroll changes done to VSI for switchdev
+ * @vsi: VSI structure of representee
+ * @mac: representee MAC
+ */
+void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI);
}
/**
@@ -280,16 +178,16 @@ err_add_mac_fltr:
* @repr_id: representor ID
* @vsi: VSI for which port representor is configured
*/
-void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
+void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_repr *repr;
- int ret;
+ int err;
if (!ice_is_switchdev_running(pf))
return;
- repr = xa_load(&pf->eswitch.reprs, repr_id);
+ repr = xa_load(&pf->eswitch.reprs, *repr_id);
if (!repr)
return;
@@ -299,12 +197,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
if (repr->br_port)
repr->br_port->vsi = vsi;
- ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
- if (ret) {
- ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac,
- ICE_FWD_TO_VSI);
+ err = ice_eswitch_cfg_vsi(vsi, repr->parent_mac);
+ if (err)
dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d",
repr->id);
+
+ /* The VSI number changed, reload the PR under the new ID */
+ if (repr->id != vsi->vsi_num) {
+ xa_erase(&pf->eswitch.reprs, repr->id);
+ repr->id = vsi->vsi_num;
+ if (xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL))
+ dev_err(ice_pf_to_dev(pf), "Failed to reload port representor %d",
+ repr->id);
+ *repr_id = repr->id;
}
}
@@ -318,27 +223,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi)
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
- struct ice_netdev_priv *np;
- struct ice_repr *repr;
- struct ice_vsi *vsi;
-
- np = netdev_priv(netdev);
- vsi = np->vsi;
-
- if (!vsi || !ice_is_switchdev_running(vsi->back))
- return NETDEV_TX_BUSY;
-
- if (ice_is_reset_in_progress(vsi->back->state) ||
- test_bit(ICE_VF_DIS, vsi->back->state))
- return NETDEV_TX_BUSY;
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ unsigned int len = skb->len;
+ int ret;
- repr = ice_netdev_to_repr(netdev);
skb_dst_drop(skb);
dst_hold((struct dst_entry *)repr->dst);
skb_dst_set(skb, (struct dst_entry *)repr->dst);
- skb->queue_mapping = repr->q_id;
+ skb->dev = repr->dst->u.port_info.lower_dev;
- return ice_start_xmit(skb, netdev);
+ ret = dev_queue_xmit(skb);
+ ice_repr_inc_tx_stats(repr, len, ret);
+
+ return ret;
}
/**
@@ -374,71 +271,29 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
static void ice_eswitch_release_env(struct ice_pf *pf)
{
struct ice_vsi *uplink_vsi = pf->eswitch.uplink_vsi;
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
struct ice_vsi_vlan_ops *vlan_ops;
vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
ice_vsi_update_local_lb(uplink_vsi, false);
- ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
vlan_ops->ena_rx_filtering(uplink_vsi);
- ice_clear_dflt_vsi(uplink_vsi);
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_TX);
+ ice_cfg_dflt_vsi(uplink_vsi->port_info, uplink_vsi->idx, false,
+ ICE_FLTR_RX);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
ICE_FWD_TO_VSI);
}
/**
- * ice_eswitch_vsi_setup - configure eswitch control VSI
- * @pf: pointer to PF structure
- * @pi: pointer to port_info structure
- */
-static struct ice_vsi *
-ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
-{
- struct ice_vsi_cfg_params params = {};
-
- params.type = ICE_VSI_SWITCHDEV_CTRL;
- params.pi = pi;
- params.flags = ICE_VSI_FLAG_INIT;
-
- return ice_vsi_setup(pf, &params);
-}
-
-/**
- * ice_eswitch_napi_enable - enable NAPI for all port representors
- * @reprs: xarray of reprs
- */
-static void ice_eswitch_napi_enable(struct xarray *reprs)
-{
- struct ice_repr *repr;
- unsigned long id;
-
- xa_for_each(reprs, id, repr)
- napi_enable(&repr->q_vector->napi);
-}
-
-/**
- * ice_eswitch_napi_disable - disable NAPI for all port representors
- * @reprs: xarray of reprs
- */
-static void ice_eswitch_napi_disable(struct xarray *reprs)
-{
- struct ice_repr *repr;
- unsigned long id;
-
- xa_for_each(reprs, id, repr)
- napi_disable(&repr->q_vector->napi);
-}
-
-/**
* ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
* @pf: pointer to PF structure
*/
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi, *uplink_vsi;
+ struct ice_vsi *uplink_vsi;
uplink_vsi = ice_get_main_vsi(pf);
if (!uplink_vsi)
@@ -450,17 +305,10 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
return -EINVAL;
}
- pf->eswitch.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
- if (!pf->eswitch.control_vsi)
- return -ENODEV;
-
- ctrl_vsi = pf->eswitch.control_vsi;
- /* cp VSI is createad with 1 queue as default */
- pf->eswitch.qs.value = 1;
pf->eswitch.uplink_vsi = uplink_vsi;
if (ice_eswitch_setup_env(pf))
- goto err_vsi;
+ return -ENODEV;
if (ice_eswitch_br_offloads_init(pf))
goto err_br_offloads;
@@ -471,8 +319,6 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
err_br_offloads:
ice_eswitch_release_env(pf);
-err_vsi:
- ice_vsi_release(ctrl_vsi);
return -ENODEV;
}
@@ -482,14 +328,10 @@ err_vsi:
*/
static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi = pf->eswitch.control_vsi;
-
ice_eswitch_br_offloads_deinit(pf);
ice_eswitch_release_env(pf);
- ice_vsi_release(ctrl_vsi);
pf->eswitch.is_running = false;
- pf->eswitch.qs.is_reaching = false;
}
/**
@@ -530,7 +372,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
pf->hw.pf_id);
- xa_init_flags(&pf->eswitch.reprs, XA_FLAGS_ALLOC);
+ xa_init(&pf->eswitch.reprs);
NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
break;
}
@@ -602,56 +444,19 @@ void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
static void ice_eswitch_stop_reprs(struct ice_pf *pf)
{
- ice_eswitch_del_sp_rules(pf);
ice_eswitch_stop_all_tx_queues(pf);
- ice_eswitch_napi_disable(&pf->eswitch.reprs);
}
static void ice_eswitch_start_reprs(struct ice_pf *pf)
{
- ice_eswitch_napi_enable(&pf->eswitch.reprs);
ice_eswitch_start_all_tx_queues(pf);
- ice_eswitch_add_sp_rules(pf);
-}
-
-static void
-ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change)
-{
- struct ice_vsi *cp = eswitch->control_vsi;
- int queues = 0;
-
- if (eswitch->qs.is_reaching) {
- if (eswitch->qs.to_reach >= eswitch->qs.value + change) {
- queues = eswitch->qs.to_reach;
- eswitch->qs.is_reaching = false;
- } else {
- queues = 0;
- }
- } else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) ||
- change < 0) {
- queues = cp->alloc_txq + change;
- }
-
- if (queues) {
- cp->req_txq = queues;
- cp->req_rxq = queues;
- ice_vsi_close(cp);
- ice_vsi_rebuild(cp, ICE_VSI_FLAG_NO_INIT);
- ice_vsi_open(cp);
- } else if (!change) {
- /* change == 0 means that VSI wasn't open, open it here */
- ice_vsi_open(cp);
- }
-
- eswitch->qs.value += change;
- ice_eswitch_remap_rings_to_vectors(eswitch);
}
int
ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
{
+ struct devlink *devlink = priv_to_devlink(pf);
struct ice_repr *repr;
- int change = 1;
int err;
if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
@@ -661,14 +466,13 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
err = ice_eswitch_enable_switchdev(pf);
if (err)
return err;
- /* Control plane VSI is created with 1 queue as default */
- pf->eswitch.qs.to_reach -= 1;
- change = 0;
}
ice_eswitch_stop_reprs(pf);
+ devl_lock(devlink);
repr = ice_repr_add_vf(vf);
+ devl_unlock(devlink);
if (IS_ERR(repr)) {
err = PTR_ERR(repr);
goto err_create_repr;
@@ -678,14 +482,12 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
if (err)
goto err_setup_repr;
- err = xa_alloc(&pf->eswitch.reprs, &repr->id, repr,
- XA_LIMIT(1, INT_MAX), GFP_KERNEL);
+ err = xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL);
if (err)
goto err_xa_alloc;
vf->repr_id = repr->id;
- ice_eswitch_cp_change_queues(&pf->eswitch, change);
ice_eswitch_start_reprs(pf);
return 0;
@@ -693,7 +495,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
err_xa_alloc:
ice_eswitch_release_repr(pf, repr);
err_setup_repr:
+ devl_lock(devlink);
ice_repr_rem_vf(repr);
+ devl_unlock(devlink);
err_create_repr:
if (xa_empty(&pf->eswitch.reprs))
ice_eswitch_disable_switchdev(pf);
@@ -715,10 +519,9 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
if (xa_empty(&pf->eswitch.reprs))
ice_eswitch_disable_switchdev(pf);
- else
- ice_eswitch_cp_change_queues(&pf->eswitch, -1);
ice_eswitch_release_repr(pf, repr);
+ devl_lock(devlink);
ice_repr_rem_vf(repr);
if (xa_empty(&pf->eswitch.reprs)) {
@@ -726,49 +529,32 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf)
* no point in keeping the nodes
*/
ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf));
- devl_lock(devlink);
devl_rate_nodes_destroy(devlink);
- devl_unlock(devlink);
} else {
ice_eswitch_start_reprs(pf);
}
+ devl_unlock(devlink);
}
/**
- * ice_eswitch_rebuild - rebuild eswitch
- * @pf: pointer to PF structure
+ * ice_eswitch_get_target - get netdev based on src_vsi from descriptor
+ * @rx_ring: ring used to receive the packet
+ * @rx_desc: descriptor used to get src_vsi value
+ *
+ * Get the src_vsi value from the descriptor and look up the matching
+ * representor. If none is found, return rx_ring->netdev.
*/
-int ice_eswitch_rebuild(struct ice_pf *pf)
+struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
{
+ struct ice_eswitch *eswitch = &rx_ring->vsi->back->eswitch;
+ struct ice_32b_rx_flex_desc_nic_2 *desc;
struct ice_repr *repr;
- unsigned long id;
- int err;
-
- if (!ice_is_switchdev_running(pf))
- return 0;
- err = ice_vsi_rebuild(pf->eswitch.control_vsi, ICE_VSI_FLAG_INIT);
- if (err)
- return err;
-
- xa_for_each(&pf->eswitch.reprs, id, repr)
- ice_eswitch_detach(pf, repr->vf);
-
- return 0;
-}
-
-/**
- * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues
- * @pf: pointer to PF structure
- * @change: how many more (or less) queues is needed
- *
- * Remember to call ice_eswitch_attach/detach() the "change" times.
- */
-void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change)
-{
- if (pf->eswitch.qs.value + change < 0)
- return;
+ desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
+ repr = xa_load(&eswitch->reprs, le16_to_cpu(desc->src_vsi));
+ if (!repr)
+ return rx_ring->netdev;
- pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change;
- pf->eswitch.qs.is_reaching = true;
+ return repr->netdev;
}
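With the control VSI gone, the Rx hot path picks the target netdev per packet; a sketch of the expected call site when an skb is finalized (the helper name here is hypothetical):

	static void ice_repr_finalize_skb(struct ice_rx_ring *rx_ring,
					  union ice_32b_rx_flex_desc *rx_desc,
					  struct sk_buff *skb)
	{
		/* eth_type_trans() also sets skb->dev to the chosen netdev */
		skb->protocol = eth_type_trans(skb,
					       ice_eswitch_get_target(rx_ring,
								      rx_desc));
	}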
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
index 1a288a03a79a..78fd39a6935d 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -10,7 +10,6 @@
void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf);
int
ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf);
-int ice_eswitch_rebuild(struct ice_pf *pf);
int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
int
@@ -18,7 +17,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack);
bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
-void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi);
+void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi);
void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
@@ -26,7 +25,11 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off);
netdev_tx_t
ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change);
+struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc);
+
+int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac);
+void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac);
#else /* CONFIG_ICE_SWITCHDEV */
static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
@@ -43,18 +46,13 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb,
struct ice_tx_offload_params *off) { }
static inline void
-ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { }
+ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { }
static inline int ice_eswitch_configure(struct ice_pf *pf)
{
return 0;
}
-static inline int ice_eswitch_rebuild(struct ice_pf *pf)
-{
- return -EOPNOTSUPP;
-}
-
static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
return DEVLINK_ESWITCH_MODE_LEGACY;
@@ -78,7 +76,18 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
-static inline void
-ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { }
+static inline struct net_device *
+ice_eswitch_get_target(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ return rx_ring->netdev;
+}
+
+static inline int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac) { }
#endif /* CONFIG_ICE_SWITCHDEV */
#endif /* _ICE_ESWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
index ac5beecd028b..f5aceb32bf4d 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
@@ -896,7 +896,8 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) {
vsi->back->br_port = NULL;
} else {
- struct ice_repr *repr = ice_repr_get_by_vsi(vsi);
+ struct ice_repr *repr =
+ ice_repr_get(vsi->back, br_port->repr_id);
if (repr)
repr->br_port = NULL;
@@ -937,6 +938,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
br_port->vsi = repr->src_vsi;
br_port->vsi_idx = br_port->vsi->idx;
br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
+ br_port->repr_id = repr->id;
repr->br_port = br_port;
err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
index 85a8fadb2928..c15c7344d7f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
@@ -46,6 +46,7 @@ struct ice_esw_br_port {
enum ice_esw_br_port_type type;
u16 vsi_idx;
u16 pvid;
+ u32 repr_id;
struct xarray vlans;
};
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 78b833b3e1d7..bc79ba974e49 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -463,7 +463,354 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
static int ice_get_regs_len(struct net_device __always_unused *netdev)
{
- return sizeof(ice_regs_dump_list);
+ return (sizeof(ice_regs_dump_list) +
+ sizeof(struct ice_regdump_to_ethtool));
+}
+
+/**
+ * ice_ethtool_get_maxspeed - Get the max speed for given lport
+ * @hw: pointer to the HW struct
+ * @lport: logical port for which max speed is requested
+ * @max_speed: return max speed for input lport
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_ethtool_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {};
+ bool active_valid = false, pending_valid = true;
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ u8 active_idx = 0, pending_idx = 0;
+ int status;
+
+ status = ice_aq_get_port_options(hw, options, &option_count, lport,
+ true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status)
+ return -EIO;
+ if (!active_valid)
+ return -EINVAL;
+
+ *max_speed = options[active_idx].max_lane_speed & ICE_AQC_PORT_OPT_MAX_LANE_M;
+ return 0;
+}
+
+/**
+ * ice_is_serdes_muxed - returns whether serdes is muxed in hardware
+ * @hw: pointer to the HW struct
+ *
+ * Return: true when serdes is muxed, false when serdes is not muxed.
+ */
+static bool ice_is_serdes_muxed(struct ice_hw *hw)
+{
+ u32 reg_value = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
+
+ return FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, reg_value);
+}
+
+static int ice_map_port_topology_for_sfp(struct ice_port_topology *port_topology,
+ u8 lport, bool is_muxed)
+{
+ switch (lport) {
+ case 0:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 0;
+ break;
+ case 1:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 0;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 2;
+ else
+ port_topology->primary_serdes_lane = 4;
+ break;
+ case 2:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 1;
+ port_topology->primary_serdes_lane = 1;
+ break;
+ case 3:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 1;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 3;
+ else
+ port_topology->primary_serdes_lane = 5;
+ break;
+ case 4:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 2;
+ break;
+ case 5:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 6;
+ break;
+ case 6:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 3;
+ break;
+ case 7:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ice_map_port_topology_for_qsfp(struct ice_port_topology *port_topology,
+ u8 lport, bool is_muxed)
+{
+ switch (lport) {
+ case 0:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 0;
+ port_topology->primary_serdes_lane = 0;
+ break;
+ case 1:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 0;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 2;
+ else
+ port_topology->primary_serdes_lane = 4;
+ break;
+ case 2:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 1;
+ port_topology->primary_serdes_lane = 1;
+ break;
+ case 3:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 1;
+ if (is_muxed)
+ port_topology->primary_serdes_lane = 3;
+ else
+ port_topology->primary_serdes_lane = 5;
+ break;
+ case 4:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 2;
+ break;
+ case 5:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 2;
+ port_topology->primary_serdes_lane = 6;
+ break;
+ case 6:
+ port_topology->pcs_quad_select = 0;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 3;
+ break;
+ case 7:
+ port_topology->pcs_quad_select = 1;
+ port_topology->pcs_port = 3;
+ port_topology->primary_serdes_lane = 7;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_port_topology - returns physical topology like pcsquad, pcsport,
+ * serdes number
+ * @hw: pointer to the HW struct
+ * @lport: logical port for which physical info requested
+ * @port_topology: buffer to hold port topology
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
+ struct ice_port_topology *port_topology)
+{
+ struct ice_aqc_get_link_topo cmd = {};
+ u16 node_handle = 0;
+ u8 cage_type = 0;
+ bool is_muxed;
+ int err;
+ u8 ctx;
+
+ ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
+ ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
+ cmd.addr.topo_params.node_type_ctx = ctx;
+
+ err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle);
+ if (err)
+ return -EINVAL;
+
+ is_muxed = ice_is_serdes_muxed(hw);
+
+ if (cage_type == 0x11 || /* SFP+ */
+ cage_type == 0x12) { /* SFP28 */
+ port_topology->serdes_lane_count = 1;
+ err = ice_map_port_topology_for_sfp(port_topology, lport, is_muxed);
+ if (err)
+ return err;
+ } else if (cage_type == 0x13 || /* QSFP */
+ cage_type == 0x14) { /* QSFP28 */
+ u8 max_speed = 0;
+
+ err = ice_ethtool_get_maxspeed(hw, lport, &max_speed);
+ if (err)
+ return err;
+
+ if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
+ port_topology->serdes_lane_count = 4;
+ else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G)
+ port_topology->serdes_lane_count = 2;
+ else
+ port_topology->serdes_lane_count = 1;
+
+ err = ice_map_port_topology_for_qsfp(port_topology, lport, is_muxed);
+ if (err)
+ return err;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
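The cage-type checks above use raw netlist values; for orientation, a sketch with symbolic names (the patch itself keeps the literals, annotated by comments):

	#define ICE_CAGE_TYPE_SFP_PLUS	0x11
	#define ICE_CAGE_TYPE_SFP28	0x12
	#define ICE_CAGE_TYPE_QSFP	0x13
	#define ICE_CAGE_TYPE_QSFP28	0x14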
+
+/**
+ * ice_get_tx_rx_equa - read serdes Tx/Rx equalizer parameters
+ * @hw: pointer to the HW struct
+ * @serdes_num: serdes number to query
+ * @ptr: structure that receives all equalization parameters for the serdes
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
+ struct ice_serdes_equalization_to_ethtool *ptr)
+{
+ int err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE1,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_pre1);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE3,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_pre3);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_ATTEN,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_atten);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_POST1,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_post1);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE2,
+ ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+ &ptr->tx_equalization_pre2);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE2,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_pre2);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE1,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_pre1);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_POST1,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_post1);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFLF,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_bflf);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFHF,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_bfhf);
+ if (err)
+ return err;
+
+ err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_DRATE,
+ ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+ &ptr->rx_equalization_drate);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/**
+ * ice_get_extended_regs - dump serdes equalization parameters for each
+ * serdes lane of the port
+ * @netdev: pointer to net device structure
+ * @p: output buffer to fill requested register dump
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_extended_regs(struct net_device *netdev, void *p)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_regdump_to_ethtool *ice_prv_regs_buf;
+ struct ice_port_topology port_topology = {};
+ struct ice_port_info *pi;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ unsigned int i;
+ int err;
+
+ pf = np->vsi->back;
+ hw = &pf->hw;
+ pi = np->vsi->port_info;
+
+ /* Serdes parameters are not supported if not the PF VSI */
+ if (np->vsi->type != ICE_VSI_PF || !pi)
+ return -EINVAL;
+
+ err = ice_get_port_topology(hw, pi->lport, &port_topology);
+ if (err)
+ return -EINVAL;
+ if (port_topology.serdes_lane_count > 4)
+ return -EINVAL;
+
+ ice_prv_regs_buf = p;
+
+ /* Get serdes equalization parameter for available serdes */
+ for (i = 0; i < port_topology.serdes_lane_count; i++) {
+ u8 serdes_num = 0;
+
+ serdes_num = port_topology.primary_serdes_lane + i;
+ err = ice_get_tx_rx_equa(hw, serdes_num,
+ &ice_prv_regs_buf->equalization[i]);
+ if (err)
+ return -EINVAL;
+ }
+
+ return 0;
}
static void
@@ -475,10 +822,12 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
u32 *regs_buf = (u32 *)p;
unsigned int i;
- regs->version = 1;
+ regs->version = 2;
for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
+
+ ice_get_extended_regs(netdev, (void *)&regs_buf[i]);
}
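With regs->version bumped to 2, the dump buffer is the fixed word list followed by the serdes equalization block; a sketch of the offset math a consumer of the dump could use, assuming the same ice_regs_dump_list as in this file:

	static struct ice_regdump_to_ethtool *
	ice_regs_extended_section(void *dump_buf)
	{
		u32 *regs_buf = dump_buf;

		/* extended block starts right after the fixed registers */
		return (struct ice_regdump_to_ethtool *)
			&regs_buf[ARRAY_SIZE(ice_regs_dump_list)];
	}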
static u32 ice_get_msglevel(struct net_device *netdev)
@@ -3434,7 +3783,7 @@ ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
}
static int
-ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
{
struct ice_pf *pf = ice_netdev_to_pf(dev);
@@ -3593,7 +3942,6 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
struct ice_pf *pf = vsi->back;
int new_rx = 0, new_tx = 0;
bool locked = false;
- u32 curr_combined;
int ret = 0;
/* do not support changing channels in Safe Mode */
@@ -3615,22 +3963,8 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
return -EOPNOTSUPP;
}
- curr_combined = ice_get_combined_cnt(vsi);
-
- /* these checks are for cases where user didn't specify a particular
- * value on cmd line but we get non-zero value anyway via
- * get_channels(); look at ethtool.c in ethtool repository (the user
- * space part), particularly, do_schannels() routine
- */
- if (ch->rx_count == vsi->num_rxq - curr_combined)
- ch->rx_count = 0;
- if (ch->tx_count == vsi->num_txq - curr_combined)
- ch->tx_count = 0;
- if (ch->combined_count == curr_combined)
- ch->combined_count = 0;
-
- if (!(ch->combined_count || (ch->rx_count && ch->tx_count))) {
- netdev_err(dev, "Please specify at least 1 Rx and 1 Tx channel\n");
+ if (ch->rx_count && ch->tx_count) {
+ netdev_err(dev, "Dedicated RX or TX channels cannot be used simultaneously\n");
return -EINVAL;
}
@@ -4297,6 +4631,94 @@ ice_get_module_eeprom(struct net_device *netdev,
return 0;
}
+/**
+ * ice_get_port_fec_stats - returns FEC correctable, uncorrectable stats per
+ * pcsquad, pcsport
+ * @hw: pointer to the HW struct
+ * @pcs_quad: pcsquad for input port
+ * @pcs_port: pcsport for input port
+ * @fec_stats: buffer to hold FEC statistics for given port
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 fec_uncorr_low_val = 0, fec_uncorr_high_val = 0;
+ u32 fec_corr_low_val = 0, fec_corr_high_val = 0;
+ int err;
+
+ if (pcs_quad > 1 || pcs_port > 3)
+ return -EINVAL;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_LOW,
+ &fec_corr_low_val);
+ if (err)
+ return err;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_HIGH,
+ &fec_corr_high_val);
+ if (err)
+ return err;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
+ ICE_FEC_UNCORR_LOW,
+ &fec_uncorr_low_val);
+ if (err)
+ return err;
+
+ err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
+ ICE_FEC_UNCORR_HIGH,
+ &fec_uncorr_high_val);
+ if (err)
+ return err;
+
+ fec_stats->corrected_blocks.total = (fec_corr_high_val << 16) +
+ fec_corr_low_val;
+ fec_stats->uncorrectable_blocks.total = (fec_uncorr_high_val << 16) +
+ fec_uncorr_low_val;
+ return 0;
+}
+
+/**
+ * ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev
+ * @netdev: network interface device structure
+ * @fec_stats: buffer to hold FEC statistics for given port
+ */
+static void ice_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_port_topology port_topology;
+ struct ice_port_info *pi;
+ struct ice_pf *pf;
+ struct ice_hw *hw;
+ int err;
+
+ pf = np->vsi->back;
+ hw = &pf->hw;
+ pi = np->vsi->port_info;
+
+ /* Serdes parameters are not supported if not the PF VSI */
+ if (np->vsi->type != ICE_VSI_PF || !pi)
+ return;
+
+ err = ice_get_port_topology(hw, pi->lport, &port_topology);
+ if (err) {
+ netdev_info(netdev, "Extended register dump failed Lport %d\n",
+ pi->lport);
+ return;
+ }
+
+ /* Get FEC correctable, uncorrectable counter */
+ err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select,
+ port_topology.pcs_port, fec_stats);
+ if (err)
+ netdev_info(netdev, "FEC stats get failed Lport %d Err %d\n",
+ pi->lport, err);
+}
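The FEC totals reported above are assembled from two 16-bit halves read over the AQ; a minimal sketch of the combination, mirroring the shifts in ice_get_port_fec_stats():

	static u32 ice_fec_combine(u32 high, u32 low)
	{
		return (high << 16) + low;
	}

For example, ice_fec_combine(0x0001, 0x86a0) yields 65536 + 34464 = 100000 corrected blocks.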
+
static const struct ethtool_ops ice_ethtool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
@@ -4305,6 +4727,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
.cap_rss_sym_xor_supported = true,
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
+ .get_fec_stats = ice_get_fec_stats,
.get_drvinfo = ice_get_drvinfo,
.get_regs_len = ice_get_regs_len,
.get_regs = ice_get_regs,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index b88e3da06f13..9acccae38625 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -9,6 +9,35 @@ struct ice_phy_type_to_ethtool {
u8 link_mode;
};
+struct ice_serdes_equalization_to_ethtool {
+ int rx_equalization_pre2;
+ int rx_equalization_pre1;
+ int rx_equalization_post1;
+ int rx_equalization_bflf;
+ int rx_equalization_bfhf;
+ int rx_equalization_drate;
+ int tx_equalization_pre1;
+ int tx_equalization_pre3;
+ int tx_equalization_atten;
+ int tx_equalization_post1;
+ int tx_equalization_pre2;
+};
+
+struct ice_regdump_to_ethtool {
+ /* A multilane port can have max 4 serdes */
+ struct ice_serdes_equalization_to_ethtool equalization[4];
+};
+
+/* Port topology for a given lport: serdes mapping, PCS quad, MAC port,
+ * cage, etc.
+ */
+struct ice_port_topology {
+ u16 pcs_port;
+ u16 primary_serdes_lane;
+ u16 serdes_lane_count;
+ u16 pcs_quad_select;
+};
+
/* Macro to make PHY type to Ethtool link mode table entry.
* The index is the PHY type.
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 9a1a04f5f146..5412eff8ef23 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -41,6 +41,8 @@ static struct in6_addr zero_ipv6_addr_mask = {
static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
{
switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_ETH:
+ return ETHER_FLOW;
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
return TCP_V4_FLOW;
case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
@@ -72,6 +74,8 @@ static int ice_fltr_to_ethtool_flow(enum ice_fltr_ptype flow)
static enum ice_fltr_ptype ice_ethtool_flow_to_fltr(int eth)
{
switch (eth) {
+ case ETHER_FLOW:
+ return ICE_FLTR_PTYPE_NONF_ETH;
case TCP_V4_FLOW:
return ICE_FLTR_PTYPE_NONF_IPV4_TCP;
case UDP_V4_FLOW:
@@ -137,6 +141,10 @@ int ice_get_ethtool_fdir_entry(struct ice_hw *hw, struct ethtool_rxnfc *cmd)
memset(&fsp->m_ext, 0, sizeof(fsp->m_ext));
switch (fsp->flow_type) {
+ case ETHER_FLOW:
+ fsp->h_u.ether_spec = rule->eth;
+ fsp->m_u.ether_spec = rule->eth_mask;
+ break;
case IPV4_USER_FLOW:
fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
fsp->h_u.usr_ip4_spec.proto = 0;
@@ -526,7 +534,7 @@ ice_parse_rx_flow_user_data(struct ethtool_rx_flow_spec *fsp,
*
* Returns the number of available flow director filters to this VSI
*/
-static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
+int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
{
u16 vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
u16 num_guar;
@@ -1194,6 +1202,122 @@ ice_set_fdir_ip6_usr_seg(struct ice_flow_seg_info *seg,
}
/**
+ * ice_fdir_vlan_valid - validate VLAN data for Flow Director rule
+ * @dev: network interface device structure
+ * @fsp: pointer to ethtool Rx flow specification
+ *
+ * Return: true if vlan data is valid, false otherwise
+ */
+static bool ice_fdir_vlan_valid(struct device *dev,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ if (fsp->m_ext.vlan_etype && !eth_type_vlan(fsp->h_ext.vlan_etype))
+ return false;
+
+ if (fsp->m_ext.vlan_tci && ntohs(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
+ return false;
+
+ /* proto and vlan must have vlan-etype defined */
+ if (fsp->m_u.ether_spec.h_proto && fsp->m_ext.vlan_tci &&
+ !fsp->m_ext.vlan_etype) {
+ dev_warn(dev, "Filter with proto and vlan require also vlan-etype");
+ return false;
+ }
+
+ return true;
+}
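A spec that passes ice_fdir_vlan_valid() pairs a fully-masked 802.1Q ethertype with an in-range TCI; a sketch of the corresponding ethtool flow specification, for illustration:

	struct ethtool_rx_flow_spec fsp_example = {
		.flow_type = ETHER_FLOW | FLOW_EXT,
		.h_ext.vlan_etype = htons(ETH_P_8021Q),
		.m_ext.vlan_etype = htons(0xffff),
		.h_ext.vlan_tci = htons(100),	/* must stay below VLAN_N_VID */
		.m_ext.vlan_tci = htons(0xffff),
	};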
+
+/**
+ * ice_set_ether_flow_seg - set address and protocol segments for ether flow
+ * @dev: network interface device structure
+ * @seg: flow segment for programming
+ * @eth_spec: mask data from ethtool
+ *
+ * Return: 0 on success and errno in case of error.
+ */
+static int ice_set_ether_flow_seg(struct device *dev,
+ struct ice_flow_seg_info *seg,
+ struct ethhdr *eth_spec)
+{
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_ETH);
+
+ /* empty rules are not valid */
+ if (is_zero_ether_addr(eth_spec->h_source) &&
+ is_zero_ether_addr(eth_spec->h_dest) &&
+ !eth_spec->h_proto)
+ return -EINVAL;
+
+ /* Ethertype */
+ if (eth_spec->h_proto == htons(0xFFFF)) {
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_TYPE,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ } else if (eth_spec->h_proto) {
+ dev_warn(dev, "Only 0x0000 or 0xffff proto mask is allowed for flow-type ether");
+ return -EOPNOTSUPP;
+ }
+
+ /* Source MAC address */
+ if (is_broadcast_ether_addr(eth_spec->h_source))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_SA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!is_zero_ether_addr(eth_spec->h_source))
+ goto err_mask;
+
+ /* Destination MAC address */
+ if (is_broadcast_ether_addr(eth_spec->h_dest))
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_ETH_DA,
+ ICE_FLOW_FLD_OFF_INVAL, ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ else if (!is_zero_ether_addr(eth_spec->h_dest))
+ goto err_mask;
+
+ return 0;
+
+err_mask:
+ dev_warn(dev, "Only 00:00:00:00:00:00 or ff:ff:ff:ff:ff:ff MAC address mask is allowed for flow-type ether");
+ return -EOPNOTSUPP;
+}
+
+/**
+ * ice_set_fdir_vlan_seg - set vlan segments for ether flow
+ * @seg: flow segment for programming
+ * @ext_masks: masks for additional RX flow fields
+ *
+ * Return: 0 on success and errno in case of error.
+ */
+static int
+ice_set_fdir_vlan_seg(struct ice_flow_seg_info *seg,
+ struct ethtool_flow_ext *ext_masks)
+{
+ ICE_FLOW_SET_HDRS(seg, ICE_FLOW_SEG_HDR_VLAN);
+
+ if (ext_masks->vlan_etype) {
+ if (ext_masks->vlan_etype != htons(0xFFFF))
+ return -EOPNOTSUPP;
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_S_VLAN,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ if (ext_masks->vlan_tci) {
+ if (ext_masks->vlan_tci != htons(0xFFFF))
+ return -EOPNOTSUPP;
+
+ ice_flow_set_fld(seg, ICE_FLOW_FIELD_IDX_C_VLAN,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL,
+ ICE_FLOW_FLD_OFF_INVAL, false);
+ }
+
+ return 0;
+}
+
+/**
* ice_cfg_fdir_xtrct_seq - Configure extraction sequence for the given filter
* @pf: PF structure
* @fsp: pointer to ethtool Rx flow specification
@@ -1209,7 +1333,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
struct device *dev = ice_pf_to_dev(pf);
enum ice_fltr_ptype fltr_idx;
struct ice_hw *hw = &pf->hw;
- bool perfect_filter;
+ bool perfect_filter = false;
int ret;
seg = devm_kzalloc(dev, sizeof(*seg), GFP_KERNEL);
@@ -1262,6 +1386,16 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
ret = ice_set_fdir_ip6_usr_seg(seg, &fsp->m_u.usr_ip6_spec,
&perfect_filter);
break;
+ case ETHER_FLOW:
+ ret = ice_set_ether_flow_seg(dev, seg, &fsp->m_u.ether_spec);
+ if (!ret && (fsp->m_ext.vlan_etype || fsp->m_ext.vlan_tci)) {
+ if (!ice_fdir_vlan_valid(dev, fsp)) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = ice_set_fdir_vlan_seg(seg, &fsp->m_ext);
+ }
+ break;
default:
ret = -EINVAL;
}
@@ -1823,6 +1957,10 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp,
input->mask.v6.tc = fsp->m_u.usr_ip6_spec.tclass;
input->mask.v6.proto = fsp->m_u.usr_ip6_spec.l4_proto;
break;
+ case ETHER_FLOW:
+ input->eth = fsp->h_u.ether_spec;
+ input->eth_mask = fsp->m_u.ether_spec;
+ break;
default:
/* not doing un-parsed flow types */
return -EINVAL;
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 5840c3e04a5b..26b357c0ae15 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -4,6 +4,8 @@
#include "ice_common.h"
/* These are training packet headers used to program flow director filters. */
+static const u8 ice_fdir_eth_pkt[22];
+
static const u8 ice_fdir_tcpv4_pkt[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x45, 0x00,
@@ -417,6 +419,11 @@ static const u8 ice_fdir_ip6_tun_pkt[] = {
/* Flow Director no-op training packet table */
static const struct ice_fdir_base_pkt ice_fdir_pkt[] = {
{
+ ICE_FLTR_PTYPE_NONF_ETH,
+ sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt,
+ sizeof(ice_fdir_eth_pkt), ice_fdir_eth_pkt,
+ },
+ {
ICE_FLTR_PTYPE_NONF_IPV4_TCP,
sizeof(ice_fdir_tcpv4_pkt), ice_fdir_tcpv4_pkt,
sizeof(ice_fdir_tcp4_tun_pkt), ice_fdir_tcp4_tun_pkt,
@@ -914,6 +921,21 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
* perspective. The input from user is from Rx filter perspective.
*/
switch (flow) {
+ case ICE_FLTR_PTYPE_NONF_ETH:
+ ice_pkt_insert_mac_addr(loc, input->eth.h_dest);
+ ice_pkt_insert_mac_addr(loc + ETH_ALEN, input->eth.h_source);
+ if (input->ext_data.vlan_tag || input->ext_data.vlan_type) {
+ ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET,
+ input->ext_data.vlan_type);
+ ice_pkt_insert_u16(loc, ICE_ETH_VLAN_TCI_OFFSET,
+ input->ext_data.vlan_tag);
+ ice_pkt_insert_u16(loc, ICE_ETH_TYPE_VLAN_OFFSET,
+ input->eth.h_proto);
+ } else {
+ ice_pkt_insert_u16(loc, ICE_ETH_TYPE_F_OFFSET,
+ input->eth.h_proto);
+ }
+ break;
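The three offsets used here map onto the training packet's Ethernet header as follows; a sketch mirroring the ICE_ETH_*_OFFSET definitions added in ice_fdir.h:

	/*
	 * Untagged frame:          802.1Q-tagged frame:
	 *   0..5   dst MAC           0..5   dst MAC
	 *   6..11  src MAC           6..11  src MAC
	 *   12..13 EtherType         12..13 VLAN TPID (vlan_type)
	 *                            14..15 VLAN TCI  (vlan_tag)
	 *                            16..17 EtherType (h_proto)
	 */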
case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
input->ip.v4.src_ip);
@@ -1189,52 +1211,58 @@ static int ice_cmp_ipv6_addr(__be32 *a, __be32 *b)
* ice_fdir_comp_rules - compare 2 filters
* @a: a Flow Director filter data structure
* @b: a Flow Director filter data structure
- * @v6: bool true if v6 filter
*
* Returns true if the filters match
*/
static bool
-ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b, bool v6)
+ice_fdir_comp_rules(struct ice_fdir_fltr *a, struct ice_fdir_fltr *b)
{
enum ice_fltr_ptype flow_type = a->flow_type;
/* The calling function already checks that the two filters have the
* same flow_type.
*/
- if (!v6) {
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP) {
- if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
- a->ip.v4.src_ip == b->ip.v4.src_ip &&
- a->ip.v4.dst_port == b->ip.v4.dst_port &&
- a->ip.v4.src_port == b->ip.v4.src_port)
- return true;
- } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER) {
- if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
- a->ip.v4.src_ip == b->ip.v4.src_ip &&
- a->ip.v4.l4_header == b->ip.v4.l4_header &&
- a->ip.v4.proto == b->ip.v4.proto &&
- a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
- a->ip.v4.tos == b->ip.v4.tos)
- return true;
- }
- } else {
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV6_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV6_SCTP) {
- if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
- a->ip.v6.src_port == b->ip.v6.src_port &&
- !ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
- b->ip.v6.dst_ip) &&
- !ice_cmp_ipv6_addr(a->ip.v6.src_ip,
- b->ip.v6.src_ip))
- return true;
- } else if (flow_type == ICE_FLTR_PTYPE_NONF_IPV6_OTHER) {
- if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
- a->ip.v6.src_port == b->ip.v6.src_port)
- return true;
- }
+ switch (flow_type) {
+ case ICE_FLTR_PTYPE_NONF_ETH:
+ if (!memcmp(&a->eth, &b->eth, sizeof(a->eth)))
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV4_SCTP:
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.dst_port == b->ip.v4.dst_port &&
+ a->ip.v4.src_port == b->ip.v4.src_port)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV4_OTHER:
+ if (a->ip.v4.dst_ip == b->ip.v4.dst_ip &&
+ a->ip.v4.src_ip == b->ip.v4.src_ip &&
+ a->ip.v4.l4_header == b->ip.v4.l4_header &&
+ a->ip.v4.proto == b->ip.v4.proto &&
+ a->ip.v4.ip_ver == b->ip.v4.ip_ver &&
+ a->ip.v4.tos == b->ip.v4.tos)
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_UDP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_TCP:
+ case ICE_FLTR_PTYPE_NONF_IPV6_SCTP:
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port &&
+ !ice_cmp_ipv6_addr(a->ip.v6.dst_ip,
+ b->ip.v6.dst_ip) &&
+ !ice_cmp_ipv6_addr(a->ip.v6.src_ip,
+ b->ip.v6.src_ip))
+ return true;
+ break;
+ case ICE_FLTR_PTYPE_NONF_IPV6_OTHER:
+ if (a->ip.v6.dst_port == b->ip.v6.dst_port &&
+ a->ip.v6.src_port == b->ip.v6.src_port)
+ return true;
+ break;
+ default:
+ break;
}
return false;
@@ -1253,19 +1281,10 @@ bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input)
bool ret = false;
list_for_each_entry(rule, &hw->fdir_list_head, fltr_node) {
- enum ice_fltr_ptype flow_type;
-
if (rule->flow_type != input->flow_type)
continue;
- flow_type = input->flow_type;
- if (flow_type == ICE_FLTR_PTYPE_NONF_IPV4_TCP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_UDP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_SCTP ||
- flow_type == ICE_FLTR_PTYPE_NONF_IPV4_OTHER)
- ret = ice_fdir_comp_rules(rule, input, false);
- else
- ret = ice_fdir_comp_rules(rule, input, true);
+ ret = ice_fdir_comp_rules(rule, input);
if (ret) {
if (rule->fltr_id == input->fltr_id &&
rule->q_index != input->q_index)
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index 1b9b84490689..ab5b118daa2d 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -8,6 +8,9 @@
#define ICE_FDIR_MAX_RAW_PKT_SIZE (512 + ICE_FDIR_TUN_PKT_OFF)
/* macros for offsets into packets for flow director programming */
+#define ICE_ETH_TYPE_F_OFFSET 12
+#define ICE_ETH_VLAN_TCI_OFFSET 14
+#define ICE_ETH_TYPE_VLAN_OFFSET 16
#define ICE_IPV4_SRC_ADDR_OFFSET 26
#define ICE_IPV4_DST_ADDR_OFFSET 30
#define ICE_IPV4_TCP_SRC_PORT_OFFSET 34
@@ -159,6 +162,8 @@ struct ice_fdir_fltr {
struct list_head fltr_node;
enum ice_fltr_ptype flow_type;
+ struct ethhdr eth, eth_mask;
+
union {
struct ice_fdir_v4 v4;
struct ice_fdir_v6 v6;
@@ -202,6 +207,8 @@ struct ice_fdir_base_pkt {
const u8 *tun_pkt;
};
+struct ice_vsi;
+
int ice_alloc_fd_res_cntr(struct ice_hw *hw, u16 *cntr_id);
int ice_free_fd_res_cntr(struct ice_hw *hw, u16 cntr_id);
int ice_alloc_fd_guar_item(struct ice_hw *hw, u16 *cntr_id, u16 num_fltr);
@@ -213,6 +220,7 @@ int
ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
u8 *pkt, bool frag, bool tun);
int ice_get_fdir_cnt_all(struct ice_hw *hw);
+int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi);
bool ice_fdir_is_dup_fltr(struct ice_hw *hw, struct ice_fdir_fltr *input);
bool ice_fdir_has_frag(enum ice_fltr_ptype flow);
struct ice_fdir_fltr *
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index d427a79d001a..817beca591e0 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -93,6 +93,7 @@ enum ice_tunnel_type {
TNL_GRETAP,
TNL_GTPC,
TNL_GTPU,
+ TNL_PFCP,
__TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
@@ -358,7 +359,8 @@ enum ice_prof_type {
ICE_PROF_TUN_GRE = 0x4,
ICE_PROF_TUN_GTPU = 0x8,
ICE_PROF_TUN_GTPC = 0x10,
- ICE_PROF_TUN_ALL = 0x1E,
+ ICE_PROF_TUN_PFCP = 0x20,
+ ICE_PROF_TUN_ALL = 0x3E,
ICE_PROF_ALL = 0xFF,
};
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 319a2d6fe26c..f81db6c107c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -286,10 +286,9 @@ ice_send_component_table(struct pldmfw *context, struct pldmfw_component *compon
*
* Returns: zero on success, or a negative error code on failure.
*/
-static int
-ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
- u16 block_size, u8 *block, bool last_cmd,
- u8 *reset_level, struct netlink_ext_ack *extack)
+int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ u8 *reset_level, struct netlink_ext_ack *extack)
{
u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.h b/drivers/net/ethernet/intel/ice/ice_fw_update.h
index 750574885716..04b200462757 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.h
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.h
@@ -9,5 +9,8 @@ int ice_devlink_flash_update(struct devlink *devlink,
struct netlink_ext_ack *extack);
int ice_get_pending_updates(struct ice_pf *pf, u8 *pending,
struct netlink_ext_ack *extack);
+int ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
+ u16 block_size, u8 *block, bool last_cmd,
+ u8 *reset_level, struct netlink_ext_ack *extack);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index cfac1d432c15..91cbae1eec89 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -157,6 +157,8 @@
#define GLGEN_RTRIG_CORER_M BIT(0)
#define GLGEN_RTRIG_GLOBR_M BIT(1)
#define GLGEN_STAT 0x000B612C
+#define GLGEN_SWITCH_MODE_CONFIG 0x000B81E0
+#define GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M BIT(2)
#define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4))
#define PFGEN_CTRL 0x00091000
#define PFGEN_CTRL_PFSWR_M BIT(0)
@@ -177,6 +179,8 @@
#define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24)
#define GLINT_CTL_ITR_GRAN_25_S 28
#define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28)
+#define GLGEN_MAC_LINK_TOPO 0x000B81DC
+#define GLGEN_MAC_LINK_TOPO_LINK_TOPO_M GENMASK(1, 0)
#define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4))
#define GLINT_DYN_CTL_INTENA_M BIT(0)
#define GLINT_DYN_CTL_CLEARPBA_M BIT(1)
diff --git a/drivers/net/ethernet/intel/ice/ice_hwmon.c b/drivers/net/ethernet/intel/ice/ice_hwmon.c
index e4c2c1bff6c0..b7aa6812510a 100644
--- a/drivers/net/ethernet/intel/ice/ice_hwmon.c
+++ b/drivers/net/ethernet/intel/ice/ice_hwmon.c
@@ -96,7 +96,7 @@ static bool ice_is_internal_reading_supported(struct ice_pf *pf)
unsigned long sensors = pf->hw.dev_caps.supported_sensors;
- return _test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
+ return test_bit(ICE_SENSOR_SUPPORT_E810_INT_TEMP_BIT, &sensors);
};
void ice_hwmon_init(struct ice_pf *pf)
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index f0e76f0a6d60..1ccb572ce285 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -202,11 +202,12 @@ static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag)
* @act: rule action
* @recipe_id: recipe id for the new rule
* @rule_idx: pointer to rule index
+ * @direction: ICE_FLTR_RX or ICE_FLTR_TX
* @add: boolean on whether we are adding filters
*/
static int
ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
- bool add)
+ u8 direction, bool add)
{
struct ice_sw_rule_lkup_rx_tx *s_rule;
u16 s_rule_sz, vsi_num;
@@ -231,9 +232,16 @@ ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M, vsi_num);
- s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
s_rule->recipe_id = cpu_to_le16(recipe_id);
- s_rule->src = cpu_to_le16(hw->port_info->lport);
+ if (direction == ICE_FLTR_RX) {
+ s_rule->hdr.type =
+ cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->src = cpu_to_le16(hw->port_info->lport);
+ } else {
+ s_rule->hdr.type =
+ cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ s_rule->src = cpu_to_le16(vsi_num);
+ }
s_rule->act = cpu_to_le32(act);
s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN);
opc = ice_aqc_opc_add_sw_rules;
@@ -266,9 +274,27 @@ ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add)
{
u32 act = ICE_SINGLE_ACT_VSI_FORWARDING |
ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE;
+ int err;
+
+ err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id,
+ ICE_FLTR_RX, add);
+ if (err)
+ goto err_rx;
- return ice_lag_cfg_fltr(lag, act, lag->pf_recipe,
- &lag->pf_rule_id, add);
+ act = ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT |
+ ICE_SINGLE_ACT_LB_ENABLE;
+ err = ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_tx_rule_id,
+ ICE_FLTR_TX, add);
+ if (err)
+ goto err_tx;
+
+ return 0;
+
+err_tx:
+ ice_lag_cfg_fltr(lag, act, lag->pf_recipe, &lag->pf_rx_rule_id,
+ ICE_FLTR_RX, !add);
+err_rx:
+ return err;
}
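Aside: the error handling above is the usual add-then-unwind idiom — if installing the second (Tx) rule fails, the Rx rule that already succeeded is removed by re-invoking the helper with !add. A stand-alone C sketch of the same shape, with hypothetical rule names:

#include <stdio.h>

static int cfg_rule(const char *name, int add)
{
	printf("%s %s rule\n", add ? "adding" : "removing", name);
	return 0;	/* a real helper can fail */
}

static int cfg_paired_rules(int add)
{
	int err;

	err = cfg_rule("rx", add);
	if (err)
		goto err_rx;

	err = cfg_rule("tx", add);
	if (err)
		goto err_tx;

	return 0;

err_tx:
	cfg_rule("rx", !add);	/* undo the step that succeeded */
err_rx:
	return err;
}

int main(void)
{
	return cfg_paired_rules(1);
}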
/**
@@ -284,7 +310,7 @@ ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add)
ICE_SINGLE_ACT_DROP;
return ice_lag_cfg_fltr(lag, act, lag->lport_recipe,
- &lag->lport_rule_idx, add);
+ &lag->lport_rule_idx, ICE_FLTR_RX, add);
}
/**
@@ -310,7 +336,7 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
dev = ice_pf_to_dev(lag->pf);
/* interface not active - remove old default VSI rule */
- if (bonding_info->slave.state && lag->pf_rule_id) {
+ if (bonding_info->slave.state && lag->pf_rx_rule_id) {
if (ice_lag_cfg_dflt_fltr(lag, false))
dev_err(dev, "Error removing old default VSI filter\n");
if (ice_lag_cfg_drop_fltr(lag, true))
@@ -319,7 +345,7 @@ ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
}
/* interface becoming active - add new default VSI rule */
- if (!bonding_info->slave.state && !lag->pf_rule_id) {
+ if (!bonding_info->slave.state && !lag->pf_rx_rule_id) {
if (ice_lag_cfg_dflt_fltr(lag, true))
dev_err(dev, "Error adding new default VSI filter\n");
if (lag->lport_rule_idx && ice_lag_cfg_drop_fltr(lag, false))
@@ -714,8 +740,7 @@ static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
pf = lag->pf;
ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
- pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF)
ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
}
@@ -953,8 +978,7 @@ ice_lag_reclaim_vf_nodes(struct ice_lag *lag, struct ice_hw *src_hw)
pf = lag->pf;
ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
- pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF)
ice_for_each_traffic_class(tc)
ice_lag_reclaim_vf_tc(lag, src_hw, i, tc);
}
@@ -1976,8 +2000,7 @@ ice_lag_move_vf_nodes_sync(struct ice_lag *lag, struct ice_hw *dest_hw)
pf = lag->pf;
ice_for_each_vsi(pf, i)
- if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
- pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ if (pf->vsi[i] && pf->vsi[i]->type == ICE_VSI_VF)
ice_for_each_traffic_class(tc)
ice_lag_move_vf_nodes_tc_sync(lag, dest_hw, i,
tc);
@@ -2149,7 +2172,7 @@ void ice_lag_rebuild(struct ice_pf *pf)
ice_lag_cfg_cp_fltr(lag, true);
- if (lag->pf_rule_id)
+ if (lag->pf_rx_rule_id)
if (ice_lag_cfg_dflt_fltr(lag, true))
dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 183b38792ef2..bab2c83142a1 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -43,7 +43,8 @@ struct ice_lag {
u8 primary:1; /* this is primary */
u16 pf_recipe;
u16 lport_recipe;
- u16 pf_rule_id;
+ u16 pf_rx_rule_id;
+ u16 pf_tx_rule_id;
u16 cp_rule_idx;
u16 lport_rule_idx;
u8 role;
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index d384ddfcb83e..611577ebc29d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -160,64 +160,6 @@ struct ice_fltr_desc {
(0x1ULL << ICE_FXD_FLTR_WB_QW1_FAIL_PROF_S)
#define ICE_FXD_FLTR_WB_QW1_FAIL_PROF_YES 0x1ULL
-struct ice_rx_ptype_decoded {
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:2;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
-};
-
-enum ice_rx_ptype_outer_ip {
- ICE_RX_PTYPE_OUTER_L2 = 0,
- ICE_RX_PTYPE_OUTER_IP = 1,
-};
-
-enum ice_rx_ptype_outer_ip_ver {
- ICE_RX_PTYPE_OUTER_NONE = 0,
- ICE_RX_PTYPE_OUTER_IPV4 = 1,
- ICE_RX_PTYPE_OUTER_IPV6 = 2,
-};
-
-enum ice_rx_ptype_outer_fragmented {
- ICE_RX_PTYPE_NOT_FRAG = 0,
- ICE_RX_PTYPE_FRAG = 1,
-};
-
-enum ice_rx_ptype_tunnel_type {
- ICE_RX_PTYPE_TUNNEL_NONE = 0,
- ICE_RX_PTYPE_TUNNEL_IP_IP = 1,
- ICE_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- ICE_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum ice_rx_ptype_tunnel_end_prot {
- ICE_RX_PTYPE_TUNNEL_END_NONE = 0,
- ICE_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- ICE_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum ice_rx_ptype_inner_prot {
- ICE_RX_PTYPE_INNER_PROT_NONE = 0,
- ICE_RX_PTYPE_INNER_PROT_UDP = 1,
- ICE_RX_PTYPE_INNER_PROT_TCP = 2,
- ICE_RX_PTYPE_INNER_PROT_SCTP = 3,
- ICE_RX_PTYPE_INNER_PROT_ICMP = 4,
- ICE_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
-};
-
-enum ice_rx_ptype_payload_layer {
- ICE_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- ICE_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
/* Rx Flex Descriptor
* This descriptor is used instead of the legacy version descriptor when
* ice_rlan_ctx.adv_desc is set
@@ -651,266 +593,4 @@ struct ice_tlan_ctx {
u8 int_q_state; /* width not needed - internal - DO NOT WRITE!!! */
};
-/* The ice_ptype_lkup table is used to convert from the 10-bit ptype in the
- * hardware to a bit-field that can be used by SW to more easily determine the
- * packet type.
- *
- * Macros are used to shorten the table lines and make this table human
- * readable.
- *
- * We store the PTYPE in the top byte of the bit field - this is just so that
- * we can check that the table doesn't have a row missing, as the index into
- * the table should be the PTYPE.
- *
- * Typical work flow:
- *
- * IF NOT ice_ptype_lkup[ptype].known
- * THEN
- * Packet is unknown
- * ELSE IF ice_ptype_lkup[ptype].outer_ip == ICE_RX_PTYPE_OUTER_IP
- * Use the rest of the fields to look at the tunnels, inner protocols, etc
- * ELSE
- * Use the enum ice_rx_l2_ptype to decode the packet type
- * ENDIF
- */
-#define ICE_PTYPES \
- /* L2 Packet types */ \
- ICE_PTT_UNUSED_ENTRY(0), \
- ICE_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), \
- ICE_PTT_UNUSED_ENTRY(2), \
- ICE_PTT_UNUSED_ENTRY(3), \
- ICE_PTT_UNUSED_ENTRY(4), \
- ICE_PTT_UNUSED_ENTRY(5), \
- ICE_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
- ICE_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
- ICE_PTT_UNUSED_ENTRY(8), \
- ICE_PTT_UNUSED_ENTRY(9), \
- ICE_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
- ICE_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), \
- ICE_PTT_UNUSED_ENTRY(12), \
- ICE_PTT_UNUSED_ENTRY(13), \
- ICE_PTT_UNUSED_ENTRY(14), \
- ICE_PTT_UNUSED_ENTRY(15), \
- ICE_PTT_UNUSED_ENTRY(16), \
- ICE_PTT_UNUSED_ENTRY(17), \
- ICE_PTT_UNUSED_ENTRY(18), \
- ICE_PTT_UNUSED_ENTRY(19), \
- ICE_PTT_UNUSED_ENTRY(20), \
- ICE_PTT_UNUSED_ENTRY(21), \
- \
- /* Non Tunneled IPv4 */ \
- ICE_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), \
- ICE_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), \
- ICE_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(25), \
- ICE_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), \
- ICE_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), \
- ICE_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> IPv4 */ \
- ICE_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(32), \
- ICE_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> IPv6 */ \
- ICE_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(39), \
- ICE_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT */ \
- ICE_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
- \
- /* IPv4 --> GRE/NAT --> IPv4 */ \
- ICE_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(47), \
- ICE_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT --> IPv6 */ \
- ICE_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(54), \
- ICE_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT --> MAC */ \
- ICE_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
- \
- /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ \
- ICE_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(62), \
- ICE_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ \
- ICE_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(69), \
- ICE_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv4 --> GRE/NAT --> MAC/VLAN */ \
- ICE_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
- \
- /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ \
- ICE_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(77), \
- ICE_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ \
- ICE_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(84), \
- ICE_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), \
- \
- /* Non Tunneled IPv6 */ \
- ICE_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), \
- ICE_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), \
- ICE_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(91), \
- ICE_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), \
- ICE_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), \
- ICE_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> IPv4 */ \
- ICE_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(98), \
- ICE_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> IPv6 */ \
- ICE_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(105), \
- ICE_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT */ \
- ICE_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), \
- \
- /* IPv6 --> GRE/NAT -> IPv4 */ \
- ICE_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(113), \
- ICE_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> IPv6 */ \
- ICE_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(120), \
- ICE_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> MAC */ \
- ICE_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), \
- \
- /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ \
- ICE_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(128), \
- ICE_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ \
- ICE_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(135), \
- ICE_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> MAC/VLAN */ \
- ICE_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), \
- \
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ \
- ICE_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), \
- ICE_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), \
- ICE_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(143), \
- ICE_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), \
- ICE_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), \
- ICE_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), \
- \
- /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ \
- ICE_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), \
- ICE_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), \
- ICE_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), \
- ICE_PTT_UNUSED_ENTRY(150), \
- ICE_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), \
- ICE_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), \
- ICE_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4),
-
-#define ICE_NUM_DEFINED_PTYPES 154
-
-/* macro to make the table lines short, use explicit indexing with [PTYPE] */
-#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = { \
- 1, \
- ICE_RX_PTYPE_OUTER_##OUTER_IP, \
- ICE_RX_PTYPE_OUTER_##OUTER_IP_VER, \
- ICE_RX_PTYPE_##OUTER_FRAG, \
- ICE_RX_PTYPE_TUNNEL_##T, \
- ICE_RX_PTYPE_TUNNEL_END_##TE, \
- ICE_RX_PTYPE_##TEF, \
- ICE_RX_PTYPE_INNER_PROT_##I, \
- ICE_RX_PTYPE_PAYLOAD_LAYER_##PL }
-
-#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-
-/* shorter macros makes the table fit but are terse */
-#define ICE_RX_PTYPE_NOF ICE_RX_PTYPE_NOT_FRAG
-#define ICE_RX_PTYPE_FRG ICE_RX_PTYPE_FRAG
-
-/* Lookup table mapping in the 10-bit HW PTYPE to the bit field for decoding */
-static const struct ice_rx_ptype_decoded ice_ptype_lkup[BIT(10)] = {
- ICE_PTYPES
-
- /* unused entries */
- [ICE_NUM_DEFINED_PTYPES ... 1023] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 }
-};
-
-static inline struct ice_rx_ptype_decoded ice_decode_rx_desc_ptype(u16 ptype)
-{
- return ice_ptype_lkup[ptype];
-}
-
-
#endif /* _ICE_LAN_TX_RX_H_ */
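Aside: the ptype machinery deleted above follows a decode-by-lookup pattern — a sparse, designated-initializer array indexed by the 10-bit hardware ptype, with unlisted entries left zeroed so they read back as "unknown" (its role is apparently taken over by a shared decoder; note the MODULE_IMPORT_NS(LIBIE) addition later in this diff). A stand-alone sketch of the pattern, with made-up field encodings:

#include <stdio.h>
#include <stdint.h>

struct ptype_decoded {
	uint32_t known:1;
	uint32_t outer_ip:1;
	uint32_t inner_prot:4;
};

enum { INNER_NONE, INNER_UDP, INNER_TCP };

static const struct ptype_decoded ptype_lkup[1024] = {
	[23] = { .known = 1, .outer_ip = 1, .inner_prot = INNER_NONE },
	[24] = { .known = 1, .outer_ip = 1, .inner_prot = INNER_UDP },
	[26] = { .known = 1, .outer_ip = 1, .inner_prot = INNER_TCP },
	/* every index not listed decodes as "unknown" */
};

int main(void)
{
	uint16_t ptype = 26;	/* as read from an Rx descriptor */
	struct ptype_decoded d = ptype_lkup[ptype & 1023];

	printf("known=%u outer_ip=%u inner_prot=%u\n",
	       d.known, d.outer_ip, d.inner_prot);
	return 0;
}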
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 558422120312..f559e60992fa 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -7,7 +7,6 @@
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
-#include "ice_devlink.h"
#include "ice_vsi_vlan_ops.h"
/**
@@ -27,8 +26,6 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_CHNL";
case ICE_VSI_LB:
return "ICE_VSI_LB";
- case ICE_VSI_SWITCHDEV_CTRL:
- return "ICE_VSI_SWITCHDEV_CTRL";
default:
return "unknown";
}
@@ -117,14 +114,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
if (!vsi->q_vectors)
goto err_vectors;
- vsi->af_xdp_zc_qps = bitmap_zalloc(max_t(int, vsi->alloc_txq, vsi->alloc_rxq), GFP_KERNEL);
- if (!vsi->af_xdp_zc_qps)
- goto err_zc_qps;
-
return 0;
-err_zc_qps:
- devm_kfree(dev, vsi->q_vectors);
err_vectors:
devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
@@ -144,7 +135,6 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
- case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
@@ -211,21 +201,6 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
break;
- case ICE_VSI_SWITCHDEV_CTRL:
- /* The number of queues for ctrl VSI is equal to number of PRs
- * Each ring is associated to the corresponding VF_PR netdev.
- * Tx and Rx rings are always equal
- */
- if (vsi->req_txq && vsi->req_rxq) {
- vsi->alloc_txq = vsi->req_txq;
- vsi->alloc_rxq = vsi->req_rxq;
- } else {
- vsi->alloc_txq = 1;
- vsi->alloc_rxq = 1;
- }
-
- vsi->num_q_vectors = 1;
- break;
case ICE_VSI_VF:
if (vf->num_req_qs)
vf->num_vf_qs = vf->num_req_qs;
@@ -328,8 +303,6 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
- bitmap_free(vsi->af_xdp_zc_qps);
- vsi->af_xdp_zc_qps = NULL;
/* free the ring and vector containers */
devm_kfree(dev, vsi->q_vectors);
vsi->q_vectors = NULL;
@@ -522,22 +495,6 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
-{
- struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
- struct ice_pf *pf = q_vector->vsi->back;
- struct ice_repr *repr;
- unsigned long id;
-
- if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
- return IRQ_HANDLED;
-
- xa_for_each(&pf->eswitch.reprs, id, repr)
- napi_schedule(&repr->q_vector->napi);
-
- return IRQ_HANDLED;
-}
-
/**
* ice_vsi_alloc_stat_arrays - Allocate statistics arrays
* @vsi: VSI pointer
@@ -600,10 +557,6 @@ ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
}
switch (vsi->type) {
- case ICE_VSI_SWITCHDEV_CTRL:
- /* Setup eswitch MSIX irq handler for VSI */
- vsi->irq_handler = ice_eswitch_msix_clean_rings;
- break;
case ICE_VSI_PF:
/* Setup default MSIX irq handler for VSI */
vsi->irq_handler = ice_msix_clean_rings;
@@ -933,11 +886,6 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
max_rss_size);
vsi->rss_lut_type = ICE_LUT_PF;
break;
- case ICE_VSI_SWITCHDEV_CTRL:
- vsi->rss_table_size = ICE_LUT_VSI_SIZE;
- vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
- vsi->rss_lut_type = ICE_LUT_VSI;
- break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
* For VSI_LUT, LUT size should be set to 64 bytes.
@@ -1263,7 +1211,6 @@ static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
- case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_CHNL:
ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
break;
@@ -2145,7 +2092,6 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
case ICE_VSI_CHNL:
case ICE_VSI_LB:
case ICE_VSI_PF:
- case ICE_VSI_SWITCHDEV_CTRL:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0];
@@ -2273,10 +2219,8 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
/**
* ice_vsi_cfg_def - configure default VSI based on the type
* @vsi: pointer to VSI
- * @params: the parameters to configure this VSI with
*/
-static int
-ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+static int ice_vsi_cfg_def(struct ice_vsi *vsi)
{
struct device *dev = ice_pf_to_dev(vsi->back);
struct ice_pf *pf = vsi->back;
@@ -2284,7 +2228,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
vsi->vsw = pf->first_sw;
- ret = ice_vsi_alloc_def(vsi, params->ch);
+ ret = ice_vsi_alloc_def(vsi, vsi->ch);
if (ret)
return ret;
@@ -2309,7 +2253,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
ice_vsi_set_tc_cfg(vsi);
/* create the VSI */
- ret = ice_vsi_init(vsi, params->flags);
+ ret = ice_vsi_init(vsi, vsi->flags);
if (ret)
goto unroll_get_qs;
@@ -2317,7 +2261,6 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
switch (vsi->type) {
case ICE_VSI_CTRL:
- case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2331,22 +2274,23 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
if (ret)
goto unroll_vector_base;
- ice_vsi_map_rings_to_vectors(vsi);
-
- /* Associate q_vector rings to napi */
- ice_vsi_set_napi_queues(vsi);
-
- vsi->stat_offsets_loaded = false;
-
if (ice_is_xdp_ena_vsi(vsi)) {
ret = ice_vsi_determine_xdp_res(vsi);
if (ret)
goto unroll_vector_base;
- ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
+ ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
+ ICE_XDP_CFG_PART);
if (ret)
goto unroll_vector_base;
}
+ ice_vsi_map_rings_to_vectors(vsi);
+
+ /* Associate q_vector rings to napi */
+ ice_vsi_set_napi_queues(vsi);
+
+ vsi->stat_offsets_loaded = false;
+
/* ICE_VSI_CTRL does not need RSS so skip RSS processing */
if (vsi->type != ICE_VSI_CTRL)
/* Do not exit if configuring RSS had an issue, at
@@ -2430,23 +2374,16 @@ unroll_vsi_alloc:
/**
* ice_vsi_cfg - configure a previously allocated VSI
* @vsi: pointer to VSI
- * @params: parameters used to configure this VSI
*/
-int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
+int ice_vsi_cfg(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
int ret;
- if (WARN_ON(params->type == ICE_VSI_VF && !params->vf))
+ if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
- vsi->type = params->type;
- vsi->port_info = params->pi;
-
- /* For VSIs which don't have a connected VF, this will be NULL */
- vsi->vf = params->vf;
-
- ret = ice_vsi_cfg_def(vsi, params);
+ ret = ice_vsi_cfg_def(vsi);
if (ret)
return ret;
@@ -2493,7 +2430,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
/* return value check can be skipped here, it always returns
* 0 if reset is in progress
*/
- ice_destroy_xdp_rings(vsi);
+ ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
ice_vsi_clear_rings(vsi);
ice_vsi_free_q_vectors(vsi);
@@ -2532,7 +2469,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
* a port_info structure for it.
*/
if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
- WARN_ON(!params->pi))
+ WARN_ON(!params->port_info))
return NULL;
vsi = ice_vsi_alloc(pf);
@@ -2541,7 +2478,8 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
return NULL;
}
- ret = ice_vsi_cfg(vsi, params);
+ vsi->params = *params;
+ ret = ice_vsi_cfg(vsi);
if (ret)
goto err_vsi_cfg;
@@ -2642,8 +2580,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi)
if (!IS_ENABLED(CONFIG_RFS_ACCEL))
irq_set_affinity_notifier(irq_num, NULL);
- /* clear the affinity_mask in the IRQ descriptor */
- irq_set_affinity_hint(irq_num, NULL);
+ /* clear the affinity_hint in the IRQ descriptor */
+ irq_update_affinity_hint(irq_num, NULL);
synchronize_irq(irq_num);
devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
}
@@ -2750,8 +2688,7 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
} else {
ice_vsi_close(vsi);
}
- } else if (vsi->type == ICE_VSI_CTRL ||
- vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
+ } else if (vsi->type == ICE_VSI_CTRL) {
ice_vsi_close(vsi);
}
}
@@ -3089,7 +3026,6 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
*/
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
{
- struct ice_vsi_cfg_params params = {};
struct ice_coalesce_stored *coalesce;
int prev_num_q_vectors;
struct ice_pf *pf;
@@ -3098,9 +3034,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
if (!vsi)
return -EINVAL;
- params = ice_vsi_to_params(vsi);
- params.flags = vsi_flags;
-
+ vsi->flags = vsi_flags;
pf = vsi->back;
if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
return -EINVAL;
@@ -3110,7 +3044,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
goto err_vsi_cfg;
ice_vsi_decfg(vsi);
- ret = ice_vsi_cfg_def(vsi, &params);
+ ret = ice_vsi_cfg_def(vsi);
if (ret)
goto err_vsi_cfg;
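Aside: the shape of the ice_vsi_cfg() refactor above — configuration inputs move from a per-call parameter struct into the object itself, so rebuild paths can rerun configuration without reconstructing the parameters. A stand-alone sketch with hypothetical types:

struct cfg { int type; unsigned int flags; };
struct obj { struct cfg params; int configured; };

static int obj_cfg(struct obj *o)
{
	/* reads o->params instead of taking a cfg argument */
	o->configured = 1;
	return 0;
}

static int obj_setup(struct obj *o, const struct cfg *params)
{
	o->params = *params;	/* capture once, at creation */
	return obj_cfg(o);
}

static int obj_rebuild(struct obj *o, unsigned int flags)
{
	o->params.flags = flags;	/* update only what changed */
	return obj_cfg(o);		/* no params struct rebuilt */
}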
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 9cd23afe5f15..94ce8964dda6 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -11,43 +11,6 @@
#define ICE_VSI_FLAG_INIT BIT(0)
#define ICE_VSI_FLAG_NO_INIT 0
-/**
- * struct ice_vsi_cfg_params - VSI configuration parameters
- * @pi: pointer to the port_info instance for the VSI
- * @ch: pointer to the channel structure for the VSI, may be NULL
- * @vf: pointer to the VF associated with this VSI, may be NULL
- * @type: the type of VSI to configure
- * @flags: VSI flags used for rebuild and configuration
- *
- * Parameter structure used when configuring a new VSI.
- */
-struct ice_vsi_cfg_params {
- struct ice_port_info *pi;
- struct ice_channel *ch;
- struct ice_vf *vf;
- enum ice_vsi_type type;
- u32 flags;
-};
-
-/**
- * ice_vsi_to_params - Get parameters for an existing VSI
- * @vsi: the VSI to get parameters for
- *
- * Fill a parameter structure for reconfiguring a VSI with its current
- * parameters, such as during a rebuild operation.
- */
-static inline struct ice_vsi_cfg_params ice_vsi_to_params(struct ice_vsi *vsi)
-{
- struct ice_vsi_cfg_params params = {};
-
- params.pi = vsi->port_info;
- params.ch = vsi->ch;
- params.vf = vsi->vf;
- params.type = vsi->type;
-
- return params;
-}
-
const char *ice_vsi_type_str(enum ice_vsi_type vsi_type);
bool ice_pf_state_is_nominal(struct ice_pf *pf);
@@ -101,7 +64,7 @@ void ice_vsi_decfg(struct ice_vsi *vsi);
void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags);
-int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params);
+int ice_vsi_cfg(struct ice_vsi *vsi);
bool ice_is_reset_in_progress(unsigned long *state);
int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 33a164fa325a..6f97ed471fe9 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -13,7 +13,8 @@
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
+#include "devlink/devlink_port.h"
#include "ice_hwmon.h"
/* Including ice_trace.h with CREATE_TRACE_POINTS defined will generate the
* ice tracepoint functions. This must be done exactly once across the
@@ -34,8 +35,8 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
#define ICE_DDP_PKG_PATH "intel/ice/ddp/"
#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_IMPORT_NS(LIBIE);
MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
@@ -558,6 +559,8 @@ ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
return;
+ synchronize_irq(pf->oicr_irq.virq);
+
ice_unplug_aux_dev(pf);
/* Notify VFs of impending reset */
@@ -621,7 +624,7 @@ skip:
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, false);
set_bit(ICE_PREPARED_FOR_RESET, pf->state);
}
@@ -803,6 +806,9 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
}
switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_200GB:
+ speed = "200 G";
+ break;
case ICE_AQ_LINK_SPEED_100GB:
speed = "100 G";
break;
@@ -1745,6 +1751,39 @@ static void ice_service_timer(struct timer_list *t)
}
/**
+ * ice_mdd_maybe_reset_vf - reset VF after MDD event
+ * @pf: pointer to the PF structure
+ * @vf: pointer to the VF structure
+ * @reset_vf_tx: whether Tx MDD has occurred
+ * @reset_vf_rx: whether Rx MDD has occurred
+ *
+ * Since the queue can get stuck on VF MDD events, the PF can be configured to
+ * automatically reset the VF by enabling the private ethtool flag
+ * mdd-auto-reset-vf.
+ */
+static void ice_mdd_maybe_reset_vf(struct ice_pf *pf, struct ice_vf *vf,
+ bool reset_vf_tx, bool reset_vf_rx)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
+ return;
+
+ /* VF MDD event counters will be cleared by reset, so print the event
+ * prior to reset.
+ */
+ if (reset_vf_tx)
+ ice_print_vf_tx_mdd_event(vf);
+
+ if (reset_vf_rx)
+ ice_print_vf_rx_mdd_event(vf);
+
+ dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
+ pf->hw.pf_id, vf->vf_id);
+ ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
+}
+
+/**
* ice_handle_mdd_event - handle malicious driver detect event
* @pf: pointer to the PF structure
*
@@ -1837,6 +1876,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
*/
mutex_lock(&pf->vfs.table_lock);
ice_for_each_vf(pf, bkt, vf) {
+ bool reset_vf_tx = false, reset_vf_rx = false;
+
reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
if (reg & VP_MDET_TX_PQM_VALID_M) {
wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
@@ -1845,6 +1886,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on VF %d\n",
vf->vf_id);
+
+ reset_vf_tx = true;
}
reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
@@ -1855,6 +1898,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on VF %d\n",
vf->vf_id);
+
+ reset_vf_tx = true;
}
reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
@@ -1865,6 +1910,8 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TDPU detected on VF %d\n",
vf->vf_id);
+
+ reset_vf_tx = true;
}
reg = rd32(hw, VP_MDET_RX(vf->vf_id));
@@ -1876,18 +1923,12 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event RX detected on VF %d\n",
vf->vf_id);
- /* Since the queue is disabled on VF Rx MDD events, the
- * PF can be configured to reset the VF through ethtool
- * private flag mdd-auto-reset-vf.
- */
- if (test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)) {
- /* VF MDD event counters will be cleared by
- * reset, so print the event prior to reset.
- */
- ice_print_vf_rx_mdd_event(vf);
- ice_reset_vf(vf, ICE_VF_RESET_LOCK);
- }
+ reset_vf_rx = true;
}
+
+ if (reset_vf_tx || reset_vf_rx)
+ ice_mdd_maybe_reset_vf(pf, vf, reset_vf_tx,
+ reset_vf_rx);
}
mutex_unlock(&pf->vfs.table_lock);
@@ -2570,7 +2611,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
}
/* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+ irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);
}
err = ice_set_cpu_rx_rmap(vsi);
@@ -2588,7 +2629,7 @@ free_q_irqs:
irq_num = vsi->q_vectors[vector]->irq.virq;
if (!IS_ENABLED(CONFIG_RFS_ACCEL))
irq_set_affinity_notifier(irq_num, NULL);
- irq_set_affinity_hint(irq_num, NULL);
+ irq_update_affinity_hint(irq_num, NULL);
devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
}
return err;
@@ -2670,17 +2711,72 @@ static void ice_vsi_assign_bpf_prog(struct ice_vsi *vsi, struct bpf_prog *prog)
bpf_prog_put(old_prog);
}
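+/* Return the XDP Tx ring serving Rx queue @qid: when the XDP locking
+ * static key is enabled the rings are shared, so pick one by qid;
+ * otherwise return the XDP ring attached to the Rx queue's own vector,
+ * or NULL if that vector has none.
+ */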
+static struct ice_tx_ring *ice_xdp_ring_from_qid(struct ice_vsi *vsi, int qid)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_tx_ring *ring;
+
+ if (static_key_enabled(&ice_xdp_locking_key))
+ return vsi->xdp_rings[qid % vsi->num_xdp_txq];
+
+ q_vector = vsi->rx_rings[qid]->q_vector;
+ ice_for_each_tx_ring(ring, q_vector->tx)
+ if (ice_ring_is_xdp(ring))
+ return ring;
+
+ return NULL;
+}
+
+/**
+ * ice_map_xdp_rings - Map XDP rings to interrupt vectors
+ * @vsi: the VSI with XDP rings being configured
+ *
+ * Map XDP rings to interrupt vectors and perform the configuration steps
+ * dependent on the mapping.
+ */
+void ice_map_xdp_rings(struct ice_vsi *vsi)
+{
+ int xdp_rings_rem = vsi->num_xdp_txq;
+ int v_idx, q_idx;
+
+ /* follow the logic from ice_vsi_map_rings_to_vectors */
+ ice_for_each_q_vector(vsi, v_idx) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
+ int xdp_rings_per_v, q_id, q_base;
+
+ xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
+ vsi->num_q_vectors - v_idx);
+ q_base = vsi->num_xdp_txq - xdp_rings_rem;
+
+ for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
+
+ xdp_ring->q_vector = q_vector;
+ xdp_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = xdp_ring;
+ }
+ xdp_rings_rem -= xdp_rings_per_v;
+ }
+
+ ice_for_each_rxq(vsi, q_idx) {
+ vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
+ q_idx);
+ ice_tx_xsk_pool(vsi, q_idx);
+ }
+}
+
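Aside: the ring-to-vector spread above hands out the remainder to the earlier vectors via DIV_ROUND_UP against the vectors still unserved. A stand-alone demo of the same arithmetic (kernel macro replaced by a local definition):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int num_xdp_txq = 10, num_q_vectors = 4;
	int xdp_rings_rem = num_xdp_txq;
	int v_idx;

	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
		int per_v = DIV_ROUND_UP(xdp_rings_rem,
					 num_q_vectors - v_idx);
		int q_base = num_xdp_txq - xdp_rings_rem;

		printf("vector %d gets rings [%d..%d]\n",
		       v_idx, q_base, q_base + per_v - 1);
		xdp_rings_rem -= per_v;
	}
	return 0;	/* 10 rings over 4 vectors: 3, 3, 2, 2 */
}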
/**
* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP
* @vsi: VSI to bring up Tx rings used by XDP
* @prog: bpf program that will be assigned to VSI
+ * @cfg_type: create from scratch or restore the existing configuration
*
* Return 0 on success and negative value on error
*/
-int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
+int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog,
+ enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- int xdp_rings_rem = vsi->num_xdp_txq;
struct ice_pf *pf = vsi->back;
struct ice_qs_cfg xdp_qs_cfg = {
.qs_mutex = &pf->avail_q_mutex,
@@ -2693,8 +2789,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
.mapping_mode = ICE_VSI_MAP_CONTIG
};
struct device *dev;
- int i, v_idx;
- int status;
+ int status, i;
dev = ice_pf_to_dev(pf);
vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
@@ -2713,49 +2808,15 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
if (ice_xdp_alloc_setup_rings(vsi))
goto clear_xdp_rings;
- /* follow the logic from ice_vsi_map_rings_to_vectors */
- ice_for_each_q_vector(vsi, v_idx) {
- struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- int xdp_rings_per_v, q_id, q_base;
-
- xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
- vsi->num_q_vectors - v_idx);
- q_base = vsi->num_xdp_txq - xdp_rings_rem;
-
- for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
- struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
-
- xdp_ring->q_vector = q_vector;
- xdp_ring->next = q_vector->tx.tx_ring;
- q_vector->tx.tx_ring = xdp_ring;
- }
- xdp_rings_rem -= xdp_rings_per_v;
- }
-
- ice_for_each_rxq(vsi, i) {
- if (static_key_enabled(&ice_xdp_locking_key)) {
- vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
- } else {
- struct ice_q_vector *q_vector = vsi->rx_rings[i]->q_vector;
- struct ice_tx_ring *ring;
-
- ice_for_each_tx_ring(ring, q_vector->tx) {
- if (ice_ring_is_xdp(ring)) {
- vsi->rx_rings[i]->xdp_ring = ring;
- break;
- }
- }
- }
- ice_tx_xsk_pool(vsi, i);
- }
-
/* omit the scheduler update if in reset path; XDP queues will be
* taken into account at the end of ice_vsi_rebuild, where
* ice_cfg_vsi_lan is being called
*/
- if (ice_is_reset_in_progress(pf->state))
+ if (cfg_type == ICE_XDP_CFG_PART)
return 0;
+ ice_map_xdp_rings(vsi);
+
/* tell the Tx scheduler that right now we have
* additional queues
*/
@@ -2805,22 +2866,21 @@ err_map_xdp:
/**
* ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings
* @vsi: VSI to remove XDP rings
+ * @cfg_type: disable XDP permanently or allow it to be restored later
*
* Detach XDP rings from irq vectors, clean up the PF bitmap and free
* resources
*/
-int ice_destroy_xdp_rings(struct ice_vsi *vsi)
+int ice_destroy_xdp_rings(struct ice_vsi *vsi, enum ice_xdp_cfg cfg_type)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_pf *pf = vsi->back;
int i, v_idx;
/* q_vectors are freed in reset path so there's no point in detaching
- * rings; in case of rebuild being triggered not from reset bits
- * in pf->state won't be set, so additionally check first q_vector
- * against NULL
+ * rings
*/
- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ if (cfg_type == ICE_XDP_CFG_PART)
goto free_qmap;
ice_for_each_q_vector(vsi, v_idx) {
@@ -2861,7 +2921,7 @@ free_qmap:
if (static_key_enabled(&ice_xdp_locking_key))
static_branch_dec(&ice_xdp_locking_key);
- if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
+ if (cfg_type == ICE_XDP_CFG_PART)
return 0;
ice_vsi_assign_bpf_prog(vsi, NULL);
@@ -2890,7 +2950,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
ice_for_each_rxq(vsi, i) {
struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
- if (rx_ring->xsk_pool)
+ if (READ_ONCE(rx_ring->xsk_pool))
napi_schedule(&rx_ring->q_vector->napi);
}
}
@@ -2972,7 +3032,8 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
if (xdp_ring_err) {
NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
} else {
- xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog,
+ ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
}
@@ -2983,7 +3044,7 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Rx resources failed");
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_features_clear_redirect_target(vsi->netdev);
- xdp_ring_err = ice_destroy_xdp_rings(vsi);
+ xdp_ring_err = ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_FULL);
if (xdp_ring_err)
NL_SET_ERR_MSG_MOD(extack, "Freeing XDP Tx resources failed");
/* reallocate Rx queues that were used for zero-copy */
@@ -3648,7 +3709,7 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_PF;
- params.pi = pi;
+ params.port_info = pi;
params.flags = ICE_VSI_FLAG_INIT;
return ice_vsi_setup(pf, &params);
@@ -3661,7 +3722,7 @@ ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_CHNL;
- params.pi = pi;
+ params.port_info = pi;
params.ch = ch;
params.flags = ICE_VSI_FLAG_INIT;
@@ -3682,7 +3743,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_CTRL;
- params.pi = pi;
+ params.port_info = pi;
params.flags = ICE_VSI_FLAG_INIT;
return ice_vsi_setup(pf, &params);
@@ -3702,7 +3763,7 @@ ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi_cfg_params params = {};
params.type = ICE_VSI_LB;
- params.pi = pi;
+ params.port_info = pi;
params.flags = ICE_VSI_FLAG_INIT;
return ice_vsi_setup(pf, &params);
@@ -4079,7 +4140,7 @@ bool ice_is_wol_supported(struct ice_hw *hw)
int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
{
struct ice_pf *pf = vsi->back;
- int err = 0, timeout = 50;
+ int i, err = 0, timeout = 50;
if (!new_rx && !new_tx)
return -EINVAL;
@@ -4098,15 +4159,32 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ if (err)
+ goto rebuild_err;
dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count change happens when link is brought up\n");
goto done;
}
ice_vsi_close(vsi);
- ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
+ if (err)
+ goto rebuild_err;
+
+ ice_for_each_traffic_class(i) {
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ netdev_set_tc_queue(vsi->netdev,
+ vsi->tc_cfg.tc_info[i].netdev_tc,
+ vsi->tc_cfg.tc_info[i].qcount_tx,
+ vsi->tc_cfg.tc_info[i].qoffset);
+ }
ice_pf_dcb_recfg(pf, locked);
ice_vsi_open(vsi);
+ goto done;
+
+rebuild_err:
+ dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n",
+ err);
done:
clear_bit(ICE_CFG_BUSY, pf->state);
return err;
@@ -4417,11 +4495,13 @@ static char *ice_get_opt_fw_name(struct ice_pf *pf)
/**
* ice_request_fw - Device initialization routine
* @pf: pointer to the PF instance
+ * @firmware: double pointer to firmware struct
+ *
+ * Return: zero when successful, negative values otherwise.
*/
-static void ice_request_fw(struct ice_pf *pf)
+static int ice_request_fw(struct ice_pf *pf, const struct firmware **firmware)
{
char *opt_fw_filename = ice_get_opt_fw_name(pf);
- const struct firmware *firmware = NULL;
struct device *dev = ice_pf_to_dev(pf);
int err = 0;
@@ -4430,29 +4510,95 @@ static void ice_request_fw(struct ice_pf *pf)
* and warning messages for other errors.
*/
if (opt_fw_filename) {
- err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
- if (err) {
- kfree(opt_fw_filename);
- goto dflt_pkg_load;
- }
-
- /* request for firmware was successful. Download to device */
- ice_load_pkg(firmware, pf);
+ err = firmware_request_nowarn(firmware, opt_fw_filename, dev);
kfree(opt_fw_filename);
- release_firmware(firmware);
- return;
+ if (!err)
+ return err;
+ }
+ err = request_firmware(firmware, ICE_DDP_PKG_FILE, dev);
+ if (err)
+ dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
+
+ return err;
+}
+
+/**
+ * ice_init_tx_topology - performs Tx topology initialization
+ * @hw: pointer to the hardware structure
+ * @firmware: pointer to firmware structure
+ *
+ * Return: zero when init was successful, negative values otherwise.
+ */
+static int
+ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
+{
+ u8 num_tx_sched_layers = hw->num_tx_sched_layers;
+ struct ice_pf *pf = hw->back;
+ struct device *dev;
+ u8 *buf_copy;
+ int err;
+
+ dev = ice_pf_to_dev(pf);
+ /* ice_cfg_tx_topo buf argument is not a constant,
+ * so we have to make a copy
+ */
+ buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL);
+ if (!buf_copy)
+ return -ENOMEM;
+
+ err = ice_cfg_tx_topo(hw, buf_copy, firmware->size);
+ if (!err) {
+ if (hw->num_tx_sched_layers > num_tx_sched_layers)
+ dev_info(dev, "Tx scheduling layers switching feature disabled\n");
+ else
+ dev_info(dev, "Tx scheduling layers switching feature enabled\n");
+ /* if there was a change in topology ice_cfg_tx_topo triggered
+ * a CORER and we need to re-init hw
+ */
+ ice_deinit_hw(hw);
+ err = ice_init_hw(hw);
+
+ return err;
+ } else if (err == -EIO) {
+ dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update to the latest DDP package and try again\n");
}
-dflt_pkg_load:
- err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
+ return 0;
+}
+
+/**
+ * ice_init_ddp_config - DDP related configuration
+ * @hw: pointer to the hardware structure
+ * @pf: pointer to pf structure
+ *
+ * This function loads the DDP file from disk, initializes the Tx topology,
+ * and finally downloads the DDP package to the device.
+ *
+ * Return: zero when init was successful, negative values otherwise.
+ */
+static int ice_init_ddp_config(struct ice_hw *hw, struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ const struct firmware *firmware = NULL;
+ int err;
+
+ err = ice_request_fw(pf, &firmware);
if (err) {
- dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
- return;
+ dev_err(dev, "Fail during requesting FW: %d\n", err);
+ return err;
}
- /* request for firmware was successful. Download to device */
+ err = ice_init_tx_topology(hw, firmware);
+ if (err) {
+ dev_err(dev, "Fail during initialization of Tx topology: %d\n",
+ err);
+ release_firmware(firmware);
+ return err;
+ }
+
+ /* Download firmware to device */
ice_load_pkg(firmware, pf);
release_firmware(firmware);
+
+ return 0;
}
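Aside: the flow above generalizes to any driver that prefers an optional, device-specific image but must fall back to a default, and whose consumer needs a writable buffer even though struct firmware data is const. A minimal kernel-style sketch; the file names and the consumer are hypothetical:

static int example_load(struct device *dev)
{
	const struct firmware *fw;
	u8 *buf;
	int err;

	err = firmware_request_nowarn(&fw, "example-custom.bin", dev);
	if (err)
		err = request_firmware(&fw, "example-default.bin", dev);
	if (err)
		return err;

	buf = kmemdup(fw->data, fw->size, GFP_KERNEL);
	if (!buf) {
		release_firmware(fw);
		return -ENOMEM;
	}

	/* ... hand buf/fw->size to an API that modifies the buffer ... */

	kfree(buf);
	release_firmware(fw);
	return 0;
}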
/**
@@ -4625,9 +4771,11 @@ int ice_init_dev(struct ice_pf *pf)
ice_init_feature_support(pf);
- ice_request_fw(pf);
+ err = ice_init_ddp_config(hw, pf);
+ if (err)
+ return err;
- /* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
+ /* if ice_init_ddp_config fails, ICE_FLAG_ADV_FEATURES bit won't be
* set in pf->state, which will cause ice_is_safe_mode to return
* true
*/
@@ -5093,6 +5241,7 @@ static int
ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
{
struct device *dev = &pdev->dev;
+ struct ice_adapter *adapter;
struct ice_pf *pf;
struct ice_hw *hw;
int err;
@@ -5145,7 +5294,12 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pci_set_master(pdev);
+ adapter = ice_adapter_get(pdev);
+ if (IS_ERR(adapter))
+ return PTR_ERR(adapter);
+
pf->pdev = pdev;
+ pf->adapter = adapter;
pci_set_drvdata(pdev, pf);
set_bit(ICE_DOWN, pf->state);
/* Disable service task until DOWN bit is cleared */
@@ -5179,23 +5333,23 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
devl_lock(priv_to_devlink(pf));
err = ice_load(pf);
- devl_unlock(priv_to_devlink(pf));
if (err)
goto err_load;
err = ice_init_devlink(pf);
if (err)
goto err_init_devlink;
+ devl_unlock(priv_to_devlink(pf));
return 0;
err_init_devlink:
- devl_lock(priv_to_devlink(pf));
ice_unload(pf);
- devl_unlock(priv_to_devlink(pf));
err_load:
+ devl_unlock(priv_to_devlink(pf));
ice_deinit(pf);
err_init:
+ ice_adapter_put(pdev);
pci_disable_device(pdev);
return err;
}
@@ -5290,9 +5444,9 @@ static void ice_remove(struct pci_dev *pdev)
if (!ice_is_safe_mode(pf))
ice_remove_arfs(pf);
+ devl_lock(priv_to_devlink(pf));
ice_deinit_devlink(pf);
- devl_lock(priv_to_devlink(pf));
ice_unload(pf);
devl_unlock(priv_to_devlink(pf));
@@ -5302,6 +5456,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_setup_mc_magic_wake(pf);
ice_set_wake(pf);
+ ice_adapter_put(pdev);
pci_disable_device(pdev);
}
@@ -5321,7 +5476,6 @@ static void ice_shutdown(struct pci_dev *pdev)
}
}
-#ifdef CONFIG_PM
/**
* ice_prepare_for_shutdown - prep for PCI shutdown
* @pf: board private structure
@@ -5346,7 +5500,7 @@ static void ice_prepare_for_shutdown(struct ice_pf *pf)
if (pf->vsi[v])
pf->vsi[v]->vsi_num = 0;
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, true);
}
/**
@@ -5410,7 +5564,7 @@ err_reinit:
* Power Management callback to quiesce the device and prepare
* for D3 transition.
*/
-static int __maybe_unused ice_suspend(struct device *dev)
+static int ice_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ice_pf *pf;
@@ -5431,7 +5585,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
*/
disabled = ice_service_task_stop(pf);
- ice_unplug_aux_dev(pf);
+ ice_deinit_rdma(pf);
/* Already suspended?, then there is nothing to do */
if (test_and_set_bit(ICE_SUSPENDED, pf->state)) {
@@ -5477,7 +5631,7 @@ static int __maybe_unused ice_suspend(struct device *dev)
* ice_resume - PM callback for waking up from D3
* @dev: generic device information structure
*/
-static int __maybe_unused ice_resume(struct device *dev)
+static int ice_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
enum ice_reset_req reset_type;
@@ -5511,6 +5665,11 @@ static int __maybe_unused ice_resume(struct device *dev)
if (ret)
dev_err(dev, "Cannot restore interrupt scheme: %d\n", ret);
+ ret = ice_init_rdma(pf);
+ if (ret)
+ dev_err(dev, "Reinitialize RDMA during resume failed: %d\n",
+ ret);
+
clear_bit(ICE_DOWN, pf->state);
/* Now perform PF reset and rebuild */
reset_type = ICE_RESET_PFR;
@@ -5528,7 +5687,6 @@ static int __maybe_unused ice_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
/**
* ice_pci_err_detected - warning that PCI error has been detected
@@ -5693,16 +5851,22 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_QSFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SFP), },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E825C_SGMII), },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_QSFP56) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830CC_SFP_DD) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_BACKPLANE), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_QSFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830C_SFP), },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_XXV_SFP), },
/* required last entry */
{}
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
-static __maybe_unused SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ice_pm_ops, ice_suspend, ice_resume);
static const struct pci_error_handlers ice_pci_err_handler = {
.error_detected = ice_pci_err_detected,
@@ -5717,9 +5881,7 @@ static struct pci_driver ice_driver = {
.id_table = ice_pci_tbl,
.probe = ice_probe,
.remove = ice_remove,
-#ifdef CONFIG_PM
- .driver.pm = &ice_pm_ops,
-#endif /* CONFIG_PM */
+ .driver.pm = pm_sleep_ptr(&ice_pm_ops),
.shutdown = ice_shutdown,
.sriov_configure = ice_sriov_configure,
.sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
@@ -7055,13 +7217,11 @@ int ice_down(struct ice_vsi *vsi)
WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state));
- if (vsi->netdev && vsi->type == ICE_VSI_PF) {
+ if (vsi->netdev) {
vlan_err = ice_vsi_del_vlan_zero(vsi);
ice_ptp_link_change(vsi->back, vsi->back->hw.pf_id, false);
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
- } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
- ice_eswitch_stop_all_tx_queues(vsi->back);
}
ice_vsi_dis_irq(vsi);
@@ -7544,12 +7704,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- err = ice_eswitch_rebuild(pf);
- if (err) {
- dev_err(dev, "Switchdev rebuild failed: %d\n", err);
- goto err_vsi_rebuild;
- }
-
if (reset_type == ICE_RESET_PFR) {
err = ice_rebuild_channels(pf);
if (err) {
@@ -7604,7 +7758,7 @@ err_vsi_rebuild:
err_sched_init_port:
ice_sched_cleanup_all(hw);
err_init_ctrlq:
- ice_shutdown_all_ctrlq(hw);
+ ice_shutdown_all_ctrlq(hw, false);
set_bit(ICE_RESET_FAILED, pf->state);
clear_recovery:
/* set this bit in PF state to control service task scheduling */
@@ -7666,7 +7820,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
return -EBUSY;
}
- netdev->mtu = (unsigned int)new_mtu;
+ WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu);
err = ice_down_up(vsi);
if (err)
return err;
@@ -7999,12 +8153,9 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- __u16 mode;
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
+ __u16 mode = nla_get_u16(attr);
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
- mode = nla_get_u16(attr);
if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
return -EINVAL;
/* Continue if bridge mode is not being flipped */
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index d4e05d2cb30c..59e8879ac059 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -18,10 +18,9 @@
*
* Read the NVM using the admin queue commands (0x0701)
*/
-static int
-ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset, u16 length,
- void *data, bool last_command, bool read_shadow_ram,
- struct ice_sq_cd *cd)
+int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
struct ice_aqc_nvm *cmd;
@@ -375,11 +374,25 @@ ice_read_nvm_module(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u1
*
* Read the specified word from the copy of the Shadow RAM found in the
* specified NVM module.
+ *
+ * Note that the Shadow RAM copy is always located after the CSS header, and
+ * is aligned to 64-byte (32-word) offsets.
*/
static int
ice_read_nvm_sr_copy(struct ice_hw *hw, enum ice_bank_select bank, u32 offset, u16 *data)
{
- return ice_read_nvm_module(hw, bank, ICE_NVM_SR_COPY_WORD_OFFSET + offset, data);
+ u32 sr_copy;
+
+ switch (bank) {
+ case ICE_ACTIVE_FLASH_BANK:
+ sr_copy = roundup(hw->flash.banks.active_css_hdr_len, 32);
+ break;
+ case ICE_INACTIVE_FLASH_BANK:
+ sr_copy = roundup(hw->flash.banks.inactive_css_hdr_len, 32);
+ break;
+ }
+
+ return ice_read_nvm_module(hw, bank, sr_copy + offset, data);
}
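Aside: the Shadow RAM copy is located by rounding the CSS header length (in 16-bit words) up to the next 32-word (64-byte) boundary. The same arithmetic in isolation, with an arbitrary sample length:

#include <stdio.h>
#include <stdint.h>

static uint32_t roundup_u32(uint32_t x, uint32_t to)
{
	return ((x + to - 1) / to) * to;
}

int main(void)
{
	uint32_t css_hdr_len = 330;	/* words; sample value */
	uint32_t sr_copy = roundup_u32(css_hdr_len, 32);

	printf("SR copy starts at word %u\n", sr_copy);	/* 352 */
	return 0;
}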
/**
@@ -441,8 +454,7 @@ int
ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
u16 module_type)
{
- u16 pfa_len, pfa_ptr;
- u16 next_tlv;
+ u16 pfa_len, pfa_ptr, next_tlv, max_tlv;
int status;
status = ice_read_sr_word(hw, ICE_SR_PFA_PTR, &pfa_ptr);
@@ -455,11 +467,23 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
ice_debug(hw, ICE_DBG_INIT, "Failed to read PFA length.\n");
return status;
}
+
+ /* The Preserved Fields Area contains a sequence of Type-Length-Value
+ * structures which define its contents. The PFA length includes all
+ * of the TLVs, plus the initial length word itself, *and* one final
+ * word at the end after all of the TLVs.
+ */
+ if (check_add_overflow(pfa_ptr, pfa_len - 1, &max_tlv)) {
+ dev_warn(ice_hw_to_dev(hw), "PFA starts at offset %u. PFA length of %u caused 16-bit arithmetic overflow.\n",
+ pfa_ptr, pfa_len);
+ return -EINVAL;
+ }
+
/* Starting with first TLV after PFA length, iterate through the list
* of TLVs to find the requested one.
*/
next_tlv = pfa_ptr + 1;
- while (next_tlv < pfa_ptr + pfa_len) {
+ while (next_tlv < max_tlv) {
u16 tlv_sub_module_type;
u16 tlv_len;
@@ -483,10 +507,13 @@ ice_get_pfa_module_tlv(struct ice_hw *hw, u16 *module_tlv, u16 *module_tlv_len,
}
return -EINVAL;
}
- /* Check next TLV, i.e. current TLV pointer + length + 2 words
- * (for current TLV's type and length)
- */
- next_tlv = next_tlv + tlv_len + 2;
+
+ if (check_add_overflow(next_tlv, 2, &next_tlv) ||
+ check_add_overflow(next_tlv, tlv_len, &next_tlv)) {
+ dev_warn(ice_hw_to_dev(hw), "TLV of type %u and length 0x%04x caused 16-bit arithmetic overflow. The PFA starts at 0x%04x and has length of 0x%04x\n",
+ tlv_sub_module_type, tlv_len, pfa_ptr, pfa_len);
+ return -EINVAL;
+ }
}
/* Module does not exist */
return -ENOENT;
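Aside: the reason every u16 addition in the TLV walk above is guarded — with plain arithmetic, a hostile tlv_len near 0xFFFF wraps next_tlv back below max_tlv, so the loop never terminates (or walks outside the PFA). check_add_overflow() expands to the compiler builtin used in this stand-alone demo:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t next_tlv = 0x0F00;
	uint16_t tlv_len = 0xFFFE;	/* crafted: wraps past 0xFFFF */
	uint16_t sum;

	if (__builtin_add_overflow(next_tlv, (uint16_t)2, &sum) ||
	    __builtin_add_overflow(sum, tlv_len, &sum)) {
		printf("overflow detected, abort TLV walk\n");
		return 1;
	}
	printf("next TLV at 0x%04x\n", sum);
	return 0;
}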
@@ -1011,6 +1038,72 @@ static int ice_determine_active_flash_banks(struct ice_hw *hw)
}
/**
+ * ice_get_nvm_css_hdr_len - Read the CSS header length from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header, convert it from
+ * dwords to words, and then add the Authentication header size (in words).
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int
+ice_get_nvm_css_hdr_len(struct ice_hw *hw, enum ice_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ int status;
+
+ status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (status)
+ return status;
+
+ status = ice_read_nvm_module(hw, bank, ICE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (status)
+ return status;
+
+ /* CSS header length is in DWORD, so convert to words and add
+ * authentication header size
+ */
+ hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+ *hdr_len = (hdr_len_dword * 2) + ICE_NVM_AUTH_HEADER_LEN;
+
+ return 0;
+}
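A worked instance of the conversion above, assuming hypothetical register
contents: the two 16-bit reads combine into a DWORD count, which is doubled
into words before the authentication header length is added.

	/* hypothetical: hdr_len_h = 0x0000, hdr_len_l = 0x0080 */
	u32 hdr_len_dword = 0x0000 << 16 | 0x0080;	/* 128 DWORDs */
	u32 hdr_len_words = hdr_len_dword * 2 + ICE_NVM_AUTH_HEADER_LEN;	/* 256 words + auth hdr */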
+
+/**
+ * ice_determine_css_hdr_len - Discover CSS header length for the device
+ * @hw: pointer to the HW struct
+ *
+ * Determine the size of the CSS header at the start of the NVM module. This
+ * is useful for locating the Shadow RAM copy in the NVM, as the Shadow RAM is
+ * always located just after the CSS header.
+ *
+ * Return: zero on success, or a negative error code on failure.
+ */
+static int ice_determine_css_hdr_len(struct ice_hw *hw)
+{
+ struct ice_bank_info *banks = &hw->flash.banks;
+ int status;
+
+ status = ice_get_nvm_css_hdr_len(hw, ICE_ACTIVE_FLASH_BANK,
+ &banks->active_css_hdr_len);
+ if (status)
+ return status;
+
+ status = ice_get_nvm_css_hdr_len(hw, ICE_INACTIVE_FLASH_BANK,
+ &banks->inactive_css_hdr_len);
+ if (status)
+ return status;
+
+ return 0;
+}
+
+/**
* ice_init_nvm - initializes NVM setting
* @hw: pointer to the HW struct
*
@@ -1056,6 +1149,12 @@ int ice_init_nvm(struct ice_hw *hw)
return status;
}
+ status = ice_determine_css_hdr_len(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_NVM, "Failed to determine Shadow RAM copy offsets.\n");
+ return status;
+ }
+
status = ice_get_nvm_ver_info(hw, ICE_ACTIVE_FLASH_BANK, &flash->nvm);
if (status) {
ice_debug(hw, ICE_DBG_INIT, "Failed to read NVM info.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 774c2317967d..63cdc6bdac58 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -14,6 +14,9 @@ struct ice_orom_civd_info {
int ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_nvm(struct ice_hw *hw);
+int ice_aq_read_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram, struct ice_sq_cd *cd);
int
ice_read_flat_nvm(struct ice_hw *hw, u32 offset, u32 *length, u8 *data,
bool read_shadow_ram);
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index f6f27361c3cf..7c09ea0f03ba 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -7,18 +7,24 @@
/* Each recipe can match up to 5 different fields. Fields to match can be meta-
* data, values extracted from packet headers, or results from other recipes.
- * One of the 5 fields is reserved for matching the switch ID. So, up to 4
- * recipes can provide intermediate results to another one through chaining,
- * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4.
+ * Therefore, up to 5 recipes can provide intermediate results to another one
+ * through chaining, e.g. recipes 0, 1, 2, 3 and 4 can provide intermediate
+ * results to recipe 5. Note that one of the fields in one of the recipes must
+ * always be reserved for matching the switch ID.
*/
-#define ICE_NUM_WORDS_RECIPE 4
+#define ICE_NUM_WORDS_RECIPE 5
-/* Max recipes that can be chained */
+/* Max recipes that can be chained, not including the last one, which combines
+ * intermediate results.
+ */
#define ICE_MAX_CHAIN_RECIPE 5
-/* 1 word reserved for switch ID from allowed 5 words.
- * So a recipe can have max 4 words. And you can chain 5 such recipes
- * together. So maximum words that can be programmed for look up is 5 * 4.
+/* Total max recipes in chain recipe (including intermediate results) */
+#define ICE_MAX_CHAIN_RECIPE_RES (ICE_MAX_CHAIN_RECIPE + 1)
+
+/* A recipe can have max 5 words, and 5 recipes can be chained together (using
+ * the 6th one, which would contain only result indexes). So maximum words that
+ * can be programmed for lookup is 5 * 5 (not including intermediate results).
*/
#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE)
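The sizing follows directly from the definitions above; a quick sanity
sketch (static_assert() is available to kernel C via <linux/build_bug.h>):

	/* 5 lookup words per recipe x 5 chainable recipes = 25 words; the
	 * extra recipe counted by ICE_MAX_CHAIN_RECIPE_RES carries only the
	 * chained result indexes.
	 */
	static_assert(ICE_MAX_CHAIN_WORDS == 25);
	static_assert(ICE_MAX_CHAIN_RECIPE_RES == 6);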
@@ -43,6 +49,7 @@ enum ice_protocol_type {
ICE_NVGRE,
ICE_GTP,
ICE_GTP_NO_PAY,
+ ICE_PFCP,
ICE_PPPOE,
ICE_L2TPV3,
ICE_VLAN_EX,
@@ -61,6 +68,7 @@ enum ice_sw_tunnel_type {
ICE_SW_TUN_NVGRE,
ICE_SW_TUN_GTPU,
ICE_SW_TUN_GTPC,
+ ICE_SW_TUN_PFCP,
ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
};
@@ -202,6 +210,15 @@ struct ice_udp_gtp_hdr {
u8 rsvrd;
};
+struct ice_pfcp_hdr {
+ u8 flags;
+ u8 msg_type;
+ __be16 length;
+ __be64 seid;
+ __be32 seq;
+ u8 spare;
+} __packed __aligned(__alignof__(u16));
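The __packed annotation is what keeps the 64-bit SEID at byte offset 4,
matching the on-wire PFCP session header; without it the compiler would pad
the __be64 member out to an 8-byte boundary. An illustrative check of the
layout implied by the field widths above:

	static_assert(offsetof(struct ice_pfcp_hdr, seid) == 4);
	static_assert(offsetof(struct ice_pfcp_hdr, spare) == 16);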
+
struct ice_pppoe_hdr {
u8 rsrvd_ver_type;
u8 rsrvd_code;
@@ -418,6 +435,7 @@ union ice_prot_hdr {
struct ice_udp_tnl_hdr tnl_hdr;
struct ice_nvgre_hdr nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
+ struct ice_pfcp_hdr pfcp_hdr;
struct ice_pppoe_hdr pppoe_hdr;
struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
struct ice_hw_metadata metadata;
@@ -437,32 +455,11 @@ struct ice_prot_ext_tbl_entry {
/* Extractions to be looked up for a given recipe */
struct ice_prot_lkup_ext {
- u16 prot_type;
u8 n_val_words;
/* create a buffer to hold max words per recipe */
- u16 field_off[ICE_MAX_CHAIN_WORDS];
u16 field_mask[ICE_MAX_CHAIN_WORDS];
struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS];
-
- /* Indicate field offsets that have field vector indices assigned */
- DECLARE_BITMAP(done, ICE_MAX_CHAIN_WORDS);
-};
-
-struct ice_pref_recipe_group {
- u8 n_val_pairs; /* Number of valid pairs */
- struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE];
- u16 mask[ICE_NUM_WORDS_RECIPE];
};
-struct ice_recp_grp_entry {
- struct list_head l_entry;
-
-#define ICE_INVAL_CHAIN_IND 0xFF
- u16 rid;
- u8 chain_idx;
- u16 fv_idx[ICE_NUM_WORDS_RECIPE];
- u16 fv_mask[ICE_NUM_WORDS_RECIPE];
- struct ice_pref_recipe_group r_group;
-};
#endif /* _ICE_PROTOCOL_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index c11eba07283c..ef2e858f49bb 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -7,8 +7,6 @@
#define E810_OUT_PROP_DELAY_NS 1
-#define UNKNOWN_INCVAL_E82X 0x100000000ULL
-
static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
/* name idx func chan */
{ "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
@@ -374,6 +372,7 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
u8 tmr_idx;
tmr_idx = ice_get_ptp_src_clock_index(hw);
+ guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
/* Read the system timestamp pre PHC read */
ptp_read_system_prets(sts);
@@ -812,7 +811,7 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
}
mutex_unlock(&pf->ptp.ports_owner.lock);
- for (i = 0; i < ICE_MAX_QUAD; i++) {
+ for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) {
u64 tstamp_ready;
int err;
@@ -1013,6 +1012,28 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
}
/**
+ * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this port. ETH56G PHYs
+ * have independent memory blocks for all ports.
+ *
+ * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker
+ */
+static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx,
+ u8 port)
+{
+ tx->block = port;
+ tx->offset = 0;
+ tx->len = INDEX_PER_PORT_ETH56G;
+ tx->has_ready_bitmap = 1;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
* ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps
* @pf: Board private structure
* @tx: the Tx tracking structure to initialize
@@ -1026,7 +1047,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
static int
ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
{
- tx->block = port / ICE_PORTS_PER_QUAD;
+ tx->block = ICE_GET_QUAD_NUM(port);
tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X;
tx->len = INDEX_PER_PORT_E82X;
tx->has_ready_bitmap = 1;
@@ -1166,26 +1187,6 @@ static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
}
/**
- * ice_ptp_read_time - Read the time from the device
- * @pf: Board private structure
- * @ts: timespec structure to hold the current time value
- * @sts: Optional parameter for holding a pair of system timestamps from
- * the system clock. Will be ignored if NULL is given.
- *
- * This function reads the source clock registers and stores them in a timespec.
- * However, since the registers are 64 bits of nanoseconds, we must convert the
- * result to a timespec before we can return.
- */
-static void
-ice_ptp_read_time(struct ice_pf *pf, struct timespec64 *ts,
- struct ptp_system_timestamp *sts)
-{
- u64 time_ns = ice_ptp_read_src_clk_reg(pf, sts);
-
- *ts = ns_to_timespec64(time_ns);
-}
-
-/**
* ice_ptp_write_init - Set PHC time to provided value
* @pf: Board private structure
* @ts: timespec structure that holds the new time value
@@ -1229,12 +1230,7 @@ static u64 ice_base_incval(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
u64 incval;
- if (ice_is_e810(hw))
- incval = ICE_PTP_NOMINAL_INCVAL_E810;
- else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ)
- incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
- else
- incval = UNKNOWN_INCVAL_E82X;
+ incval = ice_get_base_incval(hw);
dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n",
incval);
@@ -1248,8 +1244,8 @@ static u64 ice_base_incval(struct ice_pf *pf)
*/
static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port)
{
- int quad = port->port_num / ICE_PORTS_PER_QUAD;
int offs = port->port_num % ICE_PORTS_PER_QUAD;
+ int quad = ICE_GET_QUAD_NUM(port->port_num);
struct ice_pf *pf;
struct ice_hw *hw;
u32 val, phy_sts;
@@ -1367,10 +1363,19 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port)
mutex_lock(&ptp_port->ps_lock);
- kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_stop_phy_timer_eth56g(hw, port, true);
+ break;
+ case ICE_PHY_E82X:
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
- err = ice_stop_phy_timer_e82x(hw, port, true);
- if (err)
+ err = ice_stop_phy_timer_e82x(hw, port, true);
+ break;
+ default:
+ err = -ENODEV;
+ }
+ if (err && err != -EBUSY)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n",
port, err);
@@ -1404,27 +1409,39 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port)
mutex_lock(&ptp_port->ps_lock);
- kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_start_phy_timer_eth56g(hw, port);
+ break;
+ case ICE_PHY_E82X:
+ /* Start the PHY timer in Vernier mode */
+ kthread_cancel_delayed_work_sync(&ptp_port->ov_work);
- /* temporarily disable Tx timestamps while calibrating PHY offset */
- spin_lock_irqsave(&ptp_port->tx.lock, flags);
- ptp_port->tx.calibrating = true;
- spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
- ptp_port->tx_fifo_busy_cnt = 0;
+ /* temporarily disable Tx timestamps while calibrating
+ * PHY offset
+ */
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
+ ptp_port->tx.calibrating = true;
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
+ ptp_port->tx_fifo_busy_cnt = 0;
- /* Start the PHY timer in Vernier mode */
- err = ice_start_phy_timer_e82x(hw, port);
- if (err)
- goto out_unlock;
+ /* Start the PHY timer in Vernier mode */
+ err = ice_start_phy_timer_e82x(hw, port);
+ if (err)
+ break;
- /* Enable Tx timestamps right away */
- spin_lock_irqsave(&ptp_port->tx.lock, flags);
- ptp_port->tx.calibrating = false;
- spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
+ /* Enable Tx timestamps right away */
+ spin_lock_irqsave(&ptp_port->tx.lock, flags);
+ ptp_port->tx.calibrating = false;
+ spin_unlock_irqrestore(&ptp_port->tx.lock, flags);
- kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0);
+ kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work,
+ 0);
+ break;
+ default:
+ err = -ENODEV;
+ }
-out_unlock:
if (err)
dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n",
port, err);
@@ -1448,20 +1465,27 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
if (pf->ptp.state != ICE_PTP_READY)
return;
- if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS))
+ if (WARN_ON_ONCE(port >= hw->ptp.num_lports))
return;
ptp_port = &pf->ptp.port;
+ if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
+ port *= 2;
if (WARN_ON_ONCE(ptp_port->port_num != port))
return;
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
- switch (hw->phy_model) {
+ /* Skip HW writes if reset is in progress */
+ if (pf->hw.reset_ongoing)
+ return;
+
+ switch (hw->ptp.phy_model) {
case ICE_PHY_E810:
/* Do not reconfigure E810 PHY */
return;
+ case ICE_PHY_ETH56G:
case ICE_PHY_E82X:
ice_ptp_port_phy_restart(ptp_port);
return;
@@ -1476,42 +1500,62 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
* @ena: bool value to enable or disable interrupt
* @threshold: Minimum number of packets at which intr is triggered
*
- * Utility function to enable or disable Tx timestamp interrupt and threshold
+ * Utility function to configure all the PHY interrupt settings, including
+ * whether the PHY interrupt is enabled, and what threshold to use. Also
+ * configures the E82X timestamp owner to react to interrupts from all PHYs.
+ *
+ * Return: 0 on success, -EOPNOTSUPP if the PHY model is unsupported, or another
+ * error code if configuring a PHY interrupt fails
*/
static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
- int err = 0;
- int quad;
- u32 val;
ice_ptp_reset_ts_memory(hw);
- for (quad = 0; quad < ICE_MAX_QUAD; quad++) {
- err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
- &val);
- if (err)
- break;
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G: {
+ int port;
- if (ena) {
- val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
- val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
- val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M,
- threshold);
- } else {
- val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold);
+ if (err) {
+ dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n",
+ port, err);
+ return err;
+ }
}
- err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG,
- val);
- if (err)
- break;
+ return 0;
}
+ case ICE_PHY_E82X: {
+ int quad;
- if (err)
- dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n",
- err);
- return err;
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports);
+ quad++) {
+ int err;
+
+ err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold);
+ if (err) {
+ dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n",
+ quad, err);
+ return err;
+ }
+ }
+
+ return 0;
+ }
+ case ICE_PHY_E810:
+ return 0;
+ case ICE_PHY_UNSUP:
+ default:
+ dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__,
+ hw->ptp.phy_model);
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -1578,6 +1622,10 @@ void ice_ptp_extts_event(struct ice_pf *pf)
u8 chan, tmr_idx;
u32 hi, lo;
+ /* Don't process timestamp events if PTP is not ready */
+ if (pf->ptp.state != ICE_PTP_READY)
+ return;
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Event time is captured by one of the two matched registers
* GLTSYN_EVNT_L: 32 LSB of sampled time event
@@ -1603,27 +1651,33 @@ void ice_ptp_extts_event(struct ice_pf *pf)
/**
* ice_ptp_cfg_extts - Configure EXTTS pin and channel
* @pf: Board private structure
- * @ena: true to enable; false to disable
* @chan: GPIO channel (0-3)
- * @gpio_pin: GPIO pin
- * @extts_flags: request flags from the ptp_extts_request.flags
+ * @config: desired EXTTS configuration
+ * @store: if true, the configuration is saved so it can be restored after reset
+ *
+ * Configure an external timestamp event on the requested channel.
+ *
+ * Return: 0 on success, -EOPNOTSUPP on unsupported flags
*/
-static int
-ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
- unsigned int extts_flags)
+static int ice_ptp_cfg_extts(struct ice_pf *pf, unsigned int chan,
+ struct ice_extts_channel *config, bool store)
{
u32 func, aux_reg, gpio_reg, irq_reg;
struct ice_hw *hw = &pf->hw;
u8 tmr_idx;
- if (chan > (unsigned int)pf->ptp.info.n_ext_ts)
- return -EINVAL;
+ /* Reject requests with unsupported flags */
+ if (config->flags & ~(PTP_ENABLE_FEATURE |
+ PTP_RISING_EDGE |
+ PTP_FALLING_EDGE |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
irq_reg = rd32(hw, PFINT_OICR_ENA);
- if (ena) {
+ if (config->ena) {
/* Enable the interrupt */
irq_reg |= PFINT_OICR_TSYN_EVNT_M;
aux_reg = GLTSYN_AUX_IN_0_INT_ENA_M;
@@ -1632,9 +1686,9 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
#define GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE BIT(1)
/* set event level to requested edge */
- if (extts_flags & PTP_FALLING_EDGE)
+ if (config->flags & PTP_FALLING_EDGE)
aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_FALLING_EDGE;
- if (extts_flags & PTP_RISING_EDGE)
+ if (config->flags & PTP_RISING_EDGE)
aux_reg |= GLTSYN_AUX_IN_0_EVNTLVL_RISING_EDGE;
/* Write GPIO CTL reg.
@@ -1655,12 +1709,52 @@ ice_ptp_cfg_extts(struct ice_pf *pf, bool ena, unsigned int chan, u32 gpio_pin,
wr32(hw, PFINT_OICR_ENA, irq_reg);
wr32(hw, GLTSYN_AUX_IN(chan, tmr_idx), aux_reg);
- wr32(hw, GLGEN_GPIO_CTL(gpio_pin), gpio_reg);
+ wr32(hw, GLGEN_GPIO_CTL(config->gpio_pin), gpio_reg);
+
+ if (store)
+ memcpy(&pf->ptp.extts_channels[chan], config, sizeof(*config));
return 0;
}
/**
+ * ice_ptp_disable_all_extts - Disable all EXTTS channels
+ * @pf: Board private structure
+ */
+static void ice_ptp_disable_all_extts(struct ice_pf *pf)
+{
+ struct ice_extts_channel extts_cfg = {};
+ int i;
+
+ for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
+ if (pf->ptp.extts_channels[i].ena) {
+ extts_cfg.gpio_pin = pf->ptp.extts_channels[i].gpio_pin;
+ extts_cfg.ena = false;
+ ice_ptp_cfg_extts(pf, i, &extts_cfg, false);
+ }
+ }
+
+ synchronize_irq(pf->oicr_irq.virq);
+}
+
+/**
+ * ice_ptp_enable_all_extts - Enable all EXTTS channels
+ * @pf: Board private structure
+ *
+ * Called during reset to restore user configuration.
+ */
+static void ice_ptp_enable_all_extts(struct ice_pf *pf)
+{
+ int i;
+
+ for (i = 0; i < pf->ptp.info.n_ext_ts; i++) {
+ if (pf->ptp.extts_channels[i].ena)
+ ice_ptp_cfg_extts(pf, i, &pf->ptp.extts_channels[i],
+ false);
+ }
+}
+
+/**
* ice_ptp_cfg_clkout - Configure clock to generate periodic wave
* @pf: Board private structure
* @chan: GPIO channel (0-3)
@@ -1678,6 +1772,9 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
u32 func, val, gpio_pin;
u8 tmr_idx;
+ if (config && config->flags & ~PTP_PEROUT_PHASE)
+ return -EOPNOTSUPP;
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* 0. Reset mode & out_en in AUX_OUT */
@@ -1733,8 +1830,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan,
* maintaining phase
*/
if (start_time < current_time)
- start_time = div64_u64(current_time + NSEC_PER_SEC - 1,
- NSEC_PER_SEC) * NSEC_PER_SEC + phase;
+ start_time = roundup_u64(current_time, NSEC_PER_SEC) + phase;
if (ice_is_e810(hw))
start_time -= E810_OUT_PROP_DELAY_NS;
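roundup_u64() (from <linux/math64.h>) rounds a u64 up to the next multiple
of the divisor, which is exactly what the removed div64_u64() expression
computed; a small worked case with a hypothetical time:

	u64 now = 1500000000ULL;			/* 1.5 s after the epoch */
	u64 next = roundup_u64(now, NSEC_PER_SEC);	/* 2000000000: next full second */
	/* equivalent to div64_u64(now + NSEC_PER_SEC - 1, NSEC_PER_SEC) * NSEC_PER_SEC */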
@@ -1814,17 +1910,18 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
struct ptp_clock_request *rq, int on)
{
struct ice_pf *pf = ptp_info_to_pf(info);
- struct ice_perout_channel clk_cfg = {0};
bool sma_pres = false;
unsigned int chan;
u32 gpio_pin;
- int err;
if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
sma_pres = true;
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
+ {
+ struct ice_perout_channel clk_cfg = {};
+
chan = rq->perout.index;
if (sma_pres) {
if (chan == ice_pin_desc_e810t[SMA1].chan)
@@ -1844,15 +1941,19 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
clk_cfg.gpio_pin = chan;
}
+ clk_cfg.flags = rq->perout.flags;
clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
rq->perout.period.nsec);
clk_cfg.start_time = ((rq->perout.start.sec * NSEC_PER_SEC) +
rq->perout.start.nsec);
clk_cfg.ena = !!on;
- err = ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
- break;
+ return ice_ptp_cfg_clkout(pf, chan, &clk_cfg, true);
+ }
case PTP_CLK_REQ_EXTTS:
+ {
+ struct ice_extts_channel extts_cfg = {};
+
chan = rq->extts.index;
if (sma_pres) {
if (chan < ice_pin_desc_e810t[SMA2].chan)
@@ -1868,14 +1969,15 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
gpio_pin = chan;
}
- err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
- rq->extts.flags);
- break;
+ extts_cfg.flags = rq->extts.flags;
+ extts_cfg.gpio_pin = gpio_pin;
+ extts_cfg.ena = !!on;
+
+ return ice_ptp_cfg_extts(pf, chan, &extts_cfg, true);
+ }
default:
return -EOPNOTSUPP;
}
-
- return err;
}
/**
@@ -1888,26 +1990,32 @@ static int ice_ptp_gpio_enable_e823(struct ptp_clock_info *info,
struct ptp_clock_request *rq, int on)
{
struct ice_pf *pf = ptp_info_to_pf(info);
- struct ice_perout_channel clk_cfg = {0};
- int err;
switch (rq->type) {
case PTP_CLK_REQ_PPS:
+ {
+ struct ice_perout_channel clk_cfg = {};
+
+ clk_cfg.flags = rq->perout.flags;
clk_cfg.gpio_pin = PPS_PIN_INDEX;
clk_cfg.period = NSEC_PER_SEC;
clk_cfg.ena = !!on;
- err = ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
- break;
+ return ice_ptp_cfg_clkout(pf, PPS_CLK_GEN_CHAN, &clk_cfg, true);
+ }
case PTP_CLK_REQ_EXTTS:
- err = ice_ptp_cfg_extts(pf, !!on, rq->extts.index,
- TIME_SYNC_PIN_INDEX, rq->extts.flags);
- break;
+ {
+ struct ice_extts_channel extts_cfg = {};
+
+ extts_cfg.flags = rq->extts.flags;
+ extts_cfg.gpio_pin = TIME_SYNC_PIN_INDEX;
+ extts_cfg.ena = !!on;
+
+ return ice_ptp_cfg_extts(pf, rq->extts.index, &extts_cfg, true);
+ }
default:
return -EOPNOTSUPP;
}
-
- return err;
}
/**
@@ -1925,16 +2033,10 @@ ice_ptp_gettimex64(struct ptp_clock_info *info, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
- struct ice_hw *hw = &pf->hw;
-
- if (!ice_ptp_lock(hw)) {
- dev_err(ice_pf_to_dev(pf), "PTP failed to get time\n");
- return -EBUSY;
- }
-
- ice_ptp_read_time(pf, ts, sts);
- ice_ptp_unlock(hw);
+ u64 time_ns;
+ time_ns = ice_ptp_read_src_clk_reg(pf, sts);
+ *ts = ns_to_timespec64(time_ns);
return 0;
}
@@ -1954,11 +2056,14 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
struct ice_hw *hw = &pf->hw;
int err;
- /* For Vernier mode, we need to recalibrate after new settime
- * Start with disabling timestamp block
+ /* For Vernier mode on E82X, we need to recalibrate after new settime.
+ * Start with marking timestamps as invalid.
*/
- if (pf->ptp.port.link_up)
- ice_ptp_port_phy_stop(&pf->ptp.port);
+ if (hw->ptp.phy_model == ICE_PHY_E82X) {
+ err = ice_ptp_clear_phy_offset_ready_e82x(hw);
+ if (err)
+ dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n");
+ }
if (!ice_ptp_lock(hw)) {
err = -EBUSY;
@@ -1978,7 +2083,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_enable_all_clkout(pf);
/* Recalibrate and re-enable timestamp blocks for E822/E823 */
- if (hw->phy_model == ICE_PHY_E82X)
+ if (hw->ptp.phy_model == ICE_PHY_E82X)
ice_ptp_restart_all_phy(pf);
exit:
if (err) {
@@ -2116,7 +2221,8 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
hh_ts_lo = rd32(hw, GLHH_ART_TIME_L);
hh_ts_hi = rd32(hw, GLHH_ART_TIME_H);
hh_ts = ((u64)hh_ts_hi << 32) | hh_ts_lo;
- *system = convert_art_ns_to_tsc(hh_ts);
+ system->cycles = hh_ts;
+ system->cs_id = CSID_X86_ART;
/* Read Device source clock time */
hh_ts_lo = rd32(hw, GLTSYN_HHTIME_L(tmr_idx));
hh_ts_hi = rd32(hw, GLTSYN_HHTIME_H(tmr_idx));
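With this change the driver no longer converts ART to TSC nanoseconds
itself: it hands back the raw ART cycle count plus a clocksource ID, and the
core timekeeping code performs the conversion when servicing
get_device_system_crosststamp(). A sketch of the value now returned:

	struct system_counterval_t sys = {
		.cycles = hh_ts,	/* raw ART counter value */
		.cs_id = CSID_X86_ART,	/* tells timekeeping which converter to use */
	};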
@@ -2603,7 +2709,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf)
if (!ice_pf_src_tmr_owned(pf))
return;
- for (i = 0; i < ICE_MAX_QUAD; i++) {
+ for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) {
u64 tstamp_ready;
int err;
@@ -2745,6 +2851,10 @@ static int ice_ptp_rebuild_owner(struct ice_pf *pf)
ice_ptp_restart_all_phy(pf);
}
+ /* Re-enable all periodic outputs and external timestamp events */
+ ice_ptp_enable_all_clkout(pf);
+ ice_ptp_enable_all_extts(pf);
+
return 0;
}
@@ -2809,7 +2919,7 @@ static struct ice_pf *
ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
{
struct ice_ptp_port_owner *ports_owner;
- struct auxiliary_driver *aux_drv;
+ const struct auxiliary_driver *aux_drv;
struct ice_ptp *owner_ptp;
if (!aux_dev->dev.driver)
@@ -3035,12 +3145,10 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
/* Release the global hardware lock */
ice_ptp_unlock(hw);
- if (!ice_is_e810(hw)) {
- /* Enable quad interrupts */
- err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
- if (err)
- goto err_exit;
- }
+ /* Configure PHY interrupt settings */
+ err = ice_ptp_cfg_phy_interrupt(pf, true, 1);
+ if (err)
+ goto err_exit;
/* Ensure we have a clock device */
err = ice_ptp_create_clock(pf);
@@ -3101,7 +3209,10 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
mutex_init(&ptp_port->ps_lock);
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx,
+ ptp_port->port_num);
case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
case ICE_PHY_E82X:
@@ -3196,7 +3307,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
*/
static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
{
- switch (pf->hw.phy_model) {
+ switch (pf->hw.ptp.phy_model) {
case ICE_PHY_E82X:
/* E822 based PHY has the clock owner process the interrupt
* for all ports.
@@ -3232,7 +3343,7 @@ void ice_ptp_init(struct ice_pf *pf)
ptp->state = ICE_PTP_INITIALIZING;
- ice_ptp_init_phy_model(hw);
+ ice_ptp_init_hw(hw);
ice_ptp_init_tx_interrupt_mode(pf);
@@ -3246,6 +3357,9 @@ void ice_ptp_init(struct ice_pf *pf)
}
ptp->port.port_num = hw->pf_id;
+ if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo)
+ ptp->port.port_num = hw->pf_id * 2;
+
err = ice_ptp_init_port(pf, &ptp->port);
if (err)
goto err;
@@ -3300,6 +3414,8 @@ void ice_ptp_release(struct ice_pf *pf)
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
+ ice_ptp_disable_all_extts(pf);
+
kthread_cancel_delayed_work_sync(&pf->ptp.work);
ice_ptp_port_phy_stop(&pf->ptp.port);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 3af20025043a..2db2257a0fb2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -29,10 +29,17 @@ enum ice_ptp_pin_e810t {
struct ice_perout_channel {
bool ena;
u32 gpio_pin;
+ u32 flags;
u64 period;
u64 start_time;
};
+struct ice_extts_channel {
+ bool ena;
+ u32 gpio_pin;
+ u32 flags;
+};
+
/* The ice hardware captures Tx hardware timestamps in the PHY. The timestamp
* is stored in a buffer of registers. Depending on the specific hardware,
* this buffer might be shared across multiple PHY ports.
@@ -153,6 +160,7 @@ struct ice_ptp_tx {
#define INDEX_PER_QUAD 64
#define INDEX_PER_PORT_E82X 16
#define INDEX_PER_PORT_E810 64
+#define INDEX_PER_PORT_ETH56G 64
/**
* struct ice_ptp_port - data used to initialize an external port for PTP
@@ -226,6 +234,7 @@ enum ice_ptp_state {
* @ext_ts_irq: the external timestamp IRQ in use
* @kworker: kwork thread for handling periodic work
* @perout_channels: periodic output data
+ * @extts_channels: channels for external timestamps
* @info: structure defining PTP hardware capabilities
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
@@ -249,6 +258,7 @@ struct ice_ptp {
u8 ext_ts_irq;
struct kthread_worker *kworker;
struct ice_perout_channel perout_channels[GLTSYN_TGT_H_IDX_MAX];
+ struct ice_extts_channel extts_channels[GLTSYN_TGT_H_IDX_MAX];
struct ptp_clock_info info;
struct ptp_clock *clock;
struct hwtstamp_config tstamp_config;
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
index 2c4dab0c48ab..e6980b94a6c1 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h
@@ -9,6 +9,321 @@
*/
/* Constants defined for the PTP 1588 clock hardware. */
+const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = {
+ /* ETH56G_PHY_REG_PTP */
+ {
+ /* base_addr */
+ {
+ 0x092000,
+ 0x126000,
+ 0x1BA000,
+ 0x24E000,
+ 0x2E2000,
+ },
+ /* step */
+ 0x98,
+ },
+ /* ETH56G_PHY_MEM_PTP */
+ {
+ /* base_addr */
+ {
+ 0x093000,
+ 0x127000,
+ 0x1BB000,
+ 0x24F000,
+ 0x2E3000,
+ },
+ /* step */
+ 0x200,
+ },
+ /* ETH56G_PHY_REG_XPCS */
+ {
+ /* base_addr */
+ {
+ 0x000000,
+ 0x009400,
+ 0x128000,
+ 0x1BC000,
+ 0x250000,
+ },
+ /* step */
+ 0x21000,
+ },
+ /* ETH56G_PHY_REG_MAC */
+ {
+ /* base_addr */
+ {
+ 0x085000,
+ 0x119000,
+ 0x1AD000,
+ 0x241000,
+ 0x2D5000,
+ },
+ /* step */
+ 0x1000,
+ },
+ /* ETH56G_PHY_REG_GPCS */
+ {
+ /* base_addr */
+ {
+ 0x084000,
+ 0x118000,
+ 0x1AC000,
+ 0x240000,
+ 0x2D4000,
+ },
+ /* step */
+ 0x400,
+ },
+};
+
+const
+struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = {
+ [ICE_ETH56G_LNK_SPD_1G] = {
+ .tx_mode = { .def = 6, },
+ .rx_mode = { .def = 6, },
+ .blks_per_clk = 1,
+ .blktime = 0x4000, /* 32 */
+ .tx_offset = {
+ .serdes = 0x6666, /* 51.2 */
+ .no_fec = 0xd066, /* 104.2 */
+ .sfd = 0x3000, /* 24 */
+ .onestep = 0x30000 /* 384 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffc59a, /* -29.2 */
+ .no_fec = 0xffff0a80, /* -122.75 */
+ .sfd = 0x2c00, /* 22 */
+ .bs_ds = 0x19a /* 0.8 */
+			/* Dynamic bitslip 0 equals 10 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_2_5G] = {
+ .tx_mode = { .def = 6, },
+ .rx_mode = { .def = 6, },
+ .blks_per_clk = 1,
+ .blktime = 0x199a, /* 12.8 */
+ .tx_offset = {
+ .serdes = 0x28f6, /* 20.48 */
+ .no_fec = 0x53b8, /* 41.86 */
+ .sfd = 0x1333, /* 9.6 */
+ .onestep = 0x13333 /* 153.6 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffe8a4, /* -11.68 */
+ .no_fec = 0xffff9a76, /* -50.77 */
+ .sfd = 0xf33, /* 7.6 */
+ .bs_ds = 0xa4 /* 0.32 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_10G] = {
+ .tx_mode = { .def = 1, },
+ .rx_mode = { .def = 1, },
+ .blks_per_clk = 1,
+ .blktime = 0x666, /* 3.2 */
+ .tx_offset = {
+ .serdes = 0x234c, /* 17.6484848 */
+ .no_fec = 0x8e80, /* 71.25 */
+ .fc = 0xb4a4, /* 90.32 */
+ .sfd = 0x4a4, /* 2.32 */
+ .onestep = 0x4ccd /* 38.4 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffeb27, /* -10.42424 */
+ .no_fec = 0xffffcccd, /* -25.6 */
+ .fc = 0xfffe0014, /* -255.96 */
+ .sfd = 0x4a4, /* 2.32 */
+ .bs_ds = 0x32 /* 0.0969697 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_25G] = {
+ .tx_mode = {
+ .def = 1,
+ .rs = 4
+ },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = {
+ .def = 1,
+ .rs = 4
+ },
+ .rx_mk_dly = {
+ .def = 1,
+ .rs = 1
+ },
+ .rx_cw_dly = {
+ .def = 1,
+ .rs = 1
+ },
+ .blks_per_clk = 1,
+ .blktime = 0x28f, /* 1.28 */
+ .mktime = 0x147b, /* 10.24, only if RS-FEC enabled */
+ .tx_offset = {
+ .serdes = 0xe1e, /* 7.0593939 */
+ .no_fec = 0x3857, /* 28.17 */
+ .fc = 0x48c3, /* 36.38 */
+ .rs = 0x8100, /* 64.5 */
+ .sfd = 0x1dc, /* 0.93 */
+ .onestep = 0x1eb8 /* 15.36 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7a9, /* -4.1697 */
+ .no_fec = 0xffffe71a, /* -12.45 */
+ .fc = 0xfffe894d, /* -187.35 */
+ .rs = 0xfffff8cd, /* -3.6 */
+ .sfd = 0x1dc, /* 0.93 */
+ .bs_ds = 0x14 /* 0.0387879, RS-FEC 0 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_40G] = {
+ .tx_mode = { .def = 3 },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = { .def = 4 },
+ .rx_mk_dly = { .def = 1 },
+ .rx_cw_dly = { .def = 1 },
+ .blktime = 0x333, /* 1.6 */
+ .mktime = 0xccd, /* 6.4 */
+ .tx_offset = {
+ .serdes = 0x234c, /* 17.6484848 */
+ .no_fec = 0x5a8a, /* 45.27 */
+ .fc = 0x81b8, /* 64.86 */
+ .sfd = 0x4a4, /* 2.32 */
+ .onestep = 0x1333 /* 9.6 */
+ },
+ .rx_offset = {
+ .serdes = 0xffffeb27, /* -10.42424 */
+ .no_fec = 0xfffff594, /* -5.21 */
+ .fc = 0xfffe3080, /* -231.75 */
+ .sfd = 0x4a4, /* 2.32 */
+ .bs_ds = 0xccd /* 6.4 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_50G] = {
+ .tx_mode = { .def = 5 },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = { .def = 5 },
+ .rx_mk_dly = { .def = 1 },
+ .rx_cw_dly = { .def = 1 },
+ .blktime = 0x28f, /* 1.28 */
+ .mktime = 0xa3d, /* 5.12 */
+ .tx_offset = {
+ .serdes = 0x13ba, /* 9.86353 */
+ .rs = 0x5400, /* 42 */
+ .sfd = 0xe6, /* 0.45 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7e8, /* -4.04706 */
+ .rs = 0xfffff994, /* -3.21 */
+ .sfd = 0xe6 /* 0.45 */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_50G2] = {
+ .tx_mode = {
+ .def = 3,
+ .rs = 2
+ },
+ .tx_mk_dly = 4,
+ .tx_cw_dly = {
+ .def = 1,
+ .onestep = 6
+ },
+ .rx_mode = {
+ .def = 4,
+ .rs = 1
+ },
+ .rx_mk_dly = { .def = 1 },
+ .rx_cw_dly = { .def = 1 },
+ .blktime = 0x28f, /* 1.28 */
+ .mktime = 0xa3d, /* 5.12 */
+ .tx_offset = {
+ .serdes = 0xe1e, /* 7.0593939 */
+ .no_fec = 0x3d33, /* 30.6 */
+ .rs = 0x5057, /* 40.17 */
+ .sfd = 0x1dc, /* 0.93 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7a9, /* -4.1697 */
+ .no_fec = 0xfffff8cd, /* -3.6 */
+ .rs = 0xfffff21a, /* -6.95 */
+ .sfd = 0x1dc, /* 0.93 */
+ .bs_ds = 0xa3d /* 5.12, RS-FEC 0x633 (3.1) */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_100G] = {
+ .tx_mode = {
+ .def = 3,
+ .rs = 2
+ },
+ .tx_mk_dly = 10,
+ .tx_cw_dly = {
+ .def = 3,
+ .onestep = 6
+ },
+ .rx_mode = {
+ .def = 4,
+ .rs = 1
+ },
+ .rx_mk_dly = { .def = 5 },
+ .rx_cw_dly = { .def = 5 },
+ .blks_per_clk = 1,
+ .blktime = 0x148, /* 0.64 */
+ .mktime = 0x199a, /* 12.8 */
+ .tx_offset = {
+ .serdes = 0xe1e, /* 7.0593939 */
+ .no_fec = 0x67ec, /* 51.96 */
+ .rs = 0x44fb, /* 34.49 */
+ .sfd = 0x1dc, /* 0.93 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7a9, /* -4.1697 */
+ .no_fec = 0xfffff5a9, /* -5.17 */
+ .rs = 0xfffff6e6, /* -4.55 */
+ .sfd = 0x1dc, /* 0.93 */
+ .bs_ds = 0x199a /* 12.8, RS-FEC 0x31b (1.552) */
+ }
+ },
+ [ICE_ETH56G_LNK_SPD_100G2] = {
+ .tx_mode = { .def = 5 },
+ .tx_mk_dly = 10,
+ .tx_cw_dly = {
+ .def = 3,
+ .onestep = 6
+ },
+ .rx_mode = { .def = 5 },
+ .rx_mk_dly = { .def = 5 },
+ .rx_cw_dly = { .def = 5 },
+ .blks_per_clk = 1,
+ .blktime = 0x148, /* 0.64 */
+ .mktime = 0x199a, /* 12.8 */
+ .tx_offset = {
+ .serdes = 0x13ba, /* 9.86353 */
+ .rs = 0x460a, /* 35.02 */
+ .sfd = 0xe6, /* 0.45 */
+ .onestep = 0xf5c /* 7.68 */
+ },
+ .rx_offset = {
+ .serdes = 0xfffff7e8, /* -4.04706 */
+ .rs = 0xfffff548, /* -5.36 */
+ .sfd = 0xe6, /* 0.45 */
+ .bs_ds = 0x303 /* 1.506 */
+ }
+ }
+};
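Judging by the inline annotations, these fields appear to be signed
fixed-point values with 9 fractional bits, i.e. units of 1/512 ns: 0x6666 is
26214, and 26214 / 512 = 51.2, matching its comment, while 0xffffc59a is
-14950, giving -29.2. A hypothetical helper that makes this reading explicit:

	/* Hypothetical: convert a 1/512-ns fixed-point field to picoseconds */
	static inline s64 eth56g_fp_to_ps(s32 val)
	{
		return div_s64((s64)val * 1000, 512);
	}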
+
/* struct ice_time_ref_info_e82x
*
* E822 hardware can use different sources as the reference for the PTP
@@ -155,6 +470,93 @@ const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
},
};
+const
+struct ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ] = {
+ /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x19,
+ /* tspll_ndivratio */
+ 1,
+ /* tspll_fbdiv_intgr */
+ 320,
+ /* tspll_fbdiv_frac */
+ 0,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x29,
+ /* tspll_ndivratio */
+ 3,
+ /* tspll_fbdiv_intgr */
+ 195,
+ /* tspll_fbdiv_frac */
+ 1342177280UL,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x3E,
+ /* tspll_ndivratio */
+ 2,
+ /* tspll_fbdiv_intgr */
+ 128,
+ /* tspll_fbdiv_frac */
+ 0,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x33,
+ /* tspll_ndivratio */
+ 3,
+ /* tspll_fbdiv_intgr */
+ 156,
+ /* tspll_fbdiv_frac */
+ 1073741824UL,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x1F,
+ /* tspll_ndivratio */
+ 5,
+ /* tspll_fbdiv_intgr */
+ 256,
+ /* tspll_fbdiv_frac */
+ 0,
+ /* ref1588_ck_div */
+ 0,
+ },
+
+ /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */
+ {
+ /* tspll_ck_refclkfreq */
+ 0x52,
+ /* tspll_ndivratio */
+ 3,
+ /* tspll_fbdiv_intgr */
+ 97,
+ /* tspll_fbdiv_frac */
+ 2818572288UL,
+ /* ref1588_ck_div */
+ 0,
+ },
+};
+
/* struct ice_vernier_info_e82x
*
* E822 hardware calibrates the delay of the timestamp indication from the
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 187ce9b54e1a..3a33e6b9b313 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2021, Intel Corporation. */
#include <linux/delay.h>
+#include <linux/iopoll.h>
#include "ice_common.h"
#include "ice_ptp_hw.h"
#include "ice_ptp_consts.h"
@@ -227,40 +228,632 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
}
/**
- * ice_ptp_src_cmd - Prepare source timer for a timer command
- * @hw: pointer to HW structure
+ * ice_read_cgu_reg_e82x - Read a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to read
+ * @val: storage for register value read
+ *
+ * Read the contents of a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ *
+ * Return: 0 on success, other error codes when failed to read from CGU
+ */
+static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .opcode = ice_sbq_msg_rd,
+ .dest_dev = cgu,
+ .msg_addr_low = addr
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ *val = cgu_msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_write_cgu_reg_e82x - Write a CGU register
+ * @hw: pointer to the HW struct
+ * @addr: Register address to write
+ * @val: value to write into the register
+ *
+ * Write the specified value to a register of the Clock Generation Unit. Only
+ * applicable to E822 devices.
+ *
+ * Return: 0 on success, other error codes when failed to write to CGU
+ */
+static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
+{
+ struct ice_sbq_msg_input cgu_msg = {
+ .opcode = ice_sbq_msg_wr,
+ .dest_dev = cgu,
+ .msg_addr_low = addr,
+ .data = val
+ };
+ int err;
+
+ err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
+ addr, err);
+ return err;
+ }
+
+ return err;
+}
+
+/**
+ * ice_clk_freq_str - Convert time_ref_freq to string
+ * @clk_freq: Clock frequency
+ *
+ * Return: specified TIME_REF clock frequency converted to a string
+ */
+static const char *ice_clk_freq_str(enum ice_time_ref_freq clk_freq)
+{
+ switch (clk_freq) {
+ case ICE_TIME_REF_FREQ_25_000:
+ return "25 MHz";
+ case ICE_TIME_REF_FREQ_122_880:
+ return "122.88 MHz";
+ case ICE_TIME_REF_FREQ_125_000:
+ return "125 MHz";
+ case ICE_TIME_REF_FREQ_153_600:
+ return "153.6 MHz";
+ case ICE_TIME_REF_FREQ_156_250:
+ return "156.25 MHz";
+ case ICE_TIME_REF_FREQ_245_760:
+ return "245.76 MHz";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_clk_src_str - Convert time_ref_src to string
+ * @clk_src: Clock source
+ *
+ * Return: specified clock source converted to its string name
+ */
+static const char *ice_clk_src_str(enum ice_clk_src clk_src)
+{
+ switch (clk_src) {
+ case ICE_CLK_SRC_TCXO:
+ return "TCXO";
+ case ICE_CLK_SRC_TIME_REF:
+ return "TIME_REF";
+ default:
+ return "Unknown";
+ }
+}
+
+/**
+ * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
+ * @hw: pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TS PLL
+ * * %other - CGU read/write failure
+ */
+static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw,
+ enum ice_time_ref_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ union tspll_ro_bwm_lf bwm_lf;
+ union nac_cgu_dword19 dw19;
+ union nac_cgu_dword22 dw22;
+ union nac_cgu_dword24 dw24;
+ union nac_cgu_dword9 dw9;
+ int err;
+
+ if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return -EINVAL;
+ }
+
+ if (clk_src == ICE_CLK_SRC_TCXO &&
+ clk_freq != ICE_TIME_REF_FREQ_25_000) {
+ dev_warn(ice_hw_to_dev(hw),
+ "TCXO only supports 25 MHz frequency\n");
+ return -EINVAL;
+ }
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.time_ref_sel),
+ ice_clk_freq_str(dw9.time_ref_freq_sel),
+ bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (dw24.ts_pll_enable) {
+ dw24.ts_pll_enable = 0;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ dw9.time_ref_freq_sel = clk_freq;
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL feedback divisor */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
+ if (err)
+ return err;
+
+ dw19.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
+ dw19.tspll_ndivratio = 1;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL post divisor */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
+ if (err)
+ return err;
+
+ dw22.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
+ dw22.time1588clk_sel_div2 = 0;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL pre divisor and clock source */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ dw24.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
+ dw24.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
+ dw24.time_ref_sel = clk_src;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Finally, enable the PLL */
+ dw24.ts_pll_enable = 1;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+	/* Wait, then check whether the PLL locked */
+ usleep_range(1000, 5000);
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
+ if (err)
+ return err;
+
+ if (!bwm_lf.plllock_true_lock_cri) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw24.time_ref_sel),
+ ice_clk_freq_str(dw9.time_ref_freq_sel),
+ bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ return 0;
+}
+
+/**
+ * ice_cfg_cgu_pll_e825c - Configure the Clock Generation Unit for E825-C
+ * @hw: pointer to the HW struct
+ * @clk_freq: Clock frequency to program
+ * @clk_src: Clock source to select (TIME_REF, or TCXO)
+ *
+ * Configure the Clock Generation Unit with the desired clock frequency and
+ * time reference, enabling the PLL which drives the PTP hardware clock.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - input parameters are incorrect
+ * * %-EBUSY - failed to lock TS PLL
+ * * %other - CGU read/write failure
+ */
+static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw,
+ enum ice_time_ref_freq clk_freq,
+ enum ice_clk_src clk_src)
+{
+ union tspll_ro_lock_e825c ro_lock;
+ union nac_cgu_dword16_e825c dw16;
+ union nac_cgu_dword23_e825c dw23;
+ union nac_cgu_dword19 dw19;
+ union nac_cgu_dword22 dw22;
+ union nac_cgu_dword24 dw24;
+ union nac_cgu_dword9 dw9;
+ int err;
+
+ if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
+ clk_freq);
+ return -EINVAL;
+ }
+
+ if (clk_src >= NUM_ICE_CLK_SRC) {
+ dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
+ clk_src);
+ return -EINVAL;
+ }
+
+ if (clk_src == ICE_CLK_SRC_TCXO &&
+ clk_freq != ICE_TIME_REF_FREQ_156_250) {
+ dev_warn(ice_hw_to_dev(hw),
+ "TCXO only supports 156.25 MHz frequency\n");
+ return -EINVAL;
+ }
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, &dw16.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
+ if (err)
+ return err;
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
+ if (err)
+ return err;
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw23.time_ref_sel),
+ ice_clk_freq_str(dw9.time_ref_freq_sel),
+ ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ /* Disable the PLL before changing the clock source or frequency */
+ if (dw23.ts_pll_enable) {
+ dw23.ts_pll_enable = 0;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C,
+ dw23.val);
+ if (err)
+ return err;
+ }
+
+ /* Set the frequency */
+ dw9.time_ref_freq_sel = clk_freq;
+
+ /* Enable the correct receiver */
+ if (clk_src == ICE_CLK_SRC_TCXO) {
+ dw9.time_ref_en = 0;
+ dw9.clk_eref0_en = 1;
+ } else {
+ dw9.time_ref_en = 1;
+ dw9.clk_eref0_en = 0;
+ }
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
+ if (err)
+ return err;
+
+ /* Choose the referenced frequency */
+ dw16.tspll_ck_refclkfreq =
+ e825c_cgu_params[clk_freq].tspll_ck_refclkfreq;
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, dw16.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL feedback divisor */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
+ if (err)
+ return err;
+
+ dw19.tspll_fbdiv_intgr =
+ e825c_cgu_params[clk_freq].tspll_fbdiv_intgr;
+ dw19.tspll_ndivratio =
+ e825c_cgu_params[clk_freq].tspll_ndivratio;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL post divisor */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
+ if (err)
+ return err;
+
+ /* These two are constant for E825C */
+ dw22.time1588clk_div = 5;
+ dw22.time1588clk_sel_div2 = 0;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
+ if (err)
+ return err;
+
+ /* Configure the TS PLL pre divisor and clock source */
+ err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val);
+ if (err)
+ return err;
+
+ dw23.ref1588_ck_div =
+ e825c_cgu_params[clk_freq].ref1588_ck_div;
+ dw23.time_ref_sel = clk_src;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
+ if (err)
+ return err;
+
+ dw24.tspll_fbdiv_frac =
+ e825c_cgu_params[clk_freq].tspll_fbdiv_frac;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
+ if (err)
+ return err;
+
+ /* Finally, enable the PLL */
+ dw23.ts_pll_enable = 1;
+
+ err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val);
+ if (err)
+ return err;
+
+	/* Wait, then check whether the PLL locked */
+ usleep_range(1000, 5000);
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val);
+ if (err)
+ return err;
+
+ if (!ro_lock.plllock_true_lock_cri) {
+ dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
+ return -EBUSY;
+ }
+
+ /* Log the current clock configuration */
+ ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
+ dw24.ts_pll_enable ? "enabled" : "disabled",
+ ice_clk_src_str(dw23.time_ref_sel),
+ ice_clk_freq_str(dw9.time_ref_freq_sel),
+ ro_lock.plllock_true_lock_cri ? "locked" : "unlocked");
+
+ return 0;
+}
+
+/**
+ * ice_cfg_cgu_pll_dis_sticky_bits_e82x - disable TS PLL sticky bits
+ * @hw: pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TS PLL sticky bits so that they do not
+ * latch when TS PLL lock is lost, but instead always show the current state.
+ *
+ * Return: 0 on success, other error codes when failed to read/write CGU
+ */
+static int ice_cfg_cgu_pll_dis_sticky_bits_e82x(struct ice_hw *hw)
+{
+ union tspll_cntr_bist_settings cntr_bist;
+ int err;
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
+ &cntr_bist.val);
+ if (err)
+ return err;
+
+ /* Disable sticky lock detection so lock err reported is accurate */
+ cntr_bist.i_plllock_sel_0 = 0;
+ cntr_bist.i_plllock_sel_1 = 0;
+
+ return ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
+ cntr_bist.val);
+}
+
+/**
+ * ice_cfg_cgu_pll_dis_sticky_bits_e825c - disable TS PLL sticky bits for E825-C
+ * @hw: pointer to the HW struct
+ *
+ * Configure the Clock Generation Unit TS PLL sticky bits so that they do not
+ * latch when TS PLL lock is lost, but instead always show the current state.
+ *
+ * Return: 0 on success, other error codes when failed to read/write CGU
+ */
+static int ice_cfg_cgu_pll_dis_sticky_bits_e825c(struct ice_hw *hw)
+{
+ union tspll_bw_tdc_e825c bw_tdc;
+ int err;
+
+ err = ice_read_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, &bw_tdc.val);
+ if (err)
+ return err;
+
+ bw_tdc.i_plllock_sel_1_0 = 0;
+
+ return ice_write_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, bw_tdc.val);
+}
+
+/**
+ * ice_init_cgu_e82x - Initialize CGU with settings from firmware
+ * @hw: pointer to the HW structure
+ *
+ * Initialize the Clock Generation Unit of the E822 device.
+ *
+ * Return: 0 on success, other error codes when failed to read/write/cfg CGU
+ */
+static int ice_init_cgu_e82x(struct ice_hw *hw)
+{
+ struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
+ int err;
+
+ /* Disable sticky lock detection so lock err reported is accurate */
+ if (ice_is_e825c(hw))
+ err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw);
+ else
+ err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw);
+ if (err)
+ return err;
+
+ /* Configure the CGU PLL using the parameters from the function
+ * capabilities.
+ */
+ if (ice_is_e825c(hw))
+ err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref,
+ (enum ice_clk_src)ts_info->clk_src);
+ else
+ err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
+ (enum ice_clk_src)ts_info->clk_src);
+
+ return err;
+}
+
+/**
+ * ice_ptp_tmr_cmd_to_src_reg - Convert to source timer command value
+ * @hw: pointer to HW struct
* @cmd: Timer command
*
- * Prepare the source timer for an upcoming timer sync command.
+ * Return: the source timer command register value for the given PTP timer
+ * command.
*/
-void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+static u32 ice_ptp_tmr_cmd_to_src_reg(struct ice_hw *hw,
+ enum ice_ptp_tmr_cmd cmd)
{
- u32 cmd_val;
- u8 tmr_idx;
+ u32 cmd_val, tmr_idx;
+
+ switch (cmd) {
+ case ICE_PTP_INIT_TIME:
+ cmd_val = GLTSYN_CMD_INIT_TIME;
+ break;
+ case ICE_PTP_INIT_INCVAL:
+ cmd_val = GLTSYN_CMD_INIT_INCVAL;
+ break;
+ case ICE_PTP_ADJ_TIME:
+ cmd_val = GLTSYN_CMD_ADJ_TIME;
+ break;
+ case ICE_PTP_ADJ_TIME_AT_TIME:
+ cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
+ break;
+ case ICE_PTP_NOP:
+ case ICE_PTP_READ_TIME:
+ cmd_val = GLTSYN_CMD_READ_TIME;
+ break;
+ default:
+ dev_warn(ice_hw_to_dev(hw),
+ "Ignoring unrecognized timer command %u\n", cmd);
+ cmd_val = 0;
+ }
tmr_idx = ice_get_ptp_src_clock_index(hw);
- cmd_val = tmr_idx << SEL_CPK_SRC;
+
+ return tmr_idx << SEL_CPK_SRC | cmd_val;
+}
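The returned value packs two fields: the owned source timer index in the
bits above SEL_CPK_SRC, and the command encoding in the low bits. For
example (hypothetical index):

	/* tmr_idx == 1, cmd == ICE_PTP_INIT_TIME:
	 * cmd_val = (1 << SEL_CPK_SRC) | GLTSYN_CMD_INIT_TIME
	 */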
+
+/**
+ * ice_ptp_tmr_cmd_to_port_reg - Convert to port timer command value
+ * @hw: pointer to HW struct
+ * @cmd: Timer command
+ *
+ * Note that some hardware families use a different command register value for
+ * the PHY ports, while other hardware families use the same register values
+ * as the source timer.
+ *
+ * Return: the PHY port timer command register value for the given PTP timer
+ * command.
+ */
+static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw,
+ enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val, tmr_idx;
+
+ /* Certain hardware families share the same register values for the
+ * port register and source timer register.
+ */
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_E810:
+ return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810;
+ default:
+ break;
+ }
switch (cmd) {
case ICE_PTP_INIT_TIME:
- cmd_val |= GLTSYN_CMD_INIT_TIME;
+ cmd_val = PHY_CMD_INIT_TIME;
break;
case ICE_PTP_INIT_INCVAL:
- cmd_val |= GLTSYN_CMD_INIT_INCVAL;
+ cmd_val = PHY_CMD_INIT_INCVAL;
break;
case ICE_PTP_ADJ_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_TIME;
+ cmd_val = PHY_CMD_ADJ_TIME;
break;
case ICE_PTP_ADJ_TIME_AT_TIME:
- cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
+ cmd_val = PHY_CMD_ADJ_TIME_AT_TIME;
break;
case ICE_PTP_READ_TIME:
- cmd_val |= GLTSYN_CMD_READ_TIME;
+ cmd_val = PHY_CMD_READ_TIME;
break;
case ICE_PTP_NOP:
+ cmd_val = 0;
break;
+ default:
+ dev_warn(ice_hw_to_dev(hw),
+ "Ignoring unrecognized timer command %u\n", cmd);
+ cmd_val = 0;
}
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ return tmr_idx << SEL_PHY_SRC | cmd_val;
+}
+
+/**
+ * ice_ptp_src_cmd - Prepare source timer for a timer command
+ * @hw: pointer to HW structure
+ * @cmd: Timer command
+ *
+ * Prepare the source timer for an upcoming timer sync command.
+ */
+void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd);
+
wr32(hw, GLTSYN_CMD, cmd_val);
}
@@ -274,10 +867,1839 @@ void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
*/
static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
{
+ struct ice_pf *pf = container_of(hw, struct ice_pf, hw);
+
+ guard(spinlock)(&pf->adapter->ptp_gltsyn_time_lock);
wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
ice_flush(hw);
}
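guard(spinlock)(&lock), from <linux/cleanup.h>, acquires the lock at the
declaration and releases it automatically when the enclosing scope ends, so
no early return can leak the lock. The body above is roughly equivalent to
this open-coded sketch:

	spin_lock(&pf->adapter->ptp_gltsyn_time_lock);
	wr32(hw, GLTSYN_CMD_SYNC, SYNC_EXEC_CMD);
	ice_flush(hw);
	spin_unlock(&pf->adapter->ptp_gltsyn_time_lock);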
+/* 56G PHY device functions
+ *
+ * The following functions operate on devices with the ETH 56G PHY.
+ */
+
+/**
+ * ice_write_phy_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @phy_idx: PHY index
+ * @addr: PHY register address
+ * @val: Value to write
+ *
+ * Return: 0 on success, other error codes when failed to write to PHY
+ */
+static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
+ u32 val)
+{
+ struct ice_sbq_msg_input phy_msg;
+ int err;
+
+ phy_msg.opcode = ice_sbq_msg_wr;
+
+ phy_msg.msg_addr_low = lower_16_bits(addr);
+ phy_msg.msg_addr_high = upper_16_bits(addr);
+
+ phy_msg.data = val;
+ phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
+
+ err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
+
+ if (err)
+ ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
+ err);
+
+ return err;
+}
+
+/**
+ * ice_read_phy_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @phy_idx: PHY index
+ * @addr: PHY register address
+ * @val: storage for the register value read
+ *
+ * Return: 0 on success, other error codes when failed to read from PHY
+ */
+static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr,
+ u32 *val)
+{
+ struct ice_sbq_msg_input phy_msg;
+ int err;
+
+ phy_msg.opcode = ice_sbq_msg_rd;
+
+ phy_msg.msg_addr_low = lower_16_bits(addr);
+ phy_msg.msg_addr_high = upper_16_bits(addr);
+
+ phy_msg.data = 0;
+ phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx];
+
+ err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n",
+ err);
+ return err;
+ }
+
+ *val = phy_msg.data;
+
+ return 0;
+}
+
+/**
+ * ice_phy_res_address_eth56g - Calculate a PHY port register address
+ * @port: Port number to be written
+ * @res_type: resource type (register/memory)
+ * @offset: Offset from PHY port register base
+ * @addr: The result address
+ *
+ * Return:
+ * * %0 - success
+ * * %-EINVAL - invalid port number or resource type
+ */
+static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type,
+ u32 offset, u32 *addr)
+{
+ u8 lane = port % ICE_PORTS_PER_QUAD;
+ u8 phy = ICE_GET_QUAD_NUM(port);
+
+ if (res_type >= NUM_ETH56G_PHY_RES)
+ return -EINVAL;
+
+ *addr = eth56g_phy_res[res_type].base[phy] +
+ lane * eth56g_phy_res[res_type].step + offset;
+ return 0;
+}
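+
+/* For illustration: assuming ICE_PORTS_PER_QUAD is 4, port 6 maps to
+ * lane 6 % 4 = 2 of quad ICE_GET_QUAD_NUM(6) = 1, so the resulting
+ * address is base[1] + 2 * step + offset for the given resource type.
+ */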
+
+/**
+ * ice_write_port_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: Value to write
+ * @res_type: resource type (register/memory)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 val, enum eth56g_res_type res_type)
+{
+ u8 phy_port = port % hw->ptp.ports_per_phy;
+ u8 phy_idx = port / hw->ptp.ports_per_phy;
+ u32 addr;
+ int err;
+
+ if (port >= hw->ptp.num_lports)
+ return -EINVAL;
+
+ err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ if (err)
+ return err;
+
+ return ice_write_phy_eth56g(hw, phy_idx, addr, val);
+}
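+
+/* For illustration: with ports_per_phy == 4 (as programmed for E825C
+ * later in this file), port 6 decomposes into phy_idx = 6 / 4 = 1 and
+ * phy_port = 6 % 4 = 2, i.e. lane 2 of the second PHY.
+ */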
+
+/**
+ * ice_read_port_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @offset: PHY register offset
+ * @port: Port number
+ * @val: on return, the value read from the PHY
+ * @res_type: resource type (register/memory)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 *val, enum eth56g_res_type res_type)
+{
+ u8 phy_port = port % hw->ptp.ports_per_phy;
+ u8 phy_idx = port / hw->ptp.ports_per_phy;
+ u32 addr;
+ int err;
+
+ if (port >= hw->ptp.num_lports)
+ return -EINVAL;
+
+ err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr);
+ if (err)
+ return err;
+
+ return ice_read_phy_eth56g(hw, phy_idx, addr, val);
+}
+
+/**
+ * ice_write_ptp_reg_eth56g - Write a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_write_mac_reg_eth56g - Write a MAC PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_mac_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC);
+}
+
+/**
+ * ice_write_xpcs_reg_eth56g - Write an XPCS PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_xpcs_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val,
+ ETH56G_PHY_REG_XPCS);
+}
+
+/**
+ * ice_read_ptp_reg_eth56g - Read a PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_read_mac_reg_eth56g - Read a MAC PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_mac_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC);
+}
+
+/**
+ * ice_read_gpcs_reg_eth56g - Read a GPCS PHY port register
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_gpcs_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_GPCS);
+}
+
+/**
+ * ice_read_port_mem_eth56g - Read a PHY port memory location
+ * @hw: pointer to the HW struct
+ * @port: Port number to be read
+ * @offset: Offset from PHY port register base
+ * @val: Pointer to the value to read (out param)
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to read from PHY
+ */
+static int ice_read_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 *val)
+{
+ return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP);
+}
+
+/**
+ * ice_write_port_mem_eth56g - Write a PHY port memory location
+ * @hw: pointer to the HW struct
+ * @port: Port number to be written
+ * @offset: Offset from PHY port register base
+ * @val: Value to write
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - invalid port number or resource type
+ * * %other - failed to write to PHY
+ */
+static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset,
+ u32 val)
+{
+ return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP);
+}
+
+/**
+ * ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 64bit register
+ *
+ * On success, write the matching high register offset to @high_addr.
+ *
+ * Return: true if the provided low address is one of the known 64bit PHY values
+ * represented as two 32bit registers, false otherwise.
+ */
+static bool ice_is_64b_phy_reg_eth56g(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case PHY_REG_TX_TIMER_INC_PRE_L:
+ *high_addr = PHY_REG_TX_TIMER_INC_PRE_U;
+ return true;
+ case PHY_REG_RX_TIMER_INC_PRE_L:
+ *high_addr = PHY_REG_RX_TIMER_INC_PRE_U;
+ return true;
+ case PHY_REG_TX_CAPTURE_L:
+ *high_addr = PHY_REG_TX_CAPTURE_U;
+ return true;
+ case PHY_REG_RX_CAPTURE_L:
+ *high_addr = PHY_REG_RX_CAPTURE_U;
+ return true;
+ case PHY_REG_TOTAL_TX_OFFSET_L:
+ *high_addr = PHY_REG_TOTAL_TX_OFFSET_U;
+ return true;
+ case PHY_REG_TOTAL_RX_OFFSET_L:
+ *high_addr = PHY_REG_TOTAL_RX_OFFSET_U;
+ return true;
+ case PHY_REG_TX_MEMORY_STATUS_L:
+ *high_addr = PHY_REG_TX_MEMORY_STATUS_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_is_40b_phy_reg_eth56g - Check if this is a 40bit PHY register
+ * @low_addr: the low address to check
+ * @high_addr: on return, contains the high address of the 40bit value
+ *
+ * On success, write the matching high register offset to @high_addr.
+ *
+ * Return: true if the provided low address is one of the known 40bit PHY
+ * values split into two registers with the lower 8 bits in the low register and
+ * the upper 32 bits in the high register, false otherwise.
+ */
+static bool ice_is_40b_phy_reg_eth56g(u16 low_addr, u16 *high_addr)
+{
+ switch (low_addr) {
+ case PHY_REG_TIMETUS_L:
+ *high_addr = PHY_REG_TIMETUS_U;
+ return true;
+ case PHY_PCS_REF_TUS_L:
+ *high_addr = PHY_PCS_REF_TUS_U;
+ return true;
+ case PHY_PCS_REF_INC_L:
+ *high_addr = PHY_PCS_REF_INC_U;
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**
+ * ice_read_64b_phy_reg_eth56g - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ * @res_type: resource type
+ *
+ * Check if the caller has specified a known 64 bit register offset, read
+ * the two registers associated with the 64bit value and return it in the
+ * val pointer.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to read from PHY
+ */
+static int ice_read_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr,
+ u64 *val, enum eth56g_res_type res_type)
+{
+ u16 high_addr;
+ u32 lo, hi;
+ int err;
+
+ if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr))
+ return -EINVAL;
+
+ err = ice_read_port_eth56g(hw, port, low_addr, &lo, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register %#08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_read_port_eth56g(hw, port, high_addr, &hi, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register %#08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ *val = ((u64)hi << 32) | lo;
+
+ return 0;
+}
+
+/**
+ * ice_read_64b_ptp_reg_eth56g - Read a 64bit value from PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: on return, the contents of the 64bit value from the PHY registers
+ *
+ * Check if the caller has specified a known 64 bit register offset, read
+ * the two registers associated with the 64bit value and return it in the
+ * val pointer.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to read from PHY
+ */
+static int ice_read_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr,
+ u64 *val)
+{
+ return ice_read_64b_phy_reg_eth56g(hw, port, low_addr, val,
+ ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_write_40b_phy_reg_eth56g - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ * @res_type: resource type
+ *
+ * Check if the caller has specified a known 40 bit register offset and write
+ * provided 40b value to the two associated registers by splitting it up into
+ * two chunks, the lower 8 bits and the upper 32 bits.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 40 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_40b_phy_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val,
+ enum eth56g_res_type res_type)
+{
+ u16 high_addr;
+ u32 lo, hi;
+ int err;
+
+ if (!ice_is_40b_phy_reg_eth56g(low_addr, &high_addr))
+ return -EINVAL;
+
+ lo = FIELD_GET(P_REG_40B_LOW_M, val);
+ hi = (u32)(val >> P_REG_40B_HIGH_S);
+
+ err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
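+
+/* For illustration: assuming P_REG_40B_LOW_M is GENMASK(7, 0) and
+ * P_REG_40B_HIGH_S is 8 (the 8/32 bit split described above), a 40b
+ * value of 0x1122334455 is written as lo = 0x55 and hi = 0x11223344.
+ */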
+
+/**
+ * ice_write_40b_ptp_reg_eth56g - Write a 40b value to the PHY
+ * @hw: pointer to the HW struct
+ * @port: port to write to
+ * @low_addr: offset of the low register
+ * @val: 40b value to write
+ *
+ * Check if the caller has specified a known 40 bit register offset and write
+ * provided 40b value to the two associated registers by splitting it up into
+ * two chunks, the lower 8 bits and the upper 32 bits.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 40 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_40b_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val)
+{
+ return ice_write_40b_phy_reg_eth56g(hw, port, low_addr, val,
+ ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_write_64b_phy_reg_eth56g - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: the contents of the 64bit value to write to PHY
+ * @res_type: resource type
+ *
+ * Check if the caller has specified a known 64 bit register offset and write
+ * the 64bit value to the two associated 32bit PHY registers.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val,
+ enum eth56g_res_type res_type)
+{
+ u16 high_addr;
+ u32 lo, hi;
+ int err;
+
+ if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr))
+ return -EINVAL;
+
+ lo = lower_32_bits(val);
+ hi = upper_32_bits(val);
+
+ err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x\n, err %d",
+ low_addr, err);
+ return err;
+ }
+
+ err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x\n, err %d",
+ high_addr, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_write_64b_ptp_reg_eth56g - Write a 64bit value to PHY registers
+ * @hw: pointer to the HW struct
+ * @port: PHY port to read from
+ * @low_addr: offset of the lower register to read from
+ * @val: the contents of the 64bit value to write to PHY
+ *
+ * Check if the caller has specified a known 64 bit register offset and write
+ * the 64bit value to the two associated 32bit PHY registers.
+ *
+ * Return:
+ * * %0 - success
+ * * %EINVAL - not a 64 bit register
+ * * %other - failed to write to PHY
+ */
+static int ice_write_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port,
+ u16 low_addr, u64 val)
+{
+ return ice_write_64b_phy_reg_eth56g(hw, port, low_addr, val,
+ ETH56G_PHY_REG_PTP);
+}
+
+/**
+ * ice_read_ptp_tstamp_eth56g - Read a PHY timestamp out of the port memory
+ * @hw: pointer to the HW struct
+ * @port: the port to read from
+ * @idx: the timestamp index to read
+ * @tstamp: on return, the 40bit timestamp value
+ *
+ * Read a 40bit timestamp value out of the two associated entries in the
+ * port memory block of the internal PHYs of the 56G devices.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx,
+ u64 *tstamp)
+{
+ u16 lo_addr, hi_addr;
+ u32 lo, hi;
+ int err;
+
+ lo_addr = (u16)PHY_TSTAMP_L(idx);
+ hi_addr = (u16)PHY_TSTAMP_U(idx);
+
+ err = ice_read_port_mem_eth56g(hw, port, lo_addr, &lo);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_read_port_mem_eth56g(hw, port, hi_addr, &hi);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ /* For 56G based internal PHYs, the timestamp is reported with the
+ * lower 8 bits in the low register, and the upper 32 bits in the high
+ * register.
+ */
+ *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+
+ return 0;
+}
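+
+/* For illustration: assuming TS_PHY_HIGH_S is 8 and TS_PHY_LOW_M is
+ * GENMASK(7, 0) (matching the comment above), hi = 0x12345678 and
+ * lo = 0xAB reassemble into the 40b timestamp 0x12345678AB.
+ */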
+
+/**
+ * ice_clear_ptp_tstamp_eth56g - Clear a timestamp from the port block
+ * @hw: pointer to the HW struct
+ * @port: the port to clear the timestamp from
+ * @idx: the timestamp index to reset
+ *
+ * Read and then forcibly clear the timestamp index to ensure the valid bit is
+ * cleared and the timestamp status bit is reset in the PHY port memory of
+ * internal PHYs of the 56G devices.
+ *
+ * To directly clear the contents of the timestamp block entirely, discarding
+ * all timestamp data at once, software should instead use
+ * ice_ptp_reset_ts_memory_eth56g().
+ *
+ * This function should only be called on an idx whose bit is set according to
+ * ice_get_phy_tx_tstamp_ready().
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_clear_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx)
+{
+ u64 unused_tstamp;
+ u16 lo_addr;
+ int err;
+
+ /* Read the timestamp register to ensure the timestamp status bit is
+ * cleared.
+ */
+ err = ice_read_ptp_tstamp_eth56g(hw, port, idx, &unused_tstamp);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read the PHY timestamp register for port %u, idx %u, err %d\n",
+ port, idx, err);
+ }
+
+ lo_addr = (u16)PHY_TSTAMP_L(idx);
+
+ err = ice_write_port_mem_eth56g(hw, port, lo_addr, 0);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for port %u, idx %u, err %d\n",
+ port, idx, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_reset_ts_memory_eth56g - Clear all timestamps from the port block
+ * @hw: pointer to the HW struct
+ */
+static void ice_ptp_reset_ts_memory_eth56g(struct ice_hw *hw)
+{
+ unsigned int port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L,
+ 0);
+ ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_U,
+ 0);
+ }
+}
+
+/**
+ * ice_ptp_prep_port_time_eth56g - Prepare one PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @port: port number
+ * @time: time to initialize the PHY port clocks to
+ *
+ * Write a new initial time value into registers of a specific PHY port.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_port_time_eth56g(struct ice_hw *hw, u8 port,
+ u64 time)
+{
+ int err;
+
+ /* Tx case */
+ err = ice_write_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L,
+ time);
+ if (err)
+ return err;
+
+ /* Rx case */
+ return ice_write_64b_ptp_reg_eth56g(hw, port,
+ PHY_REG_RX_TIMER_INC_PRE_L, time);
+}
+
+/**
+ * ice_ptp_prep_phy_time_eth56g - Prepare PHY port with initial time
+ * @hw: pointer to the HW struct
+ * @time: Time to initialize the PHY port clocks to
+ *
+ * Program the PHY port registers with a new initial time value. The port
+ * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
+ * command. The time value is the upper 32 bits of the PHY timer, usually in
+ * units of nominal nanoseconds.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_phy_time_eth56g(struct ice_hw *hw, u32 time)
+{
+ u64 phy_time;
+ u8 port;
+
+ /* The time represents the upper 32 bits of the PHY timer, so we need
+ * to shift to account for this when programming.
+ */
+ phy_time = (u64)time << 32;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_ptp_prep_port_time_eth56g(hw, port, phy_time);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
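+
+/* For illustration: an initial time of 0x00000002 (upper 32 bits, in
+ * nominal nanoseconds) is programmed as phy_time = 0x0000000200000000,
+ * leaving the sub-nanosecond lower 32 bits zeroed.
+ */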
+
+/**
+ * ice_ptp_prep_port_adj_eth56g - Prepare a single port for time adjust
+ * @hw: pointer to HW struct
+ * @port: Port number to be programmed
+ * @time: time in cycles to adjust the port clocks
+ *
+ * Program the port for an atomic adjustment by writing the Tx and Rx timer
+ * registers. The atomic adjustment won't be completed until the driver issues
+ * an ICE_PTP_ADJ_TIME command.
+ *
+ * Note that time is not in units of nanoseconds. It is in clock time
+ * including the lower sub-nanosecond portion of the port timer.
+ *
+ * Negative adjustments are supported using 2s complement arithmetic.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_port_adj_eth56g(struct ice_hw *hw, u8 port, s64 time)
+{
+ u32 l_time, u_time;
+ int err;
+
+ l_time = lower_32_bits(time);
+ u_time = upper_32_bits(time);
+
+ /* Tx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ /* Rx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_L,
+ l_time);
+ if (err)
+ goto exit_err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_U,
+ u_time);
+ if (err)
+ goto exit_err;
+
+ return 0;
+
+exit_err:
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n",
+ port, err);
+ return err;
+}
+
+/**
+ * ice_ptp_prep_phy_adj_eth56g - Prep PHY ports for a time adjustment
+ * @hw: pointer to HW struct
+ * @adj: adjustment in nanoseconds
+ *
+ * Prepare the PHY ports for an atomic time adjustment by programming the PHY
+ * Tx and Rx port registers. The actual adjustment is completed by issuing an
+ * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_phy_adj_eth56g(struct ice_hw *hw, s32 adj)
+{
+ s64 cycles;
+ u8 port;
+
+ /* The port clock supports adjustment of the sub-nanosecond portion of
+ * the clock (lowest 32 bits). We shift the provided adjustment in
+ * nanoseconds by 32 to calculate the appropriate adjustment to program
+ * into the PHY ports.
+ */
+ cycles = (s64)adj << 32;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_ptp_prep_port_adj_eth56g(hw, port, cycles);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
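+
+/* For illustration: adj = -2 ns becomes cycles = (s64)-2 << 32 =
+ * 0xFFFFFFFE00000000, which the port timer interprets as a 2s
+ * complement decrement of exactly 2 ns.
+ */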
+
+/**
+ * ice_ptp_prep_phy_incval_eth56g - Prepare PHY ports for a new increment value
+ * @hw: pointer to HW struct
+ * @incval: new increment value to prepare
+ *
+ * Prepare each of the PHY ports for a new increment value by programming the
+ * port's TIMETUS registers. The new increment value will be updated after
+ * issuing an ICE_PTP_INIT_INCVAL command.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_prep_phy_incval_eth56g(struct ice_hw *hw, u64 incval)
+{
+ u8 port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L,
+ incval);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_ptp_read_port_capture_eth56g - Read a port's local time capture
+ * @hw: pointer to HW struct
+ * @port: Port number to read
+ * @tx_ts: on return, the Tx port time capture
+ * @rx_ts: on return, the Rx port time capture
+ *
+ * Read the port's Tx and Rx local time capture values.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+static int ice_ptp_read_port_capture_eth56g(struct ice_hw *hw, u8 port,
+ u64 *tx_ts, u64 *rx_ts)
+{
+ int err;
+
+ /* Tx case */
+ err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_CAPTURE_L,
+ tx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "tx_init = %#016llx\n", *tx_ts);
+
+ /* Rx case */
+ err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_RX_CAPTURE_L,
+ rx_ts);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n",
+ err);
+ return err;
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "rx_init = %#016llx\n", *rx_ts);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_write_port_cmd_eth56g - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: Port to which cmd has to be sent
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare the requested port for an upcoming timer sync command.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_ptp_write_port_cmd_eth56g(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd)
+{
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
+ int err;
+
+ /* Tx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ /* Rx case */
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TMR_CMD, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_phy_get_speed_eth56g - Get link speed based on PHY link type
+ * @li: pointer to link information struct
+ *
+ * Return: simplified ETH56G PHY speed
+ */
+static enum ice_eth56g_link_spd
+ice_phy_get_speed_eth56g(struct ice_link_status *li)
+{
+ u16 speed = ice_get_link_speed_based_on_phy_type(li->phy_type_low,
+ li->phy_type_high);
+
+ switch (speed) {
+ case ICE_AQ_LINK_SPEED_1000MB:
+ return ICE_ETH56G_LNK_SPD_1G;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ return ICE_ETH56G_LNK_SPD_2_5G;
+ case ICE_AQ_LINK_SPEED_10GB:
+ return ICE_ETH56G_LNK_SPD_10G;
+ case ICE_AQ_LINK_SPEED_25GB:
+ return ICE_ETH56G_LNK_SPD_25G;
+ case ICE_AQ_LINK_SPEED_40GB:
+ return ICE_ETH56G_LNK_SPD_40G;
+ case ICE_AQ_LINK_SPEED_50GB:
+ switch (li->phy_type_low) {
+ case ICE_PHY_TYPE_LOW_50GBASE_SR:
+ case ICE_PHY_TYPE_LOW_50GBASE_FR:
+ case ICE_PHY_TYPE_LOW_50GBASE_LR:
+ case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
+ case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
+ case ICE_PHY_TYPE_LOW_50G_AUI1:
+ return ICE_ETH56G_LNK_SPD_50G;
+ default:
+ return ICE_ETH56G_LNK_SPD_50G2;
+ }
+ case ICE_AQ_LINK_SPEED_100GB:
+ if (li->phy_type_high ||
+ li->phy_type_low == ICE_PHY_TYPE_LOW_100GBASE_SR2)
+ return ICE_ETH56G_LNK_SPD_100G2;
+ else
+ return ICE_ETH56G_LNK_SPD_100G;
+ default:
+ return ICE_ETH56G_LNK_SPD_1G;
+ }
+}
+
+/**
+ * ice_phy_cfg_parpcs_eth56g - Configure TUs per PAR/PCS clock cycle
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ *
+ * Configure the number of TUs for the PAR and PCS clocks used as part of the
+ * timestamp calibration process.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port)
+{
+ u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
+ u32 val;
+ int err;
+
+ err = ice_write_xpcs_reg_eth56g(hw, port, PHY_VENDOR_TXLANE_THRESH,
+ ICE_ETH56G_NOMINAL_THRESH4);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read VENDOR_TXLANE_THRESH, status: %d",
+ err);
+ return err;
+ }
+
+ switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
+ case ICE_ETH56G_LNK_SPD_1G:
+ case ICE_ETH56G_LNK_SPD_2_5G:
+ err = ice_read_ptp_reg_eth56g(hw, port_blk,
+ PHY_GPCS_CONFIG_REG0, &val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d",
+ err);
+ return err;
+ }
+
+ val &= ~PHY_GPCS_CONFIG_REG0_TX_THR_M;
+ val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M,
+ ICE_ETH56G_NOMINAL_TX_THRESH);
+
+ err = ice_write_ptp_reg_eth56g(hw, port_blk,
+ PHY_GPCS_CONFIG_REG0, val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d",
+ err);
+ return err;
+ }
+ break;
+ default:
+ break;
+ }
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_TUS_L,
+ ICE_ETH56G_NOMINAL_PCS_REF_TUS);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_TUS, status: %d",
+ err);
+ return err;
+ }
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_INC_L,
+ ICE_ETH56G_NOMINAL_PCS_REF_INC);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_INC, status: %d",
+ err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_phy_cfg_ptp_1step_eth56g - Configure 1-step PTP settings
+ * @hw: Pointer to the HW struct
+ * @port: Port to configure
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port)
+{
+ u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
+ u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1);
+ bool enable, sfd_ena;
+ u32 val, peer_delay;
+ int err;
+
+ enable = hw->ptp.phy.eth56g.onestep_ena;
+ peer_delay = hw->ptp.phy.eth56g.peer_delay;
+ sfd_ena = hw->ptp.phy.eth56g.sfd_ena;
+
+ /* PHY_PTP_1STEP_CONFIG */
+ err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val);
+ if (err)
+ return err;
+
+ if (enable)
+ val |= blk_port;
+ else
+ val &= ~blk_port;
+
+ val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M);
+
+ err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val);
+ if (err)
+ return err;
+
+ /* PHY_PTP_1STEP_PEER_DELAY */
+ val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay);
+ if (peer_delay)
+ val |= PHY_PTP_1STEP_PD_ADD_PD_M;
+ val |= PHY_PTP_1STEP_PD_DLY_V_M;
+ err = ice_write_ptp_reg_eth56g(hw, port_blk,
+ PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ if (err)
+ return err;
+
+ val &= ~PHY_PTP_1STEP_PD_DLY_V_M;
+ err = ice_write_ptp_reg_eth56g(hw, port_blk,
+ PHY_PTP_1STEP_PEER_DELAY(blk_port), val);
+ if (err)
+ return err;
+
+ /* PHY_MAC_XIF_MODE */
+ err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val);
+ if (err)
+ return err;
+
+ val &= ~(PHY_MAC_XIF_1STEP_ENA_M | PHY_MAC_XIF_TS_BIN_MODE_M |
+ PHY_MAC_XIF_TS_SFD_ENA_M | PHY_MAC_XIF_GMII_TS_SEL_M);
+
+ switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) {
+ case ICE_ETH56G_LNK_SPD_1G:
+ case ICE_ETH56G_LNK_SPD_2_5G:
+ val |= PHY_MAC_XIF_GMII_TS_SEL_M;
+ break;
+ default:
+ break;
+ }
+
+ val |= FIELD_PREP(PHY_MAC_XIF_1STEP_ENA_M, enable) |
+ FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) |
+ FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena);
+
+ return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val);
+}
+
+/**
+ * mul_u32_u32_fx_q9 - Multiply two u32 fixed point Q9 values
+ * @a: multiplier value
+ * @b: multiplicand value
+ *
+ * Return: result of multiplication
+ */
+static u32 mul_u32_u32_fx_q9(u32 a, u32 b)
+{
+ return (u32)(((u64)a * b) >> ICE_ETH56G_MAC_CFG_FRAC_W);
+}
+
+/**
+ * add_u32_u32_fx - Add two u32 fixed point values and discard overflow
+ * @a: first value
+ * @b: second value
+ *
+ * Return: result of addition
+ */
+static u32 add_u32_u32_fx(u32 a, u32 b)
+{
+ return lower_32_bits(((u64)a + b));
+}
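+
+/* For illustration: assuming ICE_ETH56G_MAC_CFG_FRAC_W is 9 (as the
+ * _q9 name suggests), 1.5 is encoded as 768 and 2.5 as 1280, so
+ * mul_u32_u32_fx_q9(768, 1280) = (768 * 1280) >> 9 = 1920, i.e. 3.75.
+ */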
+
+/**
+ * ice_ptp_calc_bitslip_eth56g - Calculate bitslip value
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ * @bs: bitslip multiplier
+ * @fc: FC-FEC enabled
+ * @rs: RS-FEC enabled
+ * @spd: link speed
+ *
+ * Return: calculated bitslip value
+ */
+static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs,
+ bool fc, bool rs,
+ enum ice_eth56g_link_spd spd)
+{
+ u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1);
+ u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1);
+ u32 bitslip;
+ int err;
+
+ if (!bs || rs)
+ return 0;
+
+ if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G)
+ err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP,
+ &bitslip);
+ else
+ err = ice_read_ptp_reg_eth56g(hw, port_blk,
+ PHY_REG_SD_BIT_SLIP(port_offset),
+ &bitslip);
+ if (err)
+ return 0;
+
+ if (spd == ICE_ETH56G_LNK_SPD_1G && !bitslip) {
+ /* Bitslip register value of 0 corresponds to 10 so substitute
+ * it for calculations
+ */
+ bitslip = 10;
+ } else if (spd == ICE_ETH56G_LNK_SPD_10G ||
+ spd == ICE_ETH56G_LNK_SPD_25G) {
+ if (fc)
+ bitslip = bitslip * 2 + 32;
+ else
+ bitslip = (u32)((s32)bitslip * -1 + 20);
+ }
+
+ bitslip <<= ICE_ETH56G_MAC_CFG_FRAC_W;
+ return mul_u32_u32_fx_q9(bitslip, bs);
+}
+
+/**
+ * ice_ptp_calc_deskew_eth56g - Calculate deskew value
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ * @ds: deskew multiplier
+ * @rs: RS-FEC enabled
+ * @spd: link speed
+ *
+ * Return: calculated deskew value
+ */
+static u32 ice_ptp_calc_deskew_eth56g(struct ice_hw *hw, u8 port, u32 ds,
+ bool rs, enum ice_eth56g_link_spd spd)
+{
+ u32 deskew_i, deskew_f;
+ int err;
+
+ if (!ds)
+ return 0;
+
+ read_poll_timeout(ice_read_ptp_reg_eth56g, err,
+ FIELD_GET(PHY_REG_DESKEW_0_VALID, deskew_i), 500,
+ 50 * USEC_PER_MSEC, false, hw, port, PHY_REG_DESKEW_0,
+ &deskew_i);
+ if (err)
+ return 0;
+
+ deskew_f = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL_FRAC, deskew_i);
+ deskew_i = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL, deskew_i);
+
+ if (rs && spd == ICE_ETH56G_LNK_SPD_50G2)
+ ds = 0x633; /* 3.1 */
+ else if (rs && spd == ICE_ETH56G_LNK_SPD_100G)
+ ds = 0x31b; /* 1.552 */
+
+ deskew_i = FIELD_PREP(ICE_ETH56G_MAC_CFG_RX_OFFSET_INT, deskew_i);
+ /* Shift 3 fractional bits to the end of the integer part */
+ deskew_f <<= ICE_ETH56G_MAC_CFG_FRAC_W - PHY_REG_DESKEW_0_RLEVEL_FRAC_W;
+ return mul_u32_u32_fx_q9(deskew_i | deskew_f, ds);
+}
+
+/**
+ * ice_phy_set_offsets_eth56g - Set Tx/Rx offset values
+ * @hw: pointer to the HW struct
+ * @port: port to configure
+ * @spd: link speed
+ * @cfg: structure to store output values
+ * @fc: FC-FEC enabled
+ * @rs: RS-FEC enabled
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_phy_set_offsets_eth56g(struct ice_hw *hw, u8 port,
+ enum ice_eth56g_link_spd spd,
+ const struct ice_eth56g_mac_reg_cfg *cfg,
+ bool fc, bool rs)
+{
+ u32 rx_offset, tx_offset, bs_ds;
+ bool onestep, sfd;
+
+ onestep = hw->ptp.phy.eth56g.onestep_ena;
+ sfd = hw->ptp.phy.eth56g.sfd_ena;
+ bs_ds = cfg->rx_offset.bs_ds;
+
+ if (fc)
+ rx_offset = cfg->rx_offset.fc;
+ else if (rs)
+ rx_offset = cfg->rx_offset.rs;
+ else
+ rx_offset = cfg->rx_offset.no_fec;
+
+ rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.serdes);
+ if (sfd)
+ rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.sfd);
+
+ if (spd < ICE_ETH56G_LNK_SPD_40G)
+ bs_ds = ice_ptp_calc_bitslip_eth56g(hw, port, bs_ds, fc, rs,
+ spd);
+ else
+ bs_ds = ice_ptp_calc_deskew_eth56g(hw, port, bs_ds, rs, spd);
+ rx_offset = add_u32_u32_fx(rx_offset, bs_ds);
+ rx_offset &= ICE_ETH56G_MAC_CFG_RX_OFFSET_INT |
+ ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC;
+
+ if (fc)
+ tx_offset = cfg->tx_offset.fc;
+ else if (rs)
+ tx_offset = cfg->tx_offset.rs;
+ else
+ tx_offset = cfg->tx_offset.no_fec;
+ tx_offset += cfg->tx_offset.serdes + cfg->tx_offset.sfd * sfd +
+ cfg->tx_offset.onestep * onestep;
+
+ ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_OFFSET, rx_offset);
+ return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_OFFSET, tx_offset);
+}
+
+/**
+ * ice_phy_cfg_mac_eth56g - Configure MAC for PTP
+ * @hw: Pointer to the HW struct
+ * @port: Port to configure
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+static int ice_phy_cfg_mac_eth56g(struct ice_hw *hw, u8 port)
+{
+ const struct ice_eth56g_mac_reg_cfg *cfg;
+ enum ice_eth56g_link_spd spd;
+ struct ice_link_status *li;
+ bool fc = false;
+ bool rs = false;
+ bool onestep;
+ u32 val;
+ int err;
+
+ onestep = hw->ptp.phy.eth56g.onestep_ena;
+ li = &hw->port_info->phy.link_info;
+ spd = ice_phy_get_speed_eth56g(li);
+ if (!!(li->an_info & ICE_AQ_FEC_EN)) {
+ if (spd == ICE_ETH56G_LNK_SPD_10G) {
+ fc = true;
+ } else {
+ fc = !!(li->fec_info & ICE_AQ_LINK_25G_KR_FEC_EN);
+ rs = !!(li->fec_info & ~ICE_AQ_LINK_25G_KR_FEC_EN);
+ }
+ }
+ cfg = &eth56g_mac_cfg[spd];
+
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_MODULO, 0);
+ if (err)
+ return err;
+
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_MODULO, 0);
+ if (err)
+ return err;
+
+ val = FIELD_PREP(PHY_MAC_TSU_CFG_TX_MODE_M,
+ cfg->tx_mode.def + rs * cfg->tx_mode.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M, cfg->tx_mk_dly) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M,
+ cfg->tx_cw_dly.def +
+ onestep * cfg->tx_cw_dly.onestep) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_RX_MODE_M,
+ cfg->rx_mode.def + rs * cfg->rx_mode.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M,
+ cfg->rx_mk_dly.def + rs * cfg->rx_mk_dly.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M,
+ cfg->rx_cw_dly.def + rs * cfg->rx_cw_dly.rs) |
+ FIELD_PREP(PHY_MAC_TSU_CFG_BLKS_PER_CLK_M, cfg->blks_per_clk);
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TSU_CONFIG, val);
+ if (err)
+ return err;
+
+ err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_BLOCKTIME,
+ cfg->blktime);
+ if (err)
+ return err;
+
+ err = ice_phy_set_offsets_eth56g(hw, port, spd, cfg, fc, rs);
+ if (err)
+ return err;
+
+ if (spd == ICE_ETH56G_LNK_SPD_25G && !rs)
+ val = 0;
+ else
+ val = cfg->mktime;
+
+ return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_MARKERTIME, val);
+}
+
+/**
+ * ice_phy_cfg_intr_eth56g - Configure TX timestamp interrupt
+ * @hw: pointer to the HW struct
+ * @port: the timestamp port
+ * @ena: enable or disable interrupt
+ * @threshold: interrupt threshold
+ *
+ * Configure TX timestamp interrupt for the specified port
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold)
+{
+ int err;
+ u32 val;
+
+ err = ice_read_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, &val);
+ if (err)
+ return err;
+
+ if (ena) {
+ val |= PHY_TS_INT_CONFIG_ENA_M;
+ val &= ~PHY_TS_INT_CONFIG_THRESHOLD_M;
+ val |= FIELD_PREP(PHY_TS_INT_CONFIG_THRESHOLD_M, threshold);
+ } else {
+ val &= ~PHY_TS_INT_CONFIG_ENA_M;
+ }
+
+ return ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, val);
+}
+
+/**
+ * ice_read_phy_and_phc_time_eth56g - Simultaneously capture PHC and PHY time
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @phy_time: on return, the 64bit PHY timer value
+ * @phc_time: on return, the lower 64bits of PHC time
+ *
+ * Issue an ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
+ * and PHC timer values.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port,
+ u64 *phy_time, u64 *phc_time)
+{
+ u64 tx_time, rx_time;
+ u32 zo, lo;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ /* Prepare the PHC timer for an ICE_PTP_READ_TIME capture command */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
+
+ /* Prepare the PHY timer for an ICE_PTP_READ_TIME capture command */
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME);
+ if (err)
+ return err;
+
+ /* Issue the sync to start the ICE_PTP_READ_TIME capture */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Read the captured PHC time from the shadow time registers */
+ zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx));
+ lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx));
+ *phc_time = (u64)lo << 32 | zo;
+
+ /* Read the captured PHY time from the PHY shadow registers */
+ err = ice_ptp_read_port_capture_eth56g(hw, port, &tx_time, &rx_time);
+ if (err)
+ return err;
+
+ /* If the PHY Tx and Rx timers don't match, log a warning message.
+ * Note that this should not happen in normal circumstances since the
+ * driver always programs them together.
+ */
+ if (tx_time != rx_time)
+ dev_warn(ice_hw_to_dev(hw), "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n",
+ port, tx_time, rx_time);
+
+ *phy_time = tx_time;
+
+ return 0;
+}
+
+/**
+ * ice_sync_phy_timer_eth56g - Synchronize the PHY timer with PHC timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to synchronize
+ *
+ * Perform an adjustment to ensure that the PHY and PHC timers are in sync.
+ * This is done by issuing an ICE_PTP_READ_TIME command which triggers a
+ * simultaneous read of the PHY timer and PHC timer. Then we use the
+ * difference to calculate an appropriate 2s complement addition to add
+ * to the PHY timer in order to ensure it reads the same value as the
+ * primary PHC timer.
+ *
+ * Return:
+ * * %0 - success
+ * %-EBUSY - failed to acquire PTP semaphore
+ * * %other - PHY read/write failed
+ */
+static int ice_sync_phy_timer_eth56g(struct ice_hw *hw, u8 port)
+{
+ u64 phc_time, phy_time, difference;
+ int err;
+
+ if (!ice_ptp_lock(hw)) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n");
+ return -EBUSY;
+ }
+
+ err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ /* Calculate the amount required to add to the port time in order for
+ * it to match the PHC time.
+ *
+ * Note that the port adjustment is done using 2s complement
+ * arithmetic. This is convenient since it means that we can simply
+ * calculate the difference between the PHC time and the port time,
+ * and it will be interpreted correctly.
+ */
+
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+ difference = phc_time - phy_time;
+
+ err = ice_ptp_prep_port_adj_eth56g(hw, port, (s64)difference);
+ if (err)
+ goto err_unlock;
+
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
+ if (err)
+ goto err_unlock;
+
+ /* Issue the sync to activate the time adjustment */
+ ice_ptp_exec_tmr_cmd(hw);
+
+ /* Re-capture the timer values to flush the command registers and
+ * verify that the time was properly adjusted.
+ */
+ err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time);
+ if (err)
+ goto err_unlock;
+
+ dev_info(ice_hw_to_dev(hw),
+ "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n",
+ port, phy_time, phc_time);
+
+err_unlock:
+ ice_ptp_unlock(hw);
+ return err;
+}
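+
+/* For illustration: if phc_time = 0x08 and phy_time = 0x10, the u64
+ * difference wraps to 0xFFFFFFFFFFFFFFF8, which 2s complement
+ * arithmetic interprets as -8, pulling the port timer back by 8 time
+ * units to match the PHC.
+ */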
+
+/**
+ * ice_stop_phy_timer_eth56g - Stop the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to stop
+ * @soft_reset: if true, hold the SOFT_RESET bit of PHY_REG_PS
+ *
+ * Stop the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
+ */
+int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset)
+{
+ int err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 0);
+ if (err)
+ return err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 0);
+ if (err)
+ return err;
+
+ ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_start_phy_timer_eth56g - Start the PHY clock timer
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to start
+ *
+ * Start the clock of a PHY port. This must be done as part of the flow to
+ * re-calibrate Tx and Rx timestamping offsets whenever the clock time is
+ * initialized or when link speed changes.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - PHY read/write failed
+ */
+int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port)
+{
+ u32 lo, hi;
+ u64 incval;
+ u8 tmr_idx;
+ int err;
+
+ tmr_idx = ice_get_ptp_src_clock_index(hw);
+
+ err = ice_stop_phy_timer_eth56g(hw, port, false);
+ if (err)
+ return err;
+
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
+ err = ice_phy_cfg_parpcs_eth56g(hw, port);
+ if (err)
+ return err;
+
+ err = ice_phy_cfg_ptp_1step_eth56g(hw, port);
+ if (err)
+ return err;
+
+ err = ice_phy_cfg_mac_eth56g(hw, port);
+ if (err)
+ return err;
+
+ lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx));
+ hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx));
+ incval = (u64)hi << 32 | lo;
+
+ err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval);
+ if (err)
+ return err;
+
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
+ if (err)
+ return err;
+
+ ice_ptp_exec_tmr_cmd(hw);
+
+ err = ice_sync_phy_timer_eth56g(hw, port);
+ if (err)
+ return err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 1);
+ if (err)
+ return err;
+
+ err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 1);
+ if (err)
+ return err;
+
+ ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port);
+
+ return 0;
+}
+
+/**
+ * ice_sb_access_ena_eth56g - Enable/disable SB devices (PHY and others) access
+ * @hw: pointer to HW struct
+ * @enable: Enable or disable access
+ *
+ * Enable or disable sideband device (PHY and others) access.
+ */
+static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable)
+{
+ u32 val = rd32(hw, PF_SB_REM_DEV_CTL);
+
+ if (enable)
+ val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1);
+ else
+ val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1));
+
+ wr32(hw, PF_SB_REM_DEV_CTL, val);
+}
+
+/**
+ * ice_ptp_init_phc_eth56g - Perform ETH56G specific PHC initialization
+ * @hw: pointer to HW struct
+ *
+ * Perform PHC initialization steps specific to ETH56G devices.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to initialize CGU
+ */
+static int ice_ptp_init_phc_eth56g(struct ice_hw *hw)
+{
+ ice_sb_access_ena_eth56g(hw, true);
+ /* Initialize the Clock Generation Unit */
+ return ice_init_cgu_e82x(hw);
+}
+
+/**
+ * ice_ptp_read_tx_hwtstamp_status_eth56g - Get TX timestamp status
+ * @hw: pointer to the HW struct
+ * @ts_status: on return, the Tx timestamp ready bitmask
+ *
+ * Read the PHY Tx timestamp status mask indicating which ports have Tx
+ * timestamps available.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status)
+{
+ const struct ice_eth56g_params *params = &hw->ptp.phy.eth56g;
+ u8 phy, mask;
+ u32 status;
+
+ mask = (1 << hw->ptp.ports_per_phy) - 1;
+ *ts_status = 0;
+
+ for (phy = 0; phy < params->num_phys; phy++) {
+ int err;
+
+ err = ice_read_phy_eth56g(hw, phy, PHY_PTP_INT_STATUS, &status);
+ if (err)
+ return err;
+
+ *ts_status |= (status & mask) << (phy * hw->ptp.ports_per_phy);
+ }
+
+ ice_debug(hw, ICE_DBG_PTP, "PHY interrupt err: %x\n", *ts_status);
+
+ return 0;
+}
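+
+/* For illustration: with ports_per_phy == 4, mask == 0xF and the bits
+ * from PHY 1 land in bits 4-7 of *ts_status, so a composite value of
+ * 0x30 means ports 4 and 5 have Tx timestamps ready.
+ */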
+
+/**
+ * ice_get_phy_tx_tstamp_ready_eth56g - Read the Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read from
+ * @tstamp_ready: contents of the Tx memory status register
+ *
+ * Read the PHY_REG_TX_MEMORY_STATUS register indicating which timestamps in
+ * the PHY are ready. A set bit means the corresponding timestamp is valid and
+ * ready to be captured from the PHY timestamp block.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to read from PHY
+ */
+static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port,
+ u64 *tstamp_ready)
+{
+ int err;
+
+ err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L,
+ tstamp_ready);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS for port %u, err %d\n",
+ port, err);
+ return err;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_is_muxed_topo - detect a 2x50G breakout topology on E825C
+ * @hw: pointer to the HW struct
+ *
+ * Return: true if it is a 2x50G breakout topology, false otherwise
+ */
+static bool ice_is_muxed_topo(struct ice_hw *hw)
+{
+ u8 link_topo;
+ bool mux;
+ u32 val;
+
+ val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG);
+ mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val);
+ val = rd32(hw, GLGEN_MAC_LINK_TOPO);
+ link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val);
+
+ return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS);
+}
+
+/**
+ * ice_ptp_init_phy_e825c - initialize PHY parameters
+ * @hw: pointer to the HW struct
+ */
+static void ice_ptp_init_phy_e825c(struct ice_hw *hw)
+{
+ struct ice_ptp_hw *ptp = &hw->ptp;
+ struct ice_eth56g_params *params;
+ u8 phy;
+
+ ptp->phy_model = ICE_PHY_ETH56G;
+ params = &ptp->phy.eth56g;
+ params->onestep_ena = false;
+ params->peer_delay = 0;
+ params->sfd_ena = false;
+ params->phy_addr[0] = eth56g_phy_0;
+ params->phy_addr[1] = eth56g_phy_1;
+ params->num_phys = 2;
+ ptp->ports_per_phy = 4;
+ ptp->num_lports = params->num_phys * ptp->ports_per_phy;
+
+ ice_sb_access_ena_eth56g(hw, true);
+ for (phy = 0; phy < params->num_phys; phy++) {
+ u32 phy_rev;
+ int err;
+
+ err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev);
+ if (err || phy_rev != PHY_REVISION_ETH56G) {
+ ptp->phy_model = ICE_PHY_UNSUP;
+ return;
+ }
+ }
+
+ ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw);
+}
+
/* E822 family functions
*
* The following functions operate on the E822 family of devices.
@@ -285,18 +2707,21 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw)
/**
* ice_fill_phy_msg_e82x - Fill message data for a PHY register access
+ * @hw: pointer to the HW struct
* @msg: the PHY message buffer to fill in
* @port: the port to access
* @offset: the register offset
*/
-static void
-ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
+static void ice_fill_phy_msg_e82x(struct ice_hw *hw,
+ struct ice_sbq_msg_input *msg, u8 port,
+ u16 offset)
{
int phy_port, phy, quadtype;
- phy_port = port % ICE_PORTS_PER_PHY_E82X;
- phy = port / ICE_PORTS_PER_PHY_E82X;
- quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X;
+ phy_port = port % hw->ptp.ports_per_phy;
+ phy = port / hw->ptp.ports_per_phy;
+ quadtype = ICE_GET_QUAD_NUM(port) %
+ ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy);
if (quadtype == 0) {
msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
@@ -427,10 +2852,10 @@ ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e82x(&msg, port, offset);
+ ice_fill_phy_msg_e82x(hw, &msg, port, offset);
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -504,11 +2929,11 @@ ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val)
struct ice_sbq_msg_input msg = {0};
int err;
- ice_fill_phy_msg_e82x(&msg, port, offset);
+ ice_fill_phy_msg_e82x(hw, &msg, port, offset);
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -543,8 +2968,7 @@ ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
low_addr);
return -EINVAL;
}
-
- low = (u32)(val & P_REG_40B_LOW_M);
+ low = FIELD_GET(P_REG_40B_LOW_M, val);
high = (u32)(val >> P_REG_40B_HIGH_S);
err = ice_write_phy_reg_e82x(hw, port, low_addr, low);
@@ -614,24 +3038,30 @@ ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
/**
* ice_fill_quad_msg_e82x - Fill message data for quad register access
+ * @hw: pointer to the HW struct
* @msg: the PHY message buffer to fill in
* @quad: the quad to access
* @offset: the register offset
*
* Fill a message buffer for accessing a register in a quad shared between
* multiple PHYs.
+ *
+ * Return:
+ * * %0 - OK
+ * * %-EINVAL - invalid quad number
*/
-static int
-ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
+static int ice_fill_quad_msg_e82x(struct ice_hw *hw,
+ struct ice_sbq_msg_input *msg, u8 quad,
+ u16 offset)
{
u32 addr;
- if (quad >= ICE_MAX_QUAD)
+ if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports))
return -EINVAL;
msg->dest_dev = rmn_0;
- if ((quad % ICE_QUADS_PER_PHY_E82X) == 0)
+ if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy)))
addr = Q_0_BASE + offset;
else
addr = Q_1_BASE + offset;
@@ -658,13 +3088,13 @@ ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e82x(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset);
if (err)
return err;
msg.opcode = ice_sbq_msg_rd;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -692,14 +3122,14 @@ ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
struct ice_sbq_msg_input msg = {0};
int err;
- err = ice_fill_quad_msg_e82x(&msg, quad, offset);
+ err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset);
if (err)
return err;
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -748,7 +3178,7 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
* lower 8 bits in the low register, and the upper 32 bits in the high
* register.
*/
- *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M);
+ *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo);
return 0;
}
@@ -813,294 +3243,11 @@ static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw)
{
unsigned int quad;
- for (quad = 0; quad < ICE_MAX_QUAD; quad++)
+ for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); quad++)
ice_ptp_reset_ts_memory_quad_e82x(hw, quad);
}
/**
- * ice_read_cgu_reg_e82x - Read a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to read
- * @val: storage for register value read
- *
- * Read the contents of a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- */
-static int
-ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val)
-{
- struct ice_sbq_msg_input cgu_msg;
- int err;
-
- cgu_msg.opcode = ice_sbq_msg_rd;
- cgu_msg.dest_dev = cgu;
- cgu_msg.msg_addr_low = addr;
- cgu_msg.msg_addr_high = 0x0;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- *val = cgu_msg.data;
-
- return err;
-}
-
-/**
- * ice_write_cgu_reg_e82x - Write a CGU register
- * @hw: pointer to the HW struct
- * @addr: Register address to write
- * @val: value to write into the register
- *
- * Write the specified value to a register of the Clock Generation Unit. Only
- * applicable to E822 devices.
- */
-static int
-ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val)
-{
- struct ice_sbq_msg_input cgu_msg;
- int err;
-
- cgu_msg.opcode = ice_sbq_msg_wr;
- cgu_msg.dest_dev = cgu;
- cgu_msg.msg_addr_low = addr;
- cgu_msg.msg_addr_high = 0x0;
- cgu_msg.data = val;
-
- err = ice_sbq_rw_reg(hw, &cgu_msg);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n",
- addr, err);
- return err;
- }
-
- return err;
-}
-
-/**
- * ice_clk_freq_str - Convert time_ref_freq to string
- * @clk_freq: Clock frequency
- *
- * Convert the specified TIME_REF clock frequency to a string.
- */
-static const char *ice_clk_freq_str(u8 clk_freq)
-{
- switch ((enum ice_time_ref_freq)clk_freq) {
- case ICE_TIME_REF_FREQ_25_000:
- return "25 MHz";
- case ICE_TIME_REF_FREQ_122_880:
- return "122.88 MHz";
- case ICE_TIME_REF_FREQ_125_000:
- return "125 MHz";
- case ICE_TIME_REF_FREQ_153_600:
- return "153.6 MHz";
- case ICE_TIME_REF_FREQ_156_250:
- return "156.25 MHz";
- case ICE_TIME_REF_FREQ_245_760:
- return "245.76 MHz";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_clk_src_str - Convert time_ref_src to string
- * @clk_src: Clock source
- *
- * Convert the specified clock source to its string name.
- */
-static const char *ice_clk_src_str(u8 clk_src)
-{
- switch ((enum ice_clk_src)clk_src) {
- case ICE_CLK_SRC_TCX0:
- return "TCX0";
- case ICE_CLK_SRC_TIME_REF:
- return "TIME_REF";
- default:
- return "Unknown";
- }
-}
-
-/**
- * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit
- * @hw: pointer to the HW struct
- * @clk_freq: Clock frequency to program
- * @clk_src: Clock source to select (TIME_REF, or TCX0)
- *
- * Configure the Clock Generation Unit with the desired clock frequency and
- * time reference, enabling the PLL which drives the PTP hardware clock.
- */
-static int
-ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq,
- enum ice_clk_src clk_src)
-{
- union tspll_ro_bwm_lf bwm_lf;
- union nac_cgu_dword19 dw19;
- union nac_cgu_dword22 dw22;
- union nac_cgu_dword24 dw24;
- union nac_cgu_dword9 dw9;
- int err;
-
- if (clk_freq >= NUM_ICE_TIME_REF_FREQ) {
- dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n",
- clk_freq);
- return -EINVAL;
- }
-
- if (clk_src >= NUM_ICE_CLK_SRC) {
- dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n",
- clk_src);
- return -EINVAL;
- }
-
- if (clk_src == ICE_CLK_SRC_TCX0 &&
- clk_freq != ICE_TIME_REF_FREQ_25_000) {
- dev_warn(ice_hw_to_dev(hw),
- "TCX0 only supports 25 MHz frequency\n");
- return -EINVAL;
- }
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.field.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.field.time_ref_sel),
- ice_clk_freq_str(dw9.field.time_ref_freq_sel),
- bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
-
- /* Disable the PLL before changing the clock source or frequency */
- if (dw24.field.ts_pll_enable) {
- dw24.field.ts_pll_enable = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
- }
-
- /* Set the frequency */
- dw9.field.time_ref_freq_sel = clk_freq;
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val);
- if (err)
- return err;
-
- /* Configure the TS PLL feedback divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val);
- if (err)
- return err;
-
- dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div;
- dw19.field.tspll_ndivratio = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val);
- if (err)
- return err;
-
- /* Configure the TS PLL post divisor */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val);
- if (err)
- return err;
-
- dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div;
- dw22.field.time1588clk_sel_div2 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val);
- if (err)
- return err;
-
- /* Configure the TS PLL pre divisor and clock source */
- err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val);
- if (err)
- return err;
-
- dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div;
- dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div;
- dw24.field.time_ref_sel = clk_src;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Finally, enable the PLL */
- dw24.field.ts_pll_enable = 1;
-
- err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val);
- if (err)
- return err;
-
- /* Wait to verify if the PLL locks */
- usleep_range(1000, 5000);
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val);
- if (err)
- return err;
-
- if (!bwm_lf.field.plllock_true_lock_cri) {
- dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n");
- return -EBUSY;
- }
-
- /* Log the current clock configuration */
- ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n",
- dw24.field.ts_pll_enable ? "enabled" : "disabled",
- ice_clk_src_str(dw24.field.time_ref_sel),
- ice_clk_freq_str(dw9.field.time_ref_freq_sel),
- bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked");
-
- return 0;
-}
-
-/**
- * ice_init_cgu_e82x - Initialize CGU with settings from firmware
- * @hw: pointer to the HW structure
- *
- * Initialize the Clock Generation Unit of the E822 device.
- */
-static int ice_init_cgu_e82x(struct ice_hw *hw)
-{
- struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info;
- union tspll_cntr_bist_settings cntr_bist;
- int err;
-
- err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- &cntr_bist.val);
- if (err)
- return err;
-
- /* Disable sticky lock detection so lock err reported is accurate */
- cntr_bist.field.i_plllock_sel_0 = 0;
- cntr_bist.field.i_plllock_sel_1 = 0;
-
- err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS,
- cntr_bist.val);
- if (err)
- return err;
-
- /* Configure the CGU PLL using the parameters from the function
- * capabilities.
- */
- err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref,
- (enum ice_clk_src)ts_info->clk_src);
- if (err)
- return err;
-
- return 0;
-}
-
-/**
* ice_ptp_set_vernier_wl - Set the window length for vernier calibration
* @hw: pointer to the HW struct
*
@@ -1110,7 +3257,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
{
u8 port;
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
err = ice_write_phy_reg_e82x(hw, port, P_REG_WL,
@@ -1134,15 +3281,14 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw)
static int ice_ptp_init_phc_e82x(struct ice_hw *hw)
{
int err;
- u32 regval;
+ u32 val;
/* Enable reading switch and PHY registers over the sideband queue */
#define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1)
#define PF_SB_REM_DEV_CTL_PHY0 BIT(2)
- regval = rd32(hw, PF_SB_REM_DEV_CTL);
- regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ |
- PF_SB_REM_DEV_CTL_PHY0);
- wr32(hw, PF_SB_REM_DEV_CTL, regval);
+ val = rd32(hw, PF_SB_REM_DEV_CTL);
+ val |= (PF_SB_REM_DEV_CTL_SWITCH_READ | PF_SB_REM_DEV_CTL_PHY0);
+ wr32(hw, PF_SB_REM_DEV_CTL, val);
/* Initialize the Clock Generation Unit */
err = ice_init_cgu_e82x(hw);
@@ -1175,7 +3321,7 @@ ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time)
*/
phy_time = (u64)time << 32;
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
/* Tx case */
err = ice_write_64b_phy_reg_e82x(hw, port,
P_REG_TX_TIMER_INC_PRE_L,
@@ -1278,7 +3424,7 @@ ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj)
else
cycles = -(((s64)-adj) << 32);
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
int err;
err = ice_ptp_prep_port_adj_e82x(hw, port, cycles);
@@ -1304,7 +3450,7 @@ ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval)
int err;
u8 port;
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ for (port = 0; port < hw->ptp.num_lports; port++) {
err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L,
incval);
if (err)
@@ -1369,51 +3515,20 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
*
* Prepare the requested port for an upcoming timer sync command.
*
- * Do not use this function directly. If you want to configure exactly one
- * port, use ice_ptp_one_port_cmd() instead.
+ * Note there is no equivalent of this operation on E810, as that device
+ * always handles all external PHYs internally.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write to PHY
*/
static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
enum ice_ptp_tmr_cmd cmd)
{
- u32 cmd_val, val;
- u8 tmr_idx;
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
int err;
- tmr_idx = ice_get_ptp_src_clock_index(hw);
- cmd_val = tmr_idx << SEL_PHY_SRC;
- switch (cmd) {
- case ICE_PTP_INIT_TIME:
- cmd_val |= PHY_CMD_INIT_TIME;
- break;
- case ICE_PTP_INIT_INCVAL:
- cmd_val |= PHY_CMD_INIT_INCVAL;
- break;
- case ICE_PTP_ADJ_TIME:
- cmd_val |= PHY_CMD_ADJ_TIME;
- break;
- case ICE_PTP_READ_TIME:
- cmd_val |= PHY_CMD_READ_TIME;
- break;
- case ICE_PTP_ADJ_TIME_AT_TIME:
- cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
- break;
- case ICE_PTP_NOP:
- break;
- }
-
/* Tx case */
- /* Read, modify, write */
- err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n",
- err);
- return err;
- }
-
- /* Modify necessary bits only and perform write */
- val &= ~TS_CMD_MASK;
- val |= cmd_val;
-
err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n",
@@ -1422,19 +3537,8 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
}
/* Rx case */
- /* Read, modify, write */
- err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n",
- err);
- return err;
- }
-
- /* Modify necessary bits only and perform write */
- val &= ~TS_CMD_MASK;
- val |= cmd_val;
-
- err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val);
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD,
+ val | TS_CMD_RX_TYPE);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n",
err);
@@ -1444,63 +3548,6 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port,
return 0;
}
-/**
- * ice_ptp_one_port_cmd - Prepare one port for a timer command
- * @hw: pointer to the HW struct
- * @configured_port: the port to configure with configured_cmd
- * @configured_cmd: timer command to prepare on the configured_port
- *
- * Prepare the configured_port for the configured_cmd, and prepare all other
- * ports for ICE_PTP_NOP. This causes the configured_port to execute the
- * desired command while all other ports perform no operation.
- */
-static int
-ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
- enum ice_ptp_tmr_cmd configured_cmd)
-{
- u8 port;
-
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
- enum ice_ptp_tmr_cmd cmd;
- int err;
-
- if (port == configured_port)
- cmd = configured_cmd;
- else
- cmd = ICE_PTP_NOP;
-
- err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-/**
- * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command
- * @hw: pointer to the HW struct
- * @cmd: timer command to prepare
- *
- * Prepare all ports connected to this device for an upcoming timer sync
- * command.
- */
-static int
-ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
-{
- u8 port;
-
- for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
- int err;
-
- err = ice_ptp_write_port_cmd_e82x(hw, port, cmd);
- if (err)
- return err;
- }
-
- return 0;
-}
-
/* E822 Vernier calibration functions
*
* The following functions are used as part of the vernier calibration of
@@ -1603,7 +3650,7 @@ static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port)
return;
}
- quad = port / ICE_PORTS_PER_QUAD;
+ quad = ICE_GET_QUAD_NUM(port);
err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
if (err) {
@@ -2324,6 +4371,40 @@ int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port)
}
/**
+ * ice_ptp_clear_phy_offset_ready_e82x - Clear PHY TX_/RX_OFFSET_READY registers
+ * @hw: pointer to the HW struct
+ *
+ * Clear PHY TX_/RX_OFFSET_READY registers, effectively marking all transmitted
+ * and received timestamps as invalid.
+ *
+ * Return: 0 on success, other error codes when failed to write to PHY
+ */
+int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw)
+{
+ u8 port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0);
+ if (err) {
+ dev_warn(ice_hw_to_dev(hw),
+ "Failed to clear PHY TX_OFFSET_READY register\n");
+ return err;
+ }
+
+ err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0);
+ if (err) {
+ dev_warn(ice_hw_to_dev(hw),
+ "Failed to clear PHY RX_OFFSET_READY register\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time
* @hw: pointer to the HW struct
* @port: the PHY port to read
@@ -2633,6 +4714,48 @@ ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready)
return 0;
}
+/**
+ * ice_phy_cfg_intr_e82x - Configure TX timestamp interrupt
+ * @hw: pointer to the HW struct
+ * @quad: the timestamp quad
+ * @ena: enable or disable interrupt
+ * @threshold: interrupt threshold
+ *
+ * Configure TX timestamp interrupt for the specified quad
+ *
+ * Return: 0 on success, other error codes when failed to read/write quad
+ */
+int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold)
+{
+ int err;
+ u32 val;
+
+ err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val);
+ if (err)
+ return err;
+
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ if (ena) {
+ val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M;
+ val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M;
+ val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, threshold);
+ }
+
+ return ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val);
+}
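The helper above is a plain read-modify-write: the enable bit and the 6-bit threshold field of Q_REG_TX_MEM_GBL_CFG are cleared and re-programmed via FIELD_PREP(). A minimal userspace sketch of the same masking math; the mask values are copied from ice_ptp_hw.h, everything else is illustrative and not driver API:

#include <stdint.h>
#include <stdio.h>

#define GBL_CFG_INTR_ENA  (1u << 15)   /* Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M */
#define GBL_CFG_INTR_THR  (0x3Fu << 9) /* Q_REG_TX_MEM_GBL_CFG_INTR_THR_M */

static uint32_t cfg_intr(uint32_t val, int ena, uint8_t threshold)
{
	val &= ~GBL_CFG_INTR_ENA;          /* default: interrupt disabled */
	if (ena) {
		val |= GBL_CFG_INTR_ENA;
		val &= ~GBL_CFG_INTR_THR;  /* clear the old threshold */
		val |= ((uint32_t)threshold << 9) & GBL_CFG_INTR_THR;
	}
	return val;
}

int main(void)
{
	/* enable with threshold 1: bit 15 plus 1 << 9 == 0x00008200 */
	printf("0x%08x\n", cfg_intr(0, 1, 1));
	return 0;
}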
+
+/**
+ * ice_ptp_init_phy_e82x - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp)
+{
+ ptp->phy_model = ICE_PHY_E82X;
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 8;
+}
+
/* E810 functions
*
* The following functions operate on the E810 series devices which use
@@ -2657,7 +4780,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val)
msg.opcode = ice_sbq_msg_rd;
msg.dest_dev = rmn_0;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -2688,7 +4811,7 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
msg.dest_dev = rmn_0;
msg.data = val;
- err = ice_sbq_rw_reg(hw, &msg);
+ err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n",
err);
@@ -2860,17 +4983,21 @@ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
}
/**
- * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY
+ * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
* @hw: pointer to HW struct
*
- * Enable the timesync PTP functionality for the external PHY connected to
- * this function.
+ * Perform E810-specific PTP hardware clock initialization steps.
+ *
+ * Return: 0 on success, other error codes when failed to initialize TimeSync
*/
-int ice_ptp_init_phy_e810(struct ice_hw *hw)
+static int ice_ptp_init_phc_e810(struct ice_hw *hw)
{
u8 tmr_idx;
int err;
+ /* Ensure synchronization delay is zero */
+ wr32(hw, GLTSYN_SYNC_DLAY, 0);
+
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx),
GLTSYN_ENA_TSYN_ENA_M);
@@ -2882,21 +5009,6 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw)
}
/**
- * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization
- * @hw: pointer to HW struct
- *
- * Perform E810-specific PTP hardware clock initialization steps.
- */
-static int ice_ptp_init_phc_e810(struct ice_hw *hw)
-{
- /* Ensure synchronization delay is zero */
- wr32(hw, GLTSYN_SYNC_DLAY, 0);
-
- /* Initialize the PHY */
- return ice_ptp_init_phy_e810(hw);
-}
-
-/**
* ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time
* @hw: Board private structure
* @time: Time to initialize the PHY port clock to
@@ -3017,47 +5129,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
*/
static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
- u32 cmd_val, val;
- int err;
-
- switch (cmd) {
- case ICE_PTP_INIT_TIME:
- cmd_val = GLTSYN_CMD_INIT_TIME;
- break;
- case ICE_PTP_INIT_INCVAL:
- cmd_val = GLTSYN_CMD_INIT_INCVAL;
- break;
- case ICE_PTP_ADJ_TIME:
- cmd_val = GLTSYN_CMD_ADJ_TIME;
- break;
- case ICE_PTP_READ_TIME:
- cmd_val = GLTSYN_CMD_READ_TIME;
- break;
- case ICE_PTP_ADJ_TIME_AT_TIME:
- cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
- break;
- case ICE_PTP_NOP:
- return 0;
- }
+ u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd);
- /* Read, modify, write */
- err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err);
- return err;
- }
-
- /* Modify necessary bits only and perform write */
- val &= ~TS_CMD_MASK_E810;
- val |= cmd_val;
-
- err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err);
- return err;
- }
-
- return 0;
+ return ice_write_phy_reg_e810(hw, E810_ETH_GLTSYN_CMD, val);
}
/**
@@ -3239,6 +5313,17 @@ int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
}
+/**
+ * ice_ptp_init_phy_e810 - initialize PHY parameters
+ * @ptp: pointer to the PTP HW struct
+ */
+static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp)
+{
+ ptp->phy_model = ICE_PHY_E810;
+ ptp->num_lports = 8;
+ ptp->ports_per_phy = 4;
+}
+
/* Device agnostic functions
*
* The following functions implement shared behavior common to both E822 and
@@ -3296,18 +5381,126 @@ void ice_ptp_unlock(struct ice_hw *hw)
}
/**
- * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type
+ * ice_ptp_init_hw - Initialize hw based on device type
* @hw: pointer to the HW structure
*
- * Determine the PHY model for the device, and initialize hw->phy_model
+ * Determine the PHY model for the device, and initialize the PTP parameters in hw
* for use by other functions.
*/
-void ice_ptp_init_phy_model(struct ice_hw *hw)
+void ice_ptp_init_hw(struct ice_hw *hw)
{
- if (ice_is_e810(hw))
- hw->phy_model = ICE_PHY_E810;
+ struct ice_ptp_hw *ptp = &hw->ptp;
+
+ if (ice_is_e822(hw) || ice_is_e823(hw))
+ ice_ptp_init_phy_e82x(ptp);
+ else if (ice_is_e810(hw))
+ ice_ptp_init_phy_e810(ptp);
+ else if (ice_is_e825c(hw))
+ ice_ptp_init_phy_e825c(hw);
else
- hw->phy_model = ICE_PHY_E82X;
+ ptp->phy_model = ICE_PHY_UNSUP;
+}
+
+/**
+ * ice_ptp_write_port_cmd - Prepare a single PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @port: Port to which cmd has to be sent
+ * @cmd: Command to be sent to the port
+ *
+ * Prepare one port for the upcoming timer sync command. Do not use this for
+ * programming only a single port; instead, use ice_ptp_one_port_cmd() to
+ * ensure the non-modified ports are properly initialized to ICE_PTP_NOP.
+ *
+ * Return:
+ * * %0 - success
+ * * %-EOPNOTSUPP - PHY model not supported
+ * * %other - failed to write port command
+ */
+static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port,
+ enum ice_ptp_tmr_cmd cmd)
+{
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_ptp_write_port_cmd_eth56g(hw, port, cmd);
+ case ICE_PHY_E82X:
+ return ice_ptp_write_port_cmd_e82x(hw, port, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_ptp_one_port_cmd - Program one PHY port for a timer command
+ * @hw: pointer to HW struct
+ * @configured_port: the port that should execute the command
+ * @configured_cmd: the command to be executed on the configured port
+ *
+ * Prepare one port to execute a timer command, while programming all other
+ * ports with ICE_PTP_NOP. This allows executing a command on a single port
+ * while ensuring the remaining ports do not execute stale commands.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write port command
+ */
+int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
+ enum ice_ptp_tmr_cmd configured_cmd)
+{
+ u32 port;
+
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ /* Program the configured port with the configured command,
+ * program all other ports with ICE_PTP_NOP.
+ */
+ if (port == configured_port)
+ err = ice_ptp_write_port_cmd(hw, port, configured_cmd);
+ else
+ err = ice_ptp_write_port_cmd(hw, port, ICE_PTP_NOP);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
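A small standalone model of the rule ice_ptp_one_port_cmd() implements above: exactly one port is armed with the real command and every other port is armed with a NOP, so the subsequent SYNC pulse cannot trigger a stale command on an unrelated port. Names below are illustrative, not driver API:

#include <stdio.h>

enum tmr_cmd { CMD_NOP, CMD_INIT_TIME, CMD_ADJ_TIME };

static void arm_ports(enum tmr_cmd armed[], int nports,
		      int configured_port, enum tmr_cmd cmd)
{
	for (int port = 0; port < nports; port++)
		armed[port] = (port == configured_port) ? cmd : CMD_NOP;
}

int main(void)
{
	enum tmr_cmd armed[8];

	arm_ports(armed, 8, 2, CMD_ADJ_TIME);
	for (int port = 0; port < 8; port++)
		printf("port %d: %s\n", port,
		       armed[port] == CMD_NOP ? "NOP" : "ADJ_TIME");
	return 0;
}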
+
+/**
+ * ice_ptp_port_cmd - Prepare PHY ports for a timer sync command
+ * @hw: pointer to HW struct
+ * @cmd: the timer command to setup
+ *
+ * Prepare all PHY ports on this device for the requested timer command. For
+ * some families this can be done in one shot, but for other families each
+ * port must be configured individually.
+ *
+ * Return:
+ * * %0 - success
+ * * %other - failed to write port command
+ */
+static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+{
+ u32 port;
+
+ /* PHY models which can program all ports simultaneously */
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_E810:
+ return ice_ptp_port_cmd_e810(hw, cmd);
+ default:
+ break;
+ }
+
+ /* PHY models which require programming each port separately */
+ for (port = 0; port < hw->ptp.num_lports; port++) {
+ int err;
+
+ err = ice_ptp_write_port_cmd(hw, port, cmd);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
/**
@@ -3328,17 +5521,7 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
ice_ptp_src_cmd(hw, cmd);
/* Next, prepare the ports */
- switch (hw->phy_model) {
- case ICE_PHY_E810:
- err = ice_ptp_port_cmd_e810(hw, cmd);
- break;
- case ICE_PHY_E82X:
- err = ice_ptp_port_cmd_e82x(hw, cmd);
- break;
- default:
- err = -EOPNOTSUPP;
- }
-
+ err = ice_ptp_port_cmd(hw, cmd);
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
cmd, err);
@@ -3380,7 +5563,11 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_ptp_prep_phy_time_eth56g(hw,
+ (u32)(time & 0xFFFFFFFF));
+ break;
case ICE_PHY_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
break;
@@ -3422,7 +5609,10 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_ptp_prep_phy_incval_eth56g(hw, incval);
+ break;
case ICE_PHY_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
break;
@@ -3488,7 +5678,10 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ err = ice_ptp_prep_phy_adj_eth56g(hw, adj);
+ break;
case ICE_PHY_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
break;
@@ -3518,7 +5711,9 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp);
case ICE_PHY_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
case ICE_PHY_E82X:
@@ -3546,7 +5741,9 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_clear_ptp_tstamp_eth56g(hw, block, idx);
case ICE_PHY_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
case ICE_PHY_E82X:
@@ -3607,7 +5804,10 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
*/
void ice_ptp_reset_ts_memory(struct ice_hw *hw)
{
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ ice_ptp_reset_ts_memory_eth56g(hw);
+ break;
case ICE_PHY_E82X:
ice_ptp_reset_ts_memory_e82x(hw);
break;
@@ -3633,7 +5833,9 @@ int ice_ptp_init_phc(struct ice_hw *hw)
/* Clear event err indications for auxiliary pins */
(void)rd32(hw, GLTSYN_STAT(src_idx));
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_ptp_init_phc_eth56g(hw);
case ICE_PHY_E810:
return ice_ptp_init_phc_e810(hw);
case ICE_PHY_E82X:
@@ -3656,7 +5858,10 @@ int ice_ptp_init_phc(struct ice_hw *hw)
*/
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
{
- switch (hw->phy_model) {
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ice_get_phy_tx_tstamp_ready_eth56g(hw, block,
+ tstamp_ready);
case ICE_PHY_E810:
return ice_get_phy_tx_tstamp_ready_e810(hw, block,
tstamp_ready);
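After this patch, most device-agnostic entry points in ice_ptp_hw.c share one shape: switch on hw->ptp.phy_model, defer to the family-specific implementation, and return -EOPNOTSUPP for unknown models. A compressed userspace sketch of that shape, with illustrative names only:

#include <stdio.h>

enum phy_model { PHY_UNSUP, PHY_E810, PHY_E82X, PHY_ETH56G };

static int ptp_init_phc(enum phy_model model)
{
	switch (model) {
	case PHY_ETH56G:
	case PHY_E810:
	case PHY_E82X:
		printf("family-specific init for model %d\n", model);
		return 0;
	default:
		return -95; /* -EOPNOTSUPP, mirroring the default cases above */
	}
}

int main(void)
{
	return ptp_init_phc(PHY_E82X);
}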
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 1f3e03124430..0852a34ade91 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -41,6 +41,41 @@ enum ice_ptp_fec_mode {
ICE_PTP_FEC_MODE_RS_FEC
};
+enum eth56g_res_type {
+ ETH56G_PHY_REG_PTP,
+ ETH56G_PHY_MEM_PTP,
+ ETH56G_PHY_REG_XPCS,
+ ETH56G_PHY_REG_MAC,
+ ETH56G_PHY_REG_GPCS,
+ NUM_ETH56G_PHY_RES
+};
+
+enum ice_eth56g_link_spd {
+ ICE_ETH56G_LNK_SPD_1G,
+ ICE_ETH56G_LNK_SPD_2_5G,
+ ICE_ETH56G_LNK_SPD_10G,
+ ICE_ETH56G_LNK_SPD_25G,
+ ICE_ETH56G_LNK_SPD_40G,
+ ICE_ETH56G_LNK_SPD_50G,
+ ICE_ETH56G_LNK_SPD_50G2,
+ ICE_ETH56G_LNK_SPD_100G,
+ ICE_ETH56G_LNK_SPD_100G2,
+ NUM_ICE_ETH56G_LNK_SPD /* Must be last */
+};
+
+/**
+ * struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters
+ * @base: base address for each PHY block
+ * @step: step between PHY lanes
+ *
+ * Characteristic information for the various PHY register parameters in the
+ * ETH56G devices
+ */
+struct ice_phy_reg_info_eth56g {
+ u32 base[NUM_ETH56G_PHY_RES];
+ u32 step;
+};
+
/**
* struct ice_time_ref_info_e82x
* @pll_freq: Frequency of PLL that drives timer ticks in Hz
@@ -94,8 +129,75 @@ struct ice_vernier_info_e82x {
u32 rx_fixed_delay;
};
+#define ICE_ETH56G_MAC_CFG_RX_OFFSET_INT GENMASK(19, 9)
+#define ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC GENMASK(8, 0)
+#define ICE_ETH56G_MAC_CFG_FRAC_W 9
/**
- * struct ice_cgu_pll_params_e82x
+ * struct ice_eth56g_mac_reg_cfg - MAC config values for specific PTP registers
+ * @tx_mode: Tx timestamp compensation mode
+ * @tx_mk_dly: Tx timestamp marker start strobe delay
+ * @tx_cw_dly: Tx timestamp codeword start strobe delay
+ * @rx_mode: Rx timestamp compensation mode
+ * @rx_mk_dly: Rx timestamp marker start strobe delay
+ * @rx_cw_dly: Rx timestamp codeword start strobe delay
+ * @blks_per_clk: number of blocks transferred per clock cycle
+ * @blktime: block time, fixed point
+ * @mktime: marker time, fixed point
+ * @tx_offset: total Tx offset, fixed point
+ * @rx_offset: total Rx offset, contains value for bitslip/deskew, fixed point
+ *
+ * All fixed point registers except the Rx offset are 23 bit unsigned ints
+ * with a 9 bit fractional part.
+ * The Rx offset is an 11 bit unsigned int with a 9 bit fractional part.
+ */
+struct ice_eth56g_mac_reg_cfg {
+ struct {
+ u8 def;
+ u8 rs;
+ } tx_mode;
+ u8 tx_mk_dly;
+ struct {
+ u8 def;
+ u8 onestep;
+ } tx_cw_dly;
+ struct {
+ u8 def;
+ u8 rs;
+ } rx_mode;
+ struct {
+ u8 def;
+ u8 rs;
+ } rx_mk_dly;
+ struct {
+ u8 def;
+ u8 rs;
+ } rx_cw_dly;
+ u8 blks_per_clk;
+ u16 blktime;
+ u16 mktime;
+ struct {
+ u32 serdes;
+ u32 no_fec;
+ u32 fc;
+ u32 rs;
+ u32 sfd;
+ u32 onestep;
+ } tx_offset;
+ struct {
+ u32 serdes;
+ u32 no_fec;
+ u32 fc;
+ u32 rs;
+ u32 sfd;
+ u32 bs_ds;
+ } rx_offset;
+};
+
+extern
+const struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD];
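A quick standalone illustration of the 9-bit-fraction fixed point described in the ice_eth56g_mac_reg_cfg comment above: a value in nanoseconds is stored as value * 2^9, with the integer part in the upper bits (GENMASK(19, 9) for the Rx offset) and the fraction in GENMASK(8, 0). The helper name is hypothetical; only the field width comes from the header:

#include <stdint.h>
#include <stdio.h>

#define FRAC_W 9 /* ICE_ETH56G_MAC_CFG_FRAC_W */

static uint32_t ns_to_fixed(double ns)
{
	return (uint32_t)(ns * (1u << FRAC_W) + 0.5); /* round to nearest */
}

int main(void)
{
	uint32_t v = ns_to_fixed(10.5);

	/* prints: raw 0x1500  int 10  frac 256/512 */
	printf("raw 0x%x  int %u  frac %u/512\n",
	       v, v >> FRAC_W, v & ((1u << FRAC_W) - 1));
	return 0;
}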
+
+/**
+ * struct ice_cgu_pll_params_e82x - E82X CGU parameters
* @refclk_pre_div: Reference clock pre-divisor
* @feedback_div: Feedback divisor
* @frac_n_div: Fractional divisor
@@ -185,9 +287,34 @@ struct ice_cgu_pin_desc {
extern const struct
ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+/**
+ * struct ice_cgu_pll_params_e825c - E825C CGU parameters
+ * @tspll_ck_refclkfreq: tspll_ck_refclkfreq selection
+ * @tspll_ndivratio: ndiv ratio that goes directly to the pll
+ * @tspll_fbdiv_intgr: TS PLL integer feedback divide
+ * @tspll_fbdiv_frac: TS PLL fractional feedback divide
+ * @ref1588_ck_div: clock divider for tspll ref
+ *
+ * Clock Generation Unit parameters used to program the PLL based on the
+ * selected TIME_REF/TCXO frequency.
+ */
+struct ice_cgu_pll_params_e825c {
+ u32 tspll_ck_refclkfreq;
+ u32 tspll_ndivratio;
+ u32 tspll_fbdiv_intgr;
+ u32 tspll_fbdiv_frac;
+ u32 ref1588_ck_div;
+};
+
+extern const struct
+ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ];
+
#define E810C_QSFP_C827_0_HANDLE 2
#define E810C_QSFP_C827_1_HANDLE 3
+/* Table of constants related to possible ETH56G PHY resources */
+extern const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES];
+
/* Table of constants related to possible TIME_REF sources */
extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ];
@@ -197,7 +324,9 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD];
/* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for
* the E810 devices. Based off of a PLL with an 812.5 MHz frequency.
*/
-#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
+#define ICE_E810_PLL_FREQ 812500000
+#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL
+#define E810_OUT_PROP_DELAY_NS 1
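Assuming the usual 32.32 fixed-point nanoseconds-per-tick interpretation of the increment register, the nominal value follows directly from the PLL frequency: 2^32 * (10^9 / 812500000) = 2^32 * 16/13 = 0x13b13b13b. A standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pll_hz = 812500000ULL; /* ICE_E810_PLL_FREQ */
	uint64_t incval = (1000000000ULL << 32) / pll_hz;

	printf("0x%llx\n", (unsigned long long)incval); /* 0x13b13b13b */
	return 0;
}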
/* Device agnostic functions */
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
@@ -208,11 +337,15 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time);
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval);
int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj);
+int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw);
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp);
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
void ice_ptp_reset_ts_memory(struct ice_hw *hw);
int ice_ptp_init_phc(struct ice_hw *hw);
+void ice_ptp_init_hw(struct ice_hw *hw);
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
+int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
+ enum ice_ptp_tmr_cmd configured_cmd);
/* E822 family functions */
int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
@@ -264,9 +397,9 @@ int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset);
int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port);
int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port);
int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold);
/* E810 family functions */
-int ice_ptp_init_phy_e810(struct ice_hw *hw);
int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
@@ -280,11 +413,44 @@ int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
enum dpll_lock_status *dpll_state);
int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num);
-
-void ice_ptp_init_phy_model(struct ice_hw *hw);
int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
unsigned long *caps);
+/* ETH56G family functions */
+int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status);
+int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset);
+int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_tx_offset_eth56g(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_rx_offset_eth56g(struct ice_hw *hw, u8 port);
+int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold);
+int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port);
+
+#define ICE_ETH56G_NOMINAL_INCVAL 0x140000000ULL
+#define ICE_ETH56G_NOMINAL_PCS_REF_TUS 0x100000000ULL
+#define ICE_ETH56G_NOMINAL_PCS_REF_INC 0x300000000ULL
+#define ICE_ETH56G_NOMINAL_THRESH4 0x7777
+#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6
+
+/**
+ * ice_get_base_incval - Get base clock increment value
+ * @hw: pointer to the HW struct
+ *
+ * Return: base clock increment value for supported PHYs, 0 otherwise
+ */
+static inline u64 ice_get_base_incval(struct ice_hw *hw)
+{
+ switch (hw->ptp.phy_model) {
+ case ICE_PHY_ETH56G:
+ return ICE_ETH56G_NOMINAL_INCVAL;
+ case ICE_PHY_E810:
+ return ICE_PTP_NOMINAL_INCVAL_E810;
+ case ICE_PHY_E82X:
+ return ice_e82x_nominal_incval(ice_e82x_time_ref(hw));
+ default:
+ return 0;
+ }
+}
+
#define PFTSYN_SEM_BYTES 4
#define ICE_PTP_CLOCK_INDEX_0 0x00
@@ -312,6 +478,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define TS_CMD_MASK_E810 0xFF
#define TS_CMD_MASK 0xF
#define SYNC_EXEC_CMD 0x3
+#define TS_CMD_RX_TYPE ICE_M(0x18, 0x4)
/* Macros to derive port low and high addresses on both quads */
#define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF)
@@ -344,11 +511,8 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define Q_REG_TX_MEM_GBL_CFG 0xC08
#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_S 0
#define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M BIT(0)
-#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_S 1
#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_M ICE_M(0xFF, 1)
-#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_S 9
#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_M ICE_M(0x3F, 9)
-#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_S 15
#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M BIT(15)
/* Tx Timestamp data registers */
@@ -380,7 +544,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define P_REG_TIMETUS_L 0x410
#define P_REG_TIMETUS_U 0x414
-#define P_REG_40B_LOW_M 0xFF
+#define P_REG_40B_LOW_M GENMASK(7, 0)
#define P_REG_40B_HIGH_S 8
/* PHY window length registers */
@@ -487,7 +651,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
#define ETH_GLTSYN_SHADJ_H(_i) (0x0300037C + ((_i) * 32))
/* E810 timer command register */
-#define ETH_GLTSYN_CMD 0x03000344
+#define E810_ETH_GLTSYN_CMD 0x03000344
/* Source timer incval macros */
#define INCVAL_HIGH_M 0xFF
@@ -549,4 +713,115 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
/* E810T PCA9575 IO controller pin control */
#define ICE_E810T_P0_GNSS_PRSNT_N BIT(4)
+/* ETH56G PHY register addresses */
+/* Timestamp PHY incval registers */
+#define PHY_REG_TIMETUS_L 0x8
+#define PHY_REG_TIMETUS_U 0xC
+
+/* Timestamp PCS registers */
+#define PHY_PCS_REF_TUS_L 0x18
+#define PHY_PCS_REF_TUS_U 0x1C
+
+/* Timestamp PCS ref incval registers */
+#define PHY_PCS_REF_INC_L 0x20
+#define PHY_PCS_REF_INC_U 0x24
+
+/* Timestamp init registers */
+#define PHY_REG_RX_TIMER_INC_PRE_L 0x64
+#define PHY_REG_RX_TIMER_INC_PRE_U 0x68
+#define PHY_REG_TX_TIMER_INC_PRE_L 0x44
+#define PHY_REG_TX_TIMER_INC_PRE_U 0x48
+
+/* Timestamp match and adjust target registers */
+#define PHY_REG_RX_TIMER_CNT_ADJ_L 0x6C
+#define PHY_REG_RX_TIMER_CNT_ADJ_U 0x70
+#define PHY_REG_TX_TIMER_CNT_ADJ_L 0x4C
+#define PHY_REG_TX_TIMER_CNT_ADJ_U 0x50
+
+/* Timestamp command registers */
+#define PHY_REG_TX_TMR_CMD 0x40
+#define PHY_REG_RX_TMR_CMD 0x60
+
+/* Phy offset ready registers */
+#define PHY_REG_TX_OFFSET_READY 0x54
+#define PHY_REG_RX_OFFSET_READY 0x74
+
+/* Phy total offset registers */
+#define PHY_REG_TOTAL_TX_OFFSET_L 0x38
+#define PHY_REG_TOTAL_TX_OFFSET_U 0x3C
+#define PHY_REG_TOTAL_RX_OFFSET_L 0x58
+#define PHY_REG_TOTAL_RX_OFFSET_U 0x5C
+
+/* Timestamp capture registers */
+#define PHY_REG_TX_CAPTURE_L 0x78
+#define PHY_REG_TX_CAPTURE_U 0x7C
+#define PHY_REG_RX_CAPTURE_L 0x8C
+#define PHY_REG_RX_CAPTURE_U 0x90
+
+/* Memory status registers */
+#define PHY_REG_TX_MEMORY_STATUS_L 0x80
+#define PHY_REG_TX_MEMORY_STATUS_U 0x84
+
+/* Interrupt config register */
+#define PHY_REG_TS_INT_CONFIG 0x88
+
+/* XIF mode config register */
+#define PHY_MAC_XIF_MODE 0x24
+#define PHY_MAC_XIF_1STEP_ENA_M ICE_M(0x1, 5)
+#define PHY_MAC_XIF_TS_BIN_MODE_M ICE_M(0x1, 11)
+#define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20)
+#define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21)
+
+/* GPCS config register */
+#define PHY_GPCS_CONFIG_REG0 0x268
+#define PHY_GPCS_CONFIG_REG0_TX_THR_M ICE_M(0xF, 24)
+#define PHY_GPCS_BITSLIP 0x5C
+
+#define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0)
+#define PHY_TS_INT_CONFIG_ENA_M BIT(6)
+
+/* 1-step PTP config */
+#define PHY_PTP_1STEP_CONFIG 0x270
+#define PHY_PTP_1STEP_T1S_UP64_M ICE_M(0xF, 4)
+#define PHY_PTP_1STEP_T1S_DELTA_M ICE_M(0xF, 8)
+#define PHY_PTP_1STEP_PEER_DELAY(_port) (0x274 + 4 * (_port))
+#define PHY_PTP_1STEP_PD_ADD_PD_M ICE_M(0x1, 0)
+#define PHY_PTP_1STEP_PD_DELAY_M ICE_M(0x3fffffff, 1)
+#define PHY_PTP_1STEP_PD_DLY_V_M ICE_M(0x1, 31)
+
+/* Macros to derive offsets for TimeStampLow and TimeStampHigh */
+#define PHY_TSTAMP_L(x) (((x) * 8) + 0)
+#define PHY_TSTAMP_U(x) (((x) * 8) + 4)
+
+#define PHY_REG_REVISION 0x85000
+
+#define PHY_REG_DESKEW_0 0x94
+#define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0)
+#define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7)
+#define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3
+#define PHY_REG_DESKEW_0_VALID GENMASK(10, 10)
+
+#define PHY_REG_GPCS_BITSLIP 0x5C
+#define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset))
+#define PHY_REVISION_ETH56G 0x10200
+#define PHY_VENDOR_TXLANE_THRESH 0x2000C
+
+#define PHY_MAC_TSU_CONFIG 0x40
+#define PHY_MAC_TSU_CFG_RX_MODE_M ICE_M(0x7, 0)
+#define PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M ICE_M(0x7, 4)
+#define PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M ICE_M(0x7, 8)
+#define PHY_MAC_TSU_CFG_TX_MODE_M ICE_M(0x7, 12)
+#define PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M ICE_M(0x1F, 16)
+#define PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M ICE_M(0x1F, 21)
+#define PHY_MAC_TSU_CFG_BLKS_PER_CLK_M ICE_M(0x1, 28)
+#define PHY_MAC_RX_MODULO 0x44
+#define PHY_MAC_RX_OFFSET 0x48
+#define PHY_MAC_RX_OFFSET_M ICE_M(0xFFFFFF, 0)
+#define PHY_MAC_TX_MODULO 0x4C
+#define PHY_MAC_BLOCKTIME 0x50
+#define PHY_MAC_MARKERTIME 0x54
+#define PHY_MAC_TX_OFFSET 0x58
+
+#define PHY_PTP_INT_STATUS 0x7FD140
+
#endif /* _ICE_PTP_HW_H_ */
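The ETH56G timestamp memory is laid out as {low, high} register pairs spaced 8 bytes apart per timestamp index, which is exactly what the PHY_TSTAMP_L/U macros above encode. A quick check of the offset arithmetic:

#include <stdio.h>

#define PHY_TSTAMP_L(x) (((x) * 8) + 0)
#define PHY_TSTAMP_U(x) (((x) * 8) + 4)

int main(void)
{
	for (int idx = 0; idx < 3; idx++)
		printf("ts[%d]: L=0x%02x U=0x%02x\n",
		       idx, PHY_TSTAMP_L(idx), PHY_TSTAMP_U(idx));
	return 0; /* ts[0]: L=0x00 U=0x04; ts[1]: L=0x08 U=0x0c; ... */
}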
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 5f30fb131f74..bdda3401e343 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -3,42 +3,51 @@
#include "ice.h"
#include "ice_eswitch.h"
-#include "ice_devlink.h"
+#include "devlink/devlink.h"
+#include "devlink/devlink_port.h"
#include "ice_sriov.h"
#include "ice_tc_lib.h"
#include "ice_dcb_lib.h"
/**
- * ice_repr_get_sw_port_id - get port ID associated with representor
- * @repr: pointer to port representor
+ * ice_repr_inc_tx_stats - increment Tx statistic by one packet
+ * @repr: repr to increment stats on
+ * @len: length of the packet
+ * @xmit_status: value returned by xmit function
*/
-static int ice_repr_get_sw_port_id(struct ice_repr *repr)
+void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
+ int xmit_status)
{
- return repr->src_vsi->back->hw.port_info->lport;
+ struct ice_repr_pcpu_stats *stats;
+
+ if (unlikely(xmit_status != NET_XMIT_SUCCESS &&
+ xmit_status != NET_XMIT_CN)) {
+ this_cpu_inc(repr->stats->tx_drops);
+ return;
+ }
+
+ stats = this_cpu_ptr(repr->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
}
/**
- * ice_repr_get_phys_port_name - get phys port name
- * @netdev: pointer to port representor netdev
- * @buf: write here port name
- * @len: max length of buf
+ * ice_repr_inc_rx_stats - increment Rx statistic by one packet
+ * @netdev: repr netdev to increment stats on
+ * @len: length of the packet
*/
-static int
-ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
+void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_repr *repr = np->repr;
- int res;
-
- /* Devlink port is registered and devlink core is taking care of name formatting. */
- if (repr->vf->devlink_port.devlink)
- return -EOPNOTSUPP;
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ struct ice_repr_pcpu_stats *stats;
- res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
- repr->id);
- if (res <= 0)
- return -EOPNOTSUPP;
- return 0;
+ stats = this_cpu_ptr(repr->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ u64_stats_update_end(&stats->syncp);
}
/**
@@ -76,7 +85,7 @@ ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
* ice_netdev_to_repr - Get port representor for given netdevice
* @netdev: pointer to port representor netdev
*/
-struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
+struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
@@ -139,38 +148,35 @@ static int ice_repr_stop(struct net_device *netdev)
* ice_repr_sp_stats64 - get slow path stats for port representor
* @dev: network interface device structure
* @stats: netlink stats structure
- *
- * RX/TX stats are being swapped here to be consistent with VF stats. In slow
- * path, port representor receives data when the corresponding VF is sending it
- * (and vice versa), TX and RX bytes/packets are effectively swapped on port
- * representor.
*/
static int
ice_repr_sp_stats64(const struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
- struct ice_netdev_priv *np = netdev_priv(dev);
- int vf_id = np->repr->vf->vf_id;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
- u64 pkts, bytes;
-
- tx_ring = np->vsi->tx_rings[vf_id];
- ice_fetch_u64_stats_per_ring(&tx_ring->ring_stats->syncp,
- tx_ring->ring_stats->stats,
- &pkts, &bytes);
- stats->rx_packets = pkts;
- stats->rx_bytes = bytes;
-
- rx_ring = np->vsi->rx_rings[vf_id];
- ice_fetch_u64_stats_per_ring(&rx_ring->ring_stats->syncp,
- rx_ring->ring_stats->stats,
- &pkts, &bytes);
- stats->tx_packets = pkts;
- stats->tx_bytes = bytes;
- stats->tx_dropped = rx_ring->ring_stats->rx_stats.alloc_page_failed +
- rx_ring->ring_stats->rx_stats.alloc_buf_failed;
-
+ struct ice_repr *repr = ice_netdev_to_repr(dev);
+ int i;
+
+ for_each_possible_cpu(i) {
+ u64 tbytes, tpkts, tdrops, rbytes, rpkts;
+ struct ice_repr_pcpu_stats *repr_stats;
+ unsigned int start;
+
+ repr_stats = per_cpu_ptr(repr->stats, i);
+ do {
+ start = u64_stats_fetch_begin(&repr_stats->syncp);
+ tbytes = repr_stats->tx_bytes;
+ tpkts = repr_stats->tx_packets;
+ tdrops = repr_stats->tx_drops;
+ rbytes = repr_stats->rx_bytes;
+ rpkts = repr_stats->rx_packets;
+ } while (u64_stats_fetch_retry(&repr_stats->syncp, start));
+
+ stats->tx_bytes += tbytes;
+ stats->tx_packets += tpkts;
+ stats->tx_dropped += tdrops;
+ stats->rx_bytes += rbytes;
+ stats->rx_packets += rpkts;
+ }
return 0;
}
@@ -240,7 +246,6 @@ ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
}
static const struct net_device_ops ice_repr_netdev_ops = {
- .ndo_get_phys_port_name = ice_repr_get_phys_port_name,
.ndo_get_stats64 = ice_repr_get_stats64,
.ndo_open = ice_repr_open,
.ndo_stop = ice_repr_stop,
@@ -280,9 +285,7 @@ ice_repr_reg_netdev(struct net_device *netdev)
static void ice_repr_remove_node(struct devlink_port *devlink_port)
{
- devl_lock(devlink_port->devlink);
devl_rate_leaf_destroy(devlink_port);
- devl_unlock(devlink_port->devlink);
}
/**
@@ -291,7 +294,7 @@ static void ice_repr_remove_node(struct devlink_port *devlink_port)
*/
static void ice_repr_rem(struct ice_repr *repr)
{
- kfree(repr->q_vector);
+ free_percpu(repr->stats);
free_netdev(repr->netdev);
kfree(repr);
}
@@ -303,6 +306,7 @@ static void ice_repr_rem(struct ice_repr *repr)
void ice_repr_rem_vf(struct ice_repr *repr)
{
ice_repr_remove_node(&repr->vf->devlink_port);
+ ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac);
unregister_netdev(repr->netdev);
ice_devlink_destroy_vf_port(repr->vf);
ice_virtchnl_set_dflt_ops(repr->vf);
@@ -331,7 +335,6 @@ static void ice_repr_set_tx_topology(struct ice_pf *pf)
static struct ice_repr *
ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
{
- struct ice_q_vector *q_vector;
struct ice_netdev_priv *np;
struct ice_repr *repr;
int err;
@@ -346,23 +349,22 @@ ice_repr_add(struct ice_pf *pf, struct ice_vsi *src_vsi, const u8 *parent_mac)
goto err_alloc;
}
+ repr->stats = netdev_alloc_pcpu_stats(struct ice_repr_pcpu_stats);
+ if (!repr->stats) {
+ err = -ENOMEM;
+ goto err_stats;
+ }
+
repr->src_vsi = src_vsi;
+ repr->id = src_vsi->vsi_num;
np = netdev_priv(repr->netdev);
np->repr = repr;
- q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
- if (!q_vector) {
- err = -ENOMEM;
- goto err_alloc_q_vector;
- }
- repr->q_vector = q_vector;
- repr->q_id = repr->id;
-
ether_addr_copy(repr->parent_mac, parent_mac);
return repr;
-err_alloc_q_vector:
+err_stats:
free_netdev(repr->netdev);
err_alloc:
kfree(repr);
@@ -400,11 +402,17 @@ struct ice_repr *ice_repr_add_vf(struct ice_vf *vf)
if (err)
goto err_netdev;
+ err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac);
+ if (err)
+ goto err_cfg_vsi;
+
ice_virtchnl_set_repr_ops(vf);
ice_repr_set_tx_topology(vf->pf);
return repr;
+err_cfg_vsi:
+ unregister_netdev(repr->netdev);
err_netdev:
ice_repr_rem(repr);
err_repr_add:
@@ -412,12 +420,9 @@ err_repr_add:
return ERR_PTR(err);
}
-struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi)
+struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id)
{
- if (!vsi->vf)
- return NULL;
-
- return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id);
+ return xa_load(&pf->eswitch.reprs, id);
}
/**
@@ -439,15 +444,3 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr)
netif_carrier_off(repr->netdev);
netif_tx_stop_all_queues(repr->netdev);
}
-
-/**
- * ice_repr_set_traffic_vsi - set traffic VSI for port representor
- * @repr: repr on with VSI will be set
- * @vsi: pointer to VSI that will be used by port representor to pass traffic
- */
-void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
-{
- struct ice_netdev_priv *np = netdev_priv(repr->netdev);
-
- np->vsi = vsi;
-}
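The reworked ice_repr_sp_stats64() folds per-CPU counters into the rtnl_link_stats64 totals. A minimal userspace model of that fold, where each CPU owns its own counters and a reader sums the snapshots; the u64_stats_fetch_begin/retry seqcount that guards against 64-bit tearing on 32-bit kernels is deliberately elided here:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct pcpu_stats {
	uint64_t tx_packets;
	uint64_t tx_bytes;
};

int main(void)
{
	struct pcpu_stats stats[NR_CPUS] = {
		{ 10, 1500 }, { 3, 180 }, { 0, 0 }, { 7, 900 },
	};
	uint64_t pkts = 0, bytes = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++) { /* for_each_possible_cpu() */
		pkts  += stats[cpu].tx_packets;
		bytes += stats[cpu].tx_bytes;
	}
	printf("tx: %llu pkts, %llu bytes\n",
	       (unsigned long long)pkts, (unsigned long long)bytes); /* 20, 2580 */
	return 0;
}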
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index f9aede315716..488661b2900b 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -6,20 +6,24 @@
#include <net/dst_metadata.h>
+struct ice_repr_pcpu_stats {
+ struct u64_stats_sync syncp;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_drops;
+};
+
struct ice_repr {
struct ice_vsi *src_vsi;
struct ice_vf *vf;
- struct ice_q_vector *q_vector;
struct net_device *netdev;
struct metadata_dst *dst;
struct ice_esw_br_port *br_port;
- int q_id;
+ struct ice_repr_pcpu_stats __percpu *stats;
u32 id;
u8 parent_mac[ETH_ALEN];
-#ifdef CONFIG_ICE_SWITCHDEV
- /* info about slow path rule */
- struct ice_rule_query_data sp_rule;
-#endif
};
struct ice_repr *ice_repr_add_vf(struct ice_vf *vf);
@@ -28,10 +32,11 @@ void ice_repr_rem_vf(struct ice_repr *repr);
void ice_repr_start_tx_queues(struct ice_repr *repr);
void ice_repr_stop_tx_queues(struct ice_repr *repr);
-void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
-
-struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
+struct ice_repr *ice_netdev_to_repr(const struct net_device *netdev);
bool ice_is_port_repr_netdev(const struct net_device *netdev);
-struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi);
+void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len,
+ int xmit_status);
+void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
+struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
index ead75fe2bcda..3b0054faf70c 100644
--- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h
@@ -47,10 +47,12 @@ struct ice_sbq_evt_desc {
};
enum ice_sbq_msg_dev {
- rmn_0 = 0x02,
- rmn_1 = 0x03,
- rmn_2 = 0x04,
- cgu = 0x06
+ eth56g_phy_0 = 0x02,
+ rmn_0 = 0x02,
+ rmn_1 = 0x03,
+ rmn_2 = 0x04,
+ cgu = 0x06,
+ eth56g_phy_1 = 0x0D,
};
enum ice_sbq_msg_opcode {
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index a1525992d14b..ecf8f5d60292 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1128,12 +1128,11 @@ u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* 5 or less sw_entry_point_layer
*/
/* calculate the VSI layer based on number of layers. */
- if (hw->num_tx_sched_layers > ICE_VSI_LAYER_OFFSET + 1) {
- u8 layer = hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
-
- if (layer > hw->sw_entry_point_layer)
- return layer;
- }
+ if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
+ return hw->num_tx_sched_layers - ICE_VSI_LAYER_OFFSET;
+ else if (hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS)
+ /* qgroup and VSI layers are the same */
+ return hw->num_tx_sched_layers - ICE_QGRP_LAYER_OFFSET;
return hw->sw_entry_point_layer;
}
@@ -1150,13 +1149,10 @@ u8 ice_sched_get_agg_layer(struct ice_hw *hw)
* 7 or less sw_entry_point_layer
*/
/* calculate the aggregator layer based on number of layers. */
- if (hw->num_tx_sched_layers > ICE_AGG_LAYER_OFFSET + 1) {
- u8 layer = hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
-
- if (layer > hw->sw_entry_point_layer)
- return layer;
- }
- return hw->sw_entry_point_layer;
+ if (hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS)
+ return hw->num_tx_sched_layers - ICE_AGG_LAYER_OFFSET;
+ else
+ return hw->sw_entry_point_layer;
}
/**
@@ -1510,10 +1506,11 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
{
struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
+ u8 qgrp_layer, vsi_layer;
u16 max_children;
- u8 qgrp_layer;
qgrp_layer = ice_sched_get_qgrp_layer(pi->hw);
+ vsi_layer = ice_sched_get_vsi_layer(pi->hw);
max_children = pi->hw->max_children[qgrp_layer];
vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
@@ -1524,6 +1521,12 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
if (!vsi_node)
return NULL;
+ /* If the queue group and VSI layers are the same, then queues
+ * are all attached directly to the VSI
+ */
+ if (qgrp_layer == vsi_layer)
+ return vsi_node;
+
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
while (qgrp_node) {
@@ -3199,7 +3202,7 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
u8 profile_type;
int status;
- if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ if (!pi || layer_num >= pi->hw->num_tx_sched_layers)
return NULL;
switch (rl_type) {
case ICE_MIN_BW:
@@ -3215,8 +3218,6 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
return NULL;
}
- if (!pi)
- return NULL;
hw = pi->hw;
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
@@ -3446,7 +3447,7 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
struct ice_aqc_rl_profile_info *rl_prof_elem;
int status = 0;
- if (layer_num >= ICE_AQC_TOPO_MAX_LEVEL_NUM)
+ if (layer_num >= pi->hw->num_tx_sched_layers)
return -EINVAL;
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 1aef05ea5a57..7b668083be07 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -6,6 +6,17 @@
#include "ice_common.h"
+/**
+ * DOC: ice_sched.h
+ *
+ * This header file contains everything needed for the scheduler in the broad
+ * sense: defines related to layers, structures related to aggregators,
+ * function declarations and more.
+ */
+
+#define ICE_SCHED_5_LAYERS 5
+#define ICE_SCHED_9_LAYERS 9
+
#define SCHED_NODE_NAME_MAX_LEN 32
#define ICE_QGRP_LAYER_OFFSET 2
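A sketch of the layer selection that the ice_sched_get_vsi_layer() change earlier in this patch now performs, driven by the ICE_SCHED_{5,9}_LAYERS constants added above. ICE_QGRP_LAYER_OFFSET is 2 as shown; VSI_LAYER_OFFSET below is a placeholder, take the real value from ice_sched.h:

#include <stdio.h>

#define SCHED_5_LAYERS    5 /* ICE_SCHED_5_LAYERS */
#define SCHED_9_LAYERS    9 /* ICE_SCHED_9_LAYERS */
#define QGRP_LAYER_OFFSET 2 /* ICE_QGRP_LAYER_OFFSET */
#define VSI_LAYER_OFFSET  4 /* placeholder: use the value from ice_sched.h */

static int vsi_layer(int num_layers, int sw_entry_point_layer)
{
	if (num_layers == SCHED_9_LAYERS)
		return num_layers - VSI_LAYER_OFFSET;
	if (num_layers == SCHED_5_LAYERS)
		/* qgroup and VSI layers are the same */
		return num_layers - QGRP_LAYER_OFFSET;
	return sw_entry_point_layer;
}

int main(void)
{
	printf("9-layer: %d, 5-layer: %d\n", vsi_layer(9, 1), vsi_layer(5, 1));
	return 0;
}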
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index a958fcf3e6be..55ef33208456 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -170,8 +170,6 @@ void ice_free_vfs(struct ice_pf *pf)
else
dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
- ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));
-
mutex_lock(&vfs->table_lock);
ice_for_each_vf(pf, bkt, vf) {
@@ -227,7 +225,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
struct ice_vsi *vsi;
params.type = ICE_VSI_VF;
- params.pi = ice_vf_get_port_info(vf);
+ params.port_info = ice_vf_get_port_info(vf);
params.vf = vf;
params.flags = ICE_VSI_FLAG_INIT;
@@ -362,13 +360,14 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
* @vf: VF to calculate the register index for
* @q_vector: a q_vector associated to the VF
*/
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
+void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
if (!vf || !q_vector)
- return -EINVAL;
+ return;
/* always add one to account for the OICR being the first MSIX */
- return vf->first_vector_idx + q_vector->v_idx + 1;
+ q_vector->vf_reg_idx = q_vector->v_idx + ICE_NONQ_VECS_VF;
+ q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx;
}
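A worked example of the index math above, assuming ICE_NONQ_VECS_VF is 1 (the OICR/mailbox vector occupying slot 0); the absolute starting index is illustrative:

#include <stdio.h>

int main(void)
{
	int first_vector_idx = 40; /* illustrative vf->first_vector_idx */
	int nonq_vecs = 1;         /* assumption: ICE_NONQ_VECS_VF == 1 */

	for (int v_idx = 0; v_idx < 3; v_idx++) {
		int vf_reg_idx = v_idx + nonq_vecs;

		printf("q_vector %d -> vf_reg_idx %d, reg_idx %d\n",
		       v_idx, vf_reg_idx, first_vector_idx + vf_reg_idx);
	}
	return 0; /* q_vector 0 lands at reg_idx 41, and so on */
}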
/**
@@ -833,11 +832,6 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
pci_dev_get(vfdev);
- /* set default number of MSI-X */
- vf->num_msix = pf->vfs.num_msix_per;
- vf->num_vf_qs = pf->vfs.num_qps_per;
- ice_vc_set_default_allowlist(vf);
-
hash_add_rcu(vfs->table, &vf->entry, vf_id);
}
@@ -897,7 +891,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
goto err_unroll_sriov;
}
- ice_eswitch_reserve_cp_queues(pf, num_vfs);
ret = ice_start_vfs(pf);
if (ret) {
dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
@@ -1423,21 +1416,23 @@ out_put_vf:
}
/**
- * ice_set_vf_mac
- * @netdev: network interface device structure
+ * __ice_set_vf_mac - program VF MAC address
+ * @pf: PF to be configured
* @vf_id: VF identifier
* @mac: MAC address
*
* program VF MAC address
+ * Return: zero on success or an error code on failure
*/
-int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac)
{
- struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct device *dev;
struct ice_vf *vf;
int ret;
+ dev = ice_pf_to_dev(pf);
if (is_multicast_ether_addr(mac)) {
- netdev_err(netdev, "%pM not a valid unicast address\n", mac);
+ dev_err(dev, "%pM not a valid unicast address\n", mac);
return -EINVAL;
}
@@ -1466,13 +1461,13 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
if (is_zero_ether_addr(mac)) {
/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
vf->pf_set_mac = false;
- netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
- vf->vf_id);
+ dev_info(dev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
+ vf->vf_id);
} else {
/* PF will add MAC rule for the VF */
vf->pf_set_mac = true;
- netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
- mac, vf_id);
+ dev_info(dev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
+ mac, vf_id);
}
ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
@@ -1484,6 +1479,20 @@ out_put_vf:
}
/**
+ * ice_set_vf_mac - .ndo_set_vf_mac handler
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @mac: MAC address
+ *
+ * Program the VF MAC address.
+ * Return: zero on success or an error code on failure
+ */
+int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ return __ice_set_vf_mac(ice_netdev_to_pf(netdev), vf_id, mac);
+}
+
+/**
* ice_set_vf_trust
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -1869,6 +1878,24 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
}
/**
+ * ice_print_vf_tx_mdd_event - print VF Tx malicious driver detect event
+ * @vf: pointer to the VF structure
+ */
+void ice_print_vf_tx_mdd_event(struct ice_vf *vf)
+{
+ struct ice_pf *pf = vf->pf;
+ struct device *dev;
+
+ dev = ice_pf_to_dev(pf);
+
+ dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
+ vf->mdd_tx_events.count, pf->hw.pf_id, vf->vf_id,
+ vf->dev_lan_addr,
+ test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
+ ? "on" : "off");
+}
+
+/**
* ice_print_vfs_mdd_events - print VFs malicious driver detect event
* @pf: pointer to the PF structure
*
@@ -1876,8 +1903,6 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
*/
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
- struct ice_hw *hw = &pf->hw;
struct ice_vf *vf;
unsigned int bkt;
@@ -1904,10 +1929,7 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
vf->mdd_tx_events.last_printed =
vf->mdd_tx_events.count;
-
- dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM.\n",
- vf->mdd_tx_events.count, hw->pf_id, vf->vf_id,
- vf->dev_lan_addr);
+ ice_print_vf_tx_mdd_event(vf);
}
}
mutex_unlock(&pf->vfs.table_lock);
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 8488df38b586..96549ca5c52c 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -28,6 +28,7 @@
#ifdef CONFIG_PCI_IOV
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
+int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
@@ -49,7 +50,7 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
-int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
+void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
int
ice_get_vf_stats(struct net_device *netdev, int vf_id,
@@ -58,6 +59,7 @@ void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_print_vfs_mdd_events(struct ice_pf *pf);
void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
+void ice_print_vf_tx_mdd_event(struct ice_vf *vf);
bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev);
@@ -69,6 +71,7 @@ static inline
void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
+static inline void ice_print_vf_tx_mdd_event(struct ice_vf *vf) { }
static inline void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { }
static inline int
@@ -79,6 +82,13 @@ ice_sriov_configure(struct pci_dev __always_unused *pdev,
}
static inline int
+__ice_set_vf_mac(struct ice_pf __always_unused *pf,
+ u16 __always_unused vf_id, const u8 __always_unused *mac)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
ice_set_vf_mac(struct net_device __always_unused *netdev,
int __always_unused vf_id, u8 __always_unused *mac)
{
@@ -130,11 +140,10 @@ ice_set_vf_bw(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-static inline int
+static inline void
ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
struct ice_q_vector __always_unused *q_vector)
{
- return 0;
}
static inline int
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index b4ea935e8300..fe8847184cb1 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -3,6 +3,7 @@
#include "ice_lib.h"
#include "ice_switch.h"
+#include "ice_trace.h"
#define ICE_ETH_DA_OFFSET 0
#define ICE_ETH_ETHTYPE_OFFSET 12
@@ -42,6 +43,7 @@ enum {
ICE_PKT_KMALLOC = BIT(9),
ICE_PKT_PPPOE = BIT(10),
ICE_PKT_L2TPV3 = BIT(11),
+ ICE_PKT_PFCP = BIT(12),
};
struct ice_dummy_pkt_offsets {
@@ -1110,6 +1112,77 @@ ICE_DECLARE_PKT_TEMPLATE(ipv6_gtp) = {
0x00, 0x00,
};
+ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv4) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_ILOS, 34 },
+ { ICE_PFCP, 42 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv4) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x2c, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 34 */
+ 0x00, 0x18, 0x00, 0x00,
+
+ 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(pfcp_session_ipv6) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_ILOS, 54 },
+ { ICE_PFCP, 62 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(pfcp_session_ipv6) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xdd, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x22, 0x65, /* ICE_UDP_ILOS 54 */
+ 0x00, 0x18, 0x00, 0x00,
+
+ 0x21, 0x01, 0x00, 0x0c, /* ICE_PFCP 62 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
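For reference, the fixed bytes in the two PFCP templates above decode as
follows (a reading of the hex, not part of the patch; PFCP header layout per
3GPP TS 29.244):

/*
 * UDP destination port 0x22 0x65 == 0x2265 == 8805, the IANA-assigned
 * PFCP port.
 * PFCP byte 0 == 0x21: version 1 (upper three bits 001) with the S bit
 * set (SEID present), consistent with the pfcp_session template names.
 * The following 0x01 is the message type and 0x00 0x0c the 12-byte
 * message length.
 */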
ICE_DECLARE_PKT_OFFSETS(pppoe_ipv4_tcp) = {
{ ICE_MAC_OFOS, 0 },
{ ICE_ETYPE_OL, 12 },
@@ -1343,6 +1416,8 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(ipv4_gtpu_ipv4_tcp, ICE_PKT_TUN_GTPU),
ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPC | ICE_PKT_OUTER_IPV6),
ICE_PKT_PROFILE(ipv4_gtpu_ipv4, ICE_PKT_TUN_GTPC),
+ ICE_PKT_PROFILE(pfcp_session_ipv6, ICE_PKT_PFCP | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(pfcp_session_ipv4, ICE_PKT_PFCP),
ICE_PKT_PROFILE(pppoe_ipv6_udp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6 |
ICE_PKT_INNER_UDP),
ICE_PKT_PROFILE(pppoe_ipv6_tcp, ICE_PKT_PPPOE | ICE_PKT_OUTER_IPV6),
@@ -1397,7 +1472,6 @@ int ice_init_def_sw_recp(struct ice_hw *hw)
recps[i].root_rid = i;
INIT_LIST_HEAD(&recps[i].filt_rules);
INIT_LIST_HEAD(&recps[i].filt_replay_rules);
- INIT_LIST_HEAD(&recps[i].rg_list);
mutex_init(&recps[i].filt_rule_lock);
}
@@ -1825,7 +1899,8 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
- lkup_type == ICE_SW_LKUP_DFLT) {
+ lkup_type == ICE_SW_LKUP_DFLT ||
+ lkup_type == ICE_SW_LKUP_LAST) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
if (opc == ice_aqc_opc_alloc_res)
@@ -1887,6 +1962,15 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
status = -ENOENT;
+ if (!status) {
+ if (opc == ice_aqc_opc_add_sw_rules)
+ hw->switch_info->rule_cnt += num_rules;
+ else if (opc == ice_aqc_opc_remove_sw_rules)
+ hw->switch_info->rule_cnt -= num_rules;
+ }
+
+ trace_ice_aq_sw_rules(hw->switch_info);
+
return status;
}
@@ -2075,6 +2159,18 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
}
/**
+ * ice_init_chk_recipe_reuse_support - check if recipe reuse is supported
+ * @hw: pointer to the hardware structure
+ */
+void ice_init_chk_recipe_reuse_support(struct ice_hw *hw)
+{
+ struct ice_nvm_info *nvm = &hw->flash.nvm;
+
+ hw->recp_reuse = (nvm->major == 0x4 && nvm->minor >= 0x30) ||
+ nvm->major > 0x4;
+}
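
A quick truth table for the gate above (NVM versions are conventionally shown
in hex, so major 0x4 / minor 0x30 reads as "4.30"; illustrative only):

/*
 * major 0x4, minor 0x2f  ->  recp_reuse = false  (older than 4.30)
 * major 0x4, minor 0x30  ->  recp_reuse = true
 * major 0x5, minor 0x00  ->  recp_reuse = true   (any major above 4)
 */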
+
+/**
* ice_alloc_recipe - add recipe resource
* @hw: pointer to the hardware structure
* @rid: recipe ID returned as response to AQ call
@@ -2083,21 +2179,97 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
u16 buf_len = __struct_size(sw_buf);
+ u16 res_type;
int status;
sw_buf->num_elems = cpu_to_le16(1);
- sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
- ICE_AQC_RES_TYPE_S) |
- ICE_AQC_RES_TYPE_FLAG_SHARED);
+ res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE);
+ if (hw->recp_reuse)
+ res_type |= ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED;
+ else
+ res_type |= ICE_AQC_RES_TYPE_FLAG_SHARED;
+ sw_buf->res_type = cpu_to_le16(res_type);
status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
ice_aqc_opc_alloc_res);
- if (!status)
+ if (!status) {
*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
+ hw->switch_info->recp_cnt++;
+ }
+
+ return status;
+}
+
+/**
+ * ice_free_recipe_res - free recipe resource
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to free
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int ice_free_recipe_res(struct ice_hw *hw, u16 rid)
+{
+ int status;
+
+ status = ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid);
+ if (!status)
+ hw->switch_info->recp_cnt--;
return status;
}
/**
+ * ice_release_recipe_res - disassociate and free recipe resource
+ * @hw: pointer to the hardware structure
+ * @recp: the recipe struct resource to disassociate and free
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int ice_release_recipe_res(struct ice_hw *hw,
+ struct ice_sw_recipe *recp)
+{
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ struct ice_switch_info *sw = hw->switch_info;
+ u64 recp_assoc;
+ u32 rid, prof;
+ int status;
+
+ for_each_set_bit(rid, recp->r_bitmap, ICE_MAX_NUM_RECIPES) {
+ for_each_set_bit(prof, recipe_to_profile[rid],
+ ICE_MAX_NUM_PROFILES) {
+ status = ice_aq_get_recipe_to_profile(hw, prof,
+ &recp_assoc,
+ NULL);
+ if (status)
+ return status;
+
+ bitmap_from_arr64(r_bitmap, &recp_assoc,
+ ICE_MAX_NUM_RECIPES);
+ bitmap_andnot(r_bitmap, r_bitmap, recp->r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ bitmap_to_arr64(&recp_assoc, r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ ice_aq_map_recipe_to_profile(hw, prof,
+ recp_assoc, NULL);
+
+ clear_bit(rid, profile_to_recipe[prof]);
+ clear_bit(prof, recipe_to_profile[rid]);
+ }
+
+ status = ice_free_recipe_res(hw, rid);
+ if (status)
+ return status;
+
+ sw->recp_list[rid].recp_created = false;
+ sw->recp_list[rid].adv_rule = false;
+ memset(&sw->recp_list[rid].lkup_exts, 0,
+ sizeof(sw->recp_list[rid].lkup_exts));
+ clear_bit(rid, recp->r_bitmap);
+ }
+
+ return 0;
+}
+
+/**
* ice_get_recp_to_prof_map - updates recipe to profile mapping
* @hw: pointer to hardware structure
*
@@ -2127,25 +2299,12 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw)
}
/**
- * ice_collect_result_idx - copy result index values
- * @buf: buffer that contains the result index
- * @recp: the recipe struct to copy data into
- */
-static void
-ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
- struct ice_sw_recipe *recp)
-{
- if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
- set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
- recp->res_idxs);
-}
-
-/**
* ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
* @hw: pointer to hardware structure
* @recps: struct that we need to populate
* @rid: recipe ID that we are populating
* @refresh_required: true if we should get recipe to profile mapping from FW
+ * @is_add: true when the recipe is being added, false when it is only queried
*
* This function is used to populate all the necessary entries into our
* bookkeeping so that we have a current list of all the recipes that are
@@ -2153,7 +2312,7 @@ ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
*/
static int
ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
- bool *refresh_required)
+ bool *refresh_required, bool is_add)
{
DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
struct ice_aqc_recipe_data_elem *tmp;
@@ -2197,18 +2356,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
- struct ice_recp_grp_entry *rg_entry;
u8 i, prof, idx, prot = 0;
bool is_root;
u16 off = 0;
- rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
- GFP_KERNEL);
- if (!rg_entry) {
- status = -ENOMEM;
- goto err_unroll;
- }
-
idx = root_bufs.recipe_indx;
is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
@@ -2221,11 +2372,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
prof = find_first_bit(recipe_to_profile[idx],
ICE_MAX_NUM_PROFILES);
for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
- u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
-
- rg_entry->fv_idx[i] = lkup_indx;
- rg_entry->fv_mask[i] =
- le16_to_cpu(root_bufs.content.mask[i + 1]);
+ u8 lkup_indx = root_bufs.content.lkup_indx[i];
+ u16 lkup_mask = le16_to_cpu(root_bufs.content.mask[i]);
/* If the recipe is a chained recipe then all its
* child recipe's result will have a result index.
@@ -2236,42 +2384,38 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
* has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a
* valid offset value.
*/
- if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
- rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
- rg_entry->fv_idx[i] == 0)
+ if (!lkup_indx ||
+ (lkup_indx & ICE_AQ_RECIPE_LKUP_IGNORE) ||
+ test_bit(lkup_indx,
+ hw->switch_info->prof_res_bm[prof]))
continue;
- ice_find_prot_off(hw, ICE_BLK_SW, prof,
- rg_entry->fv_idx[i], &prot, &off);
+ ice_find_prot_off(hw, ICE_BLK_SW, prof, lkup_indx,
+ &prot, &off);
lkup_exts->fv_words[fv_word_idx].prot_id = prot;
lkup_exts->fv_words[fv_word_idx].off = off;
- lkup_exts->field_mask[fv_word_idx] =
- rg_entry->fv_mask[i];
+ lkup_exts->field_mask[fv_word_idx] = lkup_mask;
fv_word_idx++;
}
- /* populate rg_list with the data from the child entry of this
- * recipe
- */
- list_add(&rg_entry->l_entry, &recps[rid].rg_list);
/* Propagate some data to the recipe database */
- recps[idx].is_root = !!is_root;
recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
- recps[idx].need_pass_l2 = root_bufs.content.act_ctrl &
- ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
- recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl &
- ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
+ recps[idx].need_pass_l2 = !!(root_bufs.content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_NEED_PASS_L2);
+ recps[idx].allow_pass_l2 = !!(root_bufs.content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2);
bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
- recps[idx].chain_idx = root_bufs.content.result_indx &
- ~ICE_AQ_RECIPE_RESULT_EN;
- set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
- } else {
- recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
+ set_bit(root_bufs.content.result_indx &
+ ~ICE_AQ_RECIPE_RESULT_EN, recps[idx].res_idxs);
}
- if (!is_root)
+ if (!is_root) {
+ if (hw->recp_reuse && is_add)
+ recps[idx].recp_created = true;
+
continue;
+ }
/* Only do the following for root recipes entries */
memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
@@ -2283,19 +2427,11 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
/* Complete initialization of the root recipe entry */
lkup_exts->n_val_words = fv_word_idx;
- recps[rid].big_recp = (num_recps > 1);
- recps[rid].n_grp_count = (u8)num_recps;
- recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
- recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
- GFP_KERNEL);
- if (!recps[rid].root_buf) {
- status = -ENOMEM;
- goto err_unroll;
- }
/* Copy result indexes */
bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
- recps[rid].recp_created = true;
+ if (is_add)
+ recps[rid].recp_created = true;
err_unroll:
kfree(tmp);
@@ -2446,6 +2582,9 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
fi->lan_en = true;
}
}
+
+ if (fi->flag & ICE_FLTR_TX_ONLY)
+ fi->lan_en = false;
}
/**
@@ -2759,7 +2898,8 @@ ice_update_vsi_list_rule(struct ice_hw *hw, u16 *vsi_handle_arr, u16 num_vsi,
lkup_type == ICE_SW_LKUP_ETHERTYPE_MAC ||
lkup_type == ICE_SW_LKUP_PROMISC ||
lkup_type == ICE_SW_LKUP_PROMISC_VLAN ||
- lkup_type == ICE_SW_LKUP_DFLT)
+ lkup_type == ICE_SW_LKUP_DFLT ||
+ lkup_type == ICE_SW_LKUP_LAST)
rule_type = remove ? ICE_AQC_SW_RULES_T_VSI_LIST_CLEAR :
ICE_AQC_SW_RULES_T_VSI_LIST_SET;
else if (lkup_type == ICE_SW_LKUP_VLAN)
@@ -3821,6 +3961,7 @@ ice_cfg_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle, bool set,
} else if (f_info.flag & ICE_FLTR_TX) {
f_info.src_id = ICE_SRC_ID_VSI;
f_info.src = hw_vsi_id;
+ f_info.flag |= ICE_FLTR_TX_ONLY;
}
f_list_entry.fltr_info = f_info;
@@ -4528,6 +4669,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
ICE_PROTOCOL_ENTRY(ICE_NVGRE, 0, 2, 4, 6),
ICE_PROTOCOL_ENTRY(ICE_GTP, 8, 10, 12, 14, 16, 18, 20, 22),
ICE_PROTOCOL_ENTRY(ICE_GTP_NO_PAY, 8, 10, 12, 14),
+ ICE_PROTOCOL_ENTRY(ICE_PFCP, 8, 10, 12, 14, 16, 18, 20, 22),
ICE_PROTOCOL_ENTRY(ICE_PPPOE, 0, 2, 4, 6),
ICE_PROTOCOL_ENTRY(ICE_L2TPV3, 0, 2, 4, 6, 8, 10),
ICE_PROTOCOL_ENTRY(ICE_VLAN_EX, 2, 0),
@@ -4561,6 +4703,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_NVGRE, ICE_GRE_OF_HW },
{ ICE_GTP, ICE_UDP_OF_HW },
{ ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
+ { ICE_PFCP, ICE_UDP_ILOS_HW },
{ ICE_PPPOE, ICE_PPPOE_HW },
{ ICE_L2TPV3, ICE_L2TPV3_HW },
{ ICE_VLAN_EX, ICE_VLAN_OF_HW },
@@ -4573,12 +4716,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
* @hw: pointer to the hardware structure
* @lkup_exts: extension sequence to match
* @rinfo: information regarding the rule e.g. priority and action info
+ * @is_add: true when the recipe is being added, false when it is only queried
*
* Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
*/
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
- const struct ice_adv_rule_info *rinfo)
+ const struct ice_adv_rule_info *rinfo, bool is_add)
{
bool refresh_required = true;
struct ice_sw_recipe *recp;
@@ -4592,16 +4736,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
* entry update it in our SW bookkeeping and continue with the
* matching.
*/
- if (!recp[i].recp_created)
+ if (hw->recp_reuse) {
if (ice_get_recp_frm_fw(hw,
hw->switch_info->recp_list, i,
- &refresh_required))
+ &refresh_required, is_add))
continue;
-
- /* Skip inverse action recipes */
- if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
- ICE_AQ_RECIPE_ACT_INV_ACT)
- continue;
+ }
/* if number of words we are looking for match */
if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
@@ -4727,110 +4867,55 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
}
/**
- * ice_create_first_fit_recp_def - Create a recipe grouping
- * @hw: pointer to the hardware structure
- * @lkup_exts: an array of protocol header extractions
- * @rg_list: pointer to a list that stores new recipe groups
- * @recp_cnt: pointer to a variable that stores returned number of recipe groups
- *
- * Using first fit algorithm, take all the words that are still not done
- * and start grouping them in 4-word groups. Each group makes up one
- * recipe.
- */
-static int
-ice_create_first_fit_recp_def(struct ice_hw *hw,
- struct ice_prot_lkup_ext *lkup_exts,
- struct list_head *rg_list,
- u8 *recp_cnt)
-{
- struct ice_pref_recipe_group *grp = NULL;
- u8 j;
-
- *recp_cnt = 0;
-
- /* Walk through every word in the rule to check if it is not done. If so
- * then this word needs to be part of a new recipe.
- */
- for (j = 0; j < lkup_exts->n_val_words; j++)
- if (!test_bit(j, lkup_exts->done)) {
- if (!grp ||
- grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
- struct ice_recp_grp_entry *entry;
-
- entry = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*entry),
- GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- list_add(&entry->l_entry, rg_list);
- grp = &entry->r_group;
- (*recp_cnt)++;
- }
-
- grp->pairs[grp->n_val_pairs].prot_id =
- lkup_exts->fv_words[j].prot_id;
- grp->pairs[grp->n_val_pairs].off =
- lkup_exts->fv_words[j].off;
- grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
- grp->n_val_pairs++;
- }
-
- return 0;
-}
-
-/**
* ice_fill_fv_word_index - fill in the field vector indices for a recipe group
* @hw: pointer to the hardware structure
- * @fv_list: field vector with the extraction sequence information
- * @rg_list: recipe groupings with protocol-offset pairs
+ * @rm: recipe management list entry
*
* Helper function to fill in the field vector indices for protocol-offset
* pairs. These indexes are then ultimately programmed into a recipe.
*/
static int
-ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
- struct list_head *rg_list)
+ice_fill_fv_word_index(struct ice_hw *hw, struct ice_sw_recipe *rm)
{
struct ice_sw_fv_list_entry *fv;
- struct ice_recp_grp_entry *rg;
struct ice_fv_word *fv_ext;
+ u8 i;
- if (list_empty(fv_list))
- return 0;
+ if (list_empty(&rm->fv_list))
+ return -EINVAL;
- fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
+ fv = list_first_entry(&rm->fv_list, struct ice_sw_fv_list_entry,
list_entry);
fv_ext = fv->fv_ptr->ew;
- list_for_each_entry(rg, rg_list, l_entry) {
- u8 i;
-
- for (i = 0; i < rg->r_group.n_val_pairs; i++) {
- struct ice_fv_word *pr;
- bool found = false;
- u16 mask;
- u8 j;
-
- pr = &rg->r_group.pairs[i];
- mask = rg->r_group.mask[i];
+ /* Add the switch ID as the first word. */
+ rm->fv_idx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ rm->fv_mask[0] = ICE_AQ_SW_ID_LKUP_MASK;
+ rm->n_ext_words++;
- for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
- if (fv_ext[j].prot_id == pr->prot_id &&
- fv_ext[j].off == pr->off) {
- found = true;
+ for (i = 1; i < rm->n_ext_words; i++) {
+ struct ice_fv_word *fv_word = &rm->ext_words[i - 1];
+ u16 fv_mask = rm->word_masks[i - 1];
+ bool found = false;
+ u8 j;
- /* Store index of field vector */
- rg->fv_idx[i] = j;
- rg->fv_mask[i] = mask;
- break;
- }
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) {
+ if (fv_ext[j].prot_id == fv_word->prot_id &&
+ fv_ext[j].off == fv_word->off) {
+ found = true;
- /* Protocol/offset could not be found, caller gave an
- * invalid pair
- */
- if (!found)
- return -EINVAL;
+ /* Store index of field vector */
+ rm->fv_idx[i] = j;
+ rm->fv_mask[i] = fv_mask;
+ break;
+ }
}
+
+ /* Protocol/offset could not be found, caller gave an invalid
+ * pair.
+ */
+ if (!found)
+ return -EINVAL;
}
return 0;
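
As a sketch of the state this helper leaves behind for a rule with two lookup
words (illustrative only; angle brackets mark values found at runtime):

/*
 * rm->fv_idx[0]   == ICE_AQ_SW_ID_LKUP_IDX     switch ID, always word 0
 * rm->fv_mask[0]  == ICE_AQ_SW_ID_LKUP_MASK
 * rm->fv_idx[1]   == <FV slot matching rm->ext_words[0].prot_id/off>
 * rm->fv_mask[1]  == rm->word_masks[0]
 * rm->fv_idx[2]   == <FV slot matching rm->ext_words[1].prot_id/off>
 * rm->fv_mask[2]  == rm->word_masks[1]
 * rm->n_ext_words == 3                         two lookups + switch ID
 */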
@@ -4904,335 +4989,223 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
}
/**
- * ice_add_sw_recipe - function to call AQ calls to create switch recipe
- * @hw: pointer to hardware structure
- * @rm: recipe management list entry
- * @profiles: bitmap of profiles that will be associated.
+ * ice_calc_recp_cnt - calculate number of recipes based on word count
+ * @word_cnt: number of lookup words
+ *
+ * Word count should include the switch ID word and all regular lookup words.
+ * Return: number of recipes required to fit @word_cnt, including any extra
+ * recipes needed for recipe chaining.
*/
-static int
-ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
- unsigned long *profiles)
+static int ice_calc_recp_cnt(u8 word_cnt)
{
- DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
- struct ice_aqc_recipe_content *content;
- struct ice_aqc_recipe_data_elem *tmp;
- struct ice_aqc_recipe_data_elem *buf;
- struct ice_recp_grp_entry *entry;
- u16 free_res_idx;
- u16 recipe_count;
- u8 chain_idx;
- u8 recps = 0;
- int status;
+ /* All words fit in a single recipe, no need for chaining. */
+ if (word_cnt <= ICE_NUM_WORDS_RECIPE)
+ return 1;
- /* When more than one recipe are required, another recipe is needed to
- * chain them together. Matching a tunnel metadata ID takes up one of
- * the match fields in the chaining recipe reducing the number of
- * chained recipes by one.
+ /* Recipe chaining required. Result indexes are fitted right after
+ * regular lookup words. In some cases a new recipe must be added in
+ * order to fit result indexes.
+ *
+ * While the word count increases, every 5 words an extra recipe needs
+ * to be added. However, by adding a recipe, one word for its result
+ * index must also be added, therefore every 4 words recipe count
+ * increases by 1. This calculation does not apply to word count == 1,
+ * which is handled above.
*/
- /* check number of free result indices */
- bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
- free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
+ return (word_cnt + 2) / (ICE_NUM_WORDS_RECIPE - 1);
+}
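
To sanity-check the arithmetic, a stand-alone mirror of the function (a
sketch, assuming ICE_NUM_WORDS_RECIPE == 5 as the denominator of 4 implies;
not driver code):

#include <assert.h>

#define NUM_WORDS_RECIPE 5	/* stand-in for ICE_NUM_WORDS_RECIPE */

static int calc_recp_cnt(unsigned int word_cnt)
{
	if (word_cnt <= NUM_WORDS_RECIPE)
		return 1;
	return (word_cnt + 2) / (NUM_WORDS_RECIPE - 1);
}

int main(void)
{
	assert(calc_recp_cnt(5) == 1);	/* fits in a single recipe */
	assert(calc_recp_cnt(6) == 2);	/* subrecipe + root (1 word + 1 result idx) */
	assert(calc_recp_cnt(13) == 3);	/* 2 subrecipes + root (3 words + 2 result idxs) */
	assert(calc_recp_cnt(14) == 4);	/* 4 words + 2 result idxs exceed 5 slots */
	return 0;
}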
- ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
- free_res_idx, rm->n_grp_count);
+static void fill_recipe_template(struct ice_aqc_recipe_data_elem *recp, u16 rid,
+ const struct ice_sw_recipe *rm)
+{
+ int i;
- if (rm->n_grp_count > 1) {
- if (rm->n_grp_count > free_res_idx)
- return -ENOSPC;
+ recp->recipe_indx = rid;
+ recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_PRUNE_INDX_M;
- rm->n_grp_count++;
+ for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
+ recp->content.lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
+ recp->content.mask[i] = cpu_to_le16(0);
}
- if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
- return -ENOSPC;
-
- tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
+ set_bit(rid, (unsigned long *)recp->recipe_bitmap);
+ recp->content.act_ctrl_fwd_priority = rm->priority;
- buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
- GFP_KERNEL);
- if (!buf) {
- status = -ENOMEM;
- goto err_mem;
- }
+ if (rm->need_pass_l2)
+ recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
- bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
- recipe_count = ICE_MAX_NUM_RECIPES;
- status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
- NULL);
- if (status || recipe_count == 0)
- goto err_unroll;
+ if (rm->allow_pass_l2)
+ recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
+}
- /* Allocate the recipe resources, and configure them according to the
- * match fields from protocol headers and extracted field vectors.
- */
- chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- u8 i;
+static void bookkeep_recipe(struct ice_sw_recipe *recipe,
+ struct ice_aqc_recipe_data_elem *r,
+ const struct ice_sw_recipe *rm)
+{
+ memcpy(recipe->r_bitmap, r->recipe_bitmap, sizeof(recipe->r_bitmap));
- status = ice_alloc_recipe(hw, &entry->rid);
- if (status)
- goto err_unroll;
+ recipe->priority = r->content.act_ctrl_fwd_priority;
+ recipe->tun_type = rm->tun_type;
+ recipe->need_pass_l2 = rm->need_pass_l2;
+ recipe->allow_pass_l2 = rm->allow_pass_l2;
+ recipe->recp_created = true;
+}
- content = &buf[recps].content;
+/* For memcpy in ice_add_sw_recipe. */
+static_assert(sizeof_field(struct ice_aqc_recipe_data_elem, recipe_bitmap) ==
+ sizeof_field(struct ice_sw_recipe, r_bitmap));
- /* Clear the result index of the located recipe, as this will be
- * updated, if needed, later in the recipe creation process.
- */
- tmp[0].content.result_indx = 0;
+/**
+ * ice_add_sw_recipe - function to call AQ calls to create switch recipe
+ * @hw: pointer to hardware structure
+ * @rm: recipe management list entry
+ * @profiles: bitmap of profiles that will be associated.
+ */
+static int
+ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
+ unsigned long *profiles)
+{
+ struct ice_aqc_recipe_data_elem *buf __free(kfree) = NULL;
+ DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_data_elem *root;
+ struct ice_sw_recipe *recipe;
+ u16 free_res_idx, rid;
+ int lookup = 0;
+ int recp_cnt;
+ int status;
+ int word;
+ int i;
- buf[recps] = tmp[0];
- buf[recps].recipe_indx = (u8)entry->rid;
- /* if the recipe is a non-root recipe RID should be programmed
- * as 0 for the rules to be applied correctly.
- */
- content->rid = 0;
- memset(&content->lkup_indx, 0,
- sizeof(content->lkup_indx));
-
- /* All recipes use look-up index 0 to match switch ID. */
- content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
- /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
- * to be 0
- */
- for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- content->lkup_indx[i] = 0x80;
- content->mask[i] = 0;
- }
+ recp_cnt = ice_calc_recp_cnt(rm->n_ext_words);
- for (i = 0; i < entry->r_group.n_val_pairs; i++) {
- content->lkup_indx[i + 1] = entry->fv_idx[i];
- content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]);
- }
+ bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
+ bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
- if (rm->n_grp_count > 1) {
- /* Checks to see if there really is a valid result index
- * that can be used.
- */
- if (chain_idx >= ICE_MAX_FV_WORDS) {
- ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
- status = -ENOSPC;
- goto err_unroll;
- }
+ /* Check number of free result indices */
+ free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
- entry->chain_idx = chain_idx;
- content->result_indx =
- ICE_AQ_RECIPE_RESULT_EN |
- FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M,
- chain_idx);
- clear_bit(chain_idx, result_idx_bm);
- chain_idx = find_first_bit(result_idx_bm,
- ICE_MAX_FV_WORDS);
- }
+ ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
+ free_res_idx, recp_cnt);
- /* fill recipe dependencies */
- bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
- ICE_MAX_NUM_RECIPES);
- set_bit(buf[recps].recipe_indx,
- (unsigned long *)buf[recps].recipe_bitmap);
- content->act_ctrl_fwd_priority = rm->priority;
+ /* Last recipe doesn't need result index */
+ if (recp_cnt - 1 > free_res_idx)
+ return -ENOSPC;
- if (rm->need_pass_l2)
- content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
+ if (recp_cnt > ICE_MAX_CHAIN_RECIPE_RES)
+ return -E2BIG;
- if (rm->allow_pass_l2)
- content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
- recps++;
- }
+ buf = kcalloc(recp_cnt, sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- if (rm->n_grp_count == 1) {
- rm->root_rid = buf[0].recipe_indx;
- set_bit(buf[0].recipe_indx, rm->r_bitmap);
- buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
- if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
- memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
- sizeof(buf[0].recipe_bitmap));
- } else {
- status = -EINVAL;
- goto err_unroll;
- }
- /* Applicable only for ROOT_RECIPE, set the fwd_priority for
- * the recipe which is getting created if specified
- * by user. Usually any advanced switch filter, which results
- * into new extraction sequence, ended up creating a new recipe
- * of type ROOT and usually recipes are associated with profiles
- * Switch rule referreing newly created recipe, needs to have
- * either/or 'fwd' or 'join' priority, otherwise switch rule
- * evaluation will not happen correctly. In other words, if
- * switch rule to be evaluated on priority basis, then recipe
- * needs to have priority, otherwise it will be evaluated last.
- */
- buf[0].content.act_ctrl_fwd_priority = rm->priority;
- } else {
- struct ice_recp_grp_entry *last_chain_entry;
- u16 rid, i;
+ /* Set up the non-root subrecipes. These do not contain lookups for other
+ * subrecipes' results. Associate each only with its own recipe index.
+ * Each non-root subrecipe needs a free result index from FV.
+ *
+ * Note: only done if there is more than one recipe.
+ */
+ for (i = 0; i < recp_cnt - 1; i++) {
+ struct ice_aqc_recipe_content *content;
+ u8 result_idx;
- /* Allocate the last recipe that will chain the outcomes of the
- * other recipes together
- */
status = ice_alloc_recipe(hw, &rid);
if (status)
- goto err_unroll;
+ return status;
- content = &buf[recps].content;
+ fill_recipe_template(&buf[i], rid, rm);
- buf[recps].recipe_indx = (u8)rid;
- content->rid = (u8)rid;
- content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
- /* the new entry created should also be part of rg_list to
- * make sure we have complete recipe
+ result_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
+ /* Check if there really is a valid result index that can be
+ * used.
*/
- last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*last_chain_entry),
- GFP_KERNEL);
- if (!last_chain_entry) {
- status = -ENOMEM;
- goto err_unroll;
- }
- last_chain_entry->rid = rid;
- memset(&content->lkup_indx, 0, sizeof(content->lkup_indx));
- /* All recipes use look-up index 0 to match switch ID. */
- content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
- for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
- content->mask[i] = 0;
+ if (result_idx >= ICE_MAX_FV_WORDS) {
+ ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
+ return -ENOSPC;
}
+ clear_bit(result_idx, result_idx_bm);
- i = 1;
- /* update r_bitmap with the recp that is used for chaining */
+ content = &buf[i].content;
+ content->result_indx = ICE_AQ_RECIPE_RESULT_EN |
+ FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M,
+ result_idx);
+
+ /* Set recipe association to be used for root recipe */
set_bit(rid, rm->r_bitmap);
- /* this is the recipe that chains all the other recipes so it
- * should not have a chaining ID to indicate the same
- */
- last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- last_chain_entry->fv_idx[i] = entry->chain_idx;
- content->lkup_indx[i] = entry->chain_idx;
- content->mask[i++] = cpu_to_le16(0xFFFF);
- set_bit(entry->rid, rm->r_bitmap);
- }
- list_add(&last_chain_entry->l_entry, &rm->rg_list);
- if (sizeof(buf[recps].recipe_bitmap) >=
- sizeof(rm->r_bitmap)) {
- memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
- sizeof(buf[recps].recipe_bitmap));
- } else {
- status = -EINVAL;
- goto err_unroll;
+
+ word = 0;
+ while (lookup < rm->n_ext_words &&
+ word < ICE_NUM_WORDS_RECIPE) {
+ content->lkup_indx[word] = rm->fv_idx[lookup];
+ content->mask[word] = cpu_to_le16(rm->fv_mask[lookup]);
+
+ lookup++;
+ word++;
}
- content->act_ctrl_fwd_priority = rm->priority;
- recps++;
- rm->root_rid = (u8)rid;
+ recipe = &hw->switch_info->recp_list[rid];
+ set_bit(result_idx, recipe->res_idxs);
+ bookkeep_recipe(recipe, &buf[i], rm);
}
- status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
- if (status)
- goto err_unroll;
- status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
- ice_release_change_lock(hw);
+ /* Set up the root recipe */
+ status = ice_alloc_recipe(hw, &rid);
if (status)
- goto err_unroll;
-
- /* Every recipe that just got created add it to the recipe
- * book keeping list
- */
- list_for_each_entry(entry, &rm->rg_list, l_entry) {
- struct ice_switch_info *sw = hw->switch_info;
- bool is_root, idx_found = false;
- struct ice_sw_recipe *recp;
- u16 idx, buf_idx = 0;
-
- /* find buffer index for copying some data */
- for (idx = 0; idx < rm->n_grp_count; idx++)
- if (buf[idx].recipe_indx == entry->rid) {
- buf_idx = idx;
- idx_found = true;
- }
+ return status;
- if (!idx_found) {
- status = -EIO;
- goto err_unroll;
- }
+ recipe = &hw->switch_info->recp_list[rid];
+ root = &buf[recp_cnt - 1];
+ fill_recipe_template(root, rid, rm);
- recp = &sw->recp_list[entry->rid];
- is_root = (rm->root_rid == entry->rid);
- recp->is_root = is_root;
+ /* Set recipe association, use previously set bitmap and own rid */
+ set_bit(rid, rm->r_bitmap);
+ memcpy(root->recipe_bitmap, rm->r_bitmap, sizeof(root->recipe_bitmap));
- recp->root_rid = entry->rid;
- recp->big_recp = (is_root && rm->n_grp_count > 1);
+ /* For non-root recipes rid should be 0; for the root it must be the
+ * actual rid value ORed with 0x80 (the is-root bit).
+ */
+ root->content.rid = rid | ICE_AQ_RECIPE_ID_IS_ROOT;
- memcpy(&recp->ext_words, entry->r_group.pairs,
- entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
+ /* Fill remaining lookups in root recipe */
+ word = 0;
+ while (lookup < rm->n_ext_words &&
+ word < ICE_NUM_WORDS_RECIPE /* should always be true */) {
+ root->content.lkup_indx[word] = rm->fv_idx[lookup];
+ root->content.mask[word] = cpu_to_le16(rm->fv_mask[lookup]);
- memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
- sizeof(recp->r_bitmap));
+ lookup++;
+ word++;
+ }
- /* Copy non-result fv index values and masks to recipe. This
- * call will also update the result recipe bitmask.
+ /* Fill result indexes as lookups */
+ i = 0;
+ while (i < recp_cnt - 1 &&
+ word < ICE_NUM_WORDS_RECIPE /* should always be true */) {
+ root->content.lkup_indx[word] = buf[i].content.result_indx &
+ ~ICE_AQ_RECIPE_RESULT_EN;
+ root->content.mask[word] = cpu_to_le16(0xffff);
+ /* For bookkeeping, mark the FV index as used for the
+ * intermediate result.
*/
- ice_collect_result_idx(&buf[buf_idx], recp);
+ set_bit(root->content.lkup_indx[word], recipe->res_idxs);
- /* for non-root recipes, also copy to the root, this allows
- * easier matching of a complete chained recipe
- */
- if (!is_root)
- ice_collect_result_idx(&buf[buf_idx],
- &sw->recp_list[rm->root_rid]);
-
- recp->n_ext_words = entry->r_group.n_val_pairs;
- recp->chain_idx = entry->chain_idx;
- recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
- recp->n_grp_count = rm->n_grp_count;
- recp->tun_type = rm->tun_type;
- recp->need_pass_l2 = rm->need_pass_l2;
- recp->allow_pass_l2 = rm->allow_pass_l2;
- recp->recp_created = true;
+ i++;
+ word++;
}
- rm->root_buf = buf;
- kfree(tmp);
- return status;
-
-err_unroll:
-err_mem:
- kfree(tmp);
- devm_kfree(ice_hw_to_dev(hw), buf);
- return status;
-}
-/**
- * ice_create_recipe_group - creates recipe group
- * @hw: pointer to hardware structure
- * @rm: recipe management list entry
- * @lkup_exts: lookup elements
- */
-static int
-ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
- struct ice_prot_lkup_ext *lkup_exts)
-{
- u8 recp_count = 0;
- int status;
+ rm->root_rid = rid;
+ bookkeep_recipe(&hw->switch_info->recp_list[rid], root, rm);
- rm->n_grp_count = 0;
+ /* Program the recipe */
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ return status;
- /* Create recipes for words that are marked not done by packing them
- * as best fit.
- */
- status = ice_create_first_fit_recp_def(hw, lkup_exts,
- &rm->rg_list, &recp_count);
- if (!status) {
- rm->n_grp_count += recp_count;
- rm->n_ext_words = lkup_exts->n_val_words;
- memcpy(&rm->ext_words, lkup_exts->fv_words,
- sizeof(rm->ext_words));
- memcpy(rm->word_masks, lkup_exts->field_mask,
- sizeof(rm->word_masks));
- }
+ status = ice_aq_add_recipe(hw, buf, recp_cnt, NULL);
+ ice_release_change_lock(hw);
+ if (status)
+ return status;
- return status;
+ return 0;
}
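
For a 7-word chain, a hedged picture of the buffer this function ends up
programming (w0 is the switch ID word, A and B freshly allocated rids, r the
claimed result index):

/*
 * buf[0], non-root, recipe_indx = A:
 *     lkup_indx     = { w0, w1, w2, w3, w4 }
 *     result_indx   = ICE_AQ_RECIPE_RESULT_EN | r
 *     recipe_bitmap = { A }
 * buf[1], root, recipe_indx = B:
 *     lkup_indx     = { w5, w6, r, ignore, ignore }
 *     content.rid   = B | ICE_AQ_RECIPE_ID_IS_ROOT
 *     recipe_bitmap = { A, B }
 */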
/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
@@ -5268,6 +5241,9 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
case ICE_SW_TUN_GTPC:
prof_type = ICE_PROF_TUN_GTPC;
break;
+ case ICE_SW_TUN_PFCP:
+ prof_type = ICE_PROF_TUN_PFCP;
+ break;
case ICE_SW_TUN_AND_NON_TUN:
default:
prof_type = ICE_PROF_ALL;
@@ -5278,6 +5254,49 @@ ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
}
/**
+ * ice_subscribe_recipe - subscribe to an existing recipe
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to subscribe to
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int ice_subscribe_recipe(struct ice_hw *hw, u16 rid)
+{
+ DEFINE_RAW_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ u16 buf_len = __struct_size(sw_buf);
+ u16 res_type;
+ int status;
+
+ /* Prepare buffer to allocate resource */
+ sw_buf->num_elems = cpu_to_le16(1);
+ res_type = FIELD_PREP(ICE_AQC_RES_TYPE_M, ICE_AQC_RES_TYPE_RECIPE) |
+ ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_SHARED |
+ ICE_AQC_RES_TYPE_FLAG_SUBSCRIBE_CTL;
+ sw_buf->res_type = cpu_to_le16(res_type);
+
+ sw_buf->elem[0].e.sw_resp = cpu_to_le16(rid);
+
+ status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res);
+
+ return status;
+}
+
+/**
+ * ice_subscribable_recp_shared - share an existing subscribable recipe
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID to subscribe to
+ */
+static void ice_subscribable_recp_shared(struct ice_hw *hw, u16 rid)
+{
+ struct ice_sw_recipe *recps = hw->switch_info->recp_list;
+ u16 sub_rid;
+
+ for_each_set_bit(sub_rid, recps[rid].r_bitmap, ICE_MAX_NUM_RECIPES)
+ ice_subscribe_recipe(hw, sub_rid);
+}
+
+/**
* ice_add_adv_recipe - Add an advanced recipe that is not part of the default
* @hw: pointer to hardware structure
* @lkups: lookup elements or match criteria for the advanced recipe, one
@@ -5293,12 +5312,11 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
struct ice_prot_lkup_ext *lkup_exts;
- struct ice_recp_grp_entry *r_entry;
struct ice_sw_fv_list_entry *fvit;
- struct ice_recp_grp_entry *r_tmp;
struct ice_sw_fv_list_entry *tmp;
struct ice_sw_recipe *rm;
int status = 0;
+ u16 rid_tmp;
u8 i;
if (!lkups_cnt)
@@ -5336,7 +5354,6 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
* headers being programmed.
*/
INIT_LIST_HEAD(&rm->fv_list);
- INIT_LIST_HEAD(&rm->rg_list);
/* Get bitmap of field vectors (profiles) that are compatible with the
* rule request; only these will be searched in the subsequent call to
@@ -5348,12 +5365,10 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (status)
goto err_unroll;
- /* Group match words into recipes using preferred recipe grouping
- * criteria.
- */
- status = ice_create_recipe_group(hw, rm, lkup_exts);
- if (status)
- goto err_unroll;
+ /* Copy FV words and masks from lkup_exts to recipe struct. */
+ rm->n_ext_words = lkup_exts->n_val_words;
+ memcpy(rm->ext_words, lkup_exts->fv_words, sizeof(rm->ext_words));
+ memcpy(rm->word_masks, lkup_exts->field_mask, sizeof(rm->word_masks));
/* set the recipe priority if specified */
rm->priority = (u8)rinfo->priority;
@@ -5364,7 +5379,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* Find offsets from the field vector. Pick the first one for all the
* recipes.
*/
- status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
+ status = ice_fill_fv_word_index(hw, rm);
if (status)
goto err_unroll;
@@ -5376,10 +5391,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
/* Look for a recipe which matches our requested fv / mask list */
- *rid = ice_find_recp(hw, lkup_exts, rinfo);
- if (*rid < ICE_MAX_NUM_RECIPES)
+ *rid = ice_find_recp(hw, lkup_exts, rinfo, true);
+ if (*rid < ICE_MAX_NUM_RECIPES) {
/* Success if found a recipe that match the existing criteria */
+ if (hw->recp_reuse)
+ ice_subscribable_recp_shared(hw, *rid);
+
goto err_unroll;
+ }
rm->tun_type = rinfo->tun_type;
/* Recipe we need does not exist, add a recipe */
@@ -5398,14 +5417,14 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
&recp_assoc, NULL);
if (status)
- goto err_unroll;
+ goto err_free_recipe;
bitmap_from_arr64(r_bitmap, &recp_assoc, ICE_MAX_NUM_RECIPES);
bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
ICE_MAX_NUM_RECIPES);
status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
if (status)
- goto err_unroll;
+ goto err_free_recipe;
bitmap_to_arr64(&recp_assoc, r_bitmap, ICE_MAX_NUM_RECIPES);
status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
@@ -5413,7 +5432,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
ice_release_change_lock(hw);
if (status)
- goto err_unroll;
+ goto err_free_recipe;
/* Update profile to recipe bitmap array */
bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
@@ -5427,18 +5446,22 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
*rid = rm->root_rid;
memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
sizeof(*lkup_exts));
-err_unroll:
- list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
- list_del(&r_entry->l_entry);
- devm_kfree(ice_hw_to_dev(hw), r_entry);
+ goto err_unroll;
+
+err_free_recipe:
+ if (hw->recp_reuse) {
+ for_each_set_bit(rid_tmp, rm->r_bitmap, ICE_MAX_NUM_RECIPES) {
+ if (!ice_free_recipe_res(hw, rid_tmp))
+ clear_bit(rid_tmp, rm->r_bitmap);
+ }
}
+err_unroll:
list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
list_del(&fvit->list_entry);
devm_kfree(ice_hw_to_dev(hw), fvit);
}
- devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
kfree(rm);
err_free_lkup_exts:
@@ -5552,6 +5575,9 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_SW_TUN_VXLAN:
match |= ICE_PKT_TUN_UDP;
break;
+ case ICE_SW_TUN_PFCP:
+ match |= ICE_PKT_PFCP;
+ break;
default:
break;
}
@@ -5692,6 +5718,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_GTP:
len = sizeof(struct ice_udp_gtp_hdr);
break;
+ case ICE_PFCP:
+ len = sizeof(struct ice_pfcp_hdr);
+ break;
case ICE_PPPOE:
len = sizeof(struct ice_pppoe_hdr);
break;
@@ -6440,7 +6469,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
return -EIO;
}
- rid = ice_find_recp(hw, &lkup_exts, rinfo);
+ rid = ice_find_recp(hw, &lkup_exts, rinfo, false);
/* If did not find a recipe that match the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
return -EINVAL;
@@ -6484,14 +6513,21 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
ice_aqc_opc_remove_sw_rules, NULL);
if (!status || status == -ENOENT) {
struct ice_switch_info *sw = hw->switch_info;
+ struct ice_sw_recipe *r_list = sw->recp_list;
mutex_lock(rule_lock);
list_del(&list_elem->list_entry);
devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
devm_kfree(ice_hw_to_dev(hw), list_elem);
mutex_unlock(rule_lock);
- if (list_empty(&sw->recp_list[rid].filt_rules))
- sw->recp_list[rid].adv_rule = false;
+ if (list_empty(&r_list[rid].filt_rules)) {
+ r_list[rid].adv_rule = false;
+
+ /* All rules for this recipe are now removed */
+ if (hw->recp_reuse)
+ ice_release_recipe_res(hw,
+ &r_list[rid]);
+ }
}
kfree(s_rule);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index 89ffa1b51b5a..671d7a5f359f 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -8,8 +8,9 @@
#define ICE_SW_CFG_MAX_BUF_LEN 2048
#define ICE_DFLT_VSI_INVAL 0xff
-#define ICE_FLTR_RX BIT(0)
-#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_RX BIT(0)
+#define ICE_FLTR_TX BIT(1)
+#define ICE_FLTR_TX_ONLY BIT(2)
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
@@ -21,6 +22,8 @@
#define ICE_PROFID_IPV6_GTPC_NO_TEID 45
#define ICE_PROFID_IPV6_GTPU_TEID 46
#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
+#define ICE_PROFID_IPV4_PFCP_NODE 79
+#define ICE_PROFID_IPV6_PFCP_SESSION 82
#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
@@ -213,7 +216,6 @@ struct ice_sw_recipe {
/* For a chained recipe the root recipe is what should be used for
* programming rules
*/
- u8 is_root;
u8 root_rid;
u8 recp_created;
@@ -224,19 +226,8 @@ struct ice_sw_recipe {
*/
struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS];
u16 word_masks[ICE_MAX_CHAIN_WORDS];
-
- /* if this recipe is a collection of other recipe */
- u8 big_recp;
-
- /* if this recipe is part of another bigger recipe then chain index
- * corresponding to this recipe
- */
- u8 chain_idx;
-
- /* if this recipe is a collection of other recipe then count of other
- * recipes and recipe IDs of those recipes
- */
- u8 n_grp_count;
+ u8 fv_idx[ICE_MAX_CHAIN_WORDS];
+ u16 fv_mask[ICE_MAX_CHAIN_WORDS];
/* Bit map specifying the IDs associated with this group of recipe */
DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
@@ -269,10 +260,6 @@ struct ice_sw_recipe {
u8 need_pass_l2:1;
u8 allow_pass_l2:1;
- struct list_head rg_list;
-
- /* AQ buffer associated with this recipe */
- struct ice_aqc_recipe_data_elem *root_buf;
/* This struct saves the fv_words for a given lookup */
struct ice_prot_lkup_ext lkup_exts;
};
@@ -429,5 +416,6 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 *r_assoc,
int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u64 r_assoc,
struct ice_sq_cd *cd);
+void ice_init_chk_recipe_reuse_support(struct ice_hw *hw);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 688ccb0615ab..e6923f8121a9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -37,7 +37,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & ICE_TC_FLWR_FIELD_ENC_DST_MAC)
lkups_cnt++;
- if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS)
+ if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS)
+ lkups_cnt++;
+
+ if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS)
lkups_cnt++;
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
@@ -140,6 +143,8 @@ ice_proto_type_from_tunnel(enum ice_tunnel_type type)
return ICE_GTP;
case TNL_GTPC:
return ICE_GTP_NO_PAY;
+ case TNL_PFCP:
+ return ICE_PFCP;
default:
return 0;
}
@@ -159,6 +164,8 @@ ice_sw_type_from_tunnel(enum ice_tunnel_type type)
return ICE_SW_TUN_GTPU;
case TNL_GTPC:
return ICE_SW_TUN_GTPC;
+ case TNL_PFCP:
+ return ICE_SW_TUN_PFCP;
default:
return ICE_NON_TUN;
}
@@ -221,8 +228,7 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
- if (flags & ICE_TC_FLWR_FIELD_ENC_OPTS &&
- (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
+ if (flags & ICE_TC_FLWR_FIELD_GTP_OPTS) {
list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
if (fltr->gtp_pdu_info_masks.pdu_type) {
@@ -239,6 +245,22 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
+ if (flags & ICE_TC_FLWR_FIELD_PFCP_OPTS) {
+ struct ice_pfcp_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.pfcp_hdr;
+ hdr_m = &list[i].m_u.pfcp_hdr;
+ list[i].type = ICE_PFCP;
+
+ hdr_h->flags = fltr->pfcp_meta_keys.type;
+ hdr_m->flags = fltr->pfcp_meta_masks.type & 0x01;
+
+ hdr_h->seid = fltr->pfcp_meta_keys.seid;
+ hdr_m->seid = fltr->pfcp_meta_masks.seid;
+
+ i++;
+ }
+
if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
list[i].type = ice_proto_type_from_ipv4(false);
@@ -374,8 +396,11 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
if (tc_fltr->tunnel_type != TNL_LAST) {
i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
- headers = &tc_fltr->inner_headers;
- inner = true;
+ /* PFCP is considered non-tunneled - don't swap headers. */
+ if (tc_fltr->tunnel_type != TNL_PFCP) {
+ headers = &tc_fltr->inner_headers;
+ inner = true;
+ }
}
if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
@@ -629,6 +654,8 @@ static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
*/
if (netif_is_gtp(tunnel_dev))
return TNL_GTPU;
+ if (netif_is_pfcp(tunnel_dev))
+ return TNL_PFCP;
return TNL_LAST;
}
@@ -642,13 +669,19 @@ static bool ice_tc_is_dev_uplink(struct net_device *dev)
return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
}
-static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
- struct ice_tc_flower_fltr *fltr,
- struct net_device *target_dev)
+static int ice_tc_setup_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr,
+ struct net_device *target_dev,
+ enum ice_sw_fwd_act_type action)
{
struct ice_repr *repr;
- fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ if (action != ICE_FWD_TO_VSI && action != ICE_MIRROR_PACKET) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action to setup provided");
+ return -EINVAL;
+ }
+
+ fltr->action.fltr_act = action;
if (ice_is_port_repr_netdev(filter_dev) &&
ice_is_port_repr_netdev(target_dev)) {
@@ -696,41 +729,6 @@ ice_tc_setup_drop_action(struct net_device *filter_dev,
return 0;
}
-static int ice_tc_setup_mirror_action(struct net_device *filter_dev,
- struct ice_tc_flower_fltr *fltr,
- struct net_device *target_dev)
-{
- struct ice_repr *repr;
-
- fltr->action.fltr_act = ICE_MIRROR_PACKET;
-
- if (ice_is_port_repr_netdev(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
- repr = ice_netdev_to_repr(target_dev);
-
- fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else if (ice_is_port_repr_netdev(filter_dev) &&
- ice_tc_is_dev_uplink(target_dev)) {
- repr = ice_netdev_to_repr(filter_dev);
-
- fltr->dest_vsi = repr->src_vsi->back->eswitch.uplink_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else if (ice_tc_is_dev_uplink(filter_dev) &&
- ice_is_port_repr_netdev(target_dev)) {
- repr = ice_netdev_to_repr(target_dev);
-
- fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
- } else {
- NL_SET_ERR_MSG_MOD(fltr->extack,
- "Unsupported netdevice in switchdev mode");
- return -EINVAL;
- }
-
- return 0;
-}
-
static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
struct ice_tc_flower_fltr *fltr,
struct flow_action_entry *act)
@@ -746,16 +744,19 @@ static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
break;
case FLOW_ACTION_REDIRECT:
- err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
+ err = ice_tc_setup_action(filter_dev, fltr,
+ act->dev, ICE_FWD_TO_VSI);
if (err)
return err;
break;
case FLOW_ACTION_MIRRED:
- err = ice_tc_setup_mirror_action(filter_dev, fltr, act->dev);
+ err = ice_tc_setup_action(filter_dev, fltr,
+ act->dev, ICE_MIRROR_PACKET);
if (err)
return err;
+
break;
default:
@@ -1352,6 +1353,7 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct ice_tc_flower_fltr *fltr)
{
struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct netlink_ext_ack *extack = fltr->extack;
struct flow_match_control enc_control;
fltr->tunnel_type = ice_tc_tun_get_type(dev);
@@ -1372,6 +1374,9 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
flow_rule_match_enc_control(rule, &enc_control);
+ if (flow_rule_has_enc_control_flags(enc_control.mask->flags, extack))
+ return -EOPNOTSUPP;
+
if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs match;
@@ -1409,7 +1414,8 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
}
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) {
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
+ (fltr->tunnel_type == TNL_GTPU || fltr->tunnel_type == TNL_GTPC)) {
struct flow_match_enc_opts match;
flow_rule_match_enc_opts(rule, &match);
@@ -1420,7 +1426,21 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
memcpy(&fltr->gtp_pdu_info_masks, &match.mask->data[0],
sizeof(struct gtp_pdu_session_info));
- fltr->flags |= ICE_TC_FLWR_FIELD_ENC_OPTS;
+ fltr->flags |= ICE_TC_FLWR_FIELD_GTP_OPTS;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS) &&
+ fltr->tunnel_type == TNL_PFCP) {
+ struct flow_match_enc_opts match;
+
+ flow_rule_match_enc_opts(rule, &match);
+
+ memcpy(&fltr->pfcp_meta_keys, match.key->data,
+ sizeof(struct pfcp_metadata));
+ memcpy(&fltr->pfcp_meta_masks, match.mask->data,
+ sizeof(struct pfcp_metadata));
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_PFCP_OPTS;
}
return 0;
@@ -1481,10 +1501,14 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return err;
}
- /* header pointers should point to the inner headers, outer
- * header were already set by ice_parse_tunnel_attr
- */
- headers = &fltr->inner_headers;
+ /* PFCP is considered non-tunneled - don't swap headers. */
+ if (fltr->tunnel_type != TNL_PFCP) {
+ /* Header pointers should point to the inner headers,
+ * outer header were already set by
+ * ice_parse_tunnel_attr().
+ */
+ headers = &fltr->inner_headers;
+ }
} else if (dissector->used_keys &
(BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
@@ -1638,6 +1662,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ fltr->extack))
+ return -EOPNOTSUPP;
}
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 65d387163a46..d84f153517ec 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -4,6 +4,9 @@
#ifndef _ICE_TC_LIB_H_
#define _ICE_TC_LIB_H_
+#include <linux/bits.h>
+#include <net/pfcp.h>
+
#define ICE_TC_FLWR_FIELD_DST_MAC BIT(0)
#define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1)
#define ICE_TC_FLWR_FIELD_VLAN BIT(2)
@@ -22,7 +25,7 @@
#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
-#define ICE_TC_FLWR_FIELD_ENC_OPTS BIT(18)
+#define ICE_TC_FLWR_FIELD_GTP_OPTS BIT(18)
#define ICE_TC_FLWR_FIELD_CVLAN BIT(19)
#define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20)
#define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21)
@@ -34,6 +37,7 @@
#define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27)
#define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28)
#define ICE_TC_FLWR_FIELD_VLAN_TPID BIT(29)
+#define ICE_TC_FLWR_FIELD_PFCP_OPTS BIT(30)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
@@ -161,6 +165,8 @@ struct ice_tc_flower_fltr {
__be32 tenant_id;
struct gtp_pdu_session_info gtp_pdu_info_keys;
struct gtp_pdu_session_info gtp_pdu_info_masks;
+ struct pfcp_metadata pfcp_meta_keys;
+ struct pfcp_metadata pfcp_meta_masks;
u32 flags;
u8 tunnel_type;
struct ice_tc_flower_action action;
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index b2f5c9fe0149..07aab6e130cd 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -69,7 +69,7 @@ DECLARE_EVENT_CLASS(ice_rx_dim_template,
TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim;
- __assign_str(devname, q_vector->rx.rx_ring->netdev->name);),
+ __assign_str(devname);),
TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname),
@@ -96,7 +96,7 @@ DECLARE_EVENT_CLASS(ice_tx_dim_template,
TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim;
- __assign_str(devname, q_vector->tx.tx_ring->netdev->name);),
+ __assign_str(devname);),
TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname),
@@ -128,7 +128,7 @@ DECLARE_EVENT_CLASS(ice_tx_template,
TP_fast_assign(__entry->ring = ring;
__entry->desc = desc;
__entry->buf = buf;
- __assign_str(devname, ring->netdev->name);),
+ __assign_str(devname);),
TP_printk("netdev: %s ring: %pK desc: %pK buf %pK", __get_str(devname),
__entry->ring, __entry->desc, __entry->buf)
@@ -156,7 +156,7 @@ DECLARE_EVENT_CLASS(ice_rx_template,
TP_fast_assign(__entry->ring = ring;
__entry->desc = desc;
- __assign_str(devname, ring->netdev->name);),
+ __assign_str(devname);),
TP_printk("netdev: %s ring: %pK desc: %pK", __get_str(devname),
__entry->ring, __entry->desc)
@@ -180,7 +180,7 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template,
TP_fast_assign(__entry->ring = ring;
__entry->desc = desc;
__entry->skb = skb;
- __assign_str(devname, ring->netdev->name);),
+ __assign_str(devname);),
TP_printk("netdev: %s ring: %pK desc: %pK skb %pK", __get_str(devname),
__entry->ring, __entry->desc, __entry->skb)
@@ -203,7 +203,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template,
TP_fast_assign(__entry->ring = ring;
__entry->skb = skb;
- __assign_str(devname, ring->netdev->name);),
+ __assign_str(devname);),
TP_printk("netdev: %s skb: %pK ring: %pK", __get_str(devname),
__entry->skb, __entry->ring)
@@ -330,6 +330,24 @@ DEFINE_EVENT(ice_esw_br_port_template,
TP_ARGS(port)
);
+DECLARE_EVENT_CLASS(ice_switch_stats_template,
+ TP_PROTO(struct ice_switch_info *sw_info),
+ TP_ARGS(sw_info),
+ TP_STRUCT__entry(__field(u16, rule_cnt)
+ __field(u8, recp_cnt)),
+ TP_fast_assign(__entry->rule_cnt = sw_info->rule_cnt;
+ __entry->recp_cnt = sw_info->recp_cnt;),
+ TP_printk("rules=%u recipes=%u",
+ __entry->rule_cnt,
+ __entry->recp_cnt)
+);
+
+DEFINE_EVENT(ice_switch_stats_template,
+ ice_aq_sw_rules,
+ TP_PROTO(struct ice_switch_info *sw_info),
+ TP_ARGS(sw_info)
+);
+
/* End tracepoints */
#endif /* _ICE_TRACE_H_ */
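
A note on these ice_trace.h hunks: newer kernels derive the source string for __assign_str() from the matching __string() declaration, so the explicit second argument is dropped, and the new event class exposes the switch rule/recipe counters added to struct ice_switch_info later in this series. A hedged sketch of firing the generated tracepoint (the wrapper is assumed; only the event name comes from the hunk):

/* DEFINE_EVENT() generates trace_ice_aq_sw_rules(); the _enabled()
 * helper is the usual static-key fast path. Call site is illustrative.
 */
static void ice_note_sw_rules(struct ice_hw *hw)
{
	if (trace_ice_aq_sw_rules_enabled())
		trace_ice_aq_sw_rules(hw->switch_info);
}
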
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 97d41d6ebf1f..c9bc3f1add5d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -456,7 +456,7 @@ void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
if (rx_ring->vsi->type == ICE_VSI_PF)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
- rx_ring->xdp_prog = NULL;
+ WRITE_ONCE(rx_ring->xdp_prog, NULL);
if (rx_ring->xsk_pool) {
kfree(rx_ring->xdp_buf);
rx_ring->xdp_buf = NULL;
@@ -522,30 +522,6 @@ err:
}
/**
- * ice_rx_frame_truesize
- * @rx_ring: ptr to Rx ring
- * @size: size
- *
- * calculate the truesize with taking into the account PAGE_SIZE of
- * underlying arch
- */
-static unsigned int
-ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size)
-{
- unsigned int truesize;
-
-#if (PAGE_SIZE < 8192)
- truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
-#else
- truesize = rx_ring->rx_offset ?
- SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
- SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
- SKB_DATA_ALIGN(size);
-#endif
- return truesize;
-}
-
-/**
* ice_run_xdp - Executes an XDP program on initialized xdp_buff
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
@@ -837,16 +813,15 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf)
if (!dev_page_is_reusable(page))
return false;
-#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
if (unlikely(rx_buf->pgcnt - pagecnt_bias > 1))
return false;
-#else
+#if (PAGE_SIZE >= 8192)
#define ICE_LAST_OFFSET \
- (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_3072)
if (rx_buf->page_offset > ICE_LAST_OFFSET)
return false;
-#endif /* PAGE_SIZE < 8192) */
+#endif /* PAGE_SIZE >= 8192 */
/* If we have drained the page fragment pool we need to update
* the pagecnt_bias and page count so that we fully restock the
@@ -949,12 +924,7 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
struct ice_rx_buf *rx_buf;
rx_buf = &rx_ring->rx_buf[ntc];
- rx_buf->pgcnt =
-#if (PAGE_SIZE < 8192)
- page_count(rx_buf->page);
-#else
- 0;
-#endif
+ rx_buf->pgcnt = page_count(rx_buf->page);
prefetchw(rx_buf->page);
if (!size)
@@ -1051,8 +1021,7 @@ ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
}
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE);
if (unlikely(!skb))
return NULL;
@@ -1161,11 +1130,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
bool failure;
u32 first;
- /* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
-#if (PAGE_SIZE < 8192)
- xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0);
-#endif
-
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (xdp_prog) {
xdp_ring = rx_ring->xdp_ring;
@@ -1224,10 +1188,6 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
offset;
xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
-#if (PAGE_SIZE > 4096)
- /* At larger PAGE_SIZE, frame_sz depend on len size */
- xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size);
-#endif
xdp_buff_clear_frags_flag(xdp);
} else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
break;
@@ -1522,10 +1482,11 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);
bool wd;
- if (tx_ring->xsk_pool)
- wd = ice_xmit_zc(tx_ring);
+ if (xsk_pool)
+ wd = ice_xmit_zc(tx_ring, xsk_pool);
else if (ice_ring_is_xdp(tx_ring))
wd = true;
else
@@ -1551,6 +1512,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = budget;
ice_for_each_rx_ring(rx_ring, q_vector->rx) {
+ struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);
int cleaned;
/* A dedicated path for zero-copy allows making a single
@@ -1558,7 +1520,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
cleaned = rx_ring->xsk_pool ?
- ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
+ ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :
ice_clean_rx_irq(rx_ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
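
The ice_napi_poll() hunks take one READ_ONCE() snapshot of the ring's xsk_pool per poll, pairing with the WRITE_ONCE() added in ice_free_rx_ring(). A minimal sketch of the publish/consume pattern (wrapper names are illustrative, not from the driver):

/* Writer (config path): publish or retract the pool, then flush
 * in-flight NAPI polls before freeing anything the pool backed.
 */
static void demo_set_pool(struct ice_rx_ring *ring, struct xsk_buff_pool *pool)
{
	WRITE_ONCE(ring->xsk_pool, pool);	/* NULL disables zero-copy */
	synchronize_net();
}

/* Reader (NAPI hot path): one snapshot, used for the whole poll. */
static int demo_poll(struct ice_rx_ring *ring, int budget)
{
	struct xsk_buff_pool *xsk_pool = READ_ONCE(ring->xsk_pool);

	return xsk_pool ? ice_clean_rx_irq_zc(ring, xsk_pool, budget) :
			  ice_clean_rx_irq(ring, budget);
}
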
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index af955b0e5dc5..feba314a3fe4 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -365,6 +365,7 @@ struct ice_rx_ring {
u8 ptp_rx;
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
+#define ICE_RX_FLAGS_MULTIDEV BIT(3)
u8 flags;
/* CL5 - 5th cacheline starts here */
struct xdp_rxq_info xdp_rxq;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index f8f1d2bdc1be..2719f0e20933 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include <linux/filter.h>
+#include <linux/net/intel/libie/rx.h>
#include "ice_txrx_lib.h"
#include "ice_eswitch.h"
@@ -39,30 +40,6 @@ void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
}
/**
- * ice_ptype_to_htype - get a hash type
- * @ptype: the ptype value from the descriptor
- *
- * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
- * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of
- * Rx desc.
- */
-static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
-{
- struct ice_rx_ptype_decoded decoded = ice_decode_rx_desc_ptype(ptype);
-
- if (!decoded.known)
- return PKT_HASH_TYPE_NONE;
- if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY4)
- return PKT_HASH_TYPE_L4;
- if (decoded.payload_layer == ICE_RX_PTYPE_PAYLOAD_LAYER_PAY3)
- return PKT_HASH_TYPE_L3;
- if (decoded.outer_ip == ICE_RX_PTYPE_OUTER_L2)
- return PKT_HASH_TYPE_L2;
-
- return PKT_HASH_TYPE_NONE;
-}
-
-/**
* ice_get_rx_hash - get RX hash value from descriptor
* @rx_desc: specific descriptor
*
@@ -91,14 +68,16 @@ ice_rx_hash_to_skb(const struct ice_rx_ring *rx_ring,
const union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 rx_ptype)
{
+ struct libeth_rx_pt decoded;
u32 hash;
- if (!(rx_ring->netdev->features & NETIF_F_RXHASH))
+ decoded = libie_rx_pt_parse(rx_ptype);
+ if (!libeth_rx_pt_has_hash(rx_ring->netdev, decoded))
return;
hash = ice_get_rx_hash(rx_desc);
if (likely(hash))
- skb_set_hash(skb, hash, ice_ptype_to_htype(rx_ptype));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
/**
@@ -114,34 +93,26 @@ static void
ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
- struct ice_rx_ptype_decoded decoded;
+ struct libeth_rx_pt decoded;
u16 rx_status0, rx_status1;
bool ipv4, ipv6;
- rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
- rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
-
- decoded = ice_decode_rx_desc_ptype(ptype);
-
/* Start with CHECKSUM_NONE and by default csum_level = 0 */
skb->ip_summed = CHECKSUM_NONE;
- skb_checksum_none_assert(skb);
- /* check if Rx checksum is enabled */
- if (!(ring->netdev->features & NETIF_F_RXCSUM))
+ decoded = libie_rx_pt_parse(ptype);
+ if (!libeth_rx_pt_has_checksum(ring->netdev, decoded))
return;
+ rx_status0 = le16_to_cpu(rx_desc->wb.status_error0);
+ rx_status1 = le16_to_cpu(rx_desc->wb.status_error1);
+
/* check if HW has decoded the packet and checksum */
if (!(rx_status0 & BIT(ICE_RX_FLEX_DESC_STATUS0_L3L4P_S)))
return;
- if (!(decoded.known && decoded.outer_ip))
- return;
-
- ipv4 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV4);
- ipv6 = (decoded.outer_ip == ICE_RX_PTYPE_OUTER_IP) &&
- (decoded.outer_ip_ver == ICE_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (ipv4 && (rx_status0 & (BIT(ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))) {
ring->vsi->back->hw_rx_eipe_error++;
@@ -169,19 +140,10 @@ ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
- if (decoded.tunnel_type >= ICE_RX_PTYPE_TUNNEL_IP_GRENAT)
+ if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1;
- /* Only report checksum unnecessary for TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case ICE_RX_PTYPE_INNER_PROT_TCP:
- case ICE_RX_PTYPE_INNER_PROT_UDP:
- case ICE_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- default:
- break;
- }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
checksum_fail:
@@ -236,7 +198,16 @@ ice_process_skb_fields(struct ice_rx_ring *rx_ring,
ice_rx_hash_to_skb(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ if (unlikely(rx_ring->flags & ICE_RX_FLAGS_MULTIDEV)) {
+ struct net_device *netdev = ice_eswitch_get_target(rx_ring,
+ rx_desc);
+
+ if (ice_is_port_repr_netdev(netdev))
+ ice_repr_inc_rx_stats(netdev, skb->len);
+ skb->protocol = eth_type_trans(skb, netdev);
+ } else {
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ }
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
@@ -527,42 +498,6 @@ static int ice_xdp_rx_hw_ts(const struct xdp_md *ctx, u64 *ts_ns)
return 0;
}
-/* Define a ptype index -> XDP hash type lookup table.
- * It uses the same ptype definitions as ice_decode_rx_desc_ptype[],
- * avoiding possible copy-paste errors.
- */
-#undef ICE_PTT
-#undef ICE_PTT_UNUSED_ENTRY
-
-#define ICE_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\
- [PTYPE] = XDP_RSS_L3_##OUTER_IP_VER | XDP_RSS_L4_##I | XDP_RSS_TYPE_##PL
-
-#define ICE_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = 0
-
-/* A few supplementary definitions for when XDP hash types do not coincide
- * with what can be generated from ptype definitions
- * by means of preprocessor concatenation.
- */
-#define XDP_RSS_L3_NONE XDP_RSS_TYPE_NONE
-#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE
-#define XDP_RSS_TYPE_PAY2 XDP_RSS_TYPE_L2
-#define XDP_RSS_TYPE_PAY3 XDP_RSS_TYPE_NONE
-#define XDP_RSS_TYPE_PAY4 XDP_RSS_L4
-
-static const enum xdp_rss_hash_type
-ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
- ICE_PTYPES
-};
-
-#undef XDP_RSS_L3_NONE
-#undef XDP_RSS_L4_NONE
-#undef XDP_RSS_TYPE_PAY2
-#undef XDP_RSS_TYPE_PAY3
-#undef XDP_RSS_TYPE_PAY4
-
-#undef ICE_PTT
-#undef ICE_PTT_UNUSED_ENTRY
-
/**
* ice_xdp_rx_hash_type - Get XDP-specific hash type from the RX descriptor
* @eop_desc: End of Packet descriptor
@@ -570,12 +505,7 @@ ice_ptype_to_xdp_hash[ICE_NUM_DEFINED_PTYPES] = {
static enum xdp_rss_hash_type
ice_xdp_rx_hash_type(const union ice_32b_rx_flex_desc *eop_desc)
{
- u16 ptype = ice_get_ptype(eop_desc);
-
- if (unlikely(ptype >= ICE_NUM_DEFINED_PTYPES))
- return 0;
-
- return ice_ptype_to_xdp_hash[ptype];
+ return libie_rx_pt_parse(ice_get_ptype(eop_desc)).hash_type;
}
/**
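
All three conversions in this file (RSS hash, checksum, XDP hash hint) now funnel through a single libie_rx_pt_parse() of the descriptor ptype instead of driver-local decode tables. A condensed, hedged sketch of the resulting flow (helpers as named in the hunks; descriptor status checks elided):

static void demo_rx_pt(struct net_device *netdev, struct sk_buff *skb,
		       u32 hash, u16 ptype)
{
	struct libeth_rx_pt pt = libie_rx_pt_parse(ptype);

	/* helper checks NETIF_F_RXHASH plus ptype validity in one go */
	if (libeth_rx_pt_has_hash(netdev, pt) && hash)
		libeth_rx_pt_set_hash(skb, hash, pt);

	/* HW L3/L4 status bits would be validated here (omitted) */
	if (libeth_rx_pt_has_checksum(netdev, pt))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
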
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 9ff92dba5823..96037bef3e78 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -71,6 +71,14 @@ enum ice_aq_res_ids {
ICE_GLOBAL_CFG_LOCK_RES_ID
};
+enum ice_fec_stats_types {
+ ICE_FEC_CORR_LOW,
+ ICE_FEC_CORR_HIGH,
+ ICE_FEC_UNCORR_LOW,
+ ICE_FEC_UNCORR_HIGH,
+ ICE_FEC_MAX
+};
+
/* FW update timeout definitions are in milliseconds */
#define ICE_NVM_TIMEOUT 180000
#define ICE_CHANGE_LOCK_TIMEOUT 1000
@@ -150,7 +158,6 @@ enum ice_vsi_type {
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
- ICE_VSI_SWITCHDEV_CTRL = 7,
};
struct ice_link_status {
@@ -204,6 +211,7 @@ struct ice_phy_info {
enum ice_fltr_ptype {
/* NONE - used for undef/error */
ICE_FLTR_PTYPE_NONF_NONE = 0,
+ ICE_FLTR_PTYPE_NONF_ETH,
ICE_FLTR_PTYPE_NONF_IPV4_UDP,
ICE_FLTR_PTYPE_NONF_IPV4_TCP,
ICE_FLTR_PTYPE_NONF_IPV4_SCTP,
@@ -296,6 +304,7 @@ struct ice_hw_common_caps {
bool pcie_reset_avoidance;
/* Post update reset restriction */
bool reset_restrict_support;
+ bool tx_sched_topo_comp_mode_en;
};
/* IEEE 1588 TIME_SYNC specific info */
@@ -321,12 +330,14 @@ enum ice_time_ref_freq {
ICE_TIME_REF_FREQ_156_250 = 4,
ICE_TIME_REF_FREQ_245_760 = 5,
- NUM_ICE_TIME_REF_FREQ
+ NUM_ICE_TIME_REF_FREQ,
+
+ ICE_TIME_REF_FREQ_INVALID = -1,
};
/* Clock source specification */
enum ice_clk_src {
- ICE_CLK_SRC_TCX0 = 0, /* Temperature compensated oscillator */
+ ICE_CLK_SRC_TCXO = 0, /* Temperature compensated oscillator */
ICE_CLK_SRC_TIME_REF = 1, /* Use TIME_REF reference clock */
NUM_ICE_CLK_SRC
@@ -371,6 +382,15 @@ struct ice_ts_dev_info {
u8 ts_ll_int_read;
};
+#define ICE_NAC_TOPO_PRIMARY_M BIT(0)
+#define ICE_NAC_TOPO_DUAL_M BIT(1)
+#define ICE_NAC_TOPO_ID_M GENMASK(0xF, 0)
+
+struct ice_nac_topology {
+ u32 mode;
+ u8 id;
+};
+
/* Function specific capabilities */
struct ice_hw_func_caps {
struct ice_hw_common_caps common_cap;
@@ -392,6 +412,7 @@ struct ice_hw_dev_caps {
u32 num_flow_director_fltr; /* Number of FD filters available */
struct ice_ts_dev_info ts_dev_info;
u32 num_funcs;
+ struct ice_nac_topology nac_topo;
/* bitmap of supported sensors
* bit 0 - internal temperature sensor
* bit 31:1 - Reserved
@@ -481,6 +502,8 @@ struct ice_bank_info {
u32 orom_size; /* Size of OROM bank */
u32 netlist_ptr; /* Pointer to 1st Netlist bank */
u32 netlist_size; /* Size of Netlist bank */
+ u32 active_css_hdr_len; /* Active CSS header length */
+ u32 inactive_css_hdr_len; /* Inactive CSS header length */
enum ice_flash_bank nvm_bank; /* Active NVM bank */
enum ice_flash_bank orom_bank; /* Active OROM bank */
enum ice_flash_bank netlist_bank; /* Active Netlist bank */
@@ -715,6 +738,7 @@ struct ice_port_info {
u16 sw_id; /* Initial switch ID belongs to port */
u16 pf_vf_num;
u8 port_state;
+ u8 local_fwd_mode;
#define ICE_SCHED_PORT_STATE_INIT 0x0
#define ICE_SCHED_PORT_STATE_READY 0x1
u8 lport;
@@ -738,6 +762,8 @@ struct ice_switch_info {
struct ice_sw_recipe *recp_list;
u16 prof_res_bm_init;
u16 max_used_prof_index;
+ u16 rule_cnt;
+ u8 recp_cnt;
DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
@@ -817,11 +843,43 @@ struct ice_mbx_data {
u16 async_watermark_val;
};
+#define ICE_PORTS_PER_QUAD 4
+#define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD)
+
+struct ice_eth56g_params {
+ u8 num_phys;
+ u8 phy_addr[2];
+ bool onestep_ena;
+ bool sfd_ena;
+ u32 peer_delay;
+};
+
+union ice_phy_params {
+ struct ice_eth56g_params eth56g;
+};
+
/* PHY model */
enum ice_phy_model {
ICE_PHY_UNSUP = -1,
- ICE_PHY_E810 = 1,
+ ICE_PHY_E810 = 1,
ICE_PHY_E82X,
+ ICE_PHY_ETH56G,
+};
+
+/* Global Link Topology */
+enum ice_global_link_topo {
+ ICE_LINK_TOPO_UP_TO_2_LINKS,
+ ICE_LINK_TOPO_UP_TO_4_LINKS,
+ ICE_LINK_TOPO_UP_TO_8_LINKS,
+ ICE_LINK_TOPO_RESERVED,
+};
+
+struct ice_ptp_hw {
+ enum ice_phy_model phy_model;
+ union ice_phy_params phy;
+ u8 num_lports;
+ u8 ports_per_phy;
+ bool is_2x50g_muxed_topo;
};
/* Port hardware description */
@@ -845,10 +903,11 @@ struct ice_hw {
u8 revision_id;
u8 pf_id; /* device profile info */
- enum ice_phy_model phy_model;
u16 max_burst_size; /* driver sets this value */
+ u8 recp_reuse:1; /* indicates whether FW supports recipe reuse */
+
/* Tx Scheduler values */
u8 num_tx_sched_layers;
u8 num_tx_sched_phys_layers;
@@ -906,12 +965,7 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
-#define ICE_MAX_QUAD 2
-#define ICE_QUADS_PER_PHY_E82X 2
-#define ICE_PORTS_PER_PHY_E82X 8
-#define ICE_PORTS_PER_QUAD 4
-#define ICE_PORTS_PER_PHY_E810 4
-#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
+ struct ice_ptp_hw ptp;
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
@@ -1084,17 +1138,13 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_SR_SECTOR_SIZE_IN_WORDS 0x800
/* CSS Header words */
+#define ICE_NVM_CSS_HDR_LEN_L 0x02
+#define ICE_NVM_CSS_HDR_LEN_H 0x03
#define ICE_NVM_CSS_SREV_L 0x14
#define ICE_NVM_CSS_SREV_H 0x15
-/* Length of CSS header section in words */
-#define ICE_CSS_HEADER_LENGTH 330
-
-/* Offset of Shadow RAM copy in the NVM bank area. */
-#define ICE_NVM_SR_COPY_WORD_OFFSET roundup(ICE_CSS_HEADER_LENGTH, 32)
-
-/* Size in bytes of Option ROM trailer */
-#define ICE_NVM_OROM_TRAILER_LENGTH (2 * ICE_CSS_HEADER_LENGTH)
+/* Length of Authentication header section in words */
+#define ICE_NVM_AUTH_HEADER_LEN 0x08
/* The Link Topology Netlist section is stored as a series of words. It is
* stored in the NVM as a TLV, with the first two words containing the type
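
The NVM defines above drop the fixed 330-word CSS header in favor of a length stored in the image itself; the implied arithmetic, as a sketch (flash reads elided, variable names assumed):

/* len_l/len_h: 16-bit words read from the active bank at
 * ICE_NVM_CSS_HDR_LEN_L/_H; result is in 16-bit words.
 */
u32 css_hdr_len = ((u32)len_h << 16) | len_l;

css_hdr_len += ICE_NVM_AUTH_HEADER_LEN;	/* include the auth header */
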
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index d10a4be965b5..5635e9da2212 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -259,20 +259,18 @@ static void ice_vf_pre_vsi_rebuild(struct ice_vf *vf)
int ice_vf_reconfig_vsi(struct ice_vf *vf)
{
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- struct ice_vsi_cfg_params params = {};
struct ice_pf *pf = vf->pf;
int err;
if (WARN_ON(!vsi))
return -EINVAL;
- params = ice_vsi_to_params(vsi);
- params.flags = ICE_VSI_FLAG_NO_INIT;
+ vsi->flags = ICE_VSI_FLAG_NO_INIT;
ice_vsi_decfg(vsi);
ice_fltr_remove_all(vsi);
- err = ice_vsi_cfg(vsi, &params);
+ err = ice_vsi_cfg(vsi);
if (err) {
dev_err(ice_pf_to_dev(pf),
"Failed to reconfigure the VF%u's VSI, error %d\n",
@@ -950,7 +948,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
goto out_unlock;
}
- ice_eswitch_update_repr(vf->repr_id, vsi);
+ ice_eswitch_update_repr(&vf->repr_id, vsi);
/* if the VF has been reset allow it to come up again */
ice_mbx_clear_malvf(&vf->mbx_info);
@@ -992,10 +990,13 @@ void ice_initialize_vf_entry(struct ice_vf *vf)
/* assign default capabilities */
vf->spoofchk = true;
- vf->num_vf_qs = vfs->num_qps_per;
ice_vc_set_default_allowlist(vf);
ice_virtchnl_set_dflt_ops(vf);
+ /* set default number of MSI-X */
+ vf->num_msix = vfs->num_msix_per;
+ vf->num_vf_qs = vfs->num_qps_per;
+
/* ctrl_vsi_idx will be set to a valid value only when iAVF
* creates its first fdir rule.
*/
@@ -1240,7 +1241,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
struct ice_vsi *vsi;
params.type = ICE_VSI_CTRL;
- params.pi = ice_vf_get_port_info(vf);
+ params.port_info = ice_vf_get_port_info(vf);
params.vf = vf;
params.flags = ICE_VSI_FLAG_INIT;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index 1ff9818b4c84..1c6ce0c4ed4e 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -1505,13 +1505,12 @@ error_param:
* ice_cfg_interrupt
* @vf: pointer to the VF info
* @vsi: the VSI being configured
- * @vector_id: vector ID
* @map: vector map for mapping vectors to queues
* @q_vector: structure for interrupt vector
* configure the IRQ to queue map
*/
-static int
-ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
+static enum virtchnl_status_code
+ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi,
struct virtchnl_vector_map *map,
struct ice_q_vector *q_vector)
{
@@ -1531,7 +1530,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
q_vector->num_ring_rx++;
q_vector->rx.itr_idx = map->rxitr_idx;
vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
+ ice_cfg_rxq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
q_vector->rx.itr_idx);
}
@@ -1545,7 +1545,8 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id,
q_vector->num_ring_tx++;
q_vector->tx.itr_idx = map->txitr_idx;
vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
- ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
+ ice_cfg_txq_interrupt(vsi, vsi_q_id,
+ q_vector->vf_reg_idx,
q_vector->tx.itr_idx);
}
@@ -1619,8 +1620,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
}
/* lookout for the invalid queue index */
- v_ret = (enum virtchnl_status_code)
- ice_cfg_interrupt(vf, vsi, vector_id, map, q_vector);
+ v_ret = ice_cfg_interrupt(vf, vsi, map, q_vector);
if (v_ret)
goto error_param;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index 8e4ff3af86c6..b4feb0927687 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -536,6 +536,8 @@ static void ice_vc_fdir_reset_cnt_all(struct ice_vf_fdir *fdir)
fdir->fdir_fltr_cnt[flow][0] = 0;
fdir->fdir_fltr_cnt[flow][1] = 0;
}
+
+ fdir->fdir_fltr_cnt_total = 0;
}
/**
@@ -1560,6 +1562,7 @@ ice_vc_add_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
resp->status = status;
resp->flow_id = conf->flow_id;
vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]++;
+ vf->fdir.fdir_fltr_cnt_total++;
ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
(u8 *)resp, len);
@@ -1624,6 +1627,7 @@ ice_vc_del_fdir_fltr_post(struct ice_vf *vf, struct ice_vf_fdir_ctx *ctx,
resp->status = status;
ice_vc_fdir_remove_entry(vf, conf, conf->flow_id);
vf->fdir.fdir_fltr_cnt[conf->input.flow_type][is_tun]--;
+ vf->fdir.fdir_fltr_cnt_total--;
ret = ice_vc_send_msg_to_vf(vf, ctx->v_opcode, v_ret,
(u8 *)resp, len);
@@ -1790,6 +1794,7 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
struct virtchnl_fdir_add *stat = NULL;
struct virtchnl_fdir_fltr_conf *conf;
enum virtchnl_status_code v_ret;
+ struct ice_vsi *vf_vsi;
struct device *dev;
struct ice_pf *pf;
int is_tun = 0;
@@ -1798,6 +1803,17 @@ int ice_vc_add_fdir_fltr(struct ice_vf *vf, u8 *msg)
pf = vf->pf;
dev = ice_pf_to_dev(pf);
+ vf_vsi = ice_get_vf_vsi(vf);
+
+#define ICE_VF_MAX_FDIR_FILTERS 128
+ if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
+ vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ dev_err(dev, "Max number of FDIR filters for VF %d is reached\n",
+ vf->vf_id);
+ goto err_exit;
+ }
+
ret = ice_vc_fdir_param_check(vf, fltr->vsi_id);
if (ret) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
index c5bcc8d7481c..ac6dcab454b4 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.h
@@ -29,6 +29,7 @@ struct ice_vf_fdir_ctx {
struct ice_vf_fdir {
u16 fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
int prof_entry_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
+ u16 fdir_fltr_cnt_total;
struct ice_fd_hw_prof **fdir_prof;
struct idr fdir_rule_idr;
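
fdir_fltr_cnt_total gives the add path a cheap per-VF cap (ICE_VF_MAX_FDIR_FILTERS, 128) on top of the global hardware space check. A condensed sketch of the gate and counter lifecycle implied by the hunks (the bare return value here is illustrative):

/* reset with the per-flow counters; ++ on add, -- on delete */
if (!ice_fdir_num_avail_fltr(&pf->hw, vf_vsi) ||
    vf->fdir.fdir_fltr_cnt_total >= ICE_VF_MAX_FDIR_FILTERS)
	return -ENOSPC;	/* driver reports VIRTCHNL_STATUS_ERR_PARAM */
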
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 2e9ad27cb9d1..6e8f2aab6080 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -45,14 +45,15 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
return -EINVAL;
err = ice_fltr_add_vlan(vsi, vlan);
- if (err && err != -EEXIST) {
+ if (!err)
+ vsi->num_vlan++;
+ else if (err == -EEXIST)
+ err = 0;
+ else
dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
vlan->vid, vsi->vsi_num, err);
- return err;
- }
- vsi->num_vlan++;
- return 0;
+ return err;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
index 4a6c850d83ac..7aae7fdcfcdb 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.c
@@ -72,7 +72,6 @@ void ice_vsi_init_vlan_ops(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_PF:
- case ICE_VSI_SWITCHDEV_CTRL:
ice_pf_vsi_init_vlan_ops(vsi);
break;
case ICE_VSI_VF:
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 1857220d27fe..240a7bec242b 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -52,10 +52,8 @@ static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
ice_clean_tx_ring(vsi->tx_rings[q_idx]);
- if (ice_is_xdp_ena_vsi(vsi)) {
- synchronize_rcu();
+ if (ice_is_xdp_ena_vsi(vsi))
ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
- }
ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}
@@ -112,25 +110,29 @@ ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
* ice_qvec_cfg_msix - Enable IRQ for given queue vector
* @vsi: the VSI that contains queue vector
* @q_vector: queue vector
+ * @qid: queue index
*/
static void
-ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
+ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector, u16 qid)
{
u16 reg_idx = q_vector->reg_idx;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- struct ice_tx_ring *tx_ring;
- struct ice_rx_ring *rx_ring;
+ int q, _qid = qid;
ice_cfg_itr(hw, q_vector);
- ice_for_each_tx_ring(tx_ring, q_vector->tx)
- ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
- q_vector->tx.itr_idx);
+ for (q = 0; q < q_vector->num_ring_tx; q++) {
+ ice_cfg_txq_interrupt(vsi, _qid, reg_idx, q_vector->tx.itr_idx);
+ _qid++;
+ }
+
+ _qid = qid;
- ice_for_each_rx_ring(rx_ring, q_vector->rx)
- ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
- q_vector->rx.itr_idx);
+ for (q = 0; q < q_vector->num_ring_rx; q++) {
+ ice_cfg_rxq_interrupt(vsi, _qid, reg_idx, q_vector->rx.itr_idx);
+ _qid++;
+ }
ice_flush(hw);
}
@@ -164,6 +166,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
int timeout = 50;
+ int fail = 0;
int err;
if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
@@ -180,15 +183,17 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
usleep_range(1000, 2000);
}
+ synchronize_net();
+ netif_carrier_off(vsi->netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+
ice_qvec_dis_irq(vsi, rx_ring, q_vector);
ice_qvec_toggle_napi(vsi, q_vector, false);
- netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
-
ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
@@ -196,17 +201,15 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
&txq_meta);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
}
- err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true);
- if (err)
- return err;
+ ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, false);
ice_qp_clean_rings(vsi, q_idx);
ice_qp_reset_stats(vsi, q_idx);
- return 0;
+ return fail;
}
/**
@@ -219,40 +222,48 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
struct ice_q_vector *q_vector;
+ int fail = 0;
+ bool link_up;
int err;
err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
err = ice_vsi_cfg_single_txq(vsi, vsi->xdp_rings, q_idx);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
ice_set_ring_xdp(xdp_ring);
ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_single_rxq(vsi, q_idx);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
q_vector = vsi->rx_rings[q_idx]->q_vector;
- ice_qvec_cfg_msix(vsi, q_vector);
+ ice_qvec_cfg_msix(vsi, q_vector, q_idx);
err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
- if (err)
- return err;
+ if (!fail)
+ fail = err;
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
- netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ /* make sure NAPI sees updated ice_{t,x}_ring::xsk_pool */
+ synchronize_net();
+ ice_get_link_status(vsi->port_info, &link_up);
+ if (link_up) {
+ netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
+ netif_carrier_on(vsi->netdev);
+ }
clear_bit(ICE_CFG_BUSY, vsi->state);
- return 0;
+ return fail;
}
/**
@@ -269,7 +280,6 @@ static int ice_xsk_pool_disable(struct ice_vsi *vsi, u16 qid)
if (!pool)
return -EINVAL;
- clear_bit(qid, vsi->af_xdp_zc_qps);
xsk_pool_dma_unmap(pool, ICE_RX_DMA_ATTR);
return 0;
@@ -300,8 +310,6 @@ ice_xsk_pool_enable(struct ice_vsi *vsi, struct xsk_buff_pool *pool, u16 qid)
if (err)
return err;
- set_bit(qid, vsi->af_xdp_zc_qps);
-
return 0;
}
@@ -349,11 +357,13 @@ ice_realloc_rx_xdp_bufs(struct ice_rx_ring *rx_ring, bool pool_present)
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc)
{
struct ice_rx_ring *rx_ring;
- unsigned long q;
+ uint i;
+
+ ice_for_each_rxq(vsi, i) {
+ rx_ring = vsi->rx_rings[i];
+ if (!rx_ring->xsk_pool)
+ continue;
- for_each_set_bit(q, vsi->af_xdp_zc_qps,
- max_t(int, vsi->alloc_txq, vsi->alloc_rxq)) {
- rx_ring = vsi->rx_rings[q];
if (ice_realloc_rx_xdp_bufs(rx_ring, zc))
return -ENOMEM;
}
@@ -460,6 +470,7 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
/**
* __ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
* @rx_ring: Rx ring
+ * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
* @count: The number of buffers to allocate
*
* Place the @count of descriptors onto Rx ring. Handle the ring wrap
@@ -468,7 +479,8 @@ static u16 ice_fill_rx_descs(struct xsk_buff_pool *pool, struct xdp_buff **xdp,
*
* Returns true if all allocations were successful, false if any fail.
*/
-static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
{
u32 nb_buffs_extra = 0, nb_buffs = 0;
union ice_32b_rx_flex_desc *rx_desc;
@@ -480,8 +492,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
xdp = ice_xdp_buf(rx_ring, ntu);
if (ntu + count >= rx_ring->count) {
- nb_buffs_extra = ice_fill_rx_descs(rx_ring->xsk_pool, xdp,
- rx_desc,
+ nb_buffs_extra = ice_fill_rx_descs(xsk_pool, xdp, rx_desc,
rx_ring->count - ntu);
if (nb_buffs_extra != rx_ring->count - ntu) {
ntu += nb_buffs_extra;
@@ -494,7 +505,7 @@ static bool __ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
ice_release_rx_desc(rx_ring, 0);
}
- nb_buffs = ice_fill_rx_descs(rx_ring->xsk_pool, xdp, rx_desc, count);
+ nb_buffs = ice_fill_rx_descs(xsk_pool, xdp, rx_desc, count);
ntu += nb_buffs;
if (ntu == rx_ring->count)
@@ -510,6 +521,7 @@ exit:
/**
* ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
* @rx_ring: Rx ring
+ * @xsk_pool: XSK buffer pool to pick buffers to be filled by HW
* @count: The number of buffers to allocate
*
* Wrapper for internal allocation routine; figure out how many tail
@@ -517,7 +529,8 @@ exit:
*
* Returns true if all calls to internal alloc routine succeeded
*/
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count)
{
u16 rx_thresh = ICE_RING_QUARTER(rx_ring);
u16 leftover, i, tail_bumps;
@@ -526,9 +539,9 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
leftover = count - (tail_bumps * rx_thresh);
for (i = 0; i < tail_bumps; i++)
- if (!__ice_alloc_rx_bufs_zc(rx_ring, rx_thresh))
+ if (!__ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, rx_thresh))
return false;
- return __ice_alloc_rx_bufs_zc(rx_ring, leftover);
+ return __ice_alloc_rx_bufs_zc(rx_ring, xsk_pool, leftover);
}
/**
@@ -555,8 +568,7 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
}
net_prefetch(xdp->data_meta);
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
if (unlikely(!skb))
return NULL;
@@ -598,8 +610,10 @@ out:
/**
* ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
* @xdp_ring: XDP Tx ring
+ * @xsk_pool: AF_XDP buffer pool pointer
*/
-static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool)
{
u16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *tx_desc;
@@ -650,7 +664,7 @@ skip:
if (xdp_ring->next_to_clean >= cnt)
xdp_ring->next_to_clean -= cnt;
if (xsk_frames)
- xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+ xsk_tx_completed(xsk_pool, xsk_frames);
return completed_frames;
}
@@ -659,6 +673,7 @@ skip:
* ice_xmit_xdp_tx_zc - AF_XDP ZC handler for XDP_TX
* @xdp: XDP buffer to xmit
* @xdp_ring: XDP ring to produce descriptor onto
+ * @xsk_pool: AF_XDP buffer pool pointer
*
* note that this function works directly on xdp_buff, no need to convert
* it to xdp_frame. xdp_buff pointer is stored to ice_tx_buf so that cleaning
@@ -668,7 +683,8 @@ skip:
* was not enough space on XDP ring
*/
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
- struct ice_tx_ring *xdp_ring)
+ struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool)
{
struct skb_shared_info *sinfo = NULL;
u32 size = xdp->data_end - xdp->data;
@@ -682,7 +698,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
free_space = ICE_DESC_UNUSED(xdp_ring);
if (free_space < ICE_RING_QUARTER(xdp_ring))
- free_space += ice_clean_xdp_irq_zc(xdp_ring);
+ free_space += ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);
if (unlikely(!free_space))
goto busy;
@@ -702,7 +718,7 @@ static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
dma_addr_t dma;
dma = xsk_buff_xdp_get_dma(xdp);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, size);
tx_buf->xdp = xdp;
tx_buf->type = ICE_TX_BUF_XSK_TX;
@@ -744,12 +760,14 @@ busy:
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
* @xdp_ring: ring to be used for XDP_TX action
+ * @xsk_pool: AF_XDP buffer pool pointer
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static int
ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool)
{
int err, result = ICE_XDP_PASS;
u32 act;
@@ -760,7 +778,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
if (!err)
return ICE_XDP_REDIR;
- if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)
+ if (xsk_uses_need_wakeup(xsk_pool) && err == -ENOBUFS)
result = ICE_XDP_EXIT;
else
result = ICE_XDP_CONSUMED;
@@ -771,7 +789,7 @@ ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
case XDP_PASS:
break;
case XDP_TX:
- result = ice_xmit_xdp_tx_zc(xdp, xdp_ring);
+ result = ice_xmit_xdp_tx_zc(xdp, xdp_ring, xsk_pool);
if (result == ICE_XDP_CONSUMED)
goto out_failure;
break;
@@ -823,14 +841,16 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
/**
* ice_clean_rx_irq_zc - consumes packets from the hardware ring
* @rx_ring: AF_XDP Rx ring
+ * @xsk_pool: AF_XDP buffer pool pointer
* @budget: NAPI budget
*
* Returns number of processed packets on success, remaining budget on failure.
*/
-int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool,
+ int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
u32 ntc = rx_ring->next_to_clean;
u32 ntu = rx_ring->next_to_use;
struct xdp_buff *first = NULL;
@@ -879,7 +899,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
ICE_RX_FLX_DESC_PKT_LEN_M;
xsk_buff_set_size(xdp, size);
- xsk_buff_dma_sync_for_cpu(xdp, xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xdp);
if (!first) {
first = xdp;
@@ -893,7 +913,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
if (ice_is_non_eop(rx_ring, rx_desc))
continue;
- xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
+ xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring,
+ xsk_pool);
if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
xdp_xmit |= xdp_res;
} else if (xdp_res == ICE_XDP_EXIT) {
@@ -942,7 +963,8 @@ construct_skb:
rx_ring->next_to_clean = ntc;
entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
- failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
+ failure |= !ice_alloc_rx_bufs_zc(rx_ring, xsk_pool,
+ entries_to_alloc);
ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
@@ -965,17 +987,19 @@ construct_skb:
/**
* ice_xmit_pkt - produce a single HW Tx descriptor out of AF_XDP descriptor
* @xdp_ring: XDP ring to produce the HW Tx descriptor on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
* @desc: AF_XDP descriptor to pull the DMA address and length from
* @total_bytes: bytes accumulator that will be used for stats update
*/
-static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
+static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool, struct xdp_desc *desc,
unsigned int *total_bytes)
{
struct ice_tx_desc *tx_desc;
dma_addr_t dma;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);
+ dma = xsk_buff_raw_get_dma(xsk_pool, desc->addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, desc->len);
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
tx_desc->buf_addr = cpu_to_le64(dma);
@@ -988,10 +1012,13 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
/**
* ice_xmit_pkt_batch - produce a batch of HW Tx descriptors out of AF_XDP descriptors
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
* @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
* @total_bytes: bytes accumulator that will be used for stats update
*/
-static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
+static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool,
+ struct xdp_desc *descs,
unsigned int *total_bytes)
{
u16 ntu = xdp_ring->next_to_use;
@@ -1001,8 +1028,8 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
dma_addr_t dma;
- dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, descs[i].addr);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, descs[i].len);
+ dma = xsk_buff_raw_get_dma(xsk_pool, descs[i].addr);
+ xsk_buff_raw_dma_sync_for_device(xsk_pool, dma, descs[i].len);
tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
tx_desc->buf_addr = cpu_to_le64(dma);
@@ -1018,60 +1045,69 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
/**
* ice_fill_tx_hw_ring - produce the number of Tx descriptors onto ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: XSK buffer pool to pick buffers to be consumed by HW
* @descs: AF_XDP descriptors to pull the DMA addresses and lengths from
* @nb_pkts: count of packets to be sent
* @total_bytes: bytes accumulator that will be used for stats update
*/
-static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring, struct xdp_desc *descs,
- u32 nb_pkts, unsigned int *total_bytes)
+static void ice_fill_tx_hw_ring(struct ice_tx_ring *xdp_ring,
+ struct xsk_buff_pool *xsk_pool,
+ struct xdp_desc *descs, u32 nb_pkts,
+ unsigned int *total_bytes)
{
u32 batched, leftover, i;
batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);
leftover = nb_pkts & (PKTS_PER_BATCH - 1);
for (i = 0; i < batched; i += PKTS_PER_BATCH)
- ice_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
+ ice_xmit_pkt_batch(xdp_ring, xsk_pool, &descs[i], total_bytes);
for (; i < batched + leftover; i++)
- ice_xmit_pkt(xdp_ring, &descs[i], total_bytes);
+ ice_xmit_pkt(xdp_ring, xsk_pool, &descs[i], total_bytes);
}
/**
* ice_xmit_zc - take entries from XSK Tx ring and place them onto HW Tx ring
* @xdp_ring: XDP ring to produce the HW Tx descriptors on
+ * @xsk_pool: AF_XDP buffer pool pointer
*
* Returns true if there is no more work that needs to be done, false otherwise
*/
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring)
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool)
{
- struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;
+ struct xdp_desc *descs = xsk_pool->tx_descs;
u32 nb_pkts, nb_processed = 0;
unsigned int total_bytes = 0;
int budget;
- ice_clean_xdp_irq_zc(xdp_ring);
+ ice_clean_xdp_irq_zc(xdp_ring, xsk_pool);
+
+ if (!netif_carrier_ok(xdp_ring->vsi->netdev) ||
+ !netif_running(xdp_ring->vsi->netdev))
+ return true;
budget = ICE_DESC_UNUSED(xdp_ring);
budget = min_t(u16, budget, ICE_RING_QUARTER(xdp_ring));
- nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);
+ nb_pkts = xsk_tx_peek_release_desc_batch(xsk_pool, budget);
if (!nb_pkts)
return true;
if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
nb_processed = xdp_ring->count - xdp_ring->next_to_use;
- ice_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
+ ice_fill_tx_hw_ring(xdp_ring, xsk_pool, descs, nb_processed,
+ &total_bytes);
xdp_ring->next_to_use = 0;
}
- ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
- &total_bytes);
+ ice_fill_tx_hw_ring(xdp_ring, xsk_pool, &descs[nb_processed],
+ nb_pkts - nb_processed, &total_bytes);
ice_set_rs_bit(xdp_ring);
ice_xdp_ring_update_tail(xdp_ring);
ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);
- if (xsk_uses_need_wakeup(xdp_ring->xsk_pool))
- xsk_set_tx_need_wakeup(xdp_ring->xsk_pool);
+ if (xsk_uses_need_wakeup(xsk_pool))
+ xsk_set_tx_need_wakeup(xsk_pool);
return nb_pkts < budget;
}
@@ -1093,7 +1129,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_vsi *vsi = np->vsi;
struct ice_tx_ring *ring;
- if (test_bit(ICE_VSI_DOWN, vsi->state))
+ if (test_bit(ICE_VSI_DOWN, vsi->state) || !netif_carrier_ok(netdev))
return -ENETDOWN;
if (!ice_is_xdp_ena_vsi(vsi))
@@ -1104,7 +1140,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
ring = vsi->rx_rings[queue_id]->xdp_ring;
- if (!ring->xsk_pool)
+ if (!READ_ONCE(ring->xsk_pool))
return -EINVAL;
/* The idea here is that if NAPI is running, mark a miss, so
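
ice_qp_dis()/ice_qp_ena() above switch from early returns to accumulating the first error while still executing every remaining step, so a failed queue-pair reconfiguration is never left half torn down. A hedged sketch of the idiom (wrapper name illustrative; the two cfg calls appear in the hunk):

static int demo_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
	int fail = 0, err;

	err = ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx);
	if (!fail)
		fail = err;

	err = ice_vsi_cfg_single_rxq(vsi, q_idx);
	if (!fail)
		fail = err;

	return fail;	/* first error wins; later steps still ran */
}
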
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index 6fa181f080ef..45adeb513253 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -20,16 +20,20 @@ struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
-int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool,
+ int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring,
+ struct xsk_buff_pool *xsk_pool, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
-bool ice_xmit_zc(struct ice_tx_ring *xdp_ring);
+bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
int ice_realloc_zc_buf(struct ice_vsi *vsi, bool zc);
#else
-static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring)
+static inline bool ice_xmit_zc(struct ice_tx_ring __always_unused *xdp_ring,
+ struct xsk_buff_pool __always_unused *xsk_pool)
{
return false;
}
@@ -44,6 +48,7 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
static inline int
ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
+ struct xsk_buff_pool __always_unused *xsk_pool,
int __always_unused budget)
{
return 0;
@@ -51,6 +56,7 @@ ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
static inline bool
ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
+ struct xsk_buff_pool __always_unused *xsk_pool,
u16 __always_unused count)
{
return false;
diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig
new file mode 100644
index 000000000000..1addd663acad
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+config IDPF
+ tristate "Intel(R) Infrastructure Data Path Function Support"
+ depends on PCI_MSI
+ select DIMLIB
+ select LIBETH
+ help
+ This driver supports Intel(R) Infrastructure Data Path Function
+ devices.
+
+ To compile this driver as a module, choose M here. The module
+ will be called idpf.
+
+if IDPF
+
+config IDPF_SINGLEQ
+ bool "idpf singleq support"
+ help
+ This option enables support for legacy single Rx/Tx queues w/no
+ completion and fill queues. Only enable if you have hardware which
+ wants to work in this mode as it increases the driver size and adds
+ runtme checks on hotpath.
+
+endif # IDPF
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
index 6844ead2f3ac..2ce01a0b5898 100644
--- a/drivers/net/ethernet/intel/idpf/Makefile
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -12,7 +12,8 @@ idpf-y := \
idpf_ethtool.o \
idpf_lib.o \
idpf_main.o \
- idpf_singleq_txrx.o \
idpf_txrx.o \
idpf_virtchnl.o \
idpf_vf_dev.o
+
+idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index e7a036538246..2c31ad87587a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -17,10 +17,8 @@ struct idpf_vport_max_q;
#include <linux/sctp.h>
#include <linux/ethtool_netlink.h>
#include <net/gro.h>
-#include <linux/dim.h>
#include "virtchnl2.h"
-#include "idpf_lan_txrx.h"
#include "idpf_txrx.h"
#include "idpf_controlq.h"
@@ -266,7 +264,6 @@ struct idpf_port_stats {
* the worst case.
* @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
* @bufq_desc_count: Buffer queue descriptor count
- * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc)
* @num_rxq_grp: Number of RX queues in a group
* @rxq_grps: Total number of RX groups. Number of groups * number of RX per
* group will yield total number of RX queues.
@@ -302,7 +299,7 @@ struct idpf_vport {
u16 num_txq_grp;
struct idpf_txq_group *txq_grps;
u32 txq_model;
- struct idpf_queue **txqs;
+ struct idpf_tx_queue **txqs;
bool crc_enable;
u16 num_rxq;
@@ -310,11 +307,10 @@ struct idpf_vport {
u32 rxq_desc_count;
u8 num_bufqs_per_qgrp;
u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP];
- u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP];
u16 num_rxq_grp;
struct idpf_rxq_group *rxq_grps;
u32 rxq_model;
- struct idpf_rx_ptype_decoded rx_ptype_lkup[IDPF_RX_MAX_PTYPE];
+ struct libeth_rx_pt *rx_ptype_lkup;
struct idpf_adapter *adapter;
struct net_device *netdev;
@@ -601,7 +597,8 @@ struct idpf_adapter {
*/
static inline int idpf_is_queue_model_split(u16 q_model)
{
- return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
+ return !IS_ENABLED(CONFIG_IDPF_SINGLEQ) ||
+ q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT;
}
#define idpf_is_cap_ena(adapter, field, flag) \
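
When IDPF_SINGLEQ is compiled out, IS_ENABLED(CONFIG_IDPF_SINGLEQ) folds to 0, so idpf_is_queue_model_split() becomes a compile-time constant true and every singleq branch is dead code the compiler can eliminate, which is what lets the Makefile build idpf_singleq_txrx.o conditionally. An illustration with generic (assumed) handler names:

if (idpf_is_queue_model_split(q_model))
	handle_splitq();	/* the only reachable arm with IDPF_SINGLEQ=n */
else
	handle_singleq();	/* dropped at compile time, so its symbols may
				 * live in the conditionally built object
				 */
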
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 986d429d1175..3806ddd3ce4a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -222,14 +222,19 @@ static int idpf_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct idpf_vport_config *vport_config;
- u16 combined, num_txq, num_rxq;
unsigned int num_req_tx_q;
unsigned int num_req_rx_q;
struct idpf_vport *vport;
+ u16 num_txq, num_rxq;
struct device *dev;
int err = 0;
u16 idx;
+ if (ch->rx_count && ch->tx_count) {
+ netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n");
+ return -EINVAL;
+ }
+
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
@@ -239,20 +244,6 @@ static int idpf_set_channels(struct net_device *netdev,
num_txq = vport_config->user_config.num_req_tx_qs;
num_rxq = vport_config->user_config.num_req_rx_qs;
- combined = min(num_txq, num_rxq);
-
- /* these checks are for cases where user didn't specify a particular
- * value on cmd line but we get non-zero value anyway via
- * get_channels(); look at ethtool.c in ethtool repository (the user
- * space part), particularly, do_schannels() routine
- */
- if (ch->combined_count == combined)
- ch->combined_count = 0;
- if (ch->combined_count && ch->rx_count == num_rxq - combined)
- ch->rx_count = 0;
- if (ch->combined_count && ch->tx_count == num_txq - combined)
- ch->tx_count = 0;
-
num_req_tx_q = ch->combined_count + ch->tx_count;
num_req_rx_q = ch->combined_count + ch->rx_count;
@@ -376,7 +367,8 @@ static int idpf_set_ringparam(struct net_device *netdev,
new_tx_count);
if (new_tx_count == vport->txq_desc_count &&
- new_rx_count == vport->rxq_desc_count)
+ new_rx_count == vport->rxq_desc_count &&
+ kring->tcp_data_split == idpf_vport_get_hsplit(vport))
goto unlock_mutex;
if (!idpf_vport_set_hsplit(vport, kring->tcp_data_split)) {
@@ -445,22 +437,24 @@ struct idpf_stats {
.stat_offset = offsetof(_type, _stat) \
}
-/* Helper macro for defining some statistics related to queues */
-#define IDPF_QUEUE_STAT(_name, _stat) \
- IDPF_STAT(struct idpf_queue, _name, _stat)
+/* Helper macros for defining some statistics related to queues */
+#define IDPF_RX_QUEUE_STAT(_name, _stat) \
+ IDPF_STAT(struct idpf_rx_queue, _name, _stat)
+#define IDPF_TX_QUEUE_STAT(_name, _stat) \
+ IDPF_STAT(struct idpf_tx_queue, _name, _stat)
/* Stats associated with a Tx queue */
static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = {
- IDPF_QUEUE_STAT("pkts", q_stats.tx.packets),
- IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes),
- IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts),
+ IDPF_TX_QUEUE_STAT("pkts", q_stats.packets),
+ IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes),
+ IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts),
};
/* Stats associated with an Rx queue */
static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = {
- IDPF_QUEUE_STAT("pkts", q_stats.rx.packets),
- IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes),
- IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts),
+ IDPF_RX_QUEUE_STAT("pkts", q_stats.packets),
+ IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes),
+ IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts),
};
#define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats)
@@ -571,8 +565,6 @@ static void idpf_get_stat_strings(struct net_device *netdev, u8 *data)
for (i = 0; i < vport_config->max_q.max_rxq; i++)
idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats,
"rx", i);
-
- page_pool_ethtool_stats_get_strings(data);
}
/**
@@ -606,7 +598,6 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
u16 max_txq, max_rxq;
- unsigned int size;
if (sset != ETH_SS_STATS)
return -EINVAL;
@@ -625,11 +616,8 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
max_txq = vport_config->max_q.max_txq;
max_rxq = vport_config->max_q.max_rxq;
- size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
+ return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) +
(IDPF_RX_QUEUE_STATS_LEN * max_rxq);
- size += page_pool_ethtool_stats_get_count();
-
- return size;
}
/**
@@ -641,7 +629,7 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset)
* Copies the stat data defined by the pointer and stat structure pair into
* the memory supplied as data. If the pointer is null, data will be zero'd.
*/
-static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
+static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat,
const struct idpf_stats *stat)
{
char *p;
@@ -679,6 +667,7 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
* idpf_add_queue_stats - copy queue statistics into supplied buffer
* @data: ethtool stats buffer
* @q: the queue to copy
+ * @type: type of the queue
*
* Queue statistics must be copied while protected by u64_stats_fetch_begin,
* so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats
@@ -689,19 +678,23 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat,
*
* This function expects to be called while under rcu_read_lock().
*/
-static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
+static void idpf_add_queue_stats(u64 **data, const void *q,
+ enum virtchnl2_queue_type type)
{
+ const struct u64_stats_sync *stats_sync;
const struct idpf_stats *stats;
unsigned int start;
unsigned int size;
unsigned int i;
- if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
size = IDPF_RX_QUEUE_STATS_LEN;
stats = idpf_gstrings_rx_queue_stats;
+ stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync;
} else {
size = IDPF_TX_QUEUE_STATS_LEN;
stats = idpf_gstrings_tx_queue_stats;
+ stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync;
}
/* To avoid invalid statistics values, ensure that we keep retrying
@@ -709,10 +702,10 @@ static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q)
* u64_stats_fetch_retry.
*/
do {
- start = u64_stats_fetch_begin(&q->stats_sync);
+ start = u64_stats_fetch_begin(stats_sync);
for (i = 0; i < size; i++)
idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]);
- } while (u64_stats_fetch_retry(&q->stats_sync, start));
+ } while (u64_stats_fetch_retry(stats_sync, start));
/* Once we successfully copy the stats in, update the data pointer */
*data += size;
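
The loop above is the standard u64_stats seqcount pattern: keep re-reading the snapshot until the writer-side sequence is stable, which makes 64-bit counters tear-free even on 32-bit machines. A self-contained sketch (field names generic):

unsigned int start;
u64 pkts;

do {
	start = u64_stats_fetch_begin(stats_sync);
	pkts = u64_stats_read(&stats->packets);
} while (u64_stats_fetch_retry(stats_sync, start));
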
@@ -801,7 +794,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
for (j = 0; j < num_rxq; j++) {
u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs;
struct idpf_rx_queue_stats *stats;
- struct idpf_queue *rxq;
+ struct idpf_rx_queue *rxq;
unsigned int start;
if (idpf_is_queue_model_split(vport->rxq_model))
@@ -815,7 +808,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
do {
start = u64_stats_fetch_begin(&rxq->stats_sync);
- stats = &rxq->q_stats.rx;
+ stats = &rxq->q_stats;
hw_csum_err = u64_stats_read(&stats->hw_csum_err);
hsplit = u64_stats_read(&stats->hsplit_pkts);
hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf);
@@ -836,7 +829,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
for (j = 0; j < txq_grp->num_txq; j++) {
u64 linearize, qbusy, skb_drops, dma_map_errs;
- struct idpf_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue *txq = txq_grp->txqs[j];
struct idpf_tx_queue_stats *stats;
unsigned int start;
@@ -846,7 +839,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport)
do {
start = u64_stats_fetch_begin(&txq->stats_sync);
- stats = &txq->q_stats.tx;
+ stats = &txq->q_stats;
linearize = u64_stats_read(&stats->linearize);
qbusy = u64_stats_read(&stats->q_busy);
skb_drops = u64_stats_read(&stats->skb_drops);
@@ -877,7 +870,6 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
{
struct idpf_netdev_priv *np = netdev_priv(netdev);
struct idpf_vport_config *vport_config;
- struct page_pool_stats pp_stats = { };
struct idpf_vport *vport;
unsigned int total = 0;
unsigned int i, j;
@@ -904,12 +896,12 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
qtype = VIRTCHNL2_QUEUE_TYPE_TX;
for (j = 0; j < txq_grp->num_txq; j++, total++) {
- struct idpf_queue *txq = txq_grp->txqs[j];
+ struct idpf_tx_queue *txq = txq_grp->txqs[j];
if (!txq)
idpf_add_empty_queue_stats(&data, qtype);
else
- idpf_add_queue_stats(&data, txq);
+ idpf_add_queue_stats(&data, txq, qtype);
}
}
@@ -937,7 +929,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
num_rxq = rxq_grp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, total++) {
- struct idpf_queue *rxq;
+ struct idpf_rx_queue *rxq;
if (is_splitq)
rxq = &rxq_grp->splitq.rxq_sets[j]->rxq;
@@ -946,93 +938,77 @@ static void idpf_get_ethtool_stats(struct net_device *netdev,
if (!rxq)
idpf_add_empty_queue_stats(&data, qtype);
else
- idpf_add_queue_stats(&data, rxq);
-
- /* In splitq mode, don't get page pool stats here since
- * the pools are attached to the buffer queues
- */
- if (is_splitq)
- continue;
-
- if (rxq)
- page_pool_get_stats(rxq->pp, &pp_stats);
- }
- }
-
- for (i = 0; i < vport->num_rxq_grp; i++) {
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
- struct idpf_queue *rxbufq =
- &vport->rxq_grps[i].splitq.bufq_sets[j].bufq;
-
- page_pool_get_stats(rxbufq->pp, &pp_stats);
+ idpf_add_queue_stats(&data, rxq, qtype);
}
}
for (; total < vport_config->max_q.max_rxq; total++)
idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX);
- page_pool_ethtool_stats_get(data, &pp_stats);
-
rcu_read_unlock();
idpf_vport_ctrl_unlock(netdev);
}
/**
- * idpf_find_rxq - find rxq from q index
+ * idpf_find_rxq_vec - find rxq vector from q index
* @vport: virtual port associated to queue
* @q_num: q index used to find queue
*
- * returns pointer to rx queue
+ * returns pointer to rx vector
*/
-static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num)
+static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
+ int q_num)
{
int q_grp, q_idx;
if (!idpf_is_queue_model_split(vport->rxq_model))
- return vport->rxq_grps->singleq.rxqs[q_num];
+ return vport->rxq_grps->singleq.rxqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP;
- return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq;
+ return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector;
}
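
The split-queue lookup is plain div/mod arithmetic over the per-group queue count. Assuming IDPF_DFLT_SPLITQ_RXQ_PER_GROUP were 4 (an illustrative value, not taken from this patch), q_num 6 would resolve to group 1, index 2:

	q_grp = 6 / 4;	/* == 1: second rxq group */
	q_idx = 6 % 4;	/* == 2: third rxq set within that group */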
/**
- * idpf_find_txq - find txq from q index
+ * idpf_find_txq_vec - find txq vector from q index
* @vport: virtual port associated to queue
* @q_num: q index used to find queue
*
- * returns pointer to tx queue
+ * returns pointer to tx vector
*/
-static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num)
+static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport,
+ int q_num)
{
int q_grp;
if (!idpf_is_queue_model_split(vport->txq_model))
- return vport->txqs[q_num];
+ return vport->txqs[q_num]->q_vector;
q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP;
- return vport->txq_grps[q_grp].complq;
+ return vport->txq_grps[q_grp].complq->q_vector;
}
/**
* __idpf_get_q_coalesce - get ITR values for specific queue
* @ec: ethtool structure to fill with driver's coalesce settings
- * @q: quuee of Rx or Tx
+ * @q_vector: queue vector corresponding to this queue
+ * @type: queue type
*/
static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec,
- struct idpf_queue *q)
+ const struct idpf_q_vector *q_vector,
+ enum virtchnl2_queue_type type)
{
- if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) {
+ if (type == VIRTCHNL2_QUEUE_TYPE_RX) {
ec->use_adaptive_rx_coalesce =
- IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode);
- ec->rx_coalesce_usecs = q->q_vector->rx_itr_value;
+ IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode);
+ ec->rx_coalesce_usecs = q_vector->rx_itr_value;
} else {
ec->use_adaptive_tx_coalesce =
- IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode);
- ec->tx_coalesce_usecs = q->q_vector->tx_itr_value;
+ IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode);
+ ec->tx_coalesce_usecs = q_vector->tx_itr_value;
}
}
@@ -1048,8 +1024,8 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
u32 q_num)
{
- struct idpf_netdev_priv *np = netdev_priv(netdev);
- struct idpf_vport *vport;
+ const struct idpf_netdev_priv *np = netdev_priv(netdev);
+ const struct idpf_vport *vport;
int err = 0;
idpf_vport_ctrl_lock(netdev);
@@ -1064,10 +1040,12 @@ static int idpf_get_q_coalesce(struct net_device *netdev,
}
if (q_num < vport->num_rxq)
- __idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num));
+ __idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num),
+ VIRTCHNL2_QUEUE_TYPE_RX);
if (q_num < vport->num_txq)
- __idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num));
+ __idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num),
+ VIRTCHNL2_QUEUE_TYPE_TX);
unlock_mutex:
idpf_vport_ctrl_unlock(netdev);
@@ -1111,16 +1089,15 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* __idpf_set_q_coalesce - set ITR values for specific queue
* @ec: ethtool structure from user to update ITR settings
- * @q: queue for which itr values has to be set
+ * @qv: queue vector for which ITR values have to be set
* @is_rxq: is queue type rx
*
* Returns 0 on success, negative otherwise.
*/
-static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
- struct idpf_queue *q, bool is_rxq)
+static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec,
+ struct idpf_q_vector *qv, bool is_rxq)
{
u32 use_adaptive_coalesce, coalesce_usecs;
- struct idpf_q_vector *qv = q->q_vector;
bool is_dim_ena = false;
u16 itr_val;
@@ -1136,7 +1113,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
itr_val = qv->tx_itr_value;
}
if (coalesce_usecs != itr_val && use_adaptive_coalesce) {
- netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
+ netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n");
return -EINVAL;
}
@@ -1145,7 +1122,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
return 0;
if (coalesce_usecs > IDPF_ITR_MAX) {
- netdev_err(q->vport->netdev,
+ netdev_err(qv->vport->netdev,
"Invalid value, %d-usecs range is 0-%d\n",
coalesce_usecs, IDPF_ITR_MAX);
@@ -1154,7 +1131,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
if (coalesce_usecs % 2) {
coalesce_usecs--;
- netdev_info(q->vport->netdev,
+ netdev_info(qv->vport->netdev,
"HW only supports even ITR values, ITR rounded to %d\n",
coalesce_usecs);
}
@@ -1193,15 +1170,16 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec,
*
* Return 0 on success, and negative on failure
*/
-static int idpf_set_q_coalesce(struct idpf_vport *vport,
- struct ethtool_coalesce *ec,
+static int idpf_set_q_coalesce(const struct idpf_vport *vport,
+ const struct ethtool_coalesce *ec,
int q_num, bool is_rxq)
{
- struct idpf_queue *q;
+ struct idpf_q_vector *qv;
- q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num);
+ qv = is_rxq ? idpf_find_rxq_vec(vport, q_num) :
+ idpf_find_txq_vec(vport, q_num);
- if (q && __idpf_set_q_coalesce(ec, q, is_rxq))
+ if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq))
return -EINVAL;
return 0;
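
These helpers back ethtool's per-queue coalescing interface, so a single queue can be retuned without disturbing the rest, e.g. (illustrative invocation): ethtool --per-queue eth0 queue_mask 0x1 --coalesce rx-usecs 50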
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
index a5752dcab888..8c7f8ef8f1a1 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h
@@ -4,6 +4,8 @@
#ifndef _IDPF_LAN_TXRX_H_
#define _IDPF_LAN_TXRX_H_
+#include <linux/bits.h>
+
enum idpf_rss_hash {
IDPF_HASH_INVALID = 0,
/* Values 1 - 28 are reserved for future use */
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 5d3532c27d57..0b6c8fd5bc90 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -4,8 +4,7 @@
#include "idpf.h"
#include "idpf_virtchnl.h"
-static const struct net_device_ops idpf_netdev_ops_splitq;
-static const struct net_device_ops idpf_netdev_ops_singleq;
+static const struct net_device_ops idpf_netdev_ops;
/**
* idpf_init_vector_stack - Fill the MSIX vector stack with vector index
@@ -69,7 +68,7 @@ static void idpf_deinit_vector_stack(struct idpf_adapter *adapter)
static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
{
clear_bit(IDPF_MB_INTR_MODE, adapter->flags);
- free_irq(adapter->msix_entries[0].vector, adapter);
+ kfree(free_irq(adapter->msix_entries[0].vector, adapter));
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
}
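
The kfree(free_irq(...)) pattern above works because free_irq() returns the devname cookie originally passed to request_irq(), which lets the kasprintf()'d name be reclaimed without storing it in the vector struct. A self-contained sketch of the idiom (helper names are hypothetical):

#include <linux/interrupt.h>
#include <linux/slab.h>

static int demo_request(unsigned int irq, irq_handler_t handler, void *dev)
{
	char *name = kasprintf(GFP_KERNEL, "demo-%u", irq);
	int err;

	if (!name)
		return -ENOMEM;

	err = request_irq(irq, handler, 0, name, dev);
	if (err)
		kfree(name);	/* request failed, nobody owns the name now */

	return err;
}

static void demo_release(unsigned int irq, void *dev)
{
	/* free_irq() hands back the 'name' from demo_request() */
	kfree(free_irq(irq, dev));
}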
@@ -124,15 +123,14 @@ static void idpf_mb_irq_enable(struct idpf_adapter *adapter)
*/
static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter)
{
- struct idpf_q_vector *mb_vector = &adapter->mb_vector;
int irq_num, mb_vidx = 0, err;
+ char *name;
irq_num = adapter->msix_entries[mb_vidx].vector;
- mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
- dev_driver_string(&adapter->pdev->dev),
- "Mailbox", mb_vidx);
- err = request_irq(irq_num, adapter->irq_mb_handler, 0,
- mb_vector->name, adapter);
+ name = kasprintf(GFP_KERNEL, "%s-%s-%d",
+ dev_driver_string(&adapter->pdev->dev),
+ "Mailbox", mb_vidx);
+ err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"IRQ request for mailbox failed, error: %d\n", err);
@@ -765,10 +763,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
}
/* assign netdev_ops */
- if (idpf_is_queue_model_split(vport->txq_model))
- netdev->netdev_ops = &idpf_netdev_ops_splitq;
- else
- netdev->netdev_ops = &idpf_netdev_ops_singleq;
+ netdev->netdev_ops = &idpf_netdev_ops;
/* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ;
@@ -905,8 +900,8 @@ static void idpf_vport_stop(struct idpf_vport *vport)
vport->link_up = false;
idpf_vport_intr_deinit(vport);
- idpf_vport_intr_rel(vport);
idpf_vport_queues_rel(vport);
+ idpf_vport_intr_rel(vport);
np->state = __IDPF_VPORT_DOWN;
}
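
Note the stop path now tears down in the reverse of the new bring-up order: the queues, which are allocated after the interrupt vectors, are released before them.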
@@ -946,6 +941,9 @@ static void idpf_decfg_netdev(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
+ kfree(vport->rx_ptype_lkup);
+ vport->rx_ptype_lkup = NULL;
+
unregister_netdev(vport->netdev);
free_netdev(vport->netdev);
vport->netdev = NULL;
@@ -1318,14 +1316,14 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
if (idpf_is_queue_model_split(vport->rxq_model)) {
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
- struct idpf_queue *q =
+ const struct idpf_buf_queue *q =
&grp->splitq.bufq_sets[j].bufq;
writel(q->next_to_alloc, q->tail);
}
} else {
for (j = 0; j < grp->singleq.num_rxq; j++) {
- struct idpf_queue *q =
+ const struct idpf_rx_queue *q =
grp->singleq.rxqs[j];
writel(q->next_to_alloc, q->tail);
@@ -1337,9 +1335,8 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
/**
* idpf_vport_open - Bring up a vport
* @vport: vport to bring up
- * @alloc_res: allocate queue resources
*/
-static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
+static int idpf_vport_open(struct idpf_vport *vport)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct idpf_adapter *adapter = vport->adapter;
@@ -1352,48 +1349,47 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
/* we do not allow interface up just yet */
netif_carrier_off(vport->netdev);
- if (alloc_res) {
- err = idpf_vport_queues_alloc(vport);
- if (err)
- return err;
- }
-
err = idpf_vport_intr_alloc(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to allocate interrupts for vport %u: %d\n",
vport->vport_id, err);
- goto queues_rel;
+ return err;
}
+ err = idpf_vport_queues_alloc(vport);
+ if (err)
+ goto intr_rel;
+
err = idpf_vport_queue_ids_init(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue ids for vport %u: %d\n",
vport->vport_id, err);
- goto intr_rel;
+ goto queues_rel;
}
err = idpf_vport_intr_init(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize interrupts for vport %u: %d\n",
vport->vport_id, err);
- goto intr_rel;
+ goto queues_rel;
}
err = idpf_rx_bufs_init_all(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
vport->vport_id, err);
- goto intr_rel;
+ goto queues_rel;
}
err = idpf_queue_reg_init(vport);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
vport->vport_id, err);
- goto intr_rel;
+ goto queues_rel;
}
idpf_rx_init_buf_tail(vport);
+ idpf_vport_intr_ena(vport);
err = idpf_send_config_queues_msg(vport);
if (err) {
@@ -1456,10 +1452,10 @@ unmap_queue_vectors:
idpf_send_map_unmap_queue_vector_msg(vport, false);
intr_deinit:
idpf_vport_intr_deinit(vport);
-intr_rel:
- idpf_vport_intr_rel(vport);
queues_rel:
idpf_vport_queues_rel(vport);
+intr_rel:
+ idpf_vport_intr_rel(vport);
return err;
}
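
The relabelled unwind follows the usual reverse-order goto idiom: each label undoes exactly what was acquired before the failing step, in the opposite order. A stripped-down sketch with hypothetical helpers:

static int demo_open(struct demo_priv *d)
{
	int err;

	err = demo_intr_alloc(d);	/* acquired first ... */
	if (err)
		return err;

	err = demo_queues_alloc(d);	/* ... queues second */
	if (err)
		goto intr_rel;

	err = demo_start(d);
	if (err)
		goto queues_rel;

	return 0;

queues_rel:
	demo_queues_rel(d);		/* released first ... */
intr_rel:
	demo_intr_rel(d);		/* ... vectors last */
	return err;
}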
@@ -1540,7 +1536,7 @@ void idpf_init_task(struct work_struct *work)
np = netdev_priv(vport->netdev);
np->state = __IDPF_VPORT_DOWN;
if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
- idpf_vport_open(vport, true);
+ idpf_vport_open(vport);
/* Spawn and return 'idpf_init_task' work queue until all the
* default vports are created
@@ -1854,7 +1850,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_state current_state = np->state;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vport *new_vport;
- int err, i;
+ int err;
/* If the system is low on memory, we can end up in bad state if we
* free all the memory for queue resources and try to allocate them
@@ -1899,9 +1895,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
goto free_vport;
}
- err = idpf_vport_queues_alloc(new_vport);
- if (err)
- goto free_vport;
if (current_state <= __IDPF_VPORT_DOWN) {
idpf_send_delete_queues_msg(vport);
} else {
@@ -1928,62 +1921,28 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
*/
memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
- /* Since idpf_vport_queues_alloc was called with new_port, the queue
- * back pointers are currently pointing to the local new_vport. Reset
- * the backpointers to the original vport here
- */
- for (i = 0; i < vport->num_txq_grp; i++) {
- struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- int j;
-
- tx_qgrp->vport = vport;
- for (j = 0; j < tx_qgrp->num_txq; j++)
- tx_qgrp->txqs[j]->vport = vport;
-
- if (idpf_is_queue_model_split(vport->txq_model))
- tx_qgrp->complq->vport = vport;
- }
-
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
- struct idpf_queue *q;
- u16 num_rxq;
- int j;
-
- rx_qgrp->vport = vport;
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++)
- rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport;
-
- if (idpf_is_queue_model_split(vport->rxq_model))
- num_rxq = rx_qgrp->splitq.num_rxq_sets;
- else
- num_rxq = rx_qgrp->singleq.num_rxq;
-
- for (j = 0; j < num_rxq; j++) {
- if (idpf_is_queue_model_split(vport->rxq_model))
- q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- else
- q = rx_qgrp->singleq.rxqs[j];
- q->vport = vport;
- }
- }
-
if (reset_cause == IDPF_SR_Q_CHANGE)
idpf_vport_alloc_vec_indexes(vport);
err = idpf_set_real_num_queues(vport);
if (err)
- goto err_reset;
+ goto err_open;
if (current_state == __IDPF_VPORT_UP)
- err = idpf_vport_open(vport, false);
+ err = idpf_vport_open(vport);
kfree(new_vport);
return err;
err_reset:
- idpf_vport_queues_rel(new_vport);
+ idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
+ vport->num_rxq, vport->num_bufq);
+
+err_open:
+ if (current_state == __IDPF_VPORT_UP)
+ idpf_vport_open(vport);
+
free_vport:
kfree(new_vport);
@@ -2212,7 +2171,7 @@ static int idpf_open(struct net_device *netdev)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- err = idpf_vport_open(vport, true);
+ err = idpf_vport_open(vport);
idpf_vport_ctrl_unlock(netdev);
@@ -2234,7 +2193,7 @@ static int idpf_change_mtu(struct net_device *netdev, int new_mtu)
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE);
@@ -2392,24 +2351,10 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
mem->pa = 0;
}
-static const struct net_device_ops idpf_netdev_ops_splitq = {
- .ndo_open = idpf_open,
- .ndo_stop = idpf_stop,
- .ndo_start_xmit = idpf_tx_splitq_start,
- .ndo_features_check = idpf_features_check,
- .ndo_set_rx_mode = idpf_set_rx_mode,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = idpf_set_mac,
- .ndo_change_mtu = idpf_change_mtu,
- .ndo_get_stats64 = idpf_get_stats64,
- .ndo_set_features = idpf_set_features,
- .ndo_tx_timeout = idpf_tx_timeout,
-};
-
-static const struct net_device_ops idpf_netdev_ops_singleq = {
+static const struct net_device_ops idpf_netdev_ops = {
.ndo_open = idpf_open,
.ndo_stop = idpf_stop,
- .ndo_start_xmit = idpf_tx_singleq_start,
+ .ndo_start_xmit = idpf_tx_start,
.ndo_features_check = idpf_features_check,
.ndo_set_rx_mode = idpf_set_rx_mode,
.ndo_validate_addr = eth_validate_addr,
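
With a single shared ops table, the queue-model dispatch has to move into the xmit path itself. The unified idpf_tx_start() is not part of this hunk; presumably it folds the removed singleq wrapper and its splitq twin into something like the sketch below (reconstructed from the removed variant; the splitq frame handler name is an assumption):

static netdev_tx_t idpf_tx_start(struct sk_buff *skb,
				 struct net_device *netdev)
{
	const struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
	struct idpf_tx_queue *tx_q;

	tx_q = vport->txqs[skb_get_queue_mapping(skb)];

	/* hardware can't handle really short frames, hardware padding
	 * works beyond this point
	 */
	if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) {
		idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
		return NETDEV_TX_OK;
	}

	if (idpf_is_queue_model_split(vport->txq_model))
		return idpf_tx_splitq_frame(skb, tx_q);	/* assumed name */
	else
		return idpf_tx_singleq_frame(skb, tx_q);
}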
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index f784eea044bd..db476b3314c8 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -8,6 +8,7 @@
#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_IMPORT_NS(LIBETH);
MODULE_LICENSE("GPL");
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
index 27b93592c4ba..fe64febf7436 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <net/libeth/rx.h>
+
#include "idpf.h"
/**
@@ -186,7 +188,7 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb,
* and gets a physical address for each memory location and programs
* it and the length into the transmit base mode descriptor.
*/
-static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
+static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q,
struct idpf_tx_buf *first,
struct idpf_tx_offload_params *offloads)
{
@@ -205,12 +207,12 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
data_len = skb->data_len;
size = skb_headlen(skb);
- tx_desc = IDPF_BASE_TX_DESC(tx_q, i);
+ tx_desc = &tx_q->base_tx[i];
dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
/* write each descriptor with CRC bit */
- if (tx_q->vport->crc_enable)
+ if (idpf_queue_has(CRC_EN, tx_q))
td_cmd |= IDPF_TX_DESC_CMD_ICRC;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
@@ -239,7 +241,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
i++;
if (i == tx_q->desc_count) {
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
i = 0;
}
@@ -259,7 +261,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
i++;
if (i == tx_q->desc_count) {
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
i = 0;
}
@@ -285,7 +287,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
/* set next_to_watch value indicating a packet is present */
first->next_to_watch = tx_desc;
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
netdev_tx_sent_queue(nq, first->bytecount);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
@@ -299,7 +301,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q,
* ring entry to reflect that this index is a context descriptor
*/
static struct idpf_base_tx_ctx_desc *
-idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
+idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq)
{
struct idpf_base_tx_ctx_desc *ctx_desc;
int ntu = txq->next_to_use;
@@ -307,7 +309,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf));
txq->tx_buf[ntu].ctx_entry = true;
- ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu);
+ ctx_desc = &txq->base_ctx[ntu];
IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu);
txq->next_to_use = ntu;
@@ -320,7 +322,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq)
* @txq: queue to send buffer on
* @offload: offload parameter structure
**/
-static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
+static void idpf_tx_singleq_build_ctx_desc(struct idpf_tx_queue *txq,
struct idpf_tx_offload_params *offload)
{
struct idpf_base_tx_ctx_desc *desc = idpf_tx_singleq_get_ctx_desc(txq);
@@ -333,7 +335,7 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss);
u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.tx.lso_pkts);
+ u64_stats_inc(&txq->q_stats.lso_pkts);
u64_stats_update_end(&txq->stats_sync);
}
@@ -351,8 +353,8 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq,
*
* Returns NETDEV_TX_OK if sent, else an error code
*/
-static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
- struct idpf_queue *tx_q)
+netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+ struct idpf_tx_queue *tx_q)
{
struct idpf_tx_offload_params offload = { };
struct idpf_tx_buf *first;
@@ -409,53 +411,25 @@ out_drop:
}
/**
- * idpf_tx_singleq_start - Selects the right Tx queue to send buffer
- * @skb: send buffer
- * @netdev: network interface device structure
- *
- * Returns NETDEV_TX_OK if sent, else an error code
- */
-netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
- struct net_device *netdev)
-{
- struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_queue *tx_q;
-
- tx_q = vport->txqs[skb_get_queue_mapping(skb)];
-
- /* hardware can't handle really short frames, hardware padding works
- * beyond this point
- */
- if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) {
- idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
-
- return NETDEV_TX_OK;
- }
-
- return idpf_tx_singleq_frame(skb, tx_q);
-}
-
-/**
* idpf_tx_singleq_clean - Reclaim resources from queue
* @tx_q: Tx queue to clean
* @napi_budget: Used to determine if we are in netpoll
* @cleaned: returns number of packets cleaned
*
*/
-static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
+static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget,
int *cleaned)
{
- unsigned int budget = tx_q->vport->compln_clean_budget;
unsigned int total_bytes = 0, total_pkts = 0;
struct idpf_base_tx_desc *tx_desc;
+ u32 budget = tx_q->clean_budget;
s16 ntc = tx_q->next_to_clean;
struct idpf_netdev_priv *np;
struct idpf_tx_buf *tx_buf;
- struct idpf_vport *vport;
struct netdev_queue *nq;
bool dont_wake;
- tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc);
+ tx_desc = &tx_q->base_tx[ntc];
tx_buf = &tx_q->tx_buf[ntc];
ntc -= tx_q->desc_count;
@@ -517,7 +491,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget,
if (unlikely(!ntc)) {
ntc -= tx_q->desc_count;
tx_buf = tx_q->tx_buf;
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
}
/* unmap any remaining paged data */
@@ -540,7 +514,7 @@ fetch_next_txq_desc:
if (unlikely(!ntc)) {
ntc -= tx_q->desc_count;
tx_buf = tx_q->tx_buf;
- tx_desc = IDPF_BASE_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->base_tx[0];
}
} while (likely(budget));
@@ -550,16 +524,15 @@ fetch_next_txq_desc:
*cleaned += total_pkts;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_add(&tx_q->q_stats.tx.packets, total_pkts);
- u64_stats_add(&tx_q->q_stats.tx.bytes, total_bytes);
+ u64_stats_add(&tx_q->q_stats.packets, total_pkts);
+ u64_stats_add(&tx_q->q_stats.bytes, total_bytes);
u64_stats_update_end(&tx_q->stats_sync);
- vport = tx_q->vport;
- np = netdev_priv(vport->netdev);
- nq = netdev_get_tx_queue(vport->netdev, tx_q->idx);
+ np = netdev_priv(tx_q->netdev);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
dont_wake = np->state != __IDPF_VPORT_UP ||
- !netif_carrier_ok(vport->netdev);
+ !netif_carrier_ok(tx_q->netdev);
__netif_txq_completed_wake(nq, total_pkts, total_bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
dont_wake);
@@ -584,7 +557,7 @@ static bool idpf_tx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
budget_per_q = num_txq ? max(budget / num_txq, 1) : 0;
for (i = 0; i < num_txq; i++) {
- struct idpf_queue *q;
+ struct idpf_tx_queue *q;
q = q_vec->tx[i];
clean_complete &= idpf_tx_singleq_clean(q, budget_per_q,
@@ -614,14 +587,9 @@ static bool idpf_rx_singleq_test_staterr(const union virtchnl2_rx_desc *rx_desc,
/**
* idpf_rx_singleq_is_non_eop - process handling of non-EOP buffers
- * @rxq: Rx ring being processed
* @rx_desc: Rx descriptor for current buffer
- * @skb: Current socket buffer containing buffer in progress
- * @ntc: next to clean
*/
-static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq,
- union virtchnl2_rx_desc *rx_desc,
- struct sk_buff *skb, u16 ntc)
+static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
if (likely(idpf_rx_singleq_test_staterr(rx_desc, IDPF_RXD_EOF_SINGLEQ)))
@@ -635,98 +603,82 @@ static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq,
* @rxq: Rx ring being processed
* @skb: skb currently being received and modified
* @csum_bits: checksum bits from descriptor
- * @ptype: the packet type decoded by hardware
+ * @decoded: the packet type decoded by hardware
*
* skb->protocol must be set before this function is called
*/
-static void idpf_rx_singleq_csum(struct idpf_queue *rxq, struct sk_buff *skb,
- struct idpf_rx_csum_decoded *csum_bits,
- u16 ptype)
+static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq,
+ struct sk_buff *skb,
+ struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_pt decoded)
{
- struct idpf_rx_ptype_decoded decoded;
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (unlikely(!(rxq->vport->netdev->features & NETIF_F_RXCSUM)))
+ if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
return;
/* check if HW has decoded the packet and checksum */
- if (unlikely(!(csum_bits->l3l4p)))
- return;
-
- decoded = rxq->vport->rx_ptype_lkup[ptype];
- if (unlikely(!(decoded.known && decoded.outer_ip)))
+ if (unlikely(!csum_bits.l3l4p))
return;
- ipv4 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV4);
- ipv6 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
/* Check if there were any checksum errors */
- if (unlikely(ipv4 && (csum_bits->ipe || csum_bits->eipe)))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
/* Device could not do any checksum offload for certain extension
* headers as indicated by setting IPV6EXADD bit
*/
- if (unlikely(ipv6 && csum_bits->ipv6exadd))
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* check for L4 errors and handle packets that were not able to be
* checksummed due to arrival speed
*/
- if (unlikely(csum_bits->l4e))
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
- if (unlikely(csum_bits->nat && csum_bits->eudpe))
+ if (unlikely(csum_bits.nat && csum_bits.eudpe))
goto checksum_fail;
/* Handle packets that were not able to be checksummed due to arrival
* speed, in this case the stack can compute the csum.
*/
- if (unlikely(csum_bits->pprs))
+ if (unlikely(csum_bits.pprs))
return;
/* If there is an outer header present that might contain a checksum
* we need to bump the checksum level by 1 to reflect the fact that
* we are indicating we validated the inner checksum.
*/
- if (decoded.tunnel_type >= IDPF_RX_PTYPE_TUNNEL_IP_GRENAT)
+ if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT)
skb->csum_level = 1;
- /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
- switch (decoded.inner_prot) {
- case IDPF_RX_PTYPE_INNER_PROT_ICMP:
- case IDPF_RX_PTYPE_INNER_PROT_TCP:
- case IDPF_RX_PTYPE_INNER_PROT_UDP:
- case IDPF_RX_PTYPE_INNER_PROT_SCTP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- return;
- default:
- return;
- }
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ return;
checksum_fail:
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
+ u64_stats_inc(&rxq->q_stats.hw_csum_err);
u64_stats_update_end(&rxq->stats_sync);
}
/**
* idpf_rx_singleq_base_csum - Indicate in skb if hw indicated a good cksum
- * @rx_q: Rx completion queue
- * @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
- * @ptype: Rx packet type
*
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format.
+ *
+ * Return: parsed checksum status.
**/
-static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q,
- struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+static struct idpf_rx_csum_decoded
+idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits;
+ struct idpf_rx_csum_decoded csum_bits = { };
u32 rx_error, rx_status;
u64 qword;
@@ -745,28 +697,23 @@ static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q,
rx_status);
csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M,
rx_status);
- csum_bits.nat = 0;
- csum_bits.eudpe = 0;
- idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
+ return csum_bits;
}
/**
* idpf_rx_singleq_flex_csum - Indicate in skb if hw indicated a good cksum
- * @rx_q: Rx completion queue
- * @skb: skb currently being received and modified
* @rx_desc: the receive descriptor
- * @ptype: Rx packet type
*
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format.
+ *
+ * Return: parsed checksum status.
**/
-static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
- struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+static struct idpf_rx_csum_decoded
+idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits;
+ struct idpf_rx_csum_decoded csum_bits = { };
u16 rx_status0, rx_status1;
rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0);
@@ -786,9 +733,8 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
rx_status0);
csum_bits.nat = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M,
rx_status1);
- csum_bits.pprs = 0;
- idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype);
+ return csum_bits;
}
/**
@@ -801,14 +747,14 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format.
**/
-static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
+static void idpf_rx_singleq_base_hash(struct idpf_rx_queue *rx_q,
struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+ const union virtchnl2_rx_desc *rx_desc,
+ struct libeth_rx_pt decoded)
{
u64 mask, qw1;
- if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
+ if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
return;
mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M;
@@ -817,7 +763,7 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
if (FIELD_GET(mask, qw1) == mask) {
u32 hash = le32_to_cpu(rx_desc->base_wb.qword0.hi_dword.rss);
- skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
}
@@ -831,18 +777,20 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format.
**/
-static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q,
+static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q,
struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+ const union virtchnl2_rx_desc *rx_desc,
+ struct libeth_rx_pt decoded)
{
- if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH)))
+ if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded))
return;
if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M,
- le16_to_cpu(rx_desc->flex_nic_wb.status_error0)))
- skb_set_hash(skb, le32_to_cpu(rx_desc->flex_nic_wb.rss_hash),
- idpf_ptype_to_htype(decoded));
+ le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) {
+ u32 hash = le32_to_cpu(rx_desc->flex_nic_wb.rss_hash);
+
+ libeth_rx_pt_set_hash(skb, hash, decoded);
+ }
}
/**
@@ -857,25 +805,45 @@ static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q,
* order to populate the hash, checksum, VLAN, protocol, and
* other fields within the skb.
*/
-static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q,
- struct sk_buff *skb,
- union virtchnl2_rx_desc *rx_desc,
- u16 ptype)
+static void
+idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q,
+ struct sk_buff *skb,
+ const union virtchnl2_rx_desc *rx_desc,
+ u16 ptype)
{
- struct idpf_rx_ptype_decoded decoded =
- rx_q->vport->rx_ptype_lkup[ptype];
+ struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype];
+ struct idpf_rx_csum_decoded csum_bits;
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_q->vport->netdev);
+ skb->protocol = eth_type_trans(skb, rx_q->netdev);
/* Check if we're using base mode descriptor IDs */
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) {
- idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded);
- idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype);
+ idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, decoded);
+ csum_bits = idpf_rx_singleq_base_csum(rx_desc);
} else {
- idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded);
- idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype);
+ idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, decoded);
+ csum_bits = idpf_rx_singleq_flex_csum(rx_desc);
}
+
+ idpf_rx_singleq_csum(rx_q, skb, csum_bits, decoded);
+ skb_record_rx_queue(skb, rx_q->idx);
+}
+
+/**
+ * idpf_rx_buf_hw_update - Store the new tail and head values
+ * @rxq: queue to bump
+ * @val: new head index
+ */
+static void idpf_rx_buf_hw_update(struct idpf_rx_queue *rxq, u32 val)
+{
+ rxq->next_to_use = val;
+
+ if (unlikely(!rxq->tail))
+ return;
+
+ /* writel has an implicit memory barrier */
+ writel(val, rxq->tail);
}
/**
@@ -885,24 +853,28 @@ static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q,
*
* Returns false if all allocations were successful, true if any fail
*/
-bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
+bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q,
u16 cleaned_count)
{
struct virtchnl2_singleq_rx_buf_desc *desc;
+ const struct libeth_fq_fp fq = {
+ .pp = rx_q->pp,
+ .fqes = rx_q->rx_buf,
+ .truesize = rx_q->truesize,
+ .count = rx_q->desc_count,
+ };
u16 nta = rx_q->next_to_alloc;
- struct idpf_rx_buf *buf;
if (!cleaned_count)
return false;
- desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta);
- buf = &rx_q->rx_buf.buf[nta];
+ desc = &rx_q->single_buf[nta];
do {
dma_addr_t addr;
- addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size);
- if (unlikely(addr == DMA_MAPPING_ERROR))
+ addr = libeth_rx_alloc(&fq, nta);
+ if (addr == DMA_MAPPING_ERROR)
break;
/* Refresh the desc even if buffer_addrs didn't change
@@ -912,11 +884,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
desc->hdr_addr = 0;
desc++;
- buf++;
nta++;
if (unlikely(nta == rx_q->desc_count)) {
- desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0);
- buf = rx_q->rx_buf.buf;
+ desc = &rx_q->single_buf[0];
nta = 0;
}
@@ -933,7 +903,6 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
/**
* idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor
- * @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
*
@@ -943,9 +912,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte
* descriptor writeback format.
*/
-static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+static void
+idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
{
u64 qword;
@@ -957,7 +926,6 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
/**
* idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor
- * @rx_q: Rx descriptor queue
* @rx_desc: the descriptor to process
* @fields: storage for extracted values
*
@@ -967,9 +935,9 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q,
* This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible
* descriptor writeback format.
*/
-static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+static void
+idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
{
fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M,
le16_to_cpu(rx_desc->flex_nic_wb.pkt_len));
@@ -984,14 +952,15 @@ static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q,
* @fields: storage for extracted values
*
*/
-static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q,
- union virtchnl2_rx_desc *rx_desc,
- struct idpf_rx_extracted *fields)
+static void
+idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q,
+ const union virtchnl2_rx_desc *rx_desc,
+ struct idpf_rx_extracted *fields)
{
if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M)
- idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields);
+ idpf_rx_singleq_extract_base_fields(rx_desc, fields);
else
- idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields);
+ idpf_rx_singleq_extract_flex_fields(rx_desc, fields);
}
/**
@@ -1001,7 +970,7 @@ static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q,
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
+static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0;
struct sk_buff *skb = rx_q->skb;
@@ -1016,7 +985,7 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
struct idpf_rx_buf *rx_buf;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
- rx_desc = IDPF_RX_DESC(rx_q, ntc);
+ rx_desc = &rx_q->rx[ntc];
/* status_error_ptype_len will always be zero for unused
* descriptors because it's cleared in cleanup, and overlaps
@@ -1036,29 +1005,27 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget)
idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields);
- rx_buf = &rx_q->rx_buf.buf[ntc];
- if (!fields.size) {
- idpf_rx_put_page(rx_buf);
+ rx_buf = &rx_q->rx_buf[ntc];
+ if (!libeth_rx_sync_for_cpu(rx_buf, fields.size))
goto skip_data;
- }
- idpf_rx_sync_for_cpu(rx_buf, fields.size);
if (skb)
idpf_rx_add_frag(rx_buf, skb, fields.size);
else
- skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size);
+ skb = idpf_rx_build_skb(rx_buf, fields.size);
/* exit if we failed to retrieve a buffer */
if (!skb)
break;
skip_data:
- IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
+ rx_buf->page = NULL;
+ IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc);
cleaned_count++;
/* skip if it is non EOP desc */
- if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc))
+ if (idpf_rx_singleq_is_non_eop(rx_desc) || unlikely(!skb))
continue;
#define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \
@@ -1084,7 +1051,7 @@ skip_data:
rx_desc, fields.rx_ptype);
/* send completed skb up the stack */
- napi_gro_receive(&rx_q->q_vector->napi, skb);
+ napi_gro_receive(rx_q->pp->p.napi, skb);
skb = NULL;
/* update budget accounting */
@@ -1095,12 +1062,13 @@ skip_data:
rx_q->next_to_clean = ntc;
+ page_pool_nid_changed(rx_q->pp, numa_mem_id());
if (cleaned_count)
failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count);
u64_stats_update_begin(&rx_q->stats_sync);
- u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts);
- u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes);
+ u64_stats_add(&rx_q->q_stats.packets, total_rx_pkts);
+ u64_stats_add(&rx_q->q_stats.bytes, total_rx_bytes);
u64_stats_update_end(&rx_q->stats_sync);
/* guarantee a trip back through this routine if there was a failure */
@@ -1127,7 +1095,7 @@ static bool idpf_rx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget,
*/
budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
for (i = 0; i < num_rxq; i++) {
- struct idpf_queue *rxq = q_vec->rx[i];
+ struct idpf_rx_queue *rxq = q_vec->rx[i];
int pkts_cleaned_per_q;
pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index f5bc4a278074..585c3dadd9bf 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -1,9 +1,14 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <net/libeth/rx.h>
+
#include "idpf.h"
#include "idpf_virtchnl.h"
+static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
+ unsigned int count);
+
/**
* idpf_buf_lifo_push - push a buffer pointer onto stack
* @stack: pointer to stack struct
@@ -60,7 +65,8 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
* @tx_q: the queue that owns the buffer
* @tx_buf: the buffer to free
*/
-static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf)
+static void idpf_tx_buf_rel(struct idpf_tx_queue *tx_q,
+ struct idpf_tx_buf *tx_buf)
{
if (tx_buf->skb) {
if (dma_unmap_len(tx_buf, len))
@@ -86,8 +92,9 @@ static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf)
* idpf_tx_buf_rel_all - Free any empty Tx buffers
* @txq: queue to be cleaned
*/
-static void idpf_tx_buf_rel_all(struct idpf_queue *txq)
+static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq)
{
+ struct idpf_buf_lifo *buf_stack;
u16 i;
/* Buffers already cleared, nothing to do */
@@ -101,39 +108,58 @@ static void idpf_tx_buf_rel_all(struct idpf_queue *txq)
kfree(txq->tx_buf);
txq->tx_buf = NULL;
- if (!txq->buf_stack.bufs)
+ if (!idpf_queue_has(FLOW_SCH_EN, txq))
return;
- for (i = 0; i < txq->buf_stack.size; i++)
- kfree(txq->buf_stack.bufs[i]);
+ buf_stack = &txq->stash->buf_stack;
+ if (!buf_stack->bufs)
+ return;
- kfree(txq->buf_stack.bufs);
- txq->buf_stack.bufs = NULL;
+ for (i = 0; i < buf_stack->size; i++)
+ kfree(buf_stack->bufs[i]);
+
+ kfree(buf_stack->bufs);
+ buf_stack->bufs = NULL;
}
/**
* idpf_tx_desc_rel - Free Tx resources per queue
* @txq: Tx descriptor ring for a specific queue
- * @bufq: buffer q or completion q
*
* Free all transmit software resources
*/
-static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq)
+static void idpf_tx_desc_rel(struct idpf_tx_queue *txq)
{
- if (bufq)
- idpf_tx_buf_rel_all(txq);
+ idpf_tx_buf_rel_all(txq);
if (!txq->desc_ring)
return;
dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma);
txq->desc_ring = NULL;
- txq->next_to_alloc = 0;
txq->next_to_use = 0;
txq->next_to_clean = 0;
}
/**
+ * idpf_compl_desc_rel - Free completion resources per queue
+ * @complq: completion queue
+ *
+ * Free all completion software resources.
+ */
+static void idpf_compl_desc_rel(struct idpf_compl_queue *complq)
+{
+ if (!complq->comp)
+ return;
+
+ dma_free_coherent(complq->netdev->dev.parent, complq->size,
+ complq->comp, complq->dma);
+ complq->comp = NULL;
+ complq->next_to_use = 0;
+ complq->next_to_clean = 0;
+}
+
+/**
* idpf_tx_desc_rel_all - Free Tx Resources for All Queues
* @vport: virtual port structure
*
@@ -150,10 +176,10 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
for (j = 0; j < txq_grp->num_txq; j++)
- idpf_tx_desc_rel(txq_grp->txqs[j], true);
+ idpf_tx_desc_rel(txq_grp->txqs[j]);
if (idpf_is_queue_model_split(vport->txq_model))
- idpf_tx_desc_rel(txq_grp->complq, false);
+ idpf_compl_desc_rel(txq_grp->complq);
}
}
@@ -163,8 +189,9 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport)
*
* Returns 0 on success, negative on failure
*/
-static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
+static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q)
{
+ struct idpf_buf_lifo *buf_stack;
int buf_size;
int i;
@@ -180,22 +207,26 @@ static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
for (i = 0; i < tx_q->desc_count; i++)
tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
+ if (!idpf_queue_has(FLOW_SCH_EN, tx_q))
+ return 0;
+
+ buf_stack = &tx_q->stash->buf_stack;
+
/* Initialize tx buf stack for out-of-order completions if
* flow scheduling offload is enabled
*/
- tx_q->buf_stack.bufs =
- kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *),
- GFP_KERNEL);
- if (!tx_q->buf_stack.bufs)
+ buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs),
+ GFP_KERNEL);
+ if (!buf_stack->bufs)
return -ENOMEM;
- tx_q->buf_stack.size = tx_q->desc_count;
- tx_q->buf_stack.top = tx_q->desc_count;
+ buf_stack->size = tx_q->desc_count;
+ buf_stack->top = tx_q->desc_count;
for (i = 0; i < tx_q->desc_count; i++) {
- tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]),
- GFP_KERNEL);
- if (!tx_q->buf_stack.bufs[i])
+ buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]),
+ GFP_KERNEL);
+ if (!buf_stack->bufs[i])
return -ENOMEM;
}
@@ -204,28 +235,22 @@ static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q)
/**
* idpf_tx_desc_alloc - Allocate the Tx descriptors
+ * @vport: vport to allocate resources for
* @tx_q: the tx ring to set up
- * @bufq: buffer or completion queue
*
* Returns 0 on success, negative on failure
*/
-static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq)
+static int idpf_tx_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_tx_queue *tx_q)
{
struct device *dev = tx_q->dev;
- u32 desc_sz;
int err;
- if (bufq) {
- err = idpf_tx_buf_alloc_all(tx_q);
- if (err)
- goto err_alloc;
-
- desc_sz = sizeof(struct idpf_base_tx_desc);
- } else {
- desc_sz = sizeof(struct idpf_splitq_tx_compl_desc);
- }
+ err = idpf_tx_buf_alloc_all(tx_q);
+ if (err)
+ goto err_alloc;
- tx_q->size = tx_q->desc_count * desc_sz;
+ tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx);
/* Allocate descriptors also round up to nearest 4K */
tx_q->size = ALIGN(tx_q->size, 4096);
@@ -238,20 +263,44 @@ static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq)
goto err_alloc;
}
- tx_q->next_to_alloc = 0;
tx_q->next_to_use = 0;
tx_q->next_to_clean = 0;
- set_bit(__IDPF_Q_GEN_CHK, tx_q->flags);
+ idpf_queue_set(GEN_CHK, tx_q);
return 0;
err_alloc:
- idpf_tx_desc_rel(tx_q, bufq);
+ idpf_tx_desc_rel(tx_q);
return err;
}
/**
+ * idpf_compl_desc_alloc - allocate completion descriptors
+ * @vport: vport to allocate resources for
+ * @complq: completion queue to set up
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_compl_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_compl_queue *complq)
+{
+ complq->size = array_size(complq->desc_count, sizeof(*complq->comp));
+
+ complq->comp = dma_alloc_coherent(complq->netdev->dev.parent,
+ complq->size, &complq->dma,
+ GFP_KERNEL);
+ if (!complq->comp)
+ return -ENOMEM;
+
+ complq->next_to_use = 0;
+ complq->next_to_clean = 0;
+ idpf_queue_set(GEN_CHK, complq);
+
+ return 0;
+}
+
+/**
* idpf_tx_desc_alloc_all - allocate all queues Tx resources
* @vport: virtual port private structure
*
@@ -259,7 +308,6 @@ err_alloc:
*/
static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
{
- struct device *dev = &vport->adapter->pdev->dev;
int err = 0;
int i, j;
@@ -268,13 +316,14 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
*/
for (i = 0; i < vport->num_txq_grp; i++) {
for (j = 0; j < vport->txq_grps[i].num_txq; j++) {
- struct idpf_queue *txq = vport->txq_grps[i].txqs[j];
+ struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j];
u8 gen_bits = 0;
u16 bufidx_mask;
- err = idpf_tx_desc_alloc(txq, true);
+ err = idpf_tx_desc_alloc(vport, txq);
if (err) {
- dev_err(dev, "Allocation for Tx Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Allocation for Tx Queue %u failed\n",
i);
goto err_out;
}
@@ -312,9 +361,10 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport)
continue;
/* Setup completion queues */
- err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false);
+ err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq);
if (err) {
- dev_err(dev, "Allocation for Tx Completion Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Allocation for Tx Completion Queue %u failed\n",
i);
goto err_out;
}
@@ -329,70 +379,97 @@ err_out:
/**
* idpf_rx_page_rel - Release an rx buffer page
- * @rxq: the queue that owns the buffer
* @rx_buf: the buffer to free
*/
-static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf)
+static void idpf_rx_page_rel(struct libeth_fqe *rx_buf)
{
if (unlikely(!rx_buf->page))
return;
- page_pool_put_full_page(rxq->pp, rx_buf->page, false);
+ page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false);
rx_buf->page = NULL;
- rx_buf->page_offset = 0;
+ rx_buf->offset = 0;
}
/**
* idpf_rx_hdr_buf_rel_all - Release header buffer memory
- * @rxq: queue to use
+ * @bufq: queue to use
*/
-static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq)
+static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq)
{
- struct idpf_adapter *adapter = rxq->vport->adapter;
+ struct libeth_fq fq = {
+ .fqes = bufq->hdr_buf,
+ .pp = bufq->hdr_pp,
+ };
+
+ for (u32 i = 0; i < bufq->desc_count; i++)
+ idpf_rx_page_rel(&bufq->hdr_buf[i]);
- dma_free_coherent(&adapter->pdev->dev,
- rxq->desc_count * IDPF_HDR_BUF_SIZE,
- rxq->rx_buf.hdr_buf_va,
- rxq->rx_buf.hdr_buf_pa);
- rxq->rx_buf.hdr_buf_va = NULL;
+ libeth_rx_fq_destroy(&fq);
+ bufq->hdr_buf = NULL;
+ bufq->hdr_pp = NULL;
}
/**
- * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue
- * @rxq: queue to be cleaned
+ * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue
+ * @bufq: queue to be cleaned
*/
-static void idpf_rx_buf_rel_all(struct idpf_queue *rxq)
+static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq)
{
- u16 i;
+ struct libeth_fq fq = {
+ .fqes = bufq->buf,
+ .pp = bufq->pp,
+ };
/* queue already cleared, nothing to do */
- if (!rxq->rx_buf.buf)
+ if (!bufq->buf)
return;
/* Free all the bufs allocated and given to hw on Rx queue */
- for (i = 0; i < rxq->desc_count; i++)
- idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]);
+ for (u32 i = 0; i < bufq->desc_count; i++)
+ idpf_rx_page_rel(&bufq->buf[i]);
- if (rxq->rx_hsplit_en)
- idpf_rx_hdr_buf_rel_all(rxq);
+ if (idpf_queue_has(HSPLIT_EN, bufq))
+ idpf_rx_hdr_buf_rel_all(bufq);
- page_pool_destroy(rxq->pp);
- rxq->pp = NULL;
+ libeth_rx_fq_destroy(&fq);
+ bufq->buf = NULL;
+ bufq->pp = NULL;
+}
+
+/**
+ * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue
+ * @rxq: queue to be cleaned
+ */
+static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq)
+{
+ struct libeth_fq fq = {
+ .fqes = rxq->rx_buf,
+ .pp = rxq->pp,
+ };
+
+ if (!rxq->rx_buf)
+ return;
+
+ for (u32 i = 0; i < rxq->desc_count; i++)
+ idpf_rx_page_rel(&rxq->rx_buf[i]);
- kfree(rxq->rx_buf.buf);
- rxq->rx_buf.buf = NULL;
+ libeth_rx_fq_destroy(&fq);
+ rxq->rx_buf = NULL;
+ rxq->pp = NULL;
}
/**
* idpf_rx_desc_rel - Free a specific Rx q resources
* @rxq: queue to clean the resources from
- * @bufq: buffer q or completion q
- * @q_model: single or split q model
+ * @dev: device to free DMA memory
+ * @model: single or split queue model
*
* Free a specific rx queue resources
*/
-static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
+static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev,
+ u32 model)
{
if (!rxq)
return;
@@ -402,7 +479,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
rxq->skb = NULL;
}
- if (bufq || !idpf_is_queue_model_split(q_model))
+ if (!idpf_is_queue_model_split(model))
idpf_rx_buf_rel_all(rxq);
rxq->next_to_alloc = 0;
@@ -411,11 +488,35 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
if (!rxq->desc_ring)
return;
- dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma);
+ dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma);
rxq->desc_ring = NULL;
}
/**
+ * idpf_rx_desc_rel_bufq - free buffer queue resources
+ * @bufq: buffer queue to clean the resources from
+ * @dev: device to free DMA memory
+ */
+static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq,
+ struct device *dev)
+{
+ if (!bufq)
+ return;
+
+ idpf_rx_buf_rel_bufq(bufq);
+
+ bufq->next_to_alloc = 0;
+ bufq->next_to_clean = 0;
+ bufq->next_to_use = 0;
+
+ if (!bufq->split_buf)
+ return;
+
+ dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma);
+ bufq->split_buf = NULL;
+}
+
+/**
* idpf_rx_desc_rel_all - Free Rx Resources for All Queues
* @vport: virtual port structure
*
@@ -423,6 +524,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model)
*/
static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
{
+ struct device *dev = &vport->adapter->pdev->dev;
struct idpf_rxq_group *rx_qgrp;
u16 num_rxq;
int i, j;
@@ -435,15 +537,15 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
if (!idpf_is_queue_model_split(vport->rxq_model)) {
for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
- idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j],
- false, vport->rxq_model);
+ idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev,
+ VIRTCHNL2_QUEUE_MODEL_SINGLE);
continue;
}
num_rxq = rx_qgrp->splitq.num_rxq_sets;
for (j = 0; j < num_rxq; j++)
idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq,
- false, vport->rxq_model);
+ dev, VIRTCHNL2_QUEUE_MODEL_SPLIT);
if (!rx_qgrp->splitq.bufq_sets)
continue;
@@ -452,45 +554,50 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport)
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
- idpf_rx_desc_rel(&bufq_set->bufq, true,
- vport->rxq_model);
+ idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev);
}
}
}
/**
* idpf_rx_buf_hw_update - Store the new tail and head values
- * @rxq: queue to bump
+ * @bufq: queue to bump
* @val: new head index
*/
-void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val)
+static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val)
{
- rxq->next_to_use = val;
+ bufq->next_to_use = val;
- if (unlikely(!rxq->tail))
+ if (unlikely(!bufq->tail))
return;
/* writel has an implicit memory barrier */
- writel(val, rxq->tail);
+ writel(val, bufq->tail);
}
/**
* idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers
- * @rxq: ring to use
+ * @bufq: ring to use
*
* Returns 0 on success, negative on failure.
*/
-static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq)
+static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq)
{
- struct idpf_adapter *adapter = rxq->vport->adapter;
-
- rxq->rx_buf.hdr_buf_va =
- dma_alloc_coherent(&adapter->pdev->dev,
- IDPF_HDR_BUF_SIZE * rxq->desc_count,
- &rxq->rx_buf.hdr_buf_pa,
- GFP_KERNEL);
- if (!rxq->rx_buf.hdr_buf_va)
- return -ENOMEM;
+ struct libeth_fq fq = {
+ .count = bufq->desc_count,
+ .type = LIBETH_FQE_HDR,
+ .nid = idpf_q_vector_to_mem(bufq->q_vector),
+ };
+ int ret;
+
+ ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
+ if (ret)
+ return ret;
+
+ bufq->hdr_pp = fq.pp;
+ bufq->hdr_buf = fq.fqes;
+ bufq->hdr_truesize = fq.truesize;
+ bufq->rx_hbuf_size = fq.buf_len;
return 0;
}
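
The libeth fill-queue contract used here is symmetric: libeth_rx_fq_create() fills the caller's struct libeth_fq with the page pool, fqe array and sizing, and libeth_rx_fq_destroy() must later be handed back the same pp/fqes pair (as idpf_rx_hdr_buf_rel_all() does above). A condensed sketch of the pairing:

static int demo_fq_create(struct idpf_buf_queue *bufq)
{
	struct libeth_fq fq = {
		.count	= bufq->desc_count,
		.type	= LIBETH_FQE_HDR,
		.nid	= NUMA_NO_NODE,
	};
	int err;

	err = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
	if (err)
		return err;

	bufq->hdr_pp = fq.pp;		/* outputs filled in by create() */
	bufq->hdr_buf = fq.fqes;

	return 0;
}

static void demo_fq_destroy(struct idpf_buf_queue *bufq)
{
	struct libeth_fq fq = {
		.fqes	= bufq->hdr_buf,	/* same pair handed back */
		.pp	= bufq->hdr_pp,
	};

	libeth_rx_fq_destroy(&fq);
	bufq->hdr_buf = NULL;
	bufq->hdr_pp = NULL;
}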
@@ -502,19 +609,20 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq)
*/
static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
{
- u16 nta = refillq->next_to_alloc;
+ u32 nta = refillq->next_to_use;
/* store the buffer ID and the SW maintained GEN bit to the refillq */
refillq->ring[nta] =
FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) |
FIELD_PREP(IDPF_RX_BI_GEN_M,
- test_bit(__IDPF_Q_GEN_CHK, refillq->flags));
+ idpf_queue_has(GEN_CHK, refillq));
if (unlikely(++nta == refillq->desc_count)) {
nta = 0;
- change_bit(__IDPF_Q_GEN_CHK, refillq->flags);
+ idpf_queue_change(GEN_CHK, refillq);
}
- refillq->next_to_alloc = nta;
+
+ refillq->next_to_use = nta;
}
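
The refill ring is a classic software gen-bit scheme: the producer stamps each entry with its current lap parity and flips GEN_CHK on wrap, so the consumer can distinguish fresh entries from last-lap leftovers without any shared count. A sketch of the matching consumer, assuming it tracks its own lap with a separate flag (shown here as a hypothetical RFL_GEN_CHK) and the same IDPF_RX_BI_* layout:

#include <linux/bitfield.h>

static bool demo_refillq_pop(struct idpf_sw_queue *refillq, u32 *buf_id)
{
	u32 ntc = refillq->next_to_clean;
	u32 entry = refillq->ring[ntc];

	/* GEN mismatch: the producer hasn't written this slot this lap */
	if (FIELD_GET(IDPF_RX_BI_GEN_M, entry) !=
	    idpf_queue_has(RFL_GEN_CHK, refillq))
		return false;

	*buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, entry);

	if (unlikely(++ntc == refillq->desc_count)) {
		ntc = 0;
		idpf_queue_change(RFL_GEN_CHK, refillq);
	}
	refillq->next_to_clean = ntc;

	return true;
}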
/**
@@ -524,24 +632,35 @@ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id)
*
* Returns false if buffer could not be allocated, true otherwise.
*/
-static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
+static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id)
{
struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL;
+ struct libeth_fq_fp fq = {
+ .count = bufq->desc_count,
+ };
u16 nta = bufq->next_to_alloc;
- struct idpf_rx_buf *buf;
dma_addr_t addr;
- splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta);
- buf = &bufq->rx_buf.buf[buf_id];
+ splitq_rx_desc = &bufq->split_buf[nta];
+
+ if (idpf_queue_has(HSPLIT_EN, bufq)) {
+ fq.pp = bufq->hdr_pp;
+ fq.fqes = bufq->hdr_buf;
+ fq.truesize = bufq->hdr_truesize;
- if (bufq->rx_hsplit_en) {
- splitq_rx_desc->hdr_addr =
- cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
- (u32)buf_id * IDPF_HDR_BUF_SIZE);
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
+ return false;
+
+ splitq_rx_desc->hdr_addr = cpu_to_le64(addr);
}
- addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
- if (unlikely(addr == DMA_MAPPING_ERROR))
+ fq.pp = bufq->pp;
+ fq.fqes = bufq->buf;
+ fq.truesize = bufq->truesize;
+
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
return false;
splitq_rx_desc->pkt_addr = cpu_to_le64(addr);
@@ -562,7 +681,8 @@ static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id)
*
* Returns true if @working_set bufs were posted successfully, false otherwise.
*/
-static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set)
+static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq,
+ u16 working_set)
{
int i;
@@ -571,95 +691,114 @@ static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set)
return false;
}
- idpf_rx_buf_hw_update(bufq,
- bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1));
+ idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc,
+ IDPF_RX_BUF_STRIDE));
return true;
}
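
idpf_rx_buf_hw_update() only forwards whatever tail value it is given; the caller is responsible for stride alignment, hence the ALIGN_DOWN above. A tiny model of that rounding — the stride value is an assumption, check IDPF_RX_BUF_STRIDE for the real one:

	#include <stdio.h>

	#define RX_BUF_STRIDE		32	/* assumed value */
	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))	/* a: power of two */

	int main(void)
	{
		unsigned int next_to_alloc = 103;

		/* HW only accepts tail bumps in whole strides */
		printf("tail=%u for next_to_alloc=%u\n",
		       ALIGN_DOWN(next_to_alloc, RX_BUF_STRIDE),
		       next_to_alloc);
		return 0;	/* prints tail=96 */
	}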
/**
- * idpf_rx_create_page_pool - Create a page pool
- * @rxbufq: RX queue to create page pool for
+ * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources
+ * @rxq: queue for which the buffers are allocated
*
- * Returns &page_pool on success, casted -errno on failure
+ * Return: 0 on success, -ENOMEM on failure.
*/
-static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq)
+static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq)
{
- struct page_pool_params pp = {
- .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
- .order = 0,
- .pool_size = rxbufq->desc_count,
- .nid = NUMA_NO_NODE,
- .dev = rxbufq->vport->netdev->dev.parent,
- .max_len = PAGE_SIZE,
- .dma_dir = DMA_FROM_DEVICE,
- .offset = 0,
+ if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1))
+ goto err;
+
+ return 0;
+
+err:
+ idpf_rx_buf_rel_all(rxq);
+
+ return -ENOMEM;
+}
+
+/**
+ * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs
+ * @rxq: buffer queue to create page pool for
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
+{
+ struct libeth_fq fq = {
+ .count = rxq->desc_count,
+ .type = LIBETH_FQE_MTU,
+ .nid = idpf_q_vector_to_mem(rxq->q_vector),
};
+ int ret;
+
+ ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi);
+ if (ret)
+ return ret;
+
+ rxq->pp = fq.pp;
+ rxq->rx_buf = fq.fqes;
+ rxq->truesize = fq.truesize;
+ rxq->rx_buf_size = fq.buf_len;
- return page_pool_create(&pp);
+ return idpf_rx_buf_alloc_singleq(rxq);
}
/**
* idpf_rx_buf_alloc_all - Allocate memory for all buffer resources
- * @rxbufq: queue for which the buffers are allocated; equivalent to
- * rxq when operating in singleq mode
+ * @rxbufq: queue for which the buffers are allocated
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq)
+static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq)
{
int err = 0;
- /* Allocate book keeping buffers */
- rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count,
- sizeof(struct idpf_rx_buf), GFP_KERNEL);
- if (!rxbufq->rx_buf.buf) {
- err = -ENOMEM;
- goto rx_buf_alloc_all_out;
- }
-
- if (rxbufq->rx_hsplit_en) {
+ if (idpf_queue_has(HSPLIT_EN, rxbufq)) {
err = idpf_rx_hdr_buf_alloc_all(rxbufq);
if (err)
goto rx_buf_alloc_all_out;
}
/* Allocate buffers to be given to HW. */
- if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) {
- int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq);
-
- if (!idpf_rx_post_init_bufs(rxbufq, working_set))
- err = -ENOMEM;
- } else {
- if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq,
- rxbufq->desc_count - 1))
- err = -ENOMEM;
- }
+ if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq)))
+ err = -ENOMEM;
rx_buf_alloc_all_out:
if (err)
- idpf_rx_buf_rel_all(rxbufq);
+ idpf_rx_buf_rel_bufq(rxbufq);
return err;
}
/**
* idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW
- * @rxbufq: RX queue to create page pool for
+ * @bufq: buffer queue to create page pool for
+ * @type: type of Rx buffers to allocate
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rx_bufs_init(struct idpf_queue *rxbufq)
+static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
+ enum libeth_fqe_type type)
{
- struct page_pool *pool;
+ struct libeth_fq fq = {
+ .truesize = bufq->truesize,
+ .count = bufq->desc_count,
+ .type = type,
+ .hsplit = idpf_queue_has(HSPLIT_EN, bufq),
+ .nid = idpf_q_vector_to_mem(bufq->q_vector),
+ };
+ int ret;
- pool = idpf_rx_create_page_pool(rxbufq);
- if (IS_ERR(pool))
- return PTR_ERR(pool);
+ ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi);
+ if (ret)
+ return ret;
- rxbufq->pp = pool;
+ bufq->pp = fq.pp;
+ bufq->buf = fq.fqes;
+ bufq->truesize = fq.truesize;
+ bufq->rx_buf_size = fq.buf_len;
- return idpf_rx_buf_alloc_all(rxbufq);
+ return idpf_rx_buf_alloc_all(bufq);
}
/**
@@ -670,20 +809,22 @@ static int idpf_rx_bufs_init(struct idpf_queue *rxbufq)
*/
int idpf_rx_bufs_init_all(struct idpf_vport *vport)
{
- struct idpf_rxq_group *rx_qgrp;
- struct idpf_queue *q;
+ bool split = idpf_is_queue_model_split(vport->rxq_model);
int i, j, err;
for (i = 0; i < vport->num_rxq_grp; i++) {
- rx_qgrp = &vport->rxq_grps[i];
+ struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
+ u32 truesize = 0;
/* Allocate bufs for the rxq itself in singleq */
- if (!idpf_is_queue_model_split(vport->rxq_model)) {
+ if (!split) {
int num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+
q = rx_qgrp->singleq.rxqs[j];
- err = idpf_rx_bufs_init(q);
+ err = idpf_rx_bufs_init_singleq(q);
if (err)
return err;
}
@@ -693,10 +834,19 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
/* Otherwise, allocate bufs for the buffer queues */
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ enum libeth_fqe_type type;
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- err = idpf_rx_bufs_init(q);
+ q->truesize = truesize;
+
+ type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU;
+
+ err = idpf_rx_bufs_init(q, type);
if (err)
return err;
+
+ truesize = q->truesize >> 1;
}
}
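
The loop above builds a buffer-size ladder per queue group: the first buffer queue gets full-MTU buffers, each subsequent one is typed LIBETH_FQE_SHORT and hinted at half the truesize the previous create actually picked. A standalone sketch of that derivation, with an assumed 4 KiB first pick:

	#include <stdio.h>

	enum fqe_type { FQE_MTU, FQE_SHORT };

	int main(void)
	{
		unsigned int truesize = 0;	/* 0 = no hint yet, as above */
		const unsigned int bufqs = 2;

		for (unsigned int j = 0; j < bufqs; j++) {
			enum fqe_type type = truesize ? FQE_SHORT : FQE_MTU;
			/* pretend the fill-queue create picked 4K */
			unsigned int picked = truesize ? truesize : 4096;

			printf("bufq%u: type=%s truesize=%u\n", j,
			       type == FQE_MTU ? "MTU" : "SHORT", picked);

			truesize = picked >> 1;	/* next queue gets half */
		}
		return 0;
	}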
@@ -705,22 +855,17 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
/**
* idpf_rx_desc_alloc - Allocate queue Rx resources
+ * @vport: vport to allocate resources for
* @rxq: Rx queue for which the resources are setup
- * @bufq: buffer or completion queue
- * @q_model: single or split queue model
*
* Returns 0 on success, negative on failure
*/
-static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
+static int idpf_rx_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_rx_queue *rxq)
{
- struct device *dev = rxq->dev;
+ struct device *dev = &vport->adapter->pdev->dev;
- if (bufq)
- rxq->size = rxq->desc_count *
- sizeof(struct virtchnl2_splitq_rx_buf_desc);
- else
- rxq->size = rxq->desc_count *
- sizeof(union virtchnl2_rx_desc);
+ rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc);
/* Allocate descriptors and also round up to nearest 4K */
rxq->size = ALIGN(rxq->size, 4096);
@@ -735,7 +880,35 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
rxq->next_to_alloc = 0;
rxq->next_to_clean = 0;
rxq->next_to_use = 0;
- set_bit(__IDPF_Q_GEN_CHK, rxq->flags);
+ idpf_queue_set(GEN_CHK, rxq);
+
+ return 0;
+}
+
+/**
+ * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring
+ * @vport: vport to allocate resources for
+ * @bufq: buffer queue for which the resources are set up
+ *
+ * Return: 0 on success, -ENOMEM on failure.
+ */
+static int idpf_bufq_desc_alloc(const struct idpf_vport *vport,
+ struct idpf_buf_queue *bufq)
+{
+ struct device *dev = &vport->adapter->pdev->dev;
+
+ bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf));
+
+ bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma,
+ GFP_KERNEL);
+ if (!bufq->split_buf)
+ return -ENOMEM;
+
+ bufq->next_to_alloc = 0;
+ bufq->next_to_clean = 0;
+ bufq->next_to_use = 0;
+
+ idpf_queue_set(GEN_CHK, bufq);
return 0;
}
@@ -748,9 +921,7 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
*/
static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
{
- struct device *dev = &vport->adapter->pdev->dev;
struct idpf_rxq_group *rx_qgrp;
- struct idpf_queue *q;
int i, j, err;
u16 num_rxq;
@@ -762,13 +933,17 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+
if (idpf_is_queue_model_split(vport->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
- err = idpf_rx_desc_alloc(q, false, vport->rxq_model);
+
+ err = idpf_rx_desc_alloc(vport, q);
if (err) {
- dev_err(dev, "Memory allocation for Rx Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Memory allocation for Rx Queue %u failed\n",
i);
goto err_out;
}
@@ -778,10 +953,14 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport)
continue;
for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- err = idpf_rx_desc_alloc(q, true, vport->rxq_model);
+
+ err = idpf_bufq_desc_alloc(vport, q);
if (err) {
- dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n",
+ pci_err(vport->adapter->pdev,
+ "Memory allocation for Rx Buffer Queue %u failed\n",
i);
goto err_out;
}
@@ -802,11 +981,16 @@ err_out:
*/
static void idpf_txq_group_rel(struct idpf_vport *vport)
{
+ bool split, flow_sch_en;
int i, j;
if (!vport->txq_grps)
return;
+ split = idpf_is_queue_model_split(vport->txq_model);
+ flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
+ VIRTCHNL2_CAP_SPLITQ_QSCHED);
+
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *txq_grp = &vport->txq_grps[i];
@@ -814,8 +998,15 @@ static void idpf_txq_group_rel(struct idpf_vport *vport)
kfree(txq_grp->txqs[j]);
txq_grp->txqs[j] = NULL;
}
+
+ if (!split)
+ continue;
+
kfree(txq_grp->complq);
txq_grp->complq = NULL;
+
+ if (flow_sch_en)
+ kfree(txq_grp->stashes);
}
kfree(vport->txq_grps);
vport->txq_grps = NULL;
@@ -919,7 +1110,7 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport)
{
int i, j, k = 0;
- vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *),
+ vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs),
GFP_KERNEL);
if (!vport->txqs)
@@ -967,17 +1158,11 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport,
/* Adjust number of buffer queues per Rx queue group. */
if (!idpf_is_queue_model_split(vport->rxq_model)) {
vport->num_bufqs_per_qgrp = 0;
- vport->bufq_size[0] = IDPF_RX_BUF_2048;
return;
}
vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP;
- /* Bufq[0] default buffer size is 4K
- * Bufq[1] default buffer size is 2K
- */
- vport->bufq_size[0] = IDPF_RX_BUF_4096;
- vport->bufq_size[1] = IDPF_RX_BUF_2048;
}
/**
@@ -1137,9 +1322,10 @@ static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport,
* @q: rx queue for which descids are set
*
*/
-static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
+static void idpf_rxq_set_descids(const struct idpf_vport *vport,
+ struct idpf_rx_queue *q)
{
- if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M;
} else {
if (vport->base_rxd)
@@ -1158,20 +1344,22 @@ static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q)
*/
static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
{
- bool flow_sch_en;
- int err, i;
+ bool split, flow_sch_en;
+ int i;
vport->txq_grps = kcalloc(vport->num_txq_grp,
sizeof(*vport->txq_grps), GFP_KERNEL);
if (!vport->txq_grps)
return -ENOMEM;
+ split = idpf_is_queue_model_split(vport->txq_model);
flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS,
VIRTCHNL2_CAP_SPLITQ_QSCHED);
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_txq_stash *stashes;
int j;
tx_qgrp->vport = vport;
@@ -1180,45 +1368,62 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
for (j = 0; j < tx_qgrp->num_txq; j++) {
tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]),
GFP_KERNEL);
- if (!tx_qgrp->txqs[j]) {
- err = -ENOMEM;
+ if (!tx_qgrp->txqs[j])
goto err_alloc;
- }
+ }
+
+ if (split && flow_sch_en) {
+ stashes = kcalloc(num_txq, sizeof(*stashes),
+ GFP_KERNEL);
+ if (!stashes)
+ goto err_alloc;
+
+ tx_qgrp->stashes = stashes;
}
for (j = 0; j < tx_qgrp->num_txq; j++) {
- struct idpf_queue *q = tx_qgrp->txqs[j];
+ struct idpf_tx_queue *q = tx_qgrp->txqs[j];
q->dev = &adapter->pdev->dev;
q->desc_count = vport->txq_desc_count;
q->tx_max_bufs = idpf_get_max_tx_bufs(adapter);
q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter);
- q->vport = vport;
+ q->netdev = vport->netdev;
q->txq_grp = tx_qgrp;
- hash_init(q->sched_buf_hash);
- if (flow_sch_en)
- set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags);
+ if (!split) {
+ q->clean_budget = vport->compln_clean_budget;
+ idpf_queue_assign(CRC_EN, q,
+ vport->crc_enable);
+ }
+
+ if (!flow_sch_en)
+ continue;
+
+ if (split) {
+ q->stash = &stashes[j];
+ hash_init(q->stash->sched_buf_hash);
+ }
+
+ idpf_queue_set(FLOW_SCH_EN, q);
}
- if (!idpf_is_queue_model_split(vport->txq_model))
+ if (!split)
continue;
tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP,
sizeof(*tx_qgrp->complq),
GFP_KERNEL);
- if (!tx_qgrp->complq) {
- err = -ENOMEM;
+ if (!tx_qgrp->complq)
goto err_alloc;
- }
- tx_qgrp->complq->dev = &adapter->pdev->dev;
tx_qgrp->complq->desc_count = vport->complq_desc_count;
- tx_qgrp->complq->vport = vport;
tx_qgrp->complq->txq_grp = tx_qgrp;
+ tx_qgrp->complq->netdev = vport->netdev;
+ tx_qgrp->complq->clean_budget = vport->compln_clean_budget;
if (flow_sch_en)
- __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags);
+ idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq);
}
return 0;
@@ -1226,7 +1431,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq)
err_alloc:
idpf_txq_group_rel(vport);
- return err;
+ return -ENOMEM;
}
/**
@@ -1238,8 +1443,6 @@ err_alloc:
*/
static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct idpf_queue *q;
int i, k, err = 0;
bool hs;
@@ -1292,21 +1495,13 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
struct idpf_bufq_set *bufq_set =
&rx_qgrp->splitq.bufq_sets[j];
int swq_size = sizeof(struct idpf_sw_queue);
+ struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- q->dev = &adapter->pdev->dev;
q->desc_count = vport->bufq_desc_count[j];
- q->vport = vport;
- q->rxq_grp = rx_qgrp;
- q->idx = j;
- q->rx_buf_size = vport->bufq_size[j];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
- q->rx_buf_stride = IDPF_RX_BUF_STRIDE;
- if (hs) {
- q->rx_hsplit_en = true;
- q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
- }
+ idpf_queue_assign(HSPLIT_EN, q, hs);
bufq_set->num_refillqs = num_rxq;
bufq_set->refillqs = kcalloc(num_rxq, swq_size,
@@ -1319,13 +1514,12 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
struct idpf_sw_queue *refillq =
&bufq_set->refillqs[k];
- refillq->dev = &vport->adapter->pdev->dev;
refillq->desc_count =
vport->bufq_desc_count[j];
- set_bit(__IDPF_Q_GEN_CHK, refillq->flags);
- set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
+ idpf_queue_set(GEN_CHK, refillq);
+ idpf_queue_set(RFL_GEN_CHK, refillq);
refillq->ring = kcalloc(refillq->desc_count,
- sizeof(u16),
+ sizeof(*refillq->ring),
GFP_KERNEL);
if (!refillq->ring) {
err = -ENOMEM;
@@ -1336,36 +1530,30 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
skip_splitq_rx_init:
for (j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
+
if (!idpf_is_queue_model_split(vport->rxq_model)) {
q = rx_qgrp->singleq.rxqs[j];
goto setup_rxq;
}
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- rx_qgrp->splitq.rxq_sets[j]->refillq0 =
+ rx_qgrp->splitq.rxq_sets[j]->refillq[0] =
&rx_qgrp->splitq.bufq_sets[0].refillqs[j];
if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP)
- rx_qgrp->splitq.rxq_sets[j]->refillq1 =
+ rx_qgrp->splitq.rxq_sets[j]->refillq[1] =
&rx_qgrp->splitq.bufq_sets[1].refillqs[j];
- if (hs) {
- q->rx_hsplit_en = true;
- q->rx_hbuf_size = IDPF_HDR_BUF_SIZE;
- }
+ idpf_queue_assign(HSPLIT_EN, q, hs);
setup_rxq:
- q->dev = &adapter->pdev->dev;
q->desc_count = vport->rxq_desc_count;
- q->vport = vport;
- q->rxq_grp = rx_qgrp;
+ q->rx_ptype_lkup = vport->rx_ptype_lkup;
+ q->netdev = vport->netdev;
+ q->bufq_sets = rx_qgrp->splitq.bufq_sets;
q->idx = (i * num_rxq) + j;
- /* In splitq mode, RXQ buffer size should be
- * set to that of the first buffer queue
- * associated with this RXQ
- */
- q->rx_buf_size = vport->bufq_size[0];
q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK;
q->rx_max_pkt_size = vport->netdev->mtu +
- IDPF_PACKET_HDR_PAD;
+ LIBETH_RX_LL_LEN;
idpf_rxq_set_descids(vport, q);
}
}
@@ -1445,12 +1633,13 @@ err_out:
* idpf_tx_handle_sw_marker - Handle queue marker packet
* @tx_q: tx queue to handle software marker
*/
-static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
+static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q)
{
- struct idpf_vport *vport = tx_q->vport;
+ struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev);
+ struct idpf_vport *vport = priv->vport;
int i;
- clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags);
+ idpf_queue_clear(SW_MARKER, tx_q);
/* Hardware must write marker packets to all queues associated with
* completion queues. So check if all queues received marker packets
*/
@@ -1458,7 +1647,7 @@ static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
/* If we're still waiting on any other TXQ marker completions,
* just return now since we cannot wake up the marker_wq yet.
*/
- if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags))
+ if (idpf_queue_has(SW_MARKER, vport->txqs[i]))
return;
/* Drain complete */
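
A compact model of this drain protocol: each queue clears its own flag, and only the call that finds no flags left wakes the waiter (wake_up() on marker_wq in the driver; a printf stands in here):

	#include <stdbool.h>
	#include <stdio.h>

	#define NUM_TXQ	4

	static bool sw_marker[NUM_TXQ];

	static void handle_sw_marker(int q)
	{
		sw_marker[q] = false;

		for (int i = 0; i < NUM_TXQ; i++)
			if (sw_marker[i])
				return;		/* still draining */

		printf("all markers received, waking marker_wq\n");
	}

	int main(void)
	{
		for (int i = 0; i < NUM_TXQ; i++)
			sw_marker[i] = true;

		for (int i = 0; i < NUM_TXQ; i++)
			handle_sw_marker(i);	/* only the last call wakes */
		return 0;
	}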
@@ -1474,7 +1663,7 @@ static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
* @cleaned: pointer to stats struct to track cleaned packets/bytes
* @napi_budget: Used to determine if we are in netpoll
*/
-static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q,
+static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q,
struct idpf_tx_buf *tx_buf,
struct idpf_cleaned_stats *cleaned,
int napi_budget)
@@ -1505,7 +1694,8 @@ static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q,
* @cleaned: pointer to stats struct to track cleaned packets/bytes
* @budget: Used to determine if we are in netpoll
*/
-static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
+static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq,
+ u16 compl_tag,
struct idpf_cleaned_stats *cleaned,
int budget)
{
@@ -1513,7 +1703,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
struct hlist_node *tmp_buf;
/* Buffer completion */
- hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf,
+ hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf,
hlist, compl_tag) {
if (unlikely(stash->buf.compl_tag != (int)compl_tag))
continue;
@@ -1530,7 +1720,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
}
/* Push shadow buf back onto stack */
- idpf_buf_lifo_push(&txq->buf_stack, stash);
+ idpf_buf_lifo_push(&txq->stash->buf_stack, stash);
hash_del(&stash->hlist);
}
@@ -1542,7 +1732,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag,
* @txq: Tx queue to clean
* @tx_buf: buffer to store
*/
-static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
+static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq,
struct idpf_tx_buf *tx_buf)
{
struct idpf_tx_stash *stash;
@@ -1551,10 +1741,10 @@ static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
!dma_unmap_len(tx_buf, len)))
return 0;
- stash = idpf_buf_lifo_pop(&txq->buf_stack);
+ stash = idpf_buf_lifo_pop(&txq->stash->buf_stack);
if (unlikely(!stash)) {
net_err_ratelimited("%s: No out-of-order TX buffers left!\n",
- txq->vport->netdev->name);
+ netdev_name(txq->netdev));
return -ENOMEM;
}
@@ -1568,7 +1758,8 @@ static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq,
stash->buf.compl_tag = tx_buf->compl_tag;
/* Add buffer to buf_hash table to be freed later */
- hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag);
+ hash_add(txq->stash->sched_buf_hash, &stash->hlist,
+ stash->buf.compl_tag);
memset(tx_buf, 0, sizeof(struct idpf_tx_buf));
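
A toy model of the completion-tag stash used by these two functions: with flow-based scheduling, completions can arrive out of order, so buffers are parked under their tag and released only when that tag's completion is seen. A flat array stands in for the kernel hashtable and the LIFO buf_stack:

	#include <stdio.h>

	#define TAGS	8

	static int stashed_bytes[TAGS];	/* 0 = slot free */

	static void stash(int tag, int bytes)
	{
		stashed_bytes[tag % TAGS] = bytes;
	}

	static void complete(int tag)
	{
		int *slot = &stashed_bytes[tag % TAGS];

		if (*slot) {
			printf("tag %d done, freeing %d bytes\n", tag, *slot);
			*slot = 0;
		}
	}

	int main(void)
	{
		stash(3, 1500);
		stash(5, 600);
		complete(5);	/* out of order relative to stash order */
		complete(3);
		return 0;
	}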
@@ -1584,7 +1775,7 @@ do { \
if (unlikely(!(ntc))) { \
ntc -= (txq)->desc_count; \
buf = (txq)->tx_buf; \
- desc = IDPF_FLEX_TX_DESC(txq, 0); \
+ desc = &(txq)->flex_tx[0]; \
} else { \
(buf)++; \
(desc)++; \
@@ -1607,7 +1798,7 @@ do { \
* and the buffers will be cleaned separately. The stats are not updated from
* this function when using flow-based scheduling.
*/
-static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
+static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end,
int napi_budget,
struct idpf_cleaned_stats *cleaned,
bool descs_only)
@@ -1617,8 +1808,8 @@ static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end,
s16 ntc = tx_q->next_to_clean;
struct idpf_tx_buf *tx_buf;
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc);
- next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end);
+ tx_desc = &tx_q->flex_tx[ntc];
+ next_pending_desc = &tx_q->flex_tx[end];
tx_buf = &tx_q->tx_buf[ntc];
ntc -= tx_q->desc_count;
@@ -1703,7 +1894,7 @@ do { \
* stashed. Returns the byte/segment count for the cleaned packet associated
* this completion tag.
*/
-static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag,
+static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag,
struct idpf_cleaned_stats *cleaned,
int budget)
{
@@ -1772,14 +1963,14 @@ static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag,
*
* Returns bytes/packets cleaned
*/
-static void idpf_tx_handle_rs_completion(struct idpf_queue *txq,
+static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq,
struct idpf_splitq_tx_compl_desc *desc,
struct idpf_cleaned_stats *cleaned,
int budget)
{
u16 compl_tag;
- if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) {
+ if (!idpf_queue_has(FLOW_SCH_EN, txq)) {
u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head);
return idpf_tx_splitq_clean(txq, head, budget, cleaned, false);
@@ -1802,24 +1993,23 @@ static void idpf_tx_handle_rs_completion(struct idpf_queue *txq,
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
+static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget,
int *cleaned)
{
struct idpf_splitq_tx_compl_desc *tx_desc;
- struct idpf_vport *vport = complq->vport;
s16 ntc = complq->next_to_clean;
struct idpf_netdev_priv *np;
unsigned int complq_budget;
bool complq_ok = true;
int i;
- complq_budget = vport->compln_clean_budget;
- tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc);
+ complq_budget = complq->clean_budget;
+ tx_desc = &complq->comp[ntc];
ntc -= complq->desc_count;
do {
struct idpf_cleaned_stats cleaned_stats = { };
- struct idpf_queue *tx_q;
+ struct idpf_tx_queue *tx_q;
int rel_tx_qid;
u16 hw_head;
u8 ctype; /* completion type */
@@ -1828,7 +2018,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
/* if the descriptor isn't done, no work yet to do */
gen = le16_get_bits(tx_desc->qid_comptype_gen,
IDPF_TXD_COMPLQ_GEN_M);
- if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen)
+ if (idpf_queue_has(GEN_CHK, complq) != gen)
break;
/* Find necessary info of TX queue to clean buffers */
@@ -1836,8 +2026,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
IDPF_TXD_COMPLQ_QID_M);
if (rel_tx_qid >= complq->txq_grp->num_txq ||
!complq->txq_grp->txqs[rel_tx_qid]) {
- dev_err(&complq->vport->adapter->pdev->dev,
- "TxQ not found\n");
+ netdev_err(complq->netdev, "TxQ not found\n");
goto fetch_next_desc;
}
tx_q = complq->txq_grp->txqs[rel_tx_qid];
@@ -1860,15 +2049,14 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
idpf_tx_handle_sw_marker(tx_q);
break;
default:
- dev_err(&tx_q->vport->adapter->pdev->dev,
- "Unknown TX completion type: %d\n",
- ctype);
+ netdev_err(tx_q->netdev,
+ "Unknown TX completion type: %d\n", ctype);
goto fetch_next_desc;
}
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets);
- u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes);
+ u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets);
+ u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes);
tx_q->cleaned_pkts += cleaned_stats.packets;
tx_q->cleaned_bytes += cleaned_stats.bytes;
complq->num_completions++;
@@ -1879,8 +2067,8 @@ fetch_next_desc:
ntc++;
if (unlikely(!ntc)) {
ntc -= complq->desc_count;
- tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0);
- change_bit(__IDPF_Q_GEN_CHK, complq->flags);
+ tx_desc = &complq->comp[0];
+ idpf_queue_change(GEN_CHK, complq);
}
prefetch(tx_desc);
@@ -1896,9 +2084,9 @@ fetch_next_desc:
IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq)))
complq_ok = false;
- np = netdev_priv(complq->vport->netdev);
+ np = netdev_priv(complq->netdev);
for (i = 0; i < complq->txq_grp->num_txq; ++i) {
- struct idpf_queue *tx_q = complq->txq_grp->txqs[i];
+ struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i];
struct netdev_queue *nq;
bool dont_wake;
@@ -1909,11 +2097,11 @@ fetch_next_desc:
*cleaned += tx_q->cleaned_pkts;
/* Update BQL */
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) ||
np->state != __IDPF_VPORT_UP ||
- !netif_carrier_ok(tx_q->vport->netdev);
+ !netif_carrier_ok(tx_q->netdev);
/* Check if the TXQ needs to and can be restarted */
__netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes,
IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH,
@@ -1976,7 +2164,7 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc,
*
* Returns 0 if stop is not needed
*/
-int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
+int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size)
{
struct netdev_queue *nq;
@@ -1984,10 +2172,10 @@ int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
return 0;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.q_busy);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size);
}
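
The stop decision hinges on how many descriptors remain unused in the ring. A user-space model of the usual Intel-driver occupancy formula — the exact IDPF_DESC_UNUSED definition may differ:

	#include <stdio.h>

	static int desc_unused(int ntc, int ntu, int count)
	{
		return (ntc > ntu ? 0 : count) + ntc - ntu - 1;
	}

	int main(void)
	{
		/* 256-entry ring: producer at 250, consumer at 10 */
		printf("unused = %d\n", desc_unused(10, 250, 256));	/* 15 */
		/* producer already wrapped past the consumer */
		printf("unused = %d\n", desc_unused(100, 40, 256));	/* 59 */
		return 0;
	}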
@@ -1999,7 +2187,7 @@ int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size)
*
* Returns 0 if stop is not needed
*/
-static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q,
+static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q,
unsigned int descs_needed)
{
if (idpf_tx_maybe_stop_common(tx_q, descs_needed))
@@ -2023,9 +2211,9 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q,
splitq_stop:
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.q_busy);
+ u64_stats_inc(&tx_q->q_stats.q_busy);
u64_stats_update_end(&tx_q->stats_sync);
- netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx);
+ netif_stop_subqueue(tx_q->netdev, tx_q->idx);
return -EBUSY;
}
@@ -2040,12 +2228,12 @@ splitq_stop:
* to do a register write to update our queue status. We know this can only
* mean tail here as HW should be owning head for TX.
*/
-void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
+void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more)
{
struct netdev_queue *nq;
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
tx_q->next_to_use = val;
idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED);
@@ -2069,7 +2257,7 @@ void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
*
* Returns number of data descriptors needed for this skb.
*/
-unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
+unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
struct sk_buff *skb)
{
const struct skb_shared_info *shinfo;
@@ -2102,7 +2290,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
count = idpf_size_to_txd_count(skb->len);
u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.tx.linearize);
+ u64_stats_inc(&txq->q_stats.linearize);
u64_stats_update_end(&txq->stats_sync);
}
@@ -2116,11 +2304,11 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
* @first: original first buffer info buffer for packet
* @idx: starting point on ring to unwind
*/
-void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
+void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 idx)
{
u64_stats_update_begin(&txq->stats_sync);
- u64_stats_inc(&txq->q_stats.tx.dma_map_errs);
+ u64_stats_inc(&txq->q_stats.dma_map_errs);
u64_stats_update_end(&txq->stats_sync);
/* clear dma mappings for failed tx_buf map */
@@ -2143,7 +2331,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
* used one additional descriptor for a context
* descriptor. Reset that here.
*/
- tx_desc = IDPF_FLEX_TX_DESC(txq, idx);
+ tx_desc = &txq->flex_tx[idx];
memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc));
if (idx == 0)
idx = txq->desc_count;
@@ -2159,7 +2347,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
* @txq: the tx ring to wrap
* @ntu: ring index to bump
*/
-static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu)
+static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu)
{
ntu++;
@@ -2181,7 +2369,7 @@ static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu)
* and gets a physical address for each memory location and programs
* it and the length into the transmit flex descriptor.
*/
-static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
+static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q,
struct idpf_tx_splitq_params *params,
struct idpf_tx_buf *first)
{
@@ -2202,7 +2390,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
data_len = skb->data_len;
size = skb_headlen(skb);
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, i);
+ tx_desc = &tx_q->flex_tx[i];
dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE);
@@ -2275,7 +2463,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
i++;
if (i == tx_q->desc_count) {
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->flex_tx[0];
i = 0;
tx_q->compl_tag_cur_gen =
IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
@@ -2320,7 +2508,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
i++;
if (i == tx_q->desc_count) {
- tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0);
+ tx_desc = &tx_q->flex_tx[0];
i = 0;
tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q);
}
@@ -2348,7 +2536,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q,
tx_q->txq_grp->num_completions_pending++;
/* record bytecount for BQL */
- nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx);
+ nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx);
netdev_tx_sent_queue(nq, first->bytecount);
idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more());
@@ -2525,8 +2713,8 @@ static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs)
* E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO
* header, 1 for segment payload, and then 7 for the fragments.
*/
-bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count)
+static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
+ unsigned int count)
{
if (likely(count < max_bufs))
return false;
@@ -2544,7 +2732,7 @@ bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
* ring entry to reflect that this index is a context descriptor
*/
static struct idpf_flex_tx_ctx_desc *
-idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
+idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq)
{
struct idpf_flex_tx_ctx_desc *desc;
int i = txq->next_to_use;
@@ -2553,7 +2741,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG;
/* grab the next descriptor */
- desc = IDPF_FLEX_TX_CTX_DESC(txq, i);
+ desc = &txq->flex_ctx[i];
txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i);
return desc;
@@ -2564,10 +2752,10 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq)
* @tx_q: queue to send buffer on
* @skb: pointer to skb
*/
-netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
+netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb)
{
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.skb_drops);
+ u64_stats_inc(&tx_q->q_stats.skb_drops);
u64_stats_update_end(&tx_q->stats_sync);
idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false);
@@ -2585,7 +2773,7 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb)
* Returns NETDEV_TX_OK if sent, else an error code
*/
static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
- struct idpf_queue *tx_q)
+ struct idpf_tx_queue *tx_q)
{
struct idpf_tx_splitq_params tx_params = { };
struct idpf_tx_buf *first;
@@ -2625,7 +2813,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len;
u64_stats_update_begin(&tx_q->stats_sync);
- u64_stats_inc(&tx_q->q_stats.tx.lso_pkts);
+ u64_stats_inc(&tx_q->q_stats.lso_pkts);
u64_stats_update_end(&tx_q->stats_sync);
}
@@ -2642,7 +2830,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
}
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) {
+ if (idpf_queue_has(FLOW_SCH_EN, tx_q)) {
tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE;
tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP;
/* Set the RE bit to catch any packets that may have not been
@@ -2672,17 +2860,16 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb,
}
/**
- * idpf_tx_splitq_start - Selects the right Tx queue to send buffer
+ * idpf_tx_start - Selects the right Tx queue to send buffer
* @skb: send buffer
* @netdev: network interface device structure
*
* Returns NETDEV_TX_OK if sent, else an error code
*/
-netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
- struct net_device *netdev)
+netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev)
{
struct idpf_vport *vport = idpf_netdev_to_vport(netdev);
- struct idpf_queue *tx_q;
+ struct idpf_tx_queue *tx_q;
if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) {
dev_kfree_skb_any(skb);
@@ -2701,31 +2888,10 @@ netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- return idpf_tx_splitq_frame(skb, tx_q);
-}
-
-/**
- * idpf_ptype_to_htype - get a hash type
- * @decoded: Decoded Rx packet type related fields
- *
- * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by
- * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of
- * Rx desc.
- */
-enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded)
-{
- if (!decoded->known)
- return PKT_HASH_TYPE_NONE;
- if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
- decoded->inner_prot)
- return PKT_HASH_TYPE_L4;
- if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 &&
- decoded->outer_ip)
- return PKT_HASH_TYPE_L3;
- if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2)
- return PKT_HASH_TYPE_L2;
-
- return PKT_HASH_TYPE_NONE;
+ if (idpf_is_queue_model_split(vport->txq_model))
+ return idpf_tx_splitq_frame(skb, tx_q);
+ else
+ return idpf_tx_singleq_frame(skb, tx_q);
}
/**
@@ -2735,20 +2901,21 @@ enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *deco
* @rx_desc: Receive descriptor
* @decoded: Decoded Rx packet type related fields
*/
-static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb,
- struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+static void
+idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct libeth_rx_pt decoded)
{
u32 hash;
- if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH)))
+ if (!libeth_rx_pt_has_hash(rxq->netdev, decoded))
return;
hash = le16_to_cpu(rx_desc->hash1) |
(rx_desc->ff2_mirrid_hash2.hash2 << 16) |
(rx_desc->hash3 << 24);
- skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded));
+ libeth_rx_pt_set_hash(skb, hash, decoded);
}
/**
@@ -2760,92 +2927,83 @@ static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb,
*
* skb->protocol must be set before this function is called
*/
-static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb,
- struct idpf_rx_csum_decoded *csum_bits,
- struct idpf_rx_ptype_decoded *decoded)
+static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ struct idpf_rx_csum_decoded csum_bits,
+ struct libeth_rx_pt decoded)
{
bool ipv4, ipv6;
/* check if Rx checksum is enabled */
- if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM)))
+ if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded))
return;
/* check if HW has decoded the packet and checksum */
- if (!(csum_bits->l3l4p))
+ if (unlikely(!csum_bits.l3l4p))
return;
- ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
- ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
- if (ipv4 && (csum_bits->ipe || csum_bits->eipe))
+ if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe)))
goto checksum_fail;
- if (ipv6 && csum_bits->ipv6exadd)
+ if (unlikely(ipv6 && csum_bits.ipv6exadd))
return;
/* check for L4 errors and handle packets that were not able to be
* checksummed
*/
- if (csum_bits->l4e)
+ if (unlikely(csum_bits.l4e))
goto checksum_fail;
- /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */
- switch (decoded->inner_prot) {
- case IDPF_RX_PTYPE_INNER_PROT_ICMP:
- case IDPF_RX_PTYPE_INNER_PROT_TCP:
- case IDPF_RX_PTYPE_INNER_PROT_UDP:
- if (!csum_bits->raw_csum_inv) {
- u16 csum = csum_bits->raw_csum;
-
- skb->csum = csum_unfold((__force __sum16)~swab16(csum));
- skb->ip_summed = CHECKSUM_COMPLETE;
- } else {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- }
- break;
- case IDPF_RX_PTYPE_INNER_PROT_SCTP:
+ if (csum_bits.raw_csum_inv ||
+ decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- default:
- break;
+ return;
}
+ skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum));
+ skb->ip_summed = CHECKSUM_COMPLETE;
+
return;
checksum_fail:
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hw_csum_err);
+ u64_stats_inc(&rxq->q_stats.hw_csum_err);
u64_stats_update_end(&rxq->stats_sync);
}
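
The CHECKSUM_COMPLETE path above seeds skb->csum from the descriptor's raw checksum via ~swab16(). A minimal sketch of that transformation, assuming the HW reports a byte-swapped ones'-complement sum:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t swab16(uint16_t x)
	{
		return (uint16_t)((x << 8) | (x >> 8));
	}

	int main(void)
	{
		uint16_t raw = 0x1234;	/* raw_cs from the descriptor */
		uint16_t folded = (uint16_t)~swab16(raw);

		printf("raw 0x%04x -> skb->csum seed 0x%04x\n",
		       (unsigned int)raw, (unsigned int)folded);
		return 0;
	}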
/**
* idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor
* @rx_desc: receive descriptor
- * @csum: structure to extract checksum fields
*
+ * Return: parsed checksum status.
**/
-static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
- struct idpf_rx_csum_decoded *csum)
+static struct idpf_rx_csum_decoded
+idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
+ struct idpf_rx_csum_decoded csum = { };
u8 qword0, qword1;
qword0 = rx_desc->status_err0_qw0;
qword1 = rx_desc->status_err0_qw1;
- csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
+ csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M,
+ qword1);
+ csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
qword1);
- csum->eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M,
+ csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
+ qword1);
+ csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
qword1);
- csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M,
- qword1);
- csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M,
- qword1);
- csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
- qword0);
- csum->raw_csum_inv =
+ csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M,
+ qword0);
+ csum.raw_csum_inv =
le16_get_bits(rx_desc->ptype_err_fflags0,
VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M);
- csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
+ csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs);
+
+ return csum;
}
/**
@@ -2860,23 +3018,24 @@ static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_n
* Populate the skb fields with the total number of RSC segments, RSC payload
* length and packet type.
*/
-static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
- struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
- struct idpf_rx_ptype_decoded *decoded)
+static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc,
+ struct libeth_rx_pt decoded)
{
u16 rsc_segments, rsc_seg_len;
bool ipv4, ipv6;
int len;
- if (unlikely(!decoded->outer_ip))
+ if (unlikely(libeth_rx_pt_get_ip_ver(decoded) ==
+ LIBETH_RX_PT_OUTER_L2))
return -EINVAL;
rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen);
if (unlikely(!rsc_seg_len))
return -EINVAL;
- ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4);
- ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6);
+ ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4;
+ ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6;
if (unlikely(!(ipv4 ^ ipv6)))
return -EINVAL;
@@ -2914,7 +3073,7 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
tcp_gro_complete(skb);
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.rsc_pkts);
+ u64_stats_inc(&rxq->q_stats.rsc_pkts);
u64_stats_update_end(&rxq->stats_sync);
return 0;
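
The segment accounting in idpf_rx_rsc() boils down to one division: the descriptor supplies the per-segment length and the segment count falls out of the payload size. A sketch with made-up numbers:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int pkt_len = 64000, seg_len = 1448;

		printf("gso_size=%u gso_segs=%u\n", seg_len,
		       DIV_ROUND_UP(pkt_len, seg_len));	/* 45 segments */
		return 0;
	}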
@@ -2930,35 +3089,31 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb,
* order to populate the hash, checksum, protocol, and
* other fields within the skb.
*/
-static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
- struct sk_buff *skb,
- struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
+static int
+idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb,
+ const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc)
{
- struct idpf_rx_csum_decoded csum_bits = { };
- struct idpf_rx_ptype_decoded decoded;
+ struct idpf_rx_csum_decoded csum_bits;
+ struct libeth_rx_pt decoded;
u16 rx_ptype;
rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0,
VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M);
-
- skb->protocol = eth_type_trans(skb, rxq->vport->netdev);
-
- decoded = rxq->vport->rx_ptype_lkup[rx_ptype];
- /* If we don't know the ptype we can't do anything else with it. Just
- * pass it up the stack as-is.
- */
- if (!decoded.known)
- return 0;
+ decoded = rxq->rx_ptype_lkup[rx_ptype];
/* process RSS/hash */
- idpf_rx_hash(rxq, skb, rx_desc, &decoded);
+ idpf_rx_hash(rxq, skb, rx_desc, decoded);
+
+ skb->protocol = eth_type_trans(skb, rxq->netdev);
if (le16_get_bits(rx_desc->hdrlen_flags,
VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M))
- return idpf_rx_rsc(rxq, skb, rx_desc, &decoded);
+ return idpf_rx_rsc(rxq, skb, rx_desc, decoded);
- idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits);
- idpf_rx_csum(rxq, skb, &csum_bits, &decoded);
+ csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc);
+ idpf_rx_csum(rxq, skb, csum_bits, decoded);
+
+ skb_record_rx_queue(skb, rxq->idx);
return 0;
}
@@ -2976,104 +3131,73 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq,
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int size)
{
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
- rx_buf->page_offset, size, rx_buf->truesize);
+ u32 hr = rx_buf->page->pp->p.offset;
- rx_buf->page = NULL;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
+ rx_buf->offset + hr, size, rx_buf->truesize);
}
/**
- * idpf_rx_construct_skb - Allocate skb and populate it
- * @rxq: Rx descriptor queue
- * @rx_buf: Rx buffer to pull data from
- * @size: the length of the packet
+ * idpf_rx_hsplit_wa - handle header buffer overflows and split errors
+ * @hdr: Rx buffer for the headers
+ * @buf: Rx buffer for the payload
+ * @data_len: number of bytes received to the payload buffer
+ *
+ * When a header buffer overflow occurs or the HW was unable to parse the
+ * packet type to perform header split, the whole frame gets placed in the
+ * payload buffer. We can't build a valid skb around a payload buffer when
+ * the header split is active since it doesn't reserve any head- or tailroom.
+ * In that case, copy either the whole frame when it's short or just the
+ * Ethernet header to the header buffer to be able to build an skb and adjust
+ * the data offset in the payload buffer, IOW emulate the header split.
*
- * This function allocates an skb. It then populates it with the page
- * data from the current receive descriptor, taking care to set up the
- * skb correctly.
+ * Return: number of bytes copied to the header buffer.
*/
-struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
- struct idpf_rx_buf *rx_buf,
- unsigned int size)
+static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr,
+ struct libeth_fqe *buf, u32 data_len)
{
- unsigned int headlen;
- struct sk_buff *skb;
- void *va;
+ u32 copy = data_len <= L1_CACHE_BYTES ? data_len : ETH_HLEN;
+ const void *src;
+ void *dst;
- va = page_address(rx_buf->page) + rx_buf->page_offset;
-
- /* prefetch first cache line of first page */
- net_prefetch(va);
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE,
- GFP_ATOMIC);
- if (unlikely(!skb)) {
- idpf_rx_put_page(rx_buf);
-
- return NULL;
- }
-
- skb_record_rx_queue(skb, rxq->idx);
- skb_mark_for_recycle(skb);
-
- /* Determine available headroom for copy */
- headlen = size;
- if (headlen > IDPF_RX_HDR_SIZE)
- headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
-
- /* if we exhaust the linear part then add what is left as a frag */
- size -= headlen;
- if (!size) {
- idpf_rx_put_page(rx_buf);
-
- return skb;
- }
+ if (!libeth_rx_sync_for_cpu(buf, copy))
+ return 0;
- skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen,
- size, rx_buf->truesize);
+ dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset;
+ src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset;
+ memcpy(dst, src, LARGEST_ALIGN(copy));
- /* Since we're giving the page to the stack, clear our reference to it.
- * We'll get a new one during buffer posting.
- */
- rx_buf->page = NULL;
+ buf->offset += copy;
- return skb;
+ return copy;
}
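
A user-space model of the workaround just defined: when the split failed, pull either the entire short frame or just the Ethernet header out of the payload buffer and advance the payload offset, emulating what the HW should have done. The 64-byte cut-off mirrors L1_CACHE_BYTES on a common config; all sizes are illustrative:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define ETH_HLEN	14
	#define CACHELINE	64

	static uint32_t hsplit_wa(uint8_t *hdr, uint8_t *pay,
				  uint32_t *pay_off, uint32_t data_len)
	{
		uint32_t copy = data_len <= CACHELINE ? data_len : ETH_HLEN;

		memcpy(hdr, pay + *pay_off, copy);
		*pay_off += copy;	/* payload now starts right after
					 * what we pulled out */
		return copy;
	}

	int main(void)
	{
		uint8_t hdr[256], pay[2048] = { 0xaa };
		uint32_t off = 0;

		printf("copied %u header bytes, payload offset now %u\n",
		       hsplit_wa(hdr, pay, &off, 1500), off);
		return 0;
	}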
/**
- * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer
- * @rxq: Rx descriptor queue
- * @va: Rx buffer to pull data from
+ * idpf_rx_build_skb - Allocate skb and populate it from header buffer
+ * @buf: Rx buffer to pull data from
* @size: the length of the packet
*
* This function allocates an skb. It then populates it with the page data from
* the current receive descriptor, taking care to set up the skb correctly.
- * This specifically uses a header buffer to start building the skb.
*/
-static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq,
- const void *va,
- unsigned int size)
+struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size)
{
+ u32 hr = buf->page->pp->p.offset;
struct sk_buff *skb;
+ void *va;
+
+ va = page_address(buf->page) + buf->offset;
+ prefetch(va + hr);
- /* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rxq->q_vector->napi, size, GFP_ATOMIC);
+ skb = napi_build_skb(va, buf->truesize);
if (unlikely(!skb))
return NULL;
- skb_record_rx_queue(skb, rxq->idx);
-
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* More than likely, a payload fragment, which will use a page from
- * page_pool will be added to the SKB so mark it for recycle
- * preemptively. And if not, it's inconsequential.
- */
skb_mark_for_recycle(skb);
+ skb_reserve(skb, hr);
+ __skb_put(skb, size);
+
return skb;
}
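
idpf_rx_build_skb() wraps an skb around memory the NIC already filled, so the old alloc-and-copy disappears: reserve the page pool's headroom, then mark the received bytes as payload. A toy model of the pointer arithmetic — the 192-byte headroom is an assumption, the real value comes from pp->p.offset:

	#include <stdio.h>

	struct toy_skb {
		unsigned char *head, *data, *tail;
	};

	static void toy_reserve(struct toy_skb *s, unsigned int len)
	{
		s->data += len;
		s->tail += len;
	}

	static void toy_put(struct toy_skb *s, unsigned int len)
	{
		s->tail += len;
	}

	int main(void)
	{
		static unsigned char page[4096];
		struct toy_skb skb = { page, page, page };

		toy_reserve(&skb, 192);	/* pool headroom, value assumed */
		toy_put(&skb, 1500);	/* payload the NIC already wrote */

		printf("data off %ld, len %ld\n",
		       (long)(skb.data - skb.head),
		       (long)(skb.tail - skb.data));
		return 0;
	}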
@@ -3116,31 +3240,27 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de
*
* Returns amount of work completed
*/
-static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
+static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget)
{
int total_rx_bytes = 0, total_rx_pkts = 0;
- struct idpf_queue *rx_bufq = NULL;
+ struct idpf_buf_queue *rx_bufq = NULL;
struct sk_buff *skb = rxq->skb;
u16 ntc = rxq->next_to_clean;
/* Process Rx packets bounded by budget */
while (likely(total_rx_pkts < budget)) {
struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
+ struct libeth_fqe *hdr, *rx_buf = NULL;
struct idpf_sw_queue *refillq = NULL;
struct idpf_rxq_set *rxq_set = NULL;
- struct idpf_rx_buf *rx_buf = NULL;
- union virtchnl2_rx_desc *desc;
unsigned int pkt_len = 0;
unsigned int hdr_len = 0;
u16 gen_id, buf_id = 0;
- /* Header buffer overflow only valid for header split */
- bool hbo = false;
int bufq_id;
u8 rxdid;
/* get the Rx desc from Rx queue based on 'next_to_clean' */
- desc = IDPF_RX_DESC(rxq, ntc);
- rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc;
+ rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc
@@ -3151,7 +3271,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M);
- if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id)
+ if (idpf_queue_has(GEN_CHK, rxq) != gen_id)
break;
rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M,
@@ -3159,7 +3279,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) {
IDPF_RX_BUMP_NTC(rxq, ntc);
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.bad_descs);
+ u64_stats_inc(&rxq->q_stats.bad_descs);
u64_stats_update_end(&rxq->stats_sync);
continue;
}
@@ -3167,71 +3287,79 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget)
pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M);
- hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M,
- rx_desc->status_err0_qw1);
-
- if (unlikely(hbo)) {
- /* If a header buffer overflow, occurs, i.e. header is
- * too large to fit in the header split buffer, HW will
- * put the entire packet, including headers, in the
- * data/payload buffer.
- */
- u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf);
- u64_stats_update_end(&rxq->stats_sync);
- goto bypass_hsplit;
- }
-
- hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
- VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M);
-
-bypass_hsplit:
bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id,
VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M);
rxq_set = container_of(rxq, struct idpf_rxq_set, rxq);
- if (!bufq_id)
- refillq = rxq_set->refillq0;
- else
- refillq = rxq_set->refillq1;
+ refillq = rxq_set->refillq[bufq_id];
/* retrieve buffer from the rxq */
- rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq;
+ rx_bufq = &rxq->bufq_sets[bufq_id].bufq;
buf_id = le16_to_cpu(rx_desc->buf_id);
- rx_buf = &rx_bufq->rx_buf.buf[buf_id];
+ rx_buf = &rx_bufq->buf[buf_id];
- if (hdr_len) {
- const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va +
- (u32)buf_id * IDPF_HDR_BUF_SIZE;
+ if (!rx_bufq->hdr_pp)
+ goto payload;
+
+#define __HBO_BIT VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M
+#define __HDR_LEN_MASK VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M
+ if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT)))
+ /* If a header buffer overflow occurs, i.e. the header is
+ * too large to fit in the header split buffer, HW will
+ * put the entire packet, including headers, in the
+ * data/payload buffer.
+ */
+ hdr_len = le16_get_bits(rx_desc->hdrlen_flags,
+ __HDR_LEN_MASK);
+#undef __HDR_LEN_MASK
+#undef __HBO_BIT
+
+ hdr = &rx_bufq->hdr_buf[buf_id];
+
+ if (unlikely(!hdr_len && !skb)) {
+ hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len);
+ pkt_len -= hdr_len;
- skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len);
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts);
+ u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf);
u64_stats_update_end(&rxq->stats_sync);
}
- if (pkt_len) {
- idpf_rx_sync_for_cpu(rx_buf, pkt_len);
- if (skb)
- idpf_rx_add_frag(rx_buf, skb, pkt_len);
- else
- skb = idpf_rx_construct_skb(rxq, rx_buf,
- pkt_len);
- } else {
- idpf_rx_put_page(rx_buf);
+ if (libeth_rx_sync_for_cpu(hdr, hdr_len)) {
+ skb = idpf_rx_build_skb(hdr, hdr_len);
+ if (!skb)
+ break;
+
+ u64_stats_update_begin(&rxq->stats_sync);
+ u64_stats_inc(&rxq->q_stats.hsplit_pkts);
+ u64_stats_update_end(&rxq->stats_sync);
}
+ hdr->page = NULL;
+
+payload:
+ if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len))
+ goto skip_data;
+
+ if (skb)
+ idpf_rx_add_frag(rx_buf, skb, pkt_len);
+ else
+ skb = idpf_rx_build_skb(rx_buf, pkt_len);
+
/* exit if we failed to retrieve a buffer */
if (!skb)
break;
- idpf_rx_post_buf_refill(refillq, buf_id);
+skip_data:
+ rx_buf->page = NULL;
+ idpf_rx_post_buf_refill(refillq, buf_id);
IDPF_RX_BUMP_NTC(rxq, ntc);
+
/* skip if it is non EOP desc */
- if (!idpf_rx_splitq_is_eop(rx_desc))
+ if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb))
continue;
/* pad skb if needed (to make valid ethernet frame) */
@@ -3251,7 +3379,7 @@ bypass_hsplit:
}
/* send completed skb up the stack */
- napi_gro_receive(&rxq->q_vector->napi, skb);
+ napi_gro_receive(rxq->napi, skb);
skb = NULL;
/* update budget accounting */
@@ -3262,8 +3390,8 @@ bypass_hsplit:
rxq->skb = skb;
u64_stats_update_begin(&rxq->stats_sync);
- u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts);
- u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes);
+ u64_stats_add(&rxq->q_stats.packets, total_rx_pkts);
+ u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes);
u64_stats_update_end(&rxq->stats_sync);
/* guarantee a trip back through this routine if there was a failure */
@@ -3273,34 +3401,41 @@ bypass_hsplit:
/**
* idpf_rx_update_bufq_desc - Update buffer queue descriptor
* @bufq: Pointer to the buffer queue
- * @refill_desc: SW Refill queue descriptor containing buffer ID
+ * @buf_id: buffer ID
* @buf_desc: Buffer queue descriptor
*
* Return 0 on success and negative on failure.
*/
-static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc,
+static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id,
struct virtchnl2_splitq_rx_buf_desc *buf_desc)
{
- struct idpf_rx_buf *buf;
+ struct libeth_fq_fp fq = {
+ .pp = bufq->pp,
+ .fqes = bufq->buf,
+ .truesize = bufq->truesize,
+ .count = bufq->desc_count,
+ };
dma_addr_t addr;
- u16 buf_id;
-
- buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
- buf = &bufq->rx_buf.buf[buf_id];
-
- addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size);
- if (unlikely(addr == DMA_MAPPING_ERROR))
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
return -ENOMEM;
buf_desc->pkt_addr = cpu_to_le64(addr);
buf_desc->qword0.buf_id = cpu_to_le16(buf_id);
- if (!bufq->rx_hsplit_en)
+ if (!idpf_queue_has(HSPLIT_EN, bufq))
return 0;
- buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa +
- (u32)buf_id * IDPF_HDR_BUF_SIZE);
+ fq.pp = bufq->hdr_pp;
+ fq.fqes = bufq->hdr_buf;
+ fq.truesize = bufq->hdr_truesize;
+
+ addr = libeth_rx_alloc(&fq, buf_id);
+ if (addr == DMA_MAPPING_ERROR)
+ return -ENOMEM;
+
+ buf_desc->hdr_addr = cpu_to_le64(addr);
return 0;
}
@@ -3312,38 +3447,37 @@ static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc,
*
* This function takes care of the buffer refill management
*/
-static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
+static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq,
struct idpf_sw_queue *refillq)
{
struct virtchnl2_splitq_rx_buf_desc *buf_desc;
u16 bufq_nta = bufq->next_to_alloc;
u16 ntc = refillq->next_to_clean;
int cleaned = 0;
- u16 gen;
- buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta);
+ buf_desc = &bufq->split_buf[bufq_nta];
/* make sure we stop at ring wrap in the unlikely case ring is full */
while (likely(cleaned < refillq->desc_count)) {
- u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc);
+ u32 buf_id, refill_desc = refillq->ring[ntc];
bool failure;
- gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc);
- if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen)
+ if (idpf_queue_has(RFL_GEN_CHK, refillq) !=
+ !!(refill_desc & IDPF_RX_BI_GEN_M))
break;
- failure = idpf_rx_update_bufq_desc(bufq, refill_desc,
- buf_desc);
+ buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc);
+ failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc);
if (failure)
break;
if (unlikely(++ntc == refillq->desc_count)) {
- change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags);
+ idpf_queue_change(RFL_GEN_CHK, refillq);
ntc = 0;
}
if (unlikely(++bufq_nta == bufq->desc_count)) {
- buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0);
+ buf_desc = &bufq->split_buf[0];
bufq_nta = 0;
} else {
buf_desc++;
@@ -3372,16 +3506,21 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq,
/**
* idpf_rx_clean_refillq_all - Clean all refill queues
* @bufq: buffer queue with refill queues
+ * @nid: ID of the closest NUMA node with memory
*
* Iterates through all refill queues assigned to the buffer queue assigned to
 * this vector.
*/
-static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq)
+static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid)
{
struct idpf_bufq_set *bufq_set;
int i;
+ page_pool_nid_changed(bufq->pp, nid);
+ if (bufq->hdr_pp)
+ page_pool_nid_changed(bufq->hdr_pp, nid);
+
bufq_set = container_of(bufq, struct idpf_bufq_set, bufq);
for (i = 0; i < bufq_set->num_refillqs; i++)
idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]);
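
The refill queue is a single-producer/single-consumer u32 ring: bits 15:0 of each entry carry the buffer ID and bit 16 a generation flag, which the consumer compares against its own RFL_GEN_CHK queue flag and flips on every ring wrap. A minimal sketch of the consumer-side validity check, using only the IDPF_RX_BI_* layout and flag helpers this patch introduces (the helper name is hypothetical):

static bool idpf_refill_desc_valid(const struct idpf_sw_queue *refillq,
				   u32 ntc)
{
	u32 refill_desc = refillq->ring[ntc];

	/* An entry is valid while its gen bit matches the consumer's
	 * RFL_GEN_CHK flag; a mismatch means the producer has not yet
	 * rewritten this slot on the current lap around the ring.
	 */
	return idpf_queue_has(RFL_GEN_CHK, refillq) ==
	       !!(refill_desc & IDPF_RX_BI_GEN_M);
}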
@@ -3437,39 +3576,21 @@ static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport)
*/
void idpf_vport_intr_rel(struct idpf_vport *vport)
{
- int i, j, v_idx;
-
- for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
+ for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx];
+ kfree(q_vector->complq);
+ q_vector->complq = NULL;
kfree(q_vector->bufq);
q_vector->bufq = NULL;
kfree(q_vector->tx);
q_vector->tx = NULL;
kfree(q_vector->rx);
q_vector->rx = NULL;
- }
- /* Clean up the mapping of queues to vectors */
- for (i = 0; i < vport->num_rxq_grp; i++) {
- struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
-
- if (idpf_is_queue_model_split(vport->rxq_model))
- for (j = 0; j < rx_qgrp->splitq.num_rxq_sets; j++)
- rx_qgrp->splitq.rxq_sets[j]->rxq.q_vector = NULL;
- else
- for (j = 0; j < rx_qgrp->singleq.num_rxq; j++)
- rx_qgrp->singleq.rxqs[j]->q_vector = NULL;
+ free_cpumask_var(q_vector->affinity_mask);
}
- if (idpf_is_queue_model_split(vport->txq_model))
- for (i = 0; i < vport->num_txq_grp; i++)
- vport->txq_grps[i].complq->q_vector = NULL;
- else
- for (i = 0; i < vport->num_txq_grp; i++)
- for (j = 0; j < vport->txq_grps[i].num_txq; j++)
- vport->txq_grps[i].txqs[j]->q_vector = NULL;
-
kfree(vport->q_vectors);
vport->q_vectors = NULL;
}
@@ -3496,7 +3617,7 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport)
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(irq_num, NULL);
- free_irq(irq_num, q_vector);
+ kfree(free_irq(irq_num, q_vector));
}
}
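
free_irq() returns the devname string that was originally passed to request_irq(), so kfree(free_irq(...)) releases the kasprintf()'d vector name without the driver having to keep its own pointer; this is what allows the patch to drop the old q_vector->name field.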
@@ -3580,13 +3701,13 @@ static void idpf_net_dim(struct idpf_q_vector *q_vector)
goto check_rx_itr;
for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) {
- struct idpf_queue *txq = q_vector->tx[i];
+ struct idpf_tx_queue *txq = q_vector->tx[i];
unsigned int start;
do {
start = u64_stats_fetch_begin(&txq->stats_sync);
- packets += u64_stats_read(&txq->q_stats.tx.packets);
- bytes += u64_stats_read(&txq->q_stats.tx.bytes);
+ packets += u64_stats_read(&txq->q_stats.packets);
+ bytes += u64_stats_read(&txq->q_stats.bytes);
} while (u64_stats_fetch_retry(&txq->stats_sync, start));
}
@@ -3599,13 +3720,13 @@ check_rx_itr:
return;
for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) {
- struct idpf_queue *rxq = q_vector->rx[i];
+ struct idpf_rx_queue *rxq = q_vector->rx[i];
unsigned int start;
do {
start = u64_stats_fetch_begin(&rxq->stats_sync);
- packets += u64_stats_read(&rxq->q_stats.rx.packets);
- bytes += u64_stats_read(&rxq->q_stats.rx.bytes);
+ packets += u64_stats_read(&rxq->q_stats.packets);
+ bytes += u64_stats_read(&rxq->q_stats.bytes);
} while (u64_stats_fetch_retry(&rxq->stats_sync, start));
}
@@ -3637,16 +3758,19 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector)
/**
* idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport
* @vport: main vport structure
- * @basename: name for the vector
*/
-static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
+static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
{
struct idpf_adapter *adapter = vport->adapter;
+ const char *drv_name, *if_name, *vec_name;
int vector, err, irq_num, vidx;
- const char *vec_name;
+
+ drv_name = dev_driver_string(&adapter->pdev->dev);
+ if_name = netdev_name(vport->netdev);
for (vector = 0; vector < vport->num_q_vectors; vector++) {
struct idpf_q_vector *q_vector = &vport->q_vectors[vector];
+ char *name;
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
@@ -3660,18 +3784,18 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename)
else
continue;
- q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d",
- basename, vec_name, vidx);
+ name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name,
+ vec_name, vidx);
err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
- q_vector->name, q_vector);
+ name, q_vector);
if (err) {
netdev_err(vport->netdev,
"Request_irq failed, error: %d\n", err);
goto free_q_irqs;
}
/* assign the mask for this irq */
- irq_set_affinity_hint(irq_num, &q_vector->affinity_mask);
+ irq_set_affinity_hint(irq_num, q_vector->affinity_mask);
}
return 0;
@@ -3680,7 +3804,7 @@ free_q_irqs:
while (--vector >= 0) {
vidx = vport->q_vector_idxs[vector];
irq_num = adapter->msix_entries[vidx].vector;
- free_irq(irq_num, &vport->q_vectors[vector]);
+ kfree(free_irq(irq_num, &vport->q_vectors[vector]));
}
return err;
@@ -3747,9 +3871,9 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
*/
void idpf_vport_intr_deinit(struct idpf_vport *vport)
{
+ idpf_vport_intr_dis_irq_all(vport);
idpf_vport_intr_napi_dis_all(vport);
idpf_vport_intr_napi_del_all(vport);
- idpf_vport_intr_dis_irq_all(vport);
idpf_vport_intr_rel_irq(vport);
}
@@ -3847,16 +3971,17 @@ static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport)
static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec,
int budget, int *cleaned)
{
- u16 num_txq = q_vec->num_txq;
+ u16 num_complq = q_vec->num_complq;
bool clean_complete = true;
int i, budget_per_q;
- if (unlikely(!num_txq))
+ if (unlikely(!num_complq))
return true;
- budget_per_q = DIV_ROUND_UP(budget, num_txq);
- for (i = 0; i < num_txq; i++)
- clean_complete &= idpf_tx_clean_complq(q_vec->tx[i],
+ budget_per_q = DIV_ROUND_UP(budget, num_complq);
+
+ for (i = 0; i < num_complq; i++)
+ clean_complete &= idpf_tx_clean_complq(q_vec->complq[i],
budget_per_q, cleaned);
return clean_complete;
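
As a worked example: with the default NAPI budget of 64 and three completion queues on a vector, DIV_ROUND_UP(64, 3) = 22 descriptors of cleaning budget per queue; rounding up rather than down ensures every completion queue always receives a non-zero share.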
@@ -3877,13 +4002,14 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
bool clean_complete = true;
int pkts_cleaned = 0;
int i, budget_per_q;
+ int nid;
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
*/
budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0;
for (i = 0; i < num_rxq; i++) {
- struct idpf_queue *rxq = q_vec->rx[i];
+ struct idpf_rx_queue *rxq = q_vec->rx[i];
int pkts_cleaned_per_q;
pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q);
@@ -3894,8 +4020,10 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget,
}
*cleaned = pkts_cleaned;
+ nid = numa_mem_id();
+
for (i = 0; i < q_vec->num_bufq; i++)
- idpf_rx_clean_refillq_all(q_vec->bufq[i]);
+ idpf_rx_clean_refillq_all(q_vec->bufq[i], nid);
return clean_complete;
}
@@ -3938,8 +4066,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
* queues virtchnl message, as the interrupts will be disabled after
* that
*/
- if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE,
- q_vector->tx[0]->flags)))
+ if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE,
+ q_vector->tx[0])))
return budget;
else
return work_done;
@@ -3953,27 +4081,28 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
*/
static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
{
+ bool split = idpf_is_queue_model_split(vport->rxq_model);
u16 num_txq_grp = vport->num_txq_grp;
- int i, j, qv_idx, bufq_vidx = 0;
struct idpf_rxq_group *rx_qgrp;
struct idpf_txq_group *tx_qgrp;
- struct idpf_queue *q, *bufq;
- u16 q_index;
+ u32 i, qv_idx, q_index;
for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) {
u16 num_rxq;
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
rx_qgrp = &vport->rxq_grps[i];
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (split)
num_rxq = rx_qgrp->splitq.num_rxq_sets;
else
num_rxq = rx_qgrp->singleq.num_rxq;
- for (j = 0; j < num_rxq; j++) {
- if (qv_idx >= vport->num_q_vectors)
- qv_idx = 0;
+ for (u32 j = 0; j < num_rxq; j++) {
+ struct idpf_rx_queue *q;
- if (idpf_is_queue_model_split(vport->rxq_model))
+ if (split)
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
@@ -3981,52 +4110,53 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport)
q_index = q->q_vector->num_rxq;
q->q_vector->rx[q_index] = q;
q->q_vector->num_rxq++;
- qv_idx++;
+
+ if (split)
+ q->napi = &q->q_vector->napi;
}
- if (idpf_is_queue_model_split(vport->rxq_model)) {
- for (j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ if (split) {
+ for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) {
+ struct idpf_buf_queue *bufq;
+
bufq = &rx_qgrp->splitq.bufq_sets[j].bufq;
- bufq->q_vector = &vport->q_vectors[bufq_vidx];
+ bufq->q_vector = &vport->q_vectors[qv_idx];
q_index = bufq->q_vector->num_bufq;
bufq->q_vector->bufq[q_index] = bufq;
bufq->q_vector->num_bufq++;
}
- if (++bufq_vidx >= vport->num_q_vectors)
- bufq_vidx = 0;
}
+
+ qv_idx++;
}
+ split = idpf_is_queue_model_split(vport->txq_model);
+
for (i = 0, qv_idx = 0; i < num_txq_grp; i++) {
u16 num_txq;
+ if (qv_idx >= vport->num_q_vectors)
+ qv_idx = 0;
+
tx_qgrp = &vport->txq_grps[i];
num_txq = tx_qgrp->num_txq;
- if (idpf_is_queue_model_split(vport->txq_model)) {
- if (qv_idx >= vport->num_q_vectors)
- qv_idx = 0;
+ for (u32 j = 0; j < num_txq; j++) {
+ struct idpf_tx_queue *q;
- q = tx_qgrp->complq;
+ q = tx_qgrp->txqs[j];
q->q_vector = &vport->q_vectors[qv_idx];
- q_index = q->q_vector->num_txq;
- q->q_vector->tx[q_index] = q;
- q->q_vector->num_txq++;
- qv_idx++;
- } else {
- for (j = 0; j < num_txq; j++) {
- if (qv_idx >= vport->num_q_vectors)
- qv_idx = 0;
+ q->q_vector->tx[q->q_vector->num_txq++] = q;
+ }
- q = tx_qgrp->txqs[j];
- q->q_vector = &vport->q_vectors[qv_idx];
- q_index = q->q_vector->num_txq;
- q->q_vector->tx[q_index] = q;
- q->q_vector->num_txq++;
+ if (split) {
+ struct idpf_compl_queue *q = tx_qgrp->complq;
- qv_idx++;
- }
+ q->q_vector = &vport->q_vectors[qv_idx];
+ q->q_vector->complq[q->q_vector->num_complq++] = q;
}
+
+ qv_idx++;
}
}
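
Both the Rx and Tx loops above now share one round-robin shape; a condensed sketch of the pattern (the names below are hypothetical stand-ins for the per-queue bookkeeping):

	/* Round-robin: group i lands on vector (i % num_vectors); the
	 * running index is reset at the top of each iteration instead
	 * of computing a modulo.
	 */
	for (i = 0, qv_idx = 0; i < num_groups; i++, qv_idx++) {
		if (qv_idx >= num_vectors)
			qv_idx = 0;

		assign_group_to_vector(&groups[i], &vectors[qv_idx]);
	}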
@@ -4087,7 +4217,7 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport)
/* only set affinity_mask if the CPU is online */
if (cpu_online(v_idx))
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ cpumask_set_cpu(v_idx, q_vector->affinity_mask);
}
}
@@ -4102,18 +4232,22 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
{
u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector;
struct idpf_q_vector *q_vector;
- int v_idx, err;
+ u32 complqs_per_vector, v_idx;
vport->q_vectors = kcalloc(vport->num_q_vectors,
sizeof(struct idpf_q_vector), GFP_KERNEL);
if (!vport->q_vectors)
return -ENOMEM;
- txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors);
- rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors);
+ txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
+ vport->num_q_vectors);
+ rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp,
+ vport->num_q_vectors);
bufqs_per_vector = vport->num_bufqs_per_qgrp *
DIV_ROUND_UP(vport->num_rxq_grp,
vport->num_q_vectors);
+ complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp,
+ vport->num_q_vectors);
for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) {
q_vector = &vport->q_vectors[v_idx];
@@ -4127,32 +4261,33 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC;
q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0;
- q_vector->tx = kcalloc(txqs_per_vector,
- sizeof(struct idpf_queue *),
+ if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL))
+ goto error;
+
+ q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx),
GFP_KERNEL);
- if (!q_vector->tx) {
- err = -ENOMEM;
+ if (!q_vector->tx)
goto error;
- }
- q_vector->rx = kcalloc(rxqs_per_vector,
- sizeof(struct idpf_queue *),
+ q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx),
GFP_KERNEL);
- if (!q_vector->rx) {
- err = -ENOMEM;
+ if (!q_vector->rx)
goto error;
- }
if (!idpf_is_queue_model_split(vport->rxq_model))
continue;
q_vector->bufq = kcalloc(bufqs_per_vector,
- sizeof(struct idpf_queue *),
+ sizeof(*q_vector->bufq),
GFP_KERNEL);
- if (!q_vector->bufq) {
- err = -ENOMEM;
+ if (!q_vector->bufq)
+ goto error;
+
+ q_vector->complq = kcalloc(complqs_per_vector,
+ sizeof(*q_vector->complq),
+ GFP_KERNEL);
+ if (!q_vector->complq)
goto error;
- }
}
return 0;
@@ -4160,7 +4295,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport)
error:
idpf_vport_intr_rel(vport);
- return err;
+ return -ENOMEM;
}
/**
@@ -4171,7 +4306,6 @@ error:
*/
int idpf_vport_intr_init(struct idpf_vport *vport)
{
- char *int_name;
int err;
err = idpf_vport_intr_init_vec_idx(vport);
@@ -4180,31 +4314,29 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
idpf_vport_intr_map_vector_to_qs(vport);
idpf_vport_intr_napi_add_all(vport);
- idpf_vport_intr_napi_ena_all(vport);
err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
if (err)
goto unroll_vectors_alloc;
- int_name = kasprintf(GFP_KERNEL, "%s-%s",
- dev_driver_string(&vport->adapter->pdev->dev),
- vport->netdev->name);
-
- err = idpf_vport_intr_req_irq(vport, int_name);
+ err = idpf_vport_intr_req_irq(vport);
if (err)
goto unroll_vectors_alloc;
- idpf_vport_intr_ena_irq_all(vport);
-
return 0;
unroll_vectors_alloc:
- idpf_vport_intr_napi_dis_all(vport);
idpf_vport_intr_napi_del_all(vport);
return err;
}
+void idpf_vport_intr_ena(struct idpf_vport *vport)
+{
+ idpf_vport_intr_napi_ena_all(vport);
+ idpf_vport_intr_ena_irq_all(vport);
+}
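
Splitting enablement out of idpf_vport_intr_init() lets the caller defer it until the queues are fully configured. Note the ordering, NAPI first and IRQs second, is the exact mirror of idpf_vport_intr_deinit() above, so an interrupt firing immediately after being enabled always finds a schedulable NAPI context.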
+
/**
* idpf_config_rss - Send virtchnl messages to configure RSS
* @vport: virtual port
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index df76493faa75..6215dbee5546 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -4,10 +4,15 @@
#ifndef _IDPF_TXRX_H_
#define _IDPF_TXRX_H_
-#include <net/page_pool/helpers.h>
+#include <linux/dim.h>
+
+#include <net/libeth/cache.h>
#include <net/tcp.h>
#include <net/netdev_queues.h>
+#include "idpf_lan_txrx.h"
+#include "virtchnl2_lan_desc.h"
+
#define IDPF_LARGE_MAX_Q 256
#define IDPF_MAX_Q 16
#define IDPF_MIN_Q 2
@@ -81,7 +86,7 @@
do { \
if (unlikely(++(ntc) == (rxq)->desc_count)) { \
ntc = 0; \
- change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags); \
+ idpf_queue_change(GEN_CHK, rxq); \
} \
} while (0)
@@ -91,16 +96,10 @@ do { \
idx = 0; \
} while (0)
-#define IDPF_RX_HDR_SIZE 256
-#define IDPF_RX_BUF_2048 2048
-#define IDPF_RX_BUF_4096 4096
#define IDPF_RX_BUF_STRIDE 32
#define IDPF_RX_BUF_POST_STRIDE 16
#define IDPF_LOW_WATERMARK 64
-/* Size of header buffer specifically for header split */
-#define IDPF_HDR_BUF_SIZE 256
-#define IDPF_PACKET_HDR_PAD \
- (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2)
+
#define IDPF_TX_TSO_MIN_MSS 88
/* Minimum number of descriptors between 2 descriptors with the RE bit set;
@@ -108,36 +107,17 @@ do { \
*/
#define IDPF_TX_SPLITQ_RE_MIN_GAP 64
-#define IDPF_RX_BI_BUFID_S 0
-#define IDPF_RX_BI_BUFID_M GENMASK(14, 0)
-#define IDPF_RX_BI_GEN_S 15
-#define IDPF_RX_BI_GEN_M BIT(IDPF_RX_BI_GEN_S)
+#define IDPF_RX_BI_GEN_M BIT(16)
+#define IDPF_RX_BI_BUFID_M GENMASK(15, 0)
+
#define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M
#define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M
-#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i) \
- (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \
- (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i]))
-#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i])
-
-#define IDPF_BASE_TX_DESC(txq, i) \
- (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_BASE_TX_CTX_DESC(txq, i) \
- (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i]))
-#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \
- (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i]))
-
-#define IDPF_FLEX_TX_DESC(txq, i) \
- (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i]))
-#define IDPF_FLEX_TX_CTX_DESC(txq, i) \
- (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i]))
-
#define IDPF_DESC_UNUSED(txq) \
((((txq)->next_to_clean > (txq)->next_to_use) ? 0 : (txq)->desc_count) + \
(txq)->next_to_clean - (txq)->next_to_use - 1)
-#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top)
+#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top)
#define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \
(txq)->desc_count >> 2)
@@ -313,16 +293,7 @@ struct idpf_rx_extracted {
#define IDPF_TX_MAX_DESC_DATA_ALIGNED \
ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE)
-#define IDPF_RX_DMA_ATTR \
- (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
-#define IDPF_RX_DESC(rxq, i) \
- (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i]))
-
-struct idpf_rx_buf {
- struct page *page;
- unsigned int page_offset;
- u16 truesize;
-};
+#define idpf_rx_buf libeth_fqe
#define IDPF_RX_MAX_PTYPE_PROTO_IDS 32
#define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \
@@ -346,72 +317,6 @@ struct idpf_rx_buf {
#define IDPF_RX_MAX_BASE_PTYPE 256
#define IDPF_INVALID_PTYPE_ID 0xFFFF
-/* Packet type non-ip values */
-enum idpf_rx_ptype_l2 {
- IDPF_RX_PTYPE_L2_RESERVED = 0,
- IDPF_RX_PTYPE_L2_MAC_PAY2 = 1,
- IDPF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2,
- IDPF_RX_PTYPE_L2_FIP_PAY2 = 3,
- IDPF_RX_PTYPE_L2_OUI_PAY2 = 4,
- IDPF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5,
- IDPF_RX_PTYPE_L2_LLDP_PAY2 = 6,
- IDPF_RX_PTYPE_L2_ECP_PAY2 = 7,
- IDPF_RX_PTYPE_L2_EVB_PAY2 = 8,
- IDPF_RX_PTYPE_L2_QCN_PAY2 = 9,
- IDPF_RX_PTYPE_L2_EAPOL_PAY2 = 10,
- IDPF_RX_PTYPE_L2_ARP = 11,
-};
-
-enum idpf_rx_ptype_outer_ip {
- IDPF_RX_PTYPE_OUTER_L2 = 0,
- IDPF_RX_PTYPE_OUTER_IP = 1,
-};
-
-#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv) \
- (((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) && \
- ((ptype)->outer_ip_ver == (ipv)))
-
-enum idpf_rx_ptype_outer_ip_ver {
- IDPF_RX_PTYPE_OUTER_NONE = 0,
- IDPF_RX_PTYPE_OUTER_IPV4 = 1,
- IDPF_RX_PTYPE_OUTER_IPV6 = 2,
-};
-
-enum idpf_rx_ptype_outer_fragmented {
- IDPF_RX_PTYPE_NOT_FRAG = 0,
- IDPF_RX_PTYPE_FRAG = 1,
-};
-
-enum idpf_rx_ptype_tunnel_type {
- IDPF_RX_PTYPE_TUNNEL_NONE = 0,
- IDPF_RX_PTYPE_TUNNEL_IP_IP = 1,
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT = 2,
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3,
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4,
-};
-
-enum idpf_rx_ptype_tunnel_end_prot {
- IDPF_RX_PTYPE_TUNNEL_END_NONE = 0,
- IDPF_RX_PTYPE_TUNNEL_END_IPV4 = 1,
- IDPF_RX_PTYPE_TUNNEL_END_IPV6 = 2,
-};
-
-enum idpf_rx_ptype_inner_prot {
- IDPF_RX_PTYPE_INNER_PROT_NONE = 0,
- IDPF_RX_PTYPE_INNER_PROT_UDP = 1,
- IDPF_RX_PTYPE_INNER_PROT_TCP = 2,
- IDPF_RX_PTYPE_INNER_PROT_SCTP = 3,
- IDPF_RX_PTYPE_INNER_PROT_ICMP = 4,
- IDPF_RX_PTYPE_INNER_PROT_TIMESYNC = 5,
-};
-
-enum idpf_rx_ptype_payload_layer {
- IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0,
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1,
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2,
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3,
-};
-
enum idpf_tunnel_state {
IDPF_PTYPE_TUNNEL_IP = BIT(0),
IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1),
@@ -419,22 +324,9 @@ enum idpf_tunnel_state {
};
struct idpf_ptype_state {
- bool outer_ip;
- bool outer_frag;
- u8 tunnel_state;
-};
-
-struct idpf_rx_ptype_decoded {
- u32 ptype:10;
- u32 known:1;
- u32 outer_ip:1;
- u32 outer_ip_ver:2;
- u32 outer_frag:1;
- u32 tunnel_type:3;
- u32 tunnel_end_prot:2;
- u32 tunnel_end_frag:1;
- u32 inner_prot:4;
- u32 payload_layer:3;
+ bool outer_ip:1;
+ bool outer_frag:1;
+ u8 tunnel_state:6;
};
/**
@@ -450,23 +342,37 @@ struct idpf_rx_ptype_decoded {
* to 1 and knows that reading a gen bit of 1 in any
* descriptor on the initial pass of the ring indicates a
* writeback. It also flips on every ring wrap.
- * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit
- * and RFLGQ_GEN is the SW bit.
+ * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW
+ * bit and Q_RFL_GEN is the SW bit.
* @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
* @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
* @__IDPF_Q_POLL_MODE: Enable poll mode
+ * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode
+ * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq)
* @__IDPF_Q_FLAGS_NBITS: Must be last
*/
enum idpf_queue_flags_t {
__IDPF_Q_GEN_CHK,
- __IDPF_RFLQ_GEN_CHK,
+ __IDPF_Q_RFL_GEN_CHK,
__IDPF_Q_FLOW_SCH_EN,
__IDPF_Q_SW_MARKER,
__IDPF_Q_POLL_MODE,
+ __IDPF_Q_CRC_EN,
+ __IDPF_Q_HSPLIT_EN,
__IDPF_Q_FLAGS_NBITS,
};
+#define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags)
+
+#define idpf_queue_has_clear(f, q) \
+ __test_and_clear_bit(__IDPF_Q_##f, (q)->flags)
+#define idpf_queue_assign(f, q, v) \
+ __assign_bit(__IDPF_Q_##f, (q)->flags, v)
+
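
These wrappers hide both the __IDPF_Q_ prefix and the flags field. A hedged sketch of how a call site might toggle header split with them (illustrative, not a hunk from this patch):

	if (hsplit_ena)
		idpf_queue_set(HSPLIT_EN, bufq);
	else
		idpf_queue_clear(HSPLIT_EN, bufq);

	/* the __assign_bit() form copies the bufq setting onto the rxq */
	idpf_queue_assign(HSPLIT_EN, rxq, idpf_queue_has(HSPLIT_EN, bufq));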
/**
* struct idpf_vec_regs
* @dyn_ctl_reg: Dynamic control interrupt register offset
@@ -507,54 +413,68 @@ struct idpf_intr_reg {
/**
* struct idpf_q_vector
* @vport: Vport back pointer
- * @affinity_mask: CPU affinity mask
- * @napi: napi handler
- * @v_idx: Vector index
- * @intr_reg: See struct idpf_intr_reg
+ * @num_rxq: Number of RX queues
* @num_txq: Number of TX queues
+ * @num_bufq: Number of buffer queues
+ * @num_complq: number of completion queues
+ * @rx: Array of RX queues to service
* @tx: Array of TX queues to service
+ * @bufq: Array of buffer queues to service
+ * @complq: array of completion queues
+ * @intr_reg: See struct idpf_intr_reg
+ * @napi: napi handler
+ * @total_events: Number of interrupts processed
* @tx_dim: Data for TX net_dim algorithm
* @tx_itr_value: TX interrupt throttling rate
* @tx_intr_mode: Dynamic ITR or not
* @tx_itr_idx: TX ITR index
- * @num_rxq: Number of RX queues
- * @rx: Array of RX queues to service
* @rx_dim: Data for RX net_dim algorithm
* @rx_itr_value: RX interrupt throttling rate
* @rx_intr_mode: Dynamic ITR or not
* @rx_itr_idx: RX ITR index
- * @num_bufq: Number of buffer queues
- * @bufq: Array of buffer queues to service
- * @total_events: Number of interrupts processed
- * @name: Queue vector name
+ * @v_idx: Vector index
+ * @affinity_mask: CPU affinity mask
*/
struct idpf_q_vector {
+ __cacheline_group_begin_aligned(read_mostly);
struct idpf_vport *vport;
- cpumask_t affinity_mask;
- struct napi_struct napi;
- u16 v_idx;
- struct idpf_intr_reg intr_reg;
+ u16 num_rxq;
u16 num_txq;
- struct idpf_queue **tx;
+ u16 num_bufq;
+ u16 num_complq;
+ struct idpf_rx_queue **rx;
+ struct idpf_tx_queue **tx;
+ struct idpf_buf_queue **bufq;
+ struct idpf_compl_queue **complq;
+
+ struct idpf_intr_reg intr_reg;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ struct napi_struct napi;
+ u16 total_events;
+
struct dim tx_dim;
u16 tx_itr_value;
bool tx_intr_mode;
u32 tx_itr_idx;
- u16 num_rxq;
- struct idpf_queue **rx;
struct dim rx_dim;
u16 rx_itr_value;
bool rx_intr_mode;
u32 rx_itr_idx;
+ __cacheline_group_end_aligned(read_write);
- u16 num_bufq;
- struct idpf_queue **bufq;
+ __cacheline_group_begin_aligned(cold);
+ u16 v_idx;
- u16 total_events;
- char *name;
+ cpumask_var_t affinity_mask;
+ __cacheline_group_end_aligned(cold);
};
+libeth_cacheline_set_assert(struct idpf_q_vector, 104,
+ 424 + 2 * sizeof(struct dim),
+ 8 + sizeof(cpumask_var_t));
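
The libeth_cacheline_set_assert() checks pin the byte size of each of the three cacheline groups at build time, so a field added to the wrong section breaks the build instead of silently pushing hot members onto another cacheline. Sizes that vary with kernel configuration, such as struct dim and cpumask_var_t (a pointer with CONFIG_CPUMASK_OFFSTACK=y, an embedded mask otherwise), are referenced symbolically rather than hard-coded into the totals.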
struct idpf_rx_queue_stats {
u64_stats_t packets;
@@ -581,11 +501,6 @@ struct idpf_cleaned_stats {
u32 bytes;
};
-union idpf_queue_stats {
- struct idpf_rx_queue_stats rx;
- struct idpf_tx_queue_stats tx;
-};
-
#define IDPF_ITR_DYNAMIC 1
#define IDPF_ITR_MAX 0x1FE0
#define IDPF_ITR_20K 0x0032
@@ -601,68 +516,123 @@ union idpf_queue_stats {
#define IDPF_DIM_DEFAULT_PROFILE_IX 1
/**
- * struct idpf_queue
- * @dev: Device back pointer for DMA mapping
- * @vport: Back pointer to associated vport
- * @txq_grp: See struct idpf_txq_group
- * @rxq_grp: See struct idpf_rxq_group
- * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean,
- * buffer queue uses this index to determine which group of refill queues
- * to clean.
- * For TX queue, it is used as index to map between TX queue group and
- * hot path TX pointers stored in vport. Used in both singleq/splitq.
- * For RX queue, it is used to index to total RX queue across groups and
+ * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode
+ * @buf_stack: Stack of empty buffers to store buffer info for out of order
+ * buffer completions. See struct idpf_buf_lifo
+ * @sched_buf_hash: Hash table to store buffers
+ */
+struct idpf_txq_stash {
+ struct idpf_buf_lifo buf_stack;
+ DECLARE_HASHTABLE(sched_buf_hash, 12);
+} ____cacheline_aligned;
+
+/**
+ * struct idpf_rx_queue - software structure representing a receive queue
+ * @rx: universal receive descriptor array
+ * @single_buf: buffer descriptor array in singleq
+ * @desc_ring: virtual descriptor ring address
+ * @bufq_sets: Pointer to the array of buffer queues in splitq mode
+ * @napi: NAPI instance corresponding to this queue (splitq)
+ * @rx_buf: See struct &libeth_fqe
+ * @pp: Page pool pointer in singleq mode
+ * @netdev: &net_device corresponding to this queue
+ * @tail: Tail offset. Used for both queue models single and split.
+ * @flags: See enum idpf_queue_flags_t
+ * @idx: For RX queue, it is used to index into the total RX queues across groups and
* used for skb reporting.
- * @tail: Tail offset. Used for both queue models single and split. In splitq
- * model relevant only for TX queue and RX queue.
- * @tx_buf: See struct idpf_tx_buf
- * @rx_buf: Struct with RX buffer related members
- * @rx_buf.buf: See struct idpf_rx_buf
- * @rx_buf.hdr_buf_pa: DMA handle
- * @rx_buf.hdr_buf_va: Virtual address
- * @pp: Page pool pointer
- * @skb: Pointer to the skb
- * @q_type: Queue type (TX, RX, TX completion, RX buffer)
- * @q_id: Queue id
* @desc_count: Number of descriptors
- * @next_to_use: Next descriptor to use. Relevant in both split & single txq
- * and bufq.
- * @next_to_clean: Next descriptor to clean. In split queue model, only
- * relevant to TX completion queue and RX queue.
- * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model
- * only relevant to RX queue.
- * @flags: See enum idpf_queue_flags_t
- * @q_stats: See union idpf_queue_stats
+ * @rxdids: Supported RX descriptor ids
+ * @rx_ptype_lkup: LUT of Rx ptypes
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @next_to_alloc: RX buffer to allocate at
+ * @skb: Pointer to the skb
+ * @truesize: data buffer truesize in singleq
* @stats_sync: See struct u64_stats_sync
- * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
- * the TX completion queue, it can be for any TXQ associated
- * with that completion queue. This means we can clean up to
- * N TXQs during a single call to clean the completion queue.
- * cleaned_bytes|pkts tracks the clean stats per TXQ during
- * that single call to clean the completion queue. By doing so,
- * we can update BQL with aggregate cleaned stats for each TXQ
- * only once at the end of the cleaning routine.
- * @cleaned_pkts: Number of packets cleaned for the above said case
- * @rx_hsplit_en: RX headsplit enable
+ * @q_stats: See struct idpf_rx_queue_stats
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
+ * @rx_buffer_low_watermark: RX buffer low watermark
* @rx_hbuf_size: Header buffer size
* @rx_buf_size: Buffer size
* @rx_max_pkt_size: RX max packet size
- * @rx_buf_stride: RX buffer stride
- * @rx_buffer_low_watermark: RX buffer low watermark
- * @rxdids: Supported RX descriptor ids
- * @q_vector: Backreference to associated vector
- * @size: Length of descriptor ring in bytes
- * @dma: Physical address of ring
- * @desc_ring: Descriptor ring memory
- * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
+ */
+struct idpf_rx_queue {
+ __cacheline_group_begin_aligned(read_mostly);
+ union {
+ union virtchnl2_rx_desc *rx;
+ struct virtchnl2_singleq_rx_buf_desc *single_buf;
+
+ void *desc_ring;
+ };
+ union {
+ struct {
+ struct idpf_bufq_set *bufq_sets;
+ struct napi_struct *napi;
+ };
+ struct {
+ struct libeth_fqe *rx_buf;
+ struct page_pool *pp;
+ };
+ };
+ struct net_device *netdev;
+ void __iomem *tail;
+
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u16 idx;
+ u16 desc_count;
+
+ u32 rxdids;
+ const struct libeth_rx_pt *rx_ptype_lkup;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_to_alloc;
+
+ struct sk_buff *skb;
+ u32 truesize;
+
+ struct u64_stats_sync stats_sync;
+ struct idpf_rx_queue_stats q_stats;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
+ dma_addr_t dma;
+
+ struct idpf_q_vector *q_vector;
+
+ u16 rx_buffer_low_watermark;
+ u16 rx_hbuf_size;
+ u16 rx_buf_size;
+ u16 rx_max_pkt_size;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_rx_queue, 64,
+ 80 + sizeof(struct u64_stats_sync),
+ 32);
+
+/**
+ * struct idpf_tx_queue - software structure representing a transmit queue
+ * @base_tx: base Tx descriptor array
+ * @base_ctx: base Tx context descriptor array
+ * @flex_tx: flex Tx descriptor array
+ * @flex_ctx: flex Tx context descriptor array
+ * @desc_ring: virtual descriptor ring address
+ * @tx_buf: See struct idpf_tx_buf
+ * @txq_grp: See struct idpf_txq_group
+ * @dev: Device back pointer for DMA mapping
+ * @tail: Tail offset. Used for both queue models single and split
+ * @flags: See enum idpf_queue_flags_t
+ * @idx: For TX queue, it is used as an index to map between TX queue group and
+ * hot path TX pointers stored in vport. Used in both singleq/splitq.
+ * @desc_count: Number of descriptors
* @tx_min_pkt_len: Min supported packet length
- * @num_completions: Only relevant for TX completion queue. It tracks the
- * number of completions received to compare against the
- * number of completions pending, as accumulated by the
- * TX queues.
- * @buf_stack: Stack of empty buffers to store buffer info for out of order
- * buffer completions. See struct idpf_buf_lifo.
- * @compl_tag_bufid_m: Completion tag buffer id mask
* @compl_tag_gen_s: Completion tag generation bit
* The format of the completion tag will change based on the TXQ
* descriptor ring size so that we can maintain roughly the same level
@@ -683,108 +653,238 @@ union idpf_queue_stats {
* --------------------------------
*
* This gives us 8*8160 = 65280 possible unique values.
+ * @netdev: &net_device corresponding to this queue
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on
+ * the TX completion queue, it can be for any TXQ associated
+ * with that completion queue. This means we can clean up to
+ * N TXQs during a single call to clean the completion queue.
+ * cleaned_bytes|pkts tracks the clean stats per TXQ during
+ * that single call to clean the completion queue. By doing so,
+ * we can update BQL with aggregate cleaned stats for each TXQ
+ * only once at the end of the cleaning routine.
+ * @clean_budget: singleq only, queue cleaning budget
+ * @cleaned_pkts: Number of packets cleaned for the above said case
+ * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather
+ * @stash: Tx buffer stash for Flow-based scheduling mode
+ * @compl_tag_bufid_m: Completion tag buffer id mask
* @compl_tag_cur_gen: Used to keep track of current completion tag generation
* @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset
- * @sched_buf_hash: Hash table to stores buffers
+ * @stats_sync: See struct u64_stats_sync
+ * @q_stats: See struct idpf_tx_queue_stats
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
*/
-struct idpf_queue {
- struct device *dev;
- struct idpf_vport *vport;
+struct idpf_tx_queue {
+ __cacheline_group_begin_aligned(read_mostly);
union {
- struct idpf_txq_group *txq_grp;
- struct idpf_rxq_group *rxq_grp;
+ struct idpf_base_tx_desc *base_tx;
+ struct idpf_base_tx_ctx_desc *base_ctx;
+ union idpf_tx_flex_desc *flex_tx;
+ struct idpf_flex_tx_ctx_desc *flex_ctx;
+
+ void *desc_ring;
};
- u16 idx;
+ struct idpf_tx_buf *tx_buf;
+ struct idpf_txq_group *txq_grp;
+ struct device *dev;
void __iomem *tail;
- union {
- struct idpf_tx_buf *tx_buf;
- struct {
- struct idpf_rx_buf *buf;
- dma_addr_t hdr_buf_pa;
- void *hdr_buf_va;
- } rx_buf;
- };
- struct page_pool *pp;
- struct sk_buff *skb;
- u16 q_type;
- u32 q_id;
+
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u16 idx;
u16 desc_count;
+ u16 tx_min_pkt_len;
+ u16 compl_tag_gen_s;
+
+ struct net_device *netdev;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
u16 next_to_use;
u16 next_to_clean;
- u16 next_to_alloc;
- DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
- union idpf_queue_stats q_stats;
+ union {
+ u32 cleaned_bytes;
+ u32 clean_budget;
+ };
+ u16 cleaned_pkts;
+
+ u16 tx_max_bufs;
+ struct idpf_txq_stash *stash;
+
+ u16 compl_tag_bufid_m;
+ u16 compl_tag_cur_gen;
+ u16 compl_tag_gen_max;
+
struct u64_stats_sync stats_sync;
+ struct idpf_tx_queue_stats q_stats;
+ __cacheline_group_end_aligned(read_write);
- u32 cleaned_bytes;
- u16 cleaned_pkts;
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
+ dma_addr_t dma;
- bool rx_hsplit_en;
- u16 rx_hbuf_size;
- u16 rx_buf_size;
- u16 rx_max_pkt_size;
- u16 rx_buf_stride;
- u8 rx_buffer_low_watermark;
- u64 rxdids;
struct idpf_q_vector *q_vector;
- unsigned int size;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_tx_queue, 64,
+ 88 + sizeof(struct u64_stats_sync),
+ 24);
+
+/**
+ * struct idpf_buf_queue - software structure representing a buffer queue
+ * @split_buf: buffer descriptor array
+ * @hdr_buf: &libeth_fqe for header buffers
+ * @hdr_pp: &page_pool for header buffers
+ * @buf: &libeth_fqe for data buffers
+ * @pp: &page_pool for data buffers
+ * @tail: Tail offset
+ * @flags: See enum idpf_queue_flags_t
+ * @desc_count: Number of descriptors
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @next_to_alloc: RX buffer to allocate at
+ * @hdr_truesize: truesize for header buffers
+ * @truesize: truesize for data buffers
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
+ * @rx_buffer_low_watermark: RX buffer low watermark
+ * @rx_hbuf_size: Header buffer size
+ * @rx_buf_size: Buffer size
+ */
+struct idpf_buf_queue {
+ __cacheline_group_begin_aligned(read_mostly);
+ struct virtchnl2_splitq_rx_buf_desc *split_buf;
+ struct libeth_fqe *hdr_buf;
+ struct page_pool *hdr_pp;
+ struct libeth_fqe *buf;
+ struct page_pool *pp;
+ void __iomem *tail;
+
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u32 desc_count;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
+ u32 next_to_alloc;
+
+ u32 hdr_truesize;
+ u32 truesize;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
dma_addr_t dma;
- void *desc_ring;
- u16 tx_max_bufs;
- u8 tx_min_pkt_len;
+ struct idpf_q_vector *q_vector;
- u32 num_completions;
+ u16 rx_buffer_low_watermark;
+ u16 rx_hbuf_size;
+ u16 rx_buf_size;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32);
- struct idpf_buf_lifo buf_stack;
+/**
+ * struct idpf_compl_queue - software structure representing a completion queue
+ * @comp: completion descriptor array
+ * @txq_grp: See struct idpf_txq_group
+ * @flags: See enum idpf_queue_flags_t
+ * @desc_count: Number of descriptors
+ * @clean_budget: queue cleaning budget
+ * @netdev: &net_device corresponding to this queue
+ * @next_to_use: Next descriptor to use
+ * @next_to_clean: Next descriptor to clean
+ * @num_completions: Tracks the number of completions received, to compare
+ * against the number of completions pending as accumulated
+ * by the TX queues.
+ * @q_id: Queue id
+ * @size: Length of descriptor ring in bytes
+ * @dma: Physical address of ring
+ * @q_vector: Backreference to associated vector
+ */
+struct idpf_compl_queue {
+ __cacheline_group_begin_aligned(read_mostly);
+ struct idpf_splitq_tx_compl_desc *comp;
+ struct idpf_txq_group *txq_grp;
- u16 compl_tag_bufid_m;
- u16 compl_tag_gen_s;
+ DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
+ u32 desc_count;
- u16 compl_tag_cur_gen;
- u16 compl_tag_gen_max;
+ u32 clean_budget;
+ struct net_device *netdev;
+ __cacheline_group_end_aligned(read_mostly);
- DECLARE_HASHTABLE(sched_buf_hash, 12);
-} ____cacheline_internodealigned_in_smp;
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
+
+ u32 num_completions;
+ __cacheline_group_end_aligned(read_write);
+
+ __cacheline_group_begin_aligned(cold);
+ u32 q_id;
+ u32 size;
+ dma_addr_t dma;
+
+ struct idpf_q_vector *q_vector;
+ __cacheline_group_end_aligned(cold);
+};
+libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24);
/**
* struct idpf_sw_queue
- * @next_to_clean: Next descriptor to clean
- * @next_to_alloc: Buffer to allocate at
- * @flags: See enum idpf_queue_flags_t
* @ring: Pointer to the ring
+ * @flags: See enum idpf_queue_flags_t
* @desc_count: Descriptor count
- * @dev: Device back pointer for DMA mapping
+ * @next_to_use: Buffer to allocate at
+ * @next_to_clean: Next descriptor to clean
*
* Software queues are used in splitq mode to manage buffers between rxq
* producer and the bufq consumer. These are required in order to maintain a
 * lockless buffer management system and are strictly software-only constructs.
*/
struct idpf_sw_queue {
- u16 next_to_clean;
- u16 next_to_alloc;
+ __cacheline_group_begin_aligned(read_mostly);
+ u32 *ring;
+
DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS);
- u16 *ring;
- u16 desc_count;
- struct device *dev;
-} ____cacheline_internodealigned_in_smp;
+ u32 desc_count;
+ __cacheline_group_end_aligned(read_mostly);
+
+ __cacheline_group_begin_aligned(read_write);
+ u32 next_to_use;
+ u32 next_to_clean;
+ __cacheline_group_end_aligned(read_write);
+};
+libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24);
+libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8);
+libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8);
/**
* struct idpf_rxq_set
* @rxq: RX queue
- * @refillq0: Pointer to refill queue 0
- * @refillq1: Pointer to refill queue 1
+ * @refillq: pointers to refill queues
*
 * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs.
* Each rxq needs a refillq to return used buffers back to the respective bufq.
* Bufqs then clean these refillqs for buffers to give to hardware.
*/
struct idpf_rxq_set {
- struct idpf_queue rxq;
- struct idpf_sw_queue *refillq0;
- struct idpf_sw_queue *refillq1;
+ struct idpf_rx_queue rxq;
+ struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP];
};
/**
@@ -803,7 +903,7 @@ struct idpf_rxq_set {
* managed by at most two bufqs (depending on performance configuration).
*/
struct idpf_bufq_set {
- struct idpf_queue bufq;
+ struct idpf_buf_queue bufq;
int num_refillqs;
struct idpf_sw_queue *refillqs;
};
@@ -829,7 +929,7 @@ struct idpf_rxq_group {
union {
struct {
u16 num_rxq;
- struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q];
+ struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q];
} singleq;
struct {
u16 num_rxq_sets;
@@ -844,6 +944,7 @@ struct idpf_rxq_group {
* @vport: Vport back pointer
* @num_txq: Number of TX queues associated
* @txqs: Array of TX queue pointers
+ * @stashes: array of OOO stashes for the queues
* @complq: Associated completion queue pointer, split queue only
* @num_completions_pending: Total number of completions pending for the
 * completion queue, accumulated for all TX queues
@@ -857,13 +958,26 @@ struct idpf_txq_group {
struct idpf_vport *vport;
u16 num_txq;
- struct idpf_queue *txqs[IDPF_LARGE_MAX_Q];
+ struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q];
+ struct idpf_txq_stash *stashes;
- struct idpf_queue *complq;
+ struct idpf_compl_queue *complq;
u32 num_completions_pending;
};
+static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector)
+{
+ u32 cpu;
+
+ if (!q_vector)
+ return NUMA_NO_NODE;
+
+ cpu = cpumask_first(q_vector->affinity_mask);
+
+ return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE;
+}
+
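
A plausible consumer of this helper is NUMA-aware allocation of per-queue metadata, pairing with the page_pool_nid_changed() calls seen earlier; a hedged sketch (the call site is illustrative):

	int node = idpf_q_vector_to_mem(rxq->q_vector);

	/* place the buffer array on the memory node backing the
	 * vector's first CPU; NUMA_NO_NODE lets the allocator choose
	 */
	buf_arr = kcalloc_node(count, sizeof(*buf_arr), GFP_KERNEL, node);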
/**
* idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag
* @size: transmit request size in bytes
@@ -919,60 +1033,6 @@ static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc,
idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size);
}
-/**
- * idpf_alloc_page - Allocate a new RX buffer from the page pool
- * @pool: page_pool to allocate from
- * @buf: metadata struct to populate with page info
- * @buf_size: 2K or 4K
- *
- * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise.
- */
-static inline dma_addr_t idpf_alloc_page(struct page_pool *pool,
- struct idpf_rx_buf *buf,
- unsigned int buf_size)
-{
- if (buf_size == IDPF_RX_BUF_2048)
- buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset,
- buf_size);
- else
- buf->page = page_pool_dev_alloc_pages(pool);
-
- if (!buf->page)
- return DMA_MAPPING_ERROR;
-
- buf->truesize = buf_size;
-
- return page_pool_get_dma_addr(buf->page) + buf->page_offset +
- pool->p.offset;
-}
-
-/**
- * idpf_rx_put_page - Return RX buffer page to pool
- * @rx_buf: RX buffer metadata struct
- */
-static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf)
-{
- page_pool_put_page(rx_buf->page->pp, rx_buf->page,
- rx_buf->truesize, true);
- rx_buf->page = NULL;
-}
-
-/**
- * idpf_rx_sync_for_cpu - Synchronize DMA buffer
- * @rx_buf: RX buffer metadata struct
- * @len: frame length from descriptor
- */
-static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len)
-{
- struct page *page = rx_buf->page;
- struct page_pool *pp = page->pp;
-
- dma_sync_single_range_for_cpu(pp->p.dev,
- page_pool_get_dma_addr(page),
- rx_buf->page_offset + pp->p.offset, len,
- page_pool_get_dma_dir(pp));
-}
-
int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget);
void idpf_vport_init_num_qs(struct idpf_vport *vport,
struct virtchnl2_create_vport *vport_msg);
@@ -988,35 +1048,28 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
-enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
+void idpf_vport_intr_ena(struct idpf_vport *vport);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
void idpf_deinit_rss(struct idpf_vport *vport);
int idpf_rx_bufs_init_all(struct idpf_vport *vport);
void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb,
unsigned int size);
-struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq,
- struct idpf_rx_buf *rx_buf,
- unsigned int size);
-bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf);
-void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val);
-void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val,
+struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size);
+void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val,
bool xmit_more);
unsigned int idpf_size_to_txd_count(unsigned int size);
-netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb);
-void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb,
+netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb);
+void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb,
struct idpf_tx_buf *first, u16 ring_idx);
-unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq,
+unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq,
struct sk_buff *skb);
-bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs,
- unsigned int count);
-int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size);
+int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size);
void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue);
-netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb,
- struct net_device *netdev);
-netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb,
- struct net_device *netdev);
-bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rxq,
+netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb,
+ struct idpf_tx_queue *tx_q);
+netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev);
+bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq,
u16 cleaned_count);
int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index a5f9b7a5effe..70986e12da28 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2023 Intel Corporation */
+#include <net/libeth/rx.h>
+
#include "idpf.h"
#include "idpf_virtchnl.h"
@@ -750,7 +752,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
int i;
for (i = 0; i < vport->num_txq; i++)
- set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags);
+ idpf_queue_set(SW_MARKER, vport->txqs[i]);
event = wait_event_timeout(vport->sw_marker_wq,
test_and_clear_bit(IDPF_VPORT_SW_MARKER,
@@ -758,7 +760,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
msecs_to_jiffies(500));
for (i = 0; i < vport->num_txq; i++)
- clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
+ idpf_queue_clear(POLL_MODE, vport->txqs[i]);
if (event)
return 0;
@@ -1092,7 +1094,6 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
int num_regs, u32 q_type)
{
struct idpf_adapter *adapter = vport->adapter;
- struct idpf_queue *q;
int i, j, k = 0;
switch (q_type) {
@@ -1111,6 +1112,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
u16 num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_regs; j++, k++) {
+ struct idpf_rx_queue *q;
+
q = rx_qgrp->singleq.rxqs[j];
q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]);
@@ -1123,6 +1126,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals,
u8 num_bufqs = vport->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_regs; j++, k++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
q->tail = idpf_get_reg_addr(adapter,
reg_vals[k]);
@@ -1253,12 +1258,12 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT);
vport_msg->vport_index = cpu_to_le16(idx);
- if (adapter->req_tx_splitq)
+ if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
else
vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
- if (adapter->req_rx_splitq)
+ if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ))
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT);
else
vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE);
@@ -1320,10 +1325,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
vport_msg = adapter->vport_params_recvd[vport->idx];
+ if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) &&
+ (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE ||
+ vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) {
+ pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n");
+ return -EOPNOTSUPP;
+ }
+
rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids);
tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids);
- if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) {
+ if (idpf_is_queue_model_split(vport->rxq_model)) {
if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) {
dev_info(&adapter->pdev->dev, "Minimum RX descriptor support not provided, using the default\n");
vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
@@ -1333,7 +1345,7 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
vport->base_rxd = true;
}
- if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT)
+ if (!idpf_is_queue_model_split(vport->txq_model))
return 0;
if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) {
@@ -1449,19 +1461,19 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
qi[k].model =
cpu_to_le16(vport->txq_model);
qi[k].type =
- cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qi[k].ring_len =
cpu_to_le16(tx_qgrp->txqs[j]->desc_count);
qi[k].dma_ring_addr =
cpu_to_le64(tx_qgrp->txqs[j]->dma);
if (idpf_is_queue_model_split(vport->txq_model)) {
- struct idpf_queue *q = tx_qgrp->txqs[j];
+ struct idpf_tx_queue *q = tx_qgrp->txqs[j];
qi[k].tx_compl_queue_id =
cpu_to_le16(tx_qgrp->complq->q_id);
qi[k].relative_queue_id = cpu_to_le16(j);
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags))
+ if (idpf_queue_has(FLOW_SCH_EN, q))
qi[k].sched_mode =
cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW);
else
@@ -1478,11 +1490,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qi[k].model = cpu_to_le16(vport->txq_model);
- qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
+ qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma);
- if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags))
+ if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq))
sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW;
else
sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE;
@@ -1567,17 +1579,18 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
goto setup_rxqs;
for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- struct idpf_queue *bufq =
+ struct idpf_buf_queue *bufq =
&rx_qgrp->splitq.bufq_sets[j].bufq;
qi[k].queue_id = cpu_to_le32(bufq->q_id);
qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type = cpu_to_le32(bufq->q_type);
+ qi[k].type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M);
qi[k].ring_len = cpu_to_le16(bufq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(bufq->dma);
qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size);
- qi[k].buffer_notif_stride = bufq->rx_buf_stride;
+ qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE;
qi[k].rx_buffer_low_watermark =
cpu_to_le16(bufq->rx_buffer_low_watermark);
if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
@@ -1591,35 +1604,47 @@ setup_rxqs:
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, k++) {
- struct idpf_queue *rxq;
+ const struct idpf_bufq_set *sets;
+ struct idpf_rx_queue *rxq;
if (!idpf_is_queue_model_split(vport->rxq_model)) {
rxq = rx_qgrp->singleq.rxqs[j];
goto common_qi_fields;
}
+
rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
- qi[k].rx_bufq1_id =
- cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id);
+ sets = rxq->bufq_sets;
+
+ /* In splitq mode, RXQ buffer size should be
+ * set to that of the first buffer queue
+ * associated with this RXQ.
+ */
+ rxq->rx_buf_size = sets[0].bufq.rx_buf_size;
+
+ qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);
if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) {
qi[k].bufq2_ena = IDPF_BUFQ2_ENA;
qi[k].rx_bufq2_id =
- cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id);
+ cpu_to_le16(sets[1].bufq.q_id);
}
qi[k].rx_buffer_low_watermark =
cpu_to_le16(rxq->rx_buffer_low_watermark);
if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW))
qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC);
-common_qi_fields:
- if (rxq->rx_hsplit_en) {
+ rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size;
+
+ if (idpf_queue_has(HSPLIT_EN, rxq)) {
qi[k].qflags |=
cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT);
qi[k].hdr_buffer_size =
cpu_to_le16(rxq->rx_hbuf_size);
}
+
+common_qi_fields:
qi[k].queue_id = cpu_to_le32(rxq->q_id);
qi[k].model = cpu_to_le16(vport->rxq_model);
- qi[k].type = cpu_to_le32(rxq->q_type);
+ qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
qi[k].ring_len = cpu_to_le16(rxq->desc_count);
qi[k].dma_ring_addr = cpu_to_le64(rxq->dma);
qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size);
@@ -1706,7 +1731,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1720,7 +1745,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
for (i = 0; i < vport->num_txq_grp; i++, k++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type);
+ qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION);
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1741,12 +1766,12 @@ setup_rx:
qc[k].start_queue_id =
cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);
qc[k].type =
- cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type);
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
} else {
qc[k].start_queue_id =
cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id);
qc[k].type =
- cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type);
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
}
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1761,10 +1786,11 @@ setup_rx:
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) {
- struct idpf_queue *q;
+ const struct idpf_buf_queue *q;
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
- qc[k].type = cpu_to_le32(q->q_type);
+ qc[k].type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER);
qc[k].start_queue_id = cpu_to_le32(q->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
@@ -1849,7 +1875,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
for (j = 0; j < tx_qgrp->num_txq; j++, k++) {
- vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type);
+ vqv[k].queue_type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);
vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);
if (idpf_is_queue_model_split(vport->txq_model)) {
@@ -1879,14 +1906,15 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq; j++, k++) {
- struct idpf_queue *rxq;
+ struct idpf_rx_queue *rxq;
if (idpf_is_queue_model_split(vport->rxq_model))
rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
rxq = rx_qgrp->singleq.rxqs[j];
- vqv[k].queue_type = cpu_to_le32(rxq->q_type);
+ vqv[k].queue_type =
+ cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX);
vqv[k].queue_id = cpu_to_le32(rxq->q_id);
vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx);
vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx);
@@ -1975,7 +2003,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
* queues virtchnl message is sent
*/
for (i = 0; i < vport->num_txq; i++)
- set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
+ idpf_queue_set(POLL_MODE, vport->txqs[i]);
/* schedule the napi to receive all the marker packets */
local_bh_disable();
@@ -2469,39 +2497,52 @@ do_memcpy:
* @frag: fragmentation allowed
*
*/
-static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
+static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype,
struct idpf_ptype_state *pstate,
bool ipv4, bool frag)
{
if (!pstate->outer_ip || !pstate->outer_frag) {
- ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP;
pstate->outer_ip = true;
if (ipv4)
- ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4;
+ ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4;
else
- ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6;
+ ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6;
if (frag) {
- ptype->outer_frag = IDPF_RX_PTYPE_FRAG;
+ ptype->outer_frag = LIBETH_RX_PT_FRAG;
pstate->outer_frag = true;
}
} else {
- ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP;
+ ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP;
pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP;
if (ipv4)
- ptype->tunnel_end_prot =
- IDPF_RX_PTYPE_TUNNEL_END_IPV4;
+ ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4;
else
- ptype->tunnel_end_prot =
- IDPF_RX_PTYPE_TUNNEL_END_IPV6;
+ ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6;
if (frag)
- ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG;
+ ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG;
}
}
+static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype)
+{
+ if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ ptype->inner_prot)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4;
+ else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 &&
+ ptype->outer_ip)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3;
+ else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2)
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2;
+ else
+ ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE;
+
+ libeth_rx_pt_gen_hash_type(ptype);
+}
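A worked pass through the ladder above, with outcomes as I read the libeth enum values (illustrative, not taken from this series):

	/* MAC+IPv4+TCP: payload_layer == LIBETH_RX_PT_PAYLOAD_L2 (set by the
	 *	PAY proto id) and inner_prot == LIBETH_RX_PT_INNER_TCP, so the
	 *	first branch promotes it to LIBETH_RX_PT_PAYLOAD_L4;
	 * MAC+IPv4: payload_layer == ..._PAYLOAD_L2 with outer_ip set but no
	 *	inner_prot, so the second branch yields ..._PAYLOAD_L3;
	 * MAC only: outer_ip == LIBETH_RX_PT_OUTER_L2, so the third branch
	 *	leaves it at ..._PAYLOAD_L2 before hash-type generation.
	 */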
+
/**
* idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info
* @vport: virtual port data structure
@@ -2512,7 +2553,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
- struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
+ struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL;
int max_ptype, ptypes_recvd = 0, ptype_offset;
struct idpf_adapter *adapter = vport->adapter;
struct idpf_vc_xn_params xn_params = {};
@@ -2520,12 +2561,17 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
ssize_t reply_sz;
int i, j, k;
+ if (vport->rx_ptype_lkup)
+ return 0;
+
if (idpf_is_queue_model_split(vport->rxq_model))
max_ptype = IDPF_RX_MAX_PTYPE;
else
max_ptype = IDPF_RX_MAX_BASE_PTYPE;
- memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+ ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL);
+ if (!ptype_lkup)
+ return -ENOMEM;
get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
if (!get_ptype_info)
@@ -2583,16 +2629,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
/* 0xFFFF indicates end of ptypes */
if (le16_to_cpu(ptype->ptype_id_10) ==
IDPF_INVALID_PTYPE_ID)
- return 0;
+ goto out;
if (idpf_is_queue_model_split(vport->rxq_model))
k = le16_to_cpu(ptype->ptype_id_10);
else
k = ptype->ptype_id_8;
- if (ptype->proto_id_count)
- ptype_lkup[k].known = 1;
-
for (j = 0; j < ptype->proto_id_count; j++) {
id = le16_to_cpu(ptype->proto_id[j]);
switch (id) {
@@ -2600,18 +2643,18 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
if (pstate.tunnel_state ==
IDPF_PTYPE_TUNNEL_IP) {
ptype_lkup[k].tunnel_type =
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT;
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT;
pstate.tunnel_state |=
IDPF_PTYPE_TUNNEL_IP_GRENAT;
}
break;
case VIRTCHNL2_PROTO_HDR_MAC:
ptype_lkup[k].outer_ip =
- IDPF_RX_PTYPE_OUTER_L2;
+ LIBETH_RX_PT_OUTER_L2;
if (pstate.tunnel_state ==
IDPF_TUN_IP_GRE) {
ptype_lkup[k].tunnel_type =
- IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC;
+ LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC;
pstate.tunnel_state |=
IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC;
}
@@ -2638,23 +2681,23 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
break;
case VIRTCHNL2_PROTO_HDR_UDP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_UDP;
+ LIBETH_RX_PT_INNER_UDP;
break;
case VIRTCHNL2_PROTO_HDR_TCP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_TCP;
+ LIBETH_RX_PT_INNER_TCP;
break;
case VIRTCHNL2_PROTO_HDR_SCTP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_SCTP;
+ LIBETH_RX_PT_INNER_SCTP;
break;
case VIRTCHNL2_PROTO_HDR_ICMP:
ptype_lkup[k].inner_prot =
- IDPF_RX_PTYPE_INNER_PROT_ICMP;
+ LIBETH_RX_PT_INNER_ICMP;
break;
case VIRTCHNL2_PROTO_HDR_PAY:
ptype_lkup[k].payload_layer =
- IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2;
+ LIBETH_RX_PT_PAYLOAD_L2;
break;
case VIRTCHNL2_PROTO_HDR_ICMPV6:
case VIRTCHNL2_PROTO_HDR_IPV6_EH:
@@ -2708,9 +2751,14 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
break;
}
}
+
+ idpf_finalize_ptype_lookup(&ptype_lkup[k]);
}
}
+out:
+ vport->rx_ptype_lkup = no_free_ptr(ptype_lkup);
+
return 0;
}
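The allocation flow above leans on scope-based cleanup from <linux/cleanup.h>: the __free(kfree) annotation frees the table on any early return, and no_free_ptr() disarms that once ownership moves to the vport. A minimal sketch of the pattern with illustrative names:

	#include <linux/cleanup.h>
	#include <linux/slab.h>

	static int example_build_table(struct some_vport *vport, size_t n)
	{
		struct some_entry *tbl __free(kfree) =
			kcalloc(n, sizeof(*tbl), GFP_KERNEL);

		if (!tbl)
			return -ENOMEM;

		if (fill_table(tbl, n))	/* any failure: tbl is kfree()d here */
			return -EIO;

		vport->table = no_free_ptr(tbl);	/* ownership transferred */
		return 0;
	}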
@@ -3125,7 +3173,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q)
rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size);
ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr);
- vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD;
+ vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN;
/* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */
memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS);
@@ -3242,7 +3290,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
int num_qids,
u32 q_type)
{
- struct idpf_queue *q;
int i, j, k = 0;
switch (q_type) {
@@ -3250,11 +3297,8 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
for (i = 0; i < vport->num_txq_grp; i++) {
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
- for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) {
+ for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++)
tx_qgrp->txqs[j]->q_id = qids[k];
- tx_qgrp->txqs[j]->q_type =
- VIRTCHNL2_QUEUE_TYPE_TX;
- }
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX:
@@ -3268,12 +3312,13 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
num_rxq = rx_qgrp->singleq.num_rxq;
for (j = 0; j < num_rxq && k < num_qids; j++, k++) {
+ struct idpf_rx_queue *q;
+
if (idpf_is_queue_model_split(vport->rxq_model))
q = &rx_qgrp->splitq.rxq_sets[j]->rxq;
else
q = rx_qgrp->singleq.rxqs[j];
q->q_id = qids[k];
- q->q_type = VIRTCHNL2_QUEUE_TYPE_RX;
}
}
break;
@@ -3282,8 +3327,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i];
tx_qgrp->complq->q_id = qids[k];
- tx_qgrp->complq->q_type =
- VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION;
}
break;
case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
@@ -3292,9 +3335,10 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport,
u8 num_bufqs = vport->num_bufqs_per_qgrp;
for (j = 0; j < num_bufqs && k < num_qids; j++, k++) {
+ struct idpf_buf_queue *q;
+
q = &rx_qgrp->splitq.bufq_sets[j].bufq;
q->q_id = qids[k];
- q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER;
}
}
break;
diff --git a/drivers/net/ethernet/intel/idpf/virtchnl2.h b/drivers/net/ethernet/intel/idpf/virtchnl2.h
index 4a3c4454d25a..63deb120359c 100644
--- a/drivers/net/ethernet/intel/idpf/virtchnl2.h
+++ b/drivers/net/ethernet/intel/idpf/virtchnl2.h
@@ -4,6 +4,8 @@
#ifndef _VIRTCHNL2_H_
#define _VIRTCHNL2_H_
+#include <linux/if_ether.h>
+
/* All opcodes associated with virtchnl2 are prefixed with virtchnl2 or
* VIRTCHNL2. Any future opcodes, offloads/capabilities, structures,
* and defines must be prefixed with virtchnl2 or VIRTCHNL2 to avoid confusion.
@@ -17,8 +19,6 @@
* must remain unchanged over time, so we specify explicit values for all enums.
*/
-#include "virtchnl2_lan_desc.h"
-
/* This macro is used to generate compilation errors if a structure
* is not exactly the correct length.
*/
@@ -555,7 +555,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_queue_reg_chunk);
struct virtchnl2_queue_reg_chunks {
__le16 num_chunks;
u8 pad[6];
- struct virtchnl2_queue_reg_chunk chunks[];
+ struct virtchnl2_queue_reg_chunk chunks[] __counted_by_le(num_chunks);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_reg_chunks);
@@ -703,7 +703,7 @@ struct virtchnl2_config_tx_queues {
__le32 vport_id;
__le16 num_qinfo;
u8 pad[10];
- struct virtchnl2_txq_info qinfo[];
+ struct virtchnl2_txq_info qinfo[] __counted_by_le(num_qinfo);
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_config_tx_queues);
@@ -782,7 +782,7 @@ struct virtchnl2_config_rx_queues {
__le32 vport_id;
__le16 num_qinfo;
u8 pad[18];
- struct virtchnl2_rxq_info qinfo[];
+ struct virtchnl2_rxq_info qinfo[] __counted_by_le(num_qinfo);
};
VIRTCHNL2_CHECK_STRUCT_LEN(24, virtchnl2_config_rx_queues);
@@ -868,7 +868,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(32, virtchnl2_vector_chunk);
struct virtchnl2_vector_chunks {
__le16 num_vchunks;
u8 pad[14];
- struct virtchnl2_vector_chunk vchunks[];
+ struct virtchnl2_vector_chunk vchunks[] __counted_by_le(num_vchunks);
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_vector_chunks);
@@ -912,7 +912,7 @@ struct virtchnl2_rss_lut {
__le16 lut_entries_start;
__le16 lut_entries;
u8 pad[4];
- __le32 lut[];
+ __le32 lut[] __counted_by_le(lut_entries);
};
VIRTCHNL2_CHECK_STRUCT_LEN(12, virtchnl2_rss_lut);
@@ -977,7 +977,7 @@ struct virtchnl2_ptype {
u8 ptype_id_8;
u8 proto_id_count;
__le16 pad;
- __le16 proto_id[];
+ __le16 proto_id[] __counted_by(proto_id_count);
} __packed __aligned(2);
VIRTCHNL2_CHECK_STRUCT_LEN(6, virtchnl2_ptype);
@@ -1104,7 +1104,7 @@ struct virtchnl2_rss_key {
__le32 vport_id;
__le16 key_len;
u8 pad;
- u8 key_flex[];
+ u8 key_flex[] __counted_by_le(key_len);
} __packed;
VIRTCHNL2_CHECK_STRUCT_LEN(7, virtchnl2_rss_key);
@@ -1131,7 +1131,7 @@ VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_chunk);
struct virtchnl2_queue_chunks {
__le16 num_chunks;
u8 pad[6];
- struct virtchnl2_queue_chunk chunks[];
+ struct virtchnl2_queue_chunk chunks[] __counted_by_le(num_chunks);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_queue_chunks);
@@ -1195,7 +1195,7 @@ struct virtchnl2_queue_vector_maps {
__le32 vport_id;
__le16 num_qv_maps;
u8 pad[10];
- struct virtchnl2_queue_vector qv_maps[];
+ struct virtchnl2_queue_vector qv_maps[] __counted_by_le(num_qv_maps);
};
VIRTCHNL2_CHECK_STRUCT_LEN(16, virtchnl2_queue_vector_maps);
@@ -1247,7 +1247,7 @@ struct virtchnl2_mac_addr_list {
__le32 vport_id;
__le16 num_mac_addr;
u8 pad[2];
- struct virtchnl2_mac_addr mac_addr_list[];
+ struct virtchnl2_mac_addr mac_addr_list[] __counted_by_le(num_mac_addr);
};
VIRTCHNL2_CHECK_STRUCT_LEN(8, virtchnl2_mac_addr_list);
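The __counted_by_le() annotations added above tell the compiler that each flexible array is bounded by its little-endian counter field, enabling FORTIFY/UBSAN bounds checks. A hedged sketch of an allocation that keeps the checker happy (num and the error handling are illustrative):

	#include <linux/overflow.h>
	#include <linux/slab.h>

	struct virtchnl2_queue_chunks *qcs;

	qcs = kzalloc(struct_size(qcs, chunks, num), GFP_KERNEL);
	if (!qcs)
		return -ENOMEM;

	/* Write the counter before touching chunks[] so the
	 * compiler-visible bound is in place for the bounds checks.
	 */
	qcs->num_chunks = cpu_to_le16(num);
	qcs->chunks[0].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX);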
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index 394c1e0656b9..463c0d26b9d4 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -6,6 +6,6 @@
obj-$(CONFIG_IGB) += igb.o
-igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \
- e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
- e1000_i210.o igb_ptp.o igb_hwmon.o
+igb-y := igb_main.o igb_ethtool.o e1000_82575.o \
+ e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \
+ e1000_i210.o igb_ptp.o igb_hwmon.o
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 99977a22b843..06b9970dffad 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -2381,7 +2381,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
static int igb_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct igb_adapter *adapter = netdev_priv(dev);
@@ -3272,19 +3272,6 @@ static int igb_get_module_eeprom(struct net_device *netdev,
return 0;
}
-static int igb_ethtool_begin(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- pm_runtime_get_sync(&adapter->pdev->dev);
- return 0;
-}
-
-static void igb_ethtool_complete(struct net_device *netdev)
-{
- struct igb_adapter *adapter = netdev_priv(netdev);
- pm_runtime_put(&adapter->pdev->dev);
-}
-
static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
{
return IGB_RETA_SIZE;
@@ -3508,8 +3495,6 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_channels = igb_set_channels,
.get_priv_flags = igb_get_priv_flags,
.set_priv_flags = igb_set_priv_flags,
- .begin = igb_ethtool_begin,
- .complete = igb_ethtool_complete,
.get_link_ksettings = igb_get_link_ksettings,
.set_link_ksettings = igb_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a3f100769e39..33a42b4c21e0 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -106,8 +106,6 @@ static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
-static int igb_probe(struct pci_dev *, const struct pci_device_id *);
-static void igb_remove(struct pci_dev *pdev);
static void igb_init_queue_configuration(struct igb_adapter *adapter);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
@@ -178,20 +176,6 @@ static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_disable_sriov(struct pci_dev *dev, bool reinit);
#endif
-static int igb_suspend(struct device *);
-static int igb_resume(struct device *);
-static int igb_runtime_suspend(struct device *dev);
-static int igb_runtime_resume(struct device *dev);
-static int igb_runtime_idle(struct device *dev);
-#ifdef CONFIG_PM
-static const struct dev_pm_ops igb_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
- SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
- igb_runtime_idle)
-};
-#endif
-static void igb_shutdown(struct pci_dev *);
-static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
@@ -219,20 +203,6 @@ static const struct pci_error_handlers igb_err_handler = {
static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
-static struct pci_driver igb_driver = {
- .name = igb_driver_name,
- .id_table = igb_pci_tbl,
- .probe = igb_probe,
- .remove = igb_remove,
-#ifdef CONFIG_PM
- .driver.pm = &igb_pm_ops,
-#endif
- .shutdown = igb_shutdown,
- .sriov_configure = igb_pci_sriov_configure,
- .err_handler = &igb_err_handler
-};
-
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL v2");
@@ -647,6 +617,8 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
return adapter->netdev;
}
+static struct pci_driver igb_driver;
+
/**
* igb_init_module - Driver Registration Routine
*
@@ -2624,6 +2596,9 @@ static int igb_parse_cls_flower(struct igb_adapter *adapter,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
struct flow_match_eth_addrs match;
@@ -4833,6 +4808,7 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
#if (PAGE_SIZE < 8192)
if (adapter->max_frame_size > IGB_MAX_FRAME_BUILD_SKB ||
+ IGB_2K_TOO_SMALL_WITH_PADDING ||
rd32(E1000_RCTL) & E1000_RCTL_SBP)
set_ring_uses_large_buffer(rx_ring);
#endif
@@ -6668,7 +6644,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igb_up(adapter);
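The WRITE_ONCE() conversions in the MTU paths across this series mark netdev->mtu as a lockless-read field, avoiding store tearing. A sketch of the pairing; the reader side here is lifted from libeth's rx.c later in this document:

	/* writer (the ndo_change_mtu paths converted in this series) */
	WRITE_ONCE(netdev->mtu, new_mtu);

	/* lockless reader pairs with it */
	len = READ_ONCE(pp->netdev->mtu) + LIBETH_RX_LL_LEN;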
@@ -9163,6 +9139,10 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
return -EIO;
break;
case SIOCSMIIREG:
+ if (igb_write_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+ data->val_in))
+ return -EIO;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -9453,12 +9433,12 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-static int __maybe_unused igb_suspend(struct device *dev)
+static int igb_suspend(struct device *dev)
{
return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
-static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
+static int __igb_resume(struct device *dev, bool rpm)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -9514,12 +9494,12 @@ static int __maybe_unused __igb_resume(struct device *dev, bool rpm)
return err;
}
-static int __maybe_unused igb_resume(struct device *dev)
+static int igb_resume(struct device *dev)
{
return __igb_resume(dev, false);
}
-static int __maybe_unused igb_runtime_idle(struct device *dev)
+static int igb_runtime_idle(struct device *dev)
{
struct net_device *netdev = dev_get_drvdata(dev);
struct igb_adapter *adapter = netdev_priv(netdev);
@@ -9530,12 +9510,12 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
return -EBUSY;
}
-static int __maybe_unused igb_runtime_suspend(struct device *dev)
+static int igb_runtime_suspend(struct device *dev)
{
return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}
-static int __maybe_unused igb_runtime_resume(struct device *dev)
+static int igb_runtime_resume(struct device *dev)
{
return __igb_resume(dev, true);
}
@@ -10157,4 +10137,20 @@ static void igb_nfc_filter_restore(struct igb_adapter *adapter)
spin_unlock(&adapter->nfc_lock);
}
+
+static _DEFINE_DEV_PM_OPS(igb_pm_ops, igb_suspend, igb_resume,
+ igb_runtime_suspend, igb_runtime_resume,
+ igb_runtime_idle);
+
+static struct pci_driver igb_driver = {
+ .name = igb_driver_name,
+ .id_table = igb_pci_tbl,
+ .probe = igb_probe,
+ .remove = igb_remove,
+ .driver.pm = pm_ptr(&igb_pm_ops),
+ .shutdown = igb_shutdown,
+ .sriov_configure = igb_pci_sriov_configure,
+ .err_handler = &igb_err_handler
+};
+
/* igb_main.c */
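The PM rework above follows one pattern used throughout this series: define the dev_pm_ops unconditionally, drop __maybe_unused from the callbacks, and reference the ops through pm_ptr()/pm_sleep_ptr() so the compiler discards them when CONFIG_PM (or CONFIG_PM_SLEEP) is off, with no #ifdef. A minimal sketch with illustrative foo_* names, using the sleep-only variant; drivers with runtime callbacks, like igb above, use _DEFINE_DEV_PM_OPS() with pm_ptr() instead:

	#include <linux/pm.h>

	static DEFINE_SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct pci_driver foo_driver = {
		.name		= "foo",
		.id_table	= foo_pci_tbl,
		.probe		= foo_probe,
		.remove		= foo_remove,
		/* NULL when CONFIG_PM_SLEEP is off; ops compiled out */
		.driver.pm	= pm_sleep_ptr(&foo_pm_ops),
	};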
diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile
index afd3e36eae75..902711d5e691 100644
--- a/drivers/net/ethernet/intel/igbvf/Makefile
+++ b/drivers/net/ethernet/intel/igbvf/Makefile
@@ -6,8 +6,4 @@
obj-$(CONFIG_IGBVF) += igbvf.o
-igbvf-objs := vf.o \
- mbx.o \
- ethtool.o \
- netdev.o
-
+igbvf-y := vf.o mbx.o ethtool.o netdev.o
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index b0cf310e6f7b..925d7286a8ee 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -2434,7 +2434,7 @@ static int igbvf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_dbg(netdev, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igbvf_up(adapter);
@@ -2470,7 +2470,7 @@ static int igbvf_suspend(struct device *dev_d)
return 0;
}
-static int __maybe_unused igbvf_resume(struct device *dev_d)
+static int igbvf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -2957,7 +2957,7 @@ static const struct pci_device_id igbvf_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
-static SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(igbvf_pm_ops, igbvf_suspend, igbvf_resume);
/* PCI Device API Driver */
static struct pci_driver igbvf_driver = {
@@ -2965,7 +2965,7 @@ static struct pci_driver igbvf_driver = {
.id_table = igbvf_pci_tbl,
.probe = igbvf_probe,
.remove = igbvf_remove,
- .driver.pm = &igbvf_pm_ops,
+ .driver.pm = pm_sleep_ptr(&igbvf_pm_ops),
.shutdown = igbvf_shutdown,
.err_handler = &igbvf_err_handler
};
@@ -3001,7 +3001,6 @@ static void __exit igbvf_exit_module(void)
}
module_exit(igbvf_exit_module);
-MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile
index ebffd3054285..efc5e7983dad 100644
--- a/drivers/net/ethernet/intel/igc/Makefile
+++ b/drivers/net/ethernet/intel/igc/Makefile
@@ -6,7 +6,7 @@
#
obj-$(CONFIG_IGC) += igc.o
-igc-$(CONFIG_IGC_LEDS) += igc_leds.o
-igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
-igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
+igc-y := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \
+ igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o
+igc-$(CONFIG_IGC_LEDS) += igc_leds.o
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 6bc56c7c181e..c38b4d0f00ce 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -72,13 +72,46 @@ struct igc_rx_packet_stats {
u64 other_packets;
};
+enum igc_tx_buffer_type {
+ IGC_TX_BUFFER_TYPE_SKB,
+ IGC_TX_BUFFER_TYPE_XDP,
+ IGC_TX_BUFFER_TYPE_XSK,
+};
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct igc_tx_buffer {
+ union igc_adv_tx_desc *next_to_watch;
+ unsigned long time_stamp;
+ enum igc_tx_buffer_type type;
+ union {
+ struct sk_buff *skb;
+ struct xdp_frame *xdpf;
+ };
+ unsigned int bytecount;
+ u16 gso_segs;
+ __be16 protocol;
+
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ u32 tx_flags;
+ bool xsk_pending_ts;
+};
+
struct igc_tx_timestamp_request {
- struct sk_buff *skb; /* reference to the packet being timestamped */
+ union { /* reference to the packet being timestamped */
+ struct sk_buff *skb;
+ struct igc_tx_buffer *xsk_tx_buffer;
+ };
+ enum igc_tx_buffer_type buffer_type;
unsigned long start; /* when the tstamp request started (jiffies) */
u32 mask; /* _TSYNCTXCTL_TXTT_{X} bit for this request */
u32 regl; /* which TXSTMPL_{X} register should be used */
u32 regh; /* which TXSTMPH_{X} register should be used */
u32 flags; /* flags that should be added to the tx_buffer */
+ u8 xsk_queue_index; /* Tx queue that requested the timestamp */

+ struct xsk_tx_metadata_compl xsk_meta; /* ref to xsk Tx metadata */
};
struct igc_inline_rx_tstamps {
@@ -169,7 +202,6 @@ struct igc_adapter {
struct net_device *netdev;
struct ethtool_keee eee;
- u16 eee_advert;
unsigned long state;
unsigned int flags;
@@ -323,6 +355,9 @@ void igc_disable_tx_ring(struct igc_ring *ring);
void igc_enable_tx_ring(struct igc_ring *ring);
int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+/* AF_XDP TX metadata operations */
+extern const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops;
+
/* igc_dump declarations */
void igc_rings_dump(struct igc_adapter *adapter);
void igc_regs_dump(struct igc_adapter *adapter);
@@ -508,32 +543,6 @@ enum igc_boards {
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGC_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
-enum igc_tx_buffer_type {
- IGC_TX_BUFFER_TYPE_SKB,
- IGC_TX_BUFFER_TYPE_XDP,
- IGC_TX_BUFFER_TYPE_XSK,
-};
-
-/* wrapper around a pointer to a socket buffer,
- * so a DMA handle can be stored along with the buffer
- */
-struct igc_tx_buffer {
- union igc_adv_tx_desc *next_to_watch;
- unsigned long time_stamp;
- enum igc_tx_buffer_type type;
- union {
- struct sk_buff *skb;
- struct xdp_frame *xdpf;
- };
- unsigned int bytecount;
- u16 gso_segs;
- __be16 protocol;
-
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
- u32 tx_flags;
-};
-
struct igc_rx_buffer {
union {
struct {
@@ -557,6 +566,13 @@ struct igc_xdp_buff {
struct igc_inline_rx_tstamps *rx_ts; /* data indication bit IGC_RXDADV_STAT_TSIP */
};
+struct igc_metadata_request {
+ struct igc_tx_buffer *tx_buffer;
+ struct xsk_tx_metadata *meta;
+ struct igc_ring *tx_ring;
+ u32 cmd_type;
+};
+
struct igc_q_vector {
struct igc_adapter *adapter; /* backlink */
void __iomem *itr_register;
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 5f92b3c7c3d4..511384f3ec5c 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -404,6 +404,12 @@
#define IGC_DTXMXPKTSZ_TSN 0x19 /* 1600 bytes of max TX DMA packet size */
#define IGC_DTXMXPKTSZ_DEFAULT 0x98 /* 9728-byte Jumbo frames */
+/* Retry Buffer Control */
+#define IGC_RETX_CTL 0x041C
+#define IGC_RETX_CTL_WATERMARK_MASK 0xF
+#define IGC_RETX_CTL_QBVFULLTH_SHIFT 8 /* QBV Retry Buffer Full Threshold */
+#define IGC_RETX_CTL_QBVFULLEN 0x1000 /* Enable QBV Retry Buffer Full Threshold */
+
/* Transmit Scheduling Latency */
/* Latency between transmission scheduling (LaunchTime) and the time
* the packet is transmitted to the network, in nanoseconds.
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 1a64f1ca6ca8..3d3ef4e1547c 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -1559,7 +1559,7 @@ static int igc_ethtool_set_channels(struct net_device *netdev,
}
static int igc_ethtool_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct igc_adapter *adapter = netdev_priv(dev);
@@ -1629,11 +1629,12 @@ static int igc_ethtool_get_eee(struct net_device *netdev,
struct igc_hw *hw = &adapter->hw;
u32 eeer;
- if (hw->dev_spec._base.eee_enable)
- mii_eee_cap1_mod_linkmode_t(edata->advertised,
- adapter->eee_advert);
-
- *edata = adapter->eee;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ edata->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ edata->supported);
eeer = rd32(IGC_EEER);
@@ -1695,8 +1696,6 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return -EINVAL;
}
- adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised);
-
if (hw->dev_spec._base.eee_enable != edata->eee_enabled) {
hw->dev_spec._base.eee_enable = edata->eee_enabled;
adapter->flags |= IGC_FLAG_EEE;
@@ -1711,21 +1710,6 @@ static int igc_ethtool_set_eee(struct net_device *netdev,
return 0;
}
-static int igc_ethtool_begin(struct net_device *netdev)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
-
- pm_runtime_get_sync(&adapter->pdev->dev);
- return 0;
-}
-
-static void igc_ethtool_complete(struct net_device *netdev)
-{
- struct igc_adapter *adapter = netdev_priv(netdev);
-
- pm_runtime_put(&adapter->pdev->dev);
-}
-
static int igc_ethtool_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
@@ -2025,8 +2009,6 @@ static const struct ethtool_ops igc_ethtool_ops = {
.set_priv_flags = igc_ethtool_set_priv_flags,
.get_eee = igc_ethtool_get_eee,
.set_eee = igc_ethtool_set_eee,
- .begin = igc_ethtool_begin,
- .complete = igc_ethtool_complete,
.get_link_ksettings = igc_ethtool_get_link_ksettings,
.set_link_ksettings = igc_ethtool_set_link_ksettings,
.self_test = igc_ethtool_diag_test,
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 4d975d620a8e..dfd6c00b4205 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -12,6 +12,7 @@
#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <linux/pci.h>
+#include <linux/mdio.h>
#include <net/ipv6.h>
@@ -31,7 +32,6 @@
static int debug = -1;
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
module_param(debug, int, 0);
@@ -2712,8 +2712,7 @@ static struct sk_buff *igc_construct_skb_zc(struct igc_ring *ring,
net_prefetch(xdp->data_meta);
- skb = __napi_alloc_skb(&ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&ring->q_vector->napi, totalsize);
if (unlikely(!skb))
return NULL;
@@ -2813,7 +2812,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget)
}
bi->xdp->data_end = bi->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(bi->xdp);
res = __igc_xdp_run_prog(adapter, prog, bi->xdp);
switch (res) {
@@ -2874,6 +2873,89 @@ static void igc_update_tx_stats(struct igc_q_vector *q_vector,
q_vector->tx.total_packets += packets;
}
+static void igc_xsk_request_timestamp(void *_priv)
+{
+ struct igc_metadata_request *meta_req = _priv;
+ struct igc_ring *tx_ring = meta_req->tx_ring;
+ struct igc_tx_timestamp_request *tstamp;
+ u32 tx_flags = IGC_TX_FLAGS_TSTAMP;
+ struct igc_adapter *adapter;
+ unsigned long lock_flags;
+ bool found = false;
+ int i;
+
+ if (test_bit(IGC_RING_FLAG_TX_HWTSTAMP, &tx_ring->flags)) {
+ adapter = netdev_priv(tx_ring->netdev);
+
+ spin_lock_irqsave(&adapter->ptp_tx_lock, lock_flags);
+
+ /* Search for available tstamp regs */
+ for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
+ tstamp = &adapter->tx_tstamp[i];
+
+ /* tstamp->skb and tstamp->xsk_tx_buffer are in union.
+ * When tstamp->skb is equal to NULL,
+ * tstamp->xsk_tx_buffer is equal to NULL as well.
+ * This condition means that the particular tstamp reg
+ * is not occupied by another packet.
+ */
+ if (!tstamp->skb) {
+ found = true;
+ break;
+ }
+ }
+
+ /* Return if no available tstamp regs */
+ if (!found) {
+ adapter->tx_hwtstamp_skipped++;
+ spin_unlock_irqrestore(&adapter->ptp_tx_lock,
+ lock_flags);
+ return;
+ }
+
+ tstamp->start = jiffies;
+ tstamp->xsk_queue_index = tx_ring->queue_index;
+ tstamp->xsk_tx_buffer = meta_req->tx_buffer;
+ tstamp->buffer_type = IGC_TX_BUFFER_TYPE_XSK;
+
+ /* Hold the transmit completion until timestamp is ready */
+ meta_req->tx_buffer->xsk_pending_ts = true;
+
+ /* Keep the pointer to tx_timestamp, which is located in the XDP
+ * metadata area; the Tx hardware timestamp is written there once
+ * it is ready.
+ */
+ xsk_tx_metadata_to_compl(meta_req->meta, &tstamp->xsk_meta);
+
+ /* Set timestamp bit based on the _TSTAMP(_X) bit. */
+ tx_flags |= tstamp->flags;
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP,
+ (IGC_ADVTXD_MAC_TSTAMP));
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP_1,
+ (IGC_ADVTXD_TSTAMP_REG_1));
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP_2,
+ (IGC_ADVTXD_TSTAMP_REG_2));
+ meta_req->cmd_type |= IGC_SET_FLAG(tx_flags,
+ IGC_TX_FLAGS_TSTAMP_3,
+ (IGC_ADVTXD_TSTAMP_REG_3));
+
+ spin_unlock_irqrestore(&adapter->ptp_tx_lock, lock_flags);
+ }
+}
+
+static u64 igc_xsk_fill_timestamp(void *_priv)
+{
+ return *(u64 *)_priv;
+}
+
+const struct xsk_tx_metadata_ops igc_xsk_tx_metadata_ops = {
+ .tmo_request_timestamp = igc_xsk_request_timestamp,
+ .tmo_fill_timestamp = igc_xsk_fill_timestamp,
+};
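For context, the userspace half of this contract, as a hedged sketch (not part of this series): with a UMEM registered with a non-zero tx_metadata_len, the request struct sits immediately in front of the frame data; "frame" and "desc" are illustrative.

	#include <linux/if_xdp.h>

	struct xsk_tx_metadata *meta =
		(struct xsk_tx_metadata *)(frame - sizeof(*meta));

	meta->flags = XDP_TXMD_FLAGS_TIMESTAMP;	/* ask the NIC for a Tx stamp */
	desc->options |= XDP_TX_METADATA;	/* mark the descriptor */
	/* ... kick Tx; once igc completes the request registered above,
	 * meta->completion.tx_timestamp holds the hardware time.
	 */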
+
static void igc_xdp_xmit_zc(struct igc_ring *ring)
{
struct xsk_buff_pool *pool = ring->xsk_pool;
@@ -2895,24 +2977,34 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring)
budget = igc_desc_unused(ring);
while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) {
- u32 cmd_type, olinfo_status;
+ struct igc_metadata_request meta_req;
+ struct xsk_tx_metadata *meta = NULL;
struct igc_tx_buffer *bi;
+ u32 olinfo_status;
dma_addr_t dma;
- cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
- IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
- xdp_desc.len;
+ meta_req.cmd_type = IGC_ADVTXD_DTYP_DATA |
+ IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS |
+ IGC_TXD_DCMD | xdp_desc.len;
olinfo_status = xdp_desc.len << IGC_ADVTXD_PAYLEN_SHIFT;
dma = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
xsk_buff_raw_dma_sync_for_device(pool, dma, xdp_desc.len);
+ bi = &ring->tx_buffer_info[ntu];
+
+ meta_req.tx_ring = ring;
+ meta_req.tx_buffer = bi;
+ meta_req.meta = meta;
+ xsk_tx_metadata_request(meta, &igc_xsk_tx_metadata_ops,
+ &meta_req);
tx_desc = IGC_TX_DESC(ring, ntu);
- tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ tx_desc->read.cmd_type_len = cpu_to_le32(meta_req.cmd_type);
tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
tx_desc->read.buffer_addr = cpu_to_le64(dma);
- bi = &ring->tx_buffer_info[ntu];
bi->type = IGC_TX_BUFFER_TYPE_XSK;
bi->protocol = 0;
bi->bytecount = xdp_desc.len;
@@ -2975,6 +3067,13 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget)
if (!(eop_desc->wb.status & cpu_to_le32(IGC_TXD_STAT_DD)))
break;
+ /* Hold the completions while there's a pending tx hardware
+ * timestamp request from XDP Tx metadata.
+ */
+ if (tx_buffer->type == IGC_TX_BUFFER_TYPE_XSK &&
+ tx_buffer->xsk_pending_ts)
+ break;
+
/* clear next_to_watch to prevent false hangs */
tx_buffer->next_to_watch = NULL;
@@ -5176,7 +5275,7 @@ static int igc_change_mtu(struct net_device *netdev, int new_mtu)
igc_down(adapter);
netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
igc_up(adapter);
@@ -5930,15 +6029,6 @@ static int __igc_open(struct net_device *netdev, bool resuming)
if (err)
goto err_req_irq;
- /* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
- if (err)
- goto err_set_queues;
-
- err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
- if (err)
- goto err_set_queues;
-
clear_bit(__IGC_DOWN, &adapter->state);
for (i = 0; i < adapter->num_q_vectors; i++)
@@ -5959,8 +6049,6 @@ static int __igc_open(struct net_device *netdev, bool resuming)
return IGC_SUCCESS;
-err_set_queues:
- igc_free_irq(adapter);
err_req_irq:
igc_release_hw_control(adapter);
igc_power_down_phy_copper_base(&adapter->hw);
@@ -5977,6 +6065,17 @@ err_setup_tx:
int igc_open(struct net_device *netdev)
{
+ struct igc_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_queues(netdev, adapter->num_tx_queues,
+ adapter->num_rx_queues);
+ if (err) {
+ netdev_err(netdev, "error setting real queue count\n");
+ return err;
+ }
+
return __igc_open(netdev, false);
}
@@ -6207,21 +6306,6 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
size_t n;
int i;
- switch (qopt->cmd) {
- case TAPRIO_CMD_REPLACE:
- break;
- case TAPRIO_CMD_DESTROY:
- return igc_tsn_clear_schedule(adapter);
- case TAPRIO_CMD_STATS:
- igc_taprio_stats(adapter->netdev, &qopt->stats);
- return 0;
- case TAPRIO_CMD_QUEUE_STATS:
- igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
- return 0;
- default:
- return -EOPNOTSUPP;
- }
-
if (qopt->base_time < 0)
return -ERANGE;
@@ -6231,12 +6315,16 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
if (!validate_schedule(adapter, qopt))
return -EINVAL;
+ igc_ptp_read(adapter, &now);
+
+ if (igc_tsn_is_taprio_activated_by_user(adapter) &&
+ is_base_time_past(qopt->base_time, &now))
+ adapter->qbv_config_change_errors++;
+
adapter->cycle_time = qopt->cycle_time;
adapter->base_time = qopt->base_time;
adapter->taprio_offload_enable = true;
- igc_ptp_read(adapter, &now);
-
for (n = 0; n < qopt->num_entries; n++) {
struct tc_taprio_sched_entry *e = &qopt->entries[n];
@@ -6330,7 +6418,23 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter,
if (hw->mac.type != igc_i225)
return -EOPNOTSUPP;
- err = igc_save_qbv_schedule(adapter, qopt);
+ switch (qopt->cmd) {
+ case TAPRIO_CMD_REPLACE:
+ err = igc_save_qbv_schedule(adapter, qopt);
+ break;
+ case TAPRIO_CMD_DESTROY:
+ err = igc_tsn_clear_schedule(adapter);
+ break;
+ case TAPRIO_CMD_STATS:
+ igc_taprio_stats(adapter->netdev, &qopt->stats);
+ return 0;
+ case TAPRIO_CMD_QUEUE_STATS:
+ igc_taprio_queue_stats(adapter->netdev, &qopt->queue_stats);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+
if (err)
return err;
@@ -6803,6 +6907,7 @@ static int igc_probe(struct pci_dev *pdev,
netdev->netdev_ops = &igc_netdev_ops;
netdev->xdp_metadata_ops = &igc_xdp_metadata_ops;
+ netdev->xsk_tx_metadata_ops = &igc_xsk_tx_metadata_ops;
igc_ethtool_set_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
@@ -7108,8 +7213,7 @@ static int __igc_shutdown(struct pci_dev *pdev, bool *enable_wake,
return 0;
}
-#ifdef CONFIG_PM
-static int __maybe_unused igc_runtime_suspend(struct device *dev)
+static int igc_runtime_suspend(struct device *dev)
{
return __igc_shutdown(to_pci_dev(dev), NULL, 1);
}
@@ -7144,7 +7248,7 @@ static void igc_deliver_wake_packet(struct net_device *netdev)
netif_rx(skb);
}
-static int __maybe_unused igc_resume(struct device *dev)
+static int igc_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7186,23 +7290,21 @@ static int __maybe_unused igc_resume(struct device *dev)
wr32(IGC_WUS, ~0);
- rtnl_lock();
- if (!err && netif_running(netdev))
+ if (netif_running(netdev)) {
err = __igc_open(netdev, true);
-
- if (!err)
- netif_device_attach(netdev);
- rtnl_unlock();
+ if (!err)
+ netif_device_attach(netdev);
+ }
return err;
}
-static int __maybe_unused igc_runtime_resume(struct device *dev)
+static int igc_runtime_resume(struct device *dev)
{
return igc_resume(dev);
}
-static int __maybe_unused igc_suspend(struct device *dev)
+static int igc_suspend(struct device *dev)
{
return __igc_shutdown(to_pci_dev(dev), NULL, 0);
}
@@ -7217,7 +7319,6 @@ static int __maybe_unused igc_runtime_idle(struct device *dev)
return -EBUSY;
}
-#endif /* CONFIG_PM */
static void igc_shutdown(struct pci_dev *pdev)
{
@@ -7332,22 +7433,16 @@ static const struct pci_error_handlers igc_err_handler = {
.resume = igc_io_resume,
};
-#ifdef CONFIG_PM
-static const struct dev_pm_ops igc_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(igc_suspend, igc_resume)
- SET_RUNTIME_PM_OPS(igc_runtime_suspend, igc_runtime_resume,
- igc_runtime_idle)
-};
-#endif
+static _DEFINE_DEV_PM_OPS(igc_pm_ops, igc_suspend, igc_resume,
+ igc_runtime_suspend, igc_runtime_resume,
+ igc_runtime_idle);
static struct pci_driver igc_driver = {
.name = igc_driver_name,
.id_table = igc_pci_tbl,
.probe = igc_probe,
.remove = igc_remove,
-#ifdef CONFIG_PM
- .driver.pm = &igc_pm_ops,
-#endif
+ .driver.pm = pm_ptr(&igc_pm_ops),
.shutdown = igc_shutdown,
.err_handler = &igc_err_handler,
};
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 885faaa7b9de..946edbad4302 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -11,6 +11,7 @@
#include <linux/ktime.h>
#include <linux/delay.h>
#include <linux/iopoll.h>
+#include <net/xdp_sock_drv.h>
#define INCVALUE_MASK 0x7fffffff
#define ISGN 0x80000000
@@ -545,6 +546,30 @@ static void igc_ptp_enable_rx_timestamp(struct igc_adapter *adapter)
wr32(IGC_TSYNCRXCTL, val);
}
+static void igc_ptp_free_tx_buffer(struct igc_adapter *adapter,
+ struct igc_tx_timestamp_request *tstamp)
+{
+ if (tstamp->buffer_type == IGC_TX_BUFFER_TYPE_XSK) {
+ /* Release the transmit completion */
+ tstamp->xsk_tx_buffer->xsk_pending_ts = false;
+
+ /* Note: tstamp->skb and tstamp->xsk_tx_buffer are in union.
+ * By setting tstamp->xsk_tx_buffer to NULL, tstamp->skb will
+ * become NULL as well.
+ */
+ tstamp->xsk_tx_buffer = NULL;
+ tstamp->buffer_type = 0;
+
+ /* Trigger txrx interrupt for transmit completion */
+ igc_xsk_wakeup(adapter->netdev, tstamp->xsk_queue_index, 0);
+
+ return;
+ }
+
+ dev_kfree_skb_any(tstamp->skb);
+ tstamp->skb = NULL;
+}
+
static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter)
{
unsigned long flags;
@@ -555,8 +580,8 @@ static void igc_ptp_clear_tx_tstamp(struct igc_adapter *adapter)
for (i = 0; i < IGC_MAX_TX_TSTAMP_REGS; i++) {
struct igc_tx_timestamp_request *tstamp = &adapter->tx_tstamp[i];
- dev_kfree_skb_any(tstamp->skb);
- tstamp->skb = NULL;
+ if (tstamp->skb)
+ igc_ptp_free_tx_buffer(adapter, tstamp);
}
spin_unlock_irqrestore(&adapter->ptp_tx_lock, flags);
@@ -657,8 +682,9 @@ static int igc_ptp_set_timestamp_mode(struct igc_adapter *adapter,
static void igc_ptp_tx_timeout(struct igc_adapter *adapter,
struct igc_tx_timestamp_request *tstamp)
{
- dev_kfree_skb_any(tstamp->skb);
- tstamp->skb = NULL;
+ if (tstamp->skb)
+ igc_ptp_free_tx_buffer(adapter, tstamp);
+
adapter->tx_hwtstamp_timeouts++;
netdev_warn(adapter->netdev, "Tx timestamp timeout\n");
@@ -729,10 +755,21 @@ static void igc_ptp_tx_reg_to_stamp(struct igc_adapter *adapter,
shhwtstamps.hwtstamp =
ktime_add_ns(shhwtstamps.hwtstamp, adjust);
- tstamp->skb = NULL;
+ /* Copy the tx hardware timestamp into xdp metadata or skb */
+ if (tstamp->buffer_type == IGC_TX_BUFFER_TYPE_XSK) {
+ struct xsk_buff_pool *xsk_pool;
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
+ xsk_pool = adapter->tx_ring[tstamp->xsk_queue_index]->xsk_pool;
+ if (xsk_pool && xp_tx_metadata_enabled(xsk_pool)) {
+ xsk_tx_metadata_complete(&tstamp->xsk_meta,
+ &igc_xsk_tx_metadata_ops,
+ &shhwtstamps.hwtstamp);
+ }
+ } else {
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+
+ igc_ptp_free_tx_buffer(adapter, tstamp);
}
/**
@@ -901,7 +938,11 @@ static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
{
#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML)
- return convert_art_ns_to_tsc(tstamp);
+ return (struct system_counterval_t) {
+ .cs_id = CSID_X86_ART,
+ .cycles = tstamp,
+ .use_nsecs = true,
+ };
#else
return (struct system_counterval_t) { };
#endif
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c
index 22cefb1eeedf..d68fa7f3d5f0 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.c
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.c
@@ -49,12 +49,19 @@ static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter)
return new_flags;
}
+static bool igc_tsn_is_tx_mode_in_tsn(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ return !!(rd32(IGC_TQAVCTRL) & IGC_TQAVCTRL_TRANSMIT_MODE_TSN);
+}
+
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
u16 txoffset;
- if (!is_any_launchtime(adapter))
+ if (!igc_tsn_is_tx_mode_in_tsn(adapter))
return;
switch (adapter->link_speed) {
@@ -78,6 +85,23 @@ void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter)
wr32(IGC_GTXOFFSET, txoffset);
}
+static void igc_tsn_restore_retx_default(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 retxctl;
+
+ retxctl = rd32(IGC_RETX_CTL) & IGC_RETX_CTL_WATERMARK_MASK;
+ wr32(IGC_RETX_CTL, retxctl);
+}
+
+bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+
+ return (rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
+ adapter->taprio_offload_enable;
+}
+
/* Returns the TSN specific registers to their default values after
* the adapter is reset.
*/
@@ -91,6 +115,9 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT);
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT);
+ if (igc_is_device_id_i226(hw))
+ igc_tsn_restore_retx_default(adapter);
+
tqavctrl = rd32(IGC_TQAVCTRL);
tqavctrl &= ~(IGC_TQAVCTRL_TRANSMIT_MODE_TSN |
IGC_TQAVCTRL_ENHANCED_QAV | IGC_TQAVCTRL_FUTSCDDIS);
@@ -111,6 +138,25 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter)
return 0;
}
+/* To partially fix the i226 HW errata, reduce MAC internal buffering from
+ * 192 bytes to 88 bytes by setting the RETX_CTL register, per the
+ * recommendation from:
+ * a) Ethernet Controller I225/I226 Specification Update Rev 2.1
+ * Item 9: TSN: Packet Transmission Might Cross the Qbv Window
+ * b) I225/6 SW User Manual Rev 1.2.4: Section 8.11.5 Retry Buffer Control
+ */
+static void igc_tsn_set_retx_qbvfullthreshold(struct igc_adapter *adapter)
+{
+ struct igc_hw *hw = &adapter->hw;
+ u32 retxctl, watermark;
+
+ retxctl = rd32(IGC_RETX_CTL);
+ watermark = retxctl & IGC_RETX_CTL_WATERMARK_MASK;
+ /* Set QBVFULLTH value using watermark and set QBVFULLEN */
+ retxctl |= (watermark << IGC_RETX_CTL_QBVFULLTH_SHIFT) |
+ IGC_RETX_CTL_QBVFULLEN;
+ wr32(IGC_RETX_CTL, retxctl);
+}
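A concrete example of the composition above, assuming the register reads back with a watermark of 0xA:

	/*	0xA | (0xA << IGC_RETX_CTL_QBVFULLTH_SHIFT) | IGC_RETX_CTL_QBVFULLEN
	 *	= 0xA | 0xA00 | 0x1000 = 0x1A0A
	 *
	 * i.e. the full threshold mirrors the watermark and the enable bit is
	 * set; the restore path above masks everything but the watermark off.
	 */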
+
static int igc_tsn_enable_offload(struct igc_adapter *adapter)
{
struct igc_hw *hw = &adapter->hw;
@@ -123,6 +169,9 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter)
wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_TSN);
wr32(IGC_TXPBS, IGC_TXPBSIZE_TSN);
+ if (igc_is_device_id_i226(hw))
+ igc_tsn_set_retx_qbvfullthreshold(adapter);
+
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
u32 txqctl = 0;
@@ -262,14 +311,6 @@ skip_cbs:
s64 n = div64_s64(ktime_sub_ns(systim, base_time), cycle);
base_time = ktime_add_ns(base_time, (n + 1) * cycle);
-
- /* Increase the counter if scheduling into the past while
- * Gate Control List (GCL) is running.
- */
- if ((rd32(IGC_BASET_H) || rd32(IGC_BASET_L)) &&
- (adapter->tc_setup_type == TC_SETUP_QDISC_TAPRIO) &&
- (adapter->qbv_count > 1))
- adapter->qbv_config_change_errors++;
} else {
if (igc_is_device_id_i226(hw)) {
ktime_t adjust_time, expires_time;
@@ -331,15 +372,22 @@ int igc_tsn_reset(struct igc_adapter *adapter)
return err;
}
-int igc_tsn_offload_apply(struct igc_adapter *adapter)
+static bool igc_tsn_will_tx_mode_change(struct igc_adapter *adapter)
{
- struct igc_hw *hw = &adapter->hw;
+ bool any_tsn_enabled = !!(igc_tsn_new_flags(adapter) &
+ IGC_FLAG_TSN_ANY_ENABLED);
- /* Per I225/6 HW Design Section 7.5.2.1, transmit mode
- * cannot be changed dynamically. Require reset the adapter.
+ return (any_tsn_enabled && !igc_tsn_is_tx_mode_in_tsn(adapter)) ||
+ (!any_tsn_enabled && igc_tsn_is_tx_mode_in_tsn(adapter));
+}
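Since both operands are bool, the two-arm expression above is an exclusive-or; an equivalent, terser spelling (a sketch, not what the patch uses):

	return any_tsn_enabled != igc_tsn_is_tx_mode_in_tsn(adapter);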
+
+int igc_tsn_offload_apply(struct igc_adapter *adapter)
+{
+ /* Per the I225/6 HW Design Section 7.5.2.1 guideline, a Tx mode
+ * transition from legacy to TSN or back requires an adapter reset.
*/
if (netif_running(adapter->netdev) &&
- (igc_is_device_id_i225(hw) || !adapter->qbv_count)) {
+ igc_tsn_will_tx_mode_change(adapter)) {
schedule_work(&adapter->reset_task);
return 0;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h
index b53e6af560b7..98ec845a86bf 100644
--- a/drivers/net/ethernet/intel/igc/igc_tsn.h
+++ b/drivers/net/ethernet/intel/igc/igc_tsn.h
@@ -7,5 +7,6 @@
int igc_tsn_offload_apply(struct igc_adapter *adapter);
int igc_tsn_reset(struct igc_adapter *adapter);
void igc_tsn_adjust_txtime_offset(struct igc_adapter *adapter);
+bool igc_tsn_is_taprio_activated_by_user(struct igc_adapter *adapter);
#endif /* _IGC_TSN_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 4fb0d9e3f2da..965e5ce1b326 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -6,10 +6,10 @@
obj-$(CONFIG_IXGBE) += ixgbe.o
-ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
- ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
- ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
- ixgbe_xsk.o
+ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
+ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+ ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \
+ ixgbe_xsk.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 6e6e6f1847b6..4cac76254966 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3170,7 +3170,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev,
}
static int ixgbe_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index f985252c8c8d..8057cef61f39 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -162,7 +162,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
@@ -6847,7 +6846,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
@@ -6974,7 +6973,7 @@ int ixgbe_close(struct net_device *netdev)
return 0;
}
-static int __maybe_unused ixgbe_resume(struct device *dev_d)
+static int ixgbe_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
@@ -7082,7 +7081,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
return 0;
}
-static int __maybe_unused ixgbe_suspend(struct device *dev_d)
+static int ixgbe_suspend(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
int retval;
@@ -10061,15 +10060,10 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- int status;
- __u16 mode;
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
+ __u16 mode = nla_get_u16(attr);
+ int status = ixgbe_configure_bridge_mode(adapter, mode);
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
- mode = nla_get_u16(attr);
- status = ixgbe_configure_bridge_mode(adapter, mode);
if (status)
return status;
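The hunk above swaps the open-coded nla_type() filter for nla_for_each_nested_type() from <net/netlink.h>, which skips non-matching attributes inside the iterator. Its general shape, with everything but the macro itself illustrative:

	struct nlattr *attr;
	int rem;

	nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
		u16 mode = nla_get_u16(attr);	/* only IFLA_BRIDGE_MODE here */
		/* ... act on mode ... */
	}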
@@ -11588,14 +11582,14 @@ static const struct pci_error_handlers ixgbe_err_handler = {
.resume = ixgbe_io_resume,
};
-static SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ixgbe_pm_ops, ixgbe_suspend, ixgbe_resume);
static struct pci_driver ixgbe_driver = {
.name = ixgbe_driver_name,
.id_table = ixgbe_pci_tbl,
.probe = ixgbe_probe,
.remove = ixgbe_remove,
- .driver.pm = &ixgbe_pm_ops,
+ .driver.pm = pm_sleep_ptr(&ixgbe_pm_ops),
.shutdown = ixgbe_shutdown,
.sriov_configure = ixgbe_pci_sriov_configure,
.err_handler = &ixgbe_err_handler
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index ed440dd0c4f9..346e3d9114a8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -2179,7 +2179,6 @@ enum {
#define IXGBE_PCI_LINK_SPEED_5000 0x2
#define IXGBE_PCI_LINK_SPEED_8000 0x3
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
-#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf
@@ -3676,9 +3675,7 @@ struct ixgbe_info {
#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
-#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238)
#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
-#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918)
#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
@@ -3688,7 +3685,6 @@ struct ixgbe_info {
#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
-#define IXGBE_KRM_FLX_TMRS_CTRL_ST31(P) ((P) ? 0x9180 : 0x5180)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 2decb0710b6e..a5f644934445 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1722,59 +1722,9 @@ static int ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
return -EINVAL;
}
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* change mode enforcement rules to hybrid */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= 0x0400;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* manually control the config */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= 0x20002240;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* move the AN base page values */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= 0x1;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* set the AN37 over CB mode */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= 0x20000000;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
-
- /* restart AN manually */
- (void)mac->ops.read_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
- reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
-
- (void)mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ status = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
/* Toggle port SW reset by AN reset. */
status = ixgbe_restart_an_internal_phy_x550em(hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index d34d715c59eb..3e3b471e53f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -220,8 +220,7 @@ static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
net_prefetch(xdp->data_meta);
/* allocate a skb to store the frags */
- skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize);
if (unlikely(!skb))
return NULL;
@@ -304,7 +303,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
}
bi->xdp->data_end = bi->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(bi->xdp);
xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);
if (likely(xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index 186a4bb24fde..01d3e892f3fa 100644
--- a/drivers/net/ethernet/intel/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -6,9 +6,5 @@
obj-$(CONFIG_IXGBEVF) += ixgbevf.o
-ixgbevf-objs := vf.o \
- mbx.o \
- ethtool.o \
- ixgbevf_main.o
+ixgbevf-y := vf.o mbx.o ethtool.o ixgbevf_main.o
ixgbevf-$(CONFIG_IXGBEVF_IPSEC) += ipsec.o
-
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9c960017a6de..149911e3002a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -76,7 +76,6 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = {
};
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
-MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");
@@ -4292,7 +4291,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu, new_mtu);
/* must set new MTU before calling down or up */
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
ixgbevf_reinit_locked(adapter);
@@ -4300,7 +4299,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
-static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
+static int ixgbevf_suspend(struct device *dev_d)
{
struct net_device *netdev = dev_get_drvdata(dev_d);
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -4317,7 +4316,7 @@ static int __maybe_unused ixgbevf_suspend(struct device *dev_d)
return 0;
}
-static int __maybe_unused ixgbevf_resume(struct device *dev_d)
+static int ixgbevf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -4854,7 +4853,7 @@ static const struct pci_error_handlers ixgbevf_err_handler = {
.resume = ixgbevf_io_resume,
};
-static SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
+static DEFINE_SIMPLE_DEV_PM_OPS(ixgbevf_pm_ops, ixgbevf_suspend, ixgbevf_resume);
static struct pci_driver ixgbevf_driver = {
.name = ixgbevf_driver_name,
@@ -4863,7 +4862,7 @@ static struct pci_driver ixgbevf_driver = {
.remove = ixgbevf_remove,
/* Power Management Hooks */
- .driver.pm = &ixgbevf_pm_ops,
+ .driver.pm = pm_sleep_ptr(&ixgbevf_pm_ops),
.shutdown = ixgbevf_shutdown,
.err_handler = &ixgbevf_err_handler
diff --git a/drivers/net/ethernet/intel/libeth/Kconfig b/drivers/net/ethernet/intel/libeth/Kconfig
new file mode 100644
index 000000000000..480293b71dbc
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/Kconfig
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+config LIBETH
+ tristate
+ select PAGE_POOL
+ help
+ libeth is a common library containing routines shared between several
+ drivers, but not yet promoted to the generic kernel API.
diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile
new file mode 100644
index 000000000000..52492b081132
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+obj-$(CONFIG_LIBETH) += libeth.o
+
+libeth-y := rx.o
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
new file mode 100644
index 000000000000..f20926669318
--- /dev/null
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include <net/libeth/rx.h>
+
+/* Rx buffer management */
+
+/**
+ * libeth_rx_hw_len_mtu - get the actual buffer size to be passed to HW
+ * @pp: &page_pool_params of the netdev to calculate the size for
+ * @max_len: maximum buffer size for a single descriptor
+ *
+ * Return: HW-writeable length per buffer to pass to the HW, accounting for:
+ * the netdev's MTU, the HW's required alignment, the minimum and maximum
+ * allowed values, and the system page size.
+ */
+static u32 libeth_rx_hw_len_mtu(const struct page_pool_params *pp, u32 max_len)
+{
+ u32 len;
+
+ len = READ_ONCE(pp->netdev->mtu) + LIBETH_RX_LL_LEN;
+ len = ALIGN(len, LIBETH_RX_BUF_STRIDE);
+ len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
+ pp->max_len);
+
+ return len;
+}
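To make the math concrete, a sketch with assumed numbers (the constants live in <net/libeth/rx.h> and are not part of this hunk): if LIBETH_RX_LL_LEN were 26 bytes of L2 overhead and LIBETH_RX_BUF_STRIDE a 128-byte HW granularity, a 1500-byte MTU would give len = ALIGN(1526, 128) = 1536 bytes per buffer, subsequently capped by the caller's max_len (0 meaning "no cap") and by pp->max_len.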
+
+/**
+ * libeth_rx_hw_len_truesize - get the short buffer size to be passed to HW
+ * @pp: &page_pool_params of the netdev to calculate the size for
+ * @max_len: maximum buffer size for a single descriptor
+ * @truesize: desired truesize for the buffers
+ *
+ * Return: HW-writeable length per buffer to pass to the HW, ignoring the
+ * MTU and as close as possible to the passed truesize. Can be used for
+ * "short" buffer queues to fragment pages more efficiently.
+ */
+static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp,
+ u32 max_len, u32 truesize)
+{
+ u32 min, len;
+
+ min = SKB_HEAD_ALIGN(pp->offset + LIBETH_RX_BUF_STRIDE);
+ truesize = clamp(roundup_pow_of_two(truesize), roundup_pow_of_two(min),
+ PAGE_SIZE << LIBETH_RX_PAGE_ORDER);
+
+ len = SKB_WITH_OVERHEAD(truesize - pp->offset);
+ len = ALIGN_DOWN(len, LIBETH_RX_BUF_STRIDE) ? : LIBETH_RX_BUF_STRIDE;
+ len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE),
+ pp->max_len);
+
+ return len;
+}
+
+/**
+ * libeth_rx_page_pool_params - calculate params with the stack overhead
+ * @fq: buffer queue to calculate the size for
+ * @pp: &page_pool_params of the netdev
+ *
+ * Set the PP params to account for all needed stack overhead (headroom,
+ * tailroom) and set both the HW buffer length and the truesize for all
+ * types of buffers. For "short" buffers, truesize never exceeds the
+ * "wanted" one; for the rest, it can be up to the page size.
+ *
+ * Return: true on success, false on invalid input params.
+ */
+static bool libeth_rx_page_pool_params(struct libeth_fq *fq,
+ struct page_pool_params *pp)
+{
+ pp->offset = LIBETH_SKB_HEADROOM;
+ /* HW-writeable / syncable length per one page */
+ pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset);
+
+ /* HW-writeable length per buffer */
+ switch (fq->type) {
+ case LIBETH_FQE_MTU:
+ fq->buf_len = libeth_rx_hw_len_mtu(pp, fq->buf_len);
+ break;
+ case LIBETH_FQE_SHORT:
+ fq->buf_len = libeth_rx_hw_len_truesize(pp, fq->buf_len,
+ fq->truesize);
+ break;
+ case LIBETH_FQE_HDR:
+ fq->buf_len = ALIGN(LIBETH_MAX_HEAD, LIBETH_RX_BUF_STRIDE);
+ break;
+ default:
+ return false;
+ }
+
+ /* Buffer size to allocate */
+ fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp->offset +
+ fq->buf_len));
+
+ return true;
+}
+
+/**
+ * libeth_rx_page_pool_params_zc - calculate params without the stack overhead
+ * @fq: buffer queue to calculate the size for
+ * @pp: &page_pool_params of the netdev
+ *
+ * Set the PP params to exclude the stack overhead, and set both the buffer
+ * length and the truesize, which are equal for the data buffers. Note that
+ * this requires separate header buffers to always be active and to account
+ * for the overhead.
+ * With the MTU == ``PAGE_SIZE``, this allows the kernel to enable the zerocopy
+ * mode.
+ *
+ * Return: true on success, false on invalid input params.
+ */
+static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq,
+ struct page_pool_params *pp)
+{
+ u32 mtu, max;
+
+ pp->offset = 0;
+ pp->max_len = PAGE_SIZE << LIBETH_RX_PAGE_ORDER;
+
+ switch (fq->type) {
+ case LIBETH_FQE_MTU:
+ mtu = READ_ONCE(pp->netdev->mtu);
+ break;
+ case LIBETH_FQE_SHORT:
+ mtu = fq->truesize;
+ break;
+ default:
+ return false;
+ }
+
+ mtu = roundup_pow_of_two(mtu);
+ max = min(rounddown_pow_of_two(fq->buf_len ? : U32_MAX),
+ pp->max_len);
+
+ fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max);
+ fq->truesize = fq->buf_len;
+
+ return true;
+}
+
+/**
+ * libeth_rx_fq_create - create a PP with the default libeth settings
+ * @fq: buffer queue struct to fill
+ * @napi: &napi_struct covering this PP (no usage outside its poll loops)
+ *
+ * Return: %0 on success, -%errno on failure.
+ */
+int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
+{
+ struct page_pool_params pp = {
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .order = LIBETH_RX_PAGE_ORDER,
+ .pool_size = fq->count,
+ .nid = fq->nid,
+ .dev = napi->dev->dev.parent,
+ .netdev = napi->dev,
+ .napi = napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ };
+ struct libeth_fqe *fqes;
+ struct page_pool *pool;
+ bool ret;
+
+ if (!fq->hsplit)
+ ret = libeth_rx_page_pool_params(fq, &pp);
+ else
+ ret = libeth_rx_page_pool_params_zc(fq, &pp);
+ if (!ret)
+ return -EINVAL;
+
+ pool = page_pool_create(&pp);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ fqes = kvcalloc_node(fq->count, sizeof(*fqes), GFP_KERNEL, fq->nid);
+ if (!fqes)
+ goto err_buf;
+
+ fq->fqes = fqes;
+ fq->pp = pool;
+
+ return 0;
+
+err_buf:
+ page_pool_destroy(pool);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_create, LIBETH);
+
+/**
+ * libeth_rx_fq_destroy - destroy a &page_pool created by libeth
+ * @fq: buffer queue to process
+ */
+void libeth_rx_fq_destroy(struct libeth_fq *fq)
+{
+ kvfree(fq->fqes);
+ page_pool_destroy(fq->pp);
+}
+EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, LIBETH);
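For orientation, a minimal driver-side usage sketch of the pair above; the queue length, NUMA node and the napi pointer are illustrative assumptions, not taken from any real consumer:

	struct libeth_fq fq = {
		.count	 = 512,			/* ring length, illustrative */
		.nid	 = NUMA_NO_NODE,	/* let the page_pool pick the node */
		.type	 = LIBETH_FQE_MTU,	/* size buffers from the netdev MTU */
		.buf_len = 0,			/* no per-descriptor cap */
		.hsplit	 = false,		/* params with stack overhead */
	};
	int err;

	err = libeth_rx_fq_create(&fq, napi);
	if (err)
		return err;

	/* ... post pages from fq.pp to the HW ring, track them in fq.fqes ... */

	libeth_rx_fq_destroy(&fq);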
+
+/**
+ * libeth_rx_recycle_slow - recycle a libeth page from the NAPI context
+ * @page: page to recycle
+ *
+ * To be used on exceptions or rare cases not requiring fast inline recycling.
+ */
+void libeth_rx_recycle_slow(struct page *page)
+{
+ page_pool_recycle_direct(page->pp, page);
+}
+EXPORT_SYMBOL_NS_GPL(libeth_rx_recycle_slow, LIBETH);
+
+/* Converting abstract packet type numbers into a software structure with
+ * the packet parameters to do O(1) lookup on Rx.
+ */
+
+static const u16 libeth_rx_pt_xdp_oip[] = {
+ [LIBETH_RX_PT_OUTER_L2] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_OUTER_IPV4] = XDP_RSS_L3_IPV4,
+ [LIBETH_RX_PT_OUTER_IPV6] = XDP_RSS_L3_IPV6,
+};
+
+static const u16 libeth_rx_pt_xdp_iprot[] = {
+ [LIBETH_RX_PT_INNER_NONE] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_INNER_UDP] = XDP_RSS_L4_UDP,
+ [LIBETH_RX_PT_INNER_TCP] = XDP_RSS_L4_TCP,
+ [LIBETH_RX_PT_INNER_SCTP] = XDP_RSS_L4_SCTP,
+ [LIBETH_RX_PT_INNER_ICMP] = XDP_RSS_L4_ICMP,
+ [LIBETH_RX_PT_INNER_TIMESYNC] = XDP_RSS_TYPE_NONE,
+};
+
+static const u16 libeth_rx_pt_xdp_pl[] = {
+ [LIBETH_RX_PT_PAYLOAD_NONE] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_PAYLOAD_L2] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_PAYLOAD_L3] = XDP_RSS_TYPE_NONE,
+ [LIBETH_RX_PT_PAYLOAD_L4] = XDP_RSS_L4,
+};
+
+/**
+ * libeth_rx_pt_gen_hash_type - generate an XDP RSS hash type for a PT
+ * @pt: PT structure to evaluate
+ *
+ * Generates the ``hash_type`` field with XDP RSS type values from the parsed
+ * packet parameters when they're obtained dynamically at runtime.
+ */
+void libeth_rx_pt_gen_hash_type(struct libeth_rx_pt *pt)
+{
+ pt->hash_type = 0;
+ pt->hash_type |= libeth_rx_pt_xdp_oip[pt->outer_ip];
+ pt->hash_type |= libeth_rx_pt_xdp_iprot[pt->inner_prot];
+ pt->hash_type |= libeth_rx_pt_xdp_pl[pt->payload_layer];
+}
+EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH);
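As a sketch of the lookup above, with field values assumed for an outer-IPv4 TCP frame:

	struct libeth_rx_pt pt = {
		.outer_ip	= LIBETH_RX_PT_OUTER_IPV4,
		.inner_prot	= LIBETH_RX_PT_INNER_TCP,
		.payload_layer	= LIBETH_RX_PT_PAYLOAD_L4,
	};

	libeth_rx_pt_gen_hash_type(&pt);
	/* pt.hash_type == XDP_RSS_L3_IPV4 | XDP_RSS_L4_TCP | XDP_RSS_L4,
	 * which is how <net/xdp.h> composes XDP_RSS_TYPE_L4_IPV4_TCP.
	 */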
+
+/* Module */
+
+MODULE_DESCRIPTION("Common Ethernet library");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/intel/libie/Kconfig b/drivers/net/ethernet/intel/libie/Kconfig
new file mode 100644
index 000000000000..33aff6bc8f81
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/Kconfig
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+config LIBIE
+ tristate
+ select LIBETH
+ help
+ libie (Intel Ethernet library) is a common library built on top of
+ libeth and containing vendor-specific routines shared between several
+ Intel Ethernet drivers.
diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile
new file mode 100644
index 000000000000..ffd27fab916a
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+obj-$(CONFIG_LIBIE) += libie.o
+
+libie-y := rx.o
diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c
new file mode 100644
index 000000000000..aceb8d8813c4
--- /dev/null
+++ b/drivers/net/ethernet/intel/libie/rx.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2024 Intel Corporation */
+
+#include <linux/net/intel/libie/rx.h>
+
+/* O(1) converting i40e/ice/iavf's 8/10-bit hardware packet type to a parsed
+ * bitfield struct.
+ */
+
+/* A few supplementary definitions for when XDP hash types do not coincide
+ * with what can be generated from ptype definitions by means of preprocessor
+ * concatenation.
+ */
+#define XDP_RSS_L3_L2 XDP_RSS_TYPE_NONE
+#define XDP_RSS_L4_NONE XDP_RSS_TYPE_NONE
+#define XDP_RSS_L4_TIMESYNC XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_L3 XDP_RSS_TYPE_NONE
+#define XDP_RSS_TYPE_L4 XDP_RSS_L4
+
+#define LIBIE_RX_PT(oip, ofrag, tun, tp, tefr, iprot, pl) { \
+ .outer_ip = LIBETH_RX_PT_OUTER_##oip, \
+ .outer_frag = LIBETH_RX_PT_##ofrag, \
+ .tunnel_type = LIBETH_RX_PT_TUNNEL_IP_##tun, \
+ .tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_##tp, \
+ .tunnel_end_frag = LIBETH_RX_PT_##tefr, \
+ .inner_prot = LIBETH_RX_PT_INNER_##iprot, \
+ .payload_layer = LIBETH_RX_PT_PAYLOAD_##pl, \
+ .hash_type = XDP_RSS_L3_##oip | \
+ XDP_RSS_L4_##iprot | \
+ XDP_RSS_TYPE_##pl, \
+ }
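For illustration, expanding one entry by hand — LIBIE_RX_PT(IPV4, NOT_FRAG, NONE, NONE, NOT_FRAG, TCP, L4), i.e. a non-tunneled IPv4 TCP ptype — yields, with XDP_RSS_TYPE_L4 remapped to XDP_RSS_L4 by the supplementary defines above:

	{
		.outer_ip	 = LIBETH_RX_PT_OUTER_IPV4,
		.outer_frag	 = LIBETH_RX_PT_NOT_FRAG,
		.tunnel_type	 = LIBETH_RX_PT_TUNNEL_IP_NONE,
		.tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_NONE,
		.tunnel_end_frag = LIBETH_RX_PT_NOT_FRAG,
		.inner_prot	 = LIBETH_RX_PT_INNER_TCP,
		.payload_layer	 = LIBETH_RX_PT_PAYLOAD_L4,
		.hash_type	 = XDP_RSS_L3_IPV4 | XDP_RSS_L4_TCP | XDP_RSS_L4,
	}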
+
+#define LIBIE_RX_PT_UNUSED { }
+
+#define __LIBIE_RX_PT_L2(iprot, pl) \
+ LIBIE_RX_PT(L2, NOT_FRAG, NONE, NONE, NOT_FRAG, iprot, pl)
+#define LIBIE_RX_PT_L2 __LIBIE_RX_PT_L2(NONE, L2)
+#define LIBIE_RX_PT_TS __LIBIE_RX_PT_L2(TIMESYNC, L2)
+#define LIBIE_RX_PT_L3 __LIBIE_RX_PT_L2(NONE, L3)
+
+#define LIBIE_RX_PT_IP_FRAG(oip) \
+ LIBIE_RX_PT(IPV##oip, FRAG, NONE, NONE, NOT_FRAG, NONE, L3)
+#define LIBIE_RX_PT_IP_L3(oip, tun, teprot, tefr) \
+ LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, tefr, NONE, L3)
+#define LIBIE_RX_PT_IP_L4(oip, tun, teprot, iprot) \
+ LIBIE_RX_PT(IPV##oip, NOT_FRAG, tun, teprot, NOT_FRAG, iprot, L4)
+
+#define LIBIE_RX_PT_IP_NOF(oip, tun, ver) \
+ LIBIE_RX_PT_IP_L3(oip, tun, ver, NOT_FRAG), \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, UDP), \
+ LIBIE_RX_PT_UNUSED, \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, TCP), \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, SCTP), \
+ LIBIE_RX_PT_IP_L4(oip, tun, ver, ICMP)
+
+/* IPv oip --> tun --> IPv ver */
+#define LIBIE_RX_PT_IP_TUN_VER(oip, tun, ver) \
+ LIBIE_RX_PT_IP_L3(oip, tun, ver, FRAG), \
+ LIBIE_RX_PT_IP_NOF(oip, tun, ver)
+
+/* Non Tunneled IPv oip */
+#define LIBIE_RX_PT_IP_RAW(oip) \
+ LIBIE_RX_PT_IP_FRAG(oip), \
+ LIBIE_RX_PT_IP_NOF(oip, NONE, NONE)
+
+/* IPv oip --> tun --> { IPv4, IPv6 } */
+#define LIBIE_RX_PT_IP_TUN(oip, tun) \
+ LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV4), \
+ LIBIE_RX_PT_IP_TUN_VER(oip, tun, IPV6)
+
+/* IPv oip --> GRE/NAT tun --> { x, IPv4, IPv6 } */
+#define LIBIE_RX_PT_IP_GRE(oip, tun) \
+ LIBIE_RX_PT_IP_L3(oip, tun, NONE, NOT_FRAG), \
+ LIBIE_RX_PT_IP_TUN(oip, tun)
+
+/* Non Tunneled IPv oip
+ * IPv oip --> { IPv4, IPv6 }
+ * IPv oip --> GRE/NAT --> { x, IPv4, IPv6 }
+ * IPv oip --> GRE/NAT --> MAC --> { x, IPv4, IPv6 }
+ * IPv oip --> GRE/NAT --> MAC/VLAN --> { x, IPv4, IPv6 }
+ */
+#define LIBIE_RX_PT_IP(oip) \
+ LIBIE_RX_PT_IP_RAW(oip), \
+ LIBIE_RX_PT_IP_TUN(oip, IP), \
+ LIBIE_RX_PT_IP_GRE(oip, GRENAT), \
+ LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC), \
+ LIBIE_RX_PT_IP_GRE(oip, GRENAT_MAC_VLAN)
+
+/* Lookup table mapping for O(1) parsing */
+const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = {
+ /* L2 packet types */
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_TS,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_UNUSED,
+ LIBIE_RX_PT_L2,
+ LIBIE_RX_PT_UNUSED,
+
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+ LIBIE_RX_PT_L3,
+
+ LIBIE_RX_PT_IP(4),
+ LIBIE_RX_PT_IP(6),
+};
+EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, LIBIE);
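A sketch of the intended O(1) decode on the Rx path; `ptype` stands for the raw 8/10-bit packet type a driver pulls from its Rx descriptor (assumed here, as no consumer is shown in this patch):

	struct libeth_rx_pt parsed;

	if (ptype >= LIBIE_RX_PT_NUM)
		ptype = 0;		/* index 0 is an UNUSED (all-zero) slot */
	parsed = libie_rx_pt_lut[ptype];
	/* parsed.hash_type, parsed.inner_prot etc. are now ready to use */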
+
+MODULE_DESCRIPTION("Intel(R) Ethernet common library");
+MODULE_IMPORT_NS(LIBETH);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 1732ec3c3dbd..d8be0e4dcb07 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -946,15 +946,13 @@ jme_udpsum(struct sk_buff *skb)
if (skb->protocol != htons(ETH_P_IP))
return csum;
skb_set_network_header(skb, ETH_HLEN);
- if ((ip_hdr(skb)->protocol != IPPROTO_UDP) ||
- (skb->len < (ETH_HLEN +
- (ip_hdr(skb)->ihl << 2) +
- sizeof(struct udphdr)))) {
+
+ if (ip_hdr(skb)->protocol != IPPROTO_UDP ||
+ skb->len < (ETH_HLEN + ip_hdrlen(skb) + sizeof(struct udphdr))) {
skb_reset_network_header(skb);
return csum;
}
- skb_set_transport_header(skb,
- ETH_HLEN + (ip_hdr(skb)->ihl << 2));
+ skb_set_transport_header(skb, ETH_HLEN + ip_hdrlen(skb));
csum = udp_hdr(skb)->check;
skb_reset_transport_header(skb);
skb_reset_network_header(skb);
@@ -2301,7 +2299,7 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
{
struct jme_adapter *jme = netdev_priv(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
netdev_update_features(netdev);
jme_restart_rx_engine(jme);
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 1d5b7bb6380f..9e6984815386 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -217,9 +217,9 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
if (ch->dma.irq)
free_irq(ch->dma.irq, priv);
if (IS_RX(ch->idx)) {
- int desc;
+ struct ltq_dma_channel *dma = &ch->dma;
- for (desc = 0; desc < LTQ_DESC_NUM; desc++)
+ for (dma->desc = 0; dma->desc < LTQ_DESC_NUM; dma->desc++)
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
}
}
@@ -519,7 +519,7 @@ ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
struct ltq_etop_priv *priv = netdev_priv(dev);
unsigned long flags;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
spin_lock_irqsave(&priv->lock, flags);
ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, LTQ_ETOP_IGPLEN);
@@ -675,7 +675,6 @@ ltq_etop_probe(struct platform_device *pdev)
err = -ENOMEM;
goto err_out;
}
- strcpy(dev->name, "eth%d");
dev->netdev_ops = &ltq_eth_netdev_ops;
dev->ethtool_ops = &ltq_etop_ethtool_ops;
priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 8bd4def3622e..07904a528f21 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -419,7 +419,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
int curr_desc;
int ret = 0;
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
priv->rx_buf_size = xrx200_buffer_size(new_mtu);
priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
@@ -440,7 +440,7 @@ xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
buff = ch_rx->rx_buff[ch_rx->dma.desc];
ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag);
if (ret) {
- net_dev->mtu = old_mtu;
+ WRITE_ONCE(net_dev->mtu, old_mtu);
priv->rx_buf_size = xrx200_buffer_size(old_mtu);
priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size);
break;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index f0bdc06d253d..f35ae2c88091 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2562,7 +2562,7 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
mv643xx_eth_recalc_skb_size(mp);
tx_set_rate(mp, 1000000000, 16777216);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 40a5f1431e4e..41894834fb53 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -3259,7 +3259,8 @@ static void mvneta_link_change(struct mvneta_port *pp)
{
u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS);
- phylink_mac_change(pp->phylink, !!(gmac_stat & MVNETA_GMAC_LINK_UP));
+ phylink_pcs_change(&pp->phylink_pcs,
+ !!(gmac_stat & MVNETA_GMAC_LINK_UP));
}
/* NAPI handler
@@ -3860,7 +3861,7 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
return -EINVAL;
}
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
if (!netif_running(dev)) {
if (pp->bm_priv)
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index 23adf53c2aa1..0d62a33afa80 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -953,13 +953,13 @@ static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
struct mvpp2_port *port;
- int i;
+ int i, j;
for (i = 0; i < priv->port_count; i++) {
port = priv->port_list[i];
if (port->priv->percpu_pools) {
- for (i = 0; i < port->nrxqs; i++)
- mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i],
+ for (j = 0; j < port->nrxqs; j++)
+ mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
port->tx_fc & en);
} else {
mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
@@ -1375,7 +1375,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
}
out_set:
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
dev->wanted_features = dev->features;
netdev_update_features(dev);
@@ -3434,12 +3434,13 @@ static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
mvpp2_isr_handle_ptp_queue(port, 1);
}
-static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
+static void mvpp2_isr_handle_link(struct mvpp2_port *port,
+ struct phylink_pcs *pcs, bool link)
{
struct net_device *dev = port->dev;
if (port->phylink) {
- phylink_mac_change(port->phylink, link);
+ phylink_pcs_change(pcs, link);
return;
}
@@ -3472,7 +3473,7 @@ static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
if (val & MVPP22_XLG_INT_STAT_LINK) {
val = readl(port->base + MVPP22_XLG_STATUS);
link = (val & MVPP22_XLG_STATUS_LINK_UP);
- mvpp2_isr_handle_link(port, link);
+ mvpp2_isr_handle_link(port, &port->pcs_xlg, link);
}
}
@@ -3488,7 +3489,7 @@ static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
if (val & MVPP22_GMAC_INT_STAT_LINK) {
val = readl(port->base + MVPP2_GMAC_STATUS0);
link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
- mvpp2_isr_handle_link(port, link);
+ mvpp2_isr_handle_link(port, &port->pcs_gmac, link);
}
}
}
@@ -4013,7 +4014,10 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
}
}
- skb = build_skb(data, frag_size);
+ if (frag_size)
+ skb = build_skb(data, frag_size);
+ else
+ skb = slab_build_skb(data);
if (!skb) {
netdev_warn(port->dev, "skb build failed\n");
goto err_drop_frame;
@@ -5255,7 +5259,7 @@ static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
}
static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mvpp2_port *port = netdev_priv(dev);
@@ -6903,6 +6907,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
/* 9704 == 9728 - 20 and rounding to 8 */
dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
device_set_node(&dev->dev, port_fwnode);
+ dev->dev_port = port->id;
port->pcs_gmac.ops = &mvpp2_phylink_gmac_pcs_ops;
port->pcs_gmac.neg_mode = true;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 7c9faa714a10..549436efc204 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -1096,7 +1096,7 @@ static int octep_change_mtu(struct net_device *netdev, int new_mtu)
true);
if (!err) {
oct->link_info.mtu = new_mtu;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
}
return err;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
index 2e2c3be8a0b4..e6eb98d70f3c 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
@@ -15,6 +15,7 @@
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
+#include <linux/vmalloc.h>
#include "octep_config.h"
#include "octep_main.h"
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
index dd49d0b8b494..7e6771c9cdbb 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
@@ -881,7 +881,7 @@ static int octep_vf_change_mtu(struct net_device *netdev, int new_mtu)
err = octep_vf_mbox_set_mtu(oct, new_mtu);
if (!err) {
oct->link_info.mtu = new_mtu;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
}
return err;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
index 2eab21e43048..445b626efe11 100644
--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
+++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_mbox.c
@@ -7,6 +7,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
#include "octep_vf_config.h"
#include "octep_vf_main.h"
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index b86f3224f0b7..27935c54b91b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -24,6 +24,8 @@
#define DRV_NAME "Marvell-CGX/RPM"
#define DRV_STRING "Marvell CGX/RPM Driver"
+#define CGX_RX_STAT_GLOBAL_INDEX 9
+
static LIST_HEAD(cgx_list);
/* Convert firmware speed encoding to user format(Mbps) */
@@ -701,6 +703,30 @@ u64 cgx_features_get(void *cgxd)
return ((struct cgx *)cgxd)->hw_features;
}
+int cgx_stats_reset(void *cgxd, int lmac_id)
+{
+ struct cgx *cgx = cgxd;
+ int stat_id;
+
+ if (!is_lmac_valid(cgx, lmac_id))
+ return -ENODEV;
+
+	for (stat_id = 0; stat_id < CGX_RX_STATS_COUNT; stat_id++) {
+ if (stat_id >= CGX_RX_STAT_GLOBAL_INDEX)
+ /* pass lmac as 0 for CGX_CMR_RX_STAT9-12 */
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+ else
+ cgx_write(cgx, lmac_id,
+ (CGXX_CMRX_RX_STAT0 + (stat_id * 8)), 0);
+ }
+
+	for (stat_id = 0; stat_id < CGX_TX_STATS_COUNT; stat_id++)
+ cgx_write(cgx, lmac_id, CGXX_CMRX_TX_STAT0 + (stat_id * 8), 0);
+
+ return 0;
+}
+
static int cgx_set_fec_stats_count(struct cgx_link_user_info *linfo)
{
if (!linfo->fec)
@@ -1788,6 +1814,7 @@ static struct mac_ops cgx_mac_ops = {
.pfc_config = cgx_lmac_pfc_config,
.mac_get_pfc_frm_cfg = cgx_lmac_get_pfc_frm_cfg,
.mac_reset = cgx_lmac_reset,
+ .mac_stats_reset = cgx_stats_reset,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 6f7d1dee5830..dc9ace30554a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -141,6 +141,7 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id);
int cgx_lmac_evh_unregister(void *cgxd, int lmac_id);
int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat);
int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
+int cgx_stats_reset(void *cgxd, int lmac_id);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index 0b4cba03f2e8..9ffc6790c513 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -132,6 +132,7 @@ struct mac_ops {
/* FEC stats */
int (*get_fec_stats)(void *cgxd, int lmac_id,
struct cgx_fec_stats_rsp *rsp);
+ int (*mac_stats_reset)(void *cgxd, int lmac_id);
};
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index eb2a20b5a0d0..ed2160cc5acb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -139,6 +139,7 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp) \
M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
msg_rsp) \
M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
@@ -174,6 +175,7 @@ M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
cgx_set_link_mode_rsp) \
M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_STATS_RST, 0x21A, cgx_stats_rst, msg_req, msg_rsp) \
M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
@@ -1213,10 +1215,8 @@ struct nix_bp_cfg_req {
/* bpid_per_chan = 1 assigns separate bp id for each channel */
};
-/* PF can be mapped to either CGX or LBK interface,
- * so maximum 64 channels are possible.
- */
-#define NIX_MAX_BPID_CHAN 64
+/* Maximum channels any single NIX interface can have */
+#define NIX_MAX_BPID_CHAN 256
struct nix_bp_cfg_rsp {
struct mbox_msghdr hdr;
u16 chan_bpid[NIX_MAX_BPID_CHAN]; /* Channel and bpid mapping */
@@ -1717,6 +1717,13 @@ struct lmtst_tbl_setup_req {
u64 rsvd[4];
};
+struct ndc_sync_op {
+ struct mbox_msghdr hdr;
+ u8 nix_lf_tx_sync;
+ u8 nix_lf_rx_sync;
+ u8 npa_lf_sync;
+};
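As a sketch of how a PF/VF driver would request the sync — assuming the usual macro-generated otx2_mbox_alloc_msg_*() helper and otx2_sync_mbox_msg() from the nic driver, which are not part of this hunk:

	struct ndc_sync_op *req;
	int err;

	mutex_lock(&pfvf->mbox.lock);
	req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}
	req->nix_lf_tx_sync = 1;	/* flush NIX-TX NDC for this LF */
	req->nix_lf_rx_sync = 1;	/* flush NIX-RX NDC as well */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);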
+
/* CPT mailbox error codes
* Range 901 - 1000.
*/
@@ -1746,7 +1753,7 @@ struct cpt_lf_alloc_req_msg {
u16 nix_pf_func;
u16 sso_pf_func;
u16 eng_grpmsk;
- int blkaddr;
+ u8 blkaddr;
u8 ctx_ilen_valid : 1;
u8 ctx_ilen : 7;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index d883157393ea..6c3aca6f278d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -63,8 +63,13 @@ enum npc_kpu_lb_ltype {
NPC_LT_LB_CUSTOM1 = 0xF,
};
+/* Don't modify ltypes up to IP6_EXT, otherwise length and checksum of IP
+ * headers may not be checked correctly. IPv4 ltypes and IPv6 ltypes must
+ * differ only at bit 0 so mask 0xE can be used to detect extended headers.
+ */
enum npc_kpu_lc_ltype {
- NPC_LT_LC_IP = 1,
+ NPC_LT_LC_PTP = 1,
+ NPC_LT_LC_IP,
NPC_LT_LC_IP_OPT,
NPC_LT_LC_IP6,
NPC_LT_LC_IP6_EXT,
@@ -72,7 +77,6 @@ enum npc_kpu_lc_ltype {
NPC_LT_LC_RARP,
NPC_LT_LC_MPLS,
NPC_LT_LC_NSH,
- NPC_LT_LC_PTP,
NPC_LT_LC_FCOE,
NPC_LT_LC_NGIO,
NPC_LT_LC_CUSTOM0 = 0xE,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 76218f1cb459..1b34cf9c9703 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -38,6 +38,7 @@ static struct mac_ops rpm_mac_ops = {
.pfc_config = rpm_lmac_pfc_config,
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
.mac_reset = rpm_lmac_reset,
+ .mac_stats_reset = rpm_stats_reset,
};
static struct mac_ops rpm2_mac_ops = {
@@ -70,6 +71,7 @@ static struct mac_ops rpm2_mac_ops = {
.pfc_config = rpm_lmac_pfc_config,
.mac_get_pfc_frm_cfg = rpm_lmac_get_pfc_frm_cfg,
.mac_reset = rpm_lmac_reset,
+ .mac_stats_reset = rpm_stats_reset,
};
bool is_dev_rpm2(void *rpmd)
@@ -443,6 +445,21 @@ int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat)
return 0;
}
+int rpm_stats_reset(void *rpmd, int lmac_id)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return -ENODEV;
+
+ cfg = rpm_read(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL);
+ cfg |= RPMX_CMD_CLEAR_TX | RPMX_CMD_CLEAR_RX | BIT_ULL(lmac_id);
+ rpm_write(rpm, 0, RPMX_MTI_STAT_STATN_CONTROL, cfg);
+
+ return 0;
+}
+
u8 rpm_get_lmac_type(void *rpmd, int lmac_id)
{
rpm_t *rpm = rpmd;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index b79cfbc6f877..34b11deb0f3c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -85,6 +85,8 @@
#define RPMX_MTI_STAT_STATN_CONTROL 0x10018
#define RPMX_MTI_STAT_DATA_HI_CDC 0x10038
#define RPMX_RSFEC_RX_CAPTURE BIT_ULL(27)
+#define RPMX_CMD_CLEAR_RX BIT_ULL(30)
+#define RPMX_CMD_CLEAR_TX BIT_ULL(31)
#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_2 0x40050
#define RPMX_MTI_RSFEC_STAT_COUNTER_CAPTURE_3 0x40058
#define RPMX_MTI_FCFECX_VL0_CCW_LO 0x38618
@@ -134,4 +136,5 @@ int rpm2_get_nr_lmacs(void *rpmd);
bool is_dev_rpm2(void *rpmd);
int rpm_get_fec_stats(void *cgxd, int lmac_id, struct cgx_fec_stats_rsp *rsp);
int rpm_lmac_reset(void *rpmd, int lmac_id, u8 pf_req_flr);
+int rpm_stats_reset(void *rpmd, int lmac_id);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index ff78251f92d4..ac7ee3f3598c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -1643,7 +1643,7 @@ static int rvu_check_rsrc_availability(struct rvu *rvu,
if (req->ssow > block->lf.max) {
dev_err(&rvu->pdev->dev,
"Func 0x%x: Invalid SSOW req, %d > max %d\n",
- pcifunc, req->sso, block->lf.max);
+ pcifunc, req->ssow, block->lf.max);
return -EINVAL;
}
mappedlfs = rvu_get_rsrc_mapcount(pfvf, block->addr);
@@ -2014,6 +2014,13 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req,
return 0;
}
+int rvu_ndc_sync(struct rvu *rvu, int lfblkaddr, int lfidx, u64 lfoffset)
+{
+ /* Sync cached info for this LF in NDC to LLC/DRAM */
+ rvu_write64(rvu, lfblkaddr, lfoffset, BIT_ULL(12) | lfidx);
+ return rvu_poll_reg(rvu, lfblkaddr, lfoffset, BIT_ULL(12), true);
+}
+
int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req,
struct get_hw_cap_rsp *rsp)
{
@@ -2068,6 +2075,65 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req,
return 0;
}
+int rvu_mbox_handler_ndc_sync_op(struct rvu *rvu,
+ struct ndc_sync_op *req,
+ struct msg_rsp *rsp)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ u16 pcifunc = req->hdr.pcifunc;
+ int err, lfidx, lfblkaddr;
+
+ if (req->npa_lf_sync) {
+ /* Get NPA LF data */
+ lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc);
+ if (lfblkaddr < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
+ if (lfidx < 0)
+ return NPA_AF_ERR_AF_LF_INVALID;
+
+ /* Sync NPA NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NPA_AF_NDC_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NPA sync failed for LF %u\n", lfidx);
+ }
+
+ if (!req->nix_lf_tx_sync && !req->nix_lf_rx_sync)
+ return 0;
+
+ /* Get NIX LF data */
+ lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (lfblkaddr < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0);
+ if (lfidx < 0)
+ return NIX_AF_ERR_AF_LF_INVALID;
+
+ if (req->nix_lf_tx_sync) {
+ /* Sync NIX TX NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NIX_AF_NDC_TX_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+				"NDC-NIX-TX sync failed for LF %u\n", lfidx);
+ }
+
+ if (req->nix_lf_rx_sync) {
+ /* Sync NIX RX NDC */
+ err = rvu_ndc_sync(rvu, lfblkaddr,
+ lfidx, NIX_AF_NDC_RX_SYNC);
+ if (err)
+ dev_err(rvu->dev,
+ "NDC-NIX-RX sync failed for LF %u\n", lfidx);
+ }
+
+ return 0;
+}
+
static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid,
struct mbox_msghdr *req)
{
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 35834687e40f..03ee93fd9e94 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -76,6 +76,7 @@ struct rvu_debugfs {
struct dump_ctx nix_cq_ctx;
struct dump_ctx nix_rq_ctx;
struct dump_ctx nix_sq_ctx;
+ struct dump_ctx nix_tm_ctx;
struct cpt_ctx cpt_ctx[MAX_CPT_BLKS];
int npa_qsize_id;
int nix_qsize_id;
@@ -799,6 +800,7 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
int rvu_get_num_lbk_chans(void);
+int rvu_ndc_sync(struct rvu *rvu, int lfblkid, int lfidx, u64 lfoffset);
int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
u16 global_slot, u16 *slot_in_block);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index e9bf9231b018..266ecbc1b97a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -604,6 +604,35 @@ int rvu_mbox_handler_rpm_stats(struct rvu *rvu, struct msg_req *req,
return rvu_lmac_get_stats(rvu, req, (void *)rsp);
}
+int rvu_mbox_handler_cgx_stats_rst(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ struct rvu_pfvf *parent_pf;
+ struct mac_ops *mac_ops;
+ u8 cgx_idx, lmac;
+ void *cgxd;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return LMAC_AF_ERR_PERM_DENIED;
+
+ parent_pf = &rvu->pf[pf];
+	/* To ensure that resetting CGX stats won't affect VF stats, check
+	 * that the CGX is used only by the PF interface. If not, return.
+	 */
+ if (parent_pf->cgx_users > 1) {
+ dev_info(rvu->dev, "CGX busy, could not reset statistics\n");
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac);
+ cgxd = rvu_cgx_pdata(cgx_idx, rvu);
+ mac_ops = get_mac_ops(cgxd);
+
+ return mac_ops->mac_stats_reset(cgxd, lmac);
+}
+
int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu,
struct msg_req *req,
struct cgx_fec_stats_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index f047185f38e0..daf4b951e905 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -632,7 +632,9 @@ int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
return ret;
}
-static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
+static bool validate_and_update_reg_offset(struct rvu *rvu,
+ struct cpt_rd_wr_reg_msg *req,
+ u64 *reg_offset)
{
u64 offset = req->reg_offset;
int blkaddr, num_lfs, lf;
@@ -663,6 +665,11 @@ static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
if (lf < 0)
return false;
+ /* Translate local LF's offset to global CPT LF's offset to
+ * access LFX register.
+ */
+ *reg_offset = (req->reg_offset & 0xFF000) + (lf << 3);
+
return true;
} else if (!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK)) {
/* Registers that can be accessed from PF */
@@ -696,6 +703,7 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
struct cpt_rd_wr_reg_msg *req,
struct cpt_rd_wr_reg_msg *rsp)
{
+ u64 offset = req->reg_offset;
int blkaddr;
blkaddr = validate_and_get_cpt_blkaddr(req->blkaddr);
@@ -707,17 +715,17 @@ int rvu_mbox_handler_cpt_rd_wr_register(struct rvu *rvu,
!is_cpt_vf(rvu, req->hdr.pcifunc))
return CPT_AF_ERR_ACCESS_DENIED;
+ if (!validate_and_update_reg_offset(rvu, req, &offset))
+ return CPT_AF_ERR_ACCESS_DENIED;
+
rsp->reg_offset = req->reg_offset;
rsp->ret_val = req->ret_val;
rsp->is_write = req->is_write;
- if (!is_valid_offset(rvu, req))
- return CPT_AF_ERR_ACCESS_DENIED;
-
if (req->is_write)
- rvu_write64(rvu, blkaddr, req->reg_offset, req->val);
+ rvu_write64(rvu, blkaddr, offset, req->val);
else
- rsp->val = rvu_read64(rvu, blkaddr, req->reg_offset);
+ rsp->val = rvu_read64(rvu, blkaddr, offset);
return 0;
}
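A worked example of the new offset translation (register numbers are hypothetical): if the slot encoded in the request maps to global CPT LF 7 and the request names local offset 0x10008, validate_and_update_reg_offset() keeps the register-block bits (0x10008 & 0xFF000 = 0x10000) and re-bases the low bits on the global LF, so the access actually issued via rvu_read64()/rvu_write64() is at 0x10000 + (7 << 3) = 0x10038.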
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 881d704644fb..4a4ef5bd9e0b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1603,6 +1603,367 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m,
(u64)sq_ctx->dropped_pkts);
}
+static void print_tm_tree(struct seq_file *m,
+ struct nix_aq_enq_rsp *rsp, u64 sq)
+{
+ struct nix_sq_ctx_s *sq_ctx = &rsp->sq;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ u16 p1, p2, p3, p4, schq;
+ int blkaddr;
+ u64 cfg;
+
+ blkaddr = nix_hw->blkaddr;
+ schq = sq_ctx->smq;
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq));
+ p1 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK, cfg);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(p1));
+ p2 = FIELD_GET(NIX_AF_TL4_PARENT_MASK, cfg);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(p2));
+ p3 = FIELD_GET(NIX_AF_TL3_PARENT_MASK, cfg);
+
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(p3));
+ p4 = FIELD_GET(NIX_AF_TL2_PARENT_MASK, cfg);
+ seq_printf(m,
+ "SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n",
+ sq, schq, p1, p2, p3, p4);
+}
+
+/* Dumps the given NIX LF's TM tree */
+static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused)
+{
+ int qidx, nixlf, rc, id, max_id = 0;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_aq_enq_req aq_req;
+ struct nix_aq_enq_rsp rsp;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+
+ nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
+ id = rvu->rvu_dbg.nix_tm_ctx.id;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ max_id = pfvf->sq_ctx->qsize;
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = NIX_AQ_CTYPE_SQ;
+ aq_req.op = NIX_AQ_INSTOP_READ;
+ seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
+ for (qidx = id; qidx < max_id; qidx++) {
+ aq_req.qidx = qidx;
+
+		/* Skip SQs that are not initialized */
+ if (!test_bit(qidx, pfvf->sq_bmap))
+ continue;
+
+ rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp);
+
+ if (rc) {
+ seq_printf(m, "Failed to read SQ(%d) context\n",
+ aq_req.qidx);
+ continue;
+ }
+ print_tm_tree(m, &rsp, aq_req.qidx);
+ }
+ return 0;
+}
+
+static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+ u64 nixlf;
+ int ret;
+
+ ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
+ if (ret)
+ return ret;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->sq_ctx) {
+ dev_warn(rvu->dev, "SQ context is not initialized\n");
+ return -EINVAL;
+ }
+
+ rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
+ return count;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write);
+
+static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ int blkaddr, link, link_level;
+ struct rvu_hwinfo *hw;
+
+ hw = rvu->hw;
+ blkaddr = nix_hw->blkaddr;
+ if (lvl == NIX_TXSCH_LVL_MDQ) {
+ seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)));
+ seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_MDQX_OUT_MD_COUNT(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_MDQX_SCHEDULE(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)));
+ seq_puts(m, "\n");
+ }
+
+ if (lvl == NIX_TXSCH_LVL_TL4) {
+ seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_SDP_LINK_CFG(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_SCHEDULE(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_TOPOLOGY(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_MD_DEBUG0(schq)));
+ seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL4X_MD_DEBUG1(schq)));
+ seq_puts(m, "\n");
+ }
+
+ if (lvl == NIX_TXSCH_LVL_TL3) {
+ seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3X_SCHEDULE(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3X_TOPOLOGY(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3X_MD_DEBUG0(schq)));
+ seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3X_MD_DEBUG1(schq)));
+
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
+ & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ if (lvl == link_level) {
+ seq_printf(m,
+ "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
+ schq, rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_BP_STATUS(schq)));
+ for (link = 0; link < hw->cgx_links; link++)
+ seq_printf(m,
+ "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
+ schq, link,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
+ }
+ seq_puts(m, "\n");
+ }
+
+ if (lvl == NIX_TXSCH_LVL_TL2) {
+ seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL2X_TOPOLOGY(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL2X_MD_DEBUG0(schq)));
+ seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL2X_MD_DEBUG1(schq)));
+
+ link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL)
+ & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2;
+ if (lvl == link_level) {
+ seq_printf(m,
+ "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n",
+ schq, rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_BP_STATUS(schq)));
+ for (link = 0; link < hw->cgx_links; link++)
+ seq_printf(m,
+ "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n",
+ schq, link, rvu_read64(rvu, blkaddr,
+ NIX_AF_TL3_TL2X_LINKX_CFG(schq, link)));
+ }
+ seq_puts(m, "\n");
+ }
+
+ if (lvl == NIX_TXSCH_LVL_TL1) {
+ seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n",
+ schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_NORM_CREDIT(schq)));
+ seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TX_LINKX_HW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_SCHEDULE(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_TOPOLOGY(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_MD_DEBUG0(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_MD_DEBUG1(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n",
+ schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_DROPPED_PACKETS(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_DROPPED_BYTES(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_RED_PACKETS(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_RED_BYTES(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_YELLOW_PACKETS(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_YELLOW_BYTES(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_GREEN_PACKETS(schq)));
+ seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq,
+ rvu_read64(rvu, blkaddr,
+ NIX_AF_TL1X_GREEN_BYTES(schq)));
+ seq_puts(m, "\n");
+ }
+}
+
+/* Dumps the given NIX LF's TM topology registers */
+static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused)
+{
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct nix_aq_enq_req aq_req;
+ struct nix_txsch *txsch;
+ int nixlf, lvl, schq;
+ u16 pcifunc;
+
+ nixlf = rvu->rvu_dbg.nix_tm_ctx.lf;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ memset(&aq_req, 0, sizeof(struct nix_aq_enq_req));
+ aq_req.hdr.pcifunc = pcifunc;
+ aq_req.ctype = NIX_AQ_CTYPE_SQ;
+ aq_req.op = NIX_AQ_INSTOP_READ;
+ seq_printf(m, "pcifunc is 0x%x\n", pcifunc);
+
+ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) {
+ txsch = &nix_hw->txsch[lvl];
+ for (schq = 0; schq < txsch->schq.max; schq++) {
+ if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc)
+ print_tm_topo(m, schq, lvl);
+ }
+ }
+ return 0;
+}
+
+static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ struct nix_hw *nix_hw = m->private;
+ struct rvu *rvu = nix_hw->rvu;
+ struct rvu_pfvf *pfvf;
+ u16 pcifunc;
+ u64 nixlf;
+ int ret;
+
+ ret = kstrtoull_from_user(buffer, count, 10, &nixlf);
+ if (ret)
+ return ret;
+
+ if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc))
+ return -EINVAL;
+
+ pfvf = rvu_get_pfvf(rvu, pcifunc);
+ if (!pfvf->sq_ctx) {
+ dev_warn(rvu->dev, "SQ context is not initialized\n");
+ return -EINVAL;
+ }
+
+ rvu->rvu_dbg.nix_tm_ctx.lf = nixlf;
+ return count;
+}
+
+RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write);
+
/* Dumps given nix_sq's context */
static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp)
{
@@ -2349,6 +2710,10 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr)
nix_hw = &rvu->hw->nix[1];
}
+ debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_tm_tree_fops);
+ debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw,
+ &rvu_dbg_nix_tm_topo_fops);
debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
&rvu_dbg_nix_sq_ctx_fops);
debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 96c04f7d93f8..7498ab429963 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1202,7 +1202,8 @@ static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1256,7 +1257,8 @@ static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
}
static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1310,7 +1312,8 @@ static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32
}
static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
@@ -1367,7 +1370,8 @@ static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
}
static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct rvu_devlink *rvu_dl = devlink_priv(devlink);
struct rvu *rvu = rvu_dl->rvu;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 00af8888e329..222f9e00b836 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -2497,9 +2497,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc)
}
mutex_unlock(&rvu->rsrc_lock);
- /* Sync cached info for this LF in NDC-TX to LLC/DRAM */
- rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf);
- err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true);
+ err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC);
if (err)
dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf);
@@ -3864,6 +3862,11 @@ static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg)
return -ERANGE;
}
+/* Mask to match ipv6(NPC_LT_LC_IP6) and ipv6 ext(NPC_LT_LC_IP6_EXT) */
+#define NPC_LT_LC_IP6_MATCH_MSK ((~(NPC_LT_LC_IP6 ^ NPC_LT_LC_IP6_EXT)) & 0xf)
+/* Mask to match both ipv4(NPC_LT_LC_IP) and ipv4 ext(NPC_LT_LC_IP_OPT) */
+#define NPC_LT_LC_IP_MATCH_MSK ((~(NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT)) & 0xf)
+
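Working these masks out with the ltype values from npc.h above (NPC_LT_LC_IP = 2, NPC_LT_LC_IP_OPT = 3, NPC_LT_LC_IP6 = 4, NPC_LT_LC_IP6_EXT = 5): NPC_LT_LC_IP ^ NPC_LT_LC_IP_OPT = 1, so both macros evaluate to ~1 & 0xF = 0xE. Matching (ltype & 0xE) against (NPC_LT_LC_IP & 0xE) then effectively covers both 2 and 3, i.e. plain IPv4 and IPv4 with options, and the IPv6 mask likewise covers both 4 and 5 — exactly the bit-0-only difference the npc.h comment guarantees.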
static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
{
int idx, nr_field, key_off, field_marker, keyoff_marker;
@@ -3933,7 +3936,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->hdr_offset = 9; /* offset */
field->bytesm1 = 0; /* 1 byte */
field->ltype_match = NPC_LT_LC_IP;
- field->ltype_mask = 0xF;
+ field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
break;
case NIX_FLOW_KEY_TYPE_IPV4:
case NIX_FLOW_KEY_TYPE_INNR_IPV4:
@@ -3960,8 +3963,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->bytesm1 = 3; /* DIP, 4 bytes */
}
}
-
- field->ltype_mask = 0xF; /* Match only IPv4 */
+ field->ltype_mask = NPC_LT_LC_IP_MATCH_MSK;
keyoff_marker = false;
break;
case NIX_FLOW_KEY_TYPE_IPV6:
@@ -3990,7 +3992,7 @@ static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg)
field->bytesm1 = 15; /* DIP,16 bytes */
}
}
- field->ltype_mask = 0xF; /* Match only IPv6 */
+ field->ltype_mask = NPC_LT_LC_IP6_MATCH_MSK;
break;
case NIX_FLOW_KEY_TYPE_TCP:
case NIX_FLOW_KEY_TYPE_UDP:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index e8b73b9d75e3..97722ce8c4cb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -2519,7 +2519,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
* - when available free entries are less.
	 * Lower priority ones out of available free entries are always
	 * chosen when the 'high vs low' question arises.
+ *
+	 * For a VF, the base MCAM match rule is set by its PF, and all
+	 * further MCAM rules the VF installs on its own are concatenated
+	 * with that base rule. Hence PF entries should be at lower
+	 * priority than VF entries; otherwise the base rule is always hit
+	 * and the rules installed by the VF are of no use. So if the
+	 * request is from a PF, allocate low priority entries.
*/
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK))
+ goto lprio_alloc;
/* Get the search range for priority allocation request */
if (req->priority) {
@@ -2528,17 +2538,6 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc,
goto alloc;
}
- /* For a VF base MCAM match rule is set by its PF. And all the
- * further MCAM rules installed by VF on its own are
- * concatenated with the base rule set by its PF. Hence PF entries
- * should be at lower priority compared to VF entries. Otherwise
- * base rule is hit always and rules installed by VF will be of
- * no use. Hence if the request is from PF and NOT a priority
- * allocation request then allocate low priority entries.
- */
- if (!(pcifunc & RVU_PFVF_FUNC_MASK))
- goto lprio_alloc;
-
/* Find out the search range for non-priority allocation request
*
* Get MCAM free entry count in middle zone.
@@ -2568,6 +2567,18 @@ lprio_alloc:
reverse = true;
start = 0;
end = mcam->bmap_entries;
+ /* Ensure PF requests always land at the bottom. If a PF asks
+ * for a higher/lower priority entry w.r.t. a reference entry,
+ * honour that constraint but still start the search from the
+ * bottom rather than the mid zone.
+ */
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+ req->priority == NPC_MCAM_HIGHER_PRIO)
+ end = req->ref_entry;
+
+ if (!(pcifunc & RVU_PFVF_FUNC_MASK) &&
+ req->priority == NPC_MCAM_LOWER_PRIO)
+ start = req->ref_entry;
}
alloc:
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index c181e7aa9eb6..150635de2bd5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1187,6 +1187,8 @@ static int npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf,
action.pf_func = target;
action.op = NIX_RX_ACTIONOP_UCAST;
}
+ if (req->match_id)
+ action.match_id = req->match_id;
}
entry->action = *(u64 *)&action;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 086f05c0376f..d56be5fb7eb4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -121,6 +121,7 @@
#define NPA_AF_LF_RST (0x0020)
#define NPA_AF_GEN_CFG (0x0030)
#define NPA_AF_NDC_CFG (0x0040)
+#define NPA_AF_NDC_SYNC (0x0050)
#define NPA_AF_INP_CTL (0x00D0)
#define NPA_AF_ACTIVE_CYCLES_PC (0x00F0)
#define NPA_AF_AVG_DELAY (0x0100)
@@ -239,6 +240,7 @@
#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3)
#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3)
+#define NIX_AF_NDC_RX_SYNC (0x03E0)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
#define NIX_AF_AQ_CFG (0x0400)
#define NIX_AF_AQ_BASE (0x0410)
@@ -429,6 +431,8 @@
#define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16)
#define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17)
#define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16)
+#define NIX_AF_SMQX_STATUS(a) (0x730 | (a) << 16)
+#define NIX_AF_MDQX_OUT_MD_COUNT(a) (0xdb0 | (a) << 16)
#define NIX_PRIV_AF_INT_CFG (0x8000000)
#define NIX_PRIV_LFX_CFG (0x8000010)
@@ -442,6 +446,11 @@
#define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12)
#define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0)
+#define NIX_AF_MDQ_PARENT_MASK GENMASK_ULL(24, 16)
+#define NIX_AF_TL4_PARENT_MASK GENMASK_ULL(23, 16)
+#define NIX_AF_TL3_PARENT_MASK GENMASK_ULL(23, 16)
+#define NIX_AF_TL2_PARENT_MASK GENMASK_ULL(20, 16)
+
/* SSO */
#define SSO_AF_CONST (0x1000)
#define SSO_AF_CONST1 (0x1008)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
index 28984d0e848a..5704520f9b02 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h
@@ -24,7 +24,7 @@ TRACE_EVENT(otx2_msg_alloc,
__field(u16, id)
__field(u64, size)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ TP_fast_assign(__assign_str(dev);
__entry->id = id;
__entry->size = size;
),
@@ -39,7 +39,7 @@ TRACE_EVENT(otx2_msg_send,
__field(u16, num_msgs)
__field(u64, msg_size)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ TP_fast_assign(__assign_str(dev);
__entry->num_msgs = num_msgs;
__entry->msg_size = msg_size;
),
@@ -55,7 +55,7 @@ TRACE_EVENT(otx2_msg_check,
__field(u16, rspid)
__field(int, rc)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ TP_fast_assign(__assign_str(dev);
__entry->reqid = reqid;
__entry->rspid = rspid;
__entry->rc = rc;
@@ -72,8 +72,8 @@ TRACE_EVENT(otx2_msg_interrupt,
__string(str, msg)
__field(u64, intr)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev));
- __assign_str(str, msg);
+ TP_fast_assign(__assign_str(dev);
+ __assign_str(str);
__entry->intr = intr;
),
TP_printk("[%s] mbox interrupt %s (0x%llx)\n", __get_str(dev),
@@ -87,7 +87,7 @@ TRACE_EVENT(otx2_msg_process,
__field(u16, id)
__field(int, err)
),
- TP_fast_assign(__assign_str(dev, pci_name(pdev));
+ TP_fast_assign(__assign_str(dev);
__entry->id = id;
__entry->err = err;
),
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 5664f768cb0c..64a97a0a10ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -9,10 +9,9 @@ obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
otx2_devlink.o qos_sq.o qos.o
-rvu_nicvf-y := otx2_vf.o otx2_devlink.o
+rvu_nicvf-y := otx2_vf.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
-rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index a85ac039d779..87d5776e3b88 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -648,14 +648,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
} else if (lvl == NIX_TXSCH_LVL_TL4) {
parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL3) {
parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
@@ -670,11 +670,11 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
} else if (lvl == NIX_TXSCH_LVL_TL2) {
parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
- req->regval[0] = parent << 16;
+ req->regval[0] = (u64)parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq);
- req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val;
+ req->regval[1] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 | dwrr_val;
if (lvl == hw->txschq_link_cfg_lvl) {
req->num_regs++;
@@ -698,7 +698,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for
req->num_regs++;
req->reg[1] = NIX_AF_TL1X_TOPOLOGY(schq);
- req->regval[1] = (TXSCH_TL1_DFLT_RR_PRIO << 1);
+ req->regval[1] = hw->txschq_aggr_lvl_rr_prio << 1;
req->num_regs++;
req->reg[2] = NIX_AF_TL1X_CIR(schq);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 7e16a341ec58..f27a3456ae64 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -346,12 +346,9 @@ struct otx2_flow_config {
u16 *def_ent;
u16 nr_flows;
#define OTX2_DEFAULT_FLOWCOUNT 16
-#define OTX2_MAX_UNICAST_FLOWS 8
+#define OTX2_DEFAULT_UNICAST_FLOWS 4
#define OTX2_MAX_VLAN_FLOWS 1
#define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT
-#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \
- OTX2_MAX_UNICAST_FLOWS + \
- OTX2_MAX_VLAN_FLOWS)
u16 unicast_offset;
u16 rx_vlan_offset;
u16 vf_vlan_offset;
@@ -363,7 +360,9 @@ struct otx2_flow_config {
struct list_head flow_list;
u32 dmacflt_max_flows;
u16 max_flows;
+ refcount_t mark_flows;
struct list_head flow_list_tc;
+ u8 ucast_flt_cnt;
bool ntuple;
};
@@ -465,6 +464,7 @@ struct otx2_nic {
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
#define OTX2_FLAG_PTP_ONESTEP_SYNC BIT_ULL(15)
#define OTX2_FLAG_ADPTV_INT_COAL_ENABLED BIT_ULL(16)
+#define OTX2_FLAG_TC_MARK_ENABLED BIT_ULL(17)
u64 flags;
u64 *cq_op_addr;
@@ -961,6 +961,7 @@ void otx2_get_mac_from_af(struct net_device *netdev);
void otx2_config_irq_coalescing(struct otx2_nic *pfvf, int qidx);
int otx2_config_pause_frm(struct otx2_nic *pfvf);
void otx2_setup_segmentation(struct otx2_nic *pfvf);
+int otx2_reset_mac_stats(struct otx2_nic *pfvf);
/* RVU block related APIs */
int otx2_attach_npa_nix(struct otx2_nic *pfvf);
@@ -1064,6 +1065,7 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev,
int otx2_smq_flush(struct otx2_nic *pfvf, int smq);
void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool,
u64 iova, int size);
+int otx2_mcam_entry_init(struct otx2_nic *pfvf);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
index 28fb643d2917..aa01110f04a3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dcbnl.c
@@ -54,6 +54,7 @@ int otx2_pfc_txschq_config(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_config);
static int otx2_pfc_txschq_alloc_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -122,6 +123,7 @@ int otx2_pfc_txschq_alloc(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_alloc);
static int otx2_pfc_txschq_stop_one(struct otx2_nic *pfvf, u8 prio)
{
@@ -260,6 +262,7 @@ update_sq_smq_map:
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_update);
int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
{
@@ -282,6 +285,7 @@ int otx2_pfc_txschq_stop(struct otx2_nic *pfvf)
return 0;
}
+EXPORT_SYMBOL(otx2_pfc_txschq_stop);
int otx2_config_priority_flow_ctrl(struct otx2_nic *pfvf)
{
@@ -321,6 +325,7 @@ unlock:
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+EXPORT_SYMBOL(otx2_config_priority_flow_ctrl);
void otx2_update_bpid_in_rqctx(struct otx2_nic *pfvf, int vlan_prio, int qidx,
bool pfc_enable)
@@ -385,6 +390,7 @@ out:
"Updating BPIDs in CQ and Aura contexts of RQ%d failed with err %d\n",
qidx, err);
}
+EXPORT_SYMBOL(otx2_update_bpid_in_rqctx);
static int otx2_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
@@ -472,3 +478,4 @@ int otx2_dcbnl_set_ops(struct net_device *dev)
return 0;
}
+EXPORT_SYMBOL(otx2_dcbnl_set_ops);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 4e1130496573..53f14aa944bd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -32,7 +32,8 @@ static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id,
}
static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct otx2_devlink *otx2_dl = devlink_priv(devlink);
struct otx2_nic *pfvf = otx2_dl->pfvf;
@@ -63,9 +64,68 @@ static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id,
return 0;
}
+static int otx2_dl_ucast_flt_cnt_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+ int err;
+
+ pfvf->flow_cfg->ucast_flt_cnt = ctx->val.vu8;
+
+ otx2_mcam_flow_del(pfvf);
+ err = otx2_mcam_entry_init(pfvf);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int otx2_dl_ucast_flt_cnt_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ ctx->val.vu8 = pfvf->flow_cfg ? pfvf->flow_cfg->ucast_flt_cnt : 0;
+
+ return 0;
+}
+
+static int otx2_dl_ucast_flt_cnt_validate(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack)
+{
+ struct otx2_devlink *otx2_dl = devlink_priv(devlink);
+ struct otx2_nic *pfvf = otx2_dl->pfvf;
+
+ /* Check for unicast filter support */
+ if (!(pfvf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unicast filter not enabled");
+ return -EINVAL;
+ }
+
+ if (!pfvf->flow_cfg) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "pfvf->flow_cfg not initialized");
+ return -EINVAL;
+ }
+
+ if (pfvf->flow_cfg->nr_flows) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot modify count when there are active rules");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
enum otx2_dl_param_id {
OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
OTX2_DEVLINK_PARAM_ID_MCAM_COUNT,
+ OTX2_DEVLINK_PARAM_ID_UCAST_FLT_CNT,
};
static const struct devlink_param otx2_dl_params[] = {
@@ -74,6 +134,11 @@ static const struct devlink_param otx2_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
otx2_dl_mcam_count_get, otx2_dl_mcam_count_set,
otx2_dl_mcam_count_validate),
+ DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_UCAST_FLT_CNT,
+ "unicast_filter_count", DEVLINK_PARAM_TYPE_U8,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ otx2_dl_ucast_flt_cnt_get, otx2_dl_ucast_flt_cnt_set,
+ otx2_dl_ucast_flt_cnt_validate),
};
static const struct devlink_ops otx2_devlink_ops = {
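Once registered, the new parameter is driven through the generic devlink runtime-parameter interface; a usage sketch (the PCI address is illustrative):

	# devlink dev param set pci/0002:02:00.0 \
	#	name unicast_filter_count value 8 cmode runtime
	# devlink dev param show pci/0002:02:00.0 \
	#	name unicast_filter_count

The validate hook above rejects the set when unicast filtering is unsupported or while filter rules are active, and the set hook re-initialises the MCAM layout around the new count.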
@@ -112,6 +177,7 @@ err_dl:
devlink_free(dl);
return err;
}
+EXPORT_SYMBOL(otx2_register_dl);
void otx2_unregister_dl(struct otx2_nic *pfvf)
{
@@ -123,3 +189,4 @@ void otx2_unregister_dl(struct otx2_nic *pfvf)
ARRAY_SIZE(otx2_dl_params));
devlink_free(dl);
}
+EXPORT_SYMBOL(otx2_unregister_dl);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index 7f786de61014..0db62eb0dab3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -954,7 +954,7 @@ static u32 otx2_get_link(struct net_device *netdev)
}
static int otx2_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 97a71e9b8563..98c31a16c70b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -12,8 +12,6 @@
#define OTX2_DEFAULT_ACTION 0x1
-static int otx2_mcam_entry_init(struct otx2_nic *pfvf);
-
struct otx2_flow {
struct ethtool_rx_flow_spec flow_spec;
struct list_head list;
@@ -161,7 +159,7 @@ exit:
}
EXPORT_SYMBOL(otx2_alloc_mcam_entries);
-static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
+int otx2_mcam_entry_init(struct otx2_nic *pfvf)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct npc_get_field_status_req *freq;
@@ -172,7 +170,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
int ent, count;
vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
- count = OTX2_MAX_UNICAST_FLOWS +
+ count = flow_cfg->ucast_flt_cnt +
OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;
flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
@@ -214,7 +212,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
flow_cfg->vf_vlan_offset = 0;
flow_cfg->unicast_offset = vf_vlan_max_flows;
flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
- OTX2_MAX_UNICAST_FLOWS;
+ flow_cfg->ucast_flt_cnt;
pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
/* Check if NPC_DMAC field is supported
@@ -252,8 +250,10 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf)
pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
+ refcount_set(&flow_cfg->mark_flows, 1);
return 0;
}
+EXPORT_SYMBOL(otx2_mcam_entry_init);
/* TODO : revisit on size */
#define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32)
@@ -301,6 +301,8 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc);
+ pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS;
+
/* Allocate bare minimum number of MCAM entries needed for
* unicast and ntuple filters.
*/
@@ -313,7 +315,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
return 0;
pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
- * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
+ * pf->flow_cfg->ucast_flt_cnt, GFP_KERNEL);
if (!pf->mac_table)
return -ENOMEM;
@@ -355,7 +357,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
return -ENOMEM;
/* don't have free mcam entries or the uc list is greater than allotted */
- if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
+ if (netdev_uc_count(pf->netdev) > pf->flow_cfg->ucast_flt_cnt)
return -ENOMEM;
mutex_lock(&pf->mbox.lock);
@@ -366,7 +368,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
}
/* unicast offset starts at 32; entries 0..31 are for ntuple */
- for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) {
if (pf->mac_table[i].inuse)
continue;
ether_addr_copy(pf->mac_table[i].addr, mac);
@@ -409,7 +411,7 @@ static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
{
int i;
- for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
+ for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) {
if (!pf->mac_table[i].inuse)
continue;
@@ -1393,6 +1395,7 @@ int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
}
pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
+ flow_cfg->max_flows = 0;
mutex_unlock(&pfvf->mbox.lock);
return 0;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 3f46d5e0fb2e..5492dea547a1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -67,7 +67,7 @@ static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
netdev_info(netdev, "Changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (if_up)
err = otx2_open(netdev);
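This and the matching MTU conversions further down (otx2_vf, prestera, pxa168, skge, sky2) exist because dev->mtu is read locklessly on various fast paths; WRITE_ONCE() pairs with READ_ONCE() readers to annotate the intentional data race and prevent store tearing. The pattern, roughly:

	/* writer, under rtnl */
	WRITE_ONCE(dev->mtu, new_mtu);

	/* lockless reader elsewhere */
	unsigned int mtu = READ_ONCE(dev->mtu);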
@@ -450,7 +450,6 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
struct mbox_msghdr *msg = NULL;
int offset, vf_idx, id, err;
struct otx2_mbox_dev *mdev;
- struct mbox_hdr *req_hdr;
struct otx2_mbox *mbox;
struct mbox *vf_mbox;
struct otx2_nic *pf;
@@ -461,9 +460,8 @@ static void otx2_pfvf_mbox_handler(struct work_struct *work)
mbox = &pf->mbox_pfvf[0].mbox;
mdev = &mbox->dev[vf_idx];
- req_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- offset = ALIGN(sizeof(*req_hdr), MBOX_MSG_ALIGN);
+ offset = ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
for (id = 0; id < vf_mbox->num_msgs; id++) {
msg = (struct mbox_msghdr *)(mdev->mbase + mbox->rx_start +
@@ -494,7 +492,6 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
struct otx2_nic *pf = vf_mbox->pfvf;
struct otx2_mbox_dev *mdev;
int offset, id, vf_idx = 0;
- struct mbox_hdr *rsp_hdr;
struct mbox_msghdr *msg;
struct otx2_mbox *mbox;
@@ -502,8 +499,7 @@ static void otx2_pfvf_mbox_up_handler(struct work_struct *work)
mbox = &pf->mbox_pfvf[0].mbox_up;
mdev = &mbox->dev[vf_idx];
- rsp_hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
- offset = mbox->rx_start + ALIGN(sizeof(*rsp_hdr), MBOX_MSG_ALIGN);
+ offset = mbox->rx_start + ALIGN(sizeof(struct mbox_hdr), MBOX_MSG_ALIGN);
for (id = 0; id < vf_mbox->up_num_msgs; id++) {
msg = mdev->mbase + offset;
@@ -1150,6 +1146,23 @@ static int otx2_cgx_config_linkevents(struct otx2_nic *pf, bool enable)
return err;
}
+int otx2_reset_mac_stats(struct otx2_nic *pfvf)
+{
+ struct msg_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_stats_rst(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
+}
+
static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
{
struct msg_req *msg;
@@ -1701,7 +1714,7 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf)
return;
if ((netdev->flags & IFF_PROMISC) ||
- (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
+ (netdev_uc_count(netdev) > pf->flow_cfg->ucast_flt_cnt)) {
promisc = true;
}
@@ -1873,9 +1886,17 @@ int otx2_open(struct net_device *netdev)
vec = pf->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
for (qidx = 0; qidx < pf->hw.cint_cnt; qidx++) {
irq_name = &pf->hw.irq_name[vec * NAME_SIZE];
+ int name_len;
- snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d", pf->netdev->name,
- qidx);
+ name_len = snprintf(irq_name, NAME_SIZE, "%s-rxtx-%d",
+ pf->netdev->name, qidx);
+ if (name_len >= NAME_SIZE) {
+ dev_err(pf->dev,
+ "RVUPF%d: IRQ registration failed for CQ%d, irq name is too long\n",
+ rvu_get_pf(pf->pcifunc), qidx);
+ err = -EINVAL;
+ goto err_free_cints;
+ }
err = request_irq(pci_irq_vector(pf->pdev, vec),
otx2_cq_intr_handler, 0, irq_name,
@@ -3038,6 +3059,9 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
netdev->min_mtu = OTX2_MIN_MTU;
netdev->max_mtu = otx2_get_max_mtu(pf);
+ /* reset CGX/RPM MAC stats */
+ otx2_reset_mac_stats(pf);
+
err = register_netdev(netdev);
if (err) {
dev_err(dev, "Failed to register netdevice\n");
@@ -3221,6 +3245,29 @@ static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs)
return otx2_sriov_enable(pdev, numvfs);
}
+static void otx2_ndc_sync(struct otx2_nic *pf)
+{
+ struct mbox *mbox = &pf->mbox;
+ struct ndc_sync_op *req;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_ndc_sync_op(mbox);
+ if (!req) {
+ mutex_unlock(&mbox->lock);
+ return;
+ }
+
+ req->nix_lf_tx_sync = 1;
+ req->nix_lf_rx_sync = 1;
+ req->npa_lf_sync = 1;
+
+ if (otx2_sync_mbox_msg(mbox))
+ dev_err(pf->dev, "NDC sync operation failed\n");
+
+ mutex_unlock(&mbox->lock);
+}
+
static void otx2_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
@@ -3269,6 +3316,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(pf);
otx2_shutdown_tc(pf);
otx2_shutdown_qos(pf);
+ otx2_ndc_sync(pf);
otx2_detach_resources(&pf->mbox);
if (pf->hw.lmt_info)
free_percpu(pf->hw.lmt_info);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
index 45a32e4b49d1..e3aee6e36215 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h
@@ -139,33 +139,34 @@
#define NIX_LF_CINTX_ENA_W1C(a) (NIX_LFBASE | 0xD50 | (a) << 12)
/* NIX AF transmit scheduler registers */
-#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
-#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (a) << 16)
-#define NIX_AF_TL1X_CIR(a) (0xC20 | (a) << 16)
-#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (a) << 16)
-#define NIX_AF_TL2X_PARENT(a) (0xE88 | (a) << 16)
-#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (a) << 16)
-#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (a) << 16)
-#define NIX_AF_TL2X_CIR(a) (0xE20 | (a) << 16)
-#define NIX_AF_TL2X_PIR(a) (0xE30 | (a) << 16)
-#define NIX_AF_TL3X_PARENT(a) (0x1088 | (a) << 16)
-#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (a) << 16)
-#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (a) << 16)
-#define NIX_AF_TL3X_CIR(a) (0x1020 | (a) << 16)
-#define NIX_AF_TL3X_PIR(a) (0x1030 | (a) << 16)
-#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (a) << 16)
-#define NIX_AF_TL4X_PARENT(a) (0x1288 | (a) << 16)
-#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (a) << 16)
-#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (a) << 16)
-#define NIX_AF_TL4X_CIR(a) (0x1220 | (a) << 16)
-#define NIX_AF_TL4X_PIR(a) (0x1230 | (a) << 16)
-#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (a) << 16)
-#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (a) << 16)
-#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (a) << 16)
-#define NIX_AF_MDQX_CIR(a) (0x1420 | (a) << 16)
-#define NIX_AF_MDQX_PIR(a) (0x1430 | (a) << 16)
-#define NIX_AF_MDQX_PARENT(a) (0x1480 | (a) << 16)
-#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (a) << 16 | (b) << 3)
+#define NIX_AF_SMQX_CFG(a) (0x700 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SDP_LINK_CFG(a) (0xB10 | (u64)(a) << 16)
+#define NIX_AF_TL1X_SCHEDULE(a) (0xC00 | (u64)(a) << 16)
+#define NIX_AF_TL1X_CIR(a) (0xC20 | (u64)(a) << 16)
+#define NIX_AF_TL1X_TOPOLOGY(a) (0xC80 | (u64)(a) << 16)
+#define NIX_AF_TL2X_PARENT(a) (0xE88 | (u64)(a) << 16)
+#define NIX_AF_TL2X_SCHEDULE(a) (0xE00 | (u64)(a) << 16)
+#define NIX_AF_TL2X_TOPOLOGY(a) (0xE80 | (u64)(a) << 16)
+#define NIX_AF_TL2X_CIR(a) (0xE20 | (u64)(a) << 16)
+#define NIX_AF_TL2X_PIR(a) (0xE30 | (u64)(a) << 16)
+#define NIX_AF_TL3X_PARENT(a) (0x1088 | (u64)(a) << 16)
+#define NIX_AF_TL3X_SCHEDULE(a) (0x1000 | (u64)(a) << 16)
+#define NIX_AF_TL3X_SHAPE(a) (0x1010 | (u64)(a) << 16)
+#define NIX_AF_TL3X_CIR(a) (0x1020 | (u64)(a) << 16)
+#define NIX_AF_TL3X_PIR(a) (0x1030 | (u64)(a) << 16)
+#define NIX_AF_TL3X_TOPOLOGY(a) (0x1080 | (u64)(a) << 16)
+#define NIX_AF_TL4X_PARENT(a) (0x1288 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SCHEDULE(a) (0x1200 | (u64)(a) << 16)
+#define NIX_AF_TL4X_SHAPE(a) (0x1210 | (u64)(a) << 16)
+#define NIX_AF_TL4X_CIR(a) (0x1220 | (u64)(a) << 16)
+#define NIX_AF_TL4X_PIR(a) (0x1230 | (u64)(a) << 16)
+#define NIX_AF_TL4X_TOPOLOGY(a) (0x1280 | (u64)(a) << 16)
+#define NIX_AF_MDQX_SCHEDULE(a) (0x1400 | (u64)(a) << 16)
+#define NIX_AF_MDQX_SHAPE(a) (0x1410 | (u64)(a) << 16)
+#define NIX_AF_MDQX_CIR(a) (0x1420 | (u64)(a) << 16)
+#define NIX_AF_MDQX_PIR(a) (0x1430 | (u64)(a) << 16)
+#define NIX_AF_MDQX_PARENT(a) (0x1480 | (u64)(a) << 16)
+#define NIX_AF_TL3_TL2X_LINKX_CFG(a, b) (0x1700 | (u64)(a) << 16 | (b) << 3)
/* LMT LF registers */
#define LMT_LFBASE BIT_ULL(RVU_FUNC_BLKADDR_SHIFT)
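The (u64) casts matter because `(a) << 16` is otherwise computed in 32-bit int arithmetic and only afterwards widened to the 64-bit register offset; for index values large enough to overflow an int, the high bits are lost and the shift itself is undefined behaviour. A minimal illustration:

	u64 bad  = 0x1480 | (a) << 16;       /* shifted as 32-bit int */
	u64 good = 0x1480 | (u64)(a) << 16;  /* widened, then shifted */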
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index f4655a8c0705..e63cc1eb6d89 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -511,7 +511,15 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
nr_police++;
break;
case FLOW_ACTION_MARK:
+ if (act->mark & ~OTX2_RX_MATCH_ID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack, "Bad flow mark, only 16 bit supported");
+ return -EOPNOTSUPP;
+ }
mark = act->mark;
+ req->match_id = mark & OTX2_RX_MATCH_ID_MASK;
+ req->op = NIX_RX_ACTION_DEFAULT;
+ nic->flags |= OTX2_FLAG_TC_MARK_ENABLED;
+ refcount_inc(&nic->flow_cfg->mark_flows);
break;
case FLOW_ACTION_RX_QUEUE_MAPPING:
@@ -692,10 +700,6 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
u32 val;
flow_rule_match_control(rule, &match);
- if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
- NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
- return -EOPNOTSUPP;
- }
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
@@ -713,6 +717,10 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
return -EOPNOTSUPP;
}
}
+
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT,
+ match.mask->flags, extack))
+ return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
@@ -1187,6 +1195,11 @@ static int otx2_tc_del_flow(struct otx2_nic *nic,
return -EINVAL;
}
+ /* Disable the TC MARK flag if there are no rules with an skbedit mark action */
+ if (flow_node->req.match_id)
+ if (!refcount_dec_and_test(&flow_cfg->mark_flows))
+ nic->flags &= ~OTX2_FLAG_TC_MARK_ENABLED;
+
if (flow_node->is_act_police) {
__clear_bit(flow_node->rq, &nic->rq_bmap);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index f828d32737af..3eb85949677a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -380,6 +380,9 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
if (pfvf->netdev->features & NETIF_F_RXCSUM)
skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (pfvf->flags & OTX2_FLAG_TC_MARK_ENABLED)
+ skb->mark = parse->match_id;
+
skb_mark_for_recycle(skb);
napi_gro_frags(napi);
@@ -510,7 +513,7 @@ process_cqe:
static void otx2_adjust_adaptive_coalese(struct otx2_nic *pfvf, struct otx2_cq_poll *cq_poll)
{
- struct dim_sample dim_sample;
+ struct dim_sample dim_sample = { 0 };
u64 rx_frames, rx_bytes;
u64 tx_frames, tx_bytes;
@@ -1171,8 +1174,11 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq,
if (skb_shinfo(skb)->gso_size && !is_hw_tso_supported(pfvf, skb)) {
/* Insert vlan tag before giving pkt to tso */
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tag_present(skb)) {
skb = __vlan_hwaccel_push_inside(skb);
+ if (!skb)
+ return true;
+ }
otx2_sq_append_tso(pfvf, sq, skb, qidx);
return true;
}
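The added NULL check is required because __vlan_hwaccel_push_inside() frees the skb and returns NULL when it cannot insert the tag; the packet must then be reported as consumed rather than dereferenced:

	skb = __vlan_hwaccel_push_inside(skb);
	if (!skb)
		return true;	/* skb already freed, nothing to send */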
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index a82ffca8ce1b..3f1d2655ff77 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -62,6 +62,9 @@
#define CQ_OP_STAT_OP_ERR 63
#define CQ_OP_STAT_CQ_ERR 46
+/* Packet mark mask */
+#define OTX2_RX_MATCH_ID_MASK 0x0000ffff
+
struct queue_stats {
u64 bytes;
u64 pkts;
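Because only the low 16 bits of the flow mark survive into the NPC match_id, offloaded rules must keep the mark within OTX2_RX_MATCH_ID_MASK; a hypothetical rule (interface name illustrative):

	# tc filter add dev eth0 ingress protocol ip flower skip_sw \
	#	action skbedit mark 0x1234

0x1234 fits in 16 bits and is reflected into skb->mark on receive once OTX2_FLAG_TC_MARK_ENABLED is set; anything above 0xffff is rejected with -EOPNOTSUPP.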
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index cf0aa16d7540..99fcc5661674 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -456,7 +456,7 @@ static int otx2vf_change_mtu(struct net_device *netdev, int new_mtu)
netdev_info(netdev, "Changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (if_up)
err = otx2vf_open(netdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
index 1723e9912ae0..0f844c14485a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/qos.c
@@ -153,7 +153,6 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
num_regs++;
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
-
} else if (level == NIX_TXSCH_LVL_TL4) {
otx2_config_sched_shaping(pfvf, node, cfg, &num_regs);
} else if (level == NIX_TXSCH_LVL_TL3) {
@@ -176,7 +175,7 @@ static void __otx2_qos_txschq_cfg(struct otx2_nic *pfvf,
/* check if node is root */
if (node->qid == OTX2_QOS_QID_INNER && !node->parent) {
cfg->reg[num_regs] = NIX_AF_TL2X_SCHEDULE(node->schq);
- cfg->regval[num_regs] = TXSCH_TL1_DFLT_RR_PRIO << 24 |
+ cfg->regval[num_regs] = (u64)hw->txschq_aggr_lvl_rr_prio << 24 |
mtu_to_dwrr_weight(pfvf,
pfvf->tx_max_pktlen);
num_regs++;
@@ -545,6 +544,20 @@ otx2_qos_sw_create_leaf_node(struct otx2_nic *pfvf,
return node;
}
+static struct otx2_qos_node
+*otx2_sw_node_find_by_qid(struct otx2_nic *pfvf, u16 qid)
+{
+ struct otx2_qos_node *node = NULL;
+ int bkt;
+
+ hash_for_each(pfvf->qos.qos_hlist, bkt, node, hlist) {
+ if (node->qid == qid)
+ break;
+ }
+
+ return node;
+}
+
static struct otx2_qos_node *
otx2_sw_node_find(struct otx2_nic *pfvf, u32 classid)
{
@@ -917,6 +930,7 @@ static void otx2_qos_enadis_sq(struct otx2_nic *pfvf,
otx2_qos_disable_sq(pfvf, qid);
pfvf->qos.qid_to_sqmap[qid] = node->schq;
+ otx2_qos_txschq_config(pfvf, node);
otx2_qos_enable_sq(pfvf, qid);
}
@@ -1407,7 +1421,10 @@ static int otx2_qos_leaf_to_inner(struct otx2_nic *pfvf, u16 classid,
otx2_qos_read_txschq_cfg(pfvf, node, old_cfg);
/* delete the txschq nodes allocated for this node */
+ otx2_qos_disable_sq(pfvf, qid);
+ otx2_qos_free_hw_node_schq(pfvf, node);
otx2_qos_free_sw_node_schq(pfvf, node);
+ pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
/* mark this node as htb inner node */
WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER);
@@ -1475,13 +1492,45 @@ out:
return ret;
}
+static int otx2_qos_cur_leaf_nodes(struct otx2_nic *pfvf)
+{
+ int last = find_last_bit(pfvf->qos.qos_sq_bmap, pfvf->hw.tc_tx_queues);
+
+ return last == pfvf->hw.tc_tx_queues ? 0 : last + 1;
+}
+
+static void otx2_reset_qdisc(struct net_device *dev, u16 qid)
+{
+ struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, qid);
+ struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc_sleeping);
+
+ if (!qdisc)
+ return;
+
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
+}
+
+static void otx2_cfg_smq(struct otx2_nic *pfvf, struct otx2_qos_node *node,
+ int qid)
+{
+ struct otx2_qos_node *tmp;
+
+ list_for_each_entry(tmp, &node->child_schq_list, list)
+ if (tmp->level == NIX_TXSCH_LVL_MDQ) {
+ otx2_qos_txschq_config(pfvf, tmp);
+ pfvf->qos.qid_to_sqmap[qid] = tmp->schq;
+ }
+}
+
static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
struct netlink_ext_ack *extack)
{
struct otx2_qos_node *node, *parent;
int dwrr_del_node = false;
+ u16 qid, moved_qid;
u64 prio;
- u16 qid;
netdev_dbg(pfvf->netdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid);
@@ -1517,6 +1566,37 @@ static int otx2_qos_leaf_del(struct otx2_nic *pfvf, u16 *classid,
if (!parent->child_static_cnt)
parent->max_static_prio = 0;
+ moved_qid = otx2_qos_cur_leaf_nodes(pfvf);
+
+ /* last node just deleted */
+ if (moved_qid == 0 || moved_qid == qid)
+ return 0;
+
+ moved_qid--;
+
+ node = otx2_sw_node_find_by_qid(pfvf, moved_qid);
+ if (!node)
+ return 0;
+
+ /* stop traffic to the old queue and disable
+ * SQ associated with it
+ */
+ node->qid = OTX2_QOS_QID_INNER;
+ __clear_bit(moved_qid, pfvf->qos.qos_sq_bmap);
+ otx2_qos_disable_sq(pfvf, moved_qid);
+
+ otx2_reset_qdisc(pfvf->netdev, pfvf->hw.tx_queues + moved_qid);
+
+ /* enable SQ associated with qid and
+ * update the node
+ */
+ otx2_cfg_smq(pfvf, node, qid);
+
+ otx2_qos_enable_sq(pfvf, qid);
+ __set_bit(qid, pfvf->qos.qos_sq_bmap);
+ node->qid = qid;
+
+ *classid = node->classid;
return 0;
}
@@ -1554,6 +1634,7 @@ static int otx2_qos_leaf_del_last(struct otx2_nic *pfvf, u16 classid, bool force
dwrr_del_node = true;
/* destroy the leaf node */
+ otx2_qos_disable_sq(pfvf, qid);
otx2_qos_destroy_node(pfvf, node);
pfvf->qos.qid_to_sqmap[qid] = OTX2_QOS_INVALID_SQ;
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
index 8b9455d8a4f7..418101a93149 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c
@@ -229,6 +229,10 @@ static int prestera_flower_parse(struct prestera_flow_block *block,
flow_rule_match_control(f_rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (flow_rule_match_key(f_rule, FLOW_DISSECTOR_KEY_BASIC)) {
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index fc6f7d2746e8..197198ba61b1 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -419,15 +419,6 @@ struct prestera_msg_vtcam_destroy_req {
__le32 vtcam_id;
};
-struct prestera_msg_vtcam_rule_add_req {
- struct prestera_msg_cmd cmd;
- __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
- __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
- __le32 vtcam_id;
- __le32 prio;
- __le32 n_act;
-};
-
struct prestera_msg_vtcam_rule_del_req {
struct prestera_msg_cmd cmd;
__le32 vtcam_id;
@@ -471,6 +462,16 @@ struct prestera_msg_acl_action {
};
};
+struct prestera_msg_vtcam_rule_add_req {
+ struct prestera_msg_cmd cmd;
+ __le32 key[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 keymask[__PRESTERA_ACL_RULE_MATCH_TYPE_MAX];
+ __le32 vtcam_id;
+ __le32 prio;
+ __le32 n_act;
+ struct prestera_msg_acl_action actions_msg[] __counted_by_le(n_act);
+};
+
struct prestera_msg_counter_req {
struct prestera_msg_cmd cmd;
__le32 client;
@@ -702,12 +703,6 @@ struct prestera_msg_flood_domain_destroy_req {
__le32 flood_domain_idx;
};
-struct prestera_msg_flood_domain_ports_set_req {
- struct prestera_msg_cmd cmd;
- __le32 flood_domain_idx;
- __le32 ports_num;
-};
-
struct prestera_msg_flood_domain_ports_reset_req {
struct prestera_msg_cmd cmd;
__le32 flood_domain_idx;
@@ -725,6 +720,13 @@ struct prestera_msg_flood_domain_port {
__le16 port_type;
};
+struct prestera_msg_flood_domain_ports_set_req {
+ struct prestera_msg_cmd cmd;
+ __le32 flood_domain_idx;
+ __le32 ports_num;
+ struct prestera_msg_flood_domain_port ports[] __counted_by_le(ports_num);
+};
+
struct prestera_msg_mdb_create_req {
struct prestera_msg_cmd cmd;
__le32 flood_domain_idx;
@@ -1371,23 +1373,18 @@ int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
struct prestera_acl_hw_action_info *act,
u8 n_act, u32 *rule_id)
{
- struct prestera_msg_acl_action *actions_msg;
struct prestera_msg_vtcam_rule_add_req *req;
struct prestera_msg_vtcam_resp resp;
- void *buff;
- u32 size;
+ size_t size;
int err;
u8 i;
- size = sizeof(*req) + sizeof(*actions_msg) * n_act;
-
- buff = kzalloc(size, GFP_KERNEL);
- if (!buff)
+ size = struct_size(req, actions_msg, n_act);
+ req = kzalloc(size, GFP_KERNEL);
+ if (!req)
return -ENOMEM;
- req = buff;
req->n_act = __cpu_to_le32(n_act);
- actions_msg = buff + sizeof(*req);
/* put acl matches into the message */
memcpy(req->key, key, sizeof(req->key));
@@ -1395,7 +1392,7 @@ int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
/* put acl actions into the message */
for (i = 0; i < n_act; i++) {
- err = prestera_acl_rule_add_put_action(&actions_msg[i],
+ err = prestera_acl_rule_add_put_action(&req->actions_msg[i],
&act[i]);
if (err)
goto free_buff;
@@ -1411,7 +1408,7 @@ int prestera_hw_vtcam_rule_add(struct prestera_switch *sw,
*rule_id = __le32_to_cpu(resp.rule_id);
free_buff:
- kfree(buff);
+ kfree(req);
return err;
}
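Folding the action array into the request as a flexible array member lets the allocation use struct_size(), which computes sizeof(*req) plus n_act trailing elements with overflow protection, while __counted_by_le(n_act) annotates the array as bounded by the little-endian n_act member for runtime bounds checkers. The resulting pattern, roughly:

	req = kzalloc(struct_size(req, actions_msg, n_act), GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	req->n_act = __cpu_to_le32(n_act);
	/* req->actions_msg[0..n_act-1] are now addressable in line */

The same conversion is applied to the flood-domain ports message below.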
@@ -2461,14 +2458,13 @@ int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
{
struct prestera_flood_domain_port *flood_domain_port;
struct prestera_msg_flood_domain_ports_set_req *req;
- struct prestera_msg_flood_domain_port *ports;
struct prestera_switch *sw = domain->sw;
struct prestera_port *port;
u32 ports_num = 0;
- int buf_size;
- void *buff;
+ size_t buf_size;
u16 lag_id;
int err;
+ int i = 0;
list_for_each_entry(flood_domain_port, &domain->flood_domain_port_list,
flood_domain_port_node)
@@ -2477,15 +2473,11 @@ int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
if (!ports_num)
return -EINVAL;
- buf_size = sizeof(*req) + sizeof(*ports) * ports_num;
-
- buff = kmalloc(buf_size, GFP_KERNEL);
- if (!buff)
+ buf_size = struct_size(req, ports, ports_num);
+ req = kmalloc(buf_size, GFP_KERNEL);
+ if (!req)
return -ENOMEM;
- req = buff;
- ports = buff + sizeof(*req);
-
req->flood_domain_idx = __cpu_to_le32(domain->idx);
req->ports_num = __cpu_to_le32(ports_num);
@@ -2494,31 +2486,30 @@ int prestera_hw_flood_domain_ports_set(struct prestera_flood_domain *domain)
if (netif_is_lag_master(flood_domain_port->dev)) {
if (prestera_lag_id(sw, flood_domain_port->dev,
&lag_id)) {
- kfree(buff);
+ kfree(req);
return -EINVAL;
}
- ports->port_type =
+ req->ports[i].port_type =
__cpu_to_le16(PRESTERA_HW_FLOOD_DOMAIN_PORT_TYPE_LAG);
- ports->lag_id = __cpu_to_le16(lag_id);
+ req->ports[i].lag_id = __cpu_to_le16(lag_id);
} else {
port = prestera_port_dev_lower_find(flood_domain_port->dev);
- ports->port_type =
+ req->ports[i].port_type =
__cpu_to_le16(PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT);
- ports->dev_num = __cpu_to_le32(port->dev_id);
- ports->port_num = __cpu_to_le32(port->hw_id);
+ req->ports[i].dev_num = __cpu_to_le32(port->dev_id);
+ req->ports[i].port_num = __cpu_to_le32(port->hw_id);
}
- ports->vid = __cpu_to_le16(flood_domain_port->vid);
-
- ports++;
+ req->ports[i].vid = __cpu_to_le16(flood_domain_port->vid);
+ i++;
}
err = prestera_cmd(sw, PRESTERA_CMD_TYPE_FLOOD_DOMAIN_PORTS_SET,
&req->cmd, buf_size);
- kfree(buff);
+ kfree(req);
return err;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 4fb886c57cd7..63ae01954dfc 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -489,7 +489,7 @@ static int prestera_port_change_mtu(struct net_device *dev, int mtu)
if (err)
return err;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
}
@@ -821,7 +821,7 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
if (port->state_mac.oper) {
if (port->phy_link)
- phylink_mac_change(port->phy_link, true);
+ phylink_pcs_change(&port->phylink_pcs, true);
else
netif_carrier_on(port->dev);
@@ -829,7 +829,7 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
queue_delayed_work(prestera_wq, caching_dw, 0);
} else {
if (port->phy_link)
- phylink_mac_change(port->phy_link, false);
+ phylink_pcs_change(&port->phylink_pcs, false);
else if (netif_running(port->dev) && netif_carrier_ok(port->dev))
netif_carrier_off(port->dev);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index cc2a9ae794be..39d9bf82c115 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -96,7 +96,7 @@ struct prestera_sdma {
struct dma_pool *desc_pool;
struct work_struct tx_work;
struct napi_struct rx_napi;
- struct net_device napi_dev;
+ struct net_device *napi_dev;
u32 map_addr;
u64 dma_mask;
/* protect SDMA with concurrent access from multiple CPUs */
@@ -654,13 +654,21 @@ static int prestera_sdma_switch_init(struct prestera_switch *sw)
if (err)
goto err_evt_register;
- init_dummy_netdev(&sdma->napi_dev);
+ sdma->napi_dev = alloc_netdev_dummy(0);
+ if (!sdma->napi_dev) {
+ dev_err(dev, "not able to initialize dummy device\n");
+ err = -ENOMEM;
+ goto err_alloc_dummy;
+ }
- netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
+ netif_napi_add(sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
napi_enable(&sdma->rx_napi);
return 0;
+err_alloc_dummy:
+ prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
+ prestera_rxtx_handle_event);
err_evt_register:
err_tx_init:
prestera_sdma_tx_fini(sdma);
@@ -677,6 +685,7 @@ static void prestera_sdma_switch_fini(struct prestera_switch *sw)
napi_disable(&sdma->rx_napi);
netif_napi_del(&sdma->rx_napi);
+ free_netdev(sdma->napi_dev);
prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
prestera_rxtx_handle_event);
prestera_sdma_tx_fini(sdma);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index dd6ca2e4fd51..1a59c952aa01 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1188,7 +1188,7 @@ static int pxa168_eth_change_mtu(struct net_device *dev, int mtu)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
set_port_config_ext(pep);
if (!netif_running(dev))
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 1b43704baceb..fcfb34561882 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -2905,13 +2905,13 @@ static int skge_change_mtu(struct net_device *dev, int new_mtu)
int err;
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
skge_down(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
err = skge_up(dev);
if (err)
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 07720841a8d7..a7a16eac1891 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -34,6 +34,7 @@
#include <linux/mii.h>
#include <linux/of_net.h>
#include <linux/dmi.h>
+#include <linux/skbuff_ref.h>
#include <asm/irq.h>
@@ -2383,7 +2384,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
u32 imask;
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
return 0;
}
@@ -2406,7 +2407,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_rx_stop(sky2);
sky2_rx_clean(sky2);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA;
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
index da0db417ab69..95c4405b7d7b 100644
--- a/drivers/net/ethernet/mediatek/Kconfig
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -1,12 +1,20 @@
# SPDX-License-Identifier: GPL-2.0-only
config NET_VENDOR_MEDIATEK
bool "MediaTek devices"
- depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
+ depends on ARCH_MEDIATEK || ARCH_AIROHA || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
help
If you have a Mediatek SoC with ethernet, say Y.
if NET_VENDOR_MEDIATEK
+config NET_AIROHA
+ tristate "Airoha SoC Gigabit Ethernet support"
+ depends on NET_DSA || !NET_DSA
+ select PAGE_POOL
+ help
+ This driver supports the gigabit ethernet MACs in the
+ Airoha SoC family.
+
config NET_MEDIATEK_SOC_WED
depends on ARCH_MEDIATEK || COMPILE_TEST
def_bool NET_MEDIATEK_SOC != n
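The `NET_DSA || !NET_DSA` dependency is the usual Kconfig idiom for an optional subsystem: always true as a boolean expression, but under tristate logic it caps NET_AIROHA at `m` whenever NET_DSA=m, so a built-in driver can never reference DSA symbols that live in a module:

	NET_DSA=y  ->  NET_AIROHA may be y or m
	NET_DSA=m  ->  NET_AIROHA limited to m (or n)
	NET_DSA=n  ->  NET_AIROHA may be y or m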
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
index 03e008fbc859..ddbb7f4a516c 100644
--- a/drivers/net/ethernet/mediatek/Makefile
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -11,3 +11,4 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
endif
obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
+obj-$(CONFIG_NET_AIROHA) += airoha_eth.o
diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c
new file mode 100644
index 000000000000..1c5b85a86df1
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/airoha_eth.c
@@ -0,0 +1,2731 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024 AIROHA Inc
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ */
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/tcp.h>
+#include <linux/u64_stats_sync.h>
+#include <net/dsa.h>
+#include <net/page_pool/helpers.h>
+#include <uapi/linux/ppp_defs.h>
+
+#define AIROHA_MAX_NUM_GDM_PORTS 1
+#define AIROHA_MAX_NUM_RSTS 3
+#define AIROHA_MAX_NUM_XSI_RSTS 5
+#define AIROHA_MAX_MTU 2000
+#define AIROHA_MAX_PACKET_SIZE 2048
+#define AIROHA_NUM_TX_RING 32
+#define AIROHA_NUM_RX_RING 32
+#define AIROHA_FE_MC_MAX_VLAN_TABLE 64
+#define AIROHA_FE_MC_MAX_VLAN_PORT 16
+#define AIROHA_NUM_TX_IRQ 2
+#define HW_DSCP_NUM 2048
+#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048)
+#define TX_DSCP_NUM 1024
+#define RX_DSCP_NUM(_n) \
+ ((_n) == 2 ? 128 : \
+ (_n) == 11 ? 128 : \
+ (_n) == 15 ? 128 : \
+ (_n) == 0 ? 1024 : 16)
+
+#define PSE_RSV_PAGES 128
+#define PSE_QUEUE_RSV_PAGES 64
+
+/* FE */
+#define PSE_BASE 0x0100
+#define CSR_IFC_BASE 0x0200
+#define CDM1_BASE 0x0400
+#define GDM1_BASE 0x0500
+#define PPE1_BASE 0x0c00
+
+#define CDM2_BASE 0x1400
+#define GDM2_BASE 0x1500
+
+#define GDM3_BASE 0x1100
+#define GDM4_BASE 0x2500
+
+#define GDM_BASE(_n) \
+ ((_n) == 4 ? GDM4_BASE : \
+ (_n) == 3 ? GDM3_BASE : \
+ (_n) == 2 ? GDM2_BASE : GDM1_BASE)
+
+#define REG_FE_DMA_GLO_CFG 0x0000
+#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4)
+#define FE_DMA_GLO_PG_SZ_MASK BIT(3)
+
+#define REG_FE_RST_GLO_CFG 0x0004
+#define FE_RST_GDM4_MBI_ARB_MASK BIT(3)
+#define FE_RST_GDM3_MBI_ARB_MASK BIT(2)
+#define FE_RST_CORE_MASK BIT(0)
+
+#define REG_FE_LAN_MAC_H 0x0040
+#define REG_FE_LAN_MAC_LMIN 0x0044
+#define REG_FE_LAN_MAC_LMAX 0x0048
+
+#define REG_FE_CDM1_OQ_MAP0 0x0050
+#define REG_FE_CDM1_OQ_MAP1 0x0054
+#define REG_FE_CDM1_OQ_MAP2 0x0058
+#define REG_FE_CDM1_OQ_MAP3 0x005c
+
+#define REG_FE_PCE_CFG 0x0070
+#define PCE_DPI_EN_MASK BIT(2)
+#define PCE_KA_EN_MASK BIT(1)
+#define PCE_MC_EN_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_WR 0x0080
+#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24)
+#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16)
+#define PSE_CFG_WR_EN_MASK BIT(8)
+#define PSE_CFG_OQRSV_SEL_MASK BIT(0)
+
+#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084
+#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0)
+
+#define PSE_FQ_CFG 0x008c
+#define PSE_FQ_LIMIT_MASK GENMASK(14, 0)
+
+#define REG_FE_PSE_BUF_SET 0x0090
+#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16)
+#define PSE_ALLRSV_MASK GENMASK(14, 0)
+
+#define REG_PSE_SHARE_USED_THD 0x0094
+#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16)
+#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0)
+
+#define REG_GDM_MISC_CFG 0x0148
+#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9)
+#define GDM2_CHN_VLD_MODE_MASK BIT(5)
+
+#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE
+#define FE_IFC_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PORT_EN 0x01f0
+#define REG_FE_IFC_PORT_EN 0x01f4
+
+#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08)
+#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16)
+
+#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c)
+#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8)
+#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0)
+
+#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3))
+#define PATN_FCPU_EN_MASK BIT(7)
+#define PATN_SWP_EN_MASK BIT(6)
+#define PATN_DP_EN_MASK BIT(5)
+#define PATN_SP_EN_MASK BIT(4)
+#define PATN_TYPE_MASK GENMASK(3, 1)
+#define PATN_EN_MASK BIT(0)
+
+#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3))
+#define PATN_DP_MASK GENMASK(31, 16)
+#define PATN_SP_MASK GENMASK(15, 0)
+
+#define REG_CDM1_VLAN_CTRL CDM1_BASE
+#define CDM1_VLAN_MASK GENMASK(31, 16)
+
+#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08)
+#define CDM1_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2))
+#define CDM1_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08)
+#define CDM2_OAM_QSEL_MASK GENMASK(31, 27)
+#define CDM2_VIP_QSEL_MASK GENMASK(24, 20)
+
+#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2))
+#define CDM2_CRSN_QSEL_REASON_MASK(_n) \
+ GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3))
+
+#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n)
+#define GDM_DROP_CRC_ERR BIT(23)
+#define GDM_IP4_CKSUM BIT(22)
+#define GDM_TCP_CKSUM BIT(21)
+#define GDM_UDP_CKSUM BIT(20)
+#define GDM_UCFQ_MASK GENMASK(15, 12)
+#define GDM_BCFQ_MASK GENMASK(11, 8)
+#define GDM_MCFQ_MASK GENMASK(7, 4)
+#define GDM_OCFQ_MASK GENMASK(3, 0)
+
+#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10)
+#define GDM_INGRESS_FC_EN_MASK BIT(1)
+#define GDM_STAG_EN_MASK BIT(0)
+
+#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
+#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
+#define GDM_LONG_LEN_MASK GENMASK(29, 16)
+
+#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
+#define FE_CPORT_PAD BIT(26)
+#define FE_CPORT_PORT_XFC_MASK BIT(25)
+#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
+
+#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
+#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
+#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0)
+
+#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4)
+#define FE_STRICT_RFC2819_MODE_MASK BIT(31)
+#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17)
+#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16)
+#define FE_TX_MIB_ID_MASK GENMASK(15, 8)
+#define FE_RX_MIB_ID_MASK GENMASK(7, 0)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114)
+#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118)
+#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c)
+#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120)
+#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124)
+#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128)
+#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c)
+#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130)
+#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134)
+#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138)
+#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148)
+#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c)
+#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150)
+#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154)
+#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164)
+#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168)
+#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c)
+#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170)
+#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174)
+#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178)
+#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c)
+#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180)
+#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184)
+#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188)
+#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c)
+#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190)
+#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194)
+#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c)
+
+#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250)
+#define PPE1_SRAM_TABLE_EN_MASK BIT(0)
+#define PPE1_SRAM_HASH1_EN_MASK BIT(8)
+#define PPE1_DRAM_TABLE_EN_MASK BIT(16)
+#define PPE1_DRAM_HASH1_EN_MASK BIT(24)
+
+#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280)
+#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284)
+#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288)
+#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c)
+
+#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290)
+#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294)
+#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298)
+#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c)
+#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2b8)
+#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc)
+#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0)
+#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4)
+#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8)
+#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc)
+#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8)
+#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec)
+#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0)
+#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4)
+#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8)
+#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc)
+
+#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20)
+#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25)
+#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17)
+
+#define REG_GDM3_FWD_CFG GDM3_BASE
+#define GDM3_PAD_EN_MASK BIT(28)
+
+#define REG_GDM4_FWD_CFG (GDM4_BASE + 0x100)
+#define GDM4_PAD_EN_MASK BIT(28)
+#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8)
+
+#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x33c)
+#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16)
+#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12)
+#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8)
+
+#define REG_IP_FRAG_FP 0x2010
+#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21)
+#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16)
+#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5)
+#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0)
+
+#define REG_MC_VLAN_EN 0x2100
+#define MC_VLAN_EN_MASK BIT(0)
+
+#define REG_MC_VLAN_CFG 0x2104
+#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31)
+#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16)
+#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8)
+#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4)
+#define MC_VLAN_CFG_RW_MASK BIT(0)
+
+#define REG_MC_VLAN_DATA 0x2108
+
+#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4
+
+/* QDMA */
+#define REG_QDMA_GLOBAL_CFG 0x0004
+#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31)
+#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29)
+#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28)
+#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27)
+#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26)
+#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25)
+#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24)
+#define GLOBAL_CFG_RESET_MASK BIT(23)
+#define GLOBAL_CFG_RESET_DONE_MASK BIT(22)
+#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21)
+#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20)
+#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19)
+#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18)
+#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17)
+#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16)
+#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8)
+#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7)
+#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6)
+#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4)
+#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3)
+#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2)
+#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1)
+#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0)
+
+#define REG_FWD_DSCP_BASE 0x0010
+#define REG_FWD_BUF_BASE 0x0014
+
+#define REG_HW_FWD_DSCP_CFG 0x0018
+#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28)
+#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16)
+#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0)
+
+#define REG_INT_STATUS(_n) \
+ (((_n) == 4) ? 0x0730 : \
+ ((_n) == 3) ? 0x0724 : \
+ ((_n) == 2) ? 0x0720 : \
+ ((_n) == 1) ? 0x0024 : 0x0020)
+
+#define REG_INT_ENABLE(_n) \
+ (((_n) == 4) ? 0x0750 : \
+ ((_n) == 3) ? 0x0744 : \
+ ((_n) == 2) ? 0x0740 : \
+ ((_n) == 1) ? 0x002c : 0x0028)
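+
+/* Interrupt banks 0 and 1 live in the low CSR window (0x0020-0x002c);
+ * banks 2-4 live in the extended window starting at 0x0720.
+ */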
+
+/* QDMA_CSR_INT_ENABLE1 */
+#define RX15_COHERENT_INT_MASK BIT(31)
+#define RX14_COHERENT_INT_MASK BIT(30)
+#define RX13_COHERENT_INT_MASK BIT(29)
+#define RX12_COHERENT_INT_MASK BIT(28)
+#define RX11_COHERENT_INT_MASK BIT(27)
+#define RX10_COHERENT_INT_MASK BIT(26)
+#define RX9_COHERENT_INT_MASK BIT(25)
+#define RX8_COHERENT_INT_MASK BIT(24)
+#define RX7_COHERENT_INT_MASK BIT(23)
+#define RX6_COHERENT_INT_MASK BIT(22)
+#define RX5_COHERENT_INT_MASK BIT(21)
+#define RX4_COHERENT_INT_MASK BIT(20)
+#define RX3_COHERENT_INT_MASK BIT(19)
+#define RX2_COHERENT_INT_MASK BIT(18)
+#define RX1_COHERENT_INT_MASK BIT(17)
+#define RX0_COHERENT_INT_MASK BIT(16)
+#define TX7_COHERENT_INT_MASK BIT(15)
+#define TX6_COHERENT_INT_MASK BIT(14)
+#define TX5_COHERENT_INT_MASK BIT(13)
+#define TX4_COHERENT_INT_MASK BIT(12)
+#define TX3_COHERENT_INT_MASK BIT(11)
+#define TX2_COHERENT_INT_MASK BIT(10)
+#define TX1_COHERENT_INT_MASK BIT(9)
+#define TX0_COHERENT_INT_MASK BIT(8)
+#define CNT_OVER_FLOW_INT_MASK BIT(7)
+#define IRQ1_FULL_INT_MASK BIT(5)
+#define IRQ1_INT_MASK BIT(4)
+#define HWFWD_DSCP_LOW_INT_MASK BIT(3)
+#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2)
+#define IRQ0_FULL_INT_MASK BIT(1)
+#define IRQ0_INT_MASK BIT(0)
+
+#define TX_DONE_INT_MASK(_n) \
+ ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
+ : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_TX_MASK \
+ (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
+ IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
+
+#define INT_IDX0_MASK \
+ (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \
+ TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \
+ TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \
+ TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \
+ RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \
+ RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \
+ RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \
+ RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \
+ RX15_COHERENT_INT_MASK | INT_TX_MASK)
+
+/* QDMA_CSR_INT_ENABLE2 */
+#define RX15_NO_CPU_DSCP_INT_MASK BIT(31)
+#define RX14_NO_CPU_DSCP_INT_MASK BIT(30)
+#define RX13_NO_CPU_DSCP_INT_MASK BIT(29)
+#define RX12_NO_CPU_DSCP_INT_MASK BIT(28)
+#define RX11_NO_CPU_DSCP_INT_MASK BIT(27)
+#define RX10_NO_CPU_DSCP_INT_MASK BIT(26)
+#define RX9_NO_CPU_DSCP_INT_MASK BIT(25)
+#define RX8_NO_CPU_DSCP_INT_MASK BIT(24)
+#define RX7_NO_CPU_DSCP_INT_MASK BIT(23)
+#define RX6_NO_CPU_DSCP_INT_MASK BIT(22)
+#define RX5_NO_CPU_DSCP_INT_MASK BIT(21)
+#define RX4_NO_CPU_DSCP_INT_MASK BIT(20)
+#define RX3_NO_CPU_DSCP_INT_MASK BIT(19)
+#define RX2_NO_CPU_DSCP_INT_MASK BIT(18)
+#define RX1_NO_CPU_DSCP_INT_MASK BIT(17)
+#define RX0_NO_CPU_DSCP_INT_MASK BIT(16)
+#define RX15_DONE_INT_MASK BIT(15)
+#define RX14_DONE_INT_MASK BIT(14)
+#define RX13_DONE_INT_MASK BIT(13)
+#define RX12_DONE_INT_MASK BIT(12)
+#define RX11_DONE_INT_MASK BIT(11)
+#define RX10_DONE_INT_MASK BIT(10)
+#define RX9_DONE_INT_MASK BIT(9)
+#define RX8_DONE_INT_MASK BIT(8)
+#define RX7_DONE_INT_MASK BIT(7)
+#define RX6_DONE_INT_MASK BIT(6)
+#define RX5_DONE_INT_MASK BIT(5)
+#define RX4_DONE_INT_MASK BIT(4)
+#define RX3_DONE_INT_MASK BIT(3)
+#define RX2_DONE_INT_MASK BIT(2)
+#define RX1_DONE_INT_MASK BIT(1)
+#define RX0_DONE_INT_MASK BIT(0)
+
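+/* Only RX rings 0-4, 7-9 and 15 are bound to an interrupt; the
+ * remaining per-ring DONE bits stay masked.
+ */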
+#define RX_DONE_INT_MASK \
+ (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \
+ RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \
+ RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \
+ RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \
+ RX15_DONE_INT_MASK)
+#define INT_IDX1_MASK \
+ (RX_DONE_INT_MASK | \
+ RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \
+ RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \
+ RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \
+ RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \
+ RX15_NO_CPU_DSCP_INT_MASK)
+
+/* QDMA_CSR_INT_ENABLE5 */
+#define TX31_COHERENT_INT_MASK BIT(31)
+#define TX30_COHERENT_INT_MASK BIT(30)
+#define TX29_COHERENT_INT_MASK BIT(29)
+#define TX28_COHERENT_INT_MASK BIT(28)
+#define TX27_COHERENT_INT_MASK BIT(27)
+#define TX26_COHERENT_INT_MASK BIT(26)
+#define TX25_COHERENT_INT_MASK BIT(25)
+#define TX24_COHERENT_INT_MASK BIT(24)
+#define TX23_COHERENT_INT_MASK BIT(23)
+#define TX22_COHERENT_INT_MASK BIT(22)
+#define TX21_COHERENT_INT_MASK BIT(21)
+#define TX20_COHERENT_INT_MASK BIT(20)
+#define TX19_COHERENT_INT_MASK BIT(19)
+#define TX18_COHERENT_INT_MASK BIT(18)
+#define TX17_COHERENT_INT_MASK BIT(17)
+#define TX16_COHERENT_INT_MASK BIT(16)
+#define TX15_COHERENT_INT_MASK BIT(15)
+#define TX14_COHERENT_INT_MASK BIT(14)
+#define TX13_COHERENT_INT_MASK BIT(13)
+#define TX12_COHERENT_INT_MASK BIT(12)
+#define TX11_COHERENT_INT_MASK BIT(11)
+#define TX10_COHERENT_INT_MASK BIT(10)
+#define TX9_COHERENT_INT_MASK BIT(9)
+#define TX8_COHERENT_INT_MASK BIT(8)
+
+#define INT_IDX4_MASK \
+ (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \
+ TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \
+ TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \
+ TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \
+ TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \
+ TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \
+ TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \
+ TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \
+ TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \
+ TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \
+ TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \
+ TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK)
+
+#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050)
+
+#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054)
+#define TX_IRQ_THR_MASK GENMASK(27, 16)
+#define TX_IRQ_DEPTH_MASK GENMASK(11, 0)
+
+#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058)
+#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0)
+
+#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c)
+#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16)
+#define IRQ_HEAD_IDX_MASK GENMASK(11, 0)
+
+#define REG_TX_RING_BASE(_n) \
+ (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5))
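+
+/* TX ring CSRs use a 0x20 stride and are split across two windows:
+ * rings 0-7 start at 0x0100 and rings 8-31 at 0x0b00, so e.g.
+ * REG_TX_RING_BASE(8) == 0x0b00.
+ */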
+
+#define REG_TX_RING_BLOCKING(_n) \
+ (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5))
+
+#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6)
+#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4)
+#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2)
+#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1)
+#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0)
+
+#define REG_TX_CPU_IDX(_n) \
+ (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5))
+
+#define TX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_TX_DMA_IDX(_n) \
+ (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5))
+
+#define TX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define IRQ_RING_IDX_MASK GENMASK(20, 16)
+#define IRQ_DESC_IDX_MASK GENMASK(15, 0)
+
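+/* RX ring CSRs follow the same scheme: rings 0-15 start at 0x0200 and
+ * rings 16 and above at 0x0e00, again with a 0x20 stride per ring.
+ */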
+#define REG_RX_RING_BASE(_n) \
+ (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5))
+
+#define REG_RX_RING_SIZE(_n) \
+ (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5))
+
+#define RX_RING_THR_MASK GENMASK(31, 16)
+#define RX_RING_SIZE_MASK GENMASK(15, 0)
+
+#define REG_RX_CPU_IDX(_n) \
+ (((_n) < 16) ? 0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5))
+
+#define RX_RING_CPU_IDX_MASK GENMASK(15, 0)
+
+#define REG_RX_DMA_IDX(_n) \
+ (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5))
+
+#define REG_RX_DELAY_INT_IDX(_n) \
+ (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5))
+
+#define RX_DELAY_INT_MASK GENMASK(15, 0)
+
+#define RX_RING_DMA_IDX_MASK GENMASK(15, 0)
+
+#define REG_INGRESS_TRTCM_CFG 0x0070
+#define INGRESS_TRTCM_EN_MASK BIT(31)
+#define INGRESS_TRTCM_MODE_MASK BIT(30)
+#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define INGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0)
+#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2))
+
+#define REG_LMGR_INIT_CFG 0x1000
+#define LMGR_INIT_START BIT(31)
+#define LMGR_SRAM_MODE_MASK BIT(30)
+#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20)
+#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0)
+
+#define REG_FWD_DSCP_LOW_THR 0x1004
+#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0)
+
+#define REG_EGRESS_RATE_METER_CFG 0x100c
+#define EGRESS_RATE_METER_EN_MASK BIT(29)
+#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17)
+#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12)
+#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0)
+
+#define REG_EGRESS_TRTCM_CFG 0x1010
+#define EGRESS_TRTCM_EN_MASK BIT(31)
+#define EGRESS_TRTCM_MODE_MASK BIT(30)
+#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define EGRESS_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_TXWRR_MODE_CFG 0x1020
+#define TWRR_WEIGHT_SCALE_MASK BIT(31)
+#define TWRR_WEIGHT_BASE_MASK BIT(3)
+
+#define REG_PSE_BUF_USAGE_CFG 0x1028
+#define PSE_BUF_ESTIMATE_EN_MASK BIT(29)
+
+#define REG_GLB_TRTCM_CFG 0x1080
+#define GLB_TRTCM_EN_MASK BIT(31)
+#define GLB_TRTCM_MODE_MASK BIT(30)
+#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define GLB_FAST_TICK_MASK GENMASK(15, 0)
+
+#define REG_TXQ_CNGST_CFG 0x10a0
+#define TXQ_CNGST_DROP_EN BIT(31)
+#define TXQ_CNGST_DEI_DROP_EN BIT(30)
+
+#define REG_SLA_TRTCM_CFG 0x1150
+#define SLA_TRTCM_EN_MASK BIT(31)
+#define SLA_TRTCM_MODE_MASK BIT(30)
+#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16)
+#define SLA_FAST_TICK_MASK GENMASK(15, 0)
+
+/* CTRL */
+#define QDMA_DESC_DONE_MASK BIT(31)
+#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */
+#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */
+#define QDMA_DESC_DEI_MASK BIT(25)
+#define QDMA_DESC_NO_DROP_MASK BIT(24)
+#define QDMA_DESC_LEN_MASK GENMASK(15, 0)
+/* DATA */
+#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0)
+/* TX MSG0 */
+#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30)
+#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14)
+#define QDMA_ETH_TXMSG_ICO_MASK BIT(13)
+#define QDMA_ETH_TXMSG_UCO_MASK BIT(12)
+#define QDMA_ETH_TXMSG_TCO_MASK BIT(11)
+#define QDMA_ETH_TXMSG_TSO_MASK BIT(10)
+#define QDMA_ETH_TXMSG_FAST_MASK BIT(9)
+#define QDMA_ETH_TXMSG_OAM_MASK BIT(8)
+#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3)
+#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0)
+/* TX MSG1 */
+#define QDMA_ETH_TXMSG_NO_DROP BIT(31)
+#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */
+#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20)
+#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15)
+#define QDMA_ETH_TXMSG_HWF_MASK BIT(14)
+#define QDMA_ETH_TXMSG_HOP_MASK BIT(13)
+#define QDMA_ETH_TXMSG_PTP_MASK BIT(12)
+#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */
+#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */
+
+/* RX MSG1 */
+#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
+#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
+#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
+#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
+#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
+#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
+#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
+#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
+#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
+
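+/* 32-byte QDMA hw descriptor; all fields are little-endian. ctrl and
+ * data carry the CTRL/DATA masks defined above, msg0-msg3 the
+ * direction-specific TX/RX metadata words.
+ */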
+struct airoha_qdma_desc {
+ __le32 rsv;
+ __le32 ctrl;
+ __le32 addr;
+ __le32 data;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 msg2;
+ __le32 msg3;
+};
+
+/* CTRL0 */
+#define QDMA_FWD_DESC_CTX_MASK BIT(31)
+#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28)
+#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16)
+#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0)
+/* CTRL1 */
+#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0)
+/* CTRL2 */
+#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0)
+
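+/* Descriptor layout used by the QDMA hw-forwarding engine; the
+ * CTRL0-CTRL2 masks above apply to the ctrl0-ctrl2 fields.
+ */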
+struct airoha_qdma_fwd_desc {
+ __le32 addr;
+ __le32 ctrl0;
+ __le32 ctrl1;
+ __le32 ctrl2;
+ __le32 msg0;
+ __le32 msg1;
+ __le32 rsv0;
+ __le32 rsv1;
+};
+
+enum {
+ QDMA_INT_REG_IDX0,
+ QDMA_INT_REG_IDX1,
+ QDMA_INT_REG_IDX2,
+ QDMA_INT_REG_IDX3,
+ QDMA_INT_REG_IDX4,
+ QDMA_INT_REG_MAX
+};
+
+enum {
+ XSI_PCIE0_PORT,
+ XSI_PCIE1_PORT,
+ XSI_USB_PORT,
+ XSI_AE_PORT,
+ XSI_ETH_PORT,
+};
+
+enum {
+ XSI_PCIE0_VIP_PORT_MASK = BIT(22),
+ XSI_PCIE1_VIP_PORT_MASK = BIT(23),
+ XSI_USB_VIP_PORT_MASK = BIT(25),
+ XSI_ETH_VIP_PORT_MASK = BIT(24),
+};
+
+enum {
+ DEV_STATE_INITIALIZED,
+};
+
+enum {
+ CDM_CRSN_QSEL_Q1 = 1,
+ CDM_CRSN_QSEL_Q5 = 5,
+ CDM_CRSN_QSEL_Q6 = 6,
+ CDM_CRSN_QSEL_Q15 = 15,
+};
+
+enum {
+ CRSN_08 = 0x8,
+ CRSN_21 = 0x15, /* KA */
+ CRSN_22 = 0x16, /* hit bind and force route to CPU */
+ CRSN_24 = 0x18,
+ CRSN_25 = 0x19,
+};
+
+enum {
+ FE_PSE_PORT_CDM1,
+ FE_PSE_PORT_GDM1,
+ FE_PSE_PORT_GDM2,
+ FE_PSE_PORT_GDM3,
+ FE_PSE_PORT_PPE1,
+ FE_PSE_PORT_CDM2,
+ FE_PSE_PORT_CDM3,
+ FE_PSE_PORT_CDM4,
+ FE_PSE_PORT_PPE2,
+ FE_PSE_PORT_GDM4,
+ FE_PSE_PORT_CDM5,
+ FE_PSE_PORT_DROP = 0xf,
+};
+
+struct airoha_queue_entry {
+ union {
+ void *buf;
+ struct sk_buff *skb;
+ };
+ dma_addr_t dma_addr;
+ u16 dma_len;
+};
+
+struct airoha_queue {
+ struct airoha_eth *eth;
+
+ /* protect concurrent queue accesses */
+ spinlock_t lock;
+ struct airoha_queue_entry *entry;
+ struct airoha_qdma_desc *desc;
+ u16 head;
+ u16 tail;
+
+ int queued;
+ int ndesc;
+ int free_thr;
+ int buf_size;
+
+ struct napi_struct napi;
+ struct page_pool *page_pool;
+};
+
+struct airoha_tx_irq_queue {
+ struct airoha_eth *eth;
+
+ struct napi_struct napi;
+ u32 *q;
+
+ int size;
+ int queued;
+ u16 head;
+};
+
+struct airoha_hw_stats {
+ /* protect concurrent hw_stats accesses */
+ spinlock_t lock;
+ struct u64_stats_sync syncp;
+
+ /* get_stats64 */
+ u64 rx_ok_pkts;
+ u64 tx_ok_pkts;
+ u64 rx_ok_bytes;
+ u64 tx_ok_bytes;
+ u64 rx_multicast;
+ u64 rx_errors;
+ u64 rx_drops;
+ u64 tx_drops;
+ u64 rx_crc_error;
+ u64 rx_over_errors;
+ /* ethtool stats */
+ u64 tx_broadcast;
+ u64 tx_multicast;
+ u64 tx_len[7];
+ u64 rx_broadcast;
+ u64 rx_fragment;
+ u64 rx_jabber;
+ u64 rx_len[7];
+};
+
+struct airoha_gdm_port {
+ struct net_device *dev;
+ struct airoha_eth *eth;
+ int id;
+
+ struct airoha_hw_stats stats;
+};
+
+struct airoha_eth {
+ struct device *dev;
+
+ unsigned long state;
+
+ void __iomem *qdma_regs;
+ void __iomem *fe_regs;
+
+ /* protect concurrent irqmask accesses */
+ spinlock_t irq_lock;
+ u32 irqmask[QDMA_INT_REG_MAX];
+ int irq;
+
+ struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS];
+ struct reset_control_bulk_data xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS];
+
+ struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS];
+
+ struct net_device *napi_dev;
+ struct airoha_queue q_tx[AIROHA_NUM_TX_RING];
+ struct airoha_queue q_rx[AIROHA_NUM_RX_RING];
+
+ struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
+
+ /* descriptor and packet buffers for qdma hw forward */
+ struct {
+ void *desc;
+ void *q;
+ } hfwd;
+};
+
+static u32 airoha_rr(void __iomem *base, u32 offset)
+{
+ return readl(base + offset);
+}
+
+static void airoha_wr(void __iomem *base, u32 offset, u32 val)
+{
+ writel(val, base + offset);
+}
+
+static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
+{
+ val |= (airoha_rr(base, offset) & ~mask);
+ airoha_wr(base, offset, val);
+
+ return val;
+}
+
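+/* FE and QDMA register accessors. The set/clear variants are thin
+ * wrappers around airoha_rmw(): set ORs bits in (mask is 0), clear
+ * masks them out (val is 0).
+ */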
+#define airoha_fe_rr(eth, offset) \
+ airoha_rr((eth)->fe_regs, (offset))
+#define airoha_fe_wr(eth, offset, val) \
+ airoha_wr((eth)->fe_regs, (offset), (val))
+#define airoha_fe_rmw(eth, offset, mask, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (mask), (val))
+#define airoha_fe_set(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), 0, (val))
+#define airoha_fe_clear(eth, offset, val) \
+ airoha_rmw((eth)->fe_regs, (offset), (val), 0)
+
+#define airoha_qdma_rr(eth, offset) \
+ airoha_rr((eth)->qdma_regs, (offset))
+#define airoha_qdma_wr(eth, offset, val) \
+ airoha_wr((eth)->qdma_regs, (offset), (val))
+#define airoha_qdma_rmw(eth, offset, mask, val) \
+ airoha_rmw((eth)->qdma_regs, (offset), (mask), (val))
+#define airoha_qdma_set(eth, offset, val) \
+ airoha_rmw((eth)->qdma_regs, (offset), 0, (val))
+#define airoha_qdma_clear(eth, offset, val) \
+ airoha_rmw((eth)->qdma_regs, (offset), (val), 0)
+
+static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index,
+ u32 clear, u32 set)
+{
+ unsigned long flags;
+
+ if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask)))
+ return;
+
+ spin_lock_irqsave(&eth->irq_lock, flags);
+
+ eth->irqmask[index] &= ~clear;
+ eth->irqmask[index] |= set;
+ airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]);
+ /* Read irq_enable register in order to guarantee the update above
+ * completes in the spinlock critical section.
+ */
+ airoha_qdma_rr(eth, REG_INT_ENABLE(index));
+
+ spin_unlock_irqrestore(&eth->irq_lock, flags);
+}
+
+static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index,
+ u32 mask)
+{
+ airoha_qdma_set_irqmask(eth, index, 0, mask);
+}
+
+static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index,
+ u32 mask)
+{
+ airoha_qdma_set_irqmask(eth, index, mask, 0);
+}
+
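+/* The LAN MAC address is split across three registers: the three high
+ * bytes go into REG_FE_LAN_MAC_H, while the three low bytes are
+ * programmed as both the minimum and maximum of the accepted range
+ * (LMIN/LMAX).
+ */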
+static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr)
+{
+ u32 val;
+
+ val = (addr[0] << 16) | (addr[1] << 8) | addr[2];
+ airoha_fe_wr(eth, REG_FE_LAN_MAC_H, val);
+
+ val = (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ airoha_fe_wr(eth, REG_FE_LAN_MAC_LMIN, val);
+ airoha_fe_wr(eth, REG_FE_LAN_MAC_LMAX, val);
+}
+
+static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr,
+ u32 val)
+{
+ airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK,
+ FIELD_PREP(GDM_OCFQ_MASK, val));
+ airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK,
+ FIELD_PREP(GDM_MCFQ_MASK, val));
+ airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK,
+ FIELD_PREP(GDM_BCFQ_MASK, val));
+ airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK,
+ FIELD_PREP(GDM_UCFQ_MASK, val));
+}
+
+static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable)
+{
+ u32 val = enable ? FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP;
+ u32 vip_port, cfg_addr;
+
+ switch (port) {
+ case XSI_PCIE0_PORT:
+ vip_port = XSI_PCIE0_VIP_PORT_MASK;
+ cfg_addr = REG_GDM_FWD_CFG(3);
+ break;
+ case XSI_PCIE1_PORT:
+ vip_port = XSI_PCIE1_VIP_PORT_MASK;
+ cfg_addr = REG_GDM_FWD_CFG(3);
+ break;
+ case XSI_USB_PORT:
+ vip_port = XSI_USB_VIP_PORT_MASK;
+ cfg_addr = REG_GDM_FWD_CFG(4);
+ break;
+ case XSI_ETH_PORT:
+ vip_port = XSI_ETH_VIP_PORT_MASK;
+ cfg_addr = REG_GDM_FWD_CFG(4);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (enable) {
+ airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
+ airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
+ } else {
+ airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port);
+ airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port);
+ }
+
+ airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val);
+
+ return 0;
+}
+
+static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable)
+{
+ const int port_list[] = {
+ XSI_PCIE0_PORT,
+ XSI_PCIE1_PORT,
+ XSI_USB_PORT,
+ XSI_ETH_PORT
+ };
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(port_list); i++) {
+ err = airoha_set_gdm_port(eth, port_list[i], enable);
+ if (err)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ for (i--; i >= 0; i--)
+ airoha_set_gdm_port(eth, port_list[i], false);
+
+ return err;
+}
+
+static void airoha_fe_maccr_init(struct airoha_eth *eth)
+{
+ int p;
+
+ for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) {
+ airoha_fe_set(eth, REG_GDM_FWD_CFG(p),
+ GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM |
+ GDM_DROP_CRC_ERR);
+ airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p),
+ FE_PSE_PORT_CDM1);
+ airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p),
+ GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
+ FIELD_PREP(GDM_SHORT_LEN_MASK, 60) |
+ FIELD_PREP(GDM_LONG_LEN_MASK, 4004));
+ }
+
+ airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK,
+ FIELD_PREP(CDM1_VLAN_MASK, 0x8100));
+
+ airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD);
+}
+
+static void airoha_fe_vip_setup(struct airoha_eth *eth)
+{
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(4),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(6),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(7),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ /* BOOTP (0x43) */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(8),
+ PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ /* BOOTP (0x44) */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(9),
+ PATN_FCPU_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ /* ISAKMP */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(10),
+ PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(11),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+ /* DHCPv6 */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(12),
+ PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK |
+ FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(19),
+ PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) |
+ PATN_EN_MASK);
+
+	/* IEEE 1905.1 (ethertype 0x893a) */
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(20),
+ PATN_FCPU_EN_MASK | PATN_EN_MASK);
+
+ airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP);
+ airoha_fe_wr(eth, REG_FE_VIP_EN(21),
+ PATN_FCPU_EN_MASK | PATN_EN_MASK);
+}
+
+static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth,
+ u32 port, u32 queue)
+{
+ u32 val;
+
+ airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
+ PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK,
+ FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
+ FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue));
+ val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL);
+
+ return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val);
+}
+
+static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth,
+ u32 port, u32 queue, u32 val)
+{
+ airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK,
+ FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val));
+ airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR,
+ PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK |
+ PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK,
+ FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) |
+ FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) |
+ PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK);
+}
+
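+/* Update the reserved page budget for a single output queue and
+ * rebalance the global reserve and the shared-buffer high/mid/low
+ * thresholds; the low threshold is kept at 3/4 of the mid one.
+ */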
+static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
+ u32 port, u32 queue, u32 val)
+{
+ u32 orig_val, tmp, all_rsv, fq_limit;
+
+ airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val);
+
+ /* modify all rsv */
+ orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue);
+ tmp = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET);
+ all_rsv = FIELD_GET(PSE_ALLRSV_MASK, tmp);
+ all_rsv += (val - orig_val);
+ airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
+ FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
+
+ /* modify hthd */
+ tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
+ fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
+ tmp = fq_limit - all_rsv - 0x20;
+ airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
+ PSE_SHARE_USED_HTHD_MASK,
+ FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp));
+
+ tmp = fq_limit - all_rsv - 0x100;
+ airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD,
+ PSE_SHARE_USED_MTHD_MASK,
+ FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp));
+ tmp = (3 * tmp) >> 2;
+ airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET,
+ PSE_SHARE_USED_LTHD_MASK,
+ FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
+
+ return 0;
+}
+
+static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
+{
+ const u32 pse_port_num_queues[] = {
+ [FE_PSE_PORT_CDM1] = 6,
+ [FE_PSE_PORT_GDM1] = 6,
+ [FE_PSE_PORT_GDM2] = 32,
+ [FE_PSE_PORT_GDM3] = 6,
+ [FE_PSE_PORT_PPE1] = 4,
+ [FE_PSE_PORT_CDM2] = 6,
+ [FE_PSE_PORT_CDM3] = 8,
+ [FE_PSE_PORT_CDM4] = 10,
+ [FE_PSE_PORT_PPE2] = 4,
+ [FE_PSE_PORT_GDM4] = 2,
+ [FE_PSE_PORT_CDM5] = 2,
+ };
+ int q;
+
+	/* hw does not account for the PPE2 oq reserve, add it manually */
+ airoha_fe_set(eth, REG_FE_PSE_BUF_SET,
+ PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]);
+
+	/* CDM1 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q,
+ PSE_QUEUE_RSV_PAGES);
+	/* GDM1 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q,
+ PSE_QUEUE_RSV_PAGES);
+	/* GDM2 */
+ for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0);
+	/* GDM3 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* PPE1 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) {
+		if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q,
+ PSE_QUEUE_RSV_PAGES);
+ else
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0);
+ }
+ /* CDM2 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* CDM3 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0);
+ /* CDM4 */
+ for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* PPE2 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) {
+ if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q,
+ PSE_QUEUE_RSV_PAGES);
+ else
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0);
+ }
+	/* GDM4 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q,
+ PSE_QUEUE_RSV_PAGES);
+ /* CDM5 */
+ for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++)
+ airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q,
+ PSE_QUEUE_RSV_PAGES);
+}
+
+static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) {
+ int err, j;
+ u32 val;
+
+ airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
+
+ val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
+ MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK;
+ airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
+ err = read_poll_timeout(airoha_fe_rr, val,
+ val & MC_VLAN_CFG_CMD_DONE_MASK,
+ USEC_PER_MSEC, 5 * USEC_PER_MSEC,
+ false, eth, REG_MC_VLAN_CFG);
+ if (err)
+ return err;
+
+ for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) {
+ airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0);
+
+ val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) |
+ FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) |
+ MC_VLAN_CFG_RW_MASK;
+ airoha_fe_wr(eth, REG_MC_VLAN_CFG, val);
+ err = read_poll_timeout(airoha_fe_rr, val,
+ val & MC_VLAN_CFG_CMD_DONE_MASK,
+ USEC_PER_MSEC,
+ 5 * USEC_PER_MSEC, false, eth,
+ REG_MC_VLAN_CFG);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth)
+{
+ /* CDM1_CRSN_QSEL */
+ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2),
+ CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
+ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2),
+ CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
+ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2),
+ CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2),
+ CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
+ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24),
+ CDM_CRSN_QSEL_Q6));
+ airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2),
+ CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
+ FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25),
+ CDM_CRSN_QSEL_Q1));
+ /* CDM2_CRSN_QSEL */
+ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2),
+ CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
+ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2),
+ CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2),
+ CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
+ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22),
+ CDM_CRSN_QSEL_Q1));
+ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2),
+ CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
+ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24),
+ CDM_CRSN_QSEL_Q6));
+ airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2),
+ CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
+ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25),
+ CDM_CRSN_QSEL_Q1));
+}
+
+static int airoha_fe_init(struct airoha_eth *eth)
+{
+ airoha_fe_maccr_init(eth);
+
+ /* PSE IQ reserve */
+ airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK,
+ FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10));
+ airoha_fe_rmw(eth, REG_PSE_IQ_REV2,
+ PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK,
+ FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) |
+ FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34));
+
+ /* enable FE copy engine for MC/KA/DPI */
+ airoha_fe_wr(eth, REG_FE_PCE_CFG,
+ PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK);
+ /* set vip queue selection to ring 1 */
+ airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK,
+ FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4));
+ airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK,
+ FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4));
+ /* set GDM4 source interface offset to 8 */
+ airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET,
+ GDM4_SPORT_OFF2_MASK |
+ GDM4_SPORT_OFF1_MASK |
+ GDM4_SPORT_OFF0_MASK,
+ FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) |
+ FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) |
+ FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8));
+
+	/* set PSE page size to 128B */
+ airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
+ FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
+ FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
+ FE_DMA_GLO_PG_SZ_MASK);
+ airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
+ FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
+ FE_RST_GDM4_MBI_ARB_MASK);
+ usleep_range(1000, 2000);
+
+ /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1
+ * connect other rings to PSE Port0 OQ-0
+ */
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4));
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28));
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4));
+ airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28));
+
+ airoha_fe_vip_setup(eth);
+ airoha_fe_pse_ports_init(eth);
+
+ airoha_fe_set(eth, REG_GDM_MISC_CFG,
+ GDM2_RDM_ACK_WAIT_PREF_MASK |
+ GDM2_CHN_VLD_MODE_MASK);
+ airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, 15);
+
+ /* init fragment and assemble Force Port */
+ /* NPU Core-3, NPU Bridge Channel-3 */
+ airoha_fe_rmw(eth, REG_IP_FRAG_FP,
+ IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
+ FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
+ FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
+ /* QDMA LAN, RX Ring-22 */
+ airoha_fe_rmw(eth, REG_IP_FRAG_FP,
+ IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
+ FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
+ FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
+
+ airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK);
+ airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK);
+
+ airoha_fe_crsn_qsel_init(eth);
+
+ airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
+ airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
+
+	/* set default MBI aging mode to work around the mbi unlock issue */
+ airoha_fe_rmw(eth, REG_GDM2_CHN_RLS,
+ MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK,
+ FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) |
+ FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3));
+
+ /* disable IFC by default */
+ airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
+
+ /* enable 1:N vlan action, init vlan table */
+ airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
+
+ return airoha_fe_mc_vlan_clear(eth);
+}
+
+static int airoha_qdma_fill_rx_queue(struct airoha_queue *q)
+{
+ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+ struct airoha_eth *eth = q->eth;
+ int qid = q - &eth->q_rx[0];
+ int nframes = 0;
+
+ while (q->queued < q->ndesc - 1) {
+ struct airoha_queue_entry *e = &q->entry[q->head];
+ struct airoha_qdma_desc *desc = &q->desc[q->head];
+ struct page *page;
+ int offset;
+ u32 val;
+
+ page = page_pool_dev_alloc_frag(q->page_pool, &offset,
+ q->buf_size);
+ if (!page)
+ break;
+
+ q->head = (q->head + 1) % q->ndesc;
+ q->queued++;
+ nframes++;
+
+ e->buf = page_address(page) + offset;
+ e->dma_addr = page_pool_get_dma_addr(page) + offset;
+ e->dma_len = SKB_WITH_OVERHEAD(q->buf_size);
+
+ dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len,
+ dir);
+
+ val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len);
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
+ WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr));
+ val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head);
+ WRITE_ONCE(desc->data, cpu_to_le32(val));
+ WRITE_ONCE(desc->msg0, 0);
+ WRITE_ONCE(desc->msg1, 0);
+ WRITE_ONCE(desc->msg2, 0);
+ WRITE_ONCE(desc->msg3, 0);
+
+ airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
+ FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
+ }
+
+ return nframes;
+}
+
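+/* Map the source-port field of the RX descriptor message to a GDM port
+ * index: sports 0x10-0x13 select the first GDM port, sports 2-4 the
+ * following ones; any other value is rejected.
+ */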
+static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
+ struct airoha_qdma_desc *desc)
+{
+ u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
+
+ sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
+ switch (sport) {
+ case 0x10 ... 0x13:
+ port = 0;
+ break;
+ case 0x2 ... 0x4:
+ port = sport - 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return port >= ARRAY_SIZE(eth->ports) ? -EINVAL : port;
+}
+
+static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
+{
+ enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool);
+ struct airoha_eth *eth = q->eth;
+ int qid = q - &eth->q_rx[0];
+ int done = 0;
+
+ while (done < budget) {
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+ struct airoha_qdma_desc *desc = &q->desc[q->tail];
+ dma_addr_t dma_addr = le32_to_cpu(desc->addr);
+ u32 desc_ctrl = le32_to_cpu(desc->ctrl);
+ struct sk_buff *skb;
+ int len, p;
+
+ if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
+ break;
+
+ if (!dma_addr)
+ break;
+
+ len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
+ if (!len)
+ break;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+ dma_sync_single_for_cpu(eth->dev, dma_addr,
+ SKB_WITH_OVERHEAD(q->buf_size), dir);
+
+ p = airoha_qdma_get_gdm_port(eth, desc);
+ if (p < 0 || !eth->ports[p]) {
+ page_pool_put_full_page(q->page_pool,
+ virt_to_head_page(e->buf),
+ true);
+ continue;
+ }
+
+ skb = napi_build_skb(e->buf, q->buf_size);
+ if (!skb) {
+ page_pool_put_full_page(q->page_pool,
+ virt_to_head_page(e->buf),
+ true);
+ break;
+ }
+
+ skb_reserve(skb, 2);
+ __skb_put(skb, len);
+ skb_mark_for_recycle(skb);
+ skb->dev = eth->ports[p]->dev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, qid);
+ napi_gro_receive(&q->napi, skb);
+
+ done++;
+ }
+ airoha_qdma_fill_rx_queue(q);
+
+ return done;
+}
+
+static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct airoha_queue *q = container_of(napi, struct airoha_queue, napi);
+ struct airoha_eth *eth = q->eth;
+ int cur, done = 0;
+
+ do {
+ cur = airoha_qdma_rx_process(q, budget - done);
+ done += cur;
+ } while (cur && done < budget);
+
+ if (done < budget && napi_complete(napi))
+ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1,
+ RX_DONE_INT_MASK);
+
+ return done;
+}
+
+static int airoha_qdma_init_rx_queue(struct airoha_eth *eth,
+ struct airoha_queue *q, int ndesc)
+{
+ const struct page_pool_params pp_params = {
+ .order = 0,
+ .pool_size = 256,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .dma_dir = DMA_FROM_DEVICE,
+ .max_len = PAGE_SIZE,
+ .nid = NUMA_NO_NODE,
+ .dev = eth->dev,
+ .napi = &q->napi,
+ };
+ int qid = q - &eth->q_rx[0], thr;
+ dma_addr_t dma_addr;
+
+ q->buf_size = PAGE_SIZE / 2;
+ q->ndesc = ndesc;
+ q->eth = eth;
+
+ q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(q->page_pool)) {
+ int err = PTR_ERR(q->page_pool);
+
+ q->page_pool = NULL;
+ return err;
+ }
+
+ q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
+ &dma_addr, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll);
+
+ airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK,
+ FIELD_PREP(RX_RING_SIZE_MASK, ndesc));
+
+ thr = clamp(ndesc >> 3, 1, 32);
+ airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK,
+ FIELD_PREP(RX_RING_THR_MASK, thr));
+ airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
+ FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head));
+
+ airoha_qdma_fill_rx_queue(q);
+
+ return 0;
+}
+
+static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q)
+{
+ struct airoha_eth *eth = q->eth;
+
+ while (q->queued) {
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+ struct page *page = virt_to_head_page(e->buf);
+
+ dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len,
+ page_pool_get_dma_dir(q->page_pool));
+ page_pool_put_full_page(q->page_pool, page, false);
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+ }
+}
+
+static int airoha_qdma_init_rx(struct airoha_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ int err;
+
+ if (!(RX_DONE_INT_MASK & BIT(i))) {
+			/* rx queue not bound to an irq */
+ continue;
+ }
+
+ err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i],
+ RX_DSCP_NUM(i));
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
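+/* Each TX completion queue entry packs the TX ring index and the index
+ * of the last completed descriptor (IRQ_RING_IDX_MASK and
+ * IRQ_DESC_IDX_MASK); consumed entries are rewritten as 0xff.
+ */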
+static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct airoha_tx_irq_queue *irq_q;
+ struct airoha_eth *eth;
+ int id, done = 0;
+
+ irq_q = container_of(napi, struct airoha_tx_irq_queue, napi);
+ eth = irq_q->eth;
+ id = irq_q - &eth->q_tx_irq[0];
+
+ while (irq_q->queued > 0 && done < budget) {
+ u32 qid, last, val = irq_q->q[irq_q->head];
+ struct airoha_queue *q;
+
+ if (val == 0xff)
+ break;
+
+ irq_q->q[irq_q->head] = 0xff; /* mark as done */
+ irq_q->head = (irq_q->head + 1) % irq_q->size;
+ irq_q->queued--;
+ done++;
+
+ last = FIELD_GET(IRQ_DESC_IDX_MASK, val);
+ qid = FIELD_GET(IRQ_RING_IDX_MASK, val);
+
+ if (qid >= ARRAY_SIZE(eth->q_tx))
+ continue;
+
+ q = &eth->q_tx[qid];
+ if (!q->ndesc)
+ continue;
+
+ spin_lock_bh(&q->lock);
+
+ while (q->queued > 0) {
+ struct airoha_qdma_desc *desc = &q->desc[q->tail];
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+ u32 desc_ctrl = le32_to_cpu(desc->ctrl);
+ struct sk_buff *skb = e->skb;
+ u16 index = q->tail;
+
+ if (!(desc_ctrl & QDMA_DESC_DONE_MASK) &&
+ !(desc_ctrl & QDMA_DESC_DROP_MASK))
+ break;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+
+ dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+
+ WRITE_ONCE(desc->msg0, 0);
+ WRITE_ONCE(desc->msg1, 0);
+
+ if (skb) {
+ struct netdev_queue *txq;
+
+ txq = netdev_get_tx_queue(skb->dev, qid);
+ if (netif_tx_queue_stopped(txq) &&
+ q->ndesc - q->queued >= q->free_thr)
+ netif_tx_wake_queue(txq);
+
+ dev_kfree_skb_any(skb);
+ e->skb = NULL;
+ }
+
+ if (index == last)
+ break;
+ }
+
+ spin_unlock_bh(&q->lock);
+ }
+
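+	/* Return the completed entries to the hw counter in chunks of at
+	 * most 0x80.
+	 */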
+ if (done) {
+ int i, len = done >> 7;
+
+ for (i = 0; i < len; i++)
+ airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+ IRQ_CLEAR_LEN_MASK, 0x80);
+ airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id),
+ IRQ_CLEAR_LEN_MASK, (done & 0x7f));
+ }
+
+ if (done < budget && napi_complete(napi))
+ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0,
+ TX_DONE_INT_MASK(id));
+
+ return done;
+}
+
+static int airoha_qdma_init_tx_queue(struct airoha_eth *eth,
+ struct airoha_queue *q, int size)
+{
+ int i, qid = q - &eth->q_tx[0];
+ dma_addr_t dma_addr;
+
+ spin_lock_init(&q->lock);
+ q->ndesc = size;
+ q->eth = eth;
+ q->free_thr = 1 + MAX_SKB_FRAGS;
+
+ q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc),
+ &dma_addr, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ for (i = 0; i < q->ndesc; i++) {
+ u32 val;
+
+ val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1);
+ WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val));
+ }
+
+ airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr);
+ airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head));
+ airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK,
+ FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head));
+
+ return 0;
+}
+
+static int airoha_qdma_tx_irq_init(struct airoha_eth *eth,
+ struct airoha_tx_irq_queue *irq_q,
+ int size)
+{
+ int id = irq_q - &eth->q_tx_irq[0];
+ dma_addr_t dma_addr;
+
+ netif_napi_add_tx(eth->napi_dev, &irq_q->napi,
+ airoha_qdma_tx_napi_poll);
+ irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32),
+ &dma_addr, GFP_KERNEL);
+ if (!irq_q->q)
+ return -ENOMEM;
+
+ memset(irq_q->q, 0xff, size * sizeof(u32));
+ irq_q->size = size;
+ irq_q->eth = eth;
+
+ airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr);
+ airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK,
+ FIELD_PREP(TX_IRQ_DEPTH_MASK, size));
+ airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK,
+ FIELD_PREP(TX_IRQ_THR_MASK, 1));
+
+ return 0;
+}
+
+static int airoha_qdma_init_tx(struct airoha_eth *eth)
+{
+ int i, err;
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
+ err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i],
+ IRQ_QUEUE_LEN(i));
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
+ err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i],
+ TX_DSCP_NUM);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q)
+{
+ struct airoha_eth *eth = q->eth;
+
+ spin_lock_bh(&q->lock);
+ while (q->queued) {
+ struct airoha_queue_entry *e = &q->entry[q->tail];
+
+ dma_unmap_single(eth->dev, e->dma_addr, e->dma_len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(e->skb);
+ e->skb = NULL;
+
+ q->tail = (q->tail + 1) % q->ndesc;
+ q->queued--;
+ }
+ spin_unlock_bh(&q->lock);
+}
+
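+/* Allocate the descriptor and packet buffers used by the QDMA
+ * hw-forwarding engine and start the buffer manager initialization,
+ * polling until the hw clears LMGR_INIT_START.
+ */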
+static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth)
+{
+ dma_addr_t dma_addr;
+ u32 status;
+ int size;
+
+ size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc);
+ eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL);
+ if (!eth->hfwd.desc)
+ return -ENOMEM;
+
+ airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr);
+
+ size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM;
+ eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr,
+ GFP_KERNEL);
+ if (!eth->hfwd.q)
+ return -ENOMEM;
+
+ airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr);
+
+ airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG,
+ HW_FWD_DSCP_PAYLOAD_SIZE_MASK,
+ FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0));
+ airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK,
+ FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128));
+ airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG,
+ LMGR_INIT_START | LMGR_SRAM_MODE_MASK |
+ HW_FWD_DESC_NUM_MASK,
+ FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) |
+ LMGR_INIT_START);
+
+ return read_poll_timeout(airoha_qdma_rr, status,
+ !(status & LMGR_INIT_START), USEC_PER_MSEC,
+ 30 * USEC_PER_MSEC, true, eth,
+ REG_LMGR_INIT_CFG);
+}
+
+static void airoha_qdma_init_qos(struct airoha_eth *eth)
+{
+ airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
+ airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
+
+ airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG,
+ PSE_BUF_ESTIMATE_EN_MASK);
+
+ airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_EN_MASK |
+ EGRESS_RATE_METER_EQ_RATE_EN_MASK);
+ /* 2047us x 31 = 63.457ms */
+ airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_WINDOW_SZ_MASK,
+ FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f));
+ airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG,
+ EGRESS_RATE_METER_TIMESLICE_MASK,
+ FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff));
+
+ /* ratelimit init */
+ airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK);
+ /* fast-tick 25us */
+ airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK,
+ FIELD_PREP(GLB_FAST_TICK_MASK, 25));
+ airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40));
+
+ airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK);
+ airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK,
+ FIELD_PREP(EGRESS_FAST_TICK_MASK, 25));
+ airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG,
+ EGRESS_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40));
+
+ airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK);
+ airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG,
+ INGRESS_TRTCM_MODE_MASK);
+ airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK,
+ FIELD_PREP(INGRESS_FAST_TICK_MASK, 125));
+ airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG,
+ INGRESS_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8));
+
+ airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK);
+ airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK,
+ FIELD_PREP(SLA_FAST_TICK_MASK, 25));
+ airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK,
+ FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40));
+}
+
+static int airoha_qdma_hw_init(struct airoha_eth *eth)
+{
+ int i;
+
+ /* clear pending irqs */
+ for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++)
+ airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff);
+
+ /* setup irqs */
+ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
+ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
+ airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
+
+ /* setup irq binding */
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
+ if (!eth->q_tx[i].ndesc)
+ continue;
+
+ if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i))
+ airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i),
+ TX_RING_IRQ_BLOCKING_CFG_MASK);
+ else
+ airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i),
+ TX_RING_IRQ_BLOCKING_CFG_MASK);
+ }
+
+ airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG,
+ GLOBAL_CFG_RX_2B_OFFSET_MASK |
+ FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) |
+ GLOBAL_CFG_CPU_TXR_RR_MASK |
+ GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK |
+ GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK |
+ GLOBAL_CFG_MULTICAST_EN_MASK |
+ GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK |
+ GLOBAL_CFG_TX_WB_DONE_MASK |
+ FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2));
+
+ airoha_qdma_init_qos(eth);
+
+ /* disable qdma rx delay interrupt */
+ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ if (!eth->q_rx[i].ndesc)
+ continue;
+
+ airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i),
+ RX_DELAY_INT_MASK);
+ }
+
+ airoha_qdma_set(eth, REG_TXQ_CNGST_CFG,
+ TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN);
+
+ return 0;
+}
+
+static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
+{
+ struct airoha_eth *eth = dev_instance;
+ u32 intr[ARRAY_SIZE(eth->irqmask)];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) {
+ intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i));
+ intr[i] &= eth->irqmask[i];
+ airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]);
+ }
+
+ if (!test_bit(DEV_STATE_INITIALIZED, &eth->state))
+ return IRQ_NONE;
+
+ if (intr[1] & RX_DONE_INT_MASK) {
+ airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1,
+ RX_DONE_INT_MASK);
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ if (!eth->q_rx[i].ndesc)
+ continue;
+
+ if (intr[1] & BIT(i))
+ napi_schedule(&eth->q_rx[i].napi);
+ }
+ }
+
+ if (intr[0] & INT_TX_MASK) {
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
+ struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i];
+ u32 status, head;
+
+ if (!(intr[0] & TX_DONE_INT_MASK(i)))
+ continue;
+
+ airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0,
+ TX_DONE_INT_MASK(i));
+
+ status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i));
+ head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
+ irq_q->head = head % irq_q->size;
+ irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status);
+
+ napi_schedule(&eth->q_tx_irq[i].napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int airoha_qdma_init(struct airoha_eth *eth)
+{
+ int err;
+
+ err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler,
+ IRQF_SHARED, KBUILD_MODNAME, eth);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_rx(eth);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_tx(eth);
+ if (err)
+ return err;
+
+ err = airoha_qdma_init_hfwd_queues(eth);
+ if (err)
+ return err;
+
+ err = airoha_qdma_hw_init(eth);
+ if (err)
+ return err;
+
+ set_bit(DEV_STATE_INITIALIZED, &eth->state);
+
+ return 0;
+}
+
+static int airoha_hw_init(struct airoha_eth *eth)
+{
+ int err;
+
+ /* disable xsi */
+ reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts);
+
+ reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ msleep(20);
+ reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts);
+ msleep(20);
+
+ err = airoha_fe_init(eth);
+ if (err)
+ return err;
+
+ return airoha_qdma_init(eth);
+}
+
+static void airoha_hw_cleanup(struct airoha_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ if (!eth->q_rx[i].ndesc)
+ continue;
+
+ napi_disable(&eth->q_rx[i].napi);
+ netif_napi_del(&eth->q_rx[i].napi);
+ airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]);
+ if (eth->q_rx[i].page_pool)
+ page_pool_destroy(eth->q_rx[i].page_pool);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) {
+ napi_disable(&eth->q_tx_irq[i].napi);
+ netif_napi_del(&eth->q_tx_irq[i].napi);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) {
+ if (!eth->q_tx[i].ndesc)
+ continue;
+
+ airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]);
+ }
+}
+
+static void airoha_qdma_start_napi(struct airoha_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++)
+ napi_enable(&eth->q_tx_irq[i].napi);
+
+ for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) {
+ if (!eth->q_rx[i].ndesc)
+ continue;
+
+ napi_enable(&eth->q_rx[i].napi);
+ }
+}
+
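+/* GDM MIB counters are exposed as 32-bit high/low pairs: accumulate
+ * both halves under stats.lock, then clear the hw counters so the next
+ * read starts from zero.
+ */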
+static void airoha_update_hw_stats(struct airoha_gdm_port *port)
+{
+ struct airoha_eth *eth = port->eth;
+ u32 val, i = 0;
+
+ spin_lock(&port->stats.lock);
+ u64_stats_update_begin(&port->stats.syncp);
+
+ /* TX */
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id));
+ port->stats.tx_ok_pkts += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id));
+ port->stats.tx_ok_pkts += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id));
+ port->stats.tx_ok_bytes += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id));
+ port->stats.tx_ok_bytes += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id));
+ port->stats.tx_drops += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id));
+ port->stats.tx_broadcast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id));
+ port->stats.tx_multicast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id));
+ port->stats.tx_len[i] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id));
+ port->stats.tx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id));
+ port->stats.tx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id));
+ port->stats.tx_len[i++] += val;
+
+ /* RX */
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id));
+ port->stats.rx_ok_pkts += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id));
+ port->stats.rx_ok_pkts += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id));
+ port->stats.rx_ok_bytes += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id));
+ port->stats.rx_ok_bytes += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id));
+ port->stats.rx_drops += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id));
+ port->stats.rx_broadcast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id));
+ port->stats.rx_multicast += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id));
+ port->stats.rx_errors += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id));
+ port->stats.rx_crc_error += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id));
+ port->stats.rx_over_errors += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id));
+ port->stats.rx_fragment += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id));
+ port->stats.rx_jabber += val;
+
+ i = 0;
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id));
+ port->stats.rx_len[i] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id));
+ port->stats.rx_len[i] += ((u64)val << 32);
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id));
+ port->stats.rx_len[i++] += val;
+
+ val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id));
+ port->stats.rx_len[i++] += val;
+
+ /* reset mib counters */
+ airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id),
+ FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK);
+
+ u64_stats_update_end(&port->stats.syncp);
+ spin_unlock(&port->stats.lock);
+}
+
+static int airoha_dev_open(struct net_device *dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->eth;
+ int err;
+
+ netif_tx_start_all_queues(dev);
+ err = airoha_set_gdm_ports(eth, true);
+ if (err)
+ return err;
+
+ if (netdev_uses_dsa(dev))
+ airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id),
+ GDM_STAG_EN_MASK);
+ else
+ airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id),
+ GDM_STAG_EN_MASK);
+
+ airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
+ airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+
+ return 0;
+}
+
+static int airoha_dev_stop(struct net_device *dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->eth;
+ int err;
+
+ netif_tx_disable(dev);
+ err = airoha_set_gdm_ports(eth, false);
+ if (err)
+ return err;
+
+ airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK);
+ airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK);
+
+ return 0;
+}
+
+static int airoha_dev_set_macaddr(struct net_device *dev, void *p)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ int err;
+
+ err = eth_mac_addr(dev, p);
+ if (err)
+ return err;
+
+ airoha_set_macaddr(port->eth, dev->dev_addr);
+
+ return 0;
+}
+
+static int airoha_dev_init(struct net_device *dev)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+
+ airoha_set_macaddr(port->eth, dev->dev_addr);
+
+ return 0;
+}
+
+static void airoha_dev_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ unsigned int start;
+
+ airoha_update_hw_stats(port);
+ do {
+ start = u64_stats_fetch_begin(&port->stats.syncp);
+ storage->rx_packets = port->stats.rx_ok_pkts;
+ storage->tx_packets = port->stats.tx_ok_pkts;
+ storage->rx_bytes = port->stats.rx_ok_bytes;
+ storage->tx_bytes = port->stats.tx_ok_bytes;
+ storage->multicast = port->stats.rx_multicast;
+ storage->rx_errors = port->stats.rx_errors;
+ storage->rx_dropped = port->stats.rx_drops;
+ storage->tx_dropped = port->stats.tx_drops;
+ storage->rx_crc_errors = port->stats.rx_crc_error;
+ storage->rx_over_errors = port->stats.rx_over_errors;
+ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
+}
+
+static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ u32 msg0 = 0, msg1, len = skb_headlen(skb);
+ int i, qid = skb_get_queue_mapping(skb);
+ struct airoha_eth *eth = port->eth;
+ u32 nr_frags = 1 + sinfo->nr_frags;
+ struct netdev_queue *txq;
+ struct airoha_queue *q;
+ void *data = skb->data;
+ u16 index;
+ u8 fport;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) |
+ FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) |
+ FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1);
+
+ /* TSO: fill MSS info in tcp checksum field */
+ if (skb_is_gso(skb)) {
+ if (skb_cow_head(skb, 0))
+ goto error;
+
+ if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ __be16 csum = cpu_to_be16(sinfo->gso_size);
+
+ tcp_hdr(skb)->check = (__force __sum16)csum;
+ msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1);
+ }
+ }
+
+ fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id;
+ msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
+ FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
+
+ q = &eth->q_tx[qid];
+ if (WARN_ON_ONCE(!q->ndesc))
+ goto error;
+
+ spin_lock_bh(&q->lock);
+
+ txq = netdev_get_tx_queue(dev, qid);
+ if (q->queued + nr_frags > q->ndesc) {
+ /* not enough space in the queue */
+ netif_tx_stop_queue(txq);
+ spin_unlock_bh(&q->lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ index = q->head;
+ for (i = 0; i < nr_frags; i++) {
+ struct airoha_qdma_desc *desc = &q->desc[index];
+ struct airoha_queue_entry *e = &q->entry[index];
+ skb_frag_t *frag = &sinfo->frags[i];
+ dma_addr_t addr;
+ u32 val;
+
+ addr = dma_map_single(dev->dev.parent, data, len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, addr)))
+ goto error_unmap;
+
+ index = (index + 1) % q->ndesc;
+
+ val = FIELD_PREP(QDMA_DESC_LEN_MASK, len);
+ if (i < nr_frags - 1)
+ val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1);
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(val));
+ WRITE_ONCE(desc->addr, cpu_to_le32(addr));
+ val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index);
+ WRITE_ONCE(desc->data, cpu_to_le32(val));
+ WRITE_ONCE(desc->msg0, cpu_to_le32(msg0));
+ WRITE_ONCE(desc->msg1, cpu_to_le32(msg1));
+ WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff));
+
+ e->skb = i ? NULL : skb;
+ e->dma_addr = addr;
+ e->dma_len = len;
+
+ airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
+ FIELD_PREP(TX_RING_CPU_IDX_MASK, index));
+
+ data = skb_frag_address(frag);
+ len = skb_frag_size(frag);
+ }
+
+ q->head = index;
+ q->queued += i;
+
+ skb_tx_timestamp(skb);
+ if (q->ndesc - q->queued < q->free_thr)
+ netif_tx_stop_queue(txq);
+
+ spin_unlock_bh(&q->lock);
+
+ return NETDEV_TX_OK;
+
+error_unmap:
+ for (i--; i >= 0; i--) {
+ index = (q->head + i) % q->ndesc;
+ dma_unmap_single(dev->dev.parent, q->entry[index].dma_addr,
+ q->entry[index].dma_len, DMA_TO_DEVICE);
+ }
+
+ spin_unlock_bh(&q->lock);
+error:
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+
+ return NETDEV_TX_OK;
+}
+
+static void airoha_ethtool_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_eth *eth = port->eth;
+
+ strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info));
+}
+
+static void airoha_ethtool_get_mac_stats(struct net_device *dev,
+ struct ethtool_eth_mac_stats *stats)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ unsigned int start;
+
+ airoha_update_hw_stats(port);
+ do {
+ start = u64_stats_fetch_begin(&port->stats.syncp);
+ stats->MulticastFramesXmittedOK = port->stats.tx_multicast;
+ stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast;
+ stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast;
+ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
+}
+
+static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = {
+ { 0, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1518 },
+ { 1519, 10239 },
+ {},
+};
+
+static void
+airoha_ethtool_get_rmon_stats(struct net_device *dev,
+ struct ethtool_rmon_stats *stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct airoha_gdm_port *port = netdev_priv(dev);
+ struct airoha_hw_stats *hw_stats = &port->stats;
+ unsigned int start;
+
+ BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
+ ARRAY_SIZE(hw_stats->tx_len) + 1);
+ BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) !=
+ ARRAY_SIZE(hw_stats->rx_len) + 1);
+
+ *ranges = airoha_ethtool_rmon_ranges;
+ airoha_update_hw_stats(port);
+ do {
+ int i;
+
+ start = u64_stats_fetch_begin(&port->stats.syncp);
+ stats->fragments = hw_stats->rx_fragment;
+ stats->jabbers = hw_stats->rx_jabber;
+ for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1;
+ i++) {
+ stats->hist[i] = hw_stats->rx_len[i];
+ stats->hist_tx[i] = hw_stats->tx_len[i];
+ }
+ } while (u64_stats_fetch_retry(&port->stats.syncp, start));
+}
+
+static const struct net_device_ops airoha_netdev_ops = {
+ .ndo_init = airoha_dev_init,
+ .ndo_open = airoha_dev_open,
+ .ndo_stop = airoha_dev_stop,
+ .ndo_start_xmit = airoha_dev_xmit,
+ .ndo_get_stats64 = airoha_dev_get_stats64,
+ .ndo_set_mac_address = airoha_dev_set_macaddr,
+};
+
+static const struct ethtool_ops airoha_ethtool_ops = {
+ .get_drvinfo = airoha_ethtool_get_drvinfo,
+ .get_eth_mac_stats = airoha_ethtool_get_mac_stats,
+ .get_rmon_stats = airoha_ethtool_get_rmon_stats,
+};
+
+static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np)
+{
+ const __be32 *id_ptr = of_get_property(np, "reg", NULL);
+ struct airoha_gdm_port *port;
+ struct net_device *dev;
+ int err, index;
+ u32 id;
+
+ if (!id_ptr) {
+ dev_err(eth->dev, "missing gdm port id\n");
+ return -EINVAL;
+ }
+
+ id = be32_to_cpup(id_ptr);
+ index = id - 1;
+
+ if (!id || id > ARRAY_SIZE(eth->ports)) {
+ dev_err(eth->dev, "invalid gdm port id: %d\n", id);
+ return -EINVAL;
+ }
+
+ if (eth->ports[index]) {
+ dev_err(eth->dev, "duplicate gdm port id: %d\n", id);
+ return -EINVAL;
+ }
+
+ dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port),
+ AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING);
+ if (!dev) {
+ dev_err(eth->dev, "alloc_etherdev failed\n");
+ return -ENOMEM;
+ }
+
+ dev->netdev_ops = &airoha_netdev_ops;
+ dev->ethtool_ops = &airoha_ethtool_ops;
+ dev->max_mtu = AIROHA_MAX_MTU;
+ dev->watchdog_timeo = 5 * HZ;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_TSO6 | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO;
+ dev->features |= dev->hw_features;
+ dev->dev.of_node = np;
+ SET_NETDEV_DEV(dev, eth->dev);
+
+ err = of_get_ethdev_address(np, dev);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ return err;
+
+ eth_hw_addr_random(dev);
+ dev_info(eth->dev, "generated random MAC address %pM\n",
+ dev->dev_addr);
+ }
+
+ port = netdev_priv(dev);
+ u64_stats_init(&port->stats.syncp);
+ spin_lock_init(&port->stats.lock);
+ port->dev = dev;
+ port->eth = eth;
+ port->id = id;
+ eth->ports[index] = port;
+
+ return register_netdev(dev);
+}
+
+static int airoha_probe(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct airoha_eth *eth;
+ int i, err;
+
+ eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
+ if (!eth)
+ return -ENOMEM;
+
+ eth->dev = &pdev->dev;
+
+ err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(eth->dev, "failed configuring DMA mask\n");
+ return err;
+ }
+
+ eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe");
+ if (IS_ERR(eth->fe_regs))
+ return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
+ "failed to iomap fe regs\n");
+
+ eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0");
+ if (IS_ERR(eth->qdma_regs))
+ return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs),
+ "failed to iomap qdma regs\n");
+
+ eth->rsts[0].id = "fe";
+ eth->rsts[1].id = "pdma";
+ eth->rsts[2].id = "qdma";
+ err = devm_reset_control_bulk_get_exclusive(eth->dev,
+ ARRAY_SIZE(eth->rsts),
+ eth->rsts);
+ if (err) {
+ dev_err(eth->dev, "failed to get bulk reset lines\n");
+ return err;
+ }
+
+ eth->xsi_rsts[0].id = "xsi-mac";
+ eth->xsi_rsts[1].id = "hsi0-mac";
+ eth->xsi_rsts[2].id = "hsi1-mac";
+ eth->xsi_rsts[3].id = "hsi-mac";
+ eth->xsi_rsts[4].id = "xfp-mac";
+ err = devm_reset_control_bulk_get_exclusive(eth->dev,
+ ARRAY_SIZE(eth->xsi_rsts),
+ eth->xsi_rsts);
+ if (err) {
+ dev_err(eth->dev, "failed to get bulk xsi reset lines\n");
+ return err;
+ }
+
+ spin_lock_init(&eth->irq_lock);
+ eth->irq = platform_get_irq(pdev, 0);
+ if (eth->irq < 0)
+ return eth->irq;
+
+ eth->napi_dev = alloc_netdev_dummy(0);
+ if (!eth->napi_dev)
+ return -ENOMEM;
+
+ /* Enable threaded NAPI by default */
+ eth->napi_dev->threaded = true;
+ strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name));
+ platform_set_drvdata(pdev, eth);
+
+ err = airoha_hw_init(eth);
+ if (err)
+ goto error;
+
+ airoha_qdma_start_napi(eth);
+ for_each_child_of_node(pdev->dev.of_node, np) {
+ if (!of_device_is_compatible(np, "airoha,eth-mac"))
+ continue;
+
+ if (!of_device_is_available(np))
+ continue;
+
+ err = airoha_alloc_gdm_port(eth, np);
+ if (err) {
+ of_node_put(np);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ airoha_hw_cleanup(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+
+ if (port && port->dev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(port->dev);
+ }
+ free_netdev(eth->napi_dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return err;
+}
+
+static void airoha_remove(struct platform_device *pdev)
+{
+ struct airoha_eth *eth = platform_get_drvdata(pdev);
+ int i;
+
+ airoha_hw_cleanup(eth);
+ for (i = 0; i < ARRAY_SIZE(eth->ports); i++) {
+ struct airoha_gdm_port *port = eth->ports[i];
+
+ if (!port)
+ continue;
+
+ airoha_dev_stop(port->dev);
+ unregister_netdev(port->dev);
+ }
+ free_netdev(eth->napi_dev);
+
+ platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id of_airoha_match[] = {
+ { .compatible = "airoha,en7581-eth" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver airoha_driver = {
+ .probe = airoha_probe,
+ .remove_new = airoha_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = of_airoha_match,
+ },
+};
+module_platform_driver(airoha_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
+MODULE_DESCRIPTION("Ethernet driver for Airoha SoC");
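The GDM MIB block above splits each 64-bit counter across an _H/_L register pair, and the driver publishes the accumulated values under a u64_stats seqcount so readers on 32-bit hosts see a consistent snapshot. Below is a minimal sketch of both patterns; read_reg() and struct counters are hypothetical stand-ins for airoha_fe_rr() and struct airoha_hw_stats, only the accumulation/snapshot idiom itself is taken from the code above.

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Hypothetical register reader standing in for airoha_fe_rr(). */
u32 read_reg(u32 addr);

struct counters {
	struct u64_stats_sync syncp;	/* init with u64_stats_init() */
	u64 rx_ok_pkts;
};

/* Writer side: fold the _H/_L register pair into one u64. The high half
 * is read first; the hardware counters are cleared right after sampling,
 * which bounds the window for a low-word rollover.
 */
static void sample_rx_ok(struct counters *c, u32 hi_addr, u32 lo_addr)
{
	u64_stats_update_begin(&c->syncp);
	c->rx_ok_pkts += (u64)read_reg(hi_addr) << 32;
	c->rx_ok_pkts += read_reg(lo_addr);
	u64_stats_update_end(&c->syncp);
}

/* Reader side: retry until no writer interleaved, as in
 * airoha_dev_get_stats64() above.
 */
static u64 snapshot_rx_ok(struct counters *c)
{
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin(&c->syncp);
		val = c->rx_ok_pkts;
	} while (u64_stats_fetch_retry(&c->syncp, start));

	return val;
}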
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index caa13b9cedff..16ca427cf4c3 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -80,7 +80,9 @@ static const struct mtk_reg_map mtk_reg_map = {
.fq_blen = 0x1b2c,
},
.gdm1_cnt = 0x2400,
- .gdma_to_ppe = 0x4444,
+ .gdma_to_ppe = {
+ [0] = 0x4444,
+ },
.ppe_base = 0x0c00,
.wdma_base = {
[0] = 0x2800,
@@ -110,16 +112,16 @@ static const struct mtk_reg_map mt7986_reg_map = {
.tx_irq_mask = 0x461c,
.tx_irq_status = 0x4618,
.pdma = {
- .rx_ptr = 0x6100,
- .rx_cnt_cfg = 0x6104,
- .pcrx_ptr = 0x6108,
- .glo_cfg = 0x6204,
- .rst_idx = 0x6208,
- .delay_irq = 0x620c,
- .irq_status = 0x6220,
- .irq_mask = 0x6228,
- .adma_rx_dbg0 = 0x6238,
- .int_grp = 0x6250,
+ .rx_ptr = 0x4100,
+ .rx_cnt_cfg = 0x4104,
+ .pcrx_ptr = 0x4108,
+ .glo_cfg = 0x4204,
+ .rst_idx = 0x4208,
+ .delay_irq = 0x420c,
+ .irq_status = 0x4220,
+ .irq_mask = 0x4228,
+ .adma_rx_dbg0 = 0x4238,
+ .int_grp = 0x4250,
},
.qdma = {
.qtx_cfg = 0x4400,
@@ -144,7 +146,10 @@ static const struct mtk_reg_map mt7986_reg_map = {
.tx_sch_rate = 0x4798,
},
.gdm1_cnt = 0x1c00,
- .gdma_to_ppe = 0x3333,
+ .gdma_to_ppe = {
+ [0] = 0x3333,
+ [1] = 0x4444,
+ },
.ppe_base = 0x2000,
.wdma_base = {
[0] = 0x4800,
@@ -192,7 +197,11 @@ static const struct mtk_reg_map mt7988_reg_map = {
.tx_sch_rate = 0x4798,
},
.gdm1_cnt = 0x1c00,
- .gdma_to_ppe = 0x3333,
+ .gdma_to_ppe = {
+ [0] = 0x3333,
+ [1] = 0x4444,
+ [2] = 0xcccc,
+ },
.ppe_base = 0x2000,
.wdma_base = {
[0] = 0x4800,
@@ -1107,7 +1116,7 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
}
@@ -1131,51 +1140,57 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
dma_addr_t phy_ring_tail;
- int cnt = MTK_QDMA_RING_SIZE;
+ int cnt = soc->tx.fq_dma_size;
dma_addr_t dma_addr;
- int i;
+ int i, j, len;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
eth->scratch_ring = eth->sram_base;
else
eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
- cnt * soc->txrx.txd_size,
+ cnt * soc->tx.desc_size,
&eth->phy_scratch_ring,
GFP_KERNEL);
+
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
- eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
- if (unlikely(!eth->scratch_head))
- return -ENOMEM;
+ phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
- dma_addr = dma_map_single(eth->dma_dev,
- eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
- return -ENOMEM;
+ for (j = 0; j < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); j++) {
+ len = min_t(int, cnt - j * MTK_FQ_DMA_LENGTH, MTK_FQ_DMA_LENGTH);
+ eth->scratch_head[j] = kcalloc(len, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
- phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
+ if (unlikely(!eth->scratch_head[j]))
+ return -ENOMEM;
- for (i = 0; i < cnt; i++) {
- dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
- struct mtk_tx_dma_v2 *txd;
+ dma_addr = dma_map_single(eth->dma_dev,
+ eth->scratch_head[j], len * MTK_QDMA_PAGE_SIZE,
+ DMA_FROM_DEVICE);
- txd = eth->scratch_ring + i * soc->txrx.txd_size;
- txd->txd1 = addr;
- if (i < cnt - 1)
- txd->txd2 = eth->phy_scratch_ring +
- (i + 1) * soc->txrx.txd_size;
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ return -ENOMEM;
- txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
- if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
- txd->txd3 |= TX_DMA_PREP_ADDR64(addr);
- txd->txd4 = 0;
- if (mtk_is_netsys_v2_or_greater(eth)) {
- txd->txd5 = 0;
- txd->txd6 = 0;
- txd->txd7 = 0;
- txd->txd8 = 0;
+ for (i = 0; i < len; i++) {
+ struct mtk_tx_dma_v2 *txd;
+
+ txd = eth->scratch_ring + (j * MTK_FQ_DMA_LENGTH + i) * soc->tx.desc_size;
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ if (j * MTK_FQ_DMA_LENGTH + i < cnt - 1)
+ txd->txd2 = eth->phy_scratch_ring +
+ (j * MTK_FQ_DMA_LENGTH + i + 1) * soc->tx.desc_size;
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
+ txd->txd3 |= TX_DMA_PREP_ADDR64(dma_addr + i * MTK_QDMA_PAGE_SIZE);
+
+ txd->txd4 = 0;
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
}
}
@@ -1416,7 +1431,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (itxd == ring->last_free)
return -ENOMEM;
- itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
memset(itx_buf, 0, sizeof(*itx_buf));
txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
@@ -1457,7 +1472,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
txd_info.size = min_t(unsigned int, frag_size,
- soc->txrx.dma_max_len);
+ soc->tx.dma_max_len);
txd_info.qid = queue;
txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
!(frag_size - txd_info.size);
@@ -1470,7 +1485,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
mtk_tx_set_dma_desc(dev, txd, &txd_info);
tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
if (new_desc)
memset(tx_buf, 0, sizeof(*tx_buf));
tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
@@ -1513,7 +1528,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
} else {
int next_idx;
- next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
ring->dma_size);
mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
}
@@ -1522,7 +1537,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
/* unmap dma */
mtk_tx_unmap(eth, tx_buf, NULL, false);
@@ -1547,7 +1562,7 @@ static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
nfrags += DIV_ROUND_UP(skb_frag_size(frag),
- eth->soc->txrx.dma_max_len);
+ eth->soc->tx.dma_max_len);
}
} else {
nfrags += skb_shinfo(skb)->nr_frags;
@@ -1654,7 +1669,7 @@ static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
ring = &eth->rx_ring[i];
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
if (rxd->rxd2 & RX_DMA_DONE) {
ring->calc_idx_update = true;
return ring;
@@ -1710,7 +1725,7 @@ static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
if (IS_ERR(pp))
return pp;
- err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
+ err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id,
eth->rx_napi.napi_id, PAGE_SIZE);
if (err < 0)
goto err_free_pp;
@@ -1822,7 +1837,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
}
htxd = txd;
- tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
htx_buf = tx_buf;
@@ -1841,7 +1856,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
goto unmap;
tx_buf = mtk_desc_to_tx_buf(ring, txd,
- soc->txrx.txd_size);
+ soc->tx.desc_size);
memset(tx_buf, 0, sizeof(*tx_buf));
n_desc++;
}
@@ -1879,7 +1894,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
} else {
int idx;
- idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ idx = txd_to_idx(ring, txd, soc->tx.desc_size);
mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
MT7628_TX_CTX_IDX0);
}
@@ -1890,7 +1905,7 @@ static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
unmap:
while (htxd != txd) {
- tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
mtk_tx_unmap(eth, tx_buf, NULL, false);
htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
@@ -2009,6 +2024,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
dma_addr_t dma_addr = DMA_MAPPING_ERROR;
+ int ppe_idx = 0;
while (done < budget) {
unsigned int pktlen, *rxdcsum;
@@ -2021,14 +2037,14 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto rx_done;
idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
- rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + idx * eth->soc->rx.desc_size;
data = ring->data[idx];
if (!mtk_rx_get_desc(eth, &trxd, rxd))
break;
/* find out which mac the packet comes from. values start at 1 */
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
switch (val) {
@@ -2052,6 +2068,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
goto release_desc;
netdev = eth->netdev[mac];
+ ppe_idx = eth->mac[mac]->ppe_idx;
if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
goto release_desc;
@@ -2140,7 +2157,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
skb->dev = netdev;
bytes += skb->len;
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
if (hash != MTK_RXD5_FOE_ENTRY)
@@ -2156,7 +2173,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
rxdcsum = &trxd.rxd4;
}
- if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
+ if (*rxdcsum & eth->soc->rx.dma_l4_valid)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb_checksum_none_assert(skb);
@@ -2175,7 +2192,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
}
if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+ mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash);
skb_record_rx_queue(skb, 0);
napi_gro_receive(napi, skb);
@@ -2280,7 +2297,7 @@ static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
break;
tx_buf = mtk_desc_to_tx_buf(ring, desc,
- eth->soc->txrx.txd_size);
+ eth->soc->tx.desc_size);
if (!tx_buf->data)
break;
@@ -2331,7 +2348,7 @@ static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
}
mtk_tx_unmap(eth, tx_buf, &bq, true);
- desc = ring->dma + cpu * eth->soc->txrx.txd_size;
+ desc = ring->dma + cpu * eth->soc->tx.desc_size;
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -2421,7 +2438,7 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
do {
int rx_done;
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ mtk_w32(eth, eth->soc->rx.irq_done_mask,
reg_map->pdma.irq_status);
rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
rx_done_total += rx_done;
@@ -2437,10 +2454,10 @@ static int mtk_napi_rx(struct napi_struct *napi, int budget)
return budget;
} while (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask);
+ eth->soc->rx.irq_done_mask);
if (napi_complete_done(napi, rx_done_total))
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
return rx_done_total;
}
@@ -2449,7 +2466,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
{
const struct mtk_soc_data *soc = eth->soc;
struct mtk_tx_ring *ring = &eth->tx_ring;
- int i, sz = soc->txrx.txd_size;
+ int i, sz = soc->tx.desc_size;
struct mtk_tx_dma_v2 *txd;
int ring_size;
u32 ofs, val;
@@ -2457,7 +2474,7 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
ring_size = MTK_QDMA_RING_SIZE;
else
- ring_size = MTK_DMA_SIZE;
+ ring_size = soc->tx.dma_size;
ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
GFP_KERNEL);
@@ -2465,8 +2482,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
goto no_tx_mem;
if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
- ring->dma = eth->sram_base + ring_size * sz;
- ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
+ ring->dma = eth->sram_base + soc->tx.fq_dma_size * sz;
+ ring->phys = eth->phy_scratch_ring + soc->tx.fq_dma_size * (dma_addr_t)sz;
} else {
ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
&ring->phys, GFP_KERNEL);
@@ -2572,14 +2589,14 @@ static void mtk_tx_clean(struct mtk_eth *eth)
}
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
if (ring->dma_pdma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * soc->txrx.txd_size,
+ ring->dma_size * soc->tx.desc_size,
ring->dma_pdma, ring->phys_pdma);
ring->dma_pdma = NULL;
}
@@ -2588,6 +2605,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
{
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ const struct mtk_soc_data *soc = eth->soc;
struct mtk_rx_ring *ring;
int rx_data_len, rx_dma_size, tx_ring_size;
int i;
@@ -2595,7 +2613,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
tx_ring_size = MTK_QDMA_RING_SIZE;
else
- tx_ring_size = MTK_DMA_SIZE;
+ tx_ring_size = soc->tx.dma_size;
if (rx_flag == MTK_RX_FLAGS_QDMA) {
if (ring_no)
@@ -2610,7 +2628,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rx_dma_size = MTK_HW_LRO_DMA_SIZE;
} else {
rx_data_len = ETH_DATA_LEN;
- rx_dma_size = MTK_DMA_SIZE;
+ rx_dma_size = soc->rx.dma_size;
}
ring->frag_size = mtk_max_frag_size(rx_data_len);
@@ -2634,15 +2652,15 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
rx_flag != MTK_RX_FLAGS_NORMAL) {
ring->dma = dma_alloc_coherent(eth->dma_dev,
- rx_dma_size * eth->soc->txrx.rxd_size,
- &ring->phys, GFP_KERNEL);
+ rx_dma_size * eth->soc->rx.desc_size,
+ &ring->phys, GFP_KERNEL);
} else {
struct mtk_tx_ring *tx_ring = &eth->tx_ring;
ring->dma = tx_ring->dma + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
ring->phys = tx_ring->phys + tx_ring_size *
- eth->soc->txrx.txd_size * (ring_no + 1);
+ eth->soc->tx.desc_size * (ring_no + 1);
}
if (!ring->dma)
@@ -2653,7 +2671,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
dma_addr_t dma_addr;
void *data;
- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (ring->page_pool) {
data = mtk_page_pool_get_buff(ring->page_pool,
&dma_addr, GFP_KERNEL);
@@ -2690,7 +2708,7 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
rxd->rxd3 = 0;
rxd->rxd4 = 0;
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
rxd->rxd5 = 0;
rxd->rxd6 = 0;
rxd->rxd7 = 0;
@@ -2744,7 +2762,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
if (!ring->data[i])
continue;
- rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ rxd = ring->dma + i * eth->soc->rx.desc_size;
if (!rxd->rxd1)
continue;
@@ -2761,7 +2779,7 @@ static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_
if (!in_sram && ring->dma) {
dma_free_coherent(eth->dma_dev,
- ring->dma_size * eth->soc->txrx.rxd_size,
+ ring->dma_size * eth->soc->rx.desc_size,
ring->dma, ring->phys);
ring->dma = NULL;
}
@@ -3124,7 +3142,7 @@ static void mtk_dma_free(struct mtk_eth *eth)
netdev_reset_queue(eth->netdev[i]);
if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
dma_free_coherent(eth->dma_dev,
- MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
+ MTK_QDMA_RING_SIZE * soc->tx.desc_size,
eth->scratch_ring, eth->phy_scratch_ring);
eth->scratch_ring = NULL;
eth->phy_scratch_ring = 0;
@@ -3139,7 +3157,10 @@ static void mtk_dma_free(struct mtk_eth *eth)
mtk_rx_clean(eth, &eth->rx_ring[i], false);
}
- kfree(eth->scratch_head);
+ for (i = 0; i < DIV_ROUND_UP(soc->tx.fq_dma_size, MTK_FQ_DMA_LENGTH); i++) {
+ kfree(eth->scratch_head[i]);
+ eth->scratch_head[i] = NULL;
+ }
}
static bool mtk_hw_reset_check(struct mtk_eth *eth)
@@ -3174,7 +3195,7 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
__napi_schedule(&eth->rx_napi);
}
@@ -3200,9 +3221,9 @@ static irqreturn_t mtk_handle_irq(int irq, void *_eth)
const struct mtk_reg_map *reg_map = eth->soc->reg_map;
if (mtk_r32(eth, reg_map->pdma.irq_mask) &
- eth->soc->txrx.rx_irq_done_mask) {
+ eth->soc->rx.irq_done_mask) {
if (mtk_r32(eth, reg_map->pdma.irq_status) &
- eth->soc->txrx.rx_irq_done_mask)
+ eth->soc->rx.irq_done_mask)
mtk_handle_irq_rx(irq, _eth);
}
if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
@@ -3220,10 +3241,10 @@ static void mtk_poll_controller(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
mtk_handle_irq_rx(eth->irq[2], dev);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
}
#endif
@@ -3266,37 +3287,27 @@ static int mtk_start_dma(struct mtk_eth *eth)
return 0;
}
-static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
+static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config)
{
- int i;
+ u32 val;
if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
return;
- for (i = 0; i < MTK_MAX_DEVS; i++) {
- u32 val;
-
- if (!eth->netdev[i])
- continue;
-
- val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+ val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id));
- /* default setup the forward port to send frame to PDMA */
- val &= ~0xffff;
+ /* default setup the forward port to send frame to PDMA */
+ val &= ~0xffff;
- /* Enable RX checksum */
- val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
+ /* Enable RX checksum */
+ val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
- val |= config;
+ val |= config;
- if (netdev_uses_dsa(eth->netdev[i]))
- val |= MTK_GDMA_SPECIAL_TAG;
+ if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id]))
+ val |= MTK_GDMA_SPECIAL_TAG;
- mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
- }
- /* Reset and enable PSE */
- mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
- mtk_w32(eth, 0, MTK_RST_GL);
+ mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id));
}
@@ -3356,7 +3367,10 @@ static int mtk_open(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
- int i, err;
+ struct mtk_mac *target_mac;
+ int i, err, ppe_num;
+
+ ppe_num = eth->soc->ppe_num;
err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
if (err) {
@@ -3380,18 +3394,38 @@ static int mtk_open(struct net_device *dev)
for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
mtk_ppe_start(eth->ppe[i]);
- gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
- : MTK_GDMA_TO_PDMA;
- mtk_gdm_config(eth, gdm_config);
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+
+ target_mac = netdev_priv(eth->netdev[i]);
+ if (!soc->offload_version) {
+ target_mac->ppe_idx = 0;
+ gdm_config = MTK_GDMA_TO_PDMA;
+ } else if (ppe_num >= 3 && target_mac->id == 2) {
+ target_mac->ppe_idx = 2;
+ gdm_config = soc->reg_map->gdma_to_ppe[2];
+ } else if (ppe_num >= 2 && target_mac->id == 1) {
+ target_mac->ppe_idx = 1;
+ gdm_config = soc->reg_map->gdma_to_ppe[1];
+ } else {
+ target_mac->ppe_idx = 0;
+ gdm_config = soc->reg_map->gdma_to_ppe[0];
+ }
+ mtk_gdm_config(eth, target_mac->id, gdm_config);
+ }
+ /* Reset and enable PSE */
+ mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+ mtk_w32(eth, 0, MTK_RST_GL);
napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
refcount_set(&eth->dma_refcnt, 1);
- }
- else
+ } else {
refcount_inc(&eth->dma_refcnt);
+ }
phylink_start(mac->phylink);
netif_tx_start_all_queues(dev);
@@ -3468,10 +3502,11 @@ static int mtk_stop(struct net_device *dev)
if (!refcount_dec_and_test(&eth->dma_refcnt))
return 0;
- mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+ for (i = 0; i < MTK_MAX_DEVS; i++)
+ mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
- mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
@@ -3893,7 +3928,7 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
else
mtk_hw_reset(eth);
- if (mtk_is_netsys_v2_or_greater(eth)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
/* Set FE to PDMAv2 if necessary */
val = mtk_r32(eth, MTK_FE_GLO_MISC);
mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
@@ -3947,9 +3982,9 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
/* FE int grouping */
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
- mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
+ mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
if (mtk_is_netsys_v3_or_greater(eth)) {
@@ -4055,7 +4090,7 @@ static int mtk_change_mtu(struct net_device *dev, int new_mtu)
}
mtk_set_mcr_max_rx(mac, length);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -4427,6 +4462,20 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return ret;
}
+static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ phylink_ethtool_get_pauseparam(mac->phylink, pause);
+}
+
+static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return phylink_ethtool_set_pauseparam(mac->phylink, pause);
+}
+
static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
struct net_device *sb_dev)
{
@@ -4455,8 +4504,10 @@ static const struct ethtool_ops mtk_ethtool_ops = {
.get_strings = mtk_get_strings,
.get_sset_count = mtk_get_sset_count,
.get_ethtool_stats = mtk_get_ethtool_stats,
+ .get_pauseparam = mtk_get_pauseparam,
+ .set_pauseparam = mtk_set_pauseparam,
.get_rxnfc = mtk_get_rxnfc,
- .set_rxnfc = mtk_set_rxnfc,
+ .set_rxnfc = mtk_set_rxnfc,
};
static const struct net_device_ops mtk_netdev_ops = {
@@ -4947,23 +4998,24 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
- u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
+ u8 ppe_num = eth->soc->ppe_num;
- num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
- for (i = 0; i < num_ppe; i++) {
- u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+ ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num);
+ for (i = 0; i < ppe_num; i++) {
+ u32 ppe_addr = eth->soc->reg_map->ppe_base;
+ ppe_addr += (i == 2 ? 0xc00 : i * 0x400);
eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
if (!eth->ppe[i]) {
err = -ENOMEM;
goto err_deinit_ppe;
}
- }
+ err = mtk_eth_offload_init(eth, i);
- err = mtk_eth_offload_init(eth);
- if (err)
- goto err_deinit_ppe;
+ if (err)
+ goto err_deinit_ppe;
+ }
}
for (i = 0; i < MTK_MAX_DEVS; i++) {
@@ -4983,9 +5035,14 @@ static int mtk_probe(struct platform_device *pdev)
/* we run 2 devices on the same DMA ring so we need a dummy device
* for NAPI to work
*/
- init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
+ eth->dummy_dev = alloc_netdev_dummy(0);
+ if (!eth->dummy_dev) {
+ err = -ENOMEM;
+ dev_err(eth->dev, "failed to allocate dummy device\n");
+ goto err_unreg_netdev;
+ }
+ netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
+ netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
platform_set_drvdata(pdev, eth);
schedule_delayed_work(&eth->reset.monitor_work,
@@ -4993,6 +5050,8 @@ static int mtk_probe(struct platform_device *pdev)
return 0;
+err_unreg_netdev:
+ mtk_unreg_dev(eth);
err_deinit_ppe:
mtk_ppe_deinit(eth);
mtk_mdio_cleanup(eth);
@@ -5029,6 +5088,7 @@ static void mtk_remove(struct platform_device *pdev)
netif_napi_del(&eth->tx_napi);
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
+ free_netdev(eth->dummy_dev);
mtk_mdio_cleanup(eth);
}
@@ -5039,11 +5099,18 @@ static const struct mtk_soc_data mt2701_data = {
.required_clks = MT7623_CLKS_BITMAP,
.required_pctl = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5057,13 +5124,21 @@ static const struct mtk_soc_data mt7621_data = {
.required_pctl = false,
.version = 1,
.offload_version = 1,
+ .ppe_num = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5078,14 +5153,22 @@ static const struct mtk_soc_data mt7622_data = {
.required_pctl = false,
.version = 1,
.offload_version = 2,
+ .ppe_num = 1,
.hash_offset = 2,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5099,14 +5182,22 @@ static const struct mtk_soc_data mt7623_data = {
.required_pctl = true,
.version = 1,
.offload_version = 1,
+ .ppe_num = 1,
.hash_offset = 2,
.foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
.disable_pll_modes = true,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5121,11 +5212,18 @@ static const struct mtk_soc_data mt7629_data = {
.required_pctl = false,
.has_accounting = true,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_size = MTK_DMA_SIZE(2K),
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
},
@@ -5140,16 +5238,24 @@ static const struct mtk_soc_data mt7981_data = {
.required_pctl = false,
.version = 2,
.offload_version = 2,
+ .ppe_num = 2,
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5162,16 +5268,24 @@ static const struct mtk_soc_data mt7986_data = {
.required_pctl = false,
.version = 2,
.offload_version = 2,
+ .ppe_num = 2,
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5184,16 +5298,24 @@ static const struct mtk_soc_data mt7988_data = {
.required_pctl = false,
.version = 3,
.offload_version = 2,
+ .ppe_num = 3,
.hash_offset = 4,
.has_accounting = true,
.foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma_v2),
- .rxd_size = sizeof(struct mtk_rx_dma_v2),
- .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma_v2),
.dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
.dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
+ .fq_dma_size = MTK_DMA_SIZE(4K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma_v2),
+ .irq_done_mask = MTK_RX_DONE_INT_V2,
+ .dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
@@ -5204,13 +5326,19 @@ static const struct mtk_soc_data rt5350_data = {
.required_clks = MT7628_CLKS_BITMAP,
.required_pctl = false,
.version = 1,
- .txrx = {
- .txd_size = sizeof(struct mtk_tx_dma),
- .rxd_size = sizeof(struct mtk_rx_dma),
- .rx_irq_done_mask = MTK_RX_DONE_INT,
- .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .tx = {
+ .desc_size = sizeof(struct mtk_tx_dma),
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
+ },
+ .rx = {
+ .desc_size = sizeof(struct mtk_rx_dma),
+ .irq_done_mask = MTK_RX_DONE_INT,
+ .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
.dma_max_len = MTK_TX_DMA_BUF_LEN,
.dma_len_offset = 16,
+ .dma_size = MTK_DMA_SIZE(2K),
},
};
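The reworked mtk_init_fq_dma() above allocates the forwarding-queue scratch area in chunks of at most MTK_FQ_DMA_LENGTH pages, with one kcalloc() and one streaming DMA mapping per chunk. A standalone sketch of just the chunking arithmetic, with the chunk length inlined as an assumption:

#include <stdio.h>

#define FQ_DMA_LENGTH 2048	/* pages per chunk, as MTK_FQ_DMA_LENGTH */

/* Mirrors the loop bounds above:
 *   chunks = DIV_ROUND_UP(cnt, FQ_DMA_LENGTH)
 *   len    = min(cnt - j * FQ_DMA_LENGTH, FQ_DMA_LENGTH)
 */
static void plan_chunks(int cnt)
{
	int chunks = (cnt + FQ_DMA_LENGTH - 1) / FQ_DMA_LENGTH;

	for (int j = 0; j < chunks; j++) {
		int len = cnt - j * FQ_DMA_LENGTH;

		if (len > FQ_DMA_LENGTH)
			len = FQ_DMA_LENGTH;
		printf("chunk %d: %d pages\n", j, len);
	}
}

int main(void)
{
	plan_chunks(2048);	/* fq_dma_size of 2K: one chunk of 2048 */
	plan_chunks(4096);	/* mt7988 fq_dma_size of 4K: two chunks of 2048 */
	return 0;
}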
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 9ae3b8a71d0e..eb1708b43aa3 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -32,7 +32,9 @@
#define MTK_TX_DMA_BUF_LEN 0x3fff
#define MTK_TX_DMA_BUF_LEN_V2 0xffff
#define MTK_QDMA_RING_SIZE 2048
-#define MTK_DMA_SIZE 512
+#define MTK_DMA_SIZE(x) (SZ_##x)
+#define MTK_FQ_DMA_HEAD 32
+#define MTK_FQ_DMA_LENGTH 2048
#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
#define MTK_DMA_DUMMY_DESC 0xffffffff
@@ -327,8 +329,8 @@
/* QDMA descriptor txd3 */
#define TX_DMA_OWNER_CPU BIT(31)
#define TX_DMA_LS0 BIT(30)
-#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->tx.dma_max_len)
#define TX_DMA_SWC BIT(14)
#define TX_DMA_PQID GENMASK(3, 0)
#define TX_DMA_ADDR64_MASK GENMASK(3, 0)
@@ -348,8 +350,8 @@
/* QDMA descriptor rxd2 */
#define RX_DMA_DONE BIT(31)
#define RX_DMA_LSO BIT(30)
-#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
-#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
#define RX_DMA_VTAG BIT(15)
#define RX_DMA_ADDR64_MASK GENMASK(3, 0)
#if IS_ENABLED(CONFIG_64BIT)
@@ -1130,7 +1132,7 @@ struct mtk_reg_map {
u32 tx_sch_rate; /* tx scheduler rate control registers */
} qdma;
u32 gdm1_cnt;
- u32 gdma_to_ppe;
+ u32 gdma_to_ppe[3];
u32 ppe_base;
u32 wdma_base[3];
u32 pse_iq_sta;
@@ -1153,10 +1155,9 @@ struct mtk_reg_map {
* @foe_entry_size Foe table entry size.
* @has_accounting Bool indicating support for accounting of
* offloaded flows.
- * @txd_size Tx DMA descriptor size.
- * @rxd_size Rx DMA descriptor size.
- * @rx_irq_done_mask Rx irq done register mask.
- * @rx_dma_l4_valid Rx DMA valid register mask.
+ * @desc_size Tx/Rx DMA descriptor size.
+ * @irq_done_mask Rx irq done register mask.
+ * @dma_l4_valid Rx DMA valid register mask.
* @dma_max_len Max DMA tx/rx buffer length.
* @dma_len_offset Tx/Rx DMA length field offset.
*/
@@ -1169,18 +1170,26 @@ struct mtk_soc_data {
u8 offload_version;
u8 hash_offset;
u8 version;
+ u8 ppe_num;
u16 foe_entry_size;
netdev_features_t hw_features;
bool has_accounting;
bool disable_pll_modes;
struct {
- u32 txd_size;
- u32 rxd_size;
- u32 rx_irq_done_mask;
- u32 rx_dma_l4_valid;
+ u32 desc_size;
u32 dma_max_len;
u32 dma_len_offset;
- } txrx;
+ u32 dma_size;
+ u32 fq_dma_size;
+ } tx;
+ struct {
+ u32 desc_size;
+ u32 irq_done_mask;
+ u32 dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ u32 dma_size;
+ } rx;
};
#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
@@ -1242,7 +1251,7 @@ struct mtk_eth {
spinlock_t page_lock;
spinlock_t tx_irq_lock;
spinlock_t rx_irq_lock;
- struct net_device dummy_dev;
+ struct net_device *dummy_dev;
struct net_device *netdev[MTK_MAX_DEVS];
struct mtk_mac *mac[MTK_MAX_DEVS];
int irq[3];
@@ -1261,7 +1270,7 @@ struct mtk_eth {
struct napi_struct rx_napi;
void *scratch_ring;
dma_addr_t phy_scratch_ring;
- void *scratch_head;
+ void *scratch_head[MTK_FQ_DMA_HEAD];
struct clk *clks[MTK_CLK_MAX];
struct mii_bus *mii_bus;
@@ -1286,7 +1295,7 @@ struct mtk_eth {
struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS];
- struct mtk_ppe *ppe[2];
+ struct mtk_ppe *ppe[3];
struct rhashtable flow_table;
struct bpf_prog __rcu *prog;
@@ -1311,6 +1320,7 @@ struct mtk_eth {
struct mtk_mac {
int id;
phy_interface_t interface;
+ u8 ppe_idx;
int speed;
struct device_node *of_node;
struct phylink *phylink;
@@ -1432,7 +1442,7 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
-int mtk_eth_offload_init(struct mtk_eth *eth);
+int mtk_eth_offload_init(struct mtk_eth *eth, u8 id);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
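The new MTK_DMA_SIZE() macro in the header pastes its argument onto the SZ_ prefix, so ring sizes are written in terms of the kernel's linux/sizes.h constants. The expansion, spelled out:

#include <linux/types.h>
#include <linux/sizes.h>

#define MTK_DMA_SIZE(x)	(SZ_##x)

/* Token pasting against linux/sizes.h:
 *   MTK_DMA_SIZE(2K) -> (SZ_2K) -> 2048
 *   MTK_DMA_SIZE(4K) -> (SZ_4K) -> 4096
 */
static const u32 ring_entries = MTK_DMA_SIZE(2K);
static const u32 fq_entries_mt7988 = MTK_DMA_SIZE(4K);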
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
index 6ce0db3a1a92..0acee405a749 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -580,7 +580,7 @@ mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
idle = cur_idle;
entry->data.ib1 &= ~ib1_ts_mask;
- entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
+ entry->data.ib1 |= ib1 & ib1_ts_mask;
}
}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
index 691806bca372..223f709e2704 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -8,7 +8,7 @@
#include <linux/bitfield.h>
#include <linux/rhashtable.h>
-#define MTK_PPE_ENTRIES_SHIFT 3
+#define MTK_PPE_ENTRIES_SHIFT 4
#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
#define MTK_PPE_WAIT_TIMEOUT_US 1000000
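Bumping MTK_PPE_ENTRIES_SHIFT from 3 to 4 doubles the PPE flow table; the macros compose as follows:

/* With the shift raised from 3 to 4 the table doubles:
 *   MTK_PPE_ENTRIES   = 1024 << 4 = 16384  (was 1024 << 3 = 8192)
 *   MTK_PPE_HASH_MASK = 16384 - 1 = 0x3fff (was 0x1fff)
 */
#define MTK_PPE_ENTRIES_SHIFT	4
#define MTK_PPE_ENTRIES		(1024 << MTK_PPE_ENTRIES_SHIFT)
#define MTK_PPE_HASH_MASK	(MTK_PPE_ENTRIES - 1)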
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index fbb5e9d5af13..f20bb390df3a 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -245,10 +245,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
int ppe_index)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct net_device *idev = NULL, *odev = NULL;
struct flow_action_entry *act;
struct mtk_flow_data data = {};
struct mtk_foe_entry foe;
- struct net_device *odev = NULL;
struct mtk_flow_entry *entry;
int offload_type = 0;
int wed_index = -1;
@@ -264,6 +264,17 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
struct flow_match_meta match;
flow_rule_match_meta(rule, &match);
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex);
+ if (idev && idev->netdev_ops == eth->netdev[0]->netdev_ops) {
+ struct mtk_mac *mac = netdev_priv(idev);
+
+ if (WARN_ON(mac->ppe_idx >= eth->soc->ppe_num))
+ return -EINVAL;
+
+ ppe_index = mac->ppe_idx;
+ }
+ }
} else {
return -EOPNOTSUPP;
}
@@ -273,6 +284,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
} else {
return -EOPNOTSUPP;
}
@@ -633,7 +648,9 @@ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
-int mtk_eth_offload_init(struct mtk_eth *eth)
+int mtk_eth_offload_init(struct mtk_eth *eth, u8 id)
{
+ if (!eth->ppe[id] || !eth->ppe[id]->foe_table)
+ return 0;
return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 31aebeb2e285..25989c79c92e 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -1524,6 +1524,7 @@ static int mtk_star_probe(struct platform_device *pdev)
{
struct device_node *of_node;
struct mtk_star_priv *priv;
+ struct phy_device *phydev;
struct net_device *ndev;
struct device *dev;
void __iomem *base;
@@ -1649,6 +1650,12 @@ static int mtk_star_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
+ phydev = of_phy_find_device(priv->phy_node);
+ if (phydev) {
+ phydev->mac_managed_pm = true;
+ put_device(&phydev->mdio.dev);
+ }
+
return devm_register_netdev(dev, ndev);
}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
index 61334a71058c..e212a4ba9275 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -2666,14 +2666,15 @@ mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_pri
{
struct mtk_wed_flow_block_priv *priv = cb_priv;
struct flow_cls_offload *cls = type_data;
- struct mtk_wed_hw *hw = priv->hw;
+ struct mtk_wed_hw *hw = NULL;
- if (!tc_can_offload(priv->dev))
+ if (!priv || !tc_can_offload(priv->dev))
return -EOPNOTSUPP;
if (type != TC_SETUP_CLSFLOWER)
return -EOPNOTSUPP;
+ hw = priv->hw;
return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
}
@@ -2729,6 +2730,7 @@ mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
flow_block_cb_remove(block_cb, f);
list_del(&block_cb->driver_list);
kfree(block_cb->cb_priv);
+ block_cb->cb_priv = NULL;
}
return 0;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 1184ac5751e1..461cc2c79c71 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -126,6 +126,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq_idx = cq_idx % priv->rx_ring_num;
rx_cq = priv->rx_cq[cq_idx];
cq->vector = rx_cq->vector;
+ irq = mlx4_eq_get_irq(mdev->dev, cq->vector);
}
if (cq->type == RX)
@@ -142,18 +143,23 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
if (err)
goto free_eq;
+ cq->cq_idx = cq_idx;
cq->mcq.event = mlx4_en_cq_event;
switch (cq->type) {
case TX:
cq->mcq.comp = mlx4_en_tx_irq;
netif_napi_add_tx(cq->dev, &cq->napi, mlx4_en_poll_tx_cq);
+ netif_napi_set_irq(&cq->napi, irq);
napi_enable(&cq->napi);
+ netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_TX, &cq->napi);
break;
case RX:
cq->mcq.comp = mlx4_en_rx_irq;
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq);
+ netif_napi_set_irq(&cq->napi, irq);
napi_enable(&cq->napi);
+ netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_RX, &cq->napi);
break;
case TX_XDP:
/* nothing regarding napi, it's shared with rx ring */
@@ -189,6 +195,14 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
{
if (cq->type != TX_XDP) {
+ enum netdev_queue_type qtype;
+
+ if (cq->type == RX)
+ qtype = NETDEV_QUEUE_TYPE_RX;
+ else
+ qtype = NETDEV_QUEUE_TYPE_TX;
+
+ netif_queue_set_napi(cq->dev, cq->cq_idx, qtype, NULL);
napi_disable(&cq->napi);
netif_napi_del(&cq->napi);
}
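The en_cq.c hunk ties each NAPI context to its IRQ with netif_napi_set_irq() and publishes the queue-to-NAPI mapping with netif_queue_set_napi(), clearing that mapping before the NAPI is disabled on teardown. A minimal sketch of the pairing for an RX ring; the demo_* names are hypothetical and only the call ordering reflects the hunk above (netif_queue_set_napi() expects the RTNL lock to be held):

#include <linux/netdevice.h>

/* Hypothetical poll handler; the add/set_irq/set_napi ordering is the point. */
static int demo_poll(struct napi_struct *napi, int budget)
{
	/* ... process up to 'budget' packets, then napi_complete_done() ... */
	return 0;
}

static void demo_rx_ring_open(struct net_device *dev,
			      struct napi_struct *napi,
			      unsigned int qidx, int irq)
{
	netif_napi_add(dev, napi, demo_poll);
	netif_napi_set_irq(napi, irq);	/* record the backing IRQ */
	napi_enable(napi);
	/* publish the queue<->NAPI mapping to the netdev netlink family */
	netif_queue_set_napi(dev, qidx, NETDEV_QUEUE_TYPE_RX, napi);
}

static void demo_rx_ring_close(struct net_device *dev,
			       struct napi_struct *napi,
			       unsigned int qidx)
{
	/* unpublish before the NAPI context goes away */
	netif_queue_set_napi(dev, qidx, NETDEV_QUEUE_TYPE_RX, NULL);
	napi_disable(napi);
	netif_napi_del(napi);
}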
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 619e1c3ef7f9..943d6918c2ec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -450,7 +450,6 @@ static void mlx4_en_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- int index = 0;
int i, strings = 0;
struct bitmap_iterator it;
@@ -459,10 +458,10 @@ static void mlx4_en_get_strings(struct net_device *dev,
switch (stringset) {
case ETH_SS_TEST:
for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
- strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ ethtool_puts(&data, mlx4_en_test_names[i]);
if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
for (; i < MLX4_EN_NUM_SELF_TEST; i++)
- strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
+ ethtool_puts(&data, mlx4_en_test_names[i]);
break;
case ETH_SS_STATS:
@@ -470,74 +469,56 @@ static void mlx4_en_get_strings(struct net_device *dev,
for (i = 0; i < NUM_MAIN_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PORT_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PF_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_FLOW_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PKT_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_XDP_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < NUM_PHY_STATS; i++, strings++,
bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[strings]);
+ ethtool_puts(&data, main_strings[strings]);
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "tx%d_bytes", i);
+ ethtool_sprintf(&data, "tx%d_packets", i);
+ ethtool_sprintf(&data, "tx%d_bytes", i);
}
for (i = 0; i < priv->rx_ring_num; i++) {
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_packets", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_bytes", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_dropped", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_drop", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_redirect", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_redirect_fail", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_tx", i);
- sprintf(data + (index++) * ETH_GSTRING_LEN,
- "rx%d_xdp_tx_full", i);
+ ethtool_sprintf(&data, "rx%d_packets", i);
+ ethtool_sprintf(&data, "rx%d_bytes", i);
+ ethtool_sprintf(&data, "rx%d_dropped", i);
+ ethtool_sprintf(&data, "rx%d_xdp_drop", i);
+ ethtool_sprintf(&data, "rx%d_xdp_redirect", i);
+ ethtool_sprintf(&data, "rx%d_xdp_redirect_fail", i);
+ ethtool_sprintf(&data, "rx%d_xdp_tx", i);
+ ethtool_sprintf(&data, "rx%d_xdp_tx_full", i);
}
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- mlx4_en_priv_flags[i]);
+ ethtool_puts(&data, mlx4_en_priv_flags[i]);
break;
}
@@ -1903,7 +1884,7 @@ out:
}
static int mlx4_en_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
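
The strings rework above is why the hand-maintained index variable can go: ethtool_puts() and ethtool_sprintf() treat the strings buffer as a cursor, filling one ETH_GSTRING_LEN slot per call and advancing the pointer. A runnable user-space approximation of that cursor pattern (the slot helpers imitate, but are not, the kernel implementations):

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32

/* Each call fills one fixed-size slot and advances the cursor. */
static void puts_slot(char **data, const char *s)
{
	strncpy(*data, s, ETH_GSTRING_LEN - 1);
	(*data)[ETH_GSTRING_LEN - 1] = '\0';
	*data += ETH_GSTRING_LEN;
}

static void sprintf_slot(char **data, const char *fmt, int i)
{
	snprintf(*data, ETH_GSTRING_LEN, fmt, i);
	*data += ETH_GSTRING_LEN;
}

int main(void)
{
	char buf[4 * ETH_GSTRING_LEN] = { 0 };
	char *cur = buf;
	char *p;
	int i;

	puts_slot(&cur, "rx_packets");
	for (i = 0; i < 3; i++)
		sprintf_slot(&cur, "tx%d_bytes", i);

	for (p = buf; p < cur; p += ETH_GSTRING_LEN)
		printf("%s\n", p);
	return 0;
}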
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5d3fde63b273..281b34af0bb4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -43,6 +43,7 @@
#include <net/vxlan.h>
#include <net/devlink.h>
#include <net/rps.h>
+#include <net/netdev_queues.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
@@ -1649,7 +1650,7 @@ int mlx4_en_start_port(struct net_device *dev)
sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);
/* Calculate Rx buf size */
- dev->mtu = min(dev->mtu, priv->max_mtu);
+ WRITE_ONCE(dev->mtu, min(dev->mtu, priv->max_mtu));
mlx4_en_calc_rx_buf(dev);
en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
@@ -2073,6 +2074,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->rx_ring[i]->csum_ok = 0;
priv->rx_ring[i]->csum_none = 0;
priv->rx_ring[i]->csum_complete = 0;
+ priv->rx_ring[i]->alloc_fail = 0;
}
}
@@ -2394,7 +2396,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
!mlx4_en_check_xdp_mtu(dev, new_mtu))
return -EOPNOTSUPP;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (netif_running(dev)) {
mutex_lock(&mdev->state_lock);
@@ -3099,6 +3101,77 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
last_i += NUM_PHY_STATS;
}
+static void mlx4_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ const struct mlx4_en_rx_ring *ring;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+ goto out_unlock;
+
+ ring = priv->rx_ring[i];
+ stats->packets = READ_ONCE(ring->packets);
+ stats->bytes = READ_ONCE(ring->bytes);
+ stats->alloc_fail = READ_ONCE(ring->alloc_fail);
+
+out_unlock:
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void mlx4_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ const struct mlx4_en_tx_ring *ring;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+ goto out_unlock;
+
+ ring = priv->tx_ring[TX][i];
+ stats->packets = READ_ONCE(ring->packets);
+ stats->bytes = READ_ONCE(ring->bytes);
+
+out_unlock:
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static void mlx4_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ spin_lock_bh(&priv->stats_lock);
+
+ if (!priv->port_up || mlx4_is_master(priv->mdev->dev))
+ goto out_unlock;
+
+ if (priv->rx_ring_num) {
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->alloc_fail = 0;
+ }
+
+ if (priv->tx_ring_num[TX]) {
+ tx->packets = 0;
+ tx->bytes = 0;
+ }
+
+out_unlock:
+ spin_unlock_bh(&priv->stats_lock);
+}
+
+static const struct netdev_stat_ops mlx4_stat_ops = {
+ .get_queue_stats_rx = mlx4_get_queue_stats_rx,
+ .get_queue_stats_tx = mlx4_get_queue_stats_tx,
+ .get_base_stats = mlx4_get_base_stats,
+};
+
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_port_profile *prof)
{
@@ -3262,6 +3335,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
+ dev->stat_ops = &mlx4_stat_ops;
dev->ethtool_ops = &mlx4_en_ethtool_ops;
/*
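
A note on the qstats contract the new ops rely on: the core computes interface totals as get_base_stats() plus the sum of the per-queue callbacks, which is why mlx4_get_base_stats() zero-fills the fields it reports per ring (an untouched field would mark the statistic as unsupported). A toy model of that aggregation, runnable stand-alone, with made-up counter values:

#include <stdio.h>

struct rx_stats { unsigned long packets, bytes; };

/* Stand-in for a driver's per-ring counters. */
static void get_queue_stats_rx(int i, struct rx_stats *s)
{
	s->packets = 1000 + i;
	s->bytes = 64 * (1000 + i);
}

/* A driver that counts everything per ring reports a zeroed base. */
static void get_base_stats(struct rx_stats *s)
{
	s->packets = 0;
	s->bytes = 0;
}

int main(void)
{
	struct rx_stats total, q;
	int i, nqueues = 4;

	get_base_stats(&total);
	for (i = 0; i < nqueues; i++) {
		get_queue_stats_rx(i, &q);
		total.packets += q.packets;
		total.bytes += q.bytes;
	}
	printf("rx packets=%lu bytes=%lu\n", total.packets, total.bytes);
	return 0;
}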
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index eac49657bd07..15c57e9517e9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -42,6 +42,7 @@
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>
+#include <linux/skbuff_ref.h>
#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
@@ -81,8 +82,10 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
for (i = 0; i < priv->num_frags; i++, frags++) {
if (!frags->page) {
- if (mlx4_alloc_page(priv, frags, gfp))
+ if (mlx4_alloc_page(priv, frags, gfp)) {
+ ring->alloc_fail++;
return -ENOMEM;
+ }
ring->rx_alloc_pages++;
}
rx_desc->data[i].addr = cpu_to_be64(frags->dma +
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 7b02ff61126d..febeadfdd5a5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -169,12 +169,6 @@ module_param_array(port_type_array, int, &arr_argc, 0444);
MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
"1 for IB, 2 for Ethernet");
-struct mlx4_port_config {
- struct list_head list;
- enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
- struct pci_dev *pdev;
-};
-
static atomic_t pf_loading = ATOMIC_INIT(0);
static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
@@ -185,7 +179,8 @@ static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id,
}
static int mlx4_devlink_ierr_reset_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
mlx4_internal_err_reset = ctx->val.vbool;
return 0;
@@ -202,7 +197,8 @@ static int mlx4_devlink_crdump_snapshot_get(struct devlink *devlink, u32 id,
}
static int mlx4_devlink_crdump_snapshot_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index efe3f97b874f..28b70dcc652e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -355,6 +355,7 @@ struct mlx4_en_rx_ring {
unsigned long xdp_tx;
unsigned long xdp_tx_full;
unsigned long dropped;
+ unsigned long alloc_fail;
int hwtstamp_rx_filter;
cpumask_var_t affinity_mask;
struct xdp_rxq_info xdp_rxq;
@@ -379,6 +380,7 @@ struct mlx4_en_cq {
#define MLX4_EN_OPCODE_ERROR 0x1e
const struct cpumask *aff_mask;
+ int cq_idx;
};
struct mlx4_en_port_profile {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 76dc5a9b9648..1289475e7be7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \
- fw_reset.o qos.o lib/tout.o lib/aso.o
+ fw_reset.o qos.o lib/tout.o lib/aso.o wc.o
#
# Netdev basic
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 4957412ff1f6..20768ef2e9d2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -969,19 +969,32 @@ static void cmd_work_handler(struct work_struct *work)
bool poll_cmd = ent->polling;
struct mlx5_cmd_layout *lay;
struct mlx5_core_dev *dev;
- unsigned long cb_timeout;
- struct semaphore *sem;
+ unsigned long timeout;
unsigned long flags;
int alloc_ret;
int cmd_mode;
+ complete(&ent->handling);
+
dev = container_of(cmd, struct mlx5_core_dev, cmd);
- cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+ timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
- complete(&ent->handling);
- sem = ent->page_queue ? &cmd->vars.pages_sem : &cmd->vars.sem;
- down(sem);
if (!ent->page_queue) {
+ if (down_timeout(&cmd->vars.sem, timeout)) {
+ mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for a slot.\n",
+ mlx5_command_str(ent->op), ent->op);
+ if (ent->callback) {
+ ent->callback(-EBUSY, ent->context);
+ mlx5_free_cmd_msg(dev, ent->out);
+ free_msg(dev, ent->in);
+ cmd_ent_put(ent);
+ } else {
+ ent->ret = -EBUSY;
+ complete(&ent->done);
+ }
+ complete(&ent->slotted);
+ return;
+ }
alloc_ret = cmd_alloc_index(cmd, ent);
if (alloc_ret < 0) {
mlx5_core_err_rl(dev, "failed to allocate command entry\n");
@@ -994,10 +1007,11 @@ static void cmd_work_handler(struct work_struct *work)
ent->ret = -EAGAIN;
complete(&ent->done);
}
- up(sem);
+ up(&cmd->vars.sem);
return;
}
} else {
+ down(&cmd->vars.pages_sem);
ent->idx = cmd->vars.max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
clear_bit(ent->idx, &cmd->vars.bitmask);
@@ -1005,6 +1019,8 @@ static void cmd_work_handler(struct work_struct *work)
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
+ complete(&ent->slotted);
+
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));
@@ -1023,7 +1039,7 @@ static void cmd_work_handler(struct work_struct *work)
ent->ts1 = ktime_get_ns();
cmd_mode = cmd->mode;
- if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, cb_timeout))
+ if (ent->callback && schedule_delayed_work(&ent->cb_timeout_work, timeout))
cmd_ent_get(ent);
set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state);
@@ -1143,6 +1159,9 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
ent->ret = -ECANCELED;
goto out_err;
}
+
+ wait_for_completion(&ent->slotted);
+
if (cmd->mode == CMD_MODE_POLLING || ent->polling)
wait_for_completion(&ent->done);
else if (!wait_for_completion_timeout(&ent->done, timeout))
@@ -1157,6 +1176,9 @@ out_err:
} else if (err == -ECANCELED) {
mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
mlx5_command_str(ent->op), ent->op);
+ } else if (err == -EBUSY) {
+ mlx5_core_warn(dev, "%s(0x%x) timed out while waiting for the command semaphore.\n",
+ mlx5_command_str(ent->op), ent->op);
}
mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
err, deliv_status_to_str(ent->status), ent->status);
@@ -1208,6 +1230,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
ent->polling = force_polling;
init_completion(&ent->handling);
+ init_completion(&ent->slotted);
if (!callback)
init_completion(&ent->done);
@@ -1225,7 +1248,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
return 0; /* mlx5_cmd_comp_handler() will put(ent) */
err = wait_func(dev, ent);
- if (err == -ETIMEDOUT || err == -ECANCELED)
+ if (err == -ETIMEDOUT || err == -ECANCELED || err == -EBUSY)
goto out_free;
ds = ent->ts2 - ent->ts1;
@@ -1611,6 +1634,9 @@ static int cmd_comp_notifier(struct notifier_block *nb,
dev = container_of(cmd, struct mlx5_core_dev, cmd);
eqe = data;
+ if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return NOTIFY_DONE;
+
mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false);
return NOTIFY_OK;
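
The heart of this cmd.c change: a lost completion used to leave cmd_work_handler() blocked in down() forever, while down_timeout() bounds the wait and fails the command with -EBUSY; the new `slotted` completion then tells wait_func() whether a slot was ever taken. The same bounded-wait pattern in portable POSIX C, runnable stand-alone (compile with -pthread; names are illustrative):

#include <errno.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

/* Give up with -EBUSY instead of sleeping forever when no slot
 * frees up within the timeout. */
static int down_timeout_posix(sem_t *sem, int timeout_ms)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}
	if (sem_timedwait(sem, &ts))
		return errno == ETIMEDOUT ? -EBUSY : -errno;
	return 0;
}

int main(void)
{
	sem_t slots;

	sem_init(&slots, 0, 0);		/* zero free slots: must time out */
	printf("ret=%d\n", down_timeout_posix(&slots, 100));
	sem_destroy(&slots);
	return 0;
}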
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index 09652dc89115..36806e813c33 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -143,8 +143,8 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}
-static ssize_t average_write(struct file *filp, const char __user *buf,
- size_t count, loff_t *pos)
+static ssize_t reset_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *pos)
{
struct mlx5_cmd_stats *stats;
@@ -152,6 +152,11 @@ static ssize_t average_write(struct file *filp, const char __user *buf,
spin_lock_irq(&stats->lock);
stats->sum = 0;
stats->n = 0;
+ stats->failed = 0;
+ stats->failed_mbox_status = 0;
+ stats->last_failed_errno = 0;
+ stats->last_failed_mbox_status = 0;
+ stats->last_failed_syndrome = 0;
spin_unlock_irq(&stats->lock);
*pos += count;
@@ -159,11 +164,16 @@ static ssize_t average_write(struct file *filp, const char __user *buf,
return count;
}
-static const struct file_operations stats_fops = {
+static const struct file_operations reset_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = reset_write,
+};
+
+static const struct file_operations average_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = average_read,
- .write = average_write,
};
static ssize_t slots_read(struct file *filp, char __user *buf, size_t count,
@@ -228,8 +238,10 @@ void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev)
continue;
stats->root = debugfs_create_dir(namep, *cmd);
+ debugfs_create_file("reset", 0200, stats->root, stats,
+ &reset_fops);
debugfs_create_file("average", 0400, stats->root, stats,
- &stats_fops);
+ &average_fops);
debugfs_create_u64("n", 0400, stats->root, &stats->n);
debugfs_create_u64("failed", 0400, stats->root, &stats->failed);
debugfs_create_u64("failed_mbox_status", 0400, stats->root,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 47e7c2639774..9a79674d27f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -349,7 +349,7 @@ int mlx5_attach_device(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
struct auxiliary_device *adev;
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
int ret = 0, i;
devl_assert_locked(priv_to_devlink(dev));
@@ -406,7 +406,7 @@ void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend)
{
struct mlx5_priv *priv = &dev->priv;
struct auxiliary_device *adev;
- struct auxiliary_driver *adrv;
+ const struct auxiliary_driver *adrv;
pm_message_t pm = {};
int i;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h
index 406ebe17405f..b4b3a43e56a0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/cmd_tracepoint.h
@@ -22,10 +22,10 @@ TRACE_EVENT(mlx5_cmd,
__field(u32, syndrome)
__field(int, err)
),
- TP_fast_assign(__assign_str(command_str, command_str);
+ TP_fast_assign(__assign_str(command_str);
__entry->opcode = opcode;
__entry->op_mod = op_mod;
- __assign_str(status_str, status_str);
+ __assign_str(status_str);
__entry->status = status;
__entry->syndrome = syndrome;
__entry->err = err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
index f15718db5d0e..78e481b2c015 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_rep_tracepoint.h
@@ -25,7 +25,7 @@ TRACE_EVENT(mlx5e_rep_neigh_update,
struct in6_addr *pin6;
__be32 *p32;
- __assign_str(devname, nhe->neigh_dev->name);
+ __assign_str(devname);
__entry->neigh_connected = neigh_connected;
memcpy(__entry->ha, ha, ETH_ALEN);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
index ac52ef37f38a..4b1ca228012b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/en_tc_tracepoint.h
@@ -86,7 +86,7 @@ TRACE_EVENT(mlx5e_tc_update_neigh_used_value,
struct in6_addr *pin6;
__be32 *p32;
- __assign_str(devname, nhe->neigh_dev->name);
+ __assign_str(devname);
__entry->neigh_used = neigh_used;
p32 = (__be32 *)__entry->v4;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
index 3038be575923..50f8a7630f86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
@@ -55,12 +55,11 @@ TRACE_EVENT(mlx5_fw,
),
TP_fast_assign(
- __assign_str(dev_name,
- dev_name(tracer->dev->device));
+ __assign_str(dev_name);
__entry->trace_timestamp = trace_timestamp;
__entry->lost = lost;
__entry->event_id = event_id;
- __assign_str(msg, msg);
+ __assign_str(msg);
),
TP_printk("%s [0x%llx] %d [0x%x] %s",
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 84db05fb9389..d9e241423bc5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -80,6 +80,7 @@ struct page_pool;
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MLX5E_RX_MAX_HEAD (256)
+#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8)
#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
@@ -129,7 +130,7 @@ struct page_pool;
#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2
#define MLX5E_DEFAULT_LRO_TIMEOUT 32
-#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+#define MLX5E_DEFAULT_SHAMPO_TIMEOUT 1024
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
@@ -146,25 +147,6 @@ struct page_pool;
#define MLX5E_TX_XSK_POLL_BUDGET 64
#define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */
-#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
- (sizeof(struct mlx5e_umr_wqe) +\
- (sizeof(struct mlx5_klm) * (sgl_len)))
-
-#define MLX5E_KLM_UMR_WQEBBS(klm_entries) \
- (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB))
-
-#define MLX5E_KLM_UMR_DS_CNT(klm_entries)\
- (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS))
-
-#define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\
- (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
-
-#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
- ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)
-
-#define MLX5E_MAX_KLM_PER_WQE(mdev) \
- MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
-
#define mlx5e_state_dereference(priv, p) \
rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
@@ -320,6 +302,8 @@ struct mlx5e_params {
bool scatter_fcs_en;
bool rx_dim_enabled;
bool tx_dim_enabled;
+ bool rx_moder_use_cqe_mode;
+ bool tx_moder_use_cqe_mode;
u32 pflags;
struct bpf_prog *xdp_prog;
struct mlx5e_xsk *xsk;
@@ -430,7 +414,7 @@ struct mlx5e_txqsq {
u16 cc;
u16 skb_fifo_cc;
u32 dma_fifo_cc;
- struct dim dim; /* Adaptive Moderation */
+ struct dim *dim; /* Adaptive Moderation */
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
@@ -722,7 +706,7 @@ struct mlx5e_rq {
int ix;
unsigned int hw_mtu;
- struct dim dim; /* Dynamic Interrupt Moderation */
+ struct dim *dim; /* Dynamic Interrupt Moderation */
/* XDP */
struct bpf_prog __rcu *xdp_prog;
@@ -797,6 +781,10 @@ struct mlx5e_channel {
int cpu;
/* Sync between icosq recovery and XSK enable/disable. */
struct mutex icosq_recovery_lock;
+
+ /* coalescing configuration */
+ struct dim_cq_moder rx_cq_moder;
+ struct dim_cq_moder tx_cq_moder;
};
struct mlx5e_ptp;
@@ -879,6 +867,8 @@ struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_selq selq;
struct mlx5e_txqsq **txq2sq;
+ struct mlx5e_sq_stats **txq2sq_stats;
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx_dp dcbx_dp;
#endif
@@ -1008,7 +998,8 @@ void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode);
-void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
+void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len);
+void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
@@ -1040,6 +1031,11 @@ void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param, u16 q_counter);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
+bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
+ bool dim_enabled);
+bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
+ bool dim_enabled, bool keep_dim_state);
+
struct mlx5e_sq_param;
int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct xsk_buff_pool *xsk_pool,
@@ -1060,6 +1056,10 @@ int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
struct mlx5e_cq *cq);
void mlx5e_close_cq(struct mlx5e_cq *cq);
+int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u8 cq_period_mode);
+int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u16 cq_period, u16 cq_max_count, u8 cq_period_mode);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
@@ -1087,6 +1087,7 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
+int mlx5e_update_tc_and_tx_queues_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx);
@@ -1118,6 +1119,11 @@ int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
void mlx5e_close_txqsq(struct mlx5e_txqsq *sq);
+bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
+ bool dim_enabled);
+bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
+ bool dim_enabled, bool keep_dim_state);
+
static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
{
return MLX5_CAP_ETH(mdev, swp) &&
@@ -1143,7 +1149,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
-int mlx5e_update_nic_rx(struct mlx5e_priv *priv);
void mlx5e_update_carrier(struct mlx5e_priv *priv);
int mlx5e_close(struct net_device *netdev);
int mlx5e_open(struct net_device *netdev);
@@ -1160,7 +1165,7 @@ void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo);
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv,
- uint32_t stringset, uint8_t *data);
+ u32 stringset, u8 *data);
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset);
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data);
@@ -1180,23 +1185,16 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal,
struct kernel_ethtool_coalesce *kernel_coal,
struct netlink_ext_ack *extack);
-int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
- struct ethtool_link_ksettings *link_ksettings);
-int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
- const struct ethtool_link_ksettings *link_ksettings);
-int mlx5e_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh);
-int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack);
+int mlx5e_get_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal);
+int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
-void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
- struct ethtool_pauseparam *pauseparam);
-int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
- struct ethtool_pauseparam *pauseparam);
/* mlx5e generic netdev management API */
static inline bool
@@ -1222,8 +1220,6 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv,
void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv);
void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu);
-void mlx5e_rx_dim_work(struct work_struct *work);
-void mlx5e_tx_dim_work(struct work_struct *work);
void mlx5e_set_xdp_feature(struct net_device *netdev);
netdev_features_t mlx5e_features_check(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
index 874a1016623c..66e719e88503 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c
@@ -3,6 +3,7 @@
#include "channels.h"
#include "en.h"
+#include "en/dim.h"
#include "en/ptp.h"
unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs)
@@ -55,3 +56,85 @@ bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn)
*rqn = c->rq.rqn;
return true;
}
+
+int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enable)
+{
+ int i;
+
+ for (i = 0; i < chs->num; i++) {
+ int err = mlx5e_dim_rx_change(&chs->c[i]->rq, enable);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enable)
+{
+ int i, tc;
+
+ for (i = 0; i < chs->num; i++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
+ int err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], enable);
+
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs)
+{
+ int i;
+
+ for (i = 0; i < chs->num; i++) {
+ /* If dim is enabled for the channel, reset the dim state so the
+ * collected statistics will be reset. This is useful for
+ * supporting legacy interfaces that allow things like changing
+ * the CQ period mode for all channels without disturbing
+ * individual channel configurations.
+ */
+ if (chs->c[i]->rq.dim) {
+ int err;
+
+ mlx5e_dim_rx_change(&chs->c[i]->rq, false);
+ err = mlx5e_dim_rx_change(&chs->c[i]->rq, true);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs)
+{
+ int i, tc;
+
+ for (i = 0; i < chs->num; i++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
+ int err;
+
+ /* If dim is enabled for the channel, reset the dim
+ * state so the collected statistics will be reset. This
+ * is useful for supporting legacy interfaces that allow
+ * things like changing the CQ period mode for all
+ * channels without disturbing individual channel
+ * configurations.
+ */
+ if (!chs->c[i]->sq[tc].dim)
+ continue;
+
+ mlx5e_dim_tx_change(&chs->c[i]->sq[tc], false);
+ err = mlx5e_dim_tx_change(&chs->c[i]->sq[tc], true);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
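
The toggle helpers above reset DIM's accumulated statistics by recreating the state: mlx5e_dim_*_change(false) tears the dim context down and the following (true) builds a fresh one. The idea reduced to portable C (the struct and helper are stand-ins, not mlx5 types):

#include <stdio.h>
#include <stdlib.h>

struct dim_state { unsigned long samples; };

/* "Change" = create or destroy; a disable/enable pair therefore zeroes
 * whatever the algorithm had accumulated. */
static struct dim_state *dim_change(struct dim_state *d, int enable)
{
	if (!enable) {
		free(d);
		return NULL;
	}
	return calloc(1, sizeof(*d));
}

int main(void)
{
	struct dim_state *d = dim_change(NULL, 1);

	if (!d)
		return 1;
	d->samples = 42;
	d = dim_change(d, 0);		/* disable: state dropped */
	d = dim_change(d, 1);		/* re-enable: fresh state */
	if (!d)
		return 1;
	printf("samples after toggle: %lu\n", d->samples);	/* 0 */
	free(d);
	return 0;
}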
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
index 6715aa9383b9..eda80f8c6c02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h
@@ -15,5 +15,9 @@ void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix,
void mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn,
u32 *vhca_id);
bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn);
+int mlx5e_channels_rx_change_dim(struct mlx5e_channels *chs, bool enabled);
+int mlx5e_channels_tx_change_dim(struct mlx5e_channels *chs, bool enabled);
+int mlx5e_channels_rx_toggle_dim(struct mlx5e_channels *chs);
+int mlx5e_channels_tx_toggle_dim(struct mlx5e_channels *chs);
#endif /* __MLX5_EN_CHANNELS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h b/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h
new file mode 100644
index 000000000000..110e2c6b7e51
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/dim.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved */
+
+#ifndef __MLX5_EN_DIM_H__
+#define __MLX5_EN_DIM_H__
+
+#include <linux/dim.h>
+#include <linux/types.h>
+#include <linux/mlx5/mlx5_ifc.h>
+
+/* Forward declarations */
+struct mlx5e_rq;
+struct mlx5e_txqsq;
+struct work_struct;
+
+/* Convert a boolean CQE-start value to the matching DIM constant:
+ * true  : DIM_CQ_PERIOD_MODE_START_FROM_CQE
+ * false : DIM_CQ_PERIOD_MODE_START_FROM_EQE
+ */
+static inline int mlx5e_dim_cq_period_mode(bool start_from_cqe)
+{
+ return start_from_cqe ? DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
+static inline enum mlx5_cq_period_mode
+mlx5e_cq_period_mode(enum dim_cq_period_mode cq_period_mode)
+{
+ switch (cq_period_mode) {
+ case DIM_CQ_PERIOD_MODE_START_FROM_EQE:
+ return MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ case DIM_CQ_PERIOD_MODE_START_FROM_CQE:
+ return MLX5_CQ_PERIOD_MODE_START_FROM_CQE;
+ default:
+ WARN_ON_ONCE(true);
+ return MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ }
+}
+
+void mlx5e_rx_dim_work(struct work_struct *work);
+void mlx5e_tx_dim_work(struct work_struct *work);
+int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enabled);
+int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enabled);
+
+#endif /* __MLX5_EN_DIM_H__ */
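
Expected use of the two converters, going from the boolean stored in the params to the DIM constant and on to the device constant; a fragment, assuming the new rx_moder_use_cqe_mode field and the mlx5e_modify_cq_period_mode() helper added elsewhere in this series:

	u8 dim_mode = mlx5e_dim_cq_period_mode(params->rx_moder_use_cqe_mode);
	enum mlx5_cq_period_mode hw_mode = mlx5e_cq_period_mode(dim_mode);

	/* dim_mode parameterizes the software DIM algorithm; hw_mode is
	 * what mlx5e_modify_cq_period_mode() programs into the CQ. */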
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 4d6225e0eec7..1e8b7d330701 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -154,6 +154,19 @@ struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs);
struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs);
struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress);
void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress);
+
+static inline bool mlx5e_fs_has_arfs(struct net_device *netdev)
+{
+ return IS_ENABLED(CONFIG_MLX5_EN_ARFS) &&
+ netdev->hw_features & NETIF_F_NTUPLE;
+}
+
+static inline bool mlx5e_fs_want_arfs(struct net_device *netdev)
+{
+ return IS_ENABLED(CONFIG_MLX5_EN_ARFS) &&
+ netdev->features & NETIF_F_NTUPLE;
+}
+
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs);
#endif
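
The new pair encodes the capability/configuration split: hw_features says the device can do ntuple filtering (so the aRFS tables must be created), features says it is currently enabled (so flows should actually be steered). In plain C, with an illustrative flag value:

#include <stdbool.h>
#include <stdio.h>

#define NTUPLE (1u << 0)	/* stand-in for NETIF_F_NTUPLE */

struct netdev { unsigned int hw_features, features; };

static bool has_arfs(const struct netdev *d)  { return d->hw_features & NTUPLE; }
static bool want_arfs(const struct netdev *d) { return d->features & NTUPLE; }

int main(void)
{
	struct netdev d = { .hw_features = NTUPLE, .features = 0 };

	/* capable but disabled: allocate the tables, don't steer yet */
	printf("has=%d want=%d\n", has_arfs(&d), want_arfs(&d));
	return 0;
}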
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index a3f31d9d527e..64b62ed17b07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -6,6 +6,7 @@
#include "en/port.h"
#include "en_accel/en_accel.h"
#include "en_accel/ipsec.h"
+#include <linux/dim.h>
#include <net/page_pool/types.h>
#include <net/xdp_sock_drv.h>
@@ -513,77 +514,6 @@ int mlx5e_validate_params(struct mlx5_core_dev *mdev, struct mlx5e_params *param
return 0;
}
-static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
-{
- struct dim_cq_moder moder = {};
-
- moder.cq_period_mode = cq_period_mode;
- moder.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
- moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- moder.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
-
- return moder;
-}
-
-static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
-{
- struct dim_cq_moder moder = {};
-
- moder.cq_period_mode = cq_period_mode;
- moder.pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
- moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
- if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
- moder.usec = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
-
- return moder;
-}
-
-static u8 mlx5_to_net_dim_cq_period_mode(u8 cq_period_mode)
-{
- return cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE ?
- DIM_CQ_PERIOD_MODE_START_FROM_CQE :
- DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-}
-
-void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
-{
- if (params->tx_dim_enabled) {
- u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
-
- params->tx_cq_moderation = net_dim_get_def_tx_moderation(dim_period_mode);
- } else {
- params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode);
- }
-}
-
-void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode)
-{
- if (params->rx_dim_enabled) {
- u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode);
-
- params->rx_cq_moderation = net_dim_get_def_rx_moderation(dim_period_mode);
- } else {
- params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode);
- }
-}
-
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
-{
- mlx5e_reset_tx_moderation(params, cq_period_mode);
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
- params->tx_cq_moderation.cq_period_mode ==
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-}
-
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
-{
- mlx5e_reset_rx_moderation(params, cq_period_mode);
- MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
- params->rx_cq_moderation.cq_period_mode ==
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
-}
-
bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
{
u32 link_speed = 0;
@@ -998,7 +928,7 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_headers_entry_size,
mlx5e_shampo_get_log_hd_entry_size(mdev, params));
MLX5_SET(rqc, rqc, reservation_timeout,
- params->packet_merge.timeout);
+ mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_SHAMPO_TIMEOUT));
MLX5_SET(rqc, rqc, shampo_match_criteria_type,
params->packet_merge.shampo.match_criteria_type);
MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
@@ -1141,22 +1071,36 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param)
{
- int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
+ int max_num_of_umr_per_wqe, max_hd_per_wqe, max_ksm_per_umr, rest;
void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
u32 wqebbs;
- max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
+ max_ksm_per_umr = MLX5E_MAX_KSM_PER_WQE(mdev);
max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
- max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
- rest = max_hd_per_wqe % max_klm_per_umr;
- wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
+ max_num_of_umr_per_wqe = max_hd_per_wqe / max_ksm_per_umr;
+ rest = max_hd_per_wqe % max_ksm_per_umr;
+ wqebbs = MLX5E_KSM_UMR_WQEBBS(max_ksm_per_umr) * max_num_of_umr_per_wqe;
if (rest)
- wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
+ wqebbs += MLX5E_KSM_UMR_WQEBBS(rest);
wqebbs *= wq_size;
return wqebbs;
}
+#define MLX5E_LRO_TIMEOUT_ARR_SIZE 4
+
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
+{
+ int i;
+
+ /* The supported periods are organized in ascending order */
+ for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
+ if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
+ break;
+
+ return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
+}
+
static u32 mlx5e_mpwrq_total_umr_wqebbs(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
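
mlx5e_choose_lro_timeout() selects the first supported period that is at least the requested one, falling back to the largest because the loop deliberately stops one short of the end. The same selection over a plain ascending array, runnable stand-alone:

#include <stdio.h>

static unsigned int choose_timeout(const unsigned int *supported, int n,
				   unsigned int wanted)
{
	int i;

	/* supported[] is ascending; stop at the last index as a fallback */
	for (i = 0; i < n - 1; i++)
		if (supported[i] >= wanted)
			break;
	return supported[i];
}

int main(void)
{
	unsigned int periods[4] = { 8, 64, 512, 4096 };	/* illustrative */

	printf("%u\n", choose_timeout(periods, 4, 1024));	/* -> 4096 */
	printf("%u\n", choose_timeout(periods, 4, 100));	/* -> 512 */
	return 0;
}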
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 9a781f18b57f..3f8986f9d862 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -77,11 +77,6 @@ u8 mlx5e_mpwrq_max_log_rq_pkts(struct mlx5_core_dev *mdev, u8 page_shift,
/* Parameter calculations */
-void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
-void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode);
-
bool slow_pci_heuristic(struct mlx5_core_dev *mdev);
int mlx5e_mpwrq_validate_regular(struct mlx5_core_dev *mdev, struct mlx5e_params *params);
int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *params,
@@ -113,6 +108,7 @@ u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param);
+u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index dbe2b19a9570..5f6a0605e4ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -41,7 +41,7 @@ void mlx5_port_query_eth_autoneg(struct mlx5_core_dev *dev, u8 *an_status,
*an_disable_cap = 0;
*an_disable_admin = 0;
- if (mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, 1))
+ if (mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, 1, 0))
return;
*an_status = MLX5_GET(ptys_reg, out, an_status);
@@ -292,10 +292,15 @@ enum mlx5e_fec_supported_link_mode {
MLX5E_FEC_SUPPORTED_LINK_MODE_100G_2X,
MLX5E_FEC_SUPPORTED_LINK_MODE_200G_4X,
MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X,
+ MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X,
MLX5E_MAX_FEC_SUPPORTED_LINK_MODE,
};
#define MLX5E_FEC_FIRST_50G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_50G_1X
+#define MLX5E_FEC_FIRST_100G_PER_LANE_MODE MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X
#define MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, policy, write, link) \
do { \
@@ -308,6 +313,17 @@ enum mlx5e_fec_supported_link_mode {
*_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \
} while (0)
+/* Returns true if FEC can be set for a given link mode. */
+static bool mlx5e_is_fec_supported_link_mode(struct mlx5_core_dev *dev,
+ enum mlx5e_fec_supported_link_mode link_mode)
+{
+ return link_mode < MLX5E_FEC_FIRST_50G_PER_LANE_MODE ||
+ (link_mode < MLX5E_FEC_FIRST_100G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm)) ||
+ (link_mode >= MLX5E_FEC_FIRST_100G_PER_LANE_MODE &&
+ MLX5_CAP_PCAM_FEATURE(dev, fec_100G_per_lane_in_pplm));
+}
+
/* get/set FEC admin field for a given speed */
static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
enum mlx5e_fec_supported_link_mode link_mode)
@@ -340,6 +356,18 @@ static int mlx5e_fec_admin_field(u32 *pplm, u16 *fec_policy, bool write,
case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 100g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 200g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 400g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
+ MLX5E_FEC_OVERRIDE_ADMIN_POLICY(pplm, *fec_policy, write, 800g_8x);
+ break;
default:
return -EINVAL;
}
@@ -381,6 +409,18 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap,
case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_8X:
*fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_8x);
break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_100G_1X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 100g_1x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_200G_2X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 200g_2x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_400G_4X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 400g_4x);
+ break;
+ case MLX5E_FEC_SUPPORTED_LINK_MODE_800G_8X:
+ *fec_cap = MLX5E_GET_FEC_OVERRIDE_CAP(pplm, 800g_8x);
+ break;
default:
return -EINVAL;
}
@@ -389,7 +429,6 @@ static int mlx5e_get_fec_cap_field(u32 *pplm, u16 *fec_cap,
bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
{
- bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
@@ -407,7 +446,7 @@ bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
u16 fec_caps;
- if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ if (!mlx5e_is_fec_supported_link_mode(dev, i))
break;
mlx5e_get_fec_cap_field(out, &fec_caps, i);
@@ -420,7 +459,6 @@ bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy)
int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
u16 *fec_configured_mode)
{
- bool fec_50g_per_lane = MLX5_CAP_PCAM_FEATURE(dev, fec_50G_per_lane_in_pplm);
u32 out[MLX5_ST_SZ_DW(pplm_reg)] = {};
u32 in[MLX5_ST_SZ_DW(pplm_reg)] = {};
int sz = MLX5_ST_SZ_BYTES(pplm_reg);
@@ -445,7 +483,7 @@ int mlx5e_get_fec_mode(struct mlx5_core_dev *dev, u32 *fec_mode_active,
*fec_configured_mode = 0;
for (i = 0; i < MLX5E_MAX_FEC_SUPPORTED_LINK_MODE; i++) {
- if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ if (!mlx5e_is_fec_supported_link_mode(dev, i))
break;
mlx5e_fec_admin_field(out, fec_configured_mode, 0, i);
@@ -489,13 +527,13 @@ int mlx5e_set_fec_mode(struct mlx5_core_dev *dev, u16 fec_policy)
u16 conf_fec = fec_policy;
u16 fec_caps = 0;
- if (i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE && !fec_50g_per_lane)
+ if (!mlx5e_is_fec_supported_link_mode(dev, i))
break;
/* RS fec in ethtool is mapped to MLX5E_FEC_RS_528_514
* to link modes up to 25G per lane and to
* MLX5E_FEC_RS_544_514 in the new link modes based on
- * 50 G per lane
+ * 50G or 100G per lane
*/
if (conf_fec == (1 << MLX5E_FEC_RS_528_514) &&
i >= MLX5E_FEC_FIRST_50G_PER_LANE_MODE)
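
The shape behind mlx5e_is_fec_supported_link_mode(): link modes are ordered so each per-lane family is a contiguous range, and each newer family is gated by its own PCAM capability bit. A compact model of that check (enum values and capability flags are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum link_mode { M_10G, M_50G_1X, M_100G_2X, M_100G_1X, M_200G_2X, M_MAX };

#define FIRST_50G_PER_LANE  M_50G_1X
#define FIRST_100G_PER_LANE M_100G_1X

/* Legacy modes are always configurable; the 50G-per-lane and
 * 100G-per-lane families each need their own capability bit. */
static bool fec_settable(enum link_mode m, bool cap_50g, bool cap_100g)
{
	return m < FIRST_50G_PER_LANE ||
	       (m < FIRST_100G_PER_LANE && cap_50g) ||
	       (m >= FIRST_100G_PER_LANE && cap_100g);
}

int main(void)
{
	/* 50G-per-lane capable, 100G-per-lane not */
	printf("100G_2X: %d\n", fec_settable(M_100G_2X, true, false));	/* 1 */
	printf("200G_2X: %d\n", fec_settable(M_200G_2X, true, false));	/* 0 */
	return 0;
}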
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index d0af7271da34..afd654583b6b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -169,6 +169,7 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
WARN_ON_ONCE(!pos->inuse);
pos->inuse = false;
list_del(&pos->entry);
+ ptpsq->cq_stats->lost_cqe++;
}
spin_unlock_bh(&cqe_list->tracker_list_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 6743806b8480..f0744a45db92 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -170,6 +170,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid));
priv->txq2sq[qid] = sq;
+ priv->txq2sq_stats[qid] = sq->stats;
/* Make the change to txq2sq visible before the queue is started.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
@@ -186,6 +187,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id)
void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
{
struct mlx5e_txqsq *sq;
+ u16 txq_ix;
sq = mlx5e_get_qos_sq(priv, qid);
if (!sq) /* Handle the case when the SQ failed to open. */
@@ -194,7 +196,10 @@ void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid)
qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
- priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL;
+ txq_ix = mlx5e_qid_from_qos(&priv->channels, qid);
+
+ priv->txq2sq[txq_ix] = NULL;
+ priv->txq2sq_stats[txq_ix] = NULL;
/* Make the change to txq2sq visible before the queue is started again.
* As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE,
@@ -325,6 +330,7 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
{
struct mlx5e_params *params = &c->priv->channels.params;
struct mlx5e_txqsq __rcu **qos_sqs;
+ u16 txq_ix;
int i;
qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs);
@@ -342,8 +348,11 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c)
qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid);
mlx5e_deactivate_txqsq(sq);
+ txq_ix = mlx5e_qid_from_qos(&c->priv->channels, qid);
+
/* The queue is disabled, no synchronization with datapath is needed. */
- c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL;
+ c->priv->txq2sq[txq_ix] = NULL;
+ c->priv->txq2sq_stats[txq_ix] = NULL;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 22918b2ef7f1..09433b91be17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -146,7 +146,9 @@ static int mlx5e_tx_reporter_timeout_recover(void *ctx)
return err;
}
+ mutex_lock(&priv->state_lock);
err = mlx5e_safe_reopen_channels(priv);
+ mutex_unlock(&priv->state_lock);
if (!err) {
to_ctx->status = 1; /* all channels recovered */
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index fadfa8b50beb..71a168746ebe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -69,6 +69,8 @@ struct mlx5_tc_ct_priv {
struct rhashtable ct_tuples_nat_ht;
struct mlx5_flow_table *ct;
struct mlx5_flow_table *ct_nat;
+ struct mlx5_flow_group *ct_nat_miss_group;
+ struct mlx5_flow_handle *ct_nat_miss_rule;
struct mlx5e_post_act *post_act;
struct mutex control_lock; /* guards parallel adds/dels */
struct mapping_ctx *zone_mapping;
@@ -141,6 +143,8 @@ struct mlx5_ct_counter {
enum {
MLX5_CT_ENTRY_FLAG_VALID,
+ MLX5_CT_ENTRY_IN_CT_TABLE,
+ MLX5_CT_ENTRY_IN_CT_NAT_TABLE,
};
struct mlx5_ct_entry {
@@ -198,9 +202,15 @@ static const struct rhashtable_params tuples_nat_ht_params = {
};
static bool
-mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry)
+mlx5_tc_ct_entry_in_ct_table(struct mlx5_ct_entry *entry)
{
- return !!(entry->tuple_nat_node.next);
+ return test_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags);
+}
+
+static bool
+mlx5_tc_ct_entry_in_ct_nat_table(struct mlx5_ct_entry *entry)
+{
+ return test_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags);
}
static int
@@ -526,8 +536,10 @@ static void
mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry)
{
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, true);
+ if (mlx5_tc_ct_entry_in_ct_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
atomic_dec(&ct_priv->debugfs.stats.offloaded);
}
@@ -814,7 +826,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
&zone_rule->mh,
zone_restore_id,
nat,
- mlx5_tc_ct_entry_has_nat(entry));
+ mlx5_tc_ct_entry_in_ct_nat_table(entry));
if (err) {
ct_dbg("Failed to create ct entry mod hdr");
goto err_mod_hdr;
@@ -888,7 +900,7 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv,
*old_attr = *attr;
err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id,
- nat, mlx5_tc_ct_entry_has_nat(entry));
+ nat, mlx5_tc_ct_entry_in_ct_nat_table(entry));
if (err) {
ct_dbg("Failed to create ct entry mod hdr");
goto err_mod_hdr;
@@ -920,6 +932,7 @@ err_rule:
mlx5_tc_ct_entry_destroy_mod_hdr(ct_priv, zone_rule->attr, mh);
mlx5_put_label_mapping(ct_priv, attr->ct_attr.ct_labels_id);
err_mod_hdr:
+ *attr = *old_attr;
kfree(old_attr);
err_attr:
kvfree(spec);
@@ -957,11 +970,13 @@ static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry)
{
struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv;
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node,
- tuples_nat_ht_params);
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
- tuples_ht_params);
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
+ &entry->tuple_nat_node,
+ tuples_nat_ht_params);
+ if (mlx5_tc_ct_entry_in_ct_table(entry))
+ rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
+ tuples_ht_params);
}
static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry)
@@ -1100,21 +1115,26 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
return err;
}
- err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
- zone_restore_id);
- if (err)
- goto err_orig;
+ if (mlx5_tc_ct_entry_in_ct_table(entry)) {
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
+ if (err)
+ goto err_orig;
+ }
- err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
- zone_restore_id);
- if (err)
- goto err_nat;
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+ err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
+ if (err)
+ goto err_nat;
+ }
atomic_inc(&ct_priv->debugfs.stats.offloaded);
return 0;
err_nat:
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ if (mlx5_tc_ct_entry_in_ct_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
err_orig:
mlx5_tc_ct_counter_put(ct_priv, entry);
return err;
@@ -1126,17 +1146,21 @@ mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_ct_entry *entry,
u8 zone_restore_id)
{
- int err;
+ int err = 0;
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
- zone_restore_id);
- if (err)
- return err;
+ if (mlx5_tc_ct_entry_in_ct_table(entry)) {
+ err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false,
+ zone_restore_id);
+ if (err)
+ return err;
+ }
- err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
- zone_restore_id);
- if (err)
- mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+ err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true,
+ zone_restore_id);
+ if (err && mlx5_tc_ct_entry_in_ct_table(entry))
+ mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
+ }
return err;
}
@@ -1224,18 +1248,24 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
if (err)
goto err_entries;
- err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
- if (err)
- goto err_tuple;
-
if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) {
err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht,
&entry->tuple_nat_node,
tuples_nat_ht_params);
if (err)
goto err_tuple_nat;
+
+ set_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags);
+ }
+
+ if (!mlx5_tc_ct_entry_in_ct_nat_table(entry)) {
+ err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht,
+ &entry->tuple_node,
+ tuples_ht_params);
+ if (err)
+ goto err_tuple;
+
+ set_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags);
}
spin_unlock_bh(&ct_priv->ht_lock);
@@ -1251,17 +1281,10 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft,
err_rules:
spin_lock_bh(&ct_priv->ht_lock);
- if (mlx5_tc_ct_entry_has_nat(entry))
- rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht,
- &entry->tuple_nat_node, tuples_nat_ht_params);
-err_tuple_nat:
- rhashtable_remove_fast(&ct_priv->ct_tuples_ht,
- &entry->tuple_node,
- tuples_ht_params);
err_tuple:
- rhashtable_remove_fast(&ft->ct_entries_ht,
- &entry->node,
- cts_ht_params);
+ mlx5_tc_ct_entry_remove_from_tuples(entry);
+err_tuple_nat:
+ rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params);
err_entries:
spin_unlock_bh(&ct_priv->ht_lock);
err_set:
@@ -2149,6 +2172,76 @@ mlx5_ct_tc_remove_dbgfs(struct mlx5_tc_ct_priv *ct_priv)
debugfs_remove_recursive(ct_priv->debugfs.root);
}
+static struct mlx5_flow_handle *
+tc_ct_add_miss_rule(struct mlx5_flow_table *ft,
+ struct mlx5_flow_table *next_ft)
+{
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act act = {};
+
+ act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+ act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = next_ft;
+
+ return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1);
+}
+
+static int
+tc_ct_add_ct_table_miss_rule(struct mlx5_flow_table *from,
+ struct mlx5_flow_table *to,
+ struct mlx5_flow_group **miss_group,
+ struct mlx5_flow_handle **miss_rule)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *group;
+ struct mlx5_flow_handle *rule;
+ unsigned int max_fte = from->max_fte;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ /* create miss group */
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index,
+ max_fte - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
+ max_fte - 1);
+ group = mlx5_create_flow_group(from, flow_group_in);
+ if (IS_ERR(group)) {
+ err = PTR_ERR(group);
+ goto err_miss_grp;
+ }
+
+ /* add miss rule to next fdb */
+ rule = tc_ct_add_miss_rule(from, to);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto err_miss_rule;
+ }
+
+ *miss_group = group;
+ *miss_rule = rule;
+ kvfree(flow_group_in);
+ return 0;
+
+err_miss_rule:
+ mlx5_destroy_flow_group(group);
+err_miss_grp:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void
+tc_ct_del_ct_table_miss_rule(struct mlx5_flow_group *miss_group,
+ struct mlx5_flow_handle *miss_rule)
+{
+ mlx5_del_flow_rules(miss_rule);
+ mlx5_destroy_flow_group(miss_group);
+}
+
#define INIT_ERR_PREFIX "tc ct offload init failed"
struct mlx5_tc_ct_priv *
@@ -2212,6 +2305,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
goto err_ct_nat_tbl;
}
+ err = tc_ct_add_ct_table_miss_rule(ct_priv->ct_nat, ct_priv->ct,
+ &ct_priv->ct_nat_miss_group,
+ &ct_priv->ct_nat_miss_rule);
+ if (err)
+ goto err_ct_zone_ht;
+
ct_priv->post_act = post_act;
mutex_init(&ct_priv->control_lock);
if (rhashtable_init(&ct_priv->zone_ht, &zone_params))
@@ -2273,6 +2372,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv)
ct_priv->fs_ops->destroy(ct_priv->fs);
kfree(ct_priv->fs);
+ tc_ct_del_ct_table_miss_rule(ct_priv->ct_nat_miss_group, ct_priv->ct_nat_miss_rule);
mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
mapping_destroy(ct_priv->zone_mapping);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index 8dfb57f712b0..721f35e59757 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -850,6 +850,12 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev,
flow_rule_match_enc_control(rule, &match);
addr_type = match.key->addr_type;
+ if (flow_rule_has_enc_control_flags(match.mask->flags,
+ extack)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
 /* For tunnels, addr_type uses the same key IDs as for non-tunnel traffic */
if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_match_ipv4_addrs match;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 92065568bb19..6873c1201803 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -117,7 +117,7 @@ bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b,
- __be16 tun_flags);
+ u32 tun_type);
#endif /* CONFIG_MLX5_ESWITCH */
#endif //__MLX5_EN_TC_TUNNEL_H__
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index f1d1e1542e81..878cbdbf5ec8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -587,7 +587,7 @@ bool mlx5e_tc_tun_encap_info_equal_generic(struct mlx5e_encap_key *a,
bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b,
- __be16 tun_flags)
+ u32 tun_type)
{
struct ip_tunnel_info *a_info;
struct ip_tunnel_info *b_info;
@@ -596,8 +596,8 @@ bool mlx5e_tc_tun_encap_info_equal_options(struct mlx5e_encap_key *a,
if (!mlx5e_tc_tun_encap_info_equal_generic(a, b))
return false;
- a_has_opts = !!(a->ip_tun_key->tun_flags & tun_flags);
- b_has_opts = !!(b->ip_tun_key->tun_flags & tun_flags);
+ a_has_opts = test_bit(tun_type, a->ip_tun_key->tun_flags);
+ b_has_opts = test_bit(tun_type, b->ip_tun_key->tun_flags);
/* keys are equal when both don't have any options attached */
if (!a_has_opts && !b_has_opts)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
index 2bcd10b6d653..bf969212cc77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_geneve.c
@@ -106,12 +106,13 @@ static int mlx5e_gen_ip_tunnel_header_geneve(char buf[],
memset(geneveh, 0, sizeof(*geneveh));
geneveh->ver = MLX5E_GENEVE_VER;
geneveh->opt_len = tun_info->options_len / 4;
- geneveh->oam = !!(tun_info->key.tun_flags & TUNNEL_OAM);
- geneveh->critical = !!(tun_info->key.tun_flags & TUNNEL_CRIT_OPT);
+ geneveh->oam = test_bit(IP_TUNNEL_OAM_BIT, tun_info->key.tun_flags);
+ geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT,
+ tun_info->key.tun_flags);
mlx5e_tunnel_id_to_vni(tun_info->key.tun_id, geneveh->vni);
geneveh->proto_type = htons(ETH_P_TEB);
- if (tun_info->key.tun_flags & TUNNEL_GENEVE_OPT) {
+ if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, tun_info->key.tun_flags)) {
if (!geneveh->opt_len)
return -EOPNOTSUPP;
ip_tunnel_info_opts_get(geneveh->options, tun_info);
@@ -188,7 +189,7 @@ static int mlx5e_tc_tun_parse_geneve_options(struct mlx5e_priv *priv,
/* make sure that we're talking about GENEVE options */
- if (enc_opts.key->dst_opt_type != TUNNEL_GENEVE_OPT) {
+ if (enc_opts.key->dst_opt_type != IP_TUNNEL_GENEVE_OPT_BIT) {
NL_SET_ERR_MSG_MOD(extack,
"Matching on GENEVE options: option type is not GENEVE");
netdev_warn(priv->netdev,
@@ -337,7 +338,8 @@ static int mlx5e_tc_tun_parse_geneve(struct mlx5e_priv *priv,
static bool mlx5e_tc_tun_encap_info_equal_geneve(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b)
{
- return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_GENEVE_OPT);
+ return mlx5e_tc_tun_encap_info_equal_options(a, b,
+ IP_TUNNEL_GENEVE_OPT_BIT);
}
struct mlx5e_tc_tunnel geneve_tunnel = {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
index ada14f0574dc..579eda89fc76 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_gre.c
@@ -31,12 +31,16 @@ static int mlx5e_gen_ip_tunnel_header_gretap(char buf[],
const struct ip_tunnel_key *tun_key = &e->tun_info->key;
struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf);
__be32 tun_id = tunnel_id_to_key32(tun_key->tun_id);
+ IP_TUNNEL_DECLARE_FLAGS(unsupp) = { };
int hdr_len;
*ip_proto = IPPROTO_GRE;
/* the HW does not calculate GRE csum or sequences */
- if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ))
+ __set_bit(IP_TUNNEL_CSUM_BIT, unsupp);
+ __set_bit(IP_TUNNEL_SEQ_BIT, unsupp);
+
+ if (ip_tunnel_flags_intersect(tun_key->tun_flags, unsupp))
return -EOPNOTSUPP;
greh->protocol = htons(ETH_P_TEB);
@@ -44,7 +48,7 @@ static int mlx5e_gen_ip_tunnel_header_gretap(char buf[],
/* GRE key */
hdr_len = mlx5e_tc_tun_calc_hlen_gretap(e);
greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags);
- if (tun_key->tun_flags & TUNNEL_KEY) {
+ if (test_bit(IP_TUNNEL_KEY_BIT, tun_key->tun_flags)) {
__be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4);
*ptr = tun_id;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index a184d739d5f8..e4e487c8431b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -90,7 +90,7 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
const struct vxlan_metadata *md;
struct vxlanhdr *vxh;
- if ((tun_key->tun_flags & TUNNEL_VXLAN_OPT) &&
+ if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags) &&
e->tun_info->options_len != sizeof(*md))
return -EOPNOTSUPP;
vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
@@ -99,7 +99,7 @@ static int mlx5e_gen_ip_tunnel_header_vxlan(char buf[],
udp->dest = tun_key->tp_dst;
vxh->vx_flags = VXLAN_HF_VNI;
vxh->vx_vni = vxlan_vni_field(tun_id);
- if (tun_key->tun_flags & TUNNEL_VXLAN_OPT) {
+ if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, tun_key->tun_flags)) {
md = ip_tunnel_info_opts(e->tun_info);
vxlan_build_gbp_hdr(vxh, md);
}
@@ -125,7 +125,7 @@ static int mlx5e_tc_tun_parse_vxlan_gbp_option(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (enc_opts.key->dst_opt_type != TUNNEL_VXLAN_OPT) {
+ if (enc_opts.key->dst_opt_type != IP_TUNNEL_VXLAN_OPT_BIT) {
NL_SET_ERR_MSG_MOD(extack, "Wrong VxLAN option type: not GBP");
return -EOPNOTSUPP;
}
@@ -208,7 +208,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
static bool mlx5e_tc_tun_encap_info_equal_vxlan(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b)
{
- return mlx5e_tc_tun_encap_info_equal_options(a, b, TUNNEL_VXLAN_OPT);
+ return mlx5e_tc_tun_encap_info_equal_options(a, b,
+ IP_TUNNEL_VXLAN_OPT_BIT);
}
static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 879d698b6119..5ec468268d1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -6,6 +6,8 @@
#include "en.h"
#include <linux/indirect_call_wrapper.h>
+#include <net/ip6_checksum.h>
+#include <net/tcp.h>
#define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS)
@@ -34,6 +36,25 @@
#define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)
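+/* KSM UMR WQE sizing: the base UMR WQE followed by an array of KSM
+ * entries, expressed below in bytes, WQEBBs and DS units
+ */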
+#define MLX5E_KSM_UMR_WQE_SZ(sgl_len)\
+ (sizeof(struct mlx5e_umr_wqe) +\
+ (sizeof(struct mlx5_ksm) * (sgl_len)))
+
+#define MLX5E_KSM_UMR_WQEBBS(ksm_entries) \
+ (DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_BB))
+
+#define MLX5E_KSM_UMR_DS_CNT(ksm_entries)\
+ (DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_DS))
+
+#define MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size)\
+ (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_ksm))
+
+#define MLX5E_KSM_ENTRIES_PER_WQE(wqe_size)\
+ ALIGN_DOWN(MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)
+
+#define MLX5E_MAX_KSM_PER_WQE(mdev) \
+ MLX5E_KSM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
+
static inline
ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts)
{
@@ -460,6 +481,41 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
}
}
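+/* For GSO over SWP, pre-seed the inner L4 checksum field with a
+ * pseudo-header checksum over the per-segment length so HW with
+ * swp_csum_l4_partial can complete it for each segment
+ */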
+static inline void
+mlx5e_swp_encap_csum_partial(struct mlx5_core_dev *mdev, struct sk_buff *skb, bool tunnel)
+{
+ const struct iphdr *ip = tunnel ? inner_ip_hdr(skb) : ip_hdr(skb);
+ const struct ipv6hdr *ip6;
+ struct tcphdr *th;
+ struct udphdr *uh;
+ int len;
+
+ if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial) || !skb_is_gso(skb))
+ return;
+
+ if (skb_is_gso_tcp(skb)) {
+ th = inner_tcp_hdr(skb);
+ len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb);
+
+ if (ip->version == 4) {
+ th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0);
+ } else {
+ ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+ th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
+ }
+ } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ uh = (struct udphdr *)skb_inner_transport_header(skb);
+ len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+
+ if (ip->version == 4) {
+ uh->check = ~udp_v4_check(len, ip->saddr, ip->daddr, 0);
+ } else {
+ ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+ uh->check = ~udp_v6_check(len, &ip6->saddr, &ip6->daddr, 0);
+ }
+ }
+}
+
#define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1)
static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index 82b5ca1be4f3..4610621a340e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -565,7 +565,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
linear = !!(dma_len - inline_hdr_sz);
ds_cnt = MLX5E_TX_WQE_EMPTY_DS_COUNT + linear + !!inline_hdr_sz;
- /* check_result must be 0 if sinfo is passed. */
+ /* check_result must be 0 if xdptxd->has_frags is true. */
if (!check_result) {
int stop_room = 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
index b8dd74453655..1b7132fa70de 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
@@ -270,7 +270,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
mxbuf->cqe = cqe;
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp);
net_prefetch(mxbuf->xdp.data);
/* Possible flows:
@@ -319,7 +319,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
/* mxbuf->rq is set on allocation, but cqe is per-packet so set it here */
mxbuf->cqe = cqe;
xsk_buff_set_size(&mxbuf->xdp, cqe_bcnt);
- xsk_buff_dma_sync_for_cpu(&mxbuf->xdp, rq->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(&mxbuf->xdp);
net_prefetch(mxbuf->xdp.data);
prog = rcu_dereference(rq->xdp_prog);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
index 06592b9f0424..9240cfe25d10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
@@ -28,8 +28,10 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5_core_dev *mdev)
{
- /* AF_XDP doesn't support frames larger than PAGE_SIZE. */
- if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
+ /* AF_XDP doesn't support frames larger than PAGE_SIZE,
+ * and xsk->chunk_size is limited to 65535 bytes.
+ */
+ if ((size_t)xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index caa34b9c161e..33e32584b07f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -102,8 +102,14 @@ static inline void
mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
{
int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
+ struct udphdr *udphdr;
- udp_hdr(skb)->len = htons(payload_len);
+ if (skb->encapsulation)
+ udphdr = (struct udphdr *)skb_inner_transport_header(skb);
+ else
+ udphdr = udp_hdr(skb);
+
+ udphdr->len = htons(payload_len);
}
struct mlx5e_accel_tx_state {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
index c7d191f66ad1..4f83e3172767 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
@@ -73,7 +73,7 @@ void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule)
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
- uint32_t flow_tag)
+ u32 flow_tag)
{
struct mlx5e_accel_fs_tcp *fs_tcp = mlx5e_fs_get_accel_tcp(fs);
struct mlx5_flow_destination dest = {};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
index a032bff482a6..7e899c716267 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.h
@@ -11,14 +11,14 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs);
void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs);
struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
- uint32_t flow_tag);
+ u32 flow_tag);
void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule);
#else
static inline int mlx5e_accel_fs_tcp_create(struct mlx5e_flow_steering *fs) { return 0; }
static inline void mlx5e_accel_fs_tcp_destroy(struct mlx5e_flow_steering *fs) {}
static inline struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_flow_steering *fs,
struct sock *sk, u32 tirn,
- uint32_t flow_tag)
+ u32 flow_tag)
{ return ERR_PTR(-EOPNOTSUPP); }
static inline void mlx5e_accel_fs_del_sk(struct mlx5_flow_handle *rule) {}
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
index c54fd01ea635..3d274599015b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
@@ -989,7 +989,12 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
struct net *net = dev_net(x->xso.dev);
+ u64 trailer_packets = 0, trailer_bytes = 0;
+ u64 replay_packets = 0, replay_bytes = 0;
+ u64 auth_packets = 0, auth_bytes = 0;
+ u64 success_packets, success_bytes;
u64 packets, bytes, lastuse;
+ size_t headers;
lockdep_assert(lockdep_is_held(&x->lock) ||
lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex) ||
@@ -999,26 +1004,43 @@ static void mlx5e_xfrm_update_stats(struct xfrm_state *x)
return;
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
- mlx5_fc_query_cached(ipsec_rule->auth.fc, &bytes, &packets, &lastuse);
- x->stats.integrity_failed += packets;
- XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, packets);
-
- mlx5_fc_query_cached(ipsec_rule->trailer.fc, &bytes, &packets, &lastuse);
- XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, packets);
+ mlx5_fc_query_cached(ipsec_rule->auth.fc, &auth_bytes,
+ &auth_packets, &lastuse);
+ x->stats.integrity_failed += auth_packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATEPROTOERROR, auth_packets);
+
+ mlx5_fc_query_cached(ipsec_rule->trailer.fc, &trailer_bytes,
+ &trailer_packets, &lastuse);
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINHDRERROR, trailer_packets);
}
if (x->xso.type != XFRM_DEV_OFFLOAD_PACKET)
return;
- mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
- x->curlft.packets += packets;
- x->curlft.bytes += bytes;
-
if (sa_entry->attrs.dir == XFRM_DEV_OFFLOAD_IN) {
- mlx5_fc_query_cached(ipsec_rule->replay.fc, &bytes, &packets, &lastuse);
- x->stats.replay += packets;
- XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, packets);
+ mlx5_fc_query_cached(ipsec_rule->replay.fc, &replay_bytes,
+ &replay_packets, &lastuse);
+ x->stats.replay += replay_packets;
+ XFRM_ADD_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR, replay_packets);
}
+
+ mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
+ success_packets = packets - auth_packets - trailer_packets - replay_packets;
+ x->curlft.packets += success_packets;
+	/* The NIC counts all bytes passed through flow steering and has no
+	 * way to count only the payload size, which is what the SA needs.
+	 *
+	 * To overcome this HW limitation, approximate the payload size by
+	 * subtracting the always-present headers.
+	 */
+ headers = sizeof(struct ethhdr);
+ if (sa_entry->attrs.family == AF_INET)
+ headers += sizeof(struct iphdr);
+ else
+ headers += sizeof(struct ipv6hdr);
+
+ success_bytes = bytes - auth_bytes - trailer_bytes - replay_bytes;
+ x->curlft.bytes += success_bytes - headers * success_packets;
}
static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
index 41a2543a52cd..e51b03d4c717 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
@@ -750,8 +750,7 @@ err_fs:
err_fs_ft:
if (rx->allow_tunnel_mode)
mlx5_eswitch_unblock_encap(mdev);
- mlx5_del_flow_rules(rx->status.rule);
- mlx5_modify_header_dealloc(mdev, rx->status.modify_hdr);
+ mlx5_ipsec_rx_status_destroy(ipsec, rx);
err_add:
mlx5_destroy_flow_table(rx->ft.status);
err_fs_ft_status:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
index 6e00afe4671b..797db853de36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
@@ -51,9 +51,10 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
- if ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
- MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
- MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level))
+ if (IS_ENABLED(CONFIG_MLX5_CLS_ACT) &&
+ ((MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level)) ||
+ MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, ignore_flow_level)))
caps |= MLX5_IPSEC_CAP_PRIO;
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 82064614846f..3cc640669247 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -97,18 +97,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
if (!x || !x->xso.offload_handle)
goto out_disable;
- if (xo->inner_ipproto) {
- /* Cannot support tunnel packet over IPsec tunnel mode
- * because we cannot offload three IP header csum
- */
- if (x->props.mode == XFRM_MODE_TUNNEL)
- goto out_disable;
-
- /* Only support UDP or TCP L4 checksum */
- if (xo->inner_ipproto != IPPROTO_UDP &&
- xo->inner_ipproto != IPPROTO_TCP)
- goto out_disable;
- }
+ /* Only support UDP or TCP L4 checksum */
+ if (xo->inner_ipproto &&
+ xo->inner_ipproto != IPPROTO_UDP &&
+ xo->inner_ipproto != IPPROTO_TCP)
+ goto out_disable;
return features;
@@ -123,6 +116,7 @@ static inline bool
mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5_wqe_eth_seg *eseg)
{
+ struct mlx5_core_dev *mdev = sq->mdev;
u8 inner_ipproto;
if (!mlx5e_ipsec_eseg_meta(eseg))
@@ -132,9 +126,12 @@ mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
inner_ipproto = xfrm_offload(skb)->inner_ipproto;
if (inner_ipproto) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
- if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+ if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP) {
+ mlx5e_swp_encap_csum_partial(mdev, skb, true);
eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
+ }
} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ mlx5e_swp_encap_csum_partial(mdev, skb, false);
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial_inner++;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
index dd36b04e30a0..92bf3fa44a3b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c
@@ -78,13 +78,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_hw)
unsigned int i;
if (!priv->ipsec)
- return idx;
+ return;
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_hw_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, mlx5e_ipsec_hw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
@@ -92,14 +89,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_hw)
int i;
if (!priv->ipsec)
- return idx;
+ return;
mlx5e_accel_ipsec_fs_read_stats(priv, &priv->ipsec->hw_stats);
for (i = 0; i < NUM_IPSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
- mlx5e_ipsec_hw_stats_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->hw_stats,
+ mlx5e_ipsec_hw_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw)
@@ -115,9 +112,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ipsec_sw)
if (priv->ipsec)
for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ipsec_sw_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, mlx5e_ipsec_sw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
@@ -126,9 +121,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw)
if (priv->ipsec)
for (i = 0; i < NUM_IPSEC_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->ipsec->sw_stats,
- mlx5e_ipsec_sw_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR_ATOMIC64(
+ &priv->ipsec->sw_stats,
+ mlx5e_ipsec_sw_stats_desc, i));
}
MLX5E_DEFINE_STATS_GRP(ipsec_hw, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
index adc6d8ea0960..07a04a142a2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
@@ -95,8 +95,8 @@ int mlx5e_ktls_init(struct mlx5e_priv *priv);
void mlx5e_ktls_cleanup(struct mlx5e_priv *priv);
int mlx5e_ktls_get_count(struct mlx5e_priv *priv);
-int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data);
-int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data);
+void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data);
+void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data);
#else
static inline void mlx5e_ktls_build_netdev(struct mlx5e_priv *priv)
@@ -144,15 +144,9 @@ static inline bool mlx5e_is_ktls_rx(struct mlx5_core_dev *mdev)
static inline int mlx5e_ktls_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) { }
static inline int mlx5e_ktls_get_count(struct mlx5e_priv *priv) { return 0; }
-static inline int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
-{
- return 0;
-}
+static inline void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data) { }
-static inline int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
-{
- return 0;
-}
+static inline void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data) { }
#endif
#endif /* __MLX5E_TLS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
index 7c1c0eb16787..60be2d72eb9e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_stats.c
@@ -58,35 +58,31 @@ int mlx5e_ktls_get_count(struct mlx5e_priv *priv)
return ARRAY_SIZE(mlx5e_ktls_sw_stats_desc);
}
-int mlx5e_ktls_get_strings(struct mlx5e_priv *priv, uint8_t *data)
+void mlx5e_ktls_get_strings(struct mlx5e_priv *priv, u8 **data)
{
- unsigned int i, n, idx = 0;
+ unsigned int i, n;
if (!priv->tls)
- return 0;
+ return;
n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_ktls_sw_stats_desc[i].format);
-
- return n;
+ ethtool_puts(data, mlx5e_ktls_sw_stats_desc[i].format);
}
-int mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 *data)
+void mlx5e_ktls_get_stats(struct mlx5e_priv *priv, u64 **data)
{
- unsigned int i, n, idx = 0;
+ unsigned int i, n;
if (!priv->tls)
- return 0;
+ return;
n = mlx5e_ktls_get_count(priv);
for (i = 0; i < n; i++)
- data[idx++] = MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
- mlx5e_ktls_sw_stats_desc,
- i);
-
- return n;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR_ATOMIC64(&priv->tls->sw_stats,
+ mlx5e_ktls_sw_stats_desc, i));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
index 4559ee16a11a..4bb47d48061d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_stats.c
@@ -38,16 +38,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(macsec_hw)
unsigned int i;
if (!priv->macsec)
- return idx;
+ return;
if (!mlx5e_is_macsec_device(priv->mdev))
- return idx;
+ return;
for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- mlx5e_macsec_hw_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, mlx5e_macsec_hw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
@@ -56,19 +53,18 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(macsec_hw)
int i;
if (!priv->macsec)
- return idx;
+ return;
if (!mlx5e_is_macsec_device(priv->mdev))
- return idx;
+ return;
macsec_fs = priv->mdev->macsec_fs;
mlx5_macsec_fs_get_stats_fill(macsec_fs, mlx5_macsec_fs_get_stats(macsec_fs));
for (i = 0; i < NUM_MACSEC_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_macsec_fs_get_stats(macsec_fs),
- mlx5e_macsec_hw_stats_desc,
- i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ mlx5_macsec_fs_get_stats(macsec_fs),
+ mlx5e_macsec_hw_stats_desc, i));
}
MLX5E_DEFINE_STATS_GRP(macsec_hw, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
index ca9cfbf57d8f..298bb74ec5e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c
@@ -30,21 +30,22 @@
* SOFTWARE.
*/
-#include <linux/dim.h>
#include "en.h"
+#include "en/dim.h"
static void
mlx5e_complete_dim_work(struct dim *dim, struct dim_cq_moder moder,
struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq)
{
- mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts);
+ mlx5e_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts,
+ mlx5e_cq_period_mode(moder.cq_period_mode));
dim->state = DIM_START_MEASURE;
}
void mlx5e_rx_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
- struct mlx5e_rq *rq = container_of(dim, struct mlx5e_rq, dim);
+ struct mlx5e_rq *rq = dim->priv;
struct dim_cq_moder cur_moder =
net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
@@ -54,9 +55,95 @@ void mlx5e_rx_dim_work(struct work_struct *work)
void mlx5e_tx_dim_work(struct work_struct *work)
{
struct dim *dim = container_of(work, struct dim, work);
- struct mlx5e_txqsq *sq = container_of(dim, struct mlx5e_txqsq, dim);
+ struct mlx5e_txqsq *sq = dim->priv;
struct dim_cq_moder cur_moder =
net_dim_get_tx_moderation(dim->mode, dim->profile_ix);
mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);
}
+
+static struct dim *mlx5e_dim_enable(struct mlx5_core_dev *mdev,
+ void (*work_fun)(struct work_struct *), int cpu,
+ u8 cq_period_mode, struct mlx5_core_cq *mcq,
+ void *queue)
+{
+ struct dim *dim;
+ int err;
+
+ dim = kvzalloc_node(sizeof(*dim), GFP_KERNEL, cpu_to_node(cpu));
+ if (!dim)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&dim->work, work_fun);
+
+ dim->mode = cq_period_mode;
+ dim->priv = queue;
+
+ err = mlx5e_modify_cq_period_mode(mdev, mcq, dim->mode);
+ if (err) {
+ kvfree(dim);
+ return ERR_PTR(err);
+ }
+
+ return dim;
+}
+
+static void mlx5e_dim_disable(struct dim *dim)
+{
+ cancel_work_sync(&dim->work);
+ kvfree(dim);
+}
+
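+/* Enable or disable adaptive moderation (DIM) on an RQ at runtime;
+ * a no-op when the requested state already matches
+ */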
+int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable)
+{
+ if (enable == !!rq->dim)
+ return 0;
+
+ if (enable) {
+ struct mlx5e_channel *c = rq->channel;
+ struct dim *dim;
+
+ dim = mlx5e_dim_enable(rq->mdev, mlx5e_rx_dim_work, c->cpu,
+ c->rx_cq_moder.cq_period_mode, &rq->cq.mcq, rq);
+ if (IS_ERR(dim))
+ return PTR_ERR(dim);
+
+ rq->dim = dim;
+
+ __set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
+ } else {
+ __clear_bit(MLX5E_RQ_STATE_DIM, &rq->state);
+
+ mlx5e_dim_disable(rq->dim);
+ rq->dim = NULL;
+ }
+
+ return 0;
+}
+
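+/* SQ counterpart of mlx5e_dim_rx_change() */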
+int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable)
+{
+ if (enable == !!sq->dim)
+ return 0;
+
+ if (enable) {
+ struct mlx5e_channel *c = sq->channel;
+ struct dim *dim;
+
+ dim = mlx5e_dim_enable(sq->mdev, mlx5e_tx_dim_work, c->cpu,
+ c->tx_cq_moder.cq_period_mode, &sq->cq.mcq, sq);
+ if (IS_ERR(dim))
+ return PTR_ERR(dim);
+
+ sq->dim = dim;
+
+ __set_bit(MLX5E_SQ_STATE_DIM, &sq->state);
+ } else {
+ __clear_bit(MLX5E_SQ_STATE_DIM, &sq->state);
+
+ mlx5e_dim_disable(sq->dim);
+ sq->dim = NULL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 67a29826bb57..36845872ae94 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -30,9 +30,12 @@
* SOFTWARE.
*/
+#include <linux/dim.h>
#include <linux/ethtool_netlink.h>
#include "en.h"
+#include "en/channels.h"
+#include "en/dim.h"
#include "en/port.h"
#include "en/params.h"
#include "en/ptp.h"
@@ -219,6 +222,13 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT,
ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_800GAUI_8_800GBASE_CR8_KR8, ext,
+ ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseDR8_2_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseSR8_Full_BIT,
+ ETHTOOL_LINK_MODE_800000baseVR8_Full_BIT);
}
static void mlx5e_ethtool_get_speed_arr(struct mlx5_core_dev *mdev,
@@ -269,8 +279,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < MLX5E_NUM_PFLAGS; i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- mlx5e_priv_flags[i].name);
+ ethtool_puts(&data, mlx5e_priv_flags[i].name);
break;
case ETH_SS_TEST:
@@ -516,7 +525,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- arfs_enabled = opened && (priv->netdev->features & NETIF_F_NTUPLE);
+ arfs_enabled = opened && mlx5e_fs_want_arfs(priv->netdev);
if (arfs_enabled)
mlx5e_arfs_disable(priv->fs);
@@ -559,16 +568,13 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
coal->rx_coalesce_usecs = rx_moder->usec;
coal->rx_max_coalesced_frames = rx_moder->pkts;
coal->use_adaptive_rx_coalesce = priv->channels.params.rx_dim_enabled;
+ kernel_coal->use_cqe_mode_rx = priv->channels.params.rx_moder_use_cqe_mode;
tx_moder = &priv->channels.params.tx_cq_moderation;
coal->tx_coalesce_usecs = tx_moder->usec;
coal->tx_max_coalesced_frames = tx_moder->pkts;
coal->use_adaptive_tx_coalesce = priv->channels.params.tx_dim_enabled;
-
- kernel_coal->use_cqe_mode_rx =
- MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_BASED_MODER);
- kernel_coal->use_cqe_mode_tx =
- MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_CQE_BASED_MODER);
+ kernel_coal->use_cqe_mode_tx = priv->channels.params.tx_moder_use_cqe_mode;
return 0;
}
@@ -583,11 +589,73 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
return mlx5e_ethtool_get_coalesce(priv, coal, kernel_coal);
}
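+/* Per-queue get_coalesce: report the live DIM profile when adaptive
+ * moderation is on, otherwise the stored static moderation of channel "queue"
+ */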
+static int mlx5e_ethtool_get_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct dim_cq_moder cur_moder;
+ struct mlx5e_channels *chs;
+ struct mlx5e_channel *c;
+
+ if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&priv->state_lock);
+
+ chs = &priv->channels;
+ if (chs->num <= queue) {
+ mutex_unlock(&priv->state_lock);
+ return -EINVAL;
+ }
+
+ c = chs->c[queue];
+
+ coal->use_adaptive_rx_coalesce = !!c->rq.dim;
+ if (coal->use_adaptive_rx_coalesce) {
+ cur_moder = net_dim_get_rx_moderation(c->rq.dim->mode,
+ c->rq.dim->profile_ix);
+
+ coal->rx_coalesce_usecs = cur_moder.usec;
+ coal->rx_max_coalesced_frames = cur_moder.pkts;
+ } else {
+ coal->rx_coalesce_usecs = c->rx_cq_moder.usec;
+ coal->rx_max_coalesced_frames = c->rx_cq_moder.pkts;
+ }
+
+ coal->use_adaptive_tx_coalesce = !!c->sq[0].dim;
+ if (coal->use_adaptive_tx_coalesce) {
+		/* NOTE: only the DIM coalesce profile of the first SQ (tc 0) is
+		 * reported; the current interface cannot report it for every tc.
+		 */
+ cur_moder = net_dim_get_tx_moderation(c->sq[0].dim->mode,
+ c->sq[0].dim->profile_ix);
+
+ coal->tx_coalesce_usecs = cur_moder.usec;
+ coal->tx_max_coalesced_frames = cur_moder.pkts;
+
+ } else {
+ coal->tx_coalesce_usecs = c->tx_cq_moder.usec;
+ coal->tx_max_coalesced_frames = c->tx_cq_moder.pkts;
+ }
+
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+int mlx5e_get_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_get_per_queue_coalesce(priv, queue, coal);
+}
+
#define MLX5E_MAX_COAL_TIME MLX5_MAX_CQ_PERIOD
#define MLX5E_MAX_COAL_FRAMES MLX5_MAX_CQ_COUNT
static void
-mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder)
{
int tc;
int i;
@@ -595,38 +663,35 @@ mlx5e_set_priv_channels_tx_coalesce(struct mlx5e_priv *priv, struct ethtool_coal
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
struct mlx5_core_dev *mdev = c->mdev;
+ enum mlx5_cq_period_mode mode;
+
+ mode = mlx5e_cq_period_mode(moder->cq_period_mode);
+ c->tx_cq_moder = *moder;
for (tc = 0; tc < c->num_tc; tc++) {
- mlx5_core_modify_cq_moderation(mdev,
- &c->sq[tc].cq.mcq,
- coal->tx_coalesce_usecs,
- coal->tx_max_coalesced_frames);
+ mlx5e_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq,
+ moder->usec, moder->pkts,
+ mode);
}
}
}
static void
-mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal)
+mlx5e_set_priv_channels_rx_coalesce(struct mlx5e_priv *priv, struct dim_cq_moder *moder)
{
int i;
for (i = 0; i < priv->channels.num; ++i) {
struct mlx5e_channel *c = priv->channels.c[i];
struct mlx5_core_dev *mdev = c->mdev;
+ enum mlx5_cq_period_mode mode;
- mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
- coal->rx_coalesce_usecs,
- coal->rx_max_coalesced_frames);
- }
-}
+ mode = mlx5e_cq_period_mode(moder->cq_period_mode);
+ c->rx_cq_moder = *moder;
-/* convert a boolean value of cq_mode to mlx5 period mode
- * true : MLX5_CQ_PERIOD_MODE_START_FROM_CQE
- * false : MLX5_CQ_PERIOD_MODE_START_FROM_EQE
- */
-static int cqe_mode_to_period_mode(bool val)
-{
- return val ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ mlx5e_modify_cq_moderation(mdev, &c->rq.cq.mcq, moder->usec, moder->pkts,
+ mode);
+ }
}
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
@@ -636,13 +701,14 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
{
struct dim_cq_moder *rx_moder, *tx_moder;
struct mlx5_core_dev *mdev = priv->mdev;
+ bool rx_dim_enabled, tx_dim_enabled;
struct mlx5e_params new_params;
bool reset_rx, reset_tx;
- bool reset = true;
u8 cq_period_mode;
int err = 0;
- if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ if (!MLX5_CAP_GEN(mdev, cq_moderation) ||
+ !MLX5_CAP_GEN(mdev, cq_period_mode_modify))
return -EOPNOTSUPP;
if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
@@ -665,60 +731,70 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+ tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
+
mutex_lock(&priv->state_lock);
new_params = priv->channels.params;
- rx_moder = &new_params.rx_cq_moderation;
- rx_moder->usec = coal->rx_coalesce_usecs;
- rx_moder->pkts = coal->rx_max_coalesced_frames;
- new_params.rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+ cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_rx);
+ reset_rx = mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode,
+ rx_dim_enabled, false);
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER, cq_period_mode);
- tx_moder = &new_params.tx_cq_moderation;
- tx_moder->usec = coal->tx_coalesce_usecs;
- tx_moder->pkts = coal->tx_max_coalesced_frames;
- new_params.tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
+ cq_period_mode = mlx5e_dim_cq_period_mode(kernel_coal->use_cqe_mode_tx);
+ reset_tx = mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode,
+ tx_dim_enabled, false);
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER, cq_period_mode);
- reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled;
- reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled;
+ reset_rx |= rx_dim_enabled != new_params.rx_dim_enabled;
+ reset_tx |= tx_dim_enabled != new_params.tx_dim_enabled;
- cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_rx);
- if (cq_period_mode != rx_moder->cq_period_mode) {
- mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
- reset_rx = true;
- }
+ /* Solely used for global ethtool get coalesce */
+ rx_moder = &new_params.rx_cq_moderation;
+ new_params.rx_dim_enabled = rx_dim_enabled;
+ new_params.rx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_rx;
- cq_period_mode = cqe_mode_to_period_mode(kernel_coal->use_cqe_mode_tx);
- if (cq_period_mode != tx_moder->cq_period_mode) {
- mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
- reset_tx = true;
- }
+ tx_moder = &new_params.tx_cq_moderation;
+ new_params.tx_dim_enabled = tx_dim_enabled;
+ new_params.tx_moder_use_cqe_mode = kernel_coal->use_cqe_mode_tx;
if (reset_rx) {
- u8 mode = MLX5E_GET_PFLAG(&new_params,
- MLX5E_PFLAG_RX_CQE_BASED_MODER);
+ mlx5e_channels_rx_change_dim(&priv->channels, false);
+ mlx5e_reset_rx_moderation(rx_moder, new_params.rx_moder_use_cqe_mode,
+ rx_dim_enabled);
+
+ mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder);
+ } else if (!rx_dim_enabled) {
+ rx_moder->usec = coal->rx_coalesce_usecs;
+ rx_moder->pkts = coal->rx_max_coalesced_frames;
- mlx5e_reset_rx_moderation(&new_params, mode);
+ mlx5e_set_priv_channels_rx_coalesce(priv, rx_moder);
}
+
if (reset_tx) {
- u8 mode = MLX5E_GET_PFLAG(&new_params,
- MLX5E_PFLAG_TX_CQE_BASED_MODER);
+ mlx5e_channels_tx_change_dim(&priv->channels, false);
+ mlx5e_reset_tx_moderation(tx_moder, new_params.tx_moder_use_cqe_mode,
+ tx_dim_enabled);
- mlx5e_reset_tx_moderation(&new_params, mode);
- }
+ mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder);
+ } else if (!tx_dim_enabled) {
+ tx_moder->usec = coal->tx_coalesce_usecs;
+ tx_moder->pkts = coal->tx_max_coalesced_frames;
- /* If DIM state hasn't changed, it's possible to modify interrupt
- * moderation parameters on the fly, even if the channels are open.
- */
- if (!reset_rx && !reset_tx && test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- if (!coal->use_adaptive_rx_coalesce)
- mlx5e_set_priv_channels_rx_coalesce(priv, coal);
- if (!coal->use_adaptive_tx_coalesce)
- mlx5e_set_priv_channels_tx_coalesce(priv, coal);
- reset = false;
+ mlx5e_set_priv_channels_tx_coalesce(priv, tx_moder);
}
- err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, reset);
+	/* Enable/disable DIM on the Rx and Tx channels */
+ err = mlx5e_channels_rx_change_dim(&priv->channels, rx_dim_enabled);
+ if (err)
+ goto state_unlock;
+ err = mlx5e_channels_tx_change_dim(&priv->channels, tx_dim_enabled);
+ if (err)
+ goto state_unlock;
+ err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false);
+state_unlock:
mutex_unlock(&priv->state_lock);
return err;
}
@@ -733,6 +809,88 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
return mlx5e_ethtool_set_coalesce(priv, coal, kernel_coal, extack);
}
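+/* Per-queue set_coalesce: toggle DIM and, when static moderation is
+ * requested, program the CQs of a single channel directly
+ */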
+static int mlx5e_ethtool_set_per_queue_coalesce(struct mlx5e_priv *priv, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool rx_dim_enabled, tx_dim_enabled;
+ struct mlx5e_channels *chs;
+ struct mlx5e_channel *c;
+ int err = 0;
+ int tc;
+
+ if (!MLX5_CAP_GEN(mdev, cq_moderation))
+ return -EOPNOTSUPP;
+
+ if (coal->tx_coalesce_usecs > MLX5E_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs > MLX5E_MAX_COAL_TIME) {
+ netdev_info(priv->netdev, "%s: maximum coalesce time supported is %lu usecs\n",
+ __func__, MLX5E_MAX_COAL_TIME);
+ return -ERANGE;
+ }
+
+ if (coal->tx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES ||
+ coal->rx_max_coalesced_frames > MLX5E_MAX_COAL_FRAMES) {
+ netdev_info(priv->netdev, "%s: maximum coalesced frames supported is %lu\n",
+ __func__, MLX5E_MAX_COAL_FRAMES);
+ return -ERANGE;
+ }
+
+ rx_dim_enabled = !!coal->use_adaptive_rx_coalesce;
+ tx_dim_enabled = !!coal->use_adaptive_tx_coalesce;
+
+ mutex_lock(&priv->state_lock);
+
+ chs = &priv->channels;
+ if (chs->num <= queue) {
+ mutex_unlock(&priv->state_lock);
+ return -EINVAL;
+ }
+
+ c = chs->c[queue];
+
+ err = mlx5e_dim_rx_change(&c->rq, rx_dim_enabled);
+ if (err)
+ goto state_unlock;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_dim_tx_change(&c->sq[tc], tx_dim_enabled);
+ if (err)
+ goto state_unlock;
+ }
+
+ if (!rx_dim_enabled) {
+ c->rx_cq_moder.usec = coal->rx_coalesce_usecs;
+ c->rx_cq_moder.pkts = coal->rx_max_coalesced_frames;
+
+ mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+ coal->rx_coalesce_usecs,
+ coal->rx_max_coalesced_frames);
+ }
+
+ if (!tx_dim_enabled) {
+ c->tx_cq_moder.usec = coal->tx_coalesce_usecs;
+ c->tx_cq_moder.pkts = coal->tx_max_coalesced_frames;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5_core_modify_cq_moderation(mdev, &c->sq[tc].cq.mcq,
+ coal->tx_coalesce_usecs,
+ coal->tx_max_coalesced_frames);
+ }
+
+state_unlock:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_per_queue_coalesce(priv, queue, coal);
+}
+
static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
unsigned long *supported_modes,
u32 eth_proto_cap)
@@ -1018,8 +1176,8 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
}
-int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
- struct ethtool_link_ksettings *link_ksettings)
+static int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {};
@@ -1037,7 +1195,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
bool ext;
int err;
- err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1, 0);
if (err) {
netdev_err(priv->netdev, "%s: query port ptys failed: %d\n",
__func__, err);
@@ -1189,8 +1347,8 @@ static bool ext_requested(u8 autoneg, const unsigned long *adver, bool ext_suppo
return autoneg == AUTONEG_ENABLE ? ext_link_mode : ext_supported;
}
-int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
- const struct ethtool_link_ksettings *link_ksettings)
+static int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
+ const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_port_eth_proto eproto;
@@ -1251,7 +1409,12 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
if (!an_changes && link_modes == eproto.admin)
goto out;
- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
+ err = mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext);
+ if (err) {
+ netdev_err(priv->netdev, "%s: failed to set ptys reg: %d\n", __func__, err);
+ goto out;
+ }
+
mlx5_toggle_port_link(mdev);
out:
@@ -1290,7 +1453,7 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
-int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
+static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
u32 rss_context = rxfh->rss_context;
@@ -1303,8 +1466,8 @@ int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
return err;
}
-int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
+static int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct mlx5e_priv *priv = netdev_priv(dev);
u32 *rss_context = &rxfh->rss_context;
@@ -1446,8 +1609,8 @@ static void mlx5e_get_pause_stats(struct net_device *netdev,
mlx5e_stats_pause_get(priv, pause_stats);
}
-void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
- struct ethtool_pauseparam *pauseparam)
+static void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
@@ -1468,8 +1631,8 @@ static void mlx5e_get_pauseparam(struct net_device *netdev,
mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}
-int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
- struct ethtool_pauseparam *pauseparam)
+static int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv,
+ struct ethtool_pauseparam *pauseparam)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
@@ -1500,7 +1663,7 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
}
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1524,7 +1687,7 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
}
static int mlx5e_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -1908,7 +2071,7 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
if (enable && !MLX5_CAP_GEN(priv->mdev, cq_period_start_from_cqe))
return -EOPNOTSUPP;
- cq_period_mode = cqe_mode_to_period_mode(enable);
+ cq_period_mode = mlx5e_dim_cq_period_mode(enable);
current_cq_period_mode = is_rx_cq ?
priv->channels.params.rx_cq_moderation.cq_period_mode :
@@ -1918,12 +2081,22 @@ static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
return 0;
new_params = priv->channels.params;
- if (is_rx_cq)
- mlx5e_set_rx_cq_mode_params(&new_params, cq_period_mode);
- else
- mlx5e_set_tx_cq_mode_params(&new_params, cq_period_mode);
+ if (is_rx_cq) {
+ mlx5e_reset_rx_channels_moderation(&priv->channels, cq_period_mode,
+ false, true);
+ mlx5e_channels_rx_toggle_dim(&priv->channels);
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ cq_period_mode);
+ } else {
+ mlx5e_reset_tx_channels_moderation(&priv->channels, cq_period_mode,
+ false, true);
+ mlx5e_channels_tx_toggle_dim(&priv->channels);
+ MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ cq_period_mode);
+ }
- return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true);
+ /* Update pflags of existing channels without resetting them */
+ return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, false);
}
static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
@@ -2124,7 +2297,7 @@ static int set_pflag_tx_port_ts(struct net_device *netdev, bool enable)
*/
err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_num_channels_changed_ctx, NULL, true);
+ mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
if (!err)
priv->tx_ptp_opened = true;
@@ -2422,6 +2595,14 @@ static void mlx5e_get_rmon_stats(struct net_device *netdev,
mlx5e_stats_rmon_get(priv, rmon_stats, ranges);
}
+static void mlx5e_get_ts_stats(struct net_device *netdev,
+ struct ethtool_ts_stats *ts_stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5e_stats_ts_get(priv, ts_stats);
+}
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.cap_rss_ctx_supported = true,
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
@@ -2440,6 +2621,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_channels = mlx5e_set_channels,
.get_coalesce = mlx5e_get_coalesce,
.set_coalesce = mlx5e_set_coalesce,
+ .get_per_queue_coalesce = mlx5e_get_per_queue_coalesce,
+ .set_per_queue_coalesce = mlx5e_set_per_queue_coalesce,
.get_link_ksettings = mlx5e_get_link_ksettings,
.set_link_ksettings = mlx5e_set_link_ksettings,
.get_rxfh_key_size = mlx5e_get_rxfh_key_size,
@@ -2471,5 +2654,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_eth_mac_stats = mlx5e_get_eth_mac_stats,
.get_eth_ctrl_stats = mlx5e_get_eth_ctrl_stats,
.get_rmon_stats = mlx5e_get_rmon_stats,
+ .get_ts_stats = mlx5e_get_ts_stats,
.get_link_ext_stats = mlx5e_get_link_ext_stats
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 777d311d44ef..05058710d2c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -896,8 +896,7 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_flow_steering *fs,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -920,8 +919,7 @@ void mlx5e_set_ttc_params(struct mlx5e_flow_steering *fs,
int tt;
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(fs->mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
ft_attr->level = MLX5E_TTC_FT_LEVEL;
ft_attr->prio = MLX5E_NIC_PRIO;
@@ -1309,8 +1307,7 @@ int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs,
return -EOPNOTSUPP;
mlx5e_fs_set_ns(fs, ns, false);
- err = mlx5e_arfs_create_tables(fs, rx_res,
- !!(netdev->hw_features & NETIF_F_NTUPLE));
+ err = mlx5e_arfs_create_tables(fs, rx_res, mlx5e_fs_has_arfs(netdev));
if (err) {
fs_err(fs, "Failed to create arfs tables, err=%d\n", err);
netdev->hw_features &= ~NETIF_F_NTUPLE;
@@ -1357,7 +1354,7 @@ err_destroy_ttc_table:
err_destroy_inner_ttc_table:
mlx5e_destroy_inner_ttc_table(fs);
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(fs, !!(netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_arfs_destroy_tables(fs, mlx5e_fs_has_arfs(netdev));
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 3eccdadc0357..773624bb2c5d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -734,7 +734,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
if (num_tuples <= 0) {
netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
__func__, num_tuples);
- return num_tuples;
+ return num_tuples < 0 ? num_tuples : -EINVAL;
}
eth_ft = get_flow_table(priv, fs, num_tuples);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 319930c04093..16b67c457b60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/dim.h>
#include <net/tc_act/tc_gact.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
@@ -38,11 +39,13 @@
#include <linux/debugfs.h>
#include <linux/if_bridge.h>
#include <linux/filter.h>
+#include <net/netdev_queues.h>
#include <net/page_pool/types.h>
#include <net/pkt_sched.h>
#include <net/xdp_sock_drv.h>
#include "eswitch.h"
#include "en.h"
+#include "en/dim.h"
#include "en/txrx.h"
#include "en_tc.h"
#include "en_rep.h"
@@ -72,6 +75,27 @@
#include "lib/devcom.h"
#include "lib/sd.h"
+static bool mlx5e_hw_gro_supported(struct mlx5_core_dev *mdev)
+{
+ if (!MLX5_CAP_GEN(mdev, shampo))
+ return false;
+
+	/* Our HW-GRO implementation relies on a "KSM Mkey" to map
+	 * the SHAMPO header buffer
+	 */
+ if (!MLX5_CAP_GEN(mdev, fixed_buffer_size))
+ return false;
+
+ if (!MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer_valid))
+ return false;
+
+ if (MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer) >
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE)
+ return false;
+
+ return true;
+}
+
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift,
enum mlx5e_mpwrq_umr_mode umr_mode)
{
@@ -502,8 +526,8 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
return err;
}
-static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
- u64 nentries,
+static int mlx5e_create_umr_ksm_mkey(struct mlx5_core_dev *mdev,
+ u64 nentries, u8 log_entry_size,
u32 *umr_mkey)
{
int inlen;
@@ -523,12 +547,13 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET(mkc, mkc, lw, 1);
MLX5_SET(mkc, mkc, lr, 1);
- MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KSM);
mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
MLX5_SET(mkc, mkc, translations_octword_size, nentries);
- MLX5_SET(mkc, mkc, length64, 1);
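+	/* KSM entries have a fixed size, so the mkey covers exactly
+	 * nentries << log_entry_size bytes
+	 */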
+ MLX5_SET(mkc, mkc, log_page_size, log_entry_size);
+ MLX5_SET64(mkc, mkc, len, nentries << log_entry_size);
err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
kvfree(in);
@@ -563,14 +588,16 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq
static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
struct mlx5e_rq *rq)
{
- u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+ u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
- if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) {
- mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
- max_klm_size, rq->mpwqe.shampo->hd_per_wq);
+ if (max_ksm_size < rq->mpwqe.shampo->hd_per_wq) {
+ mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
+ max_ksm_size, rq->mpwqe.shampo->hd_per_wq);
return -EINVAL;
}
- return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
+
+ return mlx5e_create_umr_ksm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
+ MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE,
&rq->mpwqe.shampo->mkey);
}
@@ -960,17 +987,6 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
}
}
- INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
-
- switch (params->rx_cq_moderation.cq_period_mode) {
- case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
- rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
- break;
- case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
- default:
- rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
- }
-
return 0;
err_destroy_page_pool:
@@ -1020,6 +1036,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
mlx5e_free_wqe_alloc_info(rq);
}
+ kvfree(rq->dim);
xdp_rxq_info_unreg(&rq->xdp_rxq);
page_pool_destroy(rq->page_pool);
mlx5_wq_destroy(&rq->wq_ctrl);
@@ -1216,18 +1233,17 @@ void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq)
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
}
+ rq->mpwqe.actual_wq_head = wq->head;
+ rq->mpwqe.umr_in_progress = 0;
+ rq->mpwqe.umr_completed = 0;
+
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 len;
- len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) &
- (rq->mpwqe.shampo->hd_per_wq - 1);
- mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false);
- rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci;
+		len = (shampo->pi - shampo->ci) & (shampo->hd_per_wq - 1);
+ mlx5e_shampo_fill_umr(rq, len);
}
-
- rq->mpwqe.actual_wq_head = wq->head;
- rq->mpwqe.umr_in_progress = 0;
- rq->mpwqe.umr_completed = 0;
}
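
Note (not part of the patch): the pi/ci arithmetic above relies on hd_per_wq
being a power of two, so the outstanding-entry count is a wraparound-safe
subtract-and-mask with hd_per_wq - 1. A standalone sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Entries outstanding in a power-of-two ring; correct even after the
     * 16-bit producer index wraps past the consumer index.
     */
    static uint16_t ring_fill(uint16_t pi, uint16_t ci, uint16_t size)
    {
            return (uint16_t)(pi - ci) & (size - 1);
    }

    int main(void)
    {
            /* pi wrapped: 3 - 65534 == 5 (mod 2^16), masked into a
             * 64-entry ring.
             */
            printf("%u\n", ring_fill(3, 65534, 64)); /* prints 5 */
            return 0;
    }
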
void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
@@ -1252,8 +1268,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
}
if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
- mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq,
- 0, true);
+ mlx5e_shampo_dealloc_hd(rq);
} else {
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
u16 missing = mlx5_wq_cyc_missing(wq);
@@ -1300,8 +1315,21 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
__set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
- if (params->rx_dim_enabled)
- __set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
+ if (rq->channel && !params->rx_dim_enabled) {
+ rq->channel->rx_cq_moder = params->rx_cq_moderation;
+ } else if (rq->channel) {
+ u8 cq_period_mode;
+
+ cq_period_mode = params->rx_moder_use_cqe_mode ?
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ mlx5e_reset_rx_moderation(&rq->channel->rx_cq_moder, cq_period_mode,
+ params->rx_dim_enabled);
+
+ err = mlx5e_dim_rx_change(rq, params->rx_dim_enabled);
+ if (err)
+ goto err_destroy_rq;
+ }
/* We disable csum_complete when XDP is enabled since
* XDP programs might manipulate packets which will render
@@ -1347,7 +1375,8 @@ void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- cancel_work_sync(&rq->dim.work);
+ if (rq->dim)
+ cancel_work_sync(&rq->dim->work);
cancel_work_sync(&rq->recover_work);
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
@@ -1623,9 +1652,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
if (err)
goto err_sq_wq_destroy;
- INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
- sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
-
return 0;
err_sq_wq_destroy:
@@ -1636,6 +1662,7 @@ err_sq_wq_destroy:
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
+ kvfree(sq->dim);
mlx5e_free_txqsq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
}
@@ -1791,11 +1818,27 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
if (tx_rate)
mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
- if (params->tx_dim_enabled)
- sq->state |= BIT(MLX5E_SQ_STATE_DIM);
+ if (sq->channel && !params->tx_dim_enabled) {
+ sq->channel->tx_cq_moder = params->tx_cq_moderation;
+ } else if (sq->channel) {
+ u8 cq_period_mode;
+
+ cq_period_mode = params->tx_moder_use_cqe_mode ?
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE :
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ mlx5e_reset_tx_moderation(&sq->channel->tx_cq_moder,
+ cq_period_mode,
+ params->tx_dim_enabled);
+
+ err = mlx5e_dim_tx_change(sq, params->tx_dim_enabled);
+ if (err)
+ goto err_destroy_sq;
+ }
return 0;
+err_destroy_sq:
+ mlx5e_destroy_sq(c->mdev, sq->sqn);
err_free_txqsq:
mlx5e_free_txqsq(sq);
@@ -1847,7 +1890,8 @@ void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
struct mlx5_core_dev *mdev = sq->mdev;
struct mlx5_rate_limit rl = {0};
- cancel_work_sync(&sq->dim.work);
+ if (sq->dim)
+ cancel_work_sync(&sq->dim->work);
cancel_work_sync(&sq->recover_work);
mlx5e_destroy_sq(mdev, sq->sqn);
if (sq->rate_limit) {
@@ -1866,6 +1910,49 @@ void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
mlx5e_reporter_tx_err_cqe(sq);
}
+static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
+{
+ return (struct dim_cq_moder) {
+ .cq_period_mode = cq_period_mode,
+ .pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS,
+ .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE :
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC,
+ };
+}
+
+bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
+ bool dim_enabled)
+{
+ bool reset_needed = cq_moder->cq_period_mode != cq_period_mode;
+
+ if (dim_enabled)
+ *cq_moder = net_dim_get_def_tx_moderation(cq_period_mode);
+ else
+ *cq_moder = mlx5e_get_def_tx_moderation(cq_period_mode);
+
+ return reset_needed;
+}
+
+bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
+ bool dim_enabled, bool keep_dim_state)
+{
+ bool reset = false;
+ int i, tc;
+
+ for (i = 0; i < chs->num; i++) {
+ for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
+ if (keep_dim_state)
+ dim_enabled = !!chs->c[i]->sq[tc].dim;
+
+ reset |= mlx5e_reset_tx_moderation(&chs->c[i]->tx_cq_moder,
+ cq_period_mode, dim_enabled);
+ }
+ }
+
+ return reset;
+}
+
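
Note (not part of the patch): the reset helpers above decouple the stored
moderation profile from DIM: with DIM enabled the defaults come from net_dim,
otherwise from the driver's static profile. A hedged sketch of a caller (the
function itself is hypothetical; the names it calls are from this patch):

    /* Pick a CQE- or EQE-based period mode and load the matching default
     * profile; returns true when the period mode changed and the
     * channels need to be re-created.
     */
    static bool example_set_tx_moder(struct mlx5e_params *params)
    {
            u8 mode = params->tx_moder_use_cqe_mode ?
                      DIM_CQ_PERIOD_MODE_START_FROM_CQE :
                      DIM_CQ_PERIOD_MODE_START_FROM_EQE;

            return mlx5e_reset_tx_moderation(&params->tx_cq_moderation,
                                             mode, params->tx_dim_enabled);
    }
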
static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
work_func_t recover_work_func)
@@ -2089,7 +2176,8 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
- MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
+ MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode));
+
MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
@@ -2127,8 +2215,10 @@ int mlx5e_open_cq(struct mlx5_core_dev *mdev, struct dim_cq_moder moder,
if (err)
goto err_free_cq;
- if (MLX5_CAP_GEN(mdev, cq_moderation))
- mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
+ if (MLX5_CAP_GEN(mdev, cq_moderation) &&
+ MLX5_CAP_GEN(mdev, cq_period_mode_modify))
+ mlx5e_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts,
+ mlx5e_cq_period_mode(moder.cq_period_mode));
return 0;
err_free_cq:
@@ -2143,6 +2233,40 @@ void mlx5e_close_cq(struct mlx5e_cq *cq)
mlx5e_free_cq(cq);
}
+int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u8 cq_period_mode)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
+ void *cqc;
+
+ MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
+ cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(cq_period_mode));
+ MLX5_SET(modify_cq_in, in,
+ modify_field_select_resize_field_select.modify_field_select.modify_field_select,
+ MLX5_CQ_MODIFY_PERIOD_MODE);
+
+ return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
+}
+
+int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
+ u16 cq_period, u16 cq_max_count, u8 cq_period_mode)
+{
+ u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
+ void *cqc;
+
+ MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
+ cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
+ MLX5_SET(cqc, cqc, cq_period, cq_period);
+ MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
+ MLX5_SET(cqc, cqc, cq_period_mode, cq_period_mode);
+ MLX5_SET(modify_cq_in, in,
+ modify_field_select_resize_field_select.modify_field_select.modify_field_select,
+ MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE);
+
+ return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
+}
+
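
Note (not part of the patch): modify_cq only applies the fields named in the
modify_field_select bitmask, which is why the two helpers above set
MLX5_CQ_MODIFY_PERIOD_MODE alone versus PERIOD | COUNT | PERIOD_MODE. A
hypothetical caller flipping just the period mode at runtime:

    static int example_toggle_cqe_mode(struct mlx5_core_dev *mdev,
                                       struct mlx5_core_cq *mcq,
                                       bool cqe_mode)
    {
            u8 mode = cqe_mode ? DIM_CQ_PERIOD_MODE_START_FROM_CQE :
                                 DIM_CQ_PERIOD_MODE_START_FROM_EQE;

            /* cq_period/cq_max_count are left untouched by this call */
            return mlx5e_modify_cq_period_mode(mdev, mcq, mode);
    }
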
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_create_cq_param *ccp,
@@ -2901,7 +3025,31 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
return err;
}
-static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
+static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
+ struct mlx5e_params *params)
+{
+ int ix;
+
+ for (ix = 0; ix < params->num_channels; ix++) {
+ int num_comp_vectors, irq, vec_ix;
+ struct mlx5_core_dev *mdev;
+
+ mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
+ num_comp_vectors = mlx5_comp_vectors_max(mdev);
+ cpumask_clear(priv->scratchpad.cpumask);
+ vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
+
+ for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) {
+ int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
+
+ cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
+ }
+
+ netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
+ }
+}
+
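
Note (not part of the patch): the XPS loop above gives channel ix the CPUs of
completion vectors ix, ix + num_channels, ix + 2 * num_channels, and so on. A
standalone sketch of that stride mapping, with the mlx5 helpers replaced by a
plain array:

    #include <stdio.h>

    int main(void)
    {
            /* vector i pinned to cpu[i]; 8 vectors, 3 channels */
            int cpu[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
            int num_channels = 3, num_vec = 8, ix = 0;

            /* channel 0 collects vectors 0, 3 and 6 */
            for (int irq = ix; irq < num_vec; irq += num_channels)
                    printf("channel %d <- cpu %d\n", ix, cpu[irq]);
            return 0;
    }
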
+static int mlx5e_update_tc_and_tx_queues(struct mlx5e_priv *priv)
{
struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
struct net_device *netdev = priv->netdev;
@@ -2925,22 +3073,10 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
err = mlx5e_update_tx_netdev_queues(priv);
if (err)
goto err_tcs;
- err = netif_set_real_num_rx_queues(netdev, nch);
- if (err) {
- netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
- goto err_txqs;
- }
+ mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
return 0;
-err_txqs:
- /* netif_set_real_num_rx_queues could fail only when nch increased. Only
- * one of nch and ntc is changed in this function. That means, the call
- * to netif_set_real_num_tx_queues below should not fail, because it
- * decreases the number of TX queues.
- */
- WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
-
err_tcs:
WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
old_tc_to_txq));
@@ -2948,42 +3084,32 @@ err_out:
return err;
}
-static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
-
-static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
- struct mlx5e_params *params)
-{
- int ix;
-
- for (ix = 0; ix < params->num_channels; ix++) {
- int num_comp_vectors, irq, vec_ix;
- struct mlx5_core_dev *mdev;
-
- mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
- num_comp_vectors = mlx5_comp_vectors_max(mdev);
- cpumask_clear(priv->scratchpad.cpumask);
- vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
-
- for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) {
- int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
-
- cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
- }
-
- netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
- }
-}
+MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_tc_and_tx_queues);
static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
{
u16 count = priv->channels.params.num_channels;
+ struct net_device *netdev = priv->netdev;
+ int old_num_rxqs;
int err;
- err = mlx5e_update_netdev_queues(priv);
- if (err)
+ old_num_rxqs = netdev->real_num_rx_queues;
+ err = netif_set_real_num_rx_queues(netdev, count);
+ if (err) {
+ netdev_warn(netdev, "%s: netif_set_real_num_rx_queues failed, %d\n",
+ __func__, err);
return err;
-
- mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
+ }
+ err = mlx5e_update_tc_and_tx_queues(priv);
+ if (err) {
+		/* mlx5e_update_tc_and_tx_queues can only fail when the number of
+		 * channels or TCs increases. A failure here therefore means the
+		 * channel count grew, so the rollback call to
+		 * netif_set_real_num_rx_queues below decreases the number of RX
+		 * queues and should not fail.
+		 */
+ WARN_ON_ONCE(netif_set_real_num_rx_queues(netdev, old_num_rxqs));
+ return err;
+ }
/* This function may be called on attach, before priv->rx_res is created. */
if (priv->rx_res) {
@@ -3011,6 +3137,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
struct mlx5e_txqsq *sq = &c->sq[tc];
priv->txq2sq[sq->txq_ix] = sq;
+ priv->txq2sq_stats[sq->txq_ix] = sq->stats;
}
}
@@ -3025,6 +3152,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
priv->txq2sq[sq->txq_ix] = sq;
+ priv->txq2sq_stats[sq->txq_ix] = sq->stats;
}
out:
@@ -3516,7 +3644,7 @@ static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv,
mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_num_channels_changed_ctx, NULL, true);
+ mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
if (!err && priv->mqprio_rl) {
mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
@@ -3617,10 +3745,8 @@ static struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_create(struct mlx5_core_dev *mdev
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
- mlx5e_fp_preactivate preactivate;
struct mlx5e_params new_params;
struct mlx5e_mqprio_rl *rl;
- bool nch_changed;
int err;
err = mlx5e_mqprio_channel_validate(priv, mqprio);
@@ -3634,10 +3760,8 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
new_params = priv->channels.params;
mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
- nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
- preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
- mlx5e_update_netdev_queues_ctx;
- err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
if (err) {
if (rl) {
mlx5e_mqprio_rl_cleanup(rl);
@@ -3790,7 +3914,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
mlx5e_fold_sw_stats64(priv, stats);
}
- stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+ stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
stats->rx_length_errors =
PPORT_802_3_GET(pstats, a_in_range_length_errors) +
@@ -3960,6 +4084,47 @@ static int set_feature_rx_all(struct net_device *netdev, bool enable)
return mlx5_set_port_fcs(mdev, !enable);
}
+static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
+{
+ return (struct dim_cq_moder) {
+ .cq_period_mode = cq_period_mode,
+ .pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS,
+ .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC,
+ };
+}
+
+bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
+ bool dim_enabled)
+{
+ bool reset_needed = cq_moder->cq_period_mode != cq_period_mode;
+
+ if (dim_enabled)
+ *cq_moder = net_dim_get_def_rx_moderation(cq_period_mode);
+ else
+ *cq_moder = mlx5e_get_def_rx_moderation(cq_period_mode);
+
+ return reset_needed;
+}
+
+bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
+ bool dim_enabled, bool keep_dim_state)
+{
+ bool reset = false;
+ int i;
+
+ for (i = 0; i < chs->num; i++) {
+ if (keep_dim_state)
+ dim_enabled = !!chs->c[i]->rq.dim;
+
+ reset |= mlx5e_reset_rx_moderation(&chs->c[i]->rx_cq_moder,
+ cq_period_mode, dim_enabled);
+ }
+
+ return reset;
+}
+
static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
{
u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
@@ -4122,13 +4287,19 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
#define MLX5E_HANDLE_FEATURE(feature, handler) \
mlx5e_handle_feature(netdev, &oper_features, feature, handler)
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
+ if (features & (NETIF_F_GRO_HW | NETIF_F_LRO)) {
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
+ } else {
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
+ }
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
- err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_MLX5_EN_ARFS
err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
@@ -4383,7 +4554,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset);
out:
- netdev->mtu = params->sw_mtu;
+ WRITE_ONCE(netdev->mtu, params->sw_mtu);
mutex_unlock(&priv->state_lock);
return err;
}
@@ -4738,7 +4909,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
/* Verify if UDP port is being offloaded by HW */
if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, port))
- return features;
+ return vxlan_features_check(skb, features);
#if IS_ENABLED(CONFIG_GENEVE)
/* Support Geneve offload for default UDP port */
@@ -4753,7 +4924,7 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
}
out:
- /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
+ /* Disable CSUM and GSO if skb cannot be offloaded by HW */
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
@@ -4764,7 +4935,6 @@ netdev_features_t mlx5e_features_check(struct sk_buff *skb,
struct mlx5e_priv *priv = netdev_priv(netdev);
features = vlan_features_check(skb, features);
- features = vxlan_features_check(skb, features);
/* Validate if the tunneled packet is being offloaded by HW */
if (skb->encapsulation &&
@@ -4950,10 +5120,7 @@ static int mlx5e_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
mode = nla_get_u16(attr);
if (mode > BRIDGE_MODE_VEPA)
return -EINVAL;
@@ -5011,23 +5178,10 @@ const struct net_device_ops mlx5e_netdev_ops = {
#endif
};
-static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
-{
- int i;
-
- /* The supported periods are organized in ascending order */
- for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
- if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
- break;
-
- return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
-}
-
void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu)
{
struct mlx5e_params *params = &priv->channels.params;
struct mlx5_core_dev *mdev = priv->mdev;
- u8 rx_cq_period_mode;
params->sw_mtu = mtu;
params->hard_mtu = MLX5E_ETH_HARD_MTU;
@@ -5061,13 +5215,16 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
- rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
- MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
- mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
+ MLX5_CAP_GEN(mdev, cq_period_mode_modify);
+ params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
+ MLX5_CAP_GEN(mdev, cq_period_mode_modify);
+ params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe);
+ params->tx_moder_use_cqe_mode = false;
+ mlx5e_reset_rx_moderation(&params->rx_cq_moderation, params->rx_moder_use_cqe_mode,
+ params->rx_dim_enabled);
+ mlx5e_reset_tx_moderation(&params->tx_cq_moderation, params->tx_moder_use_cqe_mode,
+ params->tx_dim_enabled);
/* TX inline */
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
@@ -5141,6 +5298,139 @@ static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
}
+static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_channel_stats *channel_stats;
+ struct mlx5e_rq_stats *xskrq_stats;
+ struct mlx5e_rq_stats *rq_stats;
+
+ ASSERT_RTNL();
+ if (mlx5e_is_uplink_rep(priv) || !priv->stats_nch)
+ return;
+
+ channel_stats = priv->channel_stats[i];
+ xskrq_stats = &channel_stats->xskrq;
+ rq_stats = &channel_stats->rq;
+
+ stats->packets = rq_stats->packets + xskrq_stats->packets;
+ stats->bytes = rq_stats->bytes + xskrq_stats->bytes;
+ stats->alloc_fail = rq_stats->buff_alloc_err +
+ xskrq_stats->buff_alloc_err;
+}
+
+static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_sq_stats *sq_stats;
+
+ ASSERT_RTNL();
+ if (!priv->stats_nch)
+ return;
+
+	/* No special case is needed for PTP, HTB, etc.: txq2sq_stats is kept
+	 * up to date for all active SQs, and get_base_stats takes care of the
+	 * inactive ones.
+	 */
+ sq_stats = priv->txq2sq_stats[i];
+ stats->packets = sq_stats->packets;
+ stats->bytes = sq_stats->bytes;
+}
+
+static void mlx5e_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_ptp *ptp_channel;
+ int i, tc;
+
+ ASSERT_RTNL();
+ if (!mlx5e_is_uplink_rep(priv)) {
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->alloc_fail = 0;
+
+ for (i = priv->channels.params.num_channels; i < priv->stats_nch; i++) {
+ struct netdev_queue_stats_rx rx_i = {0};
+
+ mlx5e_get_queue_stats_rx(dev, i, &rx_i);
+
+ rx->packets += rx_i.packets;
+ rx->bytes += rx_i.bytes;
+ rx->alloc_fail += rx_i.alloc_fail;
+ }
+
+		/* Always report PTP RX stats from the base, as there is no
+		 * corresponding channel for mlx5e_get_queue_stats_rx to
+		 * report them under.
+		 */
+ if (priv->rx_ptp_opened) {
+ struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq;
+
+ rx->packets += rq_stats->packets;
+ rx->bytes += rq_stats->bytes;
+ }
+ }
+
+ tx->packets = 0;
+ tx->bytes = 0;
+
+ for (i = 0; i < priv->stats_nch; i++) {
+ struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i];
+
+		/* Handle two cases:
+		 *
+		 * 1. Channels which are active: report only the deactivated
+		 * TCs on these channels.
+		 *
+		 * 2. Channels which were deactivated
+		 * (i >= priv->channels.params.num_channels): all of their TCs
+		 * [0 .. priv->max_opened_tc) must be examined, because
+		 * deactivated channels are not in the range of
+		 * [0..real_num_tx_queues) and will not have their stats
+		 * reported by mlx5e_get_queue_stats_tx.
+		 */
+ if (i < priv->channels.params.num_channels)
+ tc = mlx5e_get_dcb_num_tc(&priv->channels.params);
+ else
+ tc = 0;
+
+ for (; tc < priv->max_opened_tc; tc++) {
+ struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[tc];
+
+ tx->packets += sq_stats->packets;
+ tx->bytes += sq_stats->bytes;
+ }
+ }
+
+	/* If PTP TX was opened at some point and has since either been shut
+	 * down (and set to NULL) or simply disabled (bit unset), report its
+	 * stats directly from the ptp_stats structures: these queues are now
+	 * unavailable and there is no txq index through which
+	 * mlx5e_get_queue_stats_tx could retrieve their stats.
+	 */
+ ptp_channel = priv->channels.ptp;
+ if (priv->tx_ptp_opened && (!ptp_channel || !test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state))) {
+ for (tc = 0; tc < priv->max_opened_tc; tc++) {
+ struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[tc];
+
+ tx->packets += sq_stats->packets;
+ tx->bytes += sq_stats->bytes;
+ }
+ }
+}
+
+static const struct netdev_stat_ops mlx5e_stat_ops = {
+ .get_queue_stats_rx = mlx5e_get_queue_stats_rx,
+ .get_queue_stats_tx = mlx5e_get_queue_stats_tx,
+ .get_base_stats = mlx5e_get_base_stats,
+};
+
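
Note (not part of the patch): the netdev_stat_ops contract is that the base
stats plus the per-queue stats of queues in [0, real_num_*_queues) sum to the
device totals, which is why mlx5e_get_base_stats above folds in deactivated
channels, deactivated TCs and the PTP queues. A minimal sketch for a
hypothetical driver (foo_priv and its fields are invented):

    static void foo_get_queue_stats_rx(struct net_device *dev, int i,
                                       struct netdev_queue_stats_rx *rx)
    {
            struct foo_priv *p = netdev_priv(dev);

            rx->packets = p->rxq[i].packets;
            rx->bytes = p->rxq[i].bytes;
    }

    static void foo_get_base_stats(struct net_device *dev,
                                   struct netdev_queue_stats_rx *rx,
                                   struct netdev_queue_stats_tx *tx)
    {
            struct foo_priv *p = netdev_priv(dev);

            /* traffic counted on queues no longer visible through
             * real_num_rx_queues
             */
            rx->packets = p->retired_rx_packets;
            rx->bytes = p->retired_rx_bytes;
            tx->packets = 0;
            tx->bytes = 0;
    }

    static const struct netdev_stat_ops foo_stat_ops = {
            .get_queue_stats_rx = foo_get_queue_stats_rx,
            .get_base_stats = foo_get_base_stats,
    };
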
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -5158,6 +5448,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->watchdog_timeo = 15 * HZ;
+ netdev->stat_ops = &mlx5e_stat_ops;
netdev->ethtool_ops = &mlx5e_ethtool_ops;
netdev->vlan_features |= NETIF_F_SG;
@@ -5196,6 +5487,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ if (mlx5e_hw_gro_supported(mdev) &&
+ mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT,
+ MLX5E_MPWRQ_UMR_MODE_ALIGNED))
+ netdev->hw_features |= NETIF_F_GRO_HW;
+
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -5262,8 +5558,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
netdev->hw_features |= NETIF_F_HW_TC;
#endif
-#ifdef CONFIG_MLX5_EN_ARFS
+#if IS_ENABLED(CONFIG_MLX5_EN_ARFS)
netdev->hw_features |= NETIF_F_NTUPLE;
+#elif IS_ENABLED(CONFIG_MLX5_EN_RXNFC)
+ netdev->features |= NETIF_F_NTUPLE;
#endif
}
@@ -5437,7 +5735,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
err_tc_nic_cleanup:
mlx5e_tc_nic_cleanup(priv);
err_destroy_flow_steering:
- mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev),
priv->profile);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
@@ -5453,7 +5751,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
mlx5e_accel_cleanup_rx(priv);
mlx5e_tc_nic_cleanup(priv);
- mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE),
+ mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev),
priv->profile);
mlx5e_rx_res_destroy(priv->rx_res);
priv->rx_res = NULL;
@@ -5565,7 +5863,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_ipsec_cleanup(priv);
}
-int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
+static int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
{
return mlx5e_refresh_tirs(priv, false, false);
}
@@ -5688,9 +5986,13 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
if (!priv->txq2sq)
goto err_destroy_workqueue;
+ priv->txq2sq_stats = kcalloc_node(num_txqs, sizeof(*priv->txq2sq_stats), GFP_KERNEL, node);
+ if (!priv->txq2sq_stats)
+ goto err_free_txq2sq;
+
priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node);
if (!priv->tx_rates)
- goto err_free_txq2sq;
+ goto err_free_txq2sq_stats;
priv->channel_stats =
kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node);
@@ -5701,6 +6003,8 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
err_free_tx_rates:
kfree(priv->tx_rates);
+err_free_txq2sq_stats:
+ kfree(priv->txq2sq_stats);
err_free_txq2sq:
kfree(priv->txq2sq);
err_destroy_workqueue:
@@ -5724,6 +6028,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kvfree(priv->channel_stats[i]);
kfree(priv->channel_stats);
kfree(priv->tx_rates);
+ kfree(priv->txq2sq_stats);
kfree(priv->txq2sq);
destroy_workqueue(priv->wq);
mlx5e_selq_cleanup(&priv->selq);
@@ -5733,6 +6038,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->htb_qos_sq_stats[i]);
kvfree(priv->htb_qos_sq_stats);
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+
memset(priv, 0, sizeof(*priv));
}
@@ -6058,7 +6368,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
return 0;
}
-static int _mlx5e_suspend(struct auxiliary_device *adev)
+static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg)
{
struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
struct mlx5e_priv *priv = mlx5e_dev->priv;
@@ -6067,7 +6377,7 @@ static int _mlx5e_suspend(struct auxiliary_device *adev)
struct mlx5_core_dev *pos;
int i;
- if (!netif_device_present(netdev)) {
+ if (!pre_netdev_reg && !netif_device_present(netdev)) {
if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
mlx5_sd_for_each_dev(i, mdev, pos)
mlx5e_destroy_mdev_resources(pos);
@@ -6090,7 +6400,7 @@ static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
if (actual_adev)
- err = _mlx5e_suspend(actual_adev);
+ err = _mlx5e_suspend(actual_adev, false);
mlx5_sd_cleanup(mdev);
return err;
@@ -6157,7 +6467,7 @@ static int _mlx5e_probe(struct auxiliary_device *adev)
return 0;
err_resume:
- _mlx5e_suspend(adev);
+ _mlx5e_suspend(adev, true);
err_profile_cleanup:
profile->cleanup(priv);
err_destroy_netdev:
@@ -6197,7 +6507,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev)
mlx5_core_uplink_netdev_set(mdev, NULL);
mlx5e_dcbnl_delete_app(priv);
unregister_netdev(priv->netdev);
- _mlx5e_suspend(adev);
+ _mlx5e_suspend(adev, false);
priv->profile->cleanup(priv);
mlx5e_destroy_netdev(priv);
mlx5e_devlink_port_unregister(mlx5e_dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 05527418fa64..8790d57dc6db 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -30,6 +30,7 @@
* SOFTWARE.
*/
+#include <linux/dim.h>
#include <linux/debugfs.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
@@ -40,6 +41,7 @@
#include "eswitch.h"
#include "en.h"
+#include "en/dim.h"
#include "en_rep.h"
#include "en/params.h"
#include "en/txrx.h"
@@ -135,9 +137,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- sw_rep_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, sw_rep_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
@@ -145,9 +145,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
- sw_rep_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_rep_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(sw_rep)
@@ -176,11 +176,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
+ ethtool_puts(data, vport_rep_stats_desc[i].format);
for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_rep_loopback_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, vport_rep_loopback_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
@@ -188,12 +186,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport_rep)
int i;
for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
- vport_rep_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
+ vport_rep_stats_desc, i));
for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
- vport_rep_loopback_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
+ vport_rep_loopback_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport_rep)
@@ -275,8 +275,42 @@ out:
kvfree(out);
}
+static int mlx5e_rep_query_aggr_q_counter(struct mlx5_core_dev *dev, int vport, void *out)
+{
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)] = {};
+
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, other_vport, 1);
+ MLX5_SET(query_q_counter_in, in, vport_number, vport);
+ MLX5_SET(query_q_counter_in, in, aggregate, 1);
+
+ return mlx5_cmd_exec_inout(dev, query_q_counter, in, out);
+}
+
+static void mlx5e_rep_update_vport_q_counter(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rep_stats *rep_stats = &priv->stats.rep_stats;
+ u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
+ struct mlx5e_rep_priv *rpriv = priv->ppriv;
+ struct mlx5_eswitch_rep *rep = rpriv->rep;
+ int err;
+
+ if (!MLX5_CAP_GEN(priv->mdev, q_counter_other_vport) ||
+ !MLX5_CAP_GEN(priv->mdev, q_counter_aggregation))
+ return;
+
+ err = mlx5e_rep_query_aggr_q_counter(priv->mdev, rep->vport, out);
+ if (err) {
+ netdev_warn(priv->netdev, "failed reading stats on vport %d, error %d\n",
+ rep->vport, err);
+ return;
+ }
+
+ rep_stats->rx_vport_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
+}
+
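
Note (not part of the patch): mlx5 command buffers are fixed-layout firmware
structures sized with MLX5_ST_SZ_DW and accessed by field name through
MLX5_SET/MLX5_GET, as in the query above. A sketch of a caller (mdev, vport
and drops are assumed to be in scope):

    u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
    int err;

    err = mlx5e_rep_query_aggr_q_counter(mdev, vport, out);
    if (!err)
            drops = MLX5_GET(query_q_counter_out, out, out_of_buffer);
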
static void mlx5e_rep_get_strings(struct net_device *dev,
- u32 stringset, uint8_t *data)
+ u32 stringset, u8 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
@@ -394,6 +428,8 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.set_channels = mlx5e_rep_set_channels,
.get_coalesce = mlx5e_rep_get_coalesce,
.set_coalesce = mlx5e_rep_set_coalesce,
+ .get_per_queue_coalesce = mlx5e_get_per_queue_coalesce,
+ .set_per_queue_coalesce = mlx5e_set_per_queue_coalesce,
.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
@@ -804,10 +840,6 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_params *params;
- u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
- MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
- MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
-
params = &priv->channels.params;
params->num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;
@@ -835,7 +867,7 @@ static void mlx5e_build_rep_params(struct net_device *netdev)
/* CQ moderation params */
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
- mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
+ params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe);
params->mqprio.num_tc = 1;
if (rep->vport != MLX5_VPORT_UPLINK)
@@ -1231,6 +1263,12 @@ static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
return 0;
}
+static void mlx5e_rep_stats_update_ndo_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_stats_update_ndo_stats(priv);
+ mlx5e_rep_update_vport_q_counter(priv);
+}
+
static int mlx5e_rep_event_mpesw(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1423,7 +1461,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.enable = mlx5e_rep_enable,
.disable = mlx5e_rep_disable,
.update_rx = mlx5e_update_rep_rx,
- .update_stats = mlx5e_stats_update_ndo_stats,
+ .update_stats = mlx5e_rep_stats_update_ndo_stats,
.rx_handlers = &mlx5e_rx_handlers_rep,
.max_tc = 1,
.stats_grps = mlx5e_rep_stats_grps,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index d601b5faaed5..de9d01036c28 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -523,15 +523,23 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
static inline void
mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
- struct page *page, u32 frag_offset, u32 len,
+ struct mlx5e_frag_page *frag_page,
+ u32 frag_offset, u32 len,
unsigned int truesize)
{
- dma_addr_t addr = page_pool_get_dma_addr(page);
+ dma_addr_t addr = page_pool_get_dma_addr(frag_page->page);
+ u8 next_frag = skb_shinfo(skb)->nr_frags;
dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
rq->buff.map_dir);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, frag_offset, len, truesize);
+
+ if (skb_can_coalesce(skb, next_frag, frag_page->page, frag_offset)) {
+ skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize);
+ } else {
+ frag_page->frags++;
+ skb_add_rx_frag(skb, next_frag, frag_page->page,
+ frag_offset, len, truesize);
+ }
}
static inline void
@@ -619,25 +627,25 @@ static int bitmap_find_window(unsigned long *bitmap, int len,
return min(len, count);
}
-static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
- __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs)
+static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
+ __be32 key, u16 offset, u16 ksm_len)
{
- memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms));
+ memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
umr_wqe->ctrl.opmod_idx_opcode =
cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
MLX5_OPCODE_UMR);
umr_wqe->ctrl.umr_mkey = key;
umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
- | MLX5E_KLM_UMR_DS_CNT(klm_len));
+ | MLX5E_KSM_UMR_DS_CNT(ksm_len));
umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
- umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len);
+ umr_wqe->uctrl.xlt_octowords = cpu_to_be16(ksm_len);
umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
- u16 klm_entries, u16 index)
+ u16 ksm_entries, u16 index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 entries, pi, header_offset, err, wqe_bbs, new_entries;
@@ -650,20 +658,20 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
int headroom, i;
headroom = rq->buff.headroom;
- new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
- entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
- wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
+ new_entries = ksm_entries - (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1));
+ entries = ALIGN(ksm_entries, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
+ wqe_bbs = MLX5E_KSM_UMR_WQEBBS(entries);
pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
- build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs);
+ build_ksm_umr(sq, umr_wqe, shampo->key, index, entries);
frag_page = &shampo->pages[page_index];
for (i = 0; i < entries; i++, index++) {
dma_info = &shampo->info[index];
- if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
- MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT))
- goto update_klm;
+ if (i >= ksm_entries || (index < shampo->pi && shampo->pi - index <
+ MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT))
+ goto update_ksm;
header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
if (!(header_offset & (PAGE_SIZE - 1))) {
@@ -683,12 +691,11 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
dma_info->frag_page = frag_page;
}
-update_klm:
- umr_wqe->inline_klms[i].bcount =
- cpu_to_be32(MLX5E_RX_MAX_HEAD);
- umr_wqe->inline_klms[i].key = cpu_to_be32(lkey);
- umr_wqe->inline_klms[i].va =
- cpu_to_be64(dma_info->addr + headroom);
+update_ksm:
+ umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
+ .key = cpu_to_be32(lkey),
+ .va = cpu_to_be64(dma_info->addr + headroom),
+ };
}
sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
@@ -720,37 +727,38 @@ err_unmap:
static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u16 klm_entries, num_wqe, index, entries_before;
+ u16 ksm_entries, num_wqe, index, entries_before;
struct mlx5e_icosq *sq = rq->icosq;
- int i, err, max_klm_entries, len;
+ int i, err, max_ksm_entries, len;
- max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
- klm_entries = bitmap_find_window(shampo->bitmap,
+ max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev);
+ ksm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
- if (!klm_entries)
+ ksm_entries = ALIGN_DOWN(ksm_entries, MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);
+ if (!ksm_entries)
return 0;
- klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1));
- index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT);
+ ksm_entries += (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1));
+ index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT);
entries_before = shampo->hd_per_wq - index;
- if (unlikely(entries_before < klm_entries))
- num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) +
- DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries);
+ if (unlikely(entries_before < ksm_entries))
+ num_wqe = DIV_ROUND_UP(entries_before, max_ksm_entries) +
+ DIV_ROUND_UP(ksm_entries - entries_before, max_ksm_entries);
else
- num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries);
+ num_wqe = DIV_ROUND_UP(ksm_entries, max_ksm_entries);
for (i = 0; i < num_wqe; i++) {
- len = (klm_entries > max_klm_entries) ? max_klm_entries :
- klm_entries;
+ len = (ksm_entries > max_ksm_entries) ? max_ksm_entries :
+ ksm_entries;
if (unlikely(index + len > shampo->hd_per_wq))
len = shampo->hd_per_wq - index;
err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
if (unlikely(err))
return err;
index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
- klm_entries -= len;
+ ksm_entries -= len;
}
return 0;
@@ -839,44 +847,28 @@ err:
return err;
}
-/* This function is responsible to dealloc SHAMPO header buffer.
- * close == true specifies that we are in the middle of closing RQ operation so
- * we go over all the entries and if they are not in use we free them,
- * otherwise we only go over a specific range inside the header buffer that are
- * not in use.
- */
-void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close)
+static void
+mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- struct mlx5e_frag_page *deleted_page = NULL;
- int hd_per_wq = shampo->hd_per_wq;
- struct mlx5e_dma_info *hd_info;
- int i, index = start;
-
- for (i = 0; i < len; i++, index++) {
- if (index == hd_per_wq)
- index = 0;
-
- if (close && !test_bit(index, shampo->bitmap))
- continue;
+ u64 addr = shampo->info[header_index].addr;
- hd_info = &shampo->info[index];
- hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
- if (hd_info->frag_page && hd_info->frag_page != deleted_page) {
- deleted_page = hd_info->frag_page;
- mlx5e_page_release_fragmented(rq, hd_info->frag_page);
- }
+ if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
+ struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
- hd_info->frag_page = NULL;
+ dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
+ mlx5e_page_release_fragmented(rq, dma_info->frag_page);
}
+ clear_bit(header_index, shampo->bitmap);
+}
- if (start + len > hd_per_wq) {
- len -= hd_per_wq - start;
- bitmap_clear(shampo->bitmap, start, hd_per_wq - start);
- start = 0;
- }
+void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ int i;
- bitmap_clear(shampo->bitmap, start, len);
+ for_each_set_bit(i, shampo->bitmap, rq->mpwqe.shampo->hd_per_wq)
+ mlx5e_free_rx_shampo_hd_entry(rq, i);
}
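
Note (not part of the patch): the rewritten dealloc visits only entries whose
bit is set, instead of scanning an index range with manual wraparound. A sketch
of the for_each_set_bit idiom (kernel context):

    unsigned long bitmap[BITS_TO_LONGS(64)] = { 0 };
    int i;

    __set_bit(3, bitmap);
    __set_bit(40, bitmap);

    /* visits i = 3, then i = 40; clear bits are skipped entirely */
    for_each_set_bit(i, bitmap, 64)
            pr_info("entry %d is in use\n", i);
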
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
@@ -917,7 +909,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
if (!rq->xsk_pool) {
count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
- } else if (likely(!rq->xsk_pool->dma_need_sync)) {
+ } else if (likely(!dma_dev_need_sync(rq->pdev))) {
mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
} else {
@@ -971,26 +963,31 @@ void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
sq->cc = sqcc;
}
-static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
- struct mlx5e_icosq *sq)
+void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len)
{
- struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
- struct mlx5e_shampo_hd *shampo;
- /* assume 1:1 relationship between RQ and icosq */
- struct mlx5e_rq *rq = &c->rq;
- int end, from, len = umr.len;
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ int end, from, full_len = len;
- shampo = rq->mpwqe.shampo;
end = shampo->hd_per_wq;
from = shampo->ci;
- if (from + len > shampo->hd_per_wq) {
+ if (from + len > end) {
len -= end - from;
bitmap_set(shampo->bitmap, from, end - from);
from = 0;
}
bitmap_set(shampo->bitmap, from, len);
- shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1);
+ shampo->ci = (shampo->ci + full_len) & (shampo->hd_per_wq - 1);
+}
+
+static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
+ struct mlx5e_icosq *sq)
+{
+ struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
+ /* assume 1:1 relationship between RQ and icosq */
+ struct mlx5e_rq *rq = &c->rq;
+
+ mlx5e_shampo_fill_umr(rq, umr.len);
}
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
@@ -1191,9 +1188,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
check = csum_partial(tcp, tcp->doff * 4,
csum_unfold((__force __sum16)cqe->check_sum));
/* Almost done, don't forget the pseudo header */
- tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
- tot_len - sizeof(struct iphdr),
- IPPROTO_TCP, check);
+ tcp->check = tcp_v4_check(tot_len - sizeof(struct iphdr),
+ ipv4->saddr, ipv4->daddr, check);
} else {
u16 payload_len = tot_len - sizeof(struct ipv6hdr);
struct ipv6hdr *ipv6 = ip_p;
@@ -1208,8 +1204,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
check = csum_partial(tcp, tcp->doff * 4,
csum_unfold((__force __sum16)cqe->check_sum));
/* Almost done, don't forget the pseudo header */
- tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
- IPPROTO_TCP, check);
+ tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
+ &ipv6->daddr, check);
}
}
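
Note (not part of the patch): tcp_v4_check()/tcp_v6_check() are thin wrappers
over csum_tcpudp_magic()/csum_ipv6_magic() with IPPROTO_TCP and the argument
order folded in, so the replacement above is behavior-preserving. Roughly, from
include/net/tcp.h:

    static inline __sum16 tcp_v4_check(int len, __be32 saddr, __be32 daddr,
                                       __wsum base)
    {
            /* fold the IPv4 pseudo-header into the partial checksum */
            return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
    }
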
@@ -1612,9 +1608,7 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5e_rq_stats *stats = rq->stats;
stats->packets++;
- stats->gro_packets++;
stats->bytes += cqe_bcnt;
- stats->gro_bytes += cqe_bcnt;
if (NAPI_GRO_CB(skb)->count != 1)
return;
mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
@@ -1964,30 +1958,24 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
#endif
static void
-mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
- struct mlx5e_frag_page *frag_page,
- u32 data_bcnt, u32 data_offset)
+mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
+ struct mlx5e_frag_page *frag_page,
+ u32 data_bcnt, u32 data_offset)
{
net_prefetchw(skb->data);
- while (data_bcnt) {
+ do {
/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
- unsigned int truesize;
+ unsigned int truesize = pg_consumed_bytes;
- if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
- truesize = pg_consumed_bytes;
- else
- truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
-
- frag_page->frags++;
- mlx5e_add_skb_frag(rq, skb, frag_page->page, data_offset,
+ mlx5e_add_skb_frag(rq, skb, frag_page, data_offset,
pg_consumed_bytes, truesize);
data_bcnt -= pg_consumed_bytes;
data_offset = 0;
frag_page++;
- }
+ } while (data_bcnt);
}
static struct sk_buff *
@@ -2212,8 +2200,8 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
/* build SKB around header */
dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
- prefetchw(hdr);
- prefetch(data);
+ net_prefetchw(hdr);
+ net_prefetch(data);
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
if (unlikely(!skb))
@@ -2230,7 +2218,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return NULL;
}
- prefetchw(skb->data);
+ net_prefetchw(skb->data);
mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr,
head_offset + rx_headroom,
rx_headroom, head_size);
@@ -2261,12 +2249,19 @@ mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
{
struct sk_buff *skb = rq->hw_gro_data->skb;
struct mlx5e_rq_stats *stats = rq->stats;
+ u16 gro_count = NAPI_GRO_CB(skb)->count;
- stats->gro_skbs++;
if (likely(skb_shinfo(skb)->nr_frags))
mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
- if (NAPI_GRO_CB(skb)->count > 1)
+ if (gro_count > 1) {
+ stats->gro_skbs++;
+ stats->gro_packets += gro_count;
+ stats->gro_bytes += skb->data_len + skb_headlen(skb) * gro_count;
+
mlx5e_shampo_update_hdr(rq, cqe, match);
+ } else {
+ skb_shinfo(skb)->gso_size = 0;
+ }
napi_gro_receive(rq->cq.napi, skb);
rq->hw_gro_data->skb = NULL;
}
@@ -2279,21 +2274,6 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
}
-static void
-mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
-{
- struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u64 addr = shampo->info[header_index].addr;
-
- if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
- struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
-
- dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
- mlx5e_page_release_fragmented(rq, dma_info->frag_page);
- }
- bitmap_clear(shampo->bitmap, header_index, 1);
-}
-
static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
@@ -2327,8 +2307,6 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
goto mpwrq_cqe_out;
}
- stats->gro_match_packets += match;
-
if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
match = false;
mlx5e_shampo_flush_skb(rq, cqe, match);
@@ -2359,21 +2337,30 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq
}
if (likely(head_size)) {
- struct mlx5e_frag_page *frag_page;
+ if (data_bcnt) {
+ struct mlx5e_frag_page *frag_page;
- frag_page = &wi->alloc_units.frag_pages[page_idx];
- mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
+ frag_page = &wi->alloc_units.frag_pages[page_idx];
+ mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
+ } else {
+ stats->hds_nodata_packets++;
+ stats->hds_nodata_bytes += head_size;
+ }
}
mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
- if (flush)
+ if (flush && rq->hw_gro_data->skb)
mlx5e_shampo_flush_skb(rq, cqe, match);
free_hd_entry:
- mlx5e_free_rx_shampo_hd_entry(rq, header_index);
+ if (likely(head_size))
+ mlx5e_free_rx_shampo_hd_entry(rq, header_index);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
return;
+ if (unlikely(!cstrides))
+ return;
+
wq = &rq->mpwqe.wq;
wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 08a75654f5f1..5bf8318cc48b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -359,7 +359,7 @@ int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data)
if (st.cond_func && st.cond_func(priv))
continue;
if (data)
- strcpy(data + count * ETH_GSTRING_LEN, st.name);
+ ethtool_puts(&data, st.name);
count++;
}
return count;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index f3d0898bdbc6..e7a3290a708a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -41,6 +41,11 @@
#include <net/page_pool/helpers.h>
#endif
+void mlx5e_ethtool_put_stat(u64 **data, u64 val)
+{
+ *(*data)++ = val;
+}
+
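
Note (not part of the patch): the fill callbacks now advance a shared cursor
instead of returning an index: strings go through ethtool_puts(&data, ...) and
values through mlx5e_ethtool_put_stat(&data, ...), so nested helpers compose
without index bookkeeping. A standalone sketch of the cursor pattern:

    #include <stdint.h>
    #include <stdio.h>

    /* same shape as mlx5e_ethtool_put_stat(): write through the cursor,
     * then advance it
     */
    static void put_stat(uint64_t **data, uint64_t val)
    {
            *(*data)++ = val;
    }

    int main(void)
    {
            uint64_t stats[3], *cur = stats;

            put_stat(&cur, 1);
            put_stat(&cur, 2);
            put_stat(&cur, 3);
            printf("%zu values filled\n", (size_t)(cur - stats)); /* 3 */
            return 0;
    }
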
static unsigned int stats_grps_num(struct mlx5e_priv *priv)
{
return !priv->profile->stats_grps_num ? 0 :
@@ -90,17 +95,17 @@ void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx)
int i;
for (i = 0; i < num_stats_grps; i++)
- idx = stats_grps[i]->fill_stats(priv, data, idx);
+ stats_grps[i]->fill_stats(priv, &data);
}
void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data)
{
mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps;
const unsigned int num_stats_grps = stats_grps_num(priv);
- int i, idx = 0;
+ int i;
for (i = 0; i < num_stats_grps; i++)
- idx = stats_grps[i]->fill_strings(priv, data, idx);
+ stats_grps[i]->fill_strings(priv, &data);
}
/* Concrete NIC Stats */
@@ -136,8 +141,9 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
@@ -257,8 +263,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(sw)
int i;
for (i = 0; i < NUM_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, sw_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
@@ -266,8 +271,9 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(sw)
int i;
for (i = 0; i < NUM_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(data,
+ MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_stats_desc, i));
}
static void mlx5e_stats_grp_sw_update_stats_xdp_red(struct mlx5e_sw_stats *s,
@@ -338,8 +344,9 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_gro_packets += rq_stats->gro_packets;
s->rx_gro_bytes += rq_stats->gro_bytes;
s->rx_gro_skbs += rq_stats->gro_skbs;
- s->rx_gro_match_packets += rq_stats->gro_match_packets;
s->rx_gro_large_hds += rq_stats->gro_large_hds;
+ s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets;
+ s->rx_hds_nodata_bytes += rq_stats->hds_nodata_bytes;
s->rx_ecn_mark += rq_stats->ecn_mark;
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
@@ -591,14 +598,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qcnt)
int i;
for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- q_stats_desc[i].format);
+ ethtool_puts(data, q_stats_desc[i].format);
for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- drop_rq_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, drop_rq_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
@@ -606,12 +609,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qcnt)
int i;
for (i = 0; i < NUM_Q_COUNTERS && q_counter_any(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
- q_stats_desc, i);
+ mlx5e_ethtool_put_stat(data,
+ MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+ q_stats_desc, i));
for (i = 0; i < NUM_DROP_RQ_COUNTERS && priv->drop_rq_q_counter; i++)
- data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
- drop_rq_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+ drop_rq_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qcnt)
@@ -685,18 +689,13 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vnic_env)
int i;
for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vnic_env_stats_steer_desc[i].format);
+ ethtool_puts(data, vnic_env_stats_steer_desc[i].format);
for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vnic_env_stats_dev_oob_desc[i].format);
+ ethtool_puts(data, vnic_env_stats_dev_oob_desc[i].format);
for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vnic_env_stats_drop_desc[i].format);
-
- return idx;
+ ethtool_puts(data, vnic_env_stats_drop_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
@@ -704,18 +703,22 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vnic_env)
int i;
for (i = 0; i < NUM_VNIC_ENV_STEER_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
- vnic_env_stats_steer_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_steer_desc, i));
for (i = 0; i < NUM_VNIC_ENV_DEV_OOB_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
- vnic_env_stats_dev_oob_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_dev_oob_desc, i));
for (i = 0; i < NUM_VNIC_ENV_DROP_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
- vnic_env_stats_drop_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(priv->stats.vnic.query_vnic_env_out,
+ vnic_env_stats_drop_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vnic_env)
@@ -798,13 +801,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
int i;
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
+ ethtool_puts(data, vport_stats_desc[i].format);
for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_loopback_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, vport_loopback_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
@@ -812,14 +812,16 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
int i;
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
- vport_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_stats_desc, i));
for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
- vport_loopback_stats_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_loopback_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(vport)
@@ -868,8 +870,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(802_3)
int i;
for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, pport_802_3_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
@@ -877,9 +878,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(802_3)
int i;
for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
- pport_802_3_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.IEEE_802_3_counters,
+ pport_802_3_stats_desc, i));
}
#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \
@@ -1029,8 +1031,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2863)
int i;
for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, pport_2863_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
@@ -1038,9 +1039,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2863)
int i;
for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
- pport_2863_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.RFC_2863_counters,
+ pport_2863_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2863)
@@ -1088,8 +1090,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(2819)
int i;
for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, pport_2819_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
@@ -1097,9 +1098,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(2819)
int i;
for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
- pport_2819_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.RFC_2819_counters,
+ pport_2819_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(2819)
@@ -1172,6 +1174,55 @@ void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
*ranges = mlx5e_rmon_ranges;
}
+void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
+ struct ethtool_ts_stats *ts_stats)
+{
+ int i, j;
+
+ mutex_lock(&priv->state_lock);
+
+ if (priv->tx_ptp_opened) {
+ struct mlx5e_ptp *ptp = priv->channels.ptp;
+
+ ts_stats->pkts = 0;
+ ts_stats->err = 0;
+ ts_stats->lost = 0;
+
+ if (!ptp)
+ goto out;
+
+ /* Aggregate stats across all TCs */
+ for (i = 0; i < ptp->num_tc; i++) {
+ struct mlx5e_ptp_cq_stats *stats =
+ ptp->ptpsq[i].cq_stats;
+
+ ts_stats->pkts += stats->cqe;
+ ts_stats->err += stats->abort + stats->err_cqe +
+ stats->late_cqe;
+ ts_stats->lost += stats->lost_cqe;
+ }
+ } else {
+ /* The DMA layer always timestamps packets successfully. The other
+ * counters do not make sense for this layer.
+ */
+ ts_stats->pkts = 0;
+
+ /* Aggregate stats across all SQs */
+ for (j = 0; j < priv->channels.num; j++) {
+ struct mlx5e_channel *c = priv->channels.c[j];
+
+ for (i = 0; i < c->num_tc; i++) {
+ struct mlx5e_sq_stats *stats = c->sq[i].stats;
+
+ ts_stats->pkts += stats->timestamps;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&priv->state_lock);
+}
+
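mlx5e_stats_ts_get() is shaped for ethtool's Tx hardware-timestamping statistics interface (struct ethtool_ts_stats). The mlx5e wiring is not part of this hunk, so the hookup below is an assumption for illustration — a driver would typically expose the aggregator through the get_ts_stats ethtool op:

	/* Assumed wiring (hook name per the kernel ethtool interface): */
	static void sketch_get_ts_stats(struct net_device *netdev,
					struct ethtool_ts_stats *ts_stats)
	{
		struct mlx5e_priv *priv = netdev_priv(netdev);

		mlx5e_stats_ts_get(priv, ts_stats);
	}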
#define PPORT_PHY_STATISTICAL_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.phys_layer_statistical_cntrs.c##_high)
@@ -1215,21 +1266,18 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(phy)
struct mlx5_core_dev *mdev = priv->mdev;
int i;
- strcpy(data + (idx++) * ETH_GSTRING_LEN, "link_down_events_phy");
+ ethtool_puts(data, "link_down_events_phy");
if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return idx;
+ return;
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_phy_statistical_stats_desc[i].format);
+ ethtool_puts(data, pport_phy_statistical_stats_desc[i].format);
if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_phy_statistical_err_lanes_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data,
+ pport_phy_statistical_err_lanes_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
@@ -1238,24 +1286,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(phy)
int i;
/* link_down_events_phy has special handling since it is not stored in __be64 format */
- data[idx++] = MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
- counter_set.phys_layer_cntrs.link_down_events);
+ mlx5e_ethtool_put_stat(
+ data, MLX5_GET(ppcnt_reg, priv->stats.pport.phy_counters,
+ counter_set.phys_layer_cntrs.link_down_events));
if (!MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group))
- return idx;
+ return;
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
- pport_phy_statistical_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_stats_desc, i));
if (MLX5_CAP_PCAM_FEATURE(mdev, per_lane_error_counters))
for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_PER_LANE_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
- pport_phy_statistical_err_lanes_stats_desc,
- i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport
+ .phy_statistical_counters,
+ pport_phy_statistical_err_lanes_stats_desc,
+ i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(phy)
@@ -1436,9 +1489,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(eth_ext)
if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_eth_ext_stats_desc[i].format);
- return idx;
+ ethtool_puts(data, pport_eth_ext_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
@@ -1447,10 +1498,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(eth_ext)
if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
- pport_eth_ext_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.eth_ext_counters,
+ pport_eth_ext_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(eth_ext)
@@ -1516,19 +1568,16 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pcie)
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc[i].format);
+ ethtool_puts(data, pcie_perf_stats_desc[i].format);
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc64[i].format);
+ ethtool_puts(data, pcie_perf_stats_desc64[i].format);
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stall_stats_desc[i].format);
- return idx;
+ ethtool_puts(data,
+ pcie_perf_stall_stats_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
@@ -1537,22 +1586,27 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pcie)
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(
+ &priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc, i));
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc64, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc64, i));
if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stall_stats_desc, i);
- return idx;
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR32_BE(
+ &priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stall_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pcie)
@@ -1609,18 +1663,18 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_port_buff_congest)
int i, prio;
if (!MLX5_CAP_GEN(mdev, sbcam_reg))
- return idx;
+ return;
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_tc_prio_stats_desc[i].format, prio);
+ ethtool_sprintf(data,
+ pport_per_tc_prio_stats_desc[i].format,
+ prio);
for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_tc_congest_prio_stats_desc[i].format, prio);
+ ethtool_sprintf(data,
+ pport_per_tc_congest_prio_stats_desc[i].format,
+ prio);
}
-
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
@@ -1630,20 +1684,24 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_port_buff_congest)
int i, prio;
if (!MLX5_CAP_GEN(mdev, sbcam_reg))
- return idx;
+ return;
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_TC_PRIO_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&pport->per_tc_prio_counters[prio],
- pport_per_tc_prio_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &pport->per_tc_prio_counters[prio],
+ pport_per_tc_prio_stats_desc, i));
for (i = 0; i < NUM_PPORT_PER_TC_CONGEST_PRIO_COUNTERS ; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&pport->per_tc_congest_prio_counters[prio],
- pport_per_tc_congest_prio_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &pport->per_tc_congest_prio_counters
+ [prio],
+ pport_per_tc_congest_prio_stats_desc,
+ i));
}
-
- return idx;
}
static void mlx5e_grp_per_tc_prio_update_stats(struct mlx5e_priv *priv)
@@ -1728,35 +1786,33 @@ static int mlx5e_grp_per_prio_traffic_get_num_stats(void)
return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
}
-static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
- u8 *data,
- int idx)
+static void mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
+ u8 **data)
{
int i, prio;
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_traffic_stats_desc[i].format, prio);
+ ethtool_sprintf(data,
+ pport_per_prio_traffic_stats_desc[i].format,
+ prio);
}
-
- return idx;
}
-static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
- u64 *data,
- int idx)
+static void mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
+ u64 **data)
{
int i, prio;
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_traffic_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport
+ .per_prio_counters[prio],
+ pport_per_prio_traffic_stats_desc, i));
}
-
- return idx;
}
static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
@@ -1816,9 +1872,8 @@ static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
NUM_PPORT_PFC_STALL_COUNTERS(priv);
}
-static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
- u8 *data,
- int idx)
+static void mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
+ u8 **data)
{
unsigned long pfc_combined;
int i, prio;
@@ -1829,28 +1884,26 @@ static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
char pfc_string[ETH_GSTRING_LEN];
snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, pfc_string);
+ ethtool_sprintf(data,
+ pport_per_prio_pfc_stats_desc[i].format,
+ pfc_string);
}
}
if (mlx5e_query_global_pause_combined(priv)) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, "global");
+ ethtool_sprintf(data,
+ pport_per_prio_pfc_stats_desc[i].format,
+ "global");
}
}
for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_pfc_stall_stats_desc[i].format);
-
- return idx;
+ ethtool_puts(data, pport_pfc_stall_stats_desc[i].format);
}
-static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
- u64 *data,
- int idx)
+static void mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
+ u64 **data)
{
unsigned long pfc_combined;
int i, prio;
@@ -1858,25 +1911,30 @@ static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
pfc_combined = mlx5e_query_pfc_combined(priv);
for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_pfc_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport
+ .per_prio_counters[prio],
+ pport_per_prio_pfc_stats_desc, i));
}
}
if (mlx5e_query_global_pause_combined(priv)) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] =
- MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
- pport_per_prio_pfc_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.per_prio_counters[0],
+ pport_per_prio_pfc_stats_desc, i));
}
}
for (i = 0; i < NUM_PPORT_PFC_STALL_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
- pport_pfc_stall_stats_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_BE(
+ &priv->stats.pport.per_prio_counters[0],
+ pport_pfc_stall_stats_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
@@ -1887,16 +1945,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(per_prio)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(per_prio)
{
- idx = mlx5e_grp_per_prio_traffic_fill_strings(priv, data, idx);
- idx = mlx5e_grp_per_prio_pfc_fill_strings(priv, data, idx);
- return idx;
+ mlx5e_grp_per_prio_traffic_fill_strings(priv, data);
+ mlx5e_grp_per_prio_pfc_fill_strings(priv, data);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(per_prio)
{
- idx = mlx5e_grp_per_prio_traffic_fill_stats(priv, data, idx);
- idx = mlx5e_grp_per_prio_pfc_fill_stats(priv, data, idx);
- return idx;
+ mlx5e_grp_per_prio_traffic_fill_stats(priv, data);
+ mlx5e_grp_per_prio_pfc_fill_stats(priv, data);
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(per_prio)
@@ -1944,12 +2000,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(pme)
int i;
for (i = 0; i < NUM_PME_STATUS_STATS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
+ ethtool_puts(data, mlx5e_pme_status_desc[i].format);
for (i = 0; i < NUM_PME_ERR_STATS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
-
- return idx;
+ ethtool_puts(data, mlx5e_pme_error_desc[i].format);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
@@ -1960,14 +2014,14 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(pme)
mlx5_get_pme_stats(priv->mdev, &pme_stats);
for (i = 0; i < NUM_PME_STATUS_STATS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
- mlx5e_pme_status_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(pme_stats.status_counters,
+ mlx5e_pme_status_desc, i));
for (i = 0; i < NUM_PME_ERR_STATS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
- mlx5e_pme_error_desc, i);
-
- return idx;
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(pme_stats.error_counters,
+ mlx5e_pme_error_desc, i));
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(pme) { return; }
@@ -1979,12 +2033,12 @@ static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(tls)
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(tls)
{
- return idx + mlx5e_ktls_get_strings(priv, data + idx * ETH_GSTRING_LEN);
+ mlx5e_ktls_get_strings(priv, data);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(tls)
{
- return idx + mlx5e_ktls_get_stats(priv, data + idx);
+ mlx5e_ktls_get_stats(priv, data);
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(tls) { return; }
@@ -2005,8 +2059,9 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
@@ -2063,6 +2118,7 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, timestamps) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
@@ -2175,6 +2231,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, late_cqe) },
+ { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, lost_cqe) },
};
static const struct counter_desc ptp_rq_stats_desc[] = {
@@ -2214,6 +2271,7 @@ static const struct counter_desc qos_sq_stats_desc[] = {
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, timestamps) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_blks) },
{ MLX5E_DECLARE_QOS_TX_STAT(struct mlx5e_sq_stats, mpwqe_pkts) },
#ifdef CONFIG_MLX5_EN_TLS
@@ -2264,10 +2322,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(qos)
for (qid = 0; qid < max_qos_sqs; qid++)
for (i = 0; i < NUM_QOS_SQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- qos_sq_stats_desc[i].format, qid);
-
- return idx;
+ ethtool_sprintf(data, qos_sq_stats_desc[i].format, qid);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
@@ -2284,10 +2339,10 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(qos)
struct mlx5e_sq_stats *s = READ_ONCE(stats[qid]);
for (i = 0; i < NUM_QOS_SQ_STATS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_CPU(s, qos_sq_stats_desc, i));
}
-
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(qos) { return; }
@@ -2312,29 +2367,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(ptp)
int i, tc;
if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
- return idx;
+ return;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- "%s", ptp_ch_stats_desc[i].format);
+ ethtool_puts(data, ptp_ch_stats_desc[i].format);
if (priv->tx_ptp_opened) {
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_SQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_sq_stats_desc[i].format, tc);
+ ethtool_sprintf(data,
+ ptp_sq_stats_desc[i].format,
+ tc);
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_CQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_cq_stats_desc[i].format, tc);
+ ethtool_sprintf(data,
+ ptp_cq_stats_desc[i].format,
+ tc);
}
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ptp_rq_stats_desc[i].format, MLX5E_PTP_CHANNEL_IX);
+ ethtool_sprintf(data, ptp_rq_stats_desc[i].format,
+ MLX5E_PTP_CHANNEL_IX);
}
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
@@ -2342,33 +2397,35 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ptp)
int i, tc;
if (!priv->tx_ptp_opened && !priv->rx_ptp_opened)
- return idx;
+ return;
for (i = 0; i < NUM_PTP_CH_STATS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
- ptp_ch_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(&priv->ptp_stats.ch,
+ ptp_ch_stats_desc, i));
if (priv->tx_ptp_opened) {
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_SQ_STATS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->ptp_stats.sq[tc],
- ptp_sq_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->ptp_stats.sq[tc],
+ ptp_sq_stats_desc, i));
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < NUM_PTP_CQ_STATS; i++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->ptp_stats.cq[tc],
- ptp_cq_stats_desc, i);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->ptp_stats.cq[tc],
+ ptp_cq_stats_desc, i));
}
if (priv->rx_ptp_opened) {
for (i = 0; i < NUM_PTP_RQ_STATS; i++)
- data[idx++] =
+ mlx5e_ethtool_put_stat(
+ data,
MLX5E_READ_CTR64_CPU(&priv->ptp_stats.rq,
- ptp_rq_stats_desc, i);
+ ptp_rq_stats_desc, i));
}
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ptp) { return; }
@@ -2394,38 +2451,29 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- ch_stats_desc[j].format, i);
+ ethtool_sprintf(data, ch_stats_desc[j].format, i);
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- rq_stats_desc[j].format, i);
+ ethtool_sprintf(data, rq_stats_desc[j].format, i);
for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- xskrq_stats_desc[j].format, i);
+ ethtool_sprintf(data, xskrq_stats_desc[j].format, i);
for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- rq_xdpsq_stats_desc[j].format, i);
+ ethtool_sprintf(data, rq_xdpsq_stats_desc[j].format, i);
}
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- sq_stats_desc[j].format,
- i + tc * max_nch);
+ ethtool_sprintf(data, sq_stats_desc[j].format,
+ i + tc * max_nch);
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- xsksq_stats_desc[j].format, i);
+ ethtool_sprintf(data, xsksq_stats_desc[j].format, i);
for (j = 0; j < NUM_XDPSQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- xdpsq_stats_desc[j].format, i);
+ ethtool_sprintf(data, xdpsq_stats_desc[j].format, i);
}
-
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
@@ -2436,44 +2484,50 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(channels)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_CH_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->ch,
- ch_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->ch,
+ ch_stats_desc, j));
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_RQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq,
- rq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->rq,
+ rq_stats_desc, j));
for (j = 0; j < NUM_XSKRQ_STATS * is_xsk; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xskrq,
- xskrq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->xskrq,
+ xskrq_stats_desc, j));
for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->rq_xdpsq,
- rq_xdpsq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->rq_xdpsq,
+ rq_xdpsq_stats_desc, j));
}
for (tc = 0; tc < priv->max_opened_tc; tc++)
for (i = 0; i < max_nch; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->sq[tc],
- sq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data,
+ MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->sq[tc],
+ sq_stats_desc, j));
for (i = 0; i < max_nch; i++) {
for (j = 0; j < NUM_XSKSQ_STATS * is_xsk; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xsksq,
- xsksq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->xsksq,
+ xsksq_stats_desc, j));
for (j = 0; j < NUM_XDPSQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&priv->channel_stats[i]->xdpsq,
- xdpsq_stats_desc, j);
+ mlx5e_ethtool_put_stat(
+ data, MLX5E_READ_CTR64_CPU(
+ &priv->channel_stats[i]->xdpsq,
+ xdpsq_stats_desc, j));
}
-
- return idx;
}
static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(channels) { return; }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 12b3607afecd..4c5858c1dd82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -71,11 +71,13 @@ struct mlx5e_priv;
struct mlx5e_stats_grp {
u16 update_stats_mask;
int (*get_num_stats)(struct mlx5e_priv *priv);
- int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
- int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+ void (*fill_strings)(struct mlx5e_priv *priv, u8 **data);
+ void (*fill_stats)(struct mlx5e_priv *priv, u64 **data);
void (*update_stats)(struct mlx5e_priv *priv);
};
+void mlx5e_ethtool_put_stat(u64 **data, u64 val);
+
typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
#define MLX5E_STATS_GRP_OP(grp, name) mlx5e_stats_grp_ ## grp ## _ ## name
@@ -87,10 +89,10 @@ typedef const struct mlx5e_stats_grp *const mlx5e_stats_grp_t;
void MLX5E_STATS_GRP_OP(grp, update_stats)(struct mlx5e_priv *priv)
#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(grp) \
- int MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx)
+ void MLX5E_STATS_GRP_OP(grp, fill_strings)(struct mlx5e_priv *priv, u8 **data)
#define MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(grp) \
- int MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx)
+ void MLX5E_STATS_GRP_OP(grp, fill_stats)(struct mlx5e_priv *priv, u64 **data)
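With the reworked signatures, a stats group reads roughly as follows (group name and counter invented purely for illustration; the macros above expand to the void callbacks with priv and data in scope):

	static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(example)
	{
		return 1;
	}

	static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(example)
	{
		ethtool_puts(data, "example_counter");	/* advances *data */
	}

	static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(example)
	{
		mlx5e_ethtool_put_stat(data, 0);	/* advances the u64 cursor */
	}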
#define MLX5E_STATS_GRP(grp) mlx5e_stats_grp_ ## grp
@@ -126,6 +128,8 @@ void mlx5e_stats_eth_ctrl_get(struct mlx5e_priv *priv,
void mlx5e_stats_rmon_get(struct mlx5e_priv *priv,
struct ethtool_rmon_stats *rmon,
const struct ethtool_rmon_hist_range **ranges);
+void mlx5e_stats_ts_get(struct mlx5e_priv *priv,
+ struct ethtool_ts_stats *ts_stats);
void mlx5e_get_link_ext_stats(struct net_device *dev,
struct ethtool_link_ext_stats *stats);
@@ -149,8 +153,9 @@ struct mlx5e_sw_stats {
u64 rx_gro_packets;
u64 rx_gro_bytes;
u64 rx_gro_skbs;
- u64 rx_gro_match_packets;
u64 rx_gro_large_hds;
+ u64 rx_hds_nodata_packets;
+ u64 rx_hds_nodata_bytes;
u64 rx_mcast_packets;
u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
@@ -348,8 +353,9 @@ struct mlx5e_rq_stats {
u64 gro_packets;
u64 gro_bytes;
u64 gro_skbs;
- u64 gro_match_packets;
u64 gro_large_hds;
+ u64 hds_nodata_packets;
+ u64 hds_nodata_bytes;
u64 mcast_packets;
u64 ecn_mark;
u64 removed_vlan_packets;
@@ -429,6 +435,7 @@ struct mlx5e_sq_stats {
u64 stopped;
u64 dropped;
u64 recover;
+ u64 timestamps;
/* dirtied @completion */
u64 cqes ____cacheline_aligned_in_smp;
u64 wake;
@@ -461,6 +468,7 @@ struct mlx5e_ptp_cq_stats {
u64 abort;
u64 abort_abs_diff_ns;
u64 late_cqe;
+ u64 lost_cqe;
};
struct mlx5e_rep_stats {
@@ -478,6 +486,7 @@ struct mlx5e_rep_stats {
u64 tx_vport_rdma_multicast_bytes;
u64 vport_loopback_packets;
u64 vport_loopback_bytes;
+ u64 rx_vport_out_of_buffer;
};
struct mlx5e_stats {
@@ -498,6 +507,7 @@ static inline void mlx5e_stats_copy_rep_stats(struct rtnl_link_stats64 *vf_vport
vf_vport->tx_packets = rep_stats->vport_tx_packets;
vf_vport->rx_bytes = rep_stats->vport_rx_bytes;
vf_vport->tx_bytes = rep_stats->vport_tx_bytes;
+ vf_vport->rx_missed_errors = rep_stats->rx_vport_out_of_buffer;
}
extern mlx5e_stats_grp_t mlx5e_nic_stats_grps[];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 31ed26cac9bf..30673292e15f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -835,8 +835,7 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
memset(ttc_params, 0, sizeof(*ttc_params));
- ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
- MLX5_FLOW_NAMESPACE_KERNEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_KERNEL;
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
ttc_params->dests[tt].tir_num =
@@ -2802,12 +2801,6 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
- /* the HW doesn't support frag first/later */
- if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
- NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
- return -EOPNOTSUPP;
- }
-
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
@@ -2820,6 +2813,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
else
*match_level = MLX5_MATCH_L3;
}
+
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT,
+ match.mask->flags, extack))
+ return -EOPNOTSUPP;
}
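The removed explicit FLOW_DIS_FIRST_FRAG check is subsumed by the common flow_rule_is_supp_control_flags() helper, which rejects any masked control flag outside the supported set and reports through extack. A sketch of the assumed semantics (the real helper lives in the flow_offload core):

	static bool sketch_is_supp_control_flags(const u32 supp_flags,
						 const u32 ctrl_flags,
						 struct netlink_ext_ack *extack)
	{
		if (ctrl_flags & ~supp_flags) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Unsupported match on control.flags");
			return false;
		}
		return true;
	}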
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
@@ -5464,6 +5461,7 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct tunnel_match_enc_opts enc_opts = {};
struct mlx5_rep_uplink_priv *uplink_priv;
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct mlx5e_rep_priv *uplink_rpriv;
struct metadata_dst *tun_dst;
struct tunnel_match_key key;
@@ -5471,6 +5469,8 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
struct net_device *dev;
int err;
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
+
enc_opts_id = tunnel_id & ENC_OPTS_BITS_MASK;
tun_id = tunnel_id >> ENC_OPTS_BITS;
@@ -5503,14 +5503,14 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
key.enc_ip.tos, key.enc_ip.ttl,
- key.enc_tp.dst, TUNNEL_KEY,
+ key.enc_tp.dst, flags,
key32_to_tunnel_id(key.enc_key_id.keyid),
enc_opts.key.len);
break;
case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
key.enc_ip.tos, key.enc_ip.ttl,
- key.enc_tp.dst, 0, TUNNEL_KEY,
+ key.enc_tp.dst, 0, flags,
key32_to_tunnel_id(key.enc_key_id.keyid),
enc_opts.key.len);
break;
@@ -5528,11 +5528,16 @@ static bool mlx5e_tc_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb
tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
- if (enc_opts.key.len)
+ if (enc_opts.key.len) {
+ ip_tunnel_flags_zero(flags);
+ if (enc_opts.key.dst_opt_type)
+ __set_bit(enc_opts.key.dst_opt_type, flags);
+
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
enc_opts.key.data,
enc_opts.key.len,
- enc_opts.key.dst_opt_type);
+ flags);
+ }
skb_dst_set(skb, (struct dst_entry *)tun_dst);
dev = dev_get_by_index(&init_net, key.filter_ifindex);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index e21a3b4128ce..b09e9abd39f3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -153,7 +153,11 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
*hopbyhop = 0;
if (skb->encapsulation) {
- ihs = skb_inner_tcp_all_headers(skb);
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
+ ihs = skb_inner_transport_offset(skb) +
+ sizeof(struct udphdr);
+ else
+ ihs = skb_inner_tcp_all_headers(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
@@ -749,11 +753,13 @@ static void mlx5e_consume_skb(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u64 ts = get_cqe_ts(cqe);
hwts.hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, ts);
- if (sq->ptpsq)
+ if (sq->ptpsq) {
mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
hwts.hwtstamp, sq->ptpsq->cq_stats);
- else
+ } else {
skb_tstamp_tx(skb, &hwts);
+ sq->stats->timestamps++;
+ }
}
napi_consume_skb(skb, napi_budget);
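This ties back to mlx5e_stats_ts_get() above: the timestamps counter incremented here on the non-PTP completion path is exactly what the aggregator sums per SQ, while the PTP path keeps accounting through the ptpsq CQ stats (including the new lost_cqe counter).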
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index a7d9b7cb4297..5873fde65c2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -55,7 +55,7 @@ static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
return;
dim_update_sample(sq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
- net_dim(&sq->dim, dim_sample);
+ net_dim(sq->dim, dim_sample);
}
static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
@@ -67,7 +67,7 @@ static void mlx5e_handle_rx_dim(struct mlx5e_rq *rq)
return;
dim_update_sample(rq->cq.event_ctr, stats->packets, stats->bytes, &dim_sample);
- net_dim(&rq->dim, dim_sample);
+ net_dim(rq->dim, dim_sample);
}
void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 40a6cb052a2d..cb7e7e4104af 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -688,6 +688,12 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
if (err)
goto err2;
+ /* Skip page EQ creation when the device does not issue page requests */
+ if (MLX5_CAP_GEN(dev, page_request_disable)) {
+ mlx5_core_dbg(dev, "Skip page EQ creation\n");
+ return 0;
+ }
+
param = (struct mlx5_eq_param) {
.irq = table->ctrl_irq,
.nent = /* TODO: sriov max_vf + */ 1,
@@ -708,7 +714,7 @@ err2:
err1:
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
- mlx5_ctrl_irq_release(table->ctrl_irq);
+ mlx5_ctrl_irq_release(dev, table->ctrl_irq);
return err;
}
@@ -716,14 +722,15 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
- cleanup_async_eq(dev, &table->pages_eq, "pages");
+ if (!MLX5_CAP_GEN(dev, page_request_disable))
+ cleanup_async_eq(dev, &table->pages_eq, "pages");
cleanup_async_eq(dev, &table->async_eq, "async");
mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_DESTROY_EQ);
mlx5_cmd_use_polling(dev);
cleanup_async_eq(dev, &table->cmd_eq, "cmd");
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
- mlx5_ctrl_irq_release(table->ctrl_irq);
+ mlx5_ctrl_irq_release(dev, table->ctrl_irq);
}
struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev)
@@ -911,7 +918,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
af_desc.is_managed = 1;
cpumask_copy(&af_desc.mask, cpu_online_mask);
cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
- irq = mlx5_irq_affinity_request(pool, &af_desc);
+ irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
if (IS_ERR(irq))
return PTR_ERR(irq);
@@ -1180,7 +1187,6 @@ static int get_num_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *eq_table = dev->priv.eq_table;
int max_dev_eqs;
- int max_eqs_sf;
int num_eqs;
/* If ethernet is disabled we use just a single completion vector to
@@ -1190,14 +1196,16 @@ static int get_num_eqs(struct mlx5_core_dev *dev)
if (!mlx5_core_is_eth_enabled(dev) && mlx5_eth_supported(dev))
return 1;
- max_dev_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
- MLX5_CAP_GEN(dev, max_num_eqs) :
- 1 << MLX5_CAP_GEN(dev, log_max_eq);
+ max_dev_eqs = mlx5_max_eq_cap_get(dev);
num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table),
max_dev_eqs - MLX5_MAX_ASYNC_EQS);
if (mlx5_core_is_sf(dev)) {
- max_eqs_sf = min_t(int, MLX5_COMP_EQS_PER_SF,
+ int max_eqs_sf = MLX5_CAP_GEN_2(dev, sf_eq_usage) ?
+ MLX5_CAP_GEN_2(dev, max_num_eqs_24b) :
+ MLX5_COMP_EQS_PER_SF;
+
+ max_eqs_sf = min_t(int, max_eqs_sf,
mlx5_irq_table_get_sfs_vec(eq_table->irq_table));
num_eqs = min_t(int, num_eqs, max_eqs_sf);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
index 50d2ea323979..a436ce895e45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_ofld.c
@@ -6,6 +6,9 @@
#include "helper.h"
#include "ofld.h"
+static int
+acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
+
static bool
esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw,
const struct mlx5_vport *vport)
@@ -123,18 +126,31 @@ static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw,
{
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *flow_rule;
+ bool created = false;
int err = 0;
+ if (!vport->ingress.acl) {
+ err = acl_ingress_ofld_setup(esw, vport);
+ if (err)
+ return err;
+ created = true;
+ }
+
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
flow_act.fg = vport->ingress.offloads.drop_grp;
flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
- goto out;
+ goto err_out;
}
vport->ingress.offloads.drop_rule = flow_rule;
-out:
+
+ return 0;
+err_out:
+ /* Only destroy the ingress ACL if it was created in this function. */
+ if (created)
+ esw_acl_ingress_ofld_cleanup(esw, vport);
return err;
}
@@ -299,16 +315,12 @@ static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport)
}
}
-int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw,
- struct mlx5_vport *vport)
+static int
+acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
int num_ftes = 0;
int err;
- if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
- !esw_acl_ingress_prio_tag_enabled(esw, vport))
- return 0;
-
esw_acl_ingress_allow_rule_destroy(vport);
if (mlx5_eswitch_vport_match_metadata_enabled(esw))
@@ -347,6 +359,15 @@ group_err:
return err;
}
+int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+ !esw_acl_ingress_prio_tag_enabled(esw, vport))
+ return 0;
+
+ return acl_ingress_ofld_setup(esw, vport);
+}
+
void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw,
struct mlx5_vport *vport)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 1b9bc32efd6f..c5ea1d1d2b03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -1874,7 +1874,7 @@ int mlx5_esw_bridge_port_mdb_add(struct net_device *dev, u16 vport_num, u16 esw_
"Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
NL_SET_ERR_MSG_FMT_MOD(extack,
- "Failed to lookup bridge port vlan metadata to create MDB (MAC=%pM,vid=%u,vport=%u)\n",
+ "Failed to lookup vlan metadata for MDB (MAC=%pM,vid=%u,vport=%u)\n",
addr, vid, vport_num);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index d8e739cbcbce..f8869c9b6802 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -98,6 +98,8 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
.port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
.port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
#endif /* CONFIG_XFRM_OFFLOAD */
+ .port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get,
+ .port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set,
};
static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
@@ -143,6 +145,8 @@ static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
.port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
.port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
#endif
+ .port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get,
+ .port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set,
};
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
index 458baf0c6415..1ce332f21ebe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h
@@ -17,7 +17,7 @@ TRACE_EVENT(mlx5_esw_vport_qos_destroy,
__field(unsigned short, vport_id)
__field(unsigned int, tsar_ix)
),
- TP_fast_assign(__assign_str(devname, dev_name(vport->dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->vport_id = vport->vport;
__entry->tsar_ix = vport->qos.esw_tsar_ix;
),
@@ -36,7 +36,7 @@ DECLARE_EVENT_CLASS(mlx5_esw_vport_qos_template,
__field(unsigned int, max_rate)
__field(void *, group)
),
- TP_fast_assign(__assign_str(devname, dev_name(vport->dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->vport_id = vport->vport;
__entry->tsar_ix = vport->qos.esw_tsar_ix;
__entry->bw_share = bw_share;
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(mlx5_esw_group_qos_template,
__field(const void *, group)
__field(unsigned int, tsar_ix)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->group = group;
__entry->tsar_ix = tsar_ix;
),
@@ -102,7 +102,7 @@ TRACE_EVENT(mlx5_esw_group_qos_config,
__field(unsigned int, bw_share)
__field(unsigned int, max_rate)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->group = group;
__entry->tsar_ix = tsar_ix;
__entry->bw_share = bw_share;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index d2ebe56c3977..20146a2dc7f4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -531,7 +531,7 @@ static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type)
switch (type) {
case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
return MLX5_CAP_QOS(dev, esw_element_type) &
- ELEMENT_TYPE_CAP_MASK_TASR;
+ ELEMENT_TYPE_CAP_MASK_TSAR;
case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
return MLX5_CAP_QOS(dev, esw_element_type) &
ELEMENT_TYPE_CAP_MASK_VPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 1789800faaeb..17f78091ad30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1805,7 +1805,8 @@ err:
}
static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 349e28a6dd8d..578466d69f21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -223,6 +223,7 @@ struct mlx5_vport {
u16 vport;
bool enabled;
+ bool max_eqs_set;
enum mlx5_eswitch_vport_event enabled_events;
int index;
struct mlx5_devlink_port *dl_port;
@@ -573,6 +574,15 @@ int mlx5_devlink_port_fn_ipsec_packet_get(struct devlink_port *port, bool *is_en
int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
struct netlink_ext_ack *extack);
#endif /* CONFIG_XFRM_OFFLOAD */
+int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
+ u32 *max_io_eqs,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
+ u32 max_io_eqs,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
+ struct netlink_ext_ack *extack);
+
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
@@ -833,7 +843,7 @@ int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw, int max_slaves);
void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw);
-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
+int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);
bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
@@ -925,7 +935,7 @@ mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }
static inline int
-mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 844d3e3a65dd..768199d2255a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -67,6 +67,9 @@
#define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
+#define MLX5_ESW_MAX_CTRL_EQS 4
+#define MLX5_ESW_DEFAULT_SF_COMP_EQS 8
+
static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
@@ -2411,7 +2414,8 @@ err:
}
static int esw_port_metadata_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_eswitch *esw = dev->priv.eswitch;
@@ -2502,6 +2506,16 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
esw_offloads_cleanup_reps(esw);
}
+static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep, u8 rep_type)
+{
+ if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
+ REP_REGISTERED, REP_LOADED) == REP_REGISTERED)
+ return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
+
+ return 0;
+}
+
static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep, u8 rep_type)
{
@@ -2526,13 +2540,11 @@ static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
int err;
rep = mlx5_eswitch_get_rep(esw, vport_num);
- for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
- if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
- REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
- err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
- if (err)
- goto err_reps;
- }
+ for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+ err = __esw_offloads_load_rep(esw, rep, rep_type);
+ if (err)
+ goto err_reps;
+ }
return 0;
@@ -3277,7 +3289,7 @@ static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
esw_vport_destroy_offloads_acl_tables(esw, vport);
}
-int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
@@ -3290,13 +3302,13 @@ int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
return 0;
- ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
+ ret = __esw_offloads_load_rep(esw, rep, REP_IB);
if (ret)
return ret;
mlx5_esw_for_each_rep(esw, i, rep) {
if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
- mlx5_esw_offloads_rep_load(esw, rep->vport);
+ __esw_offloads_load_rep(esw, rep, REP_IB);
}
return 0;
@@ -4568,3 +4580,122 @@ unlock:
return err;
}
#endif /* CONFIG_XFRM_OFFLOAD */
+
+int
+mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ u16 vport_num = vport->vport;
+ struct mlx5_eswitch *esw;
+ void *query_ctx;
+ void *hca_caps;
+ u32 max_eqs;
+ int err;
+
+ esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support getting the max number of EQs");
+ return -EOPNOTSUPP;
+ }
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ mutex_lock(&esw->state_lock);
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
+ MLX5_CAP_GENERAL_2);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
+ goto out;
+ }
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ max_eqs = MLX5_GET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b);
+ if (max_eqs < MLX5_ESW_MAX_CTRL_EQS)
+ *max_io_eqs = 0;
+ else
+ *max_io_eqs = max_eqs - MLX5_ESW_MAX_CTRL_EQS;
+out:
+ mutex_unlock(&esw->state_lock);
+ kfree(query_ctx);
+ return err;
+}
+
+int
+mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
+ int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ u16 vport_num = vport->vport;
+ struct mlx5_eswitch *esw;
+ void *query_ctx;
+ void *hca_caps;
+ u16 max_eqs;
+ int err;
+
+ esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
+ if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support VHCA management");
+ return -EOPNOTSUPP;
+ }
+
+ if (!MLX5_CAP_GEN_2(esw->dev, max_num_eqs_24b)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device doesn't support changing the max number of EQs");
+ return -EOPNOTSUPP;
+ }
+
+ if (check_add_overflow(max_io_eqs, MLX5_ESW_MAX_CTRL_EQS, &max_eqs)) {
+ NL_SET_ERR_MSG_MOD(extack, "Supplied value out of range");
+ return -EINVAL;
+ }
+
+ query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
+ if (!query_ctx)
+ return -ENOMEM;
+
+ mutex_lock(&esw->state_lock);
+ err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
+ MLX5_CAP_GENERAL_2);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
+ goto out;
+ }
+
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
+ MLX5_SET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b, max_eqs);
+
+ if (mlx5_esw_is_sf_vport(esw, vport_num))
+ MLX5_SET(cmd_hca_cap_2, hca_caps, sf_eq_usage, 1);
+
+ err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2);
+ if (err)
+ NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps");
+ vport->max_eqs_set = true;
+out:
+ mutex_unlock(&esw->state_lock);
+ kfree(query_ctx);
+ return err;
+}
+
+int
+mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ return mlx5_devlink_port_fn_max_io_eqs_set(port,
+ MLX5_ESW_DEFAULT_SF_COMP_EQS,
+ extack);
+}
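Once exposed through the port_fn_max_io_eqs_get/set ops registered in devlink_port.c above, these functions are driven from userspace via devlink's port-function interface; the command form below is an assumption based on that interface, not something shown in this diff:

	devlink port function show pci/0000:06:00.0/1
	devlink port function set pci/0000:06:00.0/1 max_io_eqs 32

MLX5_ESW_MAX_CTRL_EQS (4) EQs are reserved for control, so a request for 32 I/O EQs programs max_num_eqs_24b to 36; the check_add_overflow() above rejects requests whose sum does not fit the u16 used for the write.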
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index cf085a478e3e..a47d6419160d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -3332,7 +3332,8 @@ static int mlx5_fs_mode_validate(struct devlink *devlink, u32 id,
}
static int mlx5_fs_mode_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
enum mlx5_flow_steering_mode mode;
@@ -3352,9 +3353,9 @@ static int mlx5_fs_mode_get(struct devlink *devlink, u32 id,
struct mlx5_core_dev *dev = devlink_priv(devlink);
if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS)
- strcpy(ctx->val.vstr, "smfs");
+ strscpy(ctx->val.vstr, "smfs", sizeof(ctx->val.vstr));
else
- strcpy(ctx->val.vstr, "dmfs");
+ strscpy(ctx->val.vstr, "dmfs", sizeof(ctx->val.vstr));
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index e7faf7e73ca4..b61b7d966114 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -283,7 +283,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return 0;
}
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id)
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id)
{
u32 in[MLX5_ST_SZ_DW(init_hca_in)] = {};
int i;
@@ -373,6 +373,10 @@ int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
do {
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+ return -EACCES;
+ }
cond_resched();
} while (!time_after(jiffies, end));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 2911aa34a5be..b43ca0b762c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -52,7 +52,8 @@ static void mlx5_set_fw_rst_ack(struct mlx5_core_dev *dev)
}
static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_fw_reset *fw_reset;
@@ -206,6 +207,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ struct devlink *devlink = priv_to_devlink(dev);
/* if this is the driver that initiated the fw reset, devlink completed the reload */
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
@@ -217,9 +219,11 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unload
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
else
mlx5_load_one(dev, true);
- devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0,
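+ /* the reload-actions notification runs under the devlink instance lock */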
+ devl_lock(devlink);
+ devlink_remote_reload_actions_performed(devlink, 0,
BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
+ devl_unlock(devlink);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index ad38e31822df..a6329ca2d9bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -248,6 +248,10 @@ recover_from_sw_reset:
do {
if (mlx5_get_nic_state(dev) == MLX5_INITIAL_SEG_NIC_INTERFACE_DISABLED)
break;
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for NIC IFC\n");
+ goto unlock;
+ }
msleep(20);
} while (!time_after(jiffies, end));
@@ -317,6 +321,10 @@ int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
mlx5_core_warn(dev, "device is being removed, stop waiting for PCI\n");
return -ENODEV;
}
+ if (pci_channel_offline(dev->pdev)) {
+ mlx5_core_err(dev, "PCI channel offline, stop waiting for PCI\n");
+ return -EACCES;
+ }
msleep(100);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 779d92b762d3..26f8a11b8906 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -136,7 +136,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev,
}
static int mlx5i_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
@@ -215,7 +215,7 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
int speed, ret;
ret = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper, &ib_proto_oper,
- 1);
+ 1, 0);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index d77be1b4dd9c..0979d672d47f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -372,7 +372,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
mlx5e_fs_set_ns(priv->fs, ns, false);
err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res,
- !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_fs_has_arfs(priv->netdev));
if (err) {
netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
err);
@@ -391,8 +391,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
return 0;
err_destroy_arfs_tables:
- mlx5e_arfs_destroy_tables(priv->fs,
- !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev));
return err;
}
@@ -400,8 +399,7 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv->fs);
- mlx5e_arfs_destroy_tables(priv->fs,
- !!(priv->netdev->hw_features & NETIF_F_NTUPLE));
+ mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev));
mlx5e_ethtool_cleanup_steering(priv->fs);
}
@@ -531,7 +529,7 @@ static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu)
if (err)
goto out;
- netdev->mtu = new_params.sw_mtu;
+ WRITE_ONCE(netdev->mtu, new_params.sw_mtu);
out:
mutex_unlock(&priv->state_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
index f87471306f6b..028a76944d82 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -280,7 +280,7 @@ static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu)
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
mutex_lock(&priv->state_lock);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
mutex_unlock(&priv->state_lock);
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
index 612e666ec263..1477db7f5307 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c
@@ -48,6 +48,7 @@ static struct mlx5_irq *
irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
{
struct irq_affinity_desc auto_desc = {};
+ struct mlx5_irq *irq;
u32 irq_index;
int err;
@@ -64,9 +65,12 @@ irq_pool_request_irq(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_de
else
cpu_get(pool, cpumask_first(&af_desc->mask));
}
- return mlx5_irq_alloc(pool, irq_index,
- cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
- NULL);
+ irq = mlx5_irq_alloc(pool, irq_index,
+ cpumask_empty(&auto_desc.mask) ? af_desc : &auto_desc,
+ NULL);
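+ /* on failure, release the index reserved in the pool's irqs xarray */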
+ if (IS_ERR(irq))
+ xa_erase(&pool->irqs, irq_index);
+ return irq;
}
/* Looking for the IRQ with the smallest refcount that fits req_mask.
@@ -112,15 +116,18 @@ irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req
/**
* mlx5_irq_affinity_request - request an IRQ according to the given mask.
+ * @dev: mlx5 core device which is requesting the IRQ.
* @pool: IRQ pool to request from.
* @af_desc: affinity descriptor for this IRQ.
*
* This function returns a pointer to IRQ, or ERR_PTR in case of error.
*/
struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+ struct irq_affinity_desc *af_desc)
{
struct mlx5_irq *least_loaded_irq, *new_irq;
+ int ret;
mutex_lock(&pool->lock);
least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask);
@@ -153,6 +160,16 @@ out:
mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ);
unlock:
mutex_unlock(&pool->lock);
+ if (mlx5_irq_pool_is_sf_pool(pool)) {
+ ret = auxiliary_device_sysfs_irq_add(mlx5_sf_coredev_to_adev(dev),
+ mlx5_irq_get_irq(least_loaded_irq));
+ if (ret) {
+ mlx5_core_err(dev, "Failed to create sysfs entry for irq %d, ret = %d\n",
+ mlx5_irq_get_irq(least_loaded_irq), ret);
+ mlx5_irq_put(least_loaded_irq);
+ least_loaded_irq = ERR_PTR(ret);
+ }
+ }
return least_loaded_irq;
}
@@ -164,6 +181,9 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i
cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
synchronize_irq(pci_irq_vector(pool->dev->pdev,
mlx5_irq_get_index(irq)));
+ if (mlx5_irq_pool_is_sf_pool(pool))
+ auxiliary_device_sysfs_irq_remove(mlx5_sf_coredev_to_adev(dev),
+ mlx5_irq_get_irq(irq));
if (mlx5_irq_put(irq))
if (pool->irqs_per_cpu)
cpu_put(pool, cpu);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 69d482f7c5a2..cf8045b92689 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -713,13 +713,13 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
return 0;
}
-#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 4
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
#ifdef CONFIG_MLX5_ESWITCH
struct mlx5_core_dev *dev;
u8 mode;
#endif
+ bool roce_support;
int i;
for (i = 0; i < ldev->ports; i++)
@@ -739,13 +739,16 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
- if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports > MLX5_LAG_OFFLOADS_SUPPORTED_PORTS)
- return false;
#else
for (i = 0; i < ldev->ports; i++)
if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
return false;
#endif
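+ /* all ports must agree on RoCE state for LAG to be allowed */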
+ roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
+ for (i = 1; i < ldev->ports; i++)
+ if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
+ return false;
+
return true;
}
@@ -814,7 +817,7 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
if (shared_fdb)
for (i = 0; i < ldev->ports; i++)
if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
}
static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
@@ -913,8 +916,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
} else if (roce_lag) {
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- for (i = 1; i < ldev->ports; i++)
- mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+ for (i = 1; i < ldev->ports; i++) {
+ if (mlx5_get_roce_state(ldev->pf[i].dev))
+ mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
+ }
} else if (shared_fdb) {
int i;
@@ -922,7 +927,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_rescan_drivers_locked(dev0);
for (i = 0; i < ldev->ports; i++) {
- err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
break;
}
@@ -933,7 +938,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
for (i = 0; i < ldev->ports; i++)
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
}
@@ -1533,7 +1538,7 @@ u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
goto unlock;
for (i = 0; i < ldev->ports; i++) {
- if (ldev->pf[MLX5_LAG_P1].netdev == slave) {
+ if (ldev->pf[i].netdev == slave) {
port = i;
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 82889f30506e..571ea26edd0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -99,7 +99,7 @@ static int enable_mpesw(struct mlx5_lag *ldev)
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
for (i = 0; i < ldev->ports; i++) {
- err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
if (err)
goto err_rescan_drivers;
}
@@ -113,7 +113,7 @@ err_rescan_drivers:
err_add_devices:
mlx5_lag_add_devices(ldev);
for (i = 0; i < ldev->ports; i++)
- mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_mpesw_metadata_cleanup(ldev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index 101b3bb90863..ab2717012b79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -88,9 +88,13 @@ static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
&dest, 1);
if (IS_ERR(lag_definer->rules[idx])) {
err = PTR_ERR(lag_definer->rules[idx]);
- while (i--)
- while (j--)
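+ /* unwind: delete every rule created so far, bucket by bucket per port */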
+ do {
+ while (j--) {
+ idx = i * ldev->buckets + j;
mlx5_del_flow_rules(lag_definer->rules[idx]);
+ }
+ j = ldev->buckets;
+ } while (i--);
goto destroy_fg;
}
}
@@ -449,13 +453,11 @@ static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
struct ttc_params *ttc_params)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_table_attr *ft_attr;
int tt;
- ttc_params->ns = mlx5_get_flow_namespace(dev,
- MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
ft_attr = &ttc_params->ft_attr;
ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;
@@ -470,13 +472,11 @@ static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
struct ttc_params *ttc_params)
{
- struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
struct mlx5_flow_table_attr *ft_attr;
int tt;
- ttc_params->ns = mlx5_get_flow_namespace(dev,
- MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ttc_params->ns_type = MLX5_FLOW_NAMESPACE_PORT_SEL;
ft_attr = &ttc_params->ft_attr;
ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index b78f2ba25c19..9f13cea16446 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -9,21 +9,24 @@
#include "mlx5_core.h"
#include "lib/fs_ttc.h"
-#define MLX5_TTC_NUM_GROUPS 3
-#define MLX5_TTC_GROUP1_SIZE (BIT(3) + MLX5_NUM_TUNNEL_TT)
-#define MLX5_TTC_GROUP2_SIZE BIT(1)
-#define MLX5_TTC_GROUP3_SIZE BIT(0)
-#define MLX5_TTC_TABLE_SIZE (MLX5_TTC_GROUP1_SIZE +\
- MLX5_TTC_GROUP2_SIZE +\
- MLX5_TTC_GROUP3_SIZE)
-
-#define MLX5_INNER_TTC_NUM_GROUPS 3
-#define MLX5_INNER_TTC_GROUP1_SIZE BIT(3)
-#define MLX5_INNER_TTC_GROUP2_SIZE BIT(1)
-#define MLX5_INNER_TTC_GROUP3_SIZE BIT(0)
-#define MLX5_INNER_TTC_TABLE_SIZE (MLX5_INNER_TTC_GROUP1_SIZE +\
- MLX5_INNER_TTC_GROUP2_SIZE +\
- MLX5_INNER_TTC_GROUP3_SIZE)
+#define MLX5_TTC_MAX_NUM_GROUPS 4
+#define MLX5_TTC_GROUP_TCPUDP_SIZE (MLX5_TT_IPV6_UDP + 1)
+
+struct mlx5_fs_ttc_groups {
+ bool use_l4_type;
+ int num_groups;
+ int group_size[MLX5_TTC_MAX_NUM_GROUPS];
+};
+
+static int mlx5_fs_ttc_table_size(const struct mlx5_fs_ttc_groups *groups)
+{
+ int i, sz = 0;
+
+ for (i = 0; i < groups->num_groups; i++)
+ sz += groups->group_size[i];
+
+ return sz;
+}
/* L3/L4 traffic type classifier */
struct mlx5_ttc_table {
@@ -138,6 +141,53 @@ static struct mlx5_etype_proto ttc_tunnel_rules[] = {
};
+enum TTC_GROUP_TYPE {
+ TTC_GROUPS_DEFAULT = 0,
+ TTC_GROUPS_USE_L4_TYPE = 1,
+};
+
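+/* Two group layouts: the l4_type variant carves a dedicated TCP/UDP group
+ * out of the first (L4) group, leaving the total table size unchanged.
+ */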
+static const struct mlx5_fs_ttc_groups ttc_groups[] = {
+ [TTC_GROUPS_DEFAULT] = {
+ .num_groups = 3,
+ .group_size = {
+ BIT(3) + MLX5_NUM_TUNNEL_TT,
+ BIT(1),
+ BIT(0),
+ },
+ },
+ [TTC_GROUPS_USE_L4_TYPE] = {
+ .use_l4_type = true,
+ .num_groups = 4,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(3) + MLX5_NUM_TUNNEL_TT - MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(1),
+ BIT(0),
+ },
+ },
+};
+
+static const struct mlx5_fs_ttc_groups inner_ttc_groups[] = {
+ [TTC_GROUPS_DEFAULT] = {
+ .num_groups = 3,
+ .group_size = {
+ BIT(3),
+ BIT(1),
+ BIT(0),
+ },
+ },
+ [TTC_GROUPS_USE_L4_TYPE] = {
+ .use_l4_type = true,
+ .num_groups = 4,
+ .group_size = {
+ MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(3) - MLX5_TTC_GROUP_TCPUDP_SIZE,
+ BIT(1),
+ BIT(0),
+ },
+ },
+};
+
u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt)
{
return ttc_tunnel_rules[tt].proto;
@@ -188,9 +238,29 @@ static u8 mlx5_etype_to_ipv(u16 ethertype)
return 0;
}
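+/* Match TCP/UDP on the dedicated l4_type field when the device supports
+ * it; otherwise fall back to matching on ip_protocol.
+ */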
+static void mlx5_fs_ttc_set_match_proto(void *headers_c, void *headers_v,
+ u8 proto, bool use_l4_type)
+{
+ int l4_type;
+
+ if (use_l4_type && (proto == IPPROTO_TCP || proto == IPPROTO_UDP)) {
+ if (proto == IPPROTO_TCP)
+ l4_type = MLX5_PACKET_L4_TYPE_TCP;
+ else
+ l4_type = MLX5_PACKET_L4_TYPE_UDP;
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, l4_type);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, l4_type, l4_type);
+ } else {
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, proto);
+ }
+}
+
static struct mlx5_flow_handle *
mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
- struct mlx5_flow_destination *dest, u16 etype, u8 proto)
+ struct mlx5_flow_destination *dest, u16 etype, u8 proto,
+ bool use_l4_type)
{
int match_ipv_outer =
MLX5_CAP_FLOWTABLE_NIC_RX(dev,
@@ -207,8 +277,13 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
if (proto) {
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
+ mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
+ spec->match_criteria,
+ outer_headers),
+ MLX5_ADDR_OF(fte_match_param,
+ spec->match_value,
+ outer_headers),
+ proto, use_l4_type);
}
ipv = mlx5_etype_to_ipv(etype);
@@ -234,7 +309,8 @@ mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
struct ttc_params *params,
- struct mlx5_ttc_table *ttc)
+ struct mlx5_ttc_table *ttc,
+ bool use_l4_type)
{
struct mlx5_flow_handle **trules;
struct mlx5_ttc_rule *rules;
@@ -251,7 +327,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
continue;
rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
ttc_rules[tt].etype,
- ttc_rules[tt].proto);
+ ttc_rules[tt].proto,
+ use_l4_type);
if (IS_ERR(rule->rule)) {
err = PTR_ERR(rule->rule);
rule->rule = NULL;
@@ -273,7 +350,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
trules[tt] = mlx5_generate_ttc_rule(dev, ft,
&params->tunnel_dests[tt],
ttc_tunnel_rules[tt].etype,
- ttc_tunnel_rules[tt].proto);
+ ttc_tunnel_rules[tt].proto,
+ use_l4_type);
if (IS_ERR(trules[tt])) {
err = PTR_ERR(trules[tt]);
trules[tt] = NULL;
@@ -289,7 +367,8 @@ del_rules:
}
static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
- bool use_ipv)
+ bool use_ipv,
+ const struct mlx5_fs_ttc_groups *groups)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int ix = 0;
@@ -297,7 +376,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
int err;
u8 *mc;
- ttc->g = kcalloc(MLX5_TTC_NUM_GROUPS, sizeof(*ttc->g), GFP_KERNEL);
+ ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
if (!ttc->g)
return -ENOMEM;
in = kvzalloc(inlen, GFP_KERNEL);
@@ -307,16 +386,31 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
return -ENOMEM;
}
- /* L4 Group */
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
if (use_ipv)
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version);
else
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+
+ /* TCP UDP group */
+ if (groups->use_l4_type) {
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.l4_type);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, outer_headers.l4_type, 0);
+ }
+
+ /* L4 Group */
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_TTC_GROUP1_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -326,7 +420,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
/* L3 Group */
MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_TTC_GROUP2_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -336,7 +430,7 @@ static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc,
/* Any Group */
memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_TTC_GROUP3_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -358,7 +452,7 @@ static struct mlx5_flow_handle *
mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
struct mlx5_flow_destination *dest,
- u16 etype, u8 proto)
+ u16 etype, u8 proto, bool use_l4_type)
{
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
@@ -379,8 +473,13 @@ mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
if (proto) {
spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol);
- MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto);
+ mlx5_fs_ttc_set_match_proto(MLX5_ADDR_OF(fte_match_param,
+ spec->match_criteria,
+ inner_headers),
+ MLX5_ADDR_OF(fte_match_param,
+ spec->match_value,
+ inner_headers),
+ proto, use_l4_type);
}
rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
@@ -395,7 +494,8 @@ mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev,
static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
struct ttc_params *params,
- struct mlx5_ttc_table *ttc)
+ struct mlx5_ttc_table *ttc,
+ bool use_l4_type)
{
struct mlx5_ttc_rule *rules;
struct mlx5_flow_table *ft;
@@ -413,7 +513,8 @@ static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev,
rule->rule = mlx5_generate_inner_ttc_rule(dev, ft,
&params->dests[tt],
ttc_rules[tt].etype,
- ttc_rules[tt].proto);
+ ttc_rules[tt].proto,
+ use_l4_type);
if (IS_ERR(rule->rule)) {
err = PTR_ERR(rule->rule);
rule->rule = NULL;
@@ -430,7 +531,8 @@ del_rules:
return err;
}
-static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
+static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc,
+ const struct mlx5_fs_ttc_groups *groups)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
int ix = 0;
@@ -438,8 +540,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
int err;
u8 *mc;
- ttc->g = kcalloc(MLX5_INNER_TTC_NUM_GROUPS, sizeof(*ttc->g),
- GFP_KERNEL);
+ ttc->g = kcalloc(groups->num_groups, sizeof(*ttc->g), GFP_KERNEL);
if (!ttc->g)
return -ENOMEM;
in = kvzalloc(inlen, GFP_KERNEL);
@@ -449,13 +550,28 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
return -ENOMEM;
}
- /* L4 Group */
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS);
+
+ /* TCP UDP group */
+ if (groups->use_l4_type) {
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.l4_type);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += groups->group_size[ttc->num_groups];
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
+ if (IS_ERR(ttc->g[ttc->num_groups]))
+ goto err;
+ ttc->num_groups++;
+
+ MLX5_SET(fte_match_param, mc, inner_headers.l4_type, 0);
+ }
+
+ /* L4 Group */
+ MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_INNER_TTC_GROUP1_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -465,7 +581,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
/* L3 Group */
MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_INNER_TTC_GROUP2_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -475,7 +591,7 @@ static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc)
/* Any Group */
memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5_INNER_TTC_GROUP3_SIZE;
+ ix += groups->group_size[ttc->num_groups];
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in);
if (IS_ERR(ttc->g[ttc->num_groups]))
@@ -496,27 +612,47 @@ err:
struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev,
struct ttc_params *params)
{
+ const struct mlx5_fs_ttc_groups *groups;
+ struct mlx5_flow_namespace *ns;
struct mlx5_ttc_table *ttc;
+ bool use_l4_type;
int err;
ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
if (!ttc)
return ERR_PTR(-ENOMEM);
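+ /* whether l4_type matching is available depends on the table's namespace */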
+ switch (params->ns_type) {
+ case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+ MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
+ break;
+ case MLX5_FLOW_NAMESPACE_KERNEL:
+ use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, inner_l4_type);
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ groups = use_l4_type ? &inner_ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
+ &inner_ttc_groups[TTC_GROUPS_DEFAULT];
+
WARN_ON_ONCE(params->ft_attr.max_fte);
- params->ft_attr.max_fte = MLX5_INNER_TTC_TABLE_SIZE;
- ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
+ params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
+ ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
if (IS_ERR(ttc->t)) {
err = PTR_ERR(ttc->t);
kvfree(ttc);
return ERR_PTR(err);
}
- err = mlx5_create_inner_ttc_table_groups(ttc);
+ err = mlx5_create_inner_ttc_table_groups(ttc, groups);
if (err)
goto destroy_ft;
- err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc);
+ err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc, use_l4_type);
if (err)
goto destroy_ft;
@@ -549,27 +685,47 @@ struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev,
bool match_ipv_outer =
MLX5_CAP_FLOWTABLE_NIC_RX(dev,
ft_field_support.outer_ip_version);
+ const struct mlx5_fs_ttc_groups *groups;
+ struct mlx5_flow_namespace *ns;
struct mlx5_ttc_table *ttc;
+ bool use_l4_type;
int err;
ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL);
if (!ttc)
return ERR_PTR(-ENOMEM);
+ switch (params->ns_type) {
+ case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+ MLX5_CAP_PORT_SELECTION_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
+ break;
+ case MLX5_FLOW_NAMESPACE_KERNEL:
+ use_l4_type = MLX5_CAP_GEN_2(dev, pcc_ifa2) &&
+ MLX5_CAP_NIC_RX_FT_FIELD_SUPPORT_2(dev, outer_l4_type);
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ns = mlx5_get_flow_namespace(dev, params->ns_type);
+ groups = use_l4_type ? &ttc_groups[TTC_GROUPS_USE_L4_TYPE] :
+ &ttc_groups[TTC_GROUPS_DEFAULT];
+
WARN_ON_ONCE(params->ft_attr.max_fte);
- params->ft_attr.max_fte = MLX5_TTC_TABLE_SIZE;
- ttc->t = mlx5_create_flow_table(params->ns, &params->ft_attr);
+ params->ft_attr.max_fte = mlx5_fs_ttc_table_size(groups);
+ ttc->t = mlx5_create_flow_table(ns, &params->ft_attr);
if (IS_ERR(ttc->t)) {
err = PTR_ERR(ttc->t);
kvfree(ttc);
return ERR_PTR(err);
}
- err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer);
+ err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer, groups);
if (err)
goto destroy_ft;
- err = mlx5_generate_ttc_table_rules(dev, params, ttc);
+ err = mlx5_generate_ttc_table_rules(dev, params, ttc, use_l4_type);
if (err)
goto destroy_ft;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index 85fef0cd1c07..92eea6bea310 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -40,7 +40,7 @@ struct mlx5_ttc_rule {
struct mlx5_ttc_table;
struct ttc_params {
- struct mlx5_flow_namespace *ns;
+ enum mlx5_flow_namespace_type ns_type;
struct mlx5_flow_table_attr ft_attr;
struct mlx5_flow_destination dests[MLX5_NUM_TT];
DECLARE_BITMAP(ignore_dests, MLX5_NUM_TT);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
index 234cd00f71a1..b7d4b1a2baf2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/ipsec_fs_roce.c
@@ -386,7 +386,8 @@ static int ipsec_fs_roce_tx_mpv_create(struct mlx5_core_dev *mdev,
return -EOPNOTSUPP;
peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
- if (!peer_priv) {
+ if (!peer_priv || !peer_priv->ipsec) {
+ mlx5_core_err(mdev, "IPsec not supported on master device\n");
err = -EOPNOTSUPP;
goto release_peer;
}
@@ -455,7 +456,8 @@ static int ipsec_fs_roce_rx_mpv_create(struct mlx5_core_dev *mdev,
return -EOPNOTSUPP;
peer_priv = mlx5_devcom_get_next_peer_data(*ipsec_roce->devcom, &tmp);
- if (!peer_priv) {
+ if (!peer_priv || !peer_priv->ipsec) {
+ mlx5_core_err(mdev, "IPsec not supported on master device\n");
err = -EOPNOTSUPP;
goto release_peer;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
index 6b774e0c2766..d0b595ba6110 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/pci_vsc.c
@@ -74,6 +74,10 @@ int mlx5_vsc_gw_lock(struct mlx5_core_dev *dev)
ret = -EBUSY;
goto pci_unlock;
}
+ if (pci_channel_offline(dev->pdev)) {
+ ret = -EACCES;
+ goto pci_unlock;
+ }
/* Check if semaphore is already locked */
ret = vsc_read(dev, VSC_SEMAPHORE_OFFSET, &lock_val);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
index dd5d186dc614..eeb0b7ea05f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/sd.c
@@ -100,10 +100,6 @@ static bool ft_create_alias_supported(struct mlx5_core_dev *dev)
static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
{
- /* Feature is currently implemented for PFs only */
- if (!mlx5_core_is_pf(dev))
- return false;
-
/* Honor the SW implementation limit */
if (host_buses > MLX5_SD_MAX_GROUP_SZ)
return false;
@@ -130,7 +126,7 @@ static bool mlx5_sd_is_supported(struct mlx5_core_dev *dev, u8 host_buses)
}
static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
- u8 *host_buses, u8 *sd_group)
+ u8 *host_buses)
{
u32 out[MLX5_ST_SZ_DW(mpir_reg)];
int err;
@@ -139,10 +135,6 @@ static int mlx5_query_sd(struct mlx5_core_dev *dev, bool *sdm,
if (err)
return err;
- err = mlx5_query_nic_vport_sd_group(dev, sd_group);
- if (err)
- return err;
-
*sdm = MLX5_GET(mpir_reg, out, sdm);
*host_buses = MLX5_GET(mpir_reg, out, host_buses);
@@ -162,17 +154,29 @@ static int sd_init(struct mlx5_core_dev *dev)
bool sdm;
int err;
- if (!MLX5_CAP_MCAM_REG(dev, mpir))
+ /* Feature is currently implemented for PFs only */
+ if (!mlx5_core_is_pf(dev))
+ return 0;
+
+ /* Block on embedded CPU PFs */
+ if (mlx5_core_is_ecpf(dev))
return 0;
- err = mlx5_query_sd(dev, &sdm, &host_buses, &sd_group);
+ err = mlx5_query_nic_vport_sd_group(dev, &sd_group);
if (err)
return err;
- if (!sdm)
+ if (!sd_group)
return 0;
- if (!sd_group)
+ if (!MLX5_CAP_MCAM_REG(dev, mpir))
+ return 0;
+
+ err = mlx5_query_sd(dev, &sdm, &host_buses);
+ if (err)
+ return err;
+
+ if (!sdm)
return 0;
group_id = mlx5_sd_group_id(dev, sd_group);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 331ce47f51a1..5b7e6f4b5c7e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1298,6 +1298,9 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
if (!err)
mlx5_function_disable(dev, boot);
+ else
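+ /* on teardown failure, stop the health poll ourselves */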
+ mlx5_stop_health_poll(dev, boot);
+
return err;
}
@@ -1680,6 +1683,8 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
struct devlink *devlink = priv_to_devlink(dev);
int err;
+ devl_lock(devlink);
+ devl_register(devlink);
dev->state = MLX5_DEVICE_STATE_UP;
err = mlx5_function_enable(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
if (err) {
@@ -1693,27 +1698,21 @@ int mlx5_init_one_light(struct mlx5_core_dev *dev)
goto query_hca_caps_err;
}
- devl_lock(devlink);
- devl_register(devlink);
-
err = mlx5_devlink_params_register(priv_to_devlink(dev));
if (err) {
mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
- goto params_reg_err;
+ goto query_hca_caps_err;
}
devl_unlock(devlink);
return 0;
-params_reg_err:
- devl_unregister(devlink);
- devl_unlock(devlink);
query_hca_caps_err:
- devl_unregister(devlink);
- devl_unlock(devlink);
mlx5_function_disable(dev, true);
out:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ devl_unregister(devlink);
+ devl_unlock(devlink);
return err;
}
@@ -1820,6 +1819,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
mutex_init(&dev->intf_state_mutex);
lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
mutex_init(&dev->mlx5e_res.uplink_netdev_lock);
+ mutex_init(&dev->wc_state_lock);
mutex_init(&priv->bfregs.reg_head.lock);
mutex_init(&priv->bfregs.wc_head.lock);
@@ -1917,6 +1917,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
mutex_destroy(&priv->alloc_mutex);
mutex_destroy(&priv->bfregs.wc_head.lock);
mutex_destroy(&priv->bfregs.reg_head.lock);
+ mutex_destroy(&dev->wc_state_lock);
mutex_destroy(&dev->mlx5e_res.uplink_netdev_lock);
mutex_destroy(&dev->intf_state_mutex);
lockdep_unregister_key(&dev->lock_key);
@@ -2141,7 +2142,6 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
/* Panic tear down fw command will stop the PCI bus communication
* with the HCA, so the health poll is no longer needed.
*/
- mlx5_drain_health_wq(dev);
mlx5_stop_health_poll(dev, false);
ret = mlx5_cmd_fast_teardown_hca(dev);
@@ -2176,6 +2176,7 @@ static void shutdown(struct pci_dev *pdev)
mlx5_core_info(dev, "Shutdown was called\n");
set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
+ mlx5_drain_health_wq(dev);
err = mlx5_try_fast_unload(dev);
if (err)
mlx5_unload_one(dev, false);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 58732f44940f..62c770b0eaa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -205,7 +205,7 @@ int mlx5_cmd_enable(struct mlx5_core_dev *dev);
void mlx5_cmd_disable(struct mlx5_core_dev *dev);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
enum mlx5_cmdif_state cmdif_state);
-int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id);
+int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev);
@@ -320,6 +320,12 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_SF;
}
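+/* SF core devices are backed by an auxiliary device; recover it from the
+ * generic device pointer.
+ */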
+static inline struct auxiliary_device *
+mlx5_sf_coredev_to_adev(struct mlx5_core_dev *mdev)
+{
+ return container_of(mdev->device, struct auxiliary_device, dev);
+}
+
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
@@ -383,4 +389,14 @@ static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vpo
: vport;
}
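+/* EQ capacity: prefer the 24-bit max_num_eqs_24b capability, then the
+ * legacy max_num_eqs field, then fall back to 1 << log_max_eq.
+ */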
+static inline int mlx5_max_eq_cap_get(const struct mlx5_core_dev *dev)
+{
+ if (MLX5_CAP_GEN_2(dev, max_num_eqs_24b))
+ return MLX5_CAP_GEN_2(dev, max_num_eqs_24b);
+
+ if (MLX5_CAP_GEN(dev, max_num_eqs))
+ return MLX5_CAP_GEN(dev, max_num_eqs);
+
+ return 1 << MLX5_CAP_GEN(dev, log_max_eq);
+}
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index 1088114e905d..0881e961d8b1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -25,7 +25,7 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn,
int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs);
struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev);
-void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq);
+void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq);
struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
struct irq_affinity_desc *af_desc,
struct cpu_rmap **rmap);
@@ -36,13 +36,15 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb);
struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq);
int mlx5_irq_get_index(struct mlx5_irq *irq);
+int mlx5_irq_get_irq(const struct mlx5_irq *irq);
struct mlx5_irq_pool;
#ifdef CONFIG_MLX5_SF
struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
struct cpumask *used_cpus, u16 vecidx);
-struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool,
- struct irq_affinity_desc *af_desc);
+struct mlx5_irq *
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+ struct irq_affinity_desc *af_desc);
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq);
#else
static inline
@@ -53,7 +55,8 @@ struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
}
static inline struct mlx5_irq *
-mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc)
+mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool,
+ struct irq_affinity_desc *af_desc)
{
return ERR_PTR(-EOPNOTSUPP);
}
@@ -61,6 +64,7 @@ mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *
static inline
void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq)
{
+ mlx5_irq_release_vector(irq);
}
#endif
#endif /* __MLX5_IRQ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index dcf58efac159..d894a88fa9f2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -660,6 +660,9 @@ int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
npages, boot ? "boot" : "init", func_id);
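+ /* firmware may request zero pages; skip the MANAGE_PAGES command */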
+ if (!npages)
+ return 0;
+
return give_pages(dev, func_id, npages, 0, mlx5_core_is_ecpf(dev));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 6bac8ad70ba6..81a9232a03e1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -16,6 +16,7 @@
#endif
#define MLX5_SFS_PER_CTRL_IRQ 64
+#define MLX5_MAX_MSIX_PER_SF 256
#define MLX5_IRQ_CTRL_SF_MAX 8
/* min num of vectors for SFs to be enabled */
#define MLX5_IRQ_VEC_COMP_BASE_SF 2
@@ -367,6 +368,11 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq)
return irq->mask;
}
+int mlx5_irq_get_irq(const struct mlx5_irq *irq)
+{
+ return irq->map.virq;
+}
+
int mlx5_irq_get_index(struct mlx5_irq *irq)
{
return irq->map.index;
@@ -440,11 +446,12 @@ static void _mlx5_irq_release(struct mlx5_irq *irq)
/**
* mlx5_ctrl_irq_release - release a ctrl IRQ back to the system.
+ * @dev: mlx5 device that is releasing the IRQ.
* @ctrl_irq: ctrl IRQ to be released.
*/
-void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq)
+void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq)
{
- _mlx5_irq_release(ctrl_irq);
+ mlx5_irq_affinity_irq_release(dev, ctrl_irq);
}
/**
@@ -473,7 +480,7 @@ struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev)
/* Allocate the IRQ in index 0. The vector was already allocated */
irq = irq_pool_request_vector(pool, 0, &af_desc, NULL);
} else {
- irq = mlx5_irq_affinity_request(pool, &af_desc);
+ irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
}
return irq;
@@ -508,58 +515,6 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
}
/**
- * mlx5_msix_alloc - allocate msix interrupt
- * @dev: mlx5 device from which to request
- * @handler: interrupt handler
- * @affdesc: affinity descriptor
- * @name: interrupt name
- *
- * Returns: struct msi_map with result encoded.
- * Note: the caller must make sure to release the irq by calling
- * mlx5_msix_free() if shutdown was initiated.
- */
-struct msi_map mlx5_msix_alloc(struct mlx5_core_dev *dev,
- irqreturn_t (*handler)(int, void *),
- const struct irq_affinity_desc *affdesc,
- const char *name)
-{
- struct msi_map map;
- int err;
-
- if (!dev->pdev) {
- map.virq = 0;
- map.index = -EINVAL;
- return map;
- }
-
- map = pci_msix_alloc_irq_at(dev->pdev, MSI_ANY_INDEX, affdesc);
- if (!map.virq)
- return map;
-
- err = request_irq(map.virq, handler, 0, name, NULL);
- if (err) {
- mlx5_core_warn(dev, "err %d\n", err);
- pci_msix_free_irq(dev->pdev, map);
- map.virq = 0;
- map.index = -ENOMEM;
- }
- return map;
-}
-EXPORT_SYMBOL(mlx5_msix_alloc);
-
-/**
- * mlx5_msix_free - free a previously allocated msix interrupt
- * @dev: mlx5 device associated with interrupt
- * @map: map previously returned by mlx5_msix_alloc()
- */
-void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map)
-{
- free_irq(map.virq, NULL);
- pci_msix_free_irq(dev->pdev, map);
-}
-EXPORT_SYMBOL(mlx5_msix_free);
-
-/**
* mlx5_irq_release_vector - release one IRQ back to the system.
* @irq: the irq to release.
*/
@@ -641,8 +596,6 @@ static void irq_pool_free(struct mlx5_irq_pool *pool)
static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
{
struct mlx5_irq_table *table = dev->priv.irq_table;
- int num_sf_ctrl_by_msix;
- int num_sf_ctrl_by_sfs;
int num_sf_ctrl;
int err;
@@ -660,10 +613,8 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec)
}
/* init sf_ctrl_pool */
- num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF);
- num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
- MLX5_SFS_PER_CTRL_IRQ);
- num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs);
+ num_sf_ctrl = DIV_ROUND_UP(mlx5_sf_max_functions(dev),
+ MLX5_SFS_PER_CTRL_IRQ);
num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl);
table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl,
"mlx5_sf_ctrl",
@@ -763,9 +714,7 @@ int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
int mlx5_irq_table_create(struct mlx5_core_dev *dev)
{
- int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ?
- MLX5_CAP_GEN(dev, max_num_eqs) :
- 1 << MLX5_CAP_GEN(dev, log_max_eq);
+ int num_eqs = mlx5_max_eq_cap_get(dev);
int total_vec;
int pcif_vec;
int req_vec;
@@ -780,8 +729,7 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
total_vec = pcif_vec;
if (mlx5_sf_max_functions(dev))
- total_vec += MLX5_IRQ_CTRL_SF_MAX +
- MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);
+ total_vec += MLX5_MAX_MSIX_PER_SF * mlx5_sf_max_functions(dev);
total_vec = min_t(int, total_vec, pci_msix_vec_count(dev->pdev));
pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 7fba1c46e2ac..50931584132b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -144,11 +144,13 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
- int ptys_size, int proto_mask, u8 local_port)
+ int ptys_size, int proto_mask,
+ u8 local_port, u8 plane_index)
{
u32 in[MLX5_ST_SZ_DW(ptys_reg)] = {0};
MLX5_SET(ptys_reg, in, local_port, local_port);
+ MLX5_SET(ptys_reg, in, plane_ind, plane_index);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
return mlx5_core_access_reg(dev, in, sizeof(in), ptys,
ptys_size, MLX5_REG_PTYS, 0, 0);
@@ -167,13 +169,13 @@ int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
}
int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper,
- u16 *proto_oper, u8 local_port)
+ u16 *proto_oper, u8 local_port, u8 plane_index)
{
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
int err;
err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB,
- local_port);
+ local_port, plane_index);
if (err)
return err;
@@ -1114,7 +1116,7 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
if (!eproto)
return -EINVAL;
- err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port, 0);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
index 7f7c9af5deed..0537de86f981 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
@@ -22,7 +22,7 @@ DECLARE_EVENT_CLASS(mlx5_sf_dev_template,
__field(u16, hw_fn_id)
__field(u32, sfnum)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->sfdev = sfdev;
__entry->aux_id = aux_id;
__entry->hw_fn_id = sfdev->fn_id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 7ebe71280827..b706f1486504 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -60,6 +60,13 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto remap_err;
}
+ /* Peer devlink logic expects to work on an unregistered devlink instance. */
+ err = mlx5_core_peer_devlink_set(sf_dev, devlink);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
+ goto peer_devlink_set_err;
+ }
+
if (MLX5_ESWITCH_MANAGER(sf_dev->parent_mdev))
err = mlx5_init_one_light(mdev);
else
@@ -69,20 +76,10 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto init_one_err;
}
- err = mlx5_core_peer_devlink_set(sf_dev, devlink);
- if (err) {
- mlx5_core_warn(mdev, "mlx5_core_peer_devlink_set err=%d\n", err);
- goto peer_devlink_set_err;
- }
-
return 0;
-peer_devlink_set_err:
- if (mlx5_dev_is_lightweight(sf_dev->mdev))
- mlx5_uninit_one_light(sf_dev->mdev);
- else
- mlx5_uninit_one(sf_dev->mdev);
init_one_err:
+peer_devlink_set_err:
iounmap(mdev->iseg);
remap_err:
mlx5_mdev_uninit(mdev);
@@ -115,6 +112,7 @@ static void mlx5_sf_dev_shutdown(struct auxiliary_device *adev)
struct mlx5_core_dev *mdev = sf_dev->mdev;
set_bit(MLX5_BREAK_FW_WAIT, &mdev->intf_state);
+ mlx5_drain_health_wq(mdev);
mlx5_unload_one(mdev, false);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 6c11e075cab0..a96be98be032 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -161,6 +161,7 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
struct netlink_ext_ack *extack)
{
+ struct mlx5_vport *vport;
int err;
if (mlx5_sf_is_active(sf))
@@ -170,6 +171,13 @@ static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
return -EBUSY;
}
+ vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port);
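+ /* apply the default SF EQ cap unless the user already configured one */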
+ if (!vport->max_eqs_set && MLX5_CAP_GEN_2(dev, max_num_eqs_24b)) {
+ err = mlx5_devlink_port_fn_max_io_eqs_set_sf_default(&sf->dl_port.dl_port,
+ extack);
+ if (err)
+ return err;
+ }
err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id);
if (err)
return err;
@@ -318,7 +326,11 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink,
static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
+ struct mlx5_vport *vport;
+
mutex_lock(&table->sf_state_lock);
+ vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port);
+ vport->max_eqs_set = false;
mlx5_sf_function_id_erase(table, sf);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h
index 8bf1cd90930d..302ce00da5a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h
@@ -24,7 +24,7 @@ TRACE_EVENT(mlx5_sf_add,
__field(u16, hw_fn_id)
__field(u32, sfnum)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->port_index = port_index;
__entry->controller = controller;
__entry->hw_fn_id = hw_fn_id;
@@ -46,7 +46,7 @@ TRACE_EVENT(mlx5_sf_free,
__field(u32, controller)
__field(u16, hw_fn_id)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->port_index = port_index;
__entry->controller = controller;
__entry->hw_fn_id = hw_fn_id;
@@ -67,7 +67,7 @@ TRACE_EVENT(mlx5_sf_hwc_alloc,
__field(u16, hw_fn_id)
__field(u32, sfnum)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->controller = controller;
__entry->hw_fn_id = hw_fn_id;
__entry->sfnum = sfnum;
@@ -84,7 +84,7 @@ TRACE_EVENT(mlx5_sf_hwc_free,
TP_STRUCT__entry(__string(devname, dev_name(dev->device))
__field(u16, hw_fn_id)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->hw_fn_id = hw_fn_id;
),
TP_printk("(%s) hw_id=0x%x\n", __get_str(devname), __entry->hw_fn_id)
@@ -97,7 +97,7 @@ TRACE_EVENT(mlx5_sf_hwc_deferred_free,
TP_STRUCT__entry(__string(devname, dev_name(dev->device))
__field(u16, hw_fn_id)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->hw_fn_id = hw_fn_id;
),
TP_printk("(%s) hw_id=0x%x\n", __get_str(devname), __entry->hw_fn_id)
@@ -113,7 +113,7 @@ DECLARE_EVENT_CLASS(mlx5_sf_state_template,
__field(unsigned int, port_index)
__field(u32, controller)
__field(u16, hw_fn_id)),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->port_index = port_index;
__entry->controller = controller;
__entry->hw_fn_id = hw_fn_id;
@@ -152,7 +152,7 @@ TRACE_EVENT(mlx5_sf_update_state,
__field(u16, hw_fn_id)
__field(u8, state)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->port_index = port_index;
__entry->controller = controller;
__entry->hw_fn_id = hw_fn_id;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h
index fd814a190b8b..6352cb004a18 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h
@@ -20,7 +20,7 @@ TRACE_EVENT(mlx5_sf_vhca_event,
__field(u32, sfnum)
__field(u8, vhca_state)
),
- TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ TP_fast_assign(__assign_str(devname);
__entry->hw_fn_id = event->function_id;
__entry->sfnum = event->sw_function_id;
__entry->vhca_state = event->new_vhca_state;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index 042ca0349124..d1db04baa1fa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -7,7 +7,7 @@
/* don't try to optimize STE allocation if the stack is too constraining */
#define DR_RULE_MAX_STES_OPTIMIZED 0
#else
-#define DR_RULE_MAX_STES_OPTIMIZED 5
+#define DR_RULE_MAX_STES_OPTIMIZED 2
#endif
#define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index f708b029425a..e9f6c7ed7a7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -1883,7 +1883,7 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
struct mlx5dr_match_misc5 *misc5 = &value->misc5;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index dd856cde188d..1d49704b9542 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -1897,7 +1897,7 @@ void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
struct mlx5dr_match_misc5 *misc5 = &value->misc5;
@@ -2129,7 +2129,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
static int
dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
- uint8_t *tag)
+ u8 *tag)
{
u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
struct mlx5dr_match_misc *misc = &value->misc;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 81eff6c410ce..7618c6147f86 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -1379,6 +1379,11 @@ int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
u32 obj_id);
+int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
+ u8 *dw_selectors, u8 *byte_selectors,
+ u8 *match_mask, u32 *definer_id);
+void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
+
struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
enum mlx5dr_icm_type icm_type);
void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index 89fced86936f..3ac7dc67509f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -153,11 +153,6 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action);
u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action);
-int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
- u8 *dw_selectors, u8 *byte_selectors,
- u8 *match_mask, u32 *definer_id);
-void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
-
static inline bool
mlx5dr_is_supported(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 1005bb6935b6..0d5f750faa45 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -737,6 +737,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
system_image_guid);
+ rep->num_plane = MLX5_GET_PR(hca_vport_context, ctx, num_port_plane);
ex:
kvfree(out);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
new file mode 100644
index 000000000000..1bed75eca97d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/io.h>
+#include <linux/mlx5/transobj.h>
+#include "lib/clock.h"
+#include "mlx5_core.h"
+#include "wq.h"
+
+#define TEST_WC_NUM_WQES 255
+#define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES))
+#define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ
+#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100)
+
+struct mlx5_wc_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+
+ /* data path - accessed per napi poll */
+ struct mlx5_core_cq mcq;
+
+ /* control */
+ struct mlx5_core_dev *mdev;
+ struct mlx5_wq_ctrl wq_ctrl;
+};
+
+struct mlx5_wc_sq {
+ /* data path */
+ u16 cc;
+ u16 pc;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ u32 sqn;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+
+ struct mlx5_wc_cq cq;
+ struct mlx5_sq_bfreg bfreg;
+};
+
+static int mlx5_wc_create_cqwq(struct mlx5_core_dev *mdev, void *cqc,
+ struct mlx5_wc_cq *cq)
+{
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ struct mlx5_wq_param param = {};
+ int err;
+ u32 i;
+
+ err = mlx5_cqwq_create(mdev, &param, cqc, &cq->wq, &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->mdev = mdev;
+
+ return 0;
+}
+
+static int create_wc_cq(struct mlx5_wc_cq *cq, void *cqc_data)
+{
+ u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+ struct mlx5_core_dev *mdev = cq->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int err, inlen, eqn;
+ void *in, *cqc;
+
+ err = mlx5_comp_eqn_get(mdev, 0, &eqn);
+ if (err)
+ return err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc));
+
+ mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
+ MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out));
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq)
+{
+ void *cqc;
+ int err;
+
+ cqc = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL);
+ if (!cqc)
+ return -ENOMEM;
+
+ MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ);
+ MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
+ if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128)
+ MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
+
+ err = mlx5_wc_create_cqwq(mdev, cqc, cq);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc cq wq, err=%d\n", err);
+ goto err_create_cqwq;
+ }
+
+ err = create_wc_cq(cq, cqc);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc cq, err=%d\n", err);
+ goto err_create_cq;
+ }
+
+ kvfree(cqc);
+ return 0;
+
+err_create_cq:
+ mlx5_wq_destroy(&cq->wq_ctrl);
+err_create_cqwq:
+ kvfree(cqc);
+ return err;
+}
+
+static void mlx5_wc_destroy_cq(struct mlx5_wc_cq *cq)
+{
+ mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int create_wc_sq(struct mlx5_core_dev *mdev, void *sqc_data,
+ struct mlx5_wc_sq *sq)
+{
+ void *in, *sqc, *wq;
+ int inlen, err;
+ u8 ts_format;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc));
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ ts_format = mlx5_is_real_time_sq(mdev) ?
+ MLX5_TIMESTAMP_FORMAT_REAL_TIME :
+ MLX5_TIMESTAMP_FORMAT_FREE_RUNNING;
+ MLX5_SET(sqc, sqc, ts_format, ts_format);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, sq->bfreg.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_frag_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc sq, err=%d\n", err);
+ goto err_create_sq;
+ }
+
+ memset(in, 0, MLX5_ST_SZ_BYTES(modify_sq_in));
+ MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST);
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY);
+
+ err = mlx5_core_modify_sq(mdev, sq->sqn, in);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to set wc sq(sqn=0x%x) ready, err=%d\n",
+ sq->sqn, err);
+ goto err_modify_sq;
+ }
+
+ kvfree(in);
+ return 0;
+
+err_modify_sq:
+ mlx5_core_destroy_sq(mdev, sq->sqn);
+err_create_sq:
+ kvfree(in);
+ return err;
+}
+
+static int mlx5_wc_create_sq(struct mlx5_core_dev *mdev, struct mlx5_wc_sq *sq)
+{
+ struct mlx5_wq_param param = {};
+ void *sqc_data, *wq;
+ int err;
+
+ sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL);
+ if (!sqc_data)
+ return -ENOMEM;
+
+ wq = MLX5_ADDR_OF(sqc, sqc_data, wq);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(wq, wq, log_wq_sz, TEST_WC_SQ_LOG_WQ_SZ);
+
+ err = mlx5_wq_cyc_create(mdev, &param, wq, &sq->wq, &sq->wq_ctrl);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to create wc sq wq, err=%d\n", err);
+ goto err_create_wq_cyc;
+ }
+
+ err = create_wc_sq(mdev, sqc_data, sq);
+ if (err)
+ goto err_create_sq;
+
+ mlx5_core_dbg(mdev, "wc sq->sqn = 0x%x created\n", sq->sqn);
+
+ kvfree(sqc_data);
+ return 0;
+
+err_create_sq:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+err_create_wq_cyc:
+ kvfree(sqc_data);
+ return err;
+}
+
+static void mlx5_wc_destroy_sq(struct mlx5_wc_sq *sq)
+{
+ mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+}
+
+static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, bool signaled)
+{
+ int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2;
+ struct mlx5_wqe_ctrl_seg *ctrl;
+ __be32 mmio_wqe[16] = {};
+ u16 pi;
+
+ pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc);
+ ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
+ memset(ctrl, 0, sizeof(*ctrl));
+ ctrl->opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_NOP);
+ ctrl->qpn_ds =
+ cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+ DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg), MLX5_SEND_WQE_DS));
+ if (signaled)
+ ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE;
+
+ memcpy(mmio_wqe, ctrl, sizeof(*ctrl));
+ ((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |=
+ MLX5_WQE_CTRL_CQ_UPDATE;
+
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ sq->pc++;
+ sq->wq.db[MLX5_SND_DBR] = cpu_to_be32(sq->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ __iowrite64_copy(sq->bfreg.map + sq->bfreg.offset, mmio_wqe,
+ sizeof(mmio_wqe) / 8);
+
+ sq->bfreg.offset ^= buf_size;
+}
+
+static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq)
+{
+ struct mlx5_wc_cq *cq = &sq->cq;
+ struct mlx5_cqe64 *cqe;
+
+ cqe = mlx5_cqwq_get_cqe(&cq->wq);
+ if (!cqe)
+ return -ETIMEDOUT;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ mlx5_cqwq_pop(&cq->wq);
+
+ if (get_cqe_opcode(cqe) == MLX5_CQE_REQ) {
+ int wqe_counter = be16_to_cpu(cqe->wqe_counter);
+ struct mlx5_core_dev *mdev = cq->mdev;
+
+ if (wqe_counter == TEST_WC_NUM_WQES - 1)
+ mdev->wc_state = MLX5_WC_STATE_UNSUPPORTED;
+ else
+ mdev->wc_state = MLX5_WC_STATE_SUPPORTED;
+
+ mlx5_core_dbg(mdev, "wc wqe_counter = 0x%x\n", wqe_counter);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc++;
+
+ return 0;
+}
+
+static void mlx5_core_test_wc(struct mlx5_core_dev *mdev)
+{
+ unsigned long expires;
+ struct mlx5_wc_sq *sq;
+ int i, err;
+
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ return;
+
+ sq = kzalloc(sizeof(*sq), GFP_KERNEL);
+ if (!sq)
+ return;
+
+ err = mlx5_alloc_bfreg(mdev, &sq->bfreg, true, false);
+ if (err) {
+ mlx5_core_err(mdev, "Failed to alloc bfreg for wc, err=%d\n", err);
+ goto err_alloc_bfreg;
+ }
+
+ err = mlx5_wc_create_cq(mdev, &sq->cq);
+ if (err)
+ goto err_create_cq;
+
+ err = mlx5_wc_create_sq(mdev, sq);
+ if (err)
+ goto err_create_sq;
+
+ for (i = 0; i < TEST_WC_NUM_WQES - 1; i++)
+ mlx5_wc_post_nop(sq, false);
+
+ mlx5_wc_post_nop(sq, true);
+
+ expires = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES;
+ do {
+ err = mlx5_wc_poll_cq(sq);
+ if (err)
+ usleep_range(2, 10);
+ } while (mdev->wc_state == MLX5_WC_STATE_UNINITIALIZED &&
+ time_is_after_jiffies(expires));
+
+ mlx5_wc_destroy_sq(sq);
+
+err_create_sq:
+ mlx5_wc_destroy_cq(&sq->cq);
+err_create_cq:
+ mlx5_free_bfreg(mdev, &sq->bfreg);
+err_alloc_bfreg:
+ kfree(sq);
+}
+
+bool mlx5_wc_support_get(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_core_dev *parent = NULL;
+
+ if (!MLX5_CAP_GEN(mdev, bf)) {
+ mlx5_core_dbg(mdev, "BlueFlame not supported\n");
+ goto out;
+ }
+
+ if (!MLX5_CAP_GEN(mdev, log_max_sq)) {
+ mlx5_core_dbg(mdev, "SQ not supported\n");
+ goto out;
+ }
+
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ /* No need to lock anything as we perform the WC test only
+ * once per device, and it has already been done.
+ */
+ goto out;
+
+ mutex_lock(&mdev->wc_state_lock);
+
+ if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED)
+ goto unlock;
+
+#ifdef CONFIG_MLX5_SF
+ if (mlx5_core_is_sf(mdev))
+ parent = mdev->priv.parent_mdev;
+#endif
+
+ if (parent) {
+ mutex_lock(&parent->wc_state_lock);
+
+ mlx5_core_test_wc(parent);
+
+ mlx5_core_dbg(mdev, "parent set wc_state=%d\n",
+ parent->wc_state);
+ mdev->wc_state = parent->wc_state;
+
+ mutex_unlock(&parent->wc_state_lock);
+ }
+
+ mlx5_core_test_wc(mdev);
+
+unlock:
+ mutex_unlock(&mdev->wc_state_lock);
+out:
+ mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state);
+
+ return mdev->wc_state == MLX5_WC_STATE_SUPPORTED;
+}
+EXPORT_SYMBOL(mlx5_wc_support_get);
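The probe above works because every WQE pushed through the BlueFlame register has CQ_UPDATE forced in its MMIO copy, while in the memory ring only the last of the 255 NOPs is signalled. If the first completion reports wqe_counter 0, the device consumed the write-combined MMIO copies and WC is supported; if it reports 254, the device fell back to fetching descriptors from memory. A standalone sketch of that decision (TEST_WC_NUM_WQES is taken from this file; everything else is hypothetical):

	#include <stdio.h>

	#define TEST_WC_NUM_WQES 255

	/* Mirrors the check in mlx5_wc_poll_cq(): the wqe_counter of the
	 * first CQE tells us which copy of the WQEs the device used.
	 */
	static const char *wc_state(int first_wqe_counter)
	{
		return first_wqe_counter == TEST_WC_NUM_WQES - 1 ?
		       "UNSUPPORTED" : "SUPPORTED";
	}

	int main(void)
	{
		printf("first CQE wqe_counter=0   -> WC %s\n", wc_state(0));
		printf("first CQE wqe_counter=254 -> WC %s\n", wc_state(254));
		return 0;
	}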
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index bc94e75a7aeb..e7777700ee18 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -40,6 +40,7 @@
*/
#define MLXBF_GIGE_BCAST_MAC_FILTER_IDX 0
#define MLXBF_GIGE_LOCAL_MAC_FILTER_IDX 1
+#define MLXBF_GIGE_MAX_FILTER_IDX 3
/* Define for broadcast MAC literal */
#define BCAST_MAC_ADDR 0xFFFFFFFFFFFF
@@ -175,6 +176,13 @@ enum mlxbf_gige_res {
int mlxbf_gige_mdio_probe(struct platform_device *pdev,
struct mlxbf_gige *priv);
void mlxbf_gige_mdio_remove(struct mlxbf_gige *priv);
+
+void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv);
+void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv);
+void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index);
+void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index);
void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 dmac);
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index ba303868686a..385a56ac7348 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -168,6 +168,10 @@ static int mlxbf_gige_open(struct net_device *netdev)
if (err)
goto napi_deinit;
+ mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_BCAST_MAC_FILTER_IDX);
+ mlxbf_gige_enable_mac_rx_filter(priv, MLXBF_GIGE_LOCAL_MAC_FILTER_IDX);
+ mlxbf_gige_enable_multicast_rx(priv);
+
/* Set bits in INT_EN that we care about */
int_en = MLXBF_GIGE_INT_EN_HW_ACCESS_ERROR |
MLXBF_GIGE_INT_EN_TX_CHECKSUM_INPUTS |
@@ -379,6 +383,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
void __iomem *plu_base;
void __iomem *base;
int addr, phy_irq;
+ unsigned int i;
int err;
base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC);
@@ -423,6 +428,11 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
priv->rx_q_entries = MLXBF_GIGE_DEFAULT_RXQ_SZ;
priv->tx_q_entries = MLXBF_GIGE_DEFAULT_TXQ_SZ;
+ for (i = 0; i <= MLXBF_GIGE_MAX_FILTER_IDX; i++)
+ mlxbf_gige_disable_mac_rx_filter(priv, i);
+ mlxbf_gige_disable_multicast_rx(priv);
+ mlxbf_gige_disable_promisc(priv);
+
/* Write initial MAC address to hardware */
mlxbf_gige_initial_mac(priv);
@@ -436,7 +446,7 @@ static int mlxbf_gige_probe(struct platform_device *pdev)
priv->rx_irq = platform_get_irq(pdev, MLXBF_GIGE_RECEIVE_PKT_INTR_IDX);
priv->llu_plu_irq = platform_get_irq(pdev, MLXBF_GIGE_LLU_PLU_INTR_IDX);
- phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy-gpios", 0);
+ phy_irq = acpi_dev_gpio_irq_get_by(ACPI_COMPANION(&pdev->dev), "phy", 0);
if (phy_irq < 0) {
dev_err(&pdev->dev, "Error getting PHY irq. Use polling instead");
phy_irq = PHY_POLL;
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
index 98a8681c21b9..4d14cb13fd64 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_regs.h
@@ -62,6 +62,8 @@
#define MLXBF_GIGE_TX_STATUS_DATA_FIFO_FULL BIT(1)
#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_START 0x0520
#define MLXBF_GIGE_RX_MAC_FILTER_DMAC_RANGE_END 0x0528
+#define MLXBF_GIGE_RX_MAC_FILTER_GENERAL 0x0530
+#define MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST BIT(1)
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC 0x0540
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_DISC_EN BIT(0)
#define MLXBF_GIGE_RX_MAC_FILTER_COUNT_PASS 0x0548
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
index 699984358493..eb62620b63c7 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
@@ -11,15 +11,31 @@
#include "mlxbf_gige.h"
#include "mlxbf_gige_regs.h"
-void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
- unsigned int index, u64 dmac)
+void mlxbf_gige_enable_multicast_rx(struct mlxbf_gige *priv)
{
void __iomem *base = priv->base;
- u64 control;
+ u64 data;
- /* Write destination MAC to specified MAC RX filter */
- writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
- (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+ data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+ data |= MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
+ writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+}
+
+void mlxbf_gige_disable_multicast_rx(struct mlxbf_gige *priv)
+{
+ void __iomem *base = priv->base;
+ u64 data;
+
+ data = readq(base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+ data &= ~MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST;
+ writeq(data, base + MLXBF_GIGE_RX_MAC_FILTER_GENERAL);
+}
+
+void mlxbf_gige_enable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index)
+{
+ void __iomem *base = priv->base;
+ u64 control;
/* Enable MAC receive filter mask for specified index */
control = readq(base + MLXBF_GIGE_CONTROL);
@@ -27,6 +43,28 @@ void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
writeq(control, base + MLXBF_GIGE_CONTROL);
}
+void mlxbf_gige_disable_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index)
+{
+ void __iomem *base = priv->base;
+ u64 control;
+
+ /* Disable MAC receive filter mask for specified index */
+ control = readq(base + MLXBF_GIGE_CONTROL);
+ control &= ~(MLXBF_GIGE_CONTROL_EN_SPECIFIC_MAC << index);
+ writeq(control, base + MLXBF_GIGE_CONTROL);
+}
+
+void mlxbf_gige_set_mac_rx_filter(struct mlxbf_gige *priv,
+ unsigned int index, u64 dmac)
+{
+ void __iomem *base = priv->base;
+
+ /* Write destination MAC to specified MAC RX filter */
+ writeq(dmac, base + MLXBF_GIGE_RX_MAC_FILTER +
+ (index * MLXBF_GIGE_RX_MAC_FILTER_STRIDE));
+}
+
void mlxbf_gige_get_mac_rx_filter(struct mlxbf_gige *priv,
unsigned int index, u64 *dmac)
{
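The new enable/disable helpers are plain read-modify-write accessors over MMIO registers, split out so probe can park the receive path (all filters, multicast, and promiscuous mode off) and open can bring back only what it needs. A minimal userspace sketch of the same pattern, with an ordinary variable standing in for the readq()/writeq() register access:

	#include <stdint.h>
	#include <stdio.h>

	/* Bit copied from MLXBF_GIGE_RX_MAC_FILTER_EN_MULTICAST */
	#define EN_MULTICAST (1ULL << 1)

	static uint64_t filter_general;	/* stands in for the MMIO register */

	static void enable_multicast_rx(void)
	{
		filter_general |= EN_MULTICAST;	/* read; set bit; write back */
	}

	static void disable_multicast_rx(void)
	{
		filter_general &= ~EN_MULTICAST;
	}

	int main(void)
	{
		enable_multicast_rx();
		printf("after enable:  %#llx\n", (unsigned long long)filter_general);
		disable_multicast_rx();
		printf("after disable: %#llx\n", (unsigned long long)filter_general);
		return 0;
	}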
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index a510bf2cff2f..74f7e27b490f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -33,6 +33,7 @@ config MLXSW_CORE_THERMAL
config MLXSW_PCI
tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
depends on PCI && HAS_IOMEM && MLXSW_CORE
+ select PAGE_POOL
default m
help
This is PCI bus implementation for Mellanox Technologies Switch ASICs.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 6c06b0592760..294e758f1067 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -513,6 +513,63 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
+int
+mlxsw_env_set_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ u32 bytes_written = 0;
+ u16 device_addr;
+ int err;
+
+ if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot write to EEPROM of a module on an inactive line card");
+ return -EIO;
+ }
+
+ err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type");
+ return err;
+ }
+
+ device_addr = page->offset;
+
+ while (bytes_written < page->length) {
+ char mcia_pl[MLXSW_REG_MCIA_LEN];
+ char eeprom_tmp[128] = {};
+ u8 size;
+
+ size = min_t(u8, page->length - bytes_written,
+ mlxsw_env->max_eeprom_len);
+
+ mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page->page,
+ device_addr + bytes_written, size,
+ page->i2c_address);
+ mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank);
+ memcpy(eeprom_tmp, page->data + bytes_written, size);
+ mlxsw_reg_mcia_eeprom_memcpy_to(mcia_pl, eeprom_tmp);
+
+ err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcia), mcia_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to access module's EEPROM");
+ return err;
+ }
+
+ err = mlxsw_env_mcia_status_process(mcia_pl, extack);
+ if (err)
+ return err;
+
+ bytes_written += size;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mlxsw_env_set_module_eeprom_by_page);
+
static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 slot_index,
u8 module)
{
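The write path above splits a page write into transactions of at most mlxsw_env->max_eeprom_len bytes, advancing the device address by what has already been written. A runnable sketch of just that chunking arithmetic; the 48-byte transaction limit is an assumed example, not a value taken from the hardware:

	#include <stdio.h>

	int main(void)
	{
		unsigned int page_length = 128;		/* bytes requested */
		unsigned int offset = 0x80;		/* page->offset */
		unsigned int max_eeprom_len = 48;	/* assumed per-write limit */
		unsigned int bytes_written = 0;

		while (bytes_written < page_length) {
			unsigned int size = page_length - bytes_written;

			if (size > max_eeprom_len)
				size = max_eeprom_len;
			printf("MCIA write: %u bytes at device address %#x\n",
			       size, offset + bytes_written);
			bytes_written += size;
		}
		return 0;
	}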
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index a197e3ae069c..e4ff17869400 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -28,6 +28,12 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
+int
+mlxsw_env_set_module_eeprom_by_page(struct mlxsw_core *mlxsw_core,
+ u8 slot_index, u8 module,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack);
+
int mlxsw_env_reset_module(struct net_device *netdev,
struct mlxsw_core *mlxsw_core, u8 slot_index,
u8 module, u32 *flags);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
index 025e0db983fe..b032d5a4b3b8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c
@@ -1484,6 +1484,7 @@ err_type_file_file_validate:
vfree(types_info->data);
err_data_alloc:
kfree(types_info);
+ linecards->types_info = NULL;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
index 5c511e1a8efa..d61478c0c632 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
@@ -100,6 +100,12 @@ static const struct mlxsw_cooling_states default_cooling_states[] = {
struct mlxsw_thermal;
+struct mlxsw_thermal_cooling_device {
+ struct mlxsw_thermal *thermal;
+ struct thermal_cooling_device *cdev;
+ unsigned int idx;
+};
+
struct mlxsw_thermal_module {
struct mlxsw_thermal *parent;
struct thermal_zone_device *tzdev;
@@ -123,7 +129,7 @@ struct mlxsw_thermal {
const struct mlxsw_bus_info *bus_info;
struct thermal_zone_device *tzdev;
int polling_delay;
- struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX];
+ struct mlxsw_thermal_cooling_device cdevs[MLXSW_MFCR_PWMS_MAX];
struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS];
struct mlxsw_thermal_area line_cards[];
@@ -147,7 +153,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal,
int i;
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
- if (thermal->cdevs[i] == cdev)
+ if (thermal->cdevs[i].cdev == cdev)
return i;
/* Allow mlxsw thermal zone binding to an external cooling device */
@@ -352,17 +358,14 @@ static int mlxsw_thermal_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *p_state)
{
- struct mlxsw_thermal *thermal = cdev->devdata;
+ struct mlxsw_thermal_cooling_device *mlxsw_cdev = cdev->devdata;
+ struct mlxsw_thermal *thermal = mlxsw_cdev->thermal;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- int err, idx;
u8 duty;
+ int err;
- idx = mlxsw_get_cooling_device_idx(thermal, cdev);
- if (idx < 0)
- return idx;
-
- mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0);
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_cdev->idx, 0);
err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(dev, "Failed to query PWM duty\n");
@@ -378,22 +381,19 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
- struct mlxsw_thermal *thermal = cdev->devdata;
+ struct mlxsw_thermal_cooling_device *mlxsw_cdev = cdev->devdata;
+ struct mlxsw_thermal *thermal = mlxsw_cdev->thermal;
struct device *dev = thermal->bus_info->dev;
char mfsc_pl[MLXSW_REG_MFSC_LEN];
- int idx;
int err;
if (state > MLXSW_THERMAL_MAX_STATE)
return -EINVAL;
- idx = mlxsw_get_cooling_device_idx(thermal, cdev);
- if (idx < 0)
- return idx;
-
/* Normalize the state to the valid speed range. */
state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state);
- mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state));
+ mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_cdev->idx,
+ mlxsw_state_to_duty(state));
err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl);
if (err) {
dev_err(dev, "Failed to write PWM duty\n");
@@ -753,17 +753,21 @@ int mlxsw_thermal_init(struct mlxsw_core *core,
}
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
if (pwm_active & BIT(i)) {
+ struct mlxsw_thermal_cooling_device *mlxsw_cdev;
struct thermal_cooling_device *cdev;
+ mlxsw_cdev = &thermal->cdevs[i];
+ mlxsw_cdev->thermal = thermal;
+ mlxsw_cdev->idx = i;
cdev = thermal_cooling_device_register("mlxsw_fan",
- thermal,
+ mlxsw_cdev,
&mlxsw_cooling_ops);
if (IS_ERR(cdev)) {
err = PTR_ERR(cdev);
dev_err(dev, "Failed to register cooling device\n");
goto err_thermal_cooling_device_register;
}
- thermal->cdevs[i] = cdev;
+ mlxsw_cdev->cdev = cdev;
}
}
@@ -824,8 +828,7 @@ err_thermal_modules_init:
err_thermal_zone_device_register:
err_thermal_cooling_device_register:
for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
- if (thermal->cdevs[i])
- thermal_cooling_device_unregister(thermal->cdevs[i]);
+ thermal_cooling_device_unregister(thermal->cdevs[i].cdev);
err_reg_write:
err_reg_query:
kfree(thermal);
@@ -847,12 +850,8 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal)
thermal->tzdev = NULL;
}
- for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) {
- if (thermal->cdevs[i]) {
- thermal_cooling_device_unregister(thermal->cdevs[i]);
- thermal->cdevs[i] = NULL;
- }
- }
+ for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++)
+ thermal_cooling_device_unregister(thermal->cdevs[i].cdev);
kfree(thermal);
}
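Registering a small per-fan context as the cooling device's devdata is what lets get_cur_state()/set_cur_state() drop the mlxsw_get_cooling_device_idx() scan: the index travels with the callback argument. A standalone sketch of the pattern (the types here are hypothetical):

	#include <stdio.h>

	struct thermal;				/* stands in for struct mlxsw_thermal */

	struct cooling_device {
		struct thermal *thermal;	/* back-pointer for register access */
		unsigned int idx;		/* PWM index, fixed at registration */
	};

	/* The callback receives its own context; no array scan needed. */
	static int get_cur_state(void *devdata)
	{
		struct cooling_device *cdev = devdata;

		return (int)cdev->idx;
	}

	int main(void)
	{
		struct cooling_device cdevs[3];
		unsigned int i;

		for (i = 0; i < 3; i++) {
			cdevs[i].thermal = NULL;
			cdevs[i].idx = i;
			printf("pwm %u -> idx %d\n", i, get_cur_state(&cdevs[i]));
		}
		return 0;
	}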
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index cfafbeb42586..a619a0736bd1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -218,6 +218,10 @@ __mlxsw_item_bit_array_offset(const struct mlxsw_item *item,
}
max_index = (item->size.bytes << 3) / item->element_size - 1;
+ if (WARN_ONCE(index > max_index,
+ "name=%s,index=%u,max_index=%u\n", item->name, index,
+ max_index))
+ index = 0;
be_index = max_index - index;
offset = be_index * item->element_size >> 3;
in_byte_index = index % (BITS_PER_BYTE / item->element_size);
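The WARN_ONCE() turns a silent out-of-range bit-array access into a one-shot diagnostic and clamps the index to zero, so the offset math below stays inside the item. A standalone sketch of the clamp:

	#include <stdio.h>

	static unsigned int checked_index(unsigned int index,
					  unsigned int max_index)
	{
		if (index > max_index) {
			/* WARN_ONCE() in the driver; a plain message here */
			fprintf(stderr, "index=%u,max_index=%u\n", index,
				max_index);
			index = 0;	/* fall back to a safe element */
		}
		return index;
	}

	int main(void)
	{
		printf("in range:     %u\n", checked_index(3, 7));
		printf("out of range: %u\n", checked_index(9, 7));
		return 0;
	}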
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index f0ceb196a6ce..828c65036a4c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -140,6 +140,20 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
page, extack);
}
+static int
+mlxsw_m_set_module_eeprom_by_page(struct net_device *netdev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_set_module_eeprom_by_page(core,
+ mlxsw_m_port->slot_index,
+ mlxsw_m_port->module,
+ page, extack);
+}
+
static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
{
struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
@@ -181,6 +195,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
.get_module_info = mlxsw_m_get_module_info,
.get_module_eeprom = mlxsw_m_get_module_eeprom,
.get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = mlxsw_m_set_module_eeprom_by_page,
.reset = mlxsw_m_reset,
.get_module_power_mode = mlxsw_m_get_module_power_mode,
.set_module_power_mode = mlxsw_m_set_module_power_mode,
@@ -702,8 +717,8 @@ static struct mlxsw_driver mlxsw_m_driver = {
};
static const struct i2c_device_id mlxsw_m_i2c_id[] = {
- { "mlxsw_minimal", 0},
- { },
+ { "mlxsw_minimal" },
+ { }
};
static struct i2c_driver mlxsw_m_i2c_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index f42a1b1c9368..060e5b939211 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -8,12 +8,12 @@
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>
+#include <net/page_pool/helpers.h>
#include "pci_hw.h"
#include "pci.h"
@@ -36,6 +36,11 @@ enum mlxsw_pci_queue_type {
#define MLXSW_PCI_QUEUE_TYPE_COUNT 4
+enum mlxsw_pci_cq_type {
+ MLXSW_PCI_CQ_SDQ,
+ MLXSW_PCI_CQ_RDQ,
+};
+
static const u16 mlxsw_pci_doorbell_type_offset[] = {
MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
@@ -57,15 +62,11 @@ struct mlxsw_pci_mem_item {
};
struct mlxsw_pci_queue_elem_info {
+ struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
char *elem; /* pointer to actual dma mapped element mem chunk */
- union {
- struct {
- struct sk_buff *skb;
- } sdq;
- struct {
- struct sk_buff *skb;
- } rdq;
- } u;
+ struct {
+ struct sk_buff *skb;
+ } sdq;
};
struct mlxsw_pci_queue {
@@ -78,19 +79,20 @@ struct mlxsw_pci_queue {
u8 num; /* queue number */
u8 elem_size; /* size of one element */
enum mlxsw_pci_queue_type type;
- struct tasklet_struct tasklet; /* queue processing tasklet */
struct mlxsw_pci *pci;
union {
struct {
- u32 comp_sdq_count;
- u32 comp_rdq_count;
enum mlxsw_pci_cqe_v v;
+ struct mlxsw_pci_queue *dq;
+ struct napi_struct napi;
+ struct page_pool *page_pool;
} cq;
struct {
- u32 ev_cmd_count;
- u32 ev_comp_count;
- u32 ev_other_count;
+ struct tasklet_struct tasklet;
} eq;
+ struct {
+ struct mlxsw_pci_queue *cq;
+ } rdq;
} u;
};
@@ -109,6 +111,7 @@ struct mlxsw_pci {
bool cff_support;
enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode;
enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode;
+ u8 num_sg_entries; /* Number of scatter/gather entries for packets. */
struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
u32 doorbell_offset;
struct mlxsw_core *core;
@@ -120,9 +123,6 @@ struct mlxsw_pci {
struct mlxsw_pci_mem_item out_mbox;
struct mlxsw_pci_mem_item in_mbox;
struct mutex lock; /* Lock access to command registers */
- bool nopoll;
- wait_queue_head_t wait;
- bool wait_done;
struct {
u8 status;
u64 out_param;
@@ -131,13 +131,43 @@ struct mlxsw_pci {
struct mlxsw_bus_info bus_info;
const struct pci_device_id *id;
enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
- u8 num_sdq_cqs; /* Number of CQs used for SDQs */
+ u8 num_cqs; /* Number of CQs */
+ u8 num_sdqs; /* Number of SDQs */
bool skip_reset;
+ struct net_device *napi_dev_tx;
+ struct net_device *napi_dev_rx;
};
-static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
+static int mlxsw_pci_napi_devs_init(struct mlxsw_pci *mlxsw_pci)
+{
+ int err;
+
+ mlxsw_pci->napi_dev_tx = alloc_netdev_dummy(0);
+ if (!mlxsw_pci->napi_dev_tx)
+ return -ENOMEM;
+ strscpy(mlxsw_pci->napi_dev_tx->name, "mlxsw_tx",
+ sizeof(mlxsw_pci->napi_dev_tx->name));
+
+ mlxsw_pci->napi_dev_rx = alloc_netdev_dummy(0);
+ if (!mlxsw_pci->napi_dev_rx) {
+ err = -ENOMEM;
+ goto err_alloc_rx;
+ }
+ strscpy(mlxsw_pci->napi_dev_rx->name, "mlxsw_rx",
+ sizeof(mlxsw_pci->napi_dev_rx->name));
+ dev_set_threaded(mlxsw_pci->napi_dev_rx, true);
+
+ return 0;
+
+err_alloc_rx:
+ free_netdev(mlxsw_pci->napi_dev_tx);
+ return err;
+}
+
+static void mlxsw_pci_napi_devs_fini(struct mlxsw_pci *mlxsw_pci)
{
- tasklet_schedule(&q->tasklet);
+ free_netdev(mlxsw_pci->napi_dev_rx);
+ free_netdev(mlxsw_pci->napi_dev_tx);
}
static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
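NAPI contexts must hang off a net_device, and this bus driver has none of its own, so it allocates two dummy devices: one backing all TX CQ instances and one backing the RX CQ instances (which additionally run threaded). A kernel-style sketch of the idiom, not a complete module; my_ctx and my_poll are hypothetical names:

	#include <linux/netdevice.h>

	struct my_ctx {				/* hypothetical driver context */
		struct net_device *napi_dev;
		struct napi_struct napi;
	};

	static int my_poll(struct napi_struct *napi, int budget)
	{
		/* process up to 'budget' completions here */
		return 0;
	}

	static int my_napi_init(struct my_ctx *ctx)
	{
		ctx->napi_dev = alloc_netdev_dummy(0);	/* no real netdev needed */
		if (!ctx->napi_dev)
			return -ENOMEM;
		netif_napi_add(ctx->napi_dev, &ctx->napi, my_poll);
		return 0;
	}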
@@ -187,25 +217,6 @@ mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
return &mlxsw_pci->queues[q_type];
}
-static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
- enum mlxsw_pci_queue_type q_type)
-{
- struct mlxsw_pci_queue_type_group *queue_group;
-
- queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
- return queue_group->count;
-}
-
-static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
-{
- return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
-}
-
-static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
-{
- return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
-}
-
static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
enum mlxsw_pci_queue_type q_type, u8 q_num)
@@ -220,23 +231,16 @@ static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}
-static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
- u8 q_num)
-{
- return __mlxsw_pci_queue_get(mlxsw_pci,
- MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
-}
-
static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
u8 q_num)
{
return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}
-static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
- u8 q_num)
+static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci)
{
- return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
+ /* There is only one EQ at index 0. */
+ return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, 0);
}
static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
@@ -291,7 +295,9 @@ static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
+ struct mlxsw_pci_queue *cq;
int tclass;
+ u8 cq_num;
int lp;
int i;
int err;
@@ -304,7 +310,8 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE;
/* Set CQ of same number of this SDQ. */
- mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
+ cq_num = q->num;
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);
mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp);
mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
@@ -317,6 +324,9 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
if (err)
return err;
+
+ cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
+ cq->u.cq.dq = q;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
return 0;
}
@@ -327,6 +337,29 @@ static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}
+#define MLXSW_PCI_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN)
+
+#define MLXSW_PCI_RX_BUF_SW_OVERHEAD \
+ (MLXSW_PCI_SKB_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
+static void
+mlxsw_pci_wqe_rx_frag_set(struct mlxsw_pci *mlxsw_pci, struct page *page,
+ char *wqe, int index, size_t frag_len)
+{
+ dma_addr_t mapaddr;
+
+ mapaddr = page_pool_get_dma_addr(page);
+
+ if (index == 0) {
+ mapaddr += MLXSW_PCI_SKB_HEADROOM;
+ frag_len = frag_len - MLXSW_PCI_RX_BUF_SW_OVERHEAD;
+ }
+
+ mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
+ mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
+}
+
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
int index, char *frag_data, size_t frag_len,
int direction)
@@ -356,51 +389,150 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction);
}
-static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_pci_queue_elem_info *elem_info,
- gfp_t gfp)
+static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[],
+ u16 byte_count)
{
- size_t buf_len = MLXSW_PORT_MAX_MTU;
- char *wqe = elem_info->elem;
+ unsigned int linear_data_size;
struct sk_buff *skb;
- int err;
+ int page_index = 0;
+ bool linear_only;
+ void *data;
+
+ data = page_address(pages[page_index]);
+ net_prefetch(data);
+
+ skb = napi_build_skb(data, PAGE_SIZE);
+ if (unlikely(!skb))
+ return ERR_PTR(-ENOMEM);
+
+ linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE;
+ linear_data_size = linear_only ? byte_count :
+ PAGE_SIZE -
+ MLXSW_PCI_RX_BUF_SW_OVERHEAD;
+
+ skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM);
+ skb_put(skb, linear_data_size);
+
+ if (linear_only)
+ return skb;
+
+ byte_count -= linear_data_size;
+ page_index++;
+
+ while (byte_count > 0) {
+ unsigned int frag_size;
+ struct page *page;
+
+ page = pages[page_index];
+ frag_size = min(byte_count, PAGE_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ page, 0, frag_size, PAGE_SIZE);
+ byte_count -= frag_size;
+ page_index++;
+ }
+
+ return skb;
+}
+
+static int mlxsw_pci_rdq_page_alloc(struct mlxsw_pci_queue *q,
+ struct mlxsw_pci_queue_elem_info *elem_info,
+ int index)
+{
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ char *wqe = elem_info->elem;
+ struct page *page;
- skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp);
- if (!skb)
+ page = page_pool_dev_alloc_pages(cq->u.cq.page_pool);
+ if (unlikely(!page))
return -ENOMEM;
- err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
- buf_len, DMA_FROM_DEVICE);
- if (err)
- goto err_frag_map;
+ mlxsw_pci_wqe_rx_frag_set(q->pci, page, wqe, index, PAGE_SIZE);
+ elem_info->pages[index] = page;
+ return 0;
+}
+
+static void mlxsw_pci_rdq_page_free(struct mlxsw_pci_queue *q,
+ struct mlxsw_pci_queue_elem_info *elem_info,
+ int index)
+{
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
- elem_info->u.rdq.skb = skb;
+ page_pool_put_page(cq->u.cq.page_pool, elem_info->pages[index], -1,
+ false);
+}
+
+static u8 mlxsw_pci_num_sg_entries_get(u16 byte_count)
+{
+ return DIV_ROUND_UP(byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD,
+ PAGE_SIZE);
+}
+
+static int
+mlxsw_pci_elem_info_pages_ref_store(const struct mlxsw_pci_queue *q,
+ const struct mlxsw_pci_queue_elem_info *el,
+ u16 byte_count, struct page *pages[],
+ u8 *p_num_sg_entries)
+{
+ u8 num_sg_entries;
+ int i;
+
+ num_sg_entries = mlxsw_pci_num_sg_entries_get(byte_count);
+ if (WARN_ON_ONCE(num_sg_entries > q->pci->num_sg_entries))
+ return -EINVAL;
+
+ for (i = 0; i < num_sg_entries; i++)
+ pages[i] = el->pages[i];
+
+ *p_num_sg_entries = num_sg_entries;
return 0;
+}
+
+static int
+mlxsw_pci_rdq_pages_alloc(struct mlxsw_pci_queue *q,
+ struct mlxsw_pci_queue_elem_info *elem_info,
+ u8 num_sg_entries)
+{
+ struct page *old_pages[MLXSW_PCI_WQE_SG_ENTRIES];
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ int i, err;
+
+ for (i = 0; i < num_sg_entries; i++) {
+ old_pages[i] = elem_info->pages[i];
+ err = mlxsw_pci_rdq_page_alloc(q, elem_info, i);
+ if (err) {
+ dev_err_ratelimited(&q->pci->pdev->dev, "Failed to alloc page\n");
+ goto err_page_alloc;
+ }
+ }
+
+ return 0;
+
+err_page_alloc:
+ for (i--; i >= 0; i--)
+ page_pool_recycle_direct(cq->u.cq.page_pool, old_pages[i]);
-err_frag_map:
- dev_kfree_skb_any(skb);
return err;
}
-static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_pci_queue_elem_info *elem_info)
+static void
+mlxsw_pci_rdq_pages_recycle(struct mlxsw_pci_queue *q, struct page *pages[],
+ u8 num_sg_entries)
{
- struct sk_buff *skb;
- char *wqe;
-
- skb = elem_info->u.rdq.skb;
- wqe = elem_info->elem;
+ struct mlxsw_pci_queue *cq = q->u.rdq.cq;
+ int i;
- mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
+ for (i = 0; i < num_sg_entries; i++)
+ page_pool_recycle_direct(cq->u.cq.page_pool, pages[i]);
}
static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
struct mlxsw_pci_queue_elem_info *elem_info;
- u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
- int i;
+ u8 sdq_count = mlxsw_pci->num_sdqs;
+ struct mlxsw_pci_queue *cq;
+ u8 cq_num;
+ int i, j;
int err;
q->producer_counter = 0;
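The byte_count-to-pages math is the heart of the new receive path: the first page donates MLXSW_PCI_SKB_HEADROOM at the front and room for struct skb_shared_info at the back, so a packet's scatter/gather entry count is a DIV_ROUND_UP over PAGE_SIZE with that overhead added. A runnable sketch; the headroom and shared-info sizes are typical values assumed for illustration:

	#include <stdio.h>

	#define PAGE_SZ		4096u
	#define HEADROOM	(64u + 2u)	/* NET_SKB_PAD + NET_IP_ALIGN, typical */
	#define SHINFO		320u		/* assumed skb_shared_info footprint */
	#define SW_OVERHEAD	(HEADROOM + SHINFO)

	static unsigned int num_sg_entries(unsigned int byte_count)
	{
		return (byte_count + SW_OVERHEAD + PAGE_SZ - 1) / PAGE_SZ;
	}

	int main(void)
	{
		printf("1500-byte frame -> %u sg entries\n", num_sg_entries(1500));
		printf("9000-byte frame -> %u sg entries\n", num_sg_entries(9000));
		return 0;
	}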
@@ -409,7 +541,8 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
/* Set CQ of same number of this RDQ with base
* above SDQ count as the lower ones are assigned to SDQs.
*/
- mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
+ cq_num = sdq_count + q->num;
+ mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
@@ -421,14 +554,21 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
if (err)
return err;
+ cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num);
+ cq->u.cq.dq = q;
+ q->u.rdq.cq = cq;
+
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
for (i = 0; i < q->count; i++) {
elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
BUG_ON(!elem_info);
- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL);
- if (err)
- goto rollback;
+
+ for (j = 0; j < mlxsw_pci->num_sg_entries; j++) {
+ err = mlxsw_pci_rdq_page_alloc(q, elem_info, j);
+ if (err)
+ goto rollback;
+ }
/* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
@@ -439,8 +579,12 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
rollback:
for (i--; i >= 0; i--) {
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
- mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ for (j--; j >= 0; j--)
+ mlxsw_pci_rdq_page_free(q, elem_info, j);
+ j = mlxsw_pci->num_sg_entries;
}
+ q->u.rdq.cq = NULL;
+ cq->u.cq.dq = NULL;
mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
return err;
@@ -450,12 +594,13 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q)
{
struct mlxsw_pci_queue_elem_info *elem_info;
- int i;
+ int i, j;
mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
for (i = 0; i < q->count; i++) {
elem_info = mlxsw_pci_queue_elem_info_get(q, i);
- mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
+ for (j = 0; j < mlxsw_pci->num_sg_entries; j++)
+ mlxsw_pci_rdq_page_free(q, elem_info, j);
}
}
@@ -465,54 +610,11 @@ static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
q->u.cq.v = mlxsw_pci->max_cqe_ver;
if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
- q->num < mlxsw_pci->num_sdq_cqs &&
+ q->num < mlxsw_pci->num_sdqs &&
!mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core))
q->u.cq.v = MLXSW_PCI_CQE_V1;
}
-static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
- struct mlxsw_pci_queue *q)
-{
- int i;
- int err;
-
- q->consumer_counter = 0;
-
- for (i = 0; i < q->count; i++) {
- char *elem = mlxsw_pci_queue_elem_get(q, i);
-
- mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
- }
-
- if (q->u.cq.v == MLXSW_PCI_CQE_V1)
- mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
- MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
- else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
- mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
- MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
-
- mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
- mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
- mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
- for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
- dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
-
- mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
- }
- err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
- if (err)
- return err;
- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
- mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
- return 0;
-}
-
-static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
- struct mlxsw_pci_queue *q)
-{
- mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
-}
-
static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci,
ptrdiff_t off)
{
@@ -543,7 +645,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q,
u16 consumer_counter_limit,
enum mlxsw_pci_cqe_v cqe_v,
- char *cqe)
+ char *cqe, int budget)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
struct mlxsw_pci_queue_elem_info *elem_info;
@@ -554,8 +656,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
spin_lock(&q->lock);
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
- skb = elem_info->u.sdq.skb;
+ tx_info = mlxsw_skb_cb(elem_info->sdq.skb)->tx_info;
+ skb = elem_info->sdq.skb;
wqe = elem_info->elem;
for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
@@ -569,8 +671,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
}
if (skb)
- dev_kfree_skb_any(skb);
- elem_info->u.sdq.skb = NULL;
+ napi_consume_skb(skb, budget);
+ elem_info->sdq.skb = NULL;
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
@@ -632,27 +734,40 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
+ struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES];
struct mlxsw_pci_queue_elem_info *elem_info;
struct mlxsw_rx_info rx_info = {};
- char wqe[MLXSW_PCI_WQE_SIZE];
struct sk_buff *skb;
+ u8 num_sg_entries;
u16 byte_count;
int err;
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- skb = elem_info->u.rdq.skb;
- memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC);
- if (err) {
- dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
+ if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
+ byte_count -= ETH_FCS_LEN;
+
+ err = mlxsw_pci_elem_info_pages_ref_store(q, elem_info, byte_count,
+ pages, &num_sg_entries);
+ if (err)
+ goto out;
+
+ err = mlxsw_pci_rdq_pages_alloc(q, elem_info, num_sg_entries);
+ if (err)
+ goto out;
+
+ skb = mlxsw_pci_rdq_build_skb(pages, byte_count);
+ if (IS_ERR(skb)) {
+ dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n");
+ mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries);
goto out;
}
- mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+ skb_mark_for_recycle(skb);
if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
rx_info.is_lag = true;
@@ -685,16 +800,10 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe);
- byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
- if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
- byte_count -= ETH_FCS_LEN;
- skb_put(skb, byte_count);
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
out:
- /* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
- mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
return;
}
@@ -714,13 +823,86 @@ static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
return elem;
}
-static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
+static bool mlxsw_pci_cq_cqe_to_handle(struct mlxsw_pci_queue *q)
{
- struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
+ struct mlxsw_pci_queue_elem_info *elem_info;
+ bool owner_bit;
+
+ elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
+ owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem_info->elem);
+ return !mlxsw_pci_elem_hw_owned(q, owner_bit);
+}
+
+static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget)
+{
+ struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue,
+ u.cq.napi);
+ struct mlxsw_pci_queue *rdq = q->u.cq.dq;
struct mlxsw_pci *mlxsw_pci = q->pci;
+ int work_done = 0;
+ char *cqe;
+
+ /* If the budget is 0, Rx processing should be skipped. */
+ if (unlikely(!budget))
+ return 0;
+
+ while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
+ u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
+ u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
+ u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
+
+ if (unlikely(sendq)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
+ if (unlikely(dqn != rdq->num)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
+ mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
+ wqe_counter, q->u.cq.v, cqe);
+
+ if (++work_done == budget)
+ break;
+ }
+
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, rdq);
+
+ if (work_done < budget)
+ goto processing_completed;
+
+ /* The driver still has outstanding work to do and the budget was
+ * exhausted. Return exactly 'budget'; in that case, the NAPI instance
+ * will be polled again.
+ */
+ if (mlxsw_pci_cq_cqe_to_handle(q))
+ goto out;
+
+ /* The driver processed all the completions and handled exactly
+ * 'budget'. Return 'budget - 1' to distinguish from the case where the
+ * driver still has completions to handle.
+ */
+ if (work_done == budget)
+ work_done--;
+
+processing_completed:
+ if (napi_complete_done(napi, work_done))
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+out:
+ return work_done;
+}
+
+static int mlxsw_pci_napi_poll_cq_tx(struct napi_struct *napi, int budget)
+{
+ struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue,
+ u.cq.napi);
+ struct mlxsw_pci_queue *sdq = q->u.cq.dq;
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ int work_done = 0;
char *cqe;
- int items = 0;
- int credits = q->count >> 1;
while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
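The RX poll routine above encodes the NAPI contract precisely: returning 'budget' means "poll me again", so when the ring happens to drain on exactly the budget'th completion it reports 'budget - 1' and completes instead. A standalone sketch of the three cases:

	#include <stdbool.h>
	#include <stdio.h>

	static int poll_return(int work_done, int budget, bool cqes_pending)
	{
		if (work_done < budget)
			return work_done;	/* done; napi_complete_done() re-arms */
		if (cqes_pending)
			return budget;		/* budget exhausted; repoll */
		return budget - 1;		/* drained exactly at budget */
	}

	int main(void)
	{
		printf("%d\n", poll_return(10, 64, false));	/* 10 */
		printf("%d\n", poll_return(64, 64, true));	/* 64 */
		printf("%d\n", poll_return(64, 64, false));	/* 63 */
		return 0;
	}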
@@ -728,46 +910,112 @@ static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t)
u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
char ncqe[MLXSW_PCI_CQE_SIZE_MAX];
+ if (unlikely(!sendq)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
+ if (unlikely(dqn != sdq->num)) {
+ WARN_ON_ONCE(1);
+ continue;
+ }
+
memcpy(ncqe, cqe, q->elem_size);
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
- if (sendq) {
- struct mlxsw_pci_queue *sdq;
+ mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
+ wqe_counter, q->u.cq.v, ncqe, budget);
- sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
- mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
- wqe_counter, q->u.cq.v, ncqe);
- q->u.cq.comp_sdq_count++;
- } else {
- struct mlxsw_pci_queue *rdq;
+ work_done++;
+ }
- rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
- mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
- wqe_counter, q->u.cq.v, ncqe);
- q->u.cq.comp_rdq_count++;
- }
- if (++items == credits)
- break;
+ /* If the budget is 0, napi_complete_done() should never be called. */
+ if (unlikely(!budget))
+ goto processing_completed;
+
+ work_done = min(work_done, budget - 1);
+ if (unlikely(!napi_complete_done(napi, work_done)))
+ goto out;
+
+processing_completed:
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+out:
+ return work_done;
+}
+
+static enum mlxsw_pci_cq_type
+mlxsw_pci_cq_type(const struct mlxsw_pci *mlxsw_pci,
+ const struct mlxsw_pci_queue *q)
+{
+ /* Each CQ is mapped to one DQ. The first 'num_sdqs' queues are used
+ * for SDQs and the rest are used for RDQs.
+ */
+ if (q->num < mlxsw_pci->num_sdqs)
+ return MLXSW_PCI_CQ_SDQ;
+
+ return MLXSW_PCI_CQ_RDQ;
+}
+
+static void mlxsw_pci_cq_napi_setup(struct mlxsw_pci_queue *q,
+ enum mlxsw_pci_cq_type cq_type)
+{
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+
+ switch (cq_type) {
+ case MLXSW_PCI_CQ_SDQ:
+ netif_napi_add(mlxsw_pci->napi_dev_tx, &q->u.cq.napi,
+ mlxsw_pci_napi_poll_cq_tx);
+ break;
+ case MLXSW_PCI_CQ_RDQ:
+ netif_napi_add(mlxsw_pci->napi_dev_rx, &q->u.cq.napi,
+ mlxsw_pci_napi_poll_cq_rx);
+ break;
}
- if (items)
- mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}
-static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
+static void mlxsw_pci_cq_napi_teardown(struct mlxsw_pci_queue *q)
{
- return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
- MLXSW_PCI_CQE01_COUNT;
+ netif_napi_del(&q->u.cq.napi);
}
-static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
+static int mlxsw_pci_cq_page_pool_init(struct mlxsw_pci_queue *q,
+ enum mlxsw_pci_cq_type cq_type)
{
- return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
- MLXSW_PCI_CQE01_SIZE;
+ struct page_pool_params pp_params = {};
+ struct mlxsw_pci *mlxsw_pci = q->pci;
+ struct page_pool *page_pool;
+
+ if (cq_type != MLXSW_PCI_CQ_RDQ)
+ return 0;
+
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = MLXSW_PCI_WQE_COUNT * mlxsw_pci->num_sg_entries;
+ pp_params.nid = dev_to_node(&mlxsw_pci->pdev->dev);
+ pp_params.dev = &mlxsw_pci->pdev->dev;
+ pp_params.napi = &q->u.cq.napi;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+
+ page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(page_pool))
+ return PTR_ERR(page_pool);
+
+ q->u.cq.page_pool = page_pool;
+ return 0;
}
-static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+static void mlxsw_pci_cq_page_pool_fini(struct mlxsw_pci_queue *q,
+ enum mlxsw_pci_cq_type cq_type)
+{
+ if (cq_type != MLXSW_PCI_CQ_RDQ)
+ return;
+
+ page_pool_destroy(q->u.cq.page_pool);
+}
+
+static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
+ enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q);
int i;
int err;
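Only CQs that serve an RDQ get a page pool; it is sized for the full WQE ring times the per-packet scatter/gather entries, DMA-maps pages on behalf of the driver, and is bound to the CQ's NAPI so frees from the poll context recycle without locking. A kernel-style sketch of those parameters (not a complete driver; the function name is hypothetical):

	#include <linux/device.h>
	#include <net/page_pool/helpers.h>

	static struct page_pool *rx_pool_create(struct device *dev,
						struct napi_struct *napi,
						unsigned int wqe_count,
						unsigned int sg_per_packet)
	{
		struct page_pool_params pp_params = {
			.flags		= PP_FLAG_DMA_MAP,	/* pool maps pages */
			.pool_size	= wqe_count * sg_per_packet,
			.nid		= dev_to_node(dev),
			.dev		= dev,
			.napi		= napi,		/* enables direct recycling */
			.dma_dir	= DMA_FROM_DEVICE,
		};

		return page_pool_create(&pp_params);
	}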
@@ -776,39 +1024,64 @@ static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
for (i = 0; i < q->count; i++) {
char *elem = mlxsw_pci_queue_elem_get(q, i);
- mlxsw_pci_eqe_owner_set(elem, 1);
+ mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
}
- mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
- mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
- mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+ if (q->u.cq.v == MLXSW_PCI_CQE_V1)
+ mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
+ else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
+ mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
+ MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);
+
+ mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
+ mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
+ mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
- mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
+ mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
}
- err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+ err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
if (err)
return err;
+ mlxsw_pci_cq_napi_setup(q, cq_type);
+
+ err = mlxsw_pci_cq_page_pool_init(q, cq_type);
+ if (err)
+ goto err_page_pool_init;
+
+ napi_enable(&q->u.cq.napi);
mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
return 0;
+
+err_page_pool_init:
+ mlxsw_pci_cq_napi_teardown(q);
+ return err;
}
-static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q)
{
- mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
+ enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q);
+
+ napi_disable(&q->u.cq.napi);
+ mlxsw_pci_cq_page_pool_fini(q, cq_type);
+ mlxsw_pci_cq_napi_teardown(q);
+ mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}
-static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
+static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
- mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
- mlxsw_pci->cmd.comp.out_param =
- ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
- mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
- mlxsw_pci->cmd.wait_done = true;
- wake_up(&mlxsw_pci->cmd.wait);
+ return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
+ MLXSW_PCI_CQE01_COUNT;
+}
+
+static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
+{
+ return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
+ MLXSW_PCI_CQE01_SIZE;
}
static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
@@ -829,52 +1102,79 @@ static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t)
{
- struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet);
- struct mlxsw_pci *mlxsw_pci = q->pci;
- u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
- char *eqe;
- u8 cqn;
- bool cq_handle = false;
- int items = 0;
+ struct mlxsw_pci_queue *q = from_tasklet(q, t, u.eq.tasklet);
+ struct mlxsw_pci *mlxsw_pci = q->pci;
int credits = q->count >> 1;
+ u8 cqn, cq_count;
+ int items = 0;
+ char *eqe;
memset(&active_cqns, 0, sizeof(active_cqns));
while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
+ cqn = mlxsw_pci_eqe_cqn_get(eqe);
+ set_bit(cqn, active_cqns);
- /* Command interface completion events are always received on
- * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
- * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
- */
- switch (q->num) {
- case MLXSW_PCI_EQ_ASYNC_NUM:
- mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
- q->u.eq.ev_cmd_count++;
- break;
- case MLXSW_PCI_EQ_COMP_NUM:
- cqn = mlxsw_pci_eqe_cqn_get(eqe);
- set_bit(cqn, active_cqns);
- cq_handle = true;
- q->u.eq.ev_comp_count++;
- break;
- default:
- q->u.eq.ev_other_count++;
- }
if (++items == credits)
break;
}
- if (items) {
- mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
- mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
- }
- if (!cq_handle)
+ if (!items)
return;
+
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+
+ cq_count = mlxsw_pci->num_cqs;
for_each_set_bit(cqn, active_cqns, cq_count) {
q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
- mlxsw_pci_queue_tasklet_schedule(q);
+ napi_schedule(&q->u.cq.napi);
+ }
+}
+
+static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_pci_queue *q)
+{
+ int i;
+ int err;
+
+ /* We expect to initialize only one EQ, which gets num=0 as it is
+ * located at index zero. We use the EQ as EQ1, so set its number
+ * accordingly for later use.
+ */
+ WARN_ON_ONCE(q->num);
+ q->num = MLXSW_PCI_EQ_COMP_NUM;
+
+ q->consumer_counter = 0;
+
+ for (i = 0; i < q->count; i++) {
+ char *elem = mlxsw_pci_queue_elem_get(q, i);
+
+ mlxsw_pci_eqe_owner_set(elem, 1);
+ }
+
+ mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
+ mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
+ mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
+ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
+ dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
+
+ mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
}
+ err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
+ if (err)
+ return err;
+ tasklet_setup(&q->u.eq.tasklet, mlxsw_pci_eq_tasklet);
+ mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
+ mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
+ return 0;
+}
+
+static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
+ struct mlxsw_pci_queue *q)
+{
+ mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}
struct mlxsw_pci_queue_ops {
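With command-interface completions no longer delivered through an EQ, a single event queue remains; its tasklet batches event entries into a CQ bitmap and then wakes each marked CQ's NAPI once, no matter how many events it raised. A standalone sketch of that fan-out:

	#include <stdio.h>

	#define CQS_MAX 64

	int main(void)
	{
		unsigned long long active = 0;		/* stands in for the bitmap */
		int eqe_cqns[] = { 3, 7, 3, 12 };	/* CQNs reported by EQEs */
		unsigned int i;

		for (i = 0; i < sizeof(eqe_cqns) / sizeof(eqe_cqns[0]); i++)
			active |= 1ULL << eqe_cqns[i];	/* set_bit() */

		for (i = 0; i < CQS_MAX; i++)
			if (active & (1ULL << i))
				printf("napi_schedule(cq %u)\n", i); /* once per CQ */
		return 0;
	}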
@@ -886,7 +1186,6 @@ struct mlxsw_pci_queue_ops {
struct mlxsw_pci_queue *q);
void (*fini)(struct mlxsw_pci *mlxsw_pci,
struct mlxsw_pci_queue *q);
- void (*tasklet)(struct tasklet_struct *t);
u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
u16 elem_count;
@@ -914,7 +1213,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
.pre_init = mlxsw_pci_cq_pre_init,
.init = mlxsw_pci_cq_init,
.fini = mlxsw_pci_cq_fini,
- .tasklet = mlxsw_pci_cq_tasklet,
.elem_count_f = mlxsw_pci_cq_elem_count,
.elem_size_f = mlxsw_pci_cq_elem_size
};
@@ -923,7 +1221,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
.type = MLXSW_PCI_QUEUE_TYPE_EQ,
.init = mlxsw_pci_eq_init,
.fini = mlxsw_pci_eq_fini,
- .tasklet = mlxsw_pci_eq_tasklet,
.elem_count = MLXSW_PCI_EQE_COUNT,
.elem_size = MLXSW_PCI_EQE_SIZE
};
@@ -948,9 +1245,6 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
q->type = q_ops->type;
q->pci = mlxsw_pci;
- if (q_ops->tasklet)
- tasklet_setup(&q->tasklet, q_ops->tasklet);
-
mem_item->size = MLXSW_PCI_AQ_SIZE;
mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev,
mem_item->size, &mem_item->mapaddr,
@@ -1074,7 +1368,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
if (num_sdqs + num_rdqs > num_cqs ||
num_sdqs < MLXSW_PCI_SDQS_MIN ||
- num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
+ num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_MAX) {
dev_err(&pdev->dev, "Unsupported number of queues\n");
return -EINVAL;
}
@@ -1089,10 +1383,11 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
return -EINVAL;
}
- mlxsw_pci->num_sdq_cqs = num_sdqs;
+ mlxsw_pci->num_cqs = num_cqs;
+ mlxsw_pci->num_sdqs = num_sdqs;
err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
- num_eqs);
+ MLXSW_PCI_EQS_COUNT);
if (err) {
dev_err(&pdev->dev, "Failed to initialize event queues\n");
return err;
@@ -1119,8 +1414,6 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
goto err_rdqs_init;
}
- /* We have to poll in command interface until queues are initialized */
- mlxsw_pci->cmd.nopoll = true;
return 0;
err_rdqs_init:
@@ -1134,7 +1427,6 @@ err_cqs_init:
static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
- mlxsw_pci->cmd.nopoll = false;
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
@@ -1432,12 +1724,9 @@ static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
struct mlxsw_pci *mlxsw_pci = dev_id;
struct mlxsw_pci_queue *q;
- int i;
- for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
- q = mlxsw_pci_eq_get(mlxsw_pci, i);
- mlxsw_pci_queue_tasklet_schedule(q);
- }
+ q = mlxsw_pci_eq_get(mlxsw_pci);
+ tasklet_schedule(&q->u.eq.tasklet);
return IRQ_HANDLED;
}
@@ -1490,20 +1779,31 @@ static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
return -EBUSY;
}
-static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
+static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci,
+ bool pci_reset_sbr_supported)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
char mrsr_pl[MLXSW_REG_MRSR_LEN];
+ struct pci_dev *bridge;
int err;
+ if (!pci_reset_sbr_supported) {
+ pci_dbg(pdev, "Performing PCI hot reset instead of \"all reset\"\n");
+ goto sbr;
+ }
+
mlxsw_reg_mrsr_pack(mrsr_pl,
MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE);
err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
if (err)
return err;
+sbr:
device_lock_assert(&pdev->dev);
+ bridge = pci_upstream_bridge(pdev);
+ if (bridge)
+ pci_cfg_access_lock(bridge);
pci_cfg_access_lock(pdev);
pci_save_state(pdev);
@@ -1513,6 +1813,8 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci)
pci_restore_state(pdev);
pci_cfg_access_unlock(pdev);
+ if (bridge)
+ pci_cfg_access_unlock(bridge);
return err;
}
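/* Note: locking config access on the upstream bridge as well presumably
 * keeps other config-space users from touching the bridge while its
 * secondary bus is being reset underneath them.
 */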
@@ -1529,6 +1831,7 @@ static int
mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
{
struct pci_dev *pdev = mlxsw_pci->pdev;
+ bool pci_reset_sbr_supported = false;
char mcam_pl[MLXSW_REG_MCAM_LEN];
bool pci_reset_supported = false;
u32 sys_status;
@@ -1548,13 +1851,17 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id)
mlxsw_reg_mcam_pack(mcam_pl,
MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES);
err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl);
- if (!err)
+ if (!err) {
mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET,
&pci_reset_supported);
+ mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET_SBR,
+ &pci_reset_sbr_supported);
+ }
if (pci_reset_supported) {
pci_dbg(pdev, "Starting PCI reset flow\n");
- err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci);
+ err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci,
+ pci_reset_sbr_supported);
} else {
pci_dbg(pdev, "Starting software reset flow\n");
err = mlxsw_pci_reset_sw(mlxsw_pci);
@@ -1587,6 +1894,17 @@ static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
pci_free_irq_vectors(mlxsw_pci->pdev);
}
+static void mlxsw_pci_num_sg_entries_set(struct mlxsw_pci *mlxsw_pci)
+{
+ u8 num_sg_entries;
+
+ num_sg_entries = mlxsw_pci_num_sg_entries_get(MLXSW_PORT_MAX_MTU);
+ mlxsw_pci->num_sg_entries = min(num_sg_entries,
+ MLXSW_PCI_WQE_SG_ENTRIES);
+
+ WARN_ON(num_sg_entries > MLXSW_PCI_WQE_SG_ENTRIES);
+}
+
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
const struct mlxsw_config_profile *profile,
struct mlxsw_res *res)
@@ -1709,6 +2027,12 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_requery_resources;
+ mlxsw_pci_num_sg_entries_set(mlxsw_pci);
+
+ err = mlxsw_pci_napi_devs_init(mlxsw_pci);
+ if (err)
+ goto err_napi_devs_init;
+
err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
if (err)
goto err_aqs_init;
@@ -1726,6 +2050,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
err_request_eq_irq:
mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
+ mlxsw_pci_napi_devs_fini(mlxsw_pci);
+err_napi_devs_init:
err_requery_resources:
err_config_profile:
err_cqe_v_check:
@@ -1753,6 +2079,7 @@ static void mlxsw_pci_fini(void *bus_priv)
free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
mlxsw_pci_aqs_fini(mlxsw_pci);
+ mlxsw_pci_napi_devs_fini(mlxsw_pci);
mlxsw_pci_fw_area_fini(mlxsw_pci);
mlxsw_pci_free_irq_vectors(mlxsw_pci);
}
@@ -1761,7 +2088,7 @@ static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
const struct mlxsw_tx_info *tx_info)
{
- u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
+ u8 ctl_sdq_count = mlxsw_pci->num_sdqs - 1;
u8 sdqn;
if (tx_info->is_emad) {
@@ -1808,7 +2135,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
goto unlock;
}
mlxsw_skb_cb(skb)->tx_info = *tx_info;
- elem_info->u.sdq.skb = skb;
+ elem_info->sdq.skb = skb;
wqe = elem_info->elem;
mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
@@ -1860,9 +2187,9 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
- bool evreq = mlxsw_pci->cmd.nopoll;
unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
- bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
+ unsigned long end;
+ bool wait_done;
int err;
*p_status = MLXSW_CMD_STATUS_OK;
@@ -1886,36 +2213,28 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
- *p_wait_done = false;
+ wait_done = false;
wmb(); /* all needs to be written before we write control register */
mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
MLXSW_PCI_CIR_CTRL_GO_BIT |
- (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
(opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
opcode);
- if (!evreq) {
- unsigned long end;
-
- end = jiffies + timeout;
- do {
- u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
+ end = jiffies + timeout;
+ do {
+ u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
- if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
- *p_wait_done = true;
- *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
- break;
- }
- cond_resched();
- } while (time_before(jiffies, end));
- } else {
- wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
- *p_status = mlxsw_pci->cmd.comp.status;
- }
+ if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
+ wait_done = true;
+ *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
+ break;
+ }
+ cond_resched();
+ } while (time_before(jiffies, end));
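+	/* With the async EQ (EQ0) removed, command completions are always
+	 * detected by polling the GO bit as above; the event-driven
+	 * wait-queue path is gone.
+	 */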
err = 0;
- if (*p_wait_done) {
+ if (wait_done) {
if (*p_status)
err = -EIO;
} else {
@@ -1929,14 +2248,12 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
*/
__be32 tmp;
- if (!evreq) {
- tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
- CIR_OUT_PARAM_HI));
- memcpy(out_mbox, &tmp, sizeof(tmp));
- tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
- CIR_OUT_PARAM_LO));
- memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
- }
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_HI));
+ memcpy(out_mbox, &tmp, sizeof(tmp));
+ tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
+ CIR_OUT_PARAM_LO));
+ memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
} else if (!err && out_mbox) {
memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
}
@@ -2015,7 +2332,6 @@ static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci)
int err;
mutex_init(&mlxsw_pci->cmd.lock);
- init_waitqueue_head(&mlxsw_pci->cmd.wait);
err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index 7cdf0ce24f28..6bed495dcf0f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -42,8 +42,8 @@
((offset) + (type_offset) + (num) * 4)
#define MLXSW_PCI_CQS_MAX 96
-#define MLXSW_PCI_EQS_COUNT 2
-#define MLXSW_PCI_EQ_ASYNC_NUM 0
+#define MLXSW_PCI_EQS_MAX 2
+#define MLXSW_PCI_EQS_COUNT 1
#define MLXSW_PCI_EQ_COMP_NUM 1
#define MLXSW_PCI_SDQS_MIN 2 /* EMAD and control traffic */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index ac4d4ea51597..0a73b1a4526e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -6,7 +6,8 @@
#include <linux/types.h>
-#define MLXSW_PORT_MAX_MTU 10000
+#define MLXSW_PORT_MAX_MTU (10 * 1024)
+#define MLXSW_PORT_ETH_FRAME_HDR (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
#define MLXSW_PORT_DEFAULT_VID 1
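/* Worked example (illustrative): with ETH_HLEN 14, VLAN_HLEN 4 and
 * ETH_FCS_LEN 4, MLXSW_PORT_ETH_FRAME_HDR is 22 bytes, so the largest
 * advertised netdev MTU becomes 10240 - 22 = 10218, and adding the
 * header overhead back exactly fills the 10 KiB hardware frame size.
 */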
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 8892654c685f..3bb89045eaf5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4786,8 +4786,11 @@ MLXSW_ITEM32(reg, ptys, an_status, 0x04, 28, 4);
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR BIT(8)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4 BIT(9)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2 BIT(10)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_1_100GBASE_CR_KR BIT(11)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4 BIT(12)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_2_200GBASE_CR2_KR2 BIT(13)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8 BIT(15)
+#define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_4_400GBASE_CR4_KR4 BIT(16)
#define MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8 BIT(19)
/* reg_ptys_ext_eth_proto_cap
@@ -10668,6 +10671,8 @@ enum mlxsw_reg_mcam_mng_feature_cap_mask_bits {
MLXSW_REG_MCAM_MCIA_128B = 34,
/* If set, MRSR.command=6 is supported. */
MLXSW_REG_MCAM_PCI_RESET = 48,
+ /* If set, MRSR.command=6 is supported with Secondary Bus Reset. */
+ MLXSW_REG_MCAM_PCI_RESET_SBR = 67,
};
#define MLXSW_REG_BYTES_PER_DWORD 0x4
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index bb642e9bb6cf..f064789f3240 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -405,29 +405,12 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_port->dev->dev_addr);
}
-static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char pmtu_pl[MLXSW_REG_PMTU_LEN];
- int err;
-
- mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
- if (err)
- return err;
-
- *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
- return 0;
-}
-
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmtu_pl[MLXSW_REG_PMTU_LEN];
- mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
- if (mtu > mlxsw_sp_port->max_mtu)
- return -EINVAL;
+ mtu += MLXSW_PORT_ETH_FRAME_HDR;
mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
@@ -825,7 +808,7 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
if (err)
goto err_port_mtu_set;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
err_port_mtu_set:
@@ -1697,8 +1680,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;
- dev->min_mtu = 0;
- dev->max_mtu = ETH_MAX_MTU;
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR;
/* Each packet needs to have a Tx header (metadata) on top of all other
* headers.
@@ -1727,13 +1710,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
goto err_max_speed_get;
}
- err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
- mlxsw_sp_port->local_port);
- goto err_port_max_mtu_get;
- }
-
err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
@@ -1877,7 +1853,6 @@ err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
-err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3beb5d0847ab..8d3c61287696 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -238,7 +238,7 @@ struct mlxsw_sp_ptp_ops {
struct hwtstamp_config *config);
void (*shaper_work)(struct work_struct *work);
int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int (*get_stats_count)(void);
void (*get_stats_strings)(u8 **p);
void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -359,7 +359,6 @@ struct mlxsw_sp_port {
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
- int max_mtu;
u32 max_speed;
struct mlxsw_sp_hdroom *hdroom;
u64 module_overheat_initial_val;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index 4b713832fdd5..07cb1e26ca3e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -391,7 +391,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- lkey_id = aregion->ops->lkey_id_get(aregion, aentry->enc_key, erp_id);
+ lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key,
+ erp_id);
if (IS_ERR(lkey_id))
return PTR_ERR(lkey_id);
aentry->lkey_id = lkey_id;
@@ -399,7 +400,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE,
priority, region->tcam_region_info,
- aentry->enc_key, erp_id,
+ aentry->ht_key.enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -428,7 +429,7 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp,
mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0,
region->tcam_region_info,
- aentry->enc_key, erp_id,
+ aentry->ht_key.enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -457,7 +458,7 @@ mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE,
priority, region->tcam_region_info,
- aentry->enc_key, erp_id,
+ aentry->ht_key.enc_key, erp_id,
aentry->delta_info.start,
aentry->delta_info.mask,
aentry->delta_info.value,
@@ -480,26 +481,23 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
int err;
mlxsw_afk_encode(afk, region->key_info, &rulei->values,
- aentry->ht_key.full_enc_key, mask);
+ aentry->ht_key.enc_key, mask);
erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
if (IS_ERR(erp_mask))
return PTR_ERR(erp_mask);
aentry->erp_mask = erp_mask;
aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
- memcpy(aentry->enc_key, aentry->ht_key.full_enc_key,
- sizeof(aentry->enc_key));
/* Compute all needed delta information and clear the delta bits
- * from the encrypted key.
+ * from the encoded key.
*/
delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask);
aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
aentry->delta_info.value =
- mlxsw_sp_acl_erp_delta_value(delta,
- aentry->ht_key.full_enc_key);
- mlxsw_sp_acl_erp_delta_clear(delta, aentry->enc_key);
+ mlxsw_sp_acl_erp_delta_value(delta, aentry->ht_key.enc_key);
+ mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
/* Add rule to the list of A-TCAM rules, assuming this
* rule is intended to A-TCAM. In case this rule does
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
index 95f63fcf4ba1..a54eedb69a3f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -249,7 +249,7 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion,
memcpy(chunk + pad_bytes, &erp_region_id,
sizeof(erp_region_id));
memcpy(chunk + key_offset,
- &aentry->enc_key[chunk_key_offsets[chunk_index]],
+ &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]],
chunk_key_len);
chunk += chunk_len;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
index d231f4d2888b..9eee229303cc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c
@@ -1217,18 +1217,6 @@ static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj,
return err ? false : true;
}
-static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2)
-{
- const struct mlxsw_sp_acl_erp_key *key1 = obj1;
- const struct mlxsw_sp_acl_erp_key *key2 = obj2;
-
- /* For hints purposes, two objects are considered equal
- * in case the masks are the same. Does not matter what
- * the "ctcam" value is.
- */
- return memcmp(key1->mask, key2->mask, sizeof(key1->mask));
-}
-
static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj,
void *obj)
{
@@ -1308,7 +1296,6 @@ static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv)
static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = {
.obj_size = sizeof(struct mlxsw_sp_acl_erp_key),
.delta_check = mlxsw_sp_acl_erp_delta_check,
- .hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp,
.delta_create = mlxsw_sp_acl_erp_delta_create,
.delta_destroy = mlxsw_sp_acl_erp_delta_destroy,
.root_create = mlxsw_sp_acl_erp_root_create,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 92a406f02eae..b1d08e958bf9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -1504,7 +1504,8 @@ mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
static int
mlxsw_sp_acl_tcam_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
struct mlxsw_sp_acl_tcam_vregion *vregion;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
index 79a1d8606512..010204f73ea4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h
@@ -167,9 +167,9 @@ struct mlxsw_sp_acl_atcam_region {
};
struct mlxsw_sp_acl_atcam_entry_ht_key {
- char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded
- * key.
- */
+ char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, minus
+ * delta bits.
+ */
u8 erp_id;
};
@@ -181,9 +181,6 @@ struct mlxsw_sp_acl_atcam_entry {
struct rhash_head ht_node;
struct list_head list; /* Member in entries_list */
struct mlxsw_sp_acl_atcam_entry_ht_key ht_key;
- char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key,
- * minus delta bits.
- */
struct {
u16 start;
u8 mask;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index c9f1c79f3f9d..2c0cfa79d138 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -399,11 +399,13 @@ void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_hdroom *hdroom)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ unsigned int max_mtu = mlxsw_sp_port->dev->max_mtu;
u16 reserve_cells;
int i;
+ max_mtu += MLXSW_PORT_ETH_FRAME_HDR;
/* Internal buffer. */
- reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_mtu,
+ reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, max_mtu,
mlxsw_sp_port->max_speed);
reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells);
hdroom->int_buf.reserve_cells = reserve_cells;
@@ -613,7 +615,9 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);
/* Buffer 9 is used for control traffic. */
- size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu);
+ size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port,
+ mlxsw_sp_port->dev->max_mtu +
+ MLXSW_PORT_ETH_FRAME_HDR);
hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9);
return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true);
@@ -1607,8 +1611,8 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
+ u16 local_port, local_port_1, first_local_port, last_local_port;
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u16 local_port, local_port_1, last_local_port;
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
u8 masked_count, current_page = 0;
unsigned long cb_priv = 0;
@@ -1628,6 +1632,7 @@ next_batch:
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, false);
mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
@@ -1645,9 +1650,12 @@ next_batch:
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
- local_port, 1);
+ local_port - first_local_port,
+ 1);
}
- mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
+ local_port - first_local_port,
+ 1);
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
&bulk_list);
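/* Example (illustrative, assuming MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE is
 * 256): local port 300 lies on page 1, so the bit set in the SBSR port
 * masks is 300 - 256 = 44, i.e. relative to the start of the page
 * rather than the absolute local port number.
 */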
@@ -1684,7 +1692,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
unsigned int sb_index)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
- u16 local_port, last_local_port;
+ u16 local_port, first_local_port, last_local_port;
LIST_HEAD(bulk_list);
unsigned int masked_count;
u8 current_page = 0;
@@ -1702,6 +1710,7 @@ next_batch:
masked_count = 0;
mlxsw_reg_sbsr_pack(sbsr_pl, true);
mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page);
+ first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE;
last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE +
MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1;
@@ -1719,9 +1728,12 @@ next_batch:
if (local_port != MLXSW_PORT_CPU_PORT) {
/* Ingress quotas are not supported for the CPU port */
mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl,
- local_port, 1);
+ local_port - first_local_port,
+ 1);
}
- mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl,
+ local_port - first_local_port,
+ 1);
for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) {
err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
&bulk_list);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index ca80af06465f..fa6eddd27ecf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -283,7 +283,7 @@ static u64 mlxsw_sp_dpipe_table_erif_size_get(void *priv)
return MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
}
-static struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = {
.matches_dump = mlxsw_sp_dpipe_table_erif_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_erif_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_erif_entries_dump,
@@ -734,7 +734,7 @@ static u64 mlxsw_sp_dpipe_table_host4_size_get(void *priv)
return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET);
}
-static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = {
.matches_dump = mlxsw_sp_dpipe_table_host4_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_host_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_host4_entries_dump,
@@ -811,7 +811,7 @@ static u64 mlxsw_sp_dpipe_table_host6_size_get(void *priv)
return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET6);
}
-static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = {
.matches_dump = mlxsw_sp_dpipe_table_host6_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_host_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_host6_entries_dump,
@@ -1230,7 +1230,7 @@ mlxsw_sp_dpipe_table_adj_size_get(void *priv)
return size;
}
-static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
+static const struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = {
.matches_dump = mlxsw_sp_dpipe_table_adj_matches_dump,
.actions_dump = mlxsw_sp_dpipe_table_adj_actions_dump,
.entries_dump = mlxsw_sp_dpipe_table_adj_entries_dump,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 0f29e9c19411..2bed8c86b7cf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -1068,7 +1068,21 @@ mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev,
}
static int
-mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info)
+mlxsw_sp_set_module_eeprom_by_page(struct net_device *dev,
+ const struct ethtool_module_eeprom *page,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 slot_index = mlxsw_sp_port->mapping.slot_index;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_set_module_eeprom_by_page(mlxsw_sp->core, slot_index,
+ module, page, extack);
+}
+
+static int
+mlxsw_sp_get_ts_info(struct net_device *netdev, struct kernel_ethtool_ts_info *info)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -1256,6 +1270,7 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_module_info = mlxsw_sp_get_module_info,
.get_module_eeprom = mlxsw_sp_get_module_eeprom,
.get_module_eeprom_by_page = mlxsw_sp_get_module_eeprom_by_page,
+ .set_module_eeprom_by_page = mlxsw_sp_set_module_eeprom_by_page,
.get_ts_info = mlxsw_sp_get_ts_info,
.get_eth_phy_stats = mlxsw_sp_get_eth_phy_stats,
.get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats,
@@ -1649,6 +1664,18 @@ mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = {
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2)
static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr[] = {
+ ETHTOOL_LINK_MODE_100000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseDR_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_100GAUI_1_100GBASE_CR_KR_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr)
+
+static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
@@ -1661,6 +1688,18 @@ mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = {
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4)
static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2[] = {
+ ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_2_200GBASE_CR2_KR2_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2)
+
+static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_400gaui_8[] = {
ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT,
ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT,
@@ -1673,6 +1712,18 @@ mlxsw_sp2_mask_ethtool_400gaui_8[] = {
ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8)
static const enum ethtool_link_mode_bit_indices
+mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4[] = {
+ ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT,
+};
+
+#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_4_400GBASE_CR4_KR4_LEN \
+ ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4)
+
+static const enum ethtool_link_mode_bit_indices
mlxsw_sp2_mask_ethtool_800gaui_8[] = {
ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT,
ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT,
@@ -1817,6 +1868,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.width = 2,
},
{
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_1_100GBASE_CR_KR,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_1_100GBASE_CR_KR_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X,
+ .speed = SPEED_100000,
+ .width = 1,
+ },
+ {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4,
.mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN,
@@ -1826,6 +1885,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.width = 4,
},
{
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_2_200GBASE_CR2_KR2,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_2_200GBASE_CR2_KR2_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_2X,
+ .speed = SPEED_200000,
+ .width = 2,
+ },
+ {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8,
.mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN,
@@ -1834,6 +1901,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = {
.width = 8,
},
{
+ .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_4_400GBASE_CR4_KR4,
+ .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4,
+ .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_4_400GBASE_CR4_KR4_LEN,
+ .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X,
+ .speed = SPEED_400000,
+ .width = 4,
+ },
+ {
.mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8,
.mask_ethtool = mlxsw_sp2_mask_ethtool_800gaui_8,
.m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_800GAUI_8_LEN,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 9fd1ca079258..f07955b5439f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -595,6 +595,10 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp,
flow_rule_match_control(rule, &match);
addr_type = match.key->addr_type;
+
+ if (flow_rule_has_control_flags(match.mask->flags,
+ f->common.extack))
+ return -EOPNOTSUPP;
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 3340b4a694c3..d761a1235994 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -8,7 +8,7 @@
#include "spectrum_ipip.h"
#include "reg.h"
-struct ip_tunnel_parm
+struct ip_tunnel_parm_kern
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev)
{
struct ip_tunnel *tun = netdev_priv(ol_dev);
@@ -24,27 +24,29 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev)
return tun->parms;
}
-static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms)
+static bool
+mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm_kern *parms)
{
- return !!(parms->i_flags & TUNNEL_KEY);
+ return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags);
}
static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms)
{
- return !!(parms->i_flags & TUNNEL_KEY);
+ return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags);
}
-static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms)
+static bool
+mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm_kern *parms)
{
- return !!(parms->o_flags & TUNNEL_KEY);
+ return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags);
}
static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms)
{
- return !!(parms->o_flags & TUNNEL_KEY);
+ return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags);
}
-static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms)
+static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm_kern *parms)
{
return mlxsw_sp_ipip_parms4_has_ikey(parms) ?
be32_to_cpu(parms->i_key) : 0;
@@ -56,7 +58,7 @@ static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms)
be32_to_cpu(parms->i_key) : 0;
}
-static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms)
+static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm_kern *parms)
{
return mlxsw_sp_ipip_parms4_has_okey(parms) ?
be32_to_cpu(parms->o_key) : 0;
@@ -69,7 +71,7 @@ static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms)
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms)
+mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm_kern *parms)
{
return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr };
}
@@ -81,7 +83,7 @@ mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms)
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms)
+mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm_kern *parms)
{
return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr };
}
@@ -96,7 +98,7 @@ union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms4;
+ struct ip_tunnel_parm_kern parms4;
struct __ip6_tnl_parm parms6;
switch (proto) {
@@ -115,7 +117,9 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ struct ip_tunnel_parm_kern parms4;
+
+ parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4;
}
@@ -124,7 +128,7 @@ static union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms4;
+ struct ip_tunnel_parm_kern parms4;
struct __ip6_tnl_parm parms6;
switch (proto) {
@@ -150,7 +154,7 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
static struct mlxsw_sp_ipip_parms
mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
return (struct mlxsw_sp_ipip_parms) {
.proto = MLXSW_SP_L3_PROTO_IPV4,
@@ -187,8 +191,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp,
{
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
+ struct ip_tunnel_parm_kern parms;
char rtdp_pl[MLXSW_REG_RTDP_LEN];
- struct ip_tunnel_parm parms;
unsigned int type_check;
bool has_ikey;
u32 daddr4;
@@ -238,12 +242,15 @@ static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
struct ip_tunnel *tunnel = netdev_priv(ol_dev);
- __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
bool inherit_ttl = tunnel->parms.iph.ttl == 0;
bool inherit_tos = tunnel->parms.iph.tos & 0x1;
+ IP_TUNNEL_DECLARE_FLAGS(okflags) = { };
+
+ /* We can't offload any other features. */
+ __set_bit(IP_TUNNEL_KEY_BIT, okflags);
- return (tunnel->parms.i_flags & ~okflags) == 0 &&
- (tunnel->parms.o_flags & ~okflags) == 0 &&
+ return ip_tunnel_flags_subset(tunnel->parms.i_flags, okflags) &&
+ ip_tunnel_flags_subset(tunnel->parms.o_flags, okflags) &&
inherit_ttl && inherit_tos &&
mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV4, ol_dev);
}
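/* Minimal sketch (hypothetical helper, not from this patch) of the
 * bitmap-based flag test used above; it relies only on helpers visible
 * in this hunk (IP_TUNNEL_DECLARE_FLAGS, __set_bit,
 * ip_tunnel_flags_subset):
 */
static bool mlxsw_only_tunnel_key_set(const unsigned long *flags)
{
 IP_TUNNEL_DECLARE_FLAGS(okflags) = { };

 /* TUNNEL_KEY is the only flag the offload can accept. */
 __set_bit(IP_TUNNEL_KEY_BIT, okflags);
 return ip_tunnel_flags_subset(flags, okflags);
}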
@@ -252,7 +259,7 @@ static struct mlxsw_sp_rif_ipip_lb_config
mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
- struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ?
@@ -439,10 +446,13 @@ static bool mlxsw_sp_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev);
bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
bool inherit_ttl = tparm.hop_limit == 0;
- __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
+ IP_TUNNEL_DECLARE_FLAGS(okflags) = { };
+
+ /* We can't offload any other features. */
+ __set_bit(IP_TUNNEL_KEY_BIT, okflags);
- return (tparm.i_flags & ~okflags) == 0 &&
- (tparm.o_flags & ~okflags) == 0 &&
+ return ip_tunnel_flags_subset(tparm.i_flags, okflags) &&
+ ip_tunnel_flags_subset(tparm.o_flags, okflags) &&
inherit_ttl && inherit_tos &&
mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index a35f009da561..a66173779641 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -9,7 +9,7 @@
#include <linux/if_tunnel.h>
#include <net/ip6_tunnel.h>
-struct ip_tunnel_parm
+struct ip_tunnel_parm_kern
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev);
struct __ip6_tnl_parm
mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
index cbb6c75a6620..5b174cb95eb8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c
@@ -1276,7 +1276,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
@@ -1661,7 +1661,7 @@ err_get_message_types:
}
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
index a8b88230959a..769095d4932d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h
@@ -11,7 +11,7 @@ struct mlxsw_sp;
struct mlxsw_sp_port;
struct mlxsw_sp_ptp_clock;
-static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct ethtool_ts_info *info)
+static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct kernel_ethtool_ts_info *info)
{
info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE;
@@ -50,7 +50,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
void mlxsw_sp1_ptp_shaper_work(struct work_struct *work);
int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int mlxsw_sp1_get_stats_count(void);
void mlxsw_sp1_get_stats_strings(u8 **p);
@@ -84,7 +84,7 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
struct hwtstamp_config *config);
int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info);
+ struct kernel_ethtool_ts_info *info);
int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core,
struct mlxsw_sp_port *mlxsw_sp_port,
@@ -152,7 +152,7 @@ static inline void mlxsw_sp1_ptp_shaper_work(struct work_struct *work)
}
static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
@@ -227,7 +227,7 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
return mlxsw_sp_ptp_get_ts_info_noptp(info);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 40ba314fbc72..800dfb64ec83 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -11450,12 +11450,16 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
{
bool old_inc_parsing_depth, new_inc_parsing_depth;
struct mlxsw_sp_mp_hash_config config = {};
+ struct net *net = mlxsw_sp_net(mlxsw_sp);
char recr2_pl[MLXSW_REG_RECR2_LEN];
unsigned long bit;
u32 seed;
int err;
- seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
+ seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).user_seed;
+ if (!seed)
+ seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0);
+
mlxsw_reg_recr2_pack(recr2_pl, seed);
mlxsw_sp_mp4_hash_init(mlxsw_sp, &config);
mlxsw_sp_mp6_hash_init(mlxsw_sp, &config);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index af50ff9e5f26..4b5fd71c897d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -413,8 +413,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
__be32 *saddrp, __be32 *daddrp)
{
struct ip_tunnel *tun = netdev_priv(to_dev);
+ struct ip_tunnel_parm_kern parms;
struct net_device *dev = NULL;
- struct ip_tunnel_parm parms;
struct rtable *rt = NULL;
struct flowi4 fl4;
@@ -451,7 +451,7 @@ mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
const struct net_device *to_dev,
struct mlxsw_sp_span_parms *sparmsp)
{
- struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ struct ip_tunnel_parm_kern tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
bool inherit_tos = tparm.iph.tos & 0x1;
@@ -461,7 +461,8 @@ mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp,
if (!(to_dev->flags & IFF_UP) ||
/* Reject tunnels with GRE keys, checksums, etc. */
- tparm.i_flags || tparm.o_flags ||
+ !ip_tunnel_flags_empty(tparm.i_flags) ||
+ !ip_tunnel_flags_empty(tparm.o_flags) ||
/* Require a fixed TTL and a TOS copied from the mirrored packet. */
inherit_ttl || !inherit_tos ||
/* A destination address may not be "any". */
@@ -539,7 +540,7 @@ mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
if (!dst || dst->error)
goto out;
- rt6 = container_of(dst, struct rt6_info, dst);
+ rt6 = dst_rt6_info(dst);
dev = dst->dev;
*saddrp = fl6.saddr;
@@ -565,7 +566,8 @@ mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp,
if (!(to_dev->flags & IFF_UP) ||
/* Reject tunnels with GRE keys, checksums, etc. */
- tparm.i_flags || tparm.o_flags ||
+ !ip_tunnel_flags_empty(tparm.i_flags) ||
+ !ip_tunnel_flags_empty(tparm.o_flags) ||
/* Require a fixed TTL and a TOS copied from the mirrored packet. */
inherit_ttl || !inherit_tos ||
/* A destination address may not be "any". */
diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig
new file mode 100644
index 000000000000..c002ede36402
--- /dev/null
+++ b/drivers/net/ethernet/meta/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Meta Platforms network device configuration
+#
+
+config NET_VENDOR_META
+ bool "Meta Platforms devices"
+ default y
+ help
+ If you have a network (Ethernet) card designed by Meta, say Y.
+ That's Meta as in the parent company of Facebook.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Meta cards. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_META
+
+config FBNIC
+ tristate "Meta Platforms Host Network Interface"
+ depends on X86_64 || COMPILE_TEST
+ depends on !S390
+ depends on MAX_SKB_FRAGS < 22
+ depends on PCI_MSI
+ select PHYLINK
+ help
+ This driver supports Meta Platforms Host Network Interface.
+
+ To compile this driver as a module, choose M here. The module
+ will be called fbnic. MSI-X interrupt support is required.
+
+endif # NET_VENDOR_META
diff --git a/drivers/net/ethernet/meta/Makefile b/drivers/net/ethernet/meta/Makefile
new file mode 100644
index 000000000000..88804f3de963
--- /dev/null
+++ b/drivers/net/ethernet/meta/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Meta Platforms network device drivers.
+#
+
+obj-$(CONFIG_FBNIC) += fbnic/
diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile
new file mode 100644
index 000000000000..9373b558fdc9
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/Makefile
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+
+#
+# Makefile for the Meta(R) Host Network Interface
+#
+
+obj-$(CONFIG_FBNIC) += fbnic.o
+
+fbnic-y := fbnic_devlink.o \
+ fbnic_fw.o \
+ fbnic_irq.o \
+ fbnic_mac.o \
+ fbnic_netdev.o \
+ fbnic_pci.o \
+ fbnic_phylink.o \
+ fbnic_rpc.o \
+ fbnic_tlv.o \
+ fbnic_txrx.o
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h
new file mode 100644
index 000000000000..ad2689bfd6cb
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_H_
+#define _FBNIC_H_
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "fbnic_csr.h"
+#include "fbnic_fw.h"
+#include "fbnic_mac.h"
+#include "fbnic_rpc.h"
+
+struct fbnic_dev {
+ struct device *dev;
+ struct net_device *netdev;
+
+ u32 __iomem *uc_addr0;
+ u32 __iomem *uc_addr4;
+ const struct fbnic_mac *mac;
+ unsigned int fw_msix_vector;
+ unsigned int pcs_msix_vector;
+ unsigned short num_irqs;
+
+ struct delayed_work service_task;
+
+ struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES];
+ struct fbnic_fw_cap fw_cap;
+ /* Lock protecting Tx Mailbox queue to prevent possible races */
+ spinlock_t fw_tx_lock;
+
+ unsigned long last_heartbeat_request;
+ unsigned long last_heartbeat_response;
+ u8 fw_heartbeat_enabled;
+
+ u64 dsn;
+ u32 mps;
+ u32 readrq;
+
+ /* Local copy of the device's TCAM */
+ struct fbnic_act_tcam act_tcam[FBNIC_RPC_TCAM_ACT_NUM_ENTRIES];
+ struct fbnic_mac_addr mac_addr[FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES];
+ u8 mac_addr_boundary;
+
+ /* Number of TCQs/RCQs available on hardware */
+ u16 max_num_queues;
+};
+
+/* Reserve entry 0 in the MSI-X "others" array until we have filled all
+ * 32 of the possible interrupt slots. By doing this we can avoid any
+ * potential conflicts should we need to enable one of the debug interrupt
+ * causes later.
+ */
+enum {
+ FBNIC_FW_MSIX_ENTRY,
+ FBNIC_PCS_MSIX_ENTRY,
+ FBNIC_NON_NAPI_VECTORS
+};
+
+static inline bool fbnic_present(struct fbnic_dev *fbd)
+{
+ return !!READ_ONCE(fbd->uc_addr0);
+}
+
+static inline void fbnic_wr32(struct fbnic_dev *fbd, u32 reg, u32 val)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr0);
+
+ if (csr)
+ writel(val, csr + reg);
+}
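+/* Note: uc_addr0 is read with READ_ONCE() and checked before every
+ * access, presumably so CSR writes quietly become no-ops once the
+ * mapping is torn down (e.g. on device removal); fbnic_present() above
+ * exposes the same check.
+ */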
+
+u32 fbnic_rd32(struct fbnic_dev *fbd, u32 reg);
+
+static inline void fbnic_wrfl(struct fbnic_dev *fbd)
+{
+ fbnic_rd32(fbd, FBNIC_MASTER_SPARE_0);
+}
+
+static inline void
+fbnic_rmw32(struct fbnic_dev *fbd, u32 reg, u32 mask, u32 val)
+{
+ u32 v;
+
+ v = fbnic_rd32(fbd, reg);
+ v &= ~mask;
+ v |= val;
+ fbnic_wr32(fbd, reg, v);
+}
+
+#define wr32(_f, _r, _v) fbnic_wr32(_f, _r, _v)
+#define rd32(_f, _r) fbnic_rd32(_f, _r)
+#define wrfl(_f) fbnic_wrfl(_f)
+
+bool fbnic_fw_present(struct fbnic_dev *fbd);
+u32 fbnic_fw_rd32(struct fbnic_dev *fbd, u32 reg);
+void fbnic_fw_wr32(struct fbnic_dev *fbd, u32 reg, u32 val);
+
+#define fw_rd32(_f, _r) fbnic_fw_rd32(_f, _r)
+#define fw_wr32(_f, _r, _v) fbnic_fw_wr32(_f, _r, _v)
+#define fw_wrfl(_f) fbnic_fw_rd32(_f, FBNIC_FW_ZERO_REG)
+
+static inline bool fbnic_bmc_present(struct fbnic_dev *fbd)
+{
+ return fbd->fw_cap.bmc_present;
+}
+
+static inline bool fbnic_init_failure(struct fbnic_dev *fbd)
+{
+ return !fbd->netdev;
+}
+
+extern char fbnic_driver_name[];
+
+void fbnic_devlink_free(struct fbnic_dev *fbd);
+struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev);
+void fbnic_devlink_register(struct fbnic_dev *fbd);
+void fbnic_devlink_unregister(struct fbnic_dev *fbd);
+
+int fbnic_fw_enable_mbx(struct fbnic_dev *fbd);
+void fbnic_fw_disable_mbx(struct fbnic_dev *fbd);
+
+int fbnic_pcs_irq_enable(struct fbnic_dev *fbd);
+void fbnic_pcs_irq_disable(struct fbnic_dev *fbd);
+
+int fbnic_request_irq(struct fbnic_dev *dev, int nr, irq_handler_t handler,
+ unsigned long flags, const char *name, void *data);
+void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data);
+void fbnic_free_irqs(struct fbnic_dev *fbd);
+int fbnic_alloc_irqs(struct fbnic_dev *fbd);
+
+enum fbnic_boards {
+ fbnic_board_asic
+};
+
+struct fbnic_info {
+ unsigned int max_num_queues;
+ unsigned int bar_mask;
+};
+
+#endif /* _FBNIC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
new file mode 100644
index 000000000000..a64360de0552
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h
@@ -0,0 +1,838 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_CSR_H_
+#define _FBNIC_CSR_H_
+
+#include <linux/bitops.h>
+
+#define CSR_BIT(nr) (1u << (nr))
+#define CSR_GENMASK(h, l) GENMASK(h, l)
+
+#define DESC_BIT(nr) BIT_ULL(nr)
+#define DESC_GENMASK(h, l) GENMASK_ULL(h, l)
+
+/* Defines the minimum firmware version required by the driver */
+#define MIN_FW_MAJOR_VERSION 0
+#define MIN_FW_MINOR_VERSION 10
+#define MIN_FW_BUILD_VERSION 6
+#define MIN_FW_VERSION_CODE (MIN_FW_MAJOR_VERSION * (1u << 24) + \
+ MIN_FW_MINOR_VERSION * (1u << 16) + \
+ MIN_FW_BUILD_VERSION)
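+/* Example (illustrative): the minimum version 0.10.6 above encodes to
+ * 0 * 2^24 + 10 * 2^16 + 6 = 655366.
+ */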
+
+#define PCI_DEVICE_ID_META_FBNIC_ASIC 0x0013
+
+#define FBNIC_CLOCK_FREQ (600 * (1000 * 1000))
+
+/* Transmit Work Descriptor Format */
+/* Length, Type, Offset Masks and Shifts */
+#define FBNIC_TWD_L2_HLEN_MASK DESC_GENMASK(5, 0)
+
+#define FBNIC_TWD_L3_TYPE_MASK DESC_GENMASK(7, 6)
+enum {
+ FBNIC_TWD_L3_TYPE_OTHER = 0,
+ FBNIC_TWD_L3_TYPE_IPV4 = 1,
+ FBNIC_TWD_L3_TYPE_IPV6 = 2,
+ FBNIC_TWD_L3_TYPE_V6V6 = 3,
+};
+
+#define FBNIC_TWD_L3_OHLEN_MASK DESC_GENMASK(15, 8)
+#define FBNIC_TWD_L3_IHLEN_MASK DESC_GENMASK(23, 16)
+
+enum {
+ FBNIC_TWD_L4_TYPE_OTHER = 0,
+ FBNIC_TWD_L4_TYPE_TCP = 1,
+ FBNIC_TWD_L4_TYPE_UDP = 2,
+};
+
+#define FBNIC_TWD_CSUM_OFFSET_MASK DESC_GENMASK(27, 24)
+#define FBNIC_TWD_L4_HLEN_MASK DESC_GENMASK(31, 28)
+
+/* Flags and Type */
+#define FBNIC_TWD_L4_TYPE_MASK DESC_GENMASK(33, 32)
+#define FBNIC_TWD_FLAG_REQ_TS DESC_BIT(34)
+#define FBNIC_TWD_FLAG_REQ_LSO DESC_BIT(35)
+#define FBNIC_TWD_FLAG_REQ_CSO DESC_BIT(36)
+#define FBNIC_TWD_FLAG_REQ_COMPLETION DESC_BIT(37)
+#define FBNIC_TWD_FLAG_DEST_MAC DESC_BIT(43)
+#define FBNIC_TWD_FLAG_DEST_BMC DESC_BIT(44)
+#define FBNIC_TWD_FLAG_DEST_FW DESC_BIT(45)
+#define FBNIC_TWD_TYPE_MASK DESC_GENMASK(47, 46)
+enum {
+ FBNIC_TWD_TYPE_META = 0,
+ FBNIC_TWD_TYPE_OPT_META = 1,
+ FBNIC_TWD_TYPE_AL = 2,
+ FBNIC_TWD_TYPE_LAST_AL = 3,
+};
+
+/* MSS and Completion Req */
+#define FBNIC_TWD_MSS_MASK DESC_GENMASK(61, 48)
+
+#define FBNIC_TWD_TS_MASK DESC_GENMASK(39, 0)
+#define FBNIC_TWD_ADDR_MASK DESC_GENMASK(45, 0)
+#define FBNIC_TWD_LEN_MASK DESC_GENMASK(63, 48)
+
+/* Tx Completion Descriptor Format */
+#define FBNIC_TCD_TYPE0_HEAD0_MASK DESC_GENMASK(15, 0)
+#define FBNIC_TCD_TYPE0_HEAD1_MASK DESC_GENMASK(31, 16)
+
+#define FBNIC_TCD_TYPE1_TS_MASK DESC_GENMASK(39, 0)
+
+#define FBNIC_TCD_STATUS_MASK DESC_GENMASK(59, 48)
+#define FBNIC_TCD_STATUS_TS_INVALID DESC_BIT(48)
+#define FBNIC_TCD_STATUS_ILLEGAL_TS_REQ DESC_BIT(49)
+#define FBNIC_TCD_TWQ1 DESC_BIT(60)
+#define FBNIC_TCD_TYPE_MASK DESC_GENMASK(62, 61)
+enum {
+ FBNIC_TCD_TYPE_0 = 0,
+ FBNIC_TCD_TYPE_1 = 1,
+};
+
+#define FBNIC_TCD_DONE DESC_BIT(63)
+
+/* Rx Buffer Descriptor Format
+ *
+ * The layout of this can vary depending on the page size of the system.
+ *
+ * If the page size is 4K then the layout simply consists of the ID in
+ * the 16 most significant bits, while the lower 46 bits are essentially
+ * the page address, with the lowest 12 bits reserved as 0 because a
+ * page is always aligned.
+ *
+ * If the page size is larger than 4K then the lower n bits of the ID and
+ * page address will be reserved for the fragment ID. This fragment will
+ * be 4K in size and will be used to index both the DMA address and the ID
+ * by the same amount.
+ */
+#define FBNIC_BD_DESC_ADDR_MASK DESC_GENMASK(45, 12)
+#define FBNIC_BD_DESC_ID_MASK DESC_GENMASK(63, 48)
+#define FBNIC_BD_FRAG_SIZE \
+ (FBNIC_BD_DESC_ADDR_MASK & ~(FBNIC_BD_DESC_ADDR_MASK - 1))
+#define FBNIC_BD_FRAG_COUNT \
+ (PAGE_SIZE / FBNIC_BD_FRAG_SIZE)
+#define FBNIC_BD_FRAG_ADDR_MASK \
+ (FBNIC_BD_DESC_ADDR_MASK & \
+ ~(FBNIC_BD_DESC_ADDR_MASK * FBNIC_BD_FRAG_COUNT))
+#define FBNIC_BD_FRAG_ID_MASK \
+ (FBNIC_BD_DESC_ID_MASK & \
+ ~(FBNIC_BD_DESC_ID_MASK * FBNIC_BD_FRAG_COUNT))
+#define FBNIC_BD_PAGE_ADDR_MASK \
+ (FBNIC_BD_DESC_ADDR_MASK & ~FBNIC_BD_FRAG_ADDR_MASK)
+#define FBNIC_BD_PAGE_ID_MASK \
+ (FBNIC_BD_DESC_ID_MASK & ~FBNIC_BD_FRAG_ID_MASK)
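+/* Decoding sketch (illustrative; assumes FIELD_GET() from
+ * <linux/bitfield.h>): for a 64-bit buffer descriptor 'bd',
+ *
+ *	page_id = FIELD_GET(FBNIC_BD_PAGE_ID_MASK, bd);
+ *	addr    = FIELD_GET(FBNIC_BD_DESC_ADDR_MASK, bd) << 12;
+ *
+ * On 4K-page systems FBNIC_BD_FRAG_COUNT is 1 and the FRAG masks
+ * collapse to zero, so only the page fields carry information.
+ */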
+
+/* Rx Completion Queue Descriptors */
+#define FBNIC_RCD_TYPE_MASK DESC_GENMASK(62, 61)
+enum {
+ FBNIC_RCD_TYPE_HDR_AL = 0,
+ FBNIC_RCD_TYPE_PAY_AL = 1,
+ FBNIC_RCD_TYPE_OPT_META = 2,
+ FBNIC_RCD_TYPE_META = 3,
+};
+
+#define FBNIC_RCD_DONE DESC_BIT(63)
+
+/* Address/Length Completion Descriptors */
+#define FBNIC_RCD_AL_BUFF_ID_MASK DESC_GENMASK(15, 0)
+#define FBNIC_RCD_AL_BUFF_FRAG_MASK (FBNIC_BD_FRAG_COUNT - 1)
+#define FBNIC_RCD_AL_BUFF_PAGE_MASK \
+ (FBNIC_RCD_AL_BUFF_ID_MASK & ~FBNIC_RCD_AL_BUFF_FRAG_MASK)
+#define FBNIC_RCD_AL_BUFF_LEN_MASK DESC_GENMASK(28, 16)
+#define FBNIC_RCD_AL_BUFF_OFF_MASK DESC_GENMASK(43, 32)
+#define FBNIC_RCD_AL_PAGE_FIN DESC_BIT(60)
+
+/* Header AL specific values */
+#define FBNIC_RCD_HDR_AL_OVERFLOW DESC_BIT(53)
+#define FBNIC_RCD_HDR_AL_DMA_HINT_MASK DESC_GENMASK(59, 54)
+enum {
+ FBNIC_RCD_HDR_AL_DMA_HINT_NONE = 0,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L2 = 1,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L3 = 2,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L4 = 4,
+};
+
+/* Optional Metadata Completion Descriptors */
+#define FBNIC_RCD_OPT_META_TS_MASK DESC_GENMASK(39, 0)
+#define FBNIC_RCD_OPT_META_ACTION_MASK DESC_GENMASK(45, 40)
+#define FBNIC_RCD_OPT_META_ACTION DESC_BIT(57)
+#define FBNIC_RCD_OPT_META_TS DESC_BIT(58)
+#define FBNIC_RCD_OPT_META_TYPE_MASK DESC_GENMASK(60, 59)
+
+/* Metadata Completion Descriptors */
+#define FBNIC_RCD_META_RSS_HASH_MASK DESC_GENMASK(31, 0)
+#define FBNIC_RCD_META_L2_CSUM_MASK DESC_GENMASK(47, 32)
+#define FBNIC_RCD_META_L3_TYPE_MASK DESC_GENMASK(49, 48)
+enum {
+ FBNIC_RCD_META_L3_TYPE_OTHER = 0,
+ FBNIC_RCD_META_L3_TYPE_IPV4 = 1,
+ FBNIC_RCD_META_L3_TYPE_IPV6 = 2,
+ FBNIC_RCD_META_L3_TYPE_V6V6 = 3,
+};
+
+#define FBNIC_RCD_META_L4_TYPE_MASK DESC_GENMASK(51, 50)
+enum {
+ FBNIC_RCD_META_L4_TYPE_OTHER = 0,
+ FBNIC_RCD_META_L4_TYPE_TCP = 1,
+ FBNIC_RCD_META_L4_TYPE_UDP = 2,
+};
+
+#define FBNIC_RCD_META_L4_CSUM_UNNECESSARY DESC_BIT(52)
+#define FBNIC_RCD_META_ERR_MAC_EOP DESC_BIT(53)
+#define FBNIC_RCD_META_ERR_TRUNCATED_FRAME DESC_BIT(54)
+#define FBNIC_RCD_META_ERR_PARSER DESC_BIT(55)
+#define FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK \
+ (FBNIC_RCD_META_ERR_MAC_EOP | FBNIC_RCD_META_ERR_TRUNCATED_FRAME)
+#define FBNIC_RCD_META_ECN DESC_BIT(60)
+
+/* Register Definitions
+ *
+ * The registers are laid out as indexes into an le32 array. As such the
+ * actual address is 4 times the index value. Below, each register is
+ * defined with three fields: name, index, and address.
+ *
+ * Name Index Address
+ *************************************************************************/
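+/* Illustrative example, using the definitions below: FBNIC_INTR_MASK(2)
+ * yields index 0x0000a, which corresponds to byte address 4 * 0xa = 0x28,
+ * i.e. 0x00020 + 4 * 2.
+ */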
+/* Interrupt Registers */
+#define FBNIC_CSR_START_INTR 0x00000 /* CSR section delimiter */
+#define FBNIC_INTR_STATUS(n) (0x00000 + (n)) /* 0x00000 + 4*n */
+#define FBNIC_INTR_STATUS_CNT 8
+#define FBNIC_INTR_MASK(n) (0x00008 + (n)) /* 0x00020 + 4*n */
+#define FBNIC_INTR_MASK_CNT 8
+#define FBNIC_INTR_SET(n) (0x00010 + (n)) /* 0x00040 + 4*n */
+#define FBNIC_INTR_SET_CNT 8
+#define FBNIC_INTR_CLEAR(n) (0x00018 + (n)) /* 0x00060 + 4*n */
+#define FBNIC_INTR_CLEAR_CNT 8
+#define FBNIC_INTR_SW_STATUS(n) (0x00020 + (n)) /* 0x00080 + 4*n */
+#define FBNIC_INTR_SW_STATUS_CNT 8
+#define FBNIC_INTR_SW_AC_MODE(n) (0x00028 + (n)) /* 0x000a0 + 4*n */
+#define FBNIC_INTR_SW_AC_MODE_CNT 8
+#define FBNIC_INTR_MASK_SET(n) (0x00030 + (n)) /* 0x000c0 + 4*n */
+#define FBNIC_INTR_MASK_SET_CNT 8
+#define FBNIC_INTR_MASK_CLEAR(n) (0x00038 + (n)) /* 0x000e0 + 4*n */
+#define FBNIC_INTR_MASK_CLEAR_CNT 8
+#define FBNIC_MAX_MSIX_VECS 256U
+#define FBNIC_INTR_MSIX_CTRL(n) (0x00040 + (n)) /* 0x00100 + 4*n */
+#define FBNIC_INTR_MSIX_CTRL_VECTOR_MASK CSR_GENMASK(7, 0)
+#define FBNIC_INTR_MSIX_CTRL_ENABLE CSR_BIT(31)
+enum {
+ FBNIC_INTR_MSIX_CTRL_PCS_IDX = 34,
+};
+
+#define FBNIC_CSR_END_INTR 0x0005f /* CSR section delimiter */
+
+/* Interrupt MSIX Registers */
+#define FBNIC_CSR_START_INTR_CQ 0x00400 /* CSR section delimiter */
+#define FBNIC_INTR_CQ_REARM(n) \
+ (0x00400 + 4 * (n)) /* 0x01000 + 16*n */
+#define FBNIC_INTR_CQ_REARM_CNT 256
+#define FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT CSR_GENMASK(13, 0)
+#define FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT_UPD_EN CSR_BIT(14)
+#define FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT CSR_GENMASK(28, 15)
+#define FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT_UPD_EN CSR_BIT(29)
+#define FBNIC_INTR_CQ_REARM_INTR_RELOAD CSR_BIT(30)
+#define FBNIC_INTR_CQ_REARM_INTR_UNMASK CSR_BIT(31)
+
+#define FBNIC_INTR_RCQ_TIMEOUT(n) \
+ (0x00401 + 4 * (n)) /* 0x01004 + 16*n */
+#define FBNIC_INTR_RCQ_TIMEOUT_CNT 256
+#define FBNIC_INTR_TCQ_TIMEOUT(n) \
+ (0x00402 + 4 * (n)) /* 0x01008 + 16*n */
+#define FBNIC_INTR_TCQ_TIMEOUT_CNT 256
+#define FBNIC_CSR_END_INTR_CQ 0x007fe /* CSR section delimiter */
+
+/* Global QM Tx registers */
+#define FBNIC_CSR_START_QM_TX 0x00800 /* CSR section delimiter */
+#define FBNIC_QM_TWQ_IDLE(n) (0x00800 + (n)) /* 0x02000 + 4*n */
+#define FBNIC_QM_TWQ_IDLE_CNT 8
+#define FBNIC_QM_TWQ_DEFAULT_META_L 0x00818 /* 0x02060 */
+#define FBNIC_QM_TWQ_DEFAULT_META_H 0x00819 /* 0x02064 */
+
+#define FBNIC_QM_TQS_CTL0 0x0081b /* 0x0206c */
+#define FBNIC_QM_TQS_CTL0_LSO_TS_MASK CSR_BIT(0)
+enum {
+ FBNIC_QM_TQS_CTL0_LSO_TS_FIRST = 0,
+ FBNIC_QM_TQS_CTL0_LSO_TS_LAST = 1,
+};
+
+#define FBNIC_QM_TQS_CTL0_PREFETCH_THRESH CSR_GENMASK(7, 1)
+enum {
+ FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN = 16,
+};
+
+#define FBNIC_QM_TQS_CTL1 0x0081c /* 0x02070 */
+#define FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS CSR_GENMASK(7, 0)
+#define FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS CSR_GENMASK(15, 8)
+#define FBNIC_QM_TQS_MTU_CTL0 0x0081d /* 0x02074 */
+#define FBNIC_QM_TQS_MTU_CTL1 0x0081e /* 0x02078 */
+#define FBNIC_QM_TQS_MTU_CTL1_BULK CSR_GENMASK(13, 0)
+#define FBNIC_QM_TCQ_IDLE(n) (0x00821 + (n)) /* 0x02084 + 4*n */
+#define FBNIC_QM_TCQ_IDLE_CNT 4
+#define FBNIC_QM_TCQ_CTL0 0x0082d /* 0x020b4 */
+#define FBNIC_QM_TCQ_CTL0_COAL_WAIT CSR_GENMASK(15, 0)
+#define FBNIC_QM_TCQ_CTL0_TICK_CYCLES CSR_GENMASK(26, 16)
+#define FBNIC_QM_TQS_IDLE(n) (0x00830 + (n)) /* 0x020c0 + 4*n */
+#define FBNIC_QM_TQS_IDLE_CNT 8
+#define FBNIC_QM_TQS_EDT_TS_RANGE 0x00849 /* 0x2124 */
+#define FBNIC_QM_TDE_IDLE(n) (0x00853 + (n)) /* 0x0214c + 4*n */
+#define FBNIC_QM_TDE_IDLE_CNT 8
+#define FBNIC_QM_TNI_TDF_CTL 0x0086c /* 0x021b0 */
+#define FBNIC_QM_TNI_TDF_CTL_MRRS CSR_GENMASK(1, 0)
+#define FBNIC_QM_TNI_TDF_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_TNI_TDF_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_TNI_TDF_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_QM_TNI_TDE_CTL 0x0086d /* 0x021b4 */
+#define FBNIC_QM_TNI_TDE_CTL_MRRS CSR_GENMASK(1, 0)
+#define FBNIC_QM_TNI_TDE_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_TNI_TDE_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_TNI_TDE_CTL_MAX_OB CSR_GENMASK(24, 12)
+#define FBNIC_QM_TNI_TDE_CTL_MRRS_1K CSR_BIT(25)
+#define FBNIC_QM_TNI_TCM_CTL 0x0086e /* 0x021b8 */
+#define FBNIC_QM_TNI_TCM_CTL_MPS CSR_GENMASK(1, 0)
+#define FBNIC_QM_TNI_TCM_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_TNI_TCM_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_TNI_TCM_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_CSR_END_QM_TX 0x00873 /* CSR section delimiter */
+
+/* Global QM Rx registers */
+#define FBNIC_CSR_START_QM_RX 0x00c00 /* CSR section delimiter */
+#define FBNIC_QM_RCQ_IDLE(n) (0x00c00 + (n)) /* 0x03000 + 4*n */
+#define FBNIC_QM_RCQ_IDLE_CNT 4
+#define FBNIC_QM_RCQ_CTL0 0x00c0c /* 0x03030 */
+#define FBNIC_QM_RCQ_CTL0_COAL_WAIT CSR_GENMASK(15, 0)
+#define FBNIC_QM_RCQ_CTL0_TICK_CYCLES CSR_GENMASK(26, 16)
+#define FBNIC_QM_HPQ_IDLE(n) (0x00c0f + (n)) /* 0x0303c + 4*n */
+#define FBNIC_QM_HPQ_IDLE_CNT 4
+#define FBNIC_QM_PPQ_IDLE(n) (0x00c13 + (n)) /* 0x0304c + 4*n */
+#define FBNIC_QM_PPQ_IDLE_CNT 4
+#define FBNIC_QM_RNI_RBP_CTL 0x00c2d /* 0x030b4 */
+#define FBNIC_QM_RNI_RBP_CTL_MRRS CSR_GENMASK(1, 0)
+#define FBNIC_QM_RNI_RBP_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_RNI_RBP_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_RNI_RBP_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_QM_RNI_RDE_CTL 0x00c2e /* 0x030b8 */
+#define FBNIC_QM_RNI_RDE_CTL_MPS CSR_GENMASK(1, 0)
+#define FBNIC_QM_RNI_RDE_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_RNI_RDE_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_RNI_RDE_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_QM_RNI_RCM_CTL 0x00c2f /* 0x030bc */
+#define FBNIC_QM_RNI_RCM_CTL_MPS CSR_GENMASK(1, 0)
+#define FBNIC_QM_RNI_RCM_CTL_CLS CSR_GENMASK(3, 2)
+#define FBNIC_QM_RNI_RCM_CTL_MAX_OT CSR_GENMASK(11, 4)
+#define FBNIC_QM_RNI_RCM_CTL_MAX_OB CSR_GENMASK(23, 12)
+#define FBNIC_CSR_END_QM_RX 0x00c34 /* CSR section delimiter */
+
+/* TCE registers */
+#define FBNIC_CSR_START_TCE 0x04000 /* CSR section delimiter */
+#define FBNIC_TCE_REG_BASE 0x04000 /* 0x10000 */
+
+#define FBNIC_TCE_LSO_CTRL 0x04000 /* 0x10000 */
+#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST CSR_GENMASK(8, 0)
+#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID CSR_GENMASK(17, 9)
+#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_END CSR_GENMASK(26, 18)
+#define FBNIC_TCE_LSO_CTRL_IPID_MODE_INC CSR_BIT(27)
+
+#define FBNIC_TCE_CSO_CTRL 0x04001 /* 0x10004 */
+#define FBNIC_TCE_CSO_CTRL_TCP_ZERO_CSUM CSR_BIT(0)
+
+#define FBNIC_TCE_TXB_CTRL 0x04002 /* 0x10008 */
+#define FBNIC_TCE_TXB_CTRL_LOAD CSR_BIT(0)
+#define FBNIC_TCE_TXB_CTRL_TCAM_ENABLE CSR_BIT(1)
+#define FBNIC_TCE_TXB_CTRL_DISABLE CSR_BIT(2)
+
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL 0x04003 /* 0x1000c */
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2 CSR_GENMASK(23, 16)
+
+#define FBNIC_TCE_TXB_TEI_Q0_CTRL 0x04004 /* 0x10010 */
+#define FBNIC_TCE_TXB_TEI_Q1_CTRL 0x04005 /* 0x10014 */
+#define FBNIC_TCE_TXB_MC_Q_CTRL 0x04006 /* 0x10018 */
+#define FBNIC_TCE_TXB_RX_TEI_Q_CTRL 0x04007 /* 0x1001c */
+#define FBNIC_TCE_TXB_RX_BMC_Q_CTRL 0x04008 /* 0x10020 */
+#define FBNIC_TCE_TXB_Q_CTRL_START CSR_GENMASK(10, 0)
+#define FBNIC_TCE_TXB_Q_CTRL_SIZE CSR_GENMASK(22, 11)
+
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL 0x04009 /* 0x10024 */
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL 0x0400a /* 0x10028 */
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2 CSR_GENMASK(23, 16)
+
+#define FBNIC_TCE_TXB_CLDR_CFG 0x0400b /* 0x1002c */
+#define FBNIC_TCE_TXB_CLDR_CFG_NUM_SLOT CSR_GENMASK(5, 0)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG(n) (0x0400c + (n)) /* 0x10030 + 4*n */
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_CNT 16
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_0 CSR_GENMASK(1, 0)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_1 CSR_GENMASK(3, 2)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_2 CSR_GENMASK(5, 4)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_3 CSR_GENMASK(7, 6)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_0 CSR_GENMASK(9, 8)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_1 CSR_GENMASK(11, 10)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_2 CSR_GENMASK(13, 12)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_3 CSR_GENMASK(15, 14)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_0 CSR_GENMASK(17, 16)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_1 CSR_GENMASK(19, 18)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_2 CSR_GENMASK(21, 20)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_3 CSR_GENMASK(23, 22)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_0 CSR_GENMASK(25, 24)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_1 CSR_GENMASK(27, 26)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_2 CSR_GENMASK(29, 28)
+#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_3 CSR_GENMASK(31, 30)
+
+#define FBNIC_TCE_BMC_MAX_PKTSZ 0x0403a /* 0x100e8 */
+#define FBNIC_TCE_BMC_MAX_PKTSZ_TX CSR_GENMASK(13, 0)
+#define FBNIC_TCE_BMC_MAX_PKTSZ_RX CSR_GENMASK(27, 14)
+#define FBNIC_TCE_MC_MAX_PKTSZ 0x0403b /* 0x100ec */
+#define FBNIC_TCE_MC_MAX_PKTSZ_TMI CSR_GENMASK(13, 0)
+
+#define FBNIC_TCE_SOP_PROT_CTRL 0x0403c /* 0x100f0 */
+#define FBNIC_TCE_SOP_PROT_CTRL_TBI CSR_GENMASK(7, 0)
+#define FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM CSR_GENMASK(14, 8)
+#define FBNIC_TCE_SOP_PROT_CTRL_TTI_CM CSR_GENMASK(18, 15)
+
+#define FBNIC_TCE_DROP_CTRL 0x0403d /* 0x100f4 */
+#define FBNIC_TCE_DROP_CTRL_TTI_CM_DROP_EN CSR_BIT(0)
+#define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1)
+#define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2)
+
+#define FBNIC_TCE_TXB_TX_BMC_Q_CTRL 0x0404B /* 0x1012c */
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL 0x0404C /* 0x10130 */
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT 0x0404D /* 0x10134 */
+#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT \
+ 0x0404E /* 0x10138 */
+#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT 0x0404F /* 0x1013c */
+#define FBNIC_CSR_END_TCE 0x04050 /* CSR section delimiter */
+
+/* TMI registers */
+#define FBNIC_CSR_START_TMI 0x04400 /* CSR section delimiter */
+#define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */
+#define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */
+#define FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0)
+#define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */
+/* Rx Buffer Registers */
+#define FBNIC_CSR_START_RXB 0x08000 /* CSR section delimiter */
+enum {
+ FBNIC_RXB_FIFO_MC = 0,
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_FIFO_NET_TO_BMC = 3,
+ FBNIC_RXB_FIFO_HOST = 4,
+ /* Unused */
+ FBNIC_RXB_FIFO_BMC_TO_HOST = 6,
+ /* Unused */
+ FBNIC_RXB_FIFO_INDICES = 8
+};
+
+#define FBNIC_RXB_CT_SIZE(n) (0x08000 + (n)) /* 0x20000 + 4*n */
+#define FBNIC_RXB_CT_SIZE_CNT 8
+#define FBNIC_RXB_CT_SIZE_HEADER CSR_GENMASK(5, 0)
+#define FBNIC_RXB_CT_SIZE_PAYLOAD CSR_GENMASK(11, 6)
+#define FBNIC_RXB_CT_SIZE_ENABLE CSR_BIT(12)
+#define FBNIC_RXB_PAUSE_DROP_CTRL 0x08008 /* 0x20020 */
+#define FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE CSR_GENMASK(7, 0)
+#define FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE CSR_GENMASK(15, 8)
+#define FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE CSR_GENMASK(23, 16)
+#define FBNIC_RXB_PAUSE_DROP_CTRL_PS_ENABLE CSR_GENMASK(27, 24)
+#define FBNIC_RXB_PAUSE_THLD(n) (0x08009 + (n)) /* 0x20024 + 4*n */
+#define FBNIC_RXB_PAUSE_THLD_CNT 8
+#define FBNIC_RXB_PAUSE_THLD_ON CSR_GENMASK(12, 0)
+#define FBNIC_RXB_PAUSE_THLD_OFF CSR_GENMASK(25, 13)
+#define FBNIC_RXB_DROP_THLD(n) (0x08011 + (n)) /* 0x20044 + 4*n */
+#define FBNIC_RXB_DROP_THLD_CNT 8
+#define FBNIC_RXB_DROP_THLD_ON CSR_GENMASK(12, 0)
+#define FBNIC_RXB_DROP_THLD_OFF CSR_GENMASK(25, 13)
+#define FBNIC_RXB_ECN_THLD(n) (0x0801e + (n)) /* 0x20078 + 4*n */
+#define FBNIC_RXB_ECN_THLD_CNT 8
+#define FBNIC_RXB_ECN_THLD_ON CSR_GENMASK(12, 0)
+#define FBNIC_RXB_ECN_THLD_OFF CSR_GENMASK(25, 13)
+#define FBNIC_RXB_PBUF_CFG(n) (0x08027 + (n)) /* 0x2009c + 4*n */
+#define FBNIC_RXB_PBUF_CFG_CNT 8
+#define FBNIC_RXB_PBUF_BASE_ADDR CSR_GENMASK(12, 0)
+#define FBNIC_RXB_PBUF_SIZE CSR_GENMASK(21, 13)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0 0x0802f /* 0x200bc */
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0 CSR_GENMASK(7, 0)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM1 CSR_GENMASK(15, 8)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2 CSR_GENMASK(23, 16)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM3 CSR_GENMASK(31, 24)
+#define FBNIC_RXB_DWRR_RDE_WEIGHT1 0x08030 /* 0x200c0 */
+#define FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4 CSR_GENMASK(7, 0)
+#define FBNIC_RXB_DWRR_BMC_WEIGHT 0x08031 /* 0x200c4 */
+#define FBNIC_RXB_CLDR_PRIO_CFG(n) (0x08034 + (n)) /* 0x200d0 + 4*n */
+#define FBNIC_RXB_CLDR_PRIO_CFG_CNT 16
+#define FBNIC_RXB_ENDIAN_FCS 0x08044 /* 0x20110 */
+enum {
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_DEQUEUE_BMC = 2,
+ FBNIC_RXB_DEQUEUE_HOST = 3,
+ FBNIC_RXB_DEQUEUE_INDICES = 4
+};
+
+#define FBNIC_RXB_PBUF_CREDIT(n) (0x08047 + (n)) /* 0x2011C + 4*n */
+#define FBNIC_RXB_PBUF_CREDIT_CNT 8
+#define FBNIC_RXB_PBUF_CREDIT_MASK CSR_GENMASK(13, 0)
+#define FBNIC_RXB_INTF_CREDIT 0x0804f /* 0x2013C */
+#define FBNIC_RXB_INTF_CREDIT_MASK0 CSR_GENMASK(3, 0)
+#define FBNIC_RXB_INTF_CREDIT_MASK1 CSR_GENMASK(7, 4)
+#define FBNIC_RXB_INTF_CREDIT_MASK2 CSR_GENMASK(11, 8)
+#define FBNIC_RXB_INTF_CREDIT_MASK3 CSR_GENMASK(15, 12)
+
+#define FBNIC_RXB_PAUSE_EVENT_CNT(n) (0x08053 + (n)) /* 0x2014c + 4*n */
+#define FBNIC_RXB_DROP_FRMS_STS(n) (0x08057 + (n)) /* 0x2015c + 4*n */
+#define FBNIC_RXB_DROP_BYTES_STS_L(n) \
+ (0x08080 + 2 * (n)) /* 0x20200 + 8*n */
+#define FBNIC_RXB_DROP_BYTES_STS_H(n) \
+ (0x08081 + 2 * (n)) /* 0x20204 + 8*n */
+#define FBNIC_RXB_TRUN_FRMS_STS(n) (0x08091 + (n)) /* 0x20244 + 4*n */
+#define FBNIC_RXB_TRUN_BYTES_STS_L(n) \
+ (0x080c0 + 2 * (n)) /* 0x20300 + 8*n */
+#define FBNIC_RXB_TRUN_BYTES_STS_H(n) \
+ (0x080c1 + 2 * (n)) /* 0x20304 + 8*n */
+#define FBNIC_RXB_TRANS_PAUSE_STS(n) (0x080d1 + (n)) /* 0x20344 + 4*n */
+#define FBNIC_RXB_TRANS_DROP_STS(n) (0x080d9 + (n)) /* 0x20364 + 4*n */
+#define FBNIC_RXB_TRANS_ECN_STS(n) (0x080e1 + (n)) /* 0x20384 + 4*n */
+enum {
+ FBNIC_RXB_ENQUEUE_NET = 0,
+ FBNIC_RXB_ENQUEUE_BMC = 1,
+ /* Unused */
+ /* Unused */
+ FBNIC_RXB_ENQUEUE_INDICES = 4
+};
+
+#define FBNIC_RXB_DRBO_FRM_CNT_SRC(n) (0x080f9 + (n)) /* 0x203e4 + 4*n */
+#define FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(n) \
+ (0x080fd + (n)) /* 0x203f4 + 4*n */
+#define FBNIC_RXB_DRBO_BYTE_CNT_SRC_H(n) \
+ (0x08101 + (n)) /* 0x20404 + 4*n */
+#define FBNIC_RXB_INTF_FRM_CNT_DST(n) (0x08105 + (n)) /* 0x20414 + 4*n */
+#define FBNIC_RXB_INTF_BYTE_CNT_DST_L(n) \
+ (0x08109 + (n)) /* 0x20424 + 4*n */
+#define FBNIC_RXB_INTF_BYTE_CNT_DST_H(n) \
+ (0x0810d + (n)) /* 0x20434 + 4*n */
+#define FBNIC_RXB_PBUF_FRM_CNT_DST(n) (0x08111 + (n)) /* 0x20444 + 4*n */
+#define FBNIC_RXB_PBUF_BYTE_CNT_DST_L(n) \
+ (0x08115 + (n)) /* 0x20454 + 4*n */
+#define FBNIC_RXB_PBUF_BYTE_CNT_DST_H(n) \
+ (0x08119 + (n)) /* 0x20464 + 4*n */
+
+#define FBNIC_RXB_PBUF_FIFO_LEVEL(n) (0x0811d + (n)) /* 0x20474 + 4*n */
+
+#define FBNIC_RXB_INTEGRITY_ERR(n) (0x0812f + (n)) /* 0x204bc + 4*n */
+#define FBNIC_RXB_MAC_ERR(n) (0x08133 + (n)) /* 0x204cc + 4*n */
+#define FBNIC_RXB_PARSER_ERR(n) (0x08137 + (n)) /* 0x204dc + 4*n */
+#define FBNIC_RXB_FRM_ERR(n) (0x0813b + (n)) /* 0x204ec + 4*n */
+
+#define FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT 0x08143 /* 0x2050c */
+#define FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT 0x08144 /* 0x20510 */
+#define FBNIC_CSR_END_RXB 0x081b1 /* CSR section delimiter */
+
+/* Rx Parser and Classifier Registers */
+#define FBNIC_CSR_START_RPC 0x08400 /* CSR section delimiter */
+#define FBNIC_RPC_RMI_CONFIG 0x08400 /* 0x21000 */
+#define FBNIC_RPC_RMI_CONFIG_OH_BYTES CSR_GENMASK(4, 0)
+#define FBNIC_RPC_RMI_CONFIG_FCS_PRESENT CSR_BIT(8)
+#define FBNIC_RPC_RMI_CONFIG_ENABLE CSR_BIT(12)
+#define FBNIC_RPC_RMI_CONFIG_MTU CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_ACT_TBL0_DEFAULT 0x0840a /* 0x21028 */
+#define FBNIC_RPC_ACT_TBL0_DROP CSR_BIT(0)
+#define FBNIC_RPC_ACT_TBL0_DEST_MASK CSR_GENMASK(3, 1)
+enum {
+ FBNIC_RPC_ACT_TBL0_DEST_HOST = 1,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC = 2,
+ FBNIC_RPC_ACT_TBL0_DEST_EI = 4,
+};
+
+#define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16)
+#define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30)
+
+#define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */
+#define FBNIC_RPC_ACT_TBL1_RSS_ENA_MASK CSR_GENMASK(15, 0)
+enum {
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_IP_SRC = 1,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_IP_DST = 2,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_SRC = 4,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_DST = 8,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L2_DA = 16,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_RSS_BYTE = 32,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_IV6_FL_LBL = 64,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_OV6_FL_LBL = 128,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_DSCP = 256,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L3_PROT = 512,
+ FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_PROT = 1024,
+};
+
+#define FBNIC_RPC_RSS_KEY(n) (0x0840c + (n)) /* 0x21030 + 4*n */
+#define FBNIC_RPC_RSS_KEY_BIT_LEN 425
+#define FBNIC_RPC_RSS_KEY_BYTE_LEN \
+ DIV_ROUND_UP(FBNIC_RPC_RSS_KEY_BIT_LEN, 8)
+#define FBNIC_RPC_RSS_KEY_DWORD_LEN \
+ DIV_ROUND_UP(FBNIC_RPC_RSS_KEY_BIT_LEN, 32)
+#define FBNIC_RPC_RSS_KEY_LAST_IDX \
+ (FBNIC_RPC_RSS_KEY_DWORD_LEN - 1)
+#define FBNIC_RPC_RSS_KEY_LAST_MASK \
+ CSR_GENMASK(31, \
+ FBNIC_RPC_RSS_KEY_DWORD_LEN * 32 - \
+ FBNIC_RPC_RSS_KEY_BIT_LEN)
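+
+/* Illustrative math: the 425 key bits span DIV_ROUND_UP(425, 32) = 14
+ * dwords, so the last dword (index 13) holds only 425 - 13 * 32 = 9 valid
+ * bits, i.e. CSR_GENMASK(31, 23).
+ */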
+
+#define FBNIC_RPC_TCAM_MACDA_VALIDATE 0x0852d /* 0x214b4 */
+#define FBNIC_CSR_END_RPC 0x0856b /* CSR section delimiter */
+
+/* RPC RAM Registers */
+
+#define FBNIC_CSR_START_RPC_RAM 0x08800 /* CSR section delimiter */
+#define FBNIC_RPC_ACT_TBL0(n) (0x08800 + (n)) /* 0x22000 + 4*n */
+#define FBNIC_RPC_ACT_TBL1(n) (0x08840 + (n)) /* 0x22100 + 4*n */
+#define FBNIC_RPC_ACT_TBL_NUM_ENTRIES 64
+
+/* TCAM Tables */
+#define FBNIC_RPC_TCAM_VALIDATE CSR_BIT(31)
+
+/* 64 Action TCAM Entries, 12 registers
+ * 3 mixed, src port, dst port, 6 L4 words, and Validate
+ */
+#define FBNIC_RPC_TCAM_ACT(m, n) \
+ (0x08880 + 0x40 * (n) + (m)) /* 0x22200 + 256*n + 4*m */
+
+#define FBNIC_RPC_TCAM_ACT_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_ACT_MASK CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_TCAM_MACDA(m, n) \
+ (0x08b80 + 0x20 * (n) + (m)) /* 0x022e00 + 128*n + 4*m */
+#define FBNIC_RPC_TCAM_MACDA_VALUE CSR_GENMASK(15, 0)
+#define FBNIC_RPC_TCAM_MACDA_MASK CSR_GENMASK(31, 16)
+
+#define FBNIC_RPC_RSS_TBL(n, m) \
+ (0x08d20 + 0x100 * (n) + (m)) /* 0x023480 + 1024*n + 4*m */
+#define FBNIC_RPC_RSS_TBL_COUNT 2
+#define FBNIC_RPC_RSS_TBL_SIZE 256
+#define FBNIC_CSR_END_RPC_RAM 0x08f1f /* CSR section delimiter */
+
+/* Fab Registers */
+#define FBNIC_CSR_START_FAB 0x0C000 /* CSR section delimiter */
+#define FBNIC_FAB_AXI4_AR_SPACER_2_CFG 0x0C005 /* 0x30014 */
+#define FBNIC_FAB_AXI4_AR_SPACER_MASK CSR_BIT(16)
+#define FBNIC_FAB_AXI4_AR_SPACER_THRESHOLD CSR_GENMASK(15, 0)
+#define FBNIC_CSR_END_FAB 0x0C020 /* CSR section delimiter */
+
+/* Master Registers */
+#define FBNIC_CSR_START_MASTER 0x0C400 /* CSR section delimiter */
+#define FBNIC_MASTER_SPARE_0 0x0C41B /* 0x3106c */
+#define FBNIC_CSR_END_MASTER 0x0C452 /* CSR section delimiter */
+
+/* MAC MAC registers (ASIC only) */
+#define FBNIC_CSR_START_MAC_MAC 0x11000 /* CSR section delimiter */
+#define FBNIC_MAC_COMMAND_CONFIG 0x11002 /* 0x44008 */
+#define FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS CSR_BIT(29)
+#define FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS CSR_BIT(28)
+#define FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS CSR_BIT(27)
+#define FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN CSR_BIT(11)
+#define FBNIC_MAC_COMMAND_CONFIG_LOOPBACK_EN CSR_BIT(10)
+#define FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN CSR_BIT(4)
+#define FBNIC_MAC_COMMAND_CONFIG_RX_ENA CSR_BIT(1)
+#define FBNIC_MAC_COMMAND_CONFIG_TX_ENA CSR_BIT(0)
+#define FBNIC_MAC_CL01_PAUSE_QUANTA 0x11015 /* 0x44054 */
+#define FBNIC_MAC_CL01_QUANTA_THRESH 0x11019 /* 0x44064 */
+#define FBNIC_CSR_END_MAC_MAC 0x11028 /* CSR section delimiter */
+
+/* Signals from MAC, AN, PCS, and LED CSR registers (ASIC only) */
+#define FBNIC_CSR_START_SIG 0x11800 /* CSR section delimiter */
+#define FBNIC_SIG_MAC_IN0 0x11800 /* 0x46000 */
+#define FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK CSR_BIT(14)
+#define FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK CSR_BIT(13)
+#define FBNIC_SIG_MAC_IN0_RESET_TX_CLK CSR_BIT(12)
+#define FBNIC_SIG_MAC_IN0_RESET_RX_CLK CSR_BIT(11)
+#define FBNIC_SIG_MAC_IN0_TX_CRC CSR_BIT(8)
+#define FBNIC_SIG_MAC_IN0_CFG_MODE128 CSR_BIT(10)
+#define FBNIC_SIG_PCS_OUT0 0x11808 /* 0x46020 */
+#define FBNIC_SIG_PCS_OUT0_LINK CSR_BIT(27)
+#define FBNIC_SIG_PCS_OUT0_BLOCK_LOCK CSR_GENMASK(24, 5)
+#define FBNIC_SIG_PCS_OUT0_AMPS_LOCK CSR_GENMASK(4, 1)
+#define FBNIC_SIG_PCS_OUT1 0x11809 /* 0x46024 */
+#define FBNIC_SIG_PCS_OUT1_FCFEC_LOCK CSR_GENMASK(11, 8)
+#define FBNIC_SIG_PCS_INTR_STS 0x11814 /* 0x46050 */
+#define FBNIC_SIG_PCS_INTR_LINK_DOWN CSR_BIT(1)
+#define FBNIC_SIG_PCS_INTR_LINK_UP CSR_BIT(0)
+#define FBNIC_SIG_PCS_INTR_MASK 0x11816 /* 0x46058 */
+#define FBNIC_CSR_END_SIG 0x1184e /* CSR section delimiter */
+
+/* PUL User Registers */
+#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */
+#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */
+#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18)
+#define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */
+#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18)
+#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */
+
+/* Queue Registers
+ *
+ * The queue register offsets are relative to a given queue grouping. To
+ * find the absolute register offset it is necessary to combine
+ * FBNIC_QUEUE(n) with the relative register offset, like so:
+ * FBNIC_QUEUE_TWQ0_CTL(n) == FBNIC_QUEUE(n) + FBNIC_QUEUE_TWQ0_CTL
+ */
+#define FBNIC_CSR_START_QUEUE 0x40000 /* CSR section delimiter */
+#define FBNIC_QUEUE_STRIDE 0x400 /* 0x1000 */
+#define FBNIC_QUEUE(n)\
+ (0x40000 + FBNIC_QUEUE_STRIDE * (n)) /* 0x100000 + 4096*n */
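+
+/* Illustrative example, using the definitions below: queue 1's TWQ0
+ * control register is FBNIC_QUEUE(1) + FBNIC_QUEUE_TWQ0_CTL
+ * = 0x40400 + 0x000, which is byte address 0x101000 (0x100000 + 4096 * 1).
+ */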
+
+#define FBNIC_QUEUE_TWQ0_CTL 0x000 /* 0x000 */
+#define FBNIC_QUEUE_TWQ1_CTL 0x001 /* 0x004 */
+#define FBNIC_QUEUE_TWQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_TWQ_CTL_ENABLE CSR_BIT(1)
+#define FBNIC_QUEUE_TWQ0_TAIL 0x002 /* 0x008 */
+#define FBNIC_QUEUE_TWQ1_TAIL 0x003 /* 0x00c */
+
+#define FBNIC_QUEUE_TWQ0_SIZE 0x00a /* 0x028 */
+#define FBNIC_QUEUE_TWQ1_SIZE 0x00b /* 0x02c */
+#define FBNIC_QUEUE_TWQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_TWQ0_BAL 0x020 /* 0x080 */
+#define FBNIC_QUEUE_BAL_MASK CSR_GENMASK(31, 7)
+#define FBNIC_QUEUE_TWQ0_BAH 0x021 /* 0x084 */
+#define FBNIC_QUEUE_TWQ1_BAL 0x022 /* 0x088 */
+#define FBNIC_QUEUE_TWQ1_BAH 0x023 /* 0x08c */
+
+/* Tx Completion Queue Registers */
+#define FBNIC_QUEUE_TCQ_CTL 0x080 /* 0x200 */
+#define FBNIC_QUEUE_TCQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_TCQ_CTL_ENABLE CSR_BIT(1)
+
+#define FBNIC_QUEUE_TCQ_HEAD 0x081 /* 0x204 */
+
+#define FBNIC_QUEUE_TCQ_SIZE 0x084 /* 0x210 */
+#define FBNIC_QUEUE_TCQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_TCQ_BAL 0x0a0 /* 0x280 */
+#define FBNIC_QUEUE_TCQ_BAH 0x0a1 /* 0x284 */
+
+/* Tx Interrupt Manager Registers */
+#define FBNIC_QUEUE_TIM_CTL 0x0c0 /* 0x300 */
+#define FBNIC_QUEUE_TIM_CTL_MSIX_MASK CSR_GENMASK(7, 0)
+
+#define FBNIC_QUEUE_TIM_THRESHOLD 0x0c1 /* 0x304 */
+#define FBNIC_QUEUE_TIM_THRESHOLD_TWD_MASK CSR_GENMASK(14, 0)
+
+#define FBNIC_QUEUE_TIM_CLEAR 0x0c2 /* 0x308 */
+#define FBNIC_QUEUE_TIM_CLEAR_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_TIM_SET 0x0c3 /* 0x30c */
+#define FBNIC_QUEUE_TIM_SET_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_TIM_MASK 0x0c4 /* 0x310 */
+#define FBNIC_QUEUE_TIM_MASK_MASK CSR_BIT(0)
+
+#define FBNIC_QUEUE_TIM_TIMER 0x0c5 /* 0x314 */
+
+#define FBNIC_QUEUE_TIM_COUNTS 0x0c6 /* 0x318 */
+#define FBNIC_QUEUE_TIM_COUNTS_CNT1_MASK CSR_GENMASK(30, 16)
+#define FBNIC_QUEUE_TIM_COUNTS_CNT0_MASK CSR_GENMASK(14, 0)
+
+/* Rx Completion Queue Registers */
+#define FBNIC_QUEUE_RCQ_CTL 0x200 /* 0x800 */
+#define FBNIC_QUEUE_RCQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_RCQ_CTL_ENABLE CSR_BIT(1)
+
+#define FBNIC_QUEUE_RCQ_HEAD 0x201 /* 0x804 */
+
+#define FBNIC_QUEUE_RCQ_SIZE 0x204 /* 0x810 */
+#define FBNIC_QUEUE_RCQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_RCQ_BAL 0x220 /* 0x880 */
+#define FBNIC_QUEUE_RCQ_BAH 0x221 /* 0x884 */
+
+/* Rx Buffer Descriptor Queue Registers */
+#define FBNIC_QUEUE_BDQ_CTL 0x240 /* 0x900 */
+#define FBNIC_QUEUE_BDQ_CTL_RESET CSR_BIT(0)
+#define FBNIC_QUEUE_BDQ_CTL_ENABLE CSR_BIT(1)
+#define FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE CSR_BIT(30)
+
+#define FBNIC_QUEUE_BDQ_HPQ_TAIL 0x241 /* 0x904 */
+#define FBNIC_QUEUE_BDQ_PPQ_TAIL 0x242 /* 0x908 */
+
+#define FBNIC_QUEUE_BDQ_HPQ_SIZE 0x247 /* 0x91c */
+#define FBNIC_QUEUE_BDQ_PPQ_SIZE 0x248 /* 0x920 */
+#define FBNIC_QUEUE_BDQ_SIZE_MASK CSR_GENMASK(3, 0)
+
+#define FBNIC_QUEUE_BDQ_HPQ_BAL 0x260 /* 0x980 */
+#define FBNIC_QUEUE_BDQ_HPQ_BAH 0x261 /* 0x984 */
+#define FBNIC_QUEUE_BDQ_PPQ_BAL 0x262 /* 0x988 */
+#define FBNIC_QUEUE_BDQ_PPQ_BAH 0x263 /* 0x98c */
+
+/* Rx DMA Engine Configuration */
+#define FBNIC_QUEUE_RDE_CTL0 0x2a0 /* 0xa80 */
+#define FBNIC_QUEUE_RDE_CTL0_EN_HDR_SPLIT CSR_BIT(31)
+#define FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK CSR_GENMASK(30, 29)
+enum {
+ FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE = 0,
+ FBNIC_QUEUE_RDE_CTL0_DROP_WAIT = 1,
+ FBNIC_QUEUE_RDE_CTL0_DROP_NEVER = 2,
+};
+
+#define FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK CSR_GENMASK(28, 20)
+#define FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK CSR_GENMASK(19, 11)
+
+#define FBNIC_QUEUE_RDE_CTL1 0x2a1 /* 0xa84 */
+#define FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK CSR_GENMASK(24, 12)
+#define FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK CSR_GENMASK(11, 9)
+#define FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK CSR_GENMASK(8, 6)
+#define FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK CSR_GENMASK(5, 2)
+#define FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_MASK CSR_GENMASK(1, 0)
+enum {
+ FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_NONE = 0,
+ FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_ALL = 1,
+ FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_RSS = 2,
+};
+
+/* Rx Interrupt Manager Registers */
+#define FBNIC_QUEUE_RIM_CTL 0x2c0 /* 0xb00 */
+#define FBNIC_QUEUE_RIM_CTL_MSIX_MASK CSR_GENMASK(7, 0)
+
+#define FBNIC_QUEUE_RIM_THRESHOLD 0x2c1 /* 0xb04 */
+#define FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK CSR_GENMASK(14, 0)
+
+#define FBNIC_QUEUE_RIM_CLEAR 0x2c2 /* 0xb08 */
+#define FBNIC_QUEUE_RIM_CLEAR_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_RIM_SET 0x2c3 /* 0xb0c */
+#define FBNIC_QUEUE_RIM_SET_MASK CSR_BIT(0)
+#define FBNIC_QUEUE_RIM_MASK 0x2c4 /* 0xb10 */
+#define FBNIC_QUEUE_RIM_MASK_MASK CSR_BIT(0)
+
+#define FBNIC_QUEUE_RIM_COAL_STATUS 0x2c5 /* 0xb14 */
+#define FBNIC_QUEUE_RIM_RCD_COUNT_MASK CSR_GENMASK(30, 16)
+#define FBNIC_QUEUE_RIM_TIMER_MASK CSR_GENMASK(13, 0)
+#define FBNIC_MAX_QUEUES 128
+#define FBNIC_CSR_END_QUEUE (0x40000 + 0x400 * FBNIC_MAX_QUEUES - 1)
+
+/* BAR 4 CSRs */
+
+/* The IPC mailbox consists of 32 mailboxes, with each mailbox consisting
+ * of 32 4-byte registers. We will use 2 registers per descriptor, so the
+ * effective length of each mailbox is reduced to 16 descriptors.
+ *
+ * Currently we use an offset of 0x6000 on BAR4 for the mailbox so we just
+ * have to do the math and determine the offset based on the mailbox
+ * direction and index inside that mailbox.
+ */
+#define FBNIC_IPC_MBX_DESC_LEN 16
+#define FBNIC_IPC_MBX(mbx_idx, desc_idx) \
+ ((((mbx_idx) * FBNIC_IPC_MBX_DESC_LEN + (desc_idx)) * 2) + 0x6000)
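+
+/* Illustrative example: descriptor 3 of the Tx mailbox (mbx_idx 1) sits
+ * at register index ((1 * 16 + 3) * 2) + 0x6000 = 0x6026, with its upper
+ * 32 bits in the following register at 0x6027.
+ */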
+
+/* Use first register in mailbox to flush writes */
+#define FBNIC_FW_ZERO_REG FBNIC_IPC_MBX(0, 0)
+
+enum {
+ FBNIC_IPC_MBX_RX_IDX,
+ FBNIC_IPC_MBX_TX_IDX,
+ FBNIC_IPC_MBX_INDICES,
+};
+
+#define FBNIC_IPC_MBX_DESC_LEN_MASK DESC_GENMASK(63, 48)
+#define FBNIC_IPC_MBX_DESC_EOM DESC_BIT(46)
+#define FBNIC_IPC_MBX_DESC_ADDR_MASK DESC_GENMASK(45, 3)
+#define FBNIC_IPC_MBX_DESC_FW_CMPL DESC_BIT(1)
+#define FBNIC_IPC_MBX_DESC_HOST_CMPL DESC_BIT(0)
+
+#endif /* _FBNIC_CSR_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
new file mode 100644
index 000000000000..e87049dfd223
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <asm/unaligned.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <net/devlink.h>
+
+#include "fbnic.h"
+
+#define FBNIC_SN_STR_LEN 24
+
+static int fbnic_devlink_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ struct fbnic_dev *fbd = devlink_priv(devlink);
+ int err;
+
+ if (fbd->dsn) {
+ unsigned char serial[FBNIC_SN_STR_LEN];
+ u8 dsn[8];
+
+ put_unaligned_be64(fbd->dsn, dsn);
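+		/* %8phD renders the 8-byte DSN as dash-separated hex,
+		 * e.g. "01-02-03-04-05-06-07-08" (23 chars plus NUL = 24)
+		 */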
+ err = snprintf(serial, FBNIC_SN_STR_LEN, "%8phD", dsn);
+ if (err < 0)
+ return err;
+
+ err = devlink_info_serial_number_put(req, serial);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct devlink_ops fbnic_devlink_ops = {
+ .info_get = fbnic_devlink_info_get,
+};
+
+void fbnic_devlink_free(struct fbnic_dev *fbd)
+{
+ struct devlink *devlink = priv_to_devlink(fbd);
+
+ devlink_free(devlink);
+}
+
+struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev)
+{
+ void __iomem * const *iomap_table;
+ struct devlink *devlink;
+ struct fbnic_dev *fbd;
+
+ devlink = devlink_alloc(&fbnic_devlink_ops, sizeof(struct fbnic_dev),
+ &pdev->dev);
+ if (!devlink)
+ return NULL;
+
+ fbd = devlink_priv(devlink);
+ pci_set_drvdata(pdev, fbd);
+ fbd->dev = &pdev->dev;
+
+ iomap_table = pcim_iomap_table(pdev);
+ fbd->uc_addr0 = iomap_table[0];
+ fbd->uc_addr4 = iomap_table[4];
+
+ fbd->dsn = pci_get_dsn(pdev);
+ fbd->mps = pcie_get_mps(pdev);
+ fbd->readrq = pcie_get_readrq(pdev);
+
+ fbd->mac_addr_boundary = FBNIC_RPC_TCAM_MACDA_DEFAULT_BOUNDARY;
+
+ return fbd;
+}
+
+void fbnic_devlink_register(struct fbnic_dev *fbd)
+{
+ struct devlink *devlink = priv_to_devlink(fbd);
+
+ devlink_register(devlink);
+}
+
+void fbnic_devlink_unregister(struct fbnic_dev *fbd)
+{
+ struct devlink *devlink = priv_to_devlink(fbd);
+
+ devlink_unregister(devlink);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h b/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h
new file mode 100644
index 000000000000..809ba6729442
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#define DRV_NAME "fbnic"
+#define DRV_SUMMARY "Meta(R) Host Network Interface Driver"
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
new file mode 100644
index 000000000000..0c6e1b4c119b
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/dev_printk.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_tlv.h"
+
+static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
+ int desc_idx, u64 desc)
+{
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+
+ fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
+ fw_wrfl(fbd);
+ fw_wr32(fbd, desc_offset, lower_32_bits(desc));
+}
+
+static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
+{
+ u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
+ u64 desc;
+
+ desc = fw_rd32(fbd, desc_offset);
+ desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;
+
+ return desc;
+}
+
+static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+ int desc_idx;
+
+ /* Initialize first descriptor to all 0s. Doing this gives us a
+ * solid stop for the firmware to hit when it is done looping
+ * through the ring.
+ */
+ __fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0);
+
+ fw_wrfl(fbd);
+
+	/* We then fill the rest of the ring, starting at the end and moving
+	 * back toward descriptor 0, with skip descriptors that have neither
+	 * length nor address; this tells the firmware it can skip past
+	 * them straight to the descriptor we initialized to 0.
+ */
+ for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) {
+ __fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx,
+ FBNIC_IPC_MBX_DESC_FW_CMPL |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
+ fw_wrfl(fbd);
+ }
+}
+
+void fbnic_mbx_init(struct fbnic_dev *fbd)
+{
+ int i;
+
+ /* Initialize lock to protect Tx ring */
+ spin_lock_init(&fbd->fw_tx_lock);
+
+ /* Reinitialize mailbox memory */
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));
+
+ /* Do not auto-clear the FW mailbox interrupt, let SW clear it */
+ wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));
+
+	/* Clear any stale causes in vector 0 as that is used for the doorbell */
+ wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_init_desc_ring(fbd, i);
+}
+
+static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
+ struct fbnic_tlv_msg *msg, u16 length, u8 eom)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+ u8 tail = mbx->tail;
+ dma_addr_t addr;
+ int direction;
+
+ if (!mbx->ready || !fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+
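+	/* The ring is full if advancing tail by one would land on head */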
+ if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
+ return -EBUSY;
+
+ addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
+ if (dma_mapping_error(fbd->dev, addr)) {
+ free_page((unsigned long)msg);
+
+ return -ENOSPC;
+ }
+
+ mbx->buf_info[tail].msg = msg;
+ mbx->buf_info[tail].addr = addr;
+
+ mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;
+
+ fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);
+
+ __fbnic_mbx_wr_desc(fbd, mbx_idx, tail,
+ FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
+ (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
+ (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) |
+ FBNIC_IPC_MBX_DESC_HOST_CMPL);
+
+ return 0;
+}
+
+static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
+ int desc_idx)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+ int direction;
+
+ if (!mbx->buf_info[desc_idx].msg)
+ return;
+
+ direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
+ DMA_TO_DEVICE;
+ dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
+ PAGE_SIZE, direction);
+
+ free_page((unsigned long)mbx->buf_info[desc_idx].msg);
+ mbx->buf_info[desc_idx].msg = NULL;
+}
+
+static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+ int i;
+
+ fbnic_mbx_init_desc_ring(fbd, mbx_idx);
+
+ for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
+ fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
+}
+
+void fbnic_mbx_clean(struct fbnic_dev *fbd)
+{
+ int i;
+
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_clean_desc_ring(fbd, i);
+}
+
+#define FBNIC_MBX_MAX_PAGE_SIZE FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
+#define FBNIC_RX_PAGE_SIZE min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)
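+/* FIELD_MAX() of the 16-bit length field is 0xffff, so an Rx buffer is
+ * capped at the smaller of PAGE_SIZE and 64K - 1 bytes.
+ */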
+
+static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
+ u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
+ int err = 0;
+
+ /* Do nothing if mailbox is not ready, or we already have pages on
+ * the ring that can be used by the firmware
+ */
+ if (!rx_mbx->ready)
+ return -ENODEV;
+
+	/* Fill all but one of the unused descriptors in the Rx queue. */
+ count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
+ while (!err && count--) {
+ struct fbnic_tlv_msg *msg;
+
+ msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
+ __GFP_NOWARN);
+ if (!msg) {
+ err = -ENOMEM;
+ break;
+ }
+
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
+ FBNIC_RX_PAGE_SIZE, 0);
+ if (err)
+ free_page((unsigned long)msg);
+ }
+
+ return err;
+}
+
+static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
+ struct fbnic_tlv_msg *msg)
+{
+ unsigned long flags;
+ int err;
+
+ spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+
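+	/* hdr.len counts dwords, so convert to bytes; this is the final
+	 * (EOM) descriptor of the message
+	 */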
+ err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
+ le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
+
+ spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+ return err;
+}
+
+static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ u8 head = tx_mbx->head;
+ u64 desc;
+
+ while (head != tx_mbx->tail) {
+ desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
+ if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
+ break;
+
+ fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);
+
+ head++;
+ head %= FBNIC_IPC_MBX_DESC_LEN;
+ }
+
+ /* Record head for next interrupt */
+ tx_mbx->head = head;
+}
+
+/**
+ * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
+ * @fbd: FBNIC device structure
+ * @msg_type: ENUM value indicating message type to send
+ *
+ * Return:
+ * One of the following values:
+ * -EOPNOTSUPP: Not an ASIC, so the mailbox is not supported
+ * -ENODEV: Device I/O error
+ * -ENOMEM: Failed to allocate message
+ * -EBUSY: No space in mailbox
+ * -ENOSPC: DMA mapping failed
+ *
+ * This function sends a single TLV header indicating the host wants to take
+ * some action. However, there are no other side effects, which means that
+ * any response will need to be caught via a completion if this action is
+ * expected to kick off a follow-on action.
+ */
+static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
+{
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(msg_type);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ free_page((unsigned long)msg);
+
+ return err;
+}
+
+/**
+ * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message
+ * @fbd: FBNIC device structure
+ *
+ * Return: zero on success, or a negative error code if the message could
+ * not be allocated or transmitted.
+ *
+ * Sends a single TLV header indicating the host wants the firmware to
+ * confirm the capabilities and version.
+ **/
+static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd)
+{
+ int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
+
+	/* Return 0 if we are not calling this on an ASIC */
+ return (err == -EOPNOTSUPP) ? 0 : err;
+}
+
+static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+ struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+
+ /* This is a one time init, so just exit if it is completed */
+ if (mbx->ready)
+ return;
+
+ mbx->ready = true;
+
+ switch (mbx_idx) {
+ case FBNIC_IPC_MBX_RX_IDX:
+ /* Make sure we have a page for the FW to write to */
+ fbnic_mbx_alloc_rx_msgs(fbd);
+ break;
+ case FBNIC_IPC_MBX_TX_IDX:
+ /* Force version to 1 if we successfully requested an update
+ * from the firmware. This should be overwritten once we get
+ * the actual version from the firmware in the capabilities
+ * request message.
+ */
+ if (!fbnic_fw_xmit_cap_msg(fbd) &&
+ !fbd->fw_cap.running.mgmt.version)
+ fbd->fw_cap.running.mgmt.version = 1;
+ break;
+ }
+}
+
+static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
+{
+ int i;
+
+ /* We only need to do this on the first interrupt following init.
+	 * This primes the mailbox so that we will have cleared all the
+ * skip descriptors.
+ */
+ if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
+ return;
+
+ wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+ fbnic_mbx_postinit_desc_ring(fbd, i);
+}
+
+/**
+ * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
+ * to FW mailbox
+ *
+ * @fbd: FBNIC device structure
+ * @take_ownership: take/release the ownership
+ *
+ * Return: zero on success, negative value on failure
+ *
+ * Notifies the firmware that the driver either takes ownership of the NIC
+ * (when @take_ownership is true) or releases it.
+ */
+int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
+{
+ unsigned long req_time = jiffies;
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ if (take_ownership) {
+ err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
+ if (err)
+ goto free_message;
+ }
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ /* Initialize heartbeat, set last response to 1 second in the past
+ * so that we will trigger a timeout if the firmware doesn't respond
+ */
+ fbd->last_heartbeat_response = req_time - HZ;
+
+ fbd->last_heartbeat_request = req_time;
+
+ /* Set heartbeat detection based on if we are taking ownership */
+ fbd->fw_heartbeat_enabled = take_ownership;
+
+ return err;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
+ FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
+ FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
+ FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
+ FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
+ struct fbnic_tlv_msg *attr, int len)
+{
+ int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
+ struct fbnic_tlv_msg *mac_results[8];
+ int err, i = 0;
+
+ /* Make sure we have enough room to process all the MAC addresses */
+ if (len > 8)
+ return -ENOSPC;
+
+ /* Parse the array */
+ err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
+ fbnic_fw_cap_resp_index,
+ FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
+ if (err)
+ return err;
+
+ /* Copy results into MAC addr array */
+ for (i = 0; i < len && mac_results[i]; i++)
+ fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);
+
+ /* Zero remaining unused addresses */
+ while (i < len)
+ eth_zero_addr(bmc_mac_addr[i++]);
+
+ return 0;
+}
+
+static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
+{
+ u32 active_slot = 0, all_multi = 0;
+ struct fbnic_dev *fbd = opaque;
+ u32 speed = 0, fec = 0;
+ size_t commit_size = 0;
+ bool bmc_present;
+ int err;
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_VERSION,
+ fbd->fw_cap.running.mgmt.version);
+
+ if (!fbd->fw_cap.running.mgmt.version)
+ return -EINVAL;
+
+ if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) {
+ char running_ver[FBNIC_FW_VER_MAX_SIZE];
+
+ fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
+ running_ver);
+ dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%02d.%02d.%02d)\n",
+ running_ver,
+ MIN_FW_MAJOR_VERSION,
+ MIN_FW_MINOR_VERSION,
+ MIN_FW_BUILD_VERSION);
+ /* Disable TX mailbox to prevent card use until firmware is
+ * updated.
+ */
+ fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
+ return -EINVAL;
+ }
+
+ get_string_result(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, commit_size,
+ fbd->fw_cap.running.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+ if (!commit_size)
+ dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_VERSION,
+ fbd->fw_cap.stored.mgmt.version);
+ get_string_result(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, commit_size,
+ fbd->fw_cap.stored.mgmt.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_CMRT_VERSION,
+ fbd->fw_cap.running.bootloader.version);
+ get_string_result(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, commit_size,
+ fbd->fw_cap.running.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION,
+ fbd->fw_cap.stored.bootloader.version);
+ get_string_result(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, commit_size,
+ fbd->fw_cap.stored.bootloader.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_UEFI_VERSION,
+ fbd->fw_cap.stored.undi.version);
+ get_string_result(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, commit_size,
+ fbd->fw_cap.stored.undi.commit,
+ FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT, active_slot);
+ fbd->fw_cap.active_slot = active_slot;
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_SPEED, speed);
+ get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_FEC, fec);
+ fbd->fw_cap.link_speed = speed;
+ fbd->fw_cap.link_fec = fec;
+
+ bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
+ if (bmc_present) {
+ struct fbnic_tlv_msg *attr;
+
+ attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
+ if (!attr)
+ return -EINVAL;
+
+ err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
+ attr, 4);
+ if (err)
+ return err;
+
+ get_unsigned_result(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI, all_multi);
+ } else {
+ memset(fbd->fw_cap.bmc_mac_addr, 0,
+ sizeof(fbd->fw_cap.bmc_mac_addr));
+ }
+
+ fbd->fw_cap.bmc_present = bmc_present;
+
+ if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
+ fbd->fw_cap.all_multi = all_multi;
+
+ return 0;
+}
+
+static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_ownership_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
+
+ /* Count the ownership response as a heartbeat reply */
+ fbd->last_heartbeat_response = jiffies;
+
+ return 0;
+}
+
+static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
+ FBNIC_TLV_ATTR_LAST
+};
+
+static int fbnic_fw_parse_heartbeat_resp(void *opaque,
+ struct fbnic_tlv_msg **results)
+{
+ struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;
+
+ fbd->last_heartbeat_response = jiffies;
+
+ return 0;
+}
+
+static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
+{
+ unsigned long req_time = jiffies;
+ struct fbnic_tlv_msg *msg;
+ int err = 0;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
+ if (!msg)
+ return -ENOMEM;
+
+ err = fbnic_mbx_map_tlv_msg(fbd, msg);
+ if (err)
+ goto free_message;
+
+ fbd->last_heartbeat_request = req_time;
+
+ return err;
+
+free_message:
+ free_page((unsigned long)msg);
+ return err;
+}
+
+static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
+{
+ unsigned long last_response = fbd->last_heartbeat_response;
+ unsigned long last_request = fbd->last_heartbeat_request;
+
+ return !time_before(last_response, last_request);
+}
+
+int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
+{
+ int err = -ETIMEDOUT;
+ int attempts = 50;
+
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ while (attempts--) {
+ msleep(200);
+ if (poll)
+ fbnic_mbx_poll(fbd);
+
+ if (!fbnic_fw_heartbeat_current(fbd))
+ continue;
+
+ /* Place new message on mailbox to elicit a response */
+ err = fbnic_fw_xmit_heartbeat_message(fbd);
+ if (err)
+ dev_warn(fbd->dev,
+ "Failed to send heartbeat message: %d\n",
+ err);
+ break;
+ }
+
+ return err;
+}
+
+void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
+{
+ unsigned long last_request = fbd->last_heartbeat_request;
+ int err;
+
+ /* Do not check heartbeat or send another request until current
+ * period has expired. Otherwise we might start spamming requests.
+ */
+ if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
+ return;
+
+ /* We already reported no mailbox. Wait for it to come back */
+ if (!fbd->fw_heartbeat_enabled)
+ return;
+
+	/* Was the last heartbeat response a long time ago? */
+ if (!fbnic_fw_heartbeat_current(fbd)) {
+ dev_warn(fbd->dev,
+ "Firmware did not respond to heartbeat message\n");
+ fbd->fw_heartbeat_enabled = false;
+ }
+
+ /* Place new message on mailbox to elicit a response */
+ err = fbnic_fw_xmit_heartbeat_message(fbd);
+ if (err)
+ dev_warn(fbd->dev, "Failed to send heartbeat message\n");
+}
+
+static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
+ FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
+ fbnic_fw_parse_cap_resp),
+ FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
+ fbnic_fw_parse_ownership_resp),
+ FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
+ fbnic_fw_parse_heartbeat_resp),
+ FBNIC_TLV_MSG_ERROR
+};
+
+static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
+ u8 head = rx_mbx->head;
+ u64 desc, length;
+
+ while (head != rx_mbx->tail) {
+ struct fbnic_tlv_msg *msg;
+ int err;
+
+ desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
+ if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
+ break;
+
+ dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ msg = rx_mbx->buf_info[head].msg;
+
+ length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);
+
+ /* Ignore NULL mailbox descriptors */
+ if (!length)
+ goto next_page;
+
+ /* Report descriptors with length greater than page size */
+ if (length > PAGE_SIZE) {
+ dev_warn(fbd->dev,
+ "Invalid mailbox descriptor length: %lld\n",
+ length);
+ goto next_page;
+ }
+
+ if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
+ dev_warn(fbd->dev, "Mailbox message length mismatch\n");
+
+ /* If parsing fails dump contents of message to dmesg */
+ err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
+ if (err) {
+ dev_warn(fbd->dev, "Unable to process message: %d\n",
+ err);
+ print_hex_dump(KERN_WARNING, "fbnic:",
+ DUMP_PREFIX_OFFSET, 16, 2,
+ msg, length, true);
+ }
+
+ dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
+next_page:
+
+ free_page((unsigned long)rx_mbx->buf_info[head].msg);
+ rx_mbx->buf_info[head].msg = NULL;
+
+ head++;
+ head %= FBNIC_IPC_MBX_DESC_LEN;
+ }
+
+ /* Record head for next interrupt */
+ rx_mbx->head = head;
+
+ /* Make sure we have at least one page for the FW to write to */
+ fbnic_mbx_alloc_rx_msgs(fbd);
+}
+
+void fbnic_mbx_poll(struct fbnic_dev *fbd)
+{
+ fbnic_mbx_postinit(fbd);
+
+ fbnic_mbx_process_tx_msgs(fbd);
+ fbnic_mbx_process_rx_msgs(fbd);
+}
+
+int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx;
+ int attempts = 50;
+
+	/* Fail immediately if BAR4 isn't there */
+ if (!fbnic_fw_present(fbd))
+ return -ENODEV;
+
+ tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+ while (!tx_mbx->ready && --attempts) {
+ /* Force the firmware to trigger an interrupt response to
+ * avoid the mailbox getting stuck closed if the interrupt
+ * is reset.
+ */
+ fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
+
+ msleep(200);
+
+ fbnic_mbx_poll(fbd);
+ }
+
+ return attempts ? 0 : -ETIMEDOUT;
+}
+
+void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx;
+ int attempts = 50;
+ u8 count = 0;
+
+ /* Nothing to do if there is no mailbox */
+ if (!fbnic_fw_present(fbd))
+ return;
+
+	/* Look up the Tx mailbox */
+ tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+
+ /* Nothing to do if mailbox never got to ready */
+ if (!tx_mbx->ready)
+ return;
+
+	/* Give the firmware time to process the packets; wait up to
+	 * 10 seconds, which is 50 waits of 200ms.
+	 */
+ do {
+ u8 head = tx_mbx->head;
+
+ if (head == tx_mbx->tail)
+ break;
+
+ msleep(200);
+ fbnic_mbx_process_tx_msgs(fbd);
+
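+		/* Track how many descriptors the FW completed this pass */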
+ count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
+ } while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
new file mode 100644
index 000000000000..c65bca613665
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_FW_H_
+#define _FBNIC_FW_H_
+
+#include <linux/if_ether.h>
+#include <linux/types.h>
+
+struct fbnic_dev;
+struct fbnic_tlv_msg;
+
+struct fbnic_fw_mbx {
+ u8 ready, head, tail;
+ struct {
+ struct fbnic_tlv_msg *msg;
+ dma_addr_t addr;
+ } buf_info[FBNIC_IPC_MBX_DESC_LEN];
+};
+
+/* FW_VER_MAX_SIZE must match ETHTOOL_FWVERS_LEN */
+#define FBNIC_FW_VER_MAX_SIZE 32
+/* Formatted version is in the format XX.YY.ZZ-RRR_COMMIT */
+#define FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE (FBNIC_FW_VER_MAX_SIZE - 13)
+#define FBNIC_FW_LOG_MAX_SIZE 256
+
+struct fbnic_fw_ver {
+ u32 version;
+ char commit[FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE];
+};
+
+struct fbnic_fw_cap {
+ struct {
+ struct fbnic_fw_ver mgmt, bootloader;
+ } running;
+ struct {
+ struct fbnic_fw_ver mgmt, bootloader, undi;
+ } stored;
+ u8 active_slot;
+ u8 bmc_mac_addr[4][ETH_ALEN];
+ u8 bmc_present : 1;
+ u8 all_multi : 1;
+ u8 link_speed;
+ u8 link_fec;
+};
+
+void fbnic_mbx_init(struct fbnic_dev *fbd);
+void fbnic_mbx_clean(struct fbnic_dev *fbd);
+void fbnic_mbx_poll(struct fbnic_dev *fbd);
+int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd);
+void fbnic_mbx_flush_tx(struct fbnic_dev *fbd);
+int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership);
+int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll);
+void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd);
+
+#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str) \
+do { \
+ const u32 __rev_id = _rev_id; \
+ snprintf(_str, sizeof(_str), "%02lu.%02lu.%02lu-%03lu%s%s", \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MAJOR, __rev_id), \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MINOR, __rev_id), \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_PATCH, __rev_id), \
+ FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_BUILD, __rev_id), \
+ _delim, _commit); \
+} while (0)
+
+#define fbnic_mk_fw_ver_str(_rev_id, _str) \
+ fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str)
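+
+/* Illustrative output (hypothetical values): a rev_id encoding major 1,
+ * minor 2, patch 3, build 4, with delim "_" and commit "abcd", formats
+ * as "01.02.03-004_abcd".
+ */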
+
+#define FW_HEARTBEAT_PERIOD (10 * HZ)
+
+enum {
+ FBNIC_TLV_MSG_ID_HOST_CAP_REQ = 0x10,
+ FBNIC_TLV_MSG_ID_FW_CAP_RESP = 0x11,
+ FBNIC_TLV_MSG_ID_OWNERSHIP_REQ = 0x12,
+ FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13,
+ FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14,
+ FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15,
+};
+
+#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24)
+#define FBNIC_FW_CAP_RESP_VERSION_MINOR CSR_GENMASK(23, 16)
+#define FBNIC_FW_CAP_RESP_VERSION_PATCH CSR_GENMASK(15, 8)
+#define FBNIC_FW_CAP_RESP_VERSION_BUILD CSR_GENMASK(7, 0)
+enum {
+ FBNIC_FW_CAP_RESP_VERSION = 0x0,
+ FBNIC_FW_CAP_RESP_BMC_PRESENT = 0x1,
+ FBNIC_FW_CAP_RESP_BMC_MAC_ADDR = 0x2,
+ FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY = 0x3,
+ FBNIC_FW_CAP_RESP_STORED_VERSION = 0x4,
+ FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT = 0x5,
+ FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR = 0x6,
+ FBNIC_FW_CAP_RESP_BMC_ALL_MULTI = 0x8,
+ FBNIC_FW_CAP_RESP_FW_STATE = 0x9,
+ FBNIC_FW_CAP_RESP_FW_LINK_SPEED = 0xa,
+ FBNIC_FW_CAP_RESP_FW_LINK_FEC = 0xb,
+ FBNIC_FW_CAP_RESP_STORED_COMMIT_STR = 0xc,
+ FBNIC_FW_CAP_RESP_CMRT_VERSION = 0xd,
+ FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION = 0xe,
+ FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR = 0xf,
+ FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR = 0x10,
+ FBNIC_FW_CAP_RESP_UEFI_VERSION = 0x11,
+ FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR = 0x12,
+ FBNIC_FW_CAP_RESP_MSG_MAX
+};
+
+enum {
+ FBNIC_FW_LINK_SPEED_25R1 = 1,
+ FBNIC_FW_LINK_SPEED_50R2 = 2,
+ FBNIC_FW_LINK_SPEED_50R1 = 3,
+ FBNIC_FW_LINK_SPEED_100R2 = 4,
+};
+
+enum {
+ FBNIC_FW_LINK_FEC_NONE = 1,
+ FBNIC_FW_LINK_FEC_RS = 2,
+ FBNIC_FW_LINK_FEC_BASER = 3,
+};
+
+enum {
+ FBNIC_FW_OWNERSHIP_FLAG = 0x0,
+ FBNIC_FW_OWNERSHIP_MSG_MAX
+};
+#endif /* _FBNIC_FW_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
new file mode 100644
index 000000000000..914362195920
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_txrx.h"
+
+static irqreturn_t fbnic_fw_msix_intr(int __always_unused irq, void *data)
+{
+ struct fbnic_dev *fbd = (struct fbnic_dev *)data;
+
+ fbnic_mbx_poll(fbd);
+
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * fbnic_fw_enable_mbx - Configure and initialize Firmware Mailbox
+ * @fbd: Pointer to device to initialize
+ *
+ * This function will initialize the firmware mailbox rings, enable the IRQ
+ * and initialize the communication between the Firmware and the host. The
+ * firmware is expected to respond to the initialization by sending an
+ * interrupt essentially notifying the host that it has seen the
+ * initialization and is now synced up.
+ *
+ * Return: non-zero on failure.
+ **/
+int fbnic_fw_enable_mbx(struct fbnic_dev *fbd)
+{
+ u32 vector = fbd->fw_msix_vector;
+ int err;
+
+ /* Request the IRQ for FW Mailbox vector. */
+ err = request_threaded_irq(vector, NULL, &fbnic_fw_msix_intr,
+ IRQF_ONESHOT, dev_name(fbd->dev), fbd);
+ if (err)
+ return err;
+
+ /* Initialize mailbox and attempt to poll it into ready state */
+ fbnic_mbx_init(fbd);
+ err = fbnic_mbx_poll_tx_ready(fbd);
+ if (err) {
+ dev_warn(fbd->dev, "FW mailbox did not enter ready state\n");
+ free_irq(vector, fbd);
+ return err;
+ }
+
+ /* Enable interrupts */
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ return 0;
+}
+
+/**
+ * fbnic_fw_disable_mbx - Disable mailbox and place it in standby state
+ * @fbd: Pointer to device to disable
+ *
+ * This function will disable the mailbox interrupt, free any messages still
+ * in the mailbox and place it into a standby state. The firmware is
+ * expected to see the update and assume that the host is in the reset state.
+ **/
+void fbnic_fw_disable_mbx(struct fbnic_dev *fbd)
+{
+ /* Disable interrupt and free vector */
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+ /* Free the vector */
+ free_irq(fbd->fw_msix_vector, fbd);
+
+	/* Make sure the message disabling the logs is sent; this must be
+	 * done here to avoid the risk of completing without a running
+	 * interrupt.
+	 */
+ fbnic_mbx_flush_tx(fbd);
+
+ /* Reset the mailboxes to the initialized state */
+ fbnic_mbx_clean(fbd);
+}
+
+static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data)
+{
+ struct fbnic_dev *fbd = data;
+ struct fbnic_net *fbn;
+
+ if (fbd->mac->pcs_get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) {
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0),
+ 1u << FBNIC_PCS_MSIX_ENTRY);
+ return IRQ_HANDLED;
+ }
+
+ fbn = netdev_priv(fbd->netdev);
+
+ phylink_pcs_change(&fbn->phylink_pcs, false);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * fbnic_pcs_irq_enable - Configure the MAC to enable it to advertise link
+ * @fbd: Pointer to device to initialize
+ *
+ * This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ
+ * will remain disabled until we start the MAC/PCS/PHY logic via phylink.
+ *
+ * Return: non-zero on failure.
+ **/
+int fbnic_pcs_irq_enable(struct fbnic_dev *fbd)
+{
+ u32 vector = fbd->pcs_msix_vector;
+ int err;
+
+ /* Request the IRQ for MAC link vector.
+ * Map MAC cause to it, and unmask it
+ */
+ err = request_irq(vector, &fbnic_pcs_msix_intr, 0,
+ fbd->netdev->name, fbd);
+ if (err)
+ return err;
+
+ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
+ FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE);
+
+ return 0;
+}
+
+/**
+ * fbnic_pcs_irq_disable - Teardown the MAC IRQ to prepare for stopping
+ * @fbd: Pointer to device that is stopping
+ *
+ * This function undoes the work done in fbnic_pcs_irq_enable and prepares
+ * the device to no longer receive traffic on the host interface.
+ **/
+void fbnic_pcs_irq_disable(struct fbnic_dev *fbd)
+{
+ /* Disable interrupt */
+ fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX),
+ FBNIC_PCS_MSIX_ENTRY);
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY);
+
+ /* Free the vector */
+ free_irq(fbd->pcs_msix_vector, fbd);
+}
+
+int fbnic_request_irq(struct fbnic_dev *fbd, int nr, irq_handler_t handler,
+ unsigned long flags, const char *name, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return irq;
+
+ return request_irq(irq, handler, flags, name, data);
+}
+
+void fbnic_free_irq(struct fbnic_dev *fbd, int nr, void *data)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int irq = pci_irq_vector(pdev, nr);
+
+ if (irq < 0)
+ return;
+
+ free_irq(irq, data);
+}
+
+void fbnic_free_irqs(struct fbnic_dev *fbd)
+{
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+
+ fbd->pcs_msix_vector = 0;
+ fbd->fw_msix_vector = 0;
+
+ fbd->num_irqs = 0;
+
+ pci_free_irq_vectors(pdev);
+}
+
+int fbnic_alloc_irqs(struct fbnic_dev *fbd)
+{
+ unsigned int wanted_irqs = FBNIC_NON_NAPI_VECTORS;
+ struct pci_dev *pdev = to_pci_dev(fbd->dev);
+ int num_irqs;
+
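+	/* Request one NAPI vector per online CPU, capped at the Rx queue
+	 * limit, on top of the non-NAPI (FW mailbox and PCS) vectors.
+	 */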
+ wanted_irqs += min_t(unsigned int, num_online_cpus(), FBNIC_MAX_RXQS);
+ num_irqs = pci_alloc_irq_vectors(pdev, FBNIC_NON_NAPI_VECTORS + 1,
+ wanted_irqs, PCI_IRQ_MSIX);
+ if (num_irqs < 0) {
+ dev_err(fbd->dev, "Failed to allocate MSI-X entries\n");
+ return num_irqs;
+ }
+
+ if (num_irqs < wanted_irqs)
+ dev_warn(fbd->dev, "Allocated %d IRQs, expected %d\n",
+ num_irqs, wanted_irqs);
+
+ fbd->num_irqs = num_irqs;
+
+ fbd->pcs_msix_vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY);
+ fbd->fw_msix_vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
new file mode 100644
index 000000000000..7920e7af82d9
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <net/tcp.h>
+
+#include "fbnic.h"
+#include "fbnic_mac.h"
+#include "fbnic_netdev.h"
+
+static void fbnic_init_readrq(struct fbnic_dev *fbd, unsigned int offset,
+ unsigned int cls, unsigned int readrq)
+{
+ u32 val = rd32(fbd, offset);
+
+	/* The TDF_CTL masks are a superset of the RNI_RBP ones, so we can
+	 * use them when setting either the TDF_CTL or RNI_RBP registers.
+	 */
+ val &= FBNIC_QM_TNI_TDF_CTL_MAX_OT | FBNIC_QM_TNI_TDF_CTL_MAX_OB;
+
+ val |= FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_MRRS, readrq) |
+ FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_CLS, cls);
+
+ wr32(fbd, offset, val);
+}
+
+static void fbnic_init_mps(struct fbnic_dev *fbd, unsigned int offset,
+ unsigned int cls, unsigned int mps)
+{
+ u32 val = rd32(fbd, offset);
+
+	/* Currently all MPS masks are identical, so just use the first one */
+ val &= ~(FBNIC_QM_TNI_TCM_CTL_MPS | FBNIC_QM_TNI_TCM_CTL_CLS);
+
+ val |= FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_MPS, mps) |
+ FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_CLS, cls);
+
+ wr32(fbd, offset, val);
+}
+
+static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
+{
+ bool override_1k = false;
+ int readrq, mps, cls;
+
+ /* All of the values are based on being a power of 2 starting
+ * with 64 == 0. Therefore we can either divide by 64 in the
+ * case of constants, or just subtract 6 from the log2 of the value
+ * in order to get the value we will be programming into the
+ * registers.
+ */
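+	/* For example, a 512B read request encodes as ilog2(512) - 6 = 3,
+	 * while 1024B would encode as 4, exceeding the cap of 3 below and
+	 * triggering the 1K override before being clamped.
+	 */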
+ readrq = ilog2(fbd->readrq) - 6;
+ if (readrq > 3)
+ override_1k = true;
+ readrq = clamp(readrq, 0, 3);
+
+ mps = ilog2(fbd->mps) - 6;
+ mps = clamp(mps, 0, 3);
+
+ cls = ilog2(L1_CACHE_BYTES) - 6;
+ cls = clamp(cls, 0, 3);
+
+ /* Configure Tx/Rx AXI Paths w/ Read Request and Max Payload sizes */
+ fbnic_init_readrq(fbd, FBNIC_QM_TNI_TDF_CTL, cls, readrq);
+ fbnic_init_mps(fbd, FBNIC_QM_TNI_TCM_CTL, cls, mps);
+
+ /* Configure QM TNI TDE:
+	 * - Max outstanding AXI beats to 704 (768 - 64) - guarantees 8% of
+ * buffer capacity to descriptors.
+ * - Max outstanding transactions to 128
+ */
+ wr32(fbd, FBNIC_QM_TNI_TDE_CTL,
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS_1K, override_1k ? 1 : 0) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OB, 704) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OT, 128) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS, readrq) |
+ FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_CLS, cls));
+
+ fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
+ fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
+ fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);
+
+ /* Enable XALI AR/AW outbound */
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
+ wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+ FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
+}
+
+static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
+{
+ u32 clock_freq;
+
+ /* Configure TSO behavior */
+ wr32(fbd, FBNIC_QM_TQS_CTL0,
+ FIELD_PREP(FBNIC_QM_TQS_CTL0_LSO_TS_MASK,
+ FBNIC_QM_TQS_CTL0_LSO_TS_LAST) |
+ FIELD_PREP(FBNIC_QM_TQS_CTL0_PREFETCH_THRESH,
+ FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN));
+
+ /* Limit EDT to INT_MAX as this is the limit of the EDT Qdisc */
+ wr32(fbd, FBNIC_QM_TQS_EDT_TS_RANGE, INT_MAX);
+
+ /* Configure MTU
+	 * Due to a known HW issue we cannot set the MTU to within 16 octets
+ * of a 64 octet aligned boundary. So we will set the TQS_MTU(s) to
+ * MTU + 1.
+ */
+ wr32(fbd, FBNIC_QM_TQS_MTU_CTL0, FBNIC_MAX_JUMBO_FRAME_SIZE + 1);
+ wr32(fbd, FBNIC_QM_TQS_MTU_CTL1,
+ FIELD_PREP(FBNIC_QM_TQS_MTU_CTL1_BULK,
+ FBNIC_MAX_JUMBO_FRAME_SIZE + 1));
+
+ clock_freq = FBNIC_CLOCK_FREQ;
+
+ /* Be aggressive on the timings. We will have the interrupt
+ * threshold timer tick once every 1 usec and coalesce writes for
+ * up to 80 usecs.
+ */
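+	/* To make the arithmetic concrete (clock rate illustrative only):
+	 * at 600MHz, TICK_CYCLES = 600MHz / 1MHz = 600 cycles per usec and
+	 * COAL_WAIT = 600MHz / 12500 = 48000 cycles = 80 usecs.
+	 */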
+ wr32(fbd, FBNIC_QM_TCQ_CTL0,
+ FIELD_PREP(FBNIC_QM_TCQ_CTL0_TICK_CYCLES,
+ clock_freq / 1000000) |
+ FIELD_PREP(FBNIC_QM_TCQ_CTL0_COAL_WAIT,
+ clock_freq / 12500));
+
+ /* We will have the interrupt threshold timer tick once every
+ * 1 usec and coalesce writes for up to 2 usecs.
+ */
+ wr32(fbd, FBNIC_QM_RCQ_CTL0,
+ FIELD_PREP(FBNIC_QM_RCQ_CTL0_TICK_CYCLES,
+ clock_freq / 1000000) |
+ FIELD_PREP(FBNIC_QM_RCQ_CTL0_COAL_WAIT,
+ clock_freq / 500000));
+
+ /* Configure spacer control to 64 beats. */
+ wr32(fbd, FBNIC_FAB_AXI4_AR_SPACER_2_CFG,
+ FBNIC_FAB_AXI4_AR_SPACER_MASK |
+ FIELD_PREP(FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD, 2));
+}
+
+#define FBNIC_DROP_EN_MASK 0x7d
+#define FBNIC_PAUSE_EN_MASK 0x14
+#define FBNIC_ECN_EN_MASK 0x10
+
+struct fbnic_fifo_config {
+ unsigned int addr;
+ unsigned int size;
+};
+
+/* Rx FIFO Configuration
+ * The table consists of 8 entries, of which only 4 are currently used.
+ * The starting addr is in units of 64B and the size is in 2KB units.
+ * Below is a human-readable version of the table:
+ * Function Addr Size
+ * ----------------------------------
+ * Network to Host/BMC 384K 64K
+ * Unused
+ * Unused
+ * Network to BMC 448K 32K
+ * Network to Host 0 384K
+ * Unused
+ * BMC to Host 480K 32K
+ * Unused
+ */
+static const struct fbnic_fifo_config fifo_config[] = {
+ { .addr = 0x1800, .size = 0x20 }, /* Network to Host/BMC */
+ { }, /* Unused */
+ { }, /* Unused */
+ { .addr = 0x1c00, .size = 0x10 }, /* Network to BMC */
+ { .addr = 0x0000, .size = 0xc0 }, /* Network to Host */
+ { }, /* Unused */
+ { .addr = 0x1e00, .size = 0x10 }, /* BMC to Host */
+ { } /* Unused */
+};
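+
+/* As a unit check (derived from the table above): the Network to Host
+ * entry has size 0xc0, i.e. 0xc0 * 2KB = 384KB, and the Network to
+ * Host/BMC entry starts at addr 0x1800, i.e. 0x1800 * 64B = 384KB,
+ * immediately after it.
+ */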
+
+static void fbnic_mac_init_rxb(struct fbnic_dev *fbd)
+{
+ bool rx_enable;
+ int i;
+
+ rx_enable = !!(rd32(fbd, FBNIC_RPC_RMI_CONFIG) &
+ FBNIC_RPC_RMI_CONFIG_ENABLE);
+
+ for (i = 0; i < 8; i++) {
+ unsigned int size = fifo_config[i].size;
+
+ /* If we are coming up on a system that already has the
+ * Rx data path enabled we don't need to reconfigure the
+ * FIFOs. Instead we can check to verify the values are
+ * large enough to meet our needs, and use the values to
+ * populate the flow control, ECN, and drop thresholds.
+ */
+ if (rx_enable) {
+ size = FIELD_GET(FBNIC_RXB_PBUF_SIZE,
+ rd32(fbd, FBNIC_RXB_PBUF_CFG(i)));
+ if (size < fifo_config[i].size)
+ dev_warn(fbd->dev,
+ "fifo%d size of %d smaller than expected value of %d\n",
+ i, size << 11,
+ fifo_config[i].size << 11);
+ } else {
+			/* Program RXB cut-through */
+ wr32(fbd, FBNIC_RXB_CT_SIZE(i),
+ FIELD_PREP(FBNIC_RXB_CT_SIZE_HEADER, 4) |
+ FIELD_PREP(FBNIC_RXB_CT_SIZE_PAYLOAD, 2));
+
+			/* The packet buffer size is programmed in 2KB
+			 * units, while the packet buffer base address
+			 * uses a finer 64B granularity.
+			 */
+ wr32(fbd, FBNIC_RXB_PBUF_CFG(i),
+ FIELD_PREP(FBNIC_RXB_PBUF_BASE_ADDR,
+ fifo_config[i].addr) |
+ FIELD_PREP(FBNIC_RXB_PBUF_SIZE, size));
+
+			/* The credits are in units of 64B, so each 2KB
+			 * unit of buffer size equals 32 credits, hence
+			 * RXB_PBUF_SIZE * 32, plus 4 credits of slack.
+			 */
+ wr32(fbd, FBNIC_RXB_PBUF_CREDIT(i),
+ FIELD_PREP(FBNIC_RXB_PBUF_CREDIT_MASK,
+ size ? size * 32 + 4 : 0));
+ }
+
+ if (!size)
+ continue;
+
+		/* Pause thresholds track the FIFO size with a 0x380-credit
+		 * (56KB) skid to start/stop
+		 */
+ wr32(fbd, FBNIC_RXB_PAUSE_THLD(i),
+ !(FBNIC_PAUSE_EN_MASK & (1u << i)) ? 0x1fff :
+ FIELD_PREP(FBNIC_RXB_PAUSE_THLD_ON,
+ size * 32 - 0x380) |
+ FIELD_PREP(FBNIC_RXB_PAUSE_THLD_OFF, 0x380));
+
+ /* Enable Drop when only one packet is left in the FIFO */
+ wr32(fbd, FBNIC_RXB_DROP_THLD(i),
+ !(FBNIC_DROP_EN_MASK & (1u << i)) ? 0x1fff :
+ FIELD_PREP(FBNIC_RXB_DROP_THLD_ON,
+ size * 32 -
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64) |
+ FIELD_PREP(FBNIC_RXB_DROP_THLD_OFF,
+ size * 32 -
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64));
+
+		/* Assert the ECN bit once 1/4 of the RXB is filled, with a
+		 * floor of at least one full jumbo frame's worth of data
+		 * before marking.
+		 */
+ wr32(fbd, FBNIC_RXB_ECN_THLD(i),
+ !(FBNIC_ECN_EN_MASK & (1u << i)) ? 0x1fff :
+ FIELD_PREP(FBNIC_RXB_ECN_THLD_ON,
+ max_t(unsigned int,
+ size * 32 / 4,
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64)) |
+ FIELD_PREP(FBNIC_RXB_ECN_THLD_OFF,
+ max_t(unsigned int,
+ size * 32 / 4,
+ FBNIC_MAX_JUMBO_FRAME_SIZE / 64)));
+ }
+
+ /* For now only enable drop and ECN. We need to add driver/kernel
+ * interfaces for configuring pause.
+ */
+ wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL,
+ FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE,
+ FBNIC_DROP_EN_MASK) |
+ FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE,
+ FBNIC_ECN_EN_MASK));
+
+ /* Program INTF credits */
+ wr32(fbd, FBNIC_RXB_INTF_CREDIT,
+ FBNIC_RXB_INTF_CREDIT_MASK0 |
+ FBNIC_RXB_INTF_CREDIT_MASK1 |
+ FBNIC_RXB_INTF_CREDIT_MASK2 |
+ FIELD_PREP(FBNIC_RXB_INTF_CREDIT_MASK3, 8));
+
+ /* Configure calendar slots.
+ * Rx: 0 - 62 RDE 1st, BMC 2nd
+ * 63 BMC 1st, RDE 2nd
+ */
+ for (i = 0; i < 16; i++) {
+ u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;
+
+ wr32(fbd, FBNIC_RXB_CLDR_PRIO_CFG(i), calendar_val);
+ }
+
+ /* Split the credits for the DRR up as follows:
+ * Quantum0: 8000 Network to Host
+ * Quantum1: 0 Not used
+ * Quantum2: 80 BMC to Host
+ * Quantum3: 0 Not used
+ * Quantum4: 8000 Multicast to Host and BMC
+ */
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x40) |
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2, 0x50));
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x1f));
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x40));
+ wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT,
+ FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x1f));
+
+ /* Program RXB FCS Endian register */
+ wr32(fbd, FBNIC_RXB_ENDIAN_FCS, 0x0aaaaaa0);
+}
+
+static void fbnic_mac_init_txb(struct fbnic_dev *fbd)
+{
+ int i;
+
+ wr32(fbd, FBNIC_TCE_TXB_CTRL, 0);
+
+ /* Configure Tx QM Credits */
+ wr32(fbd, FBNIC_QM_TQS_CTL1,
+ FIELD_PREP(FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS, 0x40) |
+ FIELD_PREP(FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS, 0x20));
+
+ /* Initialize internal Tx queues */
+ wr32(fbd, FBNIC_TCE_TXB_TEI_Q0_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_TEI_Q1_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_MC_Q_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x400) |
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x000));
+ wr32(fbd, FBNIC_TCE_TXB_RX_TEI_Q_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_TX_BMC_Q_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x400));
+ wr32(fbd, FBNIC_TCE_TXB_RX_BMC_Q_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) |
+ FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x600));
+
+ wr32(fbd, FBNIC_TCE_LSO_CTRL,
+ FBNIC_TCE_LSO_CTRL_IPID_MODE_INC |
+ FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST, TCPHDR_PSH |
+ TCPHDR_FIN) |
+ FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID, TCPHDR_PSH |
+ TCPHDR_CWR |
+ TCPHDR_FIN) |
+ FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_END, TCPHDR_CWR));
+ wr32(fbd, FBNIC_TCE_CSO_CTRL, 0);
+
+ wr32(fbd, FBNIC_TCE_BMC_MAX_PKTSZ,
+ FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_TX,
+ FBNIC_MAX_JUMBO_FRAME_SIZE) |
+ FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_RX,
+ FBNIC_MAX_JUMBO_FRAME_SIZE));
+ wr32(fbd, FBNIC_TCE_MC_MAX_PKTSZ,
+ FIELD_PREP(FBNIC_TCE_MC_MAX_PKTSZ_TMI,
+ FBNIC_MAX_JUMBO_FRAME_SIZE));
+
+ /* Configure calendar slots.
+ * Tx: 0 - 62 TMI 1st, BMC 2nd
+ * 63 BMC 1st, TMI 2nd
+ */
+ for (i = 0; i < 16; i++) {
+ u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b;
+
+ wr32(fbd, FBNIC_TCE_TXB_CLDR_SLOT_CFG(i), calendar_val);
+ }
+
+ /* Configure DWRR */
+ wr32(fbd, FBNIC_TCE_TXB_ENQ_WRR_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0, 0x64) |
+ FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2, 0x04));
+ wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL, 0);
+ wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT, 0);
+ wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0, 0x50) |
+ FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1, 0x82));
+ wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT, 0);
+ wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL,
+ FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1, 0x50) |
+ FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x20));
+ wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT,
+ FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x03));
+
+ /* Configure SOP protocol protection */
+ wr32(fbd, FBNIC_TCE_SOP_PROT_CTRL,
+ FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TBI, 0x78) |
+ FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM, 0x40) |
+ FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_CM, 0x0c));
+
+ /* Conservative configuration on MAC interface Start of Packet
+ * protection FIFO. This sets the minimum depth of the FIFO before
+ * we start sending packets to the MAC measured in 64B units and
+ * up to 160 entries deep.
+ *
+ * For the ASIC the clock is fast enough that we will likely fill
+ * the SOP FIFO before the MAC can drain it. So just use a minimum
+ * value of 8.
+ */
+ wr32(fbd, FBNIC_TMI_SOP_PROT_CTRL, 8);
+
+ wrfl(fbd);
+ wr32(fbd, FBNIC_TCE_TXB_CTRL, FBNIC_TCE_TXB_CTRL_TCAM_ENABLE |
+ FBNIC_TCE_TXB_CTRL_LOAD);
+}
+
+static void fbnic_mac_init_regs(struct fbnic_dev *fbd)
+{
+ fbnic_mac_init_axi(fbd);
+ fbnic_mac_init_qm(fbd);
+ fbnic_mac_init_rxb(fbd);
+ fbnic_mac_init_txb(fbd);
+}
+
+static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause)
+{
+ u32 rxb_pause_ctrl;
+
+ /* Enable generation of pause frames if enabled */
+ rxb_pause_ctrl = rd32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL);
+ rxb_pause_ctrl &= ~FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE;
+ if (tx_pause)
+ rxb_pause_ctrl |=
+ FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE,
+ FBNIC_PAUSE_EN_MASK);
+ wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, rxb_pause_ctrl);
+}
+
+static int fbnic_pcs_get_link_event_asic(struct fbnic_dev *fbd)
+{
+ u32 pcs_intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS);
+
+ if (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN)
+ return FBNIC_LINK_EVENT_DOWN;
+
+ return (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ?
+ FBNIC_LINK_EVENT_UP : FBNIC_LINK_EVENT_NONE;
+}
+
+static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd,
+ bool tx_pause, bool rx_pause)
+{
+ /* Enable MAC Promiscuous mode and Tx padding */
+ u32 command_config = FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN |
+ FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN;
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+
+ /* Disable pause frames if not enabled */
+ if (!tx_pause)
+ command_config |= FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS;
+ if (!rx_pause)
+ command_config |= FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS;
+
+ /* Disable fault handling if no FEC is requested */
+ if ((fbn->fec & FBNIC_FEC_MODE_MASK) == FBNIC_FEC_OFF)
+ command_config |= FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS;
+
+ return command_config;
+}
+
+static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ u32 pcs_status, lane_mask = ~0;
+
+ pcs_status = rd32(fbd, FBNIC_SIG_PCS_OUT0);
+ if (!(pcs_status & FBNIC_SIG_PCS_OUT0_LINK))
+ return false;
+
+ /* Define the expected lane mask for the status bits we need to check */
+ switch (fbn->link_mode & FBNIC_LINK_MODE_MASK) {
+ case FBNIC_LINK_100R2:
+ lane_mask = 0xf;
+ break;
+ case FBNIC_LINK_50R1:
+ lane_mask = 3;
+ break;
+ case FBNIC_LINK_50R2:
+ switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
+ case FBNIC_FEC_OFF:
+ lane_mask = 0x63;
+ break;
+ case FBNIC_FEC_RS:
+ lane_mask = 5;
+ break;
+ case FBNIC_FEC_BASER:
+ lane_mask = 0xf;
+ break;
+ }
+ break;
+ case FBNIC_LINK_25R1:
+ lane_mask = 1;
+ break;
+ }
+
+ /* Use an XOR to remove the bits we expect to see set */
+ switch (fbn->fec & FBNIC_FEC_MODE_MASK) {
+ case FBNIC_FEC_OFF:
+ lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK,
+ pcs_status);
+ break;
+ case FBNIC_FEC_RS:
+ lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_AMPS_LOCK,
+ pcs_status);
+ break;
+ case FBNIC_FEC_BASER:
+ lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT1_FCFEC_LOCK,
+ rd32(fbd, FBNIC_SIG_PCS_OUT1));
+ break;
+ }
+
+	/* If all expected lock bits cancelled out, we have a lock on all lanes */
+ return !lane_mask;
+}
+
+static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd)
+{
+ bool link;
+
+	/* Flush status bits to clear possible stale data;
+	 * the bits should reset themselves back to 1 if the link is truly up
+ */
+ wr32(fbd, FBNIC_SIG_PCS_OUT0, FBNIC_SIG_PCS_OUT0_LINK |
+ FBNIC_SIG_PCS_OUT0_BLOCK_LOCK |
+ FBNIC_SIG_PCS_OUT0_AMPS_LOCK);
+ wr32(fbd, FBNIC_SIG_PCS_OUT1, FBNIC_SIG_PCS_OUT1_FCFEC_LOCK);
+ wrfl(fbd);
+
+ /* Clear interrupt state due to recent changes. */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_STS,
+ FBNIC_SIG_PCS_INTR_LINK_DOWN | FBNIC_SIG_PCS_INTR_LINK_UP);
+
+ link = fbnic_mac_get_pcs_link_status(fbd);
+
+ /* Enable interrupt to only capture changes in link state */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_MASK,
+ ~FBNIC_SIG_PCS_INTR_LINK_DOWN & ~FBNIC_SIG_PCS_INTR_LINK_UP);
+ wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_PCS_MSIX_ENTRY);
+
+ return link;
+}
+
+static void fbnic_pcs_get_fw_settings(struct fbnic_dev *fbd)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+ u8 link_mode = fbn->link_mode;
+ u8 fec = fbn->fec;
+
+ /* Update FEC first to reflect FW current mode */
+ if (fbn->fec & FBNIC_FEC_AUTO) {
+ switch (fbd->fw_cap.link_fec) {
+ case FBNIC_FW_LINK_FEC_NONE:
+ fec = FBNIC_FEC_OFF;
+ break;
+ case FBNIC_FW_LINK_FEC_RS:
+ fec = FBNIC_FEC_RS;
+ break;
+ case FBNIC_FW_LINK_FEC_BASER:
+ fec = FBNIC_FEC_BASER;
+ break;
+ default:
+ return;
+ }
+
+ fbn->fec = fec;
+ }
+
+	/* Resolve the link mode from FW only if AUTO mode is engaged */
+ if (fbn->link_mode & FBNIC_LINK_AUTO) {
+ switch (fbd->fw_cap.link_speed) {
+ case FBNIC_FW_LINK_SPEED_25R1:
+ link_mode = FBNIC_LINK_25R1;
+ break;
+ case FBNIC_FW_LINK_SPEED_50R2:
+ link_mode = FBNIC_LINK_50R2;
+ break;
+ case FBNIC_FW_LINK_SPEED_50R1:
+ link_mode = FBNIC_LINK_50R1;
+ fec = FBNIC_FEC_RS;
+ break;
+ case FBNIC_FW_LINK_SPEED_100R2:
+ link_mode = FBNIC_LINK_100R2;
+ fec = FBNIC_FEC_RS;
+ break;
+ default:
+ return;
+ }
+
+ fbn->link_mode = link_mode;
+ }
+}
+
+static int fbnic_pcs_enable_asic(struct fbnic_dev *fbd)
+{
+ /* Mask and clear the PCS interrupt, will be enabled by link handler */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
+ wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
+
+ /* Pull in settings from FW */
+ fbnic_pcs_get_fw_settings(fbd);
+
+ return 0;
+}
+
+static void fbnic_pcs_disable_asic(struct fbnic_dev *fbd)
+{
+ /* Mask and clear the PCS interrupt */
+ wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0);
+ wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0);
+}
+
+static void fbnic_mac_link_down_asic(struct fbnic_dev *fbd)
+{
+ u32 cmd_cfg, mac_ctrl;
+
+ cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, false, false);
+ mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);
+
+ mac_ctrl |= FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_RX_CLK;
+
+ wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
+ wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
+}
+
+static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd,
+ bool tx_pause, bool rx_pause)
+{
+ u32 cmd_cfg, mac_ctrl;
+
+ fbnic_mac_tx_pause_config(fbd, tx_pause);
+
+ cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, tx_pause, rx_pause);
+ mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0);
+
+ mac_ctrl &= ~(FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_TX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK |
+ FBNIC_SIG_MAC_IN0_RESET_RX_CLK);
+ cmd_cfg |= FBNIC_MAC_COMMAND_CONFIG_RX_ENA |
+ FBNIC_MAC_COMMAND_CONFIG_TX_ENA;
+
+ wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl);
+ wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg);
+}
+
+static const struct fbnic_mac fbnic_mac_asic = {
+ .init_regs = fbnic_mac_init_regs,
+ .pcs_enable = fbnic_pcs_enable_asic,
+ .pcs_disable = fbnic_pcs_disable_asic,
+ .pcs_get_link = fbnic_pcs_get_link_asic,
+ .pcs_get_link_event = fbnic_pcs_get_link_event_asic,
+ .link_down = fbnic_mac_link_down_asic,
+ .link_up = fbnic_mac_link_up_asic,
+};
+
+/**
+ * fbnic_mac_init - Assign a MAC type and initialize the fbnic device
+ * @fbd: Device pointer to device to initialize
+ *
+ * Return: zero on success, negative on failure
+ *
+ * Initialize the MAC function pointers and initializes the MAC of
+ * the device.
+ **/
+int fbnic_mac_init(struct fbnic_dev *fbd)
+{
+ fbd->mac = &fbnic_mac_asic;
+
+ fbd->mac->init_regs(fbd);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
new file mode 100644
index 000000000000..f53be6e6aef9
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_MAC_H_
+#define _FBNIC_MAC_H_
+
+#include <linux/types.h>
+
+struct fbnic_dev;
+
+#define FBNIC_MAX_JUMBO_FRAME_SIZE 9742
+
+enum {
+ FBNIC_LINK_EVENT_NONE = 0,
+ FBNIC_LINK_EVENT_UP = 1,
+ FBNIC_LINK_EVENT_DOWN = 2,
+};
+
+/* Treat the FEC bits as a bitmask laid out as follows:
+ * Bit 0: RS Enabled
+ * Bit 1: BASER(Firecode) Enabled
+ * Bit 2: Retrieve FEC from FW
+ */
+enum {
+ FBNIC_FEC_OFF = 0,
+ FBNIC_FEC_RS = 1,
+ FBNIC_FEC_BASER = 2,
+ FBNIC_FEC_AUTO = 4,
+};
+
+#define FBNIC_FEC_MODE_MASK (FBNIC_FEC_AUTO - 1)
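+
+/* For example, the driver default of FBNIC_FEC_AUTO | FBNIC_FEC_RS (0x5)
+ * asks that the FEC mode be retrieved from FW, with RS (0x1) remaining
+ * once the AUTO bit is stripped by FBNIC_FEC_MODE_MASK (0x3).
+ */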
+
+/* Treat the link modes as a set of modulation/lanes bitmask:
+ * Bit 0: Lane Count, 0 = R1, 1 = R2
+ * Bit 1: Modulation, 0 = NRZ, 1 = PAM4
+ * Bit 2: Retrieve link mode from FW
+ */
+enum {
+ FBNIC_LINK_25R1 = 0,
+ FBNIC_LINK_50R2 = 1,
+ FBNIC_LINK_50R1 = 2,
+ FBNIC_LINK_100R2 = 3,
+ FBNIC_LINK_AUTO = 4,
+};
+
+#define FBNIC_LINK_MODE_R2 (FBNIC_LINK_50R2)
+#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1)
+#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1)
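+
+/* Likewise FBNIC_LINK_AUTO | FBNIC_LINK_50R2 (the driver default)
+ * resolves the mode from FW where possible, falling back to 50G over
+ * two NRZ lanes once FBNIC_LINK_MODE_MASK strips the AUTO bit.
+ */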
+
+/* This structure defines the interface hooks for the MAC. The MAC hooks
+ * will be configured as a const struct provided with a set of function
+ * pointers.
+ *
+ * void (*init_regs)(struct fbnic_dev *fbd);
+ * Initialize MAC registers to enable Tx/Rx paths and FIFOs.
+ *
+ * void (*pcs_enable)(struct fbnic_dev *fbd);
+ * Configure and enable PCS to enable link if not already enabled
+ * void (*pcs_disable)(struct fbnic_dev *fbd);
+ * Shutdown the link if we are the only consumer of it.
+ * bool (*pcs_get_link)(struct fbnic_dev *fbd);
+ * Check PCS link status
+ * int (*pcs_get_link_event)(struct fbnic_dev *fbd)
+ *	Get the current link event status; returns FBNIC_LINK_EVENT_UP or
+ *	FBNIC_LINK_EVENT_DOWN if the link state has changed, else
+ *	FBNIC_LINK_EVENT_NONE
+ *
+ * void (*link_down)(struct fbnic_dev *fbd);
+ * Configure MAC for link down event
+ * void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
+ *	Configure MAC for link up event.
+ *
+ */
+struct fbnic_mac {
+ void (*init_regs)(struct fbnic_dev *fbd);
+
+ int (*pcs_enable)(struct fbnic_dev *fbd);
+ void (*pcs_disable)(struct fbnic_dev *fbd);
+ bool (*pcs_get_link)(struct fbnic_dev *fbd);
+ int (*pcs_get_link_event)(struct fbnic_dev *fbd);
+
+ void (*link_down)(struct fbnic_dev *fbd);
+ void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause);
+};
+
+int fbnic_mac_init(struct fbnic_dev *fbd);
+#endif /* _FBNIC_MAC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
new file mode 100644
index 000000000000..b7ce6da68543
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/etherdevice.h>
+#include <linux/ipv6.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_txrx.h"
+
+int __fbnic_open(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ int err;
+
+ err = fbnic_alloc_napi_vectors(fbn);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_resources(fbn);
+ if (err)
+ goto free_napi_vectors;
+
+ err = netif_set_real_num_tx_queues(fbn->netdev,
+ fbn->num_tx_queues);
+ if (err)
+ goto free_resources;
+
+ err = netif_set_real_num_rx_queues(fbn->netdev,
+ fbn->num_rx_queues);
+ if (err)
+ goto free_resources;
+
+ /* Send ownership message and flush to verify FW has seen it */
+ err = fbnic_fw_xmit_ownership_msg(fbd, true);
+ if (err) {
+ dev_warn(fbd->dev,
+ "Error %d sending host ownership message to the firmware\n",
+ err);
+ goto free_resources;
+ }
+
+ err = fbnic_fw_init_heartbeat(fbd, false);
+ if (err)
+ goto release_ownership;
+
+ err = fbnic_pcs_irq_enable(fbd);
+ if (err)
+ goto release_ownership;
+ /* Pull the BMC config and initialize the RPC */
+ fbnic_bmc_rpc_init(fbd);
+ fbnic_rss_reinit(fbd, fbn);
+
+ return 0;
+release_ownership:
+ fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+free_resources:
+ fbnic_free_resources(fbn);
+free_napi_vectors:
+ fbnic_free_napi_vectors(fbn);
+ return err;
+}
+
+static int fbnic_open(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ int err;
+
+ err = __fbnic_open(fbn);
+ if (!err)
+ fbnic_up(fbn);
+
+ return err;
+}
+
+static int fbnic_stop(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ fbnic_down(fbn);
+ fbnic_pcs_irq_disable(fbn->fbd);
+
+ fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
+
+ fbnic_free_resources(fbn);
+ fbnic_free_napi_vectors(fbn);
+
+ return 0;
+}
+
+static int fbnic_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_addr *avail_addr;
+
+ if (WARN_ON(!is_valid_ether_addr(addr)))
+ return -EADDRNOTAVAIL;
+
+ avail_addr = __fbnic_uc_sync(fbn->fbd, addr);
+ if (!avail_addr)
+ return -ENOSPC;
+
+ /* Add type flag indicating this address is in use by the host */
+ set_bit(FBNIC_MAC_ADDR_T_UNICAST, avail_addr->act_tcam);
+
+ return 0;
+}
+
+static int fbnic_uc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, ret;
+
+ /* Scan from middle of list to bottom, filling bottom up.
+ * Skip the first entry which is reserved for dev_addr and
+ * leave the last entry to use for promiscuous filtering.
+ */
+ for (i = fbd->mac_addr_boundary, ret = -ENOENT;
+ i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX && ret; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (!ether_addr_equal(mac_addr->value.addr8, addr))
+ continue;
+
+ ret = __fbnic_uc_unsync(mac_addr);
+ }
+
+ return ret;
+}
+
+static int fbnic_mc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_mac_addr *avail_addr;
+
+ if (WARN_ON(!is_multicast_ether_addr(addr)))
+ return -EADDRNOTAVAIL;
+
+ avail_addr = __fbnic_mc_sync(fbn->fbd, addr);
+ if (!avail_addr)
+ return -ENOSPC;
+
+ /* Add type flag indicating this address is in use by the host */
+ set_bit(FBNIC_MAC_ADDR_T_MULTICAST, avail_addr->act_tcam);
+
+ return 0;
+}
+
+static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int i, ret;
+
+ /* Scan from middle of list to top, filling top down.
+ * Skip over the address reserved for the BMC MAC and
+ * exclude index 0 as that belongs to the broadcast address
+ */
+ for (i = fbd->mac_addr_boundary, ret = -ENOENT;
+ --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && ret;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (!ether_addr_equal(mac_addr->value.addr8, addr))
+ continue;
+
+ ret = __fbnic_mc_unsync(mac_addr);
+ }
+
+ return ret;
+}
+
+void __fbnic_set_rx_mode(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ bool uc_promisc = false, mc_promisc = false;
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_mac_addr *mac_addr;
+ int err;
+
+ /* Populate host address from dev_addr */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX];
+ if (!ether_addr_equal(mac_addr->value.addr8, netdev->dev_addr) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ ether_addr_copy(mac_addr->value.addr8, netdev->dev_addr);
+ mac_addr->state = FBNIC_TCAM_S_UPDATE;
+ set_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam);
+ }
+
+ /* Populate broadcast address if broadcast is enabled */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
+ if (netdev->flags & IFF_BROADCAST) {
+ if (!is_broadcast_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_broadcast_addr(mac_addr->value.addr8);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ set_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam);
+ } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BROADCAST);
+ }
+
+ /* Synchronize unicast and multicast address lists */
+ err = __dev_uc_sync(netdev, fbnic_uc_sync, fbnic_uc_unsync);
+ if (err == -ENOSPC)
+ uc_promisc = true;
+ err = __dev_mc_sync(netdev, fbnic_mc_sync, fbnic_mc_unsync);
+ if (err == -ENOSPC)
+ mc_promisc = true;
+
+ uc_promisc |= !!(netdev->flags & IFF_PROMISC);
+ mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;
+
+ /* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
+ if (uc_promisc) {
+ if (!is_zero_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ set_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mc_promisc &&
+ (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
+		/* Multicast needs special handling, as the BMC may already
+		 * have an all-multi rule in place. In that case adding a
+		 * rule of our own won't do any good, so we instead modify
+		 * the existing ALL MULTI rule below.
+		 */
+ if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ mac_addr->value.addr8[0] ^= 1;
+ mac_addr->mask.addr8[0] ^= 1;
+ set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ } else {
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ }
+ }
+
+ /* Add rules for BMC all multicast if it is enabled */
+ fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);
+
+ /* Sift out any unshared BMC rules and place them in BMC only section */
+ fbnic_sift_macda(fbd);
+
+ /* Write updates to hardware */
+ fbnic_write_rules(fbd);
+ fbnic_write_macda(fbd);
+}
+
+static void fbnic_set_rx_mode(struct net_device *netdev)
+{
+ /* No need to update the hardware if we are not running */
+ if (netif_running(netdev))
+ __fbnic_set_rx_mode(netdev);
+}
+
+static int fbnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ fbnic_set_rx_mode(netdev);
+
+ return 0;
+}
+
+void fbnic_clear_rx_mode(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ if (mac_addr->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ bitmap_clear(mac_addr->act_tcam,
+ FBNIC_MAC_ADDR_T_HOST_START,
+ FBNIC_MAC_ADDR_T_HOST_LEN);
+
+ if (bitmap_empty(mac_addr->act_tcam,
+ FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ }
+
+ /* Write updates to hardware */
+ fbnic_write_macda(fbd);
+
+ __dev_uc_unsync(netdev, NULL);
+ __dev_mc_unsync(netdev, NULL);
+}
+
+static const struct net_device_ops fbnic_netdev_ops = {
+ .ndo_open = fbnic_open,
+ .ndo_stop = fbnic_stop,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_start_xmit = fbnic_xmit_frame,
+ .ndo_features_check = fbnic_features_check,
+ .ndo_set_mac_address = fbnic_set_mac,
+ .ndo_set_rx_mode = fbnic_set_rx_mode,
+};
+
+void fbnic_reset_queues(struct fbnic_net *fbn,
+ unsigned int tx, unsigned int rx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ unsigned int max_napis;
+
+ max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
+
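+	/* e.g. with 10 NAPI-capable vectors, a request for 16 Tx and 4 Rx
+	 * queues clamps to 10 Tx / 4 Rx with 10 NAPI instances overall
+	 */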
+ tx = min(tx, max_napis);
+ fbn->num_tx_queues = tx;
+
+ rx = min(rx, max_napis);
+ fbn->num_rx_queues = rx;
+
+ fbn->num_napi = max(tx, rx);
+}
+
+/**
+ * fbnic_netdev_free - Free the netdev associated with fbnic
+ * @fbd: Driver specific structure to free netdev from
+ *
+ * Destroy the phylink instance and free the netdev and its private
+ * structure, unbinding them from the hardware and pci data structures.
+ **/
+void fbnic_netdev_free(struct fbnic_dev *fbd)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+
+ if (fbn->phylink)
+ phylink_destroy(fbn->phylink);
+
+ free_netdev(fbd->netdev);
+ fbd->netdev = NULL;
+}
+
+/**
+ * fbnic_netdev_alloc - Allocate a netdev and associate with fbnic
+ * @fbd: Driver specific structure to associate netdev with
+ *
+ * Allocate and initialize the netdev and netdev private structure. Bind
+ * together the hardware, netdev, and pci data structures.
+ *
+ * Return: Pointer to the allocated netdev on success, NULL on failure
+ **/
+struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
+{
+ struct net_device *netdev;
+ struct fbnic_net *fbn;
+ int default_queues;
+
+ netdev = alloc_etherdev_mq(sizeof(*fbn), FBNIC_MAX_RXQS);
+ if (!netdev)
+ return NULL;
+
+ SET_NETDEV_DEV(netdev, fbd->dev);
+ fbd->netdev = netdev;
+
+ netdev->netdev_ops = &fbnic_netdev_ops;
+
+ fbn = netdev_priv(netdev);
+
+ fbn->netdev = netdev;
+ fbn->fbd = fbd;
+ INIT_LIST_HEAD(&fbn->napis);
+
+ fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
+ fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
+ fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
+ fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;
+
+ default_queues = netif_get_num_default_rss_queues();
+ if (default_queues > fbd->max_num_queues)
+ default_queues = fbd->max_num_queues;
+
+ fbnic_reset_queues(fbn, default_queues, default_queues);
+
+ fbnic_reset_indir_tbl(fbn);
+ fbnic_rss_key_fill(fbn->rss_key);
+ fbnic_rss_init_en_mask(fbn);
+
+ netdev->features |=
+ NETIF_F_RXHASH |
+ NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_RXCSUM;
+
+ netdev->hw_features |= netdev->features;
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_enc_features |= netdev->features;
+
+ netdev->min_mtu = IPV6_MIN_MTU;
+ netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
+
+	/* TBD: This is a workaround for the BMC, as phylink doesn't have
+	 * support for leaving the link enabled if a BMC is present.
+	 */
+ netdev->ethtool->wol_enabled = true;
+
+ fbn->fec = FBNIC_FEC_AUTO | FBNIC_FEC_RS;
+ fbn->link_mode = FBNIC_LINK_AUTO | FBNIC_LINK_50R2;
+ netif_carrier_off(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+
+ if (fbnic_phylink_init(netdev)) {
+ fbnic_netdev_free(fbd);
+ return NULL;
+ }
+
+ return netdev;
+}
+
+static int fbnic_dsn_to_mac_addr(u64 dsn, char *addr)
+{
+ addr[0] = (dsn >> 56) & 0xFF;
+ addr[1] = (dsn >> 48) & 0xFF;
+ addr[2] = (dsn >> 40) & 0xFF;
+ addr[3] = (dsn >> 16) & 0xFF;
+ addr[4] = (dsn >> 8) & 0xFF;
+ addr[5] = dsn & 0xFF;
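+
+	/* e.g. a (hypothetical) DSN of 0x112233fffe445566 drops the two
+	 * middle octets - the typical EUI-64 0xfffe filler - and yields
+	 * the EUI-48 address 11:22:33:44:55:66
+	 */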
+
+ return is_valid_ether_addr(addr) ? 0 : -EINVAL;
+}
+
+/**
+ * fbnic_netdev_register - Initialize general software structures
+ * @netdev: Netdev containing structure to initialize and register
+ *
+ * Initialize the MAC address for the netdev and register it.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int fbnic_netdev_register(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ u64 dsn = fbd->dsn;
+ u8 addr[ETH_ALEN];
+ int err;
+
+ err = fbnic_dsn_to_mac_addr(dsn, addr);
+ if (!err) {
+ ether_addr_copy(netdev->perm_addr, addr);
+ eth_hw_addr_set(netdev, addr);
+ } else {
+ /* A randomly assigned MAC address will cause provisioning
+ * issues so instead just fail to spawn the netdev and
+ * avoid any confusion.
+ */
+ dev_err(fbd->dev, "MAC addr %pM invalid\n", addr);
+ return err;
+ }
+
+ return register_netdev(netdev);
+}
+
+void fbnic_netdev_unregister(struct net_device *netdev)
+{
+ unregister_netdev(netdev);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
new file mode 100644
index 000000000000..6bc0ebeb8182
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_NETDEV_H_
+#define _FBNIC_NETDEV_H_
+
+#include <linux/types.h>
+#include <linux/phylink.h>
+
+#include "fbnic_csr.h"
+#include "fbnic_rpc.h"
+#include "fbnic_txrx.h"
+
+struct fbnic_net {
+ struct fbnic_ring *tx[FBNIC_MAX_TXQS];
+ struct fbnic_ring *rx[FBNIC_MAX_RXQS];
+
+ struct net_device *netdev;
+ struct fbnic_dev *fbd;
+
+ u32 txq_size;
+ u32 hpq_size;
+ u32 ppq_size;
+ u32 rcq_size;
+
+ u16 num_napi;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct phylink_pcs phylink_pcs;
+
+ /* TBD: Remove these when phylink supports FEC and lane config */
+ u8 fec;
+ u8 link_mode;
+
+ u16 num_tx_queues;
+ u16 num_rx_queues;
+
+ u8 indir_tbl[FBNIC_RPC_RSS_TBL_COUNT][FBNIC_RPC_RSS_TBL_SIZE];
+ u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];
+ u32 rss_flow_hash[FBNIC_NUM_HASH_OPT];
+
+ u64 link_down_events;
+
+ struct list_head napis;
+};
+
+int __fbnic_open(struct fbnic_net *fbn);
+void fbnic_up(struct fbnic_net *fbn);
+void fbnic_down(struct fbnic_net *fbn);
+
+struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd);
+void fbnic_netdev_free(struct fbnic_dev *fbd);
+int fbnic_netdev_register(struct net_device *netdev);
+void fbnic_netdev_unregister(struct net_device *netdev);
+void fbnic_reset_queues(struct fbnic_net *fbn,
+ unsigned int tx, unsigned int rx);
+
+void __fbnic_set_rx_mode(struct net_device *netdev);
+void fbnic_clear_rx_mode(struct net_device *netdev);
+
+int fbnic_phylink_init(struct net_device *netdev);
+#endif /* _FBNIC_NETDEV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
new file mode 100644
index 000000000000..a4809fe0fc24
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c
@@ -0,0 +1,564 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rtnetlink.h>
+#include <linux/types.h>
+
+#include "fbnic.h"
+#include "fbnic_drvinfo.h"
+#include "fbnic_netdev.h"
+
+char fbnic_driver_name[] = DRV_NAME;
+
+MODULE_DESCRIPTION(DRV_SUMMARY);
+MODULE_LICENSE("GPL");
+
+static const struct fbnic_info fbnic_asic_info = {
+ .max_num_queues = FBNIC_MAX_QUEUES,
+ .bar_mask = BIT(0) | BIT(4)
+};
+
+static const struct fbnic_info *fbnic_info_tbl[] = {
+ [fbnic_board_asic] = &fbnic_asic_info,
+};
+
+static const struct pci_device_id fbnic_pci_tbl[] = {
+ { PCI_DEVICE_DATA(META, FBNIC_ASIC, fbnic_board_asic) },
+ /* Required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, fbnic_pci_tbl);
+
+u32 fbnic_rd32(struct fbnic_dev *fbd, u32 reg)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr0);
+ u32 value;
+
+ if (!csr)
+ return ~0U;
+
+ value = readl(csr + reg);
+
+	/* If any bits are 0, the value should be valid */
+ if (~value)
+ return value;
+
+ /* All 1's may be valid if ZEROs register still works */
+ if (reg != FBNIC_MASTER_SPARE_0 && ~readl(csr + FBNIC_MASTER_SPARE_0))
+ return value;
+
+ /* Hardware is giving us all 1's reads, assume it is gone */
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+
+ dev_err(fbd->dev,
+ "Failed read (idx 0x%x AKA addr 0x%x), disabled CSR access, awaiting reset\n",
+ reg, reg << 2);
+
+ /* Notify stack that device has lost (PCIe) link */
+ if (!fbnic_init_failure(fbd))
+ netif_device_detach(fbd->netdev);
+
+ return ~0U;
+}
+
+bool fbnic_fw_present(struct fbnic_dev *fbd)
+{
+ return !!READ_ONCE(fbd->uc_addr4);
+}
+
+void fbnic_fw_wr32(struct fbnic_dev *fbd, u32 reg, u32 val)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr4);
+
+ if (csr)
+ writel(val, csr + reg);
+}
+
+u32 fbnic_fw_rd32(struct fbnic_dev *fbd, u32 reg)
+{
+ u32 __iomem *csr = READ_ONCE(fbd->uc_addr4);
+ u32 value;
+
+ if (!csr)
+ return ~0U;
+
+ value = readl(csr + reg);
+
+	/* If any bits are 0, the value should be valid */
+ if (~value)
+ return value;
+
+ /* All 1's may be valid if ZEROs register still works */
+ if (reg != FBNIC_FW_ZERO_REG && ~readl(csr + FBNIC_FW_ZERO_REG))
+ return value;
+
+ /* Hardware is giving us all 1's reads, assume it is gone */
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+
+ dev_err(fbd->dev,
+ "Failed read (idx 0x%x AKA addr 0x%x), disabled CSR access, awaiting reset\n",
+ reg, reg << 2);
+
+ /* Notify stack that device has lost (PCIe) link */
+ if (!fbnic_init_failure(fbd))
+ netif_device_detach(fbd->netdev);
+
+ return ~0U;
+}
+
+static void fbnic_service_task_start(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ schedule_delayed_work(&fbd->service_task, HZ);
+ phylink_resume(fbn->phylink);
+}
+
+static void fbnic_service_task_stop(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ phylink_suspend(fbn->phylink, fbnic_bmc_present(fbd));
+ cancel_delayed_work(&fbd->service_task);
+}
+
+void fbnic_up(struct fbnic_net *fbn)
+{
+ fbnic_enable(fbn);
+
+ fbnic_fill(fbn);
+
+ fbnic_rss_reinit_hw(fbn->fbd, fbn);
+
+ __fbnic_set_rx_mode(fbn->netdev);
+
+ /* Enable Tx/Rx processing */
+ fbnic_napi_enable(fbn);
+ netif_tx_start_all_queues(fbn->netdev);
+
+ fbnic_service_task_start(fbn);
+}
+
+static void fbnic_down_noidle(struct fbnic_net *fbn)
+{
+ fbnic_service_task_stop(fbn);
+
+ /* Disable Tx/Rx Processing */
+ fbnic_napi_disable(fbn);
+ netif_tx_disable(fbn->netdev);
+
+ fbnic_clear_rx_mode(fbn->netdev);
+ fbnic_clear_rules(fbn->fbd);
+ fbnic_rss_disable_hw(fbn->fbd);
+ fbnic_disable(fbn);
+}
+
+void fbnic_down(struct fbnic_net *fbn)
+{
+ fbnic_down_noidle(fbn);
+
+ fbnic_wait_all_queues_idle(fbn->fbd, false);
+
+ fbnic_flush(fbn);
+}
+
+static void fbnic_health_check(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+
+	/* As long as the heart is beating the FW is healthy */
+ if (fbd->fw_heartbeat_enabled)
+ return;
+
+ /* If the Tx mailbox still has messages sitting in it then there likely
+ * isn't anything we can do. We will wait until the mailbox is empty to
+ * report the fault so we can collect the crashlog.
+ */
+ if (tx_mbx->head != tx_mbx->tail)
+ return;
+
+	/* TBD: Need to add a more thorough recovery here.
+	 * Specifically we need to verify what the firmware may have
+	 * changed between our setup and its reboot. It may be enough to
+	 * just perform a down/up. For now we will simply reclaim
+	 * ownership so the heartbeat can catch the next fault.
+	 */
+ fbnic_fw_xmit_ownership_msg(fbd, true);
+}
+
+static void fbnic_service_task(struct work_struct *work)
+{
+ struct fbnic_dev *fbd = container_of(to_delayed_work(work),
+ struct fbnic_dev, service_task);
+
+ rtnl_lock();
+
+ fbnic_fw_check_heartbeat(fbd);
+
+ fbnic_health_check(fbd);
+
+ if (netif_carrier_ok(fbd->netdev))
+ fbnic_napi_depletion_check(fbd->netdev);
+
+ if (netif_running(fbd->netdev))
+ schedule_delayed_work(&fbd->service_task, HZ);
+
+ rtnl_unlock();
+}
+
+/**
+ * fbnic_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in fbnic_pci_tbl
+ *
+ * Initializes a PCI device identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct fbnic_info *info = fbnic_info_tbl[ent->driver_data];
+ struct net_device *netdev;
+ struct fbnic_dev *fbd;
+ int err;
+
+ if (pdev->error_state != pci_channel_io_normal) {
+ dev_err(&pdev->dev,
+ "PCI device still in an error state. Unable to load...\n");
+ return -EIO;
+ }
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "PCI enable device failed: %d\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
+ if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
+ return err;
+ }
+
+ err = pcim_iomap_regions(pdev, info->bar_mask, fbnic_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed: %d\n", err);
+ return err;
+ }
+
+ fbd = fbnic_devlink_alloc(pdev);
+ if (!fbd) {
+ dev_err(&pdev->dev, "Devlink allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* Populate driver with hardware-specific info and handlers */
+ fbd->max_num_queues = info->max_num_queues;
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ INIT_DELAYED_WORK(&fbd->service_task, fbnic_service_task);
+
+ err = fbnic_alloc_irqs(fbd);
+ if (err)
+ goto free_fbd;
+
+ err = fbnic_mac_init(fbd);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize MAC: %d\n", err);
+ goto free_irqs;
+ }
+
+ err = fbnic_fw_enable_mbx(fbd);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Firmware mailbox initialization failure\n");
+ goto free_irqs;
+ }
+
+ fbnic_devlink_register(fbd);
+
+ if (!fbd->dsn) {
+ dev_warn(&pdev->dev, "Reading serial number failed\n");
+ goto init_failure_mode;
+ }
+
+ netdev = fbnic_netdev_alloc(fbd);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Netdev allocation failed\n");
+ goto init_failure_mode;
+ }
+
+ err = fbnic_netdev_register(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Netdev registration failed: %d\n", err);
+ goto ifm_free_netdev;
+ }
+
+ return 0;
+
+ifm_free_netdev:
+ fbnic_netdev_free(fbd);
+init_failure_mode:
+ dev_warn(&pdev->dev, "Probe error encountered, entering init failure mode. Normal networking functionality will not be available.\n");
+ /* Always return 0 even on error so devlink is registered to allow
+ * firmware updates for fixes.
+ */
+ return 0;
+free_irqs:
+ fbnic_free_irqs(fbd);
+free_fbd:
+ pci_disable_device(pdev);
+ fbnic_devlink_free(fbd);
+
+ return err;
+}
+
+/**
+ * fbnic_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * Called by the PCI subsystem to alert the driver that it should release
+ * a PCI device. This could be caused by a Hot-Plug event, or because the
+ * driver is going to be removed from memory.
+ **/
+static void fbnic_remove(struct pci_dev *pdev)
+{
+ struct fbnic_dev *fbd = pci_get_drvdata(pdev);
+
+ if (!fbnic_init_failure(fbd)) {
+ struct net_device *netdev = fbd->netdev;
+
+ fbnic_netdev_unregister(netdev);
+ cancel_delayed_work_sync(&fbd->service_task);
+ fbnic_netdev_free(fbd);
+ }
+
+ fbnic_devlink_unregister(fbd);
+ fbnic_fw_disable_mbx(fbd);
+ fbnic_free_irqs(fbd);
+
+ pci_disable_device(pdev);
+ fbnic_devlink_free(fbd);
+}
+
+static int fbnic_pm_suspend(struct device *dev)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ struct net_device *netdev = fbd->netdev;
+
+ if (fbnic_init_failure(fbd))
+ goto null_uc_addr;
+
+ rtnl_lock();
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ netdev->netdev_ops->ndo_stop(netdev);
+
+ rtnl_unlock();
+
+null_uc_addr:
+ fbnic_fw_disable_mbx(fbd);
+
+ /* Free the IRQs so they aren't trying to occupy sleeping CPUs */
+ fbnic_free_irqs(fbd);
+
+ /* Hardware is about to go away, so switch off MMIO access internally */
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+
+ return 0;
+}
+
+static int __fbnic_pm_resume(struct device *dev)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ struct net_device *netdev = fbd->netdev;
+ void __iomem * const *iomap_table;
+ struct fbnic_net *fbn;
+ int err;
+
+ /* Restore MMIO access */
+ iomap_table = pcim_iomap_table(to_pci_dev(dev));
+ fbd->uc_addr0 = iomap_table[0];
+ fbd->uc_addr4 = iomap_table[4];
+
+ /* Rerequest the IRQs */
+ err = fbnic_alloc_irqs(fbd);
+ if (err)
+ goto err_invalidate_uc_addr;
+
+ fbd->mac->init_regs(fbd);
+
+ /* Re-enable mailbox */
+ err = fbnic_fw_enable_mbx(fbd);
+ if (err)
+ goto err_free_irqs;
+
+ /* No netdev means there isn't a network interface to bring up */
+ if (fbnic_init_failure(fbd))
+ return 0;
+
+ fbn = netdev_priv(netdev);
+
+ /* Reset the queues if needed */
+ fbnic_reset_queues(fbn, fbn->num_tx_queues, fbn->num_rx_queues);
+
+ rtnl_lock();
+
+ if (netif_running(netdev)) {
+ err = __fbnic_open(fbn);
+ if (err)
+ goto err_disable_mbx;
+ }
+
+ rtnl_unlock();
+
+ return 0;
+err_disable_mbx:
+ rtnl_unlock();
+ fbnic_fw_disable_mbx(fbd);
+err_free_irqs:
+ fbnic_free_irqs(fbd);
+err_invalidate_uc_addr:
+ WRITE_ONCE(fbd->uc_addr0, NULL);
+ WRITE_ONCE(fbd->uc_addr4, NULL);
+ return err;
+}
+
+static void __fbnic_pm_attach(struct device *dev)
+{
+ struct fbnic_dev *fbd = dev_get_drvdata(dev);
+ struct net_device *netdev = fbd->netdev;
+ struct fbnic_net *fbn;
+
+ if (fbnic_init_failure(fbd))
+ return;
+
+ fbn = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ fbnic_up(fbn);
+
+ netif_device_attach(netdev);
+}
+
+static int __maybe_unused fbnic_pm_resume(struct device *dev)
+{
+ int err;
+
+ err = __fbnic_pm_resume(dev);
+ if (!err)
+ __fbnic_pm_attach(dev);
+
+ return err;
+}
+
+static const struct dev_pm_ops fbnic_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fbnic_pm_suspend, fbnic_pm_resume)
+};
+
+static void fbnic_shutdown(struct pci_dev *pdev)
+{
+ fbnic_pm_suspend(&pdev->dev);
+}
+
+static pci_ers_result_t fbnic_err_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ /* Disconnect device if failure is not recoverable via reset */
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ fbnic_pm_suspend(&pdev->dev);
+
+ /* Request a slot reset */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t fbnic_err_slot_reset(struct pci_dev *pdev)
+{
+ int err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+
+ if (pci_enable_device_mem(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ /* Restore device to previous state */
+ err = __fbnic_pm_resume(&pdev->dev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+static void fbnic_err_resume(struct pci_dev *pdev)
+{
+ __fbnic_pm_attach(&pdev->dev);
+}
+
+static const struct pci_error_handlers fbnic_err_handler = {
+ .error_detected = fbnic_err_error_detected,
+ .slot_reset = fbnic_err_slot_reset,
+ .resume = fbnic_err_resume,
+};
+
+static struct pci_driver fbnic_driver = {
+ .name = fbnic_driver_name,
+ .id_table = fbnic_pci_tbl,
+ .probe = fbnic_probe,
+ .remove = fbnic_remove,
+ .driver.pm = &fbnic_pm_ops,
+ .shutdown = fbnic_shutdown,
+ .err_handler = &fbnic_err_handler,
+};
+
+/**
+ * fbnic_init_module - Driver Registration Routine
+ *
+ * The first routine called when the driver is loaded. All it does is
+ * register with the PCI subsystem.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+static int __init fbnic_init_module(void)
+{
+ int err;
+
+ err = pci_register_driver(&fbnic_driver);
+ if (err)
+ goto out;
+
+ pr_info(DRV_SUMMARY " (%s)", fbnic_driver.name);
+out:
+ return err;
+}
+module_init(fbnic_init_module);
+
+/**
+ * fbnic_exit_module - Driver Exit Cleanup Routine
+ *
+ * Called just before the driver is removed from memory.
+ **/
+static void __exit fbnic_exit_module(void)
+{
+ pci_unregister_driver(&fbnic_driver);
+}
+module_exit(fbnic_exit_module);
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
new file mode 100644
index 000000000000..1a5e1e719b30
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/phy.h>
+#include <linux/phylink.h>
+
+#include "fbnic.h"
+#include "fbnic_mac.h"
+#include "fbnic_netdev.h"
+
+static struct fbnic_net *
+fbnic_pcs_to_net(struct phylink_pcs *pcs)
+{
+ return container_of(pcs, struct fbnic_net, phylink_pcs);
+}
+
+static void
+fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs,
+ struct phylink_link_state *state)
+{
+ struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ /* For now we use hard-coded defaults and FW config to determine
+ * the current values. In future patches we will add support for
+ * reconfiguring these values and changing link settings.
+ */
+ switch (fbd->fw_cap.link_speed) {
+ case FBNIC_FW_LINK_SPEED_25R1:
+ state->speed = SPEED_25000;
+ break;
+ case FBNIC_FW_LINK_SPEED_50R2:
+ state->speed = SPEED_50000;
+ break;
+ case FBNIC_FW_LINK_SPEED_100R2:
+ state->speed = SPEED_100000;
+ break;
+ default:
+ state->speed = SPEED_UNKNOWN;
+ break;
+ }
+
+ state->duplex = DUPLEX_FULL;
+
+ state->link = fbd->mac->pcs_get_link(fbd);
+}
+
+static int
+fbnic_phylink_pcs_enable(struct phylink_pcs *pcs)
+{
+ struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ return fbd->mac->pcs_enable(fbd);
+}
+
+static void
+fbnic_phylink_pcs_disable(struct phylink_pcs *pcs)
+{
+ struct fbnic_net *fbn = fbnic_pcs_to_net(pcs);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ return fbd->mac->pcs_disable(fbd);
+}
+
+static int
+fbnic_phylink_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
+ phy_interface_t interface,
+ const unsigned long *advertising,
+ bool permit_pause_to_mac)
+{
+ return 0;
+}
+
+static const struct phylink_pcs_ops fbnic_phylink_pcs_ops = {
+ .pcs_config = fbnic_phylink_pcs_config,
+ .pcs_enable = fbnic_phylink_pcs_enable,
+ .pcs_disable = fbnic_phylink_pcs_disable,
+ .pcs_get_state = fbnic_phylink_pcs_get_state,
+};
+
+static struct phylink_pcs *
+fbnic_phylink_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+
+ return &fbn->phylink_pcs;
+}
+
+static void
+fbnic_phylink_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static void
+fbnic_phylink_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbd->mac->link_down(fbd);
+
+ fbn->link_down_events++;
+}
+
+static void
+fbnic_phylink_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct net_device *netdev = to_net_dev(config->dev);
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+
+ fbd->mac->link_up(fbd, tx_pause, rx_pause);
+}
+
+static const struct phylink_mac_ops fbnic_phylink_mac_ops = {
+ .mac_select_pcs = fbnic_phylink_mac_select_pcs,
+ .mac_config = fbnic_phylink_mac_config,
+ .mac_link_down = fbnic_phylink_mac_link_down,
+ .mac_link_up = fbnic_phylink_mac_link_up,
+};
+
+int fbnic_phylink_init(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct phylink *phylink;
+
+ fbn->phylink_pcs.neg_mode = true;
+ fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops;
+
+ fbn->phylink_config.dev = &netdev->dev;
+ fbn->phylink_config.type = PHYLINK_NETDEV;
+ fbn->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
+ MAC_10000FD | MAC_25000FD |
+ MAC_40000FD | MAC_50000FD |
+ MAC_100000FD;
+ fbn->phylink_config.default_an_inband = true;
+
+ __set_bit(PHY_INTERFACE_MODE_XGMII,
+ fbn->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_XLGMII,
+ fbn->phylink_config.supported_interfaces);
+
+ phylink = phylink_create(&fbn->phylink_config, NULL,
+ PHY_INTERFACE_MODE_XLGMII,
+ &fbnic_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ fbn->phylink = phylink;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
new file mode 100644
index 000000000000..c8aa29fc052b
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+
+#include "fbnic.h"
+#include "fbnic_netdev.h"
+#include "fbnic_rpc.h"
+
+void fbnic_reset_indir_tbl(struct fbnic_net *fbn)
+{
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int i;
+
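+	/* ethtool_rxfh_indir_default() spreads entries round-robin:
+	 * table entry i maps to queue i % num_rx.
+	 */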
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx);
+ fbn->indir_tbl[1][i] = ethtool_rxfh_indir_default(i, num_rx);
+ }
+}
+
+void fbnic_rss_key_fill(u32 *buffer)
+{
+ static u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN];
+
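+	/* The key is static so every caller sees the same random key for
+	 * the life of the module. The final dword is masked down since,
+	 * presumably, the last key register only implements a subset of
+	 * its bits.
+	 */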
+ net_get_random_once(rss_key, sizeof(rss_key));
+ rss_key[FBNIC_RPC_RSS_KEY_LAST_IDX] &= FBNIC_RPC_RSS_KEY_LAST_MASK;
+
+ memcpy(buffer, rss_key, sizeof(rss_key));
+}
+
+#define RX_HASH_OPT_L4 \
+ (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
+#define RX_HASH_OPT_L3 \
+ (RXH_IP_SRC | RXH_IP_DST)
+#define RX_HASH_OPT_L2 RXH_L2DA
+
+void fbnic_rss_init_en_mask(struct fbnic_net *fbn)
+{
+ fbn->rss_flow_hash[FBNIC_TCP4_HASH_OPT] = RX_HASH_OPT_L4;
+ fbn->rss_flow_hash[FBNIC_TCP6_HASH_OPT] = RX_HASH_OPT_L4;
+
+ fbn->rss_flow_hash[FBNIC_UDP4_HASH_OPT] = RX_HASH_OPT_L3;
+ fbn->rss_flow_hash[FBNIC_UDP6_HASH_OPT] = RX_HASH_OPT_L3;
+ fbn->rss_flow_hash[FBNIC_IPV4_HASH_OPT] = RX_HASH_OPT_L3;
+ fbn->rss_flow_hash[FBNIC_IPV6_HASH_OPT] = RX_HASH_OPT_L3;
+
+ fbn->rss_flow_hash[FBNIC_ETHER_HASH_OPT] = RX_HASH_OPT_L2;
+}
+
+void fbnic_rss_disable_hw(struct fbnic_dev *fbd)
+{
+ /* Disable RPC by clearing enable bit and configuration */
+ if (!fbnic_bmc_present(fbd))
+ wr32(fbd, FBNIC_RPC_RMI_CONFIG,
+ FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20));
+}
+
+#define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val) \
+ FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem, \
+ FIELD_GET(RXH_##_fh, _val))
+static u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type)
+{
+ u32 flow_hash = fbn->rss_flow_hash[flow_type];
+ u32 rss_en_mask = 0;
+
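+	/* Each RXH_* bit present in flow_hash sets the matching RSS enable
+	 * bit; e.g. RX_HASH_OPT_L4 enables IP_SRC, IP_DST, L4_SRC and
+	 * L4_DST while leaving L2_DA clear.
+	 */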
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L2DA, L2_DA, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_SRC, IP_SRC, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_DST, IP_DST, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_0_1, L4_SRC, flow_hash);
+ rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_2_3, L4_DST, flow_hash);
+
+ return rss_en_mask;
+}
+
+void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn)
+{
+ unsigned int i;
+
+ for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) {
+ wr32(fbd, FBNIC_RPC_RSS_TBL(0, i), fbn->indir_tbl[0][i]);
+ wr32(fbd, FBNIC_RPC_RSS_TBL(1, i), fbn->indir_tbl[1][i]);
+ }
+
+ for (i = 0; i < FBNIC_RPC_RSS_KEY_DWORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_RSS_KEY(i), fbn->rss_key[i]);
+
+	/* Default action is to drop w/ no destination */
+ wr32(fbd, FBNIC_RPC_ACT_TBL0_DEFAULT, FBNIC_RPC_ACT_TBL0_DROP);
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_ACT_TBL1_DEFAULT, 0);
+
+ /* If it isn't already enabled set the RMI Config value to enable RPC */
+ wr32(fbd, FBNIC_RPC_RMI_CONFIG,
+ FIELD_PREP(FBNIC_RPC_RMI_CONFIG_MTU, FBNIC_MAX_JUMBO_FRAME_SIZE) |
+ FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20) |
+ FBNIC_RPC_RMI_CONFIG_ENABLE);
+}
+
+void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd,
+ bool enable_host)
+{
+ struct fbnic_act_tcam *act_tcam;
+ struct fbnic_mac_addr *mac_addr;
+ int j;
+
+ /* We need to add the all multicast filter at the end of the
+ * multicast address list. This way if there are any that are
+ * shared between the host and the BMC they can be directed to
+ * both. Otherwise the remainder just get sent directly to the
+ * BMC.
+ */
+ mac_addr = &fbd->mac_addr[fbd->mac_addr_boundary - 1];
+ if (fbnic_bmc_present(fbd) && fbd->fw_cap.all_multi) {
+ if (mac_addr->state != FBNIC_TCAM_S_VALID) {
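+			/* Build an all-multicast wildcard: only the group
+			 * bit of the first octet is set in the value, and
+			 * only that bit is cleared in the mask, so every
+			 * other bit is treated as don't-care.
+			 */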
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ mac_addr->value.addr8[0] ^= 1;
+ mac_addr->mask.addr8[0] ^= 1;
+ set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ if (enable_host)
+ set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ else
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ } else if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam) &&
+ !is_zero_ether_addr(mac_addr->mask.addr8) &&
+ mac_addr->state == FBNIC_TCAM_S_VALID) {
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ }
+
+	/* We have to handle multicast specially as the BMC may already
+	 * have an all-multi rule in place. Adding a rule of our own would
+	 * do no good in that case, so instead we modify the BMC's ALL
+	 * MULTI rule below.
+	 */
+ act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET];
+
+ /* If we are not enabling the rule just delete it. We will fall
+ * back to the RSS rules that support the multicast addresses.
+ */
+ if (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi || enable_host) {
+ if (act_tcam->state == FBNIC_TCAM_S_VALID)
+ act_tcam->state = FBNIC_TCAM_S_DELETE;
+ return;
+ }
+
+ /* Rewrite TCAM rule 23 to handle BMC all-multi traffic */
+ act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ act_tcam->mask.tcam[0] = 0xffff;
+
+ /* MACDA 0 - 3 is reserved for the BMC MAC address */
+ act_tcam->value.tcam[1] =
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ fbd->mac_addr_boundary - 1) |
+ FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ act_tcam->mask.tcam[1] = 0xffff &
+ ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX &
+ ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+
+ for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+}
+
+void fbnic_bmc_rpc_init(struct fbnic_dev *fbd)
+{
+ int i = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX;
+ struct fbnic_act_tcam *act_tcam;
+ struct fbnic_mac_addr *mac_addr;
+ int j;
+
+ /* Check if BMC is present */
+ if (!fbnic_bmc_present(fbd))
+ return;
+
+ /* Fetch BMC MAC addresses from firmware capabilities */
+ for (j = 0; j < 4; j++) {
+ u8 *bmc_mac = fbd->fw_cap.bmc_mac_addr[j];
+
+ /* Validate BMC MAC addresses */
+ if (is_zero_ether_addr(bmc_mac))
+ continue;
+
+ if (is_multicast_ether_addr(bmc_mac))
+ mac_addr = __fbnic_mc_sync(fbd, bmc_mac);
+ else
+ mac_addr = &fbd->mac_addr[i++];
+
+ if (!mac_addr) {
+ netdev_err(fbd->netdev,
+ "No slot for BMC MAC address[%d]\n", j);
+ continue;
+ }
+
+ ether_addr_copy(mac_addr->value.addr8, bmc_mac);
+ eth_zero_addr(mac_addr->mask.addr8);
+
+ set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+	/* Ensure broadcast is also present; record it and tag it */
+ mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
+ eth_broadcast_addr(mac_addr->value.addr8);
+ set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+
+ /* Rewrite TCAM rule 0 if it isn't present to relocate BMC rules */
+ act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_OFFSET];
+ act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ act_tcam->mask.tcam[0] = 0xffff;
+
+	/* MACDA 0 - 3 is reserved for the BMC MAC addresses. To account
+	 * for that we mask out the lower 2 bits of the MACDA index by
+	 * ANDing it with 0x1c.
+	 */
+ act_tcam->value.tcam[1] = FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ act_tcam->mask.tcam[1] = 0xffff &
+ ~FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, 0x1c) &
+ ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+
+ for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+
+ fbnic_bmc_rpc_all_multi_config(fbd, false);
+}
+
+#define FBNIC_ACT1_INIT(_l4, _udp, _ip, _v6) \
+ (((_l4) ? FBNIC_RPC_TCAM_ACT1_L4_VALID : 0) | \
+ ((_udp) ? FBNIC_RPC_TCAM_ACT1_L4_IS_UDP : 0) | \
+ ((_ip) ? FBNIC_RPC_TCAM_ACT1_IP_VALID : 0) | \
+ ((_v6) ? FBNIC_RPC_TCAM_ACT1_IP_IS_V6 : 0))
+
+void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn)
+{
+ static const u32 act1_value[FBNIC_NUM_HASH_OPT] = {
+ FBNIC_ACT1_INIT(1, 1, 1, 1), /* UDP6 */
+ FBNIC_ACT1_INIT(1, 1, 1, 0), /* UDP4 */
+ FBNIC_ACT1_INIT(1, 0, 1, 1), /* TCP6 */
+ FBNIC_ACT1_INIT(1, 0, 1, 0), /* TCP4 */
+ FBNIC_ACT1_INIT(0, 0, 1, 1), /* IP6 */
+ FBNIC_ACT1_INIT(0, 0, 1, 0), /* IP4 */
+ 0 /* Ether */
+ };
+ unsigned int i;
+
+ /* To support scenarios where a BMC is present we must write the
+ * rules twice, once for the unicast cases, and once again for
+ * the broadcast/multicast cases as we have to support 2 destinations.
+ */
+ BUILD_BUG_ON(FBNIC_RSS_EN_NUM_UNICAST * 2 != FBNIC_RSS_EN_NUM_ENTRIES);
+ BUILD_BUG_ON(ARRAY_SIZE(act1_value) != FBNIC_NUM_HASH_OPT);
+
+ /* Program RSS hash enable mask for host in action TCAM/table. */
+ for (i = fbnic_bmc_present(fbd) ? 0 : FBNIC_RSS_EN_NUM_UNICAST;
+ i < FBNIC_RSS_EN_NUM_ENTRIES; i++) {
+ unsigned int idx = i + FBNIC_RPC_ACT_TBL_RSS_OFFSET;
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx];
+ u32 flow_hash, dest, rss_en_mask;
+ int flow_type, j;
+ u16 value = 0;
+
+ flow_type = i % FBNIC_RSS_EN_NUM_UNICAST;
+ flow_hash = fbn->rss_flow_hash[flow_type];
+
+ /* Set DEST_HOST based on absence of RXH_DISCARD */
+ dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ !(RXH_DISCARD & flow_hash) ?
+ FBNIC_RPC_ACT_TBL0_DEST_HOST : 0);
+
+ if (i >= FBNIC_RSS_EN_NUM_UNICAST && fbnic_bmc_present(fbd))
+ dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+
+ if (!dest)
+ dest = FBNIC_RPC_ACT_TBL0_DROP;
+
+ if (act1_value[flow_type] & FBNIC_RPC_TCAM_ACT1_L4_VALID)
+ dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L4);
+
+ rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, flow_type);
+
+ act_tcam->dest = dest;
+ act_tcam->rss_en_mask = rss_en_mask;
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+
+ act_tcam->mask.tcam[0] = 0xffff;
+
+		/* We reserve the upper 8 MACDA TCAM entries for host
+		 * unicast. So we set the value to 24, and mask off the
+		 * lower bits so that the lower entries can be used as
+		 * multicast or BMC addresses.
+		 */
+ if (i < FBNIC_RSS_EN_NUM_UNICAST)
+ value = FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX,
+ fbd->mac_addr_boundary);
+ value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+
+ flow_type = i % FBNIC_RSS_EN_NUM_UNICAST;
+ value |= act1_value[flow_type];
+
+ act_tcam->value.tcam[1] = value;
+ act_tcam->mask.tcam[1] = ~value;
+
+ for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ }
+}
+
+struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr)
+{
+ struct fbnic_mac_addr *avail_addr = NULL;
+ unsigned int i;
+
+ /* Scan from middle of list to bottom, filling bottom up.
+ * Skip the first entry which is reserved for dev_addr and
+ * leave the last entry to use for promiscuous filtering.
+ */
+ for (i = fbd->mac_addr_boundary - 1;
+ i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX; i++) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = mac_addr;
+ } else if (ether_addr_equal(mac_addr->value.addr8, addr)) {
+ avail_addr = mac_addr;
+ break;
+ }
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ether_addr_copy(avail_addr->value.addr8, addr);
+ eth_zero_addr(avail_addr->mask.addr8);
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr)
+{
+ struct fbnic_mac_addr *avail_addr = NULL;
+ unsigned int i;
+
+	/* Scan from middle of list to top, filling top down.
+	 * Skip over the entries reserved for the BMC MACs and stop
+	 * before the broadcast address entry.
+	 */
+ for (i = fbd->mac_addr_boundary;
+ --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (mac_addr->state == FBNIC_TCAM_S_DISABLED) {
+ avail_addr = mac_addr;
+ } else if (ether_addr_equal(mac_addr->value.addr8, addr)) {
+ avail_addr = mac_addr;
+ break;
+ }
+ }
+
+	/* Scan the BMC addresses to see if the BMC may have already
+	 * reserved the address.
+	 */
+ while (--i) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
+
+ if (!is_zero_ether_addr(mac_addr->mask.addr8))
+ continue;
+
+ /* Only move on if we find a match */
+ if (!ether_addr_equal(mac_addr->value.addr8, addr))
+ continue;
+
+ /* We need to pull this address to the shared area */
+ if (avail_addr) {
+ memcpy(avail_addr, mac_addr, sizeof(*mac_addr));
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ break;
+ }
+
+ if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) {
+ ether_addr_copy(avail_addr->value.addr8, addr);
+ eth_zero_addr(avail_addr->mask.addr8);
+ avail_addr->state = FBNIC_TCAM_S_ADD;
+ }
+
+ return avail_addr;
+}
+
+int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx)
+{
+ if (!test_and_clear_bit(tcam_idx, mac_addr->act_tcam))
+ return -ENOENT;
+
+ if (bitmap_empty(mac_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+
+ return 0;
+}
+
+void fbnic_sift_macda(struct fbnic_dev *fbd)
+{
+ int dest, src;
+
+ /* Move BMC only addresses back into BMC region */
+ for (dest = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX,
+ src = FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX;
+ ++dest < FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX &&
+ src < fbd->mac_addr_boundary;) {
+ struct fbnic_mac_addr *dest_addr = &fbd->mac_addr[dest];
+
+ if (dest_addr->state != FBNIC_TCAM_S_DISABLED)
+ continue;
+
+ while (src < fbd->mac_addr_boundary) {
+ struct fbnic_mac_addr *src_addr = &fbd->mac_addr[src++];
+
+ /* Verify BMC bit is set */
+ if (!test_bit(FBNIC_MAC_ADDR_T_BMC, src_addr->act_tcam))
+ continue;
+
+ /* Verify filter isn't already disabled */
+ if (src_addr->state == FBNIC_TCAM_S_DISABLED ||
+ src_addr->state == FBNIC_TCAM_S_DELETE)
+ continue;
+
+ /* Verify only BMC bit is set */
+ if (bitmap_weight(src_addr->act_tcam,
+ FBNIC_RPC_TCAM_ACT_NUM_ENTRIES) != 1)
+ continue;
+
+ /* Verify we are not moving wildcard address */
+ if (!is_zero_ether_addr(src_addr->mask.addr8))
+ continue;
+
+ memcpy(dest_addr, src_addr, sizeof(*src_addr));
+ src_addr->state = FBNIC_TCAM_S_DELETE;
+			dest_addr->state = FBNIC_TCAM_S_ADD;
+
+			/* Stop so we don't overwrite the entry just moved */
+			break;
+ }
+ }
+}
+
+static void fbnic_clear_macda_entry(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), 0);
+}
+
+static void fbnic_clear_macda(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ if (mac_addr->state == FBNIC_TCAM_S_DISABLED)
+ continue;
+
+ if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
+ if (fbnic_bmc_present(fbd))
+ continue;
+ dev_warn_once(fbd->dev,
+ "Found BMC MAC address w/ BMC not present\n");
+ }
+
+ fbnic_clear_macda_entry(fbd, idx);
+
+ /* If rule was already destined for deletion just wipe it now */
+ if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
+ memset(mac_addr, 0, sizeof(*mac_addr));
+ continue;
+ }
+
+ /* Change state to update so that we will rewrite
+ * this tcam the next time fbnic_write_macda is called.
+ */
+ mac_addr->state = FBNIC_TCAM_S_UPDATE;
+ }
+}
+
+static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx,
+ struct fbnic_mac_addr *mac_addr)
+{
+ __be16 *mask, *value;
+ int i;
+
+ mask = &mac_addr->mask.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1];
+ value = &mac_addr->value.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1];
+
+ for (i = 0; i < FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_MACDA_MASK, ntohs(*mask--)) |
+ FIELD_PREP(FBNIC_RPC_TCAM_MACDA_VALUE, ntohs(*value--)));
+
+ wrfl(fbd);
+
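+	/* Write the validate bit only after the flush so the entry
+	 * cannot match until all of its mask/value words are in place.
+	 */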
+ wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+}
+
+void fbnic_write_macda(struct fbnic_dev *fbd)
+{
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+		/* Skip the entry if the update flag is not set */
+ if (!(mac_addr->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ /* Clear by writing 0s. */
+ if (mac_addr->state == FBNIC_TCAM_S_DELETE) {
+ /* Invalidate entry and clear addr state info */
+ fbnic_clear_macda_entry(fbd, idx);
+ memset(mac_addr, 0, sizeof(*mac_addr));
+
+ continue;
+ }
+
+ fbnic_write_macda_entry(fbd, idx, mac_addr);
+
+ mac_addr->state = FBNIC_TCAM_S_VALID;
+ }
+}
+
+static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
+{
+ int i;
+
+ /* Invalidate entry and clear addr state info */
+ for (i = 0; i <= FBNIC_RPC_TCAM_ACT_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), 0);
+}
+
+void fbnic_clear_rules(struct fbnic_dev *fbd)
+{
+ u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1;
+ struct fbnic_act_tcam *act_tcam;
+
+ /* Clear MAC rules */
+ fbnic_clear_macda(fbd);
+
+ /* If BMC is present we need to preserve the last rule which
+ * will be used to route traffic to the BMC if it is received.
+ *
+ * At this point it should be the only MAC address in the MACDA
+ * so any unicast or multicast traffic received should be routed
+ * to it. So leave the last rule in place.
+ *
+ * It will be rewritten to add the host again when we bring
+ * the interface back up.
+ */
+ if (fbnic_bmc_present(fbd)) {
+ act_tcam = &fbd->act_tcam[i];
+
+ if (act_tcam->state == FBNIC_TCAM_S_VALID &&
+ (act_tcam->dest & dest)) {
+ wr32(fbd, FBNIC_RPC_ACT_TBL0(i), dest);
+ wr32(fbd, FBNIC_RPC_ACT_TBL1(i), 0);
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+
+ i--;
+ }
+ }
+
+ /* Work from the bottom up deleting all other rules from hardware */
+ do {
+ act_tcam = &fbd->act_tcam[i];
+
+ if (act_tcam->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ fbnic_clear_act_tcam(fbd, i);
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+ } while (i--);
+}
+
+static void fbnic_delete_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
+{
+ fbnic_clear_act_tcam(fbd, idx);
+ memset(&fbd->act_tcam[idx], 0, sizeof(struct fbnic_act_tcam));
+}
+
+static void fbnic_update_act_tcam(struct fbnic_dev *fbd, unsigned int idx)
+{
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx];
+ int i;
+
+ /* Update entry by writing the destination and RSS mask */
+ wr32(fbd, FBNIC_RPC_ACT_TBL0(idx), act_tcam->dest);
+ wr32(fbd, FBNIC_RPC_ACT_TBL1(idx), act_tcam->rss_en_mask);
+
+ /* Write new TCAM rule to hardware */
+ for (i = 0; i < FBNIC_RPC_TCAM_ACT_WORD_LEN; i++)
+ wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i),
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT_MASK,
+ act_tcam->mask.tcam[i]) |
+ FIELD_PREP(FBNIC_RPC_TCAM_ACT_VALUE,
+ act_tcam->value.tcam[i]));
+
+ wrfl(fbd);
+
+ wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), FBNIC_RPC_TCAM_VALIDATE);
+ act_tcam->state = FBNIC_TCAM_S_VALID;
+}
+
+void fbnic_write_rules(struct fbnic_dev *fbd)
+{
+ int i;
+
+ /* Flush any pending action table rules */
+ for (i = 0; i < FBNIC_RPC_ACT_TBL_NUM_ENTRIES; i++) {
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i];
+
+		/* Skip the entry if the update flag is not set */
+ if (!(act_tcam->state & FBNIC_TCAM_S_UPDATE))
+ continue;
+
+ if (act_tcam->state == FBNIC_TCAM_S_DELETE)
+ fbnic_delete_act_tcam(fbd, i);
+ else
+ fbnic_update_act_tcam(fbd, i);
+ }
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
new file mode 100644
index 000000000000..d62935f722a2
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_RPC_H_
+#define _FBNIC_RPC_H_
+
+#include <uapi/linux/in6.h>
+#include <linux/bitfield.h>
+
+/* The TCAM state definitions follow an expected ordering.
+ * They start out disabled, then move through the following states:
+ * Disabled 0 -> Add 2
+ * Add 2 -> Valid 1
+ *
+ * Valid 1 -> Add/Update 2
+ * Add 2 -> Valid 1
+ *
+ * Valid 1 -> Delete 3
+ * Delete 3 -> Disabled 0
+ */
+enum {
+ FBNIC_TCAM_S_DISABLED = 0,
+ FBNIC_TCAM_S_VALID = 1,
+ FBNIC_TCAM_S_ADD = 2,
+ FBNIC_TCAM_S_UPDATE = FBNIC_TCAM_S_ADD,
+ FBNIC_TCAM_S_DELETE = 3,
+};
+
+/* 32 MAC Destination Address TCAM Entries
+ * 4 registers DA[1:0], DA[3:2], DA[5:4], Validate
+ */
+#define FBNIC_RPC_TCAM_MACDA_WORD_LEN 3
+#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32
+
+#define FBNIC_RPC_TCAM_ACT_WORD_LEN 11
+#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64
+
+struct fbnic_mac_addr {
+ union {
+ unsigned char addr8[ETH_ALEN];
+ __be16 addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN];
+ } mask, value;
+ unsigned char state;
+ DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES);
+};
+
+struct fbnic_act_tcam {
+ struct {
+ u16 tcam[FBNIC_RPC_TCAM_ACT_WORD_LEN];
+ } mask, value;
+ unsigned char state;
+ u16 rss_en_mask;
+ u32 dest;
+};
+
+enum {
+ FBNIC_RSS_EN_HOST_UDP6,
+ FBNIC_RSS_EN_HOST_UDP4,
+ FBNIC_RSS_EN_HOST_TCP6,
+ FBNIC_RSS_EN_HOST_TCP4,
+ FBNIC_RSS_EN_HOST_IP6,
+ FBNIC_RSS_EN_HOST_IP4,
+ FBNIC_RSS_EN_HOST_ETHER,
+ FBNIC_RSS_EN_XCAST_UDP6,
+#define FBNIC_RSS_EN_NUM_UNICAST FBNIC_RSS_EN_XCAST_UDP6
+ FBNIC_RSS_EN_XCAST_UDP4,
+ FBNIC_RSS_EN_XCAST_TCP6,
+ FBNIC_RSS_EN_XCAST_TCP4,
+ FBNIC_RSS_EN_XCAST_IP6,
+ FBNIC_RSS_EN_XCAST_IP4,
+ FBNIC_RSS_EN_XCAST_ETHER,
+ FBNIC_RSS_EN_NUM_ENTRIES
+};
+
+/* Reserve the first 2 entries for use by the BMC so that we can
+ * avoid allowing rules to get in the way of BMC unicast traffic.
+ */
+#define FBNIC_RPC_ACT_TBL_BMC_OFFSET 0
+#define FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET 1
+
+/* We reserve the last 14 entries for RSS rules on the host. The BMC
+ * unicast rule will need to be populated above these and is expected to
+ * use MACDA TCAM entry 23 to store the BMC MAC address.
+ */
+#define FBNIC_RPC_ACT_TBL_RSS_OFFSET \
+ (FBNIC_RPC_ACT_TBL_NUM_ENTRIES - FBNIC_RSS_EN_NUM_ENTRIES)
+
+/* Flags used to identify the owner of this MAC filter. Note that any
+ * flags set for Broadcast through Promisc indicate that the rule belongs
+ * to the RSS filters for the host.
+ */
+enum {
+ FBNIC_MAC_ADDR_T_BMC = 0,
+ FBNIC_MAC_ADDR_T_BROADCAST = FBNIC_RPC_ACT_TBL_RSS_OFFSET,
+#define FBNIC_MAC_ADDR_T_HOST_START FBNIC_MAC_ADDR_T_BROADCAST
+ FBNIC_MAC_ADDR_T_MULTICAST,
+ FBNIC_MAC_ADDR_T_UNICAST,
+	FBNIC_MAC_ADDR_T_ALLMULTI,	/* BROADCAST ... MULTICAST */
+ FBNIC_MAC_ADDR_T_PROMISC, /* BROADCAST ... UNICAST */
+ FBNIC_MAC_ADDR_T_HOST_LAST
+};
+
+#define FBNIC_MAC_ADDR_T_HOST_LEN \
+ (FBNIC_MAC_ADDR_T_HOST_LAST - FBNIC_MAC_ADDR_T_HOST_START)
+
+#define FBNIC_RPC_TCAM_ACT0_IPSRC_IDX CSR_GENMASK(2, 0)
+#define FBNIC_RPC_TCAM_ACT0_IPSRC_VALID CSR_BIT(3)
+#define FBNIC_RPC_TCAM_ACT0_IPDST_IDX CSR_GENMASK(6, 4)
+#define FBNIC_RPC_TCAM_ACT0_IPDST_VALID CSR_BIT(7)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX CSR_GENMASK(10, 8)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID CSR_BIT(11)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX CSR_GENMASK(14, 12)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID CSR_BIT(15)
+
+#define FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX CSR_GENMASK(9, 5)
+#define FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID CSR_BIT(10)
+#define FBNIC_RPC_TCAM_ACT1_IP_IS_V6 CSR_BIT(11)
+#define FBNIC_RPC_TCAM_ACT1_IP_VALID CSR_BIT(12)
+#define FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID CSR_BIT(13)
+#define FBNIC_RPC_TCAM_ACT1_L4_IS_UDP CSR_BIT(14)
+#define FBNIC_RPC_TCAM_ACT1_L4_VALID CSR_BIT(15)
+
+/* TCAM 0 - 3 reserved for BMC MAC addresses */
+#define FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX 0
+/* TCAM 4 reserved for broadcast MAC address */
+#define FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX 4
+/* TCAMs 5 - 30 will be used for multicast and unicast addresses. The
+ * boundary between the two is variable; it is currently set to 24,
+ * which is where the unicast addresses start. The general idea is that
+ * we will always go top-down with unicast, and bottom-up with multicast
+ * so that there should be free space in the middle between the two.
+ *
+ * The entry at MACDA_DEFAULT_BOUNDARY is a special case as it can be
+ * used for the ALL MULTI address if the list is full, or the BMC has
+ * requested it.
+ */
+#define FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX 5
+#define FBNIC_RPC_TCAM_MACDA_DEFAULT_BOUNDARY 24
+#define FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX 30
+/* Reserved for use to record Multicast promisc, or Promiscuous */
+#define FBNIC_RPC_TCAM_MACDA_PROMISC_IDX 31
+
+enum {
+ FBNIC_UDP6_HASH_OPT,
+ FBNIC_UDP4_HASH_OPT,
+ FBNIC_TCP6_HASH_OPT,
+ FBNIC_TCP4_HASH_OPT,
+#define FBNIC_L4_HASH_OPT FBNIC_TCP4_HASH_OPT
+ FBNIC_IPV6_HASH_OPT,
+ FBNIC_IPV4_HASH_OPT,
+#define FBNIC_IP_HASH_OPT FBNIC_IPV4_HASH_OPT
+ FBNIC_ETHER_HASH_OPT,
+ FBNIC_NUM_HASH_OPT,
+};
+
+struct fbnic_dev;
+struct fbnic_net;
+
+void fbnic_bmc_rpc_init(struct fbnic_dev *fbd);
+void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd, bool enable_host);
+
+void fbnic_reset_indir_tbl(struct fbnic_net *fbn);
+void fbnic_rss_key_fill(u32 *buffer);
+void fbnic_rss_init_en_mask(struct fbnic_net *fbn);
+void fbnic_rss_disable_hw(struct fbnic_dev *fbd);
+void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+
+int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx);
+struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr);
+struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr);
+void fbnic_sift_macda(struct fbnic_dev *fbd);
+void fbnic_write_macda(struct fbnic_dev *fbd);
+
+static inline int __fbnic_uc_unsync(struct fbnic_mac_addr *mac_addr)
+{
+ return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_UNICAST);
+}
+
+static inline int __fbnic_mc_unsync(struct fbnic_mac_addr *mac_addr)
+{
+ return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_MULTICAST);
+}
+
+void fbnic_clear_rules(struct fbnic_dev *fbd);
+void fbnic_write_rules(struct fbnic_dev *fbd);
+#endif /* _FBNIC_RPC_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
new file mode 100644
index 000000000000..2a174ab062a3
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/once.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <uapi/linux/if_ether.h>
+
+#include "fbnic_tlv.h"
+
+/**
+ * fbnic_tlv_msg_alloc - Allocate page and initialize FW message header
+ * @msg_id: Identifier for new message we are starting
+ *
+ * Return: pointer to start of message, or NULL on failure.
+ *
+ * Allocates a page and initializes message header at start of page.
+ * Initial message size is 1 DWORD which is just the header.
+ **/
+struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id)
+{
+ struct fbnic_tlv_hdr hdr = { 0 };
+ struct fbnic_tlv_msg *msg;
+
+ msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_KERNEL);
+ if (!msg)
+ return NULL;
+
+ /* Start with zero filled header and then back fill with data */
+ hdr.type = msg_id;
+ hdr.is_msg = 1;
+ hdr.len = cpu_to_le16(1);
+
+ /* Copy header into start of message */
+ msg->hdr = hdr;
+
+ return msg;
+}
+
+/**
+ * fbnic_tlv_attr_put_flag - Add flag value to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds a 1 DWORD flag attribute to the message. The presence of this
+ * attribute can be used as a boolean value indicating true, otherwise the
+ * value is considered false.
+ **/
+int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id)
+{
+ int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
+ struct fbnic_tlv_hdr hdr = { 0 };
+ struct fbnic_tlv_msg *attr;
+
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ if (attr_max_len < sizeof(*attr))
+ return -ENOSPC;
+
+ /* Get header pointer and bump attr to start of data */
+ attr = &msg[le16_to_cpu(msg->hdr.len)];
+
+ /* Record attribute type and size */
+ hdr.type = attr_id;
+ hdr.len = cpu_to_le16(sizeof(hdr));
+
+ attr->hdr = hdr;
+ le16_add_cpu(&msg->hdr.len,
+ FBNIC_TLV_MSG_SIZE(le16_to_cpu(hdr.len)));
+
+ return 0;
+}
+
+/**
+ * fbnic_tlv_attr_put_value - Add data to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @value: Pointer to data to be stored
+ * @len: Size of data to be stored.
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by value into the message. The
+ * result is rounded up to the nearest DWORD for sizing so that the
+ * headers remain aligned.
+ *
+ * The assumption is that the value field is in a format where byte
+ * ordering can be guaranteed such as a byte array or a little endian
+ * format.
+ **/
+int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const void *value, const int len)
+{
+ int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
+ struct fbnic_tlv_hdr hdr = { 0 };
+ struct fbnic_tlv_msg *attr;
+
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ if (attr_max_len < sizeof(*attr) + len)
+ return -ENOSPC;
+
+ /* Get header pointer and bump attr to start of data */
+ attr = &msg[le16_to_cpu(msg->hdr.len)];
+
+ /* Record attribute type and size */
+ hdr.type = attr_id;
+ hdr.len = cpu_to_le16(sizeof(hdr) + len);
+
+ /* Zero pad end of region to be written if we aren't aligned */
+ if (len % sizeof(hdr))
+ attr->value[len / sizeof(hdr)] = 0;
+
+ /* Copy data over */
+ memcpy(attr->value, value, len);
+
+ attr->hdr = hdr;
+ le16_add_cpu(&msg->hdr.len,
+ FBNIC_TLV_MSG_SIZE(le16_to_cpu(hdr.len)));
+
+ return 0;
+}
+
+/**
+ * __fbnic_tlv_attr_put_int - Add integer to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @value: Data to be stored
+ * @len: Size of data to be stored, either 4 or 8 bytes.
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by value into the message. Will
+ * format the data as little endian.
+ **/
+int __fbnic_tlv_attr_put_int(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ s64 value, const int len)
+{
+ __le64 le64_value = cpu_to_le64(value);
+
+ return fbnic_tlv_attr_put_value(msg, attr_id, &le64_value, len);
+}
+
+/**
+ * fbnic_tlv_attr_put_mac_addr - Add mac_addr to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @mac_addr: Byte pointer to MAC address to be stored
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by mac_addr into the message. Will
+ * copy the address raw so it will be in big endian with start of MAC
+ * address at start of attribute.
+ **/
+int fbnic_tlv_attr_put_mac_addr(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const u8 *mac_addr)
+{
+ return fbnic_tlv_attr_put_value(msg, attr_id, mac_addr, ETH_ALEN);
+}
+
+/**
+ * fbnic_tlv_attr_put_string - Add string to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ * @string: Byte pointer to null terminated string to be stored
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by string into the message. Will
+ * copy the address raw so it will be in byte order.
+ **/
+int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
+ const char *string)
+{
+ int attr_max_len = PAGE_SIZE - sizeof(*msg);
+ int str_len = 1;
+
+	/* The max length is the page size minus the existing message and
+	 * the new attribute header. Since the message length is measured
+	 * in DWORDs we have to multiply it by 4.
+	 *
+	 * The string length doesn't include the \0 so we have to add one
+	 * to the final value, so start with that as our initial value.
+	 *
+	 * We will verify if the string will fit in fbnic_tlv_attr_put_value()
+	 */
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ str_len += strnlen(string, attr_max_len);
+
+ return fbnic_tlv_attr_put_value(msg, attr_id, string, str_len);
+}
+
+/**
+ * fbnic_tlv_attr_get_unsigned - Retrieve unsigned value from result
+ * @attr: Attribute to retrieve data from
+ *
+ * Return: unsigned 64b value containing integer value
+ **/
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr)
+{
+ __le64 le64_value = 0;
+
+ memcpy(&le64_value, &attr->value[0],
+ le16_to_cpu(attr->hdr.len) - sizeof(*attr));
+
+ return le64_to_cpu(le64_value);
+}
+
+/**
+ * fbnic_tlv_attr_get_signed - Retrieve signed value from result
+ * @attr: Attribute to retrieve data from
+ *
+ * Return: signed 64b value containing integer value
+ **/
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr)
+{
+ int shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8;
+ __le64 le64_value = 0;
+ s64 value;
+
+ /* Copy the value and adjust for byte ordering */
+ memcpy(&le64_value, &attr->value[0],
+ le16_to_cpu(attr->hdr.len) - sizeof(*attr));
+ value = le64_to_cpu(le64_value);
+
+ /* Sign extend the return value by using a pair of shifts */
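+	/* e.g. a 4-byte attribute has hdr.len == 8, so shift == 32 and a
+	 * stored value of 0xffffffff is recovered as -1.
+	 */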
+ return (value << shift) >> shift;
+}
+
+/**
+ * fbnic_tlv_attr_get_string - Retrieve string value from result
+ * @attr: Attribute to retrieve data from
+ * @str: Pointer to an allocated string to store the data
+ * @max_size: The maximum size which can be in str
+ *
+ * Return: the size of the string read from firmware
+ **/
+size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
+ size_t max_size)
+{
+ max_size = min_t(size_t, max_size,
+ (le16_to_cpu(attr->hdr.len) * 4) - sizeof(*attr));
+ memcpy(str, &attr->value, max_size);
+
+ return max_size;
+}
+
+/**
+ * fbnic_tlv_attr_nest_start - Add nested attribute header to message
+ * @msg: Message header we are adding flag attribute to
+ * @attr_id: ID of flag attribute we are adding to message
+ *
+ * Return: NULL if there is no room for the attribute. Otherwise a pointer
+ * to the new attribute header.
+ *
+ * New header length is stored initially in DWORDs.
+ **/
+struct fbnic_tlv_msg *fbnic_tlv_attr_nest_start(struct fbnic_tlv_msg *msg,
+ u16 attr_id)
+{
+ int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
+ struct fbnic_tlv_msg *attr = &msg[le16_to_cpu(msg->hdr.len)];
+ struct fbnic_tlv_hdr hdr = { 0 };
+
+ /* Make sure we have space for at least the nest header plus one more */
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ if (attr_max_len < sizeof(*attr) * 2)
+ return NULL;
+
+ /* Record attribute type and size */
+ hdr.type = attr_id;
+
+	/* Start the length at 1 DWORD to account for the nest header's
+	 * own consumption within the page. The length stays in DWORDs
+	 * until we shift it to bytes when we close the nest out.
+	 */
+ hdr.len = cpu_to_le16(1);
+
+ attr->hdr = hdr;
+
+ return attr;
+}
+
+/**
+ * fbnic_tlv_attr_nest_stop - Close out nested attribute and add it to message
+ * @msg: Message header we are adding flag attribute to
+ *
+ * Closes out nested attribute, adds length to message, and then bumps
+ * length from DWORDs to bytes to match other attributes.
+ **/
+void fbnic_tlv_attr_nest_stop(struct fbnic_tlv_msg *msg)
+{
+ struct fbnic_tlv_msg *attr = &msg[le16_to_cpu(msg->hdr.len)];
+ u16 len = le16_to_cpu(attr->hdr.len);
+
+ /* Add attribute to message if there is more than just a header */
+ if (len <= 1)
+ return;
+
+ le16_add_cpu(&msg->hdr.len, len);
+
+ /* Convert from DWORDs to bytes */
+ attr->hdr.len = cpu_to_le16(len * sizeof(u32));
+}
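+
+/* Typical usage of the nest helpers (a sketch; the attribute IDs are
+ * illustrative, not part of the driver):
+ *
+ *	struct fbnic_tlv_msg *nest;
+ *
+ *	nest = fbnic_tlv_attr_nest_start(msg, MY_NESTED_ATTR_ID);
+ *	if (!nest)
+ *		return -ENOSPC;
+ *	fbnic_tlv_attr_put_int(nest, MY_MEMBER_ATTR_ID, value);
+ *	fbnic_tlv_attr_nest_stop(msg);
+ *
+ * Attributes are added through the returned nest pointer, whose length
+ * stays in DWORDs until fbnic_tlv_attr_nest_stop() folds it into the
+ * parent and converts it to bytes.
+ */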
+
+static int
+fbnic_tlv_attr_validate(struct fbnic_tlv_msg *attr,
+ const struct fbnic_tlv_index *tlv_index)
+{
+ u16 len = le16_to_cpu(attr->hdr.len) - sizeof(*attr);
+ u16 attr_id = attr->hdr.type;
+ __le32 *value = &attr->value[0];
+
+ if (attr->hdr.is_msg)
+ return -EINVAL;
+
+ if (attr_id >= FBNIC_TLV_RESULTS_MAX)
+ return -EINVAL;
+
+ while (tlv_index->id != attr_id) {
+ if (tlv_index->id == FBNIC_TLV_ATTR_ID_UNKNOWN) {
+ if (attr->hdr.cannot_ignore)
+ return -ENOENT;
+ return le16_to_cpu(attr->hdr.len);
+ }
+
+ tlv_index++;
+ }
+
+ if (offset_in_page(attr) + len > PAGE_SIZE - sizeof(*attr))
+ return -E2BIG;
+
+ switch (tlv_index->type) {
+ case FBNIC_TLV_STRING:
+ if (!len || len > tlv_index->len)
+ return -EINVAL;
+ if (((char *)value)[len - 1])
+ return -EINVAL;
+ break;
+ case FBNIC_TLV_FLAG:
+ if (len)
+ return -EINVAL;
+ break;
+ case FBNIC_TLV_UNSIGNED:
+ case FBNIC_TLV_SIGNED:
+ if (tlv_index->len > sizeof(__le64))
+ return -EINVAL;
+ fallthrough;
+ case FBNIC_TLV_BINARY:
+ if (!len || len > tlv_index->len)
+ return -EINVAL;
+ break;
+ case FBNIC_TLV_NESTED:
+ case FBNIC_TLV_ARRAY:
+ if (len % 4)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * fbnic_tlv_attr_parse_array - Parse array of attributes into results array
+ * @attr: Start of attributes in the message
+ * @len: Length of attributes in the message
+ * @results: Array of pointers to store the results of parsing
+ * @tlv_index: List of TLV attributes to be parsed from message
+ * @tlv_attr_id: Specific ID that is repeated in array
+ * @array_len: Number of results to store in results array
+ *
+ * Return: zero on success, or negative value on error.
+ *
+ * Will take a list of attributes and a parser definition and will capture
+ * the results in the results array to have the data extracted later.
+ **/
+int fbnic_tlv_attr_parse_array(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index,
+ u16 tlv_attr_id, size_t array_len)
+{
+ int i = 0;
+
+ /* Initialize results table to NULL. */
+ memset(results, 0, array_len * sizeof(results[0]));
+
+	/* Nothing to parse if the header was the only thing there */
+ if (!len)
+ return 0;
+
+ /* Work through list of attributes, parsing them as necessary */
+ while (len > 0) {
+ u16 attr_id = attr->hdr.type;
+ u16 attr_len;
+ int err;
+
+ if (tlv_attr_id != attr_id)
+ return -EINVAL;
+
+ /* Stop parsing on full error */
+ err = fbnic_tlv_attr_validate(attr, tlv_index);
+ if (err < 0)
+ return err;
+
+ if (i >= array_len)
+ return -ENOSPC;
+
+ results[i++] = attr;
+
+ attr_len = FBNIC_TLV_MSG_SIZE(le16_to_cpu(attr->hdr.len));
+ len -= attr_len;
+ attr += attr_len;
+ }
+
+ return len == 0 ? 0 : -EINVAL;
+}
+
+/**
+ * fbnic_tlv_attr_parse - Parse attributes into a list of attribute results
+ * @attr: Start of attributes in the message
+ * @len: Length of attributes in the message
+ * @results: Array of pointers to store the results of parsing
+ * @tlv_index: List of TLV attributes to be parsed from message
+ *
+ * Return: zero on success, or negative value on error.
+ *
+ * Will take a list of attributes and a parser definition and will capture
+ * the results in the results array to have the data extracted later.
+ **/
+int fbnic_tlv_attr_parse(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index)
+{
+ /* Initialize results table to NULL. */
+ memset(results, 0, sizeof(results[0]) * FBNIC_TLV_RESULTS_MAX);
+
+	/* Nothing to parse if the header was the only thing there */
+ if (!len)
+ return 0;
+
+ /* Work through list of attributes, parsing them as necessary */
+ while (len > 0) {
+ int err = fbnic_tlv_attr_validate(attr, tlv_index);
+ u16 attr_id = attr->hdr.type;
+ u16 attr_len;
+
+ /* Stop parsing on full error */
+ if (err < 0)
+ return err;
+
+ /* Ignore results for unsupported values */
+ if (!err) {
+ /* Do not overwrite existing entries */
+ if (results[attr_id])
+ return -EADDRINUSE;
+
+ results[attr_id] = attr;
+ }
+
+ attr_len = FBNIC_TLV_MSG_SIZE(le16_to_cpu(attr->hdr.len));
+ len -= attr_len;
+ attr += attr_len;
+ }
+
+ return len == 0 ? 0 : -EINVAL;
+}
+
+/**
+ * fbnic_tlv_msg_parse - Parse message and process via predetermined functions
+ * @opaque: Value passed to parser function to enable driver access
+ * @msg: Message to be parsed.
+ * @parser: TLV message parser definition.
+ *
+ * Return: zero on success, or negative value on error.
+ *
+ * Will match the message against the provided parser array and process
+ * it via the attribute parsing definitions and handler function for the
+ * matching entry.
+ **/
+int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg,
+ const struct fbnic_tlv_parser *parser)
+{
+ struct fbnic_tlv_msg *results[FBNIC_TLV_RESULTS_MAX];
+ u16 msg_id = msg->hdr.type;
+ int err;
+
+ if (!msg->hdr.is_msg)
+ return -EINVAL;
+
+ if (le16_to_cpu(msg->hdr.len) > PAGE_SIZE / sizeof(u32))
+ return -E2BIG;
+
+ while (parser->id != msg_id) {
+ if (parser->id == FBNIC_TLV_MSG_ID_UNKNOWN)
+ return -ENOENT;
+ parser++;
+ }
+
+ err = fbnic_tlv_attr_parse(&msg[1], le16_to_cpu(msg->hdr.len) - 1,
+ results, parser->attr);
+ if (err)
+ return err;
+
+ return parser->func(opaque, results);
+}
+
+/**
+ * fbnic_tlv_parser_error - called if message doesn't match known type
+ * @opaque: (unused)
+ * @results: (unused)
+ *
+ * Return: -EBADMSG to indicate the message is an unsupported type
+ **/
+int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results)
+{
+ return -EBADMSG;
+}
+
+void fbnic_tlv_attr_addr_copy(u8 *dest, struct fbnic_tlv_msg *src)
+{
+ u8 *mac_addr;
+
+ mac_addr = fbnic_tlv_attr_get_value_ptr(src);
+ memcpy(dest, mac_addr, ETH_ALEN);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
new file mode 100644
index 000000000000..67300ab44353
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_TLV_H_
+#define _FBNIC_TLV_H_
+
+#include <asm/byteorder.h>
+#include <linux/bits.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
+#define FBNIC_TLV_MSG_ALIGN(len) ALIGN(len, sizeof(u32))
+#define FBNIC_TLV_MSG_SIZE(len) \
+ (FBNIC_TLV_MSG_ALIGN(len) / sizeof(u32))
+
+/* TLV Header Format
+ * 3 2 1
+ * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Length |M|I|RSV| Type / ID |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * The TLV header format described above will be used for transferring
+ * messages between the host and the firmware. To ensure byte ordering
+ * we have defined all fields as being little endian.
+ * Type/ID: Identifier for message and/or attribute
+ * RSV: Reserved field for future use, likely as additional flags
+ * I: cannot_ignore flag, identifies if unrecognized attribute can be ignored
+ * M: is_msg, indicates that this is the start of a new message
+ * Length: Total length of message in dwords including header
+ * or
+ * Total length of attribute in bytes including header
+ */
+struct fbnic_tlv_hdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ u16 type : 12; /* 0 .. 11 Type / ID */
+ u16 rsvd : 2; /* 12 .. 13 Reserved for future use */
+	u16 cannot_ignore : 1; /* 14       Attribute cannot be ignored */
+ u16 is_msg : 1; /* 15 Header belongs to message */
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ u16 is_msg : 1; /* 15 Header belongs to message */
+	u16 cannot_ignore : 1; /* 14       Attribute cannot be ignored */
+ u16 rsvd : 2; /* 13 .. 12 Reserved for future use */
+ u16 type : 12; /* 11 .. 0 Type / ID */
+#else
+#error "Missing defines from byteorder.h"
+#endif
+	__le16 len; /* 16 .. 31 length including TLV header */
+};
+
+#define FBNIC_TLV_RESULTS_MAX 32
+
+struct fbnic_tlv_msg {
+ struct fbnic_tlv_hdr hdr;
+ __le32 value[];
+};
+
+#define FBNIC_TLV_MSG_ID_UNKNOWN USHRT_MAX
+
+enum fbnic_tlv_type {
+ FBNIC_TLV_STRING,
+ FBNIC_TLV_FLAG,
+ FBNIC_TLV_UNSIGNED,
+ FBNIC_TLV_SIGNED,
+ FBNIC_TLV_BINARY,
+ FBNIC_TLV_NESTED,
+ FBNIC_TLV_ARRAY,
+ __FBNIC_TLV_MAX_TYPE
+};
+
+/* TLV Index
+ * Defines the relationship between the attribute IDs and their types.
+ * For each entry in the index there will be a size and type associated
+ * with it so that we can use this to parse the data and verify it matches
+ * the expected layout.
+ */
+struct fbnic_tlv_index {
+ u16 id;
+ u16 len;
+ enum fbnic_tlv_type type;
+};
+
+#define TLV_MAX_DATA (PAGE_SIZE - 512)
+#define FBNIC_TLV_ATTR_ID_UNKNOWN USHRT_MAX
+#define FBNIC_TLV_ATTR_STRING(id, len) { id, len, FBNIC_TLV_STRING }
+#define FBNIC_TLV_ATTR_FLAG(id) { id, 0, FBNIC_TLV_FLAG }
+#define FBNIC_TLV_ATTR_U32(id) { id, sizeof(u32), FBNIC_TLV_UNSIGNED }
+#define FBNIC_TLV_ATTR_U64(id) { id, sizeof(u64), FBNIC_TLV_UNSIGNED }
+#define FBNIC_TLV_ATTR_S32(id) { id, sizeof(s32), FBNIC_TLV_SIGNED }
+#define FBNIC_TLV_ATTR_S64(id) { id, sizeof(s64), FBNIC_TLV_SIGNED }
+#define FBNIC_TLV_ATTR_MAC_ADDR(id) { id, ETH_ALEN, FBNIC_TLV_BINARY }
+#define FBNIC_TLV_ATTR_NESTED(id) { id, 0, FBNIC_TLV_NESTED }
+#define FBNIC_TLV_ATTR_ARRAY(id) { id, 0, FBNIC_TLV_ARRAY }
+#define FBNIC_TLV_ATTR_RAW_DATA(id) { id, TLV_MAX_DATA, FBNIC_TLV_BINARY }
+#define FBNIC_TLV_ATTR_LAST { FBNIC_TLV_ATTR_ID_UNKNOWN, 0, 0 }
+
+struct fbnic_tlv_parser {
+ u16 id;
+ const struct fbnic_tlv_index *attr;
+ int (*func)(void *opaque,
+ struct fbnic_tlv_msg **results);
+};
+
+#define FBNIC_TLV_PARSER(id, attr, func) { FBNIC_TLV_MSG_ID_##id, attr, func }
+
+static inline void *
+fbnic_tlv_attr_get_value_ptr(struct fbnic_tlv_msg *attr)
+{
+ return (void *)&attr->value[0];
+}
+
+static inline bool fbnic_tlv_attr_get_bool(struct fbnic_tlv_msg *attr)
+{
+ return !!attr;
+}
+
+u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr);
+s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr);
+size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str,
+ size_t max_size);
+
+#define get_unsigned_result(id, location) \
+do { \
+ struct fbnic_tlv_msg *result = results[id]; \
+ if (result) \
+ location = fbnic_tlv_attr_get_unsigned(result); \
+} while (0)
+
+#define get_signed_result(id, location) \
+do { \
+ struct fbnic_tlv_msg *result = results[id]; \
+ if (result) \
+ location = fbnic_tlv_attr_get_signed(result); \
+} while (0)
+
+#define get_string_result(id, size, str, max_size) \
+do { \
+ struct fbnic_tlv_msg *result = results[id]; \
+ if (result) \
+ size = fbnic_tlv_attr_get_string(result, str, max_size); \
+} while (0)
+
+#define get_bool(id) (!!(results[id]))
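+
+/* Note: the get_*_result() helpers above deliberately capture a local
+ * "results" array from the enclosing parser callback, so they can only
+ * be used inside a function with that variable in scope.
+ */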
+
+struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id);
+int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id);
+int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const void *value, const int len);
+int __fbnic_tlv_attr_put_int(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ s64 value, const int len);
+#define fbnic_tlv_attr_put_int(msg, attr_id, value) \
+ __fbnic_tlv_attr_put_int(msg, attr_id, value, \
+ FBNIC_TLV_MSG_ALIGN(sizeof(value)))
+int fbnic_tlv_attr_put_mac_addr(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const u8 *mac_addr);
+int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
+ const char *string);
+struct fbnic_tlv_msg *fbnic_tlv_attr_nest_start(struct fbnic_tlv_msg *msg,
+ u16 attr_id);
+void fbnic_tlv_attr_nest_stop(struct fbnic_tlv_msg *msg);
+void fbnic_tlv_attr_addr_copy(u8 *dest, struct fbnic_tlv_msg *src);
+int fbnic_tlv_attr_parse_array(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index,
+ u16 tlv_attr_id, size_t array_len);
+int fbnic_tlv_attr_parse(struct fbnic_tlv_msg *attr, int len,
+ struct fbnic_tlv_msg **results,
+ const struct fbnic_tlv_index *tlv_index);
+int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg,
+ const struct fbnic_tlv_parser *parser);
+int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results);
+
+#define FBNIC_TLV_MSG_ERROR \
+ FBNIC_TLV_PARSER(UNKNOWN, NULL, fbnic_tlv_parser_error)
+#endif /* _FBNIC_TLV_H_ */
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
new file mode 100644
index 000000000000..0ed4c9fff5d8
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
@@ -0,0 +1,1913 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bitfield.h>
+#include <linux/iopoll.h>
+#include <linux/pci.h>
+#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
+
+#include "fbnic.h"
+#include "fbnic_csr.h"
+#include "fbnic_netdev.h"
+#include "fbnic_txrx.h"
+
+struct fbnic_xmit_cb {
+ u32 bytecount;
+ u8 desc_count;
+ int hw_head;
+};
+
+#define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb))
+
+static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring)
+{
+ unsigned long csr_base = (unsigned long)ring->doorbell;
+
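+	/* The doorbell lives inside its queue's register block, so
+	 * rounding the doorbell address down to the FBNIC_QUEUE_STRIDE
+	 * sized boundary recovers the base of that block.
+	 */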
+ csr_base &= ~(FBNIC_QUEUE_STRIDE * sizeof(u32) - 1);
+
+ return (u32 __iomem *)csr_base;
+}
+
+static u32 fbnic_ring_rd32(struct fbnic_ring *ring, unsigned int csr)
+{
+ u32 __iomem *csr_base = fbnic_ring_csr_base(ring);
+
+ return readl(csr_base + csr);
+}
+
+static void fbnic_ring_wr32(struct fbnic_ring *ring, unsigned int csr, u32 val)
+{
+ u32 __iomem *csr_base = fbnic_ring_csr_base(ring);
+
+ writel(val, csr_base + csr);
+}
+
+static unsigned int fbnic_desc_unused(struct fbnic_ring *ring)
+{
+ return (ring->head - ring->tail - 1) & ring->size_mask;
+}
+
+static unsigned int fbnic_desc_used(struct fbnic_ring *ring)
+{
+ return (ring->tail - ring->head) & ring->size_mask;
+}
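+
+/* Worked example: with a 256-entry ring (size_mask 255), head 10 and
+ * tail 250, used = (250 - 10) & 255 = 240 and unused =
+ * (10 - 250 - 1) & 255 = 15; one slot stays reserved so that
+ * head == tail only when the ring is empty.
+ */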
+
+static struct netdev_queue *txring_txq(const struct net_device *dev,
+ const struct fbnic_ring *ring)
+{
+ return netdev_get_tx_queue(dev, ring->q_idx);
+}
+
+static int fbnic_maybe_stop_tx(const struct net_device *dev,
+ struct fbnic_ring *ring,
+ const unsigned int size)
+{
+ struct netdev_queue *txq = txring_txq(dev, ring);
+ int res;
+
+ res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size,
+ FBNIC_TX_DESC_WAKEUP);
+
+ return !res;
+}
+
+static bool fbnic_tx_sent_queue(struct sk_buff *skb, struct fbnic_ring *ring)
+{
+ struct netdev_queue *dev_queue = txring_txq(skb->dev, ring);
+ unsigned int bytecount = FBNIC_XMIT_CB(skb)->bytecount;
+ bool xmit_more = netdev_xmit_more();
+
+ /* TBD: Request completion more often if xmit_more becomes large */
+
+ return __netdev_tx_sent_queue(dev_queue, bytecount, xmit_more);
+}
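+
+/* __netdev_tx_sent_queue() folds BQL accounting and the xmit_more hint
+ * together: it returns true when the doorbell should be rung now (no
+ * further frames are pending or BQL has stopped the queue) and false
+ * when the tail write can be deferred to batch several frames behind a
+ * single MMIO doorbell.
+ */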
+
+static void fbnic_unmap_single_twd(struct device *dev, __le64 *twd)
+{
+ u64 raw_twd = le64_to_cpu(*twd);
+ unsigned int len;
+ dma_addr_t dma;
+
+ dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd);
+ len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd);
+
+ dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
+}
+
+static void fbnic_unmap_page_twd(struct device *dev, __le64 *twd)
+{
+ u64 raw_twd = le64_to_cpu(*twd);
+ unsigned int len;
+ dma_addr_t dma;
+
+ dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd);
+ len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd);
+
+ dma_unmap_page(dev, dma, len, DMA_TO_DEVICE);
+}
+
+#define FBNIC_TWD_TYPE(_type) \
+ cpu_to_le64(FIELD_PREP(FBNIC_TWD_TYPE_MASK, FBNIC_TWD_TYPE_##_type))
+
+static bool
+fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
+{
+ unsigned int l2len, i3len;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+ return false;
+
+ l2len = skb_mac_header_len(skb);
+ i3len = skb_checksum_start(skb) - skb_network_header(skb);
+
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK,
+ skb->csum_offset / 2));
+
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO);
+
+ *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) |
+ FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2));
+ return false;
+}
+
+static void
+fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq)
+{
+ skb_checksum_none_assert(skb);
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ return;
+
+ if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ u16 csum = FIELD_GET(FBNIC_RCD_META_L2_CSUM_MASK, rcd);
+
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = (__force __wsum)csum;
+ }
+}
+
+static bool
+fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta)
+{
+ struct device *dev = skb->dev->dev.parent;
+ unsigned int tail = ring->tail, first;
+ unsigned int size, data_len;
+ skb_frag_t *frag;
+ dma_addr_t dma;
+ __le64 *twd;
+
+ ring->tx_buf[tail] = skb;
+
+ tail++;
+ tail &= ring->size_mask;
+ first = tail;
+
+ size = skb_headlen(skb);
+ data_len = skb->data_len;
+
+ if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
+ goto dma_error;
+
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+
+ for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ twd = &ring->desc[tail];
+
+ if (dma_mapping_error(dev, dma))
+ goto dma_error;
+
+ *twd = cpu_to_le64(FIELD_PREP(FBNIC_TWD_ADDR_MASK, dma) |
+ FIELD_PREP(FBNIC_TWD_LEN_MASK, size) |
+ FIELD_PREP(FBNIC_TWD_TYPE_MASK,
+ FBNIC_TWD_TYPE_AL));
+
+ tail++;
+ tail &= ring->size_mask;
+
+ if (!data_len)
+ break;
+
+ size = skb_frag_size(frag);
+ data_len -= size;
+
+ if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK))
+ goto dma_error;
+
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ }
+
+ *twd |= FBNIC_TWD_TYPE(LAST_AL);
+
+ FBNIC_XMIT_CB(skb)->desc_count = ((twd - meta) + 1) & ring->size_mask;
+
+ ring->tail = tail;
+
+ /* Verify there is room for another packet */
+ fbnic_maybe_stop_tx(skb->dev, ring, FBNIC_MAX_SKB_DESC);
+
+ if (fbnic_tx_sent_queue(skb, ring)) {
+ *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_COMPLETION);
+
+ /* Force DMA writes to flush before writing to tail */
+ dma_wmb();
+
+ writel(tail, ring->doorbell);
+ }
+
+ return false;
+dma_error:
+ if (net_ratelimit())
+ netdev_err(skb->dev, "TX DMA map failed\n");
+
+ while (tail != first) {
+ tail--;
+ tail &= ring->size_mask;
+ twd = &ring->desc[tail];
+ if (tail == first)
+ fbnic_unmap_single_twd(dev, twd);
+ else
+ fbnic_unmap_page_twd(dev, twd);
+ }
+
+ return true;
+}
+
+#define FBNIC_MIN_FRAME_LEN 60
+
+static netdev_tx_t
+fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring)
+{
+ __le64 *meta = &ring->desc[ring->tail];
+ u16 desc_needed;
+
+ if (skb_put_padto(skb, FBNIC_MIN_FRAME_LEN))
+ goto err_count;
+
+ /* Need: 1 descriptor per page,
+ * + 1 desc for skb_head,
+ * + 2 desc for metadata and timestamp metadata,
+ * + 7 desc gap to keep tail from touching head;
+ * otherwise return NETDEV_TX_BUSY so the stack retries the frame
+ */
+ desc_needed = skb_shinfo(skb)->nr_frags + 10;
+ if (fbnic_maybe_stop_tx(skb->dev, ring, desc_needed))
+ return NETDEV_TX_BUSY;
+
+ *meta = cpu_to_le64(FBNIC_TWD_FLAG_DEST_MAC);
+
+ /* Write all members within DWORD to condense this into 2 4B writes */
+ FBNIC_XMIT_CB(skb)->bytecount = skb->len;
+ FBNIC_XMIT_CB(skb)->desc_count = 0;
+
+ if (fbnic_tx_offloads(ring, skb, meta))
+ goto err_free;
+
+ if (fbnic_tx_map(ring, skb, meta))
+ goto err_free;
+
+ return NETDEV_TX_OK;
+
+err_free:
+ dev_kfree_skb_any(skb);
+err_count:
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev)
+{
+ struct fbnic_net *fbn = netdev_priv(dev);
+ unsigned int q_map = skb->queue_mapping;
+
+ return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]);
+}
+
+netdev_features_t
+fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int l2len, l3len;
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+ return features;
+
+ l2len = skb_mac_header_len(skb);
+ l3len = skb_checksum_start(skb) - skb_network_header(skb);
+
+ /* Check header lengths are multiple of 2.
+ * In case of 6in6 we support longer headers (IHLEN + OHLEN)
+ * but keep things simple for now; 512B is plenty.
+ */
+ if ((l2len | l3len | skb->csum_offset) % 2 ||
+ !FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) ||
+ !FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) ||
+ !FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2))
+ return features & ~NETIF_F_CSUM_MASK;
+
+ return features;
+}
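+
+/* Worked example for a typical TCP/IPv4 frame (values illustrative): an
+ * Ethernet header gives l2len = 14, the IPv4 header gives l3len = 20 and
+ * TCP uses csum_offset = 16. All three are even, so the halved values
+ * 7, 10 and 8 fit their descriptor fields; any odd length instead drops
+ * NETIF_F_CSUM_MASK and falls back to software checksumming.
+ */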
+
+static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget,
+ struct fbnic_ring *ring, bool discard,
+ unsigned int hw_head)
+{
+ u64 total_bytes = 0, total_packets = 0;
+ unsigned int head = ring->head;
+ struct netdev_queue *txq;
+ unsigned int clean_desc;
+
+ clean_desc = (hw_head - head) & ring->size_mask;
+
+ while (clean_desc) {
+ struct sk_buff *skb = ring->tx_buf[head];
+ unsigned int desc_cnt;
+
+ desc_cnt = FBNIC_XMIT_CB(skb)->desc_count;
+ if (desc_cnt > clean_desc)
+ break;
+
+ ring->tx_buf[head] = NULL;
+
+ clean_desc -= desc_cnt;
+
+ while (!(ring->desc[head] & FBNIC_TWD_TYPE(AL))) {
+ head++;
+ head &= ring->size_mask;
+ desc_cnt--;
+ }
+
+ fbnic_unmap_single_twd(nv->dev, &ring->desc[head]);
+ head++;
+ head &= ring->size_mask;
+ desc_cnt--;
+
+ while (desc_cnt--) {
+ fbnic_unmap_page_twd(nv->dev, &ring->desc[head]);
+ head++;
+ head &= ring->size_mask;
+ }
+
+ total_bytes += FBNIC_XMIT_CB(skb)->bytecount;
+ total_packets += 1;
+
+ napi_consume_skb(skb, napi_budget);
+ }
+
+ if (!total_bytes)
+ return;
+
+ ring->head = head;
+
+ txq = txring_txq(nv->napi.dev, ring);
+
+ if (unlikely(discard)) {
+ netdev_tx_completed_queue(txq, total_packets, total_bytes);
+ return;
+ }
+
+ netif_txq_completed_wake(txq, total_packets, total_bytes,
+ fbnic_desc_unused(ring),
+ FBNIC_TX_DESC_WAKEUP);
+}
+
+static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
+ struct page *page)
+{
+ struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+
+ page_pool_fragment_page(page, PAGECNT_BIAS_MAX);
+ rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX;
+ rx_buf->page = page;
+}
+
+static struct page *fbnic_page_pool_get(struct fbnic_ring *ring,
+ unsigned int idx)
+{
+ struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+
+ rx_buf->pagecnt_bias--;
+
+ return rx_buf->page;
+}
+
+static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx,
+ struct fbnic_napi_vector *nv, int budget)
+{
+ struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+ struct page *page = rx_buf->page;
+
+ if (!page_pool_unref_page(page, rx_buf->pagecnt_bias))
+ page_pool_put_unrefed_page(nv->page_pool, page, -1, !!budget);
+
+ rx_buf->page = NULL;
+}
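+
+/* A minimal sketch of the bias lifecycle for one buffer slot, assuming
+ * a freshly allocated page: init fragments the page with a huge bias so
+ * per-use refcount atomics are avoided, each get consumes one unit of
+ * that bias, and drain settles whatever remains in a single call:
+ *
+ *	fbnic_page_pool_init(ring, idx, page);     // bias = PAGECNT_BIAS_MAX
+ *	page = fbnic_page_pool_get(ring, idx);     // bias drops by one per use
+ *	fbnic_page_pool_drain(ring, idx, nv, budget); // return the remainder
+ */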
+
+static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget,
+ struct fbnic_q_triad *qt, s32 head0)
+{
+ if (head0 >= 0)
+ fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0);
+}
+
+static void
+fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt,
+ int napi_budget)
+{
+ struct fbnic_ring *cmpl = &qt->cmpl;
+ __le64 *raw_tcd, done;
+ u32 head = cmpl->head;
+ s32 head0 = -1;
+
+ done = (head & (cmpl->size_mask + 1)) ? 0 : cpu_to_le64(FBNIC_TCD_DONE);
+ raw_tcd = &cmpl->desc[head & cmpl->size_mask];
+
+ /* Walk the completion queue collecting the heads reported by NIC */
+ while ((*raw_tcd & cpu_to_le64(FBNIC_TCD_DONE)) == done) {
+ u64 tcd;
+
+ dma_rmb();
+
+ tcd = le64_to_cpu(*raw_tcd);
+
+ switch (FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd)) {
+ case FBNIC_TCD_TYPE_0:
+ if (!(tcd & FBNIC_TCD_TWQ1))
+ head0 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD0_MASK,
+ tcd);
+ /* Currently all err status bits are related to
+ * timestamps and as those have yet to be added
+ * they are skipped for now.
+ */
+ break;
+ default:
+ break;
+ }
+
+ raw_tcd++;
+ head++;
+ if (!(head & cmpl->size_mask)) {
+ done ^= cpu_to_le64(FBNIC_TCD_DONE);
+ raw_tcd = &cmpl->desc[0];
+ }
+ }
+
+ /* Record the current head/tail of the queue */
+ if (cmpl->head != head) {
+ cmpl->head = head;
+ writel(head & cmpl->size_mask, cmpl->doorbell);
+ }
+
+ /* Unmap and free processed buffers */
+ fbnic_clean_twq(nv, napi_budget, qt, head0);
+}
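+
+/* The DONE bit doubles as a generation marker: head counts descriptors
+ * across wraps, so bit (size_mask + 1) of head flips the polarity the
+ * driver expects. For an assumed 512 entry ring, heads 0..511 treat a
+ * set DONE bit as new work and heads 512..1023 treat a clear bit as new,
+ * which avoids having to zero the completion ring between passes.
+ */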
+
+static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget,
+ struct fbnic_ring *ring, unsigned int hw_head)
+{
+ unsigned int head = ring->head;
+
+ if (head == hw_head)
+ return;
+
+ do {
+ fbnic_page_pool_drain(ring, head, nv, napi_budget);
+
+ head++;
+ head &= ring->size_mask;
+ } while (head != hw_head);
+
+ ring->head = head;
+}
+
+static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page)
+{
+ __le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT];
+ dma_addr_t dma = page_pool_get_dma_addr(page);
+ u64 bd, i = FBNIC_BD_FRAG_COUNT;
+
+ bd = (FBNIC_BD_PAGE_ADDR_MASK & dma) |
+ FIELD_PREP(FBNIC_BD_PAGE_ID_MASK, id);
+
+ /* In the case that a page size is larger than 4K we will map a
+ * single page to multiple fragments. The fragments will be
+ * FBNIC_BD_FRAG_SIZE in size and the lower n bits will be used
+ * to indicate the individual fragment IDs.
+ */
+ do {
+ *bdq_desc = cpu_to_le64(bd);
+ bd += FIELD_PREP(FBNIC_BD_DESC_ADDR_MASK, 1) |
+ FIELD_PREP(FBNIC_BD_DESC_ID_MASK, 1);
+ } while (--i);
+}
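+
+/* Worked example, assuming a 16K PAGE_SIZE so FBNIC_BD_FRAG_COUNT = 4:
+ * the loop above emits four descriptors for the one page, advancing the
+ * fragment address and fragment ID by one step each iteration, so the
+ * hardware sees four independent 4K buffers backed by a single
+ * page_pool page. With 4K pages the loop runs exactly once.
+ */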
+
+static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq)
+{
+ unsigned int count = fbnic_desc_unused(bdq);
+ unsigned int i = bdq->tail;
+
+ if (!count)
+ return;
+
+ do {
+ struct page *page;
+
+ page = page_pool_dev_alloc_pages(nv->page_pool);
+ if (!page)
+ break;
+
+ fbnic_page_pool_init(bdq, i, page);
+ fbnic_bd_prep(bdq, i, page);
+
+ i++;
+ i &= bdq->size_mask;
+
+ count--;
+ } while (count);
+
+ if (bdq->tail != i) {
+ bdq->tail = i;
+
+ /* Force DMA writes to flush before writing to tail */
+ dma_wmb();
+
+ writel(i, bdq->doorbell);
+ }
+}
+
+static unsigned int fbnic_hdr_pg_start(unsigned int pg_off)
+{
+ /* The headroom of the first header may be larger than FBNIC_RX_HROOM
+ * due to alignment. So account for that by just making the page
+ * offset 0 if we are starting at the first header.
+ */
+ if (ALIGN(FBNIC_RX_HROOM, 128) > FBNIC_RX_HROOM &&
+ pg_off == ALIGN(FBNIC_RX_HROOM, 128))
+ return 0;
+
+ return pg_off - FBNIC_RX_HROOM;
+}
+
+static unsigned int fbnic_hdr_pg_end(unsigned int pg_off, unsigned int len)
+{
+ /* Determine the end of the buffer by finding the start of the next
+ * frame and then subtracting the headroom from it.
+ */
+ pg_off += len + FBNIC_RX_TROOM + FBNIC_RX_HROOM;
+
+ return ALIGN(pg_off, 128) - FBNIC_RX_HROOM;
+}
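+
+/* Taken together the two helpers tile headers within a page: a buffer
+ * spans from (pg_off - FBNIC_RX_HROOM) to the start of the next 128 byte
+ * aligned frame minus that frame's headroom, so consecutive headers
+ * never overlap while every frame keeps its reserved headroom and
+ * tailroom.
+ */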
+
+static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd,
+ struct fbnic_pkt_buff *pkt,
+ struct fbnic_q_triad *qt)
+{
+ unsigned int hdr_pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ unsigned int hdr_pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
+ struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx);
+ unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
+ unsigned int frame_sz, hdr_pg_start, hdr_pg_end, headroom;
+ unsigned char *hdr_start;
+
+ /* data_hard_start should always be NULL when this is called */
+ WARN_ON_ONCE(pkt->buff.data_hard_start);
+
+ /* Short-cut the end calculation if we know page is fully consumed */
+ hdr_pg_end = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
+ FBNIC_BD_FRAG_SIZE : fbnic_hdr_pg_end(hdr_pg_off, len);
+ hdr_pg_start = fbnic_hdr_pg_start(hdr_pg_off);
+
+ headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD;
+ frame_sz = hdr_pg_end - hdr_pg_start;
+ xdp_init_buff(&pkt->buff, frame_sz, NULL);
+ hdr_pg_start += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
+ FBNIC_BD_FRAG_SIZE;
+
+ /* Sync DMA buffer */
+ dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
+ hdr_pg_start, frame_sz,
+ DMA_BIDIRECTIONAL);
+
+ /* Build frame around buffer */
+ hdr_start = page_address(page) + hdr_pg_start;
+
+ xdp_prepare_buff(&pkt->buff, hdr_start, headroom,
+ len - FBNIC_RX_PAD, true);
+
+ pkt->data_truesize = 0;
+ pkt->data_len = 0;
+ pkt->nr_frags = 0;
+}
+
+static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd,
+ struct fbnic_pkt_buff *pkt,
+ struct fbnic_q_triad *qt)
+{
+ unsigned int pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ unsigned int pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd);
+ unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd);
+ struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx);
+ struct skb_shared_info *shinfo;
+ unsigned int truesize;
+
+ truesize = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ?
+ FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128);
+
+ pg_off += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) *
+ FBNIC_BD_FRAG_SIZE;
+
+ /* Sync DMA buffer */
+ dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page),
+ pg_off, truesize, DMA_BIDIRECTIONAL);
+
+ /* Add page to xdp shared info */
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+
+ /* Accumulate the truesize consumed by the payload fragments */
+ pkt->data_truesize += truesize;
+
+ __skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len);
+
+ /* Accumulate the payload length of the added fragment */
+ pkt->data_len += len;
+}
+
+static void fbnic_put_pkt_buff(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt, int budget)
+{
+ struct skb_shared_info *shinfo;
+ struct page *page;
+ int nr_frags;
+
+ if (!pkt->buff.data_hard_start)
+ return;
+
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+ nr_frags = pkt->nr_frags;
+
+ while (nr_frags--) {
+ page = skb_frag_page(&shinfo->frags[nr_frags]);
+ page_pool_put_full_page(nv->page_pool, page, !!budget);
+ }
+
+ page = virt_to_page(pkt->buff.data_hard_start);
+ page_pool_put_full_page(nv->page_pool, page, !!budget);
+}
+
+static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv,
+ struct fbnic_pkt_buff *pkt)
+{
+ unsigned int nr_frags = pkt->nr_frags;
+ struct skb_shared_info *shinfo;
+ unsigned int truesize;
+ struct sk_buff *skb;
+
+ truesize = xdp_data_hard_end(&pkt->buff) + FBNIC_RX_TROOM -
+ pkt->buff.data_hard_start;
+
+ /* Build frame around buffer */
+ skb = napi_build_skb(pkt->buff.data_hard_start, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Push data pointer to start of data, put tail to end of data */
+ skb_reserve(skb, pkt->buff.data - pkt->buff.data_hard_start);
+ __skb_put(skb, pkt->buff.data_end - pkt->buff.data);
+
+ /* Add tracking for metadata at the start of the frame */
+ skb_metadata_set(skb, pkt->buff.data - pkt->buff.data_meta);
+
+ /* Add Rx frags */
+ if (nr_frags) {
+ /* Verify that shared info didn't move */
+ shinfo = xdp_get_shared_info_from_buff(&pkt->buff);
+ WARN_ON(skb_shinfo(skb) != shinfo);
+
+ skb->truesize += pkt->data_truesize;
+ skb->data_len += pkt->data_len;
+ shinfo->nr_frags = nr_frags;
+ skb->len += pkt->data_len;
+ }
+
+ skb_mark_for_recycle(skb);
+
+ /* Set MAC header specific fields */
+ skb->protocol = eth_type_trans(skb, nv->napi.dev);
+
+ return skb;
+}
+
+static enum pkt_hash_types fbnic_skb_hash_type(u64 rcd)
+{
+ return (FBNIC_RCD_META_L4_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L4 :
+ (FBNIC_RCD_META_L3_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L3 :
+ PKT_HASH_TYPE_L2;
+}
+
+static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv,
+ u64 rcd, struct sk_buff *skb,
+ struct fbnic_q_triad *qt)
+{
+ struct net_device *netdev = nv->napi.dev;
+ struct fbnic_ring *rcq = &qt->cmpl;
+
+ fbnic_rx_csum(rcd, skb, rcq);
+
+ if (netdev->features & NETIF_F_RXHASH)
+ skb_set_hash(skb,
+ FIELD_GET(FBNIC_RCD_META_RSS_HASH_MASK, rcd),
+ fbnic_skb_hash_type(rcd));
+
+ skb_record_rx_queue(skb, rcq->q_idx);
+}
+
+static bool fbnic_rcd_metadata_err(u64 rcd)
+{
+ return !!(FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK & rcd);
+}
+
+static int fbnic_clean_rcq(struct fbnic_napi_vector *nv,
+ struct fbnic_q_triad *qt, int budget)
+{
+ struct fbnic_ring *rcq = &qt->cmpl;
+ struct fbnic_pkt_buff *pkt;
+ s32 head0 = -1, head1 = -1;
+ __le64 *raw_rcd, done;
+ u32 head = rcq->head;
+ u64 packets = 0;
+
+ done = (head & (rcq->size_mask + 1)) ? cpu_to_le64(FBNIC_RCD_DONE) : 0;
+ raw_rcd = &rcq->desc[head & rcq->size_mask];
+ pkt = rcq->pkt;
+
+ /* Walk the completion queue collecting the heads reported by NIC */
+ while (likely(packets < budget)) {
+ struct sk_buff *skb = ERR_PTR(-EINVAL);
+ u64 rcd;
+
+ if ((*raw_rcd & cpu_to_le64(FBNIC_RCD_DONE)) == done)
+ break;
+
+ dma_rmb();
+
+ rcd = le64_to_cpu(*raw_rcd);
+
+ switch (FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd)) {
+ case FBNIC_RCD_TYPE_HDR_AL:
+ head0 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ fbnic_pkt_prepare(nv, rcd, pkt, qt);
+
+ break;
+ case FBNIC_RCD_TYPE_PAY_AL:
+ head1 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd);
+ fbnic_add_rx_frag(nv, rcd, pkt, qt);
+
+ break;
+ case FBNIC_RCD_TYPE_OPT_META:
+ /* Only type 0 is currently supported */
+ if (FIELD_GET(FBNIC_RCD_OPT_META_TYPE_MASK, rcd))
+ break;
+
+ /* We currently ignore the action table index */
+ break;
+ case FBNIC_RCD_TYPE_META:
+ if (likely(!fbnic_rcd_metadata_err(rcd)))
+ skb = fbnic_build_skb(nv, pkt);
+
+ /* Populate skb and invalidate XDP */
+ if (!IS_ERR_OR_NULL(skb)) {
+ fbnic_populate_skb_fields(nv, rcd, skb, qt);
+
+ packets++;
+
+ napi_gro_receive(&nv->napi, skb);
+ } else {
+ fbnic_put_pkt_buff(nv, pkt, 1);
+ }
+
+ pkt->buff.data_hard_start = NULL;
+
+ break;
+ }
+
+ raw_rcd++;
+ head++;
+ if (!(head & rcq->size_mask)) {
+ done ^= cpu_to_le64(FBNIC_RCD_DONE);
+ raw_rcd = &rcq->desc[0];
+ }
+ }
+
+ /* Unmap and free processed buffers */
+ if (head0 >= 0)
+ fbnic_clean_bdq(nv, budget, &qt->sub0, head0);
+ fbnic_fill_bdq(nv, &qt->sub0);
+
+ if (head1 >= 0)
+ fbnic_clean_bdq(nv, budget, &qt->sub1, head1);
+ fbnic_fill_bdq(nv, &qt->sub1);
+
+ /* Record the current head/tail of the queue */
+ if (rcq->head != head) {
+ rcq->head = head;
+ writel(head & rcq->size_mask, rcq->doorbell);
+ }
+
+ return packets;
+}
+
+static void fbnic_nv_irq_disable(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 v_idx = nv->v_idx;
+
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(v_idx / 32), 1 << (v_idx % 32));
+}
+
+static void fbnic_nv_irq_rearm(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 v_idx = nv->v_idx;
+
+ fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(v_idx),
+ FBNIC_INTR_CQ_REARM_INTR_UNMASK);
+}
+
+static int fbnic_poll(struct napi_struct *napi, int budget)
+{
+ struct fbnic_napi_vector *nv = container_of(napi,
+ struct fbnic_napi_vector,
+ napi);
+ int i, j, work_done = 0;
+
+ for (i = 0; i < nv->txt_count; i++)
+ fbnic_clean_tcq(nv, &nv->qt[i], budget);
+
+ for (j = 0; j < nv->rxt_count; j++, i++)
+ work_done += fbnic_clean_rcq(nv, &nv->qt[i], budget);
+
+ if (work_done >= budget)
+ return budget;
+
+ if (likely(napi_complete_done(napi, work_done)))
+ fbnic_nv_irq_rearm(nv);
+
+ return 0;
+}
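+
+/* Standard NAPI contract: returning the full budget keeps the vector in
+ * polling mode, while completing with work_done < budget re-arms the
+ * device interrupt through the CQ rearm register. Note that Tx cleanup
+ * is intentionally not counted against the Rx budget, matching common
+ * driver practice.
+ */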
+
+static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct fbnic_napi_vector *nv = data;
+
+ napi_schedule_irqoff(&nv->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void fbnic_remove_tx_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ if (!(txr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ /* Remove pointer to the Tx ring */
+ WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr);
+ fbn->tx[txr->q_idx] = NULL;
+}
+
+static void fbnic_remove_rx_ring(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ if (!(rxr->flags & FBNIC_RING_F_STATS))
+ return;
+
+ /* Remove pointer to the Rx ring */
+ WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr);
+ fbn->rx[rxr->q_idx] = NULL;
+}
+
+static void fbnic_free_napi_vector(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 v_idx = nv->v_idx;
+ int i, j;
+
+ for (i = 0; i < nv->txt_count; i++) {
+ fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl);
+ }
+
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0);
+ fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1);
+ fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl);
+ }
+
+ fbnic_free_irq(fbd, v_idx, nv);
+ page_pool_destroy(nv->page_pool);
+ netif_napi_del(&nv->napi);
+ list_del(&nv->napis);
+ kfree(nv);
+}
+
+void fbnic_free_napi_vectors(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv, *temp;
+
+ list_for_each_entry_safe(nv, temp, &fbn->napis, napis)
+ fbnic_free_napi_vector(fbn, nv);
+}
+
+static void fbnic_name_napi_vector(struct fbnic_napi_vector *nv)
+{
+ unsigned char *dev_name = nv->napi.dev->name;
+
+ if (!nv->rxt_count)
+ snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name,
+ nv->v_idx - FBNIC_NON_NAPI_VECTORS);
+ else
+ snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name,
+ nv->v_idx - FBNIC_NON_NAPI_VECTORS);
+}
+
+#define FBNIC_PAGE_POOL_FLAGS \
+ (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
+
+static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = FBNIC_PAGE_POOL_FLAGS,
+ .pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count,
+ .nid = NUMA_NO_NODE,
+ .dev = nv->dev,
+ .dma_dir = DMA_BIDIRECTIONAL,
+ .offset = 0,
+ .max_len = PAGE_SIZE
+ };
+ struct page_pool *pp;
+
+ /* Page pool cannot exceed a size of 32768. This doesn't limit the
+ * pages on the ring but the number we can have cached waiting on
+ * the next use.
+ *
+ * TBD: Can this be reduced further? Would a multiple of
+ * NAPI_POLL_WEIGHT possibly make more sense? The question is how
+ * many pages do we need to hold in reserve to get the best return
+ * without hogging too much system memory.
+ */
+ if (pp_params.pool_size > 32768)
+ pp_params.pool_size = 32768;
+
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ nv->page_pool = pp;
+
+ return 0;
+}
+
+static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell,
+ int q_idx, u8 flags)
+{
+ ring->doorbell = doorbell;
+ ring->q_idx = q_idx;
+ ring->flags = flags;
+}
+
+static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn,
+ unsigned int v_count, unsigned int v_idx,
+ unsigned int txq_count, unsigned int txq_idx,
+ unsigned int rxq_count, unsigned int rxq_idx)
+{
+ int txt_count = txq_count, rxt_count = rxq_count;
+ u32 __iomem *uc_addr = fbd->uc_addr0;
+ struct fbnic_napi_vector *nv;
+ struct fbnic_q_triad *qt;
+ int qt_count, err;
+ u32 __iomem *db;
+
+ qt_count = txt_count + rxq_count;
+ if (!qt_count)
+ return -EINVAL;
+
+ /* If MMIO has already failed there are no rings to initialize */
+ if (!uc_addr)
+ return -EIO;
+
+ /* Allocate NAPI vector and queue triads */
+ nv = kzalloc(struct_size(nv, qt, qt_count), GFP_KERNEL);
+ if (!nv)
+ return -ENOMEM;
+
+ /* Record queue triad counts */
+ nv->txt_count = txt_count;
+ nv->rxt_count = rxt_count;
+
+ /* Provide pointer back to fbnic and MSI-X vectors */
+ nv->fbd = fbd;
+ nv->v_idx = v_idx;
+
+ /* Record IRQ to NAPI struct */
+ netif_napi_set_irq(&nv->napi,
+ pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx));
+
+ /* Tie napi to netdev */
+ list_add(&nv->napis, &fbn->napis);
+ netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll);
+
+ /* Tie nv back to PCIe dev */
+ nv->dev = fbd->dev;
+
+ /* Allocate page pool */
+ if (rxq_count) {
+ err = fbnic_alloc_nv_page_pool(fbn, nv);
+ if (err)
+ goto napi_del;
+ }
+
+ /* Initialize vector name */
+ fbnic_name_napi_vector(nv);
+
+ /* Request the IRQ for napi vector */
+ err = fbnic_request_irq(fbd, v_idx, &fbnic_msix_clean_rings,
+ IRQF_SHARED, nv->name, nv);
+ if (err)
+ goto pp_destroy;
+
+ /* Initialize queue triads */
+ qt = nv->qt;
+
+ while (txt_count) {
+ /* Configure Tx queue */
+ db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ0_TAIL];
+
+ /* Assign Tx queue to netdev if applicable */
+ if (txq_count > 0) {
+ u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS;
+
+ fbnic_ring_init(&qt->sub0, db, txq_idx, flags);
+ fbn->tx[txq_idx] = &qt->sub0;
+ txq_count--;
+ } else {
+ fbnic_ring_init(&qt->sub0, db, 0,
+ FBNIC_RING_F_DISABLED);
+ }
+
+ /* Configure Tx completion queue */
+ db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TCQ_HEAD];
+ fbnic_ring_init(&qt->cmpl, db, 0, 0);
+
+ /* Update Tx queue index */
+ txt_count--;
+ txq_idx += v_count;
+
+ /* Move to next queue triad */
+ qt++;
+ }
+
+ while (rxt_count) {
+ /* Configure header queue */
+ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL];
+ fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX);
+
+ /* Configure payload queue */
+ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL];
+ fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX);
+
+ /* Configure Rx completion queue */
+ db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD];
+ fbnic_ring_init(&qt->cmpl, db, rxq_idx, FBNIC_RING_F_STATS);
+ fbn->rx[rxq_idx] = &qt->cmpl;
+
+ /* Update Rx queue index */
+ rxt_count--;
+ rxq_idx += v_count;
+
+ /* Move to next queue triad */
+ qt++;
+ }
+
+ return 0;
+
+pp_destroy:
+ page_pool_destroy(nv->page_pool);
+napi_del:
+ netif_napi_del(&nv->napi);
+ list_del(&nv->napis);
+ kfree(nv);
+ return err;
+}
+
+int fbnic_alloc_napi_vectors(struct fbnic_net *fbn)
+{
+ unsigned int txq_idx = 0, rxq_idx = 0, v_idx = FBNIC_NON_NAPI_VECTORS;
+ unsigned int num_tx = fbn->num_tx_queues;
+ unsigned int num_rx = fbn->num_rx_queues;
+ unsigned int num_napi = fbn->num_napi;
+ struct fbnic_dev *fbd = fbn->fbd;
+ int err;
+
+ /* Allocate 1 Tx queue per napi vector */
+ if (num_napi < FBNIC_MAX_TXQS && num_napi == num_tx + num_rx) {
+ while (num_tx) {
+ err = fbnic_alloc_napi_vector(fbd, fbn,
+ num_napi, v_idx,
+ 1, txq_idx, 0, 0);
+ if (err)
+ goto free_vectors;
+
+ /* Update counts and index */
+ num_tx--;
+ txq_idx++;
+
+ v_idx++;
+ }
+ }
+
+ /* Allocate Tx/Rx queue pairs per vector, or allocate remaining Rx */
+ while (num_rx | num_tx) {
+ int tqpv = DIV_ROUND_UP(num_tx, num_napi - txq_idx);
+ int rqpv = DIV_ROUND_UP(num_rx, num_napi - rxq_idx);
+
+ err = fbnic_alloc_napi_vector(fbd, fbn, num_napi, v_idx,
+ tqpv, txq_idx, rqpv, rxq_idx);
+ if (err)
+ goto free_vectors;
+
+ /* Update counts and index */
+ num_tx -= tqpv;
+ txq_idx++;
+
+ num_rx -= rqpv;
+ rxq_idx++;
+
+ v_idx++;
+ }
+
+ return 0;
+
+free_vectors:
+ fbnic_free_napi_vectors(fbn);
+
+ return -ENOMEM;
+}
+
+static void fbnic_free_ring_resources(struct device *dev,
+ struct fbnic_ring *ring)
+{
+ kvfree(ring->buffer);
+ ring->buffer = NULL;
+
+ /* If size is not set there are no descriptors present */
+ if (!ring->size)
+ return;
+
+ dma_free_coherent(dev, ring->size, ring->desc, ring->dma);
+ ring->size_mask = 0;
+ ring->size = 0;
+}
+
+static int fbnic_alloc_tx_ring_desc(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ size_t size;
+
+ /* Round size up to nearest 4K */
+ size = ALIGN(array_size(sizeof(*txr->desc), fbn->txq_size), 4096);
+
+ txr->desc = dma_alloc_coherent(dev, size, &txr->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!txr->desc)
+ return -ENOMEM;
+
+ /* txq_size should be a power of 2, so mask is just that -1 */
+ txr->size_mask = fbn->txq_size - 1;
+ txr->size = size;
+
+ return 0;
+}
+
+static int fbnic_alloc_tx_ring_buffer(struct fbnic_ring *txr)
+{
+ size_t size = array_size(sizeof(*txr->tx_buf), txr->size_mask + 1);
+
+ txr->tx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+ return txr->tx_buf ? 0 : -ENOMEM;
+}
+
+static int fbnic_alloc_tx_ring_resources(struct fbnic_net *fbn,
+ struct fbnic_ring *txr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ if (txr->flags & FBNIC_RING_F_DISABLED)
+ return 0;
+
+ err = fbnic_alloc_tx_ring_desc(fbn, txr);
+ if (err)
+ return err;
+
+ if (!(txr->flags & FBNIC_RING_F_CTX))
+ return 0;
+
+ err = fbnic_alloc_tx_ring_buffer(txr);
+ if (err)
+ goto free_desc;
+
+ return 0;
+
+free_desc:
+ fbnic_free_ring_resources(dev, txr);
+ return err;
+}
+
+static int fbnic_alloc_rx_ring_desc(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ size_t desc_size = sizeof(*rxr->desc);
+ u32 rxq_size;
+ size_t size;
+
+ switch (rxr->doorbell - fbnic_ring_csr_base(rxr)) {
+ case FBNIC_QUEUE_BDQ_HPQ_TAIL:
+ rxq_size = fbn->hpq_size / FBNIC_BD_FRAG_COUNT;
+ desc_size *= FBNIC_BD_FRAG_COUNT;
+ break;
+ case FBNIC_QUEUE_BDQ_PPQ_TAIL:
+ rxq_size = fbn->ppq_size / FBNIC_BD_FRAG_COUNT;
+ desc_size *= FBNIC_BD_FRAG_COUNT;
+ break;
+ case FBNIC_QUEUE_RCQ_HEAD:
+ rxq_size = fbn->rcq_size;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Round size up to nearest 4K */
+ size = ALIGN(array_size(desc_size, rxq_size), 4096);
+
+ rxr->desc = dma_alloc_coherent(dev, size, &rxr->dma,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!rxr->desc)
+ return -ENOMEM;
+
+ /* rxq_size should be a power of 2, so mask is just that -1 */
+ rxr->size_mask = rxq_size - 1;
+ rxr->size = size;
+
+ return 0;
+}
+
+static int fbnic_alloc_rx_ring_buffer(struct fbnic_ring *rxr)
+{
+ size_t size;
+
+ if (rxr->flags & FBNIC_RING_F_CTX)
+ size = array_size(sizeof(*rxr->rx_buf), rxr->size_mask + 1);
+ else
+ size = sizeof(*rxr->pkt);
+
+ rxr->rx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN);
+
+ return rxr->rx_buf ? 0 : -ENOMEM;
+}
+
+static int fbnic_alloc_rx_ring_resources(struct fbnic_net *fbn,
+ struct fbnic_ring *rxr)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ err = fbnic_alloc_rx_ring_desc(fbn, rxr);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_rx_ring_buffer(rxr);
+ if (err)
+ goto free_desc;
+
+ return 0;
+
+free_desc:
+ fbnic_free_ring_resources(dev, rxr);
+ return err;
+}
+
+static void fbnic_free_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_q_triad *qt)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+
+ fbnic_free_ring_resources(dev, &qt->cmpl);
+ fbnic_free_ring_resources(dev, &qt->sub1);
+ fbnic_free_ring_resources(dev, &qt->sub0);
+}
+
+static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_q_triad *qt)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub0);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl);
+ if (err)
+ goto free_sub0;
+
+ return 0;
+
+free_sub0:
+ fbnic_free_ring_resources(dev, &qt->sub0);
+ return err;
+}
+
+static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn,
+ struct fbnic_q_triad *qt)
+{
+ struct device *dev = fbn->netdev->dev.parent;
+ int err;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0);
+ if (err)
+ return err;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1);
+ if (err)
+ goto free_sub0;
+
+ err = fbnic_alloc_rx_ring_resources(fbn, &qt->cmpl);
+ if (err)
+ goto free_sub1;
+
+ return 0;
+
+free_sub1:
+ fbnic_free_ring_resources(dev, &qt->sub1);
+free_sub0:
+ fbnic_free_ring_resources(dev, &qt->sub0);
+ return err;
+}
+
+static void fbnic_free_nv_resources(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i, j;
+
+ /* Free Tx Resources */
+ for (i = 0; i < nv->txt_count; i++)
+ fbnic_free_qt_resources(fbn, &nv->qt[i]);
+
+ for (j = 0; j < nv->rxt_count; j++, i++)
+ fbnic_free_qt_resources(fbn, &nv->qt[i]);
+}
+
+static int fbnic_alloc_nv_resources(struct fbnic_net *fbn,
+ struct fbnic_napi_vector *nv)
+{
+ int i, j, err;
+
+ /* Allocate Tx Resources */
+ for (i = 0; i < nv->txt_count; i++) {
+ err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]);
+ if (err)
+ goto free_resources;
+ }
+
+ /* Allocate Rx Resources */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]);
+ if (err)
+ goto free_resources;
+ }
+
+ return 0;
+
+free_resources:
+ while (i--)
+ fbnic_free_qt_resources(fbn, &nv->qt[i]);
+ return err;
+}
+
+void fbnic_free_resources(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+
+ list_for_each_entry(nv, &fbn->napis, napis)
+ fbnic_free_nv_resources(fbn, nv);
+}
+
+int fbnic_alloc_resources(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+ int err = -ENODEV;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ err = fbnic_alloc_nv_resources(fbn, nv);
+ if (err)
+ goto free_resources;
+ }
+
+ return 0;
+
+free_resources:
+ list_for_each_entry_continue_reverse(nv, &fbn->napis, napis)
+ fbnic_free_nv_resources(fbn, nv);
+
+ return err;
+}
+
+static void fbnic_disable_twq0(struct fbnic_ring *txr)
+{
+ u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL);
+
+ twq_ctl &= ~FBNIC_QUEUE_TWQ_CTL_ENABLE;
+
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ0_CTL, twq_ctl);
+}
+
+static void fbnic_disable_tcq(struct fbnic_ring *txr)
+{
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TCQ_CTL, 0);
+ fbnic_ring_wr32(txr, FBNIC_QUEUE_TIM_MASK, FBNIC_QUEUE_TIM_MASK_MASK);
+}
+
+static void fbnic_disable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
+{
+ u32 bdq_ctl = fbnic_ring_rd32(hpq, FBNIC_QUEUE_BDQ_CTL);
+
+ bdq_ctl &= ~FBNIC_QUEUE_BDQ_CTL_ENABLE;
+
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl);
+}
+
+static void fbnic_disable_rcq(struct fbnic_ring *rxr)
+{
+ fbnic_ring_wr32(rxr, FBNIC_QUEUE_RCQ_CTL, 0);
+ fbnic_ring_wr32(rxr, FBNIC_QUEUE_RIM_MASK, FBNIC_QUEUE_RIM_MASK_MASK);
+}
+
+void fbnic_napi_disable(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ napi_disable(&nv->napi);
+
+ fbnic_nv_irq_disable(nv);
+ }
+}
+
+void fbnic_disable(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_napi_vector *nv;
+ int i, j;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ /* Disable Tx queue triads */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_disable_twq0(&qt->sub0);
+ fbnic_disable_tcq(&qt->cmpl);
+ }
+
+ /* Disable Rx queue triads */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_disable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_disable_rcq(&qt->cmpl);
+ }
+ }
+
+ fbnic_wrfl(fbd);
+}
+
+static void fbnic_tx_flush(struct fbnic_dev *fbd)
+{
+ netdev_warn(fbd->netdev, "triggering Tx flush\n");
+
+ fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN,
+ FBNIC_TMI_DROP_CTRL_EN);
+}
+
+static void fbnic_tx_flush_off(struct fbnic_dev *fbd)
+{
+ fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN, 0);
+}
+
+struct fbnic_idle_regs {
+ u32 reg_base;
+ u8 reg_cnt;
+};
+
+static bool fbnic_all_idle(struct fbnic_dev *fbd,
+ const struct fbnic_idle_regs *regs,
+ unsigned int nregs)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < nregs; i++) {
+ for (j = 0; j < regs[i].reg_cnt; j++) {
+ if (fbnic_rd32(fbd, regs[i].reg_base + j) != ~0U)
+ return false;
+ }
+ }
+ return true;
+}
+
+static void fbnic_idle_dump(struct fbnic_dev *fbd,
+ const struct fbnic_idle_regs *regs,
+ unsigned int nregs, const char *dir, int err)
+{
+ unsigned int i, j;
+
+ netdev_err(fbd->netdev, "error waiting for %s idle %d\n", dir, err);
+ for (i = 0; i < nregs; i++)
+ for (j = 0; j < regs[i].reg_cnt; j++)
+ netdev_err(fbd->netdev, "0x%04x: %08x\n",
+ regs[i].reg_base + j,
+ fbnic_rd32(fbd, regs[i].reg_base + j));
+}
+
+int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail)
+{
+ static const struct fbnic_idle_regs tx[] = {
+ { FBNIC_QM_TWQ_IDLE(0), FBNIC_QM_TWQ_IDLE_CNT, },
+ { FBNIC_QM_TQS_IDLE(0), FBNIC_QM_TQS_IDLE_CNT, },
+ { FBNIC_QM_TDE_IDLE(0), FBNIC_QM_TDE_IDLE_CNT, },
+ { FBNIC_QM_TCQ_IDLE(0), FBNIC_QM_TCQ_IDLE_CNT, },
+ }, rx[] = {
+ { FBNIC_QM_HPQ_IDLE(0), FBNIC_QM_HPQ_IDLE_CNT, },
+ { FBNIC_QM_PPQ_IDLE(0), FBNIC_QM_PPQ_IDLE_CNT, },
+ { FBNIC_QM_RCQ_IDLE(0), FBNIC_QM_RCQ_IDLE_CNT, },
+ };
+ bool idle;
+ int err;
+
+ err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000,
+ false, fbd, tx, ARRAY_SIZE(tx));
+ if (err == -ETIMEDOUT) {
+ fbnic_tx_flush(fbd);
+ err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle,
+ 2, 500000, false,
+ fbd, tx, ARRAY_SIZE(tx));
+ fbnic_tx_flush_off(fbd);
+ }
+ if (err) {
+ fbnic_idle_dump(fbd, tx, ARRAY_SIZE(tx), "Tx", err);
+ if (may_fail)
+ return err;
+ }
+
+ err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000,
+ false, fbd, rx, ARRAY_SIZE(rx));
+ if (err)
+ fbnic_idle_dump(fbd, rx, ARRAY_SIZE(rx), "Rx", err);
+ return err;
+}
+
+void fbnic_flush(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ int i, j;
+
+ /* Flush any processed Tx Queue Triads and drop the rest */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+ struct netdev_queue *tx_queue;
+
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail);
+
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
+
+ /* Nothing else to do if Tx queue is disabled */
+ if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
+ continue;
+
+ /* Reset BQL associated with Tx queue */
+ tx_queue = netdev_get_tx_queue(nv->napi.dev,
+ qt->sub0.q_idx);
+ netdev_tx_reset_queue(tx_queue);
+
+ /* Disassociate Tx queue from NAPI */
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, NULL);
+ }
+
+ /* Flush any processed Rx Queue Triads and drop the rest */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ /* Clean the work queues of unprocessed work */
+ fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail);
+ fbnic_clean_bdq(nv, 0, &qt->sub1, qt->sub1.tail);
+
+ /* Reset completion queue descriptor ring */
+ memset(qt->cmpl.desc, 0, qt->cmpl.size);
+
+ fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0);
+ qt->cmpl.pkt->buff.data_hard_start = NULL;
+
+ /* Disassociate Rx queue from NAPI */
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, NULL);
+ }
+ }
+}
+
+void fbnic_fill(struct fbnic_net *fbn)
+{
+ struct fbnic_napi_vector *nv;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ int i, j;
+
+ /* Configure NAPI mapping for Tx */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ /* Nothing to do if Tx queue is disabled */
+ if (qt->sub0.flags & FBNIC_RING_F_DISABLED)
+ continue;
+
+ /* Associate Tx queue with NAPI */
+ netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx,
+ NETDEV_QUEUE_TYPE_TX, &nv->napi);
+ }
+
+ /* Configure NAPI mapping and populate pages
+ * in the BDQ rings to use for Rx
+ */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ /* Associate Rx queue with NAPI */
+ netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx,
+ NETDEV_QUEUE_TYPE_RX, &nv->napi);
+
+ /* Populate the header and payload BDQs */
+ fbnic_fill_bdq(nv, &qt->sub0);
+ fbnic_fill_bdq(nv, &qt->sub1);
+ }
+ }
+}
+
+static void fbnic_enable_twq0(struct fbnic_ring *twq)
+{
+ u32 log_size = fls(twq->size_mask);
+
+ if (!twq->size_mask)
+ return;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_RESET);
+ twq->tail = 0;
+ twq->head = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAL, lower_32_bits(twq->dma));
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAH, upper_32_bits(twq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_SIZE, log_size & 0xf);
+
+ fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE);
+}
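+
+/* Worked example for the size encoding: a 1024 entry ring has
+ * size_mask = 1023 and fls(1023) = 10, so 10 is written as the log2
+ * ring size. A maximal 64K ring gives fls(65535) = 16, which the 4 bit
+ * field truncates to 0 -- the value the hardware reserves for 64K.
+ */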
+
+static void fbnic_enable_tcq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *tcq)
+{
+ u32 log_size = fls(tcq->size_mask);
+
+ if (!tcq->size_mask)
+ return;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_RESET);
+ tcq->tail = 0;
+ tcq->head = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAL, lower_32_bits(tcq->dma));
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAH, upper_32_bits(tcq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_SIZE, log_size & 0xf);
+
+ /* Store interrupt information for the completion queue */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_CTL, nv->v_idx);
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_THRESHOLD, tcq->size_mask / 2);
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_MASK, 0);
+
+ /* Enable queue */
+ fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_ENABLE);
+}
+
+static void fbnic_enable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq)
+{
+ u32 bdq_ctl = FBNIC_QUEUE_BDQ_CTL_ENABLE;
+ u32 log_size;
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, FBNIC_QUEUE_BDQ_CTL_RESET);
+ ppq->tail = 0;
+ ppq->head = 0;
+ hpq->tail = 0;
+ hpq->head = 0;
+
+ log_size = fls(hpq->size_mask);
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAL, lower_32_bits(hpq->dma));
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAH, upper_32_bits(hpq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_SIZE, log_size & 0xf);
+
+ if (!ppq->size_mask)
+ goto write_ctl;
+
+ log_size = fls(ppq->size_mask);
+
+ /* Add enabling of PPQ to BDQ control */
+ bdq_ctl |= FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAL, lower_32_bits(ppq->dma));
+ fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAH, upper_32_bits(ppq->dma));
+ fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_SIZE, log_size & 0xf);
+
+write_ctl:
+ fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl);
+}
+
+static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *rcq)
+{
+ u32 drop_mode, rcq_ctl;
+
+ drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE;
+
+ /* Specify packet layout */
+ rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK, drop_mode) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK, FBNIC_RX_HROOM) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK, FBNIC_RX_TROOM);
+
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl);
+}
+
+static void fbnic_enable_rcq(struct fbnic_napi_vector *nv,
+ struct fbnic_ring *rcq)
+{
+ u32 log_size = fls(rcq->size_mask);
+ u32 rcq_ctl;
+
+ fbnic_config_drop_mode_rcq(nv, rcq);
+
+ rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK,
+ FBNIC_RX_MAX_HDR) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK,
+ FBNIC_RX_PAYLD_OFFSET) |
+ FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK,
+ FBNIC_RX_PAYLD_PG_CL);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL1, rcq_ctl);
+
+ /* Reset head/tail */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_RESET);
+ rcq->head = 0;
+ rcq->tail = 0;
+
+ /* Store descriptor ring address and size */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAL, lower_32_bits(rcq->dma));
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAH, upper_32_bits(rcq->dma));
+
+ /* Write lower 4 bits of log size as 64K ring size is 0 */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf);
+
+ /* Store interrupt information for the completion queue */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2);
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0);
+
+ /* Enable queue */
+ fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_ENABLE);
+}
+
+void fbnic_enable(struct fbnic_net *fbn)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_napi_vector *nv;
+ int i, j;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ /* Setup Tx Queue Triads */
+ for (i = 0; i < nv->txt_count; i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_enable_twq0(&qt->sub0);
+ fbnic_enable_tcq(nv, &qt->cmpl);
+ }
+
+ /* Setup Rx Queue Triads */
+ for (j = 0; j < nv->rxt_count; j++, i++) {
+ struct fbnic_q_triad *qt = &nv->qt[i];
+
+ fbnic_enable_bdq(&qt->sub0, &qt->sub1);
+ fbnic_config_drop_mode_rcq(nv, &qt->cmpl);
+ fbnic_enable_rcq(nv, &qt->cmpl);
+ }
+ }
+
+ fbnic_wrfl(fbd);
+}
+
+static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv)
+{
+ struct fbnic_dev *fbd = nv->fbd;
+ u32 val;
+
+ val = FBNIC_INTR_CQ_REARM_INTR_UNMASK;
+
+ fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val);
+}
+
+void fbnic_napi_enable(struct fbnic_net *fbn)
+{
+ u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_napi_vector *nv;
+ int i;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ napi_enable(&nv->napi);
+
+ fbnic_nv_irq_enable(nv);
+
+ /* Record bit used for NAPI IRQs so we can
+ * set the mask appropriately
+ */
+ irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
+ }
+
+ /* Force the first interrupt on the device to guarantee
+ * that any packets that may have been enqueued during the
+ * bringup are processed.
+ */
+ for (i = 0; i < ARRAY_SIZE(irqs); i++) {
+ if (!irqs[i])
+ continue;
+ fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]);
+ }
+
+ fbnic_wrfl(fbd);
+}
+
+void fbnic_napi_depletion_check(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {};
+ struct fbnic_dev *fbd = fbn->fbd;
+ struct fbnic_napi_vector *nv;
+ int i, j;
+
+ list_for_each_entry(nv, &fbn->napis, napis) {
+ /* Find RQs which are completely out of pages */
+ for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) {
+ /* Assume 4 pages is always enough to fit a packet
+ * and therefore generate a completion and an IRQ.
+ */
+ if (fbnic_desc_used(&nv->qt[i].sub0) < 4 ||
+ fbnic_desc_used(&nv->qt[i].sub1) < 4)
+ irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(irqs); i++) {
+ if (!irqs[i])
+ continue;
+ fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(i), irqs[i]);
+ fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]);
+ }
+
+ fbnic_wrfl(fbd);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
new file mode 100644
index 000000000000..4a206c0e7192
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates. */
+
+#ifndef _FBNIC_TXRX_H_
+#define _FBNIC_TXRX_H_
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/xdp.h>
+
+struct fbnic_net;
+
+/* Guarantee we have space needed for storing the buffer
+ * To store the buffer we need:
+ * 1 descriptor per page
+ * + 1 descriptor for skb head
+ * + 2 descriptors for metadata and optional metadata
+ * + 7 descriptors to keep tail out of the same cacheline as head
+ * If we cannot guarantee that then we should return TX_BUSY
+ */
+#define FBNIC_MAX_SKB_DESC (MAX_SKB_FRAGS + 10)
+#define FBNIC_TX_DESC_WAKEUP (FBNIC_MAX_SKB_DESC * 2)
+#define FBNIC_TX_DESC_MIN roundup_pow_of_two(FBNIC_TX_DESC_WAKEUP)
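+
+/* Worked example, assuming MAX_SKB_FRAGS = 17: FBNIC_MAX_SKB_DESC is
+ * 27 descriptors, the wake threshold is 54 free descriptors and
+ * FBNIC_TX_DESC_MIN rounds up to a 64 entry minimum ring, enough that a
+ * queue stopped by one worst-case skb can always be restarted.
+ */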
+
+#define FBNIC_MAX_TXQS 128u
+#define FBNIC_MAX_RXQS 128u
+
+#define FBNIC_TXQ_SIZE_DEFAULT 1024
+#define FBNIC_HPQ_SIZE_DEFAULT 256
+#define FBNIC_PPQ_SIZE_DEFAULT 256
+#define FBNIC_RCQ_SIZE_DEFAULT 1024
+
+#define FBNIC_RX_TROOM \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define FBNIC_RX_HROOM \
+ (ALIGN(FBNIC_RX_TROOM + NET_SKB_PAD, 128) - FBNIC_RX_TROOM)
+#define FBNIC_RX_PAD 0
+#define FBNIC_RX_MAX_HDR (1536 - FBNIC_RX_PAD)
+#define FBNIC_RX_PAYLD_OFFSET 0
+#define FBNIC_RX_PAYLD_PG_CL 0
+
+#define FBNIC_RING_F_DISABLED BIT(0)
+#define FBNIC_RING_F_CTX BIT(1)
+#define FBNIC_RING_F_STATS BIT(2) /* Ring's stats may be used */
+
+struct fbnic_pkt_buff {
+ struct xdp_buff buff;
+ u32 data_truesize;
+ u16 data_len;
+ u16 nr_frags;
+};
+
+/* Pagecnt bias is long max to reserve the last bit to catch overflow
+ * cases where if we overcharge the bias it will flip over to be negative.
+ */
+#define PAGECNT_BIAS_MAX LONG_MAX
+struct fbnic_rx_buf {
+ struct page *page;
+ long pagecnt_bias;
+};
+
+struct fbnic_ring {
+ /* Pointer to buffer specific info */
+ union {
+ struct fbnic_pkt_buff *pkt; /* RCQ */
+ struct fbnic_rx_buf *rx_buf; /* BDQ */
+ void **tx_buf; /* TWQ */
+ void *buffer; /* Generic pointer */
+ };
+
+ u32 __iomem *doorbell; /* Pointer to CSR space for ring */
+ __le64 *desc; /* Descriptor ring memory */
+ u16 size_mask; /* Size of ring in descriptors - 1 */
+ u8 q_idx; /* Logical netdev ring index */
+ u8 flags; /* Ring flags (FBNIC_RING_F_*) */
+
+ u32 head, tail; /* Head/Tail of ring */
+
+ /* Slow path fields follow */
+ dma_addr_t dma; /* Phys addr of descriptor memory */
+ size_t size; /* Size of descriptor ring in memory */
+};
+
+struct fbnic_q_triad {
+ struct fbnic_ring sub0, sub1, cmpl;
+};
+
+struct fbnic_napi_vector {
+ struct napi_struct napi;
+ struct device *dev; /* Device for DMA unmapping */
+ struct page_pool *page_pool;
+ struct fbnic_dev *fbd;
+ char name[IFNAMSIZ + 9];
+
+ u16 v_idx;
+ u8 txt_count;
+ u8 rxt_count;
+
+ struct list_head napis;
+
+ struct fbnic_q_triad qt[];
+};
+
+netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev);
+netdev_features_t
+fbnic_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features);
+
+int fbnic_alloc_napi_vectors(struct fbnic_net *fbn);
+void fbnic_free_napi_vectors(struct fbnic_net *fbn);
+int fbnic_alloc_resources(struct fbnic_net *fbn);
+void fbnic_free_resources(struct fbnic_net *fbn);
+void fbnic_napi_enable(struct fbnic_net *fbn);
+void fbnic_napi_disable(struct fbnic_net *fbn);
+void fbnic_enable(struct fbnic_net *fbn);
+void fbnic_disable(struct fbnic_net *fbn);
+void fbnic_flush(struct fbnic_net *fbn);
+void fbnic_fill(struct fbnic_net *fbn);
+
+void fbnic_napi_depletion_check(struct net_device *netdev);
+int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail);
+
+#endif /* _FBNIC_TXRX_H_ */
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index d4cdf3d4f552..7fa1820db9cc 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -234,12 +234,13 @@ static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
/**
* ks8851_rx_pkts - receive packets from the host
* @ks: The device information.
+ * @rxq: Queue of packets received in this function.
*
* This is called from the IRQ work queue when the system detects that there
* are packets in the receive queue. Find out how many packets there are and
* read them from the FIFO.
*/
-static void ks8851_rx_pkts(struct ks8851_net *ks)
+static void ks8851_rx_pkts(struct ks8851_net *ks, struct sk_buff_head *rxq)
{
struct sk_buff *skb;
unsigned rxfc;
@@ -299,7 +300,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
ks8851_dbg_dumpkkt(ks, rxpkt);
skb->protocol = eth_type_trans(skb, ks->netdev);
- __netif_rx(skb);
+ __skb_queue_tail(rxq, skb);
ks->netdev->stats.rx_packets++;
ks->netdev->stats.rx_bytes += rxlen;
@@ -326,65 +327,50 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
static irqreturn_t ks8851_irq(int irq, void *_ks)
{
struct ks8851_net *ks = _ks;
- unsigned handled = 0;
+ struct sk_buff_head rxq;
unsigned long flags;
unsigned int status;
-
- local_bh_disable();
+ struct sk_buff *skb;
ks8851_lock(ks, &flags);
status = ks8851_rdreg16(ks, KS_ISR);
+ ks8851_wrreg16(ks, KS_ISR, status);
netif_dbg(ks, intr, ks->netdev,
"%s: status 0x%04x\n", __func__, status);
- if (status & IRQ_LCI)
- handled |= IRQ_LCI;
-
if (status & IRQ_LDI) {
u16 pmecr = ks8851_rdreg16(ks, KS_PMECR);
pmecr &= ~PMECR_WKEVT_MASK;
ks8851_wrreg16(ks, KS_PMECR, pmecr | PMECR_WKEVT_LINK);
-
- handled |= IRQ_LDI;
}
- if (status & IRQ_RXPSI)
- handled |= IRQ_RXPSI;
-
if (status & IRQ_TXI) {
unsigned short tx_space = ks8851_rdreg16(ks, KS_TXMIR);
netif_dbg(ks, intr, ks->netdev,
"%s: txspace %d\n", __func__, tx_space);
- spin_lock(&ks->statelock);
+ spin_lock_bh(&ks->statelock);
ks->tx_space = tx_space;
if (netif_queue_stopped(ks->netdev))
netif_wake_queue(ks->netdev);
- spin_unlock(&ks->statelock);
-
- handled |= IRQ_TXI;
+ spin_unlock_bh(&ks->statelock);
}
- if (status & IRQ_RXI)
- handled |= IRQ_RXI;
-
if (status & IRQ_SPIBEI) {
netdev_err(ks->netdev, "%s: spi bus error\n", __func__);
- handled |= IRQ_SPIBEI;
}
- ks8851_wrreg16(ks, KS_ISR, handled);
-
if (status & IRQ_RXI) {
/* the datasheet says to disable the rx interrupt during
* packet read-out, however we're masking the interrupt
* from the device so do not bother masking just the RX
* from the device. */
- ks8851_rx_pkts(ks);
+ __skb_queue_head_init(&rxq);
+ ks8851_rx_pkts(ks, &rxq);
}
/* if something stopped the rx process, probably due to wanting
@@ -408,7 +394,9 @@ static irqreturn_t ks8851_irq(int irq, void *_ks)
if (status & IRQ_LCI)
mii_check_link(&ks->mii);
- local_bh_enable();
+ if (status & IRQ_RXI)
+ while ((skb = __skb_dequeue(&rxq)))
+ netif_rx(skb);
return IRQ_HANDLED;
}
@@ -494,6 +482,7 @@ static int ks8851_net_open(struct net_device *dev)
ks8851_wrreg16(ks, KS_IER, ks->rc_ier);
ks->queued_len = 0;
+ ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
netif_start_queue(ks->netdev);
netif_dbg(ks, ifup, ks->netdev, "network device up\n");
@@ -647,14 +636,14 @@ static void ks8851_set_rx_mode(struct net_device *dev)
/* schedule work to do the actual set of the data if needed */
- spin_lock(&ks->statelock);
+ spin_lock_bh(&ks->statelock);
if (memcmp(&rxctrl, &ks->rxctrl, sizeof(rxctrl)) != 0) {
memcpy(&ks->rxctrl, &rxctrl, sizeof(ks->rxctrl));
schedule_work(&ks->rxctrl_work);
}
- spin_unlock(&ks->statelock);
+ spin_unlock_bh(&ks->statelock);
}
static int ks8851_set_mac_address(struct net_device *dev, void *addr)
@@ -1113,7 +1102,6 @@ int ks8851_probe_common(struct net_device *netdev, struct device *dev,
int ret;
ks->netdev = netdev;
- ks->tx_space = 6144;
ks->gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
ret = PTR_ERR_OR_ZERO(ks->gpio);
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 670c1de966db..3062cc0f9199 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -340,10 +340,10 @@ static void ks8851_tx_work(struct work_struct *work)
tx_space = ks8851_rdreg16_spi(ks, KS_TXMIR);
- spin_lock(&ks->statelock);
+ spin_lock_bh(&ks->statelock);
ks->queued_len -= dequeued_len;
ks->tx_space = tx_space;
- spin_unlock(&ks->statelock);
+ spin_unlock_bh(&ks->statelock);
ks8851_unlock_spi(ks, &flags);
}
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index c5aeeb964c17..dc1d9f774565 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -5427,7 +5427,7 @@ static int netdev_change_mtu(struct net_device *dev, int new_mtu)
}
hw_mtu = (hw_mtu + 3) & ~3;
hw_priv->mtu = hw_mtu;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c
index 443128adbcb6..26b00e66d912 100644
--- a/drivers/net/ethernet/microchip/encx24j600-regmap.c
+++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c
@@ -75,7 +75,7 @@ static int regmap_encx24j600_sfr_read(void *context, u8 reg, u8 *val,
if (unlikely(ret))
return ret;
} else {
- /* Translate registers that are more effecient using
+ /* Translate registers that are more efficient using
* 3-byte SPI commands
*/
switch (reg) {
@@ -129,7 +129,7 @@ static int regmap_encx24j600_sfr_update(struct encx24j600_context *ctx,
if (unlikely(ret))
return ret;
} else {
- /* Translate registers that are more effecient using
+ /* Translate registers that are more efficient using
* 3-byte SPI commands
*/
switch (reg) {
@@ -474,13 +474,13 @@ static struct regmap_config regcfg = {
.unlock = regmap_unlock_mutex,
};
-static struct regmap_bus regmap_encx24j600 = {
+static const struct regmap_bus regmap_encx24j600 = {
.write = regmap_encx24j600_write,
.read = regmap_encx24j600_read,
.reg_update_bits = regmap_encx24j600_reg_update_bits,
};
-static struct regmap_config phycfg = {
+static const struct regmap_config phycfg = {
.name = "phy",
.reg_bits = 8,
.val_bits = 16,
@@ -492,7 +492,7 @@ static struct regmap_config phycfg = {
.volatile_reg = encx24j600_phymap_volatile,
};
-static struct regmap_bus phymap_encx24j600 = {
+static const struct regmap_bus phymap_encx24j600 = {
.reg_write = regmap_encx24j600_phy_reg_write,
.reg_read = regmap_encx24j600_phy_reg_read,
};
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index d7c8aa77ec75..b011bf5c2305 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -569,7 +569,7 @@ static void encx24j600_dump_config(struct encx24j600_priv *priv,
pr_info(DRV_NAME " MABBIPG: %04X\n", encx24j600_read_reg(priv,
MABBIPG));
- /* PHY configuation */
+ /* PHY configuration */
pr_info(DRV_NAME " PHCON1: %04X\n", encx24j600_read_phy(priv, PHCON1));
pr_info(DRV_NAME " PHCON2: %04X\n", encx24j600_read_phy(priv, PHCON2));
pr_info(DRV_NAME " PHANA: %04X\n", encx24j600_read_phy(priv, PHANA));
@@ -837,7 +837,9 @@ static void encx24j600_hw_tx(struct encx24j600_priv *priv)
dump_packet("TX", priv->tx_skb->len, priv->tx_skb->data);
if (encx24j600_read_reg(priv, EIR) & TXABTIF)
- /* Last transmition aborted due to error. Reset TX interface */
+ /* Last transmission aborted due to error.
+ * Reset TX interface
+ */
encx24j600_reset_hw_tx(priv);
/* Clear the TXIF flag if were previously set */
@@ -1112,7 +1114,6 @@ MODULE_DEVICE_TABLE(spi, encx24j600_spi_id_table);
static struct spi_driver encx24j600_spi_net_driver = {
.driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.bus = &spi_bus_type,
},
.probe = encx24j600_spi_probe,
diff --git a/drivers/net/ethernet/microchip/encx24j600_hw.h b/drivers/net/ethernet/microchip/encx24j600_hw.h
index 34c5a289898c..2522f4f48b67 100644
--- a/drivers/net/ethernet/microchip/encx24j600_hw.h
+++ b/drivers/net/ethernet/microchip/encx24j600_hw.h
@@ -243,7 +243,7 @@ int devm_regmap_init_encx24j600(struct device *dev,
/* MAIPG */
/* value of the high byte is given by the reserved bits,
- * value of the low byte is recomended setting of the
+ * value of the low byte is the recommended setting of the
* IPG parameter.
*/
#define MAIPGH_VAL 0x0C
diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c
index 8a6ae171e375..3a63ec091413 100644
--- a/drivers/net/ethernet/microchip/lan743x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c
@@ -1029,7 +1029,7 @@ static int lan743x_ethtool_set_rxfh(struct net_device *netdev,
}
static int lan743x_ethtool_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
@@ -1076,15 +1076,10 @@ static int lan743x_ethtool_get_eee(struct net_device *netdev,
buf = lan743x_csr_read(adapter, MAC_CR);
if (buf & MAC_CR_EEE_EN_) {
- eee->eee_enabled = true;
- eee->tx_lpi_enabled = true;
/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
buf = lan743x_csr_read(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT);
eee->tx_lpi_timer = buf;
} else {
- eee->eee_enabled = false;
- eee->eee_active = false;
- eee->tx_lpi_enabled = false;
eee->tx_lpi_timer = 0;
}
@@ -1097,7 +1092,6 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev,
struct lan743x_adapter *adapter;
struct phy_device *phydev;
u32 buf = 0;
- int ret = 0;
if (!netdev)
return -EINVAL;
@@ -1114,23 +1108,8 @@ static int lan743x_ethtool_set_eee(struct net_device *netdev,
}
if (eee->eee_enabled) {
- ret = phy_init_eee(phydev, false);
- if (ret) {
- netif_err(adapter, drv, adapter->netdev,
- "EEE initialization failed\n");
- return ret;
- }
-
buf = (u32)eee->tx_lpi_timer;
lan743x_csr_write(adapter, MAC_EEE_TX_LPI_REQ_DLY_CNT, buf);
-
- buf = lan743x_csr_read(adapter, MAC_CR);
- buf |= MAC_CR_EEE_EN_;
- lan743x_csr_write(adapter, MAC_CR, buf);
- } else {
- buf = lan743x_csr_read(adapter, MAC_CR);
- buf &= ~MAC_CR_EEE_EN_;
- lan743x_csr_write(adapter, MAC_CR, buf);
}
return phy_ethtool_set_eee(phydev, eee);
@@ -1148,8 +1127,12 @@ static void lan743x_ethtool_get_wol(struct net_device *netdev,
if (netdev->phydev)
phy_ethtool_get_wol(netdev->phydev, wol);
- wol->supported |= WAKE_BCAST | WAKE_UCAST | WAKE_MCAST |
- WAKE_MAGIC | WAKE_PHY | WAKE_ARP;
+ if (wol->supported != adapter->phy_wol_supported)
+ netif_warn(adapter, drv, adapter->netdev,
+ "PHY changed its supported WOL! old=%x, new=%x\n",
+ adapter->phy_wol_supported, wol->supported);
+
+ wol->supported |= MAC_SUPPORTED_WAKES;
if (adapter->is_pci11x1x)
wol->supported |= WAKE_MAGICSECURE;
@@ -1164,7 +1147,39 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
{
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ /* WAKE_MAGICSECURE is a modifier of and only valid together with
+ * WAKE_MAGIC
+ */
+ if ((wol->wolopts & WAKE_MAGICSECURE) && !(wol->wolopts & WAKE_MAGIC))
+ return -EINVAL;
+
+ if (netdev->phydev) {
+ struct ethtool_wolinfo phy_wol;
+ int ret;
+
+ phy_wol.wolopts = wol->wolopts & adapter->phy_wol_supported;
+
+ /* If WAKE_MAGICSECURE was requested, filter out WAKE_MAGIC
+ * for PHYs that do not support WAKE_MAGICSECURE
+ */
+ if (wol->wolopts & WAKE_MAGICSECURE &&
+ !(adapter->phy_wol_supported & WAKE_MAGICSECURE))
+ phy_wol.wolopts &= ~WAKE_MAGIC;
+
+ ret = phy_ethtool_set_wol(netdev->phydev, &phy_wol);
+ if (ret && (ret != -EOPNOTSUPP))
+ return ret;
+
+ if (ret == -EOPNOTSUPP)
+ adapter->phy_wolopts = 0;
+ else
+ adapter->phy_wolopts = phy_wol.wolopts;
+ } else {
+ adapter->phy_wolopts = 0;
+ }
+
adapter->wolopts = 0;
+ wol->wolopts &= ~adapter->phy_wolopts;
if (wol->wolopts & WAKE_UCAST)
adapter->wolopts |= WAKE_UCAST;
if (wol->wolopts & WAKE_MCAST)
@@ -1185,10 +1200,10 @@ static int lan743x_ethtool_set_wol(struct net_device *netdev,
memset(adapter->sopass, 0, sizeof(u8) * SOPASS_MAX);
}
+ wol->wolopts = adapter->wolopts | adapter->phy_wolopts;
device_set_wakeup_enable(&adapter->pdev->dev, (bool)wol->wolopts);
- return netdev->phydev ? phy_ethtool_set_wol(netdev->phydev, wol)
- : -ENETDOWN;
+ return 0;
}
#endif /* CONFIG_PM */
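
The rework above splits the requested WoL options between PHY and MAC: whatever the PHY armed is masked out of the MAC's share, and the union of the two is reported back through wol->wolopts. A minimal sketch of that precedence rule (helper name is illustrative):

	#include <linux/types.h>

	/* Illustrative: the MAC only arms options the PHY did not take;
	 * the union of both is what the user effectively gets.
	 */
	static u32 example_split_wol(u32 requested, u32 phy_armed, u32 *mac_share)
	{
		*mac_share = requested & ~phy_armed;
		return *mac_share | phy_armed;
	}
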
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 75a988c0bd79..e418539565b1 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -803,7 +803,7 @@ static int lan743x_mdiobus_read_c22(struct mii_bus *bus, int phy_id, int index)
u32 val, mii_access;
int ret;
- /* comfirm MII not busy */
+ /* confirm MII not busy */
ret = lan743x_mac_mii_wait_till_not_busy(adapter);
if (ret < 0)
return ret;
@@ -868,7 +868,7 @@ static int lan743x_mdiobus_read_c45(struct mii_bus *bus, int phy_id,
u32 mmd_access;
int ret;
- /* comfirm MII not busy */
+ /* confirm MII not busy */
ret = lan743x_mac_mii_wait_till_not_busy(adapter);
if (ret < 0)
return ret;
@@ -1462,6 +1462,13 @@ static void lan743x_phy_link_status_change(struct net_device *netdev)
phydev->interface == PHY_INTERFACE_MODE_1000BASEX ||
phydev->interface == PHY_INTERFACE_MODE_2500BASEX)
lan743x_sgmii_config(adapter);
+
+ data = lan743x_csr_read(adapter, MAC_CR);
+ if (phydev->enable_tx_lpi)
+ data |= MAC_CR_EEE_EN_;
+ else
+ data &= ~MAC_CR_EEE_EN_;
+ lan743x_csr_write(adapter, MAC_CR, data);
}
}
@@ -3111,6 +3118,17 @@ static int lan743x_netdev_open(struct net_device *netdev)
if (ret)
goto close_tx;
}
+
+#ifdef CONFIG_PM
+ if (adapter->netdev->phydev) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phy_ethtool_get_wol(netdev->phydev, &wol);
+ adapter->phy_wol_supported = wol.supported;
+ adapter->phy_wolopts = wol.wolopts;
+ }
+#endif
+
return 0;
close_tx:
@@ -3177,7 +3195,7 @@ static int lan743x_netdev_change_mtu(struct net_device *netdev, int new_mtu)
ret = lan743x_mac_set_mtu(adapter, new_mtu);
if (!ret)
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return ret;
}
@@ -3568,7 +3586,7 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
/* clear wake settings */
pmtctl = lan743x_csr_read(adapter, PMT_CTL);
- pmtctl |= PMT_CTL_WUPS_MASK_;
+ pmtctl |= PMT_CTL_WUPS_MASK_ | PMT_CTL_RES_CLR_WKP_MASK_;
pmtctl &= ~(PMT_CTL_GPIO_WAKEUP_EN_ | PMT_CTL_EEE_WAKEUP_EN_ |
PMT_CTL_WOL_EN_ | PMT_CTL_MAC_D3_RX_CLK_OVR_ |
PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ | PMT_CTL_ETH_PHY_WAKE_EN_);
@@ -3580,10 +3598,9 @@ static void lan743x_pm_set_wol(struct lan743x_adapter *adapter)
pmtctl |= PMT_CTL_ETH_PHY_D3_COLD_OVR_ | PMT_CTL_ETH_PHY_D3_OVR_;
- if (adapter->wolopts & WAKE_PHY) {
- pmtctl |= PMT_CTL_ETH_PHY_EDPD_PLL_CTL_;
+ if (adapter->phy_wolopts)
pmtctl |= PMT_CTL_ETH_PHY_WAKE_EN_;
- }
+
if (adapter->wolopts & WAKE_MAGIC) {
wucsr |= MAC_WUCSR_MPEN_;
macrx |= MAC_RX_RXEN_;
@@ -3679,7 +3696,7 @@ static int lan743x_pm_suspend(struct device *dev)
lan743x_csr_write(adapter, MAC_WUCSR2, 0);
lan743x_csr_write(adapter, MAC_WK_SRC, 0xFFFFFFFF);
- if (adapter->wolopts)
+ if (adapter->wolopts || adapter->phy_wolopts)
lan743x_pm_set_wol(adapter);
if (adapter->is_pci11x1x) {
@@ -3703,6 +3720,7 @@ static int lan743x_pm_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct lan743x_adapter *adapter = netdev_priv(netdev);
+ u32 data;
int ret;
pci_set_power_state(pdev, PCI_D0);
@@ -3721,6 +3739,30 @@ static int lan743x_pm_resume(struct device *dev)
return ret;
}
+ ret = lan743x_csr_read(adapter, MAC_WK_SRC);
+ netif_dbg(adapter, drv, adapter->netdev,
+ "Wakeup source : 0x%08X\n", ret);
+
+ /* Clear the wol configuration and status bits. Note that
+ * the status bits are "Write One to Clear (W1C)"
+ */
+ data = MAC_WUCSR_EEE_TX_WAKE_ | MAC_WUCSR_EEE_RX_WAKE_ |
+ MAC_WUCSR_RFE_WAKE_FR_ | MAC_WUCSR_PFDA_FR_ | MAC_WUCSR_WUFR_ |
+ MAC_WUCSR_MPR_ | MAC_WUCSR_BCAST_FR_;
+ lan743x_csr_write(adapter, MAC_WUCSR, data);
+
+ data = MAC_WUCSR2_NS_RCD_ | MAC_WUCSR2_ARP_RCD_ |
+ MAC_WUCSR2_IPV6_TCPSYN_RCD_ | MAC_WUCSR2_IPV4_TCPSYN_RCD_;
+ lan743x_csr_write(adapter, MAC_WUCSR2, data);
+
+ data = MAC_WK_SRC_ETH_PHY_WK_ | MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ |
+ MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ | MAC_WK_SRC_EEE_TX_WK_ |
+ MAC_WK_SRC_EEE_RX_WK_ | MAC_WK_SRC_RFE_FR_WK_ |
+ MAC_WK_SRC_PFDA_FR_WK_ | MAC_WK_SRC_MP_FR_WK_ |
+ MAC_WK_SRC_BCAST_FR_WK_ | MAC_WK_SRC_WU_FR_WK_ |
+ MAC_WK_SRC_WK_FR_SAVED_;
+ lan743x_csr_write(adapter, MAC_WK_SRC, data);
+
	/* open netdev when netdev was in the running state before suspend.
	 * For instance, it is true when the system wakes up after pm-suspend,
	 * however, it is false when the system wakes up after suspend via GUI menu
@@ -3729,9 +3771,6 @@ static int lan743x_pm_resume(struct device *dev)
lan743x_netdev_open(netdev);
netif_device_attach(netdev);
- ret = lan743x_csr_read(adapter, MAC_WK_SRC);
- netif_info(adapter, drv, adapter->netdev,
- "Wakeup source : 0x%08X\n", ret);
return 0;
}
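
The resume path now acknowledges the wake status registers, which are "Write One to Clear" (W1C): writing 1 to a set bit clears it, writing 0 leaves it untouched. A minimal sketch of a blanket W1C acknowledge using the driver's csr accessors (the helper itself is illustrative):

	/* Illustrative: write a W1C register's current value back to itself
	 * so every pending status bit is acknowledged in one shot.
	 */
	static void example_w1c_ack_all(struct lan743x_adapter *adapter, int offset)
	{
		u32 status = lan743x_csr_read(adapter, offset);

		lan743x_csr_write(adapter, offset, status);
	}
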
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 645bc048e52e..3b2585a384e2 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -61,6 +61,7 @@
#define PMT_CTL_RX_FCT_RFE_D3_CLK_OVR_ BIT(18)
#define PMT_CTL_GPIO_WAKEUP_EN_ BIT(15)
#define PMT_CTL_EEE_WAKEUP_EN_ BIT(13)
+#define PMT_CTL_RES_CLR_WKP_MASK_ GENMASK(9, 8)
#define PMT_CTL_READY_ BIT(7)
#define PMT_CTL_ETH_PHY_RST_ BIT(4)
#define PMT_CTL_WOL_EN_ BIT(3)
@@ -227,12 +228,31 @@
#define MAC_WUCSR (0x140)
#define MAC_MP_SO_EN_ BIT(21)
#define MAC_WUCSR_RFE_WAKE_EN_ BIT(14)
+#define MAC_WUCSR_EEE_TX_WAKE_ BIT(13)
+#define MAC_WUCSR_EEE_RX_WAKE_ BIT(11)
+#define MAC_WUCSR_RFE_WAKE_FR_ BIT(9)
+#define MAC_WUCSR_PFDA_FR_ BIT(7)
+#define MAC_WUCSR_WUFR_ BIT(6)
+#define MAC_WUCSR_MPR_ BIT(5)
+#define MAC_WUCSR_BCAST_FR_ BIT(4)
#define MAC_WUCSR_PFDA_EN_ BIT(3)
#define MAC_WUCSR_WAKE_EN_ BIT(2)
#define MAC_WUCSR_MPEN_ BIT(1)
#define MAC_WUCSR_BCST_EN_ BIT(0)
#define MAC_WK_SRC (0x144)
+#define MAC_WK_SRC_ETH_PHY_WK_ BIT(17)
+#define MAC_WK_SRC_IPV6_TCPSYN_RCD_WK_ BIT(16)
+#define MAC_WK_SRC_IPV4_TCPSYN_RCD_WK_ BIT(15)
+#define MAC_WK_SRC_EEE_TX_WK_ BIT(14)
+#define MAC_WK_SRC_EEE_RX_WK_ BIT(13)
+#define MAC_WK_SRC_RFE_FR_WK_ BIT(12)
+#define MAC_WK_SRC_PFDA_FR_WK_ BIT(11)
+#define MAC_WK_SRC_MP_FR_WK_ BIT(10)
+#define MAC_WK_SRC_BCAST_FR_WK_ BIT(9)
+#define MAC_WK_SRC_WU_FR_WK_ BIT(8)
+#define MAC_WK_SRC_WK_FR_SAVED_ BIT(7)
+
#define MAC_MP_SO_HI (0x148)
#define MAC_MP_SO_LO (0x14C)
@@ -295,6 +315,10 @@
#define RFE_INDX(index) (0x580 + (index << 2))
#define MAC_WUCSR2 (0x600)
+#define MAC_WUCSR2_NS_RCD_ BIT(7)
+#define MAC_WUCSR2_ARP_RCD_ BIT(6)
+#define MAC_WUCSR2_IPV6_TCPSYN_RCD_ BIT(5)
+#define MAC_WUCSR2_IPV4_TCPSYN_RCD_ BIT(4)
#define SGMII_ACC (0x720)
#define SGMII_ACC_SGMII_BZY_ BIT(31)
@@ -1018,6 +1042,8 @@ enum lan743x_sgmii_lsd {
LINK_2500_SLAVE
};
+#define MAC_SUPPORTED_WAKES (WAKE_BCAST | WAKE_UCAST | WAKE_MCAST | \
+ WAKE_MAGIC | WAKE_ARP)
struct lan743x_adapter {
struct net_device *netdev;
struct mii_bus *mdiobus;
@@ -1025,6 +1051,8 @@ struct lan743x_adapter {
#ifdef CONFIG_PM
u32 wolopts;
u8 sopass[SOPASS_MAX];
+ u32 phy_wolopts;
+ u32 phy_wol_supported;
#endif
struct pci_dev *pdev;
struct lan743x_csr csr;
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index 2801f08bf1c9..dcea6652d56d 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -58,7 +58,7 @@ int lan743x_gpio_init(struct lan743x_adapter *adapter)
static void lan743x_ptp_wait_till_cmd_done(struct lan743x_adapter *adapter,
u32 bit_mask)
{
- int timeout = 1000;
+ int timeout = PTP_CMD_CTL_TIMEOUT_CNT;
u32 data = 0;
while (timeout &&
@@ -555,7 +555,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
if (half == wf_high) {
/* It's 50% match. Use the toggle option */
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_;
- /* In this case, devide period value by 2 */
+ /* In this case, divide period value by 2 */
ts_period = ns_to_timespec64(div_s64(period64, 2));
period_sec = ts_period.tv_sec;
period_nsec = ts_period.tv_nsec;
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.h b/drivers/net/ethernet/microchip/lan743x_ptp.h
index e26d4eff7133..0d29914cd460 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.h
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.h
@@ -21,6 +21,7 @@
#define LAN743X_PTP_N_EXTTS 4
#define LAN743X_PTP_N_PPS 0
#define PCI11X1X_PTP_IO_MAX_CHANNELS 8
+#define PTP_CMD_CTL_TIMEOUT_CNT 50
struct lan743x_adapter;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
index 06811c60d598..aec7066d83b3 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c
@@ -376,7 +376,6 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
mac_stats->SingleCollisionFrames =
lan966x->stats[idx + SYS_COUNT_TX_COL];
- mac_stats->MultipleCollisionFrames = 0;
mac_stats->FramesReceivedOK =
lan966x->stats[idx + SYS_COUNT_RX_UC] +
lan966x->stats[idx + SYS_COUNT_RX_MC] +
@@ -384,26 +383,19 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev,
mac_stats->FrameCheckSequenceErrors =
lan966x->stats[idx + SYS_COUNT_RX_CRC] +
lan966x->stats[idx + SYS_COUNT_RX_CRC];
- mac_stats->AlignmentErrors = 0;
mac_stats->OctetsTransmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_OCT] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT];
mac_stats->FramesWithDeferredXmissions =
lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD];
- mac_stats->LateCollisions = 0;
- mac_stats->FramesAbortedDueToXSColls = 0;
- mac_stats->FramesLostDueToIntMACXmitError = 0;
- mac_stats->CarrierSenseErrors = 0;
mac_stats->OctetsReceivedOK =
lan966x->stats[idx + SYS_COUNT_RX_OCT];
- mac_stats->FramesLostDueToIntMACRcvError = 0;
mac_stats->MulticastFramesXmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_MC] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC];
mac_stats->BroadcastFramesXmittedOK =
lan966x->stats[idx + SYS_COUNT_TX_BC] +
lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC];
- mac_stats->FramesWithExcessiveDeferral = 0;
mac_stats->MulticastFramesReceivedOK =
lan966x->stats[idx + SYS_COUNT_RX_MC];
mac_stats->BroadcastFramesReceivedOK =
@@ -546,7 +538,7 @@ static int lan966x_set_pauseparam(struct net_device *dev,
}
static int lan966x_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
index f3b1e0d31826..e706163ce9cc 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ifh.h
@@ -78,7 +78,7 @@
/* Classified internal priority for queuing */
#define IFH_POS_QOS_CLASS 100
-/* Bit mask with eight cpu copy classses */
+/* Bit mask with eight cpu copy classes */
#define IFH_POS_CPUQ 92
/* Relearn + learn flags (*) */
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
index 2635ef8958c8..ec672af12e25 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c
@@ -276,7 +276,7 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
++i;
}
- /* Inidcate EOF and valid bytes in the last word */
+ /* Indicate EOF and valid bytes in the last word */
lan_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
QS_INJ_CTRL_VLD_BYTES_SET(skb->len < LAN966X_BUFFER_MIN_SZ ?
0 : last) |
@@ -402,7 +402,7 @@ static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu)
lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)),
lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port));
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!lan966x->fdma)
return 0;
@@ -474,14 +474,14 @@ static int lan966x_port_hwtstamp_set(struct net_device *dev,
cfg->source != HWTSTAMP_SOURCE_PHYLIB)
return -EOPNOTSUPP;
+ if (cfg->source == HWTSTAMP_SOURCE_NETDEV && !port->lan966x->ptp)
+ return -EOPNOTSUPP;
+
err = lan966x_ptp_setup_traps(port, cfg);
if (err)
return err;
if (cfg->source == HWTSTAMP_SOURCE_NETDEV) {
- if (!port->lan966x->ptp)
- return -EOPNOTSUPP;
-
err = lan966x_ptp_hwtstamp_set(port, cfg, extack);
if (err) {
lan966x_ptp_del_traps(port);
@@ -520,7 +520,7 @@ bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb)
u32 val;
/* The IGMP and MLD frames are not forward by the HW if
- * multicast snooping is enabled, therefor don't mark as
+ * multicast snooping is enabled, therefore don't mark as
* offload to allow the SW to forward the frames accordingly.
*/
val = lan_rd(lan966x, ANA_CPU_FWD_CFG(port));
@@ -1087,8 +1087,6 @@ static int lan966x_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, lan966x);
lan966x->dev = &pdev->dev;
- lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
-
if (!device_get_mac_address(&pdev->dev, mac_addr)) {
ether_addr_copy(lan966x->base_mac, mac_addr);
} else {
@@ -1179,6 +1177,8 @@ static int lan966x_probe(struct platform_device *pdev)
return dev_err_probe(&pdev->dev, -ENODEV,
"no ethernet-ports child found\n");
+ lan966x->debugfs_root = debugfs_create_dir("lan966x", NULL);
+
/* init switch */
lan966x_init(lan966x);
lan966x_stats_init(lan966x);
@@ -1257,6 +1257,8 @@ cleanup_ports:
destroy_workqueue(lan966x->stats_queue);
mutex_destroy(&lan966x->stats_lock);
+ debugfs_remove_recursive(lan966x->debugfs_root);
+
return err;
}
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
index caa9e0533c96..f8bebbcf77b2 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h
@@ -326,7 +326,7 @@ struct lan966x {
u8 base_mac[ETH_ALEN];
- spinlock_t tx_lock; /* lock for frame transmition */
+ spinlock_t tx_lock; /* lock for frame transmission */
struct net_device *bridge;
u16 bridge_mask;
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
index 2e83bbb9477e..fdfa4040d9ee 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_port.c
@@ -88,7 +88,7 @@ static void lan966x_port_link_down(struct lan966x_port *port)
SYS_FRONT_PORT_MODE_HDX_MODE,
lan966x, SYS_FRONT_PORT_MODE(port->chip_port));
- /* 8: Flush the queues accociated with the port */
+ /* 8: Flush the queues associated with the port */
lan_rmw(QSYS_SW_PORT_MODE_AGING_MODE_SET(3),
QSYS_SW_PORT_MODE_AGING_MODE,
lan966x, QSYS_SW_PORT_MODE(port->chip_port));
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
index d696cf9dbd19..43913d6204e1 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_tc_flower.c
@@ -45,6 +45,7 @@ static bool lan966x_tc_is_known_etype(struct vcap_tc_flower_parse_usage *st,
static int
lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
+ struct netlink_ext_ack *extack = st->fco->common.extack;
struct flow_match_control match;
int err = 0;
@@ -59,7 +60,7 @@ lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
VCAP_KF_L3_FRAGMENT,
VCAP_BIT_0);
if (err)
- goto out;
+ goto bad_frag_out;
}
if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
@@ -72,15 +73,20 @@ lan966x_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
VCAP_KF_L3_FRAG_OFS_GT0,
VCAP_BIT_1);
if (err)
- goto out;
+ goto bad_frag_out;
}
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
+ FLOW_DIS_FIRST_FRAG,
+ match.mask->flags, extack))
+ return -EOPNOTSUPP;
+
st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
return err;
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
+bad_frag_out:
+ NL_SET_ERR_MSG_MOD(extack, "ip_frag parse error");
return err;
}
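
Both this driver and sparx5 (below) now funnel the "can we offload this control-flag match?" decision through flow_rule_is_supp_control_flags(), which returns false and fills the extack message when the mask covers flags outside the supported set. A minimal sketch of the calling convention, assuming the same two supported flags:

	#include <net/flow_offload.h>

	/* Illustrative: reject matches on control flags the hardware cannot
	 * offload; the helper sets the extack message itself.
	 */
	static int example_check_ctrl_flags(u32 mask, struct netlink_ext_ack *extack)
	{
		if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
						     FLOW_DIS_FIRST_FRAG,
						     mask, extack))
			return -EOPNOTSUPP;

		return 0;
	}
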
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
index a4414f63c9b1..a1471e38d118 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
@@ -581,7 +581,7 @@ static void lan966x_vcap_move(struct net_device *dev,
lan966x_vcap_wait_update(lan966x, admin->tgt_inst);
}
-static struct vcap_operations lan966x_vcap_ops = {
+static const struct vcap_operations lan966x_vcap_ops = {
.validate_keyset = lan966x_vcap_validate_keyset,
.add_default_fields = lan966x_vcap_add_default_fields,
.cache_erase = lan966x_vcap_cache_erase,
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
index 3c44660128da..fa34a739c748 100644
--- a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
+++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c
@@ -157,7 +157,7 @@ void lan966x_vlan_port_apply(struct lan966x_port *port)
pvid = lan966x_vlan_port_get_pvid(port);
- /* Ingress clasification (ANA_PORT_VLAN_CFG) */
+ /* Ingress classification (ANA_PORT_VLAN_CFG) */
/* Default vlan to classify for untagged frames (may be zero) */
val = ANA_VLAN_CFG_VLAN_VID_SET(pvid);
if (port->vlan_aware)
diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile
index 1cb1cc3f1a85..b68fe9c9a656 100644
--- a/drivers/net/ethernet/microchip/sparx5/Makefile
+++ b/drivers/net/ethernet/microchip/sparx5/Makefile
@@ -10,7 +10,8 @@ sparx5-switch-y := sparx5_main.o sparx5_packet.o \
sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o \
sparx5_ptp.o sparx5_pgid.o sparx5_tc.o sparx5_qos.o \
sparx5_vcap_impl.o sparx5_vcap_ag_api.o sparx5_tc_flower.o \
- sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o sparx5_psfp.o
+ sparx5_tc_matchall.o sparx5_pool.o sparx5_sdlb.o sparx5_police.o \
+ sparx5_psfp.o sparx5_mirror.o
sparx5-switch-$(CONFIG_SPARX5_DCB) += sparx5_dcb.o
sparx5-switch-$(CONFIG_DEBUG_FS) += sparx5_vcap_debugfs.o
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
index a06dc5a9b355..4f800c1a435d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c
@@ -1183,7 +1183,7 @@ static void sparx5_config_port_stats(struct sparx5 *sparx5, int portno)
}
static int sparx5_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct sparx5_port *port = netdev_priv(dev);
struct sparx5 *sparx5 = port->sparx5;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
index 141897dfe388..1915998f6079 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c
@@ -143,7 +143,7 @@ static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx)
static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx)
{
- /* Dectivate the RX channel */
+ /* Deactivate the RX channel */
spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE,
sparx5, FDMA_CH_ACTIVATE);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index 3c066b62e689..b64c814eac11 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -899,6 +899,9 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
dev_err(sparx5->dev, "PTP failed\n");
goto cleanup_ports;
}
+
+ INIT_LIST_HEAD(&sparx5->mall_entries);
+
goto cleanup_config;
cleanup_ports:
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
index 316fed5f2735..1982ae03b4fe 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h
@@ -18,6 +18,7 @@
#include <linux/ptp_clock_kernel.h>
#include <linux/hrtimer.h>
#include <linux/debugfs.h>
+#include <net/flow_offload.h>
#include "sparx5_main_regs.h"
@@ -173,6 +174,7 @@ struct sparx5_port {
struct phylink_config phylink_config;
struct phylink *phylink;
struct phylink_pcs phylink_pcs;
+ struct flow_stats mirror_stats;
u16 portno;
/* Ingress default VLAN (pvid) */
u16 pvid;
@@ -227,6 +229,22 @@ struct sparx5_mdb_entry {
u16 pgid_idx;
};
+struct sparx5_mall_mirror_entry {
+ u32 idx;
+ struct sparx5_port *port;
+};
+
+struct sparx5_mall_entry {
+ struct list_head list;
+ struct sparx5_port *port;
+ unsigned long cookie;
+ enum flow_action_id type;
+ bool ingress;
+ union {
+ struct sparx5_mall_mirror_entry mirror;
+ };
+};
+
#define SPARX5_PTP_TIMEOUT msecs_to_jiffies(10)
#define SPARX5_SKB_CB(skb) \
((struct sparx5_skb_cb *)((skb)->cb))
@@ -295,6 +313,7 @@ struct sparx5 {
struct vcap_control *vcap_ctrl;
/* PGID allocation map */
u8 pgid_map[PGID_TABLE_SIZE];
+ struct list_head mall_entries;
/* Common root for debugfs */
struct dentry *debugfs_root;
};
@@ -541,6 +560,12 @@ void sparx5_psfp_init(struct sparx5 *sparx5);
void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
const ktime_t org_base_time, ktime_t *new_base_time);
+/* sparx5_mirror.c */
+int sparx5_mirror_add(struct sparx5_mall_entry *entry);
+void sparx5_mirror_del(struct sparx5_mall_entry *entry);
+void sparx5_mirror_stats(struct sparx5_mall_entry *entry,
+ struct flow_stats *fstats);
+
/* Clock period in picoseconds */
static inline u32 sparx5_clk_period(enum sparx5_core_clockfreq cclock)
{
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
index bd03a0a3c1da..22acc1f3380c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main_regs.h
@@ -83,6 +83,64 @@ enum sparx5_target {
#define ANA_AC_OWN_UPSID_OWN_UPSID_GET(x)\
FIELD_GET(ANA_AC_OWN_UPSID_OWN_UPSID, x)
+/* ANA_AC:MIRROR_PROBE:PROBE_CFG */
+#define ANA_AC_PROBE_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 0, 0, 1, 4)
+
+#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD GENMASK(31, 27)
+#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD, x)
+#define ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_RX_CPU_AND_VD, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_CPU_SET GENMASK(26, 19)
+#define ANA_AC_PROBE_CFG_PROBE_CPU_SET_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_CPU_SET, x)
+#define ANA_AC_PROBE_CFG_PROBE_CPU_SET_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_CPU_SET, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_VID GENMASK(18, 6)
+#define ANA_AC_PROBE_CFG_PROBE_VID_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_VID, x)
+#define ANA_AC_PROBE_CFG_PROBE_VID_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_VID, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE GENMASK(5, 4)
+#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_VLAN_MODE, x)
+#define ANA_AC_PROBE_CFG_PROBE_VLAN_MODE_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_VLAN_MODE, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE GENMASK(3, 2)
+#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_MAC_MODE, x)
+#define ANA_AC_PROBE_CFG_PROBE_MAC_MODE_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_MAC_MODE, x)
+
+#define ANA_AC_PROBE_CFG_PROBE_DIRECTION GENMASK(1, 0)
+#define ANA_AC_PROBE_CFG_PROBE_DIRECTION_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_CFG_PROBE_DIRECTION, x)
+#define ANA_AC_PROBE_CFG_PROBE_DIRECTION_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_CFG_PROBE_DIRECTION, x)
+
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG */
+#define ANA_AC_PROBE_PORT_CFG(g) \
+ __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 8, 0, 1, 4)
+
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG1 */
+#define ANA_AC_PROBE_PORT_CFG1(g) \
+ __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 12, 0, 1, 4)
+
+/* ANA_AC:MIRROR_PROBE:PROBE_PORT_CFG2 */
+#define ANA_AC_PROBE_PORT_CFG2(g) \
+ __REG(TARGET_ANA_AC, 0, 1, 893696, g, 3, 32, 16, 0, 1, 4)
+
+#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2 BIT(0)
+#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2_SET(x)\
+ FIELD_PREP(ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2, x)
+#define ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2_GET(x)\
+ FIELD_GET(ANA_AC_PROBE_PORT_CFG2_PROBE_PORT_MASK2, x)
+
/* ANA_AC:SRC:SRC_CFG */
#define ANA_AC_SRC_CFG(g) __REG(TARGET_ANA_AC,\
0, 1, 849920, g, 102, 16, 0, 0, 1, 4)
@@ -6203,6 +6261,16 @@ enum sparx5_target {
#define QFWD_SWITCH_PORT_MODE_LEARNALL_MORE_GET(x)\
FIELD_GET(QFWD_SWITCH_PORT_MODE_LEARNALL_MORE, x)
+/* QFWD:SYSTEM:FRAME_COPY_CFG */
+#define QFWD_FRAME_COPY_CFG(r)\
+ __REG(TARGET_QFWD, 0, 1, 0, 0, 1, 340, 284, r, 12, 4)
+
+#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL GENMASK(12, 6)
+#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_SET(x)\
+ FIELD_PREP(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x)
+#define QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_GET(x)\
+ FIELD_GET(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, x)
+
/* QRES:RES_CTRL:RES_CFG */
#define QRES_RES_CFG(g) __REG(TARGET_QRES,\
0, 1, 0, g, 5120, 16, 0, 0, 1, 4)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
new file mode 100644
index 000000000000..15db423be4aa
--- /dev/null
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_mirror.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Microchip Sparx5 Switch driver
+ *
+ * Copyright (c) 2024 Microchip Technology Inc. and its subsidiaries.
+ */
+
+#include "sparx5_main.h"
+#include "sparx5_main_regs.h"
+#include "sparx5_tc.h"
+
+#define SPX5_MIRROR_PROBE_MAX 3
+#define SPX5_MIRROR_DISABLED 0
+#define SPX5_MIRROR_EGRESS 1
+#define SPX5_MIRROR_INGRESS 2
+#define SPX5_MIRROR_MONITOR_PORT_DEFAULT 65
+#define SPX5_QFWD_MP_OFFSET 9 /* Mirror port offset in the QFWD register */
+
+/* Convert from bool ingress/egress to mirror direction */
+static u32 sparx5_mirror_to_dir(bool ingress)
+{
+ return ingress ? SPX5_MIRROR_INGRESS : SPX5_MIRROR_EGRESS;
+}
+
+/* Get ports belonging to this mirror */
+static u64 sparx5_mirror_port_get(struct sparx5 *sparx5, u32 idx)
+{
+ return (u64)spx5_rd(sparx5, ANA_AC_PROBE_PORT_CFG1(idx)) << 32 |
+ spx5_rd(sparx5, ANA_AC_PROBE_PORT_CFG(idx));
+}
+
+/* Add port to mirror (only front ports) */
+static void sparx5_mirror_port_add(struct sparx5 *sparx5, u32 idx, u32 portno)
+{
+ u32 val, reg;
+
+ /* CFG covers ports 0-31 and CFG1 ports 32-63: 32 bits per register,
+  * matching the 64-bit mask built by sparx5_mirror_port_get()
+  */
+ reg = portno / BITS_PER_TYPE(u32);
+ val = BIT(portno % BITS_PER_TYPE(u32));
+
+ if (reg == 0)
+ return spx5_rmw(val, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
+ else
+ return spx5_rmw(val, val, sparx5, ANA_AC_PROBE_PORT_CFG1(idx));
+}
+
+/* Delete port from mirror (only front ports) */
+static void sparx5_mirror_port_del(struct sparx5 *sparx5, u32 idx, u32 portno)
+{
+ u32 val, reg;
+
+ reg = portno / BITS_PER_TYPE(u32);
+ val = BIT(portno % BITS_PER_TYPE(u32));
+
+ if (reg == 0)
+ return spx5_rmw(0, val, sparx5, ANA_AC_PROBE_PORT_CFG(idx));
+ else
+ return spx5_rmw(0, val, sparx5, ANA_AC_PROBE_PORT_CFG1(idx));
+}
+
+/* Check if mirror contains port */
+static bool sparx5_mirror_contains(struct sparx5 *sparx5, u32 idx, u32 portno)
+{
+ return (sparx5_mirror_port_get(sparx5, idx) & BIT_ULL(portno)) != 0;
+}
+
+/* Check if mirror is empty */
+static bool sparx5_mirror_is_empty(struct sparx5 *sparx5, u32 idx)
+{
+ return sparx5_mirror_port_get(sparx5, idx) == 0;
+}
+
+/* Get direction of mirror */
+static u32 sparx5_mirror_dir_get(struct sparx5 *sparx5, u32 idx)
+{
+ u32 val = spx5_rd(sparx5, ANA_AC_PROBE_CFG(idx));
+
+ return ANA_AC_PROBE_CFG_PROBE_DIRECTION_GET(val);
+}
+
+/* Set direction of mirror */
+static void sparx5_mirror_dir_set(struct sparx5 *sparx5, u32 idx, u32 dir)
+{
+ spx5_rmw(ANA_AC_PROBE_CFG_PROBE_DIRECTION_SET(dir),
+ ANA_AC_PROBE_CFG_PROBE_DIRECTION, sparx5,
+ ANA_AC_PROBE_CFG(idx));
+}
+
+/* Set the monitor port for this mirror */
+static void sparx5_mirror_monitor_set(struct sparx5 *sparx5, u32 idx,
+ u32 portno)
+{
+ spx5_rmw(QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_SET(portno),
+ QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL, sparx5,
+ QFWD_FRAME_COPY_CFG(idx + SPX5_QFWD_MP_OFFSET));
+}
+
+/* Get the monitor port of this mirror */
+static u32 sparx5_mirror_monitor_get(struct sparx5 *sparx5, u32 idx)
+{
+ u32 val = spx5_rd(sparx5,
+ QFWD_FRAME_COPY_CFG(idx + SPX5_QFWD_MP_OFFSET));
+
+ return QFWD_FRAME_COPY_CFG_FRMC_PORT_VAL_GET(val);
+}
+
+/* Check if port is the monitor port of this mirror */
+static bool sparx5_mirror_has_monitor(struct sparx5 *sparx5, u32 idx,
+ u32 portno)
+{
+ return sparx5_mirror_monitor_get(sparx5, idx) == portno;
+}
+
+/* Get a suitable mirror for this port */
+static int sparx5_mirror_get(struct sparx5_port *sport,
+ struct sparx5_port *mport, u32 dir, u32 *idx)
+{
+ struct sparx5 *sparx5 = sport->sparx5;
+ u32 i;
+
+ /* Check if this port is already used as a monitor port */
+ for (i = 0; i < SPX5_MIRROR_PROBE_MAX; i++)
+ if (sparx5_mirror_has_monitor(sparx5, i, sport->portno))
+ return -EINVAL;
+
+ /* Check if existing mirror can be reused
+ * (same direction and monitor port).
+ */
+ for (i = 0; i < SPX5_MIRROR_PROBE_MAX; i++) {
+ if (sparx5_mirror_dir_get(sparx5, i) == dir &&
+ sparx5_mirror_has_monitor(sparx5, i, mport->portno)) {
+ *idx = i;
+ return 0;
+ }
+ }
+
+ /* Return free mirror */
+ for (i = 0; i < SPX5_MIRROR_PROBE_MAX; i++) {
+ if (sparx5_mirror_is_empty(sparx5, i)) {
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int sparx5_mirror_add(struct sparx5_mall_entry *entry)
+{
+ u32 mirror_idx, dir = sparx5_mirror_to_dir(entry->ingress);
+ struct sparx5_port *sport, *mport;
+ struct sparx5 *sparx5;
+ int err;
+
+ /* Source port */
+ sport = entry->port;
+ /* monitor port */
+ mport = entry->mirror.port;
+ sparx5 = sport->sparx5;
+
+ if (sport->portno == mport->portno)
+ return -EINVAL;
+
+ err = sparx5_mirror_get(sport, mport, dir, &mirror_idx);
+ if (err)
+ return err;
+
+ if (sparx5_mirror_contains(sparx5, mirror_idx, sport->portno))
+ return -EEXIST;
+
+ /* Add port to mirror */
+ sparx5_mirror_port_add(sparx5, mirror_idx, sport->portno);
+
+ /* Set direction of mirror */
+ sparx5_mirror_dir_set(sparx5, mirror_idx, dir);
+
+ /* Set monitor port for mirror */
+ sparx5_mirror_monitor_set(sparx5, mirror_idx, mport->portno);
+
+ entry->mirror.idx = mirror_idx;
+
+ return 0;
+}
+
+void sparx5_mirror_del(struct sparx5_mall_entry *entry)
+{
+ struct sparx5_port *port = entry->port;
+ struct sparx5 *sparx5 = port->sparx5;
+ u32 mirror_idx = entry->mirror.idx;
+
+ sparx5_mirror_port_del(sparx5, mirror_idx, port->portno);
+ if (!sparx5_mirror_is_empty(sparx5, mirror_idx))
+ return;
+
+ sparx5_mirror_dir_set(sparx5, mirror_idx, SPX5_MIRROR_DISABLED);
+
+ sparx5_mirror_monitor_set(sparx5,
+ mirror_idx,
+ SPX5_MIRROR_MONITOR_PORT_DEFAULT);
+}
+
+void sparx5_mirror_stats(struct sparx5_mall_entry *entry,
+ struct flow_stats *fstats)
+{
+ struct sparx5_port *port = entry->port;
+ struct rtnl_link_stats64 new_stats;
+ struct flow_stats *old_stats;
+
+ old_stats = &entry->port->mirror_stats;
+ sparx5_get_stats64(port->ndev, &new_stats);
+
+ if (entry->ingress) {
+ flow_stats_update(fstats,
+ new_stats.rx_bytes - old_stats->bytes,
+ new_stats.rx_packets - old_stats->pkts,
+ new_stats.rx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.rx_bytes;
+ old_stats->pkts = new_stats.rx_packets;
+ old_stats->drops = new_stats.rx_dropped;
+ old_stats->lastused = jiffies;
+ } else {
+ flow_stats_update(fstats,
+ new_stats.tx_bytes - old_stats->bytes,
+ new_stats.tx_packets - old_stats->pkts,
+ new_stats.tx_dropped - old_stats->drops,
+ old_stats->lastused,
+ FLOW_ACTION_HW_STATS_IMMEDIATE);
+
+ old_stats->bytes = new_stats.tx_bytes;
+ old_stats->pkts = new_stats.tx_packets;
+ old_stats->drops = new_stats.tx_dropped;
+ old_stats->lastused = jiffies;
+ }
+}
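
For orientation, the probe-port set in the new file spans three 32-bit registers: PROBE_PORT_CFG carries ports 0-31, PROBE_PORT_CFG1 ports 32-63, and PROBE_PORT_CFG2 bit 0 port 64, which is why sparx5_mirror_port_get() stitches CFG1 above CFG with a 32-bit shift. A minimal sketch of the mapping (helper is illustrative):

	/* Illustrative: locate a front port inside the probe port registers */
	static void example_probe_pos(u32 portno, u32 *reg_idx, u32 *bit)
	{
		*reg_idx = portno / 32;	/* 0 -> CFG, 1 -> CFG1, 2 -> CFG2 */
		*bit = portno % 32;
	}
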
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
index ac7e1cffbcec..f3f5fb420468 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c
@@ -67,7 +67,7 @@ static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
for (i = 0; i < IFH_LEN; i++)
ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));
- /* Decode IFH (whats needed) */
+ /* Decode IFH (what's needed) */
sparx5_ifh_parse(ifh, &fi);
/* Map to port netdev */
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
index 60dd2fd603a8..062e486c002c 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c
@@ -370,7 +370,7 @@ static int sparx5_port_disable(struct sparx5 *sparx5, struct sparx5_port *port,
/* 6: Wait while the last frame is exiting the queues */
usleep_range(8 * spd_prm, 10 * spd_prm);
- /* 7: Flush the queues accociated with the port->portno */
+ /* 7: Flush the queues associated with the port->portno */
spx5_rmw(HSCH_FLUSH_CTRL_FLUSH_PORT_SET(port->portno) |
HSCH_FLUSH_CTRL_FLUSH_DST_SET(1) |
HSCH_FLUSH_CTRL_FLUSH_SRC_SET(1) |
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
index 4af85d108a06..0b4abc3eb53d 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c
@@ -190,7 +190,7 @@ static int sparx5_port_bridge_join(struct sparx5_port *port,
/* Remove standalone port entry */
sparx5_mact_forget(sparx5, ndev->dev_addr, 0);
- /* Port enters in bridge mode therefor don't need to copy to CPU
+ /* The port enters bridge mode, therefore we don't need to copy to CPU
* frames for multicast in case the bridge is not requesting them
*/
__dev_mc_unsync(ndev, sparx5_mc_unsync);
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
index 55f255a3c9db..8d67d9f24c76 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_flower.c
@@ -159,13 +159,14 @@ out:
static int
sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
{
+ struct netlink_ext_ack *extack = st->fco->common.extack;
struct flow_match_control mt;
u32 value, mask;
int err = 0;
flow_rule_match_control(st->frule, &mt);
- if (mt.mask->flags) {
+ if (mt.mask->flags & (FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) {
u8 is_frag_key = !!(mt.key->flags & FLOW_DIS_IS_FRAGMENT);
u8 is_frag_mask = !!(mt.mask->flags & FLOW_DIS_IS_FRAGMENT);
u8 is_frag_idx = (is_frag_key << 1) | is_frag_mask;
@@ -178,7 +179,7 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
u8 vdt = sparx5_vcap_frag_map[is_frag_idx][first_frag_idx];
if (vdt == FRAG_INVAL) {
- NL_SET_ERR_MSG_MOD(st->fco->common.extack,
+ NL_SET_ERR_MSG_MOD(extack,
"Match on invalid fragment flag combination");
return -EINVAL;
}
@@ -190,16 +191,19 @@ sparx5_tc_flower_handler_control_usage(struct vcap_tc_flower_parse_usage *st)
err = vcap_rule_add_key_u32(st->vrule,
VCAP_KF_L3_FRAGMENT_TYPE,
value, mask);
- if (err)
- goto out;
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "ip_frag parse error");
+ return err;
+ }
}
- st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
+ FLOW_DIS_FIRST_FRAG,
+ mt.mask->flags, extack))
+ return -EOPNOTSUPP;
- return err;
+ st->used_keys |= BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL);
-out:
- NL_SET_ERR_MSG_MOD(st->fco->common.extack, "ip_frag parse error");
return err;
}
@@ -1023,6 +1027,64 @@ static int sparx5_tc_action_vlan_push(struct vcap_admin *admin,
return err;
}
+static void sparx5_tc_flower_set_port_mask(struct vcap_u72_action *ports,
+ struct net_device *ndev)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ int byidx = port->portno / BITS_PER_BYTE;
+ int biidx = port->portno % BITS_PER_BYTE;
+
+ ports->value[byidx] |= BIT(biidx);
+}
+
+static int sparx5_tc_action_mirred(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ struct flow_action_entry *act)
+{
+ struct vcap_u72_action ports = {0};
+ int err;
+
+ if (admin->vtype != VCAP_TYPE_IS0 && admin->vtype != VCAP_TYPE_IS2) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Mirror action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
+ SPX5_PMM_OR_DSTMASK);
+ if (err)
+ return err;
+
+ sparx5_tc_flower_set_port_mask(&ports, act->dev);
+
+ return vcap_rule_add_action_u72(vrule, VCAP_AF_PORT_MASK, &ports);
+}
+
+static int sparx5_tc_action_redirect(struct vcap_admin *admin,
+ struct vcap_rule *vrule,
+ struct flow_cls_offload *fco,
+ struct flow_action_entry *act)
+{
+ struct vcap_u72_action ports = {0};
+ int err;
+
+ if (admin->vtype != VCAP_TYPE_IS0 && admin->vtype != VCAP_TYPE_IS2) {
+ NL_SET_ERR_MSG_MOD(fco->common.extack,
+ "Redirect action not supported in this VCAP");
+ return -EOPNOTSUPP;
+ }
+
+ err = vcap_rule_add_action_u32(vrule, VCAP_AF_MASK_MODE,
+ SPX5_PMM_REPLACE_ALL);
+ if (err)
+ return err;
+
+ sparx5_tc_flower_set_port_mask(&ports, act->dev);
+
+ return vcap_rule_add_action_u72(vrule, VCAP_AF_PORT_MASK, &ports);
+}
+
/* Remove rule keys that may prevent templates from matching a keyset */
static void sparx5_tc_flower_simplify_rule(struct vcap_admin *admin,
struct vcap_rule *vrule,
@@ -1169,6 +1231,16 @@ static int sparx5_tc_flower_replace(struct net_device *ndev,
if (err)
goto out;
break;
+ case FLOW_ACTION_MIRRED:
+ err = sparx5_tc_action_mirred(admin, vrule, fco, act);
+ if (err)
+ goto out;
+ break;
+ case FLOW_ACTION_REDIRECT:
+ err = sparx5_tc_action_redirect(admin, vrule, fco, act);
+ if (err)
+ goto out;
+ break;
case FLOW_ACTION_ACCEPT:
err = sparx5_tc_set_actionset(admin, vrule);
if (err)
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
index d88a93f22606..6b4d1d7b9730 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_tc_matchall.c
@@ -11,11 +11,44 @@
#include "sparx5_main.h"
#include "sparx5_vcap_impl.h"
+static struct sparx5_mall_entry *
+sparx5_tc_matchall_entry_find(struct list_head *entries, unsigned long cookie)
+{
+ struct sparx5_mall_entry *entry;
+
+ list_for_each_entry(entry, entries, list) {
+ if (entry->cookie == cookie)
+ return entry;
+ }
+
+ return NULL;
+}
+
+static void sparx5_tc_matchall_parse_action(struct sparx5_port *port,
+ struct sparx5_mall_entry *entry,
+ struct flow_action_entry *action,
+ bool ingress,
+ unsigned long cookie)
+{
+ entry->port = port;
+ entry->type = action->id;
+ entry->ingress = ingress;
+ entry->cookie = cookie;
+}
+
+static void
+sparx5_tc_matchall_parse_mirror_action(struct sparx5_mall_entry *entry,
+ struct flow_action_entry *action)
+{
+ entry->mirror.port = netdev_priv(action->dev);
+}
+
static int sparx5_tc_matchall_replace(struct net_device *ndev,
struct tc_cls_matchall_offload *tmo,
bool ingress)
{
struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5_mall_entry *mall_entry;
struct flow_action_entry *action;
struct sparx5 *sparx5;
int err;
@@ -27,8 +60,45 @@ static int sparx5_tc_matchall_replace(struct net_device *ndev,
}
action = &tmo->rule->action.entries[0];
+ mall_entry = kzalloc(sizeof(*mall_entry), GFP_KERNEL);
+ if (!mall_entry)
+ return -ENOMEM;
+
+ sparx5_tc_matchall_parse_action(port,
+ mall_entry,
+ action,
+ ingress,
+ tmo->cookie);
+
sparx5 = port->sparx5;
switch (action->id) {
+ case FLOW_ACTION_MIRRED:
+ sparx5_tc_matchall_parse_mirror_action(mall_entry, action);
+ err = sparx5_mirror_add(mall_entry);
+ if (err) {
+ switch (err) {
+ case -EEXIST:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Mirroring already exists");
+ break;
+ case -EINVAL:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Cannot mirror a monitor port");
+ break;
+ case -ENOENT:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "No more mirror probes available");
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(tmo->common.extack,
+ "Unknown error");
+ break;
+ }
+ kfree(mall_entry);
+ return err;
+ }
+ /* Get baseline stats for this port */
+ sparx5_mirror_stats(mall_entry, &tmo->stats);
+ break;
case FLOW_ACTION_GOTO:
err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
tmo->common.chain_index,
@@ -59,6 +129,9 @@ static int sparx5_tc_matchall_replace(struct net_device *ndev,
NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
return -EOPNOTSUPP;
}
+
+ list_add_tail(&mall_entry->list, &sparx5->mall_entries);
+
return 0;
}
@@ -67,19 +140,51 @@ static int sparx5_tc_matchall_destroy(struct net_device *ndev,
bool ingress)
{
struct sparx5_port *port = netdev_priv(ndev);
- struct sparx5 *sparx5;
- int err;
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_mall_entry *entry;
+ int err = 0;
- sparx5 = port->sparx5;
- if (!tmo->rule && tmo->cookie) {
+ entry = sparx5_tc_matchall_entry_find(&sparx5->mall_entries,
+ tmo->cookie);
+ if (!entry)
+ return -ENOENT;
+
+ if (entry->type == FLOW_ACTION_MIRRED) {
+ sparx5_mirror_del(entry);
+ } else if (entry->type == FLOW_ACTION_GOTO) {
err = vcap_enable_lookups(sparx5->vcap_ctrl, ndev,
0, 0, tmo->cookie, false);
- if (err)
- return err;
- return 0;
+ } else {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
+ err = -EOPNOTSUPP;
}
- NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
- return -EOPNOTSUPP;
+
+ list_del(&entry->list);
+ kfree(entry);
+
+ return err;
+}
+
+static int sparx5_tc_matchall_stats(struct net_device *ndev,
+ struct tc_cls_matchall_offload *tmo,
+ bool ingress)
+{
+ struct sparx5_port *port = netdev_priv(ndev);
+ struct sparx5 *sparx5 = port->sparx5;
+ struct sparx5_mall_entry *entry;
+
+ entry = sparx5_tc_matchall_entry_find(&sparx5->mall_entries,
+ tmo->cookie);
+ if (!entry)
+ return -ENOENT;
+
+ if (entry->type == FLOW_ACTION_MIRRED) {
+ sparx5_mirror_stats(entry, &tmo->stats);
+ } else {
+ NL_SET_ERR_MSG_MOD(tmo->common.extack, "Unsupported action");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
}
int sparx5_tc_matchall(struct net_device *ndev,
@@ -91,6 +196,8 @@ int sparx5_tc_matchall(struct net_device *ndev,
return sparx5_tc_matchall_replace(ndev, tmo, ingress);
case TC_CLSMATCHALL_DESTROY:
return sparx5_tc_matchall_destroy(ndev, tmo, ingress);
+ case TC_CLSMATCHALL_STATS:
+ return sparx5_tc_matchall_stats(ndev, tmo, ingress);
default:
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
index 187efa1fc904..967c8621c250 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c
@@ -1507,7 +1507,7 @@ static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin,
}
}
-static struct vcap_operations sparx5_vcap_ops = {
+static const struct vcap_operations sparx5_vcap_ops = {
.validate_keyset = sparx5_vcap_validate_keyset,
.add_default_fields = sparx5_vcap_add_default_fields,
.cache_erase = sparx5_vcap_cache_erase,
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
index c3569a4c7b69..4735fad05708 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_ag_api.h
@@ -290,7 +290,7 @@ enum vcap_keyfield_set {
* Sparx5: TCP flag RST , LAN966x: TCP: TCP flag RST. PTP over UDP: messageType
* bit 3
* VCAP_KF_L4_SEQUENCE_EQ0_IS: W1, sparx5: is2/es2, lan966x: is2
- * Set if TCP sequence number is 0, LAN966x: Overlayed with PTP over UDP:
+ * Set if TCP sequence number is 0, LAN966x: Overlaid with PTP over UDP:
* messageType bit 0
* VCAP_KF_L4_SPORT: W16, sparx5: is0/is2/es2, lan966x: is1/is2
* TCP/UDP source port
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
index ef980e4e5bc2..2687765abe52 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -327,7 +327,7 @@ static int vcap_find_keystream_typegroup_sw(struct vcap_control *vctrl,
}
/* Verify that the typegroup information, subword count, keyset and type id
- * are in sync and correct, return the list of matchin keysets
+ * are in sync and correct, return the list of matching keysets
*/
int
vcap_find_keystream_keysets(struct vcap_control *vctrl,
@@ -2907,6 +2907,18 @@ int vcap_rule_add_action_u32(struct vcap_rule *rule,
}
EXPORT_SYMBOL_GPL(vcap_rule_add_action_u32);
+/* Add a 72 bit action field with value to the rule */
+int vcap_rule_add_action_u72(struct vcap_rule *rule,
+ enum vcap_action_field action,
+ struct vcap_u72_action *fieldval)
+{
+ struct vcap_client_actionfield_data data;
+
+ memcpy(&data.u72, fieldval, sizeof(data.u72));
+ return vcap_rule_add_action(rule, action, VCAP_FIELD_U72, &data);
+}
+EXPORT_SYMBOL_GPL(vcap_rule_add_action_u72);
+
static int vcap_read_counter(struct vcap_rule_internal *ri,
struct vcap_counter *ctr)
{
@@ -2931,7 +2943,7 @@ void vcap_netbytes_copy(u8 *dst, u8 *src, int count)
}
EXPORT_SYMBOL_GPL(vcap_netbytes_copy);
-/* Convert validation error code into tc extact error message */
+/* Convert validation error code into tc extack error message */
void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule)
{
switch (vrule->exterr) {
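
The new vcap_rule_add_action_u72() carries a 72-bit action value, wide enough for the 65-bit Sparx5 port mask. Assuming struct vcap_u72_action wraps a 9-byte value array (as the bytewise fill in sparx5_tc_flower_set_port_mask() above suggests), setting one port is a byte/bit split:

	#include <linux/bits.h>

	/* Illustrative: set port @portno in a 72-bit VCAP action bit array */
	static void example_u72_set_port(struct vcap_u72_action *ports, u32 portno)
	{
		ports->value[portno / BITS_PER_BYTE] |= BIT(portno % BITS_PER_BYTE);
	}
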
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h
index 9eccfa633c1a..6069ad95c27e 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h
@@ -271,7 +271,7 @@ struct vcap_operations {
/* VCAP API Client control interface */
struct vcap_control {
- struct vcap_operations *ops; /* client supplied operations */
+ const struct vcap_operations *ops; /* client supplied operations */
const struct vcap_info *vcaps; /* client supplied vcap models */
const struct vcap_statistics *stats; /* client supplied vcap stats */
struct list_head list; /* list of vcap instances */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
index 88641508f885..cdf79e17ca54 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_client.h
@@ -200,6 +200,8 @@ int vcap_rule_add_action_bit(struct vcap_rule *rule,
enum vcap_action_field action, enum vcap_bit val);
int vcap_rule_add_action_u32(struct vcap_rule *rule,
enum vcap_action_field action, u32 value);
+int vcap_rule_add_action_u72(struct vcap_rule *rule, enum vcap_action_field action,
+ struct vcap_u72_action *fieldval);
/* Get number of rules in a vcap instance lookup chain id range */
int vcap_admin_rule_count(struct vcap_admin *admin, int cid);
@@ -236,7 +238,7 @@ const struct vcap_set *vcap_keyfieldset(struct vcap_control *vctrl,
/* Copy to host byte order */
void vcap_netbytes_copy(u8 *dst, u8 *src, int count);
-/* Convert validation error code into tc extact error message */
+/* Convert validation error code into tc extack error message */
void vcap_set_tc_exterr(struct flow_cls_offload *fco, struct vcap_rule *vrule);
/* Cleanup a VCAP instance */
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
index b23c11b0647c..9c9d38042125 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c
@@ -221,7 +221,7 @@ static int vcap_test_port_info(struct net_device *ndev,
return 0;
}
-static struct vcap_operations test_callbacks = {
+static const struct vcap_operations test_callbacks = {
.validate_keyset = test_val_keyset,
.add_default_fields = test_add_def_fields,
.cache_erase = test_cache_erase,
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index fe4e166de8a0..51d9423b08a6 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -211,7 +211,7 @@ static int vcap_test_port_info(struct net_device *ndev,
return 0;
}
-static struct vcap_operations test_callbacks = {
+static const struct vcap_operations test_callbacks = {
.validate_keyset = test_val_keyset,
.add_default_fields = test_add_def_fields,
.cache_erase = test_cache_erase,
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
index df81d9ff502b..844bdf6b5f45 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_private.h
@@ -109,7 +109,7 @@ int vcap_addr_keysets(struct vcap_control *vctrl, struct net_device *ndev,
struct vcap_keyset_list *kslist);
/* Verify that the typegroup information, subword count, keyset and type id
- * are in sync and correct, return the list of matchin keysets
+ * are in sync and correct, return the list of matching keysets
*/
int vcap_find_keystream_keysets(struct vcap_control *vctrl, enum vcap_type vt,
u32 *keystream, u32 *mskstream, bool mask,
diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig
index 01eb7445ead9..901fbffbf718 100644
--- a/drivers/net/ethernet/microsoft/Kconfig
+++ b/drivers/net/ethernet/microsoft/Kconfig
@@ -17,7 +17,8 @@ if NET_VENDOR_MICROSOFT
config MICROSOFT_MANA
tristate "Microsoft Azure Network Adapter (MANA) support"
- depends on PCI_MSI && X86_64
+ depends on PCI_MSI
+ depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN)
depends on PCI_HYPERV
select AUXILIARY_BUS
select PAGE_POOL
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 1332db9a08eb..ddb8f68d80a2 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -182,7 +182,7 @@ int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length,
dma_addr_t dma_handle;
void *buf;
- if (length < PAGE_SIZE || !is_power_of_2(length))
+ if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
return -EINVAL;
gmi->dev = gc->dev;
@@ -380,6 +380,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
case GDMA_EQE_HWC_INIT_EQ_ID_DB:
case GDMA_EQE_HWC_INIT_DATA:
case GDMA_EQE_HWC_INIT_DONE:
+ case GDMA_EQE_RNIC_QP_FATAL:
if (!eq->eq.callback)
break;
@@ -717,7 +718,7 @@ EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
static int mana_gd_create_dma_region(struct gdma_dev *gd,
struct gdma_mem_info *gmi)
{
- unsigned int num_page = gmi->length / PAGE_SIZE;
+ unsigned int num_page = gmi->length / MANA_PAGE_SIZE;
struct gdma_create_dma_region_req *req = NULL;
struct gdma_create_dma_region_resp resp = {};
struct gdma_context *gc = gd->gdma_context;
@@ -727,10 +728,10 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
int err;
int i;
- if (length < PAGE_SIZE || !is_power_of_2(length))
+ if (length < MANA_PAGE_SIZE || !is_power_of_2(length))
return -EINVAL;
- if (offset_in_page(gmi->virt_addr) != 0)
+ if (!MANA_PAGE_ALIGNED(gmi->virt_addr))
return -EINVAL;
hwc = gc->hwc.driver_data;
@@ -751,7 +752,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
req->page_addr_list_len = num_page;
for (i = 0; i < num_page; i++)
- req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE;
+ req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE;
err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp);
if (err)
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 2729a2c5acf9..cafded2f9382 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -3,6 +3,7 @@
#include <net/mana/gdma.h>
#include <net/mana/hw_channel.h>
+#include <linux/vmalloc.h>
static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id)
{
@@ -361,12 +362,12 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
int err;
eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth);
- if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
- eq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+ if (eq_size < MANA_MIN_QSIZE)
+ eq_size = MANA_MIN_QSIZE;
cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
- if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)
- cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+ if (cq_size < MANA_MIN_QSIZE)
+ cq_size = MANA_MIN_QSIZE;
hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL);
if (!hwc_cq)
@@ -428,7 +429,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth,
dma_buf->num_reqs = q_depth;
- buf_size = PAGE_ALIGN(q_depth * max_msg_size);
+ buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size);
gmi = &dma_buf->mem_info;
err = mana_gd_alloc_memory(gc, buf_size, gmi);
@@ -496,8 +497,8 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc,
else
queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);
- if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)
- queue_size = MINIMUM_SUPPORTED_PAGE_SIZE;
+ if (queue_size < MANA_MIN_QSIZE)
+ queue_size = MANA_MIN_QSIZE;
hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL);
if (!hwc_wq)
@@ -848,7 +849,7 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
}
if (!wait_for_completion_timeout(&ctx->comp_event,
- (msecs_to_jiffies(hwc->hwc_timeout) * HZ))) {
+ (msecs_to_jiffies(hwc->hwc_timeout)))) {
dev_err(hwc->dev, "HWC: Request timed out!\n");
err = -ETIMEDOUT;
goto out;
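The hunk above fixes a timeout unit bug: wait_for_completion_timeout() takes its timeout in jiffies, and msecs_to_jiffies() already converts from milliseconds, so the extra "* HZ" factor inflated the wait by a factor of HZ. A minimal sketch of the correct pattern (a struct completion comp and a caller-supplied timeout_ms are assumed, illustrative names only):

	unsigned long t = msecs_to_jiffies(timeout_ms);	/* ms -> jiffies, done once */

	/* returns the remaining jiffies, or 0 if the wait timed out */
	if (!wait_for_completion_timeout(&comp, t))
		return -ETIMEDOUT;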
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index d8af5e7e15b4..39f56973746d 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -481,7 +481,7 @@ static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
struct sock *sk = skb->sk;
int txq;
- txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK];
+ txq = apc->indir_table[hash & (apc->indir_table_sz - 1)];
if (txq != old_q && sk && sk_fullsock(sk) &&
rcu_access_pointer(sk->sk_dst_cache))
@@ -599,7 +599,11 @@ static void mana_get_rxbuf_cfg(int mtu, u32 *datasize, u32 *alloc_size,
else
*headroom = XDP_PACKET_HEADROOM;
- *alloc_size = mtu + MANA_RXBUF_PAD + *headroom;
+ *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
+
+ /* Using page pool in this case, so alloc_size is PAGE_SIZE */
+ if (*alloc_size < PAGE_SIZE)
+ *alloc_size = PAGE_SIZE;
*datasize = mtu + ETH_HLEN;
}
@@ -690,12 +694,12 @@ static int mana_change_mtu(struct net_device *ndev, int new_mtu)
goto out;
}
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
err = mana_attach(ndev);
if (err) {
netdev_err(ndev, "mana_attach failed: %d\n", err);
- ndev->mtu = old_mtu;
+ WRITE_ONCE(ndev->mtu, old_mtu);
}
out:
@@ -721,6 +725,13 @@ static void mana_cleanup_port_context(struct mana_port_context *apc)
apc->rxqs = NULL;
}
+static void mana_cleanup_indir_table(struct mana_port_context *apc)
+{
+ apc->indir_table_sz = 0;
+ kfree(apc->indir_table);
+ kfree(apc->rxobj_table);
+}
+
static int mana_init_port_context(struct mana_port_context *apc)
{
apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *),
@@ -962,7 +973,16 @@ static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
*max_sq = resp.max_num_sq;
*max_rq = resp.max_num_rq;
- *num_indir_entry = resp.num_indirection_ent;
+ if (resp.num_indirection_ent > 0 &&
+ resp.num_indirection_ent <= MANA_INDIRECT_TABLE_MAX_SIZE &&
+ is_power_of_2(resp.num_indirection_ent)) {
+ *num_indir_entry = resp.num_indirection_ent;
+ } else {
+ netdev_warn(apc->ndev,
+ "Setting indirection table size to default %d for vPort %d\n",
+ MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx);
+ *num_indir_entry = MANA_INDIRECT_TABLE_DEF_SIZE;
+ }
apc->port_handle = resp.vport;
ether_addr_copy(apc->mac_addr, resp.mac_addr);
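The power-of-two validation above is what lets the hot path index the indirection table with a mask instead of a modulo, as in the mana_get_tx_queue() hunk earlier in this file. A minimal sketch, assuming sz has already passed the is_power_of_2() check:

	/* For sz == 2^k, (hash & (sz - 1)) equals (hash % sz), no division. */
	static inline u32 indir_slot(u32 hash, u32 sz)
	{
		return hash & (sz - 1);
	}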
@@ -1054,15 +1074,13 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
bool update_default_rxobj, bool update_key,
bool update_tab)
{
- u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
struct mana_cfg_rx_steer_req_v2 *req;
struct mana_cfg_rx_steer_resp resp = {};
struct net_device *ndev = apc->ndev;
- mana_handle_t *req_indir_tab;
u32 req_buf_size;
int err;
- req_buf_size = sizeof(*req) + sizeof(mana_handle_t) * num_entries;
+ req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz);
req = kzalloc(req_buf_size, GFP_KERNEL);
if (!req)
return -ENOMEM;
@@ -1073,8 +1091,9 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
req->hdr.req.msg_version = GDMA_MESSAGE_V2;
req->vport = apc->port_handle;
- req->num_indir_entries = num_entries;
- req->indir_tab_offset = sizeof(*req);
+ req->num_indir_entries = apc->indir_table_sz;
+ req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
+ indir_tab);
req->rx_enable = rx;
req->rss_enable = apc->rss_state;
req->update_default_rxobj = update_default_rxobj;
@@ -1086,11 +1105,9 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
if (update_key)
memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
- if (update_tab) {
- req_indir_tab = (mana_handle_t *)(req + 1);
- memcpy(req_indir_tab, apc->rxobj_table,
- req->num_indir_entries * sizeof(mana_handle_t));
- }
+ if (update_tab)
+ memcpy(req->indir_tab, apc->rxobj_table,
+ flex_array_size(req, indir_tab, req->num_indir_entries));
err = mana_send_request(apc->ac, req, req_buf_size, &resp,
sizeof(resp));
@@ -1113,7 +1130,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
}
netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
- apc->port_handle, num_entries);
+ apc->port_handle, apc->indir_table_sz);
out:
kfree(req);
return err;
@@ -1775,7 +1792,6 @@ static void mana_poll_rx_cq(struct mana_cq *cq)
static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
{
struct mana_cq *cq = context;
- u8 arm_bit;
int w;
WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
@@ -1786,16 +1802,23 @@ static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
mana_poll_tx_cq(cq);
w = cq->work_done;
-
- if (w < cq->budget &&
- napi_complete_done(&cq->napi, w)) {
- arm_bit = SET_ARM_BIT;
- } else {
- arm_bit = 0;
+ cq->work_done_since_doorbell += w;
+
+ if (w < cq->budget) {
+ mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
+ cq->work_done_since_doorbell = 0;
+ napi_complete_done(&cq->napi, w);
+ } else if (cq->work_done_since_doorbell >
+ cq->gdma_cq->queue_size / COMP_ENTRY_SIZE * 4) {
+ /* MANA hardware requires at least one doorbell ring every 8
+ * wraparounds of the CQ even if there is no need to arm the CQ.
+ * This driver rings the doorbell as soon as we have exceeded
+ * 4 wraparounds.
+ */
+ mana_gd_ring_cq(gdma_queue, 0);
+ cq->work_done_since_doorbell = 0;
}
- mana_gd_ring_cq(gdma_queue, arm_bit);
-
return w;
}
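The threshold in the hunk above reads as "four wraparounds' worth of CQEs". A hedged restatement of the condition, using the same fields as mana_cq_handler():

	u32 cqe_per_wrap = cq->gdma_cq->queue_size / COMP_ENTRY_SIZE;
	bool must_ring = cq->work_done_since_doorbell > 4 * cqe_per_wrap;

	/* must_ring: ring the doorbell without SET_ARM_BIT and clear the
	 * counter, staying comfortably inside the hardware's requirement of
	 * one ring per 8 CQ wraparounds.
	 */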
@@ -1891,10 +1914,10 @@ static int mana_create_txq(struct mana_port_context *apc,
* to prevent overflow.
*/
txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32;
- BUILD_BUG_ON(!PAGE_ALIGNED(txq_size));
+ BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size));
cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;
- cq_size = PAGE_ALIGN(cq_size);
+ cq_size = MANA_PAGE_ALIGN(cq_size);
gc = gd->gdma_context;
@@ -2191,8 +2214,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
if (err)
goto out;
- rq_size = PAGE_ALIGN(rq_size);
- cq_size = PAGE_ALIGN(cq_size);
+ rq_size = MANA_PAGE_ALIGN(rq_size);
+ cq_size = MANA_PAGE_ALIGN(cq_size);
/* Create RQ */
memset(&spec, 0, sizeof(spec));
@@ -2346,11 +2369,33 @@ static int mana_create_vport(struct mana_port_context *apc,
return mana_create_txq(apc, net);
}
+static int mana_rss_table_alloc(struct mana_port_context *apc)
+{
+ if (!apc->indir_table_sz) {
+ netdev_err(apc->ndev,
+ "Indirection table size not set for vPort %d\n",
+ apc->port_idx);
+ return -EINVAL;
+ }
+
+ apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
+ if (!apc->indir_table)
+ return -ENOMEM;
+
+ apc->rxobj_table = kcalloc(apc->indir_table_sz, sizeof(mana_handle_t), GFP_KERNEL);
+ if (!apc->rxobj_table) {
+ kfree(apc->indir_table);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void mana_rss_table_init(struct mana_port_context *apc)
{
int i;
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ for (i = 0; i < apc->indir_table_sz; i++)
apc->indir_table[i] =
ethtool_rxfh_indir_default(i, apc->num_queues);
}
@@ -2363,7 +2408,7 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
int i;
if (update_tab) {
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
+ for (i = 0; i < apc->indir_table_sz; i++) {
queue_idx = apc->indir_table[i];
apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
}
@@ -2468,7 +2513,6 @@ static int mana_init_port(struct net_device *ndev)
struct mana_port_context *apc = netdev_priv(ndev);
u32 max_txq, max_rxq, max_queues;
int port_idx = apc->port_idx;
- u32 num_indirect_entries;
int err;
err = mana_init_port_context(apc);
@@ -2476,7 +2520,7 @@ static int mana_init_port(struct net_device *ndev)
return err;
err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
- &num_indirect_entries);
+ &apc->indir_table_sz);
if (err) {
netdev_err(ndev, "Failed to query info for vPort %d\n",
port_idx);
@@ -2495,8 +2539,7 @@ static int mana_init_port(struct net_device *ndev)
return 0;
reset_apc:
- kfree(apc->rxqs);
- apc->rxqs = NULL;
+ mana_cleanup_port_context(apc);
return err;
}
@@ -2725,6 +2768,10 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
if (err)
goto free_net;
+ err = mana_rss_table_alloc(apc);
+ if (err)
+ goto reset_apc;
+
netdev_lockdep_set_classes(ndev);
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
@@ -2741,14 +2788,15 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
err = register_netdev(ndev);
if (err) {
netdev_err(ndev, "Unable to register netdev.\n");
- goto reset_apc;
+ goto free_indir;
}
return 0;
+free_indir:
+ mana_cleanup_indir_table(apc);
reset_apc:
- kfree(apc->rxqs);
- apc->rxqs = NULL;
+ mana_cleanup_port_context(apc);
free_net:
*ndev_storage = NULL;
netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
@@ -2800,6 +2848,8 @@ static int add_adev(struct gdma_dev *gd)
if (ret)
goto init_fail;
+ /* madev is owned by the auxiliary device */
+ madev = NULL;
ret = auxiliary_device_add(adev);
if (ret)
goto add_fail;
@@ -2874,16 +2924,30 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
if (!resuming) {
for (i = 0; i < ac->num_ports; i++) {
err = mana_probe_port(ac, i, &ac->ports[i]);
- if (err)
+ /* We log the port for which the probe failed and stop
+ * probing subsequent ports. Ports that probed successfully
+ * keep running, unless add_adev fails too.
+ */
+ if (err) {
+ dev_err(dev, "Probe Failed for port %d\n", i);
break;
+ }
}
} else {
for (i = 0; i < ac->num_ports; i++) {
rtnl_lock();
err = mana_attach(ac->ports[i]);
rtnl_unlock();
- if (err)
+ /* We log the port for which the attach failed and stop
+ * attaching subsequent ports. Ports that attached successfully
+ * keep running, unless add_adev fails too.
+ */
+ if (err) {
+ dev_err(dev, "Attach Failed for port %d\n", i);
break;
+ }
}
}
@@ -2899,6 +2963,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
+ struct mana_port_context *apc;
struct device *dev = gc->dev;
struct net_device *ndev;
int err;
@@ -2910,6 +2975,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
for (i = 0; i < ac->num_ports; i++) {
ndev = ac->ports[i];
+ apc = netdev_priv(ndev);
if (!ndev) {
if (i == 0)
dev_err(dev, "No net device to remove\n");
@@ -2933,6 +2999,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
}
unregister_netdevice(ndev);
+ mana_cleanup_indir_table(apc);
rtnl_unlock();
@@ -2950,3 +3017,22 @@ out:
gd->gdma_context = NULL;
kfree(ac);
}
+
+struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index)
+{
+ struct net_device *ndev;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "Taking primary netdev without holding the RCU read lock");
+ if (port_index >= ac->num_ports)
+ return NULL;
+
+ /* When mana is used in netvsc, the upper netdevice should be returned. */
+ if (ac->ports[port_index]->flags & IFF_SLAVE)
+ ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
+ else
+ ndev = ac->ports[port_index];
+
+ return ndev;
+}
+EXPORT_SYMBOL_NS(mana_get_primary_netdev_rcu, NET_MANA);
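A hypothetical caller sketch (the consumer and variable names are assumed, e.g. an auxiliary RDMA driver): the returned pointer is only stable inside the RCU read-side critical section, so a reference must be taken before dropping the lock if the device is used afterwards:

	struct net_device *ndev;

	rcu_read_lock();
	ndev = mana_get_primary_netdev_rcu(ac, 0);
	if (ndev)
		dev_hold(ndev);	/* keep it valid past rcu_read_unlock() */
	rcu_read_unlock();

	/* ... use ndev ... */
	if (ndev)
		dev_put(ndev);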
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index ab2413d71f6c..146d5db1792f 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -245,7 +245,9 @@ static u32 mana_get_rxfh_key_size(struct net_device *ndev)
static u32 mana_rss_indir_size(struct net_device *ndev)
{
- return MANA_INDIRECT_TABLE_SIZE;
+ struct mana_port_context *apc = netdev_priv(ndev);
+
+ return apc->indir_table_sz;
}
static int mana_get_rxfh(struct net_device *ndev,
@@ -257,7 +259,7 @@ static int mana_get_rxfh(struct net_device *ndev,
rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
if (rxfh->indir) {
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ for (i = 0; i < apc->indir_table_sz; i++)
rxfh->indir[i] = apc->indir_table[i];
}
@@ -273,8 +275,8 @@ static int mana_set_rxfh(struct net_device *ndev,
{
struct mana_port_context *apc = netdev_priv(ndev);
bool update_hash = false, update_table = false;
- u32 save_table[MANA_INDIRECT_TABLE_SIZE];
u8 save_key[MANA_HASH_KEY_SIZE];
+ u32 *save_table;
int i, err;
if (!apc->port_is_up)
@@ -284,13 +286,19 @@ static int mana_set_rxfh(struct net_device *ndev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
+ save_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
+ if (!save_table)
+ return -ENOMEM;
+
if (rxfh->indir) {
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
- if (rxfh->indir[i] >= apc->num_queues)
- return -EINVAL;
+ for (i = 0; i < apc->indir_table_sz; i++)
+ if (rxfh->indir[i] >= apc->num_queues) {
+ err = -EINVAL;
+ goto cleanup;
+ }
update_table = true;
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) {
+ for (i = 0; i < apc->indir_table_sz; i++) {
save_table[i] = apc->indir_table[i];
apc->indir_table[i] = rxfh->indir[i];
}
@@ -306,7 +314,7 @@ static int mana_set_rxfh(struct net_device *ndev,
if (err) { /* recover to original values */
if (update_table) {
- for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
+ for (i = 0; i < apc->indir_table_sz; i++)
apc->indir_table[i] = save_table[i];
}
@@ -316,6 +324,9 @@ static int mana_set_rxfh(struct net_device *ndev,
mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
}
+cleanup:
+ kfree(save_table);
+
return err;
}
diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c
index 5553af9c8085..0f1679ebad96 100644
--- a/drivers/net/ethernet/microsoft/mana/shm_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c
@@ -6,6 +6,7 @@
#include <linux/io.h>
#include <linux/mm.h>
+#include <net/mana/gdma.h>
#include <net/mana/shm_channel.h>
#define PAGE_FRAME_L48_WIDTH_BYTES 6
@@ -155,8 +156,8 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
return err;
}
- if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) ||
- !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr))
+ if (!MANA_PAGE_ALIGNED(eq_addr) || !MANA_PAGE_ALIGNED(cq_addr) ||
+ !MANA_PAGE_ALIGNED(rq_addr) || !MANA_PAGE_ALIGNED(sq_addr))
return -EINVAL;
if ((eq_msix_index & VECTOR_MASK) != eq_msix_index)
@@ -183,7 +184,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
/* EQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
- frame_addr = PHYS_PFN(eq_addr);
+ frame_addr = MANA_PFN(eq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
@@ -191,7 +192,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
/* CQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
- frame_addr = PHYS_PFN(cq_addr);
+ frame_addr = MANA_PFN(cq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
@@ -199,7 +200,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
/* RQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
- frame_addr = PHYS_PFN(rq_addr);
+ frame_addr = MANA_PFN(rq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
@@ -207,7 +208,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
/* SQ addr: low 48 bits of frame address */
shmem = (u64 *)ptr;
- frame_addr = PHYS_PFN(sq_addr);
+ frame_addr = MANA_PFN(sq_addr);
*shmem = frame_addr & PAGE_FRAME_L48_MASK;
all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) <<
(frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index ed2fb44500b0..3d72aa7b1305 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -453,9 +453,158 @@ static u16 ocelot_vlan_unaware_pvid(struct ocelot *ocelot,
return VLAN_N_VID - bridge_num - 1;
}
+/**
+ * ocelot_update_vlan_reclassify_rule() - Make switch aware only of the bridge VLAN TPID
+ *
+ * @ocelot: Switch private data structure
+ * @port: Index of ingress port
+ *
+ * IEEE 802.1Q-2018 clauses "5.5 C-VLAN component conformance" and "5.6 S-VLAN
+ * component conformance" suggest that a C-VLAN component should only recognize
+ * and filter on C-Tags, and an S-VLAN component should only recognize and
+ * process based on S-Tags.
+ *
+ * In Linux, as per commit 1a0b20b25732 ("Merge branch 'bridge-next'"), C-VLAN
+ * components are largely represented by a bridge with vlan_protocol 802.1Q,
+ * and S-VLAN components by a bridge with vlan_protocol 802.1ad.
+ *
+ * Currently the driver only offloads vlan_protocol 802.1Q, but the hardware
+ * design is non-conformant, because the switch assigns each frame to a VLAN
+ * based on an entirely different question, as detailed in figure "Basic VLAN
+ * Classification Flow" from its manual and reproduced below.
+ *
+ * Set TAG_TYPE, PCP, DEI, VID to port-default values in VLAN_CFG register
+ * if VLAN_AWARE_ENA[port] and frame has outer tag then:
+ * if VLAN_INNER_TAG_ENA[port] and frame has inner tag then:
+ * TAG_TYPE = (Frame.InnerTPID <> 0x8100)
+ * Set PCP, DEI, VID to values from inner VLAN header
+ * else:
+ * TAG_TYPE = (Frame.OuterTPID <> 0x8100)
+ * Set PCP, DEI, VID to values from outer VLAN header
+ * if VID == 0 then:
+ * VID = VLAN_CFG.VLAN_VID
+ *
+ * Summarized, the switch will recognize both 802.1Q and 802.1ad TPIDs as VLAN
+ * "with equal rights", and just set the TAG_TYPE bit to 0 (if 802.1Q) or to 1
+ * (if 802.1ad). It will classify based on whichever of the tags is "outer", no
+ * matter what TPID that may have (or "inner", if VLAN_INNER_TAG_ENA[port]).
+ *
+ * In the VLAN Table, the TAG_TYPE information is not accessible - just the
+ * classified VID is - so it is as if each VLAN Table entry is for 2 VLANs:
+ * C-VLAN X, and S-VLAN X.
+ *
+ * The Linux bridge behavior, in contrast, is to only filter on frames with
+ * a TPID equal to the vlan_protocol, and to treat everything else as
+ * VLAN-untagged.
+ *
+ * Consider an ingress packet tagged with 802.1ad VID=3 and 802.1Q VID=5,
+ * received on a bridge vlan_filtering=1 vlan_protocol=802.1Q port. This frame
+ * should be treated as 802.1Q-untagged, and classified to the PVID of that
+ * bridge port. Not to VID=3, and not to VID=5.
+ *
+ * The VCAP IS1 TCAM has everything we need to overwrite the choices made in
+ * the basic VLAN classification pipeline: it can match on TAG_TYPE in the key,
+ * and it can modify the classified VID in the action. Thus, for each port
+ * under a vlan_filtering bridge, we can insert a rule in VCAP IS1 lookup 0 to
+ * match on 802.1ad tagged frames and modify their classified VID to the 802.1Q
+ * PVID of the port. This effectively makes it appear to the outside world as
+ * if those packets were processed as VLAN-untagged.
+ *
+ * The rule needs to be updated each time the bridge PVID changes, and needs
+ * to be deleted if the bridge PVID is deleted, or if the port becomes
+ * VLAN-unaware.
+ */
+static int ocelot_update_vlan_reclassify_rule(struct ocelot *ocelot, int port)
+{
+ unsigned long cookie = OCELOT_VCAP_IS1_VLAN_RECLASSIFY(ocelot, port);
+ struct ocelot_vcap_block *block_vcap_is1 = &ocelot->block[VCAP_IS1];
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ const struct ocelot_bridge_vlan *pvid_vlan;
+ struct ocelot_vcap_filter *filter;
+ int err, val, pcp, dei;
+ bool vid_replace_ena;
+ u16 vid;
+
+ pvid_vlan = ocelot_port->pvid_vlan;
+ vid_replace_ena = ocelot_port->vlan_aware && pvid_vlan;
+
+ filter = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, cookie,
+ false);
+ if (!vid_replace_ena) {
+ /* If the reclassification filter doesn't need to exist, delete
+ * it if it was previously installed, and exit doing nothing
+ * otherwise.
+ */
+ if (filter)
+ return ocelot_vcap_filter_del(ocelot, filter);
+
+ return 0;
+ }
+
+ /* The reclassification rule must apply. See if it already exists
+ * or if it must be created.
+ */
+
+ /* Treating the frame as VLAN-untagged means using a classified VID
+ * equal to the bridge PVID, with PCP/DEI set to the port-default QoS
+ * values.
+ */
+ vid = pvid_vlan->vid;
+ val = ocelot_read_gix(ocelot, ANA_PORT_QOS_CFG, port);
+ pcp = ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(val);
+ dei = !!(val & ANA_PORT_QOS_CFG_DP_DEFAULT_VAL);
+
+ if (filter) {
+ bool changed = false;
+
+ /* Filter exists, just update it */
+ if (filter->action.vid != vid) {
+ filter->action.vid = vid;
+ changed = true;
+ }
+ if (filter->action.pcp != pcp) {
+ filter->action.pcp = pcp;
+ changed = true;
+ }
+ if (filter->action.dei != dei) {
+ filter->action.dei = dei;
+ changed = true;
+ }
+
+ if (!changed)
+ return 0;
+
+ return ocelot_vcap_filter_replace(ocelot, filter);
+ }
+
+ /* Filter doesn't exist, create it */
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter)
+ return -ENOMEM;
+
+ filter->key_type = OCELOT_VCAP_KEY_ANY;
+ filter->ingress_port_mask = BIT(port);
+ filter->vlan.tpid = OCELOT_VCAP_BIT_1;
+ filter->prio = 1;
+ filter->id.cookie = cookie;
+ filter->id.tc_offload = false;
+ filter->block_id = VCAP_IS1;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ filter->lookup = 0;
+ filter->action.vid_replace_ena = true;
+ filter->action.pcp_dei_ena = true;
+ filter->action.vid = vid;
+ filter->action.pcp = pcp;
+ filter->action.dei = dei;
+
+ err = ocelot_vcap_filter_add(ocelot, filter, NULL);
+ if (err)
+ kfree(filter);
+
+ return err;
+}
+
/* Default VLAN to classify for untagged frames (may be zero) */
-static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
- const struct ocelot_bridge_vlan *pvid_vlan)
+static int ocelot_port_set_pvid(struct ocelot *ocelot, int port,
+ const struct ocelot_bridge_vlan *pvid_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
u16 pvid = ocelot_vlan_unaware_pvid(ocelot, ocelot_port->bridge);
@@ -475,15 +624,23 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
* happens automatically), but also 802.1p traffic which gets
* classified to VLAN 0, but that is always in our RX filter, so it
* would get accepted were it not for this setting.
+ *
+ * Also, we only support the bridge 802.1Q VLAN protocol, so
+ * 802.1ad-tagged frames (carrying S-Tags) should be considered
+ * 802.1Q-untagged, and also dropped.
*/
if (!pvid_vlan && ocelot_port->vlan_aware)
val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA;
ocelot_rmw_gix(ocelot, val,
ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
- ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA,
+ ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA |
+ ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA,
ANA_PORT_DROP_CFG, port);
+
+ return ocelot_update_vlan_reclassify_rule(ocelot, port);
}
static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
@@ -631,7 +788,10 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M,
ANA_PORT_VLAN_CFG, port);
- ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
+ err = ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
+ if (err)
+ return err;
+
ocelot_port_manage_port_tag(ocelot, port);
return 0;
@@ -684,9 +844,12 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
return err;
/* Default ingress vlan classification */
- if (pvid)
- ocelot_port_set_pvid(ocelot, port,
- ocelot_bridge_vlan_find(ocelot, vid));
+ if (pvid) {
+ err = ocelot_port_set_pvid(ocelot, port,
+ ocelot_bridge_vlan_find(ocelot, vid));
+ if (err)
+ return err;
+ }
/* Untagged egress VLAN classification */
ocelot_port_manage_port_tag(ocelot, port);
@@ -712,8 +875,11 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
return err;
/* Ingress */
- if (del_pvid)
- ocelot_port_set_pvid(ocelot, port, NULL);
+ if (del_pvid) {
+ err = ocelot_port_set_pvid(ocelot, port, NULL);
+ if (err)
+ return err;
+ }
/* Egress */
ocelot_port_manage_port_tag(ocelot, port);
@@ -1099,6 +1265,48 @@ void ocelot_ptp_rx_timestamp(struct ocelot *ocelot, struct sk_buff *skb,
}
EXPORT_SYMBOL(ocelot_ptp_rx_timestamp);
+void ocelot_lock_inj_grp(struct ocelot *ocelot, int grp)
+ __acquires(&ocelot->inj_lock)
+{
+ spin_lock(&ocelot->inj_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_lock_inj_grp);
+
+void ocelot_unlock_inj_grp(struct ocelot *ocelot, int grp)
+ __releases(&ocelot->inj_lock)
+{
+ spin_unlock(&ocelot->inj_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_unlock_inj_grp);
+
+void ocelot_lock_xtr_grp(struct ocelot *ocelot, int grp)
+ __acquires(&ocelot->xtr_lock)
+{
+ spin_lock(&ocelot->xtr_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp);
+
+void ocelot_unlock_xtr_grp(struct ocelot *ocelot, int grp)
+ __releases(&ocelot->xtr_lock)
+{
+ spin_unlock(&ocelot->xtr_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp);
+
+void ocelot_lock_xtr_grp_bh(struct ocelot *ocelot, int grp)
+ __acquires(&ocelot->xtr_lock)
+{
+ spin_lock_bh(&ocelot->xtr_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_lock_xtr_grp_bh);
+
+void ocelot_unlock_xtr_grp_bh(struct ocelot *ocelot, int grp)
+ __releases(&ocelot->xtr_lock)
+{
+ spin_unlock_bh(&ocelot->xtr_lock);
+}
+EXPORT_SYMBOL_GPL(ocelot_unlock_xtr_grp_bh);
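These helpers exist so that callers outside this file (tagger and FDMA paths) can serialize with the register-based injection/extraction done here. A sketch of the intended injection-side calling convention, mirroring what an ocelot/felix xmit path would do with the functions added in this patch:

	ocelot_lock_inj_grp(ocelot, grp);
	if (ocelot_can_inject(ocelot, grp))
		ocelot_port_inject_frame(ocelot, port, grp, rew_op, skb);
	ocelot_unlock_inj_grp(ocelot, grp);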
+
int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
{
u64 timestamp, src_port, len;
@@ -1109,6 +1317,8 @@ int ocelot_xtr_poll_frame(struct ocelot *ocelot, int grp, struct sk_buff **nskb)
u32 val, *buf;
int err;
+ lockdep_assert_held(&ocelot->xtr_lock);
+
err = ocelot_xtr_poll_xfh(ocelot, grp, xfh);
if (err)
return err;
@@ -1184,6 +1394,8 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
{
u32 val = ocelot_read(ocelot, QS_INJ_STATUS);
+ lockdep_assert_held(&ocelot->inj_lock);
+
if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))))
return false;
if (val & QS_INJ_STATUS_WMARK_REACHED(BIT(grp)))
@@ -1193,28 +1405,55 @@ bool ocelot_can_inject(struct ocelot *ocelot, int grp)
}
EXPORT_SYMBOL(ocelot_can_inject);
-void ocelot_ifh_port_set(void *ifh, int port, u32 rew_op, u32 vlan_tag)
+/**
+ * ocelot_ifh_set_basic - Set basic information in Injection Frame Header
+ * @ifh: Pointer to Injection Frame Header memory
+ * @ocelot: Switch private data structure
+ * @port: Egress port number
+ * @rew_op: Egress rewriter operation for PTP
+ * @skb: Pointer to socket buffer (packet)
+ *
+ * Populate the Injection Frame Header with basic information for this skb: the
+ * analyzer bypass bit, destination port, VLAN info, egress rewriter info.
+ */
+void ocelot_ifh_set_basic(void *ifh, struct ocelot *ocelot, int port,
+ u32 rew_op, struct sk_buff *skb)
{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct net_device *dev = skb->dev;
+ u64 vlan_tci, tag_type;
+ int qos_class;
+
+ ocelot_xmit_get_vlan_info(skb, ocelot_port->bridge, &vlan_tci,
+ &tag_type);
+
+ qos_class = netdev_get_num_tc(dev) ?
+ netdev_get_prio_tc_map(dev, skb->priority) : skb->priority;
+
+ memset(ifh, 0, OCELOT_TAG_LEN);
ocelot_ifh_set_bypass(ifh, 1);
+ ocelot_ifh_set_src(ifh, BIT_ULL(ocelot->num_phys_ports));
ocelot_ifh_set_dest(ifh, BIT_ULL(port));
- ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
- if (vlan_tag)
- ocelot_ifh_set_vlan_tci(ifh, vlan_tag);
+ ocelot_ifh_set_qos_class(ifh, qos_class);
+ ocelot_ifh_set_tag_type(ifh, tag_type);
+ ocelot_ifh_set_vlan_tci(ifh, vlan_tci);
if (rew_op)
ocelot_ifh_set_rew_op(ifh, rew_op);
}
-EXPORT_SYMBOL(ocelot_ifh_port_set);
+EXPORT_SYMBOL(ocelot_ifh_set_basic);
void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
u32 rew_op, struct sk_buff *skb)
{
- u32 ifh[OCELOT_TAG_LEN / 4] = {0};
+ u32 ifh[OCELOT_TAG_LEN / 4];
unsigned int i, count, last;
+ lockdep_assert_held(&ocelot->inj_lock);
+
ocelot_write_rix(ocelot, QS_INJ_CTRL_GAP_SIZE(1) |
QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
- ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
+ ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb);
for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
@@ -1247,6 +1486,8 @@ EXPORT_SYMBOL(ocelot_port_inject_frame);
void ocelot_drain_cpu_queue(struct ocelot *ocelot, int grp)
{
+ lockdep_assert_held(&ocelot->xtr_lock);
+
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp))
ocelot_read_rix(ocelot, QS_XTR_RD, grp);
}
@@ -2532,7 +2773,7 @@ int ocelot_port_set_default_prio(struct ocelot *ocelot, int port, u8 prio)
ANA_PORT_QOS_CFG,
port);
- return 0;
+ return ocelot_update_vlan_reclassify_rule(ocelot, port);
}
EXPORT_SYMBOL_GPL(ocelot_port_set_default_prio);
@@ -2929,6 +3170,8 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->fwd_domain_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
+ spin_lock_init(&ocelot->inj_lock);
+ spin_lock_init(&ocelot->xtr_lock);
ocelot->owq = alloc_ordered_workqueue("ocelot-owq", 0);
if (!ocelot->owq)
diff --git a/drivers/net/ethernet/mscc/ocelot_fdma.c b/drivers/net/ethernet/mscc/ocelot_fdma.c
index 312a46832154..00326ae8c708 100644
--- a/drivers/net/ethernet/mscc/ocelot_fdma.c
+++ b/drivers/net/ethernet/mscc/ocelot_fdma.c
@@ -665,8 +665,7 @@ static int ocelot_fdma_prepare_skb(struct ocelot *ocelot, int port, u32 rew_op,
ifh = skb_push(skb, OCELOT_TAG_LEN);
skb_put(skb, ETH_FCS_LEN);
- memset(ifh, 0, OCELOT_TAG_LEN);
- ocelot_ifh_port_set(ifh, port, rew_op, skb_vlan_tag_get(skb));
+ ocelot_ifh_set_basic(ifh, ocelot, port, rew_op, skb);
return 0;
}
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 33b438c6aec5..a057ec3dab97 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -609,11 +609,8 @@ ocelot_flower_parse_key(struct ocelot *ocelot, int port, bool ingress,
return ret;
}
- if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
- struct flow_match_control match;
-
- flow_rule_match_control(rule, &match);
- }
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_match_vlan match;
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 21a87a3fc556..7c9540a71725 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -980,7 +980,7 @@ static int ocelot_port_get_sset_count(struct net_device *dev, int sset)
}
static int ocelot_port_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ocelot_port_private *priv = netdev_priv(dev);
struct ocelot *ocelot = priv->port.ocelot;
diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c
index cb32234a5bf1..b3c28260adf8 100644
--- a/drivers/net/ethernet/mscc/ocelot_ptp.c
+++ b/drivers/net/ethernet/mscc/ocelot_ptp.c
@@ -580,7 +580,7 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr)
EXPORT_SYMBOL(ocelot_hwstamp_set);
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->phc_index = ocelot->ptp_clock ?
ptp_clock_index(ocelot->ptp_clock) : -1;
diff --git a/drivers/net/ethernet/mscc/ocelot_vcap.c b/drivers/net/ethernet/mscc/ocelot_vcap.c
index 73cdec5ca6a3..5734b86aed5b 100644
--- a/drivers/net/ethernet/mscc/ocelot_vcap.c
+++ b/drivers/net/ethernet/mscc/ocelot_vcap.c
@@ -695,6 +695,7 @@ static void is1_entry_set(struct ocelot *ocelot, int ix,
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_MC, filter->dmac_mc);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_L2_BC, filter->dmac_bc);
vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_VLAN_TAGGED, tag->tagged);
+ vcap_key_bit_set(vcap, &data, VCAP_IS1_HK_TPID, tag->tpid);
vcap_key_set(vcap, &data, VCAP_IS1_HK_VID,
tag->vid.value, tag->vid.mask);
vcap_key_set(vcap, &data, VCAP_IS1_HK_PCP,
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 993212c3a7da..c09dd2e3343c 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -51,6 +51,8 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
struct ocelot *ocelot = arg;
int grp = 0, err;
+ ocelot_lock_xtr_grp(ocelot, grp);
+
while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) {
struct sk_buff *skb;
@@ -69,6 +71,8 @@ out:
if (err < 0)
ocelot_drain_cpu_queue(ocelot, 0);
+ ocelot_unlock_xtr_grp(ocelot, grp);
+
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 7b7e1c5b00f4..b7d9657a7af3 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -3036,11 +3036,11 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
/* if we change the mtu on an active device, we must
* reset the device so the firmware sees the change */
myri10ge_close(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
myri10ge_open(dev);
- } else
- dev->mtu = new_mtu;
-
+ } else {
+ WRITE_ONCE(dev->mtu, new_mtu);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 650a5a166070..ad0c14849115 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -2526,7 +2526,7 @@ static void __set_rx_mode(struct net_device *dev)
static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* synchronized against open : rtnl_lock() held by caller */
if (netif_running(dev)) {
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 55408f16fbbc..f235e76e4ce9 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -6637,7 +6637,7 @@ static int s2io_change_mtu(struct net_device *dev, int new_mtu)
struct s2io_nic *sp = netdev_priv(dev);
int ret = 0;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (netif_running(dev)) {
s2io_stop_all_tx_queue(sp);
s2io_card_down(sp);
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
index a655f9e69a7b..0e1a3800f371 100644
--- a/drivers/net/ethernet/netronome/nfp/devlink_param.c
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -132,7 +132,8 @@ exit_close_nsp:
static int
nfp_devlink_param_u8_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
const struct nfp_devlink_param_u8_arg *arg;
struct nfp_pf *pf = devlink_priv(devlink);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 2c3f62907958..aca2a7417af3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -396,6 +396,17 @@ nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
return 0;
}
+#define NFP_FL_CHECK(flag) ({ \
+ IP_TUNNEL_DECLARE_FLAGS(__check) = { }; \
+ __be16 __res; \
+ \
+ __set_bit(IP_TUNNEL_##flag##_BIT, __check); \
+ __res = ip_tunnel_flags_to_be16(__check); \
+ \
+ BUILD_BUG_ON(__builtin_constant_p(__res) && \
+ NFP_FL_TUNNEL_##flag != __res); \
+})
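NFP_FL_CHECK() builds a tunnel-flag bitmap with only the named IP_TUNNEL_*_BIT set, converts it to its __be16 wire form, and, whenever that conversion constant-folds, lets BUILD_BUG_ON() reject the build if the firmware-facing constant drifted. Conceptually, one invocation asserts:

	/* NFP_FL_CHECK(KEY): compile-time check that NFP_FL_TUNNEL_KEY equals
	 * ip_tunnel_flags_to_be16() of a bitmap holding only IP_TUNNEL_KEY_BIT;
	 * if the result is not a compile-time constant, the check is skipped.
	 */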
+
static int
nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
const struct flow_action_entry *act,
@@ -410,6 +421,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
u32 tmp_set_ip_tun_type_index = 0;
/* Currently support one pre-tunnel so index is always 0. */
int pretun_idx = 0;
+ __be16 tun_flags;
if (!IS_ENABLED(CONFIG_IPV6) && ipv6)
return -EOPNOTSUPP;
@@ -417,9 +429,10 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
if (ipv6 && !(priv->flower_ext_feats & NFP_FL_FEATS_IPV6_TUN))
return -EOPNOTSUPP;
- BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
- NFP_FL_TUNNEL_KEY != TUNNEL_KEY ||
- NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
+ NFP_FL_CHECK(CSUM);
+ NFP_FL_CHECK(KEY);
+ NFP_FL_CHECK(GENEVE_OPT);
+
if (ip_tun->options_len &&
(tun_type != NFP_FL_TUNNEL_GENEVE ||
!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))) {
@@ -427,7 +440,9 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
return -EOPNOTSUPP;
}
- if (ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS) {
+ tun_flags = ip_tunnel_flags_to_be16(ip_tun->key.tun_flags);
+ if (!ip_tunnel_flags_is_be16_compat(ip_tun->key.tun_flags) ||
+ (tun_flags & ~NFP_FL_SUPPORTED_UDP_TUN_FLAGS)) {
NL_SET_ERR_MSG_MOD(extack,
"unsupported offload: loaded firmware does not support tunnel flag offload");
return -EOPNOTSUPP;
@@ -442,7 +457,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
FIELD_PREP(NFP_FL_PRE_TUN_INDEX, pretun_idx);
set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
- if (ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY)
+ if (tun_flags & NFP_FL_TUNNEL_KEY)
set_tun->tun_id = ip_tun->key.tun_id;
if (ip_tun->key.ttl) {
@@ -486,7 +501,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun,
}
set_tun->tos = ip_tun->key.tos;
- set_tun->tun_flags = ip_tun->key.tun_flags;
+ set_tun->tun_flags = tun_flags;
if (tun_type == NFP_FL_TUNNEL_GENEVE) {
set_tun->tun_proto = htons(ETH_P_TEB);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 0aceef9fe582..46ffc2c20893 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -321,6 +321,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
flow_rule_match_enc_control(rule, &enc_ctl);
+ if (flow_rule_has_enc_control_flags(enc_ctl.mask->flags,
+ extack))
+ return -EOPNOTSUPP;
+
if (enc_ctl.mask->addr_type != 0xffff) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported");
return -EOPNOTSUPP;
@@ -527,10 +531,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
struct flow_match_control ctl;
flow_rule_match_control(rule, &ctl);
- if (ctl.key->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS) {
- NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on unknown control flag");
+
+ if (!flow_rule_is_supp_control_flags(NFP_FLOWER_SUPPORTED_CTLFLAGS,
+ ctl.mask->flags, extack))
return -EOPNOTSUPP;
- }
}
ret_key_ls->key_layer = key_layer;
diff --git a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
index 45be6954d5aa..01cfa9cc1b5e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
+++ b/drivers/net/ethernet/netronome/nfp/nfd3/xsk.c
@@ -184,7 +184,7 @@ nfp_nfd3_xsk_rx(struct nfp_net_rx_ring *rx_ring, int budget,
xrxbuf->xdp->data += meta_len;
xrxbuf->xdp->data_end = xrxbuf->xdp->data + pkt_len;
xdp_set_data_meta_invalid(xrxbuf->xdp);
- xsk_buff_dma_sync_for_cpu(xrxbuf->xdp, r_vec->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xrxbuf->xdp);
net_prefetch(xrxbuf->xdp->data);
if (meta_len) {
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
index 635d33c0d6d3..ea75b9a06313 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_devlink.c
@@ -160,6 +160,7 @@ static const struct nfp_devlink_versions_simple {
{ DEVLINK_INFO_VERSION_GENERIC_BOARD_REV, "assembly.revision", },
{ DEVLINK_INFO_VERSION_GENERIC_BOARD_MANUFACTURE, "assembly.vendor", },
{ "board.model", /* code name */ "assembly.model", },
+ { DEVLINK_INFO_VERSION_GENERIC_BOARD_PART_NUMBER, "pn", },
};
static int
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index f28e769e6fda..182ba0a8b095 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -1526,7 +1526,7 @@ static void nfp_net_dp_swap(struct nfp_net *nn, struct nfp_net_dp *dp)
*dp = nn->dp;
nn->dp = new_dp;
- nn->dp.netdev->mtu = new_dp.mtu;
+ WRITE_ONCE(nn->dp.netdev->mtu, new_dp.mtu);
if (!netif_is_rxfh_configured(nn->dp.netdev))
nfp_net_rss_init_itbl(nn);
@@ -2289,10 +2289,7 @@ static int nfp_net_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
if (!br_spec)
return -EINVAL;
- nla_for_each_nested(attr, br_spec, rem) {
- if (nla_type(attr) != IFLA_BRIDGE_MODE)
- continue;
-
+ nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
new_ctrl = nn->dp.ctrl;
mode = nla_get_u16(attr);
if (mode == BRIDGE_MODE_VEPA)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
index a614df095b08..2dd37557185e 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugdump.c
@@ -34,8 +34,11 @@ enum nfp_dumpspec_type {
/* generic type plus length */
struct nfp_dump_tl {
- __be32 type;
- __be32 length; /* chunk length to follow, aligned to 8 bytes */
+ /* New members must be added within the struct_group() macro below. */
+ struct_group_tagged(nfp_dump_tl_hdr, hdr,
+ __be32 type;
+ __be32 length; /* chunk length to follow, aligned to 8 bytes */
+ );
char data[];
};
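struct_group_tagged() keeps tl->type and tl->length addressable exactly as before while also minting a standalone struct nfp_dump_tl_hdr that can be embedded in the dumpspec structs without dragging in the flexible data[] member. Roughly, the expansion looks like the sketch below (approximate; the real macro lives in include/linux/stddef.h):

	struct nfp_dump_tl {
		union {
			struct {
				__be32 type;
				__be32 length;
			};
			struct nfp_dump_tl_hdr {
				__be32 type;
				__be32 length;
			} hdr;
		};
		char data[];
	};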
@@ -55,19 +58,19 @@ struct nfp_dump_common_cpp {
/* CSR dumpables */
struct nfp_dumpspec_csr {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
struct nfp_dump_common_cpp cpp;
__be32 register_width; /* in bits */
};
struct nfp_dumpspec_rtsym {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
char rtsym[];
};
/* header for register dumpable */
struct nfp_dump_csr {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
struct nfp_dump_common_cpp cpp;
__be32 register_width; /* in bits */
__be32 error; /* error code encountered while reading */
@@ -75,7 +78,7 @@ struct nfp_dump_csr {
};
struct nfp_dump_rtsym {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
struct nfp_dump_common_cpp cpp;
__be32 error; /* error code encountered while reading */
u8 padded_name_length; /* pad so data starts at 8 byte boundary */
@@ -84,12 +87,12 @@ struct nfp_dump_rtsym {
};
struct nfp_dump_prolog {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
__be32 dump_level;
};
struct nfp_dump_error {
- struct nfp_dump_tl tl;
+ struct nfp_dump_tl_hdr tl;
__be32 error;
char padding[4];
char spec[];
@@ -449,6 +452,8 @@ static int
nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
struct nfp_dump_state *dump)
{
+ struct nfp_dump_tl *spec_csr_tl =
+ container_of(&spec_csr->tl, struct nfp_dump_tl, hdr);
struct nfp_dump_csr *dump_header = dump->p;
u32 reg_sz, header_size, total_size;
u32 cpp_rd_addr, max_rd_addr;
@@ -458,7 +463,7 @@ nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
int err;
if (!nfp_csr_spec_valid(spec_csr))
- return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+ return nfp_dump_error_tlv(spec_csr_tl, -EINVAL, dump);
reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
header_size = ALIGN8(sizeof(*dump_header));
@@ -466,7 +471,7 @@ nfp_dump_csr_range(struct nfp_pf *pf, struct nfp_dumpspec_csr *spec_csr,
ALIGN8(be32_to_cpu(spec_csr->cpp.dump_length));
dest = dump->p + header_size;
- err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+ err = nfp_add_tlv(be32_to_cpu(spec_csr_tl->type), total_size, dump);
if (err)
return err;
@@ -552,6 +557,8 @@ nfp_dump_indirect_csr_range(struct nfp_pf *pf,
struct nfp_dumpspec_csr *spec_csr,
struct nfp_dump_state *dump)
{
+ struct nfp_dump_tl *spec_csr_tl =
+ container_of(&spec_csr->tl, struct nfp_dump_tl, hdr);
struct nfp_dump_csr *dump_header = dump->p;
u32 reg_sz, header_size, total_size;
u32 cpp_rd_addr, max_rd_addr;
@@ -560,7 +567,7 @@ nfp_dump_indirect_csr_range(struct nfp_pf *pf,
int err;
if (!nfp_csr_spec_valid(spec_csr))
- return nfp_dump_error_tlv(&spec_csr->tl, -EINVAL, dump);
+ return nfp_dump_error_tlv(spec_csr_tl, -EINVAL, dump);
reg_sz = be32_to_cpu(spec_csr->register_width) / BITS_PER_BYTE;
header_size = ALIGN8(sizeof(*dump_header));
@@ -569,7 +576,7 @@ nfp_dump_indirect_csr_range(struct nfp_pf *pf,
total_size = header_size + ALIGN8(reg_data_length);
dest = dump->p + header_size;
- err = nfp_add_tlv(be32_to_cpu(spec_csr->tl.type), total_size, dump);
+ err = nfp_add_tlv(be32_to_cpu(spec_csr_tl->type), total_size, dump);
if (err)
return err;
@@ -597,6 +604,8 @@ static int
nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
struct nfp_dump_state *dump)
{
+ struct nfp_dump_tl *spec_tl =
+ container_of(&spec->tl, struct nfp_dump_tl, hdr);
struct nfp_dump_rtsym *dump_header = dump->p;
struct nfp_dumpspec_cpp_isl_id cpp_params;
struct nfp_rtsym_table *rtbl = pf->rtbl;
@@ -607,14 +616,14 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
void *dest;
int err;
- tl_len = be32_to_cpu(spec->tl.length);
+ tl_len = be32_to_cpu(spec_tl->length);
key_len = strnlen(spec->rtsym, tl_len);
if (key_len == tl_len)
- return nfp_dump_error_tlv(&spec->tl, -EINVAL, dump);
+ return nfp_dump_error_tlv(spec_tl, -EINVAL, dump);
sym = nfp_rtsym_lookup(rtbl, spec->rtsym);
if (!sym)
- return nfp_dump_error_tlv(&spec->tl, -ENOENT, dump);
+ return nfp_dump_error_tlv(spec_tl, -ENOENT, dump);
sym_size = nfp_rtsym_size(sym);
header_size =
@@ -622,7 +631,7 @@ nfp_dump_single_rtsym(struct nfp_pf *pf, struct nfp_dumpspec_rtsym *spec,
total_size = header_size + ALIGN8(sym_size);
dest = dump->p + header_size;
- err = nfp_add_tlv(be32_to_cpu(spec->tl.type), total_size, dump);
+ err = nfp_add_tlv(be32_to_cpu(spec_tl->type), total_size, dump);
if (err)
return err;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 3af1229a3f08..eee0bfc41074 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -177,7 +177,7 @@ static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
if (err)
return err;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index fa1f78b03cb2..2aa4ad9cf96e 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -946,7 +946,7 @@ static int nixge_change_mtu(struct net_device *ndev, int new_mtu)
NIXGE_MAX_JUMBO_FRAME_SIZE)
return -EINVAL;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 31f896c4aa26..720f577929db 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3098,7 +3098,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
int old_mtu;
old_mtu = dev->mtu;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* return early if the buffer sizes will not change */
if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 28b7cec485ef..4ac29cd59f2b 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2184,7 +2184,7 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
}
} else {
pch_gbe_reset(adapter);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
adapter->hw.mac.max_frame_size = max_frame;
}
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index ed7dd0a04235..62ba269da902 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1639,7 +1639,7 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu)
reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4);
write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */
mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 2ccc2c2a06e3..1c61390677f7 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -18,6 +18,8 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
+#define IONIC_ASIC_TYPE_ELBA 2
+
#define DEVCMD_TIMEOUT 5
#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
@@ -47,6 +49,7 @@ struct ionic {
struct ionic_dev_bar bars[IONIC_BARS_MAX];
unsigned int num_bars;
struct ionic_identity ident;
+ struct workqueue_struct *wq;
struct ionic_lif *lif;
unsigned int nnqs_per_lif;
unsigned int neqs_per_lif;
@@ -54,6 +57,8 @@ struct ionic {
unsigned int nrxqs_per_lif;
unsigned int nintrs;
DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX);
+ cpumask_var_t *affinity_masks;
+ struct delayed_work doorbell_check_dwork;
struct work_struct nb_work;
struct notifier_block nb;
struct rw_semaphore vf_op_lock; /* lock for VF operations */
@@ -93,4 +98,6 @@ int ionic_port_identify(struct ionic *ionic);
int ionic_port_init(struct ionic *ionic);
int ionic_port_reset(struct ionic *ionic);
+bool ionic_doorbell_wa(struct ionic *ionic);
+
#endif /* _IONIC_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 6ba8d4aca0a0..b93791d6b593 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -326,6 +326,11 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out;
}
+#ifdef CONFIG_PPC64
+ /* Ensure MSI/MSI-X interrupts lie within addressable physical memory */
+ pdev->no_64bit_msi = 1;
+#endif
+
err = ionic_setup_one(ionic);
if (err)
goto err_out;
@@ -372,6 +377,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mod_timer(&ionic->watchdog_timer,
round_jiffies(jiffies + ionic->watchdog_period));
+ ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
return 0;
@@ -406,6 +412,8 @@ static void ionic_remove(struct pci_dev *pdev)
if (test_and_clear_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state))
set_bit(IONIC_LIF_F_FW_STOPPING, ionic->lif->state);
+ if (ionic->lif->doorbell_wa)
+ cancel_delayed_work_sync(&ionic->doorbell_check_dwork);
ionic_lif_unregister(ionic->lif);
ionic_devlink_unregister(ionic);
ionic_lif_deinit(ionic->lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index c3ae11a48024..59e5a9f21105 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -220,7 +220,7 @@ static int netdev_show(struct seq_file *seq, void *v)
{
struct net_device *netdev = seq->private;
- seq_printf(seq, "%s\n", netdev->name);
+ seq_printf(seq, "%s\n", netdev_name(netdev));
return 0;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 874499337132..9e42d599840d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -43,11 +43,99 @@ static void ionic_watchdog_cb(struct timer_list *t)
work->type = IONIC_DW_TYPE_RX_MODE;
netdev_dbg(lif->netdev, "deferred: rx_mode\n");
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
}
}
-static void ionic_watchdog_init(struct ionic *ionic)
+static void ionic_napi_schedule_do_softirq(struct napi_struct *napi)
+{
+ local_bh_disable();
+ napi_schedule(napi);
+ local_bh_enable();
+}
+
+void ionic_doorbell_napi_work(struct work_struct *work)
+{
+ struct ionic_qcq *qcq = container_of(work, struct ionic_qcq,
+ doorbell_napi_work);
+ unsigned long now, then, dif;
+
+ now = READ_ONCE(jiffies);
+ then = qcq->q.dbell_jiffies;
+ dif = now - then;
+
+ if (dif > qcq->q.dbell_deadline)
+ ionic_napi_schedule_do_softirq(&qcq->napi);
+}
+
+static int ionic_get_preferred_cpu(struct ionic *ionic,
+ struct ionic_intr_info *intr)
+{
+ int cpu;
+
+ cpu = cpumask_first_and(*intr->affinity_mask, cpu_online_mask);
+ if (cpu >= nr_cpu_ids)
+ cpu = cpumask_local_spread(0, dev_to_node(ionic->dev));
+
+ return cpu;
+}
+
+static void ionic_queue_dbell_napi_work(struct ionic *ionic,
+ struct ionic_qcq *qcq)
+{
+ int cpu;
+
+ if (!(qcq->flags & IONIC_QCQ_F_INTR))
+ return;
+
+ cpu = ionic_get_preferred_cpu(ionic, &qcq->intr);
+ queue_work_on(cpu, ionic->wq, &qcq->doorbell_napi_work);
+}
+
+static void ionic_doorbell_check_dwork(struct work_struct *work)
+{
+ struct ionic *ionic = container_of(work, struct ionic,
+ doorbell_check_dwork.work);
+ struct ionic_lif *lif = ionic->lif;
+
+ mutex_lock(&lif->queue_lock);
+
+ if (test_bit(IONIC_LIF_F_FW_STOPPING, lif->state) ||
+ test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ mutex_unlock(&lif->queue_lock);
+ return;
+ }
+
+ ionic_napi_schedule_do_softirq(&lif->adminqcq->napi);
+
+ if (test_bit(IONIC_LIF_F_UP, lif->state)) {
+ int i;
+
+ for (i = 0; i < lif->nxqs; i++) {
+ ionic_queue_dbell_napi_work(ionic, lif->txqcqs[i]);
+ ionic_queue_dbell_napi_work(ionic, lif->rxqcqs[i]);
+ }
+
+ if (lif->hwstamp_txq &&
+ lif->hwstamp_txq->flags & IONIC_QCQ_F_INTR)
+ ionic_napi_schedule_do_softirq(&lif->hwstamp_txq->napi);
+ if (lif->hwstamp_rxq &&
+ lif->hwstamp_rxq->flags & IONIC_QCQ_F_INTR)
+ ionic_napi_schedule_do_softirq(&lif->hwstamp_rxq->napi);
+ }
+ mutex_unlock(&lif->queue_lock);
+
+ ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
+}
+
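+/* The doorbell workaround is needed only on older ASICs: an
+ * unidentified asic_type (0) or Elba.
+ */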
+bool ionic_doorbell_wa(struct ionic *ionic)
+{
+ u8 asic_type = ionic->idev.dev_info.asic_type;
+
+ return !asic_type || asic_type == IONIC_ASIC_TYPE_ELBA;
+}
+
+static int ionic_watchdog_init(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
@@ -63,6 +151,31 @@ static void ionic_watchdog_init(struct ionic *ionic)
idev->fw_status_ready = true;
idev->fw_generation = IONIC_FW_STS_F_GENERATION &
ioread8(&idev->dev_info_regs->fw_status);
+
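+ /* Unbound per-device workqueue for deferred work and the doorbell
+ * check/napi work items.
+ */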
+ ionic->wq = alloc_workqueue("%s-wq", WQ_UNBOUND, 0,
+ dev_name(ionic->dev));
+ if (!ionic->wq) {
+ dev_err(ionic->dev, "alloc_workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ if (ionic_doorbell_wa(ionic))
+ INIT_DELAYED_WORK(&ionic->doorbell_check_dwork,
+ ionic_doorbell_check_dwork);
+
+ return 0;
+}
+
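+/* (Re)arm the delayed doorbell check on the admin interrupt's
+ * preferred CPU; a no-op when the workaround isn't needed.
+ */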
+void ionic_queue_doorbell_check(struct ionic *ionic, int delay)
+{
+ int cpu;
+
+ if (!ionic->lif->doorbell_wa)
+ return;
+
+ cpu = ionic_get_preferred_cpu(ionic, &ionic->lif->adminqcq->intr);
+ queue_delayed_work_on(cpu, ionic->wq, &ionic->doorbell_check_dwork,
+ delay);
}
void ionic_init_devinfo(struct ionic *ionic)
@@ -94,6 +207,7 @@ int ionic_dev_setup(struct ionic *ionic)
struct device *dev = ionic->dev;
int size;
u32 sig;
+ int err;
/* BAR0: dev_cmd and interrupts */
if (num_bars < 1) {
@@ -129,7 +243,9 @@ int ionic_dev_setup(struct ionic *ionic)
return -EFAULT;
}
- ionic_watchdog_init(ionic);
+ err = ionic_watchdog_init(ionic);
+ if (err)
+ return err;
idev->db_pages = bar->vaddr;
idev->phy_db_pages = bar->bus_addr;
@@ -161,6 +277,7 @@ void ionic_dev_teardown(struct ionic *ionic)
idev->phy_cmb_pages = 0;
idev->cmb_npages = 0;
+ destroy_workqueue(ionic->wq);
mutex_destroy(&idev->cmb_inuse_lock);
}
@@ -273,7 +390,7 @@ do_check_time:
if (work) {
work->type = IONIC_DW_TYPE_LIF_RESET;
work->fw_status = fw_status_ready;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
}
}
}
@@ -703,10 +820,6 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell)
q->dbval | q->head_idx);
q->dbell_jiffies = jiffies;
-
- if (q_to_qcq(q)->napi_qcq)
- mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
- jiffies + IONIC_NAPI_DEADLINE);
}
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index f30eee4a5a80..c647033f3ad2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -28,7 +28,7 @@
#define IONIC_DEV_INFO_REG_COUNT 32
#define IONIC_DEV_CMD_REG_COUNT 32
-#define IONIC_NAPI_DEADLINE (HZ / 200) /* 5ms */
+#define IONIC_NAPI_DEADLINE (HZ) /* 1 sec */
#define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */
#define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
#define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */
@@ -280,9 +280,9 @@ struct ionic_intr_info {
u64 rearm_count;
unsigned int index;
unsigned int vector;
- unsigned int cpu;
u32 dim_coal_hw;
- cpumask_t affinity_mask;
+ cpumask_var_t *affinity_mask;
+ struct irq_affinity_notify aff_notify;
};
struct ionic_cq {
@@ -375,7 +375,9 @@ typedef void (*ionic_cq_done_cb)(void *done_arg);
unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
ionic_cq_cb cb, ionic_cq_done_cb done_cb,
void *done_arg);
-unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do);
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do,
+ bool in_napi);
int ionic_q_init(struct ionic_lif *lif, struct ionic_dev *idev,
struct ionic_queue *q, unsigned int index, const char *name,
@@ -386,6 +388,8 @@ bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos);
int ionic_heartbeat_check(struct ionic *ionic);
bool ionic_is_fw_running(struct ionic_dev *idev);
+void ionic_doorbell_napi_work(struct work_struct *work);
+void ionic_queue_doorbell_check(struct ionic *ionic, int delay);
bool ionic_adminq_poke_doorbell(struct ionic_queue *q);
bool ionic_txq_poke_doorbell(struct ionic_queue *q);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 91183965a6b7..4619fd74f3e3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -11,6 +11,8 @@
#include "ionic_ethtool.h"
#include "ionic_stats.h"
+#define IONIC_MAX_RX_COPYBREAK min(U16_MAX, IONIC_MAX_BUF_LEN)
+
static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
{
u32 i;
@@ -872,10 +874,17 @@ static int ionic_set_tunable(struct net_device *dev,
const void *data)
{
struct ionic_lif *lif = netdev_priv(dev);
+ u32 rx_copybreak;
switch (tuna->id) {
case ETHTOOL_RX_COPYBREAK:
- lif->rx_copybreak = *(u32 *)data;
+ rx_copybreak = *(u32 *)data;
+ if (rx_copybreak > IONIC_MAX_RX_COPYBREAK) {
+ netdev_err(dev, "Max supported rx_copybreak size: %u\n",
+ IONIC_MAX_RX_COPYBREAK);
+ return -EINVAL;
+ }
+ lif->rx_copybreak = (u16)rx_copybreak;
break;
default:
return -EOPNOTSUPP;
@@ -968,7 +977,7 @@ static int ionic_get_module_eeprom(struct net_device *netdev,
}
static int ionic_get_ts_info(struct net_device *netdev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ionic_lif *lif = netdev_priv(netdev);
struct ionic *ionic = lif->ionic;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index 9a1825edf0d0..9c85c0706c6e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -71,7 +71,7 @@ enum ionic_cmd_opcode {
IONIC_CMD_FW_CONTROL_V1 = 255,
};
-/**
+/*
* enum ionic_status_code - Device command return codes
*/
enum ionic_status_code {
@@ -112,6 +112,7 @@ enum ionic_notifyq_opcode {
/**
* struct ionic_admin_cmd - General admin command format
* @opcode: Opcode for the command
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
* @cmd_data: Opcode-specific command bytes
*/
@@ -125,6 +126,7 @@ struct ionic_admin_cmd {
/**
* struct ionic_admin_comp - General admin command completion format
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @cmd_data: Command-specific bytes
* @color: Color bit (Always 0 for commands issued to the
@@ -147,6 +149,7 @@ static inline u8 color_match(u8 color, u8 done_color)
/**
* struct ionic_nop_cmd - NOP command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
*/
struct ionic_nop_cmd {
u8 opcode;
@@ -156,6 +159,7 @@ struct ionic_nop_cmd {
/**
* struct ionic_nop_comp - NOP command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_nop_comp {
u8 status;
@@ -166,6 +170,7 @@ struct ionic_nop_comp {
* struct ionic_dev_init_cmd - Device init command
* @opcode: opcode
* @type: Device type
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_init_cmd {
u8 opcode;
@@ -176,6 +181,7 @@ struct ionic_dev_init_cmd {
/**
* struct ionic_dev_init_comp - Device init command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_init_comp {
u8 status;
@@ -185,6 +191,7 @@ struct ionic_dev_init_comp {
/**
* struct ionic_dev_reset_cmd - Device reset command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_reset_cmd {
u8 opcode;
@@ -194,6 +201,7 @@ struct ionic_dev_reset_cmd {
/**
* struct ionic_dev_reset_comp - Reset command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_reset_comp {
u8 status;
@@ -207,6 +215,7 @@ struct ionic_dev_reset_comp {
* struct ionic_dev_identify_cmd - Driver/device identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_identify_cmd {
u8 opcode;
@@ -218,6 +227,7 @@ struct ionic_dev_identify_cmd {
* struct ionic_dev_identify_comp - Driver/device identify command completion
* @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_identify_comp {
u8 status;
@@ -242,6 +252,7 @@ enum ionic_os_type {
* @kernel_ver: Kernel version, numeric format
* @kernel_ver_str: Kernel version, string format
* @driver_ver_str: Driver version, string format
+ * @words: word access to struct contents
*/
union ionic_drv_identity {
struct {
@@ -267,7 +278,9 @@ enum ionic_dev_capability {
* union ionic_dev_identity - device identity information
* @version: Version of device identify
* @type: Identify type (0 for now)
+ * @rsvd: reserved byte(s)
* @nports: Number of ports provisioned
+ * @rsvd2: reserved byte(s)
* @nlifs: Number of LIFs provisioned
* @nintrs: Number of interrupts provisioned
* @ndbpgs_per_lif: Number of doorbell pages per LIF
@@ -284,6 +297,7 @@ enum ionic_dev_capability {
* @hwstamp_mult: Hardware tick to nanosecond multiplier.
* @hwstamp_shift: Hardware tick to nanosecond divisor (power of two).
* @capabilities: Device capabilities
+ * @words: word access to struct contents
*/
union ionic_dev_identity {
struct {
@@ -317,6 +331,7 @@ enum ionic_lif_type {
* @opcode: opcode
* @type: LIF type (enum ionic_lif_type)
* @ver: Version of identify returned by device
+ * @rsvd: reserved byte(s)
*/
struct ionic_lif_identify_cmd {
u8 opcode;
@@ -329,6 +344,7 @@ struct ionic_lif_identify_cmd {
* struct ionic_lif_identify_comp - LIF identify command completion
* @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_identify_comp {
u8 status;
@@ -416,7 +432,7 @@ enum ionic_txq_feature {
};
/**
- * struct ionic_hwstamp_bits - Hardware timestamp decoding bits
+ * enum ionic_hwstamp_bits - Hardware timestamp decoding bits
* @IONIC_HWSTAMP_INVALID: Invalid hardware timestamp value
* @IONIC_HWSTAMP_CQ_NEGOFFSET: Timestamp field negative offset
* from the base cq descriptor.
@@ -429,6 +445,7 @@ enum ionic_hwstamp_bits {
/**
* struct ionic_lif_logical_qtype - Descriptor of logical to HW queue type
* @qtype: Hardware Queue Type
+ * @rsvd: reserved byte(s)
* @qid_count: Number of Queue IDs of the logical type
* @qid_base: Minimum Queue ID of the logical type
*/
@@ -454,12 +471,14 @@ enum ionic_lif_state {
/**
* union ionic_lif_config - LIF configuration
* @state: LIF state (enum ionic_lif_state)
+ * @rsvd: reserved byte(s)
* @name: LIF name
* @mtu: MTU
* @mac: Station MAC address
* @vlan: Default Vlan ID
* @features: Features (enum ionic_eth_hw_features)
* @queue_count: Queue counts per queue-type
+ * @words: word access to struct contents
*/
union ionic_lif_config {
struct {
@@ -481,33 +500,39 @@ union ionic_lif_config {
* @capabilities: LIF capabilities
*
* @eth: Ethernet identify structure
- * @version: Ethernet identify structure version
- * @max_ucast_filters: Number of perfect unicast addresses supported
- * @max_mcast_filters: Number of perfect multicast addresses supported
- * @min_frame_size: Minimum size of frames to be sent
- * @max_frame_size: Maximum size of frames to be sent
- * @hwstamp_tx_modes: Bitmask of BIT_ULL(enum ionic_txstamp_mode)
- * @hwstamp_rx_filters: Bitmask of enum ionic_pkt_class
- * @config: LIF config struct with features, mtu, mac, q counts
+ * @eth.version: Ethernet identify structure version
+ * @eth.rsvd: reserved byte(s)
+ * @eth.max_ucast_filters: Number of perfect unicast addresses supported
+ * @eth.max_mcast_filters: Number of perfect multicast addresses supported
+ * @eth.min_frame_size: Minimum size of frames to be sent
+ * @eth.max_frame_size: Maximum size of frames to be sent
+ * @eth.rsvd2: reserved byte(s)
+ * @eth.hwstamp_tx_modes: Bitmask of BIT_ULL(enum ionic_txstamp_mode)
+ * @eth.hwstamp_rx_filters: Bitmask of enum ionic_pkt_class
+ * @eth.rsvd3: reserved byte(s)
+ * @eth.config: LIF config struct with features, mtu, mac, q counts
*
* @rdma: RDMA identify structure
- * @version: RDMA version of opcodes and queue descriptors
- * @qp_opcodes: Number of RDMA queue pair opcodes supported
- * @admin_opcodes: Number of RDMA admin opcodes supported
- * @npts_per_lif: Page table size per LIF
- * @nmrs_per_lif: Number of memory regions per LIF
- * @nahs_per_lif: Number of address handles per LIF
- * @max_stride: Max work request stride
- * @cl_stride: Cache line stride
- * @pte_stride: Page table entry stride
- * @rrq_stride: Remote RQ work request stride
- * @rsq_stride: Remote SQ work request stride
- * @dcqcn_profiles: Number of DCQCN profiles
- * @aq_qtype: RDMA Admin Qtype
- * @sq_qtype: RDMA Send Qtype
- * @rq_qtype: RDMA Receive Qtype
- * @cq_qtype: RDMA Completion Qtype
- * @eq_qtype: RDMA Event Qtype
+ * @rdma.version: RDMA version of opcodes and queue descriptors
+ * @rdma.qp_opcodes: Number of RDMA queue pair opcodes supported
+ * @rdma.admin_opcodes: Number of RDMA admin opcodes supported
+ * @rdma.rsvd: reserved byte(s)
+ * @rdma.npts_per_lif: Page table size per LIF
+ * @rdma.nmrs_per_lif: Number of memory regions per LIF
+ * @rdma.nahs_per_lif: Number of address handles per LIF
+ * @rdma.max_stride: Max work request stride
+ * @rdma.cl_stride: Cache line stride
+ * @rdma.pte_stride: Page table entry stride
+ * @rdma.rrq_stride: Remote RQ work request stride
+ * @rdma.rsq_stride: Remote SQ work request stride
+ * @rdma.dcqcn_profiles: Number of DCQCN profiles
+ * @rdma.rsvd_dimensions: reserved byte(s)
+ * @rdma.aq_qtype: RDMA Admin Qtype
+ * @rdma.sq_qtype: RDMA Send Qtype
+ * @rdma.rq_qtype: RDMA Receive Qtype
+ * @rdma.cq_qtype: RDMA Completion Qtype
+ * @rdma.eq_qtype: RDMA Event Qtype
+ * @words: word access to struct contents
*/
union ionic_lif_identity {
struct {
@@ -558,7 +583,9 @@ union ionic_lif_identity {
* @opcode: Opcode
* @type: LIF type (enum ionic_lif_type)
* @index: LIF index
+ * @rsvd: reserved byte(s)
* @info_pa: Destination address for LIF info (struct ionic_lif_info)
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_init_cmd {
u8 opcode;
@@ -572,7 +599,9 @@ struct ionic_lif_init_cmd {
/**
* struct ionic_lif_init_comp - LIF init command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @hw_index: Hardware index of the initialized LIF
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_init_comp {
u8 status;
@@ -584,9 +613,11 @@ struct ionic_lif_init_comp {
/**
* struct ionic_q_identify_cmd - queue identify command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @lif_type: LIF type (enum ionic_lif_type)
* @type: Logical queue type (enum ionic_logical_qtype)
* @ver: Highest queue type version that the driver supports
+ * @rsvd2: reserved byte(s)
*/
struct ionic_q_identify_cmd {
u8 opcode;
@@ -600,8 +631,10 @@ struct ionic_q_identify_cmd {
/**
* struct ionic_q_identify_comp - queue identify command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @ver: Queue type version that can be used with FW
+ * @rsvd2: reserved byte(s)
*/
struct ionic_q_identify_comp {
u8 status;
@@ -615,12 +648,14 @@ struct ionic_q_identify_comp {
* union ionic_q_identity - queue identity information
* @version: Queue type version that can be used with FW
* @supported: Bitfield of queue versions, first bit = ver 0
+ * @rsvd: reserved byte(s)
* @features: Queue features (enum ionic_q_feature, etc)
* @desc_sz: Descriptor size
* @comp_sz: Completion descriptor size
* @sg_desc_sz: Scatter/Gather descriptor size
* @max_sg_elems: Maximum number of Scatter/Gather elements
* @sg_desc_stride: Number of Scatter/Gather elements per descriptor
+ * @words: word access to struct contents
*/
union ionic_q_identity {
struct {
@@ -640,8 +675,10 @@ union ionic_q_identity {
/**
* struct ionic_q_init_cmd - Queue init command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @type: Logical queue type
* @ver: Queue type version
+ * @rsvd1: reserved byte(s)
* @lif_index: LIF index
* @index: (LIF, qtype) relative admin queue index
* @intr_index: Interrupt control register index, or Event queue index
@@ -667,6 +704,7 @@ union ionic_q_identity {
* @ring_base: Queue ring base address
* @cq_ring_base: Completion queue ring base address
* @sg_ring_base: Scatter/Gather ring base address
+ * @rsvd2: reserved byte(s)
* @features: Mask of queue features to enable, if not in the flags above.
*/
struct ionic_q_init_cmd {
@@ -698,9 +736,11 @@ struct ionic_q_init_cmd {
/**
* struct ionic_q_init_comp - Queue init command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @hw_index: Hardware Queue ID
* @hw_type: Hardware Queue type
+ * @rsvd2: reserved byte(s)
* @color: Color
*/
struct ionic_q_init_comp {
@@ -800,7 +840,7 @@ enum ionic_txq_desc_opcode {
* will set CWR flag in the first segment if
* CWR is set in the template header, and
* clear CWR in remaining segments.
- * @flags:
+ * flags:
* vlan:
* Insert an L2 VLAN header using @vlan_tci
* encap:
@@ -813,13 +853,14 @@ enum ionic_txq_desc_opcode {
* TSO start
* tso_eot:
* TSO end
- * @num_sg_elems: Number of scatter-gather elements in SG
+ * num_sg_elems: Number of scatter-gather elements in SG
* descriptor
- * @addr: First data buffer's DMA address
+ * addr: First data buffer's DMA address
* (Subsequent data buffers are on txq_sg_desc)
* @len: First data buffer's length, in bytes
* @vlan_tci: VLAN tag to insert in the packet (if requested
* by @V-bit). Includes .1p and .1q tags
+ * @hword0: half word padding
* @hdr_len: Length of packet headers, including
* encapsulating outer header, if applicable
* Valid for opcodes IONIC_TXQ_DESC_OPCODE_CALC_CSUM and
@@ -830,10 +871,12 @@ enum ionic_txq_desc_opcode {
* IONIC_TXQ_DESC_OPCODE_TSO, @hdr_len is up to
* inner-most L4 payload, so inclusive of
* inner-most L4 header.
+ * @hword1: half word padding
* @mss: Desired MSS value for TSO; only applicable for
* IONIC_TXQ_DESC_OPCODE_TSO
* @csum_start: Offset from packet to first byte checked in L4 checksum
* @csum_offset: Offset from csum_start to L4 checksum field
+ * @hword2: half word padding
*/
struct ionic_txq_desc {
__le64 cmd;
@@ -901,6 +944,7 @@ static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags,
* struct ionic_txq_sg_elem - Transmit scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
+ * @rsvd: reserved byte(s)
*/
struct ionic_txq_sg_elem {
__le64 addr;
@@ -927,7 +971,9 @@ struct ionic_txq_sg_desc_v1 {
/**
* struct ionic_txq_comp - Ethernet transmit queue completion descriptor
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_txq_comp {
@@ -953,6 +999,7 @@ enum ionic_rxq_desc_opcode {
* receive, including actual bytes received,
* are recorded in Rx completion descriptor.
*
+ * @rsvd: reserved byte(s)
* @len: Data buffer's length, in bytes
* @addr: Data buffer's DMA address
*/
@@ -967,6 +1014,7 @@ struct ionic_rxq_desc {
* struct ionic_rxq_sg_elem - Receive scatter-gather (SG) descriptor element
* @addr: DMA address of SG element data buffer
* @len: Length of SG element data buffer, in bytes
+ * @rsvd: reserved byte(s)
*/
struct ionic_rxq_sg_elem {
__le64 addr;
@@ -1170,6 +1218,7 @@ enum ionic_pkt_class {
* @lif_index: LIF index
* @index: Queue index
* @oper: Operation (enum ionic_q_control_oper)
+ * @rsvd: reserved byte(s)
*/
struct ionic_q_control_cmd {
u8 opcode;
@@ -1182,7 +1231,7 @@ struct ionic_q_control_cmd {
typedef struct ionic_admin_comp ionic_q_control_comp;
-enum q_control_oper {
+enum ionic_q_control_oper {
IONIC_Q_DISABLE = 0,
IONIC_Q_ENABLE = 1,
IONIC_Q_HANG_RESET = 2,
@@ -1216,7 +1265,7 @@ enum ionic_xcvr_state {
IONIC_XCVR_STATE_SPROM_READ_ERR = 4,
};
-/**
+/*
* enum ionic_xcvr_pid - Supported link modes
*/
enum ionic_xcvr_pid {
@@ -1351,6 +1400,7 @@ struct ionic_xcvr_status {
* @fec_type: fec type (enum ionic_port_fec_type)
* @pause_type: pause type (enum ionic_port_pause_type)
* @loopback_mode: loopback mode (enum ionic_port_loopback_mode)
+ * @words: word access to struct contents
*/
union ionic_port_config {
struct {
@@ -1382,6 +1432,7 @@ union ionic_port_config {
* @speed: link speed (in Mbps)
* @link_down_count: number of times link went from up to down
* @fec_type: fec type (enum ionic_port_fec_type)
+ * @rsvd: reserved byte(s)
* @xcvr: transceiver status
*/
struct ionic_port_status {
@@ -1399,6 +1450,7 @@ struct ionic_port_status {
* @opcode: opcode
* @index: port index
* @ver: Highest version of identify supported by driver
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_identify_cmd {
u8 opcode;
@@ -1411,6 +1463,7 @@ struct ionic_port_identify_cmd {
* struct ionic_port_identify_comp - Port identify command completion
* @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_identify_comp {
u8 status;
@@ -1422,7 +1475,9 @@ struct ionic_port_identify_comp {
* struct ionic_port_init_cmd - Port initialization command
* @opcode: opcode
* @index: port index
+ * @rsvd: reserved byte(s)
* @info_pa: destination address for port info (struct ionic_port_info)
+ * @rsvd2: reserved byte(s)
*/
struct ionic_port_init_cmd {
u8 opcode;
@@ -1435,6 +1490,7 @@ struct ionic_port_init_cmd {
/**
* struct ionic_port_init_comp - Port initialization command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_init_comp {
u8 status;
@@ -1445,6 +1501,7 @@ struct ionic_port_init_comp {
* struct ionic_port_reset_cmd - Port reset command
* @opcode: opcode
* @index: port index
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_reset_cmd {
u8 opcode;
@@ -1455,6 +1512,7 @@ struct ionic_port_reset_cmd {
/**
* struct ionic_port_reset_comp - Port reset command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_reset_comp {
u8 status;
@@ -1510,6 +1568,7 @@ enum ionic_port_attr {
* @opcode: Opcode
* @index: Port index
* @attr: Attribute type (enum ionic_port_attr)
+ * @rsvd: reserved byte(s)
* @state: Port state
* @speed: Port speed
* @mtu: Port MTU
@@ -1518,6 +1577,7 @@ enum ionic_port_attr {
* @pause_type: Port pause type setting
* @loopback_mode: Port loopback mode
* @stats_ctl: Port stats setting
+ * @rsvd2: reserved byte(s)
*/
struct ionic_port_setattr_cmd {
u8 opcode;
@@ -1540,6 +1600,7 @@ struct ionic_port_setattr_cmd {
/**
* struct ionic_port_setattr_comp - Port set attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @color: Color bit
*/
struct ionic_port_setattr_comp {
@@ -1553,6 +1614,7 @@ struct ionic_port_setattr_comp {
* @opcode: Opcode
* @index: port index
* @attr: Attribute type (enum ionic_port_attr)
+ * @rsvd: reserved byte(s)
*/
struct ionic_port_getattr_cmd {
u8 opcode;
@@ -1564,6 +1626,7 @@ struct ionic_port_getattr_cmd {
/**
* struct ionic_port_getattr_comp - Port get attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @state: Port state
* @speed: Port speed
* @mtu: Port MTU
@@ -1571,6 +1634,7 @@ struct ionic_port_getattr_cmd {
* @fec_type: Port FEC type setting
* @pause_type: Port pause type setting
* @loopback_mode: Port loopback mode
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_port_getattr_comp {
@@ -1593,9 +1657,11 @@ struct ionic_port_getattr_comp {
* struct ionic_lif_status - LIF status register
* @eid: most recent NotifyQ event id
* @port_num: port the LIF is connected to
+ * @rsvd: reserved byte(s)
* @link_status: port status (enum ionic_port_oper_status)
* @link_speed: speed of link in Mbps
* @link_down_count: number of times link went from up to down
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_status {
__le64 eid;
@@ -1610,7 +1676,9 @@ struct ionic_lif_status {
/**
* struct ionic_lif_reset_cmd - LIF reset command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @index: LIF index
+ * @rsvd2: reserved byte(s)
*/
struct ionic_lif_reset_cmd {
u8 opcode;
@@ -1643,9 +1711,11 @@ enum ionic_dev_attr {
* struct ionic_dev_setattr_cmd - Set Device attributes on the NIC
* @opcode: Opcode
* @attr: Attribute type (enum ionic_dev_attr)
+ * @rsvd: reserved byte(s)
* @state: Device state (enum ionic_dev_state)
* @name: The bus info, e.g. PCI slot-device-function, 0 terminated
* @features: Device features
+ * @rsvd2: reserved byte(s)
*/
struct ionic_dev_setattr_cmd {
u8 opcode;
@@ -1662,7 +1732,9 @@ struct ionic_dev_setattr_cmd {
/**
* struct ionic_dev_setattr_comp - Device set attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @features: Device features
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_dev_setattr_comp {
@@ -1679,6 +1751,7 @@ struct ionic_dev_setattr_comp {
* struct ionic_dev_getattr_cmd - Get Device attributes from the NIC
* @opcode: opcode
* @attr: Attribute type (enum ionic_dev_attr)
+ * @rsvd: reserved byte(s)
*/
struct ionic_dev_getattr_cmd {
u8 opcode;
@@ -1687,9 +1760,11 @@ struct ionic_dev_getattr_cmd {
};
/**
- * struct ionic_dev_setattr_comp - Device set attr command completion
+ * struct ionic_dev_getattr_comp - Device get attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @features: Device features
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_dev_getattr_comp {
@@ -1702,7 +1777,7 @@ struct ionic_dev_getattr_comp {
u8 color;
};
-/**
+/*
* RSS parameters
*/
#define IONIC_RSS_HASH_KEY_SIZE 40
@@ -1726,6 +1801,7 @@ enum ionic_rss_hash_types {
* @IONIC_LIF_ATTR_RSS: LIF RSS attribute
* @IONIC_LIF_ATTR_STATS_CTRL: LIF statistics control attribute
* @IONIC_LIF_ATTR_TXSTAMP: LIF TX timestamping mode
+ * @IONIC_LIF_ATTR_MAX: maximum attribute value
*/
enum ionic_lif_attr {
IONIC_LIF_ATTR_STATE = 0,
@@ -1736,6 +1812,7 @@ enum ionic_lif_attr {
IONIC_LIF_ATTR_RSS = 5,
IONIC_LIF_ATTR_STATS_CTRL = 6,
IONIC_LIF_ATTR_TXSTAMP = 7,
+ IONIC_LIF_ATTR_MAX = 255,
};
/**
@@ -1749,11 +1826,13 @@ enum ionic_lif_attr {
* @mac: Station mac
* @features: Features (enum ionic_eth_hw_features)
* @rss: RSS properties
- * @types: The hash types to enable (see rss_hash_types)
- * @key: The hash secret key
- * @addr: Address for the indirection table shared memory
+ * @rss.types: The hash types to enable (see rss_hash_types)
+ * @rss.key: The hash secret key
+ * @rss.rsvd: reserved byte(s)
+ * @rss.addr: Address for the indirection table shared memory
* @stats_ctl: stats control commands (enum ionic_stats_ctl_cmd)
- * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode)
+ * @txstamp_mode: TX Timestamping Mode (enum ionic_txstamp_mode)
+ * @rsvd: reserved byte(s)
*/
struct ionic_lif_setattr_cmd {
u8 opcode;
@@ -1772,7 +1851,7 @@ struct ionic_lif_setattr_cmd {
__le64 addr;
} rss;
u8 stats_ctl;
- __le16 txstamp_mode;
+ __le16 txstamp_mode;
u8 rsvd[60];
} __packed;
};
@@ -1780,8 +1859,10 @@ struct ionic_lif_setattr_cmd {
/**
* struct ionic_lif_setattr_comp - LIF set attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @features: features (enum ionic_eth_hw_features)
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_lif_setattr_comp {
@@ -1800,6 +1881,7 @@ struct ionic_lif_setattr_comp {
* @opcode: Opcode
* @attr: Attribute type (enum ionic_lif_attr)
* @index: LIF index
+ * @rsvd: reserved byte(s)
*/
struct ionic_lif_getattr_cmd {
u8 opcode;
@@ -1811,13 +1893,14 @@ struct ionic_lif_getattr_cmd {
/**
* struct ionic_lif_getattr_comp - LIF get attr command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @state: LIF state (enum ionic_lif_state)
- * @name: The netdev name string, 0 terminated
* @mtu: Mtu
* @mac: Station mac
* @features: Features (enum ionic_eth_hw_features)
- * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode)
+ * @txstamp_mode: TX Timestamping Mode (enum ionic_txstamp_mode)
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_lif_getattr_comp {
@@ -1838,12 +1921,15 @@ struct ionic_lif_getattr_comp {
/**
* struct ionic_lif_setphc_cmd - Set LIF PTP Hardware Clock
* @opcode: Opcode
+ * @rsvd1: reserved byte(s)
* @lif_index: LIF index
+ * @rsvd2: reserved byte(s)
* @tick: Hardware stamp tick of an instant in time.
* @nsec: Nanosecond stamp of the same instant.
* @frac: Fractional nanoseconds at the same instant.
* @mult: Cycle to nanosecond multiplier.
* @shift: Cycle to nanosecond divisor (power of two).
+ * @rsvd3: reserved byte(s)
*/
struct ionic_lif_setphc_cmd {
u8 opcode;
@@ -1870,6 +1956,7 @@ enum ionic_rx_mode {
/**
* struct ionic_rx_mode_set_cmd - Set LIF's Rx mode command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
* @rx_mode: Rx mode flags:
* IONIC_RX_MODE_F_UNICAST: Accept known unicast packets
@@ -1878,6 +1965,7 @@ enum ionic_rx_mode {
* IONIC_RX_MODE_F_PROMISC: Accept any packets
* IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets
* IONIC_RX_MODE_F_RDMA_SNIFFER: Sniff RDMA packets
+ * @rsvd2: reserved byte(s)
*/
struct ionic_rx_mode_set_cmd {
u8 opcode;
@@ -1904,13 +1992,14 @@ enum ionic_rx_filter_match_type {
* @qid: Queue ID
* @match: Rx filter match type (see IONIC_RX_FILTER_MATCH_xxx)
* @vlan: VLAN filter
- * @vlan: VLAN ID
+ * @vlan.vlan: VLAN ID
* @mac: MAC filter
- * @addr: MAC address (network-byte order)
+ * @mac.addr: MAC address (network-byte order)
* @mac_vlan: MACVLAN filter
- * @vlan: VLAN ID
- * @addr: MAC address (network-byte order)
+ * @mac_vlan.vlan: VLAN ID
+ * @mac_vlan.addr: MAC address (network-byte order)
* @pkt_class: Packet classification filter
+ * @rsvd: reserved byte(s)
*/
struct ionic_rx_filter_add_cmd {
u8 opcode;
@@ -1937,8 +2026,10 @@ struct ionic_rx_filter_add_cmd {
/**
* struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @filter_id: Filter ID
+ * @rsvd2: reserved byte(s)
* @color: Color bit
*/
struct ionic_rx_filter_add_comp {
@@ -1953,8 +2044,10 @@ struct ionic_rx_filter_add_comp {
/**
* struct ionic_rx_filter_del_cmd - Delete LIF Rx filter command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
* @filter_id: Filter ID
+ * @rsvd2: reserved byte(s)
*/
struct ionic_rx_filter_del_cmd {
u8 opcode;
@@ -2000,6 +2093,7 @@ enum ionic_vf_link_status {
* @trust: enable VF trust
* @linkstate: set link up or down
* @stats_pa: set DMA address for VF stats
+ * @pad: reserved byte(s)
*/
struct ionic_vf_setattr_cmd {
u8 opcode;
@@ -2031,6 +2125,7 @@ struct ionic_vf_setattr_comp {
* @opcode: Opcode
* @attr: Attribute type (enum ionic_vf_attr)
* @vf_index: VF index
+ * @rsvd: reserved byte(s)
*/
struct ionic_vf_getattr_cmd {
u8 opcode;
@@ -2064,8 +2159,8 @@ enum ionic_vf_ctrl_opcode {
/**
* struct ionic_vf_ctrl_cmd - VF control command
* @opcode: Opcode for the command
- * @vf_index: VF Index. It is unused if op START_ALL is used.
* @ctrl_opcode: VF control operation type
+ * @vf_index: VF Index. It is unused if op START_ALL is used.
*/
struct ionic_vf_ctrl_cmd {
u8 opcode;
@@ -2089,7 +2184,7 @@ struct ionic_vf_ctrl_comp {
* struct ionic_qos_identify_cmd - QoS identify command
* @opcode: opcode
* @ver: Highest version of identify supported by driver
- *
+ * @rsvd: reserved byte(s)
*/
struct ionic_qos_identify_cmd {
u8 opcode;
@@ -2101,6 +2196,7 @@ struct ionic_qos_identify_cmd {
* struct ionic_qos_identify_comp - QoS identify command completion
* @status: Status of the command (enum ionic_status_code)
* @ver: Version of identify returned by device
+ * @rsvd: reserved byte(s)
*/
struct ionic_qos_identify_comp {
u8 status;
@@ -2118,7 +2214,7 @@ struct ionic_qos_identify_comp {
#define IONIC_QOS_ALL_PCP 0xFF
#define IONIC_DSCP_BLOCK_SIZE 8
-/**
+/*
* enum ionic_qos_class
*/
enum ionic_qos_class {
@@ -2174,6 +2270,7 @@ enum ionic_qos_sched_type {
* @dot1q_pcp: Dot1q pcp value
* @ndscp: Number of valid dscp values in the ip_dscp field
* @ip_dscp: IP dscp values
+ * @words: word access to struct contents
*/
union ionic_qos_config {
struct {
@@ -2219,8 +2316,9 @@ union ionic_qos_config {
* union ionic_qos_identity - QoS identity structure
* @version: Version of the identify structure
* @type: QoS system type
- * @nclasses: Number of usable QoS classes
+ * @rsvd: reserved byte(s)
* @config: Current configuration of classes
+ * @words: word access to struct contents
*/
union ionic_qos_identity {
struct {
@@ -2236,7 +2334,9 @@ union ionic_qos_identity {
* struct ionic_qos_init_cmd - QoS config init command
* @opcode: Opcode
* @group: QoS class id
+ * @rsvd: reserved byte(s)
* @info_pa: destination address for qos info
+ * @rsvd1: reserved byte(s)
*/
struct ionic_qos_init_cmd {
u8 opcode;
@@ -2252,6 +2352,7 @@ typedef struct ionic_admin_comp ionic_qos_init_comp;
* struct ionic_qos_reset_cmd - QoS config reset command
* @opcode: Opcode
* @group: QoS class id
+ * @rsvd: reserved byte(s)
*/
struct ionic_qos_reset_cmd {
u8 opcode;
@@ -2260,8 +2361,10 @@ struct ionic_qos_reset_cmd {
};
/**
- * struct ionic_qos_clear_port_stats_cmd - Qos config reset command
+ * struct ionic_qos_clear_stats_cmd - QoS clear stats command
* @opcode: Opcode
+ * @group_bitmap: bitmap of groups to be cleared
+ * @rsvd: reserved byte(s)
*/
struct ionic_qos_clear_stats_cmd {
u8 opcode;
@@ -2274,6 +2377,7 @@ typedef struct ionic_admin_comp ionic_qos_reset_comp;
/**
* struct ionic_fw_download_cmd - Firmware download command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @addr: dma address of the firmware buffer
* @offset: offset of the firmware buffer within the full image
* @length: number of valid bytes in the firmware buffer
@@ -2297,6 +2401,7 @@ typedef struct ionic_admin_comp ionic_fw_download_comp;
* @IONIC_FW_INSTALL_STATUS: Firmware installation status
* @IONIC_FW_ACTIVATE_ASYNC: Activate firmware asynchronously
* @IONIC_FW_ACTIVATE_STATUS: Firmware activate status
+ * @IONIC_FW_UPDATE_CLEANUP: Clean up after an interrupted fw update
*/
enum ionic_fw_control_oper {
IONIC_FW_RESET = 0,
@@ -2312,8 +2417,10 @@ enum ionic_fw_control_oper {
/**
* struct ionic_fw_control_cmd - Firmware control command
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @oper: firmware control operation (enum ionic_fw_control_oper)
* @slot: slot to activate
+ * @rsvd1: reserved byte(s)
*/
struct ionic_fw_control_cmd {
u8 opcode;
@@ -2326,8 +2433,10 @@ struct ionic_fw_control_cmd {
/**
 * struct ionic_fw_control_comp - Firmware control completion
* @status: Status of the command (enum ionic_status_code)
+ * @rsvd: reserved byte(s)
* @comp_index: Index in the descriptor ring for which this is the completion
* @slot: Slot where the firmware was installed
+ * @rsvd1: reserved byte(s)
* @color: Color bit
*/
struct ionic_fw_control_comp {
@@ -2346,7 +2455,9 @@ struct ionic_fw_control_comp {
/**
* struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd
* @opcode: opcode
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
+ * @rsvd2: reserved byte(s)
*
* There is no RDMA specific dev command completion struct. Completion uses
* the common struct ionic_admin_comp. Only the status is indicated.
@@ -2362,6 +2473,7 @@ struct ionic_rdma_reset_cmd {
/**
* struct ionic_rdma_queue_cmd - Create RDMA Queue command
* @opcode: opcode, 52, 53
+ * @rsvd: reserved byte(s)
* @lif_index: LIF index
* @qid_ver: (qid | (RDMA version << 24))
* @cid: intr, eq_id, or cq_id
@@ -2369,6 +2481,7 @@ struct ionic_rdma_reset_cmd {
* @depth_log2: log base two of queue depth
* @stride_log2: log base two of queue stride
* @dma_addr: address of the queue memory
+ * @rsvd2: reserved byte(s)
*
* The same command struct is used to create an RDMA event queue, completion
* queue, or RDMA admin queue. The cid is an interrupt number for an event
@@ -2425,6 +2538,7 @@ struct ionic_notifyq_event {
* @ecode: event code = IONIC_EVENT_LINK_CHANGE
* @link_status: link up/down, with error bits (enum ionic_port_status)
* @link_speed: speed of the network link
+ * @rsvd: reserved byte(s)
*
* Sent when the network link state changes between UP and DOWN
*/
@@ -2442,6 +2556,7 @@ struct ionic_link_change_event {
* @ecode: event code = IONIC_EVENT_RESET
* @reset_code: reset type
* @state: 0=pending, 1=complete, 2=error
+ * @rsvd: reserved byte(s)
*
* Sent when the NIC or some subsystem is going to be or
* has been reset.
@@ -2458,6 +2573,7 @@ struct ionic_reset_event {
* struct ionic_heartbeat_event - Sent periodically by NIC to indicate health
* @eid: event number
* @ecode: event code = IONIC_EVENT_HEARTBEAT
+ * @rsvd: reserved byte(s)
*/
struct ionic_heartbeat_event {
__le64 eid;
@@ -2481,6 +2597,7 @@ struct ionic_log_event {
* struct ionic_xcvr_event - Transceiver change event
* @eid: event number
* @ecode: event code = IONIC_EVENT_XCVR
+ * @rsvd: reserved byte(s)
*/
struct ionic_xcvr_event {
__le64 eid;
@@ -2488,7 +2605,7 @@ struct ionic_xcvr_event {
u8 rsvd[54];
};
-/**
+/*
* struct ionic_port_stats - Port statistics structure
*/
struct ionic_port_stats {
@@ -2646,8 +2763,7 @@ enum ionic_oflow_drop_stats {
IONIC_OFLOW_DROP_MAX,
};
-/**
- * struct port_pb_stats - packet buffers system stats
+/* struct ionic_port_pb_stats - packet buffers system stats
* uses ionic_pb_buffer_drop_stats for drop_counts[]
*/
struct ionic_port_pb_stats {
@@ -2681,7 +2797,9 @@ struct ionic_port_pb_stats {
* @pause_type: supported pause types
* @loopback_mode: supported loopback mode
* @speeds: supported speeds
+ * @rsvd2: reserved byte(s)
* @config: current port configuration
+ * @words: word access to struct contents
*/
union ionic_port_identity {
struct {
@@ -2707,7 +2825,8 @@ union ionic_port_identity {
* @status: Port status data
* @stats: Port statistics data
* @mgmt_stats: Port management statistics data
- * @port_pb_drop_stats: uplink pb drop stats
+ * @rsvd: reserved byte(s)
+ * @pb_stats: uplink pb drop stats
*/
struct ionic_port_info {
union ionic_port_config config;
@@ -2721,7 +2840,7 @@ struct ionic_port_info {
struct ionic_port_pb_stats pb_stats;
};
-/**
+/*
* struct ionic_lif_stats - LIF statistics structure
*/
struct ionic_lif_stats {
@@ -2983,8 +3102,10 @@ struct ionic_hwstamp_regs {
* bit 4-7 - 4 bit generation number, changes on fw restart
* @fw_heartbeat: Firmware heartbeat counter
* @serial_num: Serial number
+ * @rsvd_pad1024: reserved byte(s)
* @fw_version: Firmware version
- * @hwstamp_regs: Hardware current timestamp registers
+ * @hwstamp: Hardware current timestamp registers
+ * @words: word access to struct contents
*/
union ionic_dev_info_regs {
#define IONIC_DEVINFO_FWVERS_BUFLEN 32
@@ -3014,7 +3135,9 @@ union ionic_dev_info_regs {
* @done: Done indicator, bit 0 == 1 when command is complete
* @cmd: Opcode-specific command bytes
* @comp: Opcode-specific response bytes
+ * @rsvd: reserved byte(s)
* @data: Opcode-specific side-data
+ * @words: word access to struct contents
*/
union ionic_dev_cmd_regs {
struct {
@@ -3032,6 +3155,7 @@ union ionic_dev_cmd_regs {
* union ionic_dev_regs - Device register format for bar 0 page 0
* @info: Device info registers
* @devcmd: Device command registers
+ * @words: word access to struct contents
*/
union ionic_dev_regs {
struct {
@@ -3098,6 +3222,7 @@ union ionic_adminq_comp {
* interrupts when armed.
* @qid_lo: Queue destination for the producer index and flags (low bits)
* @qid_hi: Queue destination for the producer index and flags (high bits)
+ * @rsvd2: reserved byte(s)
*/
struct ionic_doorbell {
__le16 p_index;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 7f0c6cdc375e..aa0cc31dfe6e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -126,13 +126,13 @@ static void ionic_lif_deferred_work(struct work_struct *work)
} while (true);
}
-void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
+void ionic_lif_deferred_enqueue(struct ionic_lif *lif,
struct ionic_deferred_work *work)
{
- spin_lock_bh(&def->lock);
- list_add_tail(&work->list, &def->list);
- spin_unlock_bh(&def->lock);
- schedule_work(&def->work);
+ spin_lock_bh(&lif->deferred.lock);
+ list_add_tail(&work->list, &lif->deferred.list);
+ spin_unlock_bh(&lif->deferred.lock);
+ queue_work(lif->ionic->wq, &lif->deferred.work);
}
static void ionic_link_status_check(struct ionic_lif *lif)
@@ -207,19 +207,12 @@ void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
}
work->type = IONIC_DW_TYPE_LINK_STATUS;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
} else {
ionic_link_status_check(lif);
}
}
-static void ionic_napi_deadline(struct timer_list *timer)
-{
- struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline);
-
- napi_schedule(&qcq->napi);
-}
-
static irqreturn_t ionic_isr(int irq, void *data)
{
struct napi_struct *napi = data;
@@ -237,12 +230,12 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
const char *name;
if (lif->registered)
- name = lif->netdev->name;
+ name = netdev_name(lif->netdev);
else
name = dev_name(dev);
snprintf(intr->name, sizeof(intr->name),
- "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
+ "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name);
return devm_request_irq(dev, intr->vector, ionic_isr,
0, intr->name, &qcq->napi);
@@ -272,6 +265,18 @@ static void ionic_intr_free(struct ionic *ionic, int index)
clear_bit(index, ionic->intrs);
}
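+/* Keep the saved affinity mask in sync with user changes to the
+ * IRQ's affinity, so the CPU preferred for doorbell work follows.
+ */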
+static void ionic_irq_aff_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct ionic_intr_info *intr = container_of(notify, struct ionic_intr_info, aff_notify);
+
+ cpumask_copy(*intr->affinity_mask, mask);
+}
+
+static void ionic_irq_aff_release(struct kref __always_unused *ref)
+{
+}
+
static int ionic_qcq_enable(struct ionic_qcq *qcq)
{
struct ionic_queue *q = &qcq->q;
@@ -304,12 +309,12 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
if (ret)
return ret;
- if (qcq->napi.poll)
- napi_enable(&qcq->napi);
-
if (qcq->flags & IONIC_QCQ_F_INTR) {
+ napi_enable(&qcq->napi);
+ irq_set_affinity_notifier(qcq->intr.vector,
+ &qcq->intr.aff_notify);
irq_set_affinity_hint(qcq->intr.vector,
- &qcq->intr.affinity_mask);
+ *qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}
@@ -339,13 +344,15 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f
if (qcq->flags & IONIC_QCQ_F_INTR) {
struct ionic_dev *idev = &lif->ionic->idev;
+ if (lif->doorbell_wa)
+ cancel_work_sync(&qcq->doorbell_napi_work);
cancel_work_sync(&qcq->dim.work);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_SET);
synchronize_irq(qcq->intr.vector);
+ irq_set_affinity_notifier(qcq->intr.vector, NULL);
irq_set_affinity_hint(qcq->intr.vector, NULL);
napi_disable(&qcq->napi);
- del_timer_sync(&qcq->napi_deadline);
}
/* If there was a previous fw communication error, don't bother with
@@ -480,11 +487,11 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
{
n_qcq->intr.vector = src_qcq->intr.vector;
n_qcq->intr.index = src_qcq->intr.index;
- n_qcq->napi_qcq = src_qcq->napi_qcq;
}
static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
{
+ cpumask_var_t *affinity_mask;
int err;
if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
@@ -516,10 +523,19 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc
}
/* try to get the irq on the local numa node first */
- qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
- dev_to_node(lif->ionic->dev));
- if (qcq->intr.cpu != -1)
- cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
+ affinity_mask = &lif->ionic->affinity_masks[qcq->intr.index];
+ if (cpumask_empty(*affinity_mask)) {
+ unsigned int cpu;
+
+ cpu = cpumask_local_spread(qcq->intr.index,
+ dev_to_node(lif->ionic->dev));
+ if (cpu != -1)
+ cpumask_set_cpu(cpu, *affinity_mask);
+ }
+
+ qcq->intr.affinity_mask = affinity_mask;
+ qcq->intr.aff_notify.notify = ionic_irq_aff_notify;
+ qcq->intr.aff_notify.release = ionic_irq_aff_release;
netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
return 0;
@@ -676,6 +692,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
INIT_WORK(&new->dim.work, ionic_dim_work);
new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
+ if (lif->doorbell_wa)
+ INIT_WORK(&new->doorbell_napi_work, ionic_doorbell_napi_work);
*qcq = new;
@@ -834,11 +852,8 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE;
q->dbell_jiffies = jiffies;
- if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) {
+ if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi);
- qcq->napi_qcq = qcq;
- timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
- }
qcq->flags |= IONIC_QCQ_F_INITED;
@@ -911,9 +926,6 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
else
netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi);
- qcq->napi_qcq = qcq;
- timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
-
qcq->flags |= IONIC_QCQ_F_INITED;
return 0;
@@ -1168,7 +1180,6 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
struct ionic_dev *idev = &lif->ionic->idev;
unsigned long irqflags;
unsigned int flags = 0;
- bool resched = false;
int rx_work = 0;
int tx_work = 0;
int n_work = 0;
@@ -1184,6 +1195,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
ionic_adminq_service, NULL, NULL);
+
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
if (lif->hwstamp_rxq)
@@ -1191,7 +1203,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_rx_service, NULL, NULL);
if (lif->hwstamp_txq)
- tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget);
+ tx_work = ionic_tx_cq_service(&lif->hwstamp_txq->cq, budget, !!budget);
work_done = max(max(n_work, a_work), max(rx_work, tx_work));
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -1205,15 +1217,14 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget)
ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags);
}
- if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q))
- resched = true;
- if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q))
- resched = true;
- if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q))
- resched = true;
- if (resched)
- mod_timer(&lif->adminqcq->napi_deadline,
- jiffies + IONIC_NAPI_DEADLINE);
+ if (lif->doorbell_wa) {
+ if (!a_work)
+ ionic_adminq_poke_doorbell(&lif->adminqcq->q);
+ if (lif->hwstamp_rxq && !rx_work)
+ ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q);
+ if (lif->hwstamp_txq && !tx_work)
+ ionic_txq_poke_doorbell(&lif->hwstamp_txq->q);
+ }
return work_done;
}
@@ -1385,7 +1396,7 @@ static void ionic_ndo_set_rx_mode(struct net_device *netdev)
}
work->type = IONIC_DW_TYPE_RX_MODE;
netdev_dbg(lif->netdev, "deferred: rx_mode\n");
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
}
static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
@@ -1761,13 +1772,13 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
/* if we're not running, nothing more to do */
if (!netif_running(netdev)) {
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
return 0;
}
mutex_lock(&lif->queue_lock);
ionic_stop_queues_reconfig(lif);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
err = ionic_start_queues_reconfig(lif);
mutex_unlock(&lif->queue_lock);
@@ -3141,6 +3152,44 @@ err_out:
return err;
}
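+/* Per-interrupt affinity masks are owned by the ionic device rather
+ * than the qcqs, so user-tuned affinity can persist across queue
+ * reconfiguration.
+ */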
+static int ionic_affinity_masks_alloc(struct ionic *ionic)
+{
+ cpumask_var_t *affinity_masks;
+ int nintrs = ionic->nintrs;
+ int i;
+
+ affinity_masks = kcalloc(nintrs, sizeof(cpumask_var_t), GFP_KERNEL);
+ if (!affinity_masks)
+ return -ENOMEM;
+
+ for (i = 0; i < nintrs; i++) {
+ if (!zalloc_cpumask_var_node(&affinity_masks[i], GFP_KERNEL,
+ dev_to_node(ionic->dev)))
+ goto err_out;
+ }
+
+ ionic->affinity_masks = affinity_masks;
+
+ return 0;
+
+err_out:
+ for (--i; i >= 0; i--)
+ free_cpumask_var(affinity_masks[i]);
+ kfree(affinity_masks);
+
+ return -ENOMEM;
+}
+
+static void ionic_affinity_masks_free(struct ionic *ionic)
+{
+ int i;
+
+ for (i = 0; i < ionic->nintrs; i++)
+ free_cpumask_var(ionic->affinity_masks[i]);
+ kfree(ionic->affinity_masks);
+ ionic->affinity_masks = NULL;
+}
+
int ionic_lif_alloc(struct ionic *ionic)
{
struct device *dev = ionic->dev;
@@ -3232,11 +3281,15 @@ int ionic_lif_alloc(struct ionic *ionic)
ionic_debugfs_add_lif(lif);
+ err = ionic_affinity_masks_alloc(ionic);
+ if (err)
+ goto err_out_free_lif_info;
+
/* allocate control queues and txrx queue arrays */
ionic_lif_queue_identify(lif);
err = ionic_qcqs_alloc(lif);
if (err)
- goto err_out_free_lif_info;
+ goto err_out_free_affinity_masks;
/* allocate rss indirection table */
tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
@@ -3258,6 +3311,8 @@ int ionic_lif_alloc(struct ionic *ionic)
err_out_free_qcqs:
ionic_qcqs_free(lif);
+err_out_free_affinity_masks:
+ ionic_affinity_masks_free(lif->ionic);
err_out_free_lif_info:
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
lif->info = NULL;
@@ -3358,6 +3413,7 @@ int ionic_restart_lif(struct ionic_lif *lif)
clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
ionic_link_status_check_request(lif, CAN_SLEEP);
netif_device_attach(lif->netdev);
+ ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE);
return 0;
@@ -3388,6 +3444,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
* just need to reanimate it.
*/
ionic_init_devinfo(ionic);
+ ionic_reset(ionic);
err = ionic_identify(ionic);
if (err)
goto err_out;
@@ -3430,6 +3487,8 @@ void ionic_lif_free(struct ionic_lif *lif)
if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
ionic_lif_reset(lif);
+ ionic_affinity_masks_free(lif->ionic);
+
/* free lif info */
kfree(lif->identity);
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
@@ -3503,14 +3562,11 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif)
netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi);
- qcq->napi_qcq = qcq;
- timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0);
-
napi_enable(&qcq->napi);
if (qcq->flags & IONIC_QCQ_F_INTR) {
irq_set_affinity_hint(qcq->intr.vector,
- &qcq->intr.affinity_mask);
+ *qcq->intr.affinity_mask);
ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
IONIC_INTR_MASK_CLEAR);
}
@@ -3697,6 +3753,7 @@ int ionic_lif_init(struct ionic_lif *lif)
goto err_out_notifyq_deinit;
lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
+ lif->doorbell_wa = ionic_doorbell_wa(lif->ionic);
set_bit(IONIC_LIF_F_INITED, lif->state);
@@ -3731,7 +3788,7 @@ static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
},
};
- strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
+ strscpy(ctx.cmd.lif_setattr.name, netdev_name(lif->netdev),
sizeof(ctx.cmd.lif_setattr.name));
ionic_adminq_post_wait(lif, &ctx);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 08f4266fe2aa..3e1005293c4a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -84,12 +84,11 @@ struct ionic_qcq {
u32 cmb_pgid;
u32 cmb_order;
struct dim dim;
- struct timer_list napi_deadline;
struct ionic_queue q;
struct ionic_cq cq;
struct napi_struct napi;
- struct ionic_qcq *napi_qcq;
struct ionic_intr_info intr;
+ struct work_struct doorbell_napi_work;
struct dentry *dentry;
};
@@ -207,11 +206,12 @@ struct ionic_lif {
unsigned int nxqs;
unsigned int ntxq_descs;
unsigned int nrxq_descs;
- u32 rx_copybreak;
u64 rxq_features;
- u16 rx_mode;
u64 hw_features;
+ u16 rx_copybreak;
+ u16 rx_mode;
bool registered;
+ bool doorbell_wa;
u16 lif_type;
unsigned int link_down_count;
unsigned int nmcast;
@@ -226,11 +226,11 @@ struct ionic_lif {
u32 info_sz;
struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX];
- u16 rss_types;
u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE];
u8 *rss_ind_tbl;
dma_addr_t rss_ind_tbl_pa;
u32 rss_ind_tbl_sz;
+ u16 rss_types;
struct ionic_rx_filters rx_filters;
u32 rx_coalesce_usecs; /* what the user asked for */
@@ -333,7 +333,7 @@ static inline bool ionic_txq_hwstamp_enabled(struct ionic_queue *q)
void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep);
void ionic_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *ns);
-void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
+void ionic_lif_deferred_enqueue(struct ionic_lif *lif,
struct ionic_deferred_work *work);
int ionic_lif_alloc(struct ionic *ionic);
int ionic_lif_init(struct ionic_lif *lif);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index c1259324b0be..0f817c3f92d8 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -287,7 +287,7 @@ bool ionic_notifyq_service(struct ionic_cq *cq)
clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state);
} else {
work->type = IONIC_DW_TYPE_LIF_RESET;
- ionic_lif_deferred_enqueue(&lif->deferred, work);
+ ionic_lif_deferred_enqueue(lif, work);
}
}
break;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 5dba6d2d633c..fc79baad4561 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -23,7 +23,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_tx_desc_info *desc_info,
- struct ionic_txq_comp *comp);
+ struct ionic_txq_comp *comp,
+ bool in_napi);
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell)
{
@@ -480,6 +481,20 @@ int ionic_xdp_xmit(struct net_device *netdev, int n,
return nxmit;
}
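+/* Unmap and release all RX buffers (head page plus any frags) that
+ * backed an XDP frame once it has been handed off to XDP_TX or
+ * XDP_REDIRECT.
+ */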
+static void ionic_xdp_rx_put_bufs(struct ionic_queue *q,
+ struct ionic_buf_info *buf_info,
+ int nbufs)
+{
+ int i;
+
+ for (i = 0; i < nbufs; i++) {
+ dma_unmap_page(q->dev, buf_info->dma_addr,
+ IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
+ buf_info->page = NULL;
+ buf_info++;
+ }
+}
+
static bool ionic_run_xdp(struct ionic_rx_stats *stats,
struct net_device *netdev,
struct bpf_prog *xdp_prog,
@@ -493,6 +508,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
struct netdev_queue *nq;
struct xdp_frame *xdpf;
int remain_len;
+ int nbufs = 1;
int frag_len;
int err = 0;
@@ -502,7 +518,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
XDP_PACKET_HEADROOM, frag_len, false);
dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info),
- XDP_PACKET_HEADROOM, len,
+ XDP_PACKET_HEADROOM, frag_len,
DMA_FROM_DEVICE);
prefetchw(&xdp_buf.data_hard_start);
@@ -542,6 +558,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
if (page_is_pfmemalloc(bi->page))
xdp_buff_set_frag_pfmemalloc(&xdp_buf);
} while (remain_len > 0);
+ nbufs += sinfo->nr_frags;
}
xdp_action = bpf_prog_run_xdp(xdp_prog, &xdp_buf);
@@ -574,34 +591,28 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats,
goto out_xdp_abort;
}
- dma_unmap_page(rxq->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-
err = ionic_xdp_post_frame(txq, xdpf, XDP_TX,
buf_info->page,
buf_info->page_offset,
true);
__netif_tx_unlock(nq);
- if (err) {
+ if (unlikely(err)) {
netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err);
goto out_xdp_abort;
}
+ ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
stats->xdp_tx++;
/* the Tx completion will free the buffers */
break;
case XDP_REDIRECT:
- /* unmap the pages before handing them to a different device */
- dma_unmap_page(rxq->dev, buf_info->dma_addr,
- IONIC_PAGE_SIZE, DMA_FROM_DEVICE);
-
err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog);
- if (err) {
+ if (unlikely(err)) {
netdev_dbg(netdev, "xdp_do_redirect err %d\n", err);
goto out_xdp_abort;
}
- buf_info->page = NULL;
+ ionic_xdp_rx_put_bufs(rxq, buf_info, nbufs);
rxq->xdp_flush = true;
stats->xdp_redirect++;
break;
@@ -867,9 +878,6 @@ void ionic_rx_fill(struct ionic_queue *q)
q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE;
q->dbell_jiffies = jiffies;
-
- mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline,
- jiffies + IONIC_NAPI_DEADLINE);
}
void ionic_rx_empty(struct ionic_queue *q)
@@ -934,7 +942,7 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
u32 work_done = 0;
u32 flags = 0;
- work_done = ionic_tx_cq_service(cq, budget);
+ work_done = ionic_tx_cq_service(cq, budget, !!budget);
if (unlikely(!budget))
return budget;
@@ -952,8 +960,8 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- if (!work_done && ionic_txq_poke_doorbell(&qcq->q))
- mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+ if (!work_done && cq->bound_q->lif->doorbell_wa)
+ ionic_txq_poke_doorbell(&qcq->q);
return work_done;
}
@@ -995,8 +1003,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- if (!work_done && ionic_rxq_poke_doorbell(&qcq->q))
- mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+ if (!work_done && cq->bound_q->lif->doorbell_wa)
+ ionic_rxq_poke_doorbell(&qcq->q);
return work_done;
}
@@ -1009,7 +1017,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
struct ionic_qcq *txqcq;
struct ionic_lif *lif;
struct ionic_cq *txcq;
- bool resched = false;
u32 rx_work_done = 0;
u32 tx_work_done = 0;
u32 flags = 0;
@@ -1018,7 +1025,7 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
txqcq = lif->txqcqs[qi];
txcq = &lif->txqcqs[qi]->cq;
- tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT);
+ tx_work_done = ionic_tx_cq_service(txcq, IONIC_TX_BUDGET_DEFAULT, !!budget);
if (unlikely(!budget))
return budget;
@@ -1041,12 +1048,12 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done + rx_work_done, flags);
}
- if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q))
- resched = true;
- if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q))
- resched = true;
- if (resched)
- mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE);
+ if (lif->doorbell_wa) {
+ if (!rx_work_done)
+ ionic_rxq_poke_doorbell(&rxqcq->q);
+ if (!tx_work_done)
+ ionic_txq_poke_doorbell(&txqcq->q);
+ }
return rx_work_done;
}
@@ -1058,7 +1065,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
dma_addr_t dma_addr;
dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_addr)) {
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
net_warn_ratelimited("%s: DMA single map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
@@ -1075,7 +1082,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
dma_addr_t dma_addr;
dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_addr)) {
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
dev_name(dev), q->name);
q_to_tx_stats(q)->dma_map_err++;
@@ -1151,7 +1158,8 @@ static void ionic_tx_desc_unmap_bufs(struct ionic_queue *q,
static void ionic_tx_clean(struct ionic_queue *q,
struct ionic_tx_desc_info *desc_info,
- struct ionic_txq_comp *comp)
+ struct ionic_txq_comp *comp,
+ bool in_napi)
{
struct ionic_tx_stats *stats = q_to_tx_stats(q);
struct ionic_qcq *qcq = q_to_qcq(q);
@@ -1203,11 +1211,13 @@ static void ionic_tx_clean(struct ionic_queue *q,
desc_info->bytes = skb->len;
stats->clean++;
- napi_consume_skb(skb, 1);
+ napi_consume_skb(skb, likely(in_napi) ? 1 : 0);
}
static bool ionic_tx_service(struct ionic_cq *cq,
- unsigned int *total_pkts, unsigned int *total_bytes)
+ unsigned int *total_pkts,
+ unsigned int *total_bytes,
+ bool in_napi)
{
struct ionic_tx_desc_info *desc_info;
struct ionic_queue *q = cq->bound_q;
@@ -1229,7 +1239,7 @@ static bool ionic_tx_service(struct ionic_cq *cq,
desc_info->bytes = 0;
index = q->tail_idx;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, comp);
+ ionic_tx_clean(q, desc_info, comp, in_napi);
if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
@@ -1243,7 +1253,9 @@ static bool ionic_tx_service(struct ionic_cq *cq,
return true;
}
-unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
+unsigned int ionic_tx_cq_service(struct ionic_cq *cq,
+ unsigned int work_to_do,
+ bool in_napi)
{
unsigned int work_done = 0;
unsigned int bytes = 0;
@@ -1252,7 +1264,7 @@ unsigned int ionic_tx_cq_service(struct ionic_cq *cq, unsigned int work_to_do)
if (work_to_do == 0)
return 0;
- while (ionic_tx_service(cq, &pkts, &bytes)) {
+ while (ionic_tx_service(cq, &pkts, &bytes, in_napi)) {
if (cq->tail_idx == cq->num_descs - 1)
cq->done_color = !cq->done_color;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
@@ -1278,7 +1290,7 @@ void ionic_tx_flush(struct ionic_cq *cq)
{
u32 work_done;
- work_done = ionic_tx_cq_service(cq, cq->num_descs);
+ work_done = ionic_tx_cq_service(cq, cq->num_descs, false);
if (work_done)
ionic_intr_credits(cq->idev->intr_ctrl, cq->bound_intr->index,
work_done, IONIC_INTR_CRED_RESET_COALESCE);
@@ -1295,7 +1307,7 @@ void ionic_tx_empty(struct ionic_queue *q)
desc_info = &q->tx_info[q->tail_idx];
desc_info->bytes = 0;
q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
- ionic_tx_clean(q, desc_info, NULL);
+ ionic_tx_clean(q, desc_info, NULL, false);
if (desc_info->skb) {
pkts++;
bytes += desc_info->bytes;
@@ -1316,7 +1328,7 @@ static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
int err;
err = skb_cow_head(skb, 0);
- if (err)
+ if (unlikely(err))
return err;
if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
@@ -1340,7 +1352,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
int err;
err = skb_cow_head(skb, 0);
- if (err)
+ if (unlikely(err))
return err;
if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
@@ -1357,7 +1369,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
}
static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
- struct ionic_tx_desc_info *desc_info,
+ struct ionic_txq_desc *desc,
struct sk_buff *skb,
dma_addr_t addr, u8 nsge, u16 len,
unsigned int hdrlen, unsigned int mss,
@@ -1365,7 +1377,6 @@ static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q,
u16 vlan_tci, bool has_vlan,
bool start, bool done)
{
- struct ionic_txq_desc *desc = &q->txq[q->head_idx];
u8 flags = 0;
u64 cmd;
@@ -1445,7 +1456,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
err = ionic_tx_tcp_inner_pseudo_csum(skb);
else
err = ionic_tx_tcp_pseudo_csum(skb);
- if (err) {
+ if (unlikely(err)) {
/* clean up mapping from ionic_tx_map_skb */
ionic_tx_desc_unmap_bufs(q, desc_info);
return err;
@@ -1503,10 +1514,9 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q,
seg_rem = min(tso_rem, mss);
done = (tso_rem == 0);
/* post descriptor */
- ionic_tx_tso_post(netdev, q, desc_info, skb,
- desc_addr, desc_nsge, desc_len,
- hdrlen, mss, outer_csum, vlan_tci, has_vlan,
- start, done);
+ ionic_tx_tso_post(netdev, q, desc, skb, desc_addr, desc_nsge,
+ desc_len, hdrlen, mss, outer_csum, vlan_tci,
+ has_vlan, start, done);
start = false;
/* Buffer information is stored with the first tso descriptor */
desc_info = &q->tx_info[q->head_idx];
@@ -1731,7 +1741,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
linearize:
if (too_many_frags) {
err = skb_linearize(skb);
- if (err)
+ if (unlikely(err))
return err;
q_to_tx_stats(q)->linearize++;
}
@@ -1765,7 +1775,7 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb,
else
err = ionic_tx(netdev, q, skb);
- if (err)
+ if (unlikely(err))
goto err_out_drop;
return NETDEV_TX_OK;
@@ -1811,7 +1821,7 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
else
err = ionic_tx(netdev, q, skb);
- if (err)
+ if (unlikely(err))
goto err_out_drop;
return NETDEV_TX_OK;
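[Note: two themes run through the ionic_txrx.c hunks above. First, XDP RX buffers are now unmapped in one place, the new ionic_xdp_rx_put_bufs(), which also covers multi-frag packets via the nbufs count, and only after the TX post or redirect has succeeded. Second, the Tx clean path learns whether it runs under NAPI: napi_consume_skb() may only use its per-CPU bulk-free cache from NAPI poll context, and a budget of 0 tells it the caller is elsewhere (here ionic_tx_flush() and ionic_tx_empty()), making it fall back to the any-context free. The contract, sketched:

    /* budget != 0 is only legal from a NAPI poll handler */
    static void tx_clean_skb(struct sk_buff *skb, bool in_napi)
    {
            napi_consume_skb(skb, in_napi ? 1 : 0);
    }

The doorbell-poke hunks also drop the napi_deadline timer rearm and instead gate the poke on the new lif->doorbell_wa flag, so only hardware needing the workaround pays for it.]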
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
index 2fcbcecb41d1..fef4b2b0b1f2 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
@@ -571,9 +571,6 @@ static u64 ctx_addr_sig_regs[][3] = {
#define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1])
-#define lower32(x) ((u32)((x) & 0xffffffff))
-#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff))
-
static struct netxen_recv_crb recv_crb_registers[] = {
/* Instance 0 */
{
@@ -723,9 +720,9 @@ netxen_init_old_ctx(struct netxen_adapter *adapter)
NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;
NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
- lower32(recv_ctx->phys_addr));
+ lower_32_bits(recv_ctx->phys_addr));
NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
- upper32(recv_ctx->phys_addr));
+ upper_32_bits(recv_ctx->phys_addr));
NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
signature | port);
return 0;
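[Note: the removed lower32()/upper32() macros duplicate helpers the kernel already provides; lower_32_bits()/upper_32_bits() (historically linux/kernel.h, more recently linux/wordpart.h) also avoid the sign and width pitfalls of hand-rolled shifts. Usage is a drop-in swap:

    u64 pa = recv_ctx->phys_addr;

    NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port), lower_32_bits(pa));
    NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port), upper_32_bits(pa));
]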
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 6e12cd21ac90..89c8b2349694 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -960,7 +960,7 @@ int netxen_nic_change_mtu(struct net_device *netdev, int mtu)
rc = adapter->set_mtu(adapter, mtu);
if (!rc)
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return rc;
}
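[Note: this WRITE_ONCE(netdev->mtu, mtu) conversion repeats in several drivers below (qede, qlcnic, emac, rmnet, 8139cp, r8169, ravb). netdev->mtu is read locklessly on hot paths, so the setter annotates the store; lockless readers are expected to pair it with READ_ONCE() so the access stays tearing-free and KCSAN-clean. The reader side, sketched:

    unsigned int mtu = READ_ONCE(netdev->mtu); /* pairs with WRITE_ONCE() in the setter */

    if (len > mtu + ETH_HLEN)
            goto drop;      /* illustrative use */
]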
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 1d719726f72b..b7def3b54937 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -662,8 +662,6 @@ struct qed_hwfn {
};
struct pci_params {
- int pm_cap;
-
unsigned long mem_start;
unsigned long mem_end;
unsigned int irq;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index dad8e617c393..1adc7fbb3f2f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -132,7 +132,8 @@ static int qed_dl_param_get(struct devlink *dl, u32 id,
}
static int qed_dl_param_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct qed_devlink *qed_dl = devlink_priv(dl);
struct qed_dev *cdev;
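[Note: the devlink param .set callback gained a struct netlink_ext_ack * argument tree-wide. Implementations that have nothing to report can ignore it, but it lets validation failures reach the requesting userspace as extack messages instead of kernel log lines, e.g. (illustrative):

    static int foo_param_set(struct devlink *dl, u32 id,
                             struct devlink_param_gset_ctx *ctx,
                             struct netlink_ext_ack *extack)
    {
            if (!ctx->val.vbool) {  /* illustrative validation */
                    NL_SET_ERR_MSG_MOD(extack, "value not supported");
                    return -EOPNOTSUPP;
            }
            return 0;
    }
]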
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index c278f8893042..f915c423fe70 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -323,8 +323,7 @@ static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
goto err2;
}
- cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
+ if (IS_PF(cdev) && !pdev->pm_cap)
DP_NOTICE(cdev, "Cannot find power management capability\n");
rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64));
@@ -1206,7 +1205,6 @@ out:
static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
struct qed_hwfn *hwfn;
- char name[NAME_SIZE];
int i;
if (IS_VF(cdev))
@@ -1215,11 +1213,11 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i];
- snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
- cdev->pdev->bus->number,
- PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+ hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
+ 0, 0, cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn),
+ hwfn->abs_pf_id);
- hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
if (!hwfn->slowpath_wq) {
DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
return -ENOMEM;
@@ -1351,7 +1349,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
(params->drv_rev << 8) |
(params->drv_eng);
strscpy(drv_version.name, params->name,
- MCP_DRV_VER_STR_SIZE - 4);
+ sizeof(drv_version.name));
rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
if (rc) {
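[Note: three independent cleanups land in qed_main.c. The cached pci_params.pm_cap is dropped in favour of pdev->pm_cap, which the PCI core already discovers and saves; the drv_version.name copy bounds itself with sizeof() of the actual destination instead of a related constant; and the slowpath workqueue name is built by alloc_workqueue() itself, which takes a printf-style format, removing the local buffer and snprintf() step:

    /* fmt + args replace snprintf() into a stack buffer (args illustrative) */
    wq = alloc_workqueue("slowpath-%02x:%02x.%02x", 0, 0,
                         bus_nr, slot_nr, pf_id);
]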
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index ae3ebf0cf999..97b059be1041 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -1026,7 +1026,7 @@ static int qede_get_regs_len(struct net_device *ndev)
static void qede_update_mtu(struct qede_dev *edev,
struct qede_reload_args *args)
{
- edev->ndev->mtu = args->u.mtu;
+ WRITE_ONCE(edev->ndev->mtu, args->u.mtu);
}
/* Netdevice NDOs */
@@ -1137,7 +1137,7 @@ static int qede_set_channels(struct net_device *dev,
}
static int qede_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct qede_dev *edev = netdev_priv(dev);
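[Note: ethtool timestamping info moved to the kernel-internal struct kernel_ethtool_ts_info, so every get_ts_info implementation updates its signature (here, in qede_ptp.c/.h below, and in ravb further down). The fields drivers fill are unchanged; a minimal software-timestamping stub, as a sketch:

    static int foo_get_ts_info(struct net_device *dev,
                               struct kernel_ethtool_ts_info *info)
    {
            info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE |
                                    SOF_TIMESTAMPING_SOFTWARE;
            info->phc_index = -1;   /* no PTP hardware clock */
            return 0;
    }
]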
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index cb6b33a228ea..985026dd816f 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -1520,8 +1520,8 @@ static int qede_flow_spec_validate_unused(struct qede_dev *edev,
return 0;
}
-static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
- struct qede_arfs_tuple *t)
+static int qede_set_v4_tuple_to_profile(struct qede_arfs_tuple *t,
+ struct netlink_ext_ack *extack)
{
/* We must have Only 4-tuples/l4 port/src ip/dst ip
* as an input.
@@ -1538,7 +1538,7 @@ static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
t->dst_ipv4 && !t->src_ipv4) {
t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
} else {
- DP_INFO(edev, "Invalid N-tuple\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid N-tuple");
return -EOPNOTSUPP;
}
@@ -1549,9 +1549,9 @@ static int qede_set_v4_tuple_to_profile(struct qede_dev *edev,
return 0;
}
-static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
- struct qede_arfs_tuple *t,
- struct in6_addr *zaddr)
+static int qede_set_v6_tuple_to_profile(struct qede_arfs_tuple *t,
+ struct in6_addr *zaddr,
+ struct netlink_ext_ack *extack)
{
/* We must have Only 4-tuples/l4 port/src ip/dst ip
* as an input.
@@ -1573,7 +1573,7 @@ static int qede_set_v6_tuple_to_profile(struct qede_dev *edev,
!memcmp(&t->src_ipv6, zaddr, sizeof(struct in6_addr))) {
t->mode = QED_FILTER_CONFIG_MODE_IP_DEST;
} else {
- DP_INFO(edev, "Invalid N-tuple\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid N-tuple");
return -EOPNOTSUPP;
}
@@ -1671,7 +1671,7 @@ static int qede_parse_actions(struct qede_dev *edev,
int i;
if (!flow_action_has_entries(flow_action)) {
- DP_NOTICE(edev, "No actions received\n");
+ NL_SET_ERR_MSG_MOD(extack, "No actions received");
return -EINVAL;
}
@@ -1687,7 +1687,8 @@ static int qede_parse_actions(struct qede_dev *edev,
break;
if (act->queue.index >= QEDE_RSS_COUNT(edev)) {
- DP_INFO(edev, "Queue out-of-bounds\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Queue out-of-bounds");
return -EINVAL;
}
break;
@@ -1700,8 +1701,8 @@ static int qede_parse_actions(struct qede_dev *edev,
}
static int
-qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *t)
+qede_flow_parse_ports(struct flow_rule *rule, struct qede_arfs_tuple *t,
+ struct netlink_ext_ack *extack)
{
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
@@ -1709,7 +1710,8 @@ qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
flow_rule_match_ports(rule, &match);
if ((match.key->src && match.mask->src != htons(U16_MAX)) ||
(match.key->dst && match.mask->dst != htons(U16_MAX))) {
- DP_NOTICE(edev, "Do not support ports masks\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Do not support ports masks");
return -EINVAL;
}
@@ -1721,10 +1723,12 @@ qede_flow_parse_ports(struct qede_dev *edev, struct flow_rule *rule,
}
static int
-qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *t)
+qede_flow_parse_v6_common(struct flow_rule *rule,
+ struct qede_arfs_tuple *t,
+ struct netlink_ext_ack *extack)
{
struct in6_addr zero_addr, addr;
+ int err;
memset(&zero_addr, 0, sizeof(addr));
memset(&addr, 0xff, sizeof(addr));
@@ -1737,8 +1741,8 @@ qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
memcmp(&match.mask->src, &addr, sizeof(addr))) ||
(memcmp(&match.key->dst, &zero_addr, sizeof(addr)) &&
memcmp(&match.mask->dst, &addr, sizeof(addr)))) {
- DP_NOTICE(edev,
- "Do not support IPv6 address prefix/mask\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Do not support IPv6 address prefix/mask");
return -EINVAL;
}
@@ -1746,23 +1750,28 @@ qede_flow_parse_v6_common(struct qede_dev *edev, struct flow_rule *rule,
memcpy(&t->dst_ipv6, &match.key->dst, sizeof(addr));
}
- if (qede_flow_parse_ports(edev, rule, t))
- return -EINVAL;
+ err = qede_flow_parse_ports(rule, t, extack);
+ if (err)
+ return err;
- return qede_set_v6_tuple_to_profile(edev, t, &zero_addr);
+ return qede_set_v6_tuple_to_profile(t, &zero_addr, extack);
}
static int
-qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *t)
+qede_flow_parse_v4_common(struct flow_rule *rule,
+ struct qede_arfs_tuple *t,
+ struct netlink_ext_ack *extack)
{
+ int err;
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
struct flow_match_ipv4_addrs match;
flow_rule_match_ipv4_addrs(rule, &match);
if ((match.key->src && match.mask->src != htonl(U32_MAX)) ||
(match.key->dst && match.mask->dst != htonl(U32_MAX))) {
- DP_NOTICE(edev, "Do not support ipv4 prefix/masks\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Do not support ipv4 prefix/masks");
return -EINVAL;
}
@@ -1770,55 +1779,57 @@ qede_flow_parse_v4_common(struct qede_dev *edev, struct flow_rule *rule,
t->dst_ipv4 = match.key->dst;
}
- if (qede_flow_parse_ports(edev, rule, t))
- return -EINVAL;
+ err = qede_flow_parse_ports(rule, t, extack);
+ if (err)
+ return err;
- return qede_set_v4_tuple_to_profile(edev, t);
+ return qede_set_v4_tuple_to_profile(t, extack);
}
static int
-qede_flow_parse_tcp_v6(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *tuple)
+qede_flow_parse_tcp_v6(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
tuple->ip_proto = IPPROTO_TCP;
tuple->eth_proto = htons(ETH_P_IPV6);
- return qede_flow_parse_v6_common(edev, rule, tuple);
+ return qede_flow_parse_v6_common(rule, tuple, extack);
}
static int
-qede_flow_parse_tcp_v4(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *tuple)
+qede_flow_parse_tcp_v4(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
tuple->ip_proto = IPPROTO_TCP;
tuple->eth_proto = htons(ETH_P_IP);
- return qede_flow_parse_v4_common(edev, rule, tuple);
+ return qede_flow_parse_v4_common(rule, tuple, extack);
}
static int
-qede_flow_parse_udp_v6(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *tuple)
+qede_flow_parse_udp_v6(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
tuple->ip_proto = IPPROTO_UDP;
tuple->eth_proto = htons(ETH_P_IPV6);
- return qede_flow_parse_v6_common(edev, rule, tuple);
+ return qede_flow_parse_v6_common(rule, tuple, extack);
}
static int
-qede_flow_parse_udp_v4(struct qede_dev *edev, struct flow_rule *rule,
- struct qede_arfs_tuple *tuple)
+qede_flow_parse_udp_v4(struct flow_rule *rule, struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
tuple->ip_proto = IPPROTO_UDP;
tuple->eth_proto = htons(ETH_P_IP);
- return qede_flow_parse_v4_common(edev, rule, tuple);
+ return qede_flow_parse_v4_common(rule, tuple, extack);
}
static int
-qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
- struct flow_rule *rule, struct qede_arfs_tuple *tuple)
+qede_parse_flow_attr(__be16 proto, struct flow_rule *rule,
+ struct qede_arfs_tuple *tuple,
+ struct netlink_ext_ack *extack)
{
struct flow_dissector *dissector = rule->match.dissector;
int rc = -EINVAL;
@@ -1832,14 +1843,18 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
BIT_ULL(FLOW_DISSECTOR_KEY_PORTS))) {
- DP_NOTICE(edev, "Unsupported key set:0x%llx\n",
- dissector->used_keys);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported key used: 0x%llx",
+ dissector->used_keys);
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (proto != htons(ETH_P_IP) &&
proto != htons(ETH_P_IPV6)) {
- DP_NOTICE(edev, "Unsupported proto=0x%x\n", proto);
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported proto=0x%x",
+ proto);
return -EPROTONOSUPPORT;
}
@@ -1851,15 +1866,15 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
}
if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IP))
- rc = qede_flow_parse_tcp_v4(edev, rule, tuple);
+ rc = qede_flow_parse_tcp_v4(rule, tuple, extack);
else if (ip_proto == IPPROTO_TCP && proto == htons(ETH_P_IPV6))
- rc = qede_flow_parse_tcp_v6(edev, rule, tuple);
+ rc = qede_flow_parse_tcp_v6(rule, tuple, extack);
else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IP))
- rc = qede_flow_parse_udp_v4(edev, rule, tuple);
+ rc = qede_flow_parse_udp_v4(rule, tuple, extack);
else if (ip_proto == IPPROTO_UDP && proto == htons(ETH_P_IPV6))
- rc = qede_flow_parse_udp_v6(edev, rule, tuple);
+ rc = qede_flow_parse_udp_v6(rule, tuple, extack);
else
- DP_NOTICE(edev, "Invalid protocol request\n");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid protocol request");
return rc;
}
@@ -1867,6 +1882,7 @@ qede_parse_flow_attr(struct qede_dev *edev, __be16 proto,
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
struct flow_cls_offload *f)
{
+ struct netlink_ext_ack *extack = f->common.extack;
struct qede_arfs_fltr_node *n;
struct qede_arfs_tuple t;
int min_hlen, rc;
@@ -1879,7 +1895,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse flower attribute and prepare filter */
- rc = qede_parse_flow_attr(edev, proto, f->rule, &t);
+ rc = qede_parse_flow_attr(proto, f->rule, &t, extack);
if (rc)
goto unlock;
@@ -1894,7 +1910,7 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
}
/* parse tc actions and get the vf_id */
- rc = qede_parse_actions(edev, &f->rule->action, f->common.extack);
+ rc = qede_parse_actions(edev, &f->rule->action, extack);
if (rc)
goto unlock;
@@ -1941,8 +1957,11 @@ unlock:
static int qede_flow_spec_validate(struct qede_dev *edev,
struct flow_action *flow_action,
struct qede_arfs_tuple *t,
- __u32 location)
+ __u32 location,
+ struct netlink_ext_ack *extack)
{
+ int err;
+
if (location >= QEDE_RFS_MAX_FLTR) {
DP_INFO(edev, "Location out-of-bounds\n");
return -EINVAL;
@@ -1963,8 +1982,9 @@ static int qede_flow_spec_validate(struct qede_dev *edev,
return -EINVAL;
}
- if (qede_parse_actions(edev, flow_action, NULL))
- return -EINVAL;
+ err = qede_parse_actions(edev, flow_action, extack);
+ if (err)
+ return err;
return 0;
}
@@ -1975,11 +1995,13 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
{
struct ethtool_rx_flow_spec_input input = {};
struct ethtool_rx_flow_rule *flow;
+ struct netlink_ext_ack extack;
__be16 proto;
- int err = 0;
+ int err;
- if (qede_flow_spec_validate_unused(edev, fs))
- return -EOPNOTSUPP;
+ err = qede_flow_spec_validate_unused(edev, fs);
+ if (err)
+ return err;
switch ((fs->flow_type & ~FLOW_EXT)) {
case TCP_V4_FLOW:
@@ -2001,14 +2023,16 @@ static int qede_flow_spec_to_rule(struct qede_dev *edev,
if (IS_ERR(flow))
return PTR_ERR(flow);
- err = qede_parse_flow_attr(edev, proto, flow->rule, t);
+ err = qede_parse_flow_attr(proto, flow->rule, t, &extack);
if (err)
goto err_out;
/* Make sure location is valid and filter isn't already set */
err = qede_flow_spec_validate(edev, &flow->rule->action, t,
- fs->location);
+ fs->location, &extack);
err_out:
+ if (extack._msg)
+ DP_NOTICE(edev, "%s\n", extack._msg);
ethtool_rx_flow_rule_destroy(flow);
return err;
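[Note: the qede_filter.c conversion threads a struct netlink_ext_ack through the whole flower/ntuple parse chain, replacing DP_NOTICE()/DP_INFO() with NL_SET_ERR_MSG_MOD() so errors reach the user who issued the tc or ethtool command; the now-unused edev parameters fall away as a result. The legacy ethtool path has no netlink request to attach messages to, so qede_flow_spec_to_rule() records into an on-stack extack and forwards any message to the kernel log; for the `extack._msg` check at the end to be well-defined, the struct has to start zeroed, sketched here as:

    struct netlink_ext_ack extack = {};     /* zeroed so _msg starts NULL */

    err = qede_parse_flow_attr(proto, flow->rule, t, &extack);
    ...
    if (extack._msg)
            DP_NOTICE(edev, "%s\n", extack._msg);
]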
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 747cc5e2bb78..63e3dac4d5f7 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -321,7 +321,7 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr)
sizeof(config)) ? -EFAULT : 0;
}
-int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *info)
{
struct qede_ptp *ptp = edev->ptp;
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
index 1db0f021c645..adafc894797e 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h
@@ -17,7 +17,7 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb);
int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req);
void qede_ptp_disable(struct qede_dev *edev);
int qede_ptp_enable(struct qede_dev *edev);
-int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts);
+int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *ts);
static inline void qede_ptp_record_rx_ts(struct qede_dev *edev,
union eth_rx_cqe *cqe,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 4b8bc46f55c2..ae4ee0326ee1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -1015,7 +1015,7 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
if (!rc)
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
return rc;
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 4c06f55878de..99d4647bf245 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -216,7 +216,7 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu)
netif_dbg(adpt, hw, adpt->netdev,
"changing MTU from %d to %d\n", netdev->mtu,
new_mtu);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (netif_running(netdev))
return emac_reinit_locked(adpt);
diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c
index ff3b89e9028e..ad06da0fdaa0 100644
--- a/drivers/net/ethernet/qualcomm/qca_debug.c
+++ b/drivers/net/ethernet/qualcomm/qca_debug.c
@@ -98,10 +98,8 @@ qcaspi_info_show(struct seq_file *s, void *what)
seq_printf(s, "IRQ : %d\n",
qca->spi_dev->irq);
- seq_printf(s, "INTR REQ : %u\n",
- qca->intr_req);
- seq_printf(s, "INTR SVC : %u\n",
- qca->intr_svc);
+ seq_printf(s, "INTR : %lx\n",
+ qca->intr);
seq_printf(s, "SPI max speed : %lu\n",
(unsigned long)qca->spi_dev->max_speed_hz);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5799ecc88a87..8f7ce6b51a1c 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -35,6 +35,8 @@
#define MAX_DMA_BURST_LEN 5000
+#define SPI_INTR 0
+
/* Modules parameters */
#define QCASPI_CLK_SPEED_MIN 1000000
#define QCASPI_CLK_SPEED_MAX 16000000
@@ -579,14 +581,14 @@ qcaspi_spi_thread(void *data)
continue;
}
- if ((qca->intr_req == qca->intr_svc) &&
+ if (!test_bit(SPI_INTR, &qca->intr) &&
!qca->txr.skb[qca->txr.head])
schedule();
set_current_state(TASK_RUNNING);
- netdev_dbg(qca->net_dev, "have work to do. int: %d, tx_skb: %p\n",
- qca->intr_req - qca->intr_svc,
+ netdev_dbg(qca->net_dev, "have work to do. int: %lu, tx_skb: %p\n",
+ qca->intr,
qca->txr.skb[qca->txr.head]);
qcaspi_qca7k_sync(qca, QCASPI_EVENT_UPDATE);
@@ -600,8 +602,7 @@ qcaspi_spi_thread(void *data)
msleep(QCASPI_QCA7K_REBOOT_TIME_MS);
}
- if (qca->intr_svc != qca->intr_req) {
- qca->intr_svc = qca->intr_req;
+ if (test_and_clear_bit(SPI_INTR, &qca->intr)) {
start_spi_intr_handling(qca, &intr_cause);
if (intr_cause & SPI_INT_CPU_ON) {
@@ -663,7 +664,7 @@ qcaspi_intr_handler(int irq, void *data)
{
struct qcaspi *qca = data;
- qca->intr_req++;
+ set_bit(SPI_INTR, &qca->intr);
if (qca->spi_thread)
wake_up_process(qca->spi_thread);
@@ -679,8 +680,7 @@ qcaspi_netdev_open(struct net_device *dev)
if (!qca)
return -EINVAL;
- qca->intr_req = 1;
- qca->intr_svc = 0;
+ set_bit(SPI_INTR, &qca->intr);
qca->sync = QCASPI_SYNC_UNKNOWN;
qcafrm_fsm_init_spi(&qca->frm_handle);
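[Note: the intr_req/intr_svc counter pair (incremented in the IRQ handler, compared and copied back in the SPI thread) collapses into a single bit in an unsigned long, manipulated with atomic bitops. test_and_clear_bit() makes the check-and-consume step a single atomic operation, so no counter arithmetic can race. The producer/consumer shape:

    /* producer: hard IRQ context */
    set_bit(SPI_INTR, &qca->intr);
    wake_up_process(qca->spi_thread);

    /* consumer: kthread loop */
    if (test_and_clear_bit(SPI_INTR, &qca->intr))
            handle_spi_interrupt(qca);      /* illustrative name */
]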
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h
index d59cb2352cee..8f4808695e82 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.h
+++ b/drivers/net/ethernet/qualcomm/qca_spi.h
@@ -81,8 +81,7 @@ struct qcaspi {
struct qcafrm_handle frm_handle;
struct sk_buff *rx_skb;
- unsigned int intr_req;
- unsigned int intr_svc;
+ unsigned long intr;
u16 reset_count;
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 9d2a9562c96f..f1e40aade127 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -90,7 +90,7 @@ static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
new_mtu > (priv->real_dev->mtu - headroom))
return -EINVAL;
- rmnet_dev->mtu = new_mtu;
+ WRITE_ONCE(rmnet_dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index f5786d78ed23..5652da8a178c 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1277,14 +1277,14 @@ static int cp_change_mtu(struct net_device *dev, int new_mtu)
/* if network interface not up, no need for complexity */
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
cp_set_rxbufsize(cp); /* set new rx buf size */
return 0;
}
/* network IS up, close it, reset MTU, and come up again. */
cp_close(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
cp_set_rxbufsize(cp);
return cp_open(dev);
}
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 0fc5fe564ae5..3507c2e28110 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1608,7 +1608,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
if (!tp->dash_enabled) {
rtl_set_d3_pll_down(tp, !wolopts);
- tp->dev->wol_enabled = wolopts ? 1 : 0;
+ tp->dev->ethtool->wol_enabled = wolopts ? 1 : 0;
}
}
@@ -2229,6 +2229,8 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
* the wild. Let's disable detection.
* { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 },
*/
+ /* Realtek calls it RTL8168M, but it's handled like RTL8168H */
+ { 0x7cf, 0x6c0, RTL_GIGA_MAC_VER_46 },
/* 8168G family. */
{ 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 },
@@ -2272,7 +2274,9 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168B family. */
{ 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 },
- { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
+ /* This one is very old and rare, let's see if anybody complains.
+ * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 },
+ */
/* 8101 family. */
{ 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 },
@@ -3922,7 +3926,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
{
struct rtl8169_private *tp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
netdev_update_features(dev);
rtl_jumbo_config(tp);
rtl_set_eee_txidle_timer(tp);
@@ -4335,17 +4339,18 @@ static void rtl8169_doorbell(struct rtl8169_private *tp)
static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- unsigned int frags = skb_shinfo(skb)->nr_frags;
struct rtl8169_private *tp = netdev_priv(dev);
unsigned int entry = tp->cur_tx % NUM_TX_DESC;
struct TxDesc *txd_first, *txd_last;
bool stop_queue, door_bell;
+ unsigned int frags;
u32 opts[2];
if (unlikely(!rtl_tx_slots_avail(tp))) {
if (net_ratelimit())
netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
- goto err_stop_0;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
}
opts[1] = rtl8169_tx_vlan_tag(skb);
@@ -4362,6 +4367,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
txd_first = tp->TxDescArray + entry;
+ frags = skb_shinfo(skb)->nr_frags;
if (frags) {
if (rtl8169_xmit_frags(tp, skb, opts, entry))
goto err_dma_1;
@@ -4400,11 +4406,6 @@ err_dma_0:
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
-
-err_stop_0:
- netif_stop_queue(dev);
- dev->stats.tx_dropped++;
- return NETDEV_TX_BUSY;
}
static unsigned int rtl_last_frag_len(struct sk_buff *skb)
@@ -4655,10 +4656,8 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
}
- if (napi_schedule_prep(&tp->napi)) {
- rtl_irq_disable(tp);
- __napi_schedule(&tp->napi);
- }
+ rtl_irq_disable(tp);
+ napi_schedule(&tp->napi);
out:
rtl_ack_events(tp, status);
@@ -5085,12 +5084,10 @@ static void rtl_set_irq_mask(struct rtl8169_private *tp)
tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg;
if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
- tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver;
+ tp->irq_mask |= SYSErr | RxFIFOOver;
else if (tp->mac_version == RTL_GIGA_MAC_VER_11)
/* special workaround needed */
tp->irq_mask |= RxFIFOOver;
- else
- tp->irq_mask |= RxOverflow;
}
static int rtl_alloc_irq(struct rtl8169_private *tp)
@@ -5104,7 +5101,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
rtl_lock_config_regs(tp);
fallthrough;
case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
- flags = PCI_IRQ_LEGACY;
+ flags = PCI_IRQ_INTX;
break;
default:
flags = PCI_IRQ_ALL_TYPES;
@@ -5477,7 +5474,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_set_d3_pll_down(tp, true);
} else {
rtl_set_d3_pll_down(tp, false);
- dev->wol_enabled = 1;
+ dev->ethtool->wol_enabled = 1;
}
jumbo_max = rtl_jumbo_max(tp);
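[Note: several independent r8169 changes sit in the hunks above: wol_enabled moves under dev->ethtool, one MAC-version match is added (Realtek's RTL8168M, handled like RTL8168H) while the rare VER_11 entry is commented out pending reports, the nr_frags read is deferred to just before its use, the interrupt handler uses plain napi_schedule() (which already does the prep/disable dance), PCI_IRQ_LEGACY becomes the renamed PCI_IRQ_INTX, and RxOverflow is dropped from the IRQ mask on most chips. The full-ring change is a correctness fix: returning NETDEV_TX_BUSY means the skb was not consumed and the core requeues it, so it must not be counted in tx_dropped:

    if (unlikely(!tx_slots_available(tp))) {        /* illustrative helper */
            netif_stop_queue(dev);
            return NETDEV_TX_BUSY;  /* skb stays queued, not dropped */
    }
]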
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index b03fae7a0f72..9b7559c88bee 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -33,6 +33,7 @@ config RAVB
select CRC32
select MII
select MDIO_BITBANG
+ select PAGE_POOL
select PHYLIB
select RESET_CONTROLLER
help
@@ -58,4 +59,14 @@ config RENESAS_GEN4_PTP
help
Renesas R-Car Gen4 gPTP device driver.
+config RTSN
+ tristate "Renesas Ethernet-TSN support"
+ depends on ARCH_RENESAS || COMPILE_TEST
+ depends on PTP_1588_CLOCK
+ select CRC32
+ select PHYLIB
+ select RENESAS_GEN4_PTP
+ help
+ Renesas Ethernet-TSN device driver.
+
endif # NET_VENDOR_RENESAS
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile
index 9070acfd6aaf..f65fc76f8b4d 100644
--- a/drivers/net/ethernet/renesas/Makefile
+++ b/drivers/net/ethernet/renesas/Makefile
@@ -11,3 +11,5 @@ obj-$(CONFIG_RAVB) += ravb.o
obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch.o
obj-$(CONFIG_RENESAS_GEN4_PTP) += rcar_gen4_ptp.o
+
+obj-$(CONFIG_RTSN) += rtsn.o
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b48935ec7e28..9893c91af105 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -19,6 +19,7 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/ptp_clock_kernel.h>
+#include <net/page_pool/types.h>
#define BE_TX_RING_SIZE 64 /* TX ring size for Best Effort */
#define BE_RX_RING_SIZE 1024 /* RX ring size for Best Effort */
@@ -257,6 +258,7 @@ enum APSR_BIT {
APSR_CMSW = 0x00000010,
APSR_RDM = 0x00002000,
APSR_TDM = 0x00004000,
+ APSR_MIISELECT = 0x01000000, /* R-Car V4M only */
};
/* RCR */
@@ -1039,7 +1041,7 @@ struct ravb_ptp {
};
struct ravb_hw_info {
- bool (*receive)(struct net_device *ndev, int *quota, int q);
+ int (*receive)(struct net_device *ndev, int budget, int q);
void (*set_rate)(struct net_device *ndev);
int (*set_feature)(struct net_device *ndev, netdev_features_t features);
int (*dmac_init)(struct net_device *ndev);
@@ -1051,9 +1053,10 @@ struct ravb_hw_info {
int stats_len;
u32 tccr_mask;
u32 rx_max_frame_size;
- u32 rx_max_desc_use;
+ u32 rx_buffer_size;
u32 rx_desc_size;
unsigned aligned_tx: 1;
+ unsigned coalesce_irqs:1; /* Needs software IRQ coalescing */
/* hardware features */
unsigned internal_delay:1; /* AVB-DMAC has internal delays */
@@ -1070,6 +1073,11 @@ struct ravb_hw_info {
unsigned half_duplex:1; /* E-MAC supports half duplex mode */
};
+struct ravb_rx_buffer {
+ struct page *page;
+ unsigned int offset;
+};
+
struct ravb_private {
struct net_device *ndev;
struct platform_device *pdev;
@@ -1093,7 +1101,8 @@ struct ravb_private {
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
struct sk_buff *rx_1st_skb;
- struct sk_buff **rx_skb[NUM_RX_QUEUE];
+ struct page_pool *rx_pool[NUM_RX_QUEUE];
+ struct ravb_rx_buffer *rx_buffers[NUM_RX_QUEUE];
struct sk_buff **tx_skb[NUM_TX_QUEUE];
u32 rx_over_errors;
u32 rx_fifo_errors;
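[Note: these ravb.h changes set up the ravb_main.c conversion that follows. The per-descriptor sk_buff ring becomes a page_pool plus a small ravb_rx_buffer {page, offset} array; the receive hook moves from the quota-in/bool-out form to the conventional budget-in/work-done-out NAPI shape; and rx_max_desc_use is generalized into rx_buffer_size, which must reserve tail room for skb_shared_info since completed buffers are turned straight into skbs. The RX consume path the driver adopts, in outline (a sketch without the full error handling):

    u32 size = info->rx_buffer_size;

    /* fill side: pool pages are DMA-mapped once by the page_pool */
    page = page_pool_alloc(pool, &offset, &size, GFP_ATOMIC);

    /* completion side */
    dma_sync_single_for_cpu(dev, dma_addr, pkt_len, DMA_FROM_DEVICE);
    skb = napi_build_skb(page_address(page) + offset, info->rx_buffer_size);
    skb_mark_for_recycle(skb);      /* freeing the skb recycles the page */
    skb_put(skb, pkt_len);
    napi_gro_receive(napi, skb);

Multi-descriptor gbeth packets attach the follow-on pages as frags via skb_add_rx_frag() instead of memcpying into one linear skb, which is why ravb_rx_csum_gbeth() learns to find the trailing hardware checksum in the last frag.]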
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 9b1f639f64a1..c02fb296bf7d 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -30,6 +30,7 @@
#include <linux/reset.h>
#include <linux/math64.h>
#include <net/ip.h>
+#include <net/page_pool/helpers.h>
#include "ravb.h"
@@ -113,25 +114,6 @@ static void ravb_set_rate_rcar(struct net_device *ndev)
}
}
-static struct sk_buff *
-ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info,
- gfp_t gfp_mask)
-{
- struct sk_buff *skb;
- u32 reserve;
-
- skb = __netdev_alloc_skb(ndev, info->rx_max_frame_size + RAVB_ALIGN - 1,
- gfp_mask);
- if (!skb)
- return NULL;
-
- reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1);
- if (reserve)
- skb_reserve(skb, RAVB_ALIGN - reserve);
-
- return skb;
-}
-
/* Get MAC address from the MAC address registers
*
* Ethernet AVB device doesn't have ROM for MAC address.
@@ -257,21 +239,10 @@ static void ravb_rx_ring_free(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
- unsigned int i;
if (!priv->rx_ring[q].raw)
return;
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i);
-
- if (!dma_mapping_error(ndev->dev.parent,
- le32_to_cpu(desc->dptr)))
- dma_unmap_single(ndev->dev.parent,
- le32_to_cpu(desc->dptr),
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
- }
ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw,
priv->rx_desc_dma[q]);
@@ -298,13 +269,16 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_ring[q] = NULL;
}
- /* Free RX skb ringbuffer */
- if (priv->rx_skb[q]) {
- for (i = 0; i < priv->num_rx_ring[q]; i++)
- dev_kfree_skb(priv->rx_skb[q][i]);
+ /* Free RX buffers */
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ if (priv->rx_buffers[q][i].page)
+ page_pool_put_page(priv->rx_pool[q],
+ priv->rx_buffers[q][i].page,
+ 0, true);
}
- kfree(priv->rx_skb[q]);
- priv->rx_skb[q] = NULL;
+ kfree(priv->rx_buffers[q]);
+ priv->rx_buffers[q] = NULL;
+ page_pool_destroy(priv->rx_pool[q]);
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
@@ -317,35 +291,64 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
}
-static void ravb_rx_ring_format(struct net_device *ndev, int q)
+static int
+ravb_alloc_rx_buffer(struct net_device *ndev, int q, u32 entry, gfp_t gfp_mask,
+ struct ravb_rx_desc *rx_desc)
{
struct ravb_private *priv = netdev_priv(ndev);
- struct ravb_rx_desc *rx_desc;
- unsigned int rx_ring_size;
+ const struct ravb_hw_info *info = priv->info;
+ struct ravb_rx_buffer *rx_buff;
dma_addr_t dma_addr;
- unsigned int i;
+ unsigned int size;
- rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
- memset(priv->rx_ring[q].raw, 0, rx_ring_size);
- /* Build RX ring buffer */
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- /* RX descriptor */
- rx_desc = ravb_rx_get_desc(priv, q, i);
- rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
- dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
+ rx_buff = &priv->rx_buffers[q][entry];
+ size = info->rx_buffer_size;
+ rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset,
+ &size, gfp_mask);
+ if (unlikely(!rx_buff->page)) {
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
*/
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- rx_desc->ds_cc = cpu_to_le16(0);
- rx_desc->dptr = cpu_to_le32(dma_addr);
+ rx_desc->ds_cc = cpu_to_le16(0);
+ return -ENOMEM;
+ }
+
+ dma_addr = page_pool_get_dma_addr(rx_buff->page) + rx_buff->offset;
+ dma_sync_single_for_device(ndev->dev.parent, dma_addr,
+ info->rx_buffer_size, DMA_FROM_DEVICE);
+ rx_desc->dptr = cpu_to_le32(dma_addr);
+
+ /* The end of the RX buffer is used to store skb shared data, so we need
+ * to ensure that the hardware leaves enough space for this.
+ */
+ rx_desc->ds_cc = cpu_to_le16(info->rx_buffer_size -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) -
+ ETH_FCS_LEN + sizeof(__sum16));
+ return 0;
+}
+
+static u32
+ravb_rx_ring_refill(struct net_device *ndev, int q, u32 count, gfp_t gfp_mask)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_rx_desc *rx_desc;
+ u32 i, entry;
+
+ for (i = 0; i < count; i++) {
+ entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q];
+ rx_desc = ravb_rx_get_desc(priv, q, entry);
+
+ if (!priv->rx_buffers[q][entry].page) {
+ if (unlikely(ravb_alloc_rx_buffer(ndev, q, entry,
+ gfp_mask, rx_desc)))
+ break;
+ }
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
rx_desc->die_dt = DT_FEMPTY;
}
- rx_desc = ravb_rx_get_desc(priv, q, i);
- rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
- rx_desc->die_dt = DT_LINKFIX; /* type */
+
+ return i;
}
/* Format skb and descriptor buffer for Ethernet AVB */
@@ -353,6 +356,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int num_tx_desc = priv->num_tx_desc;
+ struct ravb_rx_desc *rx_desc;
struct ravb_tx_desc *tx_desc;
struct ravb_desc *desc;
unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] *
@@ -364,7 +368,13 @@ static void ravb_ring_format(struct net_device *ndev, int q)
priv->dirty_rx[q] = 0;
priv->dirty_tx[q] = 0;
- ravb_rx_ring_format(ndev, q);
+ /* Regular RX descriptors have already been initialized by
+ * ravb_rx_ring_refill(), we just need to initialize the final link
+ * descriptor.
+ */
+ rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]);
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+ rx_desc->die_dt = DT_LINKFIX; /* type */
memset(priv->tx_ring[q], 0, tx_ring_size);
/* Build TX ring buffer */
@@ -408,26 +418,47 @@ static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
static int ravb_ring_init(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
- const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
+ struct page_pool_params params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP,
+ .pool_size = priv->num_rx_ring[q],
+ .nid = NUMA_NO_NODE,
+ .dev = ndev->dev.parent,
+ .dma_dir = DMA_FROM_DEVICE,
+ };
unsigned int ring_size;
- struct sk_buff *skb;
- unsigned int i;
+ u32 num_filled;
+
+ /* Allocate RX page pool and buffers */
+ priv->rx_pool[q] = page_pool_create(&params);
+ if (IS_ERR(priv->rx_pool[q]))
+ goto error;
- /* Allocate RX and TX skb rings */
- priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
- sizeof(*priv->rx_skb[q]), GFP_KERNEL);
+ /* Allocate RX buffers */
+ priv->rx_buffers[q] = kcalloc(priv->num_rx_ring[q],
+ sizeof(*priv->rx_buffers[q]), GFP_KERNEL);
+ if (!priv->rx_buffers[q])
+ goto error;
+
+ /* Allocate TX skb rings */
priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q],
sizeof(*priv->tx_skb[q]), GFP_KERNEL);
- if (!priv->rx_skb[q] || !priv->tx_skb[q])
+ if (!priv->tx_skb[q])
goto error;
- for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = ravb_alloc_skb(ndev, info, GFP_KERNEL);
- if (!skb)
- goto error;
- priv->rx_skb[q][i] = skb;
- }
+ /* Allocate all RX descriptors. */
+ if (!ravb_alloc_rx_desc(ndev, q))
+ goto error;
+
+ /* Populate RX ring buffer. */
+ priv->dirty_rx[q] = 0;
+ ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q];
+ memset(priv->rx_ring[q].raw, 0, ring_size);
+ num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q],
+ GFP_KERNEL);
+ if (num_filled != priv->num_rx_ring[q])
+ goto error;
if (num_tx_desc > 1) {
/* Allocate rings for the aligned buffers */
@@ -437,12 +468,6 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
}
- /* Allocate all RX descriptors. */
- if (!ravb_alloc_rx_desc(ndev, q))
- goto error;
-
- priv->dirty_rx[q] = 0;
-
/* Allocate all TX descriptors. */
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * num_tx_desc + 1);
@@ -554,6 +579,16 @@ static void ravb_emac_init_rcar(struct net_device *ndev)
ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR);
}
+static void ravb_emac_init_rcar_gen4(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ bool mii = priv->phy_interface == PHY_INTERFACE_MODE_MII;
+
+ ravb_modify(ndev, APSR, APSR_MIISELECT, mii ? APSR_MIISELECT : 0);
+
+ ravb_emac_init_rcar(ndev);
+}
+
/* E-MAC init function */
static void ravb_emac_init(struct net_device *ndev)
{
@@ -706,7 +741,9 @@ static void ravb_get_tx_tstamp(struct net_device *ndev)
static void ravb_rx_csum_gbeth(struct sk_buff *skb)
{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
__wsum csum_ip_hdr, csum_proto;
+ skb_frag_t *last_frag;
u8 *hw_csum;
/* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4
@@ -716,12 +753,24 @@ static void ravb_rx_csum_gbeth(struct sk_buff *skb)
if (unlikely(skb->len < sizeof(__sum16) * 2))
return;
- hw_csum = skb_tail_pointer(skb) - sizeof(__sum16);
+ if (skb_is_nonlinear(skb)) {
+ last_frag = &shinfo->frags[shinfo->nr_frags - 1];
+ hw_csum = skb_frag_address(last_frag) +
+ skb_frag_size(last_frag);
+ } else {
+ hw_csum = skb_tail_pointer(skb);
+ }
+
+ hw_csum -= sizeof(__sum16);
csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
hw_csum -= sizeof(__sum16);
csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum));
- skb_trim(skb, skb->len - 2 * sizeof(__sum16));
+
+ if (skb_is_nonlinear(skb))
+ skb_frag_size_sub(last_frag, 2 * sizeof(__sum16));
+ else
+ skb_trim(skb, skb->len - 2 * sizeof(__sum16));
/* TODO: IPV6 Rx checksum */
if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto)
@@ -743,30 +792,14 @@ static void ravb_rx_csum(struct sk_buff *skb)
skb_trim(skb, skb->len - sizeof(__sum16));
}
-static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
- struct ravb_rx_desc *desc)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct sk_buff *skb;
-
- skb = priv->rx_skb[RAVB_BE][entry];
- priv->rx_skb[RAVB_BE][entry] = NULL;
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- ALIGN(priv->info->rx_max_frame_size, 16),
- DMA_FROM_DEVICE);
-
- return skb;
-}
-
/* Packet receive function for Gigabit Ethernet */
-static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
+static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
struct net_device_stats *stats;
struct ravb_rx_desc *desc;
struct sk_buff *skb;
- dma_addr_t dma_addr;
int rx_packets = 0;
u8 desc_status;
u16 desc_len;
@@ -781,7 +814,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].desc[entry];
- if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
break;
/* Descriptor type must be checked before all other reads */
@@ -807,87 +840,110 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
if (desc_status & MSC_CEEF)
stats->rx_missed_errors++;
} else {
+ struct ravb_rx_buffer *rx_buff;
+ void *rx_addr;
+
+ rx_buff = &priv->rx_buffers[q][entry];
+ rx_addr = page_address(rx_buff->page) + rx_buff->offset;
die_dt = desc->die_dt & 0xF0;
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ desc_len, DMA_FROM_DEVICE);
+
switch (die_dt) {
case DT_FSINGLE:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(skb, desc_len);
- skb->protocol = eth_type_trans(skb, ndev);
- if (ndev->features & NETIF_F_RXCSUM)
- ravb_rx_csum_gbeth(skb);
- napi_gro_receive(&priv->napi[q], skb);
- rx_packets++;
- stats->rx_bytes += desc_len;
- break;
case DT_FSTART:
- priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_put(priv->rx_1st_skb, desc_len);
+ /* Start of packet: Set initial data length. */
+ skb = napi_build_skb(rx_addr,
+ info->rx_buffer_size);
+ if (unlikely(!skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0,
+ true);
+ goto refill;
+ }
+ skb_mark_for_recycle(skb);
+ skb_put(skb, desc_len);
+
+ /* Save this skb if the packet spans multiple
+ * descriptors.
+ */
+ if (die_dt == DT_FSTART)
+ priv->rx_1st_skb = skb;
break;
+
case DT_FMID:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_copy_to_linear_data_offset(priv->rx_1st_skb,
- priv->rx_1st_skb->len,
- skb->data,
- desc_len);
- skb_put(priv->rx_1st_skb, desc_len);
- dev_kfree_skb(skb);
- break;
case DT_FEND:
- skb = ravb_get_skb_gbeth(ndev, entry, desc);
- skb_copy_to_linear_data_offset(priv->rx_1st_skb,
- priv->rx_1st_skb->len,
- skb->data,
- desc_len);
- skb_put(priv->rx_1st_skb, desc_len);
- dev_kfree_skb(skb);
- priv->rx_1st_skb->protocol =
- eth_type_trans(priv->rx_1st_skb, ndev);
+ /* Continuing a packet: Add this buffer as an RX
+ * frag.
+ */
+
+ /* rx_1st_skb will be NULL if napi_build_skb()
+ * failed for the first descriptor of a
+ * multi-descriptor packet.
+ */
+ if (unlikely(!priv->rx_1st_skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0,
+ true);
+
+ /* We may find a DT_FSINGLE or DT_FSTART
+ * descriptor in the queue which we can
+ * process, so don't give up yet.
+ */
+ continue;
+ }
+ skb_add_rx_frag(priv->rx_1st_skb,
+ skb_shinfo(priv->rx_1st_skb)->nr_frags,
+ rx_buff->page, rx_buff->offset,
+ desc_len, info->rx_buffer_size);
+
+ /* Set skb to point at the whole packet so that
+ * we only need one code path for finishing a
+ * packet.
+ */
+ skb = priv->rx_1st_skb;
+ }
+
+ switch (die_dt) {
+ case DT_FSINGLE:
+ case DT_FEND:
+ /* Finishing a packet: Determine protocol &
+ * checksum, hand off to NAPI and update our
+ * stats.
+ */
+ skb->protocol = eth_type_trans(skb, ndev);
if (ndev->features & NETIF_F_RXCSUM)
- ravb_rx_csum_gbeth(priv->rx_1st_skb);
- stats->rx_bytes += priv->rx_1st_skb->len;
- napi_gro_receive(&priv->napi[q],
- priv->rx_1st_skb);
+ ravb_rx_csum_gbeth(skb);
+ stats->rx_bytes += skb->len;
+ napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
- break;
+
+ /* Clear rx_1st_skb so that it will only be
+ * non-NULL when valid.
+ */
+ priv->rx_1st_skb = NULL;
}
+
+ /* Mark this RX buffer as consumed. */
+ rx_buff->page = NULL;
}
}
+refill:
/* Refill the RX ring buffers. */
- for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
- entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].desc[entry];
- desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
-
- if (!priv->rx_skb[q][entry]) {
- skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
- if (!skb)
- break;
- dma_addr = dma_map_single(ndev->dev.parent,
- skb->data,
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
- skb_checksum_none_assert(skb);
- /* We just set the data size to 0 for a failed mapping
- * which should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- desc->ds_cc = cpu_to_le16(0);
- desc->dptr = cpu_to_le32(dma_addr);
- priv->rx_skb[q][entry] = skb;
- }
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
- desc->die_dt = DT_FEMPTY;
- }
+ priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
+ priv->cur_rx[q] - priv->dirty_rx[q],
+ GFP_ATOMIC);
stats->rx_packets += rx_packets;
- *quota -= rx_packets;
- return *quota == 0;
+ return rx_packets;
}
/* Packet receive function for Ethernet AVB */
-static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
+static int ravb_rx_rcar(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
@@ -895,7 +951,6 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
struct ravb_ex_rx_desc *desc;
unsigned int limit, i;
struct sk_buff *skb;
- dma_addr_t dma_addr;
struct timespec64 ts;
int rx_packets = 0;
u8 desc_status;
@@ -906,7 +961,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
for (i = 0; i < limit; i++, priv->cur_rx[q]++) {
entry = priv->cur_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q].ex_desc[entry];
- if (rx_packets == *quota || desc->die_dt == DT_FEMPTY)
+ if (rx_packets == budget || desc->die_dt == DT_FEMPTY)
break;
/* Descriptor type must be checked before all other reads */
@@ -934,12 +989,23 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
stats->rx_missed_errors++;
} else {
u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE;
-
- skb = priv->rx_skb[q][entry];
- priv->rx_skb[q][entry] = NULL;
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
+ struct ravb_rx_buffer *rx_buff;
+ void *rx_addr;
+
+ rx_buff = &priv->rx_buffers[q][entry];
+ rx_addr = page_address(rx_buff->page) + rx_buff->offset;
+ dma_sync_single_for_cpu(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ pkt_len, DMA_FROM_DEVICE);
+
+ skb = napi_build_skb(rx_addr, info->rx_buffer_size);
+ if (unlikely(!skb)) {
+ stats->rx_errors++;
+ page_pool_put_page(priv->rx_pool[q],
+ rx_buff->page, 0, true);
+ break;
+ }
+ skb_mark_for_recycle(skb);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -961,48 +1027,28 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
napi_gro_receive(&priv->napi[q], skb);
rx_packets++;
stats->rx_bytes += pkt_len;
+
+ /* Mark this RX buffer as consumed. */
+ rx_buff->page = NULL;
}
}
/* Refill the RX ring buffers. */
- for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
- entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
- desc = &priv->rx_ring[q].ex_desc[entry];
- desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use);
-
- if (!priv->rx_skb[q][entry]) {
- skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC);
- if (!skb)
- break; /* Better luck next round. */
- dma_addr = dma_map_single(ndev->dev.parent, skb->data,
- priv->info->rx_max_frame_size,
- DMA_FROM_DEVICE);
- skb_checksum_none_assert(skb);
- /* We just set the data size to 0 for a failed mapping
- * which should prevent DMA from happening...
- */
- if (dma_mapping_error(ndev->dev.parent, dma_addr))
- desc->ds_cc = cpu_to_le16(0);
- desc->dptr = cpu_to_le32(dma_addr);
- priv->rx_skb[q][entry] = skb;
- }
- /* Descriptor type must be set after all the above writes */
- dma_wmb();
- desc->die_dt = DT_FEMPTY;
- }
+ priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q,
+ priv->cur_rx[q] - priv->dirty_rx[q],
+ GFP_ATOMIC);
stats->rx_packets += rx_packets;
- *quota -= rx_packets;
- return *quota == 0;
+ return rx_packets;
}
/* Packet receive function for Ethernet AVB */
-static bool ravb_rx(struct net_device *ndev, int *quota, int q)
+static int ravb_rx(struct net_device *ndev, int budget, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- return info->receive(ndev, quota, q);
+ return info->receive(ndev, budget, q);
}
static void ravb_rcv_snd_disable(struct net_device *ndev)
@@ -1319,13 +1365,12 @@ static int ravb_poll(struct napi_struct *napi, int budget)
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
- int quota = budget;
- bool unmask;
+ int work_done;
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- unmask = !ravb_rx(ndev, &quota, q);
+ work_done = ravb_rx(ndev, budget, q);
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1344,24 +1389,20 @@ static int ravb_poll(struct napi_struct *napi, int budget)
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
- if (!unmask)
- goto out;
-
- napi_complete(napi);
-
- /* Re-enable RX/TX interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- if (!info->irq_en_dis) {
- ravb_modify(ndev, RIC0, mask, mask);
- ravb_modify(ndev, TIC, mask, mask);
- } else {
- ravb_write(ndev, mask, RIE0);
- ravb_write(ndev, mask, TIE);
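+ /* Only unmask interrupts once NAPI polling is done;
+ * napi_complete_done() returns false if polling must continue.
+ */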
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ /* Re-enable RX/TX interrupts */
+ spin_lock_irqsave(&priv->lock, flags);
+ if (!info->irq_en_dis) {
+ ravb_modify(ndev, RIC0, mask, mask);
+ ravb_modify(ndev, TIC, mask, mask);
+ } else {
+ ravb_write(ndev, mask, RIE0);
+ ravb_write(ndev, mask, TIE);
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
}
- spin_unlock_irqrestore(&priv->lock, flags);
-out:
- return budget - quota;
+ return work_done;
}
static void ravb_set_duplex_gbeth(struct net_device *ndev)
@@ -1696,7 +1737,7 @@ static int ravb_set_ringparam(struct net_device *ndev,
}
static int ravb_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *hw_info = priv->info;
@@ -2423,7 +2464,7 @@ static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
{
struct ravb_private *priv = netdev_priv(ndev);
- ndev->mtu = new_mtu;
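+ /* Pair with the lockless READ_ONCE() readers of ndev->mtu. */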
+ WRITE_ONCE(ndev->mtu, new_mtu);
if (netif_running(ndev)) {
synchronize_irq(priv->emac_irq);
@@ -2564,6 +2605,7 @@ static int ravb_mdio_init(struct ravb_private *priv)
{
struct platform_device *pdev = priv->pdev;
struct device *dev = &pdev->dev;
+ struct device_node *mdio_node;
struct phy_device *phydev;
struct device_node *pn;
int error;
@@ -2583,7 +2625,13 @@ static int ravb_mdio_init(struct ravb_private *priv)
pdev->name, pdev->id);
/* Register MDIO bus */
- error = of_mdiobus_register(priv->mii_bus, dev->of_node);
+ mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!mdio_node) {
+ /* backwards compatibility for DT lacking mdio subnode */
+ mdio_node = of_node_get(dev->of_node);
+ }
+ error = of_mdiobus_register(priv->mii_bus, mdio_node);
+ of_node_put(mdio_node);
if (error)
goto out_free_bus;
@@ -2614,6 +2662,28 @@ static int ravb_mdio_release(struct ravb_private *priv)
return 0;
}
+static const struct ravb_hw_info ravb_gen2_hw_info = {
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
+ .gstrings_stats = ravb_gstrings_stats,
+ .gstrings_size = sizeof(ravb_gstrings_stats),
+ .net_hw_features = NETIF_F_RXCSUM,
+ .net_features = NETIF_F_RXCSUM,
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats),
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_frame_size = SZ_2K,
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
+ .rx_desc_size = sizeof(struct ravb_ex_rx_desc),
+ .aligned_tx = 1,
+ .gptp = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
+};
+
static const struct ravb_hw_info ravb_gen3_hw_info = {
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
@@ -2627,7 +2697,8 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
.rx_max_frame_size = SZ_2K,
- .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.internal_delay = 1,
.tx_counters = 1,
@@ -2638,12 +2709,12 @@ static const struct ravb_hw_info ravb_gen3_hw_info = {
.magic_pkt = 1,
};
-static const struct ravb_hw_info ravb_gen2_hw_info = {
+static const struct ravb_hw_info ravb_gen4_hw_info = {
.receive = ravb_rx_rcar,
.set_rate = ravb_set_rate_rcar,
.set_feature = ravb_set_features_rcar,
.dmac_init = ravb_dmac_init_rcar,
- .emac_init = ravb_emac_init_rcar,
+ .emac_init = ravb_emac_init_rcar_gen4,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
@@ -2651,10 +2722,14 @@ static const struct ravb_hw_info ravb_gen2_hw_info = {
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
.rx_max_frame_size = SZ_2K,
- .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
- .aligned_tx = 1,
- .gptp = 1,
+ .internal_delay = 1,
+ .tx_counters = 1,
+ .multi_irqs = 1,
+ .irq_en_dis = 1,
+ .ccc_gac = 1,
.nc_queues = 1,
.magic_pkt = 1,
};
@@ -2672,7 +2747,8 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = {
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
.rx_max_frame_size = SZ_2K,
- .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16),
+ .rx_buffer_size = SZ_2K +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
.rx_desc_size = sizeof(struct ravb_ex_rx_desc),
.multi_irqs = 1,
.err_mgmt_irqs = 1,
@@ -2695,9 +2771,10 @@ static const struct ravb_hw_info gbeth_hw_info = {
.stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
.tccr_mask = TCCR_TSRQ0,
.rx_max_frame_size = SZ_8K,
- .rx_max_desc_use = 4080,
+ .rx_buffer_size = SZ_2K,
.rx_desc_size = sizeof(struct ravb_rx_desc),
.aligned_tx = 1,
+ .coalesce_irqs = 1,
.tx_counters = 1,
.carrier_counters = 1,
.half_duplex = 1,
@@ -2709,7 +2786,7 @@ static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
- { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen4_hw_info },
{ .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
{ .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
@@ -2974,6 +3051,12 @@ static int ravb_probe(struct platform_device *pdev)
if (info->nc_queues)
netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll);
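+ /* GbEth: default to software IRQ coalescing, and use threaded NAPI
+ * when there is only one CPU.
+ */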
+ if (info->coalesce_irqs) {
+ netdev_sw_irq_coalesce_default_on(ndev);
+ if (num_present_cpus() == 1)
+ dev_set_threaded(ndev, true);
+ }
+
/* Network device register */
error = register_netdev(ndev);
if (error)
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index dcab638c57fe..ff50e20856ec 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -871,13 +871,13 @@ static void rswitch_tx_free(struct net_device *ndev)
dma_rmb();
skb = gq->skbs[gq->dirty];
if (skb) {
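+ /* Read skb->len for the statistics before the skb is
+ * unmapped and freed below.
+ */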
+ rdev->ndev->stats.tx_packets++;
+ rdev->ndev->stats.tx_bytes += skb->len;
dma_unmap_single(ndev->dev.parent,
gq->unmap_addrs[gq->dirty],
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(gq->skbs[gq->dirty]);
gq->skbs[gq->dirty] = NULL;
- rdev->ndev->stats.tx_packets++;
- rdev->ndev->stats.tx_bytes += skb->len;
}
desc->desc.die_dt = DT_EEMPTY;
}
@@ -1809,7 +1809,7 @@ static const struct net_device_ops rswitch_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
struct rswitch_device *rdev = netdev_priv(ndev);
diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c
new file mode 100644
index 000000000000..0e6cea42f007
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rtsn.c
@@ -0,0 +1,1389 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Renesas Ethernet-TSN device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#include "rtsn.h"
+#include "rcar_gen4_ptp.h"
+
+struct rtsn_private {
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ void __iomem *base;
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct clk *clk;
+ struct reset_control *reset;
+
+ u32 num_tx_ring;
+ u32 num_rx_ring;
+ u32 tx_desc_bat_size;
+ dma_addr_t tx_desc_bat_dma;
+ struct rtsn_desc *tx_desc_bat;
+ u32 rx_desc_bat_size;
+ dma_addr_t rx_desc_bat_dma;
+ struct rtsn_desc *rx_desc_bat;
+ dma_addr_t tx_desc_dma;
+ dma_addr_t rx_desc_dma;
+ struct rtsn_ext_desc *tx_ring;
+ struct rtsn_ext_ts_desc *rx_ring;
+ struct sk_buff **tx_skb;
+ struct sk_buff **rx_skb;
+ spinlock_t lock; /* Register access lock */
+ u32 cur_tx;
+ u32 dirty_tx;
+ u32 cur_rx;
+ u32 dirty_rx;
+ u8 ts_tag;
+ struct napi_struct napi;
+ struct rtnl_link_stats64 stats;
+
+ struct mii_bus *mii;
+ phy_interface_t iface;
+ int link;
+ int speed;
+
+ int tx_data_irq;
+ int rx_data_irq;
+};
+
+static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg)
+{
+ return ioread32(priv->base + reg);
+}
+
+static void rtsn_write(struct rtsn_private *priv, enum rtsn_reg reg, u32 data)
+{
+ iowrite32(data, priv->base + reg);
+}
+
+static void rtsn_modify(struct rtsn_private *priv, enum rtsn_reg reg,
+ u32 clear, u32 set)
+{
+ rtsn_write(priv, reg, (rtsn_read(priv, reg) & ~clear) | set);
+}
+
+static int rtsn_reg_wait(struct rtsn_private *priv, enum rtsn_reg reg,
+ u32 mask, u32 expected)
+{
+ u32 val;
+
+ return readl_poll_timeout(priv->base + reg, val,
+ (val & mask) == expected,
+ RTSN_INTERVAL_US, RTSN_TIMEOUT_US);
+}
+
+static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable)
+{
+ if (enable) {
+ rtsn_write(priv, TDIE0, TDIE_TDID_TDX(TX_CHAIN_IDX));
+ rtsn_write(priv, RDIE0, RDIE_RDID_RDX(RX_CHAIN_IDX));
+ } else {
+ rtsn_write(priv, TDID0, TDIE_TDID_TDX(TX_CHAIN_IDX));
+ rtsn_write(priv, RDID0, RDIE_RDID_RDX(RX_CHAIN_IDX));
+ }
+}
+
+static void rtsn_get_timestamp(struct rtsn_private *priv, struct timespec64 *ts)
+{
+ struct rcar_gen4_ptp_private *ptp_priv = priv->ptp_priv;
+
+ ptp_priv->info.gettime64(&ptp_priv->info, ts);
+}
+
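+/* Reclaim transmitted descriptors, optionally stopping at the first one
+ * the hardware has not yet marked DT_FEMPTY, and report TX timestamps.
+ */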
+static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct rtsn_ext_desc *desc;
+ struct sk_buff *skb;
+ int free_num = 0;
+ int entry, size;
+
+ for (; priv->cur_tx - priv->dirty_tx > 0; priv->dirty_tx++) {
+ entry = priv->dirty_tx % priv->num_tx_ring;
+ desc = &priv->tx_ring[entry];
+ if (free_txed_only && (desc->die_dt & DT_MASK) != DT_FEMPTY)
+ break;
+
+ dma_rmb();
+ size = le16_to_cpu(desc->info_ds) & TX_DS;
+ skb = priv->tx_skb[entry];
+ if (skb) {
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct timespec64 ts;
+
+ rtsn_get_timestamp(priv, &ts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
+ skb_tstamp_tx(skb, &shhwtstamps);
+ }
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ dev_kfree_skb_any(priv->tx_skb[entry]);
+ free_num++;
+
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += size;
+ }
+
+ desc->die_dt = DT_EEMPTY;
+ }
+
+ desc = &priv->tx_ring[priv->num_tx_ring];
+ desc->die_dt = DT_LINK;
+
+ return free_num;
+}
+
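+/* Receive up to @budget frames and refill the RX ring; returns the
+ * number of frames passed up the stack.
+ */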
+static int rtsn_rx(struct net_device *ndev, int budget)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ unsigned int ndescriptors;
+ unsigned int rx_packets;
+ unsigned int i;
+ bool get_ts;
+
+ get_ts = priv->ptp_priv->tstamp_rx_ctrl &
+ RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+
+ ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx;
+ rx_packets = 0;
+ for (i = 0; i < ndescriptors; i++) {
+ const unsigned int entry = priv->cur_rx % priv->num_rx_ring;
+ struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u16 pkt_len;
+
+ /* Stop processing descriptors if budget is consumed. */
+ if (rx_packets >= budget)
+ break;
+
+ /* Stop processing descriptors at the first empty one. */
+ if ((desc->die_dt & DT_MASK) == DT_FEMPTY)
+ break;
+
+ dma_rmb();
+ pkt_len = le16_to_cpu(desc->info_ds) & RX_DS;
+
+ skb = priv->rx_skb[entry];
+ priv->rx_skb[entry] = NULL;
+ dma_addr = le32_to_cpu(desc->dptr);
+ dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+
+ /* Get timestamp if enabled. */
+ if (get_ts) {
+ struct skb_shared_hwtstamps *shhwtstamps;
+ struct timespec64 ts;
+
+ shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+
+ ts.tv_sec = (u64)le32_to_cpu(desc->ts_sec);
+ ts.tv_nsec = le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
+
+ shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
+ }
+
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi, skb);
+
+ /* Update statistics. */
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += pkt_len;
+
+ /* Update counters. */
+ priv->cur_rx++;
+ rx_packets++;
+ }
+
+ /* Refill the RX ring buffers */
+ for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
+ const unsigned int entry = priv->dirty_rx % priv->num_rx_ring;
+ struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
+
+ if (!priv->rx_skb[entry]) {
+ skb = napi_alloc_skb(&priv->napi,
+ PKT_BUF_SZ + RTSN_ALIGN - 1);
+ if (!skb)
+ break;
+ skb_reserve(skb, NET_IP_ALIGN);
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data,
+ le16_to_cpu(desc->info_ds),
+ DMA_FROM_DEVICE);
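+ /* A data size of 0 keeps the hardware from DMAing into a
+ * buffer whose mapping failed.
+ */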
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ desc->info_ds = cpu_to_le16(0);
+ desc->dptr = cpu_to_le32(dma_addr);
+ skb_checksum_none_assert(skb);
+ priv->rx_skb[entry] = skb;
+ }
+
+ dma_wmb();
+ desc->die_dt = DT_FEMPTY | D_DIE;
+ }
+
+ priv->rx_ring[priv->num_rx_ring].die_dt = DT_LINK;
+
+ return rx_packets;
+}
+
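+/* NAPI poll: receive within the budget, reclaim completed TX work and
+ * re-enable the data interrupts once polling is done.
+ */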
+static int rtsn_poll(struct napi_struct *napi, int budget)
+{
+ struct rtsn_private *priv;
+ struct net_device *ndev;
+ unsigned long flags;
+ int work_done;
+
+ ndev = napi->dev;
+ priv = netdev_priv(ndev);
+
+ /* Processing RX Descriptor Ring */
+ work_done = rtsn_rx(ndev, budget);
+
+ /* Processing TX Descriptor Ring */
+ spin_lock_irqsave(&priv->lock, flags);
+ rtsn_tx_free(ndev, true);
+ netif_wake_subqueue(ndev, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ /* Re-enable TX/RX interrupts */
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ rtsn_ctrl_data_irq(priv, true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+
+ return work_done;
+}
+
+static int rtsn_desc_alloc(struct rtsn_private *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+ unsigned int i;
+
+ priv->tx_desc_bat_size = sizeof(struct rtsn_desc) * TX_NUM_CHAINS;
+ priv->tx_desc_bat = dma_alloc_coherent(dev, priv->tx_desc_bat_size,
+ &priv->tx_desc_bat_dma,
+ GFP_KERNEL);
+
+ if (!priv->tx_desc_bat)
+ return -ENOMEM;
+
+ for (i = 0; i < TX_NUM_CHAINS; i++)
+ priv->tx_desc_bat[i].die_dt = DT_EOS;
+
+ priv->rx_desc_bat_size = sizeof(struct rtsn_desc) * RX_NUM_CHAINS;
+ priv->rx_desc_bat = dma_alloc_coherent(dev, priv->rx_desc_bat_size,
+ &priv->rx_desc_bat_dma,
+ GFP_KERNEL);
+
+ if (!priv->rx_desc_bat)
+ return -ENOMEM;
+
+ for (i = 0; i < RX_NUM_CHAINS; i++)
+ priv->rx_desc_bat[i].die_dt = DT_EOS;
+
+ return 0;
+}
+
+static void rtsn_desc_free(struct rtsn_private *priv)
+{
+ if (priv->tx_desc_bat)
+ dma_free_coherent(&priv->pdev->dev, priv->tx_desc_bat_size,
+ priv->tx_desc_bat, priv->tx_desc_bat_dma);
+ priv->tx_desc_bat = NULL;
+
+ if (priv->rx_desc_bat)
+ dma_free_coherent(&priv->pdev->dev, priv->rx_desc_bat_size,
+ priv->rx_desc_bat, priv->rx_desc_bat_dma);
+ priv->rx_desc_bat = NULL;
+}
+
+static void rtsn_chain_free(struct rtsn_private *priv)
+{
+ struct device *dev = &priv->pdev->dev;
+
+ dma_free_coherent(dev,
+ sizeof(struct rtsn_ext_desc) * (priv->num_tx_ring + 1),
+ priv->tx_ring, priv->tx_desc_dma);
+ priv->tx_ring = NULL;
+
+ dma_free_coherent(dev,
+ sizeof(struct rtsn_ext_ts_desc) * (priv->num_rx_ring + 1),
+ priv->rx_ring, priv->rx_desc_dma);
+ priv->rx_ring = NULL;
+
+ kfree(priv->tx_skb);
+ priv->tx_skb = NULL;
+
+ kfree(priv->rx_skb);
+ priv->rx_skb = NULL;
+}
+
+static int rtsn_chain_init(struct rtsn_private *priv, int tx_size, int rx_size)
+{
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *skb;
+ int i;
+
+ priv->num_tx_ring = tx_size;
+ priv->num_rx_ring = rx_size;
+
+ priv->tx_skb = kcalloc(tx_size, sizeof(*priv->tx_skb), GFP_KERNEL);
+ priv->rx_skb = kcalloc(rx_size, sizeof(*priv->rx_skb), GFP_KERNEL);
+
+ if (!priv->rx_skb || !priv->tx_skb)
+ goto error;
+
+ for (i = 0; i < rx_size; i++) {
+ skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RTSN_ALIGN - 1);
+ if (!skb)
+ goto error;
+ skb_reserve(skb, NET_IP_ALIGN);
+ priv->rx_skb[i] = skb;
+ }
+
+ /* Allocate TX, RX descriptors */
+ priv->tx_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rtsn_ext_desc) * (tx_size + 1),
+ &priv->tx_desc_dma, GFP_KERNEL);
+ priv->rx_ring = dma_alloc_coherent(ndev->dev.parent,
+ sizeof(struct rtsn_ext_ts_desc) * (rx_size + 1),
+ &priv->rx_desc_dma, GFP_KERNEL);
+
+ if (!priv->tx_ring || !priv->rx_ring)
+ goto error;
+
+ return 0;
+error:
+ rtsn_chain_free(priv);
+
+ return -ENOMEM;
+}
+
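+/* Format the descriptor chains: data descriptors start out empty, the
+ * final descriptor links back to the head of the ring, and the base
+ * address table entry points at the chain.
+ */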
+static void rtsn_chain_format(struct rtsn_private *priv)
+{
+ struct net_device *ndev = priv->ndev;
+ struct rtsn_ext_ts_desc *rx_desc;
+ struct rtsn_ext_desc *tx_desc;
+ struct rtsn_desc *bat_desc;
+ dma_addr_t dma_addr;
+ unsigned int i;
+
+ priv->cur_tx = 0;
+ priv->cur_rx = 0;
+ priv->dirty_rx = 0;
+ priv->dirty_tx = 0;
+
+ /* TX */
+ memset(priv->tx_ring, 0, sizeof(*tx_desc) * priv->num_tx_ring);
+ for (i = 0, tx_desc = priv->tx_ring; i < priv->num_tx_ring; i++, tx_desc++)
+ tx_desc->die_dt = DT_EEMPTY | D_DIE;
+
+ tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
+ tx_desc->die_dt = DT_LINK;
+
+ bat_desc = &priv->tx_desc_bat[TX_CHAIN_IDX];
+ bat_desc->die_dt = DT_LINK;
+ bat_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
+
+ /* RX */
+ memset(priv->rx_ring, 0, sizeof(*rx_desc) * priv->num_rx_ring);
+ for (i = 0, rx_desc = priv->rx_ring; i < priv->num_rx_ring; i++, rx_desc++) {
+ dma_addr = dma_map_single(ndev->dev.parent,
+ priv->rx_skb[i]->data, PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ if (!dma_mapping_error(ndev->dev.parent, dma_addr))
+ rx_desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
+ rx_desc->dptr = cpu_to_le32((u32)dma_addr);
+ rx_desc->die_dt = DT_FEMPTY | D_DIE;
+ }
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
+ rx_desc->die_dt = DT_LINK;
+
+ bat_desc = &priv->rx_desc_bat[RX_CHAIN_IDX];
+ bat_desc->die_dt = DT_LINK;
+ bat_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
+}
+
+static int rtsn_dmac_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_chain_init(priv, TX_CHAIN_SIZE, RX_CHAIN_SIZE);
+ if (ret)
+ return ret;
+
+ rtsn_chain_format(priv);
+
+ return 0;
+}
+
+static enum rtsn_mode rtsn_read_mode(struct rtsn_private *priv)
+{
+ return (rtsn_read(priv, OSR) & OSR_OPS) >> 1;
+}
+
+static int rtsn_wait_mode(struct rtsn_private *priv, enum rtsn_mode mode)
+{
+ unsigned int i;
+
+ /* Need to busy-loop, as mode changes can happen in atomic context. */
+ for (i = 0; i < RTSN_TIMEOUT_US / RTSN_INTERVAL_US; i++) {
+ if (rtsn_read_mode(priv) == mode)
+ return 0;
+
+ udelay(RTSN_INTERVAL_US);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int rtsn_change_mode(struct rtsn_private *priv, enum rtsn_mode mode)
+{
+ int ret;
+
+ rtsn_write(priv, OCR, mode);
+ ret = rtsn_wait_mode(priv, mode);
+ if (ret)
+ netdev_err(priv->ndev, "Failed to switch operation mode\n");
+ return ret;
+}
+
+static int rtsn_get_data_irq_status(struct rtsn_private *priv)
+{
+ u32 val;
+
+ val = rtsn_read(priv, TDIS0) | TDIS_TDS(TX_CHAIN_IDX);
+ val |= rtsn_read(priv, RDIS0) | RDIS_RDS(RX_CHAIN_IDX);
+
+ return val;
+}
+
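+/* Shared handler for the TX and RX data interrupts: ack the status bits,
+ * mask further data interrupts and defer the work to NAPI.
+ */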
+static irqreturn_t rtsn_irq(int irq, void *dev_id)
+{
+ struct rtsn_private *priv = dev_id;
+ int ret = IRQ_NONE;
+
+ spin_lock(&priv->lock);
+
+ if (rtsn_get_data_irq_status(priv)) {
+ /* Clear TX/RX irq status */
+ rtsn_write(priv, TDIS0, TDIS_TDS(TX_CHAIN_IDX));
+ rtsn_write(priv, RDIS0, RDIS_RDS(RX_CHAIN_IDX));
+
+ if (napi_schedule_prep(&priv->napi)) {
+ /* Disable TX/RX interrupts */
+ rtsn_ctrl_data_irq(priv, false);
+
+ __napi_schedule(&priv->napi);
+ }
+
+ ret = IRQ_HANDLED;
+ }
+
+ spin_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int rtsn_request_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags, struct rtsn_private *priv,
+ const char *ch)
+{
+ char *name;
+ int ret;
+
+ name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, "%s:%s",
+ priv->ndev->name, ch);
+ if (!name)
+ return -ENOMEM;
+
+ ret = request_irq(irq, handler, flags, name, priv);
+ if (ret)
+ netdev_err(priv->ndev, "Cannot request IRQ %s\n", name);
+
+ return ret;
+}
+
+static void rtsn_free_irqs(struct rtsn_private *priv)
+{
+ free_irq(priv->tx_data_irq, priv);
+ free_irq(priv->rx_data_irq, priv);
+}
+
+static int rtsn_request_irqs(struct rtsn_private *priv)
+{
+ int ret;
+
+ priv->rx_data_irq = platform_get_irq_byname(priv->pdev, "rx");
+ if (priv->rx_data_irq < 0)
+ return priv->rx_data_irq;
+
+ priv->tx_data_irq = platform_get_irq_byname(priv->pdev, "tx");
+ if (priv->tx_data_irq < 0)
+ return priv->tx_data_irq;
+
+ ret = rtsn_request_irq(priv->tx_data_irq, rtsn_irq, 0, priv, "tx");
+ if (ret)
+ return ret;
+
+ ret = rtsn_request_irq(priv->rx_data_irq, rtsn_irq, 0, priv, "rx");
+ if (ret) {
+ free_irq(priv->tx_data_irq, priv);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtsn_reset(struct rtsn_private *priv)
+{
+ reset_control_reset(priv->reset);
+ mdelay(1);
+
+ return rtsn_wait_mode(priv, OCR_OPC_DISABLE);
+}
+
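+/* Initialize the AXI bus interface: load the TX/RX descriptor base
+ * address tables and poll until the hardware completes each load.
+ */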
+static int rtsn_axibmi_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_reg_wait(priv, RR, RR_RST, RR_RST_COMPLETE);
+ if (ret)
+ return ret;
+
+ /* Set AXIWC */
+ rtsn_write(priv, AXIWC, AXIWC_DEFAULT);
+
+ /* Set AXIRC */
+ rtsn_write(priv, AXIRC, AXIRC_DEFAULT);
+
+ /* TX Descriptor chain setting */
+ rtsn_write(priv, TATLS0, TATLS0_TEDE | TATLS0_TATEN(TX_CHAIN_IDX));
+ rtsn_write(priv, TATLS1, priv->tx_desc_bat_dma + TX_CHAIN_ADDR_OFFSET);
+ rtsn_write(priv, TATLR, TATLR_TATL);
+
+ ret = rtsn_reg_wait(priv, TATLR, TATLR_TATL, 0);
+ if (ret)
+ return ret;
+
+ /* RX Descriptor chain setting */
+ rtsn_write(priv, RATLS0,
+ RATLS0_RETS | RATLS0_REDE | RATLS0_RATEN(RX_CHAIN_IDX));
+ rtsn_write(priv, RATLS1, priv->rx_desc_bat_dma + RX_CHAIN_ADDR_OFFSET);
+ rtsn_write(priv, RATLR, RATLR_RATL);
+
+ ret = rtsn_reg_wait(priv, RATLR, RATLR_RATL, 0);
+ if (ret)
+ return ret;
+
+ /* Enable TX/RX interrupts */
+ rtsn_ctrl_data_irq(priv, true);
+
+ return 0;
+}
+
+static void rtsn_mhd_init(struct rtsn_private *priv)
+{
+ /* TX General setting */
+ rtsn_write(priv, TGC1, TGC1_STTV_DEFAULT | TGC1_TQTM_SFM);
+ rtsn_write(priv, TMS0, TMS_MFS_MAX);
+
+ /* RX Filter IP */
+ rtsn_write(priv, CFCR0, CFCR_SDID(RX_CHAIN_IDX));
+ rtsn_write(priv, FMSCR, FMSCR_FMSIE(RX_CHAIN_IDX));
+}
+
+static int rtsn_get_phy_params(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = of_get_phy_mode(priv->pdev->dev.of_node, &priv->iface);
+ if (ret)
+ return ret;
+
+ switch (priv->iface) {
+ case PHY_INTERFACE_MODE_MII:
+ priv->speed = 100;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ priv->speed = 1000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void rtsn_set_phy_interface(struct rtsn_private *priv)
+{
+ u32 val;
+
+ switch (priv->iface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = MPIC_PIS_MII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = MPIC_PIS_GMII;
+ break;
+ default:
+ return;
+ }
+
+ rtsn_modify(priv, MPIC, MPIC_PIS_MASK, val);
+}
+
+static void rtsn_set_rate(struct rtsn_private *priv)
+{
+ u32 val;
+
+ switch (priv->speed) {
+ case 10:
+ val = MPIC_LSC_10M;
+ break;
+ case 100:
+ val = MPIC_LSC_100M;
+ break;
+ case 1000:
+ val = MPIC_LSC_1G;
+ break;
+ default:
+ return;
+ }
+
+ rtsn_modify(priv, MPIC, MPIC_LSC_MASK, val);
+}
+
+static int rtsn_rmac_init(struct rtsn_private *priv)
+{
+ const u8 *mac_addr = priv->ndev->dev_addr;
+ int ret;
+
+ /* Set MAC address */
+ rtsn_write(priv, MRMAC0, (mac_addr[0] << 8) | mac_addr[1]);
+ rtsn_write(priv, MRMAC1, (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5]);
+
+ /* Set xMII type */
+ rtsn_set_phy_interface(priv);
+ rtsn_set_rate(priv);
+
+ /* Enable MII */
+ rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
+ MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
+
+ /* Link verification */
+ rtsn_modify(priv, MLVC, MLVC_PLV, MLVC_PLV);
+ ret = rtsn_reg_wait(priv, MLVC, MLVC_PLV, 0);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
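+/* Bring the hardware up: reset, program it in CONFIG mode, then move to
+ * OPERATION via DISABLE.
+ */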
+static int rtsn_hw_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_reset(priv);
+ if (ret)
+ return ret;
+
+ /* Change to CONFIG mode */
+ ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
+ if (ret)
+ return ret;
+
+ ret = rtsn_axibmi_init(priv);
+ if (ret)
+ return ret;
+
+ rtsn_mhd_init(priv);
+
+ ret = rtsn_rmac_init(priv);
+ if (ret)
+ return ret;
+
+ ret = rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ if (ret)
+ return ret;
+
+ /* Change to OPERATION mode */
+ ret = rtsn_change_mode(priv, OCR_OPC_OPERATION);
+
+ return ret;
+}
+
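+/* MDIO access goes through the single MPSM register: it carries the PHY
+ * and register addresses, the direction flag and the data; PSME
+ * self-clears once the transfer completes.
+ */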
+static int rtsn_mii_access(struct mii_bus *bus, bool read, int phyad,
+ int regad, u16 data)
+{
+ struct rtsn_private *priv = bus->priv;
+ u32 val;
+ int ret;
+
+ val = MPSM_PDA(phyad) | MPSM_PRA(regad) | MPSM_PSME;
+
+ if (!read)
+ val |= MPSM_PSMAD | MPSM_PRD_SET(data);
+
+ rtsn_write(priv, MPSM, val);
+
+ ret = rtsn_reg_wait(priv, MPSM, MPSM_PSME, 0);
+ if (ret)
+ return ret;
+
+ if (read)
+ ret = MPSM_PRD_GET(rtsn_read(priv, MPSM));
+
+ return ret;
+}
+
+static int rtsn_mii_read(struct mii_bus *bus, int addr, int regnum)
+{
+ return rtsn_mii_access(bus, true, addr, regnum, 0);
+}
+
+static int rtsn_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
+{
+ return rtsn_mii_access(bus, false, addr, regnum, val);
+}
+
+static int rtsn_mdio_alloc(struct rtsn_private *priv)
+{
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *mdio_node;
+ struct mii_bus *mii;
+ int ret;
+
+ mii = mdiobus_alloc();
+ if (!mii)
+ return -ENOMEM;
+
+ mdio_node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!mdio_node) {
+ ret = -ENODEV;
+ goto out_free_bus;
+ }
+
+ /* Enter config mode before registering the MDIO bus */
+ ret = rtsn_reset(priv);
+ if (ret)
+ goto out_free_bus;
+
+ ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
+ if (ret)
+ goto out_free_bus;
+
+ rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
+ MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
+
+ /* Register the MDIO bus */
+ mii->name = "rtsn_mii";
+ snprintf(mii->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+ mii->priv = priv;
+ mii->read = rtsn_mii_read;
+ mii->write = rtsn_mii_write;
+ mii->parent = dev;
+
+ ret = of_mdiobus_register(mii, mdio_node);
+ of_node_put(mdio_node);
+ if (ret)
+ goto out_free_bus;
+
+ priv->mii = mii;
+
+ return 0;
+
+out_free_bus:
+ mdiobus_free(mii);
+ return ret;
+}
+
+static void rtsn_mdio_free(struct rtsn_private *priv)
+{
+ mdiobus_unregister(priv->mii);
+ mdiobus_free(priv->mii);
+ priv->mii = NULL;
+}
+
+static void rtsn_adjust_link(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ bool new_state = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (phydev->link) {
+ if (phydev->speed != priv->speed) {
+ new_state = true;
+ priv->speed = phydev->speed;
+ }
+
+ if (!priv->link) {
+ new_state = true;
+ priv->link = phydev->link;
+ }
+ } else if (priv->link) {
+ new_state = true;
+ priv->link = 0;
+ priv->speed = 0;
+ }
+
+ if (new_state) {
+ /* Need to transition to CONFIG mode before reconfiguring and
+ * then back to the original mode. Any state change to/from
+ * CONFIG or OPERATION must pass through DISABLED to stop Rx/Tx.
+ */
+ enum rtsn_mode orgmode = rtsn_read_mode(priv);
+
+ /* Transition to CONFIG */
+ if (orgmode != OCR_OPC_CONFIG) {
+ if (orgmode != OCR_OPC_DISABLE &&
+ rtsn_change_mode(priv, OCR_OPC_DISABLE))
+ goto out;
+ if (rtsn_change_mode(priv, OCR_OPC_CONFIG))
+ goto out;
+ }
+
+ rtsn_set_rate(priv);
+
+ /* Transition to original mode */
+ if (orgmode != OCR_OPC_CONFIG) {
+ if (rtsn_change_mode(priv, OCR_OPC_DISABLE))
+ goto out;
+ if (orgmode != OCR_OPC_DISABLE &&
+ rtsn_change_mode(priv, orgmode))
+ goto out;
+ }
+ }
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ if (new_state)
+ phy_print_status(phydev);
+}
+
+static int rtsn_phy_init(struct rtsn_private *priv)
+{
+ struct device_node *np = priv->ndev->dev.parent->of_node;
+ struct phy_device *phydev;
+ struct device_node *phy;
+
+ priv->link = 0;
+
+ phy = of_parse_phandle(np, "phy-handle", 0);
+ if (!phy)
+ return -ENOENT;
+
+ phydev = of_phy_connect(priv->ndev, phy, rtsn_adjust_link, 0,
+ priv->iface);
+ of_node_put(phy);
+ if (!phydev)
+ return -ENOENT;
+
+ /* Only support full-duplex mode */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+static void rtsn_phy_deinit(struct rtsn_private *priv)
+{
+ phy_disconnect(priv->ndev->phydev);
+ priv->ndev->phydev = NULL;
+}
+
+static int rtsn_init(struct rtsn_private *priv)
+{
+ int ret;
+
+ ret = rtsn_desc_alloc(priv);
+ if (ret)
+ return ret;
+
+ ret = rtsn_dmac_init(priv);
+ if (ret)
+ goto error_free_desc;
+
+ ret = rtsn_hw_init(priv);
+ if (ret)
+ goto error_free_chain;
+
+ ret = rtsn_phy_init(priv);
+ if (ret)
+ goto error_free_chain;
+
+ ret = rtsn_request_irqs(priv);
+ if (ret)
+ goto error_free_phy;
+
+ return 0;
+error_free_phy:
+ rtsn_phy_deinit(priv);
+error_free_chain:
+ rtsn_chain_free(priv);
+error_free_desc:
+ rtsn_desc_free(priv);
+ return ret;
+}
+
+static void rtsn_deinit(struct rtsn_private *priv)
+{
+ rtsn_free_irqs(priv);
+ rtsn_phy_deinit(priv);
+ rtsn_chain_free(priv);
+ rtsn_desc_free(priv);
+}
+
+static void rtsn_parse_mac_address(struct device_node *np,
+ struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ u8 addr[ETH_ALEN];
+ u32 mrmac0;
+ u32 mrmac1;
+
+ /* Try to read address from Device Tree. */
+ if (!of_get_mac_address(np, addr)) {
+ eth_hw_addr_set(ndev, addr);
+ return;
+ }
+
+ /* Try to read address from device. */
+ mrmac0 = rtsn_read(priv, MRMAC0);
+ mrmac1 = rtsn_read(priv, MRMAC1);
+
+ addr[0] = (mrmac0 >> 8) & 0xff;
+ addr[1] = (mrmac0 >> 0) & 0xff;
+ addr[2] = (mrmac1 >> 24) & 0xff;
+ addr[3] = (mrmac1 >> 16) & 0xff;
+ addr[4] = (mrmac1 >> 8) & 0xff;
+ addr[5] = (mrmac1 >> 0) & 0xff;
+
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(ndev, addr);
+ return;
+ }
+
+ /* Fall back to a random address. */
+ eth_hw_addr_random(ndev);
+}
+
+static int rtsn_open(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ int ret;
+
+ napi_enable(&priv->napi);
+
+ ret = rtsn_init(priv);
+ if (ret) {
+ napi_disable(&priv->napi);
+ return ret;
+ }
+
+ phy_start(ndev->phydev);
+
+ netif_start_queue(ndev);
+
+ return 0;
+}
+
+static int rtsn_stop(struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+
+ phy_stop(priv->ndev->phydev);
+ napi_disable(&priv->napi);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ rtsn_deinit(priv);
+
+ return 0;
+}
+
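+/* Transmit one frame as a single DT_FSINGLE descriptor; frames larger
+ * than the 12-bit descriptor data size field are dropped.
+ */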
+static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ struct rtsn_ext_desc *desc;
+ int ret = NETDEV_TX_OK;
+ unsigned long flags;
+ dma_addr_t dma_addr;
+ int entry;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Drop packet if it won't fit in a single descriptor. */
+ if (skb->len >= TX_DS) {
+ priv->stats.tx_dropped++;
+ priv->stats.tx_errors++;
+ goto out;
+ }
+
+ if (priv->cur_tx - priv->dirty_tx > priv->num_tx_ring) {
+ netif_stop_subqueue(ndev, 0);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ if (skb_put_padto(skb, ETH_ZLEN))
+ goto out;
+
+ dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ goto out;
+ }
+
+ entry = priv->cur_tx % priv->num_tx_ring;
+ priv->tx_skb[entry] = skb;
+ desc = &priv->tx_ring[entry];
+ desc->dptr = cpu_to_le32(dma_addr);
+ desc->info_ds = cpu_to_le16(skb->len);
+ desc->info1 = cpu_to_le64(skb->len);
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->ts_tag++;
+ desc->info_ds |= cpu_to_le16(TXC);
+ desc->info = priv->ts_tag;
+ }
+
+ skb_tx_timestamp(skb);
+ dma_wmb();
+
+ desc->die_dt = DT_FSINGLE | D_DIE;
+ priv->cur_tx++;
+
+ /* Start xmit */
+ rtsn_write(priv, TRCR0, BIT(TX_CHAIN_IDX));
+out:
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return ret;
+}
+
+static void rtsn_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+ *storage = priv->stats;
+}
+
+static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ return phy_do_ioctl_running(ndev, ifr, cmd);
+}
+
+static int rtsn_hwtstamp_get(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config)
+{
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct rtsn_private *priv;
+
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ priv = netdev_priv(ndev);
+ ptp_priv = priv->ptp_priv;
+
+ config->flags = 0;
+
+ config->tx_type =
+ ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+
+ switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
+ case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
+ break;
+ case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ }
+
+ return 0;
+}
+
+static int rtsn_hwtstamp_set(struct net_device *ndev,
+ struct kernel_hwtstamp_config *config,
+ struct netlink_ext_ack *extack)
+{
+ struct rcar_gen4_ptp_private *ptp_priv;
+ struct rtsn_private *priv;
+ u32 tstamp_rx_ctrl;
+ u32 tstamp_tx_ctrl;
+
+ if (!netif_running(ndev))
+ return -ENODEV;
+
+ priv = netdev_priv(ndev);
+ ptp_priv = priv->ptp_priv;
+
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tstamp_tx_ctrl = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tstamp_rx_ctrl = 0;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
+ RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
+ break;
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
+ RCAR_GEN4_RXTSTAMP_TYPE_ALL;
+ break;
+ }
+
+ ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
+ ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
+
+ return 0;
+}
+
+static const struct net_device_ops rtsn_netdev_ops = {
+ .ndo_open = rtsn_open,
+ .ndo_stop = rtsn_stop,
+ .ndo_start_xmit = rtsn_start_xmit,
+ .ndo_get_stats64 = rtsn_get_stats64,
+ .ndo_eth_ioctl = rtsn_do_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_hwtstamp_set = rtsn_hwtstamp_set,
+ .ndo_hwtstamp_get = rtsn_hwtstamp_get,
+};
+
+static int rtsn_get_ts_info(struct net_device *ndev,
+ struct kernel_ethtool_ts_info *info)
+{
+ struct rtsn_private *priv = netdev_priv(ndev);
+
+ info->phc_index = ptp_clock_index(priv->ptp_priv->clock);
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+
+ return 0;
+}
+
+static const struct ethtool_ops rtsn_ethtool_ops = {
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = rtsn_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct of_device_id rtsn_match_table[] = {
+ { .compatible = "renesas,r8a779g0-ethertsn", },
+ { /* Sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, rtsn_match_table);
+
+static int rtsn_probe(struct platform_device *pdev)
+{
+ struct rtsn_private *priv;
+ struct net_device *ndev;
+ struct resource *res;
+ int ret;
+
+ ndev = alloc_etherdev_mqs(sizeof(struct rtsn_private), TX_NUM_CHAINS,
+ RX_NUM_CHAINS);
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+ priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
+
+ spin_lock_init(&priv->lock);
+ platform_set_drvdata(pdev, priv);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ ret = PTR_ERR(priv->clk);
+ goto error_free;
+ }
+
+ priv->reset = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->reset)) {
+ ret = PTR_ERR(priv->reset);
+ goto error_free;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsnes");
+ if (!res) {
+ dev_err(&pdev->dev, "Can't find tsnes resource\n");
+ ret = -EINVAL;
+ goto error_free;
+ }
+
+ priv->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto error_free;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ndev->features = NETIF_F_RXCSUM;
+ ndev->hw_features = NETIF_F_RXCSUM;
+ ndev->base_addr = res->start;
+ ndev->netdev_ops = &rtsn_netdev_ops;
+ ndev->ethtool_ops = &rtsn_ethtool_ops;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gptp");
+ if (!res) {
+ dev_err(&pdev->dev, "Can't find gptp resource\n");
+ ret = -EINVAL;
+ goto error_free;
+ }
+
+ priv->ptp_priv->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->ptp_priv->addr)) {
+ ret = PTR_ERR(priv->ptp_priv->addr);
+ goto error_free;
+ }
+
+ ret = rtsn_get_phy_params(priv);
+ if (ret)
+ goto error_free;
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ netif_napi_add(ndev, &priv->napi, rtsn_poll);
+
+ rtsn_parse_mac_address(pdev->dev.of_node, ndev);
+
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+
+ ret = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
+ clk_get_rate(priv->clk));
+ if (ret)
+ goto error_pm;
+
+ ret = rtsn_mdio_alloc(priv);
+ if (ret)
+ goto error_ptp;
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto error_mdio;
+
+ netdev_info(ndev, "MAC address %pM\n", ndev->dev_addr);
+
+ return 0;
+
+error_mdio:
+ rtsn_mdio_free(priv);
+error_ptp:
+ rcar_gen4_ptp_unregister(priv->ptp_priv);
+error_pm:
+ netif_napi_del(&priv->napi);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+error_free:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static void rtsn_remove(struct platform_device *pdev)
+{
+ struct rtsn_private *priv = platform_get_drvdata(pdev);
+
+ unregister_netdev(priv->ndev);
+ rtsn_mdio_free(priv);
+ rcar_gen4_ptp_unregister(priv->ptp_priv);
+ rtsn_change_mode(priv, OCR_OPC_DISABLE);
+ netif_napi_del(&priv->napi);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ free_netdev(priv->ndev);
+}
+
+static struct platform_driver rtsn_driver = {
+ .probe = rtsn_probe,
+ .remove = rtsn_remove,
+ .driver = {
+ .name = "rtsn",
+ .of_match_table = rtsn_match_table,
+ }
+};
+module_platform_driver(rtsn_driver);
+
+MODULE_AUTHOR("Phong Hoang, Niklas Söderlund");
+MODULE_DESCRIPTION("Renesas Ethernet-TSN device driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/renesas/rtsn.h b/drivers/net/ethernet/renesas/rtsn.h
new file mode 100644
index 000000000000..3183e80d7e6b
--- /dev/null
+++ b/drivers/net/ethernet/renesas/rtsn.h
@@ -0,0 +1,464 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/* Renesas Ethernet-TSN device driver
+ *
+ * Copyright (C) 2022 Renesas Electronics Corporation
+ * Copyright (C) 2023 Niklas Söderlund <niklas.soderlund@ragnatech.se>
+ */
+
+#ifndef __RTSN_H__
+#define __RTSN_H__
+
+#include <linux/types.h>
+
+#define AXIBMI 0x0000
+#define TSNMHD 0x1000
+#define RMSO 0x2000
+#define RMRO 0x3800
+
+enum rtsn_reg {
+ AXIWC = AXIBMI + 0x0000,
+ AXIRC = AXIBMI + 0x0004,
+ TDPC0 = AXIBMI + 0x0010,
+ TFT = AXIBMI + 0x0090,
+ TATLS0 = AXIBMI + 0x00a0,
+ TATLS1 = AXIBMI + 0x00a4,
+ TATLR = AXIBMI + 0x00a8,
+ RATLS0 = AXIBMI + 0x00b0,
+ RATLS1 = AXIBMI + 0x00b4,
+ RATLR = AXIBMI + 0x00b8,
+ TSA0 = AXIBMI + 0x00c0,
+ TSS0 = AXIBMI + 0x00c4,
+ TRCR0 = AXIBMI + 0x0140,
+ RIDAUAS0 = AXIBMI + 0x0180,
+ RR = AXIBMI + 0x0200,
+ TATS = AXIBMI + 0x0210,
+ TATSR0 = AXIBMI + 0x0214,
+ TATSR1 = AXIBMI + 0x0218,
+ TATSR2 = AXIBMI + 0x021c,
+ RATS = AXIBMI + 0x0220,
+ RATSR0 = AXIBMI + 0x0224,
+ RATSR1 = AXIBMI + 0x0228,
+ RATSR2 = AXIBMI + 0x022c,
+ RIDASM0 = AXIBMI + 0x0240,
+ RIDASAM0 = AXIBMI + 0x0244,
+ RIDACAM0 = AXIBMI + 0x0248,
+ EIS0 = AXIBMI + 0x0300,
+ EIE0 = AXIBMI + 0x0304,
+ EID0 = AXIBMI + 0x0308,
+ EIS1 = AXIBMI + 0x0310,
+ EIE1 = AXIBMI + 0x0314,
+ EID1 = AXIBMI + 0x0318,
+ TCEIS0 = AXIBMI + 0x0340,
+ TCEIE0 = AXIBMI + 0x0344,
+ TCEID0 = AXIBMI + 0x0348,
+ RFSEIS0 = AXIBMI + 0x04c0,
+ RFSEIE0 = AXIBMI + 0x04c4,
+ RFSEID0 = AXIBMI + 0x04c8,
+ RFEIS0 = AXIBMI + 0x0540,
+ RFEIE0 = AXIBMI + 0x0544,
+ RFEID0 = AXIBMI + 0x0548,
+ RCEIS0 = AXIBMI + 0x05c0,
+ RCEIE0 = AXIBMI + 0x05c4,
+ RCEID0 = AXIBMI + 0x05c8,
+ RIDAOIS = AXIBMI + 0x0640,
+ RIDAOIE = AXIBMI + 0x0644,
+ RIDAOID = AXIBMI + 0x0648,
+ TSFEIS = AXIBMI + 0x06c0,
+ TSFEIE = AXIBMI + 0x06c4,
+ TSFEID = AXIBMI + 0x06c8,
+ TSCEIS = AXIBMI + 0x06d0,
+ TSCEIE = AXIBMI + 0x06d4,
+ TSCEID = AXIBMI + 0x06d8,
+ DIS = AXIBMI + 0x0b00,
+ DIE = AXIBMI + 0x0b04,
+ DID = AXIBMI + 0x0b08,
+ TDIS0 = AXIBMI + 0x0b10,
+ TDIE0 = AXIBMI + 0x0b14,
+ TDID0 = AXIBMI + 0x0b18,
+ RDIS0 = AXIBMI + 0x0b90,
+ RDIE0 = AXIBMI + 0x0b94,
+ RDID0 = AXIBMI + 0x0b98,
+ TSDIS = AXIBMI + 0x0c10,
+ TSDIE = AXIBMI + 0x0c14,
+ TSDID = AXIBMI + 0x0c18,
+ GPOUT = AXIBMI + 0x6000,
+
+ OCR = TSNMHD + 0x0000,
+ OSR = TSNMHD + 0x0004,
+ SWR = TSNMHD + 0x0008,
+ SIS = TSNMHD + 0x000c,
+ GIS = TSNMHD + 0x0010,
+ GIE = TSNMHD + 0x0014,
+ GID = TSNMHD + 0x0018,
+ TIS1 = TSNMHD + 0x0020,
+ TIE1 = TSNMHD + 0x0024,
+ TID1 = TSNMHD + 0x0028,
+ TIS2 = TSNMHD + 0x0030,
+ TIE2 = TSNMHD + 0x0034,
+ TID2 = TSNMHD + 0x0038,
+ RIS = TSNMHD + 0x0040,
+ RIE = TSNMHD + 0x0044,
+ RID = TSNMHD + 0x0048,
+ TGC1 = TSNMHD + 0x0050,
+ TGC2 = TSNMHD + 0x0054,
+ TFS0 = TSNMHD + 0x0060,
+ TCF0 = TSNMHD + 0x0070,
+ TCR1 = TSNMHD + 0x0080,
+ TCR2 = TSNMHD + 0x0084,
+ TCR3 = TSNMHD + 0x0088,
+ TCR4 = TSNMHD + 0x008c,
+ TMS0 = TSNMHD + 0x0090,
+ TSR1 = TSNMHD + 0x00b0,
+ TSR2 = TSNMHD + 0x00b4,
+ TSR3 = TSNMHD + 0x00b8,
+ TSR4 = TSNMHD + 0x00bc,
+ TSR5 = TSNMHD + 0x00c0,
+ RGC = TSNMHD + 0x00d0,
+ RDFCR = TSNMHD + 0x00d4,
+ RCFCR = TSNMHD + 0x00d8,
+ REFCNCR = TSNMHD + 0x00dc,
+ RSR1 = TSNMHD + 0x00e0,
+ RSR2 = TSNMHD + 0x00e4,
+ RSR3 = TSNMHD + 0x00e8,
+ TCIS = TSNMHD + 0x01e0,
+ TCIE = TSNMHD + 0x01e4,
+ TCID = TSNMHD + 0x01e8,
+ TPTPC = TSNMHD + 0x01f0,
+ TTML = TSNMHD + 0x01f4,
+ TTJ = TSNMHD + 0x01f8,
+ TCC = TSNMHD + 0x0200,
+ TCS = TSNMHD + 0x0204,
+ TGS = TSNMHD + 0x020c,
+ TACST0 = TSNMHD + 0x0210,
+ TACST1 = TSNMHD + 0x0214,
+ TACST2 = TSNMHD + 0x0218,
+ TALIT0 = TSNMHD + 0x0220,
+ TALIT1 = TSNMHD + 0x0224,
+ TALIT2 = TSNMHD + 0x0228,
+ TAEN0 = TSNMHD + 0x0230,
+ TAEN1 = TSNMHD + 0x0234,
+ TASFE = TSNMHD + 0x0240,
+ TACLL0 = TSNMHD + 0x0250,
+ TACLL1 = TSNMHD + 0x0254,
+ TACLL2 = TSNMHD + 0x0258,
+ CACC = TSNMHD + 0x0260,
+ CCS = TSNMHD + 0x0264,
+ CAIV0 = TSNMHD + 0x0270,
+ CAUL0 = TSNMHD + 0x0290,
+ TOCST0 = TSNMHD + 0x0300,
+ TOCST1 = TSNMHD + 0x0304,
+ TOCST2 = TSNMHD + 0x0308,
+ TOLIT0 = TSNMHD + 0x0310,
+ TOLIT1 = TSNMHD + 0x0314,
+ TOLIT2 = TSNMHD + 0x0318,
+ TOEN0 = TSNMHD + 0x0320,
+ TOEN1 = TSNMHD + 0x0324,
+ TOSFE = TSNMHD + 0x0330,
+ TCLR0 = TSNMHD + 0x0340,
+ TCLR1 = TSNMHD + 0x0344,
+ TCLR2 = TSNMHD + 0x0348,
+ TSMS = TSNMHD + 0x0350,
+ COCC = TSNMHD + 0x0360,
+ COIV0 = TSNMHD + 0x03b0,
+ COUL0 = TSNMHD + 0x03d0,
+ QSTMACU0 = TSNMHD + 0x0400,
+ QSTMACD0 = TSNMHD + 0x0404,
+ QSTMAMU0 = TSNMHD + 0x0408,
+ QSTMAMD0 = TSNMHD + 0x040c,
+ QSFTVL0 = TSNMHD + 0x0410,
+ QSFTVLM0 = TSNMHD + 0x0414,
+ QSFTMSD0 = TSNMHD + 0x0418,
+ QSFTGMI0 = TSNMHD + 0x041c,
+ QSFTLS = TSNMHD + 0x0600,
+ QSFTLIS = TSNMHD + 0x0604,
+ QSFTLIE = TSNMHD + 0x0608,
+ QSFTLID = TSNMHD + 0x060c,
+ QSMSMC = TSNMHD + 0x0610,
+ QSGTMC = TSNMHD + 0x0614,
+ QSEIS = TSNMHD + 0x0618,
+ QSEIE = TSNMHD + 0x061c,
+ QSEID = TSNMHD + 0x0620,
+ QGACST0 = TSNMHD + 0x0630,
+ QGACST1 = TSNMHD + 0x0634,
+ QGACST2 = TSNMHD + 0x0638,
+ QGALIT1 = TSNMHD + 0x0640,
+ QGALIT2 = TSNMHD + 0x0644,
+ QGAEN0 = TSNMHD + 0x0648,
+ QGAEN1 = TSNMHD + 0x074c,
+ QGIGS = TSNMHD + 0x0650,
+ QGGC = TSNMHD + 0x0654,
+ QGATL0 = TSNMHD + 0x0664,
+ QGATL1 = TSNMHD + 0x0668,
+ QGATL2 = TSNMHD + 0x066c,
+ QGOCST0 = TSNMHD + 0x0670,
+ QGOCST1 = TSNMHD + 0x0674,
+ QGOCST2 = TSNMHD + 0x0678,
+ QGOLIT0 = TSNMHD + 0x067c,
+ QGOLIT1 = TSNMHD + 0x0680,
+ QGOLIT2 = TSNMHD + 0x0684,
+ QGOEN0 = TSNMHD + 0x0688,
+ QGOEN1 = TSNMHD + 0x068c,
+ QGTRO = TSNMHD + 0x0690,
+ QGTR1 = TSNMHD + 0x0694,
+ QGTR2 = TSNMHD + 0x0698,
+ QGFSMS = TSNMHD + 0x069c,
+ QTMIS = TSNMHD + 0x06e0,
+ QTMIE = TSNMHD + 0x06e4,
+ QTMID = TSNMHD + 0x06e8,
+ QMEC = TSNMHD + 0x0700,
+ QMMC = TSNMHD + 0x0704,
+ QRFDC = TSNMHD + 0x0708,
+ QYFDC = TSNMHD + 0x070c,
+ QVTCMC0 = TSNMHD + 0x0710,
+ QMCBSC0 = TSNMHD + 0x0750,
+ QMCIRC0 = TSNMHD + 0x0790,
+ QMEBSC0 = TSNMHD + 0x07d0,
+ QMEIRC0 = TSNMHD + 0x0710,
+ QMCFC = TSNMHD + 0x0850,
+ QMEIS = TSNMHD + 0x0860,
+ QMEIE = TSNMHD + 0x0864,
+ QMEID = TSNMHD + 0x086c,
+ QSMFC0 = TSNMHD + 0x0870,
+ QMSPPC0 = TSNMHD + 0x08b0,
+ QMSRPC0 = TSNMHD + 0x08f0,
+ QGPPC0 = TSNMHD + 0x0930,
+ QGRPC0 = TSNMHD + 0x0950,
+ QMDPC0 = TSNMHD + 0x0970,
+ QMGPC0 = TSNMHD + 0x09b0,
+ QMYPC0 = TSNMHD + 0x09f0,
+ QMRPC0 = TSNMHD + 0x0a30,
+ MQSTMACU = TSNMHD + 0x0a70,
+ MQSTMACD = TSNMHD + 0x0a74,
+ MQSTMAMU = TSNMHD + 0x0a78,
+ MQSTMAMD = TSNMHD + 0x0a7c,
+ MQSFTVL = TSNMHD + 0x0a80,
+ MQSFTVLM = TSNMHD + 0x0a84,
+ MQSFTMSD = TSNMHD + 0x0a88,
+ MQSFTGMI = TSNMHD + 0x0a8c,
+
+ CFCR0 = RMSO + 0x0800,
+ FMSCR = RMSO + 0x0c10,
+
+ MMC = RMRO + 0x0000,
+ MPSM = RMRO + 0x0010,
+ MPIC = RMRO + 0x0014,
+ MTFFC = RMRO + 0x0020,
+ MTPFC = RMRO + 0x0024,
+ MTATC0 = RMRO + 0x0040,
+ MRGC = RMRO + 0x0080,
+ MRMAC0 = RMRO + 0x0084,
+ MRMAC1 = RMRO + 0x0088,
+ MRAFC = RMRO + 0x008c,
+ MRSCE = RMRO + 0x0090,
+ MRSCP = RMRO + 0x0094,
+ MRSCC = RMRO + 0x0098,
+ MRFSCE = RMRO + 0x009c,
+ MRFSCP = RMRO + 0x00a0,
+ MTRC = RMRO + 0x00a4,
+ MPFC = RMRO + 0x0100,
+ MLVC = RMRO + 0x0340,
+ MEEEC = RMRO + 0x0350,
+ MLBC = RMRO + 0x0360,
+ MGMR = RMRO + 0x0400,
+ MMPFTCT = RMRO + 0x0410,
+ MAPFTCT = RMRO + 0x0414,
+ MPFRCT = RMRO + 0x0418,
+ MFCICT = RMRO + 0x041c,
+ MEEECT = RMRO + 0x0420,
+ MEIS = RMRO + 0x0500,
+ MEIE = RMRO + 0x0504,
+ MEID = RMRO + 0x0508,
+ MMIS0 = RMRO + 0x0510,
+ MMIE0 = RMRO + 0x0514,
+ MMID0 = RMRO + 0x0518,
+ MMIS1 = RMRO + 0x0520,
+ MMIE1 = RMRO + 0x0524,
+ MMID1 = RMRO + 0x0528,
+ MMIS2 = RMRO + 0x0530,
+ MMIE2 = RMRO + 0x0534,
+ MMID2 = RMRO + 0x0538,
+ MXMS = RMRO + 0x0600,
+
+};
+
+/* AXIBMI */
+#define RR_RATRR BIT(0)
+#define RR_TATRR BIT(1)
+#define RR_RST (RR_RATRR | RR_TATRR)
+#define RR_RST_COMPLETE 0x03
+
+#define AXIWC_DEFAULT 0xffff
+#define AXIRC_DEFAULT 0xffff
+
+#define TATLS0_TEDE BIT(1)
+#define TATLS0_TATEN_SHIFT 24
+#define TATLS0_TATEN(n) ((n) << TATLS0_TATEN_SHIFT)
+#define TATLR_TATL BIT(31)
+
+#define RATLS0_RETS BIT(2)
+#define RATLS0_REDE BIT(3)
+#define RATLS0_RATEN_SHIFT 24
+#define RATLS0_RATEN(n) ((n) << RATLS0_RATEN_SHIFT)
+#define RATLR_RATL BIT(31)
+
+#define DIE_DID_TDICX(n) BIT((n))
+#define DIE_DID_RDICX(n) BIT((n) + 8)
+#define TDIE_TDID_TDX(n) BIT(n)
+#define RDIE_RDID_RDX(n) BIT(n)
+#define TDIS_TDS(n) BIT(n)
+#define RDIS_RDS(n) BIT(n)
+
+/* MHD */
+#define OSR_OPS 0x07
+#define SWR_SWR BIT(0)
+
+#define TGC1_TQTM_SFM 0xff00
+#define TGC1_STTV_DEFAULT 0x03
+
+#define TMS_MFS_MAX 0x2800
+
+/* RMAC System */
+#define CFCR_SDID(n) ((n) << 16)
+#define FMSCR_FMSIE(n) ((n) << 0)
+
+/* RMAC */
+#define MPIC_PIS_MASK GENMASK(1, 0)
+#define MPIC_PIS_MII 0
+#define MPIC_PIS_RMII 0x01
+#define MPIC_PIS_GMII 0x02
+#define MPIC_PIS_RGMII 0x03
+#define MPIC_LSC_SHIFT 2
+#define MPIC_LSC_MASK GENMASK(3, MPIC_LSC_SHIFT)
+#define MPIC_LSC_10M (0 << MPIC_LSC_SHIFT)
+#define MPIC_LSC_100M (0x01 << MPIC_LSC_SHIFT)
+#define MPIC_LSC_1G (0x02 << MPIC_LSC_SHIFT)
+#define MPIC_PSMCS_SHIFT 16
+#define MPIC_PSMCS_MASK GENMASK(21, MPIC_PSMCS_SHIFT)
+#define MPIC_PSMCS_DEFAULT (0x0a << MPIC_PSMCS_SHIFT)
+#define MPIC_PSMHT_SHIFT 24
+#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT)
+#define MPIC_PSMHT_DEFAULT (0x07 << MPIC_PSMHT_SHIFT)
+
+#define MLVC_PASE BIT(8)
+#define MLVC_PSE BIT(16)
+#define MLVC_PLV BIT(17)
+
+#define MPSM_PSME BIT(0)
+#define MPSM_PSMAD BIT(1)
+#define MPSM_PDA_SHIFT 3
+#define MPSM_PDA_MASK GENMASK(7, 3)
+#define MPSM_PDA(n) (((n) << MPSM_PDA_SHIFT) & MPSM_PDA_MASK)
+#define MPSM_PRA_SHIFT 8
+#define MPSM_PRA_MASK GENMASK(12, 8)
+#define MPSM_PRA(n) (((n) << MPSM_PRA_SHIFT) & MPSM_PRA_MASK)
+#define MPSM_PRD_SHIFT 16
+#define MPSM_PRD_SET(n) ((n) << MPSM_PRD_SHIFT)
+#define MPSM_PRD_GET(n) ((n) >> MPSM_PRD_SHIFT)
+
+#define GPOUT_RDM BIT(13)
+#define GPOUT_TDM BIT(14)
+
+/* RTSN */
+#define RTSN_INTERVAL_US 1000
+#define RTSN_TIMEOUT_US 1000000
+
+#define TX_NUM_CHAINS 1
+#define RX_NUM_CHAINS 1
+
+#define TX_CHAIN_SIZE 1024
+#define RX_CHAIN_SIZE 1024
+
+#define TX_CHAIN_IDX 0
+#define RX_CHAIN_IDX 0
+
+#define TX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * TX_CHAIN_IDX)
+#define RX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * RX_CHAIN_IDX)
+
+#define PKT_BUF_SZ 1584
+#define RTSN_ALIGN 128
+
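+/* Operating modes, as programmed in OCR and reported in OSR. */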
+enum rtsn_mode {
+ OCR_OPC_DISABLE,
+ OCR_OPC_CONFIG,
+ OCR_OPC_OPERATION,
+};
+
+/* Descriptors */
+enum RX_DS_CC_BIT {
+ RX_DS = 0x0fff, /* Data size */
+ RX_TR = 0x1000, /* Truncation indication */
+ RX_EI = 0x2000, /* Error indication */
+ RX_PS = 0xc000, /* Padding selection */
+};
+
+enum TX_FS_TAGL_BIT {
+ TX_DS = 0x0fff, /* Data size */
+ TX_TAGL = 0xf000, /* Frame tag LSBs */
+};
+
+enum DIE_DT {
+ /* HW/SW arbitration */
+ DT_FEMPTY_IS = 0x10,
+ DT_FEMPTY_IC = 0x20,
+ DT_FEMPTY_ND = 0x30,
+ DT_FEMPTY = 0x40,
+ DT_FEMPTY_START = 0x50,
+ DT_FEMPTY_MID = 0x60,
+ DT_FEMPTY_END = 0x70,
+
+ /* Frame data */
+ DT_FSINGLE = 0x80,
+ DT_FSTART = 0x90,
+ DT_FMID = 0xa0,
+ DT_FEND = 0xb0,
+
+ /* Chain control */
+ DT_LEMPTY = 0xc0,
+ DT_EEMPTY = 0xd0,
+ DT_LINK = 0xe0,
+ DT_EOS = 0xf0,
+
+ DT_MASK = 0xf0,
+ D_DIE = 0x08,
+};
+
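+/* Hardware descriptor layouts (hence __packed); the ts/ext variants
+ * append timestamp and extended info words.
+ */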
+struct rtsn_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+} __packed;
+
+struct rtsn_ts_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le32 ts_nsec;
+ __le32 ts_sec;
+} __packed;
+
+struct rtsn_ext_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le64 info1;
+} __packed;
+
+struct rtsn_ext_ts_desc {
+ __le16 info_ds;
+ __u8 info;
+ u8 die_dt;
+ __le32 dptr;
+ __le64 info1;
+ __le32 ts_nsec;
+ __le32 ts_sec;
+} __packed;
+
+enum EXT_INFO_DS_BIT {
+ TXC = 0x4000,
+};
+
+#endif
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 0786eb0da391..7a25903e35c3 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2624,7 +2624,7 @@ static int sh_eth_change_mtu(struct net_device *ndev, int new_mtu)
if (netif_running(ndev))
return -EBUSY;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
netdev_update_features(ndev);
return 0;
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 755db89db909..e097ce3e69ea 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1967,7 +1967,7 @@ static int rocker_port_change_mtu(struct net_device *dev, int new_mtu)
rocker_port_stop(dev);
netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu);
if (err)
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index ecbe3994f2b1..12c8396b6942 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1804,7 +1804,7 @@ static int sxgbe_set_features(struct net_device *dev,
*/
static int sxgbe_change_mtu(struct net_device *dev, int new_mtu)
{
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!netif_running(dev))
return 0;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 8fa6c0e9195b..7d69302ffa0a 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1396,7 +1396,7 @@ static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx)
efx_mcdi_filter_table_reset_mc_allocations(efx);
nic_data->must_restore_piobufs = true;
efx_ef10_forget_old_piobufs(efx);
- efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
nic_data->must_probe_vswitching = true;
diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c
index cf55202b3a7b..896ffca4aee2 100644
--- a/drivers/net/ethernet/sfc/ef100_ethtool.c
+++ b/drivers/net/ethernet/sfc/ef100_ethtool.c
@@ -59,8 +59,12 @@ const struct ethtool_ops ef100_ethtool_ops = {
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
+ .create_rxfh_context = efx_ethtool_create_rxfh_context,
+ .modify_rxfh_context = efx_ethtool_modify_rxfh_context,
+ .remove_rxfh_context = efx_ethtool_remove_rxfh_context,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index e9d9de8e648a..6f1a01ded7d4 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -299,7 +299,7 @@ static int efx_probe_nic(struct efx_nic *efx)
if (efx->n_channels > 1)
netdev_rss_key_fill(efx->rss_context.rx_hash_key,
sizeof(efx->rss_context.rx_hash_key));
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
+ efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);
/* Initialise the interrupt moderation settings */
efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 48d3623735ba..7a6cab883d66 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -158,7 +158,7 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
}
/* RSS contexts */
-static inline bool efx_rss_active(struct efx_rss_context *ctx)
+static inline bool efx_rss_active(struct efx_rss_context_priv *ctx)
{
return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
}
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 551f890db90a..13cf647051af 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -302,7 +302,7 @@ int efx_change_mtu(struct net_device *net_dev, int new_mtu)
efx_stop_all(efx);
mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
efx_mac_reconfigure(efx, true);
mutex_unlock(&efx->mac_lock);
@@ -714,7 +714,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&efx->net_dev->ethtool->rss_lock);
efx->type->fini(efx);
}
@@ -777,7 +777,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (efx->type->rx_restore_rss_contexts)
efx->type->rx_restore_rss_contexts(efx);
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
efx->type->filter_table_restore(efx);
up_write(&efx->filter_sem);
@@ -793,7 +793,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
fail:
efx->port_initialized = false;
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
@@ -1000,9 +1000,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev)
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
efx->rx_packet_ts_offset =
efx->type->rx_ts_offset - efx->type->rx_prefix_size;
- INIT_LIST_HEAD(&efx->rss_context.list);
- efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- mutex_init(&efx->rss_lock);
+ efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
efx->vport_id = EVB_PORT_ID_ASSIGNED;
spin_lock_init(&efx->stats_lock);
efx->vi_stride = EFX_DEFAULT_VI_STRIDE;
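Two things in this file: efx_change_mtu() above now publishes the new MTU with WRITE_ONCE() since dev->mtu can be read locklessly outside mac_lock (the falcon and siena copies below get the same treatment), and the per-device rss_lock plus context list are dropped in favour of the state the ethtool core now owns. The assumed reader side of the MTU store pairs like so:

    WRITE_ONCE(net_dev->mtu, new_mtu);		/* writer, under mac_lock */

    unsigned int mtu = READ_ONCE(net_dev->mtu);	/* lockless reader elsewhere,
    						 * avoids torn/re-fetched loads */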
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 37c69c8d90b1..7c887160e2ef 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -226,7 +226,7 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
}
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
@@ -268,8 +268,12 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
+ .rxfh_priv_size = sizeof(struct efx_rss_context_priv),
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
+ .create_rxfh_context = efx_ethtool_create_rxfh_context,
+ .modify_rxfh_context = efx_ethtool_modify_rxfh_context,
+ .remove_rxfh_context = efx_ethtool_remove_rxfh_context,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index 7d5e5db4eac5..6ded44b86052 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -820,10 +820,10 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev,
return 0;
case ETHTOOL_GRXFH: {
- struct efx_rss_context *ctx = &efx->rss_context;
+ struct efx_rss_context_priv *ctx = &efx->rss_context.priv;
__u64 data;
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&net_dev->ethtool->rss_lock);
if (info->flow_type & FLOW_RSS && info->rss_context) {
ctx = efx_find_rss_context_entry(efx, info->rss_context);
if (!ctx) {
@@ -864,7 +864,7 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev,
out_setdata_unlock:
info->data = data;
out_unlock:
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&net_dev->ethtool->rss_lock);
return rc;
}
@@ -1163,46 +1163,14 @@ u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
return efx->type->rx_hash_key_size;
}
-static int efx_ethtool_get_rxfh_context(struct net_device *net_dev,
- struct ethtool_rxfh_param *rxfh)
-{
- struct efx_nic *efx = efx_netdev_priv(net_dev);
- struct efx_rss_context *ctx;
- int rc = 0;
-
- if (!efx->type->rx_pull_rss_context_config)
- return -EOPNOTSUPP;
-
- mutex_lock(&efx->rss_lock);
- ctx = efx_find_rss_context_entry(efx, rxfh->rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
- rc = efx->type->rx_pull_rss_context_config(efx, ctx);
- if (rc)
- goto out_unlock;
-
- rxfh->hfunc = ETH_RSS_HASH_TOP;
- if (rxfh->indir)
- memcpy(rxfh->indir, ctx->rx_indir_table,
- sizeof(ctx->rx_indir_table));
- if (rxfh->key)
- memcpy(rxfh->key, ctx->rx_hash_key,
- efx->type->rx_hash_key_size);
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
-}
-
int efx_ethtool_get_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
int rc;
- if (rxfh->rss_context)
- return efx_ethtool_get_rxfh_context(net_dev, rxfh);
+ if (rxfh->rss_context) /* core should never call us for these */
+ return -EINVAL;
rc = efx->type->rx_pull_rss_config(efx);
if (rc)
@@ -1218,68 +1186,85 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev,
return 0;
}
-static int efx_ethtool_set_rxfh_context(struct net_device *net_dev,
- struct ethtool_rxfh_param *rxfh,
- struct netlink_ext_ack *extack)
+int efx_ethtool_modify_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
{
struct efx_nic *efx = efx_netdev_priv(net_dev);
- u32 *rss_context = &rxfh->rss_context;
- struct efx_rss_context *ctx;
- u32 *indir = rxfh->indir;
- bool allocated = false;
- u8 *key = rxfh->key;
- int rc;
+ struct efx_rss_context_priv *priv;
+ const u32 *indir = rxfh->indir;
+ const u8 *key = rxfh->key;
- if (!efx->type->rx_push_rss_context_config)
+ if (!efx->type->rx_push_rss_context_config) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "NIC type does not support custom contexts");
return -EOPNOTSUPP;
-
- mutex_lock(&efx->rss_lock);
-
- if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
- if (rxfh->rss_delete) {
- /* alloc + delete == Nothing to do */
- rc = -EINVAL;
- goto out_unlock;
- }
- ctx = efx_alloc_rss_context_entry(efx);
- if (!ctx) {
- rc = -ENOMEM;
- goto out_unlock;
- }
- ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- /* Initialise indir table and key to defaults */
- efx_set_default_rx_indir_table(efx, ctx);
- netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key));
- allocated = true;
- } else {
- ctx = efx_find_rss_context_entry(efx, *rss_context);
- if (!ctx) {
- rc = -ENOENT;
- goto out_unlock;
- }
}
-
- if (rxfh->rss_delete) {
- /* delete this context */
- rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL);
- if (!rc)
- efx_free_rss_context_entry(ctx);
- goto out_unlock;
+ /* Hash function is Toeplitz, cannot be changed */
+ if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ rxfh->hfunc != ETH_RSS_HASH_TOP) {
+ NL_SET_ERR_MSG_MOD(extack, "Only Toeplitz hash is supported");
+ return -EOPNOTSUPP;
}
+ priv = ethtool_rxfh_context_priv(ctx);
+
if (!key)
- key = ctx->rx_hash_key;
+ key = ethtool_rxfh_context_key(ctx);
if (!indir)
- indir = ctx->rx_indir_table;
+ indir = ethtool_rxfh_context_indir(ctx);
- rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key);
- if (rc && allocated)
- efx_free_rss_context_entry(ctx);
- else
- *rss_context = ctx->user_id;
-out_unlock:
- mutex_unlock(&efx->rss_lock);
- return rc;
+ return efx->type->rx_push_rss_context_config(efx, priv, indir, key,
+ false);
+}
+
+int efx_ethtool_create_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_rss_context_priv *priv;
+
+ priv = ethtool_rxfh_context_priv(ctx);
+
+ priv->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ priv->rx_hash_udp_4tuple = false;
+ /* Generate default indir table and/or key if not specified.
+ * We use ctx as a place to store these; this is fine because
+ * we're doing a create, so if we fail then the ctx will just
+ * be deleted.
+ */
+ if (!rxfh->indir)
+ efx_set_default_rx_indir_table(efx, ethtool_rxfh_context_indir(ctx));
+ if (!rxfh->key)
+ netdev_rss_key_fill(ethtool_rxfh_context_key(ctx),
+ ctx->key_size);
+ if (rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE)
+ ctx->hfunc = ETH_RSS_HASH_TOP;
+ if (rxfh->input_xfrm == RXH_XFRM_NO_CHANGE)
+ ctx->input_xfrm = 0;
+ return efx_ethtool_modify_rxfh_context(net_dev, ctx, rxfh, extack);
+}
+
+int efx_ethtool_remove_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack)
+{
+ struct efx_nic *efx = efx_netdev_priv(net_dev);
+ struct efx_rss_context_priv *priv;
+
+ if (!efx->type->rx_push_rss_context_config) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "NIC type does not support custom contexts");
+ return -EOPNOTSUPP;
+ }
+
+ priv = ethtool_rxfh_context_priv(ctx);
+ return efx->type->rx_push_rss_context_config(efx, priv, NULL, NULL,
+ true);
}
int efx_ethtool_set_rxfh(struct net_device *net_dev,
@@ -1295,8 +1280,9 @@ int efx_ethtool_set_rxfh(struct net_device *net_dev,
rxfh->hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (rxfh->rss_context)
- return efx_ethtool_set_rxfh_context(net_dev, rxfh, extack);
+ /* Custom contexts should use new API */
+ if (WARN_ON_ONCE(rxfh->rss_context))
+ return -EIO;
if (!indir && !key)
return 0;
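efx now relies on the core's storage for both the table and the key; the three accessors are the only contract for reaching into the core-allocated blob (its internal layout is the ethtool core's business). In brief:

    /* Sketch: the regions behind one ethtool_rxfh_context allocation. */
    struct efx_rss_context_priv *priv = ethtool_rxfh_context_priv(ctx);
    u32 *indir = ethtool_rxfh_context_indir(ctx);  /* ctx->indir_size entries */
    u8 *key = ethtool_rxfh_context_key(ctx);       /* ctx->key_size bytes */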
diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h
index a680e5980213..fc52e891637d 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.h
+++ b/drivers/net/ethernet/sfc/ethtool_common.h
@@ -49,6 +49,18 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev,
int efx_ethtool_set_rxfh(struct net_device *net_dev,
struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack);
+int efx_ethtool_create_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+int efx_ethtool_modify_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ const struct ethtool_rxfh_param *rxfh,
+ struct netlink_ext_ack *extack);
+int efx_ethtool_remove_rxfh_context(struct net_device *net_dev,
+ struct ethtool_rxfh_context *ctx,
+ u32 rss_context,
+ struct netlink_ext_ack *extack);
int efx_ethtool_reset(struct net_device *net_dev, u32 *flags);
int efx_ethtool_get_module_eeprom(struct net_device *net_dev,
struct ethtool_eeprom *ee,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 1cb32aedd89c..8925745f1c17 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -2125,7 +2125,7 @@ static int ef4_change_mtu(struct net_device *net_dev, int new_mtu)
ef4_stop_all(efx);
mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
ef4_mac_reconfigure(efx);
mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c
index 7a1c9337081b..36114ce88034 100644
--- a/drivers/net/ethernet/sfc/falcon/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon/falcon.c
@@ -367,7 +367,7 @@ static const struct i2c_algo_bit_data falcon_i2c_bit_operations = {
.getsda = falcon_getsda,
.getscl = falcon_getscl,
.udelay = 5,
- /* Wait up to 50 ms for slave to let us pull SCL high */
+ /* Wait up to 50 ms for target to let us pull SCL high */
.timeout = DIV_ROUND_UP(HZ, 20),
};
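DIV_ROUND_UP(HZ, 20) is one twentieth of a second (the 50 ms from the comment) expressed in jiffies, rounded up so the timeout never falls short of 50 ms whatever CONFIG_HZ is:

    /* HZ=100  -> DIV_ROUND_UP(100, 20)  = 5 jiffies  (50 ms)
     * HZ=250  -> DIV_ROUND_UP(250, 20)  = 13 jiffies (52 ms, rounded up)
     * HZ=1000 -> DIV_ROUND_UP(1000, 20) = 50 jiffies (50 ms)
     */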
diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h
index 9f413474bd9f..ada6e036fd97 100644
--- a/drivers/net/ethernet/sfc/falcon/nic.h
+++ b/drivers/net/ethernet/sfc/falcon/nic.h
@@ -297,7 +297,7 @@ static inline struct falcon_board *falcon_board(struct ef4_nic *efx)
return &data->board;
}
-struct ethtool_ts_info;
+struct kernel_ethtool_ts_info;
extern const struct ef4_nic_type falcon_a1_nic_type;
extern const struct ef4_nic_type falcon_b0_nic_type;
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c
index 4ff6586116ee..6ef96292909a 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.c
+++ b/drivers/net/ethernet/sfc/mcdi_filters.c
@@ -194,7 +194,7 @@ efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx,
static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
const struct efx_filter_spec *spec,
efx_dword_t *inbuf, u64 handle,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
bool replacing)
{
u32 flags = spec->flags;
@@ -245,7 +245,7 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx,
static int efx_mcdi_filter_push(struct efx_nic *efx,
const struct efx_filter_spec *spec, u64 *handle,
- struct efx_rss_context *ctx, bool replacing)
+ struct efx_rss_context_priv *ctx, bool replacing)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
@@ -345,9 +345,9 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
bool replace_equal)
{
DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+ struct efx_rss_context_priv *ctx = NULL;
struct efx_mcdi_filter_table *table;
struct efx_filter_spec *saved_spec;
- struct efx_rss_context *ctx = NULL;
unsigned int match_pri, hash;
unsigned int priv_flags;
bool rss_locked = false;
@@ -380,12 +380,12 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&efx->net_dev->ethtool->rss_lock);
rss_locked = true;
if (spec->rss_context)
ctx = efx_find_rss_context_entry(efx, spec->rss_context);
else
- ctx = &efx->rss_context;
+ ctx = &efx->rss_context.priv;
if (!ctx) {
rc = -ENOENT;
goto out_unlock;
@@ -548,7 +548,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx,
out_unlock:
if (rss_locked)
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
up_write(&table->lock);
return rc;
}
@@ -611,13 +611,13 @@ static int efx_mcdi_filter_remove_internal(struct efx_nic *efx,
new_spec.priority = EFX_FILTER_PRI_AUTO;
new_spec.flags = (EFX_FILTER_FLAG_RX |
- (efx_rss_active(&efx->rss_context) ?
+ (efx_rss_active(&efx->rss_context.priv) ?
EFX_FILTER_FLAG_RX_RSS : 0));
new_spec.dmaq_id = 0;
new_spec.rss_context = 0;
rc = efx_mcdi_filter_push(efx, &new_spec,
&table->entry[filter_idx].handle,
- &efx->rss_context,
+ &efx->rss_context.priv,
true);
if (rc == 0)
@@ -764,7 +764,7 @@ static int efx_mcdi_filter_insert_addr_list(struct efx_nic *efx,
ids = vlan->uc;
}
- filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
+ filter_flags = efx_rss_active(&efx->rss_context.priv) ? EFX_FILTER_FLAG_RX_RSS : 0;
/* Insert/renew filters */
for (i = 0; i < addr_count; i++) {
@@ -833,7 +833,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx,
int rc;
u16 *id;
- filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0;
+ filter_flags = efx_rss_active(&efx->rss_context.priv) ? EFX_FILTER_FLAG_RX_RSS : 0;
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
@@ -1375,8 +1375,8 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
struct efx_mcdi_filter_table *table = efx->filter_state;
unsigned int invalid_filters = 0, failed = 0;
struct efx_mcdi_filter_vlan *vlan;
+ struct efx_rss_context_priv *ctx;
struct efx_filter_spec *spec;
- struct efx_rss_context *ctx;
unsigned int filter_idx;
u32 mcdi_flags;
int match_pri;
@@ -1388,7 +1388,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
return;
down_write(&table->lock);
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&efx->net_dev->ethtool->rss_lock);
for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) {
spec = efx_mcdi_filter_entry_spec(table, filter_idx);
@@ -1407,7 +1407,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx)
if (spec->rss_context)
ctx = efx_find_rss_context_entry(efx, spec->rss_context);
else
- ctx = &efx->rss_context;
+ ctx = &efx->rss_context.priv;
if (spec->flags & EFX_FILTER_FLAG_RX_RSS) {
if (!ctx) {
netif_warn(efx, drv, efx->net_dev,
@@ -1444,7 +1444,7 @@ not_restored:
}
}
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
up_write(&table->lock);
/*
@@ -1861,7 +1861,8 @@ out_unlock:
RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\
RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN)
-int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags)
+static int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context,
+ u32 *flags)
{
/*
* Firmware had a bug (sfc bug 61952) where it would not actually
@@ -1909,8 +1910,8 @@ int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags)
* Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we
* just need to set the UDP ports flags (for both IP versions).
*/
-void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
- struct efx_rss_context *ctx)
+static void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
+ struct efx_rss_context_priv *ctx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN);
u32 flags;
@@ -1931,7 +1932,7 @@ void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
}
static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
unsigned *context_size)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
@@ -2032,25 +2033,26 @@ void efx_mcdi_rx_free_indir_table(struct efx_nic *efx)
{
int rc;
- if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) {
- rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id);
+ if (efx->rss_context.priv.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) {
+ rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.priv.context_id);
WARN_ON(rc != 0);
}
- efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
}
static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx,
unsigned *context_size)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context,
- context_size);
+ int rc = efx_mcdi_filter_alloc_rss_context(efx, false,
+ &efx->rss_context.priv,
+ context_size);
if (rc != 0)
return rc;
table->rx_rss_context_exclusive = false;
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
+ efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);
return 0;
}
@@ -2058,26 +2060,27 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
const u32 *rx_indir_table,
const u8 *key)
{
+ u32 old_rx_rss_context = efx->rss_context.priv.context_id;
struct efx_mcdi_filter_table *table = efx->filter_state;
- u32 old_rx_rss_context = efx->rss_context.context_id;
int rc;
- if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
+ if (efx->rss_context.priv.context_id == EFX_MCDI_RSS_CONTEXT_INVALID ||
!table->rx_rss_context_exclusive) {
- rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context,
- NULL);
+ rc = efx_mcdi_filter_alloc_rss_context(efx, true,
+ &efx->rss_context.priv,
+ NULL);
if (rc == -EOPNOTSUPP)
return rc;
else if (rc != 0)
goto fail1;
}
- rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.context_id,
- rx_indir_table, key);
+ rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.priv.context_id,
+ rx_indir_table, key);
if (rc != 0)
goto fail2;
- if (efx->rss_context.context_id != old_rx_rss_context &&
+ if (efx->rss_context.priv.context_id != old_rx_rss_context &&
old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID)
WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0);
table->rx_rss_context_exclusive = true;
@@ -2091,9 +2094,9 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx,
return 0;
fail2:
- if (old_rx_rss_context != efx->rss_context.context_id) {
- WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id) != 0);
- efx->rss_context.context_id = old_rx_rss_context;
+ if (old_rx_rss_context != efx->rss_context.priv.context_id) {
+ WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.priv.context_id) != 0);
+ efx->rss_context.priv.context_id = old_rx_rss_context;
}
fail1:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
@@ -2101,33 +2104,28 @@ fail1:
}
int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
const u32 *rx_indir_table,
- const u8 *key)
+ const u8 *key, bool delete)
{
int rc;
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
+ WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) {
+ if (delete)
+ /* already wasn't in HW, nothing to do */
+ return 0;
rc = efx_mcdi_filter_alloc_rss_context(efx, true, ctx, NULL);
if (rc)
return rc;
}
- if (!rx_indir_table) /* Delete this context */
+ if (delete) /* Delete this context */
return efx_mcdi_filter_free_rss_context(efx, ctx->context_id);
- rc = efx_mcdi_filter_populate_rss_table(efx, ctx->context_id,
- rx_indir_table, key);
- if (rc)
- return rc;
-
- memcpy(ctx->rx_indir_table, rx_indir_table,
- sizeof(efx->rss_context.rx_indir_table));
- memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size);
-
- return 0;
+ return efx_mcdi_filter_populate_rss_table(efx, ctx->context_id,
+ rx_indir_table, key);
}
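Deletion is now an explicit flag rather than the old convention of passing a NULL indirection table, and the function no longer mirrors indir/key into driver memory: the ethtool core keeps the authoritative copies. The resulting calling convention (names from this patch):

    /* program or update a context's table and key: */
    rc = efx_mcdi_rx_push_rss_context_config(efx, priv, indir, key, false);
    /* tear a context down (indir/key ignored): */
    rc = efx_mcdi_rx_push_rss_context_config(efx, priv, NULL, NULL, true);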
int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
@@ -2139,16 +2137,16 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
size_t outlen;
int rc, i;
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
+ WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
- if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID)
+ if (ctx->priv.context_id == EFX_MCDI_RSS_CONTEXT_INVALID)
return -ENOENT;
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
- ctx->context_id);
+ ctx->priv.context_id);
BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) !=
MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
@@ -2164,7 +2162,7 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
- ctx->context_id);
+ ctx->priv.context_id);
BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) !=
MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
@@ -2186,35 +2184,42 @@ int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx)
{
int rc;
- mutex_lock(&efx->rss_lock);
+ mutex_lock(&efx->net_dev->ethtool->rss_lock);
rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context);
- mutex_unlock(&efx->rss_lock);
+ mutex_unlock(&efx->net_dev->ethtool->rss_lock);
return rc;
}
void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
{
struct efx_mcdi_filter_table *table = efx->filter_state;
- struct efx_rss_context *ctx;
+ struct ethtool_rxfh_context *ctx;
+ unsigned long context;
int rc;
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
+ WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
if (!table->must_restore_rss_contexts)
return;
- list_for_each_entry(ctx, &efx->rss_context.list, list) {
+ xa_for_each(&efx->net_dev->ethtool->rss_ctx, context, ctx) {
+ struct efx_rss_context_priv *priv;
+ u32 *indir;
+ u8 *key;
+
+ priv = ethtool_rxfh_context_priv(ctx);
/* previous NIC RSS context is gone */
- ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
+ priv->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
/* so try to allocate a new one */
- rc = efx_mcdi_rx_push_rss_context_config(efx, ctx,
- ctx->rx_indir_table,
- ctx->rx_hash_key);
+ indir = ethtool_rxfh_context_indir(ctx);
+ key = ethtool_rxfh_context_key(ctx);
+ rc = efx_mcdi_rx_push_rss_context_config(efx, priv, indir, key,
+ false);
if (rc)
netif_warn(efx, probe, efx->net_dev,
- "failed to restore RSS context %u, rc=%d"
+ "failed to restore RSS context %lu, rc=%d"
"; RSS filters may fail to be applied\n",
- ctx->user_id, rc);
+ context, rc);
}
table->must_restore_rss_contexts = false;
}
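The driver-local linked list is gone; contexts now live in net_dev->ethtool->rss_ctx, an xarray keyed by the user-visible rss_context ID, which is why the log format switches to %lu. A minimal sketch of the iteration pattern under the core's lock (the example_* name is illustrative):

    static void example_walk_rss_contexts(struct net_device *net_dev)
    {
    	struct ethtool_rxfh_context *ctx;
    	unsigned long id;

    	mutex_lock(&net_dev->ethtool->rss_lock);
    	xa_for_each(&net_dev->ethtool->rss_ctx, id, ctx) {
    		/* id is the ethtool rss_context ID; ctx holds the
    		 * core-owned indir table, key and driver priv blob.
    		 */
    	}
    	mutex_unlock(&net_dev->ethtool->rss_lock);
    }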
@@ -2276,7 +2281,7 @@ int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
{
if (user)
return -EOPNOTSUPP;
- if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID)
+ if (efx->rss_context.priv.context_id != EFX_MCDI_RSS_CONTEXT_INVALID)
return 0;
return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL);
}
@@ -2295,7 +2300,7 @@ int efx_mcdi_push_default_indir_table(struct efx_nic *efx,
efx_mcdi_rx_free_indir_table(efx);
if (rss_spread > 1) {
- efx_set_default_rx_indir_table(efx, &efx->rss_context);
+ efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table);
rc = efx->type->rx_push_rss_config(efx, false,
efx->rss_context.rx_indir_table, NULL);
}
diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h
index c0d6558b9fd2..11b9f87ed9e1 100644
--- a/drivers/net/ethernet/sfc/mcdi_filters.h
+++ b/drivers/net/ethernet/sfc/mcdi_filters.h
@@ -145,9 +145,9 @@ void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid);
void efx_mcdi_rx_free_indir_table(struct efx_nic *efx);
int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
const u32 *rx_indir_table,
- const u8 *key);
+ const u8 *key, bool delete);
int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
const u32 *rx_indir_table,
const u8 *key);
@@ -161,10 +161,6 @@ int efx_mcdi_push_default_indir_table(struct efx_nic *efx,
int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx);
int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx,
struct efx_rss_context *ctx);
-int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context,
- u32 *flags);
-void efx_mcdi_set_rss_context_flags(struct efx_nic *efx,
- struct efx_rss_context *ctx);
void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx);
static inline void efx_mcdi_update_rx_scatter(struct efx_nic *efx)
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f2dd7feb0e0c..b85c51cbe7f9 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -737,21 +737,24 @@ struct vfdi_status;
/* The reserved RSS context value */
#define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff
/**
- * struct efx_rss_context - A user-defined RSS context for filtering
- * @list: node of linked list on which this struct is stored
+ * struct efx_rss_context_priv - driver private data for an RSS context
* @context_id: the RSS_CONTEXT_ID returned by MC firmware, or
* %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC.
- * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID.
- * @user_id: the rss_context ID exposed to userspace over ethtool.
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
+ */
+struct efx_rss_context_priv {
+ u32 context_id;
+ bool rx_hash_udp_4tuple;
+};
+
+/**
+ * struct efx_rss_context - an RSS context
+ * @priv: hardware-specific state
* @rx_hash_key: Toeplitz hash key for this RSS context
* @indir_table: Indirection table for this RSS context
*/
struct efx_rss_context {
- struct list_head list;
- u32 context_id;
- u32 user_id;
- bool rx_hash_udp_4tuple;
+ struct efx_rss_context_priv priv;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
};
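Splitting the struct this way means the default context embeds exactly the blob the ethtool core allocates for custom contexts, so filter and MCDI code can take a struct efx_rss_context_priv * without caring where it came from. A hypothetical helper showing the two sources (example_* is illustrative, not part of the patch):

    static struct efx_rss_context_priv *
    example_rss_priv(struct efx_nic *efx, struct ethtool_rxfh_context *ctx)
    {
    	/* NULL ctx selects the driver's default context */
    	return ctx ? ethtool_rxfh_context_priv(ctx) : &efx->rss_context.priv;
    }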
@@ -883,9 +886,7 @@ struct efx_mae;
* @rx_packet_ts_offset: Offset of timestamp from start of packet data
* (valid only if channel->sync_timestamps_enabled; always negative)
* @rx_scatter: Scatter mode enabled for receives
- * @rss_context: Main RSS context. Its @list member is the head of the list of
- * RSS contexts created by user requests
- * @rss_lock: Protects custom RSS context software state in @rss_context.list
+ * @rss_context: Main RSS context.
* @vport_id: The function's vport ID, only relevant for PFs
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
@@ -1052,7 +1053,6 @@ struct efx_nic {
int rx_packet_ts_offset;
bool rx_scatter;
struct efx_rss_context rss_context;
- struct mutex rss_lock;
u32 vport_id;
unsigned int_error_count;
@@ -1416,9 +1416,9 @@ struct efx_nic_type {
const u32 *rx_indir_table, const u8 *key);
int (*rx_pull_rss_config)(struct efx_nic *efx);
int (*rx_push_rss_context_config)(struct efx_nic *efx,
- struct efx_rss_context *ctx,
+ struct efx_rss_context_priv *ctx,
const u32 *rx_indir_table,
- const u8 *key);
+ const u8 *key, bool delete);
int (*rx_pull_rss_context_config)(struct efx_nic *efx,
struct efx_rss_context *ctx);
void (*rx_restore_rss_contexts)(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index c3bffbf0ba2b..6fd2fdbaa418 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1864,7 +1864,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct kernel_hwtstamp_config *i
return 0;
}
-void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info)
+void efx_ptp_get_ts_info(struct efx_nic *efx, struct kernel_ethtool_ts_info *ts_info)
{
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_nic *primary = efx->primary;
diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h
index 2f30dbb490d2..6946203499ef 100644
--- a/drivers/net/ethernet/sfc/ptp.h
+++ b/drivers/net/ethernet/sfc/ptp.h
@@ -12,7 +12,7 @@
#include <linux/net_tstamp.h>
#include "net_driver.h"
-struct ethtool_ts_info;
+struct kernel_ethtool_ts_info;
int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel);
void efx_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_ptp_channel(struct efx_nic *efx);
@@ -23,7 +23,8 @@ int efx_ptp_set_ts_config(struct efx_nic *efx,
struct netlink_ext_ack *extack);
int efx_ptp_get_ts_config(struct efx_nic *efx,
struct kernel_hwtstamp_config *config);
-void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
+void efx_ptp_get_ts_info(struct efx_nic *efx,
+ struct kernel_ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
int efx_ptp_get_mode(struct efx_nic *efx);
int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
index dcd901eccfc8..0b7dc75c40f9 100644
--- a/drivers/net/ethernet/sfc/rx_common.c
+++ b/drivers/net/ethernet/sfc/rx_common.c
@@ -557,69 +557,25 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
napi_gro_frags(napi);
}
-/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because
- * (a) this is an infrequent control-plane operation and (b) n is small (max 64)
- */
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx)
+struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx,
+ u32 id)
{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx, *new;
- u32 id = 1; /* Don't use zero, that refers to the master RSS context */
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
+ struct ethtool_rxfh_context *ctx;
- /* Search for first gap in the numbering */
- list_for_each_entry(ctx, head, list) {
- if (ctx->user_id != id)
- break;
- id++;
- /* Check for wrap. If this happens, we have nearly 2^32
- * allocated RSS contexts, which seems unlikely.
- */
- if (WARN_ON_ONCE(!id))
- return NULL;
- }
+ WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock));
- /* Create the new entry */
- new = kmalloc(sizeof(*new), GFP_KERNEL);
- if (!new)
+ ctx = xa_load(&efx->net_dev->ethtool->rss_ctx, id);
+ if (!ctx)
return NULL;
- new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID;
- new->rx_hash_udp_4tuple = false;
-
- /* Insert the new entry into the gap */
- new->user_id = id;
- list_add_tail(&new->list, &ctx->list);
- return new;
-}
-
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id)
-{
- struct list_head *head = &efx->rss_context.list;
- struct efx_rss_context *ctx;
-
- WARN_ON(!mutex_is_locked(&efx->rss_lock));
-
- list_for_each_entry(ctx, head, list)
- if (ctx->user_id == id)
- return ctx;
- return NULL;
-}
-
-void efx_free_rss_context_entry(struct efx_rss_context *ctx)
-{
- list_del(&ctx->list);
- kfree(ctx);
+ return ethtool_rxfh_context_priv(ctx);
}
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx)
+void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir)
{
size_t i;
- for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
- ctx->rx_indir_table[i] =
- ethtool_rxfh_indir_default(i, efx->rss_spread);
+ for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); i++)
+ indir[i] = ethtool_rxfh_indir_default(i, efx->rss_spread);
}
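ethtool_rxfh_indir_default(i, n) is simply i % n, so the default table round-robins table entries across the active RX queues:

    /* rss_spread = 4 yields indir[] = { 0, 1, 2, 3, 0, 1, 2, 3, ... }
     * across all 128 entries.
     */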
/**
diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h
index fbd2769307f9..75fa84192362 100644
--- a/drivers/net/ethernet/sfc/rx_common.h
+++ b/drivers/net/ethernet/sfc/rx_common.h
@@ -84,11 +84,9 @@ void
efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
unsigned int n_frags, u8 *eh, __wsum csum);
-struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx);
-struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id);
-void efx_free_rss_context_entry(struct efx_rss_context *ctx);
-void efx_set_default_rx_indir_table(struct efx_nic *efx,
- struct efx_rss_context *ctx);
+struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx,
+ u32 id);
+void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir);
bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
bool efx_filter_spec_equal(const struct efx_filter_spec *left,
diff --git a/drivers/net/ethernet/sfc/siena/efx_common.c b/drivers/net/ethernet/sfc/siena/efx_common.c
index 88e5bc347a44..cf195162e270 100644
--- a/drivers/net/ethernet/sfc/siena/efx_common.c
+++ b/drivers/net/ethernet/sfc/siena/efx_common.c
@@ -306,7 +306,7 @@ int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu)
efx_siena_stop_all(efx);
mutex_lock(&efx->mac_lock);
- net_dev->mtu = new_mtu;
+ WRITE_ONCE(net_dev->mtu, new_mtu);
efx_siena_mac_reconfigure(efx, true);
mutex_unlock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c
index 14dd3893bdef..4c182d4edfc2 100644
--- a/drivers/net/ethernet/sfc/siena/ethtool.c
+++ b/drivers/net/ethernet/sfc/siena/ethtool.c
@@ -226,7 +226,7 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
}
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct efx_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c
index 4b5e2f0ba350..c473a4b6dd44 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.c
+++ b/drivers/net/ethernet/sfc/siena/ptp.c
@@ -1780,7 +1780,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx,
}
void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_nic *primary = efx->primary;
diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h
index 6352f84424f6..b6133e7c5608 100644
--- a/drivers/net/ethernet/sfc/siena/ptp.h
+++ b/drivers/net/ethernet/sfc/siena/ptp.h
@@ -12,7 +12,7 @@
#include <linux/net_tstamp.h>
#include "net_driver.h"
-struct ethtool_ts_info;
+struct kernel_ethtool_ts_info;
void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx);
struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx);
int efx_siena_ptp_set_ts_config(struct efx_nic *efx,
@@ -21,7 +21,7 @@ int efx_siena_ptp_set_ts_config(struct efx_nic *efx,
int efx_siena_ptp_get_ts_config(struct efx_nic *efx,
struct kernel_hwtstamp_config *config);
void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
- struct ethtool_ts_info *ts_info);
+ struct kernel_ethtool_ts_info *ts_info);
bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
int efx_siena_ptp_get_mode(struct efx_nic *efx);
int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 82e8891a619a..0d93164988fc 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -273,11 +273,10 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
match->value.ip_firstfrag = fm.key->flags & FLOW_DIS_FIRST_FRAG;
match->mask.ip_firstfrag = true;
}
- if (fm.mask->flags & ~(FLOW_DIS_IS_FRAGMENT | FLOW_DIS_FIRST_FRAG)) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on control.flags %#x",
- fm.mask->flags);
+ if (!flow_rule_is_supp_control_flags(FLOW_DIS_IS_FRAGMENT |
+ FLOW_DIS_FIRST_FRAG,
+ fm.mask->flags, extack))
return -EOPNOTSUPP;
- }
}
if (dissector->used_keys &
~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
@@ -388,11 +387,8 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx,
struct flow_match_control fm;
flow_rule_match_enc_control(rule, &fm);
- if (fm.mask->flags) {
- NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on enc_control.flags %#x",
- fm.mask->flags);
+ if (flow_rule_has_enc_control_flags(fm.mask->flags, extack))
return -EOPNOTSUPP;
- }
if (!IS_ALL_ONES(fm.mask->addr_type)) {
NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported enc addr_type mask %u (key %u)",
fm.mask->addr_type,
diff --git a/drivers/net/ethernet/sis/Kconfig b/drivers/net/ethernet/sis/Kconfig
index 775d76d9890e..7e498bdbca73 100644
--- a/drivers/net/ethernet/sis/Kconfig
+++ b/drivers/net/ethernet/sis/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_SIS
config SIS900
tristate "SiS 900/7016 PCI Fast Ethernet Adapter support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
select MII
help
@@ -35,7 +35,7 @@ config SIS900
config SIS190
tristate "SiS190/SiS191 gigabit ethernet support"
- depends on PCI
+ depends on PCI && HAS_IOPORT
select CRC32
select MII
help
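The added HAS_IOPORT dependency reflects that these devices are programmed through port I/O; on configurations without port I/O the inb()/outb() family is not available and the drivers cannot build. Sketch of the access pattern being gated (the register offset is hypothetical):

    u8 v = inb(ioaddr + 0x10);	/* port-I/O read; needs HAS_IOPORT */
    outb(v | 0x01, ioaddr + 0x10);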
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index cb7fec226cab..85b850372efe 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -2273,7 +2273,7 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map)
* (which seems to be different from the ifport(pcmcia) definition) */
switch(map->port){
case IF_PORT_UNKNOWN: /* use auto here */
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
/* we are going to change the media type, so the Link
* will be temporarily down and we need to reflect that
* here. When the Link comes up again, it will be
@@ -2294,7 +2294,7 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map)
break;
case IF_PORT_10BASET: /* 10BaseT */
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
/* we are going to change the media type, so the Link
* will be temporarily down and we need to reflect that
@@ -2315,7 +2315,7 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map)
case IF_PORT_100BASET: /* 100BaseT */
case IF_PORT_100BASETX: /* 100BaseTx */
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
/* we are going to change the media type, so the Link
* will be temporarily down and we need to reflect that
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 5f22a8a4d27b..13ce9086a9ca 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -54,7 +54,7 @@ config SMC91X
config PCMCIA_SMC91C92
tristate "SMC 91Cxx PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
select CRC32
select MII
help
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index af661c65ffe2..e2e7b1c68563 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -1501,6 +1501,7 @@ static void smc_set_multicast_list(struct net_device *dev)
#ifdef MODULE
static struct net_device *devSMC9194;
+MODULE_DESCRIPTION("SMC 9194 Ethernet driver");
MODULE_LICENSE("GPL");
module_param_hw(io, int, ioport, 0);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 29bb19f42de9..86e3ec25df07 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1595,7 +1595,7 @@ static int s9k_config(struct net_device *dev, struct ifmap *map)
return -EOPNOTSUPP;
else if (map->port > 2)
return -EINVAL;
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
smc_reset(dev);
}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 78ff3af7911a..907498848028 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1574,12 +1574,8 @@ smc_ethtool_set_link_ksettings(struct net_device *dev,
(cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI))
return -EINVAL;
-// lp->port = cmd->base.port;
lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;
-// if (netif_running(dev))
-// smc_set_port(dev);
-
ret = 0;
}
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 46eee747c699..38aa4374e813 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -142,22 +142,22 @@ static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg,
#define SMC_CAN_USE_32BIT 0
#define SMC_NOWAIT 1
-static inline void mcf_insw(void *a, unsigned char *p, int l)
+static inline void mcf_insw(void __iomem *a, unsigned char *p, int l)
{
u16 *wp = (u16 *) p;
while (l-- > 0)
*wp++ = readw(a);
}
-static inline void mcf_outsw(void *a, unsigned char *p, int l)
+static inline void mcf_outsw(void __iomem *a, unsigned char *p, int l)
{
u16 *wp = (u16 *) p;
while (l-- > 0)
writew(*wp++, a);
}
-#define SMC_inw(a, r) _swapw(readw((a) + (r)))
-#define SMC_outw(lp, v, a, r) writew(_swapw(v), (a) + (r))
+#define SMC_inw(a, r) ioread16be((a) + (r))
+#define SMC_outw(lp, v, a, r) iowrite16be(v, (a) + (r))
#define SMC_insw(a, r, p, l) mcf_insw(a + r, p, l)
#define SMC_outsw(a, r, p, l) mcf_outsw(a + r, p, l)
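ioread16be()/iowrite16be() state the intended byte order directly instead of open-coding it. Under the asm-generic accessor definitions (an assumption; m68k may define these itself), both forms collapse to the same raw access on this big-endian ColdFire:

    /* _swapw(readw(a)): readw() is little-endian ordered, so it swaps on
     *                    a big-endian CPU, and _swapw() swaps back -> raw.
     * ioread16be(a):     big-endian ordered accessor -> raw access on a
     *                    big-endian CPU.
     */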
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 4ec61f1ee71a..05cc07b8f48c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -142,6 +142,18 @@ config DWMAC_ROCKCHIP
This selects the Rockchip RK3288 SoC glue layer support for
the stmmac device driver.
+config DWMAC_RZN1
+ tristate "Renesas RZ/N1 dwmac support"
+ default ARCH_RZN1
+ depends on OF && (ARCH_RZN1 || COMPILE_TEST)
+ select PCS_RZN1_MIIC
+ help
+ Support for Ethernet controller on Renesas RZ/N1 SoC family.
+
+ This selects the Renesas RZ/N1 SoC glue layer support for
+ the stmmac device driver. This support can make use of a custom MII
+ converter PCS device.
+
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_INTEL_SOCFPGA
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 26cad4344701..c2f0e91f6bf8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
+obj-$(CONFIG_DWMAC_RZN1) += dwmac-rzn1.o
obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STARFIVE) += dwmac-starfive.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 3b7d4ac1e7be..cd36ff4da68c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -271,8 +271,6 @@ struct stmmac_safety_stats {
/* PCS defines */
#define STMMAC_PCS_RGMII (1 << 0)
#define STMMAC_PCS_SGMII (1 << 1)
-#define STMMAC_PCS_TBI (1 << 2)
-#define STMMAC_PCS_RTBI (1 << 3)
#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
@@ -594,7 +592,7 @@ struct mac_device_info {
const struct stmmac_mmc_ops *mmc;
const struct stmmac_est_ops *est;
struct dw_xpcs *xpcs;
- struct phylink_pcs *lynx_pcs; /* Lynx external PCS */
+ struct phylink_pcs *phylink_pcs;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
void __iomem *pcsr; /* vpointer to device CSRs */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
index 60283543ffc8..83ad7c7935e3 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -248,7 +248,7 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
priv->plat->max_speed = 2500;
priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
- priv->plat->mdio_bus_data->xpcs_an_inband = false;
+ priv->plat->mdio_bus_data->default_an_inband = false;
} else {
priv->plat->max_speed = 1000;
}
@@ -390,10 +390,11 @@ static int intel_crosststamp(ktime_t *device,
*device = ns_to_ktime(ptp_time);
read_unlock_irqrestore(&priv->ptp_lock, flags);
get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
- *system = convert_art_to_tsc(art_time);
+ system->cycles = art_time;
}
system->cycles *= intel_priv->crossts_adj;
+ system->cs_id = CSID_X86_ART;
priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN;
return 0;
@@ -443,6 +444,16 @@ static void common_default_data(struct plat_stmmacenet_data *plat)
plat->rx_queues_cfg[0].pkt_route = 0x0;
}
+static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv,
+ phy_interface_t interface)
+{
+	/* plat->mdio_bus_data->pcs_mask has been set, so there
+ * should always be an XPCS. The original code would always
+ * return this if present.
+ */
+ return &priv->hw->xpcs->pcs;
+}
+
static int intel_mgbe_common_data(struct pci_dev *pdev,
struct plat_stmmacenet_data *plat)
{
@@ -585,19 +596,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev,
/* Intel mgbe SGMII interface uses pcs-xcps */
if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
- plat->mdio_bus_data->has_xpcs = true;
- plat->mdio_bus_data->xpcs_an_inband = true;
- }
-
- /* For fixed-link setup, we clear xpcs_an_inband */
- if (fwnode) {
- struct fwnode_handle *fixed_node;
-
- fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
- if (fixed_node)
- plat->mdio_bus_data->xpcs_an_inband = false;
-
- fwnode_handle_put(fixed_node);
+ plat->mdio_bus_data->pcs_mask = BIT(INTEL_MGBE_XPCS_ADDR);
+ plat->mdio_bus_data->default_an_inband = true;
+ plat->select_pcs = intel_mgbe_select_pcs;
}
/* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
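pcs_mask replaces the old has_xpcs flag: rather than a boolean plus a hard-coded probe address, the glue names the MDIO address(es) hosting PCS devices, and the intel_mgbe_select_pcs() hook added above hands phylink the XPCS per interface mode. The fixed-link override goes away because default_an_inband is, as the name says, only a default. A mask may cover several addresses if a design needs it (the second address here is hypothetical):

    plat->mdio_bus_data->pcs_mask = BIT(INTEL_MGBE_XPCS_ADDR) | BIT(0x17);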
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 281687d7083b..4ba15873d5b1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -171,6 +171,9 @@ static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
div = get_clk_div_rgmii(gmac, speed);
clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
@@ -410,6 +413,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
val |= NSS_COMMON_GMAC_CTL_CSYS_REQ;
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
break;
case PHY_INTERFACE_MODE_SGMII:
@@ -425,6 +431,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
break;
@@ -442,6 +451,9 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
switch (gmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
break;
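The four RGMII_* interface modes differ only in which side inserts the clock skews; at the level of NSS clock gating and interface selection they are identical, hence the same case additions in each hunk of this file. An equivalent guard, should the switches ever grow unwieldy:

    if (phy_interface_mode_is_rgmii(gmac->phy_mode)) {
    	/* common RGMII clock/iface setup */
    }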
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
index e254b21fdb59..901a3c1959fa 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -21,6 +21,7 @@
#define RGMII_IO_MACRO_CONFIG2 0x1C
#define RGMII_IO_MACRO_DEBUG1 0x20
#define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28
+#define EMAC_WRAPPER_SGMII_PHY_CNTRL1 0xf4
/* RGMII_IO_MACRO_CONFIG fields */
#define RGMII_CONFIG_FUNC_CLK_EN BIT(30)
@@ -79,6 +80,9 @@
#define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14)
#define ETHQOS_MAC_CTRL_PORT_SEL BIT(15)
+/* EMAC_WRAPPER_SGMII_PHY_CNTRL1 bits */
+#define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN BIT(3)
+
#define SGMII_10M_RX_CLK_DVDR 0x31
struct ethqos_emac_por {
@@ -93,7 +97,9 @@ struct ethqos_emac_driver_data {
bool has_emac_ge_3;
const char *link_clk_name;
bool has_integrated_pcs;
+ u32 dma_addr_width;
struct dwmac4_addrs dwmac4_addrs;
+ bool needs_sgmii_loopback;
};
struct qcom_ethqos {
@@ -113,6 +119,7 @@ struct qcom_ethqos {
unsigned int num_por;
bool rgmii_config_loopback_en;
bool has_emac_ge_3;
+ bool needs_sgmii_loopback;
};
static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
@@ -190,8 +197,22 @@ ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed)
clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate);
}
+static void
+qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable)
+{
+ if (!ethqos->needs_sgmii_loopback ||
+ ethqos->phy_mode != PHY_INTERFACE_MODE_2500BASEX)
+ return;
+
+ rgmii_updatel(ethqos,
+ SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN,
+ enable ? SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN : 0,
+ EMAC_WRAPPER_SGMII_PHY_CNTRL1);
+}
+
static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
{
+ qcom_ethqos_set_sgmii_loopback(ethqos, true);
rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN,
RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
}
@@ -271,11 +292,13 @@ static const struct ethqos_emac_por emac_v4_0_0_por[] = {
static const struct ethqos_emac_driver_data emac_v4_0_0_data = {
.por = emac_v4_0_0_por,
- .num_por = ARRAY_SIZE(emac_v3_0_0_por),
+ .num_por = ARRAY_SIZE(emac_v4_0_0_por),
.rgmii_config_loopback_en = false,
.has_emac_ge_3 = true,
.link_clk_name = "phyaux",
.has_integrated_pcs = true,
+ .needs_sgmii_loopback = true,
+ .dma_addr_width = 36,
.dwmac4_addrs = {
.dma_chan = 0x00008100,
.dma_chan_offset = 0x1000,
@@ -605,6 +628,14 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos)
return 0;
}
+static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed)
+{
+ if (ethqos->serdes_speed != speed) {
+ phy_set_speed(ethqos->serdes_phy, speed);
+ ethqos->serdes_speed = speed;
+ }
+}
+
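The helper makes the serdes reprogramming idempotent and, in passing, straightens out the SPEED_10 branch below, which used to compare against SPEED_1000 yet pass ethqos->speed to phy_set_speed() while recording SPEED_1000. All link speeds now funnel through one call:

    ethqos_set_serdes_speed(ethqos, SPEED_2500);	/* 2.5G link */
    ethqos_set_serdes_speed(ethqos, SPEED_1000);	/* 1G/100M/10M links */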
/* On an interface toggle, MAC registers get reset.
 * Configure the MAC block for SGMII on ethernet PHY link up
*/
@@ -622,9 +653,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
- if (ethqos->serdes_speed != SPEED_2500)
- phy_set_speed(ethqos->serdes_phy, SPEED_2500);
- ethqos->serdes_speed = SPEED_2500;
+ ethqos_set_serdes_speed(ethqos, SPEED_2500);
stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0);
break;
case SPEED_1000:
@@ -632,16 +661,12 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_CONFIG2_RGMII_CLK_SEL_CFG,
RGMII_IO_MACRO_CONFIG2);
- if (ethqos->serdes_speed != SPEED_1000)
- phy_set_speed(ethqos->serdes_phy, SPEED_1000);
- ethqos->serdes_speed = SPEED_1000;
+ ethqos_set_serdes_speed(ethqos, SPEED_1000);
stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
case SPEED_100:
val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE;
- if (ethqos->serdes_speed != SPEED_1000)
- phy_set_speed(ethqos->serdes_phy, SPEED_1000);
- ethqos->serdes_speed = SPEED_1000;
+ ethqos_set_serdes_speed(ethqos, SPEED_1000);
stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
case SPEED_10:
@@ -651,9 +676,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR,
SGMII_10M_RX_CLK_DVDR),
RGMII_IO_MACRO_CONFIG);
- if (ethqos->serdes_speed != SPEED_1000)
- phy_set_speed(ethqos->serdes_phy, ethqos->speed);
- ethqos->serdes_speed = SPEED_1000;
+ ethqos_set_serdes_speed(ethqos, SPEED_1000);
stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0);
break;
}
@@ -663,6 +686,14 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos)
return val;
}
+static void qcom_ethqos_speed_mode_2500(struct net_device *ndev, void *data)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ priv->plat->max_speed = 2500;
+ priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
+}
+
static int ethqos_configure(struct qcom_ethqos *ethqos)
{
return ethqos->configure_func(ethqos);
@@ -672,6 +703,7 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mo
{
struct qcom_ethqos *ethqos = priv;
+ qcom_ethqos_set_sgmii_loopback(ethqos, false);
ethqos->speed = speed;
ethqos_update_link_clk(ethqos, speed);
ethqos_configure(ethqos);
@@ -785,6 +817,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
case PHY_INTERFACE_MODE_RGMII_TXID:
ethqos->configure_func = ethqos_configure_rgmii;
break;
+ case PHY_INTERFACE_MODE_2500BASEX:
+ plat_dat->speed_mode_2500 = qcom_ethqos_speed_mode_2500;
+ fallthrough;
case PHY_INTERFACE_MODE_SGMII:
ethqos->configure_func = ethqos_configure_sgmii;
break;
@@ -807,6 +842,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
ethqos->num_por = data->num_por;
ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en;
ethqos->has_emac_ge_3 = data->has_emac_ge_3;
+ ethqos->needs_sgmii_loopback = data->needs_sgmii_loopback;
ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii");
if (IS_ERR(ethqos->link_clk))
@@ -845,6 +881,8 @@ static int qcom_ethqos_probe(struct platform_device *pdev)
plat_dat->flags |= STMMAC_FLAG_RX_CLK_RUNS_IN_LPI;
if (data->has_integrated_pcs)
plat_dat->flags |= STMMAC_FLAG_HAS_INTEGRATED_PCS;
+ if (data->dma_addr_width)
+ plat_dat->host_dma_width = data->dma_addr_width;
if (ethqos->serdes_phy) {
plat_dat->serdes_powerup = qcom_ethqos_serdes_powerup;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 382e8de1255d..7ae04d8d291c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -12,10 +12,8 @@
#include <linux/clk.h>
#include <linux/phy.h>
#include <linux/of_net.h>
-#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/delay.h>
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c
new file mode 100644
index 000000000000..59a7bd560f96
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2024 Schneider-Electric
+ *
+ * Clément Léger <clement.leger@bootlin.com>
+ */
+
+#include <linux/of.h>
+#include <linux/pcs-rzn1-miic.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+
+#include "stmmac_platform.h"
+#include "stmmac.h"
+
+static int rzn1_dwmac_pcs_init(struct stmmac_priv *priv)
+{
+ struct device_node *np = priv->device->of_node;
+ struct device_node *pcs_node;
+ struct phylink_pcs *pcs;
+
+ pcs_node = of_parse_phandle(np, "pcs-handle", 0);
+
+ if (pcs_node) {
+ pcs = miic_create(priv->device, pcs_node);
+ of_node_put(pcs_node);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+
+ priv->hw->phylink_pcs = pcs;
+ }
+
+ return 0;
+}
+
+static void rzn1_dwmac_pcs_exit(struct stmmac_priv *priv)
+{
+ if (priv->hw->phylink_pcs)
+ miic_destroy(priv->hw->phylink_pcs);
+}
+
+static struct phylink_pcs *rzn1_dwmac_select_pcs(struct stmmac_priv *priv,
+ phy_interface_t interface)
+{
+ return priv->hw->phylink_pcs;
+}
+
+static int rzn1_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ plat_dat->bsp_priv = plat_dat;
+ plat_dat->pcs_init = rzn1_dwmac_pcs_init;
+ plat_dat->pcs_exit = rzn1_dwmac_pcs_exit;
+ plat_dat->select_pcs = rzn1_dwmac_select_pcs;
+
+ ret = stmmac_dvr_probe(dev, plat_dat, &stmmac_res);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct of_device_id rzn1_dwmac_match[] = {
+ { .compatible = "renesas,rzn1-gmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rzn1_dwmac_match);
+
+static struct platform_driver rzn1_dwmac_driver = {
+ .probe = rzn1_dwmac_probe,
+ .remove_new = stmmac_pltfr_remove,
+ .driver = {
+ .name = "rzn1-dwmac",
+ .of_match_table = rzn1_dwmac_match,
+ },
+};
+module_platform_driver(rzn1_dwmac_driver);
+
+MODULE_AUTHOR("Clément Léger <clement.leger@bootlin.com>");
+MODULE_DESCRIPTION("Renesas RZN1 DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 68f85e4605cb..fdb4c773ec98 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -379,6 +379,62 @@ static int socfpga_gen10_set_phy_mode(struct socfpga_dwmac *dwmac)
return 0;
}
+static int socfpga_dwmac_pcs_init(struct stmmac_priv *priv)
+{
+ struct socfpga_dwmac *dwmac = priv->plat->bsp_priv;
+ struct regmap_config pcs_regmap_cfg = {
+ .reg_bits = 16,
+ .val_bits = 16,
+ .reg_shift = REGMAP_UPSHIFT(1),
+ };
+ struct mdio_regmap_config mrc;
+ struct regmap *pcs_regmap;
+ struct phylink_pcs *pcs;
+ struct mii_bus *pcs_bus;
+
+ if (!dwmac->tse_pcs_base)
+ return 0;
+
+ pcs_regmap = devm_regmap_init_mmio(priv->device, dwmac->tse_pcs_base,
+ &pcs_regmap_cfg);
+ if (IS_ERR(pcs_regmap))
+ return PTR_ERR(pcs_regmap);
+
+ memset(&mrc, 0, sizeof(mrc));
+ mrc.regmap = pcs_regmap;
+ mrc.parent = priv->device;
+ mrc.valid_addr = 0x0;
+ mrc.autoscan = false;
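+ /* The TSE PCS is the only device on this regmap-backed bus and sits
+ * at address 0, hence the fixed valid_addr and disabled autoscan.
+ */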
+
+ /* Can't use ndev->name here because it will not have been initialised,
+ * and in any case, the user can rename network interfaces at runtime.
+ */
+ snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii",
+ dev_name(priv->device));
+ pcs_bus = devm_mdio_regmap_register(priv->device, &mrc);
+ if (IS_ERR(pcs_bus))
+ return PTR_ERR(pcs_bus);
+
+ pcs = lynx_pcs_create_mdiodev(pcs_bus, 0);
+ if (IS_ERR(pcs))
+ return PTR_ERR(pcs);
+
+ priv->hw->phylink_pcs = pcs;
+ return 0;
+}
+
+static void socfpga_dwmac_pcs_exit(struct stmmac_priv *priv)
+{
+ if (priv->hw->phylink_pcs)
+ lynx_pcs_destroy(priv->hw->phylink_pcs);
+}
+
+static struct phylink_pcs *socfpga_dwmac_select_pcs(struct stmmac_priv *priv,
+ phy_interface_t interface)
+{
+ return priv->hw->phylink_pcs;
+}
+
static int socfpga_dwmac_probe(struct platform_device *pdev)
{
struct plat_stmmacenet_data *plat_dat;
@@ -426,6 +482,9 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
dwmac->ops = ops;
plat_dat->bsp_priv = dwmac;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+ plat_dat->pcs_init = socfpga_dwmac_pcs_init;
+ plat_dat->pcs_exit = socfpga_dwmac_pcs_exit;
+ plat_dat->select_pcs = socfpga_dwmac_select_pcs;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
@@ -444,48 +503,6 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
if (ret)
goto err_dvr_remove;
- /* Create a regmap for the PCS so that it can be used by the PCS driver,
- * if we have such a PCS
- */
- if (dwmac->tse_pcs_base) {
- struct regmap_config pcs_regmap_cfg;
- struct mdio_regmap_config mrc;
- struct regmap *pcs_regmap;
- struct mii_bus *pcs_bus;
-
- memset(&pcs_regmap_cfg, 0, sizeof(pcs_regmap_cfg));
- memset(&mrc, 0, sizeof(mrc));
-
- pcs_regmap_cfg.reg_bits = 16;
- pcs_regmap_cfg.val_bits = 16;
- pcs_regmap_cfg.reg_shift = REGMAP_UPSHIFT(1);
-
- pcs_regmap = devm_regmap_init_mmio(&pdev->dev, dwmac->tse_pcs_base,
- &pcs_regmap_cfg);
- if (IS_ERR(pcs_regmap)) {
- ret = PTR_ERR(pcs_regmap);
- goto err_dvr_remove;
- }
-
- mrc.regmap = pcs_regmap;
- mrc.parent = &pdev->dev;
- mrc.valid_addr = 0x0;
- mrc.autoscan = false;
-
- snprintf(mrc.name, MII_BUS_ID_SIZE, "%s-pcs-mii", ndev->name);
- pcs_bus = devm_mdio_regmap_register(&pdev->dev, &mrc);
- if (IS_ERR(pcs_bus)) {
- ret = PTR_ERR(pcs_bus);
- goto err_dvr_remove;
- }
-
- stpriv->hw->lynx_pcs = lynx_pcs_create_mdiodev(pcs_bus, 0);
- if (IS_ERR(stpriv->hw->lynx_pcs)) {
- ret = PTR_ERR(stpriv->hw->lynx_pcs);
- goto err_dvr_remove;
- }
- }
-
return 0;
err_dvr_remove:
@@ -494,17 +511,6 @@ err_dvr_remove:
return ret;
}
-static void socfpga_dwmac_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *priv = netdev_priv(ndev);
- struct phylink_pcs *pcs = priv->hw->lynx_pcs;
-
- stmmac_pltfr_remove(pdev);
-
- lynx_pcs_destroy(pcs);
-}
-
#ifdef CONFIG_PM_SLEEP
static int socfpga_dwmac_resume(struct device *dev)
{
@@ -576,7 +582,7 @@ MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
static struct platform_driver socfpga_dwmac_driver = {
.probe = socfpga_dwmac_probe,
- .remove_new = socfpga_dwmac_remove,
+ .remove_new = stmmac_pltfr_remove,
.driver = {
.name = "socfpga-dwmac",
.pm = &socfpga_dwmac_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index c92dfc4ecf57..c1732955a697 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -53,12 +53,23 @@
#define SYSCFG_MCU_ETH_SEL_MII 0
#define SYSCFG_MCU_ETH_SEL_RMII 1
-/* STM32MP1 register definitions
+/* STM32MP2 register definitions */
+#define SYSCFG_MP2_ETH_MASK GENMASK(31, 0)
+
+#define SYSCFG_ETHCR_ETH_PTP_CLK_SEL BIT(2)
+#define SYSCFG_ETHCR_ETH_CLK_SEL BIT(1)
+#define SYSCFG_ETHCR_ETH_REF_CLK_SEL BIT(0)
+
+#define SYSCFG_ETHCR_ETH_SEL_MII 0
+#define SYSCFG_ETHCR_ETH_SEL_RGMII BIT(4)
+#define SYSCFG_ETHCR_ETH_SEL_RMII BIT(6)
+
+/* STM32MPx register definitions
*
* Below table summarizes the clock requirement and clock sources for
* supported phy interface modes.
* __________________________________________________________________________
- *|PHY_MODE | Normal | PHY wo crystal| PHY wo crystal |No 125Mhz from PHY|
+ *|PHY_MODE | Normal | PHY wo crystal| PHY wo crystal |No 125MHz from PHY|
*| | | 25MHz | 50MHz | |
* ---------------------------------------------------------------------------
*| MII | - | eth-ck | n/a | n/a |
@@ -90,6 +101,7 @@ struct stm32_dwmac {
int eth_ref_clk_sel_reg;
int irq_pwr_wakeup;
u32 mode_reg; /* MAC glue-logic mode register */
+ u32 mode_mask;
struct regmap *regmap;
u32 speed;
const struct stm32_ops *ops;
@@ -102,8 +114,9 @@ struct stm32_ops {
void (*resume)(struct stm32_dwmac *dwmac);
int (*parse_data)(struct stm32_dwmac *dwmac,
struct device *dev);
- u32 syscfg_eth_mask;
bool clk_rx_enable_in_suspend;
+ bool is_mp13, is_mp2;
+ u32 syscfg_clr_off;
};
static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume)
@@ -157,65 +170,190 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume)
return stm32_dwmac_clk_enable(dwmac, resume);
}
-static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
+static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat)
{
struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
- u32 reg = dwmac->mode_reg, clk_rate;
- int val;
- clk_rate = clk_get_rate(dwmac->clk_eth_ck);
- dwmac->enable_eth_ck = false;
switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
- if (clk_rate == ETH_CK_F_25M && dwmac->ext_phyclk)
- dwmac->enable_eth_ck = true;
- val = SYSCFG_PMCR_ETH_SEL_MII;
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
+ dwmac->enable_eth_ck = dwmac->ext_phyclk;
+ return 0;
+ case PHY_INTERFACE_MODE_GMII:
+ dwmac->enable_eth_ck = dwmac->eth_clk_sel_reg ||
+ dwmac->ext_phyclk;
+ return 0;
+ case PHY_INTERFACE_MODE_RMII:
+ dwmac->enable_eth_ck = dwmac->eth_ref_clk_sel_reg ||
+ dwmac->ext_phyclk;
+ return 0;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dwmac->enable_eth_ck = dwmac->eth_clk_sel_reg ||
+ dwmac->ext_phyclk;
+ return 0;
+ default:
+ dwmac->enable_eth_ck = false;
+ dev_err(dwmac->dev, "Mode %s not supported",
+ phy_modes(plat_dat->mac_interface));
+ return -EINVAL;
+ }
+}
+
+static int stm32mp1_validate_ethck_rate(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ const u32 clk_rate = clk_get_rate(dwmac->clk_eth_ck);
+
+ if (!dwmac->enable_eth_ck)
+ return 0;
+
+ switch (plat_dat->mac_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ if (clk_rate == ETH_CK_F_25M)
+ return 0;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M)
+ return 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M)
+ return 0;
+ break;
+ default:
+ break;
+ }
+
+ dev_err(dwmac->dev, "Mode %s does not match eth-ck frequency %d Hz",
+ phy_modes(plat_dat->mac_interface), clk_rate);
+ return -EINVAL;
+}
+
+static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ u32 reg = dwmac->mode_reg;
+ int val = 0;
+
+ switch (plat_dat->mac_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ /*
+ * STM32MP15xx supports both MII and GMII, STM32MP13xx MII only.
+ * SYSCFG_PMCSETR ETH_SELMII is present only on STM32MP15xx and
+ * acts as a selector between 0:GMII and 1:MII. As STM32MP13xx
+ * supports only MII, ETH_SELMII is not present.
+ */
+ if (!dwmac->ops->is_mp13) /* Select MII mode on STM32MP15xx */
+ val |= SYSCFG_PMCR_ETH_SEL_MII;
break;
case PHY_INTERFACE_MODE_GMII:
val = SYSCFG_PMCR_ETH_SEL_GMII;
- if (clk_rate == ETH_CK_F_25M &&
- (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
- dwmac->enable_eth_ck = true;
+ if (dwmac->enable_eth_ck)
val |= SYSCFG_PMCR_ETH_CLK_SEL;
- }
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n");
break;
case PHY_INTERFACE_MODE_RMII:
val = SYSCFG_PMCR_ETH_SEL_RMII;
- if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M) &&
- (dwmac->eth_ref_clk_sel_reg || dwmac->ext_phyclk)) {
- dwmac->enable_eth_ck = true;
+ if (dwmac->enable_eth_ck)
val |= SYSCFG_PMCR_ETH_REF_CLK_SEL;
- }
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
break;
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
val = SYSCFG_PMCR_ETH_SEL_RGMII;
- if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M) &&
- (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
- dwmac->enable_eth_ck = true;
+ if (dwmac->enable_eth_ck)
val |= SYSCFG_PMCR_ETH_CLK_SEL;
- }
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n");
break;
default:
- pr_debug("SYSCFG init : Do not manage %d interface\n",
- plat_dat->mac_interface);
+ dev_err(dwmac->dev, "Mode %s not supported",
+ phy_modes(plat_dat->mac_interface));
/* Do not manage other interfaces */
return -EINVAL;
}
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+
+ /* Shift value at correct ethernet MAC offset in SYSCFG_PMCSETR */
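+ /* Worked example with hypothetical masks: for mode_mask = GENMASK(11, 8)
+ * and SYSCFG_MP1_ETH_MASK = GENMASK(3, 0), ffs() returns 9 and 1, so
+ * val is shifted left by 8 bits into the DT-provided field.
+ */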
+ val <<= ffs(dwmac->mode_mask) - ffs(SYSCFG_MP1_ETH_MASK);
+
/* Need to update PMCCLRR (clear register) */
- regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
- dwmac->ops->syscfg_eth_mask);
+ regmap_write(dwmac->regmap, dwmac->ops->syscfg_clr_off,
+ dwmac->mode_mask);
/* Update PMCSETR (set register) */
return regmap_update_bits(dwmac->regmap, reg,
- dwmac->ops->syscfg_eth_mask, val);
+ dwmac->mode_mask, val);
+}
+
+static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ u32 reg = dwmac->mode_reg;
+ int val = 0;
+
+ switch (plat_dat->mac_interface) {
+ case PHY_INTERFACE_MODE_MII:
+ /* ETH_REF_CLK_SEL bit in SYSCFG register is not applicable in MII mode */
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val = SYSCFG_ETHCR_ETH_SEL_RMII;
+ if (dwmac->enable_eth_ck) {
+ /* Internal clock ETH_CLK of 50MHz from RCC is used */
+ val |= SYSCFG_ETHCR_ETH_REF_CLK_SEL;
+ }
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = SYSCFG_ETHCR_ETH_SEL_RGMII;
+ fallthrough;
+ case PHY_INTERFACE_MODE_GMII:
+ if (dwmac->enable_eth_ck) {
+ /* Internal clock ETH_CLK of 125MHz from RCC is used */
+ val |= SYSCFG_ETHCR_ETH_CLK_SEL;
+ }
+ break;
+ default:
+ dev_err(dwmac->dev, "Mode %s not supported",
+ phy_modes(plat_dat->mac_interface));
+ /* Do not manage other interfaces */
+ return -EINVAL;
+ }
+
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+
+ /* Select the PTP (IEEE 1588) clock from RCC (ck_ker_ethxptp) */
+ val |= SYSCFG_ETHCR_ETH_PTP_CLK_SEL;
+
+ /* Update ETHCR (set register) */
+ return regmap_update_bits(dwmac->regmap, reg,
+ SYSCFG_MP2_ETH_MASK, val);
+}
+
+static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ int ret;
+
+ ret = stm32mp1_select_ethck_external(plat_dat);
+ if (ret)
+ return ret;
+
+ ret = stm32mp1_validate_ethck_rate(plat_dat);
+ if (ret)
+ return ret;
+
+ if (!dwmac->ops->is_mp2)
+ return stm32mp1_configure_pmcr(plat_dat);
+ else
+ return stm32mp2_configure_syscfg(plat_dat);
}
static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
@@ -227,21 +365,21 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
switch (plat_dat->mac_interface) {
case PHY_INTERFACE_MODE_MII:
val = SYSCFG_MCU_ETH_SEL_MII;
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
break;
case PHY_INTERFACE_MODE_RMII:
val = SYSCFG_MCU_ETH_SEL_RMII;
- pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
break;
default:
- pr_debug("SYSCFG init : Do not manage %d interface\n",
- plat_dat->mac_interface);
+ dev_err(dwmac->dev, "Mode %s not supported",
+ phy_modes(plat_dat->mac_interface));
/* Do not manage other interfaces */
return -EINVAL;
}
+ dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface));
+
return regmap_update_bits(dwmac->regmap, reg,
- dwmac->ops->syscfg_eth_mask, val << 23);
+ SYSCFG_MCU_ETH_MASK, val << 23);
}
static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend)
@@ -286,8 +424,24 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
return PTR_ERR(dwmac->regmap);
err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg);
- if (err)
+ if (err) {
dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err);
+ return err;
+ }
+
+ if (dwmac->ops->is_mp2)
+ return 0;
+
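+ /* Default to the MP15 mask; a third "st,syscon" cell, which is
+ * mandatory on MP13, overrides it below.
+ */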
+ dwmac->mode_mask = SYSCFG_MP1_ETH_MASK;
+ err = of_property_read_u32_index(np, "st,syscon", 2, &dwmac->mode_mask);
+ if (err) {
+ if (dwmac->ops->is_mp13) {
+ dev_err(dev, "Sysconfig register mask must be set (%d)\n", err);
+ } else {
+ dev_dbg(dev, "Warning sysconfig register mask not set\n");
+ err = 0;
+ }
+ }
return err;
}
@@ -305,7 +459,7 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
/* Gigabit Ethernet 125MHz clock selection. */
dwmac->eth_clk_sel_reg = of_property_read_bool(np, "st,eth-clk-sel");
- /* Ethernet 50Mhz RMII clock selection */
+ /* Ethernet 50MHz RMII clock selection */
dwmac->eth_ref_clk_sel_reg =
of_property_read_bool(np, "st,eth-ref-clk-sel");
@@ -478,8 +632,7 @@ static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops,
stm32_dwmac_suspend, stm32_dwmac_resume);
static struct stm32_ops stm32mcu_dwmac_data = {
- .set_mode = stm32mcu_set_mode,
- .syscfg_eth_mask = SYSCFG_MCU_ETH_MASK
+ .set_mode = stm32mcu_set_mode
};
static struct stm32_ops stm32mp1_dwmac_data = {
@@ -487,13 +640,35 @@ static struct stm32_ops stm32mp1_dwmac_data = {
.suspend = stm32mp1_suspend,
.resume = stm32mp1_resume,
.parse_data = stm32mp1_parse_data,
- .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
+ .syscfg_clr_off = 0x44,
+ .is_mp13 = false,
+ .clk_rx_enable_in_suspend = true
+};
+
+static struct stm32_ops stm32mp13_dwmac_data = {
+ .set_mode = stm32mp1_set_mode,
+ .suspend = stm32mp1_suspend,
+ .resume = stm32mp1_resume,
+ .parse_data = stm32mp1_parse_data,
+ .syscfg_clr_off = 0x08,
+ .is_mp13 = true,
+ .clk_rx_enable_in_suspend = true
+};
+
+static struct stm32_ops stm32mp25_dwmac_data = {
+ .set_mode = stm32mp1_set_mode,
+ .suspend = stm32mp1_suspend,
+ .resume = stm32mp1_resume,
+ .parse_data = stm32mp1_parse_data,
+ .is_mp2 = true,
.clk_rx_enable_in_suspend = true
};
static const struct of_device_id stm32_dwmac_match[] = {
{ .compatible = "st,stm32-dwmac", .data = &stm32mcu_dwmac_data},
{ .compatible = "st,stm32mp1-dwmac", .data = &stm32mp1_dwmac_data},
+ { .compatible = "st,stm32mp13-dwmac", .data = &stm32mp13_dwmac_data},
+ { .compatible = "st,stm32mp25-dwmac", .data = &stm32mp25_dwmac_data},
{ }
};
MODULE_DEVICE_TABLE(of, stm32_dwmac_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 8555299443f4..d413d76a8936 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -15,7 +15,7 @@
#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac1000.h"
@@ -404,11 +404,6 @@ static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
-static void dwmac1000_rane(void __iomem *ioaddr, bool restart)
-{
- dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
-}
-
static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
@@ -519,7 +514,6 @@ const struct stmmac_ops dwmac1000_ops = {
.set_eee_pls = dwmac1000_set_eee_pls,
.debug = dwmac1000_debug,
.pcs_ctrl_ane = dwmac1000_ctrl_ane,
- .pcs_rane = dwmac1000_rane,
.pcs_get_adv_lp = dwmac1000_get_adv_lp,
.set_mac_loopback = dwmac1000_set_mac_loopback,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index daf79cdbd3ec..adccdd816ea9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -12,7 +12,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <asm/io.h>
+#include <linux/io.h>
#include "dwmac1000.h"
#include "dwmac_dma.h"
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index 7667d103cd0e..14e847c0e1a9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -15,7 +15,7 @@
*******************************************************************************/
#include <linux/crc32.h>
-#include <asm/io.h>
+#include <linux/io.h>
#include "stmmac.h"
#include "dwmac100.h"
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
index dea270f60cc3..b402fb54f613 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -14,7 +14,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <asm/io.h>
+#include <linux/io.h>
#include "dwmac100.h"
#include "dwmac_dma.h"
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
index d3c5306f1c41..93a78fd0737b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -573,8 +573,6 @@ static inline u32 mtl_low_credx_base_addr(const struct dwmac4_addrs *addrs,
#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19)
#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20)
#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21)
-/* LNKMOD */
-#define GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK 0x1
/* LNKSPEED */
#define GMAC_PHYIF_CTRLSTATUS_SPEED_125 0x2
#define GMAC_PHYIF_CTRLSTATUS_SPEED_25 0x1
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index a38226d7cc6a..31c387cc5f26 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -68,7 +68,7 @@ static void dwmac4_core_init(struct mac_device_info *hw,
init_waitqueue_head(&priv->tstamp_busy_wait);
}
-static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
+static void dwmac4_update_caps(struct stmmac_priv *priv)
{
if (priv->plat->tx_queues_to_use > 1)
priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
@@ -758,11 +758,6 @@ static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
-static void dwmac4_rane(void __iomem *ioaddr, bool restart)
-{
- dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
-}
-
static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
@@ -791,7 +786,7 @@ static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
else
x->pcs_speed = SPEED_10;
- x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
+ x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD);
pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
x->pcs_duplex ? "Full" : "Half");
@@ -982,7 +977,7 @@ static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
}
static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- __le16 perfect_match, bool is_double)
+ u16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
@@ -1190,7 +1185,7 @@ static void dwmac4_set_hw_vlan_mode(struct mac_device_info *hw)
const struct stmmac_ops dwmac4_ops = {
.core_init = dwmac4_core_init,
- .phylink_get_caps = dwmac4_phylink_get_caps,
+ .update_caps = dwmac4_update_caps,
.set_mac = stmmac_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
@@ -1215,7 +1210,6 @@ const struct stmmac_ops dwmac4_ops = {
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_rane = dwmac4_rane,
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
@@ -1235,7 +1229,7 @@ const struct stmmac_ops dwmac4_ops = {
const struct stmmac_ops dwmac410_ops = {
.core_init = dwmac4_core_init,
- .phylink_get_caps = dwmac4_phylink_get_caps,
+ .update_caps = dwmac4_update_caps,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
@@ -1260,7 +1254,6 @@ const struct stmmac_ops dwmac410_ops = {
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_rane = dwmac4_rane,
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
@@ -1284,7 +1277,7 @@ const struct stmmac_ops dwmac410_ops = {
const struct stmmac_ops dwmac510_ops = {
.core_init = dwmac4_core_init,
- .phylink_get_caps = dwmac4_phylink_get_caps,
+ .update_caps = dwmac4_update_caps,
.set_mac = stmmac_dwmac4_set_mac,
.rx_ipc = dwmac4_rx_ipc_enable,
.rx_queue_enable = dwmac4_rx_queue_enable,
@@ -1309,7 +1302,6 @@ const struct stmmac_ops dwmac510_ops = {
.set_eee_timer = dwmac4_set_eee_timer,
.set_eee_pls = dwmac4_set_eee_pls,
.pcs_ctrl_ane = dwmac4_ctrl_ane,
- .pcs_rane = dwmac4_rane,
.pcs_get_adv_lp = dwmac4_get_adv_lp,
.debug = dwmac4_debug,
.set_filter = dwmac4_set_filter,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index f8e7775bb633..f196cd99d510 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -615,7 +615,7 @@ static int dwxgmac2_rss_configure(struct mac_device_info *hw,
}
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
- __le16 perfect_match, bool is_double)
+ u16 perfect_match, bool is_double)
{
void __iomem *ioaddr = hw->pcsr;
@@ -1554,9 +1554,6 @@ const struct stmmac_ops dwxgmac210_ops = {
.reset_eee_mode = dwxgmac2_reset_eee_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
- .pcs_ctrl_ane = NULL,
- .pcs_rane = NULL,
- .pcs_get_adv_lp = NULL,
.debug = NULL,
.set_filter = dwxgmac2_set_filter,
.safety_feat_config = dwxgmac3_safety_feat_config,
@@ -1614,9 +1611,6 @@ const struct stmmac_ops dwxlgmac2_ops = {
.reset_eee_mode = dwxgmac2_reset_eee_mode,
.set_eee_timer = dwxgmac2_set_eee_timer,
.set_eee_pls = dwxgmac2_set_eee_pls,
- .pcs_ctrl_ane = NULL,
- .pcs_rane = NULL,
- .pcs_get_adv_lp = NULL,
.debug = NULL,
.set_filter = dwxgmac2_set_filter,
.safety_feat_config = dwxgmac3_safety_feat_config,
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 7be04b54738b..e53c32362774 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -308,8 +308,8 @@ struct stmmac_est;
struct stmmac_ops {
/* MAC core initialization */
void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
- /* Get phylink capabilities */
- void (*phylink_get_caps)(struct stmmac_priv *priv);
+ /* Update MAC capabilities */
+ void (*update_caps)(struct stmmac_priv *priv);
/* Enable the MAC RX/TX */
void (*set_mac)(void __iomem *ioaddr, bool enable);
/* Enable and verify that the IPC module is supported */
@@ -370,7 +370,6 @@ struct stmmac_ops {
/* PCS calls */
void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
bool loopback);
- void (*pcs_rane)(void __iomem *ioaddr, bool restart);
void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
/* Safety Features */
int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp,
@@ -394,7 +393,7 @@ struct stmmac_ops {
struct stmmac_rss *cfg, u32 num_rxq);
/* VLAN */
void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
- __le16 perfect_match, bool is_double);
+ u16 perfect_match, bool is_double);
void (*enable_vlan)(struct mac_device_info *hw, u32 type);
void (*rx_hw_vlan)(struct mac_device_info *hw, struct dma_desc *rx_desc,
struct sk_buff *skb);
@@ -430,8 +429,8 @@ struct stmmac_ops {
#define stmmac_core_init(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, core_init, __args)
-#define stmmac_mac_phylink_get_caps(__priv) \
- stmmac_do_void_callback(__priv, mac, phylink_get_caps, __priv)
+#define stmmac_mac_update_caps(__priv) \
+ stmmac_do_void_callback(__priv, mac, update_caps, __priv)
#define stmmac_mac_set(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, set_mac, __args)
#define stmmac_rx_ipc(__priv, __args...) \
@@ -484,8 +483,6 @@ struct stmmac_ops {
stmmac_do_void_callback(__priv, mac, debug, __priv, __args)
#define stmmac_pcs_ctrl_ane(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args)
-#define stmmac_pcs_rane(__priv, __args...) \
- stmmac_do_void_callback(__priv, mac, pcs_rane, __priv, __args)
#define stmmac_pcs_get_adv_lp(__priv, __args...) \
stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args)
#define stmmac_safety_feat_config(__priv, __args...) \
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index dddcaa9220cc..b23b920eedb1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -221,6 +221,20 @@ struct stmmac_dma_conf {
unsigned int dma_tx_size;
};
+#define EST_GCL 1024
+struct stmmac_est {
+ int enable;
+ u32 btr_reserve[2];
+ u32 btr_offset[2];
+ u32 btr[2];
+ u32 ctr[2];
+ u32 ter;
+ u32 gcl_unaligned[EST_GCL];
+ u32 gcl[EST_GCL];
+ u32 gcl_size;
+ u32 max_sdu[MTL_MAX_TX_QUEUES];
+};
+
struct stmmac_priv {
/* Frequently used values are kept adjacent for cache effect */
u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
@@ -261,6 +275,9 @@ struct stmmac_priv {
struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
struct stmmac_safety_stats sstats;
struct plat_stmmacenet_data *plat;
+ /* Protect est parameters */
+ struct mutex est_lock;
+ struct stmmac_est *est;
struct dma_features dma_cap;
struct stmmac_counters mmc;
int hw_cap_support;
@@ -360,7 +377,8 @@ enum stmmac_state {
int stmmac_mdio_unregister(struct net_device *ndev);
int stmmac_mdio_register(struct net_device *ndev);
int stmmac_mdio_reset(struct mii_bus *mii);
-int stmmac_xpcs_setup(struct mii_bus *mii);
+int stmmac_pcs_setup(struct net_device *ndev);
+void stmmac_pcs_clean(struct net_device *ndev);
void stmmac_set_ethtool_ops(struct net_device *netdev);
int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 542e2633a6f5..7008219fd88d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -11,10 +11,10 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/mii.h>
#include <linux/phylink.h>
#include <linux/net_tstamp.h>
-#include <asm/io.h>
#include "stmmac.h"
#include "dwmac_dma.h"
@@ -1199,7 +1199,7 @@ static int stmmac_set_channels(struct net_device *dev,
}
static int stmmac_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct stmmac_priv *priv = netdev_priv(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index f05bd757dfe5..5ef52ef2698f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -218,6 +218,7 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
{
u32 num_snapshot, ts_status, tsync_int;
struct ptp_clock_event event;
+ u32 acr_value, channel;
unsigned long flags;
u64 ptp_time;
int i;
@@ -243,12 +244,15 @@ static void timestamp_interrupt(struct stmmac_priv *priv)
num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
GMAC_TIMESTAMP_ATSNS_SHIFT;
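+ /* A single auxiliary-snapshot ATSENx enable bit is expected in
+ * PTP_ACR; FIELD_GET() normalises it and ilog2() converts it to the
+ * channel index reported with the PTP_CLOCK_EXTTS event.
+ */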
+ acr_value = readl(priv->ptpaddr + PTP_ACR);
+ channel = ilog2(FIELD_GET(PTP_ACR_MASK, acr_value));
+
for (i = 0; i < num_snapshot; i++) {
read_lock_irqsave(&priv->ptp_lock, flags);
get_ptptime(priv->ptpaddr, &ptp_time);
read_unlock_irqrestore(&priv->ptp_lock, flags);
event.type = PTP_CLOCK_EXTTS;
- event.index = 0;
+ event.index = channel;
event.timestamp = ptp_time;
ptp_clock_event(priv->ptp_clock, &event);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7c6fb14b5555..f3a1b179aaea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -471,13 +471,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
{
int eee_tw_timer = priv->eee_tw_timer;
- /* Using PCS we cannot dial with the phy registers at this stage
- * so we do not support extra feature like EEE.
- */
- if (priv->hw->pcs == STMMAC_PCS_TBI ||
- priv->hw->pcs == STMMAC_PCS_RTBI)
- return false;
-
/* Check if MAC core supports the EEE feature. */
if (!priv->dma_cap.eee)
return false;
@@ -936,16 +929,33 @@ static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
priv->pause, tx_cnt);
}
+static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ /* Refresh the MAC-specific capabilities */
+ stmmac_mac_update_caps(priv);
+
+ config->mac_capabilities = priv->hw->link.caps;
+
+ if (priv->plat->max_speed)
+ phylink_limit_mac_speed(config, priv->plat->max_speed);
+
+ return config->mac_capabilities;
+}
+
static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ struct phylink_pcs *pcs;
- if (priv->hw->xpcs)
- return &priv->hw->xpcs->pcs;
-
- if (priv->hw->lynx_pcs)
- return priv->hw->lynx_pcs;
+ if (priv->plat->select_pcs) {
+ pcs = priv->plat->select_pcs(priv, interface);
+ if (!IS_ERR(pcs))
+ return pcs;
+ }
return NULL;
}
@@ -1105,6 +1115,7 @@ static void stmmac_mac_link_up(struct phylink_config *config,
}
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
+ .mac_get_caps = stmmac_mac_get_caps,
.mac_select_pcs = stmmac_mac_select_pcs,
.mac_config = stmmac_mac_config,
.mac_link_down = stmmac_mac_link_down,
@@ -1204,16 +1215,18 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
int mode = priv->plat->phy_interface;
struct fwnode_handle *fwnode;
struct phylink *phylink;
- int max_speed;
priv->phylink_config.dev = &priv->dev->dev;
priv->phylink_config.type = PHYLINK_NETDEV;
priv->phylink_config.mac_managed_pm = true;
+ /* Stmmac always requires an RX clock for hardware initialization */
+ priv->phylink_config.mac_requires_rxc = true;
+
mdio_bus_data = priv->plat->mdio_bus_data;
if (mdio_bus_data)
- priv->phylink_config.ovr_an_inband =
- mdio_bus_data->xpcs_an_inband;
+ priv->phylink_config.default_an_inband =
+ mdio_bus_data->default_an_inband;
/* Set the platform/firmware specified interface mode. Note, phylink
* deals with the PHY interface mode, not the MAC interface mode.
@@ -1225,15 +1238,6 @@ static int stmmac_phy_setup(struct stmmac_priv *priv)
xpcs_get_interfaces(priv->hw->xpcs,
priv->phylink_config.supported_interfaces);
- /* Get the MAC specific capabilities */
- stmmac_mac_phylink_get_caps(priv);
-
- priv->phylink_config.mac_capabilities = priv->hw->link.caps;
-
- max_speed = priv->plat->max_speed;
- if (max_speed)
- phylink_limit_mac_speed(&priv->phylink_config, max_speed);
-
fwnode = priv->plat->port_node;
if (!fwnode)
fwnode = dev_fwnode(priv->device);
@@ -2491,9 +2495,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
if (!xsk_tx_peek_desc(pool, &xdp_desc))
break;
- if (priv->plat->est && priv->plat->est->enable &&
- priv->plat->est->max_sdu[queue] &&
- xdp_desc.len > priv->plat->est->max_sdu[queue]) {
+ if (priv->est && priv->est->enable &&
+ priv->est->max_sdu[queue] &&
+ xdp_desc.len > priv->est->max_sdu[queue]) {
priv->xstats.max_sdu_txq_drop[queue]++;
continue;
}
@@ -2908,7 +2912,7 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
u32 channels_to_check = tx_channel_count > rx_channel_count ?
tx_channel_count : rx_channel_count;
u32 chan;
- int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
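+ /* MAX_T() is usable in constant expressions, unlike max_t(), so the
+ * status[] array is sized at compile time rather than as a VLA.
+ */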
+ int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
/* Make sure we never check beyond our status buffer. */
if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
@@ -3396,6 +3400,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
u32 chan;
int ret;
+ /* Make sure RX clock is enabled */
+ if (priv->hw->phylink_pcs)
+ phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);
+
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
@@ -3942,11 +3950,8 @@ static int __stmmac_open(struct net_device *dev,
if (ret < 0)
return ret;
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI &&
- (!priv->hw->xpcs ||
- xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
- !priv->hw->lynx_pcs) {
+ if (!priv->hw->xpcs ||
+ xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) {
ret = stmmac_init_phy(dev);
if (ret) {
netdev_err(priv->dev,
@@ -4087,8 +4092,6 @@ static int stmmac_release(struct net_device *dev)
if (priv->plat->serdes_powerdown)
priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
- netif_carrier_off(dev);
-
stmmac_release_ptp(priv);
pm_runtime_put(priv->device);
@@ -4234,18 +4237,32 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct dma_desc *desc, *first, *mss_desc = NULL;
struct stmmac_priv *priv = netdev_priv(dev);
- int nfrags = skb_shinfo(skb)->nr_frags;
- u32 queue = skb_get_queue_mapping(skb);
+ int tmp_pay_len = 0, first_tx, nfrags;
unsigned int first_entry, tx_packets;
struct stmmac_txq_stats *txq_stats;
- int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
- bool has_vlan, set_ic;
+ u32 pay_len, mss, queue;
u8 proto_hdr_len, hdr;
- u32 pay_len, mss;
dma_addr_t des;
+ bool set_ic;
int i;
+ /* Always insert the VLAN tag into the SKB payload for TSO frames.
+ *
+ * Never let the HW insert the VLAN tag, since segments split by the
+ * TSO engine would be left un-tagged by mistake.
+ */
+ if (skb_vlan_tag_present(skb)) {
+ skb = __vlan_hwaccel_push_inside(skb);
+ if (unlikely(!skb)) {
+ priv->xstats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ }
+
+ nfrags = skb_shinfo(skb)->nr_frags;
+ queue = skb_get_queue_mapping(skb);
+
tx_q = &priv->dma_conf.tx_queue[queue];
txq_stats = &priv->xstats.txq_stats[queue];
first_tx = tx_q->cur_tx;
@@ -4298,9 +4315,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
skb->data_len);
}
- /* Check if VLAN can be inserted by HW */
- has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
-
first_entry = tx_q->cur_tx;
WARN_ON(tx_q->tx_skbuff[first_entry]);
@@ -4310,9 +4324,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
desc = &tx_q->dma_tx[first_entry];
first = desc;
- if (has_vlan)
- stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
-
/* first descriptor: fill Headers on Buf1 */
des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
@@ -4528,9 +4539,9 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
return stmmac_tso_xmit(skb, dev);
}
- if (priv->plat->est && priv->plat->est->enable &&
- priv->plat->est->max_sdu[queue] &&
- skb->len > priv->plat->est->max_sdu[queue]){
+ if (priv->est && priv->est->enable &&
+ priv->est->max_sdu[queue] &&
+ skb->len > priv->est->max_sdu[queue]) {
priv->xstats.max_sdu_txq_drop[queue]++;
goto max_sdu_err;
}
@@ -4909,9 +4920,9 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
return STMMAC_XDP_CONSUMED;
- if (priv->plat->est && priv->plat->est->enable &&
- priv->plat->est->max_sdu[queue] &&
- xdpf->len > priv->plat->est->max_sdu[queue]) {
+ if (priv->est && priv->est->enable &&
+ priv->est->max_sdu[queue] &&
+ xdpf->len > priv->est->max_sdu[queue]) {
priv->xstats.max_sdu_txq_drop[queue]++;
return STMMAC_XDP_CONSUMED;
}
@@ -5094,9 +5105,8 @@ static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
unsigned int datasize = xdp->data_end - xdp->data;
struct sk_buff *skb;
- skb = __napi_alloc_skb(&ch->rxtx_napi,
- xdp->data_end - xdp->data_hard_start,
- GFP_ATOMIC | __GFP_NOWARN);
+ skb = napi_alloc_skb(&ch->rxtx_napi,
+ xdp->data_end - xdp->data_hard_start);
if (unlikely(!skb))
return NULL;
@@ -5352,7 +5362,7 @@ read_again:
/* RX buffer is good and fit into a XSK pool buffer */
buf->xdp->data_end = buf->xdp->data + buf1_len;
- xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(buf->xdp);
prog = READ_ONCE(priv->xdp_prog);
res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
@@ -5900,7 +5910,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
stmmac_set_rx_mode(dev);
}
- dev->mtu = mtu;
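+ /* Pairs with lockless READ_ONCE() readers of dev->mtu. */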
+ WRITE_ONCE(dev->mtu, mtu);
netdev_update_features(dev);
return 0;
@@ -6631,7 +6641,7 @@ static u32 stmmac_vid_crc32_le(__le16 vid_le)
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
u32 crc, hash = 0;
- __le16 pmatch = 0;
+ u16 pmatch = 0;
int count = 0;
u16 vid = 0;
@@ -6646,7 +6656,7 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
if (count > 2) /* VID = 0 always passes filter */
return -EOPNOTSUPP;
- pmatch = cpu_to_le16(vid);
+ pmatch = vid;
hash = 0;
}
@@ -7327,7 +7337,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret = 0, i;
- int max_speed;
if (netif_running(dev))
stmmac_release(dev);
@@ -7341,14 +7350,6 @@ int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
priv->rss.table[i] = ethtool_rxfh_indir_default(i,
rx_cnt);
- stmmac_mac_phylink_get_caps(priv);
-
- priv->phylink_config.mac_capabilities = priv->hw->link.caps;
-
- max_speed = priv->plat->max_speed;
- if (max_speed)
- phylink_limit_mac_speed(&priv->phylink_config, max_speed);
-
stmmac_napi_add(dev);
if (netif_running(dev))
@@ -7662,9 +7663,10 @@ int stmmac_dvr_probe(struct device *device,
#ifdef STMMAC_VLAN_TAG_USED
/* Both mac100 and gmac support receive VLAN tag detection */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
- ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- priv->hw->hw_vlan_en = true;
-
+ if (priv->plat->has_gmac4) {
+ ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ priv->hw->hw_vlan_en = true;
+ }
if (priv->dma_cap.vlhash) {
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
@@ -7689,8 +7691,6 @@ int stmmac_dvr_probe(struct device *device,
ndev->features |= NETIF_F_RXHASH;
ndev->vlan_features |= ndev->features;
- /* TSO doesn't work on VLANs yet */
- ndev->vlan_features &= ~NETIF_F_TSO;
/* MTU range: 46 - hw-specific max */
ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
@@ -7739,26 +7739,20 @@ int stmmac_dvr_probe(struct device *device,
if (!pm_runtime_enabled(device))
pm_runtime_enable(device);
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI) {
- /* MDIO bus Registration */
- ret = stmmac_mdio_register(ndev);
- if (ret < 0) {
- dev_err_probe(priv->device, ret,
- "%s: MDIO bus (id: %d) registration failed\n",
- __func__, priv->plat->bus_id);
- goto error_mdio_register;
- }
+ ret = stmmac_mdio_register(ndev);
+ if (ret < 0) {
+ dev_err_probe(priv->device, ret,
+ "MDIO bus (id: %d) registration failed\n",
+ priv->plat->bus_id);
+ goto error_mdio_register;
}
if (priv->plat->speed_mode_2500)
priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
- if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
- ret = stmmac_xpcs_setup(priv->mii);
- if (ret)
- goto error_xpcs_setup;
- }
+ ret = stmmac_pcs_setup(ndev);
+ if (ret)
+ goto error_pcs_setup;
ret = stmmac_phy_setup(priv);
if (ret) {
@@ -7789,11 +7783,10 @@ int stmmac_dvr_probe(struct device *device,
error_netdev_register:
phylink_destroy(priv->phylink);
-error_xpcs_setup:
error_phy_setup:
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI)
- stmmac_mdio_unregister(ndev);
+ stmmac_pcs_clean(ndev);
+error_pcs_setup:
+ stmmac_mdio_unregister(ndev);
error_mdio_register:
stmmac_napi_del(ndev);
error_hw_init:
@@ -7822,7 +7815,6 @@ void stmmac_dvr_remove(struct device *dev)
stmmac_stop_all_dma(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
- netif_carrier_off(ndev);
unregister_netdev(ndev);
#ifdef CONFIG_DEBUG_FS
@@ -7832,9 +7824,10 @@ void stmmac_dvr_remove(struct device *dev)
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
reset_control_assert(priv->plat->stmmac_ahb_rst);
- if (priv->hw->pcs != STMMAC_PCS_TBI &&
- priv->hw->pcs != STMMAC_PCS_RTBI)
- stmmac_mdio_unregister(ndev);
+
+ stmmac_pcs_clean(ndev);
+ stmmac_mdio_unregister(ndev);
+
destroy_workqueue(priv->wq);
mutex_destroy(&priv->lock);
bitmap_free(priv->af_xdp_zc_qps);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 0542cfd1817e..03f90676b3ad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -495,34 +495,55 @@ int stmmac_mdio_reset(struct mii_bus *bus)
return 0;
}
-int stmmac_xpcs_setup(struct mii_bus *bus)
+int stmmac_pcs_setup(struct net_device *ndev)
{
- struct net_device *ndev = bus->priv;
+ struct fwnode_handle *devnode, *pcsnode;
+ struct dw_xpcs *xpcs = NULL;
struct stmmac_priv *priv;
- struct dw_xpcs *xpcs;
- int mode, addr;
+ int addr, mode, ret;
priv = netdev_priv(ndev);
mode = priv->plat->phy_interface;
-
- /* Try to probe the XPCS by scanning all addresses. */
- for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
- xpcs = xpcs_create_mdiodev(bus, addr, mode);
- if (IS_ERR(xpcs))
- continue;
-
- priv->hw->xpcs = xpcs;
- break;
+ devnode = priv->plat->port_node;
+
+ if (priv->plat->pcs_init) {
+ ret = priv->plat->pcs_init(priv);
+ } else if (fwnode_property_present(devnode, "pcs-handle")) {
+ pcsnode = fwnode_find_reference(devnode, "pcs-handle", 0);
+ xpcs = xpcs_create_fwnode(pcsnode, mode);
+ fwnode_handle_put(pcsnode);
+ ret = PTR_ERR_OR_ZERO(xpcs);
+ } else if (priv->plat->mdio_bus_data &&
+ priv->plat->mdio_bus_data->pcs_mask) {
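+ /* The lowest address set in pcs_mask is used as the PCS MDIO address. */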
+ addr = ffs(priv->plat->mdio_bus_data->pcs_mask) - 1;
+ xpcs = xpcs_create_mdiodev(priv->mii, addr, mode);
+ ret = PTR_ERR_OR_ZERO(xpcs);
+ } else {
+ return 0;
}
- if (!priv->hw->xpcs) {
- dev_warn(priv->device, "No xPCS found\n");
- return -ENODEV;
- }
+ if (ret)
+ return dev_err_probe(priv->device, ret, "No xPCS found\n");
+
+ priv->hw->xpcs = xpcs;
return 0;
}
+void stmmac_pcs_clean(struct net_device *ndev)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ if (priv->plat->pcs_exit)
+ priv->plat->pcs_exit(priv);
+
+ if (!priv->hw->xpcs)
+ return;
+
+ xpcs_destroy(priv->hw->xpcs);
+ priv->hw->xpcs = NULL;
+}
+
/**
* stmmac_mdio_register
* @ndev: net device structure
@@ -587,7 +608,7 @@ int stmmac_mdio_register(struct net_device *ndev)
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
new_bus->name, priv->plat->bus_id);
new_bus->priv = ndev;
- new_bus->phy_mask = mdio_bus_data->phy_mask;
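+ /* Also mask PCS addresses out of the PHY scan so the PCS device is
+ * not probed as a PHY.
+ */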
+ new_bus->phy_mask = mdio_bus_data->phy_mask | mdio_bus_data->pcs_mask;
new_bus->parent = priv->device;
err = of_mdiobus_register(new_bus, mdio_node);
@@ -679,9 +700,6 @@ int stmmac_mdio_unregister(struct net_device *ndev)
if (!priv->mii)
return 0;
- if (priv->hw->xpcs)
- xpcs_destroy(priv->hw->xpcs);
-
mdiobus_unregister(priv->mii);
priv->mii->priv = NULL;
mdiobus_free(priv->mii);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
index 13a30e6df4c1..1bdf87b237c4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -75,23 +75,6 @@ static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg,
}
/**
- * dwmac_rane - To restart ANE
- * @ioaddr: IO registers pointer
- * @reg: Base address of the AN Control Register.
- * @restart: to restart ANE
- * Description: this is to just restart the Auto-Negotiation.
- */
-static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart)
-{
- u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
-
- if (restart)
- value |= GMAC_AN_CTRL_RAN;
-
- writel(value, ioaddr + GMAC_AN_CTRL(reg));
-}
-
-/**
* dwmac_ctrl_ane - To program the AN Control Register.
* @ioaddr: IO registers pointer
* @reg: Base address of the AN Control Register.
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 54797edc9b38..ad868e8d195d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -764,8 +764,8 @@ EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
* Description: Call the platform's init callback (if any) and propagate
* the return value.
*/
-int stmmac_pltfr_init(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
+static int stmmac_pltfr_init(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
{
int ret = 0;
@@ -774,7 +774,6 @@ int stmmac_pltfr_init(struct platform_device *pdev,
return ret;
}
-EXPORT_SYMBOL_GPL(stmmac_pltfr_init);
/**
* stmmac_pltfr_exit
@@ -782,13 +781,12 @@ EXPORT_SYMBOL_GPL(stmmac_pltfr_init);
* @plat: driver data platform structure
* Description: Call the platform's exit callback (if any).
*/
-void stmmac_pltfr_exit(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
+static void stmmac_pltfr_exit(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
{
if (plat->exit)
plat->exit(pdev, plat->bsp_priv);
}
-EXPORT_SYMBOL_GPL(stmmac_pltfr_exit);
/**
* stmmac_pltfr_probe
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
index bb6fc7e59aed..72dc1a32e46d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -17,11 +17,6 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);
int stmmac_get_platform_resources(struct platform_device *pdev,
struct stmmac_resources *stmmac_res);
-int stmmac_pltfr_init(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat);
-void stmmac_pltfr_exit(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat);
-
int stmmac_pltfr_probe(struct platform_device *pdev,
struct plat_stmmacenet_data *plat,
struct stmmac_resources *res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index e04830a3a1fb..a6b1de9a251d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -68,13 +68,13 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
nsec = reminder;
/* If EST is enabled, disable it before adjusting the PTP time. */
- if (priv->plat->est && priv->plat->est->enable) {
+ if (priv->est && priv->est->enable) {
est_rst = true;
- mutex_lock(&priv->plat->est->lock);
- priv->plat->est->enable = false;
- stmmac_est_configure(priv, priv, priv->plat->est,
+ mutex_lock(&priv->est_lock);
+ priv->est->enable = false;
+ stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
}
write_lock_irqsave(&priv->ptp_lock, flags);
@@ -87,24 +87,24 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
ktime_t current_time_ns, basetime;
u64 cycle_time;
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
- time.tv_nsec = priv->plat->est->btr_reserve[0];
- time.tv_sec = priv->plat->est->btr_reserve[1];
+ time.tv_nsec = priv->est->btr_reserve[0];
+ time.tv_sec = priv->est->btr_reserve[1];
basetime = timespec64_to_ktime(time);
- cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC +
- priv->plat->est->ctr[0];
+ cycle_time = (u64)priv->est->ctr[1] * NSEC_PER_SEC +
+ priv->est->ctr[0];
time = stmmac_calc_tas_basetime(basetime,
current_time_ns,
cycle_time);
- priv->plat->est->btr[0] = (u32)time.tv_nsec;
- priv->plat->est->btr[1] = (u32)time.tv_sec;
- priv->plat->est->enable = true;
- ret = stmmac_est_configure(priv, priv, priv->plat->est,
+ priv->est->btr[0] = (u32)time.tv_nsec;
+ priv->est->btr[1] = (u32)time.tv_sec;
+ priv->est->enable = true;
+ ret = stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
if (ret)
netdev_err(priv->dev, "failed to configure EST\n");
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index cce00719937d..996f2bcd07a2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -343,10 +343,11 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
struct tc_cbs_qopt_offload *qopt)
{
u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ s64 port_transmit_rate_kbps;
u32 queue = qopt->queue;
- u32 ptr, speed_div;
u32 mode_to_use;
u64 value;
+ u32 ptr;
int ret;
/* Queue 0 is not AVB capable */
@@ -355,30 +356,30 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
if (!priv->dma_cap.av)
return -EOPNOTSUPP;
- /* Port Transmit Rate and Speed Divider */
- switch (priv->speed) {
- case SPEED_10000:
- ptr = 32;
- speed_div = 10000000;
- break;
- case SPEED_5000:
- ptr = 32;
- speed_div = 5000000;
- break;
- case SPEED_2500:
- ptr = 8;
- speed_div = 2500000;
- break;
- case SPEED_1000:
- ptr = 8;
- speed_div = 1000000;
- break;
- case SPEED_100:
- ptr = 4;
- speed_div = 100000;
- break;
- default:
- return -EOPNOTSUPP;
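+ /* Per IEEE 802.1Q credit-based shaper maths, idleSlope - sendSlope
+ * equals portTransmitRate, all expressed in kb/s.
+ */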
+ port_transmit_rate_kbps = qopt->idleslope - qopt->sendslope;
+
+ if (qopt->enable) {
+ /* Port Transmit Rate and Speed Divider */
+ switch (div_s64(port_transmit_rate_kbps, 1000)) {
+ case SPEED_10000:
+ case SPEED_5000:
+ ptr = 32;
+ break;
+ case SPEED_2500:
+ case SPEED_1000:
+ ptr = 8;
+ break;
+ case SPEED_100:
+ ptr = 4;
+ break;
+ default:
+ netdev_err(priv->dev,
+ "Invalid portTransmitRate %lld (idleSlope - sendSlope)\n",
+ port_transmit_rate_kbps);
+ return -EINVAL;
+ }
+ } else {
+ ptr = 0;
}
mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
@@ -398,10 +399,10 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
}
/* Final adjustments for HW */
- value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+ value = div_s64(qopt->idleslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
- value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
+ value = div_s64(-qopt->sendslope * 1024ll * ptr, port_transmit_rate_kbps);
priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
value = qopt->hicredit * 1024ll * 8;
@@ -918,7 +919,6 @@ struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
- struct plat_stmmacenet_data *plat = priv->plat;
u32 num_tc = qopt->mqprio.qopt.num_tc;
u32 offset, count, i, j;
@@ -933,7 +933,7 @@ static void tc_taprio_map_maxsdu_txq(struct stmmac_priv *priv,
count = qopt->mqprio.qopt.count[i];
for (j = offset; j < offset + count; j++)
- plat->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
+ priv->est->max_sdu[j] = qopt->max_sdu[i] + ETH_HLEN - ETH_TLEN;
}
}
@@ -941,7 +941,6 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
- struct plat_stmmacenet_data *plat = priv->plat;
struct timespec64 time, current_time, qopt_time;
ktime_t current_time_ns;
bool fpe = false;
@@ -998,23 +997,25 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
if (qopt->cycle_time_extension >= BIT(wid + 7))
return -ERANGE;
- if (!plat->est) {
- plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
+ if (!priv->est) {
+ priv->est = devm_kzalloc(priv->device, sizeof(*priv->est),
GFP_KERNEL);
- if (!plat->est)
+ if (!priv->est)
return -ENOMEM;
- mutex_init(&priv->plat->est->lock);
+ mutex_init(&priv->est_lock);
} else {
- memset(plat->est, 0, sizeof(*plat->est));
+ mutex_lock(&priv->est_lock);
+ memset(priv->est, 0, sizeof(*priv->est));
+ mutex_unlock(&priv->est_lock);
}
size = qopt->num_entries;
- mutex_lock(&priv->plat->est->lock);
- priv->plat->est->gcl_size = size;
- priv->plat->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
- mutex_unlock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
+ priv->est->gcl_size = size;
+ priv->est->enable = qopt->cmd == TAPRIO_CMD_REPLACE;
+ mutex_unlock(&priv->est_lock);
for (i = 0; i < size; i++) {
s64 delta_ns = qopt->entries[i].interval;
@@ -1042,33 +1043,33 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
return -EOPNOTSUPP;
}
- priv->plat->est->gcl[i] = delta_ns | (gates << wid);
+ priv->est->gcl[i] = delta_ns | (gates << wid);
}
- mutex_lock(&priv->plat->est->lock);
+ mutex_lock(&priv->est_lock);
/* Adjust for real system time */
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
qopt->cycle_time);
- priv->plat->est->btr[0] = (u32)time.tv_nsec;
- priv->plat->est->btr[1] = (u32)time.tv_sec;
+ priv->est->btr[0] = (u32)time.tv_nsec;
+ priv->est->btr[1] = (u32)time.tv_sec;
qopt_time = ktime_to_timespec64(qopt->base_time);
- priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
- priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
+ priv->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
+ priv->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
ctr = qopt->cycle_time;
- priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
- priv->plat->est->ctr[1] = (u32)ctr;
+ priv->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
+ priv->est->ctr[1] = (u32)ctr;
- priv->plat->est->ter = qopt->cycle_time_extension;
+ priv->est->ter = qopt->cycle_time_extension;
tc_taprio_map_maxsdu_txq(priv, qopt);
if (fpe && !priv->dma_cap.fpesel) {
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
return -EOPNOTSUPP;
}
@@ -1077,9 +1078,9 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
*/
priv->plat->fpe_cfg->enable = fpe;
- ret = stmmac_est_configure(priv, priv, priv->plat->est,
+ ret = stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
if (ret) {
netdev_err(priv->dev, "failed to configure EST\n");
goto disable;
@@ -1095,17 +1096,17 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
return 0;
disable:
- if (priv->plat->est) {
- mutex_lock(&priv->plat->est->lock);
- priv->plat->est->enable = false;
- stmmac_est_configure(priv, priv, priv->plat->est,
+ if (priv->est) {
+ mutex_lock(&priv->est_lock);
+ priv->est->enable = false;
+ stmmac_est_configure(priv, priv, priv->est,
priv->plat->clk_ptp_rate);
/* Reset taprio status */
for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
priv->xstats.max_sdu_txq_drop[i] = 0;
priv->xstats.mtl_est_txq_hlbf[i] = 0;
}
- mutex_unlock(&priv->plat->est->lock);
+ mutex_unlock(&priv->est_lock);
}
priv->plat->fpe_cfg->enable = false;
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index bfb903506367..b8948d5b779a 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -73,6 +73,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/skbuff_ref.h>
#include <linux/ethtool.h>
#include <linux/crc32.h>
#include <linux/random.h>
@@ -3803,7 +3804,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
{
struct cas *cp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!netif_running(dev) || !netif_device_present(dev))
return 0;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index f68aa813d4fb..41a27ae58ced 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6751,7 +6751,7 @@ static int niu_change_mtu(struct net_device *dev, int new_mtu)
orig_jumbo = (dev->mtu > ETH_DATA_LEN);
new_jumbo = (new_mtu > ETH_DATA_LEN);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (!netif_running(dev) ||
(orig_jumbo == new_jumbo))
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 9bd1df8308d2..3e5f9b17c777 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -949,17 +949,6 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void gem_poll_controller(struct net_device *dev)
-{
- struct gem *gp = netdev_priv(dev);
-
- disable_irq(gp->pdev->irq);
- gem_interrupt(gp->pdev->irq, dev);
- enable_irq(gp->pdev->irq);
-}
-#endif
-
static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gem *gp = netdev_priv(dev);
@@ -2499,7 +2488,7 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
{
struct gem *gp = netdev_priv(dev);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
/* We'll just catch it later when the device is up'd or resumed */
if (!netif_running(dev) || !netif_device_present(dev))
@@ -2839,9 +2828,6 @@ static const struct net_device_ops gem_netdev_ops = {
.ndo_change_mtu = gem_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = gem_set_mac_address,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = gem_poll_controller,
-#endif
};
static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index f8e133604146..131786aa4d5b 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -21,8 +21,6 @@
#include "dwc-xlgmac.h"
#include "dwc-xlgmac-reg.h"
-MODULE_LICENSE("Dual BSD/GPL");
-
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "DWC ethernet debug level (0=none,...,16=all)");
@@ -725,3 +723,8 @@ void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata)
XLGMAC_PR("=====================================================\n");
XLGMAC_PR("\n");
}
+
+MODULE_DESCRIPTION(XLGMAC_DRV_DESC);
+MODULE_VERSION(XLGMAC_DRV_VERSION);
+MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 36b948820c1e..d1793b6154c7 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -823,7 +823,7 @@ static int xlgmac_change_mtu(struct net_device *netdev, int mtu)
return ret;
pdata->rx_buf_size = ret;
- netdev->mtu = mtu;
+ WRITE_ONCE(netdev->mtu, mtu);
xlgmac_restart_dev(pdata);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
index fa8604d7b797..36fe538e3332 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c
@@ -71,8 +71,3 @@ static struct pci_driver xlgmac_pci_driver = {
};
module_pci_driver(xlgmac_pci_driver);
-
-MODULE_DESCRIPTION(XLGMAC_DRV_DESC);
-MODULE_VERSION(XLGMAC_DRV_VERSION);
-MODULE_AUTHOR("Jie Deng <jiedeng@synopsys.com>");
-MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/tehuti/Kconfig b/drivers/net/ethernet/tehuti/Kconfig
index 8735633765a1..6db2c9817445 100644
--- a/drivers/net/ethernet/tehuti/Kconfig
+++ b/drivers/net/ethernet/tehuti/Kconfig
@@ -23,4 +23,19 @@ config TEHUTI
help
Tehuti Networks 10G Ethernet NIC
+config TEHUTI_TN40
+ tristate "Tehuti Networks TN40xx 10G Ethernet adapters"
+ depends on PCI
+ select PAGE_POOL
+ select FW_LOADER
+ select PHYLINK
+ help
+ This driver supports 10G Ethernet adapters using Tehuti Networks
+ TN40xx chips. Currently, adapters with Applied Micro Circuits
+	  Corporation QT2025 are supported: the Tehuti Networks TN9310,
+	  DLink DXE-810S, ASUS XG-C100F, and Edimax EN-9320.
+
+ To compile this driver as a module, choose M here: the module
+ will be called tn40xx.
+
endif # NET_VENDOR_TEHUTI
diff --git a/drivers/net/ethernet/tehuti/Makefile b/drivers/net/ethernet/tehuti/Makefile
index 13a0ddd62088..0d4f4d63a65c 100644
--- a/drivers/net/ethernet/tehuti/Makefile
+++ b/drivers/net/ethernet/tehuti/Makefile
@@ -4,3 +4,6 @@
#
obj-$(CONFIG_TEHUTI) += tehuti.o
+
+tn40xx-y := tn40.o tn40_mdio.o tn40_phy.o
+obj-$(CONFIG_TEHUTI_TN40) += tn40xx.o
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index ca409515ead5..ede5f7890fb4 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -756,7 +756,7 @@ static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
{
ENTER;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
if (netif_running(ndev)) {
bdx_close(ndev);
bdx_open(ndev);
diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c
new file mode 100644
index 000000000000..259bdac24cf2
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40.c
@@ -0,0 +1,1850 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#include <linux/bitfield.h>
+#include <linux/ethtool.h>
+#include <linux/firmware.h>
+#include <linux/if_vlan.h>
+#include <linux/iopoll.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/phylink.h>
+#include <linux/vmalloc.h>
+#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
+
+#include "tn40.h"
+
+#define TN40_SHORT_PACKET_SIZE 60
+#define TN40_FIRMWARE_NAME "tehuti/bdx.bin"
+
+static void tn40_enable_interrupts(struct tn40_priv *priv)
+{
+ tn40_write_reg(priv, TN40_REG_IMR, priv->isr_mask);
+}
+
+static void tn40_disable_interrupts(struct tn40_priv *priv)
+{
+ tn40_write_reg(priv, TN40_REG_IMR, 0);
+}
+
+static int tn40_fifo_alloc(struct tn40_priv *priv, struct tn40_fifo *f,
+ int fsz_type,
+ u16 reg_cfg0, u16 reg_cfg1,
+ u16 reg_rptr, u16 reg_wptr)
+{
+ u16 memsz = TN40_FIFO_SIZE * (1 << fsz_type);
+ u64 cfg_base;
+
+ memset(f, 0, sizeof(struct tn40_fifo));
+ /* 1K extra space is allocated at the end of the fifo to simplify
+	 * processing of descriptors that wrap around the fifo's end.
+ */
+ f->va = dma_alloc_coherent(&priv->pdev->dev,
+ memsz + TN40_FIFO_EXTRA_SPACE, &f->da,
+ GFP_KERNEL);
+ if (!f->va)
+ return -ENOMEM;
+
+ f->reg_cfg0 = reg_cfg0;
+ f->reg_cfg1 = reg_cfg1;
+ f->reg_rptr = reg_rptr;
+ f->reg_wptr = reg_wptr;
+ f->rptr = 0;
+ f->wptr = 0;
+ f->memsz = memsz;
+ f->size_mask = memsz - 1;
+ cfg_base = lower_32_bits((f->da & TN40_TX_RX_CFG0_BASE) | fsz_type);
+ tn40_write_reg(priv, reg_cfg0, cfg_base);
+ tn40_write_reg(priv, reg_cfg1, upper_32_bits(f->da));
+ return 0;
+}
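+
+/* Illustrative note (not part of the driver): the fifo size is encoded as a
+ * power-of-two multiple of TN40_FIFO_SIZE, and the low bits of the CFG0
+ * register carry that encoding alongside the DMA base address. For example,
+ * with the fsz_type of 3 used at probe time:
+ *
+ *	memsz = TN40_FIFO_SIZE * (1 << 3);	// 8 * TN40_FIFO_SIZE bytes
+ *	cfg0 = lower_32_bits((f->da & TN40_TX_RX_CFG0_BASE) | 3);
+ *
+ * so a single 32-bit write programs both the base address and the size class.
+ */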
+
+static void tn40_fifo_free(struct tn40_priv *priv, struct tn40_fifo *f)
+{
+ dma_free_coherent(&priv->pdev->dev,
+ f->memsz + TN40_FIFO_EXTRA_SPACE, f->va, f->da);
+}
+
+static struct tn40_rxdb *tn40_rxdb_alloc(int nelem)
+{
+ size_t size = sizeof(struct tn40_rxdb) + (nelem * sizeof(int)) +
+ (nelem * sizeof(struct tn40_rx_map));
+ struct tn40_rxdb *db;
+ int i;
+
+ db = vzalloc(size);
+ if (db) {
+ db->stack = (int *)(db + 1);
+ db->elems = (void *)(db->stack + nelem);
+ db->nelem = nelem;
+ db->top = nelem;
+ /* make the first alloc close to db struct */
+ for (i = 0; i < nelem; i++)
+ db->stack[i] = nelem - i - 1;
+ }
+ return db;
+}
+
+static void tn40_rxdb_free(struct tn40_rxdb *db)
+{
+ vfree(db);
+}
+
+static int tn40_rxdb_alloc_elem(struct tn40_rxdb *db)
+{
+ return db->stack[--db->top];
+}
+
+static void *tn40_rxdb_addr_elem(struct tn40_rxdb *db, unsigned int n)
+{
+ return db->elems + n;
+}
+
+static int tn40_rxdb_available(struct tn40_rxdb *db)
+{
+ return db->top;
+}
+
+static void tn40_rxdb_free_elem(struct tn40_rxdb *db, unsigned int n)
+{
+ db->stack[db->top++] = n;
+}
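+
+/* A minimal sketch of how the rxdb helpers above behave (illustrative only):
+ * the db is a stack of free element indices, so allocation and free are O(1)
+ * pops and pushes:
+ *
+ *	struct tn40_rxdb *db = tn40_rxdb_alloc(4);	// stack: 3, 2, 1, 0
+ *	int idx = tn40_rxdb_alloc_elem(db);		// pops 0, 3 left
+ *	tn40_rxdb_free_elem(db, idx);			// pushes 0 back
+ *
+ * tn40_rxdb_available() is simply the current stack depth; callers must not
+ * allocate from an empty stack.
+ */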
+
+/**
+ * tn40_create_rx_ring - Initialize all RX-related HW and SW resources
+ * @priv: NIC private structure
+ *
+ * create_rx_ring creates the rxf and rxd fifos, updates the relevant HW
+ * registers and preallocates RX buffers. It assumes that RX is disabled in
+ * the HW. The related functions are grouped together for better cache usage.
+ *
+ * The RxD fifo is smaller than the RxF fifo by design. Upon high load, RxD
+ * will fill up and packets will be dropped by the NIC without reaching the
+ * host or generating interrupts. In this situation the host has no chance
+ * of processing all the packets anyway, and dropping them in the NIC is
+ * cheaper, since it takes zero CPU cycles.
+ *
+ * Return: 0 on success and negative value on error.
+ */
+static int tn40_create_rx_ring(struct tn40_priv *priv)
+{
+ struct page_pool_params pp = {
+ .dev = &priv->pdev->dev,
+ .napi = &priv->napi,
+ .dma_dir = DMA_FROM_DEVICE,
+ .netdev = priv->ndev,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .max_len = PAGE_SIZE,
+ };
+ int ret, pkt_size, nr;
+
+ priv->page_pool = page_pool_create(&pp);
+ if (IS_ERR(priv->page_pool))
+ return PTR_ERR(priv->page_pool);
+
+ ret = tn40_fifo_alloc(priv, &priv->rxd_fifo0.m, priv->rxd_size,
+ TN40_REG_RXD_CFG0_0, TN40_REG_RXD_CFG1_0,
+ TN40_REG_RXD_RPTR_0, TN40_REG_RXD_WPTR_0);
+ if (ret)
+ goto err_destroy_page_pool;
+
+ ret = tn40_fifo_alloc(priv, &priv->rxf_fifo0.m, priv->rxf_size,
+ TN40_REG_RXF_CFG0_0, TN40_REG_RXF_CFG1_0,
+ TN40_REG_RXF_RPTR_0, TN40_REG_RXF_WPTR_0);
+ if (ret)
+ goto err_free_rxd;
+
+ pkt_size = priv->ndev->mtu + VLAN_ETH_HLEN;
+ priv->rxf_fifo0.m.pktsz = pkt_size;
+ nr = priv->rxf_fifo0.m.memsz / sizeof(struct tn40_rxf_desc);
+ priv->rxdb0 = tn40_rxdb_alloc(nr);
+ if (!priv->rxdb0) {
+ ret = -ENOMEM;
+ goto err_free_rxf;
+ }
+ return 0;
+err_free_rxf:
+ tn40_fifo_free(priv, &priv->rxf_fifo0.m);
+err_free_rxd:
+ tn40_fifo_free(priv, &priv->rxd_fifo0.m);
+err_destroy_page_pool:
+ page_pool_destroy(priv->page_pool);
+ return ret;
+}
+
+static void tn40_rx_free_buffers(struct tn40_priv *priv)
+{
+ struct tn40_rxdb *db = priv->rxdb0;
+ struct tn40_rx_map *dm;
+ u16 i;
+
+ netdev_dbg(priv->ndev, "total =%d free =%d busy =%d\n", db->nelem,
+ tn40_rxdb_available(db),
+ db->nelem - tn40_rxdb_available(db));
+
+ for (i = 0; i < db->nelem; i++) {
+ dm = tn40_rxdb_addr_elem(db, i);
+ if (dm->page)
+ page_pool_put_full_page(priv->page_pool, dm->page,
+ false);
+ }
+}
+
+static void tn40_destroy_rx_ring(struct tn40_priv *priv)
+{
+ if (priv->rxdb0) {
+ tn40_rx_free_buffers(priv);
+ tn40_rxdb_free(priv->rxdb0);
+ priv->rxdb0 = NULL;
+ }
+ tn40_fifo_free(priv, &priv->rxf_fifo0.m);
+ tn40_fifo_free(priv, &priv->rxd_fifo0.m);
+ page_pool_destroy(priv->page_pool);
+}
+
+static void tn40_set_rx_desc(struct tn40_priv *priv, int idx, u64 dma)
+{
+ struct tn40_rxf_fifo *f = &priv->rxf_fifo0;
+ struct tn40_rxf_desc *rxfd;
+ int delta;
+
+ rxfd = (struct tn40_rxf_desc *)(f->m.va + f->m.wptr);
+ rxfd->info = cpu_to_le32(0x10003); /* INFO =1 BC =3 */
+ rxfd->va_lo = cpu_to_le32(idx);
+ rxfd->pa_lo = cpu_to_le32(lower_32_bits(dma));
+ rxfd->pa_hi = cpu_to_le32(upper_32_bits(dma));
+ rxfd->len = cpu_to_le32(f->m.pktsz);
+ f->m.wptr += sizeof(struct tn40_rxf_desc);
+ delta = f->m.wptr - f->m.memsz;
+ if (unlikely(delta >= 0)) {
+ f->m.wptr = delta;
+ if (delta > 0) {
+ memcpy(f->m.va, f->m.va + f->m.memsz, delta);
+ netdev_dbg(priv->ndev,
+ "wrapped rxd descriptor\n");
+ }
+ }
+}
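+
+/* Worked example of the wrap handling above (illustrative, with a
+ * hypothetical 16-byte descriptor): with memsz = 4096 and wptr = 4088,
+ * writing the descriptor leaves wptr = 4104, i.e. delta = 8. The last 8
+ * bytes landed in the TN40_FIFO_EXTRA_SPACE area past memsz, so they are
+ * copied back to offset 0 and wptr becomes 8. The descriptor stays
+ * contiguous for the writer while the HW sees it wrapped.
+ */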
+
+/**
+ * tn40_rx_alloc_buffers - Fill rxf fifo with buffers.
+ *
+ * @priv: NIC's private structure
+ *
+ * rx_alloc_buffers allocates pages via the page pool API, builds rxf
+ * descriptors and pushes them into the rxf fifo. The pages are stored in
+ * the rxdb. To calculate the free space, the cached values of RPTR and
+ * WPTR are used when needed. This function also updates RPTR and WPTR.
+ */
+static void tn40_rx_alloc_buffers(struct tn40_priv *priv)
+{
+ struct tn40_rxf_fifo *f = &priv->rxf_fifo0;
+ struct tn40_rxdb *db = priv->rxdb0;
+ struct tn40_rx_map *dm;
+ struct page *page;
+ int dno, i, idx;
+
+ dno = tn40_rxdb_available(db) - 1;
+ for (i = dno; i > 0; i--) {
+ page = page_pool_dev_alloc_pages(priv->page_pool);
+ if (!page)
+ break;
+
+ idx = tn40_rxdb_alloc_elem(db);
+ tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(page));
+ dm = tn40_rxdb_addr_elem(db, idx);
+ dm->page = page;
+ }
+ if (i != dno)
+ tn40_write_reg(priv, f->m.reg_wptr,
+ f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+ netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr 0x%x\n",
+ f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+ netdev_dbg(priv->ndev, "read_reg 0x%04x f->m.reg_rptr=0x%x\n",
+ f->m.reg_rptr, tn40_read_reg(priv, f->m.reg_rptr));
+ netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr=0x%x\n",
+ f->m.reg_wptr, tn40_read_reg(priv, f->m.reg_wptr));
+}
+
+static void tn40_recycle_rx_buffer(struct tn40_priv *priv,
+ struct tn40_rxd_desc *rxdd)
+{
+ struct tn40_rxf_fifo *f = &priv->rxf_fifo0;
+ struct tn40_rx_map *dm;
+ int idx;
+
+ idx = le32_to_cpu(rxdd->va_lo);
+ dm = tn40_rxdb_addr_elem(priv->rxdb0, idx);
+ tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(dm->page));
+
+ tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+}
+
+static int tn40_rx_receive(struct tn40_priv *priv, int budget)
+{
+ struct tn40_rxd_fifo *f = &priv->rxd_fifo0;
+ u32 rxd_val1, rxd_err, pkt_id;
+ int tmp_len, size, done = 0;
+ struct tn40_rxdb *db = NULL;
+ struct tn40_rxd_desc *rxdd;
+ struct tn40_rx_map *dm;
+ struct sk_buff *skb;
+ u16 len, rxd_vlan;
+ int idx;
+
+ f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_WR_PTR;
+ size = f->m.wptr - f->m.rptr;
+ if (size < 0)
+ size += f->m.memsz; /* Size is negative :-) */
+
+ while (size > 0) {
+ rxdd = (struct tn40_rxd_desc *)(f->m.va + f->m.rptr);
+ db = priv->rxdb0;
+
+ /* We have a chicken and egg problem here. If the
+ * descriptor is wrapped we first need to copy the tail
+ * of the descriptor to the end of the buffer before
+ * extracting values from the descriptor. However in
+ * order to know if the descriptor is wrapped we need to
+ * obtain the length of the descriptor from (the
+ * wrapped) descriptor. Luckily the length is the first
+ * word of the descriptor. Descriptor lengths are
+		 * multiples of 8 bytes, so in case of a wrapped
+		 * descriptor the first 8 bytes are guaranteed to appear
+ * before the end of the buffer. We first obtain the
+ * length, we then copy the rest of the descriptor if
+ * needed and then extract the rest of the values from
+ * the descriptor.
+ *
+ * Do not change the order of operations as it will
+ * break the code!!!
+ */
+ rxd_val1 = le32_to_cpu(rxdd->rxd_val1);
+ tmp_len = TN40_GET_RXD_BC(rxd_val1) << 3;
+ pkt_id = TN40_GET_RXD_PKT_ID(rxd_val1);
+ size -= tmp_len;
+ /* CHECK FOR A PARTIALLY ARRIVED DESCRIPTOR */
+ if (size < 0) {
+ netdev_dbg(priv->ndev,
+ "%s partially arrived desc tmp_len %d\n",
+ __func__, tmp_len);
+ break;
+ }
+		/* Make sure that the descriptor has fully arrived
+		 * before reading the rest of it.
+		 */
+ rmb();
+
+		/* Non-contiguous descriptors that start near the end,
+		 * wrap around and continue at the beginning get special
+		 * treatment: the second part is copied right after the
+		 * first, and then the descriptor is interpreted as
+		 * normal. The fifo has extra space to allow such
+		 * operations.
+		 */
+
+ /* HAVE WE REACHED THE END OF THE QUEUE? */
+ f->m.rptr += tmp_len;
+ tmp_len = f->m.rptr - f->m.memsz;
+ if (unlikely(tmp_len >= 0)) {
+ f->m.rptr = tmp_len;
+ if (tmp_len > 0) {
+ /* COPY PARTIAL DESCRIPTOR
+ * TO THE END OF THE QUEUE
+ */
+ netdev_dbg(priv->ndev,
+ "wrapped desc rptr=%d tmp_len=%d\n",
+ f->m.rptr, tmp_len);
+ memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len);
+ }
+ }
+ idx = le32_to_cpu(rxdd->va_lo);
+ dm = tn40_rxdb_addr_elem(db, idx);
+ prefetch(dm);
+
+ len = le16_to_cpu(rxdd->len);
+ rxd_vlan = le16_to_cpu(rxdd->rxd_vlan);
+ /* CHECK FOR ERRORS */
+ rxd_err = TN40_GET_RXD_ERR(rxd_val1);
+ if (unlikely(rxd_err)) {
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.rx_errors++;
+ u64_stats_update_end(&priv->syncp);
+ tn40_recycle_rx_buffer(priv, rxdd);
+ continue;
+ }
+
+ skb = napi_build_skb(page_address(dm->page), PAGE_SIZE);
+ if (!skb) {
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.rx_dropped++;
+ priv->alloc_fail++;
+ u64_stats_update_end(&priv->syncp);
+ tn40_recycle_rx_buffer(priv, rxdd);
+ break;
+ }
+ skb_mark_for_recycle(skb);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, priv->ndev);
+ skb->ip_summed =
+ (pkt_id == 0) ? CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
+ if (TN40_GET_RXD_VTAG(rxd_val1))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ TN40_GET_RXD_VLAN_TCI(rxd_vlan));
+
+ dm->page = NULL;
+ tn40_rxdb_free_elem(db, idx);
+
+ napi_gro_receive(&priv->napi, skb);
+
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.rx_bytes += len;
+ u64_stats_update_end(&priv->syncp);
+
+ if (unlikely(++done >= budget))
+ break;
+ }
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.rx_packets += done;
+ u64_stats_update_end(&priv->syncp);
+ /* FIXME: Do something to minimize pci accesses */
+ tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR);
+ tn40_rx_alloc_buffers(priv);
+ return done;
+}
+
+/* TX HW/SW interaction overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * There are 2 types of TX communication channels between driver and NIC.
+ * 1) TX Free Fifo - TXF - Holds ack descriptors for sent packets.
+ * 2) TX Data Fifo - TXD - Holds descriptors of full buffers.
+ *
+ * Currently the NIC supports TSO, checksumming and gather DMA.
+ * UFO and IP fragmentation are on the way.
+ *
+ * TX SW Data Structures
+ * ~~~~~~~~~~~~~~~~~~~~~
+ * TXDB is used to keep track of all skbs owned by SW and their DMA addresses.
+ * For TX case, ownership lasts from getting the packet via hard_xmit and
+ * until the HW acknowledges sending the packet by TXF descriptors.
+ * TXDB is implemented as a cyclic buffer.
+ *
+ * FIFO objects keep info about the fifo's size and location, relevant HW
+ * registers, usage and skb db. Each RXD and RXF fifo has their own fifo
+ * structure. Implemented as simple struct.
+ *
+ * TX SW Execution Flow
+ * ~~~~~~~~~~~~~~~~~~~~
+ * OS calls the driver's hard_xmit method with a packet to send. The driver
+ * creates DMA mappings, builds TXD descriptors and kicks the HW by updating
+ * TXD WPTR.
+ *
+ * When a packet is sent, the HW writes a TXF descriptor and the SW
+ * frees the original skb. To prevent TXD fifo overflow without
+ * reading HW registers every time, the SW deploys a "tx level"
+ * technique. Upon startup, the tx level is initialized to the TXD
+ * fifo length. For every sent packet, the SW gets its TXD descriptor
+ * size (from a pre-calculated array) and subtracts it from the tx
+ * level. The size is also stored in the txdb. When a TXF ack arrives,
+ * the SW fetches the size of the original TXD descriptor from the
+ * txdb and adds it back to the tx level. When the tx level drops
+ * below some predefined threshold, the driver stops the TX queue.
+ * When the tx level rises above that threshold, the tx queue is
+ * enabled again.
+ *
+ * This technique avoids excessive reading of the RPTR and WPTR
+ * registers. As our benchmarks show, it adds 1.5 Gbit/sec to the
+ * NIC's throughput.
+ */
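+
+/* A rough sketch of the "tx level" accounting described above (illustrative
+ * only; size_in_txdb stands for the length recorded in the tx db):
+ *
+ *	// xmit path (tn40_start_xmit):
+ *	priv->tx_level -= tn40_txd_sizes[nr_frags].bytes;
+ *	if (priv->tx_level < TN40_MIN_TX_LEVEL)
+ *		netif_stop_queue(ndev);
+ *
+ *	// ack path (tn40_tx_cleanup):
+ *	priv->tx_level += size_in_txdb;
+ *	if (stopped && priv->tx_level >= TN40_MAX_TX_LEVEL / 2)
+ *		netif_wake_queue(priv->ndev);
+ */
+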
+static void tn40_do_tx_db_ptr_next(struct tn40_txdb *db,
+ struct tn40_tx_map **pptr)
+{
+ ++*pptr;
+ if (unlikely(*pptr == db->end))
+ *pptr = db->start;
+}
+
+static void tn40_tx_db_inc_rptr(struct tn40_txdb *db)
+{
+ tn40_do_tx_db_ptr_next(db, &db->rptr);
+}
+
+static void tn40_tx_db_inc_wptr(struct tn40_txdb *db)
+{
+ tn40_do_tx_db_ptr_next(db, &db->wptr);
+}
+
+static int tn40_tx_db_init(struct tn40_txdb *d, int sz_type)
+{
+ int memsz = TN40_FIFO_SIZE * (1 << (sz_type + 1));
+
+ d->start = vzalloc(memsz);
+ if (!d->start)
+ return -ENOMEM;
+	/* To differentiate between an empty and a full db state, at least
+	 * one element must always stay empty: rptr == wptr is reserved to
+	 * mean that the db is empty.
+	 */
+ d->size = memsz / sizeof(struct tn40_tx_map) - 1;
+ d->end = d->start + d->size + 1; /* just after last element */
+
+ /* All dbs are created empty */
+ d->rptr = d->start;
+ d->wptr = d->start;
+ return 0;
+}
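+
+/* Illustrative example of the one-empty-slot convention above: with
+ * d->size == 3 the db holds 4 slots but at most 3 live elements, so
+ *
+ *	rptr == wptr		-> db is empty
+ *	next(wptr) == rptr	-> db is full
+ *
+ * and the two states can be told apart without a separate counter.
+ */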
+
+static void tn40_tx_db_close(struct tn40_txdb *d)
+{
+ if (d->start) {
+ vfree(d->start);
+ d->start = NULL;
+ }
+}
+
+/* Sizes of tx desc (including padding if needed) as a function of the SKB's
+ * frag number:
+ * 7 - number of lwords in a txd with one phys buffer
+ * 3 - number of lwords used for every additional phys buffer
+ *
+ * The table below was generated by:
+ *
+ *	for (i = 0; i < TN40_MAX_PBL; i++) {
+ *		lwords = 7 + (i * 3);
+ *		if (lwords & 1)
+ *			lwords++;	// pad it with 1 lword
+ *		tn40_txd_sizes[i].bytes = lwords << 2;
+ *		tn40_txd_sizes[i].qwords = lwords >> 1;
+ *	}
+ */
+static struct {
+ u16 bytes;
+ u16 qwords; /* qword = 64 bit */
+} tn40_txd_sizes[] = {
+ {0x20, 0x04},
+ {0x28, 0x05},
+ {0x38, 0x07},
+ {0x40, 0x08},
+ {0x50, 0x0a},
+ {0x58, 0x0b},
+ {0x68, 0x0d},
+ {0x70, 0x0e},
+ {0x80, 0x10},
+ {0x88, 0x11},
+ {0x98, 0x13},
+ {0xa0, 0x14},
+ {0xb0, 0x16},
+ {0xb8, 0x17},
+ {0xc8, 0x19},
+ {0xd0, 0x1a},
+ {0xe0, 0x1c},
+ {0xe8, 0x1d},
+ {0xf8, 0x1f},
+};
+
+static void tn40_pbl_set(struct tn40_pbl *pbl, dma_addr_t dma, int len)
+{
+ pbl->len = cpu_to_le32(len);
+ pbl->pa_lo = cpu_to_le32(lower_32_bits(dma));
+ pbl->pa_hi = cpu_to_le32(upper_32_bits(dma));
+}
+
+static void tn40_txdb_set(struct tn40_txdb *db, dma_addr_t dma, int len)
+{
+ db->wptr->len = len;
+ db->wptr->addr.dma = dma;
+}
+
+struct tn40_mapping_info {
+ dma_addr_t dma;
+ size_t size;
+};
+
+/**
+ * tn40_tx_map_skb - create and store DMA mappings for skb's data blocks
+ * @priv: NIC private structure
+ * @skb: socket buffer to map
+ * @txdd: pointer to tx descriptor to be updated
+ * @pkt_len: pointer to unsigned long value
+ *
+ * This function creates DMA mappings for skb's data blocks and writes them to
+ * the PBL of a new tx descriptor. It also stores them in the tx db, so they
+ * can be unmapped after the data has been sent. It is the responsibility of
+ * the caller to make sure that there is enough space in the txdb. The last
+ * element holds a pointer to the skb itself and is marked with a negative
+ * length.
+ *
+ * Return: 0 on success and negative value on error.
+ */
+static int tn40_tx_map_skb(struct tn40_priv *priv, struct sk_buff *skb,
+ struct tn40_txd_desc *txdd, unsigned int *pkt_len)
+{
+ struct tn40_mapping_info info[TN40_MAX_PBL];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct tn40_pbl *pbl = &txdd->pbl[0];
+ struct tn40_txdb *db = &priv->txdb;
+ unsigned int size;
+ int i, len, ret;
+ dma_addr_t dma;
+
+ netdev_dbg(priv->ndev, "TX skb %p skbLen %d dataLen %d frags %d\n", skb,
+ skb->len, skb->data_len, nr_frags);
+ if (nr_frags > TN40_MAX_PBL - 1) {
+ ret = skb_linearize(skb);
+ if (ret)
+ return ret;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ }
+ /* initial skb */
+ len = skb->len - skb->data_len;
+ dma = dma_map_single(&priv->pdev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ ret = dma_mapping_error(&priv->pdev->dev, dma);
+ if (ret)
+ return ret;
+
+ tn40_txdb_set(db, dma, len);
+ tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len);
+ *pkt_len = db->wptr->len;
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ size = skb_frag_size(frag);
+ dma = skb_frag_dma_map(&priv->pdev->dev, frag, 0,
+ size, DMA_TO_DEVICE);
+
+ ret = dma_mapping_error(&priv->pdev->dev, dma);
+ if (ret)
+ goto mapping_error;
+ info[i].dma = dma;
+ info[i].size = size;
+ }
+
+ for (i = 0; i < nr_frags; i++) {
+ tn40_tx_db_inc_wptr(db);
+ tn40_txdb_set(db, info[i].dma, info[i].size);
+ tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len);
+ *pkt_len += db->wptr->len;
+ }
+
+ /* SHORT_PKT_FIX */
+ if (skb->len < TN40_SHORT_PACKET_SIZE)
+ ++nr_frags;
+
+ /* Add skb clean up info. */
+ tn40_tx_db_inc_wptr(db);
+ db->wptr->len = -tn40_txd_sizes[nr_frags].bytes;
+ db->wptr->addr.skb = skb;
+ tn40_tx_db_inc_wptr(db);
+
+ return 0;
+ mapping_error:
+ dma_unmap_page(&priv->pdev->dev, db->wptr->addr.dma, db->wptr->len,
+ DMA_TO_DEVICE);
+ for (; i > 0; i--)
+ dma_unmap_page(&priv->pdev->dev, info[i - 1].dma,
+ info[i - 1].size, DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+
+static int tn40_create_tx_ring(struct tn40_priv *priv)
+{
+ int ret;
+
+ ret = tn40_fifo_alloc(priv, &priv->txd_fifo0.m, priv->txd_size,
+ TN40_REG_TXD_CFG0_0, TN40_REG_TXD_CFG1_0,
+ TN40_REG_TXD_RPTR_0, TN40_REG_TXD_WPTR_0);
+ if (ret)
+ return ret;
+
+ ret = tn40_fifo_alloc(priv, &priv->txf_fifo0.m, priv->txf_size,
+ TN40_REG_TXF_CFG0_0, TN40_REG_TXF_CFG1_0,
+ TN40_REG_TXF_RPTR_0, TN40_REG_TXF_WPTR_0);
+ if (ret)
+ goto err_free_txd;
+
+ /* The TX db has to keep mappings for all packets sent (on
+ * TxD) and not yet reclaimed (on TxF).
+ */
+ ret = tn40_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size));
+ if (ret)
+ goto err_free_txf;
+
+ /* SHORT_PKT_FIX */
+ priv->b0_len = 64;
+ priv->b0_va = dma_alloc_coherent(&priv->pdev->dev, priv->b0_len,
+ &priv->b0_dma, GFP_KERNEL);
+ if (!priv->b0_va)
+ goto err_free_db;
+
+ priv->tx_level = TN40_MAX_TX_LEVEL;
+ priv->tx_update_mark = priv->tx_level - 1024;
+ return 0;
+err_free_db:
+ tn40_tx_db_close(&priv->txdb);
+err_free_txf:
+ tn40_fifo_free(priv, &priv->txf_fifo0.m);
+err_free_txd:
+ tn40_fifo_free(priv, &priv->txd_fifo0.m);
+ return -ENOMEM;
+}
+
+/**
+ * tn40_tx_space - Calculate the available space in the TX fifo.
+ * @priv: NIC private structure
+ *
+ * Return: available space in TX fifo in bytes
+ */
+static int tn40_tx_space(struct tn40_priv *priv)
+{
+ struct tn40_txd_fifo *f = &priv->txd_fifo0;
+ int fsize;
+
+ f->m.rptr = tn40_read_reg(priv, f->m.reg_rptr) & TN40_TXF_WPTR_WR_PTR;
+ fsize = f->m.rptr - f->m.wptr;
+ if (fsize <= 0)
+ fsize = f->m.memsz + fsize;
+ return fsize;
+}
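+
+/* Worked example for tn40_tx_space() (illustrative): with memsz = 4096,
+ * rptr = 512 and wptr = 3584, rptr - wptr = -3072, so the available space
+ * is 4096 + (-3072) = 1024 bytes; the memsz correction naturally handles
+ * the wrapped case.
+ */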
+
+#define TN40_TXD_FULL_CHECKSUM 7
+
+static netdev_tx_t tn40_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ struct tn40_txd_fifo *f = &priv->txd_fifo0;
+ int txd_checksum = TN40_TXD_FULL_CHECKSUM;
+ struct tn40_txd_desc *txdd;
+ int nr_frags, len, err;
+ unsigned int pkt_len;
+ int txd_vlan_id = 0;
+ int txd_lgsnd = 0;
+ int txd_vtag = 0;
+ int txd_mss = 0;
+
+ /* Build tx descriptor */
+ txdd = (struct tn40_txd_desc *)(f->m.va + f->m.wptr);
+ err = tn40_tx_map_skb(priv, skb, txdd, &pkt_len);
+ if (err) {
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.tx_dropped++;
+ u64_stats_update_end(&priv->syncp);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+ txd_checksum = 0;
+
+ if (skb_shinfo(skb)->gso_size) {
+ txd_mss = skb_shinfo(skb)->gso_size;
+ txd_lgsnd = 1;
+ netdev_dbg(priv->ndev, "skb %p pkt len %d gso size = %d\n", skb,
+ pkt_len, txd_mss);
+ }
+ if (skb_vlan_tag_present(skb)) {
+ /* Don't cut VLAN ID to 12 bits */
+ txd_vlan_id = skb_vlan_tag_get(skb);
+ txd_vtag = 1;
+ }
+ txdd->va_hi = 0;
+ txdd->va_lo = 0;
+ txdd->length = cpu_to_le16(pkt_len);
+ txdd->mss = cpu_to_le16(txd_mss);
+ txdd->txd_val1 =
+ cpu_to_le32(TN40_TXD_W1_VAL
+ (tn40_txd_sizes[nr_frags].qwords, txd_checksum,
+ txd_vtag, txd_lgsnd, txd_vlan_id));
+ netdev_dbg(priv->ndev, "=== w1 qwords[%d] %d =====\n", nr_frags,
+ tn40_txd_sizes[nr_frags].qwords);
+ netdev_dbg(priv->ndev, "=== TxD desc =====================\n");
+ netdev_dbg(priv->ndev, "=== w1: 0x%x ================\n",
+ txdd->txd_val1);
+ netdev_dbg(priv->ndev, "=== w2: mss 0x%x len 0x%x\n", txdd->mss,
+ txdd->length);
+ /* SHORT_PKT_FIX */
+ if (pkt_len < TN40_SHORT_PACKET_SIZE) {
+ struct tn40_pbl *pbl = &txdd->pbl[++nr_frags];
+
+ txdd->length = cpu_to_le16(TN40_SHORT_PACKET_SIZE);
+ txdd->txd_val1 =
+ cpu_to_le32(TN40_TXD_W1_VAL
+ (tn40_txd_sizes[nr_frags].qwords,
+ txd_checksum, txd_vtag, txd_lgsnd,
+ txd_vlan_id));
+ pbl->len = cpu_to_le32(TN40_SHORT_PACKET_SIZE - pkt_len);
+ pbl->pa_lo = cpu_to_le32(lower_32_bits(priv->b0_dma));
+ pbl->pa_hi = cpu_to_le32(upper_32_bits(priv->b0_dma));
+ netdev_dbg(priv->ndev, "=== SHORT_PKT_FIX ==============\n");
+ netdev_dbg(priv->ndev, "=== nr_frags : %d ==============\n",
+ nr_frags);
+ }
+
+	/* Increment the TXD write pointer. In case of fifo wrapping, copy
+	 * the remainder of the descriptor to the beginning.
+ */
+ f->m.wptr += tn40_txd_sizes[nr_frags].bytes;
+ len = f->m.wptr - f->m.memsz;
+ if (unlikely(len >= 0)) {
+ f->m.wptr = len;
+ if (len > 0)
+ memcpy(f->m.va, f->m.va + f->m.memsz, len);
+ }
+ /* Force memory writes to complete before letting the HW know
+ * there are new descriptors to fetch.
+ */
+ wmb();
+
+ priv->tx_level -= tn40_txd_sizes[nr_frags].bytes;
+ if (priv->tx_level > priv->tx_update_mark) {
+ tn40_write_reg(priv, f->m.reg_wptr,
+ f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+ } else {
+ if (priv->tx_noupd++ > TN40_NO_UPD_PACKETS) {
+ priv->tx_noupd = 0;
+ tn40_write_reg(priv, f->m.reg_wptr,
+ f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+ }
+ }
+
+ u64_stats_update_begin(&priv->syncp);
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += pkt_len;
+ u64_stats_update_end(&priv->syncp);
+ if (priv->tx_level < TN40_MIN_TX_LEVEL) {
+ netdev_dbg(priv->ndev, "TX Q STOP level %d\n", priv->tx_level);
+ netif_stop_queue(ndev);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void tn40_tx_cleanup(struct tn40_priv *priv)
+{
+ struct tn40_txf_fifo *f = &priv->txf_fifo0;
+ struct tn40_txdb *db = &priv->txdb;
+ int tx_level = 0;
+
+ f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_MASK;
+
+ netif_tx_lock(priv->ndev);
+ while (f->m.wptr != f->m.rptr) {
+ f->m.rptr += TN40_TXF_DESC_SZ;
+ f->m.rptr &= f->m.size_mask;
+		/* Unmap all fragments; the tx_maps containing DMA
+		 * addresses come first.
+		 */
+ do {
+ dma_addr_t addr = db->rptr->addr.dma;
+ size_t size = db->rptr->len;
+
+ netif_tx_unlock(priv->ndev);
+ dma_unmap_page(&priv->pdev->dev, addr,
+ size, DMA_TO_DEVICE);
+ netif_tx_lock(priv->ndev);
+ tn40_tx_db_inc_rptr(db);
+ } while (db->rptr->len > 0);
+ tx_level -= db->rptr->len; /* '-' Because the len is negative */
+
+ /* Now should come skb pointer - free it */
+ dev_kfree_skb_any(db->rptr->addr.skb);
+ netdev_dbg(priv->ndev, "dev_kfree_skb_any %p %d\n",
+ db->rptr->addr.skb, -db->rptr->len);
+ tn40_tx_db_inc_rptr(db);
+ }
+
+ /* Let the HW know which TXF descriptors were cleaned */
+ tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR);
+
+ /* We reclaimed resources, so in case the Q is stopped by xmit
+ * callback, we resume the transmission and use tx_lock to
+ * synchronize with xmit.
+ */
+ priv->tx_level += tx_level;
+ if (priv->tx_noupd) {
+ priv->tx_noupd = 0;
+ tn40_write_reg(priv, priv->txd_fifo0.m.reg_wptr,
+ priv->txd_fifo0.m.wptr & TN40_TXF_WPTR_WR_PTR);
+ }
+ if (unlikely(netif_queue_stopped(priv->ndev) &&
+ netif_carrier_ok(priv->ndev) &&
+ (priv->tx_level >= TN40_MAX_TX_LEVEL / 2))) {
+ netdev_dbg(priv->ndev, "TX Q WAKE level %d\n", priv->tx_level);
+ netif_wake_queue(priv->ndev);
+ }
+ netif_tx_unlock(priv->ndev);
+}
+
+static void tn40_tx_free_skbs(struct tn40_priv *priv)
+{
+ struct tn40_txdb *db = &priv->txdb;
+
+ while (db->rptr != db->wptr) {
+ if (likely(db->rptr->len))
+ dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma,
+ db->rptr->len, DMA_TO_DEVICE);
+ else
+ dev_kfree_skb(db->rptr->addr.skb);
+ tn40_tx_db_inc_rptr(db);
+ }
+}
+
+static void tn40_destroy_tx_ring(struct tn40_priv *priv)
+{
+ tn40_tx_free_skbs(priv);
+ tn40_fifo_free(priv, &priv->txd_fifo0.m);
+ tn40_fifo_free(priv, &priv->txf_fifo0.m);
+ tn40_tx_db_close(&priv->txdb);
+ /* SHORT_PKT_FIX */
+ if (priv->b0_len) {
+ dma_free_coherent(&priv->pdev->dev, priv->b0_len, priv->b0_va,
+ priv->b0_dma);
+ priv->b0_len = 0;
+ }
+}
+
+/**
+ * tn40_tx_push_desc - Push a descriptor to TxD fifo.
+ *
+ * @priv: NIC private structure
+ * @data: desc's data
+ * @size: desc's size
+ *
+ * This function pushes the desc to the TxD fifo, wrapping it around the
+ * fifo end if needed.
+ *
+ * This function does not check for available space, nor does it check
+ * that the data size is smaller than the fifo size. Checking for
+ * space is the responsibility of the caller.
+ */
+static void tn40_tx_push_desc(struct tn40_priv *priv, void *data, int size)
+{
+ struct tn40_txd_fifo *f = &priv->txd_fifo0;
+ int i = f->m.memsz - f->m.wptr;
+
+ if (size == 0)
+ return;
+
+ if (i > size) {
+ memcpy(f->m.va + f->m.wptr, data, size);
+ f->m.wptr += size;
+ } else {
+ memcpy(f->m.va + f->m.wptr, data, i);
+ f->m.wptr = size - i;
+ memcpy(f->m.va, data + i, f->m.wptr);
+ }
+ tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR);
+}
+
+/**
+ * tn40_tx_push_desc_safe - push descriptor to TxD fifo in a safe way.
+ *
+ * @priv: NIC private structure
+ * @data: descriptor data
+ * @size: descriptor size
+ *
+ * This function does check for available space and, if necessary,
+ * waits for the NIC to read existing data before writing new data.
+ */
+static void tn40_tx_push_desc_safe(struct tn40_priv *priv, void *data, int size)
+{
+ int timer = 0;
+
+ while (size > 0) {
+		/* We subtract 8 because when the fifo is full, rptr ==
+		 * wptr, which also encodes the empty state. The SW can
+		 * tell the difference, but the HW may not, so never
+		 * let the fifo fill up completely.
+		 */
+ int avail = tn40_tx_space(priv) - 8;
+
+ if (avail <= 0) {
+ if (timer++ > 300) /* Prevent endless loop */
+ break;
+ /* Give the HW a chance to clean the fifo */
+ usleep_range(50, 60);
+ continue;
+ }
+ avail = min(avail, size);
+ netdev_dbg(priv->ndev,
+ "about to push %d bytes starting %p size %d\n",
+ avail, data, size);
+ tn40_tx_push_desc(priv, data, avail);
+ size -= avail;
+ data += avail;
+ }
+}
+
+int tn40_set_link_speed(struct tn40_priv *priv, u32 speed)
+{
+ u32 val;
+ int i;
+
+ netdev_dbg(priv->ndev, "speed %d\n", speed);
+ switch (speed) {
+ case SPEED_10000:
+ case SPEED_5000:
+ case SPEED_2500:
+ netdev_dbg(priv->ndev, "link_speed %d\n", speed);
+
+ tn40_write_reg(priv, 0x1010, 0x217); /*ETHSD.REFCLK_CONF */
+ tn40_write_reg(priv, 0x104c, 0x4c); /*ETHSD.L0_RX_PCNT */
+ tn40_write_reg(priv, 0x1050, 0x4c); /*ETHSD.L1_RX_PCNT */
+ tn40_write_reg(priv, 0x1054, 0x4c); /*ETHSD.L2_RX_PCNT */
+ tn40_write_reg(priv, 0x1058, 0x4c); /*ETHSD.L3_RX_PCNT */
+ tn40_write_reg(priv, 0x102c, 0x434); /*ETHSD.L0_TX_PCNT */
+ tn40_write_reg(priv, 0x1030, 0x434); /*ETHSD.L1_TX_PCNT */
+ tn40_write_reg(priv, 0x1034, 0x434); /*ETHSD.L2_TX_PCNT */
+ tn40_write_reg(priv, 0x1038, 0x434); /*ETHSD.L3_TX_PCNT */
+ tn40_write_reg(priv, 0x6300, 0x0400); /*MAC.PCS_CTRL */
+
+ tn40_write_reg(priv, 0x1018, 0x00); /*Mike2 */
+ udelay(5);
+ tn40_write_reg(priv, 0x1018, 0x04); /*Mike2 */
+ udelay(5);
+ tn40_write_reg(priv, 0x1018, 0x06); /*Mike2 */
+ udelay(5);
+ /*MikeFix1 */
+ /*L0: 0x103c , L1: 0x1040 , L2: 0x1044 , L3: 0x1048 =0x81644 */
+ tn40_write_reg(priv, 0x103c, 0x81644); /*ETHSD.L0_TX_DCNT */
+ tn40_write_reg(priv, 0x1040, 0x81644); /*ETHSD.L1_TX_DCNT */
+ tn40_write_reg(priv, 0x1044, 0x81644); /*ETHSD.L2_TX_DCNT */
+ tn40_write_reg(priv, 0x1048, 0x81644); /*ETHSD.L3_TX_DCNT */
+ tn40_write_reg(priv, 0x1014, 0x043); /*ETHSD.INIT_STAT */
+ for (i = 1000; i; i--) {
+ usleep_range(50, 60);
+ /*ETHSD.INIT_STAT */
+ val = tn40_read_reg(priv, 0x1014);
+ if (val & (1 << 9)) {
+ /*ETHSD.INIT_STAT */
+ tn40_write_reg(priv, 0x1014, 0x3);
+ /*ETHSD.INIT_STAT */
+ val = tn40_read_reg(priv, 0x1014);
+
+ break;
+ }
+ }
+ if (!i)
+ netdev_err(priv->ndev, "MAC init timeout!\n");
+
+ tn40_write_reg(priv, 0x6350, 0x0); /*MAC.PCS_IF_MODE */
+ tn40_write_reg(priv, TN40_REG_CTRLST, 0xC13); /*0x93//0x13 */
+ tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */
+ usleep_range(2000, 2100);
+
+ tn40_write_reg(priv, 0x111c, 0x0); /*MAC.MAC_RST_CNT */
+ break;
+
+ case SPEED_1000:
+ case SPEED_100:
+ tn40_write_reg(priv, 0x1010, 0x613); /*ETHSD.REFCLK_CONF */
+ tn40_write_reg(priv, 0x104c, 0x4d); /*ETHSD.L0_RX_PCNT */
+ tn40_write_reg(priv, 0x1050, 0x0); /*ETHSD.L1_RX_PCNT */
+ tn40_write_reg(priv, 0x1054, 0x0); /*ETHSD.L2_RX_PCNT */
+ tn40_write_reg(priv, 0x1058, 0x0); /*ETHSD.L3_RX_PCNT */
+ tn40_write_reg(priv, 0x102c, 0x35); /*ETHSD.L0_TX_PCNT */
+ tn40_write_reg(priv, 0x1030, 0x0); /*ETHSD.L1_TX_PCNT */
+ tn40_write_reg(priv, 0x1034, 0x0); /*ETHSD.L2_TX_PCNT */
+ tn40_write_reg(priv, 0x1038, 0x0); /*ETHSD.L3_TX_PCNT */
+ tn40_write_reg(priv, 0x6300, 0x01140); /*MAC.PCS_CTRL */
+
+ tn40_write_reg(priv, 0x1014, 0x043); /*ETHSD.INIT_STAT */
+ for (i = 1000; i; i--) {
+ usleep_range(50, 60);
+ val = tn40_read_reg(priv, 0x1014); /*ETHSD.INIT_STAT */
+ if (val & (1 << 9)) {
+ /*ETHSD.INIT_STAT */
+ tn40_write_reg(priv, 0x1014, 0x3);
+ /*ETHSD.INIT_STAT */
+ val = tn40_read_reg(priv, 0x1014);
+
+ break;
+ }
+ }
+ if (!i)
+ netdev_err(priv->ndev, "MAC init timeout!\n");
+
+ tn40_write_reg(priv, 0x6350, 0x2b); /*MAC.PCS_IF_MODE 1g */
+ tn40_write_reg(priv, 0x6310, 0x9801); /*MAC.PCS_DEV_AB */
+
+ tn40_write_reg(priv, 0x6314, 0x1); /*MAC.PCS_PART_AB */
+ tn40_write_reg(priv, 0x6348, 0xc8); /*MAC.PCS_LINK_LO */
+ tn40_write_reg(priv, 0x634c, 0xc8); /*MAC.PCS_LINK_HI */
+ usleep_range(50, 60);
+ tn40_write_reg(priv, TN40_REG_CTRLST, 0xC13); /*0x93//0x13 */
+ tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */
+ usleep_range(2000, 2100);
+
+ tn40_write_reg(priv, 0x111c, 0x0); /*MAC.MAC_RST_CNT */
+ tn40_write_reg(priv, 0x6300, 0x1140); /*MAC.PCS_CTRL */
+ break;
+
+ case 0: /* Link down */
+ tn40_write_reg(priv, 0x104c, 0x0); /*ETHSD.L0_RX_PCNT */
+ tn40_write_reg(priv, 0x1050, 0x0); /*ETHSD.L1_RX_PCNT */
+ tn40_write_reg(priv, 0x1054, 0x0); /*ETHSD.L2_RX_PCNT */
+ tn40_write_reg(priv, 0x1058, 0x0); /*ETHSD.L3_RX_PCNT */
+ tn40_write_reg(priv, 0x102c, 0x0); /*ETHSD.L0_TX_PCNT */
+ tn40_write_reg(priv, 0x1030, 0x0); /*ETHSD.L1_TX_PCNT */
+ tn40_write_reg(priv, 0x1034, 0x0); /*ETHSD.L2_TX_PCNT */
+ tn40_write_reg(priv, 0x1038, 0x0); /*ETHSD.L3_TX_PCNT */
+
+ tn40_write_reg(priv, TN40_REG_CTRLST, 0x800);
+ tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */
+ usleep_range(2000, 2100);
+
+ tn40_write_reg(priv, 0x111c, 0x0); /*MAC.MAC_RST_CNT */
+ break;
+
+ default:
+ netdev_err(priv->ndev,
+ "Link speed was not identified yet (%d)\n", speed);
+ speed = 0;
+ break;
+ }
+ return speed;
+}
+
+static void tn40_link_changed(struct tn40_priv *priv)
+{
+ u32 link = tn40_read_reg(priv,
+ TN40_REG_MAC_LNK_STAT) & TN40_MAC_LINK_STAT;
+
+ netdev_dbg(priv->ndev, "link changed %u\n", link);
+}
+
+static void tn40_isr_extra(struct tn40_priv *priv, u32 isr)
+{
+ if (isr & (TN40_IR_LNKCHG0 | TN40_IR_LNKCHG1 | TN40_IR_TMR0)) {
+ netdev_dbg(priv->ndev, "isr = 0x%x\n", isr);
+ tn40_link_changed(priv);
+ }
+}
+
+static irqreturn_t tn40_isr_napi(int irq, void *dev)
+{
+ struct tn40_priv *priv = netdev_priv((struct net_device *)dev);
+ u32 isr;
+
+ isr = tn40_read_reg(priv, TN40_REG_ISR_MSK0);
+
+ if (unlikely(!isr)) {
+ tn40_enable_interrupts(priv);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+
+ if (isr & TN40_IR_EXTRA)
+ tn40_isr_extra(priv, isr);
+
+ if (isr & (TN40_IR_RX_DESC_0 | TN40_IR_TX_FREE_0 | TN40_IR_TMR1)) {
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ __napi_schedule(&priv->napi);
+ return IRQ_HANDLED;
+ }
+		/* We get here if an interrupt has slipped into the
+		 * small time window between these lines in
+		 * tn40_poll: tn40_enable_interrupts(priv); return 0;
+		 *
+		 * Currently interrupts are disabled (since we read
+		 * the ISR register) and we have failed to schedule
+		 * the next poll. So we read the regs to trigger the
+ * chip and allow further interrupts.
+ */
+ tn40_read_reg(priv, TN40_REG_TXF_WPTR_0);
+ tn40_read_reg(priv, TN40_REG_RXD_WPTR_0);
+ }
+
+ tn40_enable_interrupts(priv);
+ return IRQ_HANDLED;
+}
+
+static int tn40_poll(struct napi_struct *napi, int budget)
+{
+ struct tn40_priv *priv = container_of(napi, struct tn40_priv, napi);
+ int work_done;
+
+ tn40_tx_cleanup(priv);
+
+ if (!budget)
+ return 0;
+
+ work_done = tn40_rx_receive(priv, budget);
+ if (work_done == budget)
+ return budget;
+
+ if (napi_complete_done(napi, work_done))
+ tn40_enable_interrupts(priv);
+ return work_done;
+}
+
+static int tn40_fw_load(struct tn40_priv *priv)
+{
+ const struct firmware *fw = NULL;
+ int master, ret;
+ u32 val;
+
+ ret = request_firmware(&fw, TN40_FIRMWARE_NAME, &priv->pdev->dev);
+ if (ret)
+ return ret;
+
+ master = tn40_read_reg(priv, TN40_REG_INIT_SEMAPHORE);
+ if (!tn40_read_reg(priv, TN40_REG_INIT_STATUS) && master) {
+ netdev_dbg(priv->ndev, "Loading FW...\n");
+ tn40_tx_push_desc_safe(priv, (void *)fw->data, fw->size);
+ msleep(100);
+ }
+ ret = read_poll_timeout(tn40_read_reg, val, val, 2000, 400000, false,
+ priv, TN40_REG_INIT_STATUS);
+ if (master)
+ tn40_write_reg(priv, TN40_REG_INIT_SEMAPHORE, 1);
+
+ if (ret) {
+ netdev_err(priv->ndev, "firmware loading failed\n");
+ netdev_dbg(priv->ndev, "VPC: 0x%x VIC: 0x%x STATUS: 0x%xd\n",
+ tn40_read_reg(priv, TN40_REG_VPC),
+ tn40_read_reg(priv, TN40_REG_VIC),
+ tn40_read_reg(priv, TN40_REG_INIT_STATUS));
+ ret = -EIO;
+ } else {
+ netdev_dbg(priv->ndev, "firmware loading success\n");
+ }
+ release_firmware(fw);
+ return ret;
+}
+
+static void tn40_restore_mac(struct net_device *ndev, struct tn40_priv *priv)
+{
+ u32 val;
+
+ netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n",
+ tn40_read_reg(priv, TN40_REG_UNC_MAC0_A),
+ tn40_read_reg(priv, TN40_REG_UNC_MAC1_A),
+ tn40_read_reg(priv, TN40_REG_UNC_MAC2_A));
+
+ val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]);
+ tn40_write_reg(priv, TN40_REG_UNC_MAC2_A, val);
+ val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]);
+ tn40_write_reg(priv, TN40_REG_UNC_MAC1_A, val);
+ val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]);
+ tn40_write_reg(priv, TN40_REG_UNC_MAC0_A, val);
+
+	/* Also program the MAC address into the MAC_ADDR_0/1 registers */
+ tn40_write_reg(priv, TN40_REG_MAC_ADDR_0,
+ (ndev->dev_addr[3] << 24) | (ndev->dev_addr[2] << 16) |
+ (ndev->dev_addr[1] << 8) | (ndev->dev_addr[0]));
+ tn40_write_reg(priv, TN40_REG_MAC_ADDR_1,
+ (ndev->dev_addr[5] << 8) | (ndev->dev_addr[4]));
+
+ netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n",
+ tn40_read_reg(priv, TN40_REG_UNC_MAC0_A),
+ tn40_read_reg(priv, TN40_REG_UNC_MAC1_A),
+ tn40_read_reg(priv, TN40_REG_UNC_MAC2_A));
+}
+
+static void tn40_hw_start(struct tn40_priv *priv)
+{
+ tn40_write_reg(priv, TN40_REG_FRM_LENGTH, 0X3FE0);
+ tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, 0X10fd);
+ /*MikeFix1 */
+ /*L0: 0x103c , L1: 0x1040 , L2: 0x1044 , L3: 0x1048 =0x81644 */
+ tn40_write_reg(priv, 0x103c, 0x81644); /*ETHSD.L0_TX_DCNT */
+ tn40_write_reg(priv, 0x1040, 0x81644); /*ETHSD.L1_TX_DCNT */
+ tn40_write_reg(priv, 0x1044, 0x81644); /*ETHSD.L2_TX_DCNT */
+ tn40_write_reg(priv, 0x1048, 0x81644); /*ETHSD.L3_TX_DCNT */
+ tn40_write_reg(priv, TN40_REG_RX_FIFO_SECTION, 0x10);
+ tn40_write_reg(priv, TN40_REG_TX_FIFO_SECTION, 0xE00010);
+ tn40_write_reg(priv, TN40_REG_RX_FULLNESS, 0);
+ tn40_write_reg(priv, TN40_REG_TX_FULLNESS, 0);
+
+ tn40_write_reg(priv, TN40_REG_VGLB, 0);
+ tn40_write_reg(priv, TN40_REG_MAX_FRAME_A,
+ priv->rxf_fifo0.m.pktsz & TN40_MAX_FRAME_AB_VAL);
+ tn40_write_reg(priv, TN40_REG_RDINTCM0, priv->rdintcm);
+ tn40_write_reg(priv, TN40_REG_RDINTCM2, 0);
+
+ /* old val = 0x300064 */
+ tn40_write_reg(priv, TN40_REG_TDINTCM0, priv->tdintcm);
+
+ /* Enable timer interrupt once in 2 secs. */
+ tn40_restore_mac(priv->ndev, priv);
+
+ /* Pause frame */
+ tn40_write_reg(priv, 0x12E0, 0x28);
+ tn40_write_reg(priv, TN40_REG_PAUSE_QUANT, 0xFFFF);
+ tn40_write_reg(priv, 0x6064, 0xF);
+
+ tn40_write_reg(priv, TN40_REG_GMAC_RXF_A,
+ TN40_GMAC_RX_FILTER_OSEN | TN40_GMAC_RX_FILTER_TXFC |
+ TN40_GMAC_RX_FILTER_AM | TN40_GMAC_RX_FILTER_AB);
+
+ tn40_enable_interrupts(priv);
+}
+
+static int tn40_hw_reset(struct tn40_priv *priv)
+{
+ u32 val;
+
+ /* Reset sequences: read, write 1, read, write 0 */
+ val = tn40_read_reg(priv, TN40_REG_CLKPLL);
+ tn40_write_reg(priv, TN40_REG_CLKPLL, (val | TN40_CLKPLL_SFTRST) + 0x8);
+ usleep_range(50, 60);
+ val = tn40_read_reg(priv, TN40_REG_CLKPLL);
+ tn40_write_reg(priv, TN40_REG_CLKPLL, val & ~TN40_CLKPLL_SFTRST);
+
+ /* Check that the PLLs are locked and reset ended */
+ val = read_poll_timeout(tn40_read_reg, val,
+ (val & TN40_CLKPLL_LKD) == TN40_CLKPLL_LKD,
+ 10000, 700000, false, priv, TN40_REG_CLKPLL);
+ if (val)
+ return -EIO;
+
+ usleep_range(50, 60);
+ /* Do any PCI-E read transaction */
+ tn40_read_reg(priv, TN40_REG_RXD_CFG0_0);
+ return 0;
+}
+
+static void tn40_sw_reset(struct tn40_priv *priv)
+{
+ int i, ret;
+ u32 val;
+
+ /* 1. load MAC (obsolete) */
+ /* 2. disable Rx (and Tx) */
+ tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, 0);
+ msleep(100);
+ /* 3. Disable port */
+ tn40_write_reg(priv, TN40_REG_DIS_PORT, 1);
+ /* 4. Disable queue */
+ tn40_write_reg(priv, TN40_REG_DIS_QU, 1);
+ /* 5. Wait until hw is disabled */
+ ret = read_poll_timeout(tn40_read_reg, val, val & 1, 10000, 500000,
+ false, priv, TN40_REG_RST_PORT);
+ if (ret)
+ netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
+
+ /* 6. Disable interrupts */
+ tn40_write_reg(priv, TN40_REG_RDINTCM0, 0);
+ tn40_write_reg(priv, TN40_REG_TDINTCM0, 0);
+ tn40_write_reg(priv, TN40_REG_IMR, 0);
+ tn40_read_reg(priv, TN40_REG_ISR);
+
+ /* 7. Reset queue */
+ tn40_write_reg(priv, TN40_REG_RST_QU, 1);
+ /* 8. Reset port */
+ tn40_write_reg(priv, TN40_REG_RST_PORT, 1);
+ /* 9. Zero all read and write pointers */
+ for (i = TN40_REG_TXD_WPTR_0; i <= TN40_REG_TXF_RPTR_3; i += 0x10)
+ tn40_write_reg(priv, i, 0);
+ /* 10. Unset port disable */
+ tn40_write_reg(priv, TN40_REG_DIS_PORT, 0);
+ /* 11. Unset queue disable */
+ tn40_write_reg(priv, TN40_REG_DIS_QU, 0);
+ /* 12. Unset queue reset */
+ tn40_write_reg(priv, TN40_REG_RST_QU, 0);
+ /* 13. Unset port reset */
+ tn40_write_reg(priv, TN40_REG_RST_PORT, 0);
+ /* 14. Enable Rx */
+ /* Skipped. will be done later */
+}
+
+static int tn40_start(struct tn40_priv *priv)
+{
+ int ret;
+
+ ret = tn40_create_tx_ring(priv);
+ if (ret) {
+ netdev_err(priv->ndev, "failed to tx init %d\n", ret);
+ return ret;
+ }
+
+ ret = tn40_create_rx_ring(priv);
+ if (ret) {
+ netdev_err(priv->ndev, "failed to rx init %d\n", ret);
+ goto err_tx_ring;
+ }
+
+ tn40_rx_alloc_buffers(priv);
+ if (tn40_rxdb_available(priv->rxdb0) != 1) {
+ ret = -ENOMEM;
+ netdev_err(priv->ndev, "failed to allocate rx buffers\n");
+ goto err_rx_ring;
+ }
+
+ ret = request_irq(priv->pdev->irq, &tn40_isr_napi, IRQF_SHARED,
+ priv->ndev->name, priv->ndev);
+ if (ret) {
+ netdev_err(priv->ndev, "failed to request irq %d\n", ret);
+ goto err_rx_ring;
+ }
+
+ tn40_hw_start(priv);
+ return 0;
+err_rx_ring:
+ tn40_destroy_rx_ring(priv);
+err_tx_ring:
+ tn40_destroy_tx_ring(priv);
+ return ret;
+}
+
+static void tn40_stop(struct tn40_priv *priv)
+{
+ tn40_disable_interrupts(priv);
+ free_irq(priv->pdev->irq, priv->ndev);
+ tn40_sw_reset(priv);
+ tn40_destroy_tx_ring(priv);
+ tn40_destroy_rx_ring(priv);
+}
+
+static int tn40_close(struct net_device *ndev)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+
+ phylink_stop(priv->phylink);
+ phylink_disconnect_phy(priv->phylink);
+
+ napi_disable(&priv->napi);
+ netif_napi_del(&priv->napi);
+ tn40_stop(priv);
+ return 0;
+}
+
+static int tn40_open(struct net_device *dev)
+{
+ struct tn40_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = phylink_connect_phy(priv->phylink, priv->phydev);
+ if (ret) {
+ netdev_err(dev, "failed to connect to phy %d\n", ret);
+ return ret;
+ }
+ tn40_sw_reset(priv);
+ ret = tn40_start(priv);
+ if (ret) {
+ phylink_disconnect_phy(priv->phylink);
+ netdev_err(dev, "failed to start %d\n", ret);
+ return ret;
+ }
+ napi_enable(&priv->napi);
+ phylink_start(priv->phylink);
+ netif_start_queue(priv->ndev);
+ return 0;
+}
+
+static void __tn40_vlan_rx_vid(struct net_device *ndev, uint16_t vid,
+ int enable)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ u32 reg, bit, val;
+
+ netdev_dbg(priv->ndev, "vid =%d value =%d\n", (int)vid, enable);
+ reg = TN40_REG_VLAN_0 + (vid / 32) * 4;
+ bit = 1 << vid % 32;
+ val = tn40_read_reg(priv, reg);
+ netdev_dbg(priv->ndev, "reg =%x, val =%x, bit =%d\n", reg, val, bit);
+ if (enable)
+ val |= bit;
+ else
+ val &= ~bit;
+ netdev_dbg(priv->ndev, "new val %x\n", val);
+ tn40_write_reg(priv, reg, val);
+}
+
+static int tn40_vlan_rx_add_vid(struct net_device *ndev,
+ __always_unused __be16 proto, u16 vid)
+{
+ __tn40_vlan_rx_vid(ndev, vid, 1);
+ return 0;
+}
+
+static int tn40_vlan_rx_kill_vid(struct net_device *ndev,
+ __always_unused __be16 proto, u16 vid)
+{
+ __tn40_vlan_rx_vid(ndev, vid, 0);
+ return 0;
+}
+
+static void tn40_setmulti(struct net_device *ndev)
+{
+ u32 rxf_val = TN40_GMAC_RX_FILTER_AM | TN40_GMAC_RX_FILTER_AB |
+ TN40_GMAC_RX_FILTER_OSEN | TN40_GMAC_RX_FILTER_TXFC;
+ struct tn40_priv *priv = netdev_priv(ndev);
+ int i;
+
+ /* IMF - imperfect (hash) rx multicast filter */
+ /* PMF - perfect rx multicast filter */
+
+ /* FIXME: RXE(OFF) */
+ if (ndev->flags & IFF_PROMISC) {
+ rxf_val |= TN40_GMAC_RX_FILTER_PRM;
+ } else if (ndev->flags & IFF_ALLMULTI) {
+ /* set IMF to accept all multicast frames */
+ for (i = 0; i < TN40_MAC_MCST_HASH_NUM; i++)
+ tn40_write_reg(priv,
+ TN40_REG_RX_MCST_HASH0 + i * 4, ~0);
+ } else if (netdev_mc_count(ndev)) {
+ struct netdev_hw_addr *mclist;
+ u32 reg, val;
+ u8 hash;
+
+ /* Set IMF to deny all multicast frames */
+ for (i = 0; i < TN40_MAC_MCST_HASH_NUM; i++)
+ tn40_write_reg(priv,
+ TN40_REG_RX_MCST_HASH0 + i * 4, 0);
+
+ /* Set PMF to deny all multicast frames */
+ for (i = 0; i < TN40_MAC_MCST_NUM; i++) {
+ tn40_write_reg(priv,
+ TN40_REG_RX_MAC_MCST0 + i * 8, 0);
+ tn40_write_reg(priv,
+ TN40_REG_RX_MAC_MCST1 + i * 8, 0);
+ }
+ /* Use PMF to accept first MAC_MCST_NUM (15) addresses */
+
+		/* TBD: Sort the addresses and write them in ascending
+		 * order into the RX_MAC_MCST regs. We skip this phase
+		 * for now and accept ALL multicast frames through the
+		 * IMF. The rest of the addresses would also go through
+		 * the IMF.
+		 */
+ netdev_for_each_mc_addr(mclist, ndev) {
+ hash = 0;
+ for (i = 0; i < ETH_ALEN; i++)
+ hash ^= mclist->addr[i];
+
+ reg = TN40_REG_RX_MCST_HASH0 + ((hash >> 5) << 2);
+ val = tn40_read_reg(priv, reg);
+ val |= (1 << (hash % 32));
+ tn40_write_reg(priv, reg, val);
+ }
+ } else {
+ rxf_val |= TN40_GMAC_RX_FILTER_AB;
+ }
+ tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, rxf_val);
+ /* Enable RX */
+ /* FIXME: RXE(ON) */
+}
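+
+/* Illustrative example of the IMF hashing used above: the 8-bit hash is the
+ * XOR of the six address octets; its top 3 bits select one of the
+ * TN40_MAC_MCST_HASH_NUM hash registers and its low 5 bits select a bit
+ * within that register:
+ *
+ *	hash = a[0] ^ a[1] ^ a[2] ^ a[3] ^ a[4] ^ a[5];
+ *	reg = TN40_REG_RX_MCST_HASH0 + ((hash >> 5) << 2);
+ *	val |= 1 << (hash % 32);
+ */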
+
+static int tn40_set_mac(struct net_device *ndev, void *p)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ struct sockaddr *addr = p;
+
+ eth_hw_addr_set(ndev, addr->sa_data);
+ tn40_restore_mac(ndev, priv);
+ return 0;
+}
+
+static void tn40_mac_init(struct tn40_priv *priv)
+{
+ u8 addr[ETH_ALEN];
+ u64 val;
+
+ val = (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC0_A);
+ val |= (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC1_A) << 16;
+ val |= (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC2_A) << 32;
+
+ u64_to_ether_addr(val, addr);
+ eth_hw_addr_set(priv->ndev, addr);
+}
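+
+/* Illustrative layout assumed by tn40_mac_init() and tn40_restore_mac():
+ * the six MAC address octets are split across three 16-bit registers,
+ *
+ *	UNC_MAC2_A = addr[0] << 8 | addr[1]
+ *	UNC_MAC1_A = addr[2] << 8 | addr[3]
+ *	UNC_MAC0_A = addr[4] << 8 | addr[5]
+ *
+ * so reading them back and shifting by 0/16/32 bits reassembles the address
+ * as a u64 for u64_to_ether_addr().
+ */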
+
+static void tn40_get_stats(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+ stats->tx_packets = priv->stats.tx_packets;
+ stats->tx_bytes = priv->stats.tx_bytes;
+ stats->tx_dropped = priv->stats.tx_dropped;
+
+ stats->rx_packets = priv->stats.rx_packets;
+ stats->rx_bytes = priv->stats.rx_bytes;
+ stats->rx_dropped = priv->stats.rx_dropped;
+ stats->rx_errors = priv->stats.rx_errors;
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
+
+static const struct net_device_ops tn40_netdev_ops = {
+ .ndo_open = tn40_open,
+ .ndo_stop = tn40_close,
+ .ndo_start_xmit = tn40_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_rx_mode = tn40_setmulti,
+ .ndo_get_stats64 = tn40_get_stats,
+ .ndo_set_mac_address = tn40_set_mac,
+ .ndo_vlan_rx_add_vid = tn40_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = tn40_vlan_rx_kill_vid,
+};
+
+static int tn40_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
+static const struct ethtool_ops tn40_ethtool_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = tn40_ethtool_get_link_ksettings,
+};
+
+static void tn40_get_queue_stats_rx(struct net_device *ndev, int idx,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+
+ stats->packets = priv->stats.rx_packets;
+ stats->bytes = priv->stats.rx_bytes;
+ stats->alloc_fail = priv->alloc_fail;
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
+
+static void tn40_get_queue_stats_tx(struct net_device *ndev, int idx,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct tn40_priv *priv = netdev_priv(ndev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin(&priv->syncp);
+
+ stats->packets = priv->stats.tx_packets;
+ stats->bytes = priv->stats.tx_bytes;
+ } while (u64_stats_fetch_retry(&priv->syncp, start));
+}
+
+static void tn40_get_base_stats(struct net_device *ndev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ rx->packets = 0;
+ rx->bytes = 0;
+ rx->alloc_fail = 0;
+
+ tx->packets = 0;
+ tx->bytes = 0;
+}
+
+static const struct netdev_stat_ops tn40_stat_ops = {
+ .get_queue_stats_rx = tn40_get_queue_stats_rx,
+ .get_queue_stats_tx = tn40_get_queue_stats_tx,
+ .get_base_stats = tn40_get_base_stats,
+};
+
+static int tn40_priv_init(struct tn40_priv *priv)
+{
+ int ret;
+
+ tn40_set_link_speed(priv, 0);
+
+ /* Set GPIO[9:0] to output 0 */
+ tn40_write_reg(priv, 0x51E0, 0x30010006); /* GPIO_OE_ WR CMD */
+ tn40_write_reg(priv, 0x51F0, 0x0); /* GPIO_OE_ DATA */
+ tn40_write_reg(priv, TN40_REG_MDIO_CMD_STAT, 0x3ec8);
+
+	/* We use tx descriptors to load the firmware. */
+ ret = tn40_create_tx_ring(priv);
+ if (ret)
+ return ret;
+ ret = tn40_fw_load(priv);
+ tn40_destroy_tx_ring(priv);
+ return ret;
+}
+
+static struct net_device *tn40_netdev_alloc(struct pci_dev *pdev)
+{
+ struct net_device *ndev;
+
+ ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct tn40_priv));
+ if (!ndev)
+ return NULL;
+ ndev->netdev_ops = &tn40_netdev_ops;
+ ndev->ethtool_ops = &tn40_ethtool_ops;
+ ndev->stat_ops = &tn40_stat_ops;
+ ndev->tx_queue_len = TN40_NDEV_TXQ_LEN;
+ ndev->mem_start = pci_resource_start(pdev, 0);
+ ndev->mem_end = pci_resource_end(pdev, 0);
+ ndev->min_mtu = ETH_ZLEN;
+ ndev->max_mtu = TN40_MAX_MTU;
+
+ ndev->features = NETIF_F_IP_CSUM |
+ NETIF_F_SG |
+ NETIF_F_FRAGLIST |
+ NETIF_F_TSO | NETIF_F_GRO |
+ NETIF_F_RXCSUM |
+ NETIF_F_RXHASH |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->vlan_features = NETIF_F_IP_CSUM |
+ NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_GRO | NETIF_F_RXHASH;
+
+ if (dma_get_mask(&pdev->dev) == DMA_BIT_MASK(64)) {
+ ndev->features |= NETIF_F_HIGHDMA;
+ ndev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+ ndev->hw_features |= ndev->features;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ netif_stop_queue(ndev);
+ return ndev;
+}
+
+static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *ndev;
+ struct tn40_priv *priv;
+ unsigned int nvec = 1;
+ void __iomem *regs;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set DMA mask.\n");
+ goto err_disable_device;
+ }
+
+ ret = pci_request_regions(pdev, TN40_DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request PCI regions.\n");
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+
+ regs = pci_iomap(pdev, 0, TN40_REGS_SIZE);
+ if (!regs) {
+ ret = -EIO;
+		dev_err(&pdev->dev, "failed to map PCI BAR.\n");
+ goto err_free_regions;
+ }
+
+ ndev = tn40_netdev_alloc(pdev);
+ if (!ndev) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "failed to allocate netdev.\n");
+ goto err_iounmap;
+ }
+
+ priv = netdev_priv(ndev);
+ pci_set_drvdata(pdev, priv);
+ netif_napi_add(ndev, &priv->napi, tn40_poll);
+
+ priv->regs = regs;
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+ /* Initialize fifo sizes. */
+ priv->txd_size = 3;
+ priv->txf_size = 3;
+ priv->rxd_size = 3;
+ priv->rxf_size = 3;
+	/* Set the initial interrupt coalescing registers. */
+ priv->rdintcm = TN40_INT_REG_VAL(0x20, 1, 4, 12);
+ priv->tdintcm = TN40_INT_REG_VAL(0x20, 1, 0, 12);
+
+ ret = tn40_hw_reset(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to reset HW.\n");
+ goto err_unset_drvdata;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate irq.\n");
+ goto err_unset_drvdata;
+ }
+
+ ret = tn40_mdiobus_init(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize mdio bus.\n");
+ goto err_free_irq;
+ }
+
+ priv->stats_flag =
+ ((tn40_read_reg(priv, TN40_FPGA_VER) & 0xFFF) != 308);
+ u64_stats_init(&priv->syncp);
+
+ priv->isr_mask = TN40_IR_RX_FREE_0 | TN40_IR_LNKCHG0 | TN40_IR_PSE |
+ TN40_IR_TMR0 | TN40_IR_RX_DESC_0 | TN40_IR_TX_FREE_0 |
+ TN40_IR_TMR1;
+
+ tn40_mac_init(priv);
+ ret = tn40_phy_register(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set up PHY.\n");
+ goto err_free_irq;
+ }
+
+ ret = tn40_priv_init(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize tn40_priv.\n");
+ goto err_unregister_phydev;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register netdev.\n");
+ goto err_unregister_phydev;
+ }
+ return 0;
+err_unregister_phydev:
+ tn40_phy_unregister(priv);
+err_free_irq:
+ pci_free_irq_vectors(pdev);
+err_unset_drvdata:
+ pci_set_drvdata(pdev, NULL);
+err_iounmap:
+ iounmap(regs);
+err_free_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void tn40_remove(struct pci_dev *pdev)
+{
+ struct tn40_priv *priv = pci_get_drvdata(pdev);
+ struct net_device *ndev = priv->ndev;
+
+ unregister_netdev(ndev);
+
+ tn40_phy_unregister(priv);
+ pci_free_irq_vectors(priv->pdev);
+ pci_set_drvdata(pdev, NULL);
+ iounmap(priv->regs);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id tn40_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
+ PCI_VENDOR_ID_TEHUTI, 0x3015) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
+ PCI_VENDOR_ID_DLINK, 0x4d00) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
+ PCI_VENDOR_ID_ASUSTEK, 0x8709) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022,
+ PCI_VENDOR_ID_EDIMAX, 0x8103) },
+ { }
+};
+
+static struct pci_driver tn40_driver = {
+ .name = TN40_DRV_NAME,
+ .id_table = tn40_id_table,
+ .probe = tn40_probe,
+ .remove = tn40_remove,
+};
+
+module_pci_driver(tn40_driver);
+
+MODULE_DEVICE_TABLE(pci, tn40_id_table);
+MODULE_LICENSE("GPL");
+MODULE_FIRMWARE(TN40_FIRMWARE_NAME);
+MODULE_DESCRIPTION("Tehuti Network TN40xx Driver");
diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h
new file mode 100644
index 000000000000..490781fe5120
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#ifndef _TN40_H_
+#define _TN40_H_
+
+#include "tn40_regs.h"
+
+#define TN40_DRV_NAME "tn40xx"
+
+#define TN40_MDIO_SPEED_1MHZ (1)
+#define TN40_MDIO_SPEED_6MHZ (6)
+
+/* Netdev TX queue length for Luxor. The default value is 1000;
+ * e.g. "ifconfig eth1 txqueuelen 3000" changes it at runtime.
+ */
+#define TN40_NDEV_TXQ_LEN 1000
+
+#define TN40_FIFO_SIZE 4096
+#define TN40_FIFO_EXTRA_SPACE 1024
+
+#define TN40_TXF_DESC_SZ 16
+#define TN40_MAX_TX_LEVEL (priv->txd_fifo0.m.memsz - 16)
+#define TN40_MIN_TX_LEVEL 256
+#define TN40_NO_UPD_PACKETS 40
+#define TN40_MAX_MTU BIT(14)
+
+#define TN40_PCK_TH_MULT 128
+#define TN40_INT_COAL_MULT 2
+
+#define TN40_INT_REG_VAL(coal, coal_rc, rxf_th, pck_th) ( \
+ FIELD_PREP(GENMASK(14, 0), (coal)) | \
+ FIELD_PREP(BIT(15), (coal_rc)) | \
+ FIELD_PREP(GENMASK(19, 16), (rxf_th)) | \
+ FIELD_PREP(GENMASK(31, 20), (pck_th)) \
+ )
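+/* For example, TN40_INT_REG_VAL(0x20, 1, 4, 12) - the probe-time rdintcm
+ * default - packs coal=0x20 into bits 14:0, coal_rc=1 into bit 15,
+ * rxf_th=4 into bits 19:16 and pck_th=12 into bits 31:20, i.e. 0x00c48020.
+ */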
+
+struct tn40_fifo {
+ dma_addr_t da; /* Physical address of fifo (used by HW) */
+ char *va; /* Virtual address of fifo (used by SW) */
+ u32 rptr, wptr;
+	/* Cached values of RPTR and WPTR registers; they're 32 bits on
+	 * both 32-bit and 64-bit archs.
+	 */
+ u16 reg_cfg0;
+ u16 reg_cfg1;
+ u16 reg_rptr;
+ u16 reg_wptr;
+ u16 memsz; /* Memory size allocated for fifo */
+ u16 size_mask;
+ u16 pktsz; /* Skb packet size to allocate */
+ u16 rcvno; /* Number of buffers that come from this RXF */
+};
+
+struct tn40_txf_fifo {
+ struct tn40_fifo m; /* The minimal set of variables used by all fifos */
+};
+
+struct tn40_txd_fifo {
+ struct tn40_fifo m; /* The minimal set of variables used by all fifos */
+};
+
+struct tn40_rxf_fifo {
+ struct tn40_fifo m; /* The minimal set of variables used by all fifos */
+};
+
+struct tn40_rxd_fifo {
+ struct tn40_fifo m; /* The minimal set of variables used by all fifos */
+};
+
+struct tn40_rx_map {
+ struct page *page;
+};
+
+struct tn40_rxdb {
+ unsigned int *stack;
+ struct tn40_rx_map *elems;
+ unsigned int nelem;
+ unsigned int top;
+};
+
+union tn40_tx_dma_addr {
+ dma_addr_t dma;
+ struct sk_buff *skb;
+};
+
+/* Entry in the db:
+ * if len == 0, addr is a DMA address;
+ * if len != 0, addr is an skb pointer.
+ */
+struct tn40_tx_map {
+ union tn40_tx_dma_addr addr;
+ int len;
+};
+
+/* tx database - implemented as circular fifo buffer */
+struct tn40_txdb {
+ struct tn40_tx_map *start; /* Points to the first element */
+ struct tn40_tx_map *end; /* Points just AFTER the last element */
+ struct tn40_tx_map *rptr; /* Points to the next element to read */
+ struct tn40_tx_map *wptr; /* Points to the next element to write */
+ int size; /* Number of elements in the db */
+};
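+/* Assumed usage, from the layout above: rptr and wptr wrap from end back
+ * to start, and entries from rptr up to (but not including) wptr are
+ * still in flight.
+ */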
+
+struct tn40_priv {
+ struct net_device *ndev;
+ struct pci_dev *pdev;
+
+ struct napi_struct napi;
+ /* RX FIFOs: 1 for data (full) descs, and 2 for free descs */
+ struct tn40_rxd_fifo rxd_fifo0;
+ struct tn40_rxf_fifo rxf_fifo0;
+ struct tn40_rxdb *rxdb0; /* Rx dbs to store skb pointers */
+ struct page_pool *page_pool;
+
+ /* Tx FIFOs: 1 for data desc, 1 for empty (acks) desc */
+ struct tn40_txd_fifo txd_fifo0;
+ struct tn40_txf_fifo txf_fifo0;
+ struct tn40_txdb txdb;
+ int tx_level;
+ int tx_update_mark;
+ int tx_noupd;
+
+ int stats_flag;
+ struct rtnl_link_stats64 stats;
+ u64 alloc_fail;
+ struct u64_stats_sync syncp;
+
+ u8 txd_size;
+ u8 txf_size;
+ u8 rxd_size;
+ u8 rxf_size;
+ u32 rdintcm;
+ u32 tdintcm;
+
+ u32 isr_mask;
+
+ void __iomem *regs;
+
+ /* SHORT_PKT_FIX */
+ u32 b0_len;
+ dma_addr_t b0_dma; /* Physical address of buffer */
+ char *b0_va; /* Virtual address of buffer */
+
+ struct mii_bus *mdio;
+ struct phy_device *phydev;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+};
+
+/* RX FREE descriptor - 64bit */
+struct tn40_rxf_desc {
+ __le32 info; /* Buffer Count + Info - described below */
+ __le32 va_lo; /* VAdr[31:0] */
+ __le32 va_hi; /* VAdr[63:32] */
+ __le32 pa_lo; /* PAdr[31:0] */
+ __le32 pa_hi; /* PAdr[63:32] */
+ __le32 len; /* Buffer Length */
+};
+
+#define TN40_GET_RXD_BC(x) FIELD_GET(GENMASK(4, 0), (x))
+#define TN40_GET_RXD_ERR(x) FIELD_GET(GENMASK(26, 21), (x))
+#define TN40_GET_RXD_PKT_ID(x) FIELD_GET(GENMASK(30, 28), (x))
+#define TN40_GET_RXD_VTAG(x) FIELD_GET(BIT(31), (x))
+#define TN40_GET_RXD_VLAN_TCI(x) FIELD_GET(GENMASK(15, 0), (x))
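+/* The TN40_GET_RXD_* helpers above decode the rxd_val1 and rxd_vlan
+ * words of struct tn40_rxd_desc below.
+ */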
+
+struct tn40_rxd_desc {
+ __le32 rxd_val1;
+ __le16 len;
+ __le16 rxd_vlan;
+ __le32 va_lo;
+ __le32 va_hi;
+ __le32 rss_lo;
+ __le32 rss_hash;
+};
+
+#define TN40_MAX_PBL (19)
+/* PBL describes each virtual buffer to be transmitted from the host. */
+struct tn40_pbl {
+ __le32 pa_lo;
+ __le32 pa_hi;
+ __le32 len;
+};
+
+/* First word of the TXD descriptor: type = 3 for a regular Tx packet,
+ * hw_csum = 7 for IP+UDP+TCP HW checksums.
+ */
+#define TN40_TXD_W1_VAL(bc, checksum, vtag, lgsnd, vlan_id) ( \
+ GENMASK(17, 16) | \
+ FIELD_PREP(GENMASK(4, 0), (bc)) | \
+ FIELD_PREP(GENMASK(7, 5), (checksum)) | \
+ FIELD_PREP(BIT(8), (vtag)) | \
+ FIELD_PREP(GENMASK(12, 9), (lgsnd)) | \
+ FIELD_PREP(GENMASK(15, 13), \
+ FIELD_GET(GENMASK(15, 13), (vlan_id))) | \
+ FIELD_PREP(GENMASK(31, 20), \
+ FIELD_GET(GENMASK(11, 0), (vlan_id))) \
+ )
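+/* For example, TN40_TXD_W1_VAL(3, 7, 1, 0, 0x123) yields 0x123301e3:
+ * bc=3 in bits 4:0, checksum=7 in bits 7:5, vtag=1 in bit 8, the fixed
+ * type field in bits 17:16 and VLAN id 0x123 in bits 31:20 (PCP bits
+ * 15:13 of a full tag would land in word bits 15:13).
+ */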
+
+struct tn40_txd_desc {
+ __le32 txd_val1;
+ __le16 mss;
+ __le16 length;
+ __le32 va_lo;
+ __le32 va_hi;
+ struct tn40_pbl pbl[]; /* Fragments */
+};
+
+struct tn40_txf_desc {
+ u32 status;
+ u32 va_lo; /* VAdr[31:0] */
+ u32 va_hi; /* VAdr[63:32] */
+ u32 pad;
+};
+
+static inline u32 tn40_read_reg(struct tn40_priv *priv, u32 reg)
+{
+ return readl(priv->regs + reg);
+}
+
+static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val)
+{
+ writel(val, priv->regs + reg);
+}
+
+int tn40_set_link_speed(struct tn40_priv *priv, u32 speed);
+
+int tn40_mdiobus_init(struct tn40_priv *priv);
+
+int tn40_phy_register(struct tn40_priv *priv);
+void tn40_phy_unregister(struct tn40_priv *priv);
+
+#endif /* _TN40_H_ */
diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c
new file mode 100644
index 000000000000..af18615d64a8
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40_mdio.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/phylink.h>
+
+#include "tn40.h"
+
+#define TN40_MDIO_DEVAD_MASK GENMASK(4, 0)
+#define TN40_MDIO_PRTAD_MASK GENMASK(9, 5)
+#define TN40_MDIO_CMD_VAL(device, port) \
+ (FIELD_PREP(TN40_MDIO_DEVAD_MASK, (device)) | \
+ (FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port))))
+#define TN40_MDIO_CMD_READ BIT(15)
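+/* A Clause 45 access is a two-phase cycle: the register address is
+ * latched through TN40_REG_MDIO_ADDR first, then the data phase is
+ * started by writing TN40_REG_MDIO_CMD (with TN40_MDIO_CMD_READ set for
+ * reads), as done in tn40_mdio_read()/tn40_mdio_write() below.
+ */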
+
+static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed)
+{
+ void __iomem *regs = priv->regs;
+ int mdio_cfg;
+
+ if (speed == TN40_MDIO_SPEED_1MHZ)
+ mdio_cfg = (0x7d << 7) | 0x08; /* 1MHz */
+ else
+ mdio_cfg = 0xA08; /* 6MHz */
+ mdio_cfg |= (1 << 6);
+ writel(mdio_cfg, regs + TN40_REG_MDIO_CMD_STAT);
+ msleep(100);
+}
+
+static u32 tn40_mdio_stat(struct tn40_priv *priv)
+{
+ void __iomem *regs = priv->regs;
+
+ return readl(regs + TN40_REG_MDIO_CMD_STAT);
+}
+
+static int tn40_mdio_wait_nobusy(struct tn40_priv *priv, u32 *val)
+{
+ u32 stat;
+ int ret;
+
+ ret = readx_poll_timeout_atomic(tn40_mdio_stat, priv, stat,
+ TN40_GET_MDIO_BUSY(stat) == 0, 10,
+ 10000);
+ if (val)
+ *val = stat;
+ return ret;
+}
+
+static int tn40_mdio_read(struct tn40_priv *priv, int port, int device,
+ u16 regnum)
+{
+ void __iomem *regs = priv->regs;
+ u32 i;
+
+ /* wait until MDIO is not busy */
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+
+ i = TN40_MDIO_CMD_VAL(device, port);
+ writel(i, regs + TN40_REG_MDIO_CMD);
+ writel((u32)regnum, regs + TN40_REG_MDIO_ADDR);
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+
+ writel(TN40_MDIO_CMD_READ | i, regs + TN40_REG_MDIO_CMD);
+ /* read CMD_STAT until not busy */
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+
+ return lower_16_bits(readl(regs + TN40_REG_MDIO_DATA));
+}
+
+static int tn40_mdio_write(struct tn40_priv *priv, int port, int device,
+ u16 regnum, u16 data)
+{
+ void __iomem *regs = priv->regs;
+ u32 tmp_reg = 0;
+ int ret;
+
+ /* wait until MDIO is not busy */
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+ writel(TN40_MDIO_CMD_VAL(device, port), regs + TN40_REG_MDIO_CMD);
+ writel((u32)regnum, regs + TN40_REG_MDIO_ADDR);
+ if (tn40_mdio_wait_nobusy(priv, NULL))
+ return -EIO;
+ writel((u32)data, regs + TN40_REG_MDIO_DATA);
+ /* read CMD_STAT until not busy */
+ ret = tn40_mdio_wait_nobusy(priv, &tmp_reg);
+ if (ret)
+ return -EIO;
+
+ if (TN40_GET_MDIO_RD_ERR(tmp_reg)) {
+ dev_err(&priv->pdev->dev, "MDIO error after write command\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int tn40_mdio_read_c45(struct mii_bus *mii_bus, int addr, int devnum,
+ int regnum)
+{
+ return tn40_mdio_read(mii_bus->priv, addr, devnum, regnum);
+}
+
+static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum,
+ int regnum, u16 val)
+{
+ return tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val);
+}
+
+int tn40_mdiobus_init(struct tn40_priv *priv)
+{
+ struct pci_dev *pdev = priv->pdev;
+ struct mii_bus *bus;
+ int ret;
+
+ bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!bus)
+ return -ENOMEM;
+
+ bus->name = TN40_DRV_NAME;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "tn40xx-%x-%x",
+ pci_domain_nr(pdev->bus), pci_dev_id(pdev));
+ bus->priv = priv;
+
+ bus->read_c45 = tn40_mdio_read_c45;
+ bus->write_c45 = tn40_mdio_write_c45;
+
+ ret = devm_mdiobus_register(&pdev->dev, bus);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n",
+ ret, bus->state, MDIOBUS_UNREGISTERED);
+ return ret;
+ }
+ tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ);
+ priv->mdio = bus;
+ return 0;
+}
diff --git a/drivers/net/ethernet/tehuti/tn40_phy.c b/drivers/net/ethernet/tehuti/tn40_phy.c
new file mode 100644
index 000000000000..39eef7ca7958
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40_phy.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/phylink.h>
+
+#include "tn40.h"
+
+static struct tn40_priv *tn40_config_to_priv(struct phylink_config *config)
+{
+ return container_of(config, struct tn40_priv, phylink_config);
+}
+
+static void tn40_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct tn40_priv *priv = tn40_config_to_priv(config);
+
+ tn40_set_link_speed(priv, speed);
+ netif_wake_queue(priv->ndev);
+}
+
+static void tn40_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct tn40_priv *priv = tn40_config_to_priv(config);
+
+ netif_stop_queue(priv->ndev);
+ tn40_set_link_speed(priv, 0);
+}
+
+static void tn40_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+}
+
+static const struct phylink_mac_ops tn40_mac_ops = {
+ .mac_config = tn40_mac_config,
+ .mac_link_up = tn40_link_up,
+ .mac_link_down = tn40_link_down,
+};
+
+int tn40_phy_register(struct tn40_priv *priv)
+{
+ struct phylink_config *config;
+ struct phy_device *phydev;
+ struct phylink *phylink;
+
+ phydev = phy_find_first(priv->mdio);
+ if (!phydev) {
+		dev_err(&priv->pdev->dev, "PHY not found\n");
+ return -ENODEV;
+ }
+
+ config = &priv->phylink_config;
+ config->dev = &priv->ndev->dev;
+ config->type = PHYLINK_NETDEV;
+ config->mac_capabilities = MAC_10000FD;
+ __set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces);
+
+ phylink = phylink_create(config, NULL, PHY_INTERFACE_MODE_XAUI,
+ &tn40_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ priv->phydev = phydev;
+ priv->phylink = phylink;
+ return 0;
+}
+
+void tn40_phy_unregister(struct tn40_priv *priv)
+{
+ phylink_destroy(priv->phylink);
+}
diff --git a/drivers/net/ethernet/tehuti/tn40_regs.h b/drivers/net/ethernet/tehuti/tn40_regs.h
new file mode 100644
index 000000000000..95171aa57a9e
--- /dev/null
+++ b/drivers/net/ethernet/tehuti/tn40_regs.h
@@ -0,0 +1,245 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) Tehuti Networks Ltd. */
+
+#ifndef _TN40_REGS_H_
+#define _TN40_REGS_H_
+
+/* Register region size */
+#define TN40_REGS_SIZE 0x10000
+
+/* Registers from 0x0000-0x00fc were remapped to 0x4000-0x40fc */
+#define TN40_REG_TXD_CFG1_0 0x4000
+#define TN40_REG_TXD_CFG1_1 0x4004
+#define TN40_REG_TXD_CFG1_2 0x4008
+#define TN40_REG_TXD_CFG1_3 0x400C
+
+#define TN40_REG_RXF_CFG1_0 0x4010
+#define TN40_REG_RXF_CFG1_1 0x4014
+#define TN40_REG_RXF_CFG1_2 0x4018
+#define TN40_REG_RXF_CFG1_3 0x401C
+
+#define TN40_REG_RXD_CFG1_0 0x4020
+#define TN40_REG_RXD_CFG1_1 0x4024
+#define TN40_REG_RXD_CFG1_2 0x4028
+#define TN40_REG_RXD_CFG1_3 0x402C
+
+#define TN40_REG_TXF_CFG1_0 0x4030
+#define TN40_REG_TXF_CFG1_1 0x4034
+#define TN40_REG_TXF_CFG1_2 0x4038
+#define TN40_REG_TXF_CFG1_3 0x403C
+
+#define TN40_REG_TXD_CFG0_0 0x4040
+#define TN40_REG_TXD_CFG0_1 0x4044
+#define TN40_REG_TXD_CFG0_2 0x4048
+#define TN40_REG_TXD_CFG0_3 0x404C
+
+#define TN40_REG_RXF_CFG0_0 0x4050
+#define TN40_REG_RXF_CFG0_1 0x4054
+#define TN40_REG_RXF_CFG0_2 0x4058
+#define TN40_REG_RXF_CFG0_3 0x405C
+
+#define TN40_REG_RXD_CFG0_0 0x4060
+#define TN40_REG_RXD_CFG0_1 0x4064
+#define TN40_REG_RXD_CFG0_2 0x4068
+#define TN40_REG_RXD_CFG0_3 0x406C
+
+#define TN40_REG_TXF_CFG0_0 0x4070
+#define TN40_REG_TXF_CFG0_1 0x4074
+#define TN40_REG_TXF_CFG0_2 0x4078
+#define TN40_REG_TXF_CFG0_3 0x407C
+
+#define TN40_REG_TXD_WPTR_0 0x4080
+#define TN40_REG_TXD_WPTR_1 0x4084
+#define TN40_REG_TXD_WPTR_2 0x4088
+#define TN40_REG_TXD_WPTR_3 0x408C
+
+#define TN40_REG_RXF_WPTR_0 0x4090
+#define TN40_REG_RXF_WPTR_1 0x4094
+#define TN40_REG_RXF_WPTR_2 0x4098
+#define TN40_REG_RXF_WPTR_3 0x409C
+
+#define TN40_REG_RXD_WPTR_0 0x40A0
+#define TN40_REG_RXD_WPTR_1 0x40A4
+#define TN40_REG_RXD_WPTR_2 0x40A8
+#define TN40_REG_RXD_WPTR_3 0x40AC
+
+#define TN40_REG_TXF_WPTR_0 0x40B0
+#define TN40_REG_TXF_WPTR_1 0x40B4
+#define TN40_REG_TXF_WPTR_2 0x40B8
+#define TN40_REG_TXF_WPTR_3 0x40BC
+
+#define TN40_REG_TXD_RPTR_0 0x40C0
+#define TN40_REG_TXD_RPTR_1 0x40C4
+#define TN40_REG_TXD_RPTR_2 0x40C8
+#define TN40_REG_TXD_RPTR_3 0x40CC
+
+#define TN40_REG_RXF_RPTR_0 0x40D0
+#define TN40_REG_RXF_RPTR_1 0x40D4
+#define TN40_REG_RXF_RPTR_2 0x40D8
+#define TN40_REG_RXF_RPTR_3 0x40DC
+
+#define TN40_REG_RXD_RPTR_0 0x40E0
+#define TN40_REG_RXD_RPTR_1 0x40E4
+#define TN40_REG_RXD_RPTR_2 0x40E8
+#define TN40_REG_RXD_RPTR_3 0x40EC
+
+#define TN40_REG_TXF_RPTR_0 0x40F0
+#define TN40_REG_TXF_RPTR_1 0x40F4
+#define TN40_REG_TXF_RPTR_2 0x40F8
+#define TN40_REG_TXF_RPTR_3 0x40FC
+
+/* Hardware versioning */
+#define TN40_FPGA_VER 0x5030
+
+/* Registers from 0x0100-0x0150 were remapped to 0x5100-0x5150 */
+#define TN40_REG_ISR TN40_REG_ISR0
+#define TN40_REG_ISR0 0x5100
+
+#define TN40_REG_IMR TN40_REG_IMR0
+#define TN40_REG_IMR0 0x5110
+
+#define TN40_REG_RDINTCM0 0x5120
+#define TN40_REG_RDINTCM2 0x5128
+
+#define TN40_REG_TDINTCM0 0x5130
+
+#define TN40_REG_ISR_MSK0 0x5140
+
+#define TN40_REG_INIT_SEMAPHORE 0x5170
+#define TN40_REG_INIT_STATUS 0x5180
+
+#define TN40_REG_MAC_LNK_STAT 0x0200
+#define TN40_MAC_LINK_STAT 0x0004 /* Link state */
+
+#define TN40_REG_BLNK_LED 0x0210
+
+#define TN40_REG_GMAC_RXF_A 0x1240
+
+#define TN40_REG_UNC_MAC0_A 0x1250
+#define TN40_REG_UNC_MAC1_A 0x1260
+#define TN40_REG_UNC_MAC2_A 0x1270
+
+#define TN40_REG_VLAN_0 0x1800
+
+#define TN40_REG_MAX_FRAME_A 0x12C0
+
+#define TN40_REG_RX_MAC_MCST0 0x1A80
+#define TN40_REG_RX_MAC_MCST1 0x1A84
+#define TN40_MAC_MCST_NUM 15
+#define TN40_REG_RX_MCST_HASH0 0x1A00
+#define TN40_MAC_MCST_HASH_NUM 8
+
+#define TN40_REG_VPC 0x2300
+#define TN40_REG_VIC 0x2320
+#define TN40_REG_VGLB 0x2340
+
+#define TN40_REG_CLKPLL 0x5000
+
+/* MDIO interface */
+
+#define TN40_REG_MDIO_CMD_STAT 0x6030
+#define TN40_REG_MDIO_CMD 0x6034
+#define TN40_REG_MDIO_DATA 0x6038
+#define TN40_REG_MDIO_ADDR 0x603C
+#define TN40_GET_MDIO_BUSY(x) FIELD_GET(GENMASK(0, 0), (x))
+#define TN40_GET_MDIO_RD_ERR(x) FIELD_GET(GENMASK(1, 1), (x))
+
+#define TN40_REG_REVISION 0x6000
+#define TN40_REG_SCRATCH 0x6004
+#define TN40_REG_CTRLST 0x6008
+#define TN40_REG_MAC_ADDR_0 0x600C
+#define TN40_REG_MAC_ADDR_1 0x6010
+#define TN40_REG_FRM_LENGTH 0x6014
+#define TN40_REG_PAUSE_QUANT 0x6054
+#define TN40_REG_RX_FIFO_SECTION 0x601C
+#define TN40_REG_TX_FIFO_SECTION 0x6020
+#define TN40_REG_RX_FULLNESS 0x6024
+#define TN40_REG_TX_FULLNESS 0x6028
+#define TN40_REG_HASHTABLE 0x602C
+
+#define TN40_REG_RST_PORT 0x7000
+#define TN40_REG_DIS_PORT 0x7010
+#define TN40_REG_RST_QU 0x7020
+#define TN40_REG_DIS_QU 0x7030
+
+#define TN40_REG_CTRLST_TX_ENA 0x0001
+#define TN40_REG_CTRLST_RX_ENA 0x0002
+#define TN40_REG_CTRLST_PRM_ENA 0x0010
+#define TN40_REG_CTRLST_PAD_ENA 0x0020
+
+#define TN40_REG_CTRLST_BASE (TN40_REG_CTRLST_PAD_ENA | TN40_REG_CTRLST_PRM_ENA)
+
+/* TXD TXF RXF RXD CONFIG 0x0000 --- 0x007c */
+#define TN40_TX_RX_CFG1_BASE 0xffffffff /*0-31 */
+#define TN40_TX_RX_CFG0_BASE 0xfffff000 /*31:12 */
+#define TN40_TX_RX_CFG0_RSVD 0x00000ffc /*11:2 */
+#define TN40_TX_RX_CFG0_SIZE 0x00000003 /*1:0 */
+
+/* TXD TXF RXF RXD WRITE 0x0080 --- 0x00BC */
+#define TN40_TXF_WPTR_WR_PTR 0x00007ff8 /*14:3 */
+
+/* TXD TXF RXF RXD READ 0x00C0 --- 0x00FC */
+#define TN40_TXF_RPTR_RD_PTR 0x00007ff8 /*14:3 */
+
+/* The last 4 bits are dropped; size is rounded to 16. */
+#define TN40_TXF_WPTR_MASK 0x7ff0
+
+/* regISR 0x0100 */
+/* regIMR 0x0110 */
+#define TN40_IMR_INPROG 0x80000000 /*31 */
+#define TN40_IR_LNKCHG1 0x10000000 /*28 */
+#define TN40_IR_LNKCHG0 0x08000000 /*27 */
+#define TN40_IR_GPIO 0x04000000 /*26 */
+#define TN40_IR_RFRSH 0x02000000 /*25 */
+#define TN40_IR_RSVD 0x01000000 /*24 */
+#define TN40_IR_SWI 0x00800000 /*23 */
+#define TN40_IR_RX_FREE_3 0x00400000 /*22 */
+#define TN40_IR_RX_FREE_2 0x00200000 /*21 */
+#define TN40_IR_RX_FREE_1 0x00100000 /*20 */
+#define TN40_IR_RX_FREE_0 0x00080000 /*19 */
+#define TN40_IR_TX_FREE_3 0x00040000 /*18 */
+#define TN40_IR_TX_FREE_2 0x00020000 /*17 */
+#define TN40_IR_TX_FREE_1 0x00010000 /*16 */
+#define TN40_IR_TX_FREE_0 0x00008000 /*15 */
+#define TN40_IR_RX_DESC_3 0x00004000 /*14 */
+#define TN40_IR_RX_DESC_2 0x00002000 /*13 */
+#define TN40_IR_RX_DESC_1 0x00001000 /*12 */
+#define TN40_IR_RX_DESC_0 0x00000800 /*11 */
+#define TN40_IR_PSE 0x00000400 /*10 */
+#define TN40_IR_TMR3 0x00000200 /* 9 */
+#define TN40_IR_TMR2 0x00000100 /* 8 */
+#define TN40_IR_TMR1 0x00000080 /* 7 */
+#define TN40_IR_TMR0 0x00000040 /* 6 */
+#define TN40_IR_VNT 0x00000020 /* 5 */
+#define TN40_IR_RxFL 0x00000010 /* 4 */
+#define TN40_IR_SDPERR 0x00000008 /* 3 */
+#define TN40_IR_TR 0x00000004 /* 2 */
+#define TN40_IR_PCIE_LINK 0x00000002 /* 1 */
+#define TN40_IR_PCIE_TOUT 0x00000001 /* 0 */
+
+#define TN40_IR_EXTRA \
+ (TN40_IR_RX_FREE_0 | TN40_IR_LNKCHG0 | TN40_IR_LNKCHG1 |\
+ TN40_IR_PSE | TN40_IR_TMR0 | TN40_IR_PCIE_LINK | \
+ TN40_IR_PCIE_TOUT)
+
+#define TN40_GMAC_RX_FILTER_OSEN 0x1000 /* shared OS enable */
+#define TN40_GMAC_RX_FILTER_TXFC 0x0400 /* Tx flow control */
+#define TN40_GMAC_RX_FILTER_RSV0 0x0200 /* reserved */
+#define TN40_GMAC_RX_FILTER_FDA 0x0100 /* filter out direct address */
+#define TN40_GMAC_RX_FILTER_AOF 0x0080 /* accept overrun frames */
+#define TN40_GMAC_RX_FILTER_ACF 0x0040 /* accept control frames */
+#define TN40_GMAC_RX_FILTER_ARUNT 0x0020 /* accept runt (underrun) frames */
+#define TN40_GMAC_RX_FILTER_ACRC 0x0010 /* accept CRC-error frames */
+#define TN40_GMAC_RX_FILTER_AM 0x0008 /* accept multicast */
+#define TN40_GMAC_RX_FILTER_AB 0x0004 /* accept broadcast */
+#define TN40_GMAC_RX_FILTER_PRM 0x0001 /* [0:1] promiscuous mode */
+
+#define TN40_MAX_FRAME_AB_VAL 0x3fff /* 13:0 */
+
+#define TN40_CLKPLL_PLLLKD 0x0200 /* 9 */
+#define TN40_CLKPLL_RSTEND 0x0100 /* 8 */
+#define TN40_CLKPLL_SFTRST 0x0001 /* 0 */
+
+#define TN40_CLKPLL_LKD (TN40_CLKPLL_PLLLKD | TN40_CLKPLL_RSTEND)
+
+#endif
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 1530d13984d4..0d5a862cd78a 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -167,7 +167,7 @@ config TI_KEYSTONE_NETCP_ETHSS
config TLAN
tristate "TI ThunderLAN support"
- depends on (PCI || EISA)
+ depends on (PCI || EISA) && HAS_IOPORT
help
If you have a PCI Ethernet network card based on the ThunderLAN chip
which is supported by this driver, say Y here.
@@ -188,6 +188,7 @@ config TI_ICSSG_PRUETH
select TI_ICSS_IEP
select TI_K3_CPPI_DESC_POOL
depends on PRU_REMOTEPROC
+ depends on NET_SWITCHDEV
depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
depends on PTP_1588_CLOCK_OPTIONAL
help
@@ -198,6 +199,22 @@ config TI_ICSSG_PRUETH
to support the Ethernet operation. Currently, it supports Ethernet
with 1G and 100M link speed.
+config TI_ICSSG_PRUETH_SR1
+ tristate "TI Gigabit PRU SR1.0 Ethernet driver"
+ select PHYLIB
+ select TI_ICSS_IEP
+ select TI_K3_CPPI_DESC_POOL
+ depends on PRU_REMOTEPROC
+ depends on NET_SWITCHDEV
+ depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ help
+ Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem.
+ This subsystem is available on the AM65 SR1.0 platform.
+
+ This driver requires firmware binaries which will run on the PRUs
+ to support the Ethernet operation. Currently, it supports Ethernet
+ with 1G, 100M and 10M link speed.
+
config TI_ICSS_IEP
tristate "TI PRU ICSS IEP driver"
depends on PTP_1588_CLOCK_OPTIONAL
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
index d8590304f3df..cbcf44806924 100644
--- a/drivers/net/ethernet/ti/Makefile
+++ b/drivers/net/ethernet/ti/Makefile
@@ -31,12 +31,18 @@ ti-am65-cpsw-nuss-$(CONFIG_TI_AM65_CPSW_QOS) += am65-cpsw-qos.o
ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
-obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o
-icssg-prueth-y := icssg/icssg_prueth.o \
- icssg/icssg_classifier.o \
- icssg/icssg_queues.o \
- icssg/icssg_config.o \
- icssg/icssg_mii_cfg.o \
- icssg/icssg_stats.o \
- icssg/icssg_ethtool.o
+obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o icssg.o
+icssg-prueth-y := icssg/icssg_prueth.o icssg/icssg_switchdev.o
+
+obj-$(CONFIG_TI_ICSSG_PRUETH_SR1) += icssg-prueth-sr1.o icssg.o
+icssg-prueth-sr1-y := icssg/icssg_prueth_sr1.o
+
+icssg-y := icssg/icssg_common.o \
+ icssg/icssg_classifier.o \
+ icssg/icssg_queues.o \
+ icssg/icssg_config.o \
+ icssg/icssg_mii_cfg.o \
+ icssg/icssg_stats.o \
+ icssg/icssg_ethtool.o
+
obj-$(CONFIG_TI_ICSS_IEP) += icssg/icss_iep.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index d6ce2c9f0a8d..b60976947da5 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -692,9 +692,20 @@ static void am65_cpsw_get_eth_mac_stats(struct net_device *ndev,
};
static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ unsigned int ptp_v2_filter;
+
+ ptp_v2_filter = BIT(HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
return ethtool_op_get_ts_info(ndev, info);
@@ -708,7 +719,7 @@ static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
SOF_TIMESTAMPING_RAW_HARDWARE;
info->phc_index = am65_cpts_phc_index(common->cpts);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
- info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | ptp_v2_filter;
return 0;
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 1d00e21808c1..81d9f21086ec 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -5,6 +5,7 @@
*
*/
+#include <linux/bpf_trace.h>
#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
@@ -30,6 +31,7 @@
#include <linux/sys_soc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>
+#include <net/page_pool/helpers.h>
#include <net/switchdev.h>
#include "cpsw_ale.h"
@@ -101,6 +103,12 @@
#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN BIT(11)
#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT 16
+#define AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN BIT(0)
+#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN BIT(1)
+#define AM65_CPSW_PN_TS_CTL_RX_VLAN_LT2_EN BIT(2)
+#define AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN BIT(3)
+#define AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN BIT(9)
+
/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT 16
@@ -124,6 +132,11 @@
AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN | \
AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)
+#define AM65_CPSW_TS_RX_ANX_ALL_EN \
+ (AM65_CPSW_PN_TS_CTL_RX_ANX_D_EN | \
+ AM65_CPSW_PN_TS_CTL_RX_ANX_E_EN | \
+ AM65_CPSW_PN_TS_CTL_RX_ANX_F_EN)
+
#define AM65_CPSW_ALE_AGEOUT_DEFAULT 30
/* Number of TX/RX descriptors */
#define AM65_CPSW_MAX_TX_DESC 500
@@ -138,6 +151,18 @@
#define AM65_CPSW_DEFAULT_TX_CHNS 8
+/* CPPI streaming packet interface */
+#define AM65_CPSW_CPPI_TX_FLOW_ID 0x3FFF
+#define AM65_CPSW_CPPI_TX_PKT_TYPE 0x7
+
+/* XDP */
+#define AM65_CPSW_XDP_CONSUMED 2
+#define AM65_CPSW_XDP_REDIRECT 1
+#define AM65_CPSW_XDP_PASS 0
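+/* Verdicts of am65_cpsw_run_xdp(): on PASS the buffer continues to the
+ * stack; CONSUMED and REDIRECT mean the page was already recycled or
+ * handed off, so the caller must not requeue it.
+ */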
+
+/* Include headroom compatible with both skb and xdpf */
+#define AM65_CPSW_HEADROOM (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
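+/* XDP_PACKET_HEADROOM (256) dominates NET_SKB_PAD on common configs,
+ * leaving room for bpf_xdp_adjust_head(); NET_IP_ALIGN keeps the IP
+ * header naturally aligned.
+ */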
+
static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
const u8 *dev_addr)
{
@@ -305,12 +330,11 @@ static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
}
static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
- struct sk_buff *skb)
+ struct page *page)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
struct cppi5_host_desc_t *desc_rx;
struct device *dev = common->dev;
- u32 pkt_len = skb_tailroom(skb);
dma_addr_t desc_dma;
dma_addr_t buf_dma;
void *swdata;
@@ -322,20 +346,22 @@ static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
}
desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
- buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len,
- DMA_FROM_DEVICE);
+ buf_dma = dma_map_single(rx_chn->dma_dev,
+ page_address(page) + AM65_CPSW_HEADROOM,
+ AM65_CPSW_MAX_PACKET_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- dev_err(dev, "Failed to map rx skb buffer\n");
+ dev_err(dev, "Failed to map rx buffer\n");
return -EINVAL;
}
cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, AM65_CPSW_MAX_PACKET_SIZE,
+ buf_dma, AM65_CPSW_MAX_PACKET_SIZE);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- *((void **)swdata) = skb;
+ *((void **)swdata) = page_address(page);
return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
}
@@ -369,25 +395,137 @@ static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+static void am65_cpsw_destroy_xdp_rxqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct xdp_rxq_info *rxq;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ if (!common->ports[i].ndev)
+ continue;
+
+ rxq = &common->ports[i].xdp_rxq;
+
+ if (xdp_rxq_info_is_reg(rxq))
+ xdp_rxq_info_unreg(rxq);
+ }
+
+ if (rx_chn->page_pool) {
+ page_pool_destroy(rx_chn->page_pool);
+ rx_chn->page_pool = NULL;
+ }
+}
+
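+/* All ports share a single RX DMA channel, so one page pool backs every
+ * per-port xdp_rxq registered below.
+ */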
+static int am65_cpsw_create_xdp_rxqs(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct page_pool_params pp_params = {
+ .flags = PP_FLAG_DMA_MAP,
+ .order = 0,
+ .pool_size = AM65_CPSW_MAX_RX_DESC,
+ .nid = dev_to_node(common->dev),
+ .dev = common->dev,
+ .dma_dir = DMA_BIDIRECTIONAL,
+ .napi = &common->napi_rx,
+ };
+ struct xdp_rxq_info *rxq;
+ struct page_pool *pool;
+ int i, ret;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ rx_chn->page_pool = pool;
+
+ for (i = 0; i < common->port_num; i++) {
+ if (!common->ports[i].ndev)
+ continue;
+
+ rxq = &common->ports[i].xdp_rxq;
+
+ ret = xdp_rxq_info_reg(rxq, common->ports[i].ndev, i, 0);
+ if (ret)
+ goto err;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ am65_cpsw_destroy_xdp_rxqs(common);
+ return ret;
+}
+
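+/* Descriptors live in one contiguous pool, so an index is just the byte
+ * offset from the pool base shifted down by log2 of the descriptor size.
+ */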
+static int am65_cpsw_nuss_desc_idx(struct k3_cppi_desc_pool *desc_pool,
+ void *desc,
+ unsigned char dsize_log2)
+{
+ void *pool_addr = k3_cppi_desc_pool_cpuaddr(desc_pool);
+
+ return (desc - pool_addr) >> dsize_log2;
+}
+
+static void am65_cpsw_nuss_set_buf_type(struct am65_cpsw_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc,
+ enum am65_cpsw_tx_buf_type buf_type)
+{
+ int desc_idx;
+
+ desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc,
+ tx_chn->dsize_log2);
+ k3_cppi_desc_pool_desc_info_set(tx_chn->desc_pool, desc_idx,
+ (void *)buf_type);
+}
+
+static enum am65_cpsw_tx_buf_type am65_cpsw_nuss_buf_type(struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma)
+{
+ struct cppi5_host_desc_t *desc_tx;
+ int desc_idx;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ desc_idx = am65_cpsw_nuss_desc_idx(tx_chn->desc_pool, desc_tx,
+ tx_chn->dsize_log2);
+
+ return (enum am65_cpsw_tx_buf_type)k3_cppi_desc_pool_desc_info(tx_chn->desc_pool,
+ desc_idx);
+}
+
+static inline void am65_cpsw_put_page(struct am65_cpsw_rx_chn *rx_chn,
+ struct page *page,
+ bool allow_direct,
+ int desc_idx)
+{
+ page_pool_put_full_page(rx_chn->page_pool, page, allow_direct);
+ rx_chn->pages[desc_idx] = NULL;
+}
+
static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
{
struct am65_cpsw_rx_chn *rx_chn = data;
struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
dma_addr_t buf_dma;
u32 buf_dma_len;
+ void *page_addr;
void **swdata;
+ int desc_idx;
desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
+ page_addr = *swdata;
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
-
dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- dev_kfree_skb_any(skb);
+ desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
+ rx_chn->dsize_log2);
+ am65_cpsw_put_page(rx_chn, virt_to_page(page_addr), false, desc_idx);
}
static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
@@ -440,12 +578,32 @@ static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
dev_kfree_skb_any(skb);
}
+static struct sk_buff *am65_cpsw_build_skb(void *page_addr,
+ struct net_device *ndev,
+ unsigned int len)
+{
+ struct sk_buff *skb;
+
+ len += AM65_CPSW_HEADROOM;
+
+ skb = build_skb(page_addr, len);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, AM65_CPSW_HEADROOM);
+ skb->dev = ndev;
+
+ return skb;
+}
+
static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
{
struct am65_cpsw_host *host_p = am65_common_get_host(common);
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
int port_idx, i, ret, tx;
- struct sk_buff *skb;
u32 val, port_mask;
+ struct page *page;
if (common->usage_count)
return 0;
@@ -505,25 +663,29 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
am65_cpsw_qos_tx_p0_rate_init(common);
- for (i = 0; i < common->rx_chns.descs_num; i++) {
- skb = __netdev_alloc_skb_ip_align(NULL,
- AM65_CPSW_MAX_PACKET_SIZE,
- GFP_KERNEL);
- if (!skb) {
+ ret = am65_cpsw_create_xdp_rxqs(common);
+ if (ret) {
+ dev_err(common->dev, "Failed to create XDP rx queues\n");
+ return ret;
+ }
+
+ for (i = 0; i < rx_chn->descs_num; i++) {
+ page = page_pool_dev_alloc_pages(rx_chn->page_pool);
+ if (!page) {
ret = -ENOMEM;
- dev_err(common->dev, "cannot allocate skb\n");
if (i)
goto fail_rx;
return ret;
}
+ rx_chn->pages[i] = page;
- ret = am65_cpsw_nuss_rx_push(common, skb);
+ ret = am65_cpsw_nuss_rx_push(common, page);
if (ret < 0) {
dev_err(common->dev,
- "cannot submit skb to channel rx, error %d\n",
+ "cannot submit page to channel rx: %d\n",
ret);
- kfree_skb(skb);
+ am65_cpsw_put_page(rx_chn, page, false, i);
if (i)
goto fail_rx;
@@ -531,27 +693,27 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
}
}
- ret = k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+ ret = k3_udma_glue_enable_rx_chn(rx_chn->rx_chn);
if (ret) {
dev_err(common->dev, "couldn't enable rx chn: %d\n", ret);
goto fail_rx;
}
for (tx = 0; tx < common->tx_ch_num; tx++) {
- ret = k3_udma_glue_enable_tx_chn(common->tx_chns[tx].tx_chn);
+ ret = k3_udma_glue_enable_tx_chn(tx_chn[tx].tx_chn);
if (ret) {
dev_err(common->dev, "couldn't enable tx chn %d: %d\n",
tx, ret);
tx--;
goto fail_tx;
}
- napi_enable(&common->tx_chns[tx].napi_tx);
+ napi_enable(&tx_chn[tx].napi_tx);
}
napi_enable(&common->napi_rx);
if (common->rx_irq_disabled) {
common->rx_irq_disabled = false;
- enable_irq(common->rx_chns.irq);
+ enable_irq(rx_chn->irq);
}
dev_dbg(common->dev, "cpsw_nuss started\n");
@@ -559,22 +721,23 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
fail_tx:
while (tx >= 0) {
- napi_disable(&common->tx_chns[tx].napi_tx);
- k3_udma_glue_disable_tx_chn(common->tx_chns[tx].tx_chn);
+ napi_disable(&tx_chn[tx].napi_tx);
+ k3_udma_glue_disable_tx_chn(tx_chn[tx].tx_chn);
tx--;
}
- k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+ k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
fail_rx:
- k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, 0,
- &common->rx_chns,
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, 0, rx_chn,
am65_cpsw_nuss_rx_cleanup, 0);
return ret;
}
static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct am65_cpsw_tx_chn *tx_chn = common->tx_chns;
int i;
if (common->usage_count != 1)
@@ -590,26 +753,25 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
reinit_completion(&common->tdown_complete);
for (i = 0; i < common->tx_ch_num; i++)
- k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);
+ k3_udma_glue_tdown_tx_chn(tx_chn[i].tx_chn, false);
i = wait_for_completion_timeout(&common->tdown_complete,
msecs_to_jiffies(1000));
if (!i)
dev_err(common->dev, "tx timeout\n");
for (i = 0; i < common->tx_ch_num; i++) {
- napi_disable(&common->tx_chns[i].napi_tx);
- hrtimer_cancel(&common->tx_chns[i].tx_hrtimer);
+ napi_disable(&tx_chn[i].napi_tx);
+ hrtimer_cancel(&tx_chn[i].tx_hrtimer);
}
for (i = 0; i < common->tx_ch_num; i++) {
- k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
- &common->tx_chns[i],
+ k3_udma_glue_reset_tx_chn(tx_chn[i].tx_chn, &tx_chn[i],
am65_cpsw_nuss_tx_cleanup);
- k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
+ k3_udma_glue_disable_tx_chn(tx_chn[i].tx_chn);
}
reinit_completion(&common->tdown_complete);
- k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
+ k3_udma_glue_tdown_rx_chn(rx_chn->rx_chn, true);
if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
@@ -621,17 +783,22 @@ static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
hrtimer_cancel(&common->rx_hrtimer);
for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
- k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
- &common->rx_chns,
+ k3_udma_glue_reset_rx_chn(rx_chn->rx_chn, i, rx_chn,
am65_cpsw_nuss_rx_cleanup, !!i);
- k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+ k3_udma_glue_disable_rx_chn(rx_chn->rx_chn);
cpsw_ale_stop(common->ale);
writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
+ for (i = 0; i < rx_chn->descs_num; i++) {
+ if (rx_chn->pages[i])
+ am65_cpsw_put_page(rx_chn, rx_chn->pages[i], false, i);
+ }
+ am65_cpsw_destroy_xdp_rxqs(common);
+
dev_dbg(common->dev, "cpsw_nuss stopped\n");
return 0;
}
@@ -729,7 +896,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
/* mac_sl should be configured via phy-link interface */
am65_cpsw_sl_ctl_reset(port);
- ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
+ ret = phylink_of_phy_connect(port->slave.phylink, port->slave.port_np, 0);
if (ret)
goto error_cleanup;
@@ -749,16 +916,149 @@ runtime_put:
return ret;
}
-static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
+static int am65_cpsw_xdp_tx_frame(struct net_device *ndev,
+ struct am65_cpsw_tx_chn *tx_chn,
+ struct xdp_frame *xdpf,
+ enum am65_cpsw_tx_buf_type buf_type)
{
- struct skb_shared_hwtstamps *ssh;
- u64 ns;
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct cppi5_host_desc_t *host_desc;
+ struct netdev_queue *netif_txq;
+ dma_addr_t dma_desc, dma_buf;
+ u32 pkt_len = xdpf->len;
+ void **swdata;
+ int ret;
+
+ host_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (unlikely(!host_desc)) {
+ ndev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+
+ am65_cpsw_nuss_set_buf_type(tx_chn, host_desc, buf_type);
- ns = ((u64)psdata[1] << 32) | psdata[0];
+ dma_buf = dma_map_single(tx_chn->dma_dev, xdpf->data,
+ pkt_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, dma_buf))) {
+ ndev->stats.tx_dropped++;
+ ret = -ENOMEM;
+ goto pool_free;
+ }
+
+ cppi5_hdesc_init(host_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ AM65_CPSW_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(host_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
+ cppi5_hdesc_set_pktlen(host_desc, pkt_len);
+ cppi5_desc_set_pktids(&host_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
+ cppi5_desc_set_tags_ids(&host_desc->hdr, 0, port->port_id);
+
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &dma_buf);
+ cppi5_hdesc_attach_buf(host_desc, dma_buf, pkt_len, dma_buf, pkt_len);
+
+ swdata = cppi5_hdesc_get_swdata(host_desc);
+ *(swdata) = xdpf;
+
+ /* Report BQL before sending the packet */
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+ netdev_tx_sent_queue(netif_txq, pkt_len);
+
+ dma_desc = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, host_desc);
+ if (AM65_CPSW_IS_CPSW2G(common)) {
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
+ dma_desc);
+ } else {
+ spin_lock_bh(&tx_chn->lock);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, host_desc,
+ dma_desc);
+ spin_unlock_bh(&tx_chn->lock);
+ }
+ if (ret) {
+ /* Inform BQL */
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ ndev->stats.tx_errors++;
+ goto dma_unmap;
+ }
+
+ return 0;
- ssh = skb_hwtstamps(skb);
- memset(ssh, 0, sizeof(*ssh));
- ssh->hwtstamp = ns_to_ktime(ns);
+dma_unmap:
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &dma_buf);
+ dma_unmap_single(tx_chn->dma_dev, dma_buf, pkt_len, DMA_TO_DEVICE);
+pool_free:
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, host_desc);
+ return ret;
+}
+
+static int am65_cpsw_run_xdp(struct am65_cpsw_common *common,
+ struct am65_cpsw_port *port,
+ struct xdp_buff *xdp,
+ int desc_idx, int cpu, int *len)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct net_device *ndev = port->ndev;
+ int ret = AM65_CPSW_XDP_CONSUMED;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ struct page *page;
+ u32 act;
+
+ prog = READ_ONCE(port->xdp_prog);
+ if (!prog)
+ return AM65_CPSW_XDP_PASS;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ /* XDP prog might have changed packet data and boundaries */
+ *len = xdp->data_end - xdp->data;
+
+ switch (act) {
+ case XDP_PASS:
+ ret = AM65_CPSW_XDP_PASS;
+ goto out;
+ case XDP_TX:
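+		/* Map the current CPU onto a TX channel, mirroring the
+		 * ndo_xdp_xmit() path further down.
+		 */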
+ tx_chn = &common->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ break;
+
+ __netif_tx_lock(netif_txq, cpu);
+ ret = am65_cpsw_xdp_tx_frame(ndev, tx_chn, xdpf,
+ AM65_CPSW_TX_BUF_TYPE_XDP_TX);
+ __netif_tx_unlock(netif_txq);
+ if (ret)
+ break;
+
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
+ ret = AM65_CPSW_XDP_CONSUMED;
+ goto out;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(ndev, xdp, prog)))
+ break;
+
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
+ ret = AM65_CPSW_XDP_REDIRECT;
+ goto out;
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ ndev->stats.rx_dropped++;
+ }
+
+ page = virt_to_head_page(xdp->data);
+ am65_cpsw_put_page(rx_chn, page, true, desc_idx);
+
+out:
+ return ret;
}
/* RX psdata[2] word format - checksum information */
@@ -795,7 +1095,7 @@ static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
}
static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
- u32 flow_idx)
+ u32 flow_idx, int cpu)
{
struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
@@ -803,13 +1103,16 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
struct am65_cpsw_ndev_stats *stats;
struct cppi5_host_desc_t *desc_rx;
struct device *dev = common->dev;
- struct sk_buff *skb, *new_skb;
+ struct page *page, *new_page;
dma_addr_t desc_dma, buf_dma;
struct am65_cpsw_port *port;
+ int headroom, desc_idx, ret;
struct net_device *ndev;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ void *page_addr;
void **swdata;
u32 *psdata;
- int ret = 0;
ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
if (ret) {
@@ -830,7 +1133,8 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
__func__, flow_idx, &desc_dma);
swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
+ page_addr = *swdata;
+ page = virt_to_page(page_addr);
cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
@@ -838,12 +1142,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
port = am65_common_get_port(common, port_id);
ndev = port->ndev;
- skb->dev = ndev;
-
psdata = cppi5_hdesc_get_psdata(desc_rx);
- /* add RX timestamp */
- if (port->rx_ts_enabled)
- am65_cpsw_nuss_rx_ts(skb, psdata);
csum_info = psdata[2];
dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
@@ -851,36 +1150,64 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
- if (new_skb) {
- ndev_priv = netdev_priv(ndev);
- am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, ndev);
- am65_cpsw_nuss_rx_csum(skb, csum_info);
- napi_gro_receive(&common->napi_rx, skb);
-
- stats = this_cpu_ptr(ndev_priv->stats);
-
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += pkt_len;
- u64_stats_update_end(&stats->syncp);
- kmemleak_not_leak(new_skb);
- } else {
- ndev->stats.rx_dropped++;
- new_skb = skb;
+ desc_idx = am65_cpsw_nuss_desc_idx(rx_chn->desc_pool, desc_rx,
+ rx_chn->dsize_log2);
+
+ skb = am65_cpsw_build_skb(page_addr, ndev,
+ AM65_CPSW_MAX_PACKET_SIZE);
+ if (unlikely(!skb)) {
+ new_page = page;
+ goto requeue;
}
+ if (port->xdp_prog) {
+ xdp_init_buff(&xdp, AM65_CPSW_MAX_PACKET_SIZE, &port->xdp_rxq);
+
+ xdp_prepare_buff(&xdp, page_addr, skb_headroom(skb),
+ pkt_len, false);
+
+ ret = am65_cpsw_run_xdp(common, port, &xdp, desc_idx,
+ cpu, &pkt_len);
+ if (ret != AM65_CPSW_XDP_PASS)
+ return ret;
+
+ /* Compute additional headroom to be reserved */
+ headroom = (xdp.data - xdp.data_hard_start) - skb_headroom(skb);
+ skb_reserve(skb, headroom);
+ }
+
+ ndev_priv = netdev_priv(ndev);
+ am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
+ skb_put(skb, pkt_len);
+ if (port->rx_ts_enabled)
+ am65_cpts_rx_timestamp(common->cpts, skb);
+ skb_mark_for_recycle(skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+ am65_cpsw_nuss_rx_csum(skb, csum_info);
+ napi_gro_receive(&common->napi_rx, skb);
+
+ stats = this_cpu_ptr(ndev_priv->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ u64_stats_update_end(&stats->syncp);
+
+ new_page = page_pool_dev_alloc_pages(rx_chn->page_pool);
+ if (unlikely(!new_page))
+ return -ENOMEM;
+ rx_chn->pages[desc_idx] = new_page;
+
if (netif_dormant(ndev)) {
- dev_kfree_skb_any(new_skb);
+ am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
ndev->stats.rx_dropped++;
return 0;
}
- ret = am65_cpsw_nuss_rx_push(common, new_skb);
+requeue:
+ ret = am65_cpsw_nuss_rx_push(common, new_page);
if (WARN_ON(ret < 0)) {
- dev_kfree_skb_any(new_skb);
+ am65_cpsw_put_page(rx_chn, new_page, true, desc_idx);
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
}
@@ -901,6 +1228,8 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
{
struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
int flow = AM65_CPSW_MAX_RX_FLOWS;
+ int cpu = smp_processor_id();
+ bool xdp_redirect = false;
int cur_budget, ret;
int num_rx = 0;
@@ -909,9 +1238,12 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
cur_budget = budget - num_rx;
while (cur_budget--) {
- ret = am65_cpsw_nuss_rx_packets(common, flow);
- if (ret)
+ ret = am65_cpsw_nuss_rx_packets(common, flow, cpu);
+ if (ret) {
+ if (ret == AM65_CPSW_XDP_REDIRECT)
+ xdp_redirect = true;
break;
+ }
num_rx++;
}
@@ -919,6 +1251,9 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
break;
}
+ if (xdp_redirect)
+ xdp_do_flush();
+
dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
@@ -938,8 +1273,8 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
}
static struct sk_buff *
-am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
- dma_addr_t desc_dma)
+am65_cpsw_nuss_tx_compl_packet_skb(struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma)
{
struct am65_cpsw_ndev_priv *ndev_priv;
struct am65_cpsw_ndev_stats *stats;
@@ -968,6 +1303,39 @@ am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
return skb;
}
+static struct xdp_frame *
+am65_cpsw_nuss_tx_compl_packet_xdp(struct am65_cpsw_common *common,
+ struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma,
+ struct net_device **ndev)
+{
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct am65_cpsw_ndev_stats *stats;
+ struct cppi5_host_desc_t *desc_tx;
+ struct am65_cpsw_port *port;
+ struct xdp_frame *xdpf;
+ u32 port_id = 0;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ cppi5_desc_get_tags_ids(&desc_tx->hdr, NULL, &port_id);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ xdpf = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+
+ port = am65_common_get_port(common, port_id);
+ *ndev = port->ndev;
+
+ ndev_priv = netdev_priv(*ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += xdpf->len;
+ u64_stats_update_end(&stats->syncp);
+
+ return xdpf;
+}
+
static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
struct netdev_queue *netif_txq)
{
@@ -988,11 +1356,13 @@ static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_d
static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
int chn, unsigned int budget, bool *tdown)
{
+ enum am65_cpsw_tx_buf_type buf_type;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
unsigned int total_bytes = 0;
struct net_device *ndev;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -1013,10 +1383,21 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
break;
}
- skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
- total_bytes = skb->len;
- ndev = skb->dev;
- napi_consume_skb(skb, budget);
+ buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+ skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
+ ndev = skb->dev;
+ total_bytes = skb->len;
+ napi_consume_skb(skb, budget);
+ } else {
+ xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
+ desc_dma, &ndev);
+ total_bytes = xdpf->len;
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else
+ xdp_return_frame(xdpf);
+ }
num_tx++;
netif_txq = netdev_get_tx_queue(ndev, chn);
@@ -1034,11 +1415,13 @@ static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
int chn, unsigned int budget, bool *tdown)
{
+ enum am65_cpsw_tx_buf_type buf_type;
struct device *dev = common->dev;
struct am65_cpsw_tx_chn *tx_chn;
struct netdev_queue *netif_txq;
unsigned int total_bytes = 0;
struct net_device *ndev;
+ struct xdp_frame *xdpf;
struct sk_buff *skb;
dma_addr_t desc_dma;
int res, num_tx = 0;
@@ -1057,11 +1440,21 @@ static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
break;
}
- skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
-
- ndev = skb->dev;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
+ buf_type = am65_cpsw_nuss_buf_type(tx_chn, desc_dma);
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_SKB) {
+ skb = am65_cpsw_nuss_tx_compl_packet_skb(tx_chn, desc_dma);
+ ndev = skb->dev;
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ } else {
+ xdpf = am65_cpsw_nuss_tx_compl_packet_xdp(common, tx_chn,
+ desc_dma, &ndev);
+ total_bytes += xdpf->len;
+ if (buf_type == AM65_CPSW_TX_BUF_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else
+ xdp_return_frame(xdpf);
+ }
num_tx++;
}
@@ -1183,10 +1576,13 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
goto busy_stop_q;
}
+ am65_cpsw_nuss_set_buf_type(tx_chn, first_desc,
+ AM65_CPSW_TX_BUF_TYPE_SKB);
+
cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
AM65_CPSW_NAV_PS_DATA_SIZE);
- cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
- cppi5_hdesc_set_pkttype(first_desc, 0x7);
+ cppi5_desc_set_pktids(&first_desc->hdr, 0, AM65_CPSW_CPPI_TX_FLOW_ID);
+ cppi5_hdesc_set_pkttype(first_desc, AM65_CPSW_CPPI_TX_PKT_TYPE);
cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
@@ -1225,6 +1621,9 @@ static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
goto busy_free_descs;
}
+ am65_cpsw_nuss_set_buf_type(tx_chn, next_desc,
+ AM65_CPSW_TX_BUF_TYPE_SKB);
+
buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
@@ -1334,7 +1733,6 @@ static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
struct ifreq *ifr)
{
- struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
struct hwtstamp_config cfg;
@@ -1358,11 +1756,6 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
case HWTSTAMP_FILTER_NONE:
port->rx_ts_enabled = false;
break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
@@ -1372,10 +1765,13 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
port->rx_ts_enabled = true;
- cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ return -EOPNOTSUPP;
default:
return -ERANGE;
}
@@ -1405,6 +1801,10 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
+ if (port->rx_ts_enabled)
+ ts_ctrl |= AM65_CPSW_TS_RX_ANX_ALL_EN |
+ AM65_CPSW_PN_TS_CTL_RX_VLAN_LT1_EN;
+
writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
writel(ts_vlan_ltype, port->port_base +
AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
@@ -1412,9 +1812,6 @@ static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
- /* en/dis RX timestamp */
- am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);
-
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -1431,7 +1828,7 @@ static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
cfg.tx_type = port->tx_ts_enabled ?
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
cfg.rx_filter = port->rx_ts_enabled ?
- HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+ HWTSTAMP_FILTER_PTP_V2_EVENT : HWTSTAMP_FILTER_NONE;
return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
@@ -1488,6 +1885,59 @@ static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
stats->tx_dropped = dev->stats.tx_dropped;
}
+static int am65_cpsw_xdp_prog_setup(struct net_device *ndev,
+ struct bpf_prog *prog)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ bool running = netif_running(ndev);
+ struct bpf_prog *old_prog;
+
+ if (running)
+ am65_cpsw_nuss_ndo_slave_stop(ndev);
+
+ old_prog = xchg(&port->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (running)
+ return am65_cpsw_nuss_ndo_slave_open(ndev);
+
+ return 0;
+}
+
+static int am65_cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return am65_cpsw_xdp_prog_setup(ndev, bpf->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int am65_cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ int cpu = smp_processor_id();
+ int i, nxmit = 0;
+
+ tx_chn = &am65_ndev_to_common(ndev)->tx_chns[cpu % AM65_CPSW_MAX_TX_QUEUES];
+ netif_txq = netdev_get_tx_queue(ndev, tx_chn->id);
+
+ __netif_tx_lock(netif_txq, cpu);
+ for (i = 0; i < n; i++) {
+ if (am65_cpsw_xdp_tx_frame(ndev, tx_chn, frames[i],
+ AM65_CPSW_TX_BUF_TYPE_XDP_NDO))
+ break;
+ nxmit++;
+ }
+ __netif_tx_unlock(netif_txq);
+
+ return nxmit;
+}
+
static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_open = am65_cpsw_nuss_ndo_slave_open,
.ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
@@ -1502,6 +1952,8 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
.ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
.ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
.ndo_set_tx_maxrate = am65_cpsw_qos_ndo_tx_p0_set_maxrate,
+ .ndo_bpf = am65_cpsw_ndo_bpf,
+ .ndo_xdp_xmit = am65_cpsw_ndo_xdp_xmit,
};
static void am65_cpsw_disable_phy(struct phy *phy)
@@ -1772,7 +2224,7 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
.mode = K3_RINGACC_RING_MODE_RING,
.flags = 0
};
- u32 hdesc_size;
+ u32 hdesc_size, hdesc_size_out;
int i, ret = 0;
hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
@@ -1816,6 +2268,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
goto err;
}
+ hdesc_size_out = k3_cppi_desc_pool_desc_size(tx_chn->desc_pool);
+ tx_chn->dsize_log2 = __fls(hdesc_size_out);
+ WARN_ON(hdesc_size_out != (1 << tx_chn->dsize_log2));
+
tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
if (tx_chn->irq < 0) {
dev_err(dev, "Failed to get tx dma irq %d\n",
@@ -1862,8 +2318,8 @@ static void am65_cpsw_nuss_free_rx_chns(void *data)
static void am65_cpsw_nuss_remove_rx_chns(void *data)
{
struct am65_cpsw_common *common = data;
- struct am65_cpsw_rx_chn *rx_chn;
struct device *dev = common->dev;
+ struct am65_cpsw_rx_chn *rx_chn;
rx_chn = &common->rx_chns;
devm_remove_action(dev, am65_cpsw_nuss_free_rx_chns, common);
@@ -1873,11 +2329,7 @@ static void am65_cpsw_nuss_remove_rx_chns(void *data)
netif_napi_del(&common->napi_rx);
- if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
- k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
-
- if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
- k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+ am65_cpsw_nuss_free_rx_chns(common);
common->rx_flow_id_base = -1;
}
@@ -1888,7 +2340,7 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
struct device *dev = common->dev;
- u32 hdesc_size;
+ u32 hdesc_size, hdesc_size_out;
u32 fdqring_id;
int i, ret = 0;
@@ -1920,6 +2372,17 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
goto err;
}
+ hdesc_size_out = k3_cppi_desc_pool_desc_size(rx_chn->desc_pool);
+ rx_chn->dsize_log2 = __fls(hdesc_size_out);
+ WARN_ON(hdesc_size_out != (1 << rx_chn->dsize_log2));
+
+ rx_chn->page_pool = NULL;
+
+ rx_chn->pages = devm_kcalloc(dev, rx_chn->descs_num,
+ sizeof(*rx_chn->pages), GFP_KERNEL);
+ if (!rx_chn->pages)
+ return -ENOMEM;
+
common->rx_flow_id_base =
k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
@@ -1961,10 +2424,10 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
- if (rx_chn->irq <= 0) {
+ if (rx_chn->irq < 0) {
dev_err(dev, "Failed to get rx dma irq %d\n",
rx_chn->irq);
- ret = -ENXIO;
+ ret = rx_chn->irq;
goto err;
}
}
@@ -2148,7 +2611,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
of_property_read_bool(port_np, "ti,mac-only");
/* get phy/link info */
- port->slave.phy_node = port_np;
+ port->slave.port_np = port_np;
ret = of_get_phy_mode(port_np, &port->slave.phy_if);
if (ret) {
dev_err(dev, "%pOF read phy-mode err %d\n",
@@ -2240,6 +2703,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
mutex_init(&ndev_priv->mm_lock);
port->qos.link_speed = SPEED_UNKNOWN;
SET_NETDEV_DEV(port->ndev, dev);
+ port->ndev->dev.of_node = port->slave.port_np;
eth_hw_addr_set(port->ndev, port->slave.mac_addr);
@@ -2252,6 +2716,9 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
NETIF_F_HW_TC;
port->ndev->features = port->ndev->hw_features |
NETIF_F_HW_VLAN_CTAG_FILTER;
+ port->ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT;
port->ndev->vlan_features |= NETIF_F_SG;
port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
@@ -2294,7 +2761,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
}
phylink = phylink_create(&port->slave.phylink_config,
- of_node_to_fwnode(port->slave.phy_node),
+ of_node_to_fwnode(port->slave.port_np),
port->slave.phy_if,
&am65_cpsw_phylink_mac_ops);
if (IS_ERR(phylink))
@@ -2315,6 +2782,8 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
if (ret)
dev_err(dev, "failed to add percpu stat free action %d\n", ret);
+ port->xdp_prog = NULL;
+
if (!common->dma_ndev)
common->dma_ndev = port->ndev;
@@ -2588,7 +3057,8 @@ static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
}
static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
struct am65_cpsw_common *cpsw = dl_priv->common;
@@ -2922,7 +3392,8 @@ static const struct am65_cpsw_pdata j784s4_cpswxg_pdata = {
.quirks = 0,
.ale_dev_id = "am64-cpswxg",
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
- .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_USXGMII),
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII) | BIT(PHY_INTERFACE_MODE_SGMII) |
+ BIT(PHY_INTERFACE_MODE_USXGMII),
};
static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
@@ -2958,9 +3429,9 @@ static int am65_cpsw_nuss_probe(struct platform_device *pdev)
struct device_node *node;
struct resource *res;
struct clk *clk;
+ int ale_entries;
u64 id_temp;
int ret, i;
- int ale_entries;
common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
if (!common)
@@ -3172,10 +3643,10 @@ static int am65_cpsw_nuss_suspend(struct device *dev)
static int am65_cpsw_nuss_resume(struct device *dev)
{
struct am65_cpsw_common *common = dev_get_drvdata(dev);
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
struct am65_cpsw_port *port;
struct net_device *ndev;
int i, ret;
- struct am65_cpsw_host *host_p = am65_common_get_host(common);
ret = am65_cpsw_nuss_init_tx_chns(common);
if (ret)
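The dsize_log2 bookkeeping added to the TX and RX channel init above exists so
that completion paths can turn a descriptor's offset inside its pool into a
dense index with a single shift, and use that index to look up per-descriptor
state such as the new buffer type. A minimal userspace sketch of that pattern
(illustrative names only, not the driver's real helpers):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

enum buf_type { BUF_SKB, BUF_XDP_TX, BUF_XDP_NDO };

struct desc_pool {
	uint8_t *mem;			/* CPU address of descriptor memory */
	size_t desc_size;		/* must be a power of two */
	unsigned int dsize_log2;	/* log2(desc_size) */
	enum buf_type *types;		/* one slot per descriptor */
};

static size_t desc_idx(const struct desc_pool *p, const void *desc)
{
	return (size_t)((const uint8_t *)desc - p->mem) >> p->dsize_log2;
}

static void set_buf_type(struct desc_pool *p, void *desc, enum buf_type t)
{
	p->types[desc_idx(p, desc)] = t;
}

static enum buf_type get_buf_type(const struct desc_pool *p, void *desc)
{
	return p->types[desc_idx(p, desc)];
}

int main(void)
{
	struct desc_pool p = { .desc_size = 128, .dsize_log2 = 7 };

	/* mirrors the WARN_ON() above: the shift only works if the
	 * descriptor size really is 1 << dsize_log2
	 */
	assert(p.desc_size == (size_t)1 << p.dsize_log2);

	p.mem = calloc(64, p.desc_size);
	p.types = calloc(64, sizeof(*p.types));
	assert(p.mem && p.types);

	set_buf_type(&p, p.mem + 5 * p.desc_size, BUF_XDP_TX);
	assert(get_buf_type(&p, p.mem + 5 * p.desc_size) == BUF_XDP_TX);

	free(p.mem);
	free(p.types);
	return 0;
}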
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
index 7da0492dc091..e2ce2be320bd 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <net/devlink.h>
+#include <net/xdp.h>
#include "am65-cpsw-qos.h"
struct am65_cpts;
@@ -29,7 +30,7 @@ struct am65_cpts;
struct am65_cpsw_slave_data {
bool mac_only;
struct cpsw_sl *mac_sl;
- struct device_node *phy_node;
+ struct device_node *port_np;
phy_interface_t phy_if;
struct phy *ifphy;
struct phy *serdes_phy;
@@ -56,10 +57,18 @@ struct am65_cpsw_port {
bool rx_ts_enabled;
struct am65_cpsw_qos qos;
struct devlink_port devlink_port;
+ struct bpf_prog *xdp_prog;
+ struct xdp_rxq_info xdp_rxq;
/* Only for suspend resume context */
u32 vid_context;
};
+enum am65_cpsw_tx_buf_type {
+ AM65_CPSW_TX_BUF_TYPE_SKB,
+ AM65_CPSW_TX_BUF_TYPE_XDP_TX,
+ AM65_CPSW_TX_BUF_TYPE_XDP_NDO,
+};
+
struct am65_cpsw_host {
struct am65_cpsw_common *common;
void __iomem *port_base;
@@ -80,6 +89,7 @@ struct am65_cpsw_tx_chn {
int irq;
u32 id;
u32 descs_num;
+ unsigned char dsize_log2;
char tx_chn_name[128];
u32 rate_mbps;
};
@@ -89,7 +99,10 @@ struct am65_cpsw_rx_chn {
struct device *dma_dev;
struct k3_cppi_desc_pool *desc_pool;
struct k3_udma_glue_rx_channel *rx_chn;
+ struct page_pool *page_pool;
+ struct page **pages;
u32 descs_num;
+ unsigned char dsize_log2;
int irq;
};
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
index 816e73a3d6e4..fa96db7c1a13 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-qos.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -9,6 +9,7 @@
#include <linux/pm_runtime.h>
#include <linux/math.h>
+#include <linux/math64.h>
#include <linux/time.h>
#include <linux/units.h>
#include <net/pkt_cls.h>
@@ -837,6 +838,7 @@ static int am65_cpsw_taprio_replace(struct net_device *ndev,
struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
struct am65_cpts *cpts = common->cpts;
struct am65_cpsw_est *est_new;
+ u64 cur_time, n;
int ret, tact;
if (!netif_running(ndev)) {
@@ -888,13 +890,21 @@ static int am65_cpsw_taprio_replace(struct net_device *ndev,
if (tact == TACT_PROG)
am65_cpsw_timer_stop(ndev);
- if (!est_new->taprio.base_time)
- est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);
-
am65_cpsw_port_est_get_buf_num(ndev, est_new);
am65_cpsw_est_set_sched_list(ndev, est_new);
am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
+ /* If the base-time is in the past, start schedule from the time:
+ * base_time + (N*cycle_time)
+ * where N is the smallest possible integer such that the above
+ * time is in the future.
+ */
+ cur_time = am65_cpts_ns_gettime(cpts);
+ if (est_new->taprio.base_time < cur_time) {
+ n = div64_u64(cur_time - est_new->taprio.base_time, est_new->taprio.cycle_time);
+ est_new->taprio.base_time += (n + 1) * est_new->taprio.cycle_time;
+ }
+
am65_cpsw_est_set(ndev, 1);
if (tact == TACT_PROG) {
@@ -1008,6 +1018,9 @@ static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
return -EOPNOTSUPP;
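The taprio change above advances a stale base time by whole cycles rather than
silently replacing it with "now". A standalone check of that arithmetic
(illustrative code mirroring the div64_u64() computation in
am65_cpsw_taprio_replace()):

#include <assert.h>
#include <stdint.h>

static uint64_t advance_base_time(uint64_t base, uint64_t cycle, uint64_t now)
{
	if (base < now) {
		uint64_t n = (now - base) / cycle;	/* div64_u64() in the kernel */

		base += (n + 1) * cycle;		/* smallest future multiple */
	}
	return base;
}

int main(void)
{
	/* base 1000 ns, cycle 300 ns, now 2000 ns:
	 * n = (2000 - 1000) / 300 = 3, new base = 1000 + 4 * 300 = 2200 > 2000
	 */
	assert(advance_base_time(1000, 300, 2000) == 2200);

	/* a base time already in the future is left untouched */
	assert(advance_base_time(5000, 300, 2000) == 5000);
	return 0;
}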
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
index f89716b1cfb6..59d6ab989c55 100644
--- a/drivers/net/ethernet/ti/am65-cpts.c
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -275,15 +275,13 @@ static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
return true;
}
-static int am65_cpts_fifo_read(struct am65_cpts *cpts)
+static int __am65_cpts_fifo_read(struct am65_cpts *cpts)
{
struct ptp_clock_event pevent;
struct am65_cpts_event *event;
bool schedule = false;
int i, type, ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&cpts->lock, flags);
for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
event = list_first_entry_or_null(&cpts->pool,
struct am65_cpts_event, list);
@@ -312,8 +310,7 @@ static int am65_cpts_fifo_read(struct am65_cpts *cpts)
event->tmo = jiffies +
msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
- list_del_init(&event->list);
- list_add_tail(&event->list, &cpts->events);
+ list_move_tail(&event->list, &cpts->events);
dev_dbg(cpts->dev,
"AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
@@ -356,14 +353,24 @@ static int am65_cpts_fifo_read(struct am65_cpts *cpts)
}
out:
- spin_unlock_irqrestore(&cpts->lock, flags);
-
if (schedule)
ptp_schedule_worker(cpts->ptp_clock, 0);
return ret;
}
+static int am65_cpts_fifo_read(struct am65_cpts *cpts)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ ret = __am65_cpts_fifo_read(cpts);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return ret;
+}
+
static u64 am65_cpts_gettime(struct am65_cpts *cpts,
struct ptp_system_timestamp *sts)
{
@@ -864,29 +871,6 @@ static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
return delay;
}
-/**
- * am65_cpts_rx_enable - enable rx timestamping
- * @cpts: cpts handle
- * @en: enable
- *
- * This functions enables rx packets timestamping. The CPTS can timestamp all
- * rx packets.
- */
-void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
-{
- u32 val;
-
- mutex_lock(&cpts->ptp_clk_lock);
- val = am65_cpts_read32(cpts, control);
- if (en)
- val |= AM65_CPTS_CONTROL_TSTAMP_EN;
- else
- val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
- am65_cpts_write32(cpts, val, control);
- mutex_unlock(&cpts->ptp_clk_lock);
-}
-EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
-
static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
{
unsigned int ptp_class = ptp_classify_raw(skb);
@@ -911,6 +895,69 @@ static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
return 1;
}
+static u64 am65_cpts_find_rx_ts(struct am65_cpts *cpts, u32 skb_mtype_seqid)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ unsigned long flags;
+ u32 mtype_seqid;
+ u64 ns = 0;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ __am65_cpts_fifo_read(cpts);
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_move(&event->list, &cpts->pool);
+ continue;
+ }
+
+ mtype_seqid = event->event1 &
+ (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
+ AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK |
+ AM65_CPTS_EVENT_1_EVENT_TYPE_MASK);
+
+ if (mtype_seqid == skb_mtype_seqid) {
+ ns = event->timestamp;
+ list_move(&event->list, &cpts->pool);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return ns;
+}
+
+void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (struct am65_cpts_skb_cb_data *)skb->cb;
+ struct skb_shared_hwtstamps *ssh;
+ int ret;
+ u64 ns;
+
+ /* am65_cpts_rx_timestamp() is called before eth_type_trans(), so
+ * the skb MAC header properties are not configured yet; reset the
+ * skb MAC header here before classifying the packet.
+ */
+ skb_reset_mac_header(skb);
+ ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return; /* if not PTP class packet */
+
+ skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_RX << AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
+
+ dev_dbg(cpts->dev, "%s mtype seqid %08x\n", __func__, skb_cb->skb_mtype_seqid);
+
+ ns = am65_cpts_find_rx_ts(cpts, skb_cb->skb_mtype_seqid);
+ if (!ns)
+ return;
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_rx_timestamp);
+
/**
* am65_cpts_tx_timestamp - save tx packet for timestamping
* @cpts: cpts handle
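The new RX path above timestamps packets by key matching instead of a global
enable: each CPTS FIFO event carries a (message type | sequence id | event
type) tag, and am65_cpts_find_rx_ts() scans the buffered events for the tag
computed from the packet, recycling entries whose timeout has passed. A small
self-contained sketch of that lookup policy (hypothetical types, not the
driver's):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define MAX_EVENTS 8

struct ts_event {
	uint32_t key;		/* mtype | seqid | event type */
	uint64_t ns;		/* hardware timestamp */
	uint64_t deadline;	/* expiry, in some monotonic unit */
	int used;
};

/* Return the timestamp matching @key, consuming the event; expire stale
 * entries along the way; 0 means "no match yet".
 */
static uint64_t find_rx_ts(struct ts_event *ev, uint64_t now, uint32_t key)
{
	for (int i = 0; i < MAX_EVENTS; i++) {
		if (!ev[i].used)
			continue;
		if (now > ev[i].deadline) {	/* recycle timed-out event */
			ev[i].used = 0;
			continue;
		}
		if (ev[i].key == key) {
			ev[i].used = 0;		/* consume on match */
			return ev[i].ns;
		}
	}
	return 0;
}

int main(void)
{
	struct ts_event ev[MAX_EVENTS];

	memset(ev, 0, sizeof(ev));
	ev[0] = (struct ts_event){ .key = 0x11, .ns = 111, .deadline = 5, .used = 1 };
	ev[1] = (struct ts_event){ .key = 0x22, .ns = 222, .deadline = 50, .used = 1 };

	assert(find_rx_ts(ev, 10, 0x11) == 0);		/* expired, recycled */
	assert(find_rx_ts(ev, 10, 0x22) == 222);	/* matched and consumed */
	assert(find_rx_ts(ev, 10, 0x22) == 0);		/* already consumed */
	return 0;
}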
diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
index 6e14df0be113..6099d772799d 100644
--- a/drivers/net/ethernet/ti/am65-cpts.h
+++ b/drivers/net/ethernet/ti/am65-cpts.h
@@ -22,9 +22,9 @@ void am65_cpts_release(struct am65_cpts *cpts);
struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
struct device_node *node);
int am65_cpts_phc_index(struct am65_cpts *cpts);
+void am65_cpts_rx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
-void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en);
u64 am65_cpts_ns_gettime(struct am65_cpts *cpts);
int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
struct am65_cpts_estf_cfg *cfg);
@@ -48,17 +48,18 @@ static inline int am65_cpts_phc_index(struct am65_cpts *cpts)
return -1;
}
-static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
+static inline void am65_cpts_rx_timestamp(struct am65_cpts *cpts,
struct sk_buff *skb)
{
}
-static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
- struct sk_buff *skb)
+static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
{
}
-static inline void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
{
}
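The header keeps one convention for every CPTS entry point: a real prototype
when the timestamping driver is built in, and an empty static inline stub
otherwise, so callers never need #ifdef guards. The same idiom in generic
form (struct foo and CONFIG_FOO are placeholders):

struct foo;

#if IS_ENABLED(CONFIG_FOO)
void foo_feature_enable(struct foo *f);
#else
static inline void foo_feature_enable(struct foo *f)
{
}
#endif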
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
index f7b283353ba2..53ed23d68722 100644
--- a/drivers/net/ethernet/ti/cpsw_ethtool.c
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -717,7 +717,7 @@ err:
}
#if IS_ENABLED(CONFIG_TI_CPTS)
-int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
@@ -738,7 +738,7 @@ int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
return 0;
}
#else
-int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 087dcb67505a..2baa198ebfa0 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1625,7 +1625,8 @@ static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
}
static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
@@ -1762,7 +1763,8 @@ static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
}
static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct cpsw_devlink *dl_priv = devlink_priv(dl);
struct cpsw_common *cpsw = dl_priv->cpsw;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
index 764ed298b570..6fe4edabba44 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.c
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -1404,6 +1404,9 @@ static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
return -EOPNOTSUPP;
}
+ if (flow_rule_match_has_control_flags(rule, extack))
+ return -EOPNOTSUPP;
+
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
index 7efa72502c86..1f448290b9f4 100644
--- a/drivers/net/ethernet/ti/cpsw_priv.h
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -510,6 +510,6 @@ int cpsw_set_ringparam(struct net_device *ndev,
int cpsw_set_channels_common(struct net_device *ndev,
struct ethtool_channels *chs,
cpdma_handler_fn rx_handler);
-int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info);
+int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info);
#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ */
diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index 3025e9c18970..75c294ce6fb6 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -17,6 +17,7 @@
#include <linux/timekeeping.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
+#include <linux/workqueue.h>
#include "icss_iep.h"
@@ -94,7 +95,7 @@ enum {
* @flags: Flags to represent IEP properties
*/
struct icss_iep_plat_data {
- struct regmap_config *config;
+ const struct regmap_config *config;
u32 reg_offs[ICSS_IEP_MAX_REGS];
u32 flags;
};
@@ -110,7 +111,6 @@ struct icss_iep {
struct ptp_clock_info ptp_info;
struct ptp_clock *ptp_clock;
struct mutex ptp_clk_mutex; /* PHC access serializer */
- spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */
u32 def_inc;
s16 slow_cmp_inc;
u32 slow_cmp_count;
@@ -122,6 +122,7 @@ struct icss_iep {
int cap_cmp_irq;
u64 period;
u32 latch_enable;
+ struct work_struct work;
};
/**
@@ -192,14 +193,11 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns)
*/
static void icss_iep_settime(struct icss_iep *iep, u64 ns)
{
- unsigned long flags;
-
if (iep->ops && iep->ops->settime) {
iep->ops->settime(iep->clockops_data, ns);
return;
}
- spin_lock_irqsave(&iep->irq_lock, flags);
if (iep->pps_enabled || iep->perout_enabled)
writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
@@ -210,7 +208,6 @@ static void icss_iep_settime(struct icss_iep *iep, u64 ns)
writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN,
iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]);
}
- spin_unlock_irqrestore(&iep->irq_lock, flags);
}
/**
@@ -546,7 +543,6 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
static int icss_iep_perout_enable(struct icss_iep *iep,
struct ptp_perout_request *req, int on)
{
- unsigned long flags;
int ret = 0;
mutex_lock(&iep->ptp_clk_mutex);
@@ -559,11 +555,9 @@ static int icss_iep_perout_enable(struct icss_iep *iep,
if (iep->perout_enabled == !!on)
goto exit;
- spin_lock_irqsave(&iep->irq_lock, flags);
ret = icss_iep_perout_enable_hw(iep, req, on);
if (!ret)
iep->perout_enabled = !!on;
- spin_unlock_irqrestore(&iep->irq_lock, flags);
exit:
mutex_unlock(&iep->ptp_clk_mutex);
@@ -571,11 +565,61 @@ exit:
return ret;
}
+static void icss_iep_cap_cmp_work(struct work_struct *work)
+{
+ struct icss_iep *iep = container_of(work, struct icss_iep, work);
+ const u32 *reg_offs = iep->plat_data->reg_offs;
+ struct ptp_clock_event pevent;
+ unsigned int val;
+ u64 ns, ns_next;
+
+ mutex_lock(&iep->ptp_clk_mutex);
+
+ ns = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) {
+ val = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
+ ns |= (u64)val << 32;
+ }
+ /* set next event */
+ ns_next = ns + iep->period;
+ writel(lower_32_bits(ns_next),
+ iep->base + reg_offs[ICSS_IEP_CMP1_REG0]);
+ if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+ writel(upper_32_bits(ns_next),
+ iep->base + reg_offs[ICSS_IEP_CMP1_REG1]);
+
+ pevent.pps_times.ts_real = ns_to_timespec64(ns);
+ pevent.type = PTP_CLOCK_PPSUSR;
+ pevent.index = 0;
+ ptp_clock_event(iep->ptp_clock, &pevent);
+ dev_dbg(iep->dev, "IEP:pps ts: %llu next:%llu:\n", ns, ns_next);
+
+ mutex_unlock(&iep->ptp_clk_mutex);
+}
+
+static irqreturn_t icss_iep_cap_cmp_irq(int irq, void *dev_id)
+{
+ struct icss_iep *iep = (struct icss_iep *)dev_id;
+ const u32 *reg_offs = iep->plat_data->reg_offs;
+ unsigned int val;
+
+ val = readl(iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
+ /* The driver only enables CMP1 */
+ if (val & BIT(1)) {
+ /* Clear the event */
+ writel(BIT(1), iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]);
+ if (iep->pps_enabled || iep->perout_enabled)
+ schedule_work(&iep->work);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
static int icss_iep_pps_enable(struct icss_iep *iep, int on)
{
struct ptp_clock_request rq;
struct timespec64 ts;
- unsigned long flags;
int ret = 0;
u64 ns;
@@ -589,8 +633,6 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
if (iep->pps_enabled == !!on)
goto exit;
- spin_lock_irqsave(&iep->irq_lock, flags);
-
rq.perout.index = 0;
if (on) {
ns = icss_iep_gettime(iep, NULL);
@@ -602,13 +644,13 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on)
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
} else {
ret = icss_iep_perout_enable_hw(iep, &rq.perout, on);
+ if (iep->cap_cmp_irq)
+ cancel_work_sync(&iep->work);
}
if (!ret)
iep->pps_enabled = !!on;
- spin_unlock_irqrestore(&iep->irq_lock, flags);
-
exit:
mutex_unlock(&iep->ptp_clk_mutex);
@@ -777,6 +819,8 @@ int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops,
if (iep->ops && iep->ops->perout_enable) {
iep->ptp_info.n_per_out = 1;
iep->ptp_info.pps = 1;
+ } else if (iep->cap_cmp_irq) {
+ iep->ptp_info.pps = 1;
}
if (iep->ops && iep->ops->extts_enable)
@@ -817,6 +861,7 @@ static int icss_iep_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct icss_iep *iep;
struct clk *iep_clk;
+ int ret, irq;
iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL);
if (!iep)
@@ -827,6 +872,22 @@ static int icss_iep_probe(struct platform_device *pdev)
if (IS_ERR(iep->base))
return -ENODEV;
+ irq = platform_get_irq_byname_optional(pdev, "iep_cap_cmp");
+ if (irq == -EPROBE_DEFER)
+ return irq;
+
+ if (irq > 0) {
+ ret = devm_request_irq(dev, irq, icss_iep_cap_cmp_irq,
+ IRQF_TRIGGER_HIGH, "iep_cap_cmp", iep);
+ if (ret) {
+ dev_info(iep->dev, "cap_cmp irq request failed: %x\n",
+ ret);
+ } else {
+ iep->cap_cmp_irq = irq;
+ INIT_WORK(&iep->work, icss_iep_cap_cmp_work);
+ }
+ }
+
iep_clk = devm_clk_get(dev, NULL);
if (IS_ERR(iep_clk))
return PTR_ERR(iep_clk);
@@ -853,7 +914,6 @@ static int icss_iep_probe(struct platform_device *pdev)
iep->ptp_info = icss_iep_ptp_info;
mutex_init(&iep->ptp_clk_mutex);
- spin_lock_init(&iep->irq_lock);
dev_set_drvdata(dev, iep);
icss_iep_disable(iep);
@@ -892,7 +952,7 @@ static int icss_iep_regmap_read(void *context, unsigned int reg,
return 0;
}
-static struct regmap_config am654_icss_iep_regmap_config = {
+static const struct regmap_config am654_icss_iep_regmap_config = {
.name = "icss iep",
.reg_stride = 1,
.reg_write = icss_iep_regmap_write,
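The icss_iep.c changes above replace the irq_lock spinlock with a split
handler: the hard IRQ only acknowledges the CMP1 status bit and defers the
rest to a workqueue, where sleeping under ptp_clk_mutex is legal. A skeleton
of that hardirq/workqueue pattern (device specifics are placeholders):

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct work;
	/* mutex-protected state reprogrammed from the work item */
};

static void my_dev_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work);

	/* process context: may take mutexes, reprogram compare registers
	 * and deliver ptp_clock_event(), as icss_iep_cap_cmp_work() does
	 */
	(void)dev;
}

static irqreturn_t my_dev_irq(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	/* hard IRQ: read and acknowledge the status bit only, then hand
	 * the sleeping part off to process context
	 */
	schedule_work(&dev->work);
	return IRQ_HANDLED;
}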
diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
index 6df53ab17fbc..9ec504d976d6 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c
@@ -274,6 +274,16 @@ static void rx_class_set_or(struct regmap *miig_rt, int slice, int n,
regmap_write(miig_rt, offset, data);
}
+static u32 rx_class_get_or(struct regmap *miig_rt, int slice, int n)
+{
+ u32 offset, val;
+
+ offset = RX_CLASS_N_REG(slice, n, RX_CLASS_OR_EN);
+ regmap_read(miig_rt, offset, &val);
+
+ return val;
+}
+
void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac)
{
regmap_write(miig_rt, MAC_INTERFACE_0, (u32)(mac[0] | mac[1] << 8 |
@@ -287,6 +297,27 @@ void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac)
mac[2] << 16 | mac[3] << 24));
regmap_write(miig_rt, offs[slice].mac1, (u32)(mac[4] | mac[5] << 8));
}
+EXPORT_SYMBOL_GPL(icssg_class_set_mac_addr);
+
+static void icssg_class_ft1_add_mcast(struct regmap *miig_rt, int slice,
+ int slot, const u8 *addr, const u8 *mask)
+{
+ u32 val;
+ int i;
+
+ WARN(slot >= FT1_NUM_SLOTS, "invalid slot: %d\n", slot);
+
+ rx_class_ft1_set_da(miig_rt, slice, slot, addr);
+ rx_class_ft1_set_da_mask(miig_rt, slice, slot, mask);
+ rx_class_ft1_cfg_set_type(miig_rt, slice, slot, FT1_CFG_TYPE_EQ);
+
+ /* Enable the FT1 slot in OR enable for all classifiers */
+ for (i = 0; i < ICSSG_NUM_CLASSIFIERS_IN_USE; i++) {
+ val = rx_class_get_or(miig_rt, slice, i);
+ val |= RX_CLASS_FT_FT1_MATCH(slot);
+ rx_class_set_or(miig_rt, slice, i, val);
+ }
+}
/* disable all RX traffic */
void icssg_class_disable(struct regmap *miig_rt, int slice)
@@ -330,38 +361,108 @@ void icssg_class_disable(struct regmap *miig_rt, int slice)
/* clear CFG2 */
regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
}
+EXPORT_SYMBOL_GPL(icssg_class_disable);
-void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti)
+void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti,
+ bool is_sr1)
{
+ int num_classifiers = is_sr1 ? ICSSG_NUM_CLASSIFIERS_IN_USE : 1;
u32 data;
+ int n;
/* defaults */
icssg_class_disable(miig_rt, slice);
/* Setup Classifier */
- /* match on Broadcast or MAC_PRU address */
- data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P;
+ for (n = 0; n < num_classifiers; n++) {
+ /* match on Broadcast or MAC_PRU address */
+ data = RX_CLASS_FT_BC | RX_CLASS_FT_DA_P;
- /* multicast */
- if (allmulti)
- data |= RX_CLASS_FT_MC;
+ /* multicast */
+ if (allmulti)
+ data |= RX_CLASS_FT_MC;
- rx_class_set_or(miig_rt, slice, 0, data);
+ rx_class_set_or(miig_rt, slice, n, data);
- /* set CFG1 for OR_OR_AND for classifier */
- rx_class_sel_set_type(miig_rt, slice, 0, RX_CLASS_SEL_TYPE_OR_OR_AND);
+ /* set CFG1 for OR_OR_AND for classifier */
+ rx_class_sel_set_type(miig_rt, slice, n,
+ RX_CLASS_SEL_TYPE_OR_OR_AND);
+ }
/* clear CFG2 */
regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0);
}
+EXPORT_SYMBOL_GPL(icssg_class_default);
+
+void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice)
+{
+ u32 data, offset;
+ int n;
+
+ /* defaults */
+ icssg_class_disable(miig_rt, slice);
+
+ /* Setup Classifier */
+ for (n = 0; n < ICSSG_NUM_CLASSIFIERS_IN_USE; n++) {
+ /* set RAW_MASK to bypass filters */
+ offset = RX_CLASS_GATES_N_REG(slice, n);
+ regmap_read(miig_rt, offset, &data);
+ data |= RX_CLASS_GATES_RAW_MASK;
+ regmap_write(miig_rt, offset, data);
+ }
+}
+EXPORT_SYMBOL_GPL(icssg_class_promiscuous_sr1);
+
+void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice,
+ struct net_device *ndev)
+{
+ u8 mask_addr[6] = { 0, 0, 0, 0, 0, 0xff };
+ struct netdev_hw_addr *ha;
+ int slot = 2;
+
+ rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
+ /* reserve first 2 slots for
+ * 1) 01-80-C2-00-00-XX Known Service Ethernet Multicast addresses
+ * 2) 01-00-5e-00-00-XX Local Network Control Block
+ * (224.0.0.0 - 224.0.0.255 (224.0.0/24))
+ */
+ icssg_class_ft1_add_mcast(miig_rt, slice, 0,
+ eth_reserved_addr_base, mask_addr);
+ icssg_class_ft1_add_mcast(miig_rt, slice, 1,
+ eth_ipv4_mcast_addr_base, mask_addr);
+ mask_addr[5] = 0;
+ netdev_for_each_mc_addr(ha, ndev) {
+ /* skip addresses matching reserved slots */
+ if (!memcmp(eth_reserved_addr_base, ha->addr, 5) ||
+ !memcmp(eth_ipv4_mcast_addr_base, ha->addr, 5)) {
+ netdev_dbg(ndev, "mcast skip %pM\n", ha->addr);
+ continue;
+ }
+
+ if (slot >= FT1_NUM_SLOTS) {
+ netdev_dbg(ndev,
+ "can't add more than %d MC addresses, enabling allmulti\n",
+ FT1_NUM_SLOTS);
+ icssg_class_default(miig_rt, slice, 1, true);
+ break;
+ }
+
+ netdev_dbg(ndev, "mcast add %pM\n", ha->addr);
+ icssg_class_ft1_add_mcast(miig_rt, slice, slot,
+ ha->addr, mask_addr);
+ slot++;
+ }
+}
+EXPORT_SYMBOL_GPL(icssg_class_add_mcast_sr1);
/* required for SAV check */
void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr)
{
const u8 mask_addr[] = { 0, 0, 0, 0, 0, 0, };
- rx_class_ft1_set_start_len(miig_rt, slice, 0, 6);
+ rx_class_ft1_set_start_len(miig_rt, slice, ETH_ALEN, ETH_ALEN);
rx_class_ft1_set_da(miig_rt, slice, 0, mac_addr);
rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr);
rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ);
}
+EXPORT_SYMBOL_GPL(icssg_ft1_set_mac_addr);
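icssg_class_add_mcast_sr1() above implements a fixed-size hardware filter
policy: two FT1 slots are reserved for well-known multicast bases, the rest
are filled from the device's multicast list, and the classifier falls back to
allmulti once the table overflows. The same policy in a runnable sketch (the
slot count of 8 is an assumed value; FT1_NUM_SLOTS in the driver defines the
real one):

#include <assert.h>

#define FT1_NUM_SLOTS 8		/* assumed table size for illustration */
#define RESERVED_SLOTS 2	/* slots 0 and 1 are pre-programmed */

/* Returns the number of addresses programmed, or -1 if the table
 * overflowed and the caller must fall back to allmulti.
 */
static int program_mcast_slots(int num_addrs)
{
	int slot = RESERVED_SLOTS;

	for (int i = 0; i < num_addrs; i++) {
		if (slot >= FT1_NUM_SLOTS)
			return -1;	/* enable allmulti instead */
		/* program_slot(slot, addr[i]) would go here */
		slot++;
	}
	return slot - RESERVED_SLOTS;
}

int main(void)
{
	assert(program_mcast_slots(3) == 3);	/* fits: slots 2..4 */
	assert(program_mcast_slots(6) == 6);	/* fits exactly: slots 2..7 */
	assert(program_mcast_slots(7) == -1);	/* overflow -> allmulti */
	return 0;
}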
diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
new file mode 100644
index 000000000000..b9d8a93d1680
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -0,0 +1,1284 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSG Ethernet Driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (C) Siemens AG, 2024
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/remoteproc/pruss.h>
+#include <linux/regmap.h>
+#include <linux/remoteproc.h>
+
+#include "icssg_prueth.h"
+#include "../k3-cppi-desc-pool.h"
+
+/* Netif debug messages possible */
+#define PRUETH_EMAC_DEBUG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_INTR | \
+ NETIF_MSG_TX_DONE | \
+ NETIF_MSG_RX_STATUS | \
+ NETIF_MSG_PKTDATA | \
+ NETIF_MSG_HW | \
+ NETIF_MSG_WOL)
+
+#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)
+
+void prueth_cleanup_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ int max_rflows)
+{
+ if (rx_chn->desc_pool)
+ k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+
+ if (rx_chn->rx_chn)
+ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+}
+EXPORT_SYMBOL_GPL(prueth_cleanup_rx_chns);
+
+void prueth_cleanup_tx_chns(struct prueth_emac *emac)
+{
+ int i;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ if (tx_chn->desc_pool)
+ k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+ if (tx_chn->tx_chn)
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
+ /* Assume prueth_cleanup_tx_chns() is called at the
+ * end after all channel resources are freed
+ */
+ memset(tx_chn, 0, sizeof(*tx_chn));
+ }
+}
+EXPORT_SYMBOL_GPL(prueth_cleanup_tx_chns);
+
+void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ if (tx_chn->irq)
+ free_irq(tx_chn->irq, tx_chn);
+ netif_napi_del(&tx_chn->napi_tx);
+ }
+}
+EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi);
+
+void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc;
+ dma_addr_t buf_dma, next_desc_dma;
+ u32 buf_dma_len;
+
+ first_desc = desc;
+ next_desc = first_desc;
+
+ cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+ while (next_desc_dma) {
+ next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ next_desc_dma);
+ cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ }
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
+}
+EXPORT_SYMBOL_GPL(prueth_xmit_free);
+
+int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
+ int budget, bool *tdown)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_tx;
+ struct netdev_queue *netif_txq;
+ struct prueth_tx_chn *tx_chn;
+ unsigned int total_bytes = 0;
+ struct sk_buff *skb;
+ dma_addr_t desc_dma;
+ int res, num_tx = 0;
+ void **swdata;
+
+ tx_chn = &emac->tx_chns[chn];
+
+ while (true) {
+ res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ if (res == -ENODATA)
+ break;
+
+ /* teardown completion */
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ if (atomic_dec_and_test(&emac->tdown_cnt))
+ complete(&emac->tdown_complete);
+ *tdown = true;
+ break;
+ }
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+
+ /* was this command's TX complete? */
+ if (emac->is_sr1 && *(swdata) == emac->cmd_data) {
+ prueth_xmit_free(tx_chn, desc_tx);
+ continue;
+ }
+
+ skb = *(swdata);
+ prueth_xmit_free(tx_chn, desc_tx);
+
+ ndev = skb->dev;
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += skb->len;
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ num_tx++;
+ }
+
+ if (!num_tx)
+ return 0;
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+ netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* If the TX queue was stopped, wake it now
+ * if we have enough room.
+ */
+ __netif_tx_lock(netif_txq, smp_processor_id());
+ if (netif_running(ndev) &&
+ (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS))
+ netif_tx_wake_queue(netif_txq);
+ __netif_tx_unlock(netif_txq);
+ }
+
+ return num_tx;
+}
+
+static enum hrtimer_restart emac_tx_timer_callback(struct hrtimer *timer)
+{
+ struct prueth_tx_chn *tx_chns =
+ container_of(timer, struct prueth_tx_chn, tx_hrtimer);
+
+ enable_irq(tx_chns->irq);
+ return HRTIMER_NORESTART;
+}
+
+static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
+ struct prueth_emac *emac = tx_chn->emac;
+ bool tdown = false;
+ int num_tx_packets;
+
+ num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget,
+ &tdown);
+
+ if (num_tx_packets >= budget)
+ return budget;
+
+ if (napi_complete_done(napi_tx, num_tx_packets)) {
+ if (unlikely(tx_chn->tx_pace_timeout_ns && !tdown)) {
+ hrtimer_start(&tx_chn->tx_hrtimer,
+ ns_to_ktime(tx_chn->tx_pace_timeout_ns),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ enable_irq(tx_chn->irq);
+ }
+ }
+
+ return num_tx_packets;
+}
+
+static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
+{
+ struct prueth_tx_chn *tx_chn = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&tx_chn->napi_tx);
+
+ return IRQ_HANDLED;
+}
+
+int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int i, ret;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
+ hrtimer_init(&tx_chn->tx_hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ tx_chn->tx_hrtimer.function = &emac_tx_timer_callback;
+ ret = request_irq(tx_chn->irq, prueth_tx_irq,
+ IRQF_TRIGGER_HIGH, tx_chn->name,
+ tx_chn);
+ if (ret) {
+ netif_napi_del(&tx_chn->napi_tx);
+ dev_err(prueth->dev, "unable to request TX IRQ %d\n",
+ tx_chn->irq);
+ goto fail;
+ }
+ }
+
+ return 0;
+fail:
+ prueth_ndev_del_tx_napi(emac, i);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(prueth_ndev_add_tx_napi);
+
+int prueth_init_tx_chns(struct prueth_emac *emac)
+{
+ static const struct k3_ring_cfg ring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0,
+ .size = PRUETH_MAX_TX_DESC,
+ };
+ struct k3_udma_glue_tx_channel_cfg tx_cfg;
+ struct device *dev = emac->prueth->dev;
+ struct net_device *ndev = emac->ndev;
+ int ret, slice, i;
+ u32 hdesc_size;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0)
+ return slice;
+
+ init_completion(&emac->tdown_complete);
+
+ hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
+ PRUETH_NAV_SW_DATA_SIZE);
+ memset(&tx_cfg, 0, sizeof(tx_cfg));
+ tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
+ tx_cfg.tx_cfg = ring_cfg;
+ tx_cfg.txcq_cfg = ring_cfg;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
+
+ /* To differentiate channels for SLICE0 vs SLICE1 */
+ snprintf(tx_chn->name, sizeof(tx_chn->name),
+ "tx%d-%d", slice, i);
+
+ tx_chn->emac = emac;
+ tx_chn->id = i;
+ tx_chn->descs_num = PRUETH_MAX_TX_DESC;
+
+ tx_chn->tx_chn =
+ k3_udma_glue_request_tx_chn(dev, tx_chn->name,
+ &tx_cfg);
+ if (IS_ERR(tx_chn->tx_chn)) {
+ ret = PTR_ERR(tx_chn->tx_chn);
+ tx_chn->tx_chn = NULL;
+ netdev_err(ndev,
+ "Failed to request tx dma ch: %d\n", ret);
+ goto fail;
+ }
+
+ tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
+ tx_chn->desc_pool =
+ k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn->name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+ tx_chn->desc_pool = NULL;
+ netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
+ goto fail;
+ }
+
+ ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to get tx irq\n");
+ goto fail;
+ }
+ tx_chn->irq = ret;
+
+ snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
+ dev_name(dev), tx_chn->id);
+ }
+
+ return 0;
+
+fail:
+ prueth_cleanup_tx_chns(emac);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(prueth_init_tx_chns);
+
+int prueth_init_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ char *name, u32 max_rflows,
+ u32 max_desc_num)
+{
+ struct k3_udma_glue_rx_channel_cfg rx_cfg;
+ struct device *dev = emac->prueth->dev;
+ struct net_device *ndev = emac->ndev;
+ u32 fdqring_id, hdesc_size;
+ int i, ret = 0, slice;
+ int flow_id_base;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0)
+ return slice;
+
+ /* To differentiate channels for SLICE0 vs SLICE1 */
+ snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);
+
+ hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
+ PRUETH_NAV_SW_DATA_SIZE);
+ memset(&rx_cfg, 0, sizeof(rx_cfg));
+ rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
+ rx_cfg.flow_id_num = max_rflows;
+ rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */
+
+ /* init all flows */
+ rx_chn->dev = dev;
+ rx_chn->descs_num = max_desc_num;
+
+ rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
+ &rx_cfg);
+ if (IS_ERR(rx_chn->rx_chn)) {
+ ret = PTR_ERR(rx_chn->rx_chn);
+ rx_chn->rx_chn = NULL;
+ netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
+ goto fail;
+ }
+
+ rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
+ rx_chn->descs_num,
+ hdesc_size,
+ rx_chn->name);
+ if (IS_ERR(rx_chn->desc_pool)) {
+ ret = PTR_ERR(rx_chn->desc_pool);
+ rx_chn->desc_pool = NULL;
+ netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
+ goto fail;
+ }
+
+ flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
+ if (emac->is_sr1 && !strcmp(name, "rxmgm")) {
+ emac->rx_mgm_flow_id_base = flow_id_base;
+ netdev_dbg(ndev, "mgm flow id base = %d\n", flow_id_base);
+ } else {
+ emac->rx_flow_id_base = flow_id_base;
+ netdev_dbg(ndev, "flow id base = %d\n", flow_id_base);
+ }
+
+ fdqring_id = K3_RINGACC_RING_ID_ANY;
+ for (i = 0; i < rx_cfg.flow_id_num; i++) {
+ struct k3_ring_cfg rxring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0,
+ };
+ struct k3_ring_cfg fdqring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .flags = K3_RINGACC_RING_SHARED,
+ };
+ struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
+ .rx_cfg = rxring_cfg,
+ .rxfdq_cfg = fdqring_cfg,
+ .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
+ .src_tag_lo_sel =
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
+ };
+
+ rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
+ rx_flow_cfg.rx_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;
+
+ ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
+ i, &rx_flow_cfg);
+ if (ret) {
+ netdev_err(ndev, "Failed to init rx flow%d %d\n",
+ i, ret);
+ goto fail;
+ }
+ if (!i)
+ fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
+ i);
+ ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+ if (ret < 0) {
+ netdev_err(ndev, "Failed to get rx dma irq");
+ goto fail;
+ }
+ rx_chn->irq[i] = ret;
+ }
+
+ return 0;
+
+fail:
+ prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(prueth_init_rx_chns);
+
+int prueth_dma_rx_push(struct prueth_emac *emac,
+ struct sk_buff *skb,
+ struct prueth_rx_chn *rx_chn)
+{
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ u32 pkt_len = skb_tailroom(skb);
+ dma_addr_t desc_dma;
+ dma_addr_t buf_dma;
+ void **swdata;
+
+ desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+ if (!desc_rx) {
+ netdev_err(ndev, "rx push: failed to allocate descriptor\n");
+ return -ENOMEM;
+ }
+ desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+ buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
+ return -EINVAL;
+ }
+
+ cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ *swdata = skb;
+
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
+ desc_rx, desc_dma);
+}
+EXPORT_SYMBOL_GPL(prueth_dma_rx_push);
+
+u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
+{
+ u32 iepcount_lo, iepcount_hi, hi_rollover_count;
+ u64 ns;
+
+ iepcount_lo = lo & GENMASK(19, 0);
+ iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
+ hi_rollover_count = hi >> 11;
+
+ ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
+ ns = ns * cycle_time_ns + iepcount_lo;
+
+ return ns;
+}
+EXPORT_SYMBOL_GPL(icssg_ts_to_ns);
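+
+/* Worked example for the packing above (illustrative comment, not
+ * driver code): with hi_sw = 2, hi = 0, lo = (5 << 20) | 123 and a
+ * 1000 ns cycle, iepcount_lo = 123, iepcount_hi = 5 and
+ * hi_rollover_count = 0, so ns = (5 + 2) * 1000 + 123 = 7123.
+ */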
+
+void emac_rx_timestamp(struct prueth_emac *emac,
+ struct sk_buff *skb, u32 *psdata)
+{
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ if (emac->is_sr1) {
+ ns = (u64)psdata[1] << 32 | psdata[0];
+ } else {
+ u32 hi_sw = readl(emac->prueth->shram.va +
+ TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
+ ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
+ IEP_DEFAULT_CYCLE_TIME_NS);
+ }
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
+static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_chns;
+ u32 buf_dma_len, pkt_len, port_id = 0;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb, *new_skb;
+ dma_addr_t desc_dma, buf_dma;
+ void **swdata;
+ u32 *psdata;
+ int ret;
+
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ netdev_err(ndev, "rx pop: failed: %d\n", ret);
+ return ret;
+ }
+
+ if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
+ return 0;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* RX HW timestamp */
+ if (emac->rx_ts_enabled)
+ emac_rx_timestamp(emac, skb, psdata);
+
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+ /* firmware adds 4 CRC bytes, strip them */
+ pkt_len -= 4;
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ skb->dev = ndev;
+ new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
+ /* if allocation fails we drop the packet but push the
+ * descriptor back to the ring with the old skb to prevent a stall
+ */
+ if (!new_skb) {
+ ndev->stats.rx_dropped++;
+ new_skb = skb;
+ } else {
+ /* send the filled skb up the n/w stack */
+ skb_put(skb, pkt_len);
+ if (emac->prueth->is_switch_mode)
+ skb->offload_fwd_mark = emac->offload_fwd_mark;
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&emac->napi_rx, skb);
+ ndev->stats.rx_bytes += pkt_len;
+ ndev->stats.rx_packets++;
+ }
+
+ /* queue another RX DMA */
+ ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
+ if (WARN_ON(ret < 0)) {
+ dev_kfree_skb_any(new_skb);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ }
+
+ return ret;
+}
+
+static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct prueth_rx_chn *rx_chn = data;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ u32 buf_dma_len;
+ void **swdata;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ dev_kfree_skb_any(skb);
+}
+
+static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
+{
+ int i;
+
+ /* search and get the next free slot */
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (!emac->tx_ts_skb[i]) {
+ emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
+ return i;
+ }
+ }
+
+ return -EBUSY;
+}
+
+/**
+ * icssg_ndo_start_xmit - EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: EMAC network adapter
+ *
+ * Called by the system to transmit a packet; we queue the packet in
+ * the EMAC hardware transmit queue.
+ * Doesn't wait for completion; TX completion is checked later in
+ * emac_tx_complete_packets().
+ *
+ * Return: enum netdev_tx
+ */
+enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct netdev_queue *netif_txq;
+ struct prueth_tx_chn *tx_chn;
+ dma_addr_t desc_dma, buf_dma;
+ int i, ret = 0, q_idx;
+ bool in_tx_ts = 0;
+ int tx_ts_cookie;
+ void **swdata;
+ u32 pkt_len;
+ u32 *epib;
+
+ pkt_len = skb_headlen(skb);
+ q_idx = skb_get_queue_mapping(skb);
+
+ tx_chn = &emac->tx_chns[q_idx];
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
+
+ /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "tx: failed to map skb buffer\n");
+ ret = NETDEV_TX_OK;
+ goto drop_free_skb;
+ }
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ goto drop_stop_q_busy;
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, 0);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ emac->tx_ts_enabled) {
+ tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
+ if (tx_ts_cookie >= 0) {
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ /* Request TX timestamp */
+ epib[0] = (u32)tx_ts_cookie;
+ epib[1] = 0x80000000; /* TX TS request */
+ emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
+ in_tx_ts = 1;
+ }
+ }
+
+ /* Set the dst tag to indicate the internal queue id at the firmware,
+ * which lives in bits 8..15; bits 0..7 indicate the port number for
+ * directed packets in switch mode operation.
+ */
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ *swdata = skb;
+
+ /* Handle the case where skb is fragmented in pages */
+ cur_desc = first_desc;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 frag_size = skb_frag_size(frag);
+
+ next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!next_desc) {
+ netdev_err(ndev,
+ "tx: failed to allocate frag. descriptor\n");
+ goto free_desc_stop_q_busy_cleanup_tx_ts;
+ }
+
+ buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(ndev, "tx: Failed to map skb page\n");
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ ret = NETDEV_TX_OK;
+ goto cleanup_tx_ts;
+ }
+
+ cppi5_hdesc_reset_hbdesc(next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(next_desc,
+ buf_dma, frag_size, buf_dma, frag_size);
+
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
+ cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
+
+ pkt_len += frag_size;
+ cur_desc = next_desc;
+ }
+ WARN_ON_ONCE(pkt_len != skb->len);
+
+ /* report bql before sending packet */
+ netdev_tx_sent_queue(netif_txq, pkt_len);
+
+ cppi5_hdesc_set_pktlen(first_desc, pkt_len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+ /* cppi5_desc_dump(first_desc, 64); */
+
+ skb_tx_timestamp(skb); /* SW timestamp if SKBTX_IN_PROGRESS not set */
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(ndev, "tx: push failed: %d\n", ret);
+ goto drop_free_descs;
+ }
+
+ if (in_tx_ts)
+ atomic_inc(&emac->tx_ts_pending);
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
+ netif_tx_stop_queue(netif_txq);
+ /* Barrier, so that stop_queue is visible to other CPUs */
+ smp_mb__after_atomic();
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS)
+ netif_tx_wake_queue(netif_txq);
+ }
+
+ return NETDEV_TX_OK;
+
+cleanup_tx_ts:
+ if (in_tx_ts) {
+ dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
+ emac->tx_ts_skb[tx_ts_cookie] = NULL;
+ }
+
+drop_free_descs:
+ prueth_xmit_free(tx_chn, first_desc);
+
+drop_free_skb:
+ dev_kfree_skb_any(skb);
+
+ /* error */
+ ndev->stats.tx_dropped++;
+ netdev_err(ndev, "tx: error: %d\n", ret);
+
+ return ret;
+
+free_desc_stop_q_busy_cleanup_tx_ts:
+ if (in_tx_ts) {
+ dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
+ emac->tx_ts_skb[tx_ts_cookie] = NULL;
+ }
+ prueth_xmit_free(tx_chn, first_desc);
+
+drop_stop_q_busy:
+ netif_tx_stop_queue(netif_txq);
+ return NETDEV_TX_BUSY;
+}
+EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit);
+
+static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct prueth_tx_chn *tx_chn = data;
+ struct cppi5_host_desc_t *desc_tx;
+ struct sk_buff *skb;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ prueth_xmit_free(tx_chn, desc_tx);
+
+ dev_kfree_skb_any(skb);
+}
+
+irqreturn_t prueth_rx_irq(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&emac->napi_rx);
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL_GPL(prueth_rx_irq);
+
+void prueth_emac_stop(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice;
+
+ switch (emac->port_id) {
+ case PRUETH_PORT_MII0:
+ slice = ICSS_SLICE0;
+ break;
+ case PRUETH_PORT_MII1:
+ slice = ICSS_SLICE1;
+ break;
+ default:
+ netdev_err(emac->ndev, "invalid port\n");
+ return;
+ }
+
+ emac->fw_running = 0;
+ if (!emac->is_sr1)
+ rproc_shutdown(prueth->txpru[slice]);
+ rproc_shutdown(prueth->rtu[slice]);
+ rproc_shutdown(prueth->pru[slice]);
+}
+EXPORT_SYMBOL_GPL(prueth_emac_stop);
+
+void prueth_cleanup_tx_ts(struct prueth_emac *emac)
+{
+ int i;
+
+ for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
+ if (emac->tx_ts_skb[i]) {
+ dev_kfree_skb_any(emac->tx_ts_skb[i]);
+ emac->tx_ts_skb[i] = NULL;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(prueth_cleanup_tx_ts);
+
+int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
+ int rx_flow = emac->is_sr1 ?
+ PRUETH_RX_FLOW_DATA_SR1 : PRUETH_RX_FLOW_DATA;
+ int flow = emac->is_sr1 ?
+ PRUETH_MAX_RX_FLOWS_SR1 : PRUETH_MAX_RX_FLOWS;
+ int num_rx = 0;
+ int cur_budget;
+ int ret;
+
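+	/* Walk the RX flows from the highest index down, sharing the
+	 * NAPI budget between them until it is exhausted.
+	 */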
+ while (flow--) {
+ cur_budget = budget - num_rx;
+
+ while (cur_budget--) {
+ ret = emac_rx_packet(emac, flow);
+ if (ret)
+ break;
+ num_rx++;
+ }
+
+ if (num_rx >= budget)
+ break;
+ }
+
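+	/* Budget not exhausted: complete NAPI and re-arm the RX IRQ,
+	 * either immediately or after the configured pacing interval.
+	 */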
+ if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
+ if (unlikely(emac->rx_pace_timeout_ns)) {
+ hrtimer_start(&emac->rx_hrtimer,
+ ns_to_ktime(emac->rx_pace_timeout_ns),
+ HRTIMER_MODE_REL_PINNED);
+ } else {
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+ }
+ }
+
+ return num_rx;
+}
+EXPORT_SYMBOL_GPL(icssg_napi_rx_poll);
+
+int prueth_prepare_rx_chan(struct prueth_emac *emac,
+ struct prueth_rx_chn *chn,
+ int buf_size)
+{
+ struct sk_buff *skb;
+ int i, ret;
+
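+	/* Pre-fill the RX free queue: one mapped skb per descriptor */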
+ for (i = 0; i < chn->descs_num; i++) {
+ skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ ret = prueth_dma_rx_push(emac, skb, chn);
+ if (ret < 0) {
+ netdev_err(emac->ndev,
+ "cannot submit skb for rx chan %s ret %d\n",
+ chn->name, ret);
+ kfree_skb(skb);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan);
+
+void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
+ bool free_skb)
+{
+ int i;
+
+ for (i = 0; i < ch_num; i++) {
+ if (free_skb)
+ k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
+ &emac->tx_chns[i],
+ prueth_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
+ }
+}
+EXPORT_SYMBOL_GPL(prueth_reset_tx_chan);
+
+void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
+ int num_flows, bool disable)
+{
+ int i;
+
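+	/* Reset every flow; the shared free-descriptor queue is only
+	 * reset for flow 0 (!!i skips it for the others).
+	 */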
+ for (i = 0; i < num_flows; i++)
+ k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
+ prueth_rx_cleanup, !!i);
+ if (disable)
+ k3_udma_glue_disable_rx_chn(chn->rx_chn);
+}
+EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);
+
+void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ ndev->stats.tx_errors++;
+}
+EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout);
+
+static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ emac->tx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ emac->tx_ts_enabled = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
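+	/* The firmware timestamps all received packets, so any PTP
+	 * filter request is upgraded to HWTSTAMP_FILTER_ALL below.
+	 */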
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ emac->rx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ emac->rx_ts_enabled = 1;
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct hwtstamp_config config;
+
+ config.flags = 0;
+ config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return emac_get_ts_config(ndev, ifr);
+ case SIOCSHWTSTAMP:
+ return emac_set_ts_config(ndev, ifr);
+ default:
+ break;
+ }
+
+ return phy_do_ioctl(ndev, ifr, cmd);
+}
+EXPORT_SYMBOL_GPL(icssg_ndo_ioctl);
+
+void icssg_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+
+ emac_update_hardware_stats(emac);
+
+ stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets");
+ stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes");
+ stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets");
+ stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes");
+ stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors");
+ stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
+ stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");
+
+ stats->rx_errors = ndev->stats.rx_errors;
+ stats->rx_dropped = ndev->stats.rx_dropped;
+ stats->tx_errors = ndev->stats.tx_errors;
+ stats->tx_dropped = ndev->stats.tx_dropped;
+}
+EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64);
+
+int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret;
+
+ ret = snprintf(name, len, "p%d", emac->port_id);
+ if (ret >= len)
+ return -EINVAL;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icssg_ndo_get_phys_port_name);
+
+/* get emac_port corresponding to eth_node name */
+int prueth_node_port(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_PORT_MII0;
+ else if (port_id == 1)
+ return PRUETH_PORT_MII1;
+ else
+ return PRUETH_PORT_INVALID;
+}
+EXPORT_SYMBOL_GPL(prueth_node_port);
+
+/* get MAC instance corresponding to eth_node name */
+int prueth_node_mac(struct device_node *eth_node)
+{
+ u32 port_id;
+ int ret;
+
+ ret = of_property_read_u32(eth_node, "reg", &port_id);
+ if (ret)
+ return ret;
+
+ if (port_id == 0)
+ return PRUETH_MAC0;
+ else if (port_id == 1)
+ return PRUETH_MAC1;
+ else
+ return PRUETH_MAC_INVALID;
+}
+EXPORT_SYMBOL_GPL(prueth_node_mac);
+
+void prueth_netdev_exit(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ enum prueth_mac mac;
+
+ mac = prueth_node_mac(eth_node);
+ if (mac == PRUETH_MAC_INVALID)
+ return;
+
+ emac = prueth->emac[mac];
+ if (!emac)
+ return;
+
+ if (of_phy_is_fixed_link(emac->phy_node))
+ of_phy_deregister_fixed_link(emac->phy_node);
+
+ netif_napi_del(&emac->napi_rx);
+
+ pruss_release_mem_region(prueth->pruss, &emac->dram);
+ destroy_workqueue(emac->cmd_wq);
+ free_netdev(emac->ndev);
+ prueth->emac[mac] = NULL;
+}
+EXPORT_SYMBOL_GPL(prueth_netdev_exit);
+
+int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1)
+{
+ struct device *dev = prueth->dev;
+ enum pruss_pru_id pruss_id;
+ struct device_node *np;
+ int idx = -1, ret;
+
+ np = dev->of_node;
+
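+	/* Map the slice to the index of its first core in the PRU
+	 * phandle list: two cores (PRU, RTU) per slice on SR1.0,
+	 * three (PRU, RTU, TX_PRU) on SR2.0.
+	 */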
+ switch (slice) {
+ case ICSS_SLICE0:
+ idx = 0;
+ break;
+ case ICSS_SLICE1:
+ idx = is_sr1 ? 2 : 3;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
+ if (IS_ERR(prueth->pru[slice])) {
+ ret = PTR_ERR(prueth->pru[slice]);
+ prueth->pru[slice] = NULL;
+ return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
+ }
+ prueth->pru_id[slice] = pruss_id;
+
+ idx++;
+ prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
+ if (IS_ERR(prueth->rtu[slice])) {
+ ret = PTR_ERR(prueth->rtu[slice]);
+ prueth->rtu[slice] = NULL;
+ return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
+ }
+
+ if (is_sr1)
+ return 0;
+
+ idx++;
+ prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
+ if (IS_ERR(prueth->txpru[slice])) {
+ ret = PTR_ERR(prueth->txpru[slice]);
+ prueth->txpru[slice] = NULL;
+ return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(prueth_get_cores);
+
+void prueth_put_cores(struct prueth *prueth, int slice)
+{
+ if (prueth->txpru[slice])
+ pru_rproc_put(prueth->txpru[slice]);
+
+ if (prueth->rtu[slice])
+ pru_rproc_put(prueth->rtu[slice]);
+
+ if (prueth->pru[slice])
+ pru_rproc_put(prueth->pru[slice]);
+}
+EXPORT_SYMBOL_GPL(prueth_put_cores);
+
+#ifdef CONFIG_PM_SLEEP
+static int prueth_suspend(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
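+		/* Detach first so the stack stops queuing packets,
+		 * then close the interface.
+		 */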
+ if (netif_running(ndev)) {
+ netif_device_detach(ndev);
+ ret = ndev->netdev_ops->ndo_stop(ndev);
+ if (ret < 0) {
+				netdev_err(ndev, "failed to stop: %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int prueth_resume(struct device *dev)
+{
+ struct prueth *prueth = dev_get_drvdata(dev);
+ struct net_device *ndev;
+ int i, ret;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ ndev = prueth->registered_netdevs[i];
+
+ if (!ndev)
+ continue;
+
+ if (netif_running(ndev)) {
+ ret = ndev->netdev_ops->ndo_open(ndev);
+ if (ret < 0) {
+				netdev_err(ndev, "failed to start: %d\n", ret);
+ return ret;
+ }
+ netif_device_attach(ndev);
+ }
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+const struct dev_pm_ops prueth_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
+};
+EXPORT_SYMBOL_GPL(prueth_dev_pm_ops);
+
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
+MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver Common Module");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c
index 99de8a40ed60..dae52a83a378 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.c
@@ -20,6 +20,8 @@
/* IPG is in core_clk cycles */
#define MII_RT_TX_IPG_100M 0x17
#define MII_RT_TX_IPG_1G 0xb
+#define MII_RT_TX_IPG_100M_SR1 0x166
+#define MII_RT_TX_IPG_1G_SR1 0x1a
#define ICSSG_QUEUES_MAX 64
#define ICSSG_QUEUE_OFFSET 0xd00
@@ -105,28 +107,49 @@ static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = {
},
};
+static void icssg_config_mii_init_switch(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ int mii = prueth_emac_slice(emac);
+ u32 txcfg_reg, pcnt_reg, txcfg;
+ struct regmap *mii_rt;
+
+ mii_rt = prueth->mii_rt;
+
+ txcfg_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
+ PRUSS_MII_RT_TXCFG1;
+ pcnt_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
+ PRUSS_MII_RT_RX_PCNT1;
+
+ txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE |
+ PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE |
+ PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN;
+
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII && mii == ICSS_MII1)
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+ else if (emac->phy_if != PHY_INTERFACE_MODE_MII && mii == ICSS_MII0)
+ txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
+
+ regmap_write(mii_rt, txcfg_reg, txcfg);
+ regmap_write(mii_rt, pcnt_reg, 0x1);
+}
+
static void icssg_config_mii_init(struct prueth_emac *emac)
{
- u32 rxcfg, txcfg, rxcfg_reg, txcfg_reg, pcnt_reg;
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
+ u32 txcfg, txcfg_reg, pcnt_reg;
struct regmap *mii_rt;
mii_rt = prueth->mii_rt;
- rxcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RXCFG0 :
- PRUSS_MII_RT_RXCFG1;
txcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 :
PRUSS_MII_RT_TXCFG1;
pcnt_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 :
PRUSS_MII_RT_RX_PCNT1;
- rxcfg = MII_RXCFG_DEFAULT;
txcfg = MII_TXCFG_DEFAULT;
- if (slice == ICSS_MII1)
- rxcfg |= PRUSS_MII_RT_RXCFG_RX_MUX_SEL;
-
 /* In MII mode the TX lines are swapped inside the ICSSG, so the
 * TX_MUX_SEL config also needs to be swapped compared to RGMII mode.
 */
@@ -135,7 +158,6 @@ static void icssg_config_mii_init(struct prueth_emac *emac)
else if (emac->phy_if != PHY_INTERFACE_MODE_MII && slice == ICSS_MII1)
txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL;
- regmap_write(mii_rt, rxcfg_reg, rxcfg);
regmap_write(mii_rt, txcfg_reg, txcfg);
regmap_write(mii_rt, pcnt_reg, 0x1);
}
@@ -202,24 +224,31 @@ void icssg_config_ipg(struct prueth_emac *emac)
{
struct prueth *prueth = emac->prueth;
int slice = prueth_emac_slice(emac);
+ u32 ipg;
switch (emac->speed) {
case SPEED_1000:
- icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_1G);
+ ipg = emac->is_sr1 ? MII_RT_TX_IPG_1G_SR1 : MII_RT_TX_IPG_1G;
break;
case SPEED_100:
- icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M);
+ ipg = emac->is_sr1 ? MII_RT_TX_IPG_100M_SR1 : MII_RT_TX_IPG_100M;
break;
case SPEED_10:
+ /* Firmware hardcodes IPG for SR1.0 */
+ if (emac->is_sr1)
+ return;
 /* IPG for 10M is the same as for 100M */
- icssg_mii_update_ipg(prueth->mii_rt, slice, MII_RT_TX_IPG_100M);
+ ipg = MII_RT_TX_IPG_100M;
break;
default:
 /* Other link speeds are not supported */
netdev_err(emac->ndev, "Unsupported link speed\n");
return;
}
+
+ icssg_mii_update_ipg(prueth->mii_rt, slice, ipg);
}
+EXPORT_SYMBOL_GPL(icssg_config_ipg);
static void emac_r30_cmd_init(struct prueth_emac *emac)
{
@@ -249,6 +278,66 @@ static int emac_r30_is_done(struct prueth_emac *emac)
return 1;
}
+static int prueth_switch_buffer_setup(struct prueth_emac *emac)
+{
+ struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
+ struct icssg_rxq_ctx __iomem *rxq_ctx;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ u32 addr;
+ int i;
+
+ addr = lower_32_bits(prueth->msmcram.pa);
+ if (slice)
+ addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+
+ if (addr % SZ_64K) {
+ dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n");
+ return -EINVAL;
+ }
+
+ bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET;
+	/* Workaround for a firmware bug: bpool 0 needs to be initialized */
+ for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) {
+ writel(addr, &bpool_cfg[i].addr);
+ writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len);
+ addr += PRUETH_EMAC_BUF_POOL_SIZE;
+ }
+
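+	/* Skip past the region owned by the other slice so the two
+	 * slices carve out disjoint areas of MSMC RAM.
+	 */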
+ if (!slice)
+ addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE;
+ else
+ addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
+
+ for (i = PRUETH_NUM_BUF_POOLS;
+ i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS;
+ i++) {
+		/* The driver only uses the first 4 queues per PRU, so only initialize those */
+ if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) {
+ writel(addr, &bpool_cfg[i].addr);
+ writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len);
+ addr += PRUETH_SW_BUF_POOL_SIZE_HOST;
+ } else {
+ writel(0, &bpool_cfg[i].addr);
+ writel(0, &bpool_cfg[i].len);
+ }
+ }
+
+ if (!slice)
+ addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST;
+ else
+ addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+
+ rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET;
+ for (i = 0; i < 3; i++)
+ writel(addr, &rxq_ctx->start[i]);
+
+ addr += PRUETH_EMAC_RX_CTX_BUF_SIZE;
+ writel(addr - SZ_2K, &rxq_ctx->end);
+
+ return 0;
+}
+
static int prueth_emac_buffer_setup(struct prueth_emac *emac)
{
struct icssg_buffer_pool_cfg __iomem *bpool_cfg;
@@ -313,25 +402,63 @@ static void icssg_init_emac_mode(struct prueth *prueth)
/* When the device is configured as a bridge and it is being brought
* back to the emac mode, the host mac address has to be set as 0.
*/
+ u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
+ int i;
u8 mac[ETH_ALEN] = { 0 };
if (prueth->emacs_initialized)
return;
- regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1,
- SMEM_VLAN_OFFSET_MASK, 0);
- regmap_write(prueth->miig_rt, FDB_GEN_CFG2, 0);
+ /* Set VLAN TABLE address base */
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
+ addr << SMEM_VLAN_OFFSET);
+ /* Set enable VLAN aware mode, and FDBs for all PRUs */
+ regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN));
+ prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
+ EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
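+	/* Default to a 1:1 VID-to-FID mapping with no port membership */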
+ for (i = 0; i < SZ_4K - 1; i++) {
+ prueth->vlan_tbl[i].fid = i;
+ prueth->vlan_tbl[i].fid_c1 = 0;
+ }
/* Clear host MAC address */
icssg_class_set_host_mac_addr(prueth->miig_rt, mac);
}
+static void icssg_init_switch_mode(struct prueth *prueth)
+{
+ u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET;
+ int i;
+
+ if (prueth->emacs_initialized)
+ return;
+
+ /* Set VLAN TABLE address base */
+ regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK,
+ addr << SMEM_VLAN_OFFSET);
+ /* Set enable VLAN aware mode, and FDBs for all PRUs */
+ regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL);
+ prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va +
+ EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET);
+ for (i = 0; i < SZ_4K - 1; i++) {
+ prueth->vlan_tbl[i].fid = i;
+ prueth->vlan_tbl[i].fid_c1 = 0;
+ }
+
+ if (prueth->hw_bridge_dev)
+ icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr);
+ icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST);
+}
+
int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
{
void __iomem *config = emac->dram.va + ICSSG_CONFIG_OFFSET;
struct icssg_flow_cfg __iomem *flow_cfg;
int ret;
- icssg_init_emac_mode(prueth);
+ if (prueth->is_switch_mode)
+ icssg_init_switch_mode(prueth);
+ else
+ icssg_init_emac_mode(prueth);
memset_io(config, 0, TAS_GATE_MASK_LIST0);
icssg_miig_queues_init(prueth, slice);
@@ -345,7 +472,10 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET,
ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT);
icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if);
- icssg_config_mii_init(emac);
+ if (prueth->is_switch_mode)
+ icssg_config_mii_init_switch(emac);
+ else
+ icssg_config_mii_init(emac);
icssg_config_ipg(emac);
icssg_update_rgmii_cfg(prueth->miig_rt, emac);
@@ -368,7 +498,10 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
writeb(0, config + SPL_PKT_DEFAULT_PRIORITY);
writeb(0, config + QUEUE_NUM_UNTAGGED);
- ret = prueth_emac_buffer_setup(emac);
+ if (prueth->is_switch_mode)
+ ret = prueth_switch_buffer_setup(emac);
+ else
+ ret = prueth_emac_buffer_setup(emac);
if (ret)
return ret;
@@ -376,6 +509,7 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice)
return 0;
}
+EXPORT_SYMBOL_GPL(icssg_config);
/* Bitmask for ICSSG r30 commands */
static const struct icssg_r30_cmd emac_r32_bitmask[] = {
@@ -400,8 +534,8 @@ static const struct icssg_r30_cmd emac_r32_bitmask[] = {
{{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}} /* VLAN UNWARE*/
};
-int emac_set_port_state(struct prueth_emac *emac,
- enum icssg_port_state_cmd cmd)
+int icssg_set_port_state(struct prueth_emac *emac,
+ enum icssg_port_state_cmd cmd)
{
struct icssg_r30_cmd __iomem *p;
int ret = -ETIMEDOUT;
@@ -432,6 +566,7 @@ int emac_set_port_state(struct prueth_emac *emac,
return ret;
}
+EXPORT_SYMBOL_GPL(icssg_set_port_state);
void icssg_config_half_duplex(struct prueth_emac *emac)
{
@@ -443,6 +578,7 @@ void icssg_config_half_duplex(struct prueth_emac *emac)
val = get_random_u32();
writel(val, emac->dram.va + HD_RAND_SEED_OFFSET);
}
+EXPORT_SYMBOL_GPL(icssg_config_half_duplex);
void icssg_config_set_speed(struct prueth_emac *emac)
{
@@ -469,3 +605,180 @@ void icssg_config_set_speed(struct prueth_emac *emac)
writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET);
}
+EXPORT_SYMBOL_GPL(icssg_config_set_speed);
+
+int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd,
+ struct mgmt_cmd_rsp *rsp)
+{
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ int addr, ret;
+
+ addr = icssg_queue_pop(prueth, slice == 0 ?
+ ICSSG_CMD_POP_SLICE0 : ICSSG_CMD_POP_SLICE1);
+ if (addr < 0)
+ return addr;
+
+	/* The first 4 bytes hold FW-owned buffer linking info and must
+	 * not be touched
+	 */
+ memcpy_toio(prueth->shram.va + addr + 4, cmd, sizeof(*cmd));
+ icssg_queue_push(prueth, slice == 0 ?
+ ICSSG_CMD_PUSH_SLICE0 : ICSSG_CMD_PUSH_SLICE1, addr);
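+	/* Poll the response queue every 2 ms; give the firmware up to
+	 * 20 s to reply.
+	 */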
+ ret = read_poll_timeout(icssg_queue_pop, addr, addr >= 0,
+ 2000, 20000000, false, prueth, slice == 0 ?
+ ICSSG_RSP_POP_SLICE0 : ICSSG_RSP_POP_SLICE1);
+ if (ret) {
+ netdev_err(emac->ndev, "Timedout sending HWQ message\n");
+ return ret;
+ }
+
+ memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp));
+	/* Return the buffer back to the pool */
+ icssg_queue_push(prueth, slice == 0 ?
+ ICSSG_RSP_PUSH_SLICE0 : ICSSG_RSP_PUSH_SLICE1, addr);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icssg_send_fdb_msg);
+
+static void icssg_fdb_setup(struct prueth_emac *emac, struct mgmt_cmd *fdb_cmd,
+ const unsigned char *addr, u8 fid, int cmd)
+{
+ int slice = prueth_emac_slice(emac);
+ u8 mac_fid[ETH_ALEN + 2];
+ u16 fdb_slot;
+
+ ether_addr_copy(mac_fid, addr);
+
+	/* 1:1 VID-FID mapping is already set up */
+ mac_fid[ETH_ALEN] = fid;
+ mac_fid[ETH_ALEN + 1] = 0;
+
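+	/* Hash the MAC+FID into an FDB bucket: CRC32-LE over the 8
+	 * bytes, bit-reversed, masked down to the number of buckets.
+	 */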
+ fdb_slot = bitrev32(crc32_le(0, mac_fid, 8)) & PRUETH_SWITCH_FDB_MASK;
+
+ fdb_cmd->header = ICSSG_FW_MGMT_CMD_HEADER;
+ fdb_cmd->type = ICSSG_FW_MGMT_FDB_CMD_TYPE;
+ fdb_cmd->seqnum = ++(emac->prueth->icssg_hwcmdseq);
+ fdb_cmd->param = cmd;
+ fdb_cmd->param |= (slice << 4);
+
+ memcpy(&fdb_cmd->cmd_args[0], addr, 4);
+ memcpy(&fdb_cmd->cmd_args[1], &addr[4], 2);
+ fdb_cmd->cmd_args[2] = fdb_slot;
+
+ netdev_dbg(emac->ndev, "MAC %pM slot %X FID %X\n", addr, fdb_slot, fid);
+}
+
+int icssg_fdb_add_del(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid, u8 fid_c2, bool add)
+{
+ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
+ struct mgmt_cmd fdb_cmd = { 0 };
+ u8 fid = vid;
+ int ret;
+
+ icssg_fdb_setup(emac, &fdb_cmd, addr, fid, add ? ICSS_CMD_ADD_FDB : ICSS_CMD_DEL_FDB);
+
+ fid_c2 |= ICSSG_FDB_ENTRY_VALID;
+ fdb_cmd.cmd_args[1] |= ((fid << 16) | (fid_c2 << 24));
+
+ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
+ if (ret)
+ return ret;
+
+ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
+ if (fdb_cmd_rsp.status == 1)
+ return 0;
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(icssg_fdb_add_del);
+
+int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid)
+{
+ struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 };
+ struct mgmt_cmd fdb_cmd = { 0 };
+ struct prueth_fdb_slot *slot;
+ u8 fid = vid;
+ int ret, i;
+
+ icssg_fdb_setup(emac, &fdb_cmd, addr, fid, ICSS_CMD_GET_FDB_SLOT);
+
+ fdb_cmd.cmd_args[1] |= fid << 16;
+
+ ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp);
+ if (ret)
+ return ret;
+
+ WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum);
+
+ slot = (struct prueth_fdb_slot __force *)(emac->dram.va + FDB_CMD_BUFFER);
+ for (i = 0; i < 4; i++) {
+ if (ether_addr_equal(addr, slot->mac) && vid == slot->fid)
+ return (slot->fid_c2 & ~ICSSG_FDB_ENTRY_VALID);
+ slot++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(icssg_fdb_lookup);
+
+void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
+ u8 untag_mask, bool add)
+{
+ struct prueth *prueth = emac->prueth;
+ struct prueth_vlan_tbl *tbl;
+ u8 fid_c1;
+
+ tbl = prueth->vlan_tbl;
+ fid_c1 = tbl[vid].fid_c1;
+
+ /* FID_C1: bit0..2 port membership mask,
+ * bit3..5 tagging mask for each port
+ * bit6 Stream VID (not handled currently)
+ * bit7 MC flood (not handled currently)
+ */
+ if (add) {
+ fid_c1 |= (port_mask | port_mask << 3);
+ fid_c1 &= ~(untag_mask << 3);
+ } else {
+ fid_c1 &= ~(port_mask | port_mask << 3);
+ }
+
+ tbl[vid].fid_c1 = fid_c1;
+}
+EXPORT_SYMBOL_GPL(icssg_vtbl_modify);
+
+u16 icssg_get_pvid(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ u32 pvid;
+
+ if (emac->port_id == PRUETH_PORT_MII0)
+ pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
+ else
+ pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);
+
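+	/* The default VLAN record is big-endian in shared RAM; the
+	 * VID lands in the top byte of the value read back.
+	 */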
+ pvid = pvid >> 24;
+
+ return pvid;
+}
+EXPORT_SYMBOL_GPL(icssg_get_pvid);
+
+void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port)
+{
+ u32 pvid;
+
+ /* only 256 VLANs are supported */
+ pvid = (u32 __force)cpu_to_be32((ETH_P_8021Q << 16) | (vid & 0xff));
+
+ if (port == PRUETH_PORT_MII0)
+ writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET);
+ else if (port == PRUETH_PORT_MII1)
+ writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET);
+ else
+ writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET);
+}
+EXPORT_SYMBOL_GPL(icssg_set_pvid);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h
index 43eb0922172a..1ac60283923b 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_config.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_config.h
@@ -35,6 +35,15 @@ struct icssg_flow_cfg {
(2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \
PRUETH_EMAC_RX_CTX_BUF_SIZE * 2))
+#define PRUETH_SW_BUF_POOL_SIZE_HOST SZ_4K
+#define PRUETH_SW_NUM_BUF_POOLS_HOST 8
+#define PRUETH_SW_NUM_BUF_POOLS_PER_PRU 4
+#define MSMC_RAM_SIZE_SWITCH_MODE \
+ (MSMC_RAM_SIZE + \
+ (2 * PRUETH_SW_BUF_POOL_SIZE_HOST * PRUETH_SW_NUM_BUF_POOLS_HOST))
+
+#define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1)
+
struct icssg_rxq_ctx {
__le32 start[3];
__le32 end;
@@ -109,6 +118,62 @@ enum icssg_port_state_cmd {
#define ICSSG_FLAG_MASK 0xff00ffff
+/* SR1.0-specific bits */
+#define PRUETH_MAX_RX_FLOWS_SR1 4 /* excluding default flow */
+#define PRUETH_RX_FLOW_DATA_SR1 3 /* highest priority flow */
+#define PRUETH_MAX_RX_MGM_DESC_SR1 8
+#define PRUETH_MAX_RX_MGM_FLOWS_SR1 2 /* excluding default flow */
+#define PRUETH_RX_MGM_FLOW_RESPONSE_SR1 0
+#define PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1 1
+
+#define PRUETH_NUM_BUF_POOLS_SR1 16
+#define PRUETH_EMAC_BUF_POOL_START_SR1 8
+#define PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1 128
+#define PRUETH_EMAC_BUF_SIZE_SR1 1536
+#define PRUETH_EMAC_NUM_BUF_SR1 4
+#define PRUETH_EMAC_BUF_POOL_SIZE_SR1 (PRUETH_EMAC_NUM_BUF_SR1 * \
+ PRUETH_EMAC_BUF_SIZE_SR1)
+#define MSMC_RAM_SIZE_SR1 (SZ_64K + SZ_32K + SZ_2K) /* 0x1880 x 8 x 2 */
+
+struct icssg_sr1_config {
+ __le32 status; /* Firmware status */
+ __le32 addr_lo; /* MSMC Buffer pool base address low. */
+ __le32 addr_hi; /* MSMC Buffer pool base address high. Must be 0 */
+ __le32 tx_buf_sz[16]; /* Array of buffer pool sizes */
+ __le32 num_tx_threads; /* Number of active egress threads, 1 to 4 */
+ __le32 tx_rate_lim_en; /* Bitmask: Egress rate limit en per thread */
+ __le32 rx_flow_id; /* RX flow id for first rx ring */
+ __le32 rx_mgr_flow_id; /* RX flow id for the first management ring */
+ __le32 flags; /* TBD */
+ __le32 n_burst; /* for debug */
+ __le32 rtu_status; /* RTU status */
+ __le32 info; /* reserved */
+ __le32 reserve;
+ __le32 rand_seed; /* Used for the random number generation at fw */
+} __packed;
+
+/* SR1.0 shutdown command to stop processing in the firmware.
+ * Command format: 0x8101ss00, where
+ * - ss: sequence number. Currently not used by the driver.
+ */
+#define ICSSG_SHUTDOWN_CMD_SR1 0x81010000
+
+/* SR1.0 pstate speed/duplex command to set the speed and duplex
+ * settings in the firmware.
+ * Command format: 0x8102ssPN, where
+ * - ss: sequence number. Currently not used by driver.
+ * - P: port number (for switch mode).
+ * - N: Speed/Duplex state:
+ * 0x0 - 10Mbps/Half duplex;
+ * 0x8 - 10Mbps/Full duplex;
+ * 0x2 - 100Mbps/Half duplex;
+ * 0xa - 100Mbps/Full duplex;
+ * 0xc - 1Gbps/Full duplex;
+ * NOTE: The above are the same value as bits [3..1](slice 0)
+ * or bits [7..5](slice 1) of RGMII CFG register.
+ */
+#define ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1 0x81020000
+
struct icssg_setclock_desc {
u8 request;
u8 restore;
@@ -146,6 +211,23 @@ struct icssg_setclock_desc {
#define ICSSG_TS_PUSH_SLICE0 40
#define ICSSG_TS_PUSH_SLICE1 41
+struct mgmt_cmd {
+ u8 param;
+ u8 seqnum;
+ u8 type;
+ u8 header;
+ u32 cmd_args[3];
+};
+
+struct mgmt_cmd_rsp {
+ u32 reserved;
+ u8 status;
+ u8 seqnum;
+ u8 type;
+ u8 header;
+ u32 cmd_args[3];
+};
+
/* FDB FID_C2 flag definitions */
/* Indicates host port membership.*/
#define ICSSG_FDB_ENTRY_P0_MEMBERSHIP BIT(0)
diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
index 9a7dd7efcf69..5688f054cec5 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c
@@ -110,7 +110,7 @@ static void emac_get_ethtool_stats(struct net_device *ndev,
}
static int emac_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct prueth_emac *emac = netdev_priv(ndev);
@@ -142,6 +142,9 @@ static int emac_set_channels(struct net_device *ndev,
emac->tx_ch_num = ch->tx_count;
+ if (emac->is_sr1)
+ emac->tx_ch_num++;
+
return 0;
}
@@ -152,8 +155,17 @@ static void emac_get_channels(struct net_device *ndev,
ch->max_rx = 1;
ch->max_tx = PRUETH_MAX_TX_QUEUES;
+
+	/* Disable multiple TX channels on SR1.0 due to timeouts
+	 * when using more than one queue
+	 */
+ if (emac->is_sr1)
+ ch->max_tx = 1;
+
ch->rx_count = 1;
ch->tx_count = emac->tx_ch_num;
+
+ if (emac->is_sr1)
+ ch->tx_count--;
}
static const struct ethtool_rmon_hist_range emac_rmon_ranges[] = {
@@ -189,6 +201,93 @@ static void emac_get_rmon_stats(struct net_device *ndev,
rmon_stats->hist_tx[4] = emac_get_stat_by_name(emac, "tx_bucket5_frames");
}
+static int emac_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_tx_chn *tx_chn;
+
+ tx_chn = &emac->tx_chns[0];
+
+ coal->rx_coalesce_usecs = emac->rx_pace_timeout_ns / 1000;
+ coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout_ns / 1000;
+
+ return 0;
+}
+
+static int emac_get_per_queue_coalesce(struct net_device *ndev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth_tx_chn *tx_chn;
+
+ if (queue >= PRUETH_MAX_TX_QUEUES)
+ return -EINVAL;
+
+ tx_chn = &emac->tx_chns[queue];
+
+ coal->tx_coalesce_usecs = tx_chn->tx_pace_timeout_ns / 1000;
+
+ return 0;
+}
+
+static int emac_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct prueth_tx_chn *tx_chn;
+
+ tx_chn = &emac->tx_chns[0];
+
+ if (coal->rx_coalesce_usecs &&
+ coal->rx_coalesce_usecs < ICSSG_MIN_COALESCE_USECS) {
+ dev_info(prueth->dev, "defaulting to min value of %dus for rx-usecs\n",
+ ICSSG_MIN_COALESCE_USECS);
+ coal->rx_coalesce_usecs = ICSSG_MIN_COALESCE_USECS;
+ }
+
+ if (coal->tx_coalesce_usecs &&
+ coal->tx_coalesce_usecs < ICSSG_MIN_COALESCE_USECS) {
+ dev_info(prueth->dev, "defaulting to min value of %dus for tx-usecs\n",
+ ICSSG_MIN_COALESCE_USECS);
+ coal->tx_coalesce_usecs = ICSSG_MIN_COALESCE_USECS;
+ }
+
+ emac->rx_pace_timeout_ns = coal->rx_coalesce_usecs * 1000;
+ tx_chn->tx_pace_timeout_ns = coal->tx_coalesce_usecs * 1000;
+
+ return 0;
+}
+
+static int emac_set_per_queue_coalesce(struct net_device *ndev, u32 queue,
+ struct ethtool_coalesce *coal)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ struct prueth_tx_chn *tx_chn;
+
+ if (queue >= PRUETH_MAX_TX_QUEUES)
+ return -EINVAL;
+
+ tx_chn = &emac->tx_chns[queue];
+
+ if (coal->tx_coalesce_usecs &&
+ coal->tx_coalesce_usecs < ICSSG_MIN_COALESCE_USECS) {
+ dev_info(prueth->dev, "defaulting to min value of %dus for tx-usecs for tx-%u\n",
+ ICSSG_MIN_COALESCE_USECS, queue);
+ coal->tx_coalesce_usecs = ICSSG_MIN_COALESCE_USECS;
+ }
+
+ tx_chn->tx_pace_timeout_ns = coal->tx_coalesce_usecs * 1000;
+
+ return 0;
+}
+
const struct ethtool_ops icssg_ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
.get_msglevel = emac_get_msglevel,
@@ -197,6 +296,12 @@ const struct ethtool_ops icssg_ethtool_ops = {
.get_ethtool_stats = emac_get_ethtool_stats,
.get_strings = emac_get_strings,
.get_ts_info = emac_get_ts_info,
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_TX_USECS,
+ .get_coalesce = emac_get_coalesce,
+ .set_coalesce = emac_set_coalesce,
+ .get_per_queue_coalesce = emac_get_per_queue_coalesce,
+ .set_per_queue_coalesce = emac_set_per_queue_coalesce,
.get_channels = emac_get_channels,
.set_channels = emac_set_channels,
.get_link_ksettings = emac_get_link_ksettings,
@@ -207,3 +312,4 @@ const struct ethtool_ops icssg_ethtool_ops = {
.nway_reset = emac_nway_reset,
.get_rmon_stats = emac_get_rmon_stats,
};
+EXPORT_SYMBOL_GPL(icssg_ethtool_ops);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c
index 92718ae40d7e..b64955438bb2 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c
@@ -40,6 +40,7 @@ void icssg_mii_update_mtu(struct regmap *mii_rt, int mii, int mtu)
(mtu - 1) << PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT);
}
}
+EXPORT_SYMBOL_GPL(icssg_mii_update_mtu);
void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac)
{
@@ -66,6 +67,7 @@ void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac)
regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, full_duplex_mask,
full_duplex_val);
}
+EXPORT_SYMBOL_GPL(icssg_update_rgmii_cfg);
void icssg_miig_set_interface_mode(struct regmap *miig_rt, int mii, phy_interface_t phy_if)
{
@@ -105,6 +107,7 @@ u32 icssg_rgmii_get_speed(struct regmap *miig_rt, int mii)
return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift);
}
+EXPORT_SYMBOL_GPL(icssg_rgmii_get_speed);
u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii)
{
@@ -118,3 +121,4 @@ u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii)
return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift);
}
+EXPORT_SYMBOL_GPL(icssg_rgmii_get_fullduplex);
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index b69af69a1ccd..3e51b3a9b0a5 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -27,577 +27,22 @@
#include <linux/remoteproc/pruss.h>
#include <linux/regmap.h>
#include <linux/remoteproc.h>
+#include <net/switchdev.h>
#include "icssg_prueth.h"
#include "icssg_mii_rt.h"
+#include "icssg_switchdev.h"
#include "../k3-cppi-desc-pool.h"
#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver"
-/* Netif debug messages possible */
-#define PRUETH_EMAC_DEBUG (NETIF_MSG_DRV | \
- NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | \
- NETIF_MSG_IFDOWN | \
- NETIF_MSG_IFUP | \
- NETIF_MSG_RX_ERR | \
- NETIF_MSG_TX_ERR | \
- NETIF_MSG_TX_QUEUED | \
- NETIF_MSG_INTR | \
- NETIF_MSG_TX_DONE | \
- NETIF_MSG_RX_STATUS | \
- NETIF_MSG_PKTDATA | \
- NETIF_MSG_HW | \
- NETIF_MSG_WOL)
-
-#define prueth_napi_to_emac(napi) container_of(napi, struct prueth_emac, napi_rx)
+#define DEFAULT_VID 1
+#define DEFAULT_PORT_MASK 1
+#define DEFAULT_UNTAG_MASK 1
/* CTRLMMR_ICSSG_RGMII_CTRL register bits */
#define ICSSG_CTRL_RGMII_ID_MODE BIT(24)
-#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */
-
-static void prueth_cleanup_rx_chns(struct prueth_emac *emac,
- struct prueth_rx_chn *rx_chn,
- int max_rflows)
-{
- if (rx_chn->desc_pool)
- k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
-
- if (rx_chn->rx_chn)
- k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
-}
-
-static void prueth_cleanup_tx_chns(struct prueth_emac *emac)
-{
- int i;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
-
- if (tx_chn->desc_pool)
- k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
-
- if (tx_chn->tx_chn)
- k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
-
- /* Assume prueth_cleanup_tx_chns() is called at the
- * end after all channel resources are freed
- */
- memset(tx_chn, 0, sizeof(*tx_chn));
- }
-}
-
-static void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num)
-{
- int i;
-
- for (i = 0; i < num; i++) {
- struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
-
- if (tx_chn->irq)
- free_irq(tx_chn->irq, tx_chn);
- netif_napi_del(&tx_chn->napi_tx);
- }
-}
-
-static void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
- struct cppi5_host_desc_t *desc)
-{
- struct cppi5_host_desc_t *first_desc, *next_desc;
- dma_addr_t buf_dma, next_desc_dma;
- u32 buf_dma_len;
-
- first_desc = desc;
- next_desc = first_desc;
-
- cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
-
- dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_TO_DEVICE);
-
- next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
- while (next_desc_dma) {
- next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- next_desc_dma);
- cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
-
- dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_TO_DEVICE);
-
- next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
- k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
-
- k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
- }
-
- k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
-}
-
-static int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
- int budget)
-{
- struct net_device *ndev = emac->ndev;
- struct cppi5_host_desc_t *desc_tx;
- struct netdev_queue *netif_txq;
- struct prueth_tx_chn *tx_chn;
- unsigned int total_bytes = 0;
- struct sk_buff *skb;
- dma_addr_t desc_dma;
- int res, num_tx = 0;
- void **swdata;
-
- tx_chn = &emac->tx_chns[chn];
-
- while (true) {
- res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
- if (res == -ENODATA)
- break;
-
- /* teardown completion */
- if (cppi5_desc_is_tdcm(desc_dma)) {
- if (atomic_dec_and_test(&emac->tdown_cnt))
- complete(&emac->tdown_complete);
- break;
- }
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
- desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
-
- skb = *(swdata);
- prueth_xmit_free(tx_chn, desc_tx);
-
- ndev = skb->dev;
- ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += skb->len;
- total_bytes += skb->len;
- napi_consume_skb(skb, budget);
- num_tx++;
- }
-
- if (!num_tx)
- return 0;
-
- netif_txq = netdev_get_tx_queue(ndev, chn);
- netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
-
- if (netif_tx_queue_stopped(netif_txq)) {
- /* If the TX queue was stopped, wake it now
- * if we have enough room.
- */
- __netif_tx_lock(netif_txq, smp_processor_id());
- if (netif_running(ndev) &&
- (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
- MAX_SKB_FRAGS))
- netif_tx_wake_queue(netif_txq);
- __netif_tx_unlock(netif_txq);
- }
-
- return num_tx;
-}
-
-static int emac_napi_tx_poll(struct napi_struct *napi_tx, int budget)
-{
- struct prueth_tx_chn *tx_chn = prueth_napi_to_tx_chn(napi_tx);
- struct prueth_emac *emac = tx_chn->emac;
- int num_tx_packets;
-
- num_tx_packets = emac_tx_complete_packets(emac, tx_chn->id, budget);
-
- if (num_tx_packets >= budget)
- return budget;
-
- if (napi_complete_done(napi_tx, num_tx_packets))
- enable_irq(tx_chn->irq);
-
- return num_tx_packets;
-}
-
-static irqreturn_t prueth_tx_irq(int irq, void *dev_id)
-{
- struct prueth_tx_chn *tx_chn = dev_id;
-
- disable_irq_nosync(irq);
- napi_schedule(&tx_chn->napi_tx);
-
- return IRQ_HANDLED;
-}
-
-static int prueth_ndev_add_tx_napi(struct prueth_emac *emac)
-{
- struct prueth *prueth = emac->prueth;
- int i, ret;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
-
- netif_napi_add_tx(emac->ndev, &tx_chn->napi_tx, emac_napi_tx_poll);
- ret = request_irq(tx_chn->irq, prueth_tx_irq,
- IRQF_TRIGGER_HIGH, tx_chn->name,
- tx_chn);
- if (ret) {
- netif_napi_del(&tx_chn->napi_tx);
- dev_err(prueth->dev, "unable to request TX IRQ %d\n",
- tx_chn->irq);
- goto fail;
- }
- }
-
- return 0;
-fail:
- prueth_ndev_del_tx_napi(emac, i);
- return ret;
-}
-
-static int prueth_init_tx_chns(struct prueth_emac *emac)
-{
- static const struct k3_ring_cfg ring_cfg = {
- .elm_size = K3_RINGACC_RING_ELSIZE_8,
- .mode = K3_RINGACC_RING_MODE_RING,
- .flags = 0,
- .size = PRUETH_MAX_TX_DESC,
- };
- struct k3_udma_glue_tx_channel_cfg tx_cfg;
- struct device *dev = emac->prueth->dev;
- struct net_device *ndev = emac->ndev;
- int ret, slice, i;
- u32 hdesc_size;
-
- slice = prueth_emac_slice(emac);
- if (slice < 0)
- return slice;
-
- init_completion(&emac->tdown_complete);
-
- hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
- PRUETH_NAV_SW_DATA_SIZE);
- memset(&tx_cfg, 0, sizeof(tx_cfg));
- tx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
- tx_cfg.tx_cfg = ring_cfg;
- tx_cfg.txcq_cfg = ring_cfg;
-
- for (i = 0; i < emac->tx_ch_num; i++) {
- struct prueth_tx_chn *tx_chn = &emac->tx_chns[i];
-
- /* To differentiate channels for SLICE0 vs SLICE1 */
- snprintf(tx_chn->name, sizeof(tx_chn->name),
- "tx%d-%d", slice, i);
-
- tx_chn->emac = emac;
- tx_chn->id = i;
- tx_chn->descs_num = PRUETH_MAX_TX_DESC;
-
- tx_chn->tx_chn =
- k3_udma_glue_request_tx_chn(dev, tx_chn->name,
- &tx_cfg);
- if (IS_ERR(tx_chn->tx_chn)) {
- ret = PTR_ERR(tx_chn->tx_chn);
- tx_chn->tx_chn = NULL;
- netdev_err(ndev,
- "Failed to request tx dma ch: %d\n", ret);
- goto fail;
- }
-
- tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
- tx_chn->desc_pool =
- k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
- tx_chn->descs_num,
- hdesc_size,
- tx_chn->name);
- if (IS_ERR(tx_chn->desc_pool)) {
- ret = PTR_ERR(tx_chn->desc_pool);
- tx_chn->desc_pool = NULL;
- netdev_err(ndev, "Failed to create tx pool: %d\n", ret);
- goto fail;
- }
-
- ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
- if (ret < 0) {
- netdev_err(ndev, "failed to get tx irq\n");
- goto fail;
- }
- tx_chn->irq = ret;
-
- snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
- dev_name(dev), tx_chn->id);
- }
-
- return 0;
-
-fail:
- prueth_cleanup_tx_chns(emac);
- return ret;
-}
-
-static int prueth_init_rx_chns(struct prueth_emac *emac,
- struct prueth_rx_chn *rx_chn,
- char *name, u32 max_rflows,
- u32 max_desc_num)
-{
- struct k3_udma_glue_rx_channel_cfg rx_cfg;
- struct device *dev = emac->prueth->dev;
- struct net_device *ndev = emac->ndev;
- u32 fdqring_id, hdesc_size;
- int i, ret = 0, slice;
-
- slice = prueth_emac_slice(emac);
- if (slice < 0)
- return slice;
-
- /* To differentiate channels for SLICE0 vs SLICE1 */
- snprintf(rx_chn->name, sizeof(rx_chn->name), "%s%d", name, slice);
-
- hdesc_size = cppi5_hdesc_calc_size(true, PRUETH_NAV_PS_DATA_SIZE,
- PRUETH_NAV_SW_DATA_SIZE);
- memset(&rx_cfg, 0, sizeof(rx_cfg));
- rx_cfg.swdata_size = PRUETH_NAV_SW_DATA_SIZE;
- rx_cfg.flow_id_num = max_rflows;
- rx_cfg.flow_id_base = -1; /* udmax will auto select flow id base */
-
- /* init all flows */
- rx_chn->dev = dev;
- rx_chn->descs_num = max_desc_num;
-
- rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, rx_chn->name,
- &rx_cfg);
- if (IS_ERR(rx_chn->rx_chn)) {
- ret = PTR_ERR(rx_chn->rx_chn);
- rx_chn->rx_chn = NULL;
- netdev_err(ndev, "Failed to request rx dma ch: %d\n", ret);
- goto fail;
- }
-
- rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
- rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
- rx_chn->descs_num,
- hdesc_size,
- rx_chn->name);
- if (IS_ERR(rx_chn->desc_pool)) {
- ret = PTR_ERR(rx_chn->desc_pool);
- rx_chn->desc_pool = NULL;
- netdev_err(ndev, "Failed to create rx pool: %d\n", ret);
- goto fail;
- }
-
- emac->rx_flow_id_base = k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
- netdev_dbg(ndev, "flow id base = %d\n", emac->rx_flow_id_base);
-
- fdqring_id = K3_RINGACC_RING_ID_ANY;
- for (i = 0; i < rx_cfg.flow_id_num; i++) {
- struct k3_ring_cfg rxring_cfg = {
- .elm_size = K3_RINGACC_RING_ELSIZE_8,
- .mode = K3_RINGACC_RING_MODE_RING,
- .flags = 0,
- };
- struct k3_ring_cfg fdqring_cfg = {
- .elm_size = K3_RINGACC_RING_ELSIZE_8,
- .flags = K3_RINGACC_RING_SHARED,
- };
- struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
- .rx_cfg = rxring_cfg,
- .rxfdq_cfg = fdqring_cfg,
- .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
- .src_tag_lo_sel =
- K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
- };
-
- rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
- rx_flow_cfg.rx_cfg.size = max_desc_num;
- rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
- rx_flow_cfg.rxfdq_cfg.mode = emac->prueth->pdata.fdqring_mode;
-
- ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
- i, &rx_flow_cfg);
- if (ret) {
- netdev_err(ndev, "Failed to init rx flow%d %d\n",
- i, ret);
- goto fail;
- }
- if (!i)
- fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
- i);
- ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
- if (ret <= 0) {
- if (!ret)
- ret = -ENXIO;
- netdev_err(ndev, "Failed to get rx dma irq");
- goto fail;
- }
- rx_chn->irq[i] = ret;
- }
-
- return 0;
-
-fail:
- prueth_cleanup_rx_chns(emac, rx_chn, max_rflows);
- return ret;
-}
-
-static int prueth_dma_rx_push(struct prueth_emac *emac,
- struct sk_buff *skb,
- struct prueth_rx_chn *rx_chn)
-{
- struct net_device *ndev = emac->ndev;
- struct cppi5_host_desc_t *desc_rx;
- u32 pkt_len = skb_tailroom(skb);
- dma_addr_t desc_dma;
- dma_addr_t buf_dma;
- void **swdata;
-
- desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
- if (!desc_rx) {
- netdev_err(ndev, "rx push: failed to allocate descriptor\n");
- return -ENOMEM;
- }
- desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
-
- buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
- netdev_err(ndev, "rx push: failed to map rx pkt buffer\n");
- return -EINVAL;
- }
-
- cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
- PRUETH_NAV_PS_DATA_SIZE);
- k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
-
- swdata = cppi5_hdesc_get_swdata(desc_rx);
- *swdata = skb;
-
- return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0,
- desc_rx, desc_dma);
-}
-
-static u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns)
-{
- u32 iepcount_lo, iepcount_hi, hi_rollover_count;
- u64 ns;
-
- iepcount_lo = lo & GENMASK(19, 0);
- iepcount_hi = (hi & GENMASK(11, 0)) << 12 | lo >> 20;
- hi_rollover_count = hi >> 11;
-
- ns = ((u64)hi_rollover_count) << 23 | (iepcount_hi + hi_sw);
- ns = ns * cycle_time_ns + iepcount_lo;
-
- return ns;
-}
-
-static void emac_rx_timestamp(struct prueth_emac *emac,
- struct sk_buff *skb, u32 *psdata)
-{
- struct skb_shared_hwtstamps *ssh;
- u64 ns;
-
- u32 hi_sw = readl(emac->prueth->shram.va +
- TIMESYNC_FW_WC_COUNT_HI_SW_OFFSET_OFFSET);
- ns = icssg_ts_to_ns(hi_sw, psdata[1], psdata[0],
- IEP_DEFAULT_CYCLE_TIME_NS);
-
- ssh = skb_hwtstamps(skb);
- memset(ssh, 0, sizeof(*ssh));
- ssh->hwtstamp = ns_to_ktime(ns);
-}
-
-static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id)
-{
- struct prueth_rx_chn *rx_chn = &emac->rx_chns;
- u32 buf_dma_len, pkt_len, port_id = 0;
- struct net_device *ndev = emac->ndev;
- struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb, *new_skb;
- dma_addr_t desc_dma, buf_dma;
- void **swdata;
- u32 *psdata;
- int ret;
-
- ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
- if (ret) {
- if (ret != -ENODATA)
- netdev_err(ndev, "rx pop: failed: %d\n", ret);
- return ret;
- }
-
- if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown ? */
- return 0;
-
- desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
-
- swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
-
- psdata = cppi5_hdesc_get_psdata(desc_rx);
- /* RX HW timestamp */
- if (emac->rx_ts_enabled)
- emac_rx_timestamp(emac, skb, psdata);
-
- cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
- pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
- /* firmware adds 4 CRC bytes, strip them */
- pkt_len -= 4;
- cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
-
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- skb->dev = ndev;
- new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
- /* if allocation fails we drop the packet but push the
- * descriptor back to the ring with old skb to prevent a stall
- */
- if (!new_skb) {
- ndev->stats.rx_dropped++;
- new_skb = skb;
- } else {
- /* send the filled skb up the n/w stack */
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, ndev);
- napi_gro_receive(&emac->napi_rx, skb);
- ndev->stats.rx_bytes += pkt_len;
- ndev->stats.rx_packets++;
- }
-
- /* queue another RX DMA */
- ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_chns);
- if (WARN_ON(ret < 0)) {
- dev_kfree_skb_any(new_skb);
- ndev->stats.rx_errors++;
- ndev->stats.rx_dropped++;
- }
-
- return ret;
-}
-
-static void prueth_rx_cleanup(void *data, dma_addr_t desc_dma)
-{
- struct prueth_rx_chn *rx_chn = data;
- struct cppi5_host_desc_t *desc_rx;
- struct sk_buff *skb;
- dma_addr_t buf_dma;
- u32 buf_dma_len;
- void **swdata;
-
- desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_rx);
- skb = *swdata;
- cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
- k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
-
- dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len,
- DMA_FROM_DEVICE);
- k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
-
- dev_kfree_skb_any(skb);
-}
-
static int emac_get_tx_ts(struct prueth_emac *emac,
struct emac_tx_ts_response *rsp)
{
@@ -663,208 +108,6 @@ static void tx_ts_work(struct prueth_emac *emac)
}
}
-static int prueth_tx_ts_cookie_get(struct prueth_emac *emac)
-{
- int i;
-
- /* search and get the next free slot */
- for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
- if (!emac->tx_ts_skb[i]) {
- emac->tx_ts_skb[i] = ERR_PTR(-EBUSY); /* reserve slot */
- return i;
- }
- }
-
- return -EBUSY;
-}
-
-/**
- * emac_ndo_start_xmit - EMAC Transmit function
- * @skb: SKB pointer
- * @ndev: EMAC network adapter
- *
- * Called by the system to transmit a packet - we queue the packet in
- * EMAC hardware transmit queue
- * Doesn't wait for completion we'll check for TX completion in
- * emac_tx_complete_packets().
- *
- * Return: enum netdev_tx
- */
-static enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
- struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
- struct prueth_emac *emac = netdev_priv(ndev);
- struct netdev_queue *netif_txq;
- struct prueth_tx_chn *tx_chn;
- dma_addr_t desc_dma, buf_dma;
- int i, ret = 0, q_idx;
- bool in_tx_ts = 0;
- int tx_ts_cookie;
- void **swdata;
- u32 pkt_len;
- u32 *epib;
-
- pkt_len = skb_headlen(skb);
- q_idx = skb_get_queue_mapping(skb);
-
- tx_chn = &emac->tx_chns[q_idx];
- netif_txq = netdev_get_tx_queue(ndev, q_idx);
-
- /* Map the linear buffer */
- buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len, DMA_TO_DEVICE);
- if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
- netdev_err(ndev, "tx: failed to map skb buffer\n");
- ret = NETDEV_TX_OK;
- goto drop_free_skb;
- }
-
- first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
- if (!first_desc) {
- netdev_dbg(ndev, "tx: failed to allocate descriptor\n");
- dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
- goto drop_stop_q_busy;
- }
-
- cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
- PRUETH_NAV_PS_DATA_SIZE);
- cppi5_hdesc_set_pkttype(first_desc, 0);
- epib = first_desc->epib;
- epib[0] = 0;
- epib[1] = 0;
- if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- emac->tx_ts_enabled) {
- tx_ts_cookie = prueth_tx_ts_cookie_get(emac);
- if (tx_ts_cookie >= 0) {
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- /* Request TX timestamp */
- epib[0] = (u32)tx_ts_cookie;
- epib[1] = 0x80000000; /* TX TS request */
- emac->tx_ts_skb[tx_ts_cookie] = skb_get(skb);
- in_tx_ts = 1;
- }
- }
-
- /* set dst tag to indicate internal qid at the firmware which is at
- * bit8..bit15. bit0..bit7 indicates port num for directed
- * packets in case of switch mode operation
- */
- cppi5_desc_set_tags_ids(&first_desc->hdr, 0, (emac->port_id | (q_idx << 8)));
- k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
- swdata = cppi5_hdesc_get_swdata(first_desc);
- *swdata = skb;
-
- /* Handle the case where skb is fragmented in pages */
- cur_desc = first_desc;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- u32 frag_size = skb_frag_size(frag);
-
- next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
- if (!next_desc) {
- netdev_err(ndev,
- "tx: failed to allocate frag. descriptor\n");
- goto free_desc_stop_q_busy_cleanup_tx_ts;
- }
-
- buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
- netdev_err(ndev, "tx: Failed to map skb page\n");
- k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
- ret = NETDEV_TX_OK;
- goto cleanup_tx_ts;
- }
-
- cppi5_hdesc_reset_hbdesc(next_desc);
- k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
- cppi5_hdesc_attach_buf(next_desc,
- buf_dma, frag_size, buf_dma, frag_size);
-
- desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
- next_desc);
- k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
- cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
-
- pkt_len += frag_size;
- cur_desc = next_desc;
- }
- WARN_ON_ONCE(pkt_len != skb->len);
-
- /* report bql before sending packet */
- netdev_tx_sent_queue(netif_txq, pkt_len);
-
- cppi5_hdesc_set_pktlen(first_desc, pkt_len);
- desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
- /* cppi5_desc_dump(first_desc, 64); */
-
- skb_tx_timestamp(skb); /* SW timestamp if SKBTX_IN_PROGRESS not set */
- ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
- if (ret) {
- netdev_err(ndev, "tx: push failed: %d\n", ret);
- goto drop_free_descs;
- }
-
- if (in_tx_ts)
- atomic_inc(&emac->tx_ts_pending);
-
- if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
- netif_tx_stop_queue(netif_txq);
- /* Barrier, so that stop_queue visible to other cpus */
- smp_mb__after_atomic();
-
- if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
- MAX_SKB_FRAGS)
- netif_tx_wake_queue(netif_txq);
- }
-
- return NETDEV_TX_OK;
-
-cleanup_tx_ts:
- if (in_tx_ts) {
- dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
- emac->tx_ts_skb[tx_ts_cookie] = NULL;
- }
-
-drop_free_descs:
- prueth_xmit_free(tx_chn, first_desc);
-
-drop_free_skb:
- dev_kfree_skb_any(skb);
-
- /* error */
- ndev->stats.tx_dropped++;
- netdev_err(ndev, "tx: error: %d\n", ret);
-
- return ret;
-
-free_desc_stop_q_busy_cleanup_tx_ts:
- if (in_tx_ts) {
- dev_kfree_skb_any(emac->tx_ts_skb[tx_ts_cookie]);
- emac->tx_ts_skb[tx_ts_cookie] = NULL;
- }
- prueth_xmit_free(tx_chn, first_desc);
-
-drop_stop_q_busy:
- netif_tx_stop_queue(netif_txq);
- return NETDEV_TX_BUSY;
-}
-
-static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma)
-{
- struct prueth_tx_chn *tx_chn = data;
- struct cppi5_host_desc_t *desc_tx;
- struct sk_buff *skb;
- void **swdata;
-
- desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
- swdata = cppi5_hdesc_get_swdata(desc_tx);
- skb = *(swdata);
- prueth_xmit_free(tx_chn, desc_tx);
-
- dev_kfree_skb_any(skb);
-}
-
static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
{
struct prueth_emac *emac = dev_id;
@@ -875,20 +118,17 @@ static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static irqreturn_t prueth_rx_irq(int irq, void *dev_id)
-{
- struct prueth_emac *emac = dev_id;
-
- disable_irq_nosync(irq);
- napi_schedule(&emac->napi_rx);
-
- return IRQ_HANDLED;
-}
-
-struct icssg_firmwares {
- char *pru;
- char *rtu;
- char *txpru;
+static struct icssg_firmwares icssg_switch_firmwares[] = {
+ {
+ .pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf",
+ },
+ {
+ .pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf",
+ .rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf",
+ .txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf",
+ }
};
static struct icssg_firmwares icssg_emac_firmwares[] = {
@@ -910,7 +150,10 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
struct device *dev = prueth->dev;
int slice, ret;
- firmwares = icssg_emac_firmwares;
+ if (prueth->is_switch_mode)
+ firmwares = icssg_switch_firmwares;
+ else
+ firmwares = icssg_emac_firmwares;
slice = prueth_emac_slice(emac);
if (slice < 0) {
@@ -955,41 +198,6 @@ halt_pru:
return ret;
}
-static void prueth_emac_stop(struct prueth_emac *emac)
-{
- struct prueth *prueth = emac->prueth;
- int slice;
-
- switch (emac->port_id) {
- case PRUETH_PORT_MII0:
- slice = ICSS_SLICE0;
- break;
- case PRUETH_PORT_MII1:
- slice = ICSS_SLICE1;
- break;
- default:
- netdev_err(emac->ndev, "invalid port\n");
- return;
- }
-
- emac->fw_running = 0;
- rproc_shutdown(prueth->txpru[slice]);
- rproc_shutdown(prueth->rtu[slice]);
- rproc_shutdown(prueth->pru[slice]);
-}
-
-static void prueth_cleanup_tx_ts(struct prueth_emac *emac)
-{
- int i;
-
- for (i = 0; i < PRUETH_MAX_TX_TS_REQUESTS; i++) {
- if (emac->tx_ts_skb[i]) {
- dev_kfree_skb_any(emac->tx_ts_skb[i]);
- emac->tx_ts_skb[i] = NULL;
- }
- }
-}
-
/* called back by PHY layer if there is a change in link state of HW port */
static void emac_adjust_link(struct net_device *ndev)
{
@@ -1041,10 +249,10 @@ static void emac_adjust_link(struct net_device *ndev)
icssg_config_ipg(emac);
spin_unlock_irqrestore(&emac->lock, flags);
icssg_config_set_speed(emac);
- emac_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD);
} else {
- emac_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE);
}
}
@@ -1057,84 +265,14 @@ static void emac_adjust_link(struct net_device *ndev)
}
}
-static int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget)
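+/* RX IRQ pacing: instead of re-enabling the RX IRQ from NAPI directly,
+ * this hrtimer callback unmasks it once the coalescing interval
+ * (rx_pace_timeout_ns) has elapsed.
+ */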
+static enum hrtimer_restart emac_rx_timer_callback(struct hrtimer *timer)
{
- struct prueth_emac *emac = prueth_napi_to_emac(napi_rx);
+ struct prueth_emac *emac =
+ container_of(timer, struct prueth_emac, rx_hrtimer);
int rx_flow = PRUETH_RX_FLOW_DATA;
- int flow = PRUETH_MAX_RX_FLOWS;
- int num_rx = 0;
- int cur_budget;
- int ret;
-
- while (flow--) {
- cur_budget = budget - num_rx;
-
- while (cur_budget--) {
- ret = emac_rx_packet(emac, flow);
- if (ret)
- break;
- num_rx++;
- }
-
- if (num_rx >= budget)
- break;
- }
-
- if (num_rx < budget && napi_complete_done(napi_rx, num_rx))
- enable_irq(emac->rx_chns.irq[rx_flow]);
-
- return num_rx;
-}
-
-static int prueth_prepare_rx_chan(struct prueth_emac *emac,
- struct prueth_rx_chn *chn,
- int buf_size)
-{
- struct sk_buff *skb;
- int i, ret;
-
- for (i = 0; i < chn->descs_num; i++) {
- skb = __netdev_alloc_skb_ip_align(NULL, buf_size, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
-
- ret = prueth_dma_rx_push(emac, skb, chn);
- if (ret < 0) {
- netdev_err(emac->ndev,
- "cannot submit skb for rx chan %s ret %d\n",
- chn->name, ret);
- kfree_skb(skb);
- return ret;
- }
- }
- return 0;
-}
-
-static void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
- bool free_skb)
-{
- int i;
-
- for (i = 0; i < ch_num; i++) {
- if (free_skb)
- k3_udma_glue_reset_tx_chn(emac->tx_chns[i].tx_chn,
- &emac->tx_chns[i],
- prueth_tx_cleanup);
- k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn);
- }
-}
-
-static void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
- int num_flows, bool disable)
-{
- int i;
-
- for (i = 0; i < num_flows; i++)
- k3_udma_glue_reset_rx_chn(chn->rx_chn, i, chn,
- prueth_rx_cleanup, !!i);
- if (disable)
- k3_udma_glue_disable_rx_chn(chn->rx_chn);
+ enable_irq(emac->rx_chns.irq[rx_flow]);
+ return HRTIMER_NORESTART;
}
static int emac_phy_connect(struct prueth_emac *emac)
@@ -1301,6 +439,37 @@ const struct icss_iep_clockops prueth_iep_clockops = {
.perout_enable = prueth_perout_enable,
};
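+/* Add this port to the FDB and VLAN table entries for a multicast
+ * address, preserving any ports already subscribed via the FDB lookup.
+ */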
+static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int port_mask = BIT(emac->port_id);
+
+ port_mask |= icssg_fdb_lookup(emac, addr, 0);
+ icssg_fdb_add_del(emac, addr, 0, port_mask, true);
+ icssg_vtbl_modify(emac, 0, port_mask, port_mask, true);
+
+ return 0;
+}
+
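+/* Remove this port from a multicast entry; if the other port is still
+ * subscribed, re-add the entry with the remaining member mask.
+ */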
+static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int port_mask = BIT(emac->port_id);
+ int other_port_mask;
+
+ other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0);
+
+ icssg_fdb_add_del(emac, addr, 0, port_mask, false);
+ icssg_vtbl_modify(emac, 0, port_mask, port_mask, false);
+
+ if (other_port_mask) {
+ icssg_fdb_add_del(emac, addr, 0, other_port_mask, true);
+ icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true);
+ }
+
+ return 0;
+}
+
/**
* emac_ndo_open - EMAC device open
* @ndev: network adapter device
@@ -1329,10 +498,9 @@ static int emac_ndo_open(struct net_device *ndev)
ether_addr_copy(emac->mac_addr, ndev->dev_addr);
icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
+ icssg_class_default(prueth->miig_rt, slice, 0, false);
icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
- icssg_class_default(prueth->miig_rt, slice, 0);
-
/* Notify the stack of the actual queue counts. */
ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
if (ret) {
@@ -1462,6 +630,8 @@ static int emac_ndo_stop(struct net_device *ndev)
icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
+ __dev_mc_unsync(ndev, icssg_prueth_del_mcast);
+
atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
/* ensure new tdown_cnt value is visible */
smp_mb__after_atomic();
@@ -1476,8 +646,10 @@ static int emac_ndo_stop(struct net_device *ndev)
netdev_err(ndev, "tx teardown timeout\n");
prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
- for (i = 0; i < emac->tx_ch_num; i++)
+ for (i = 0; i < emac->tx_ch_num; i++) {
napi_disable(&emac->tx_chns[i].napi_tx);
+ hrtimer_cancel(&emac->tx_chns[i].tx_hrtimer);
+ }
max_rx_flows = PRUETH_MAX_RX_FLOWS;
k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
@@ -1485,6 +657,7 @@ static int emac_ndo_stop(struct net_device *ndev)
prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
napi_disable(&emac->napi_rx);
+ hrtimer_cancel(&emac->rx_hrtimer);
cancel_work_sync(&emac->rx_mode_work);
@@ -1510,11 +683,6 @@ static int emac_ndo_stop(struct net_device *ndev)
return 0;
}
-static void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
-{
- ndev->stats.tx_errors++;
-}
-
static void emac_ndo_set_rx_mode_work(struct work_struct *work)
{
struct prueth_emac *emac = container_of(work, struct prueth_emac, rx_mode_work);
@@ -1526,24 +694,21 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work)
promisc = ndev->flags & IFF_PROMISC;
allmulti = ndev->flags & IFF_ALLMULTI;
- emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
- emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE);
if (promisc) {
- emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
- emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
return;
}
if (allmulti) {
- emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
return;
}
- if (!netdev_mc_empty(ndev)) {
- emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE);
- return;
- }
+ __dev_mc_sync(ndev, icssg_prueth_add_mcast, icssg_prueth_del_mcast);
}
/**
@@ -1560,165 +725,19 @@ static void emac_ndo_set_rx_mode(struct net_device *ndev)
queue_work(emac->cmd_wq, &emac->rx_mode_work);
}
-static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr)
-{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct hwtstamp_config config;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- emac->tx_ts_enabled = 0;
- break;
- case HWTSTAMP_TX_ON:
- emac->tx_ts_enabled = 1;
- break;
- default:
- return -ERANGE;
- }
-
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- emac->rx_ts_enabled = 0;
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
- emac->rx_ts_enabled = 1;
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- return -ERANGE;
- }
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr)
-{
- struct prueth_emac *emac = netdev_priv(ndev);
- struct hwtstamp_config config;
-
- config.flags = 0;
- config.tx_type = emac->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
- config.rx_filter = emac->rx_ts_enabled ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
-
- return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
- -EFAULT : 0;
-}
-
-static int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return emac_get_ts_config(ndev, ifr);
- case SIOCSHWTSTAMP:
- return emac_set_ts_config(ndev, ifr);
- default:
- break;
- }
-
- return phy_do_ioctl(ndev, ifr, cmd);
-}
-
-static void emac_ndo_get_stats64(struct net_device *ndev,
- struct rtnl_link_stats64 *stats)
-{
- struct prueth_emac *emac = netdev_priv(ndev);
-
- emac_update_hardware_stats(emac);
-
- stats->rx_packets = emac_get_stat_by_name(emac, "rx_packets");
- stats->rx_bytes = emac_get_stat_by_name(emac, "rx_bytes");
- stats->tx_packets = emac_get_stat_by_name(emac, "tx_packets");
- stats->tx_bytes = emac_get_stat_by_name(emac, "tx_bytes");
- stats->rx_crc_errors = emac_get_stat_by_name(emac, "rx_crc_errors");
- stats->rx_over_errors = emac_get_stat_by_name(emac, "rx_over_errors");
- stats->multicast = emac_get_stat_by_name(emac, "rx_multicast_frames");
-
- stats->rx_errors = ndev->stats.rx_errors;
- stats->rx_dropped = ndev->stats.rx_dropped;
- stats->tx_errors = ndev->stats.tx_errors;
- stats->tx_dropped = ndev->stats.tx_dropped;
-}
-
-static int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name,
- size_t len)
-{
- struct prueth_emac *emac = netdev_priv(ndev);
- int ret;
-
- ret = snprintf(name, len, "p%d", emac->port_id);
- if (ret >= len)
- return -EINVAL;
-
- return 0;
-}
-
static const struct net_device_ops emac_netdev_ops = {
.ndo_open = emac_ndo_open,
.ndo_stop = emac_ndo_stop,
- .ndo_start_xmit = emac_ndo_start_xmit,
+ .ndo_start_xmit = icssg_ndo_start_xmit,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
- .ndo_tx_timeout = emac_ndo_tx_timeout,
+ .ndo_tx_timeout = icssg_ndo_tx_timeout,
.ndo_set_rx_mode = emac_ndo_set_rx_mode,
- .ndo_eth_ioctl = emac_ndo_ioctl,
- .ndo_get_stats64 = emac_ndo_get_stats64,
- .ndo_get_phys_port_name = emac_ndo_get_phys_port_name,
+ .ndo_eth_ioctl = icssg_ndo_ioctl,
+ .ndo_get_stats64 = icssg_ndo_get_stats64,
+ .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
};
-/* get emac_port corresponding to eth_node name */
-static int prueth_node_port(struct device_node *eth_node)
-{
- u32 port_id;
- int ret;
-
- ret = of_property_read_u32(eth_node, "reg", &port_id);
- if (ret)
- return ret;
-
- if (port_id == 0)
- return PRUETH_PORT_MII0;
- else if (port_id == 1)
- return PRUETH_PORT_MII1;
- else
- return PRUETH_PORT_INVALID;
-}
-
-/* get MAC instance corresponding to eth_node name */
-static int prueth_node_mac(struct device_node *eth_node)
-{
- u32 port_id;
- int ret;
-
- ret = of_property_read_u32(eth_node, "reg", &port_id);
- if (ret)
- return ret;
-
- if (port_id == 0)
- return PRUETH_MAC0;
- else if (port_id == 1)
- return PRUETH_MAC1;
- else
- return PRUETH_MAC_INVALID;
-}
-
static int prueth_netdev_init(struct prueth *prueth,
struct device_node *eth_node)
{
@@ -1752,7 +771,7 @@ static int prueth_netdev_init(struct prueth *prueth,
}
INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work);
- INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler);
+ INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
ret = pruss_request_mem_region(prueth->pruss,
port == PRUETH_PORT_MII0 ?
@@ -1845,7 +864,10 @@ static int prueth_netdev_init(struct prueth *prueth,
ndev->hw_features = NETIF_F_SG;
ndev->features = ndev->hw_features;
- netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll);
+ netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
+ hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL_PINNED);
+ emac->rx_hrtimer.function = &emac_rx_timer_callback;
prueth->emac[mac] = emac;
return 0;
@@ -1862,88 +884,212 @@ free_ndev:
return ret;
}
-static void prueth_netdev_exit(struct prueth *prueth,
- struct device_node *eth_node)
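+/* Used by the switchdev code to identify our ports: only running
+ * ICSSG netdevs whose PRU complex is in switch mode are offloaded.
+ */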
+bool prueth_dev_check(const struct net_device *ndev)
{
- struct prueth_emac *emac;
- enum prueth_mac mac;
+ if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) {
+ struct prueth_emac *emac = netdev_priv(ndev);
- mac = prueth_node_mac(eth_node);
- if (mac == PRUETH_MAC_INVALID)
- return;
+ return emac->prueth->is_switch_mode;
+ }
- emac = prueth->emac[mac];
- if (!emac)
- return;
+ return false;
+}
- if (of_phy_is_fixed_link(emac->phy_node))
- of_phy_deregister_fixed_link(emac->phy_node);
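+/* Hardware can forward between ports only when both MII0 and MII1 are
+ * members of the same bridge, so offload_fwd_mark is set on all ports
+ * exactly in that case.
+ */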
+static void prueth_offload_fwd_mark_update(struct prueth *prueth)
+{
+ int set_val = 0;
+ int i;
- netif_napi_del(&emac->napi_rx);
+ if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1)))
+ set_val = 1;
- pruss_release_mem_region(prueth->pruss, &emac->dram);
- destroy_workqueue(emac->cmd_wq);
- free_netdev(emac->ndev);
- prueth->emac[mac] = NULL;
+ dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val);
+
+ for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) {
+ struct prueth_emac *emac = prueth->emac[i];
+
+ if (!emac || !emac->ndev)
+ continue;
+
+ emac->offload_fwd_mark = set_val;
+ }
}
-static int prueth_get_cores(struct prueth *prueth, int slice)
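+/* The PRU cores must be rebooted for a firmware change to take effect,
+ * so detach both ports, stop and restart their cores, and re-enable
+ * forwarding.
+ */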
+static void prueth_emac_restart(struct prueth *prueth)
{
- struct device *dev = prueth->dev;
- enum pruss_pru_id pruss_id;
- struct device_node *np;
- int idx = -1, ret;
+ struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0];
+ struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1];
- np = dev->of_node;
+ /* Detach the net_device for both PRUeth ports */
+ if (netif_running(emac0->ndev))
+ netif_device_detach(emac0->ndev);
+ if (netif_running(emac1->ndev))
+ netif_device_detach(emac1->ndev);
- switch (slice) {
- case ICSS_SLICE0:
- idx = 0;
- break;
- case ICSS_SLICE1:
- idx = 3;
- break;
- default:
- return -EINVAL;
+ /* Disable both PRUeth ports */
+ icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE);
+ icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE);
+
+ /* Stop both PRU cores for both PRUeth ports */
+ prueth_emac_stop(emac0);
+ prueth->emacs_initialized--;
+ prueth_emac_stop(emac1);
+ prueth->emacs_initialized--;
+
+ /* Start both PRU cores for both PRUeth ports */
+ prueth_emac_start(prueth, emac0);
+ prueth->emacs_initialized++;
+ prueth_emac_start(prueth, emac1);
+ prueth->emacs_initialized++;
+
+ /* Enable forwarding for both PRUeth ports */
+ icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD);
+ icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD);
+
+ /* Attach the net_device for both PRUeth ports */
+ netif_device_attach(emac0->ndev);
+ netif_device_attach(emac1->ndev);
+}
+
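+/* Restart both ports on the switch firmware, then seed the FDB and VLAN
+ * tables: install a blocking FDB entry for the STP address and make each
+ * port VLAN aware with its default port VLAN.
+ */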
+static void icssg_enable_switch_mode(struct prueth *prueth)
+{
+ struct prueth_emac *emac;
+ int mac;
+
+ prueth_emac_restart(prueth);
+
+ for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) {
+ emac = prueth->emac[mac];
+ if (netif_running(emac->ndev)) {
+ icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan,
+ ICSSG_FDB_ENTRY_P0_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P1_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_P2_MEMBERSHIP |
+ ICSSG_FDB_ENTRY_BLOCK,
+ true);
+ icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID,
+ BIT(emac->port_id) | DEFAULT_PORT_MASK,
+ BIT(emac->port_id) | DEFAULT_UNTAG_MASK,
+ true);
+ icssg_set_pvid(prueth, emac->port_vlan, emac->port_id);
+ icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE);
+ }
}
+}
- prueth->pru[slice] = pru_rproc_get(np, idx, &pruss_id);
- if (IS_ERR(prueth->pru[slice])) {
- ret = PTR_ERR(prueth->pru[slice]);
- prueth->pru[slice] = NULL;
- return dev_err_probe(dev, ret, "unable to get PRU%d\n", slice);
+static int prueth_netdevice_port_link(struct net_device *ndev,
+ struct net_device *br_ndev,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+ int err;
+
+ if (!prueth->br_members) {
+ prueth->hw_bridge_dev = br_ndev;
+ } else {
+ /* This is adding the port to a second bridge, which is
+ * unsupported
+ */
+ if (prueth->hw_bridge_dev != br_ndev)
+ return -EOPNOTSUPP;
+ }
+
+ err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
+ &prueth->prueth_switchdev_nb,
+ &prueth->prueth_switchdev_bl_nb,
+ false, extack);
+ if (err)
+ return err;
+
+ prueth->br_members |= BIT(emac->port_id);
+
+ if (!prueth->is_switch_mode) {
+ if (prueth->br_members & BIT(PRUETH_PORT_MII0) &&
+ prueth->br_members & BIT(PRUETH_PORT_MII1)) {
+ prueth->is_switch_mode = true;
+ prueth->default_vlan = 1;
+ emac->port_vlan = prueth->default_vlan;
+ icssg_enable_switch_mode(prueth);
+ }
}
- prueth->pru_id[slice] = pruss_id;
- idx++;
- prueth->rtu[slice] = pru_rproc_get(np, idx, NULL);
- if (IS_ERR(prueth->rtu[slice])) {
- ret = PTR_ERR(prueth->rtu[slice]);
- prueth->rtu[slice] = NULL;
- return dev_err_probe(dev, ret, "unable to get RTU%d\n", slice);
+ prueth_offload_fwd_mark_update(prueth);
+
+ return NOTIFY_DONE;
+}
+
+static void prueth_netdevice_port_unlink(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct prueth *prueth = emac->prueth;
+
+ prueth->br_members &= ~BIT(emac->port_id);
+
+ if (prueth->is_switch_mode) {
+ prueth->is_switch_mode = false;
+ emac->port_vlan = 0;
+ prueth_emac_restart(prueth);
}
- idx++;
- prueth->txpru[slice] = pru_rproc_get(np, idx, NULL);
- if (IS_ERR(prueth->txpru[slice])) {
- ret = PTR_ERR(prueth->txpru[slice]);
- prueth->txpru[slice] = NULL;
- return dev_err_probe(dev, ret, "unable to get TX_PRU%d\n", slice);
+ prueth_offload_fwd_mark_update(prueth);
+
+ if (!prueth->br_members)
+ prueth->hw_bridge_dev = NULL;
+}
+
+/* netdev notifier */
+static int prueth_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ int ret = NOTIFY_DONE;
+
+ if (ndev->netdev_ops != &emac_netdev_ops)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack);
+ else
+ prueth_netdevice_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
}
- return 0;
+ return notifier_from_errno(ret);
}
-static void prueth_put_cores(struct prueth *prueth, int slice)
+static int prueth_register_notifiers(struct prueth *prueth)
{
- if (prueth->txpru[slice])
- pru_rproc_put(prueth->txpru[slice]);
+ int ret = 0;
- if (prueth->rtu[slice])
- pru_rproc_put(prueth->rtu[slice]);
+ prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event;
+ ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb);
+ if (ret) {
+ dev_err(prueth->dev, "can't register netdevice notifier\n");
+ return ret;
+ }
+
+ ret = prueth_switchdev_register_notifiers(prueth);
+ if (ret)
+ unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
- if (prueth->pru[slice])
- pru_rproc_put(prueth->pru[slice]);
+ return ret;
+}
+
+static void prueth_unregister_notifiers(struct prueth *prueth)
+{
+ prueth_switchdev_unregister_notifiers(prueth);
+ unregister_netdevice_notifier(&prueth->prueth_netdevice_nb);
}
static int prueth_probe(struct platform_device *pdev)
@@ -2036,13 +1182,13 @@ static int prueth_probe(struct platform_device *pdev)
}
if (eth0_node) {
- ret = prueth_get_cores(prueth, ICSS_SLICE0);
+ ret = prueth_get_cores(prueth, ICSS_SLICE0, false);
if (ret)
goto put_cores;
}
if (eth1_node) {
- ret = prueth_get_cores(prueth, ICSS_SLICE1);
+ ret = prueth_get_cores(prueth, ICSS_SLICE1, false);
if (ret)
goto put_cores;
}
@@ -2073,6 +1219,9 @@ static int prueth_probe(struct platform_device *pdev)
}
msmc_ram_size = MSMC_RAM_SIZE;
+ prueth->is_switchmode_supported = prueth->pdata.switch_mode;
+ if (prueth->is_switchmode_supported)
+ msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE;
/* NOTE: FW bug needs buffer base to be 64KB aligned */
prueth->msmcram.va =
@@ -2152,7 +1301,12 @@ static int prueth_probe(struct platform_device *pdev)
prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
- emac_phy_connect(prueth->emac[PRUETH_MAC0]);
+ ret = emac_phy_connect(prueth->emac[PRUETH_MAC0]);
+ if (ret) {
+ dev_err(dev,
+ "can't connect to MII0 PHY, error -%d", ret);
+ goto netdev_unregister;
+ }
phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
}
@@ -2164,10 +1318,23 @@ static int prueth_probe(struct platform_device *pdev)
}
prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
- emac_phy_connect(prueth->emac[PRUETH_MAC1]);
+ ret = emac_phy_connect(prueth->emac[PRUETH_MAC1]);
+ if (ret) {
+ dev_err(dev,
+ "can't connect to MII1 PHY, error %d", ret);
+ goto netdev_unregister;
+ }
phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
}
+ if (prueth->is_switchmode_supported) {
+ ret = prueth_register_notifiers(prueth);
+ if (ret)
+ goto netdev_unregister;
+
+ snprintf(prueth->switch_id, sizeof(prueth->switch_id), "%s", dev_name(dev));
+ }
+
dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n",
(!eth0_node || !eth1_node) ? "single" : "dual");
@@ -2237,6 +1404,8 @@ static void prueth_remove(struct platform_device *pdev)
struct device_node *eth_node;
int i;
+ prueth_unregister_notifiers(prueth);
+
for (i = 0; i < PRUETH_NUM_MACS; i++) {
if (!prueth->registered_netdevs[i])
continue;
@@ -2275,69 +1444,15 @@ static void prueth_remove(struct platform_device *pdev)
prueth_put_cores(prueth, ICSS_SLICE0);
}
-#ifdef CONFIG_PM_SLEEP
-static int prueth_suspend(struct device *dev)
-{
- struct prueth *prueth = dev_get_drvdata(dev);
- struct net_device *ndev;
- int i, ret;
-
- for (i = 0; i < PRUETH_NUM_MACS; i++) {
- ndev = prueth->registered_netdevs[i];
-
- if (!ndev)
- continue;
-
- if (netif_running(ndev)) {
- netif_device_detach(ndev);
- ret = emac_ndo_stop(ndev);
- if (ret < 0) {
- netdev_err(ndev, "failed to stop: %d", ret);
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-static int prueth_resume(struct device *dev)
-{
- struct prueth *prueth = dev_get_drvdata(dev);
- struct net_device *ndev;
- int i, ret;
-
- for (i = 0; i < PRUETH_NUM_MACS; i++) {
- ndev = prueth->registered_netdevs[i];
-
- if (!ndev)
- continue;
-
- if (netif_running(ndev)) {
- ret = emac_ndo_open(ndev);
- if (ret < 0) {
- netdev_err(ndev, "failed to start: %d", ret);
- return ret;
- }
- netif_device_attach(ndev);
- }
- }
-
- return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static const struct dev_pm_ops prueth_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume)
-};
-
static const struct prueth_pdata am654_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
.quirk_10m_link_issue = 1,
+ .switch_mode = 1,
};
static const struct prueth_pdata am64x_icssg_pdata = {
.fdqring_mode = K3_RINGACC_RING_MODE_RING,
+ .switch_mode = 1,
};
static const struct of_device_id prueth_dt_match[] = {
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
index 8b6d6b497010..f678d656a3ed 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h
@@ -55,6 +55,8 @@
#define ICSSG_NUM_STANDARD_STATS 31
#define ICSSG_NUM_ETHTOOL_STATS (ICSSG_NUM_STATS - ICSSG_NUM_STANDARD_STATS)
+#define IEP_DEFAULT_CYCLE_TIME_NS 1000000 /* 1 ms */
+
/* Firmware status codes */
#define ICSS_HS_FW_READY 0x55555555
#define ICSS_HS_FW_DEAD 0xDEAD0000 /* lower 16 bits contain error code */
@@ -106,6 +108,8 @@ struct prueth_tx_chn {
u32 descs_num;
unsigned int irq;
char name[32];
+ struct hrtimer tx_hrtimer;
+ unsigned long tx_pace_timeout_ns;
};
struct prueth_rx_chn {
@@ -125,8 +129,12 @@ struct prueth_rx_chn {
#define PRUETH_MAX_TX_TS_REQUESTS 50 /* Max simultaneous TX_TS requests */
+/* Minimum coalesce time in usecs for both Tx and Rx */
+#define ICSSG_MIN_COALESCE_USECS 20
+
/* data for each emac port */
struct prueth_emac {
+ bool is_sr1;
bool fw_running;
struct prueth *prueth;
struct net_device *ndev;
@@ -155,6 +163,10 @@ struct prueth_emac {
int rx_flow_id_base;
int tx_ch_num;
+ /* SR1.0 Management channel */
+ struct prueth_rx_chn rx_mgm_chn;
+ int rx_mgm_flow_id_base;
+
spinlock_t lock; /* serialize access */
/* TX HW Timestamping */
@@ -165,7 +177,7 @@ struct prueth_emac {
u8 cmd_seq;
/* shutdown related */
- u32 cmd_data[4];
+ __le32 cmd_data[4];
struct completion cmd_complete;
/* Mutex to serialize access to firmware command interface */
struct mutex cmd_lock;
@@ -174,18 +186,33 @@ struct prueth_emac {
struct pruss_mem_region dram;
+ bool offload_fwd_mark;
+ int port_vlan;
+
struct delayed_work stats_work;
u64 stats[ICSSG_NUM_STATS];
+
+ /* RX IRQ Coalescing Related */
+ struct hrtimer rx_hrtimer;
+ unsigned long rx_pace_timeout_ns;
};
/**
* struct prueth_pdata - PRUeth platform data
* @fdqring_mode: Free desc queue mode
* @quirk_10m_link_issue: 10M link detect errata
+ * @switch_mode: switch firmware support
*/
struct prueth_pdata {
enum k3_ring_mode fdqring_mode;
u32 quirk_10m_link_issue:1;
+ u32 switch_mode:1;
+};
+
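+/* Per-slice firmware image names for the PRU, RTU and TX_PRU cores */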
+struct icssg_firmwares {
+ char *pru;
+ char *rtu;
+ char *txpru;
};
/**
@@ -210,6 +237,16 @@ struct prueth_pdata {
* @emacs_initialized: num of EMACs/ext ports that are up/running
* @iep0: pointer to IEP0 device
* @iep1: pointer to IEP1 device
+ * @vlan_tbl: VLAN-FID table pointer
+ * @hw_bridge_dev: pointer to HW bridge net device
+ * @br_members: bitmask of bridge member ports
+ * @prueth_netdevice_nb: netdevice notifier block
+ * @prueth_switchdev_nb: switchdev notifier block
+ * @prueth_switchdev_bl_nb: switchdev blocking notifier block
+ * @is_switch_mode: flag to indicate if device is in Switch mode
+ * @is_switchmode_supported: indicates platform support for switch mode
+ * @switch_id: ID for mapping switch ports to bridge
+ * @default_vlan: Default VLAN for host
*/
struct prueth {
struct device *dev;
@@ -234,6 +271,17 @@ struct prueth {
int emacs_initialized;
struct icss_iep *iep0;
struct icss_iep *iep1;
+ struct prueth_vlan_tbl *vlan_tbl;
+
+ struct net_device *hw_bridge_dev;
+ u8 br_members;
+ struct notifier_block prueth_netdevice_nb;
+ struct notifier_block prueth_switchdev_nb;
+ struct notifier_block prueth_switchdev_bl_nb;
+ bool is_switch_mode;
+ bool is_switchmode_supported;
+ unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN];
+ int default_vlan;
};
struct emac_tx_ts_response {
@@ -243,6 +291,13 @@ struct emac_tx_ts_response {
u32 hi_ts;
};
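+/* SR1.0 layout of the TX timestamp response delivered on the
+ * management RX flow
+ */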
+struct emac_tx_ts_response_sr1 {
+ __le32 lo_ts;
+ __le32 hi_ts;
+ __le32 reserved;
+ __le32 cookie;
+};
+
/* get PRUSS SLICE number from prueth_emac */
static inline int prueth_emac_slice(struct prueth_emac *emac)
{
@@ -257,20 +312,25 @@ static inline int prueth_emac_slice(struct prueth_emac *emac)
}
extern const struct ethtool_ops icssg_ethtool_ops;
+extern const struct dev_pm_ops prueth_dev_pm_ops;
/* Classifier helpers */
void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac);
void icssg_class_set_host_mac_addr(struct regmap *miig_rt, const u8 *mac);
void icssg_class_disable(struct regmap *miig_rt, int slice);
-void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti);
+void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti,
+ bool is_sr1);
+void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice);
+void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice,
+ struct net_device *ndev);
void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr);
/* config helpers */
void icssg_config_ipg(struct prueth_emac *emac);
int icssg_config(struct prueth *prueth, struct prueth_emac *emac,
int slice);
-int emac_set_port_state(struct prueth_emac *emac,
- enum icssg_port_state_cmd state);
+int icssg_set_port_state(struct prueth_emac *emac,
+ enum icssg_port_state_cmd state);
void icssg_config_set_speed(struct prueth_emac *emac);
void icssg_config_half_duplex(struct prueth_emac *emac);
@@ -279,10 +339,70 @@ int icssg_queue_pop(struct prueth *prueth, u8 queue);
void icssg_queue_push(struct prueth *prueth, int queue, u16 addr);
u32 icssg_queue_level(struct prueth *prueth, int queue);
+int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd,
+ struct mgmt_cmd_rsp *rsp);
+int icssg_fdb_add_del(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid, u8 fid_c2, bool add);
+int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr,
+ u8 vid);
+void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask,
+ u8 untag_mask, bool add);
+u16 icssg_get_pvid(struct prueth_emac *emac);
+void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port);
#define prueth_napi_to_tx_chn(pnapi) \
container_of(pnapi, struct prueth_tx_chn, napi_tx)
-void emac_stats_work_handler(struct work_struct *work);
+void icssg_stats_work_handler(struct work_struct *work);
void emac_update_hardware_stats(struct prueth_emac *emac);
int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name);
+
+/* Common functions */
+void prueth_cleanup_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ int max_rflows);
+void prueth_cleanup_tx_chns(struct prueth_emac *emac);
+void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num);
+void prueth_xmit_free(struct prueth_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc);
+int emac_tx_complete_packets(struct prueth_emac *emac, int chn,
+ int budget, bool *tdown);
+int prueth_ndev_add_tx_napi(struct prueth_emac *emac);
+int prueth_init_tx_chns(struct prueth_emac *emac);
+int prueth_init_rx_chns(struct prueth_emac *emac,
+ struct prueth_rx_chn *rx_chn,
+ char *name, u32 max_rflows,
+ u32 max_desc_num);
+int prueth_dma_rx_push(struct prueth_emac *emac,
+ struct sk_buff *skb,
+ struct prueth_rx_chn *rx_chn);
+void emac_rx_timestamp(struct prueth_emac *emac,
+ struct sk_buff *skb, u32 *psdata);
+enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+irqreturn_t prueth_rx_irq(int irq, void *dev_id);
+void prueth_emac_stop(struct prueth_emac *emac);
+void prueth_cleanup_tx_ts(struct prueth_emac *emac);
+int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget);
+int prueth_prepare_rx_chan(struct prueth_emac *emac,
+ struct prueth_rx_chn *chn,
+ int buf_size);
+void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num,
+ bool free_skb);
+void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
+ int num_flows, bool disable);
+void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
+int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd);
+void icssg_ndo_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats);
+int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len);
+int prueth_node_port(struct device_node *eth_node);
+int prueth_node_mac(struct device_node *eth_node);
+void prueth_netdev_exit(struct prueth *prueth,
+ struct device_node *eth_node);
+int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1);
+void prueth_put_cores(struct prueth *prueth, int slice);
+
+/* Revision specific helper */
+u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns);
+
#endif /* __NET_TI_ICSSG_PRUETH_H */
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
new file mode 100644
index 000000000000..e180c1166170
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c
@@ -0,0 +1,1230 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments ICSSG SR1.0 Ethernet Driver
+ *
+ * Copyright (C) 2018-2022 Texas Instruments Incorporated - https://www.ti.com/
+ * Copyright (c) Siemens AG, 2024
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/genalloc.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/phy.h>
+#include <linux/remoteproc/pruss.h>
+#include <linux/pruss_driver.h>
+
+#include "icssg_prueth.h"
+#include "icssg_mii_rt.h"
+#include "../k3-cppi-desc-pool.h"
+
+#define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG SR1.0 Ethernet driver"
+
+/* SR1: Set buffer sizes for the pools. There are 8 internal queues
+ * implemented in firmware, but only 4 TX channels/threads in the egress
+ * direction to firmware. A high priority queue is needed for management
+ * messages since they shouldn't be blocked even in high traffic
+ * situations. So use Q0-Q2 as data queues and Q3 as the management
+ * queue in the max case. However, for ease of configuration, use the
+ * max data queue + 1 for management messages when not using the max
+ * case.
+ *
+ * Allocate 4 MTU buffers per data queue. Firmware requires
+ * pool sizes to be set for internal queues. Set the upper 5 queue
+ * pool sizes to the minimum of 128 bytes since there are only 3 TX
+ * data channels and the management queue requires only a minimum
+ * buffer, i.e. the lower queues are used by the driver and the highest
+ * priority queue of those is used for management messages.
+ */
+
+static int emac_egress_buf_pool_size[] = {
+ PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_SIZE_SR1,
+ PRUETH_EMAC_BUF_POOL_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
+ PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1,
+ PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1, PRUETH_EMAC_BUF_POOL_MIN_SIZE_SR1
+};
+
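+/* Write the SR1.0 firmware configuration (buffer base address, RX flow
+ * IDs, random seed and per-queue buffer pool sizes) into shared RAM for
+ * the given slice.
+ */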
+static void icssg_config_sr1(struct prueth *prueth, struct prueth_emac *emac,
+ int slice)
+{
+ struct icssg_sr1_config config;
+ void __iomem *va;
+ int i, index;
+
+ memset(&config, 0, sizeof(config));
+ config.addr_lo = cpu_to_le32(lower_32_bits(prueth->msmcram.pa));
+ config.addr_hi = cpu_to_le32(upper_32_bits(prueth->msmcram.pa));
+ config.rx_flow_id = cpu_to_le32(emac->rx_flow_id_base); /* flow id for host port */
+ config.rx_mgr_flow_id = cpu_to_le32(emac->rx_mgm_flow_id_base); /* for mgm ch */
+ config.rand_seed = cpu_to_le32(get_random_u32());
+
+ for (i = PRUETH_EMAC_BUF_POOL_START_SR1; i < PRUETH_NUM_BUF_POOLS_SR1; i++) {
+ index = i - PRUETH_EMAC_BUF_POOL_START_SR1;
+ config.tx_buf_sz[i] = cpu_to_le32(emac_egress_buf_pool_size[index]);
+ }
+
+ va = prueth->shram.va + slice * ICSSG_CONFIG_OFFSET_SLICE1;
+ memcpy_toio(va, &config, sizeof(config));
+
+ emac->speed = SPEED_1000;
+ emac->duplex = DUPLEX_FULL;
+}
+
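+/* Send one command word to the SR1.0 firmware over the highest priority
+ * TX channel and wait up to 100 ms for the completion signalled by the
+ * management RX response handler.
+ */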
+static int emac_send_command_sr1(struct prueth_emac *emac, u32 cmd)
+{
+ struct cppi5_host_desc_t *first_desc;
+ u32 pkt_len = sizeof(emac->cmd_data);
+ __le32 *data = emac->cmd_data;
+ dma_addr_t desc_dma, buf_dma;
+ struct prueth_tx_chn *tx_chn;
+ void **swdata;
+ int ret = 0;
+ u32 *epib;
+
+ netdev_dbg(emac->ndev, "Sending cmd %x\n", cmd);
+
+ /* only one command at a time allowed to firmware */
+ mutex_lock(&emac->cmd_lock);
+ data[0] = cpu_to_le32(cmd);
+
+ /* highest priority channel for management messages */
+ tx_chn = &emac->tx_chns[emac->tx_ch_num - 1];
+
+ /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, data, pkt_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_chn->dma_dev, buf_dma)) {
+ netdev_err(emac->ndev, "cmd %x: failed to map cmd buffer\n", cmd);
+ ret = -EINVAL;
+ goto err_unlock;
+ }
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ netdev_err(emac->ndev, "cmd %x: failed to allocate descriptor\n", cmd);
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len, DMA_TO_DEVICE);
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ PRUETH_NAV_PS_DATA_SIZE);
+ cppi5_hdesc_set_pkttype(first_desc, PRUETH_PKT_TYPE_CMD);
+ epib = first_desc->epib;
+ epib[0] = 0;
+ epib[1] = 0;
+
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ *swdata = data;
+
+ cppi5_hdesc_set_pktlen(first_desc, pkt_len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+
+ /* send command */
+ reinit_completion(&emac->cmd_complete);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ if (ret) {
+ netdev_err(emac->ndev, "cmd %x: push failed: %d\n", cmd, ret);
+ goto free_desc;
+ }
+ ret = wait_for_completion_timeout(&emac->cmd_complete, msecs_to_jiffies(100));
+ if (!ret)
+ netdev_err(emac->ndev, "cmd %x: completion timeout\n", cmd);
+
+ mutex_unlock(&emac->cmd_lock);
+
+ return ret;
+free_desc:
+ prueth_xmit_free(tx_chn, first_desc);
+err_unlock:
+ mutex_unlock(&emac->cmd_lock);
+
+ return ret;
+}
+
+static void icssg_config_set_speed_sr1(struct prueth_emac *emac)
+{
+ u32 cmd = ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1, val;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+
+ val = icssg_rgmii_get_speed(prueth->miig_rt, slice);
+ /* firmware expects speed settings in bit 2-1 */
+ val <<= 1;
+ cmd |= val;
+
+ val = icssg_rgmii_get_fullduplex(prueth->miig_rt, slice);
+ /* firmware expects full duplex settings in bit 3 */
+ val <<= 3;
+ cmd |= val;
+
+ emac_send_command_sr1(emac, cmd);
+}
+
+/* called back by PHY layer if there is a change in link state of HW port */
+static void emac_adjust_link_sr1(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ struct prueth *prueth = emac->prueth;
+ bool new_state = false;
+ unsigned long flags;
+
+ if (phydev->link) {
+ /* check the mode of operation - full/half duplex */
+ if (phydev->duplex != emac->duplex) {
+ new_state = true;
+ emac->duplex = phydev->duplex;
+ }
+ if (phydev->speed != emac->speed) {
+ new_state = true;
+ emac->speed = phydev->speed;
+ }
+ if (!emac->link) {
+ new_state = true;
+ emac->link = 1;
+ }
+ } else if (emac->link) {
+ new_state = true;
+ emac->link = 0;
+
+ /* f/w should support 100 & 1000 */
+ emac->speed = SPEED_1000;
+
+ /* half duplex may not be supported by f/w */
+ emac->duplex = DUPLEX_FULL;
+ }
+
+ if (new_state) {
+ phy_print_status(phydev);
+
+ /* update RGMII and MII configuration based on PHY negotiated
+ * values
+ */
+ if (emac->link) {
+ /* Set the RGMII cfg for gig en and full duplex */
+ icssg_update_rgmii_cfg(prueth->miig_rt, emac);
+
+ /* update the Tx IPG based on 100M/1G speed */
+ spin_lock_irqsave(&emac->lock, flags);
+ icssg_config_ipg(emac);
+ spin_unlock_irqrestore(&emac->lock, flags);
+ icssg_config_set_speed_sr1(emac);
+ }
+ }
+
+ if (emac->link) {
+ /* reactivate the transmit queue */
+ netif_tx_wake_all_queues(ndev);
+ } else {
+ netif_tx_stop_all_queues(ndev);
+ prueth_cleanup_tx_ts(emac);
+ }
+}
+
+static int emac_phy_connect(struct prueth_emac *emac)
+{
+ struct prueth *prueth = emac->prueth;
+ struct net_device *ndev = emac->ndev;
+ /* connect PHY */
+ ndev->phydev = of_phy_connect(emac->ndev, emac->phy_node,
+ &emac_adjust_link_sr1, 0,
+ emac->phy_if);
+ if (!ndev->phydev) {
+ dev_err(prueth->dev, "couldn't connect to phy %s\n",
+ emac->phy_node->full_name);
+ return -ENODEV;
+ }
+
+ if (!emac->half_duplex) {
+ dev_dbg(prueth->dev, "half duplex mode is not supported\n");
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ }
+
+ /* Remove 100Mbit half-duplex due to RGMII misreporting the
+ * connection as full duplex
+ */
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+
+ /* remove unsupported modes */
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+ phy_remove_link_mode(ndev->phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+
+ if (emac->phy_if == PHY_INTERFACE_MODE_MII)
+ phy_set_max_speed(ndev->phydev, SPEED_100);
+
+ return 0;
+}
+
+/* get one packet from requested flow_id
+ *
+ * Returns skb pointer if packet found else NULL
+ * Caller must free the returned skb.
+ */
+static struct sk_buff *prueth_process_rx_mgm(struct prueth_emac *emac,
+ u32 flow_id)
+{
+ struct prueth_rx_chn *rx_chn = &emac->rx_mgm_chn;
+ struct net_device *ndev = emac->ndev;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb, *new_skb;
+ dma_addr_t desc_dma, buf_dma;
+ u32 buf_dma_len, pkt_len;
+ void **swdata;
+ int ret;
+
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_id, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ netdev_err(ndev, "rx mgm pop: failed: %d\n", ret);
+ return NULL;
+ }
+
+ if (cppi5_desc_is_tdcm(desc_dma)) /* Teardown */
+ return NULL;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+
+ /* Work around a FW bug that reports an incorrect PSDATA size */
+ if (cppi5_hdesc_get_psdata_size(desc_rx) != PRUETH_NAV_PS_DATA_SIZE) {
+ cppi5_hdesc_update_psdata_size(desc_rx,
+ PRUETH_NAV_PS_DATA_SIZE);
+ }
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ new_skb = netdev_alloc_skb_ip_align(ndev, PRUETH_MAX_PKT_SIZE);
+ /* if allocation fails we drop the packet but push the
+ * descriptor back to the ring with the old skb to prevent a stall
+ */
+ if (!new_skb) {
+ netdev_err(ndev,
+ "skb alloc failed, dropped mgm pkt from flow %d\n",
+ flow_id);
+ new_skb = skb;
+ skb = NULL; /* return NULL */
+ } else {
+ /* return the filled skb */
+ skb_put(skb, pkt_len);
+ }
+
+ /* queue another DMA */
+ ret = prueth_dma_rx_push(emac, new_skb, &emac->rx_mgm_chn);
+ if (WARN_ON(ret < 0))
+ dev_kfree_skb_any(new_skb);
+
+ return skb;
+}
+
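+/* Complete a TX timestamp request: the firmware's response carries a
+ * cookie that indexes the skb saved when the request was queued.
+ */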
+static void prueth_tx_ts_sr1(struct prueth_emac *emac,
+ struct emac_tx_ts_response_sr1 *tsr)
+{
+ struct skb_shared_hwtstamps ssh;
+ u32 hi_ts, lo_ts, cookie;
+ struct sk_buff *skb;
+ u64 ns;
+
+ hi_ts = le32_to_cpu(tsr->hi_ts);
+ lo_ts = le32_to_cpu(tsr->lo_ts);
+
+ ns = (u64)hi_ts << 32 | lo_ts;
+
+ cookie = le32_to_cpu(tsr->cookie);
+ if (cookie >= PRUETH_MAX_TX_TS_REQUESTS) {
+ netdev_dbg(emac->ndev, "Invalid TX TS cookie 0x%x\n",
+ cookie);
+ return;
+ }
+
+ skb = emac->tx_ts_skb[cookie];
+ emac->tx_ts_skb[cookie] = NULL; /* free slot */
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+
+ skb_tstamp_tx(skb, &ssh);
+ dev_consume_skb_any(skb);
+}
+
+static irqreturn_t prueth_rx_mgm_ts_thread_sr1(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+ struct sk_buff *skb;
+
+ skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1);
+ if (!skb)
+ return IRQ_NONE;
+
+ prueth_tx_ts_sr1(emac, (void *)skb->data);
+ dev_kfree_skb_any(skb);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t prueth_rx_mgm_rsp_thread(int irq, void *dev_id)
+{
+ struct prueth_emac *emac = dev_id;
+ struct sk_buff *skb;
+ u32 rsp;
+
+ skb = prueth_process_rx_mgm(emac, PRUETH_RX_MGM_FLOW_RESPONSE_SR1);
+ if (!skb)
+ return IRQ_NONE;
+
+ /* Process command response */
+ rsp = le32_to_cpu(*(__le32 *)skb->data) & 0xffff0000;
+ if (rsp == ICSSG_SHUTDOWN_CMD_SR1) {
+ netdev_dbg(emac->ndev, "f/w Shutdown cmd resp %x\n", rsp);
+ complete(&emac->cmd_complete);
+ } else if (rsp == ICSSG_PSTATE_SPEED_DUPLEX_CMD_SR1) {
+ netdev_dbg(emac->ndev, "f/w Speed/Duplex cmd rsp %x\n", rsp);
+ complete(&emac->cmd_complete);
+ }
+
+ dev_kfree_skb_any(skb);
+
+ return IRQ_HANDLED;
+}
+
+static struct icssg_firmwares icssg_sr1_emac_firmwares[] = {
+ {
+ .pru = "ti-pruss/am65x-pru0-prueth-fw.elf",
+ .rtu = "ti-pruss/am65x-rtu0-prueth-fw.elf",
+ },
+ {
+ .pru = "ti-pruss/am65x-pru1-prueth-fw.elf",
+ .rtu = "ti-pruss/am65x-rtu1-prueth-fw.elf",
+ }
+};
+
+static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac)
+{
+ struct icssg_firmwares *firmwares;
+ struct device *dev = prueth->dev;
+ int slice, ret;
+
+ firmwares = icssg_sr1_emac_firmwares;
+
+ slice = prueth_emac_slice(emac);
+ if (slice < 0) {
+ netdev_err(emac->ndev, "invalid port\n");
+ return -EINVAL;
+ }
+
+ icssg_config_sr1(prueth, emac, slice);
+
+ ret = rproc_set_firmware(prueth->pru[slice], firmwares[slice].pru);
+ if (ret)
+ return ret;
+ ret = rproc_boot(prueth->pru[slice]);
+ if (ret) {
+ dev_err(dev, "failed to boot PRU%d: %d\n", slice, ret);
+ return -EINVAL;
+ }
+
+ ret = rproc_set_firmware(prueth->rtu[slice], firmwares[slice].rtu);
+ if (ret)
+ goto halt_pru;
+ ret = rproc_boot(prueth->rtu[slice]);
+ if (ret) {
+ dev_err(dev, "failed to boot RTU%d: %d\n", slice, ret);
+ goto halt_pru;
+ }
+
+ emac->fw_running = 1;
+ return 0;
+
+halt_pru:
+ rproc_shutdown(prueth->pru[slice]);
+
+ return ret;
+}
+
+/**
+ * emac_ndo_open - EMAC device open
+ * @ndev: network adapter device
+ *
+ * Called when system wants to start the interface.
+ *
+ * Return: 0 for a successful open, or appropriate error code
+ */
+static int emac_ndo_open(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int num_data_chn = emac->tx_ch_num - 1;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+ struct device *dev = prueth->dev;
+ int max_rx_flows, rx_flow;
+ int ret, i;
+
+ /* clear SMEM and MSMC settings for all slices */
+ if (!prueth->emacs_initialized) {
+ memset_io(prueth->msmcram.va, 0, prueth->msmcram.size);
+ memset_io(prueth->shram.va, 0, ICSSG_CONFIG_OFFSET_SLICE1 * PRUETH_NUM_MACS);
+ }
+
+ /* set h/w MAC as user might have re-configured */
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr);
+
+ icssg_class_default(prueth->miig_rt, slice, 0, true);
+
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, num_data_chn);
+ if (ret) {
+ dev_err(dev, "cannot set real number of tx queues\n");
+ return ret;
+ }
+
+ init_completion(&emac->cmd_complete);
+ ret = prueth_init_tx_chns(emac);
+ if (ret) {
+ dev_err(dev, "failed to init tx channel: %d\n", ret);
+ return ret;
+ }
+
+ max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
+ ret = prueth_init_rx_chns(emac, &emac->rx_chns, "rx",
+ max_rx_flows, PRUETH_MAX_RX_DESC);
+ if (ret) {
+ dev_err(dev, "failed to init rx channel: %d\n", ret);
+ goto cleanup_tx;
+ }
+
+ ret = prueth_init_rx_chns(emac, &emac->rx_mgm_chn, "rxmgm",
+ PRUETH_MAX_RX_MGM_FLOWS_SR1,
+ PRUETH_MAX_RX_MGM_DESC_SR1);
+ if (ret) {
+ dev_err(dev, "failed to init rx mgmt channel: %d\n",
+ ret);
+ goto cleanup_rx;
+ }
+
+ ret = prueth_ndev_add_tx_napi(emac);
+ if (ret)
+ goto cleanup_rx_mgm;
+
+ /* we use only the highest priority flow for now i.e. @irq[3] */
+ rx_flow = PRUETH_RX_FLOW_DATA_SR1;
+ ret = request_irq(emac->rx_chns.irq[rx_flow], prueth_rx_irq,
+ IRQF_TRIGGER_HIGH, dev_name(dev), emac);
+ if (ret) {
+ dev_err(dev, "unable to request RX IRQ\n");
+ goto cleanup_napi;
+ }
+
+ ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
+ NULL, prueth_rx_mgm_rsp_thread,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ dev_name(dev), emac);
+ if (ret) {
+ dev_err(dev, "unable to request RX Management RSP IRQ\n");
+ goto free_rx_irq;
+ }
+
+ ret = request_threaded_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
+ NULL, prueth_rx_mgm_ts_thread_sr1,
+ IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+ dev_name(dev), emac);
+ if (ret) {
+ dev_err(dev, "unable to request RX Management TS IRQ\n");
+ goto free_rx_mgm_rsp_irq;
+ }
+
+ /* reset and start PRU firmware */
+ ret = prueth_emac_start(prueth, emac);
+ if (ret)
+ goto free_rx_mgmt_ts_irq;
+
+ icssg_mii_update_mtu(prueth->mii_rt, slice, ndev->max_mtu);
+
+ /* Prepare RX */
+ ret = prueth_prepare_rx_chan(emac, &emac->rx_chns, PRUETH_MAX_PKT_SIZE);
+ if (ret)
+ goto stop;
+
+ ret = prueth_prepare_rx_chan(emac, &emac->rx_mgm_chn, 64);
+ if (ret)
+ goto reset_rx_chn;
+
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_mgm_chn.rx_chn);
+ if (ret)
+ goto reset_rx_chn;
+
+ ret = k3_udma_glue_enable_rx_chn(emac->rx_chns.rx_chn);
+ if (ret)
+ goto reset_rx_mgm_chn;
+
+ for (i = 0; i < emac->tx_ch_num; i++) {
+ ret = k3_udma_glue_enable_tx_chn(emac->tx_chns[i].tx_chn);
+ if (ret)
+ goto reset_tx_chan;
+ }
+
+ /* Enable NAPI in Tx and Rx direction */
+ for (i = 0; i < emac->tx_ch_num; i++)
+ napi_enable(&emac->tx_chns[i].napi_tx);
+ napi_enable(&emac->napi_rx);
+
+ /* start PHY */
+ phy_start(ndev->phydev);
+
+ prueth->emacs_initialized++;
+
+ queue_work(system_long_wq, &emac->stats_work.work);
+
+ return 0;
+
+reset_tx_chan:
+ /* Since the interface is not yet up, there wouldn't be
+ * any SKB for completion. So pass false for free_skb.
+ */
+ prueth_reset_tx_chan(emac, i, false);
+reset_rx_mgm_chn:
+ prueth_reset_rx_chan(&emac->rx_mgm_chn,
+ PRUETH_MAX_RX_MGM_FLOWS_SR1, true);
+reset_rx_chn:
+ prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, false);
+stop:
+ prueth_emac_stop(emac);
+free_rx_mgmt_ts_irq:
+ free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1],
+ emac);
+free_rx_mgm_rsp_irq:
+ free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1],
+ emac);
+free_rx_irq:
+ free_irq(emac->rx_chns.irq[rx_flow], emac);
+cleanup_napi:
+ prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
+cleanup_rx_mgm:
+ prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn,
+ PRUETH_MAX_RX_MGM_FLOWS_SR1);
+cleanup_rx:
+ prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+cleanup_tx:
+ prueth_cleanup_tx_chns(emac);
+
+ return ret;
+}
+
+/**
+ * emac_ndo_stop - EMAC device stop
+ * @ndev: network adapter device
+ *
+ * Called when system wants to stop or down the interface.
+ *
+ * Return: Always 0 (Success)
+ */
+static int emac_ndo_stop(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int rx_flow = PRUETH_RX_FLOW_DATA_SR1;
+ struct prueth *prueth = emac->prueth;
+ int max_rx_flows;
+ int ret, i;
+
+ /* inform the upper layers. */
+ netif_tx_stop_all_queues(ndev);
+
+ /* block packets from wire */
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
+
+ icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac));
+
+ emac_send_command_sr1(emac, ICSSG_SHUTDOWN_CMD_SR1);
+
+ atomic_set(&emac->tdown_cnt, emac->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ /* tear down and disable UDMA channels */
+ reinit_completion(&emac->tdown_complete);
+ for (i = 0; i < emac->tx_ch_num; i++)
+ k3_udma_glue_tdown_tx_chn(emac->tx_chns[i].tx_chn, false);
+
+ ret = wait_for_completion_timeout(&emac->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!ret)
+ netdev_err(ndev, "tx teardown timeout\n");
+
+ prueth_reset_tx_chan(emac, emac->tx_ch_num, true);
+ for (i = 0; i < emac->tx_ch_num; i++)
+ napi_disable(&emac->tx_chns[i].napi_tx);
+
+ max_rx_flows = PRUETH_MAX_RX_FLOWS_SR1;
+ k3_udma_glue_tdown_rx_chn(emac->rx_chns.rx_chn, true);
+
+ prueth_reset_rx_chan(&emac->rx_chns, max_rx_flows, true);
+ /* Teardown RX MGM channel */
+ k3_udma_glue_tdown_rx_chn(emac->rx_mgm_chn.rx_chn, true);
+ prueth_reset_rx_chan(&emac->rx_mgm_chn,
+ PRUETH_MAX_RX_MGM_FLOWS_SR1, true);
+
+ napi_disable(&emac->napi_rx);
+
+ /* Destroying the queued work in ndo_stop() */
+ cancel_delayed_work_sync(&emac->stats_work);
+
+ /* stop PRUs */
+ prueth_emac_stop(emac);
+
+ free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_TIMESTAMP_SR1], emac);
+ free_irq(emac->rx_mgm_chn.irq[PRUETH_RX_MGM_FLOW_RESPONSE_SR1], emac);
+ free_irq(emac->rx_chns.irq[rx_flow], emac);
+ prueth_ndev_del_tx_napi(emac, emac->tx_ch_num);
+ prueth_cleanup_tx_chns(emac);
+
+ prueth_cleanup_rx_chns(emac, &emac->rx_mgm_chn, PRUETH_MAX_RX_MGM_FLOWS_SR1);
+ prueth_cleanup_rx_chns(emac, &emac->rx_chns, max_rx_flows);
+
+ prueth->emacs_initialized--;
+
+ return 0;
+}
+
+static void emac_ndo_set_rx_mode_sr1(struct net_device *ndev)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ bool allmulti = ndev->flags & IFF_ALLMULTI;
+ bool promisc = ndev->flags & IFF_PROMISC;
+ struct prueth *prueth = emac->prueth;
+ int slice = prueth_emac_slice(emac);
+
+ if (promisc) {
+ icssg_class_promiscuous_sr1(prueth->miig_rt, slice);
+ return;
+ }
+
+ if (allmulti) {
+ icssg_class_default(prueth->miig_rt, slice, 1, true);
+ return;
+ }
+
+ icssg_class_default(prueth->miig_rt, slice, 0, true);
+ if (!netdev_mc_empty(ndev)) {
+ /* program multicast address list into Classifier */
+ icssg_class_add_mcast_sr1(prueth->miig_rt, slice, ndev);
+ }
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_ndo_open,
+ .ndo_stop = emac_ndo_stop,
+ .ndo_start_xmit = icssg_ndo_start_xmit,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = icssg_ndo_tx_timeout,
+ .ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1,
+ .ndo_eth_ioctl = icssg_ndo_ioctl,
+ .ndo_get_stats64 = icssg_ndo_get_stats64,
+ .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name,
+};
+
+static int prueth_netdev_init(struct prueth *prueth,
+ struct device_node *eth_node)
+{
+ struct prueth_emac *emac;
+ struct net_device *ndev;
+ enum prueth_port port;
+ enum prueth_mac mac;
+ /* Only enable one TX channel due to timeouts when
+ * using multiple channels
+ */
+ int num_tx_chn = 1;
+ int ret;
+
+ port = prueth_node_port(eth_node);
+ if (port == PRUETH_PORT_INVALID)
+ return -EINVAL;
+
+ mac = prueth_node_mac(eth_node);
+ if (mac == PRUETH_MAC_INVALID)
+ return -EINVAL;
+
+ ndev = alloc_etherdev_mq(sizeof(*emac), num_tx_chn);
+ if (!ndev)
+ return -ENOMEM;
+
+ emac = netdev_priv(ndev);
+ emac->is_sr1 = 1;
+ emac->prueth = prueth;
+ emac->ndev = ndev;
+ emac->port_id = port;
+ emac->cmd_wq = create_singlethread_workqueue("icssg_cmd_wq");
+ if (!emac->cmd_wq) {
+ ret = -ENOMEM;
+ goto free_ndev;
+ }
+
+ INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler);
+
+ ret = pruss_request_mem_region(prueth->pruss,
+ port == PRUETH_PORT_MII0 ?
+ PRUSS_MEM_DRAM0 : PRUSS_MEM_DRAM1,
+ &emac->dram);
+ if (ret) {
+ dev_err(prueth->dev, "unable to get DRAM: %d\n", ret);
+ ret = -ENOMEM;
+ goto free_wq;
+ }
+
+ /* SR1.0 uses a dedicated high priority channel
+ * to send commands to the firmware
+ */
+ emac->tx_ch_num = 2;
+
+ SET_NETDEV_DEV(ndev, prueth->dev);
+ spin_lock_init(&emac->lock);
+ mutex_init(&emac->cmd_lock);
+
+ emac->phy_node = of_parse_phandle(eth_node, "phy-handle", 0);
+ if (!emac->phy_node && !of_phy_is_fixed_link(eth_node)) {
+ dev_err(prueth->dev, "couldn't find phy-handle\n");
+ ret = -ENODEV;
+ goto free;
+ } else if (of_phy_is_fixed_link(eth_node)) {
+ ret = of_phy_register_fixed_link(eth_node);
+ if (ret) {
+ ret = dev_err_probe(prueth->dev, ret,
+ "failed to register fixed-link phy\n");
+ goto free;
+ }
+
+ emac->phy_node = eth_node;
+ }
+
+ ret = of_get_phy_mode(eth_node, &emac->phy_if);
+ if (ret) {
+ dev_err(prueth->dev, "could not get phy-mode property\n");
+ goto free;
+ }
+
+ if (emac->phy_if != PHY_INTERFACE_MODE_MII &&
+ !phy_interface_mode_is_rgmii(emac->phy_if)) {
+ dev_err(prueth->dev, "PHY mode unsupported %s\n", phy_modes(emac->phy_if));
+ ret = -EINVAL;
+ goto free;
+ }
+
+ /* AM65 SR2.0 has TX Internal delay always enabled by hardware
+ * and it is not possible to disable TX Internal delay. The switch
+ * case block below describes how we handle different PHY modes
+ * based on this hardware restriction.
+ */
+ switch (emac->phy_if) {
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ emac->phy_if = PHY_INTERFACE_MODE_RGMII_RXID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ emac->phy_if = PHY_INTERFACE_MODE_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ dev_err(prueth->dev, "RGMII mode without TX delay is not supported");
+ ret = -EINVAL;
+ goto free;
+ default:
+ break;
+ }
+
+ /* get mac address from DT and set private and netdev addr */
+ ret = of_get_ethdev_address(eth_node, ndev);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(prueth->dev, "port %d: using random MAC addr: %pM\n",
+ port, ndev->dev_addr);
+ }
+ ether_addr_copy(emac->mac_addr, ndev->dev_addr);
+
+ ndev->min_mtu = PRUETH_MIN_PKT_SIZE;
+ ndev->max_mtu = PRUETH_MAX_MTU;
+ ndev->netdev_ops = &emac_netdev_ops;
+ ndev->ethtool_ops = &icssg_ethtool_ops;
+ ndev->hw_features = NETIF_F_SG;
+ ndev->features = ndev->hw_features;
+
+ netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll);
+ prueth->emac[mac] = emac;
+
+ return 0;
+
+free:
+ pruss_release_mem_region(prueth->pruss, &emac->dram);
+free_wq:
+ destroy_workqueue(emac->cmd_wq);
+free_ndev:
+ emac->ndev = NULL;
+ prueth->emac[mac] = NULL;
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int prueth_probe(struct platform_device *pdev)
+{
+ struct device_node *eth_node, *eth_ports_node;
+ struct device_node *eth0_node = NULL;
+ struct device_node *eth1_node = NULL;
+ struct device *dev = &pdev->dev;
+ struct device_node *np;
+ struct prueth *prueth;
+ struct pruss *pruss;
+ u32 msmc_ram_size;
+ int i, ret;
+
+ np = dev->of_node;
+
+ prueth = devm_kzalloc(dev, sizeof(*prueth), GFP_KERNEL);
+ if (!prueth)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, prueth);
+ prueth->pdev = pdev;
+ prueth->pdata = *(const struct prueth_pdata *)device_get_match_data(dev);
+
+ prueth->dev = dev;
+ eth_ports_node = of_get_child_by_name(np, "ethernet-ports");
+ if (!eth_ports_node)
+ return -ENOENT;
+
+ for_each_child_of_node(eth_ports_node, eth_node) {
+ u32 reg;
+
+ if (strcmp(eth_node->name, "port"))
+ continue;
+ ret = of_property_read_u32(eth_node, "reg", &reg);
+ if (ret < 0) {
+ dev_err(dev, "%pOF error reading port_id %d\n",
+ eth_node, ret);
+ }
+
+ of_node_get(eth_node);
+
+ if (reg == 0) {
+ eth0_node = eth_node;
+ if (!of_device_is_available(eth0_node)) {
+ of_node_put(eth0_node);
+ eth0_node = NULL;
+ }
+ } else if (reg == 1) {
+ eth1_node = eth_node;
+ if (!of_device_is_available(eth1_node)) {
+ of_node_put(eth1_node);
+ eth1_node = NULL;
+ }
+ } else {
+ dev_err(dev, "port reg should be 0 or 1\n");
+ }
+ }
+
+ of_node_put(eth_ports_node);
+
+	/* At least one port node must be present and available, otherwise we fail */
+ if (!eth0_node && !eth1_node) {
+ dev_err(dev, "neither port0 nor port1 node available\n");
+ return -ENODEV;
+ }
+
+ if (eth0_node == eth1_node) {
+ dev_err(dev, "port0 and port1 can't have same reg\n");
+ of_node_put(eth0_node);
+ return -ENODEV;
+ }
+
+ prueth->eth_node[PRUETH_MAC0] = eth0_node;
+ prueth->eth_node[PRUETH_MAC1] = eth1_node;
+
+ prueth->miig_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-g-rt");
+ if (IS_ERR(prueth->miig_rt)) {
+ dev_err(dev, "couldn't get ti,mii-g-rt syscon regmap\n");
+ return -ENODEV;
+ }
+
+ prueth->mii_rt = syscon_regmap_lookup_by_phandle(np, "ti,mii-rt");
+ if (IS_ERR(prueth->mii_rt)) {
+ dev_err(dev, "couldn't get ti,mii-rt syscon regmap\n");
+ return -ENODEV;
+ }
+
+ if (eth0_node) {
+ ret = prueth_get_cores(prueth, ICSS_SLICE0, true);
+ if (ret)
+ goto put_cores;
+ }
+
+ if (eth1_node) {
+ ret = prueth_get_cores(prueth, ICSS_SLICE1, true);
+ if (ret)
+ goto put_cores;
+ }
+
+ pruss = pruss_get(eth0_node ?
+ prueth->pru[ICSS_SLICE0] : prueth->pru[ICSS_SLICE1]);
+ if (IS_ERR(pruss)) {
+ ret = PTR_ERR(pruss);
+ dev_err(dev, "unable to get pruss handle\n");
+ goto put_cores;
+ }
+
+ prueth->pruss = pruss;
+
+ ret = pruss_request_mem_region(pruss, PRUSS_MEM_SHRD_RAM2,
+ &prueth->shram);
+ if (ret) {
+ dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
+ goto put_pruss;
+ }
+
+ prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
+ if (!prueth->sram_pool) {
+ dev_err(dev, "unable to get SRAM pool\n");
+ ret = -ENODEV;
+
+ goto put_mem;
+ }
+
+ msmc_ram_size = MSMC_RAM_SIZE_SR1;
+
+ prueth->msmcram.va = (void __iomem *)gen_pool_alloc(prueth->sram_pool,
+ msmc_ram_size);
+
+ if (!prueth->msmcram.va) {
+ ret = -ENOMEM;
+ dev_err(dev, "unable to allocate MSMC resource\n");
+ goto put_mem;
+ }
+ prueth->msmcram.pa = gen_pool_virt_to_phys(prueth->sram_pool,
+ (unsigned long)prueth->msmcram.va);
+ prueth->msmcram.size = msmc_ram_size;
+ memset_io(prueth->msmcram.va, 0, msmc_ram_size);
+ dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa,
+ prueth->msmcram.va, prueth->msmcram.size);
+
+ prueth->iep0 = icss_iep_get_idx(np, 0);
+ if (IS_ERR(prueth->iep0)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep0),
+ "iep0 get failed\n");
+ goto free_pool;
+ }
+
+ prueth->iep1 = icss_iep_get_idx(np, 1);
+ if (IS_ERR(prueth->iep1)) {
+ ret = dev_err_probe(dev, PTR_ERR(prueth->iep1),
+ "iep1 get failed\n");
+ goto put_iep0;
+ }
+
+ ret = icss_iep_init(prueth->iep0, NULL, NULL, 0);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init iep0\n");
+ goto put_iep;
+ }
+
+ ret = icss_iep_init(prueth->iep1, NULL, NULL, 0);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init iep1\n");
+ goto exit_iep0;
+ }
+
+ if (eth0_node) {
+ ret = prueth_netdev_init(prueth, eth0_node);
+ if (ret) {
+ dev_err_probe(dev, ret, "netdev init %s failed\n",
+ eth0_node->name);
+ goto exit_iep;
+ }
+
+ if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL))
+ prueth->emac[PRUETH_MAC0]->half_duplex = 1;
+
+ prueth->emac[PRUETH_MAC0]->iep = prueth->iep0;
+ }
+
+ if (eth1_node) {
+ ret = prueth_netdev_init(prueth, eth1_node);
+ if (ret) {
+ dev_err_probe(dev, ret, "netdev init %s failed\n",
+ eth1_node->name);
+ goto netdev_exit;
+ }
+
+ if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL))
+ prueth->emac[PRUETH_MAC1]->half_duplex = 1;
+
+ prueth->emac[PRUETH_MAC1]->iep = prueth->iep1;
+ }
+
+ /* register the network devices */
+ if (eth0_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC0]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII0\n");
+ goto netdev_exit;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC0] = prueth->emac[PRUETH_MAC0]->ndev;
+ emac_phy_connect(prueth->emac[PRUETH_MAC0]);
+ phy_attached_info(prueth->emac[PRUETH_MAC0]->ndev->phydev);
+ }
+
+ if (eth1_node) {
+ ret = register_netdev(prueth->emac[PRUETH_MAC1]->ndev);
+ if (ret) {
+ dev_err(dev, "can't register netdev for port MII1\n");
+ goto netdev_unregister;
+ }
+
+ prueth->registered_netdevs[PRUETH_MAC1] = prueth->emac[PRUETH_MAC1]->ndev;
+ emac_phy_connect(prueth->emac[PRUETH_MAC1]);
+ phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev);
+ }
+
+ dev_info(dev, "TI PRU SR1.0 ethernet driver initialized: %s EMAC mode\n",
+ (!eth0_node || !eth1_node) ? "single" : "dual");
+
+ if (eth1_node)
+ of_node_put(eth1_node);
+ if (eth0_node)
+ of_node_put(eth0_node);
+
+ return 0;
+
+netdev_unregister:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+
+ if (prueth->emac[i]->ndev->phydev) {
+ phy_disconnect(prueth->emac[i]->ndev->phydev);
+ prueth->emac[i]->ndev->phydev = NULL;
+ }
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+netdev_exit:
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ prueth_netdev_exit(prueth, eth_node);
+ }
+
+exit_iep:
+ icss_iep_exit(prueth->iep1);
+exit_iep0:
+ icss_iep_exit(prueth->iep0);
+
+put_iep:
+ icss_iep_put(prueth->iep1);
+
+put_iep0:
+ icss_iep_put(prueth->iep0);
+ prueth->iep0 = NULL;
+ prueth->iep1 = NULL;
+
+free_pool:
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->msmcram.va, msmc_ram_size);
+
+put_mem:
+ pruss_release_mem_region(prueth->pruss, &prueth->shram);
+
+put_pruss:
+ pruss_put(prueth->pruss);
+
+put_cores:
+ if (eth1_node) {
+ prueth_put_cores(prueth, ICSS_SLICE1);
+ of_node_put(eth1_node);
+ }
+
+ if (eth0_node) {
+ prueth_put_cores(prueth, ICSS_SLICE0);
+ of_node_put(eth0_node);
+ }
+
+ return ret;
+}
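
prueth_probe() pairs every of_node_get() taken during the ethernet-ports scan with an of_node_put() on each exit path: an unavailable node, a duplicate reg, any later probe failure, and the tail of the success path. A toy reference-counting model of why every taker must also be a putter (plain C, illustrative only, not the OF API):

#include <assert.h>
#include <stdio.h>

struct node { int refcount; };

static struct node *node_get(struct node *n)
{
	n->refcount++;			/* models of_node_get() */
	return n;
}

static void node_put(struct node *n)
{
	assert(n->refcount > 0);	/* catch unbalanced puts */
	n->refcount--;			/* models of_node_put() */
}

static int probe_port(struct node *port, int available)
{
	struct node *eth = node_get(port);	/* taken in the port scan */

	if (!available) {
		node_put(eth);		/* unavailable node: drop it */
		return -1;
	}
	/* ... init and register the netdev ... */
	node_put(eth);			/* tail of the success path */
	return 0;
}

int main(void)
{
	struct node port = { .refcount = 1 };

	probe_port(&port, 0);
	probe_port(&port, 1);
	printf("refcount back to %d\n", port.refcount);	/* 1: balanced */
	return 0;
}
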
+
+static void prueth_remove(struct platform_device *pdev)
+{
+ struct prueth *prueth = platform_get_drvdata(pdev);
+ struct device_node *eth_node;
+ int i;
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ if (!prueth->registered_netdevs[i])
+ continue;
+ phy_stop(prueth->emac[i]->ndev->phydev);
+ phy_disconnect(prueth->emac[i]->ndev->phydev);
+ prueth->emac[i]->ndev->phydev = NULL;
+ unregister_netdev(prueth->registered_netdevs[i]);
+ }
+
+ for (i = 0; i < PRUETH_NUM_MACS; i++) {
+ eth_node = prueth->eth_node[i];
+ if (!eth_node)
+ continue;
+
+ prueth_netdev_exit(prueth, eth_node);
+ }
+
+ icss_iep_exit(prueth->iep1);
+ icss_iep_exit(prueth->iep0);
+
+ icss_iep_put(prueth->iep1);
+ icss_iep_put(prueth->iep0);
+
+ gen_pool_free(prueth->sram_pool,
+ (unsigned long)prueth->msmcram.va,
+ MSMC_RAM_SIZE_SR1);
+
+ pruss_release_mem_region(prueth->pruss, &prueth->shram);
+
+ pruss_put(prueth->pruss);
+
+ if (prueth->eth_node[PRUETH_MAC1])
+ prueth_put_cores(prueth, ICSS_SLICE1);
+
+ if (prueth->eth_node[PRUETH_MAC0])
+ prueth_put_cores(prueth, ICSS_SLICE0);
+}
+
+static const struct prueth_pdata am654_sr1_icssg_pdata = {
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+};
+
+static const struct of_device_id prueth_dt_match[] = {
+ { .compatible = "ti,am654-sr1-icssg-prueth", .data = &am654_sr1_icssg_pdata },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, prueth_dt_match);
+
+static struct platform_driver prueth_driver = {
+ .probe = prueth_probe,
+ .remove_new = prueth_remove,
+ .driver = {
+ .name = "icssg-prueth-sr1",
+ .of_match_table = prueth_dt_match,
+ .pm = &prueth_dev_pm_ops,
+ },
+};
+module_platform_driver(prueth_driver);
+
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>");
+MODULE_AUTHOR("Diogo Ivo <diogo.ivo@siemens.com>");
+MODULE_DESCRIPTION(PRUETH_MODULE_DESCRIPTION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/icssg/icssg_queues.c b/drivers/net/ethernet/ti/icssg/icssg_queues.c
index 3c34f61ad40b..e5052d9e7807 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_queues.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_queues.c
@@ -28,6 +28,7 @@ int icssg_queue_pop(struct prueth *prueth, u8 queue)
return val;
}
+EXPORT_SYMBOL_GPL(icssg_queue_pop);
void icssg_queue_push(struct prueth *prueth, int queue, u16 addr)
{
@@ -36,6 +37,7 @@ void icssg_queue_push(struct prueth *prueth, int queue, u16 addr)
regmap_write(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, addr);
}
+EXPORT_SYMBOL_GPL(icssg_queue_push);
u32 icssg_queue_level(struct prueth *prueth, int queue)
{
diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c
index 3dbadddd7e35..2fb150c13078 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_stats.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c
@@ -42,7 +42,7 @@ void emac_update_hardware_stats(struct prueth_emac *emac)
}
}
-void emac_stats_work_handler(struct work_struct *work)
+void icssg_stats_work_handler(struct work_struct *work)
{
struct prueth_emac *emac = container_of(work, struct prueth_emac,
stats_work.work);
@@ -51,6 +51,7 @@ void emac_stats_work_handler(struct work_struct *work)
queue_delayed_work(system_long_wq, &emac->stats_work,
msecs_to_jiffies((STATS_TIME_LIMIT_1G_MS * 1000) / emac->speed));
}
+EXPORT_SYMBOL_GPL(icssg_stats_work_handler);
int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name)
{
diff --git a/drivers/net/ethernet/ti/icssg/icssg_switchdev.c b/drivers/net/ethernet/ti/icssg/icssg_switchdev.c
new file mode 100644
index 000000000000..67e2927e176d
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_switchdev.c
@@ -0,0 +1,477 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Texas Instruments K3 ICSSG Ethernet Switchdev Driver
+ *
+ * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <net/switchdev.h>
+
+#include "icssg_prueth.h"
+#include "icssg_switchdev.h"
+#include "icssg_mii_rt.h"
+
+struct prueth_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct prueth_emac *emac;
+ unsigned long event;
+};
+
+static int prueth_switchdev_stp_state_set(struct prueth_emac *emac,
+ u8 state)
+{
+ enum icssg_port_state_cmd emac_state;
+ int ret = 0;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ emac_state = ICSSG_EMAC_PORT_FORWARD;
+ break;
+ case BR_STATE_DISABLED:
+ emac_state = ICSSG_EMAC_PORT_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ emac_state = ICSSG_EMAC_PORT_BLOCK;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ icssg_set_port_state(emac, emac_state);
+ netdev_dbg(emac->ndev, "STP state: %u\n", emac_state);
+
+ return ret;
+}
+
+static int prueth_switchdev_attr_br_flags_set(struct prueth_emac *emac,
+ struct net_device *orig_dev,
+ struct switchdev_brport_flags brport_flags)
+{
+ enum icssg_port_state_cmd emac_state;
+
+ if (brport_flags.mask & BR_MCAST_FLOOD)
+ emac_state = ICSSG_EMAC_PORT_MC_FLOODING_ENABLE;
+ else
+ emac_state = ICSSG_EMAC_PORT_MC_FLOODING_DISABLE;
+
+ netdev_dbg(emac->ndev, "BR_MCAST_FLOOD: %d port %u\n",
+ emac_state, emac->port_id);
+
+ icssg_set_port_state(emac, emac_state);
+
+ return 0;
+}
+
+static int prueth_switchdev_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_brport_flags brport_flags)
+{
+ if (brport_flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int prueth_switchdev_attr_set(struct net_device *ndev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int ret;
+
+ netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, emac->port_id);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = prueth_switchdev_attr_br_flags_pre_set(ndev,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ret = prueth_switchdev_stp_state_set(emac,
+ attr->u.stp_state);
+ netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ret = prueth_switchdev_attr_br_flags_set(emac, attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static void prueth_switchdev_fdb_offload_notify(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info;
+
+ memset(&info, 0, sizeof(info));
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ ndev, &info.info, NULL);
+}
+
+static void prueth_switchdev_event_work(struct work_struct *work)
+{
+ struct prueth_switchdev_event_work *switchdev_work =
+ container_of(work, struct prueth_switchdev_event_work, work);
+ struct prueth_emac *emac = switchdev_work->emac;
+ struct switchdev_notifier_fdb_info *fdb;
+ int port_id = emac->port_id;
+ int ret;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(emac->ndev, "prueth_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user)
+ break;
+ if (!ether_addr_equal(emac->mac_addr, fdb->addr))
+ break;
+
+ ret = icssg_fdb_add_del(emac, fdb->addr, fdb->vid,
+ BIT(port_id), true);
+ if (!ret)
+ prueth_switchdev_fdb_offload_notify(emac->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(emac->ndev, "prueth_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user)
+ break;
+ if (!ether_addr_equal(emac->mac_addr, fdb->addr))
+ break;
+ icssg_fdb_add_del(emac, fdb->addr, fdb->vid,
+ BIT(port_id), false);
+ break;
+ default:
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(emac->ndev);
+}
+
+static int prueth_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct prueth_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int err;
+
+ if (!prueth_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(ndev, ptr,
+ prueth_dev_check,
+ prueth_switchdev_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, prueth_switchdev_event_work);
+ switchdev_work->emac = emac;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(ndev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
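
prueth_switchdev_event() runs in notifier (atomic) context, so it only snapshots the FDB info with GFP_ATOMIC, pins the netdev with dev_hold(), and defers the sleeping work (which takes rtnl_lock) to system_long_wq; the worker frees the snapshot and drops the reference. A rough userspace model of the same copy-then-defer hand-off (illustrative names; pthreads standing in for the workqueue):

/* build with: cc demo.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fdb_event {
	unsigned char addr[6];		/* snapshot taken in the notifier */
	int vid;
};

/* Deferred worker: free to sleep; owns and frees the snapshot. */
static void *fdb_worker(void *arg)
{
	struct fdb_event *ev = arg;

	printf("handling FDB event: vid %d\n", ev->vid);
	free(ev);			/* kfree() + dev_put() in the driver */
	return NULL;
}

/* "Notifier": must not sleep, so it only copies and hands off. */
static int on_fdb_event(const unsigned char *addr, int vid, pthread_t *thr)
{
	struct fdb_event *ev = malloc(sizeof(*ev));	/* GFP_ATOMIC alloc */

	if (!ev)
		return -1;
	memcpy(ev->addr, addr, sizeof(ev->addr));
	ev->vid = vid;
	if (pthread_create(thr, NULL, fdb_worker, ev)) {	/* queue_work() */
		free(ev);
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	pthread_t thr;

	if (on_fdb_event(mac, 100, &thr))
		return EXIT_FAILURE;
	pthread_join(thr, NULL);
	return EXIT_SUCCESS;
}
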
+
+static int prueth_switchdev_vlan_add(struct prueth_emac *emac, bool untag, bool pvid,
+ u8 vid, struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ int untag_mask = 0;
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(PRUETH_PORT_HOST);
+ else
+ port_mask = BIT(emac->port_id);
+
+ if (untag)
+ untag_mask = port_mask;
+
+ icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true);
+
+ netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X PVID %d\n",
+ vid, port_mask, untag_mask, pvid);
+
+ if (!pvid)
+ return ret;
+
+ icssg_set_pvid(emac->prueth, vid, emac->port_id);
+
+ return ret;
+}
+
+static int prueth_switchdev_vlan_del(struct prueth_emac *emac, u16 vid,
+ struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(PRUETH_PORT_HOST);
+ else
+ port_mask = BIT(emac->port_id);
+
+ icssg_vtbl_modify(emac, vid, port_mask, 0, false);
+
+ if (cpu_port)
+ icssg_fdb_add_del(emac, emac->mac_addr, vid,
+ BIT(PRUETH_PORT_HOST), false);
+
+ if (vid == icssg_get_pvid(emac))
+ icssg_set_pvid(emac->prueth, 0, emac->port_id);
+
+ netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X\n",
+ vid, port_mask);
+
+ return ret;
+}
+
+static int prueth_switchdev_vlans_add(struct prueth_emac *emac,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+ netdev_dbg(emac->ndev, "VID add vid:%u flags:%X\n",
+ vlan->vid, vlan->flags);
+
+ if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY))
+ return 0;
+
+ if (vlan->vid > 0xff)
+ return 0;
+
+ return prueth_switchdev_vlan_add(emac, untag, pvid, vlan->vid,
+ orig_dev);
+}
+
+static int prueth_switchdev_vlans_del(struct prueth_emac *emac,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ if (vlan->vid > 0xff)
+ return 0;
+
+ return prueth_switchdev_vlan_del(emac, vlan->vid,
+ vlan->obj.orig_dev);
+}
+
+static int prueth_switchdev_mdb_add(struct prueth_emac *emac,
+ struct switchdev_obj_port_mdb *mdb)
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ u8 port_mask, fid_c2;
+ bool cpu_port;
+ int err;
+
+ cpu_port = netif_is_bridge_master(orig_dev);
+
+ if (cpu_port)
+ port_mask = BIT(PRUETH_PORT_HOST);
+ else
+ port_mask = BIT(emac->port_id);
+
+ fid_c2 = icssg_fdb_lookup(emac, mdb->addr, mdb->vid);
+
+ err = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, fid_c2 | port_mask, true);
+ netdev_dbg(emac->ndev, "MDB add vid %u:%pM ports: %X\n",
+ mdb->vid, mdb->addr, port_mask);
+
+ return err;
+}
+
+static int prueth_switchdev_mdb_del(struct prueth_emac *emac,
+ struct switchdev_obj_port_mdb *mdb)
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ int del_mask, ret, fid_c2;
+ bool cpu_port;
+
+ cpu_port = netif_is_bridge_master(orig_dev);
+
+ if (cpu_port)
+ del_mask = BIT(PRUETH_PORT_HOST);
+ else
+ del_mask = BIT(emac->port_id);
+
+ fid_c2 = icssg_fdb_lookup(emac, mdb->addr, mdb->vid);
+
+ if (fid_c2 & ~del_mask)
+ ret = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, fid_c2 & ~del_mask, true);
+ else
+ ret = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, 0, false);
+
+ netdev_dbg(emac->ndev, "MDB del vid %u:%pM ports: %X\n",
+ mdb->vid, mdb->addr, del_mask);
+
+ return ret;
+}
+
+static int prueth_switchdev_obj_add(struct net_device *ndev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, emac->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = prueth_switchdev_vlans_add(emac, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = prueth_switchdev_mdb_add(emac, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int prueth_switchdev_obj_del(struct net_device *ndev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct prueth_emac *emac = netdev_priv(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, emac->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = prueth_switchdev_vlans_del(emac, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = prueth_switchdev_mdb_del(emac, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int prueth_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ prueth_dev_check,
+ prueth_switchdev_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ prueth_dev_check,
+ prueth_switchdev_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ prueth_dev_check,
+ prueth_switchdev_attr_set);
+ return notifier_from_errno(err);
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+int prueth_switchdev_register_notifiers(struct prueth *prueth)
+{
+ int ret = 0;
+
+ prueth->prueth_switchdev_nb.notifier_call = &prueth_switchdev_event;
+ ret = register_switchdev_notifier(&prueth->prueth_switchdev_nb);
+ if (ret) {
+		dev_err(prueth->dev, "register switchdev notifier failed ret:%d\n",
+			ret);
+ return ret;
+ }
+
+ prueth->prueth_switchdev_bl_nb.notifier_call = &prueth_switchdev_blocking_event;
+ ret = register_switchdev_blocking_notifier(&prueth->prueth_switchdev_bl_nb);
+ if (ret) {
+		dev_err(prueth->dev, "register switchdev blocking notifier failed ret:%d\n",
+			ret);
+ unregister_switchdev_notifier(&prueth->prueth_switchdev_nb);
+ }
+
+ return ret;
+}
+
+void prueth_switchdev_unregister_notifiers(struct prueth *prueth)
+{
+ unregister_switchdev_blocking_notifier(&prueth->prueth_switchdev_bl_nb);
+ unregister_switchdev_notifier(&prueth->prueth_switchdev_nb);
+}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_switchdev.h b/drivers/net/ethernet/ti/icssg/icssg_switchdev.h
new file mode 100644
index 000000000000..0e64e7760a00
--- /dev/null
+++ b/drivers/net/ethernet/ti/icssg/icssg_switchdev.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/
+ */
+#ifndef __NET_TI_ICSSG_SWITCHDEV_H
+#define __NET_TI_ICSSG_SWITCHDEV_H
+
+#include "icssg_prueth.h"
+
+int prueth_switchdev_register_notifiers(struct prueth *prueth);
+void prueth_switchdev_unregister_notifiers(struct prueth *prueth);
+bool prueth_dev_check(const struct net_device *ndev);
+
+#endif /* __NET_TI_ICSSG_SWITCHDEV_H */
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
index 05cc7aab1ec8..739bae8e11ee 100644
--- a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
@@ -22,6 +22,7 @@ struct k3_cppi_desc_pool {
size_t mem_size;
size_t num_desc;
struct gen_pool *gen_pool;
+ void **desc_infos;
};
void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
@@ -37,7 +38,11 @@ void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
pool->dma_addr);
+ kfree(pool->desc_infos);
+
gen_pool_destroy(pool->gen_pool); /* frees pool->name */
+
+ kfree(pool);
}
EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_destroy);
@@ -50,7 +55,7 @@ k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
const char *pool_name = NULL;
int ret = -ENOMEM;
- pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
if (!pool)
return ERR_PTR(ret);
@@ -62,18 +67,21 @@ k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
pool_name = kstrdup_const(name ? name : dev_name(pool->dev),
GFP_KERNEL);
if (!pool_name)
- return ERR_PTR(-ENOMEM);
+ goto gen_pool_create_fail;
pool->gen_pool = gen_pool_create(ilog2(pool->desc_size), -1);
if (!pool->gen_pool) {
- ret = -ENOMEM;
- dev_err(pool->dev, "pool create failed %d\n", ret);
kfree_const(pool_name);
goto gen_pool_create_fail;
}
pool->gen_pool->name = pool_name;
+ pool->desc_infos = kcalloc(pool->num_desc,
+ sizeof(*pool->desc_infos), GFP_KERNEL);
+ if (!pool->desc_infos)
+ goto gen_pool_desc_infos_alloc_fail;
+
pool->cpumem = dma_alloc_coherent(pool->dev, pool->mem_size,
&pool->dma_addr, GFP_KERNEL);
@@ -94,9 +102,11 @@ gen_pool_add_virt_fail:
dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
pool->dma_addr);
dma_alloc_fail:
+ kfree(pool->desc_infos);
+gen_pool_desc_infos_alloc_fail:
gen_pool_destroy(pool->gen_pool); /* frees pool->name */
gen_pool_create_fail:
- devm_kfree(pool->dev, pool);
+ kfree(pool);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_create_name);
@@ -132,5 +142,31 @@ size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool)
}
EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_avail);
+size_t k3_cppi_desc_pool_desc_size(const struct k3_cppi_desc_pool *pool)
+{
+ return pool->desc_size;
+}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_desc_size);
+
+void *k3_cppi_desc_pool_cpuaddr(const struct k3_cppi_desc_pool *pool)
+{
+ return pool->cpumem;
+}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_cpuaddr);
+
+void k3_cppi_desc_pool_desc_info_set(struct k3_cppi_desc_pool *pool,
+ int desc_idx, void *info)
+{
+ pool->desc_infos[desc_idx] = info;
+}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_desc_info_set);
+
+void *k3_cppi_desc_pool_desc_info(const struct k3_cppi_desc_pool *pool,
+ int desc_idx)
+{
+ return pool->desc_infos[desc_idx];
+}
+EXPORT_SYMBOL_GPL(k3_cppi_desc_pool_desc_info);
+
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TI K3 CPPI5 descriptors pool API");
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.h b/drivers/net/ethernet/ti/k3-cppi-desc-pool.h
index a7e3fa5e7b62..851d352b338b 100644
--- a/drivers/net/ethernet/ti/k3-cppi-desc-pool.h
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.h
@@ -26,5 +26,11 @@ k3_cppi_desc_pool_dma2virt(struct k3_cppi_desc_pool *pool, dma_addr_t dma);
void *k3_cppi_desc_pool_alloc(struct k3_cppi_desc_pool *pool);
void k3_cppi_desc_pool_free(struct k3_cppi_desc_pool *pool, void *addr);
size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool);
+size_t k3_cppi_desc_pool_desc_size(const struct k3_cppi_desc_pool *pool);
+void *k3_cppi_desc_pool_cpuaddr(const struct k3_cppi_desc_pool *pool);
+void k3_cppi_desc_pool_desc_info_set(struct k3_cppi_desc_pool *pool,
+ int desc_idx, void *info);
+void *k3_cppi_desc_pool_desc_info(const struct k3_cppi_desc_pool *pool,
+ int desc_idx);
#endif /* K3_CPPI_DESC_POOL_H_ */
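
The new desc_infos array gives every descriptor slot an opaque void * sidecar, so a driver can attach per-descriptor metadata and retrieve it by index without growing the hardware descriptor itself. A small standalone model of the idea (illustrative only, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct desc_pool {
	size_t num_desc;
	void **desc_infos;		/* one opaque slot per descriptor */
};

static struct desc_pool *pool_create(size_t num_desc)
{
	struct desc_pool *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;
	p->desc_infos = calloc(num_desc, sizeof(*p->desc_infos));
	if (!p->desc_infos) {
		free(p);
		return NULL;
	}
	p->num_desc = num_desc;
	return p;
}

static void pool_info_set(struct desc_pool *p, size_t idx, void *info)
{
	p->desc_infos[idx] = info;	/* like k3_cppi_desc_pool_desc_info_set() */
}

static void *pool_info_get(const struct desc_pool *p, size_t idx)
{
	return p->desc_infos[idx];	/* like k3_cppi_desc_pool_desc_info() */
}

int main(void)
{
	struct desc_pool *p = pool_create(8);
	int payload = 42;

	if (!p)
		return EXIT_FAILURE;
	pool_info_set(p, 3, &payload);	/* attach metadata to slot 3 */
	printf("slot 3 -> %d\n", *(int *)pool_info_get(p, 3));
	free(p->desc_infos);
	free(p);
	return EXIT_SUCCESS;
}
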
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 02cb6474f6dc..d286709ca3b9 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1999,7 +1999,7 @@ static int keystone_set_link_ksettings(struct net_device *ndev,
#if IS_ENABLED(CONFIG_TI_CPTS)
static int keystone_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct netcp_intf *netcp = netdev_priv(ndev);
struct gbe_intf *gbe_intf;
@@ -2027,7 +2027,7 @@ static int keystone_get_ts_info(struct net_device *ndev,
}
#else
static int keystone_get_ts_info(struct net_device *ndev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index da287ef65be7..00773f5e4d7e 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -20,6 +20,7 @@ config VIA_RHINE
tristate "VIA Rhine support"
depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP)
depends on PCI || ARCH_VT8500 || COMPILE_TEST
+ depends on HAS_IOPORT
depends on HAS_DMA
select CRC32
select MII
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 1c6b2a9bba08..55fff4d0d380 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2294,7 +2294,7 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
int ret = 0;
if (!netif_running(dev)) {
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
goto out_0;
}
@@ -2336,7 +2336,7 @@ static int velocity_change_mtu(struct net_device *dev, int new_mtu)
tmp_vptr->rx = rx;
tmp_vptr->tx = tx;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
velocity_init_registers(vptr, VELOCITY_INIT_COLD);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
index cc3bec42ed8e..abe5921dde02 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c
@@ -43,6 +43,11 @@ static const struct wx_stats wx_gstrings_stats[] = {
WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
};
+static const struct wx_stats wx_gstrings_fdir_stats[] = {
+ WX_STAT("fdir_match", stats.fdirmatch),
+ WX_STAT("fdir_miss", stats.fdirmiss),
+};
+
/* the driver allocates num_tx_queues and num_rx_queues symmetrically, so
* we set num_rx_queues equal to num_tx_queues. This is
* used because we do not have a good way to get the max number of
@@ -55,13 +60,17 @@ static const struct wx_stats wx_gstrings_stats[] = {
(WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \
(sizeof(struct wx_queue_stats) / sizeof(u64)))
#define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats)
+#define WX_FDIR_STATS_LEN ARRAY_SIZE(wx_gstrings_fdir_stats)
#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN)
int wx_get_sset_count(struct net_device *netdev, int sset)
{
+ struct wx *wx = netdev_priv(netdev);
+
switch (sset) {
case ETH_SS_STATS:
- return WX_STATS_LEN;
+ return (wx->mac.type == wx_mac_sp) ?
+ WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -70,6 +79,7 @@ EXPORT_SYMBOL(wx_get_sset_count);
void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
+ struct wx *wx = netdev_priv(netdev);
u8 *p = data;
int i;
@@ -77,6 +87,10 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
case ETH_SS_STATS:
for (i = 0; i < WX_GLOBAL_STATS_LEN; i++)
ethtool_puts(&p, wx_gstrings_stats[i].stat_string);
+ if (wx->mac.type == wx_mac_sp) {
+ for (i = 0; i < WX_FDIR_STATS_LEN; i++)
+ ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string);
+ }
for (i = 0; i < netdev->num_tx_queues; i++) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -96,7 +110,7 @@ void wx_get_ethtool_stats(struct net_device *netdev,
struct wx *wx = netdev_priv(netdev);
struct wx_ring *ring;
unsigned int start;
- int i, j;
+ int i, j, k;
char *p;
wx_update_stats(wx);
@@ -107,6 +121,13 @@ void wx_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ if (wx->mac.type == wx_mac_sp) {
+ for (k = 0; k < WX_FDIR_STATS_LEN; k++) {
+ p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset;
+ data[i++] = *(u64 *)p;
+ }
+ }
+
for (j = 0; j < netdev->num_tx_queues; j++) {
ring = wx->tx_ring[j];
if (!ring) {
@@ -172,17 +193,21 @@ EXPORT_SYMBOL(wx_get_pause_stats);
void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
{
+ unsigned int stats_len = WX_STATS_LEN;
struct wx *wx = netdev_priv(netdev);
+ if (wx->mac.type == wx_mac_sp)
+ stats_len += WX_FDIR_STATS_LEN;
+
strscpy(info->driver, wx->driver_name, sizeof(info->driver));
strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version));
strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info));
if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) {
- info->n_stats = WX_STATS_LEN -
+ info->n_stats = stats_len -
(WX_NUM_TX_QUEUES - wx->num_tx_queues) *
(sizeof(struct wx_queue_stats) / sizeof(u64)) * 2;
} else {
- info->n_stats = WX_STATS_LEN;
+ info->n_stats = stats_len;
}
}
EXPORT_SYMBOL(wx_get_drvinfo);
@@ -383,6 +408,9 @@ void wx_get_channels(struct net_device *dev,
/* record RSS queues */
ch->combined_count = wx->ring_feature[RING_F_RSS].indices;
+
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
+ ch->combined_count = wx->ring_feature[RING_F_FDIR].indices;
}
EXPORT_SYMBOL(wx_get_channels);
@@ -400,6 +428,9 @@ int wx_set_channels(struct net_device *dev,
if (count > wx_max_channels(wx))
return -EINVAL;
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
+ wx->ring_feature[RING_F_FDIR].limit = count;
+
wx->ring_feature[RING_F_RSS].limit = count;
return 0;
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
index 945c13d1a982..1bf9c38e4125 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c
@@ -1147,8 +1147,15 @@ static void wx_enable_rx(struct wx *wx)
static void wx_set_rxpba(struct wx *wx)
{
u32 rxpktsize, txpktsize, txpbthresh;
+ u32 pbsize = wx->mac.rx_pb_size;
- rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT;
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
+ if (test_bit(WX_FLAG_FDIR_HASH, wx->flags) ||
+ test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
+ pbsize -= 64; /* Default 64KB */
+ }
+
+ rxpktsize = pbsize << WX_RDB_PB_SZ_SHIFT;
wr32(wx, WX_RDB_PB_SZ(0), rxpktsize);
/* Only support an equally distributed Tx packet buffer strategy. */
@@ -1261,7 +1268,7 @@ static void wx_configure_port(struct wx *wx)
* Stops the receive data path and waits for the HW to internally empty
* the Rx security block
**/
-static int wx_disable_sec_rx_path(struct wx *wx)
+int wx_disable_sec_rx_path(struct wx *wx)
{
u32 secrx;
@@ -1271,6 +1278,7 @@ static int wx_disable_sec_rx_path(struct wx *wx)
return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY,
1000, 40000, false, wx, WX_RSC_ST);
}
+EXPORT_SYMBOL(wx_disable_sec_rx_path);
/**
* wx_enable_sec_rx_path - Enables the receive data path
@@ -1278,11 +1286,12 @@ static int wx_disable_sec_rx_path(struct wx *wx)
*
* Enables the receive data path.
**/
-static void wx_enable_sec_rx_path(struct wx *wx)
+void wx_enable_sec_rx_path(struct wx *wx)
{
wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0);
WX_WRITE_FLUSH(wx);
}
+EXPORT_SYMBOL(wx_enable_sec_rx_path);
static void wx_vlan_strip_control(struct wx *wx, bool enable)
{
@@ -1408,7 +1417,7 @@ int wx_change_mtu(struct net_device *netdev, int new_mtu)
{
struct wx *wx = netdev_priv(netdev);
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
wx_set_rx_buffer_len(wx);
return 0;
@@ -1499,6 +1508,13 @@ static void wx_configure_tx_ring(struct wx *wx,
txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT;
txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT;
+ ring->atr_count = 0;
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) &&
+ test_bit(WX_FLAG_FDIR_HASH, wx->flags))
+ ring->atr_sample_rate = wx->atr_sample_rate;
+ else
+ ring->atr_sample_rate = 0;
+
/* reinitialize tx_buffer_info */
memset(ring->tx_buffer_info, 0,
sizeof(struct wx_tx_buffer) * ring->count);
@@ -1732,7 +1748,9 @@ void wx_configure(struct wx *wx)
wx_set_rx_mode(wx->netdev);
wx_restore_vlan(wx);
- wx_enable_sec_rx_path(wx);
+
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
+ wx->configure_fdir(wx);
wx_configure_tx(wx);
wx_configure_rx(wx);
@@ -1958,6 +1976,10 @@ int wx_sw_init(struct wx *wx)
return -ENOMEM;
}
+ bitmap_zero(wx->state, WX_STATE_NBITS);
+ bitmap_zero(wx->flags, WX_PF_FLAGS_NBITS);
+ wx->misc_irq_domain = false;
+
return 0;
}
EXPORT_SYMBOL(wx_sw_init);
@@ -2331,6 +2353,11 @@ void wx_update_stats(struct wx *wx)
hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);
+ if (wx->mac.type == wx_mac_sp) {
+ hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH);
+ hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
+ }
+
for (i = 0; i < wx->mac.max_rx_queues; i++)
hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
}
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
index 9e219fa717a2..11fb33349482 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h
@@ -28,6 +28,8 @@ void wx_mac_set_default_filter(struct wx *wx, u8 *addr);
void wx_flush_sw_mac_table(struct wx *wx);
int wx_set_mac(struct net_device *netdev, void *p);
void wx_disable_rx(struct wx *wx);
+int wx_disable_sec_rx_path(struct wx *wx);
+void wx_enable_sec_rx_path(struct wx *wx);
void wx_set_rx_mode(struct net_device *netdev);
int wx_change_mtu(struct net_device *netdev, int new_mtu);
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
index 6fae161cbcb8..1eecba984f3b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c
@@ -148,10 +148,11 @@ static struct wx_dec_ptype wx_ptype_lookup[256] = {
[0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4),
};
-static struct wx_dec_ptype wx_decode_ptype(const u8 ptype)
+struct wx_dec_ptype wx_decode_ptype(const u8 ptype)
{
return wx_ptype_lookup[ptype];
}
+EXPORT_SYMBOL(wx_decode_ptype);
/* wx_test_staterr - tests bits in Rx descriptor status and error fields */
static __le32 wx_test_staterr(union wx_rx_desc *rx_desc,
@@ -1453,6 +1454,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first,
static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
struct wx_ring *tx_ring)
{
+ struct wx *wx = netdev_priv(tx_ring->netdev);
u16 count = TXD_USE_COUNT(skb_headlen(skb));
struct wx_tx_buffer *first;
u8 hdr_len = 0, ptype;
@@ -1498,6 +1500,10 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb,
goto out_drop;
else if (!tso)
wx_tx_csum(tx_ring, first, ptype);
+
+ if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate)
+ wx->atr(tx_ring, first, ptype);
+
wx_tx_map(tx_ring, first, hdr_len);
return NETDEV_TX_OK;
@@ -1574,8 +1580,27 @@ static void wx_set_rss_queues(struct wx *wx)
f = &wx->ring_feature[RING_F_RSS];
f->indices = f->limit;
- wx->num_rx_queues = f->limit;
- wx->num_tx_queues = f->limit;
+ if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
+ goto out;
+
+ clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
+
+ /* Use Flow Director in addition to RSS to ensure the best
+ * distribution of flows across cores, even when an FDIR flow
+ * isn't matched.
+ */
+ if (f->indices > 1) {
+ f = &wx->ring_feature[RING_F_FDIR];
+
+ f->indices = f->limit;
+
+ if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
+ set_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ }
+
+out:
+ wx->num_rx_queues = f->indices;
+ wx->num_tx_queues = f->indices;
}
static void wx_set_num_queues(struct wx *wx)
@@ -1674,18 +1699,19 @@ static int wx_set_interrupt_capability(struct wx *wx)
/* minmum one for queue, one for misc*/
nvecs = 1;
nvecs = pci_alloc_irq_vectors(pdev, nvecs,
- nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+ nvecs, PCI_IRQ_MSI | PCI_IRQ_INTX);
if (nvecs == 1) {
if (pdev->msi_enabled)
wx_err(wx, "Fallback to MSI.\n");
else
- wx_err(wx, "Fallback to LEGACY.\n");
+ wx_err(wx, "Fallback to INTx.\n");
} else {
- wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. Error: %d\n", nvecs);
+ wx_err(wx, "Failed to allocate MSI/INTx interrupts. Error: %d\n", nvecs);
return nvecs;
}
pdev->irq = pci_irq_vector(pdev, 0);
+ wx->num_q_vectors = 1;
return 0;
}
@@ -1996,7 +2022,8 @@ void wx_free_irq(struct wx *wx)
int vector;
if (!(pdev->msix_enabled)) {
- free_irq(pdev->irq, wx);
+ if (!wx->misc_irq_domain)
+ free_irq(pdev->irq, wx);
return;
}
@@ -2011,7 +2038,7 @@ void wx_free_irq(struct wx *wx)
free_irq(entry->vector, q_vector);
}
- if (wx->mac.type == wx_mac_em)
+ if (!wx->misc_irq_domain)
free_irq(wx->msix_entry->vector, wx);
}
EXPORT_SYMBOL(wx_free_irq);
@@ -2026,6 +2053,9 @@ int wx_setup_isb_resources(struct wx *wx)
{
struct pci_dev *pdev = wx->pdev;
+ if (wx->isb_mem)
+ return 0;
+
wx->isb_mem = dma_alloc_coherent(&pdev->dev,
sizeof(u32) * 4,
&wx->isb_dma,
@@ -2127,7 +2157,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector)
* wx_configure_vectors - Configure vectors for hardware
* @wx: board private structure
*
- * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY
+ * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/INTx
* interrupts.
**/
void wx_configure_vectors(struct wx *wx)
@@ -2385,7 +2415,6 @@ static void wx_free_all_tx_resources(struct wx *wx)
void wx_free_resources(struct wx *wx)
{
- wx_free_isb_resources(wx);
wx_free_all_rx_resources(wx);
wx_free_all_tx_resources(wx);
}
@@ -2680,6 +2709,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
{
netdev_features_t changed = netdev->features ^ features;
struct wx *wx = netdev_priv(netdev);
+ bool need_reset = false;
if (features & NETIF_F_RXHASH) {
wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN,
@@ -2690,15 +2720,93 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features)
wx->rss_enabled = false;
}
- if (changed &
- (NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_STAG_RX))
+ netdev->features = features;
+
+ if (wx->mac.type == wx_mac_sp && changed & NETIF_F_HW_VLAN_CTAG_RX)
+ wx->do_reset(netdev);
+ else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER))
wx_set_rx_mode(netdev);
- return 1;
+ if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)))
+ return 0;
+
+ /* Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ switch (features & NETIF_F_NTUPLE) {
+ case NETIF_F_NTUPLE:
+ /* turn off ATR, enable perfect filters and reset */
+ if (!(test_and_set_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
+ need_reset = true;
+
+ clear_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ break;
+ default:
+ /* turn off perfect filters, enable ATR and reset */
+ if (test_and_clear_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
+ need_reset = true;
+
+ /* We cannot enable ATR if RSS is disabled */
+ if (wx->ring_feature[RING_F_RSS].limit <= 1)
+ break;
+
+ set_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ break;
+ }
+
+ if (need_reset)
+ wx->do_reset(netdev);
+
+ return 0;
}
EXPORT_SYMBOL(wx_set_features);
+#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_STAG_RX)
+
+#define NETIF_VLAN_INSERTION_FEATURES (NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_HW_VLAN_STAG_TX)
+
+#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_HW_VLAN_STAG_FILTER)
+
+netdev_features_t wx_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = netdev->features ^ features;
+ struct wx *wx = netdev_priv(netdev);
+
+ if (changed & NETIF_VLAN_STRIPPING_FEATURES) {
+ if ((features & NETIF_VLAN_STRIPPING_FEATURES) != NETIF_VLAN_STRIPPING_FEATURES &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_STRIPPING_FEATURES;
+ features |= netdev->features & NETIF_VLAN_STRIPPING_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN stripping must be either both on or both off.");
+ }
+ }
+
+ if (changed & NETIF_VLAN_INSERTION_FEATURES) {
+ if ((features & NETIF_VLAN_INSERTION_FEATURES) != NETIF_VLAN_INSERTION_FEATURES &&
+ (features & NETIF_VLAN_INSERTION_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_INSERTION_FEATURES;
+ features |= netdev->features & NETIF_VLAN_INSERTION_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN insertion must be either both on or both off.");
+ }
+ }
+
+ if (changed & NETIF_VLAN_FILTERING_FEATURES) {
+ if ((features & NETIF_VLAN_FILTERING_FEATURES) != NETIF_VLAN_FILTERING_FEATURES &&
+ (features & NETIF_VLAN_FILTERING_FEATURES) != 0) {
+ features &= ~NETIF_VLAN_FILTERING_FEATURES;
+ features |= netdev->features & NETIF_VLAN_FILTERING_FEATURES;
+ wx_err(wx, "802.1Q and 802.1ad VLAN filtering must be either both on or both off.");
+ }
+ }
+
+ return features;
+}
+EXPORT_SYMBOL(wx_fix_features);
+
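
wx_fix_features() enforces an all-or-nothing rule per CTAG/STAG pair: if the requested state is mixed (exactly one bit of a pair set), the pair reverts to its previous state. The core check is plain mask arithmetic; a minimal standalone sketch (illustrative bit names):

#include <stdio.h>
#include <stdint.h>

#define F_CTAG_RX	(1u << 0)
#define F_STAG_RX	(1u << 1)
#define PAIR		(F_CTAG_RX | F_STAG_RX)

/* Keep the old pair state whenever the requested pair state is mixed. */
static uint32_t fix_pair(uint32_t old, uint32_t req)
{
	uint32_t got = req & PAIR;

	if (got != 0 && got != PAIR) {	/* exactly one of the two bits */
		req &= ~PAIR;
		req |= old & PAIR;	/* revert to the previous setting */
	}
	return req;
}

int main(void)
{
	/* Requesting only CTAG stripping is rejected; both stay on. */
	printf("%#x\n", fix_pair(PAIR, F_CTAG_RX));	/* prints 0x3 */
	/* Turning both off together is allowed. */
	printf("%#x\n", fix_pair(PAIR, 0));		/* prints 0 */
	return 0;
}
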
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring)
{
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
index ec909e876720..fdeb0c315b75 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h
@@ -7,6 +7,7 @@
#ifndef _WX_LIB_H_
#define _WX_LIB_H_
+struct wx_dec_ptype wx_decode_ptype(const u8 ptype);
void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count);
u16 wx_desc_unused(struct wx_ring *ring);
netdev_tx_t wx_xmit_frame(struct sk_buff *skb,
@@ -30,6 +31,8 @@ int wx_setup_resources(struct wx *wx);
void wx_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats);
int wx_set_features(struct net_device *netdev, netdev_features_t features);
+netdev_features_t wx_fix_features(struct net_device *netdev,
+ netdev_features_t features);
void wx_set_ring(struct wx *wx, u32 new_tx_count,
u32 new_rx_count, struct wx_ring *temp_ring);
diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h
index 1fdeb464d5f4..1d57b047817b 100644
--- a/drivers/net/ethernet/wangxun/libwx/wx_type.h
+++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h
@@ -157,6 +157,8 @@
#define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21)
#define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22)
#define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23)
+#define WX_RDB_FDIR_MATCH 0x19558
+#define WX_RDB_FDIR_MISS 0x1955C
/******************************* PSR Registers *******************************/
/* psr control */
@@ -503,6 +505,34 @@ enum WX_MSCA_CMD_value {
#define WX_PTYPE_TYP_TCP 0x04
#define WX_PTYPE_TYP_SCTP 0x05
+/* Packet type non-ip values */
+enum wx_l2_ptypes {
+ WX_PTYPE_L2_ABORTED = (WX_PTYPE_PKT_MAC),
+ WX_PTYPE_L2_MAC = (WX_PTYPE_PKT_MAC | WX_PTYPE_TYP_MAC),
+
+ WX_PTYPE_L2_IPV4_FRAG = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_IPFRAG),
+ WX_PTYPE_L2_IPV4 = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_IP),
+ WX_PTYPE_L2_IPV4_UDP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_UDP),
+ WX_PTYPE_L2_IPV4_TCP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_TCP),
+ WX_PTYPE_L2_IPV4_SCTP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_SCTP),
+ WX_PTYPE_L2_IPV6_FRAG = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+ WX_PTYPE_TYP_IPFRAG),
+ WX_PTYPE_L2_IPV6 = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+ WX_PTYPE_TYP_IP),
+ WX_PTYPE_L2_IPV6_UDP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+ WX_PTYPE_TYP_UDP),
+ WX_PTYPE_L2_IPV6_TCP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+ WX_PTYPE_TYP_TCP),
+ WX_PTYPE_L2_IPV6_SCTP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 |
+ WX_PTYPE_TYP_SCTP),
+
+ WX_PTYPE_L2_TUN4_MAC = (WX_PTYPE_TUN_IPV4 | WX_PTYPE_PKT_IGM),
+ WX_PTYPE_L2_TUN6_MAC = (WX_PTYPE_TUN_IPV6 | WX_PTYPE_PKT_IGM),
+};
+
+#define WX_PTYPE_PKT(_pt) ((_pt) & 0x30)
+#define WX_PTYPE_TYPL4(_pt) ((_pt) & 0x07)
+
#define WX_RXD_PKTTYPE(_rxd) \
((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF)
#define WX_RXD_IPV6EX(_rxd) \
@@ -552,6 +582,9 @@ enum wx_tx_flags {
WX_TX_FLAGS_OUTER_IPV4 = 0x100,
WX_TX_FLAGS_LINKSEC = 0x200,
WX_TX_FLAGS_IPSEC = 0x400,
+
+ /* software defined flags */
+ WX_TX_FLAGS_SW_VLAN = 0x40,
};
/* VLAN info */
@@ -900,7 +933,13 @@ struct wx_ring {
*/
u16 next_to_use;
u16 next_to_clean;
- u16 next_to_alloc;
+ union {
+ u16 next_to_alloc;
+ struct {
+ u8 atr_sample_rate;
+ u8 atr_count;
+ };
+ };
struct wx_queue_stats stats;
struct u64_stats_sync syncp;
@@ -939,6 +978,7 @@ struct wx_ring_feature {
enum wx_ring_f_enum {
RING_F_NONE = 0,
RING_F_RSS,
+ RING_F_FDIR,
RING_F_ARRAY_SIZE /* must be last in enum set */
};
@@ -980,10 +1020,26 @@ struct wx_hw_stats {
u64 crcerrs;
u64 rlec;
u64 qmprc;
+ u64 fdirmatch;
+ u64 fdirmiss;
+};
+
+enum wx_state {
+ WX_STATE_RESETTING,
+ WX_STATE_NBITS, /* must be last */
+};
+
+enum wx_pf_flags {
+ WX_FLAG_FDIR_CAPABLE,
+ WX_FLAG_FDIR_HASH,
+ WX_FLAG_FDIR_PERFECT,
+ WX_PF_FLAGS_NBITS /* must be last */
};
struct wx {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ DECLARE_BITMAP(state, WX_STATE_NBITS);
+ DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS);
void *priv;
u8 __iomem *hw_addr;
@@ -1053,6 +1109,7 @@ struct wx {
dma_addr_t isb_dma;
u32 *isb_mem;
u32 isb_tag[WX_ISB_MAX];
+ bool misc_irq_domain;
#define WX_MAX_RETA_ENTRIES 128
#define WX_RSS_INDIR_TBL_MAX 64
@@ -1071,6 +1128,11 @@ struct wx {
u64 hw_csum_rx_good;
u64 hw_csum_rx_error;
u64 alloc_rx_buff_failed;
+
+ u32 atr_sample_rate;
+ void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype);
+ void (*configure_fdir)(struct wx *wx);
+ void (*do_reset)(struct net_device *netdev);
};
#define WX_INTR_ALL (~0ULL)
@@ -1131,4 +1193,19 @@ static inline struct wx *phylink_to_wx(struct phylink_config *config)
return container_of(config, struct wx, phylink_config);
}
+static inline int wx_set_state_reset(struct wx *wx)
+{
+ u8 timeout = 50;
+
+ while (test_and_set_bit(WX_STATE_RESETTING, wx->state)) {
+ timeout--;
+ if (!timeout)
+ return -EBUSY;
+
+ usleep_range(1000, 2000);
+ }
+
+ return 0;
+}
+
#endif /* _WX_TYPE_H_ */
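
wx_set_state_reset() serializes reconfiguration: a caller loops on test_and_set_bit() until it owns the RESETTING bit (or gives up with -EBUSY after about 50 attempts), and must clear the bit on every exit path, which is what the clear_reset labels in the reworked ngbe/txgbe set_ringparam() handlers below do. A compact userspace model of the same guard (illustrative; C11 atomics standing in for the kernel bitops):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag resetting = ATOMIC_FLAG_INIT;

/* Try a bounded number of times to claim the guard, like the
 * 50-iteration usleep_range() loop in wx_set_state_reset(). */
static int claim_reset(void)
{
	for (int timeout = 50; timeout; timeout--) {
		if (!atomic_flag_test_and_set(&resetting))
			return 0;	/* we own the guard */
		/* a real caller would sleep 1-2 ms here */
	}
	return -1;			/* -EBUSY in the kernel version */
}

int main(void)
{
	if (claim_reset())
		return 1;
	/* ... reconfigure rings ... */
	atomic_flag_clear(&resetting);	/* the clear_reset: step */
	return 0;
}
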
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
index 786a652ae64f..e868f7ef4920 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c
@@ -37,9 +37,9 @@ static int ngbe_set_wol(struct net_device *netdev,
wx->wol = 0;
if (wol->wolopts & WAKE_MAGIC)
wx->wol = WX_PSR_WKUP_CTL_MAG;
- netdev->wol_enabled = !!(wx->wol);
+ netdev->ethtool->wol_enabled = !!(wx->wol);
wr32(wx, WX_PSR_WKUP_CTL, wx->wol);
- device_set_wakeup_enable(&pdev->dev, netdev->wol_enabled);
+ device_set_wakeup_enable(&pdev->dev, netdev->ethtool->wol_enabled);
return 0;
}
@@ -52,7 +52,7 @@ static int ngbe_set_ringparam(struct net_device *netdev,
struct wx *wx = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
struct wx_ring *temp_ring;
- int i;
+ int i, err = 0;
new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
@@ -64,6 +64,10 @@ static int ngbe_set_ringparam(struct net_device *netdev,
new_rx_count == wx->rx_ring_count)
return 0;
+ err = wx_set_state_reset(wx);
+ if (err)
+ return err;
+
if (!netif_running(wx->netdev)) {
for (i = 0; i < wx->num_tx_queues; i++)
wx->tx_ring[i]->count = new_tx_count;
@@ -72,14 +76,16 @@ static int ngbe_set_ringparam(struct net_device *netdev,
wx->tx_ring_count = new_tx_count;
wx->rx_ring_count = new_rx_count;
- return 0;
+ goto clear_reset;
}
/* allocate temporary buffer to store rings in */
i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
- if (!temp_ring)
- return -ENOMEM;
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
ngbe_down(wx);
@@ -89,7 +95,9 @@ static int ngbe_set_ringparam(struct net_device *netdev,
wx_configure(wx);
ngbe_up(wx);
- return 0;
+clear_reset:
+ clear_bit(WX_STATE_RESETTING, wx->state);
+ return err;
}
static int ngbe_set_channels(struct net_device *dev,
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
index fdd6b4f70b7a..53aeae2f884b 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c
@@ -387,6 +387,7 @@ err_dis_phy:
err_free_irq:
wx_free_irq(wx);
err_free_resources:
+ wx_free_isb_resources(wx);
wx_free_resources(wx);
return err;
}
@@ -408,6 +409,7 @@ static int ngbe_close(struct net_device *netdev)
ngbe_down(wx);
wx_free_irq(wx);
+ wx_free_isb_resources(wx);
wx_free_resources(wx);
phylink_disconnect_phy(wx->phylink);
wx_control_hw(wx, false);
@@ -499,6 +501,7 @@ static const struct net_device_ops ngbe_netdev_ops = {
.ndo_start_xmit = wx_xmit_frame,
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
+ .ndo_fix_features = wx_fix_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
@@ -649,7 +652,7 @@ static int ngbe_probe(struct pci_dev *pdev,
if (wx->wol_hw_supported)
wx->wol = NGBE_PSR_WKUP_CTL_MAG;
- netdev->wol_enabled = !!(wx->wol);
+ netdev->ethtool->wol_enabled = !!(wx->wol);
wr32(wx, NGBE_PSR_WKUP_CTL, wx->wol);
device_set_wakeup_enable(&pdev->dev, wx->wol);
diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
index ec54b18c5fe7..a5e9b779c44d 100644
--- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
+++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c
@@ -124,8 +124,12 @@ static int ngbe_phylink_init(struct wx *wx)
MAC_SYM_PAUSE | MAC_ASYM_PAUSE;
config->mac_managed_pm = true;
- phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
- __set_bit(PHY_INTERFACE_MODE_RGMII_ID, config->supported_interfaces);
+	/* The MAC can only add the Tx delay and it cannot be modified.
+	 * So just disable the TX delay in the PHY, and it does not matter
+	 * to the internal PHY.
+	 */
+ phy_mode = PHY_INTERFACE_MODE_RGMII_RXID;
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, config->supported_interfaces);
phylink = phylink_create(config, NULL, phy_mode, &ngbe_mac_ops);
if (IS_ERR(phylink))
diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile
index 42718875277c..f74576fe7062 100644
--- a/drivers/net/ethernet/wangxun/txgbe/Makefile
+++ b/drivers/net/ethernet/wangxun/txgbe/Makefile
@@ -10,4 +10,5 @@ txgbe-objs := txgbe_main.o \
txgbe_hw.o \
txgbe_phy.o \
txgbe_irq.o \
+ txgbe_fdir.o \
txgbe_ethtool.o
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
index db675512ce4d..d98314b26c19 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c
@@ -9,6 +9,7 @@
#include "../libwx/wx_type.h"
#include "../libwx/wx_lib.h"
#include "txgbe_type.h"
+#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"
static int txgbe_set_ringparam(struct net_device *netdev,
@@ -19,7 +20,7 @@ static int txgbe_set_ringparam(struct net_device *netdev,
struct wx *wx = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
struct wx_ring *temp_ring;
- int i;
+ int i, err = 0;
new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE);
@@ -31,6 +32,10 @@ static int txgbe_set_ringparam(struct net_device *netdev,
new_rx_count == wx->rx_ring_count)
return 0;
+ err = wx_set_state_reset(wx);
+ if (err)
+ return err;
+
if (!netif_running(wx->netdev)) {
for (i = 0; i < wx->num_tx_queues; i++)
wx->tx_ring[i]->count = new_tx_count;
@@ -39,14 +44,16 @@ static int txgbe_set_ringparam(struct net_device *netdev,
wx->tx_ring_count = new_tx_count;
wx->rx_ring_count = new_rx_count;
- return 0;
+ goto clear_reset;
}
/* allocate temporary buffer to store rings in */
i = max_t(int, wx->num_tx_queues, wx->num_rx_queues);
temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL);
- if (!temp_ring)
- return -ENOMEM;
+ if (!temp_ring) {
+ err = -ENOMEM;
+ goto clear_reset;
+ }
txgbe_down(wx);
@@ -55,7 +62,9 @@ static int txgbe_set_ringparam(struct net_device *netdev,
txgbe_up(wx);
- return 0;
+clear_reset:
+ clear_bit(WX_STATE_RESETTING, wx->state);
+ return err;
}
static int txgbe_set_channels(struct net_device *dev,
@@ -71,6 +80,430 @@ static int txgbe_set_channels(struct net_device *dev,
return txgbe_setup_tc(dev, netdev_get_num_tc(dev));
}
+static int txgbe_get_ethtool_fdir_entry(struct txgbe *txgbe,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ union txgbe_atr_input *mask = &txgbe->fdir_mask;
+ struct txgbe_fdir_filter *rule = NULL;
+ struct hlist_node *node;
+
+ /* report total rule count */
+ cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2;
+
+ hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
+ fdir_node) {
+ if (fsp->location <= rule->sw_idx)
+ break;
+ }
+
+ if (!rule || fsp->location != rule->sw_idx)
+ return -EINVAL;
+
+ /* set flow type field */
+ switch (rule->filter.formatted.flow_type) {
+ case TXGBE_ATR_FLOW_TYPE_TCPV4:
+ fsp->flow_type = TCP_V4_FLOW;
+ break;
+ case TXGBE_ATR_FLOW_TYPE_UDPV4:
+ fsp->flow_type = UDP_V4_FLOW;
+ break;
+ case TXGBE_ATR_FLOW_TYPE_SCTPV4:
+ fsp->flow_type = SCTP_V4_FLOW;
+ break;
+ case TXGBE_ATR_FLOW_TYPE_IPV4:
+ fsp->flow_type = IP_USER_FLOW;
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
+ fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
+ fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
+ fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
+ fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
+ fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
+ fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
+ fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
+ fsp->flow_type |= FLOW_EXT;
+
+ /* record action */
+ if (rule->action == TXGBE_RDB_FDIR_DROP_QUEUE)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->action;
+
+ return 0;
+}
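
The lookup above reports a single rule back through the ethtool rxnfc interface. For context, a minimal user-space sketch of reading a rule back with the classic SIOCETHTOOL ioctl might look as follows (the interface name "eth0" and rule location 0 are assumptions; error handling is trimmed):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

int main(void)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_GRXCLSRULE };
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	nfc.fs.location = 0;			/* assumed rule location */
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&nfc;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("flow_type=%u ring_cookie=%llu\n", nfc.fs.flow_type,
		       (unsigned long long)nfc.fs.ring_cookie);
	close(fd);
	return 0;
}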
+
+static int txgbe_get_ethtool_fdir_all(struct txgbe *txgbe,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct txgbe_fdir_filter *rule;
+ struct hlist_node *node;
+ int cnt = 0;
+
+ /* report total rule count */
+ cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2;
+
+ hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
+ fdir_node) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+ rule_locs[cnt] = rule->sw_idx;
+ cnt++;
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct wx *wx = netdev_priv(dev);
+ struct txgbe *txgbe = wx->priv;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = wx->num_rx_queues;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = txgbe->fdir_filter_count;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = txgbe_get_ethtool_fdir_entry(txgbe, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = txgbe_get_ethtool_fdir_all(txgbe, cmd, (u32 *)rule_locs);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int txgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
+ u8 *flow_type)
+{
+ switch (fsp->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case UDP_V4_FLOW:
+ *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ case SCTP_V4_FLOW:
+ *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
+ break;
+ case IP_USER_FLOW:
+ switch (fsp->h_u.usr_ip4_spec.proto) {
+ case IPPROTO_TCP:
+ *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
+ break;
+ case IPPROTO_UDP:
+ *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4;
+ break;
+ case IPPROTO_SCTP:
+ *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4;
+ break;
+ case 0:
+ if (!fsp->m_u.usr_ip4_spec.proto) {
+ *flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
+ break;
+ }
+ fallthrough;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool txgbe_match_ethtool_fdir_entry(struct txgbe *txgbe,
+ struct txgbe_fdir_filter *input)
+{
+ struct txgbe_fdir_filter *rule = NULL;
+ struct hlist_node *node2;
+
+ hlist_for_each_entry_safe(rule, node2, &txgbe->fdir_filter_list,
+ fdir_node) {
+ if (rule->filter.formatted.bkt_hash ==
+ input->filter.formatted.bkt_hash &&
+ rule->action == input->action) {
+ wx_dbg(txgbe->wx, "FDIR entry already exists\n");
+ return true;
+ }
+ }
+ return false;
+}
+
+static int txgbe_update_ethtool_fdir_entry(struct txgbe *txgbe,
+ struct txgbe_fdir_filter *input,
+ u16 sw_idx)
+{
+ struct hlist_node *node = NULL, *parent = NULL;
+ struct txgbe_fdir_filter *rule;
+ struct wx *wx = txgbe->wx;
+ bool deleted = false;
+ int err;
+
+ hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list,
+ fdir_node) {
+ /* hash found, or no matching entry */
+ if (rule->sw_idx >= sw_idx)
+ break;
+ parent = node;
+ }
+
+ /* if there is an old rule occupying our place, remove it */
+ if (rule && rule->sw_idx == sw_idx) {
+ /* hardware filters are only configured when the interface is up,
+ * and we should not issue filter commands while the interface
+ * is down
+ */
+ if (netif_running(wx->netdev) &&
+ (!input || rule->filter.formatted.bkt_hash !=
+ input->filter.formatted.bkt_hash)) {
+ err = txgbe_fdir_erase_perfect_filter(wx,
+ &rule->filter,
+ sw_idx);
+ if (err)
+ return -EINVAL;
+ }
+
+ hlist_del(&rule->fdir_node);
+ kfree(rule);
+ txgbe->fdir_filter_count--;
+ deleted = true;
+ }
+
+ /* If we weren't given an input, then this was a request to delete a
+ * filter. We should return -EINVAL if the filter wasn't found, but
+ * return 0 if the rule was successfully deleted.
+ */
+ if (!input)
+ return deleted ? 0 : -EINVAL;
+
+ /* initialize node and set software index */
+ INIT_HLIST_NODE(&input->fdir_node);
+
+ /* add filter to the list */
+ if (parent)
+ hlist_add_behind(&input->fdir_node, parent);
+ else
+ hlist_add_head(&input->fdir_node,
+ &txgbe->fdir_filter_list);
+
+ /* update counts */
+ txgbe->fdir_filter_count++;
+
+ return 0;
+}
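
txgbe_update_ethtool_fdir_entry() keeps fdir_filter_list sorted by sw_idx, so the lookup and delete paths can stop at the first index that is not smaller than the target. A standalone sketch of the same insert-or-replace invariant over a plain singly linked list (all names here are hypothetical, not the driver's):

#include <stdlib.h>

struct rule {
	struct rule *next;
	unsigned short sw_idx;
};

/* Insert 'in' keeping the list sorted by sw_idx; replace an existing
 * rule with the same index. Returns 1 on replace, 0 on plain insert.
 */
int insert_or_replace(struct rule **head, struct rule *in)
{
	struct rule **pp = head;

	while (*pp && (*pp)->sw_idx < in->sw_idx)
		pp = &(*pp)->next;	/* stop at first idx >= in->sw_idx */

	if (*pp && (*pp)->sw_idx == in->sw_idx) {
		struct rule *old = *pp;	/* replace the old occupant */

		in->next = old->next;
		*pp = in;
		free(old);
		return 1;
	}
	in->next = *pp;			/* insert, order preserved */
	*pp = in;
	return 0;
}

int main(void)
{
	struct rule *head = NULL;
	struct rule *r = calloc(1, sizeof(*r));

	if (!r)
		return 1;
	r->sw_idx = 5;
	return insert_or_replace(&head, r) ? 1 : 0;
}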
+
+static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct txgbe_fdir_filter *input;
+ union txgbe_atr_input mask;
+ struct wx *wx = txgbe->wx;
+ int err = -EINVAL;
+ u16 ptype = 0;
+ u8 queue;
+
+ if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)))
+ return -EOPNOTSUPP;
+
+ /* ring_cookie is masked into a set of queues and txgbe pools,
+ * or we use the drop index
+ */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ queue = TXGBE_RDB_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+
+ if (ring >= wx->num_rx_queues)
+ return -EINVAL;
+
+ /* Map the ring onto the absolute queue index */
+ queue = wx->rx_ring[ring]->reg_idx;
+ }
+
+ /* Don't allow indexes to exist outside of available space */
+ if (fsp->location >= ((1024 << TXGBE_FDIR_PBALLOC_64K) - 2)) {
+ wx_err(wx, "Location out of range\n");
+ return -EINVAL;
+ }
+
+ input = kzalloc(sizeof(*input), GFP_ATOMIC);
+ if (!input)
+ return -ENOMEM;
+
+ memset(&mask, 0, sizeof(union txgbe_atr_input));
+
+ /* set SW index */
+ input->sw_idx = fsp->location;
+
+ /* record flow type */
+ if (txgbe_flowspec_to_flow_type(fsp,
+ &input->filter.formatted.flow_type)) {
+ wx_err(wx, "Unrecognized flow type\n");
+ goto err_out;
+ }
+
+ mask.formatted.flow_type = TXGBE_ATR_L4TYPE_IPV6_MASK |
+ TXGBE_ATR_L4TYPE_MASK;
+
+ if (input->filter.formatted.flow_type == TXGBE_ATR_FLOW_TYPE_IPV4)
+ mask.formatted.flow_type &= TXGBE_ATR_L4TYPE_IPV6_MASK;
+
+ /* Copy input into formatted structures */
+ input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
+ input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+ mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
+ input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
+ mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
+ input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
+ mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;
+
+ if (fsp->flow_type & FLOW_EXT) {
+ input->filter.formatted.vm_pool =
+ (unsigned char)ntohl(fsp->h_ext.data[1]);
+ mask.formatted.vm_pool =
+ (unsigned char)ntohl(fsp->m_ext.data[1]);
+ input->filter.formatted.flex_bytes =
+ fsp->h_ext.vlan_etype;
+ mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
+ }
+
+ switch (input->filter.formatted.flow_type) {
+ case TXGBE_ATR_FLOW_TYPE_TCPV4:
+ ptype = WX_PTYPE_L2_IPV4_TCP;
+ break;
+ case TXGBE_ATR_FLOW_TYPE_UDPV4:
+ ptype = WX_PTYPE_L2_IPV4_UDP;
+ break;
+ case TXGBE_ATR_FLOW_TYPE_SCTPV4:
+ ptype = WX_PTYPE_L2_IPV4_SCTP;
+ break;
+ case TXGBE_ATR_FLOW_TYPE_IPV4:
+ ptype = WX_PTYPE_L2_IPV4;
+ break;
+ default:
+ break;
+ }
+
+ input->filter.formatted.vlan_id = htons(ptype);
+ if (mask.formatted.flow_type & TXGBE_ATR_L4TYPE_MASK)
+ mask.formatted.vlan_id = htons(0xFFFF);
+ else
+ mask.formatted.vlan_id = htons(0xFFF8);
+
+ /* determine if we need to drop or route the packet */
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
+ input->action = TXGBE_RDB_FDIR_DROP_QUEUE;
+ else
+ input->action = fsp->ring_cookie;
+
+ spin_lock(&txgbe->fdir_perfect_lock);
+
+ if (hlist_empty(&txgbe->fdir_filter_list)) {
+ /* save mask and program input mask into HW */
+ memcpy(&txgbe->fdir_mask, &mask, sizeof(mask));
+ err = txgbe_fdir_set_input_mask(wx, &mask);
+ if (err)
+ goto err_unlock;
+ } else if (memcmp(&txgbe->fdir_mask, &mask, sizeof(mask))) {
+ wx_err(wx, "Hardware only supports one mask per port. To change the mask you must first delete all the rules.\n");
+ goto err_unlock;
+ }
+
+ /* apply mask and compute/store hash */
+ txgbe_atr_compute_perfect_hash(&input->filter, &mask);
+
+ /* check if new entry does not exist on filter list */
+ if (txgbe_match_ethtool_fdir_entry(txgbe, input))
+ goto err_unlock;
+
+ /* only program filters to hardware if the net device is running, as
+ * we store the filters in the Rx buffer which is not allocated when
+ * the device is down
+ */
+ if (netif_running(wx->netdev)) {
+ err = txgbe_fdir_write_perfect_filter(wx, &input->filter,
+ input->sw_idx, queue);
+ if (err)
+ goto err_unlock;
+ }
+
+ txgbe_update_ethtool_fdir_entry(txgbe, input, input->sw_idx);
+
+ spin_unlock(&txgbe->fdir_perfect_lock);
+
+ return 0;
+err_unlock:
+ spin_unlock(&txgbe->fdir_perfect_lock);
+err_out:
+ kfree(input);
+ return err;
+}
+
+static int txgbe_del_ethtool_fdir_entry(struct txgbe *txgbe,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ int err = 0;
+
+ spin_lock(&txgbe->fdir_perfect_lock);
+ err = txgbe_update_ethtool_fdir_entry(txgbe, NULL, fsp->location);
+ spin_unlock(&txgbe->fdir_perfect_lock);
+
+ return err;
+}
+
+static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct wx *wx = netdev_priv(dev);
+ struct txgbe *txgbe = wx->priv;
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = txgbe_add_ethtool_fdir_entry(txgbe, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = txgbe_del_ethtool_fdir_entry(txgbe, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
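
With .set_rxnfc wired up, a perfect-match rule can be installed from user space, assuming ntuple filtering (WX_FLAG_FDIR_PERFECT) has been enabled. A hedged sketch of inserting a drop rule with ETHTOOL_SRXCLSRLINS (interface name, port, and location are assumptions):

#include <arpa/inet.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>

static int add_drop_rule(int fd, const char *ifname)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
	struct ifreq ifr = { 0 };

	nfc.fs.flow_type = TCP_V4_FLOW;
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff); /* match dport exactly */
	nfc.fs.ring_cookie = RX_CLS_FLOW_DISC;	      /* request the drop queue */
	nfc.fs.location = 0;
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&nfc;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	return fd < 0 ? 1 : add_drop_rule(fd, "eth0");
}

If the ethtool(8) utility is available, the equivalent should be along the lines of "ethtool -N eth0 flow-type tcp4 dst-port 80 action -1 loc 0", where action -1 requests the drop queue.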
+
static const struct ethtool_ops txgbe_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ,
@@ -92,6 +525,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = {
.set_coalesce = wx_set_coalesce,
.get_channels = wx_get_channels,
.set_channels = txgbe_set_channels,
+ .get_rxnfc = txgbe_get_rxnfc,
+ .set_rxnfc = txgbe_set_rxnfc,
.get_msglevel = wx_get_msglevel,
.set_msglevel = wx_set_msglevel,
};
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
new file mode 100644
index 000000000000..ef50efbaec0f
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c
@@ -0,0 +1,643 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+
+#include "../libwx/wx_type.h"
+#include "../libwx/wx_lib.h"
+#include "../libwx/wx_hw.h"
+#include "txgbe_type.h"
+#include "txgbe_fdir.h"
+
+/* These defines allow us to quickly generate all of the necessary instructions
+ * in the function below by simply calling out TXGBE_COMPUTE_SIG_HASH_ITERATION
+ * for values 0 through 15
+ */
+#define TXGBE_ATR_COMMON_HASH_KEY \
+ (TXGBE_ATR_BUCKET_HASH_KEY & TXGBE_ATR_SIGNATURE_HASH_KEY)
+#define TXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ common_hash ^= lo_hash_dword >> n; \
+ else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ sig_hash ^= lo_hash_dword << (16 - n); \
+ if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ common_hash ^= hi_hash_dword >> n; \
+ else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+ else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ sig_hash ^= hi_hash_dword << (16 - n); \
+} while (0)
+
+/**
+ * txgbe_atr_compute_sig_hash - Compute the signature hash
+ * @input: input bitstream to compute the hash on
+ * @common: compressed common input dword
+ * @hash: pointer to the computed hash
+ *
+ * This function computes the hash in a heavily optimized form: all of
+ * the loops are unwound, the compiler works out all of the conditional
+ * ifs since the keys are static defines, and two keys are computed at
+ * once since the hashed dword stream will be the same for both keys.
+ **/
+static void txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input,
+ union txgbe_atr_hash_dword common,
+ u32 *hash)
+{
+ u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 i;
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = ntohl(input.dword);
+
+ /* generate common hash dword */
+ hi_hash_dword = ntohl(common.dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(0);
+
+ /* apply flow ID/VM pool/VLAN ID bits to the lo hash dword; this is
+ * delayed because bit 0 of the stream should not be processed, so the
+ * VLAN bits are not added until after bit 0 has been handled
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ for (i = 1; i <= 15; i++)
+ TXGBE_COMPUTE_SIG_HASH_ITERATION(i);
+
+ /* combine common_hash result with signature and bucket hashes */
+ bucket_hash ^= common_hash;
+ bucket_hash &= TXGBE_ATR_HASH_MASK;
+
+ sig_hash ^= common_hash << 16;
+ sig_hash &= TXGBE_ATR_HASH_MASK << 16;
+
+ /* return completed signature hash */
+ *hash = sig_hash ^ bucket_hash;
+}
+
+#define TXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
+do { \
+ u32 n = (_n); \
+ if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ bucket_hash ^= lo_hash_dword >> n; \
+ if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ bucket_hash ^= hi_hash_dword >> n; \
+} while (0)
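
Both iteration macros unroll a key-driven XOR fold: for each bit n of a 32-bit key, the low or high hash dword is shifted right by n and folded into the accumulator. A loop-based illustration of the bucket-hash variant (a standalone sketch, not the driver's unrolled code):

#include <stdint.h>
#include <stdio.h>

static uint32_t bucket_hash_fold(uint32_t key, uint32_t lo, uint32_t hi)
{
	uint32_t hash = 0;
	int n;

	for (n = 0; n < 16; n++) {
		if (key & (1u << n))		/* low 16 key bits select lo shifts */
			hash ^= lo >> n;
		if (key & (1u << (n + 16)))	/* high 16 key bits select hi shifts */
			hash ^= hi >> n;
	}
	return hash & 0x7fff;			/* TXGBE_ATR_HASH_MASK */
}

int main(void)
{
	/* 0x3DAD14E2 is TXGBE_ATR_BUCKET_HASH_KEY from this patch */
	printf("0x%04x\n", bucket_hash_fold(0x3DAD14E2, 0x12345678, 0x9abcdef0));
	return 0;
}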
+
+/**
+ * txgbe_atr_compute_perfect_hash - Compute the perfect filter hash
+ * @input: input bitstream to compute the hash on
+ * @input_mask: mask for the input bitstream
+ *
+ * This function serves two main purposes. First it applies the input_mask
+ * to the atr_input resulting in a cleaned up atr_input data stream.
+ * Secondly it computes the hash and stores it in the bkt_hash field at
+ * the end of the input byte stream. This way it will be available for
+ * future use without needing to recompute the hash.
+ **/
+void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input,
+ union txgbe_atr_input *input_mask)
+{
+ u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
+ u32 bucket_hash = 0;
+ __be32 hi_dword = 0;
+ u32 i = 0;
+
+ /* Apply masks to input data */
+ for (i = 0; i < 11; i++)
+ input->dword_stream[i] &= input_mask->dword_stream[i];
+
+ /* record the flow_vm_vlan bits as they are a key part to the hash */
+ flow_vm_vlan = ntohl(input->dword_stream[0]);
+
+ /* generate common hash dword */
+ for (i = 1; i <= 10; i++)
+ hi_dword ^= input->dword_stream[i];
+ hi_hash_dword = ntohl(hi_dword);
+
+ /* low dword is word swapped version of common */
+ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
+
+ /* apply flow ID/VM pool/VLAN ID bits to hash words */
+ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
+
+ /* Process bits 0 and 16 */
+ TXGBE_COMPUTE_BKT_HASH_ITERATION(0);
+
+ /* apply flow ID/VM pool/VLAN ID bits to the lo hash dword; this is
+ * delayed because bit 0 of the stream should not be processed, so the
+ * VLAN bits are not added until after bit 0 has been handled
+ */
+ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
+
+ /* Process the remaining 30 bits of the key */
+ for (i = 1; i <= 15; i++)
+ TXGBE_COMPUTE_BKT_HASH_ITERATION(i);
+
+ /* Limit hash to 13 bits since max bucket count is 8K.
+ * Store result at the end of the input stream.
+ */
+ input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF);
+}
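
The 0x1FFF truncation and the rule counts reported to ethtool are both tied to the 64K packet-buffer allocation: 2^13 = 8192 buckets, and (1024 << TXGBE_FDIR_PBALLOC_64K) - 2 = 2046 usable rule locations. A trivial check of that arithmetic:

#include <assert.h>

int main(void)
{
	int pballoc_64k = 1;			   /* TXGBE_FDIR_PBALLOC_64K */

	assert((1024 << pballoc_64k) - 2 == 2046); /* rule locations */
	assert(0x1FFF + 1 == 8192);		   /* 13-bit bucket hash -> 8K buckets */
	return 0;
}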
+
+static int txgbe_fdir_check_cmd_complete(struct wx *wx)
+{
+ u32 val;
+
+ return read_poll_timeout_atomic(rd32, val,
+ !(val & TXGBE_RDB_FDIR_CMD_CMD_MASK),
+ 10, 100, false,
+ wx, TXGBE_RDB_FDIR_CMD);
+}
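
read_poll_timeout_atomic() above re-reads TXGBE_RDB_FDIR_CMD every 10 microseconds for up to 100 microseconds, succeeding once the command field clears. A compile-only sketch of the open-coded equivalent (reg_read() and delay_us() are hypothetical stand-ins for rd32() and udelay()):

#include <stdint.h>

extern uint32_t reg_read(void *ctx, uint32_t reg);
extern void delay_us(unsigned int us);

int poll_cmd_clear(void *ctx, uint32_t reg, uint32_t mask,
		   unsigned int sleep_us, unsigned int timeout_us)
{
	unsigned int waited = 0;
	uint32_t val;

	for (;;) {
		val = reg_read(ctx, reg);
		if (!(val & mask))
			return 0;	/* command accepted by hardware */
		if (waited >= timeout_us)
			return -1;	/* -ETIMEDOUT in the kernel helper */
		delay_us(sleep_us);
		waited += sleep_us;
	}
}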
+
+/**
+ * txgbe_fdir_add_signature_filter - Adds a signature hash filter
+ * @wx: pointer to hardware structure
+ * @input: unique input dword
+ * @common: compressed common input dword
+ * @queue: queue index to direct traffic to
+ *
+ * Return: 0 on success and negative on failure
+ **/
+static int txgbe_fdir_add_signature_filter(struct wx *wx,
+ union txgbe_atr_hash_dword input,
+ union txgbe_atr_hash_dword common,
+ u8 queue)
+{
+ u32 fdirhashcmd, fdircmd;
+ u8 flow_type;
+ int err;
+
+ /* Get the flow_type in order to program FDIRCMD properly
+ * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
+ * fifth is FDIRCMD.TUNNEL_FILTER
+ */
+ flow_type = input.formatted.flow_type;
+ switch (flow_type) {
+ case TXGBE_ATR_FLOW_TYPE_TCPV4:
+ case TXGBE_ATR_FLOW_TYPE_UDPV4:
+ case TXGBE_ATR_FLOW_TYPE_SCTPV4:
+ case TXGBE_ATR_FLOW_TYPE_TCPV6:
+ case TXGBE_ATR_FLOW_TYPE_UDPV6:
+ case TXGBE_ATR_FLOW_TYPE_SCTPV6:
+ break;
+ default:
+ wx_err(wx, "Error on flow type input\n");
+ return -EINVAL;
+ }
+
+ /* configure FDIRCMD register */
+ fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW |
+ TXGBE_RDB_FDIR_CMD_FILTER_UPDATE |
+ TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN;
+ fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(flow_type);
+ fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue);
+
+ txgbe_atr_compute_sig_hash(input, common, &fdirhashcmd);
+ fdirhashcmd |= TXGBE_RDB_FDIR_HASH_BUCKET_VALID;
+ wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhashcmd);
+ wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd);
+
+ wx_dbg(wx, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
+
+ err = txgbe_fdir_check_cmd_complete(wx);
+ if (err)
+ wx_err(wx, "Flow Director command did not complete!\n");
+
+ return err;
+}
+
+void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype)
+{
+ union txgbe_atr_hash_dword common = { .dword = 0 };
+ union txgbe_atr_hash_dword input = { .dword = 0 };
+ struct wx_q_vector *q_vector = ring->q_vector;
+ struct wx_dec_ptype dptype;
+ union network_header {
+ struct ipv6hdr *ipv6;
+ struct iphdr *ipv4;
+ void *raw;
+ } hdr;
+ struct tcphdr *th;
+
+ /* if the ring doesn't have an interrupt vector, we cannot perform ATR */
+ if (!q_vector)
+ return;
+
+ ring->atr_count++;
+ dptype = wx_decode_ptype(ptype);
+ if (dptype.etype) {
+ if (WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP)
+ return;
+ hdr.raw = (void *)skb_inner_network_header(first->skb);
+ th = inner_tcp_hdr(first->skb);
+ } else {
+ if (WX_PTYPE_PKT(ptype) != WX_PTYPE_PKT_IP ||
+ WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP)
+ return;
+ hdr.raw = (void *)skb_network_header(first->skb);
+ th = tcp_hdr(first->skb);
+ }
+
+ /* skip this packet since it is invalid or the socket is closing */
+ if (!th || th->fin)
+ return;
+
+ /* sample on all syn packets or once every atr sample count */
+ if (!th->syn && ring->atr_count < ring->atr_sample_rate)
+ return;
+
+ /* reset sample count */
+ ring->atr_count = 0;
+
+ /* src and dst are inverted; think of how the receiver sees them
+ *
+ * The input is broken into two sections, a non-compressed section
+ * containing vm_pool, vlan_id, and flow_type. The rest of the data
+ * is XORed together and stored in the compressed dword.
+ */
+ input.formatted.vlan_id = htons((u16)ptype);
+
+ /* since src port and flex bytes occupy the same word XOR them together
+ * and write the value to source port portion of compressed dword
+ */
+ if (first->tx_flags & WX_TX_FLAGS_SW_VLAN)
+ common.port.src ^= th->dest ^ first->skb->protocol;
+ else if (first->tx_flags & WX_TX_FLAGS_HW_VLAN)
+ common.port.src ^= th->dest ^ first->skb->vlan_proto;
+ else
+ common.port.src ^= th->dest ^ first->protocol;
+ common.port.dst ^= th->source;
+
+ if (WX_PTYPE_PKT_IPV6 & WX_PTYPE_PKT(ptype)) {
+ input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV6;
+ common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
+ hdr.ipv6->saddr.s6_addr32[1] ^
+ hdr.ipv6->saddr.s6_addr32[2] ^
+ hdr.ipv6->saddr.s6_addr32[3] ^
+ hdr.ipv6->daddr.s6_addr32[0] ^
+ hdr.ipv6->daddr.s6_addr32[1] ^
+ hdr.ipv6->daddr.s6_addr32[2] ^
+ hdr.ipv6->daddr.s6_addr32[3];
+ } else {
+ input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4;
+ common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
+ }
+
+ /* This assumes the Rx queue and Tx queue are bound to the same CPU */
+ txgbe_fdir_add_signature_filter(q_vector->wx, input, common,
+ ring->queue_index);
+}
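
txgbe_atr() samples transmitted TCP traffic rather than hashing every packet: every SYN is sampled, otherwise one packet per atr_sample_rate (20 by default, see TXGBE_DEFAULT_ATR_SAMPLE_RATE later in this patch), and src/dst are swapped so the resulting filter matches the return traffic. A standalone sketch of the sampling gate (types hypothetical):

#include <stdbool.h>
#include <stdint.h>

struct ring_sketch {
	uint16_t atr_count;
	uint8_t  atr_sample_rate;	/* 20 by default in this patch */
};

static bool should_sample(struct ring_sketch *ring, bool syn)
{
	ring->atr_count++;
	if (!syn && ring->atr_count < ring->atr_sample_rate)
		return false;
	ring->atr_count = 0;		/* reset after taking a sample */
	return true;
}

int main(void)
{
	struct ring_sketch ring = { .atr_count = 0, .atr_sample_rate = 20 };

	return should_sample(&ring, true) ? 0 : 1; /* a SYN is always sampled */
}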
+
+int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask)
+{
+ u32 fdirm = 0, fdirtcpm = 0, flex = 0;
+
+ /* Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+
+ /* verify bucket hash is cleared on hash generation */
+ if (input_mask->formatted.bkt_hash)
+ wx_dbg(wx, "bucket hash should always be 0 in mask\n");
+
+ /* Program FDIRM and verify partial masks */
+ switch (input_mask->formatted.vm_pool & 0x7F) {
+ case 0x0:
+ fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_POOL;
+ break;
+ case 0x7F:
+ break;
+ default:
+ wx_err(wx, "Error on vm pool mask\n");
+ return -EINVAL;
+ }
+
+ switch (input_mask->formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) {
+ case 0x0:
+ fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_L4P;
+ if (input_mask->formatted.dst_port ||
+ input_mask->formatted.src_port) {
+ wx_err(wx, "Error on src/dst port mask\n");
+ return -EINVAL;
+ }
+ break;
+ case TXGBE_ATR_L4TYPE_MASK:
+ break;
+ default:
+ wx_err(wx, "Error on flow type mask\n");
+ return -EINVAL;
+ }
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ wr32(wx, TXGBE_RDB_FDIR_OTHER_MSK, fdirm);
+
+ flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
+ flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;
+ flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));
+
+ switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) {
+ case 0x0000:
+ /* Mask Flex Bytes */
+ flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK;
+ break;
+ case 0xFFFF:
+ break;
+ default:
+ wx_err(wx, "Error on flexible byte mask\n");
+ return -EINVAL;
+ }
+ wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);
+
+ /* store the TCP/UDP port masks, bit reversed from port layout */
+ fdirtcpm = ntohs(input_mask->formatted.dst_port);
+ fdirtcpm <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT;
+ fdirtcpm |= ntohs(input_mask->formatted.src_port);
+
+ /* write both the same so that UDP and TCP use the same mask */
+ wr32(wx, TXGBE_RDB_FDIR_TCP_MSK, ~fdirtcpm);
+ wr32(wx, TXGBE_RDB_FDIR_UDP_MSK, ~fdirtcpm);
+ wr32(wx, TXGBE_RDB_FDIR_SCTP_MSK, ~fdirtcpm);
+
+ /* store source and destination IP masks (little-endian) */
+ wr32(wx, TXGBE_RDB_FDIR_SA4_MSK,
+ ntohl(~input_mask->formatted.src_ip[0]));
+ wr32(wx, TXGBE_RDB_FDIR_DA4_MSK,
+ ntohl(~input_mask->formatted.dst_ip[0]));
+
+ return 0;
+}
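
Note the inversion when the masks are programmed: the ethtool-style masks arriving here use 1 to mean "compare this bit", while the FDIR mask registers appear to expect 1 to mean "ignore", hence the ~fdirtcpm and ~src/dst IP writes. A small illustration for the port-mask register:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t dst_mask = 0xffff;	/* match destination port exactly */
	uint16_t src_mask = 0x0000;	/* ignore source port entirely */
	uint32_t fdirtcpm = ((uint32_t)dst_mask << 16) | src_mask;

	/* value written to TXGBE_RDB_FDIR_TCP_MSK: 0x0000ffff */
	printf("TCP_MSK register value: 0x%08x\n", ~fdirtcpm);
	return 0;
}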
+
+int txgbe_fdir_write_perfect_filter(struct wx *wx,
+ union txgbe_atr_input *input,
+ u16 soft_id, u8 queue)
+{
+ u32 fdirport, fdirvlan, fdirhash, fdircmd;
+ int err = 0;
+
+ /* currently IPv6 is not supported, must be programmed with 0 */
+ wr32(wx, TXGBE_RDB_FDIR_IP6(2), ntohl(input->formatted.src_ip[0]));
+ wr32(wx, TXGBE_RDB_FDIR_IP6(1), ntohl(input->formatted.src_ip[1]));
+ wr32(wx, TXGBE_RDB_FDIR_IP6(0), ntohl(input->formatted.src_ip[2]));
+
+ /* record the source address (little-endian) */
+ wr32(wx, TXGBE_RDB_FDIR_SA, ntohl(input->formatted.src_ip[0]));
+
+ /* record the first 32 bits of the destination address
+ * (little-endian)
+ */
+ wr32(wx, TXGBE_RDB_FDIR_DA, ntohl(input->formatted.dst_ip[0]));
+
+ /* record source and destination port (little-endian) */
+ fdirport = ntohs(input->formatted.dst_port);
+ fdirport <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT;
+ fdirport |= ntohs(input->formatted.src_port);
+ wr32(wx, TXGBE_RDB_FDIR_PORT, fdirport);
+
+ /* record packet type and flex_bytes (little-endian) */
+ fdirvlan = ntohs(input->formatted.flex_bytes);
+ fdirvlan <<= TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT;
+ fdirvlan |= ntohs(input->formatted.vlan_id);
+ wr32(wx, TXGBE_RDB_FDIR_FLEX, fdirvlan);
+
+ /* configure FDIRHASH register */
+ fdirhash = (__force u32)input->formatted.bkt_hash |
+ TXGBE_RDB_FDIR_HASH_BUCKET_VALID |
+ TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id);
+ wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash);
+
+ /* flush all previous writes to make certain registers are
+ * programmed prior to issuing the command
+ */
+ WX_WRITE_FLUSH(wx);
+
+ /* configure FDIRCMD register */
+ fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW |
+ TXGBE_RDB_FDIR_CMD_FILTER_UPDATE |
+ TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN;
+ if (queue == TXGBE_RDB_FDIR_DROP_QUEUE)
+ fdircmd |= TXGBE_RDB_FDIR_CMD_DROP;
+ fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(input->formatted.flow_type);
+ fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue);
+ fdircmd |= TXGBE_RDB_FDIR_CMD_VT_POOL(input->formatted.vm_pool);
+
+ wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd);
+ err = txgbe_fdir_check_cmd_complete(wx);
+ if (err)
+ wx_err(wx, "Flow Director command did not complete!\n");
+
+ return err;
+}
+
+int txgbe_fdir_erase_perfect_filter(struct wx *wx,
+ union txgbe_atr_input *input,
+ u16 soft_id)
+{
+ u32 fdirhash, fdircmd;
+ int err = 0;
+
+ /* configure FDIRHASH register */
+ fdirhash = (__force u32)input->formatted.bkt_hash;
+ fdirhash |= TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id);
+ wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash);
+
+ /* flush hash to HW */
+ WX_WRITE_FLUSH(wx);
+
+ /* Query if filter is present */
+ wr32(wx, TXGBE_RDB_FDIR_CMD, TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT);
+
+ err = txgbe_fdir_check_cmd_complete(wx);
+ if (err) {
+ wx_err(wx, "Flow Director command did not complete!\n");
+ return err;
+ }
+
+ fdircmd = rd32(wx, TXGBE_RDB_FDIR_CMD);
+ /* if filter exists in hardware then remove it */
+ if (fdircmd & TXGBE_RDB_FDIR_CMD_FILTER_VALID) {
+ wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash);
+ WX_WRITE_FLUSH(wx);
+ wr32(wx, TXGBE_RDB_FDIR_CMD,
+ TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW);
+ }
+
+ return 0;
+}
+
+/**
+ * txgbe_fdir_enable - Initialize Flow Director control registers
+ * @wx: pointer to hardware structure
+ * @fdirctrl: value to write to flow director control register
+ **/
+static void txgbe_fdir_enable(struct wx *wx, u32 fdirctrl)
+{
+ u32 val;
+ int ret;
+
+ /* Prime the keys for hashing */
+ wr32(wx, TXGBE_RDB_FDIR_HKEY, TXGBE_ATR_BUCKET_HASH_KEY);
+ wr32(wx, TXGBE_RDB_FDIR_SKEY, TXGBE_ATR_SIGNATURE_HASH_KEY);
+
+ wr32(wx, TXGBE_RDB_FDIR_CTL, fdirctrl);
+ WX_WRITE_FLUSH(wx);
+ ret = read_poll_timeout(rd32, val, val & TXGBE_RDB_FDIR_CTL_INIT_DONE,
+ 1000, 10000, false, wx, TXGBE_RDB_FDIR_CTL);
+
+ if (ret < 0)
+ wx_dbg(wx, "Flow Director poll time exceeded!\n");
+}
+
+/**
+ * txgbe_init_fdir_signature - Initialize Flow Director signature filters
+ * @wx: pointer to hardware structure
+ **/
+static void txgbe_init_fdir_signature(struct wx *wx)
+{
+ u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K;
+ u32 flex = 0;
+
+ flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0));
+ flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0;
+
+ flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC |
+ TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6));
+ wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex);
+
+ /* Continue setup of fdirctrl register bits:
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 filters are left
+ */
+ fdirctrl |= TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) |
+ TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) |
+ TXGBE_RDB_FDIR_CTL_FULL_THRESH(4);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ txgbe_fdir_enable(wx, fdirctrl);
+}
+
+/**
+ * txgbe_init_fdir_perfect - Initialize Flow Director perfect filters
+ * @wx: pointer to hardware structure
+ **/
+static void txgbe_init_fdir_perfect(struct wx *wx)
+{
+ u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K;
+
+ /* Continue setup of fdirctrl register bits:
+ * Turn perfect match filtering on
+ * Report hash in RSS field of Rx wb descriptor
+ * Initialize the drop queue
+ * Move the flexible bytes to use the ethertype - shift 6 words
+ * Set the maximum length per hash bucket to 0xA filters
+ * Send interrupt when 64 (0x4 * 16) filters are left
+ */
+ fdirctrl |= TXGBE_RDB_FDIR_CTL_PERFECT_MATCH |
+ TXGBE_RDB_FDIR_CTL_DROP_Q(TXGBE_RDB_FDIR_DROP_QUEUE) |
+ TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) |
+ TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) |
+ TXGBE_RDB_FDIR_CTL_FULL_THRESH(4);
+
+ /* write hashes and fdirctrl register, poll for completion */
+ txgbe_fdir_enable(wx, fdirctrl);
+}
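
For reference, the fdirctrl value assembled above is a straight composition of the TXGBE_RDB_FDIR_CTL_* fields defined later in this patch; a standalone sketch of the arithmetic for the perfect-filter variant:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fdirctrl = 1;		/* TXGBE_FDIR_PBALLOC_64K */

	fdirctrl |= 1u << 4;		/* PERFECT_MATCH, bit 4 */
	fdirctrl |= 127u << 8;		/* DROP_Q(127), bits 14:8 */
	fdirctrl |= 0xFu << 20;		/* HASH_BITS(0xF), bits 23:20 */
	fdirctrl |= 0xAu << 24;		/* MAX_LENGTH(0xA), bits 27:24 */
	fdirctrl |= 4u << 28;		/* FULL_THRESH(4), bits 31:28 */
	printf("FDIR_CTL = 0x%08x\n", fdirctrl);	/* 0x4af07f11 */
	return 0;
}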
+
+static void txgbe_fdir_filter_restore(struct wx *wx)
+{
+ struct txgbe_fdir_filter *filter;
+ struct txgbe *txgbe = wx->priv;
+ struct hlist_node *node;
+ u8 queue = 0;
+ int ret = 0;
+
+ spin_lock(&txgbe->fdir_perfect_lock);
+
+ if (!hlist_empty(&txgbe->fdir_filter_list))
+ ret = txgbe_fdir_set_input_mask(wx, &txgbe->fdir_mask);
+
+ if (ret)
+ goto unlock;
+
+ hlist_for_each_entry_safe(filter, node,
+ &txgbe->fdir_filter_list, fdir_node) {
+ if (filter->action == TXGBE_RDB_FDIR_DROP_QUEUE) {
+ queue = TXGBE_RDB_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(filter->action);
+
+ if (ring >= wx->num_rx_queues) {
+ wx_err(wx, "FDIR restore failed, ring:%u\n",
+ ring);
+ continue;
+ }
+
+ /* Map the ring onto the absolute queue index */
+ queue = wx->rx_ring[ring]->reg_idx;
+ }
+
+ ret = txgbe_fdir_write_perfect_filter(wx,
+ &filter->filter,
+ filter->sw_idx,
+ queue);
+ if (ret)
+ wx_err(wx, "FDIR restore failed, index:%u\n",
+ filter->sw_idx);
+ }
+
+unlock:
+ spin_unlock(&txgbe->fdir_perfect_lock);
+}
+
+void txgbe_configure_fdir(struct wx *wx)
+{
+ wx_disable_sec_rx_path(wx);
+
+ if (test_bit(WX_FLAG_FDIR_HASH, wx->flags)) {
+ txgbe_init_fdir_signature(wx);
+ } else if (test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)) {
+ txgbe_init_fdir_perfect(wx);
+ txgbe_fdir_filter_restore(wx);
+ }
+
+ wx_enable_sec_rx_path(wx);
+}
+
+void txgbe_fdir_filter_exit(struct wx *wx)
+{
+ struct txgbe_fdir_filter *filter;
+ struct txgbe *txgbe = wx->priv;
+ struct hlist_node *node;
+
+ spin_lock(&txgbe->fdir_perfect_lock);
+
+ hlist_for_each_entry_safe(filter, node,
+ &txgbe->fdir_filter_list, fdir_node) {
+ hlist_del(&filter->fdir_node);
+ kfree(filter);
+ }
+ txgbe->fdir_filter_count = 0;
+
+ spin_unlock(&txgbe->fdir_perfect_lock);
+}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h
new file mode 100644
index 000000000000..1f44ce60becb
--- /dev/null
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
+
+#ifndef _TXGBE_FDIR_H_
+#define _TXGBE_FDIR_H_
+
+void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input,
+ union txgbe_atr_input *input_mask);
+void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype);
+int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask);
+int txgbe_fdir_write_perfect_filter(struct wx *wx,
+ union txgbe_atr_input *input,
+ u16 soft_id, u8 queue);
+int txgbe_fdir_erase_perfect_filter(struct wx *wx,
+ union txgbe_atr_input *input,
+ u16 soft_id);
+void txgbe_configure_fdir(struct wx *wx);
+void txgbe_fdir_filter_exit(struct wx *wx);
+
+#endif /* _TXGBE_FDIR_H_ */
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
index b3e3605d1edb..a4cf682dca65 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c
@@ -27,57 +27,19 @@ void txgbe_irq_enable(struct wx *wx, bool queues)
}
/**
- * txgbe_intr - msi/legacy mode Interrupt Handler
- * @irq: interrupt number
- * @data: pointer to a network interface device structure
- **/
-static irqreturn_t txgbe_intr(int __always_unused irq, void *data)
-{
- struct wx_q_vector *q_vector;
- struct wx *wx = data;
- struct pci_dev *pdev;
- u32 eicr;
-
- q_vector = wx->q_vector[0];
- pdev = wx->pdev;
-
- eicr = wx_misc_isb(wx, WX_ISB_VEC0);
- if (!eicr) {
- /* shared interrupt alert!
- * the interrupt that we masked before the ICR read.
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, true);
- return IRQ_NONE; /* Not our interrupt */
- }
- wx->isb_mem[WX_ISB_VEC0] = 0;
- if (!(pdev->msi_enabled))
- wr32(wx, WX_PX_INTA, 1);
-
- wx->isb_mem[WX_ISB_MISC] = 0;
- /* would disable interrupts here but it is auto disabled */
- napi_schedule_irqoff(&q_vector->napi);
-
- /* re-enable link(maybe) and non-queue interrupts, no flush.
- * txgbe_poll will re-enable the queue interrupts
- */
- if (netif_running(wx->netdev))
- txgbe_irq_enable(wx, false);
-
- return IRQ_HANDLED;
-}
-
-/**
- * txgbe_request_msix_irqs - Initialize MSI-X interrupts
+ * txgbe_request_queue_irqs - Initialize MSI-X queue interrupts
* @wx: board private structure
*
- * Allocate MSI-X vectors and request interrupts from the kernel.
+ * Allocate MSI-X queue vectors and request interrupts from the kernel.
**/
-static int txgbe_request_msix_irqs(struct wx *wx)
+int txgbe_request_queue_irqs(struct wx *wx)
{
struct net_device *netdev = wx->netdev;
int vector, err;
+ if (!wx->pdev->msix_enabled)
+ return 0;
+
for (vector = 0; vector < wx->num_q_vectors; vector++) {
struct wx_q_vector *q_vector = wx->q_vector[vector];
struct msix_entry *entry = &wx->msix_q_entries[vector];
@@ -110,34 +72,6 @@ free_queue_irqs:
return err;
}
-/**
- * txgbe_request_irq - initialize interrupts
- * @wx: board private structure
- *
- * Attempt to configure interrupts using the best available
- * capabilities of the hardware and kernel.
- **/
-int txgbe_request_irq(struct wx *wx)
-{
- struct net_device *netdev = wx->netdev;
- struct pci_dev *pdev = wx->pdev;
- int err;
-
- if (pdev->msix_enabled)
- err = txgbe_request_msix_irqs(wx);
- else if (pdev->msi_enabled)
- err = request_irq(wx->pdev->irq, &txgbe_intr, 0,
- netdev->name, wx);
- else
- err = request_irq(wx->pdev->irq, &txgbe_intr, IRQF_SHARED,
- netdev->name, wx);
-
- if (err)
- wx_err(wx, "request_irq failed, Error %d\n", err);
-
- return err;
-}
-
static int txgbe_request_gpio_irq(struct txgbe *txgbe)
{
txgbe->gpio_irq = irq_find_mapping(txgbe->misc.domain, TXGBE_IRQ_GPIO);
@@ -178,6 +112,36 @@ static const struct irq_domain_ops txgbe_misc_irq_domain_ops = {
static irqreturn_t txgbe_misc_irq_handle(int irq, void *data)
{
+ struct wx_q_vector *q_vector;
+ struct txgbe *txgbe = data;
+ struct wx *wx = txgbe->wx;
+ u32 eicr;
+
+ if (wx->pdev->msix_enabled)
+ return IRQ_WAKE_THREAD;
+
+ eicr = wx_misc_isb(wx, WX_ISB_VEC0);
+ if (!eicr) {
+ /* shared interrupt alert!
+ * this is not our interrupt; re-enable the interrupts we masked
+ * before the ICR read
+ */
+ if (netif_running(wx->netdev))
+ txgbe_irq_enable(wx, true);
+ return IRQ_NONE; /* Not our interrupt */
+ }
+ wx->isb_mem[WX_ISB_VEC0] = 0;
+ if (!(wx->pdev->msi_enabled))
+ wr32(wx, WX_PX_INTA, 1);
+
+ /* would disable interrupts here but it is auto disabled */
+ q_vector = wx->q_vector[0];
+ napi_schedule_irqoff(&q_vector->napi);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data)
+{
struct txgbe *txgbe = data;
struct wx *wx = txgbe->wx;
unsigned int nhandled = 0;
@@ -223,6 +187,7 @@ void txgbe_free_misc_irq(struct txgbe *txgbe)
int txgbe_setup_misc_irq(struct txgbe *txgbe)
{
+ unsigned long flags = IRQF_ONESHOT;
struct wx *wx = txgbe->wx;
int hwirq, err;
@@ -236,14 +201,17 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
irq_create_mapping(txgbe->misc.domain, hwirq);
txgbe->misc.chip = txgbe_irq_chip;
- if (wx->pdev->msix_enabled)
+ if (wx->pdev->msix_enabled) {
txgbe->misc.irq = wx->msix_entry->vector;
- else
+ } else {
txgbe->misc.irq = wx->pdev->irq;
+ if (!wx->pdev->msi_enabled)
+ flags |= IRQF_SHARED;
+ }
- err = request_threaded_irq(txgbe->misc.irq, NULL,
- txgbe_misc_irq_handle,
- IRQF_ONESHOT,
+ err = request_threaded_irq(txgbe->misc.irq, txgbe_misc_irq_handle,
+ txgbe_misc_irq_thread_fn,
+ flags,
wx->netdev->name, txgbe);
if (err)
goto del_misc_irq;
@@ -256,6 +224,8 @@ int txgbe_setup_misc_irq(struct txgbe *txgbe)
if (err)
goto free_gpio_irq;
+ wx->misc_irq_domain = true;
+
return 0;
free_gpio_irq:
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
index b77945e7a0f2..e6285b94625e 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.h
@@ -2,6 +2,6 @@
/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. */
void txgbe_irq_enable(struct wx *wx, bool queues);
-int txgbe_request_irq(struct wx *wx);
+int txgbe_request_queue_irqs(struct wx *wx);
void txgbe_free_misc_irq(struct txgbe *txgbe);
int txgbe_setup_misc_irq(struct txgbe *txgbe);
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
index bd4624d14ca0..93180225a6f1 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
@@ -18,6 +18,7 @@
#include "txgbe_hw.h"
#include "txgbe_phy.h"
#include "txgbe_irq.h"
+#include "txgbe_fdir.h"
#include "txgbe_ethtool.h"
char txgbe_driver_name[] = "txgbe";
@@ -257,6 +258,14 @@ static int txgbe_sw_init(struct wx *wx)
num_online_cpus());
wx->rss_enabled = true;
+ wx->ring_feature[RING_F_FDIR].limit = min_t(int, TXGBE_MAX_FDIR_INDICES,
+ num_online_cpus());
+ set_bit(WX_FLAG_FDIR_CAPABLE, wx->flags);
+ set_bit(WX_FLAG_FDIR_HASH, wx->flags);
+ wx->atr_sample_rate = TXGBE_DEFAULT_ATR_SAMPLE_RATE;
+ wx->atr = txgbe_atr;
+ wx->configure_fdir = txgbe_configure_fdir;
+
/* enable itr by default in dynamic mode */
wx->rx_itr_setting = 1;
wx->tx_itr_setting = 1;
@@ -269,9 +278,17 @@ static int txgbe_sw_init(struct wx *wx)
wx->tx_work_limit = TXGBE_DEFAULT_TX_WORK;
wx->rx_work_limit = TXGBE_DEFAULT_RX_WORK;
+ wx->do_reset = txgbe_do_reset;
+
return 0;
}
+static void txgbe_init_fdir(struct txgbe *txgbe)
+{
+ txgbe->fdir_filter_count = 0;
+ spin_lock_init(&txgbe->fdir_perfect_lock);
+}
+
/**
* txgbe_open - Called when a network interface is made active
* @netdev: network interface device structure
@@ -292,9 +309,9 @@ static int txgbe_open(struct net_device *netdev)
wx_configure(wx);
- err = txgbe_request_irq(wx);
+ err = txgbe_request_queue_irqs(wx);
if (err)
- goto err_free_isb;
+ goto err_free_resources;
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
@@ -311,8 +328,8 @@ static int txgbe_open(struct net_device *netdev)
err_free_irq:
wx_free_irq(wx);
-err_free_isb:
- wx_free_isb_resources(wx);
+err_free_resources:
+ wx_free_resources(wx);
err_reset:
txgbe_reset(wx);
@@ -350,6 +367,7 @@ static int txgbe_close(struct net_device *netdev)
txgbe_down(wx);
wx_free_irq(wx);
wx_free_resources(wx);
+ txgbe_fdir_filter_exit(wx);
wx_control_hw(wx, false);
return 0;
@@ -421,6 +439,34 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc)
return 0;
}
+static void txgbe_reinit_locked(struct wx *wx)
+{
+ int err = 0;
+
+ netif_trans_update(wx->netdev);
+
+ err = wx_set_state_reset(wx);
+ if (err) {
+ wx_err(wx, "wait device reset timeout\n");
+ return;
+ }
+
+ txgbe_down(wx);
+ txgbe_up(wx);
+
+ clear_bit(WX_STATE_RESETTING, wx->state);
+}
+
+void txgbe_do_reset(struct net_device *netdev)
+{
+ struct wx *wx = netdev_priv(netdev);
+
+ if (netif_running(netdev))
+ txgbe_reinit_locked(wx);
+ else
+ txgbe_reset(wx);
+}
+
static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open,
.ndo_stop = txgbe_close,
@@ -428,6 +474,7 @@ static const struct net_device_ops txgbe_netdev_ops = {
.ndo_start_xmit = wx_xmit_frame,
.ndo_set_rx_mode = wx_set_rx_mode,
.ndo_set_features = wx_set_features,
+ .ndo_fix_features = wx_fix_features,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
@@ -629,6 +676,8 @@ static int txgbe_probe(struct pci_dev *pdev,
txgbe->wx = wx;
wx->priv = txgbe;
+ txgbe_init_fdir(txgbe);
+
err = txgbe_setup_misc_irq(txgbe);
if (err)
goto err_release_hw;
@@ -698,6 +747,7 @@ static void txgbe_remove(struct pci_dev *pdev)
txgbe_remove_phy(txgbe);
txgbe_free_misc_irq(txgbe);
+ wx_free_isb_resources(wx);
pci_release_selected_regions(pdev,
pci_select_bars(pdev, IORESOURCE_MEM));
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
index 93295916b1d2..5f502265f0a6 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
@@ -302,7 +302,7 @@ irqreturn_t txgbe_link_irq_handler(int irq, void *data)
status = rd32(wx, TXGBE_CFG_PORT_ST);
up = !!(status & TXGBE_CFG_PORT_ST_LINK_UP);
- phylink_mac_change(wx->phylink, up);
+ phylink_pcs_change(&txgbe->xpcs->pcs, up);
return IRQ_HANDLED;
}
diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
index 1b4ff50d5857..959102c4c379 100644
--- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
+++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
@@ -89,6 +89,55 @@
#define TXGBE_XPCS_IDA_ADDR 0x13000
#define TXGBE_XPCS_IDA_DATA 0x13004
+/********************************* Flow Director *****************************/
+#define TXGBE_RDB_FDIR_DROP_QUEUE 127
+#define TXGBE_RDB_FDIR_CTL 0x19500
+#define TXGBE_RDB_FDIR_CTL_INIT_DONE BIT(3)
+#define TXGBE_RDB_FDIR_CTL_PERFECT_MATCH BIT(4)
+#define TXGBE_RDB_FDIR_CTL_DROP_Q(v) FIELD_PREP(GENMASK(14, 8), v)
+#define TXGBE_RDB_FDIR_CTL_HASH_BITS(v) FIELD_PREP(GENMASK(23, 20), v)
+#define TXGBE_RDB_FDIR_CTL_MAX_LENGTH(v) FIELD_PREP(GENMASK(27, 24), v)
+#define TXGBE_RDB_FDIR_CTL_FULL_THRESH(v) FIELD_PREP(GENMASK(31, 28), v)
+#define TXGBE_RDB_FDIR_IP6(_i) (0x1950C + ((_i) * 4)) /* 0-2 */
+#define TXGBE_RDB_FDIR_SA 0x19518
+#define TXGBE_RDB_FDIR_DA 0x1951C
+#define TXGBE_RDB_FDIR_PORT 0x19520
+#define TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT 16
+#define TXGBE_RDB_FDIR_FLEX 0x19524
+#define TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT 16
+#define TXGBE_RDB_FDIR_HASH 0x19528
+#define TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(v) FIELD_PREP(GENMASK(31, 16), v)
+#define TXGBE_RDB_FDIR_HASH_BUCKET_VALID BIT(15)
+#define TXGBE_RDB_FDIR_CMD 0x1952C
+#define TXGBE_RDB_FDIR_CMD_CMD_MASK GENMASK(1, 0)
+#define TXGBE_RDB_FDIR_CMD_CMD(v) FIELD_PREP(GENMASK(1, 0), v)
+#define TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW TXGBE_RDB_FDIR_CMD_CMD(1)
+#define TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW TXGBE_RDB_FDIR_CMD_CMD(2)
+#define TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT TXGBE_RDB_FDIR_CMD_CMD(3)
+#define TXGBE_RDB_FDIR_CMD_FILTER_VALID BIT(2)
+#define TXGBE_RDB_FDIR_CMD_FILTER_UPDATE BIT(3)
+#define TXGBE_RDB_FDIR_CMD_FLOW_TYPE(v) FIELD_PREP(GENMASK(6, 5), v)
+#define TXGBE_RDB_FDIR_CMD_DROP BIT(9)
+#define TXGBE_RDB_FDIR_CMD_LAST BIT(11)
+#define TXGBE_RDB_FDIR_CMD_QUEUE_EN BIT(15)
+#define TXGBE_RDB_FDIR_CMD_RX_QUEUE(v) FIELD_PREP(GENMASK(22, 16), v)
+#define TXGBE_RDB_FDIR_CMD_VT_POOL(v) FIELD_PREP(GENMASK(29, 24), v)
+#define TXGBE_RDB_FDIR_DA4_MSK 0x1953C
+#define TXGBE_RDB_FDIR_SA4_MSK 0x19540
+#define TXGBE_RDB_FDIR_TCP_MSK 0x19544
+#define TXGBE_RDB_FDIR_UDP_MSK 0x19548
+#define TXGBE_RDB_FDIR_SCTP_MSK 0x19560
+#define TXGBE_RDB_FDIR_HKEY 0x19568
+#define TXGBE_RDB_FDIR_SKEY 0x1956C
+#define TXGBE_RDB_FDIR_OTHER_MSK 0x19570
+#define TXGBE_RDB_FDIR_OTHER_MSK_POOL BIT(2)
+#define TXGBE_RDB_FDIR_OTHER_MSK_L4P BIT(3)
+#define TXGBE_RDB_FDIR_FLEX_CFG(_i) (0x19580 + ((_i) * 4))
+#define TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 GENMASK(7, 0)
+#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC FIELD_PREP(GENMASK(1, 0), 0)
+#define TXGBE_RDB_FDIR_FLEX_CFG_MSK BIT(2)
+#define TXGBE_RDB_FDIR_FLEX_CFG_OFST(v) FIELD_PREP(GENMASK(7, 3), v)
+
/* Checksum and EEPROM pointers */
#define TXGBE_EEPROM_LAST_WORD 0x800
#define TXGBE_EEPROM_CHECKSUM 0x2F
@@ -112,6 +161,98 @@
#define TXGBE_SP_RX_PB_SIZE 512
#define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */
+#define TXGBE_DEFAULT_ATR_SAMPLE_RATE 20
+
+/* Software ATR hash keys */
+#define TXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
+#define TXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
+
+/* Software ATR input stream values and masks */
+#define TXGBE_ATR_HASH_MASK 0x7fff
+#define TXGBE_ATR_L4TYPE_MASK 0x3
+#define TXGBE_ATR_L4TYPE_UDP 0x1
+#define TXGBE_ATR_L4TYPE_TCP 0x2
+#define TXGBE_ATR_L4TYPE_SCTP 0x3
+#define TXGBE_ATR_L4TYPE_IPV6_MASK 0x4
+#define TXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10
+
+enum txgbe_atr_flow_type {
+ TXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
+ TXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
+ TXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
+ TXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
+ TXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
+ TXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
+ TXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
+ TXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16,
+ TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17,
+};
+
+/* Flow Director ATR input struct. */
+union txgbe_atr_input {
+ /* Byte layout in order, all values with MSB first:
+ *
+ * vm_pool - 1 byte
+ * flow_type - 1 byte
+ * vlan_id - 2 bytes
+ * dst_ip - 16 bytes
+ * src_ip - 16 bytes
+ * src_port - 2 bytes
+ * dst_port - 2 bytes
+ * flex_bytes - 2 bytes
+ * bkt_hash - 2 bytes
+ */
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ __be32 dst_ip[4];
+ __be32 src_ip[4];
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 flex_bytes;
+ __be16 bkt_hash;
+ } formatted;
+ __be32 dword_stream[11];
+};
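
The layout comment works out to 1 + 1 + 2 + 16 + 16 + 2 + 2 + 2 + 2 = 44 bytes, i.e. exactly the 11 dwords of dword_stream, which is why the mask loop in txgbe_atr_compute_perfect_hash() runs over indices 0..10. A mirrored user-space sketch (fixed-width stand-ins for the __be types) to sanity-check that overlay:

#include <stdint.h>

union atr_input_sketch {
	struct {
		uint8_t  vm_pool;
		uint8_t  flow_type;
		uint16_t vlan_id;
		uint32_t dst_ip[4];
		uint32_t src_ip[4];
		uint16_t src_port;
		uint16_t dst_port;
		uint16_t flex_bytes;
		uint16_t bkt_hash;
	} formatted;
	uint32_t dword_stream[11];
};

_Static_assert(sizeof(union atr_input_sketch) == 11 * sizeof(uint32_t),
	       "formatted view must exactly overlay the 11-dword stream");

int main(void)
{
	return 0;
}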
+
+/* Flow Director compressed ATR hash input struct */
+union txgbe_atr_hash_dword {
+ struct {
+ u8 vm_pool;
+ u8 flow_type;
+ __be16 vlan_id;
+ } formatted;
+ __be32 ip;
+ struct {
+ __be16 src;
+ __be16 dst;
+ } port;
+ __be16 flex_bytes;
+ __be32 dword;
+};
+
+enum txgbe_fdir_pballoc_type {
+ TXGBE_FDIR_PBALLOC_NONE = 0,
+ TXGBE_FDIR_PBALLOC_64K = 1,
+ TXGBE_FDIR_PBALLOC_128K = 2,
+ TXGBE_FDIR_PBALLOC_256K = 3,
+};
+
+struct txgbe_fdir_filter {
+ struct hlist_node fdir_node;
+ union txgbe_atr_input filter;
+ u16 sw_idx;
+ u16 action;
+};
+
/* TX/RX descriptor defines */
#define TXGBE_DEFAULT_TXD 512
#define TXGBE_DEFAULT_TX_WORK 256
@@ -134,6 +275,7 @@ extern char txgbe_driver_name[];
void txgbe_down(struct wx *wx);
void txgbe_up(struct wx *wx);
int txgbe_setup_tc(struct net_device *dev, u8 tc);
+void txgbe_do_reset(struct net_device *netdev);
#define NODE_PROP(_NAME, _PROP) \
(const struct software_node) { \
@@ -195,6 +337,12 @@ struct txgbe {
struct gpio_chip *gpio;
unsigned int gpio_irq;
unsigned int link_irq;
+
+ /* flow director */
+ struct hlist_head fdir_filter_list;
+ union txgbe_atr_input fdir_mask;
+ int fdir_filter_count;
+ spinlock_t fdir_perfect_lock; /* spinlock for FDIR */
};
#endif /* _TXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 807ead678551..09c9f9787180 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -160,16 +160,17 @@
#define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */
#define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */
#define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */
-#define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */
-#define XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */
+#define XAE_EMMC_OFFSET 0x00000410 /* MAC speed configuration */
+#define XAE_PHYC_OFFSET 0x00000414 /* RX Max Frame Configuration */
#define XAE_ID_OFFSET 0x000004F8 /* Identification register */
-#define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */
-#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */
-#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */
-#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */
+#define XAE_MDIO_MC_OFFSET 0x00000500 /* MDIO Setup */
+#define XAE_MDIO_MCR_OFFSET 0x00000504 /* MDIO Control */
+#define XAE_MDIO_MWD_OFFSET 0x00000508 /* MDIO Write Data */
+#define XAE_MDIO_MRD_OFFSET 0x0000050C /* MDIO Read Data */
#define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */
#define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */
-#define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */
+#define XAE_FMI_OFFSET 0x00000708 /* Frame Filter Control */
+#define XAE_FFE_OFFSET 0x0000070C /* Frame Filter Enable */
#define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */
#define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */
@@ -308,7 +309,7 @@
*/
#define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF
-/* Bit masks for Axi Ethernet FMI register */
+/* Bit masks for Axi Ethernet FMC register */
#define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */
#define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */
@@ -359,6 +360,7 @@
* @app2: MM2S/S2MM User Application Field 2.
* @app3: MM2S/S2MM User Application Field 3.
* @app4: MM2S/S2MM User Application Field 4.
+ * @skb: Pointer to SKB transferred using DMA
*/
struct axidma_bd {
u32 next; /* Physical address of next buffer descriptor */
@@ -399,7 +401,6 @@ struct skbuf_dma_descriptor {
* struct axienet_local - axienet private per device data
* @ndev: Pointer for net_device to which it will be attached.
* @dev: Pointer to device structure
- * @phy_node: Pointer to device node structure
* @phylink: Pointer to phylink instance
* @phylink_config: phylink configuration settings
* @pcs_phy: Reference to PCS/PMA PHY if used
@@ -537,7 +538,7 @@ struct axienet_local {
};
/**
- * struct axiethernet_option - Used to set axi ethernet hardware options
+ * struct axienet_option - Used to set axi ethernet hardware options
* @opt: Option to be set.
* @reg: Register offset to be written for setting the option
* @m_or: Mask to be ORed for setting the option in the register
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index aaf780fd4f5e..9aeb7b9f3ae4 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -432,7 +432,7 @@ static int netdev_set_mac_address(struct net_device *ndev, void *p)
*/
static void axienet_set_multicast_list(struct net_device *ndev)
{
- int i;
+ int i = 0;
u32 reg, af0reg, af1reg;
struct axienet_local *lp = netdev_priv(ndev);
@@ -450,7 +450,10 @@ static void axienet_set_multicast_list(struct net_device *ndev)
} else if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
- i = 0;
+ reg = axienet_ior(lp, XAE_FMI_OFFSET);
+ reg &= ~XAE_FMI_PM_MASK;
+ axienet_iow(lp, XAE_FMI_OFFSET, reg);
+
netdev_for_each_mc_addr(ha, ndev) {
if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
break;
@@ -469,6 +472,7 @@ static void axienet_set_multicast_list(struct net_device *ndev)
axienet_iow(lp, XAE_FMI_OFFSET, reg);
axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
+ axienet_iow(lp, XAE_FFE_OFFSET, 1);
i++;
}
} else {
@@ -476,18 +480,15 @@ static void axienet_set_multicast_list(struct net_device *ndev)
reg &= ~XAE_FMI_PM_MASK;
axienet_iow(lp, XAE_FMI_OFFSET, reg);
-
- for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
- reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
- reg |= i;
-
- axienet_iow(lp, XAE_FMI_OFFSET, reg);
- axienet_iow(lp, XAE_AF0_OFFSET, 0);
- axienet_iow(lp, XAE_AF1_OFFSET, 0);
- }
-
dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
}
+
+ for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
+ reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
+ reg |= i;
+ axienet_iow(lp, XAE_FMI_OFFSET, reg);
+ axienet_iow(lp, XAE_FFE_OFFSET, 0);
+ }
}
/**
@@ -1641,7 +1642,7 @@ static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
XAE_TRL_SIZE) > lp->rxmem)
return -EINVAL;
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
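
The mtu store is converted to WRITE_ONCE() because the field can be read locklessly (readers pair with READ_ONCE()), and the annotation keeps the compiler from tearing or fusing the access. A minimal standalone illustration of the pattern; the macros below are simplified stand-ins for the kernel's, shown for this sketch only:

#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

struct net_device_stub { unsigned int mtu; };

int main(void)
{
	struct net_device_stub dev = { .mtu = 1500 };

	WRITE_ONCE(dev.mtu, 9000);		/* writer, under config lock */
	printf("mtu=%u\n", READ_ONCE(dev.mtu));	/* lockless reader */
	return 0;
}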
@@ -1945,9 +1946,9 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (netif_running(ndev)) {
- netdev_err(ndev,
- "Please stop netif before applying configuration\n");
- return -EFAULT;
+ NL_SET_ERR_MSG(extack,
+ "Please stop netif before applying configuration");
+ return -EBUSY;
}
if (ecoalesce->rx_max_coalesced_frames)
@@ -2219,9 +2220,9 @@ static void axienet_dma_err_handler(struct work_struct *work)
~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
axienet_set_mac_address(ndev, NULL);
axienet_set_multicast_list(ndev);
- axienet_setoptions(ndev, lp->options);
napi_enable(&lp->napi_rx);
napi_enable(&lp->napi_tx);
+ axienet_setoptions(ndev, lp->options);
}
/**
@@ -2254,7 +2255,6 @@ static int axienet_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
- ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
ndev->features = NETIF_F_SG;
ndev->ethtool_ops = &axienet_ethtool_ops;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 2f07fde361aa..9ca2643c921e 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -20,7 +20,14 @@
#define DEFAULT_MDIO_FREQ 2500000 /* 2.5 MHz */
#define DEFAULT_HOST_CLOCK 150000000 /* 150 MHz */
-/* Wait till MDIO interface is ready to accept a new transaction.*/
+/**
+ * axienet_mdio_wait_until_ready - MDIO wait function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Return: 0 on success, negative value on errors
+ *
+ * Wait till MDIO interface is ready to accept a new transaction.
+ */
static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
{
u32 val;
@@ -30,14 +37,24 @@ static int axienet_mdio_wait_until_ready(struct axienet_local *lp)
1, 20000);
}
-/* Enable the MDIO MDC. Called prior to a read/write operation */
+/**
+ * axienet_mdio_mdc_enable - MDIO MDC enable function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Enable the MDIO MDC. Called prior to a read/write operation
+ */
static void axienet_mdio_mdc_enable(struct axienet_local *lp)
{
axienet_iow(lp, XAE_MDIO_MC_OFFSET,
((u32)lp->mii_clk_div | XAE_MDIO_MC_MDIOEN_MASK));
}
-/* Disable the MDIO MDC. Called after a read/write operation*/
+/**
+ * axienet_mdio_mdc_disable - MDIO MDC disable function
+ * @lp: Pointer to axienet local data structure.
+ *
+ * Disable the MDIO MDC. Called after a read/write operation
+ */
static void axienet_mdio_mdc_disable(struct axienet_local *lp)
{
u32 mc_reg;
diff --git a/drivers/net/ethernet/xircom/Kconfig b/drivers/net/ethernet/xircom/Kconfig
index 7497b9bea511..bfbdcf758afb 100644
--- a/drivers/net/ethernet/xircom/Kconfig
+++ b/drivers/net/ethernet/xircom/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_XIRCOM
config PCMCIA_XIRC2PS
tristate "Xircom 16-bit PCMCIA support"
- depends on PCMCIA
+ depends on PCMCIA && HAS_IOPORT
help
Say Y here if you intend to attach a Xircom 16-bit PCMCIA (PC-card)
Ethernet or Fast Ethernet card to your computer.
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index e9bc38fd2025..a31d5d5e6593 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1366,10 +1366,10 @@ do_config(struct net_device *dev, struct ifmap *map)
return -EINVAL;
if (!map->port) {
local->probe_port = 1;
- dev->if_port = 1;
+ WRITE_ONCE(dev->if_port, 1);
} else {
local->probe_port = 0;
- dev->if_port = map->port;
+ WRITE_ONCE(dev->if_port, map->port);
}
netdev_info(dev, "switching to %s port\n", if_names[dev->if_port]);
do_reset(dev,1); /* not the fine way :-) */
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index e0d26148dfd9..56df37f8d50a 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1015,7 +1015,7 @@ static void ixp4xx_get_drvinfo(struct net_device *dev,
}
static int ixp4xx_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct port *port = netdev_priv(dev);
@@ -1233,7 +1233,7 @@ static int ixp4xx_eth_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 1fef8a9b1a0f..0fbbb7286008 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -254,7 +254,7 @@ static const char version[] =
#define DFX_BUS_TC(dev) 0
#endif
-#if defined(CONFIG_EISA) || defined(CONFIG_PCI)
+#ifdef CONFIG_HAS_IOPORT
#define dfx_use_mmio bp->mmio
#else
#define dfx_use_mmio true
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 5fbe33a09bb0..fad5b6564464 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -14,9 +14,7 @@
#include "fjes.h"
#include "fjes_trace.h"
-#define MAJ 1
-#define MIN 2
-#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN)
+#define DRV_VERSION "1.2"
#define DRV_NAME "fjes"
char fjes_driver_name[] = DRV_NAME;
char fjes_driver_version[] = DRV_VERSION;
@@ -156,7 +154,6 @@ static void fjes_acpi_remove(struct acpi_device *device)
static struct acpi_driver fjes_acpi_driver = {
.name = DRV_NAME,
.class = DRV_NAME,
- .owner = THIS_MODULE,
.ids = fjes_acpi_ids,
.ops = {
.add = fjes_acpi_add,
@@ -811,7 +808,7 @@ static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
netif_tx_stop_all_queues(netdev);
}
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
if (running) {
for (epidx = 0; epidx < hw->max_epid; epidx++) {
diff --git a/drivers/net/fjes/fjes_trace.h b/drivers/net/fjes/fjes_trace.h
index 6437ddbd7842..37c6071cb333 100644
--- a/drivers/net/fjes/fjes_trace.h
+++ b/drivers/net/fjes/fjes_trace.h
@@ -85,7 +85,7 @@ TRACE_EVENT(fjes_hw_request_info_err,
__string(err, err)
),
TP_fast_assign(
- __assign_str(err, err);
+ __assign_str(err);
),
TP_printk("%s", __get_str(err))
);
@@ -145,7 +145,7 @@ TRACE_EVENT(fjes_hw_register_buff_addr_err,
__string(err, err)
),
TP_fast_assign(
- __assign_str(err, err);
+ __assign_str(err);
),
TP_printk("%s", __get_str(err))
);
@@ -189,7 +189,7 @@ TRACE_EVENT(fjes_hw_unregister_buff_addr_err,
__string(err, err)
),
TP_fast_assign(
- __assign_str(err, err);
+ __assign_str(err);
),
TP_printk("%s", __get_str(err))
);
@@ -232,7 +232,7 @@ TRACE_EVENT(fjes_hw_start_debug_err,
__string(err, err)
),
TP_fast_assign(
- __assign_str(err, err);
+ __assign_str(err);
),
TP_printk("%s", __get_str(err))
);
@@ -258,7 +258,7 @@ TRACE_EVENT(fjes_hw_stop_debug_err,
__string(err, err)
),
TP_fast_assign(
- __assign_str(err, err);
+ __assign_str(err);
),
TP_printk("%s", __get_str(err))
);
@@ -358,7 +358,7 @@ TRACE_EVENT(fjes_stop_req_irq_post,
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH ../../../drivers/net/fjes
+#define TRACE_INCLUDE_PATH ../../drivers/net/fjes
#define TRACE_INCLUDE_FILE fjes_trace
/* This part must be outside protection */
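The __assign_str() calls above lose their second argument because the tracing core now derives the source string from the matching __string() declaration. A minimal sketch of the resulting TRACE_EVENT pattern; the event name is hypothetical:

	TRACE_EVENT(example_err,
		TP_PROTO(const char *err),
		TP_ARGS(err),
		TP_STRUCT__entry(
			__string(err, err)	/* source and length captured here */
		),
		TP_fast_assign(
			__assign_str(err);	/* source implied by __string() above */
		),
		TP_printk("%s", __get_str(err))
	);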
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 6c2835086b57..838e85ddec67 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -225,10 +225,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
void *oiph;
if (ip_tunnel_collect_metadata() || gs->collect_md) {
- __be16 flags;
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
- flags = TUNNEL_KEY | (gnvh->oam ? TUNNEL_OAM : 0) |
- (gnvh->critical ? TUNNEL_CRIT_OPT : 0);
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
+ __assign_bit(IP_TUNNEL_OAM_BIT, flags, gnvh->oam);
+ __assign_bit(IP_TUNNEL_CRIT_OPT_BIT, flags, gnvh->critical);
tun_dst = udp_tun_rx_dst(skb, geneve_get_sk_family(gs), flags,
vni_to_tunnel_id(gnvh->vni),
@@ -238,9 +239,11 @@ static void geneve_rx(struct geneve_dev *geneve, struct geneve_sock *gs,
goto drop;
}
/* Update tunnel dst according to Geneve options. */
+ ip_tunnel_flags_zero(flags);
+ __set_bit(IP_TUNNEL_GENEVE_OPT_BIT, flags);
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,
gnvh->options, gnvh->opt_len * 4,
- TUNNEL_GENEVE_OPT);
+ flags);
} else {
/* Drop packets w/ critical options,
* since we don't support any...
@@ -745,14 +748,15 @@ static void geneve_build_header(struct genevehdr *geneveh,
{
geneveh->ver = GENEVE_VER;
geneveh->opt_len = info->options_len / 4;
- geneveh->oam = !!(info->key.tun_flags & TUNNEL_OAM);
- geneveh->critical = !!(info->key.tun_flags & TUNNEL_CRIT_OPT);
+ geneveh->oam = test_bit(IP_TUNNEL_OAM_BIT, info->key.tun_flags);
+ geneveh->critical = test_bit(IP_TUNNEL_CRIT_OPT_BIT,
+ info->key.tun_flags);
geneveh->rsvd1 = 0;
tunnel_id_to_vni(info->key.tun_id, geneveh->vni);
geneveh->proto_type = inner_proto;
geneveh->rsvd2 = 0;
- if (info->key.tun_flags & TUNNEL_GENEVE_OPT)
+ if (test_bit(IP_TUNNEL_GENEVE_OPT_BIT, info->key.tun_flags))
ip_tunnel_info_opts_get(geneveh->options, info);
}
@@ -761,7 +765,7 @@ static int geneve_build_skb(struct dst_entry *dst, struct sk_buff *skb,
bool xnet, int ip_hdr_len,
bool inner_proto_inherit)
{
- bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
+ bool udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
struct genevehdr *gnvh;
__be16 inner_proto;
int min_headroom;
@@ -811,6 +815,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
+ bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs4 = rcu_dereference(geneve->sock4);
const struct ip_tunnel_key *key = &info->key;
@@ -822,7 +827,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!skb_vlan_inet_prepare(skb))
+ if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
if (!gs4)
@@ -878,7 +883,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
if (geneve->cfg.collect_md) {
ttl = key->ttl;
- df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
+ df = test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags) ?
+ htons(IP_DF) : 0;
} else {
if (geneve->cfg.ttl_inherit)
ttl = ip_tunnel_get_ttl(ip_hdr(skb), skb);
@@ -903,14 +909,15 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
}
err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr),
- geneve->cfg.inner_proto_inherit);
+ inner_proto_inherit);
if (unlikely(err))
return err;
udp_tunnel_xmit_skb(rt, gs4->sock->sk, skb, saddr, info->key.u.ipv4.dst,
tos, ttl, df, sport, geneve->cfg.info.key.tp_dst,
!net_eq(geneve->net, dev_net(geneve->dev)),
- !(info->key.tun_flags & TUNNEL_CSUM));
+ !test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags));
return 0;
}
@@ -919,6 +926,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
struct geneve_dev *geneve,
const struct ip_tunnel_info *info)
{
+ bool inner_proto_inherit = geneve->cfg.inner_proto_inherit;
bool xnet = !net_eq(geneve->net, dev_net(geneve->dev));
struct geneve_sock *gs6 = rcu_dereference(geneve->sock6);
const struct ip_tunnel_key *key = &info->key;
@@ -929,7 +937,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
- if (!skb_vlan_inet_prepare(skb))
+ if (!skb_vlan_inet_prepare(skb, inner_proto_inherit))
return -EINVAL;
if (!gs6)
@@ -991,14 +999,15 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
ttl = ttl ? : ip6_dst_hoplimit(dst);
}
err = geneve_build_skb(dst, skb, info, xnet, sizeof(struct ipv6hdr),
- geneve->cfg.inner_proto_inherit);
+ inner_proto_inherit);
if (unlikely(err))
return err;
udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
&saddr, &key->u.ipv6.dst, prio, ttl,
info->key.label, sport, geneve->cfg.info.key.tp_dst,
- !(info->key.tun_flags & TUNNEL_CSUM));
+ !test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags));
return 0;
}
#endif
@@ -1052,7 +1061,7 @@ static int geneve_change_mtu(struct net_device *dev, int new_mtu)
else if (new_mtu < dev->min_mtu)
new_mtu = dev->min_mtu;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -1297,7 +1306,8 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
static bool is_tnl_info_zero(const struct ip_tunnel_info *info)
{
- return !(info->key.tun_id || info->key.tun_flags || info->key.tos ||
+ return !(info->key.tun_id || info->key.tos ||
+ !ip_tunnel_flags_empty(info->key.tun_flags) ||
info->key.ttl || info->key.label || info->key.tp_src ||
memchr_inv(&info->key.u, 0, sizeof(info->key.u)));
}
@@ -1435,7 +1445,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
"Remote IPv6 address cannot be Multicast");
return -EINVAL;
}
- info->key.tun_flags |= TUNNEL_CSUM;
+ __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
cfg->use_udp6_rx_checksums = true;
#else
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_REMOTE6],
@@ -1510,7 +1520,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_CSUM]))
- info->key.tun_flags |= TUNNEL_CSUM;
+ __set_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
@@ -1520,7 +1530,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[],
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
- info->key.tun_flags &= ~TUNNEL_CSUM;
+ __clear_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
#else
NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
"IPv6 support not enabled in the kernel");
@@ -1753,7 +1763,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
info->key.u.ipv4.dst))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_GENEVE_UDP_CSUM,
- !!(info->key.tun_flags & TUNNEL_CSUM)))
+ test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags)))
goto nla_put_failure;
#if IS_ENABLED(CONFIG_IPV6)
@@ -1762,7 +1773,8 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
&info->key.u.ipv6.dst))
goto nla_put_failure;
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_TX,
- !(info->key.tun_flags & TUNNEL_CSUM)))
+ !test_bit(IP_TUNNEL_CSUM_BIT,
+ info->key.tun_flags)))
goto nla_put_failure;
#endif
}
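The geneve hunks above convert the old __be16 tun_flags bitmask into the ip_tunnel flags bitmap API. A minimal sketch of the idiom; the oam variable and enable_udp_csum() helper are illustrative:

	IP_TUNNEL_DECLARE_FLAGS(flags) = { };

	__set_bit(IP_TUNNEL_KEY_BIT, flags);		/* was: flags = TUNNEL_KEY */
	__assign_bit(IP_TUNNEL_OAM_BIT, flags, oam);	/* was: |= oam ? TUNNEL_OAM : 0 */

	if (test_bit(IP_TUNNEL_CSUM_BIT, flags))	/* was: flags & TUNNEL_CSUM */
		enable_udp_csum();

	ip_tunnel_flags_zero(flags);			/* was: flags = 0 */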
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index e62d6cbdf9bc..0696faf60013 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -24,6 +24,7 @@
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
+#include <net/ipv6.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/icmp.h>
@@ -50,8 +51,14 @@ struct pdp_ctx {
u8 gtp_version;
u16 af;
- struct in_addr ms_addr_ip4;
- struct in_addr peer_addr_ip4;
+ union {
+ struct in_addr addr;
+ struct in6_addr addr6;
+ } ms;
+ union {
+ struct in_addr addr;
+ struct in6_addr addr6;
+ } peer;
struct sock *sk;
struct net_device *dev;
@@ -80,9 +87,15 @@ struct gtp_dev {
};
struct echo_info {
- struct in_addr ms_addr_ip4;
- struct in_addr peer_addr_ip4;
+ u16 af;
u8 gtp_version;
+
+ union {
+ struct in_addr addr;
+ } ms;
+ union {
+ struct in_addr addr;
+ } peer;
};
static unsigned int gtp_net_id __read_mostly;
@@ -121,8 +134,14 @@ static inline u32 ipv4_hashfn(__be32 ip)
return jhash_1word((__force u32)ip, gtp_h_initval);
}
+static u32 ipv6_hashfn(const struct in6_addr *ip6)
+{
+ return jhash_2words((__force u32)ip6->s6_addr32[0],
+ (__force u32)ip6->s6_addr32[1], gtp_h_initval);
+}
+
/* Resolve a PDP context structure based on the 64bit TID. */
-static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
+static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid, u16 family)
{
struct hlist_head *head;
struct pdp_ctx *pdp;
@@ -130,7 +149,8 @@ static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
head = &gtp->tid_hash[gtp0_hashfn(tid) % gtp->hash_size];
hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
- if (pdp->gtp_version == GTP_V0 &&
+ if (pdp->af == family &&
+ pdp->gtp_version == GTP_V0 &&
pdp->u.v0.tid == tid)
return pdp;
}
@@ -138,7 +158,7 @@ static struct pdp_ctx *gtp0_pdp_find(struct gtp_dev *gtp, u64 tid)
}
/* Resolve a PDP context structure based on the 32bit TEI. */
-static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
+static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid, u16 family)
{
struct hlist_head *head;
struct pdp_ctx *pdp;
@@ -146,7 +166,8 @@ static struct pdp_ctx *gtp1_pdp_find(struct gtp_dev *gtp, u32 tid)
head = &gtp->tid_hash[gtp1u_hashfn(tid) % gtp->hash_size];
hlist_for_each_entry_rcu(pdp, head, hlist_tid) {
- if (pdp->gtp_version == GTP_V1 &&
+ if (pdp->af == family &&
+ pdp->gtp_version == GTP_V1 &&
pdp->u.v1.i_tei == tid)
return pdp;
}
@@ -163,7 +184,42 @@ static struct pdp_ctx *ipv4_pdp_find(struct gtp_dev *gtp, __be32 ms_addr)
hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
if (pdp->af == AF_INET &&
- pdp->ms_addr_ip4.s_addr == ms_addr)
+ pdp->ms.addr.s_addr == ms_addr)
+ return pdp;
+ }
+
+ return NULL;
+}
+
+/* 3GPP TS 29.060: PDN Connection: the association between a MS represented by
+ * [...] one IPv6 *prefix* and a PDN represented by an APN.
+ *
+ * Then, 3GPP TS 29.061, Section 11.2.1.3 says: The size of the prefix shall be
+ * according to the maximum prefix length for a global IPv6 address as
+ * specified in the IPv6 Addressing Architecture, see RFC 4291.
+ *
+ * Finally, RFC 4291 section 2.5.4 states: All Global Unicast addresses other
+ * than those that start with binary 000 have a 64-bit interface ID field
+ * (i.e., n + m = 64).
+ */
+static bool ipv6_pdp_addr_equal(const struct in6_addr *a,
+ const struct in6_addr *b)
+{
+ return a->s6_addr32[0] == b->s6_addr32[0] &&
+ a->s6_addr32[1] == b->s6_addr32[1];
+}
+
+static struct pdp_ctx *ipv6_pdp_find(struct gtp_dev *gtp,
+ const struct in6_addr *ms_addr)
+{
+ struct hlist_head *head;
+ struct pdp_ctx *pdp;
+
+ head = &gtp->addr_hash[ipv6_hashfn(ms_addr) % gtp->hash_size];
+
+ hlist_for_each_entry_rcu(pdp, head, hlist_addr) {
+ if (pdp->af == AF_INET6 &&
+ ipv6_pdp_addr_equal(&pdp->ms.addr6, ms_addr))
return pdp;
}
@@ -181,34 +237,85 @@ static bool gtp_check_ms_ipv4(struct sk_buff *skb, struct pdp_ctx *pctx,
iph = (struct iphdr *)(skb->data + hdrlen);
if (role == GTP_ROLE_SGSN)
- return iph->daddr == pctx->ms_addr_ip4.s_addr;
+ return iph->daddr == pctx->ms.addr.s_addr;
else
- return iph->saddr == pctx->ms_addr_ip4.s_addr;
+ return iph->saddr == pctx->ms.addr.s_addr;
+}
+
+static bool gtp_check_ms_ipv6(struct sk_buff *skb, struct pdp_ctx *pctx,
+ unsigned int hdrlen, unsigned int role)
+{
+ struct ipv6hdr *ip6h;
+ int ret;
+
+ if (!pskb_may_pull(skb, hdrlen + sizeof(struct ipv6hdr)))
+ return false;
+
+ ip6h = (struct ipv6hdr *)(skb->data + hdrlen);
+
+ if ((ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL) ||
+ (ipv6_addr_type(&ip6h->daddr) & IPV6_ADDR_LINKLOCAL))
+ return false;
+
+ if (role == GTP_ROLE_SGSN) {
+ ret = ipv6_pdp_addr_equal(&ip6h->daddr, &pctx->ms.addr6);
+ } else {
+ ret = ipv6_pdp_addr_equal(&ip6h->saddr, &pctx->ms.addr6);
+ }
+
+ return ret;
}
/* Check if the inner IP address in this packet is assigned to any
* existing mobile subscriber.
*/
static bool gtp_check_ms(struct sk_buff *skb, struct pdp_ctx *pctx,
- unsigned int hdrlen, unsigned int role)
+ unsigned int hdrlen, unsigned int role,
+ __u16 inner_proto)
{
- switch (ntohs(skb->protocol)) {
+ switch (inner_proto) {
case ETH_P_IP:
return gtp_check_ms_ipv4(skb, pctx, hdrlen, role);
+ case ETH_P_IPV6:
+ return gtp_check_ms_ipv6(skb, pctx, hdrlen, role);
}
return false;
}
+static int gtp_inner_proto(struct sk_buff *skb, unsigned int hdrlen,
+ __u16 *inner_proto)
+{
+ __u8 *ip_version, _ip_version;
+
+ ip_version = skb_header_pointer(skb, hdrlen, sizeof(*ip_version),
+ &_ip_version);
+ if (!ip_version)
+ return -1;
+
+ switch (*ip_version & 0xf0) {
+ case 0x40:
+ *inner_proto = ETH_P_IP;
+ break;
+ case 0x60:
+ *inner_proto = ETH_P_IPV6;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
static int gtp_rx(struct pdp_ctx *pctx, struct sk_buff *skb,
- unsigned int hdrlen, unsigned int role)
+ unsigned int hdrlen, unsigned int role, __u16 inner_proto)
{
- if (!gtp_check_ms(skb, pctx, hdrlen, role)) {
+ if (!gtp_check_ms(skb, pctx, hdrlen, role, inner_proto)) {
netdev_dbg(pctx->dev, "No PDP ctx for this MS\n");
return 1;
}
/* Get rid of the GTP + UDP headers. */
- if (iptunnel_pull_header(skb, hdrlen, skb->protocol,
+ if (iptunnel_pull_header(skb, hdrlen, htons(inner_proto),
!net_eq(sock_net(pctx->sk), dev_net(pctx->dev)))) {
pctx->dev->stats.rx_length_errors++;
goto err;
@@ -250,6 +357,27 @@ static struct rtable *ip4_route_output_gtp(struct flowi4 *fl4,
return ip_route_output_key(sock_net(sk), fl4);
}
+static struct rt6_info *ip6_route_output_gtp(struct net *net,
+ struct flowi6 *fl6,
+ const struct sock *sk,
+ const struct in6_addr *daddr,
+ struct in6_addr *saddr)
+{
+ struct dst_entry *dst;
+
+ memset(fl6, 0, sizeof(*fl6));
+ fl6->flowi6_oif = sk->sk_bound_dev_if;
+ fl6->daddr = *daddr;
+ fl6->saddr = *saddr;
+ fl6->flowi6_proto = sk->sk_protocol;
+
+ dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, fl6, NULL);
+ if (IS_ERR(dst))
+ return ERR_PTR(-ENETUNREACH);
+
+ return (struct rt6_info *)dst;
+}
+
/* GSM TS 09.60. 7.3
* In all Path Management messages:
* - TID: is not used and shall be set to 0.
@@ -292,13 +420,39 @@ static void gtp0_build_echo_msg(struct gtp0_header *hdr, __u8 msg_type)
hdr->length = 0;
}
+static int gtp0_send_echo_resp_ip(struct gtp_dev *gtp, struct sk_buff *skb)
+{
+ struct iphdr *iph = ip_hdr(skb);
+ struct flowi4 fl4;
+ struct rtable *rt;
+
+ /* find route to the sender,
+ * src address becomes dst address and vice versa.
+ */
+ rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
+ if (IS_ERR(rt)) {
+ netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
+ &iph->saddr);
+ return -1;
+ }
+
+ udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
+ fl4.saddr, fl4.daddr,
+ iph->tos,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ htons(GTP0_PORT), htons(GTP0_PORT),
+ !net_eq(sock_net(gtp->sk1u),
+ dev_net(gtp->dev)),
+ false);
+
+ return 0;
+}
+
static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
struct gtp0_packet *gtp_pkt;
struct gtp0_header *gtp0;
- struct rtable *rt;
- struct flowi4 fl4;
- struct iphdr *iph;
__be16 seq;
gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
@@ -325,27 +479,15 @@ static int gtp0_send_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
gtp_pkt->ie.tag = GTPIE_RECOVERY;
gtp_pkt->ie.val = gtp->restart_count;
- iph = ip_hdr(skb);
-
- /* find route to the sender,
- * src address becomes dst address and vice versa.
- */
- rt = ip4_route_output_gtp(&fl4, gtp->sk0, iph->saddr, iph->daddr);
- if (IS_ERR(rt)) {
- netdev_dbg(gtp->dev, "no route for echo response from %pI4\n",
- &iph->saddr);
+ switch (gtp->sk0->sk_family) {
+ case AF_INET:
+ if (gtp0_send_echo_resp_ip(gtp, skb) < 0)
+ return -1;
+ break;
+ case AF_INET6:
return -1;
}
- udp_tunnel_xmit_skb(rt, gtp->sk0, skb,
- fl4.saddr, fl4.daddr,
- iph->tos,
- ip4_dst_hoplimit(&rt->dst),
- 0,
- htons(GTP0_PORT), htons(GTP0_PORT),
- !net_eq(sock_net(gtp->sk1u),
- dev_net(gtp->dev)),
- false);
return 0;
}
@@ -360,8 +502,8 @@ static int gtp_genl_fill_echo(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
goto failure;
if (nla_put_u32(skb, GTPA_VERSION, echo.gtp_version) ||
- nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer_addr_ip4.s_addr) ||
- nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms_addr_ip4.s_addr))
+ nla_put_be32(skb, GTPA_PEER_ADDRESS, echo.peer.addr.s_addr) ||
+ nla_put_be32(skb, GTPA_MS_ADDRESS, echo.ms.addr.s_addr))
goto failure;
genlmsg_end(skb, genlh);
@@ -372,12 +514,20 @@ failure:
return -EMSGSIZE;
}
+static void gtp0_handle_echo_resp_ip(struct sk_buff *skb, struct echo_info *echo)
+{
+ struct iphdr *iph = ip_hdr(skb);
+
+ echo->ms.addr.s_addr = iph->daddr;
+ echo->peer.addr.s_addr = iph->saddr;
+ echo->gtp_version = GTP_V0;
+}
+
static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
{
struct gtp0_header *gtp0;
struct echo_info echo;
struct sk_buff *msg;
- struct iphdr *iph;
int ret;
gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));
@@ -385,10 +535,13 @@ static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
if (!gtp0_validate_echo_hdr(gtp0))
return -1;
- iph = ip_hdr(skb);
- echo.ms_addr_ip4.s_addr = iph->daddr;
- echo.peer_addr_ip4.s_addr = iph->saddr;
- echo.gtp_version = GTP_V0;
+ switch (gtp->sk0->sk_family) {
+ case AF_INET:
+ gtp0_handle_echo_resp_ip(skb, &echo);
+ break;
+ case AF_INET6:
+ return -1;
+ }
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
if (!msg)
@@ -404,6 +557,21 @@ static int gtp0_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
}
+static int gtp_proto_to_family(__u16 proto)
+{
+ switch (proto) {
+ case ETH_P_IP:
+ return AF_INET;
+ case ETH_P_IPV6:
+ return AF_INET6;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return AF_UNSPEC;
+}
+
/* 1 means pass up to the stack, -1 means drop and 0 means decapsulated. */
static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
@@ -411,6 +579,7 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
sizeof(struct gtp0_header);
struct gtp0_header *gtp0;
struct pdp_ctx *pctx;
+ __u16 inner_proto;
if (!pskb_may_pull(skb, hdrlen))
return -1;
@@ -433,13 +602,19 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
if (gtp0->type != GTP_TPDU)
return 1;
- pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
+ if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
+ netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
+ return -1;
+ }
+
+ pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid),
+ gtp_proto_to_family(inner_proto));
if (!pctx) {
netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
return 1;
}
- return gtp_rx(pctx, skb, hdrlen, gtp->role);
+ return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
}
/* msg_type has to be GTP_ECHO_REQ or GTP_ECHO_RSP */
@@ -549,8 +724,8 @@ static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
return -1;
iph = ip_hdr(skb);
- echo.ms_addr_ip4.s_addr = iph->daddr;
- echo.peer_addr_ip4.s_addr = iph->saddr;
+ echo.ms.addr.s_addr = iph->daddr;
+ echo.peer.addr.s_addr = iph->saddr;
echo.gtp_version = GTP_V1;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
@@ -567,12 +742,50 @@ static int gtp1u_handle_echo_resp(struct gtp_dev *gtp, struct sk_buff *skb)
msg, 0, GTP_GENL_MCGRP, GFP_ATOMIC);
}
+static int gtp_parse_exthdrs(struct sk_buff *skb, unsigned int *hdrlen)
+{
+ struct gtp_ext_hdr *gtp_exthdr, _gtp_exthdr;
+ unsigned int offset = *hdrlen;
+ __u8 *next_type, _next_type;
+
+ /* From 29.060: "The Extension Header Length field specifies the length
+ * of the particular Extension header in 4 octets units."
+ *
+ * This length field includes length field size itself (1 byte),
+ * payload (variable length) and next type (1 byte). The extension
+ * header is aligned to 4 bytes.
+ */
+
+ do {
+ gtp_exthdr = skb_header_pointer(skb, offset, sizeof(*gtp_exthdr),
+ &_gtp_exthdr);
+ if (!gtp_exthdr || !gtp_exthdr->len)
+ return -1;
+
+ offset += gtp_exthdr->len * 4;
+
+ /* From 29.060: "If no such Header follows, then the value of
+ * the Next Extension Header Type shall be 0."
+ */
+ next_type = skb_header_pointer(skb, offset - 1,
+ sizeof(_next_type), &_next_type);
+ if (!next_type)
+ return -1;
+
+ } while (*next_type != 0);
+
+ *hdrlen = offset;
+
+ return 0;
+}
+
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
{
unsigned int hdrlen = sizeof(struct udphdr) +
sizeof(struct gtp1_header);
struct gtp1_header *gtp1;
struct pdp_ctx *pctx;
+ __u16 inner_proto;
if (!pskb_may_pull(skb, hdrlen))
return -1;
@@ -608,15 +821,25 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb)
if (!pskb_may_pull(skb, hdrlen))
return -1;
+ if (gtp_inner_proto(skb, hdrlen, &inner_proto) < 0) {
+ netdev_dbg(gtp->dev, "GTP packet does not encapsulate an IP packet\n");
+ return -1;
+ }
+
gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
- pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
+ pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid),
+ gtp_proto_to_family(inner_proto));
if (!pctx) {
netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
return 1;
}
- return gtp_rx(pctx, skb, hdrlen, gtp->role);
+ if (gtp1->flags & GTP1_F_EXTHDR &&
+ gtp_parse_exthdrs(skb, &hdrlen) < 0)
+ return -1;
+
+ return gtp_rx(pctx, skb, hdrlen, gtp->role, inner_proto);
}
static void __gtp_encap_destroy(struct sock *sk)
@@ -760,11 +983,17 @@ static inline void gtp1_push_header(struct sk_buff *skb, struct pdp_ctx *pctx)
struct gtp_pktinfo {
struct sock *sk;
- struct iphdr *iph;
- struct flowi4 fl4;
- struct rtable *rt;
+ union {
+ struct flowi4 fl4;
+ struct flowi6 fl6;
+ };
+ union {
+ struct rtable *rt;
+ struct rt6_info *rt6;
+ };
struct pdp_ctx *pctx;
struct net_device *dev;
+ __u8 tos;
__be16 gtph_port;
};
@@ -783,64 +1012,61 @@ static void gtp_push_header(struct sk_buff *skb, struct gtp_pktinfo *pktinfo)
}
static inline void gtp_set_pktinfo_ipv4(struct gtp_pktinfo *pktinfo,
- struct sock *sk, struct iphdr *iph,
+ struct sock *sk, __u8 tos,
struct pdp_ctx *pctx, struct rtable *rt,
struct flowi4 *fl4,
struct net_device *dev)
{
pktinfo->sk = sk;
- pktinfo->iph = iph;
+ pktinfo->tos = tos;
pktinfo->pctx = pctx;
pktinfo->rt = rt;
pktinfo->fl4 = *fl4;
pktinfo->dev = dev;
}
-static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
- struct gtp_pktinfo *pktinfo)
+static void gtp_set_pktinfo_ipv6(struct gtp_pktinfo *pktinfo,
+ struct sock *sk, __u8 tos,
+ struct pdp_ctx *pctx, struct rt6_info *rt6,
+ struct flowi6 *fl6,
+ struct net_device *dev)
+{
+ pktinfo->sk = sk;
+ pktinfo->tos = tos;
+ pktinfo->pctx = pctx;
+ pktinfo->rt6 = rt6;
+ pktinfo->fl6 = *fl6;
+ pktinfo->dev = dev;
+}
+
+static int gtp_build_skb_outer_ip4(struct sk_buff *skb, struct net_device *dev,
+ struct gtp_pktinfo *pktinfo,
+ struct pdp_ctx *pctx, __u8 tos,
+ __be16 frag_off)
{
- struct gtp_dev *gtp = netdev_priv(dev);
- struct pdp_ctx *pctx;
struct rtable *rt;
struct flowi4 fl4;
- struct iphdr *iph;
__be16 df;
int mtu;
- /* Read the IP destination address and resolve the PDP context.
- * Prepend PDP header with TEI/TID from PDP ctx.
- */
- iph = ip_hdr(skb);
- if (gtp->role == GTP_ROLE_SGSN)
- pctx = ipv4_pdp_find(gtp, iph->saddr);
- else
- pctx = ipv4_pdp_find(gtp, iph->daddr);
-
- if (!pctx) {
- netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
- &iph->daddr);
- return -ENOENT;
- }
- netdev_dbg(dev, "found PDP context %p\n", pctx);
-
- rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer_addr_ip4.s_addr,
+ rt = ip4_route_output_gtp(&fl4, pctx->sk, pctx->peer.addr.s_addr,
inet_sk(pctx->sk)->inet_saddr);
if (IS_ERR(rt)) {
netdev_dbg(dev, "no route to SSGN %pI4\n",
- &pctx->peer_addr_ip4.s_addr);
+ &pctx->peer.addr.s_addr);
dev->stats.tx_carrier_errors++;
goto err;
}
if (rt->dst.dev == dev) {
netdev_dbg(dev, "circular route to SSGN %pI4\n",
- &pctx->peer_addr_ip4.s_addr);
+ &pctx->peer.addr.s_addr);
dev->stats.collisions++;
goto err_rt;
}
/* This is similar to tnl_update_pmtu(). */
- df = iph->frag_off;
+ df = frag_off;
if (df) {
mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
sizeof(struct iphdr) - sizeof(struct udphdr);
@@ -858,7 +1084,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
skb_dst_update_pmtu_no_confirm(skb, mtu);
- if (iph->frag_off & htons(IP_DF) &&
+ if (frag_off & htons(IP_DF) &&
((!skb_is_gso(skb) && skb->len > mtu) ||
(skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu)))) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
@@ -867,7 +1093,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
goto err_rt;
}
- gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, iph, pctx, rt, &fl4, dev);
+ gtp_set_pktinfo_ipv4(pktinfo, pctx->sk, tos, pctx, rt, &fl4, dev);
gtp_push_header(skb, pktinfo);
return 0;
@@ -877,6 +1103,162 @@ err:
return -EBADMSG;
}
+static int gtp_build_skb_outer_ip6(struct net *net, struct sk_buff *skb,
+ struct net_device *dev,
+ struct gtp_pktinfo *pktinfo,
+ struct pdp_ctx *pctx, __u8 tos)
+{
+ struct dst_entry *dst;
+ struct rt6_info *rt;
+ struct flowi6 fl6;
+ int mtu;
+
+ rt = ip6_route_output_gtp(net, &fl6, pctx->sk, &pctx->peer.addr6,
+ &inet6_sk(pctx->sk)->saddr);
+ if (IS_ERR(rt)) {
+ netdev_dbg(dev, "no route to SSGN %pI6\n",
+ &pctx->peer.addr6);
+ dev->stats.tx_carrier_errors++;
+ goto err;
+ }
+ dst = &rt->dst;
+
+ if (rt->dst.dev == dev) {
+ netdev_dbg(dev, "circular route to SSGN %pI6\n",
+ &pctx->peer.addr6);
+ dev->stats.collisions++;
+ goto err_rt;
+ }
+
+ mtu = dst_mtu(&rt->dst) - dev->hard_header_len -
+ sizeof(struct ipv6hdr) - sizeof(struct udphdr);
+ switch (pctx->gtp_version) {
+ case GTP_V0:
+ mtu -= sizeof(struct gtp0_header);
+ break;
+ case GTP_V1:
+ mtu -= sizeof(struct gtp1_header);
+ break;
+ }
+
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
+
+ if ((!skb_is_gso(skb) && skb->len > mtu) ||
+ (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))) {
+ netdev_dbg(dev, "packet too big, fragmentation needed\n");
+ icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ goto err_rt;
+ }
+
+ gtp_set_pktinfo_ipv6(pktinfo, pctx->sk, tos, pctx, rt, &fl6, dev);
+ gtp_push_header(skb, pktinfo);
+
+ return 0;
+err_rt:
+ dst_release(dst);
+err:
+ return -EBADMSG;
+}
+
+static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
+ struct gtp_pktinfo *pktinfo)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+ struct net *net = gtp->net;
+ struct pdp_ctx *pctx;
+ struct iphdr *iph;
+ int ret;
+
+ /* Read the IP destination address and resolve the PDP context.
+ * Prepend PDP header with TEI/TID from PDP ctx.
+ */
+ iph = ip_hdr(skb);
+ if (gtp->role == GTP_ROLE_SGSN)
+ pctx = ipv4_pdp_find(gtp, iph->saddr);
+ else
+ pctx = ipv4_pdp_find(gtp, iph->daddr);
+
+ if (!pctx) {
+ netdev_dbg(dev, "no PDP ctx found for %pI4, skip\n",
+ &iph->daddr);
+ return -ENOENT;
+ }
+ netdev_dbg(dev, "found PDP context %p\n", pctx);
+
+ switch (pctx->sk->sk_family) {
+ case AF_INET:
+ ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx,
+ iph->tos, iph->frag_off);
+ break;
+ case AF_INET6:
+ ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx,
+ iph->tos);
+ break;
+ default:
+ ret = -1;
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ netdev_dbg(dev, "gtp -> IP src: %pI4 dst: %pI4\n",
+ &iph->saddr, &iph->daddr);
+
+ return 0;
+}
+
+static int gtp_build_skb_ip6(struct sk_buff *skb, struct net_device *dev,
+ struct gtp_pktinfo *pktinfo)
+{
+ struct gtp_dev *gtp = netdev_priv(dev);
+ struct net *net = gtp->net;
+ struct pdp_ctx *pctx;
+ struct ipv6hdr *ip6h;
+ __u8 tos;
+ int ret;
+
+ /* Read the IP destination address and resolve the PDP context.
+ * Prepend PDP header with TEI/TID from PDP ctx.
+ */
+ ip6h = ipv6_hdr(skb);
+ if (gtp->role == GTP_ROLE_SGSN)
+ pctx = ipv6_pdp_find(gtp, &ip6h->saddr);
+ else
+ pctx = ipv6_pdp_find(gtp, &ip6h->daddr);
+
+ if (!pctx) {
+ netdev_dbg(dev, "no PDP ctx found for %pI6, skip\n",
+ &ip6h->daddr);
+ return -ENOENT;
+ }
+ netdev_dbg(dev, "found PDP context %p\n", pctx);
+
+ tos = ipv6_get_dsfield(ip6h);
+
+ switch (pctx->sk->sk_family) {
+ case AF_INET:
+ ret = gtp_build_skb_outer_ip4(skb, dev, pktinfo, pctx, tos, 0);
+ break;
+ case AF_INET6:
+ ret = gtp_build_skb_outer_ip6(net, skb, dev, pktinfo, pctx, tos);
+ break;
+ default:
+ ret = -1;
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ netdev_dbg(dev, "gtp -> IP src: %pI6 dst: %pI6\n",
+ &ip6h->saddr, &ip6h->daddr);
+
+ return 0;
+}
+
static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned int proto = ntohs(skb->protocol);
@@ -887,6 +1269,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb_cow_head(skb, dev->needed_headroom))
goto tx_err;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_err;
+
skb_reset_inner_headers(skb);
/* PDP context lookups in gtp_build_skb_*() need rcu read-side lock. */
@@ -895,6 +1280,9 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
case ETH_P_IP:
err = gtp_build_skb_ip4(skb, dev, &pktinfo);
break;
+ case ETH_P_IPV6:
+ err = gtp_build_skb_ip6(skb, dev, &pktinfo);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -904,13 +1292,11 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
if (err < 0)
goto tx_err;
- switch (proto) {
- case ETH_P_IP:
- netdev_dbg(pktinfo.dev, "gtp -> IP src: %pI4 dst: %pI4\n",
- &pktinfo.iph->saddr, &pktinfo.iph->daddr);
+ switch (pktinfo.pctx->sk->sk_family) {
+ case AF_INET:
udp_tunnel_xmit_skb(pktinfo.rt, pktinfo.sk, skb,
pktinfo.fl4.saddr, pktinfo.fl4.daddr,
- pktinfo.iph->tos,
+ pktinfo.tos,
ip4_dst_hoplimit(&pktinfo.rt->dst),
0,
pktinfo.gtph_port, pktinfo.gtph_port,
@@ -918,6 +1304,19 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
dev_net(dev)),
false);
break;
+ case AF_INET6:
+#if IS_ENABLED(CONFIG_IPV6)
+ udp_tunnel6_xmit_skb(&pktinfo.rt6->dst, pktinfo.sk, skb, dev,
+ &pktinfo.fl6.saddr, &pktinfo.fl6.daddr,
+ pktinfo.tos,
+ ip6_dst_hoplimit(&pktinfo.rt6->dst),
+ 0,
+ pktinfo.gtph_port, pktinfo.gtph_port,
+ false);
+#else
+ goto tx_err;
+#endif
+ break;
}
return NETDEV_TX_OK;
@@ -936,11 +1335,11 @@ static const struct device_type gtp_type = {
.name = "gtp",
};
+#define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header))
+#define GTP_IPV4_MAXLEN (sizeof(struct iphdr) + GTP_TH_MAXLEN)
+
static void gtp_link_setup(struct net_device *dev)
{
- unsigned int max_gtp_header_len = sizeof(struct iphdr) +
- sizeof(struct udphdr) +
- sizeof(struct gtp0_header);
struct gtp_dev *gtp = netdev_priv(dev);
dev->netdev_ops = &gtp_netdev_ops;
@@ -949,7 +1348,7 @@ static void gtp_link_setup(struct net_device *dev)
dev->hard_header_len = 0;
dev->addr_len = 0;
- dev->mtu = ETH_DATA_LEN - max_gtp_header_len;
+ dev->mtu = ETH_DATA_LEN - GTP_IPV4_MAXLEN;
/* Zero header length. */
dev->type = ARPHRD_NONE;
@@ -960,7 +1359,7 @@ static void gtp_link_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
netif_keep_dst(dev);
- dev->needed_headroom = LL_MAX_HEADER + max_gtp_header_len;
+ dev->needed_headroom = LL_MAX_HEADER + GTP_IPV4_MAXLEN;
gtp->dev = dev;
}
@@ -975,17 +1374,45 @@ static void gtp_destructor(struct net_device *dev)
kfree(gtp->tid_hash);
}
-static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp)
+static int gtp_sock_udp_config(struct udp_port_cfg *udp_conf,
+ const struct nlattr *nla, int family)
+{
+ udp_conf->family = family;
+
+ switch (udp_conf->family) {
+ case AF_INET:
+ udp_conf->local_ip.s_addr = nla_get_be32(nla);
+ break;
+#if IS_ENABLED(CONFIG_IPV6)
+ case AF_INET6:
+ udp_conf->local_ip6 = nla_get_in6_addr(nla);
+ break;
+#endif
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp,
+ const struct nlattr *nla, int family)
{
struct udp_tunnel_sock_cfg tuncfg = {};
- struct udp_port_cfg udp_conf = {
- .local_ip.s_addr = htonl(INADDR_ANY),
- .family = AF_INET,
- };
+ struct udp_port_cfg udp_conf = {};
struct net *net = gtp->net;
struct socket *sock;
int err;
+ if (nla) {
+ err = gtp_sock_udp_config(&udp_conf, nla, family);
+ if (err < 0)
+ return ERR_PTR(err);
+ } else {
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+ udp_conf.family = AF_INET;
+ }
+
if (type == UDP_ENCAP_GTP0)
udp_conf.local_udp_port = htons(GTP0_PORT);
else if (type == UDP_ENCAP_GTP1U)
@@ -1007,16 +1434,17 @@ static struct sock *gtp_create_sock(int type, struct gtp_dev *gtp)
return sock->sk;
}
-static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[])
+static int gtp_create_sockets(struct gtp_dev *gtp, const struct nlattr *nla,
+ int family)
{
- struct sock *sk1u = NULL;
- struct sock *sk0 = NULL;
+ struct sock *sk1u;
+ struct sock *sk0;
- sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp);
+ sk0 = gtp_create_sock(UDP_ENCAP_GTP0, gtp, nla, family);
if (IS_ERR(sk0))
return PTR_ERR(sk0);
- sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp);
+ sk1u = gtp_create_sock(UDP_ENCAP_GTP1U, gtp, nla, family);
if (IS_ERR(sk1u)) {
udp_tunnel_sock_release(sk0->sk_socket);
return PTR_ERR(sk1u);
@@ -1029,6 +1457,9 @@ static int gtp_create_sockets(struct gtp_dev *gtp, struct nlattr *data[])
return 0;
}
+#define GTP_TH_MAXLEN (sizeof(struct udphdr) + sizeof(struct gtp0_header))
+#define GTP_IPV6_MAXLEN (sizeof(struct ipv6hdr) + GTP_TH_MAXLEN)
+
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[],
struct netlink_ext_ack *extack)
@@ -1038,6 +1469,11 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct gtp_net *gn;
int hashsize, err;
+#if !IS_ENABLED(CONFIG_IPV6)
+ if (data[IFLA_GTP_LOCAL6])
+ return -EAFNOSUPPORT;
+#endif
+
gtp = netdev_priv(dev);
if (!data[IFLA_GTP_PDP_HASHSIZE]) {
@@ -1066,13 +1502,24 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
if (err < 0)
return err;
- if (data[IFLA_GTP_CREATE_SOCKETS])
- err = gtp_create_sockets(gtp, data);
- else
+ if (data[IFLA_GTP_CREATE_SOCKETS]) {
+ if (data[IFLA_GTP_LOCAL6])
+ err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL6], AF_INET6);
+ else
+ err = gtp_create_sockets(gtp, data[IFLA_GTP_LOCAL], AF_INET);
+ } else {
err = gtp_encap_enable(gtp, data);
+ }
+
if (err < 0)
goto out_hashtable;
+ if ((gtp->sk0 && gtp->sk0->sk_family == AF_INET6) ||
+ (gtp->sk1u && gtp->sk1u->sk_family == AF_INET6)) {
+ dev->mtu = ETH_DATA_LEN - GTP_IPV6_MAXLEN;
+ dev->needed_headroom = LL_MAX_HEADER + GTP_IPV6_MAXLEN;
+ }
+
err = register_netdevice(dev);
if (err < 0) {
netdev_dbg(dev, "failed to register new netdev %d\n", err);
@@ -1117,6 +1564,8 @@ static const struct nla_policy gtp_policy[IFLA_GTP_MAX + 1] = {
[IFLA_GTP_ROLE] = { .type = NLA_U32 },
[IFLA_GTP_CREATE_SOCKETS] = { .type = NLA_U8 },
[IFLA_GTP_RESTART_COUNT] = { .type = NLA_U8 },
+ [IFLA_GTP_LOCAL] = { .type = NLA_U32 },
+ [IFLA_GTP_LOCAL6] = { .len = sizeof(struct in6_addr) },
};
static int gtp_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -1216,6 +1665,12 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
goto out_sock;
}
+ if (sk->sk_family == AF_INET6 &&
+ !sk->sk_ipv6only) {
+ sk = ERR_PTR(-EADDRNOTAVAIL);
+ goto out_sock;
+ }
+
lock_sock(sk);
if (sk->sk_user_data) {
sk = ERR_PTR(-EBUSY);
@@ -1267,6 +1722,13 @@ static int gtp_encap_enable(struct gtp_dev *gtp, struct nlattr *data[])
gtp->sk0 = sk0;
gtp->sk1u = sk1u;
+ if (sk0 && sk1u &&
+ sk0->sk_family != sk1u->sk_family) {
+ gtp_encap_disable_sock(sk0);
+ gtp_encap_disable_sock(sk1u);
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1296,14 +1758,9 @@ static struct gtp_dev *gtp_find_dev(struct net *src_net, struct nlattr *nla[])
return gtp;
}
-static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
+static void gtp_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
{
pctx->gtp_version = nla_get_u32(info->attrs[GTPA_VERSION]);
- pctx->af = AF_INET;
- pctx->peer_addr_ip4.s_addr =
- nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
- pctx->ms_addr_ip4.s_addr =
- nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
switch (pctx->gtp_version) {
case GTP_V0:
@@ -1323,29 +1780,102 @@ static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
}
}
+static void ip_pdp_peer_fill(struct pdp_ctx *pctx, struct genl_info *info)
+{
+ if (info->attrs[GTPA_PEER_ADDRESS]) {
+ pctx->peer.addr.s_addr =
+ nla_get_be32(info->attrs[GTPA_PEER_ADDRESS]);
+ } else if (info->attrs[GTPA_PEER_ADDR6]) {
+ pctx->peer.addr6 = nla_get_in6_addr(info->attrs[GTPA_PEER_ADDR6]);
+ }
+}
+
+static void ipv4_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
+{
+ ip_pdp_peer_fill(pctx, info);
+ pctx->ms.addr.s_addr =
+ nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+ gtp_pdp_fill(pctx, info);
+}
+
+static bool ipv6_pdp_fill(struct pdp_ctx *pctx, struct genl_info *info)
+{
+ ip_pdp_peer_fill(pctx, info);
+ pctx->ms.addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
+ if (pctx->ms.addr6.s6_addr32[2] ||
+ pctx->ms.addr6.s6_addr32[3])
+ return false;
+
+ gtp_pdp_fill(pctx, info);
+
+ return true;
+}
+
static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
struct genl_info *info)
{
struct pdp_ctx *pctx, *pctx_tid = NULL;
struct net_device *dev = gtp->dev;
u32 hash_ms, hash_tid = 0;
+ struct in6_addr ms_addr6;
unsigned int version;
bool found = false;
__be32 ms_addr;
+ int family;
- ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
- hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
version = nla_get_u32(info->attrs[GTPA_VERSION]);
- pctx = ipv4_pdp_find(gtp, ms_addr);
+ if (info->attrs[GTPA_FAMILY])
+ family = nla_get_u8(info->attrs[GTPA_FAMILY]);
+ else
+ family = AF_INET;
+
+#if !IS_ENABLED(CONFIG_IPV6)
+ if (family == AF_INET6)
+ return ERR_PTR(-EAFNOSUPPORT);
+#endif
+ if (!info->attrs[GTPA_PEER_ADDRESS] &&
+ !info->attrs[GTPA_PEER_ADDR6])
+ return ERR_PTR(-EINVAL);
+
+ if ((info->attrs[GTPA_PEER_ADDRESS] &&
+ sk->sk_family == AF_INET6) ||
+ (info->attrs[GTPA_PEER_ADDR6] &&
+ sk->sk_family == AF_INET))
+ return ERR_PTR(-EAFNOSUPPORT);
+
+ switch (family) {
+ case AF_INET:
+ if (!info->attrs[GTPA_MS_ADDRESS] ||
+ info->attrs[GTPA_MS_ADDR6])
+ return ERR_PTR(-EINVAL);
+
+ ms_addr = nla_get_be32(info->attrs[GTPA_MS_ADDRESS]);
+ hash_ms = ipv4_hashfn(ms_addr) % gtp->hash_size;
+ pctx = ipv4_pdp_find(gtp, ms_addr);
+ break;
+ case AF_INET6:
+ if (!info->attrs[GTPA_MS_ADDR6] ||
+ info->attrs[GTPA_MS_ADDRESS])
+ return ERR_PTR(-EINVAL);
+
+ ms_addr6 = nla_get_in6_addr(info->attrs[GTPA_MS_ADDR6]);
+ hash_ms = ipv6_hashfn(&ms_addr6) % gtp->hash_size;
+ pctx = ipv6_pdp_find(gtp, &ms_addr6);
+ break;
+ default:
+ return ERR_PTR(-EAFNOSUPPORT);
+ }
if (pctx)
found = true;
if (version == GTP_V0)
pctx_tid = gtp0_pdp_find(gtp,
- nla_get_u64(info->attrs[GTPA_TID]));
+ nla_get_u64(info->attrs[GTPA_TID]),
+ family);
else if (version == GTP_V1)
pctx_tid = gtp1_pdp_find(gtp,
- nla_get_u32(info->attrs[GTPA_I_TEI]));
+ nla_get_u32(info->attrs[GTPA_I_TEI]),
+ family);
if (pctx_tid)
found = true;
@@ -1360,7 +1890,15 @@ static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
if (!pctx)
pctx = pctx_tid;
- ipv4_pdp_fill(pctx, info);
+ switch (pctx->af) {
+ case AF_INET:
+ ipv4_pdp_fill(pctx, info);
+ break;
+ case AF_INET6:
+ if (!ipv6_pdp_fill(pctx, info))
+ return ERR_PTR(-EADDRNOTAVAIL);
+ break;
+ }
if (pctx->gtp_version == GTP_V0)
netdev_dbg(dev, "GTPv0-U: update tunnel id = %llx (pdp %p)\n",
@@ -1380,7 +1918,32 @@ static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
sock_hold(sk);
pctx->sk = sk;
pctx->dev = gtp->dev;
- ipv4_pdp_fill(pctx, info);
+ pctx->af = family;
+
+ switch (pctx->af) {
+ case AF_INET:
+ if (!info->attrs[GTPA_MS_ADDRESS]) {
+ sock_put(sk);
+ kfree(pctx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ipv4_pdp_fill(pctx, info);
+ break;
+ case AF_INET6:
+ if (!info->attrs[GTPA_MS_ADDR6]) {
+ sock_put(sk);
+ kfree(pctx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!ipv6_pdp_fill(pctx, info)) {
+ sock_put(sk);
+ kfree(pctx);
+ return ERR_PTR(-EADDRNOTAVAIL);
+ }
+ break;
+ }
atomic_set(&pctx->tx_seq, 0);
switch (pctx->gtp_version) {
@@ -1403,13 +1966,13 @@ static struct pdp_ctx *gtp_pdp_add(struct gtp_dev *gtp, struct sock *sk,
switch (pctx->gtp_version) {
case GTP_V0:
netdev_dbg(dev, "GTPv0-U: new PDP ctx id=%llx ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
- pctx->u.v0.tid, &pctx->peer_addr_ip4,
- &pctx->ms_addr_ip4, pctx);
+ pctx->u.v0.tid, &pctx->peer.addr,
+ &pctx->ms.addr, pctx);
break;
case GTP_V1:
netdev_dbg(dev, "GTPv1-U: new PDP ctx id=%x/%x ssgn=%pI4 ms=%pI4 (pdp=%p)\n",
pctx->u.v1.i_tei, pctx->u.v1.o_tei,
- &pctx->peer_addr_ip4, &pctx->ms_addr_ip4, pctx);
+ &pctx->peer.addr, &pctx->ms.addr, pctx);
break;
}
@@ -1442,9 +2005,7 @@ static int gtp_genl_new_pdp(struct sk_buff *skb, struct genl_info *info)
int err;
if (!info->attrs[GTPA_VERSION] ||
- !info->attrs[GTPA_LINK] ||
- !info->attrs[GTPA_PEER_ADDRESS] ||
- !info->attrs[GTPA_MS_ADDRESS])
+ !info->attrs[GTPA_LINK])
return -EINVAL;
version = nla_get_u32(info->attrs[GTPA_VERSION]);
@@ -1502,6 +2063,12 @@ static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
struct nlattr *nla[])
{
struct gtp_dev *gtp;
+ int family;
+
+ if (nla[GTPA_FAMILY])
+ family = nla_get_u8(nla[GTPA_FAMILY]);
+ else
+ family = AF_INET;
gtp = gtp_find_dev(net, nla);
if (!gtp)
@@ -1510,14 +2077,31 @@ static struct pdp_ctx *gtp_find_pdp_by_link(struct net *net,
if (nla[GTPA_MS_ADDRESS]) {
__be32 ip = nla_get_be32(nla[GTPA_MS_ADDRESS]);
+ if (family != AF_INET)
+ return ERR_PTR(-EINVAL);
+
return ipv4_pdp_find(gtp, ip);
+ } else if (nla[GTPA_MS_ADDR6]) {
+ struct in6_addr addr = nla_get_in6_addr(nla[GTPA_MS_ADDR6]);
+
+ if (family != AF_INET6)
+ return ERR_PTR(-EINVAL);
+
+ if (addr.s6_addr32[2] ||
+ addr.s6_addr32[3])
+ return ERR_PTR(-EADDRNOTAVAIL);
+
+ return ipv6_pdp_find(gtp, &addr);
} else if (nla[GTPA_VERSION]) {
u32 gtp_version = nla_get_u32(nla[GTPA_VERSION]);
- if (gtp_version == GTP_V0 && nla[GTPA_TID])
- return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]));
- else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI])
- return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]));
+ if (gtp_version == GTP_V0 && nla[GTPA_TID]) {
+ return gtp0_pdp_find(gtp, nla_get_u64(nla[GTPA_TID]),
+ family);
+ } else if (gtp_version == GTP_V1 && nla[GTPA_I_TEI]) {
+ return gtp1_pdp_find(gtp, nla_get_u32(nla[GTPA_I_TEI]),
+ family);
+ }
}
return ERR_PTR(-EINVAL);
@@ -1581,10 +2165,31 @@ static int gtp_genl_fill_info(struct sk_buff *skb, u32 snd_portid, u32 snd_seq,
if (nla_put_u32(skb, GTPA_VERSION, pctx->gtp_version) ||
nla_put_u32(skb, GTPA_LINK, pctx->dev->ifindex) ||
- nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer_addr_ip4.s_addr) ||
- nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms_addr_ip4.s_addr))
+ nla_put_u8(skb, GTPA_FAMILY, pctx->af))
goto nla_put_failure;
+ switch (pctx->af) {
+ case AF_INET:
+ if (nla_put_be32(skb, GTPA_MS_ADDRESS, pctx->ms.addr.s_addr))
+ goto nla_put_failure;
+ break;
+ case AF_INET6:
+ if (nla_put_in6_addr(skb, GTPA_MS_ADDR6, &pctx->ms.addr6))
+ goto nla_put_failure;
+ break;
+ }
+
+ switch (pctx->sk->sk_family) {
+ case AF_INET:
+ if (nla_put_be32(skb, GTPA_PEER_ADDRESS, pctx->peer.addr.s_addr))
+ goto nla_put_failure;
+ break;
+ case AF_INET6:
+ if (nla_put_in6_addr(skb, GTPA_PEER_ADDR6, &pctx->peer.addr6))
+ goto nla_put_failure;
+ break;
+ }
+
switch (pctx->gtp_version) {
case GTP_V0:
if (nla_put_u64_64bit(skb, GTPA_TID, pctx->u.v0.tid, GTPA_PAD) ||
@@ -1811,6 +2416,9 @@ static const struct nla_policy gtp_genl_policy[GTPA_MAX + 1] = {
[GTPA_NET_NS_FD] = { .type = NLA_U32, },
[GTPA_I_TEI] = { .type = NLA_U32, },
[GTPA_O_TEI] = { .type = NLA_U32, },
+ [GTPA_PEER_ADDR6] = { .len = sizeof(struct in6_addr), },
+ [GTPA_MS_ADDR6] = { .len = sizeof(struct in6_addr), },
+ [GTPA_FAMILY] = { .type = NLA_U8, },
};
static const struct genl_small_ops gtp_genl_ops[] = {
diff --git a/drivers/net/hamradio/Kconfig b/drivers/net/hamradio/Kconfig
index 25b1f929c422..36a9aade9f33 100644
--- a/drivers/net/hamradio/Kconfig
+++ b/drivers/net/hamradio/Kconfig
@@ -83,7 +83,7 @@ config SCC_TRXECHO
config BAYCOM_SER_FDX
tristate "BAYCOM ser12 fullduplex driver for AX.25"
- depends on AX25 && !S390
+ depends on AX25 && HAS_IOPORT
select CRC_CCITT
help
This is one of two drivers for Baycom style simple amateur radio
@@ -103,7 +103,7 @@ config BAYCOM_SER_FDX
config BAYCOM_SER_HDX
tristate "BAYCOM ser12 halfduplex driver for AX.25"
- depends on AX25 && !S390
+ depends on AX25 && HAS_IOPORT
select CRC_CCITT
help
This is one of two drivers for Baycom style simple amateur radio
@@ -150,7 +150,7 @@ config BAYCOM_EPP
config YAM
tristate "YAM driver for AX.25"
- depends on AX25 && !S390
+ depends on AX25 && HAS_IOPORT
help
The YAM is a modem for packet radio which connects to the serial
port and includes some of the functions of a Terminal Node
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index ccfc83857c26..9e366f275406 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -1193,7 +1193,6 @@ static int baycom_epp_par_probe(struct pardevice *par_dev)
static struct parport_driver baycom_epp_par_driver = {
.name = "bce",
.probe = baycom_epp_par_probe,
- .devmodel = true,
};
static void __init baycom_epp_dev_setup(struct net_device *dev)
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c
index fd7da5bb1fa5..00ebc25d0b22 100644
--- a/drivers/net/hamradio/baycom_par.c
+++ b/drivers/net/hamradio/baycom_par.c
@@ -503,7 +503,6 @@ static int baycom_par_probe(struct pardevice *par_dev)
static struct parport_driver baycom_par_driver = {
.name = "bcp",
.probe = baycom_par_probe,
- .devmodel = true,
};
static int __init init_baycompar(void)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 11831a1c9762..44142245343d 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1233,14 +1233,14 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (ret)
goto rollback_vf;
- ndev->mtu = mtu;
+ WRITE_ONCE(ndev->mtu, mtu);
ret = netvsc_attach(ndev, device_info);
if (!ret)
goto out;
/* Attempt rollback to original MTU */
- ndev->mtu = orig_mtu;
+ WRITE_ONCE(ndev->mtu, orig_mtu);
if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring mtu failed\n");
diff --git a/drivers/net/hyperv/netvsc_trace.h b/drivers/net/hyperv/netvsc_trace.h
index f7585563dea5..05e620cbdd29 100644
--- a/drivers/net/hyperv/netvsc_trace.h
+++ b/drivers/net/hyperv/netvsc_trace.h
@@ -51,7 +51,7 @@ DECLARE_EVENT_CLASS(rndis_msg_class,
__field( u32, msg_len )
),
TP_fast_assign(
- __assign_str(name, ndev->name);
+ __assign_str(name);
__entry->queue = q;
__entry->req_id = msg->msg.init_req.req_id;
__entry->msg_type = msg->ndis_msg_type;
@@ -121,7 +121,7 @@ TRACE_EVENT(nvsp_send,
__field( u32, msg_type )
),
TP_fast_assign(
- __assign_str(name, ndev->name);
+ __assign_str(name);
__entry->msg_type = msg->hdr.msg_type;
),
TP_printk("dev=%s type=%s",
@@ -142,7 +142,7 @@ TRACE_EVENT(nvsp_send_pkt,
__field( u32, section_size )
),
TP_fast_assign(
- __assign_str(name, ndev->name);
+ __assign_str(name);
__entry->qid = chan->offermsg.offer.sub_channel_index;
__entry->channel_type = rpkt->channel_type;
__entry->section_index = rpkt->send_buf_section_index;
@@ -165,7 +165,7 @@ TRACE_EVENT(nvsp_recv,
__field( u32, msg_type )
),
TP_fast_assign(
- __assign_str(name, ndev->name);
+ __assign_str(name);
__entry->qid = chan->offermsg.offer.sub_channel_index;
__entry->msg_type = msg->hdr.msg_type;
),
diff --git a/drivers/net/ipa/data/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
index 3380fb3483b2..e902d731776d 100644
--- a/drivers/net/ipa/data/ipa_data-v3.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -1,15 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
index 4287114b24db..f632aab56f4c 100644
--- a/drivers/net/ipa/data/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -1,15 +1,16 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.5.1 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c
index 1b4b52501ee3..c1428483ca34 100644
--- a/drivers/net/ipa/data/ipa_data-v4.11.c
+++ b/drivers/net/ipa/data/ipa_data-v4.11.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021 Linaro Ltd. */
+/* Copyright (C) 2021-2024 Linaro Ltd. */
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.11 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c
index 199ed0ed868b..2c7e8cb429b9 100644
--- a/drivers/net/ipa/data/ipa_data-v4.2.c
+++ b/drivers/net/ipa/data/ipa_data-v4.2.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2019-2021 Linaro Ltd. */
+/* Copyright (C) 2019-2024 Linaro Ltd. */
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.2 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c
index 19b549f2998b..57dc78c526b0 100644
--- a/drivers/net/ipa/data/ipa_data-v4.5.c
+++ b/drivers/net/ipa/data/ipa_data-v4.5.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021 Linaro Ltd. */
+/* Copyright (C) 2021-2024 Linaro Ltd. */
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.5 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v4.7.c b/drivers/net/ipa/data/ipa_data-v4.7.c
index b83390c48615..c8c23d9be961 100644
--- a/drivers/net/ipa/data/ipa_data-v4.7.c
+++ b/drivers/net/ipa/data/ipa_data-v4.7.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.7 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c
index d30fc1fe6ca2..4eb9c909d5b3 100644
--- a/drivers/net/ipa/data/ipa_data-v4.9.c
+++ b/drivers/net/ipa/data/ipa_data-v4.9.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021 Linaro Ltd. */
+/* Copyright (C) 2021-2024 Linaro Ltd. */
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v5.0.c b/drivers/net/ipa/data/ipa_data-v5.0.c
index 4d8171dae4cd..050580c99b65 100644
--- a/drivers/net/ipa/data/ipa_data-v5.0.c
+++ b/drivers/net/ipa/data/ipa_data-v5.0.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
#include <linux/log2.h>
-#include "../gsi.h"
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v5.0 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/data/ipa_data-v5.5.c b/drivers/net/ipa/data/ipa_data-v5.5.c
index 2c6390f11354..0e6663e22533 100644
--- a/drivers/net/ipa/data/ipa_data-v5.5.c
+++ b/drivers/net/ipa/data/ipa_data-v5.5.c
@@ -1,13 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
-#include <linux/kernel.h>
+#include <linux/array_size.h>
#include <linux/log2.h>
#include "../ipa_data.h"
#include "../ipa_endpoint.h"
#include "../ipa_mem.h"
+#include "../ipa_version.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v5.5 */
enum ipa_resource_type {
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index 9a0b1fe4a93a..4c3227e77898 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1,28 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2023 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
-#include <linux/types.h>
#include <linux/bits.h>
-#include <linux/bitfield.h>
-#include <linux/mutex.h>
-#include <linux/completion.h>
-#include <linux/io.h>
#include <linux/bug.h>
+#include <linux/completion.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
#include "gsi.h"
-#include "reg.h"
-#include "gsi_reg.h"
#include "gsi_private.h"
+#include "gsi_reg.h"
#include "gsi_trans.h"
-#include "ipa_gsi.h"
#include "ipa_data.h"
+#include "ipa_gsi.h"
#include "ipa_version.h"
+#include "reg.h"
/**
* DOC: The IPA Generic Software Interface
@@ -1730,10 +1728,10 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
gsi_channel_program(channel, true);
if (channel->toward_ipa)
- netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
+ netif_napi_add_tx(gsi->dummy_dev, &channel->napi,
gsi_channel_poll);
else
- netif_napi_add(&gsi->dummy_dev, &channel->napi,
+ netif_napi_add(gsi->dummy_dev, &channel->napi,
gsi_channel_poll);
return 0;
@@ -2369,12 +2367,14 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
/* GSI uses NAPI on all channels. Create a dummy network device
* for the channel NAPI contexts to be associated with.
*/
- init_dummy_netdev(&gsi->dummy_dev);
+ gsi->dummy_dev = alloc_netdev_dummy(0);
+ if (!gsi->dummy_dev)
+ return -ENOMEM;
init_completion(&gsi->completion);
ret = gsi_reg_init(gsi, pdev);
if (ret)
- return ret;
+ goto err_reg_exit;
ret = gsi_irq_init(gsi, pdev); /* No matching exit required */
if (ret)
@@ -2389,6 +2389,7 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
return 0;
err_reg_exit:
+ free_netdev(gsi->dummy_dev);
gsi_reg_exit(gsi);
return ret;
@@ -2399,6 +2400,7 @@ void gsi_exit(struct gsi *gsi)
{
mutex_destroy(&gsi->mutex);
gsi_channel_exit(gsi);
+ free_netdev(gsi->dummy_dev);
gsi_reg_exit(gsi);
}
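The gsi.c hunks convert the NAPI anchor from a net_device embedded in struct gsi to one allocated with alloc_netdev_dummy(), which is why every failure and teardown path now has to free it. A minimal sketch of that ownership pattern, assuming only the core netdev/NAPI API (my_ctx, my_poll and friends are illustrative names, not driver symbols):

    #include <linux/netdevice.h>

    struct my_ctx {
        struct net_device *dummy_dev;   /* exists only to host NAPI */
        struct napi_struct napi;
    };

    static int my_poll(struct napi_struct *napi, int budget)
    {
        /* real poll code consumes up to @budget items and calls
         * napi_complete_done() once the work is drained
         */
        return 0;
    }

    static int my_init(struct my_ctx *ctx)
    {
        ctx->dummy_dev = alloc_netdev_dummy(0);   /* 0: no private area */
        if (!ctx->dummy_dev)
            return -ENOMEM;

        netif_napi_add(ctx->dummy_dev, &ctx->napi, my_poll);
        return 0;
    }

    static void my_exit(struct my_ctx *ctx)
    {
        netif_napi_del(&ctx->napi);
        free_netdev(ctx->dummy_dev);   /* must pair with alloc on every path */
    }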
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 42063b227c18..9d8e05d950e3 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -1,17 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2023 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _GSI_H_
#define _GSI_H_
-#include <linux/types.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/completion.h>
-#include <linux/platform_device.h>
+#include <linux/mutex.h>
#include <linux/netdevice.h>
+#include <linux/types.h>
#include "ipa_version.h"
@@ -23,12 +21,10 @@
#define GSI_TLV_MAX 64
struct device;
-struct scatterlist;
struct platform_device;
struct gsi;
struct gsi_trans;
-struct gsi_channel_data;
struct ipa_gsi_endpoint_data;
struct gsi_ring {
@@ -155,7 +151,7 @@ struct gsi {
struct mutex mutex; /* protects commands, programming */
struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
- struct net_device dummy_dev; /* needed for NAPI */
+ struct net_device *dummy_dev; /* needed for NAPI */
};
/**
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
index c65f7c5cdc8d..968ab1e596e8 100644
--- a/drivers/net/ipa/gsi_private.h
+++ b/drivers/net/ipa/gsi_private.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _GSI_PRIVATE_H_
#define _GSI_PRIVATE_H_
@@ -10,9 +10,10 @@
#include <linux/types.h>
-struct gsi_trans;
-struct gsi_ring;
+struct gsi;
struct gsi_channel;
+struct gsi_ring;
+struct gsi_trans;
#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */
diff --git a/drivers/net/ipa/gsi_reg.c b/drivers/net/ipa/gsi_reg.c
index 106c43884aef..825598661188 100644
--- a/drivers/net/ipa/gsi_reg.c
+++ b/drivers/net/ipa/gsi_reg.c
@@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
-#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include "gsi.h"
-#include "reg.h"
#include "gsi_reg.h"
+#include "reg.h"
/* Is this register ID valid for the current GSI version? */
static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id)
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index ee6fb00b71eb..19531883864a 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -1,22 +1,22 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/bits.h>
#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/dma-direction.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
-#include <linux/dma-direction.h>
+#include <linux/types.h>
#include "gsi.h"
#include "gsi_private.h"
#include "gsi_trans.h"
-#include "ipa_gsi.h"
-#include "ipa_data.h"
#include "ipa_cmd.h"
+#include "ipa_data.h"
+#include "ipa_gsi.h"
/**
* DOC: GSI Transactions
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 30c1c2dc77c6..c1b3386cbb9d 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -1,25 +1,24 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _GSI_TRANS_H_
#define _GSI_TRANS_H_
-#include <linux/types.h>
-#include <linux/refcount.h>
#include <linux/completion.h>
#include <linux/dma-direction.h>
+#include <linux/refcount.h>
+#include <linux/types.h>
#include "ipa_cmd.h"
+struct device;
struct page;
struct scatterlist;
-struct device;
struct sk_buff;
struct gsi;
-struct gsi_trans;
struct gsi_trans_pool;
/* Maximum number of TREs in an IPA immediate command transaction */
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 334cd62cf286..7ef10a4ff35e 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -1,30 +1,25 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_H_
#define _IPA_H_
-#include <linux/types.h>
-#include <linux/device.h>
#include <linux/notifier.h>
-#include <linux/pm_wakeup.h>
+#include <linux/types.h>
-#include "ipa_version.h"
#include "gsi.h"
+#include "ipa_endpoint.h"
#include "ipa_mem.h"
#include "ipa_qmi.h"
-#include "ipa_endpoint.h"
-#include "ipa_interrupt.h"
+#include "ipa_version.h"
-struct clk;
-struct icc_path;
struct net_device;
+struct ipa_interrupt;
struct ipa_power;
struct ipa_smp2p;
-struct ipa_interrupt;
/**
* struct ipa - IPA information
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index 39219963dbb3..984311a9a5f2 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -1,22 +1,23 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/slab.h>
#include <linux/bitfield.h>
+#include <linux/bits.h>
+#include <linux/device.h>
#include <linux/dma-direction.h>
+#include <linux/types.h>
#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
-#include "ipa_endpoint.h"
-#include "ipa_table.h"
#include "ipa_cmd.h"
+#include "ipa_endpoint.h"
#include "ipa_mem.h"
+#include "ipa_reg.h"
+#include "ipa_table.h"
/**
* DOC: IPA Immediate Commands
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index e2cf1c2b0ef2..2077fdbade99 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -1,21 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_CMD_H_
#define _IPA_CMD_H_
#include <linux/types.h>
-#include <linux/dma-direction.h>
-
-struct sk_buff;
-struct scatterlist;
+struct gsi_channel;
+struct gsi_trans;
struct ipa;
struct ipa_mem;
-struct gsi_trans;
-struct gsi_channel;
/**
* enum ipa_cmd_opcode: IPA immediate commands
@@ -58,14 +54,6 @@ bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
bool route);
/**
- * ipa_cmd_data_valid() - Validate command-related configuration is valid
- * @ipa: - IPA pointer
- *
- * Return: true if assumptions required for command are valid
- */
-bool ipa_cmd_data_valid(struct ipa *ipa);
-
-/**
* ipa_cmd_pool_init() - initialize command channel pools
* @channel: AP->IPA command TX GSI channel pointer
* @tre_count: Number of pool elements to allocate
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 2a1605e67b65..d88cbbbf18b7 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -1,16 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_DATA_H_
#define _IPA_DATA_H_
#include <linux/types.h>
-#include "ipa_version.h"
#include "ipa_endpoint.h"
#include "ipa_mem.h"
+#include "ipa_version.h"
/**
* DOC: IPA/GSI Configuration Data
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index dd490941615e..0bd9b9fbbf56 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1,27 +1,30 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/slab.h>
#include <linux/bitfield.h>
-#include <linux/if_rmnet.h>
+#include <linux/bits.h>
+#include <linux/device.h>
#include <linux/dma-direction.h>
+#include <linux/if_rmnet.h>
+#include <linux/types.h>
#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
+#include "ipa_cmd.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
-#include "ipa_cmd.h"
+#include "ipa_gsi.h"
+#include "ipa_interrupt.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
-#include "ipa_table.h"
-#include "ipa_gsi.h"
#include "ipa_power.h"
+#include "ipa_reg.h"
+#include "ipa_table.h"
+#include "ipa_version.h"
/* Hardware is told about receive buffers once a "batch" has been queued */
#define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 3ad2e802040a..e7d8ae6c6f6a 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -1,21 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_ENDPOINT_H_
#define _IPA_ENDPOINT_H_
#include <linux/types.h>
#include <linux/workqueue.h>
-#include <linux/if_ether.h>
-#include "gsi.h"
#include "ipa_reg.h"
+#include "ipa_version.h"
struct net_device;
struct sk_buff;
+struct gsi_trans;
struct ipa;
struct ipa_gsi_endpoint_data;
@@ -199,9 +199,9 @@ int ipa_endpoint_init(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data);
void ipa_endpoint_exit(struct ipa *ipa);
-void ipa_endpoint_trans_complete(struct ipa_endpoint *ipa,
+void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
struct gsi_trans *trans);
-void ipa_endpoint_trans_release(struct ipa_endpoint *ipa,
+void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
struct gsi_trans *trans);
#endif /* _IPA_ENDPOINT_H_ */
diff --git a/drivers/net/ipa/ipa_gsi.c b/drivers/net/ipa/ipa_gsi.c
index d323adb03383..cb654c7b5498 100644
--- a/drivers/net/ipa/ipa_gsi.c
+++ b/drivers/net/ipa/ipa_gsi.c
@@ -1,16 +1,17 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#include <linux/types.h>
-#include "ipa_gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
-#include "ipa_endpoint.h"
#include "ipa_data.h"
+#include "ipa_endpoint.h"
+#include "ipa_gsi.h"
+#include "ipa_version.h"
void ipa_gsi_trans_complete(struct gsi_trans *trans)
{
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index c3e8784d51d9..245a06997055 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
/* DOC: IPA Interrupts
@@ -19,29 +19,31 @@
* time only these three are supported.
*/
-#include <linux/platform_device.h>
-#include <linux/types.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
+#include <linux/types.h>
#include "ipa.h"
-#include "ipa_reg.h"
#include "ipa_endpoint.h"
+#include "ipa_interrupt.h"
#include "ipa_power.h"
+#include "ipa_reg.h"
#include "ipa_uc.h"
-#include "ipa_interrupt.h"
/**
* struct ipa_interrupt - IPA interrupt information
* @ipa: IPA pointer
* @irq: Linux IRQ number used for IPA interrupts
* @enabled: Mask indicating which interrupts are enabled
+ * @suspend_enabled: Bitmap of endpoints with the SUSPEND interrupt enabled
*/
struct ipa_interrupt {
struct ipa *ipa;
u32 irq;
u32 enabled;
+ unsigned long *suspend_enabled;
};
/* Clear the suspend interrupt for all endpoints that signaled it */
@@ -194,6 +196,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
u32 mask = BIT(endpoint_id % 32);
u32 unit = endpoint_id / 32;
const struct reg *reg;
+ unsigned long weight;
u32 offset;
u32 val;
@@ -203,6 +206,10 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
if (ipa->version == IPA_VERSION_3_0)
return;
+ weight = bitmap_weight(interrupt->suspend_enabled, ipa->endpoint_count);
+ if (weight == 1 && !enable)
+ ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
+
reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
offset = reg_n_offset(reg, unit);
val = ioread32(ipa->reg_virt + offset);
@@ -211,8 +218,12 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
val |= mask;
else
val &= ~mask;
+ __change_bit(endpoint_id, interrupt->suspend_enabled);
iowrite32(val, ipa->reg_virt + offset);
+
+ if (!weight && enable)
+ ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
}
/* Enable TX_SUSPEND for an endpoint */
@@ -246,7 +257,16 @@ int ipa_interrupt_config(struct ipa *ipa)
interrupt->ipa = ipa;
- /* Disable all IPA interrupt types */
+ /* Initially all IPA interrupt types are disabled */
+ interrupt->enabled = 0;
+ interrupt->suspend_enabled = bitmap_zalloc(ipa->endpoint_count,
+ GFP_KERNEL);
+ if (!interrupt->suspend_enabled) {
+ ret = -ENOMEM;
+ goto err_kfree;
+ }
+
+ /* Disable IPA interrupt types */
reg = ipa_reg(ipa, IPA_IRQ_EN);
iowrite32(0, ipa->reg_virt + reg_offset(reg));
@@ -254,22 +274,32 @@ int ipa_interrupt_config(struct ipa *ipa)
"ipa", interrupt);
if (ret) {
dev_err(dev, "error %d requesting \"ipa\" IRQ\n", ret);
- goto err_kfree;
+ goto err_free_bitmap;
+ }
+
+ ret = device_init_wakeup(dev, true);
+ if (ret) {
+ dev_err(dev, "error %d enabling wakeup\n", ret);
+ goto err_free_irq;
}
ret = dev_pm_set_wake_irq(dev, irq);
if (ret) {
dev_err(dev, "error %d registering \"ipa\" IRQ as wakeirq\n",
ret);
- goto err_free_irq;
+ goto err_disable_wakeup;
}
ipa->interrupt = interrupt;
return 0;
+err_disable_wakeup:
+ (void)device_init_wakeup(dev, false);
err_free_irq:
free_irq(interrupt->irq, interrupt);
+err_free_bitmap:
+ bitmap_free(interrupt->suspend_enabled);
err_kfree:
kfree(interrupt);
@@ -285,22 +315,20 @@ void ipa_interrupt_deconfig(struct ipa *ipa)
ipa->interrupt = NULL;
dev_pm_clear_wake_irq(dev);
+ (void)device_init_wakeup(dev, false);
free_irq(interrupt->irq, interrupt);
+ bitmap_free(interrupt->suspend_enabled);
}
/* Initialize the IPA interrupt structure */
struct ipa_interrupt *ipa_interrupt_init(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
struct ipa_interrupt *interrupt;
int irq;
irq = platform_get_irq_byname(pdev, "ipa");
- if (irq <= 0) {
- dev_err(dev, "DT error %d getting \"ipa\" IRQ property\n", irq);
-
+ if (irq <= 0)
return ERR_PTR(irq ? : -EINVAL);
- }
interrupt = kzalloc(sizeof(*interrupt), GFP_KERNEL);
if (!interrupt)
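The new suspend_enabled bitmap turns the shared TX_SUSPEND interrupt into a counted resource: the first endpoint to set a bit enables the IRQ, and clearing the last set bit disables it. A hedged sketch of that gating idiom, using the real bitmap helpers but hypothetical my_irq_* wrappers:

    #include <linux/bitmap.h>

    void my_irq_enable(void);    /* illustrative stand-ins */
    void my_irq_disable(void);

    static void my_suspend_control(unsigned long *enabled, unsigned int count,
                                   unsigned int id, bool enable)
    {
        unsigned long weight = bitmap_weight(enabled, count);

        /* last consumer leaving: quiesce the IRQ before updating state */
        if (weight == 1 && !enable)
            my_irq_disable();

        __change_bit(id, enabled);   /* non-atomic: caller serializes */

        /* first consumer arriving: enable the IRQ afterwards */
        if (!weight && enable)
            my_irq_enable();
    }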
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index f3f4f4330a59..d11c4af14fa2 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -1,16 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_INTERRUPT_H_
#define _IPA_INTERRUPT_H_
#include <linux/types.h>
-#include <linux/bits.h>
+
+struct platform_device;
struct ipa;
struct ipa_interrupt;
+
enum ipa_irq_id;
/**
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index 57b241417e8c..5f3dd5a2dcf4 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -1,38 +1,37 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2023 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/atomic.h>
-#include <linux/bitfield.h>
#include <linux/bug.h>
-#include <linux/io.h>
#include <linux/firmware.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/types.h>
+
#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/soc/qcom/mdt_loader.h>
#include "ipa.h"
-#include "ipa_power.h"
+#include "ipa_cmd.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
-#include "ipa_resource.h"
-#include "ipa_cmd.h"
-#include "ipa_reg.h"
+#include "ipa_interrupt.h"
#include "ipa_mem.h"
-#include "ipa_table.h"
-#include "ipa_smp2p.h"
#include "ipa_modem.h"
-#include "ipa_uc.h"
-#include "ipa_interrupt.h"
-#include "gsi_trans.h"
+#include "ipa_power.h"
+#include "ipa_reg.h"
+#include "ipa_resource.h"
+#include "ipa_smp2p.h"
#include "ipa_sysfs.h"
+#include "ipa_table.h"
+#include "ipa_uc.h"
+#include "ipa_version.h"
/**
* DOC: The IP Accelerator
@@ -120,10 +119,6 @@ int ipa_setup(struct ipa *ipa)
if (ret)
return ret;
- ret = ipa_power_setup(ipa);
- if (ret)
- goto err_gsi_teardown;
-
ipa_endpoint_setup(ipa);
/* We need to use the AP command TX endpoint to perform other
@@ -170,8 +165,6 @@ err_command_disable:
ipa_endpoint_disable_one(command_endpoint);
err_endpoint_teardown:
ipa_endpoint_teardown(ipa);
- ipa_power_teardown(ipa);
-err_gsi_teardown:
gsi_teardown(&ipa->gsi);
return ret;
@@ -196,7 +189,6 @@ static void ipa_teardown(struct ipa *ipa)
command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
ipa_endpoint_disable_one(command_endpoint);
ipa_endpoint_teardown(ipa);
- ipa_power_teardown(ipa);
gsi_teardown(&ipa->gsi);
}
@@ -818,11 +810,6 @@ static int ipa_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (!ipa_version_supported(data->version)) {
- dev_err(dev, "unsupported IPA version %u\n", data->version);
- return -EINVAL;
- }
-
if (!data->modem_route_count) {
dev_err(dev, "modem_route_count cannot be zero\n");
return -EINVAL;
@@ -873,6 +860,10 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_reg_exit;
+ ret = ipa_cmd_init(ipa);
+ if (ret)
+ goto err_mem_exit;
+
ret = gsi_init(&ipa->gsi, pdev, ipa->version, data->endpoint_count,
data->endpoint_data);
if (ret)
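The added ipa_cmd_init() call slots into ipa_probe()'s existing goto unwind ladder: each init step that fails jumps to the label that tears down everything initialized before it, in strict reverse order. The shape of that idiom, sketched with hypothetical step_a/step_b/step_c helpers:

    int step_a_init(void); int step_b_init(void); int step_c_init(void);
    void step_a_exit(void); void step_b_exit(void);

    static int my_probe(struct platform_device *pdev)
    {
        int ret;

        ret = step_a_init();
        if (ret)
            return ret;          /* nothing to unwind yet */

        ret = step_b_init();     /* the newly inserted step */
        if (ret)
            goto err_a_exit;

        ret = step_c_init();
        if (ret)
            goto err_b_exit;

        return 0;

    err_b_exit:
        step_b_exit();           /* reverse order of initialization */
    err_a_exit:
        step_a_exit();
        return ret;
    }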
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 709f061ede61..dee985eb08cb 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -1,25 +1,24 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/bitfield.h>
-#include <linux/bug.h>
#include <linux/dma-mapping.h>
+#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
-#include <linux/io.h>
+#include <linux/types.h>
+
#include <linux/soc/qcom/smem.h>
+#include "gsi_trans.h"
#include "ipa.h"
-#include "ipa_reg.h"
-#include "ipa_data.h"
#include "ipa_cmd.h"
+#include "ipa_data.h"
#include "ipa_mem.h"
+#include "ipa_reg.h"
#include "ipa_table.h"
-#include "gsi_trans.h"
/* "Canary" value placed between memory regions to detect overflow */
#define IPA_MEM_CANARY_VAL cpu_to_le32(0xdeadbeef)
diff --git a/drivers/net/ipa/ipa_mem.h b/drivers/net/ipa/ipa_mem.h
index 28aad00a151d..b25babade787 100644
--- a/drivers/net/ipa/ipa_mem.h
+++ b/drivers/net/ipa/ipa_mem.h
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_MEM_H_
#define _IPA_MEM_H_
+#include <linux/types.h>
+
struct platform_device;
struct ipa;
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index c27ca3f27f7d..8fe0d0e1a00f 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -1,29 +1,27 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#include <linux/errno.h>
+#include <linux/etherdevice.h>
#include <linux/if_arp.h>
+#include <linux/if_rmnet.h>
#include <linux/netdevice.h>
+#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
-#include <linux/if_rmnet.h>
-#include <linux/etherdevice.h>
#include <net/pkt_sched.h>
-#include <linux/pm_runtime.h>
+
#include <linux/remoteproc/qcom_rproc.h>
#include "ipa.h"
-#include "ipa_data.h"
#include "ipa_endpoint.h"
-#include "ipa_table.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_smp2p.h"
-#include "ipa_qmi.h"
+#include "ipa_table.h"
#include "ipa_uc.h"
-#include "ipa_power.h"
#define IPA_NETDEV_NAME "rmnet_ipa%d"
#define IPA_NETDEV_TAILROOM 0 /* for padding by mux layer */
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
index d85718db9a57..b1d2c80ed096 100644
--- a/drivers/net/ipa/ipa_modem.h
+++ b/drivers/net/ipa/ipa_modem.h
@@ -1,15 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_MODEM_H_
#define _IPA_MODEM_H_
-struct ipa;
struct net_device;
struct sk_buff;
+struct ipa;
+
int ipa_modem_start(struct ipa *ipa);
int ipa_modem_stop(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index 41ca7ef5e20f..65fd14da0f86 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#include <linux/clk.h>
@@ -9,15 +9,15 @@
#include <linux/interconnect.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
-#include <linux/bitops.h>
#include "linux/soc/qcom/qcom_aoss.h"
#include "ipa.h"
-#include "ipa_power.h"
+#include "ipa_data.h"
#include "ipa_endpoint.h"
+#include "ipa_interrupt.h"
#include "ipa_modem.h"
-#include "ipa_data.h"
+#include "ipa_power.h"
/**
* DOC: IPA Power Management
@@ -232,25 +232,6 @@ void ipa_power_retention(struct ipa *ipa, bool enable)
ret, enable ? "en" : "dis");
}
-int ipa_power_setup(struct ipa *ipa)
-{
- int ret;
-
- ipa_interrupt_enable(ipa, IPA_IRQ_TX_SUSPEND);
-
- ret = device_init_wakeup(ipa->dev, true);
- if (ret)
- ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
-
- return ret;
-}
-
-void ipa_power_teardown(struct ipa *ipa)
-{
- (void)device_init_wakeup(ipa->dev, false);
- ipa_interrupt_disable(ipa, IPA_IRQ_TX_SUSPEND);
-}
-
/* Initialize IPA power management */
struct ipa_power *
ipa_power_init(struct device *dev, const struct ipa_power_data *data)
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 227cc04bea80..a83524a61c28 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -1,16 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_POWER_H_
#define _IPA_POWER_H_
+#include <linux/types.h>
+
struct device;
struct ipa;
struct ipa_power_data;
-enum ipa_irq_id;
/* IPA device power management function block */
extern const struct dev_pm_ops ipa_pm_ops;
@@ -31,20 +32,6 @@ u32 ipa_core_clock_rate(struct ipa *ipa);
void ipa_power_retention(struct ipa *ipa, bool enable);
/**
- * ipa_power_setup() - Set up IPA power management
- * @ipa: IPA pointer
- *
- * Return: 0 if successful, or a negative error code
- */
-int ipa_power_setup(struct ipa *ipa);
-
-/**
- * ipa_power_teardown() - Inverse of ipa_power_setup()
- * @ipa: IPA pointer
- */
-void ipa_power_teardown(struct ipa *ipa);
-
-/**
* ipa_power_init() - Initialize IPA power management
* @dev: IPA device
* @data: Clock configuration data
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 65c40e207802..d771f3a71f94 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -1,19 +1,15 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/slab.h>
#include <linux/qrtr.h>
-#include <linux/soc/qcom/qmi.h>
+#include <linux/string.h>
+#include <linux/types.h>
#include "ipa.h"
-#include "ipa_endpoint.h"
#include "ipa_mem.h"
-#include "ipa_table.h"
#include "ipa_modem.h"
#include "ipa_qmi_msg.h"
diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h
index 1c236826c17a..fb15ea7f47e0 100644
--- a/drivers/net/ipa/ipa_qmi.h
+++ b/drivers/net/ipa/ipa_qmi.h
@@ -1,12 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_QMI_H_
#define _IPA_QMI_H_
#include <linux/types.h>
+#include <linux/workqueue.h>
+
#include <linux/soc/qcom/qmi.h>
struct ipa;
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
index 894f99517233..51dc13a577a5 100644
--- a/drivers/net/ipa/ipa_qmi_msg.c
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -1,9 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#include <linux/stddef.h>
+
#include <linux/soc/qcom/qmi.h>
#include "ipa_qmi_msg.h"
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index b73503552c4d..644b8c27108b 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_QMI_MSG_H_
#define _IPA_QMI_MSG_H_
@@ -9,6 +9,7 @@
/* === Only "ipa_qmi" and "ipa_qmi_msg.c" should include this file === */
#include <linux/types.h>
+
#include <linux/soc/qcom/qmi.h>
/* Request/response/indication QMI message ids used for IPA. Receiving
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index 98625956e0bb..c574f798fdc9 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2023 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
-#include <linux/platform_device.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include "ipa.h"
#include "ipa_reg.h"
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index 62c62495b796..61b7c441ae95 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -1,15 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2023 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#ifndef _IPA_REG_H_
#define _IPA_REG_H_
-#include <linux/bitfield.h>
-#include <linux/bug.h>
-
-#include "ipa_version.h"
#include "reg.h"
struct platform_device;
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
index 82c88a744d10..1b0c4695c32a 100644
--- a/drivers/net/ipa/ipa_resource.c
+++ b/drivers/net/ipa/ipa_resource.c
@@ -1,11 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
#include <linux/types.h>
-#include <linux/kernel.h>
#include "ipa.h"
#include "ipa_data.h"
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index 2f917582c423..fcaadd111a8a 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -1,20 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-#include <linux/soc/qcom/smem.h>
+#include <linux/types.h>
+
#include <linux/soc/qcom/smem_state.h>
-#include "ipa_smp2p.h"
#include "ipa.h"
+#include "ipa_smp2p.h"
#include "ipa_uc.h"
/**
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
index 2ff09ce343b7..a59bd215494c 100644
--- a/drivers/net/ipa/ipa_sysfs.c
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -1,15 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021-2022 Linaro Ltd. */
+/* Copyright (C) 2021-2024 Linaro Ltd. */
-#include <linux/kernel.h>
-#include <linux/types.h>
#include <linux/device.h>
#include <linux/sysfs.h>
+#include <linux/types.h>
#include "ipa.h"
-#include "ipa_version.h"
#include "ipa_sysfs.h"
+#include "ipa_version.h"
static const char *ipa_version_string(struct ipa *ipa)
{
diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h
index 58ba22810bab..43d9cb0722a4 100644
--- a/drivers/net/ipa/ipa_sysfs.h
+++ b/drivers/net/ipa/ipa_sysfs.h
@@ -1,13 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_SYSFS_H_
#define _IPA_SYSFS_H_
-struct attribute_group;
-
extern const struct attribute_group ipa_attribute_group;
extern const struct attribute_group ipa_feature_attribute_group;
extern const struct attribute_group ipa_endpoint_id_attribute_group;
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index a24ac11b8893..4e4a3f8aa8e8 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -1,28 +1,25 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2023 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/bits.h>
#include <linux/bitops.h>
-#include <linux/bitfield.h>
-#include <linux/io.h>
#include <linux/build_bug.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/types.h>
+#include "gsi.h"
+#include "gsi_trans.h"
#include "ipa.h"
-#include "ipa_version.h"
+#include "ipa_cmd.h"
#include "ipa_endpoint.h"
-#include "ipa_table.h"
-#include "ipa_reg.h"
#include "ipa_mem.h"
-#include "ipa_cmd.h"
-#include "gsi.h"
-#include "gsi_trans.h"
+#include "ipa_reg.h"
+#include "ipa_table.h"
+#include "ipa_version.h"
/**
* DOC: IPA Filter and Route Tables
@@ -161,6 +158,12 @@ ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6)
return ipa_mem_find(ipa, mem_id);
}
+/* Return true if hashed tables are supported */
+bool ipa_table_hash_support(struct ipa *ipa)
+{
+ return ipa->version != IPA_VERSION_4_2;
+}
+
bool ipa_filtered_valid(struct ipa *ipa, u64 filtered)
{
struct device *dev = ipa->dev;
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index 7cc951904bb4..16d4d15df9e9 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_TABLE_H_
#define _IPA_TABLE_H_
@@ -23,10 +23,7 @@ bool ipa_filtered_valid(struct ipa *ipa, u64 filtered);
* ipa_table_hash_support() - Return true if hashed tables are supported
* @ipa: IPA pointer
*/
-static inline bool ipa_table_hash_support(struct ipa *ipa)
-{
- return ipa->version != IPA_VERSION_4_2;
-}
+bool ipa_table_hash_support(struct ipa *ipa);
/**
* ipa_table_reset() - Reset filter and route tables entries to "none"
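Un-inlining ipa_table_hash_support() means ipa_table.h no longer needs the full definition of struct ipa (previously pulled in for ipa->version); a forward declaration now suffices for every includer. The pattern, taken directly from the two hunks above:

    /* header: declaration only, so struct ipa can stay opaque */
    struct ipa;
    bool ipa_table_hash_support(struct ipa *ipa);

    /* ipa_table.c: the one translation unit that needs the layout */
    bool ipa_table_hash_support(struct ipa *ipa)
    {
        return ipa->version != IPA_VERSION_4_2;
    }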
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index bfd5dc6dab43..2963db83ab6b 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -1,17 +1,19 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2022 Linaro Ltd.
+ * Copyright (C) 2018-2024 Linaro Ltd.
*/
-#include <linux/types.h>
-#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <linux/types.h>
#include "ipa.h"
-#include "ipa_uc.h"
+#include "ipa_interrupt.h"
#include "ipa_power.h"
+#include "ipa_reg.h"
+#include "ipa_uc.h"
/**
* DOC: The IPA embedded microcontroller
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
index 85aa0df818c2..12997ecf5faa 100644
--- a/drivers/net/ipa/ipa_uc.h
+++ b/drivers/net/ipa/ipa_uc.h
@@ -1,13 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_UC_H_
#define _IPA_UC_H_
struct ipa;
-enum ipa_irq_id;
/**
* ipa_uc_interrupt_handler() - Handler for microcontroller IPA interrupts
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index 38150345b607..38c47f51a50c 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -1,11 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2022 Linaro Ltd.
+ * Copyright (C) 2019-2024 Linaro Ltd.
*/
#ifndef _IPA_VERSION_H_
#define _IPA_VERSION_H_
+#include <linux/types.h>
+
/**
* enum ipa_version
* @IPA_VERSION_3_0: IPA version 3.0/GSI version 1.0
@@ -45,24 +47,6 @@ enum ipa_version {
IPA_VERSION_COUNT, /* Last; not a version */
};
-static inline bool ipa_version_supported(enum ipa_version version)
-{
- switch (version) {
- case IPA_VERSION_3_1:
- case IPA_VERSION_3_5_1:
- case IPA_VERSION_4_2:
- case IPA_VERSION_4_5:
- case IPA_VERSION_4_7:
- case IPA_VERSION_4_9:
- case IPA_VERSION_4_11:
- case IPA_VERSION_5_0:
- case IPA_VERSION_5_5:
- return true;
- default:
- return false;
- }
-}
-
/* Execution environment IDs */
enum gsi_ee_id {
GSI_EE_AP = 0x0,
diff --git a/drivers/net/ipa/reg.h b/drivers/net/ipa/reg.h
index 2ee07eebca67..53c16e594ea4 100644
--- a/drivers/net/ipa/reg.h
+++ b/drivers/net/ipa/reg.h
@@ -1,13 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* *Copyright (C) 2022-2023 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
#ifndef _REG_H_
#define _REG_H_
-#include <linux/types.h>
-#include <linux/log2.h>
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/bug.h>
+#include <linux/log2.h>
+#include <linux/types.h>
/**
* struct reg - A register descriptor
diff --git a/drivers/net/ipa/reg/gsi_reg-v3.1.c b/drivers/net/ipa/reg/gsi_reg-v3.1.c
index e036805a7882..8c577b8b5c7a 100644
--- a/drivers/net/ipa/reg/gsi_reg-v3.1.c
+++ b/drivers/net/ipa/reg/gsi_reg-v3.1.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/gsi_reg-v3.5.1.c b/drivers/net/ipa/reg/gsi_reg-v3.5.1.c
index 8c3ab3a5288e..a1c609f40d99 100644
--- a/drivers/net/ipa/reg/gsi_reg-v3.5.1.c
+++ b/drivers/net/ipa/reg/gsi_reg-v3.5.1.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.0.c b/drivers/net/ipa/reg/gsi_reg-v4.0.c
index 7cc7a21d07f9..ff1fb1ca47dd 100644
--- a/drivers/net/ipa/reg/gsi_reg-v4.0.c
+++ b/drivers/net/ipa/reg/gsi_reg-v4.0.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.11.c b/drivers/net/ipa/reg/gsi_reg-v4.11.c
index 01696519032f..ab9757ce42e7 100644
--- a/drivers/net/ipa/reg/gsi_reg-v4.11.c
+++ b/drivers/net/ipa/reg/gsi_reg-v4.11.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.5.c b/drivers/net/ipa/reg/gsi_reg-v4.5.c
index 2900e5c3ff88..01b45f79c315 100644
--- a/drivers/net/ipa/reg/gsi_reg-v4.5.c
+++ b/drivers/net/ipa/reg/gsi_reg-v4.5.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/gsi_reg-v4.9.c b/drivers/net/ipa/reg/gsi_reg-v4.9.c
index 8b5d95425a76..783eaaee2936 100644
--- a/drivers/net/ipa/reg/gsi_reg-v4.9.c
+++ b/drivers/net/ipa/reg/gsi_reg-v4.9.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c020 + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/gsi_reg-v5.0.c b/drivers/net/ipa/reg/gsi_reg-v5.0.c
index 145eb0bd096d..36d1e65df71b 100644
--- a/drivers/net/ipa/reg/gsi_reg-v5.0.c
+++ b/drivers/net/ipa/reg/gsi_reg-v5.0.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../gsi.h"
-#include "../reg.h"
#include "../gsi_reg.h"
+#include "../ipa_version.h"
+#include "../reg.h"
REG(INTER_EE_SRC_CH_IRQ_MSK, inter_ee_src_ch_irq_msk,
0x0000c01c + 0x1000 * GSI_EE_AP);
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c
index 648dbfe1fce3..a89103701583 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
[COMP_CFG_ENABLE] = BIT(0),
@@ -76,19 +78,6 @@ static const u32 reg_qsb_max_reads_fmask[] = {
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 reg_filt_rout_hash_en_fmask[] = {
- [IPV6_ROUTER_HASH] = BIT(0),
- /* Bits 1-3 reserved */
- [IPV6_FILTER_HASH] = BIT(4),
- /* Bits 5-7 reserved */
- [IPV4_ROUTER_HASH] = BIT(8),
- /* Bits 9-11 reserved */
- [IPV4_FILTER_HASH] = BIT(12),
- /* Bits 13-31 reserved */
-};
-
-REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
-
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
@@ -403,7 +392,6 @@ static const struct reg *reg_array[] = {
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[IPA_BCR] = &reg_ipa_bcr,
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
index 78b1bf60cd02..c81c48ec51f9 100644
--- a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
+++ b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
[COMP_CFG_ENABLE] = BIT(0),
@@ -81,19 +83,6 @@ static const u32 reg_qsb_max_reads_fmask[] = {
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 reg_filt_rout_hash_en_fmask[] = {
- [IPV6_ROUTER_HASH] = BIT(0),
- /* Bits 1-3 reserved */
- [IPV6_FILTER_HASH] = BIT(4),
- /* Bits 5-7 reserved */
- [IPV4_ROUTER_HASH] = BIT(8),
- /* Bits 9-11 reserved */
- [IPV4_FILTER_HASH] = BIT(12),
- /* Bits 13-31 reserved */
-};
-
-REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
-
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
@@ -414,7 +403,6 @@ static const struct reg *reg_array[] = {
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[IPA_BCR] = &reg_ipa_bcr,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.11.c b/drivers/net/ipa/reg/ipa_reg-v4.11.c
index 29e71cce4a84..18bddc32c931 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.11.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.11.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
@@ -113,19 +115,6 @@ static const u32 reg_qsb_max_reads_fmask[] = {
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 reg_filt_rout_hash_en_fmask[] = {
- [IPV6_ROUTER_HASH] = BIT(0),
- /* Bits 1-3 reserved */
- [IPV6_FILTER_HASH] = BIT(4),
- /* Bits 5-7 reserved */
- [IPV4_ROUTER_HASH] = BIT(8),
- /* Bits 9-11 reserved */
- [IPV4_FILTER_HASH] = BIT(12),
- /* Bits 13-31 reserved */
-};
-
-REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
@@ -470,7 +459,6 @@ static const struct reg *reg_array[] = {
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.2.c b/drivers/net/ipa/reg/ipa_reg-v4.2.c
index bb7cf488144d..e78dd71e8b03 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.2.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.2.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
/* Bit 0 reserved */
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.5.c b/drivers/net/ipa/reg/ipa_reg-v4.5.c
index 1c58f78851c2..8494731efdd3 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.5.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.5.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
/* Bit 0 reserved */
@@ -107,19 +109,6 @@ static const u32 reg_qsb_max_reads_fmask[] = {
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 reg_filt_rout_hash_en_fmask[] = {
- [IPV6_ROUTER_HASH] = BIT(0),
- /* Bits 1-3 reserved */
- [IPV6_FILTER_HASH] = BIT(4),
- /* Bits 5-7 reserved */
- [IPV4_ROUTER_HASH] = BIT(8),
- /* Bits 9-11 reserved */
- [IPV4_FILTER_HASH] = BIT(12),
- /* Bits 13-31 reserved */
-};
-
-REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
@@ -489,7 +478,6 @@ static const struct reg *reg_array[] = {
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.7.c b/drivers/net/ipa/reg/ipa_reg-v4.7.c
index 731824fce1d4..2c161cf69193 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.7.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.7.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
@@ -107,19 +109,6 @@ static const u32 reg_qsb_max_reads_fmask[] = {
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 reg_filt_rout_hash_en_fmask[] = {
- [IPV6_ROUTER_HASH] = BIT(0),
- /* Bits 1-3 reserved */
- [IPV6_FILTER_HASH] = BIT(4),
- /* Bits 5-7 reserved */
- [IPV4_ROUTER_HASH] = BIT(8),
- /* Bits 9-11 reserved */
- [IPV4_FILTER_HASH] = BIT(12),
- /* Bits 13-31 reserved */
-};
-
-REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
@@ -462,7 +451,6 @@ static const struct reg *reg_array[] = {
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.9.c b/drivers/net/ipa/reg/ipa_reg-v4.9.c
index 01f87b5290e0..fa6fd312e486 100644
--- a/drivers/net/ipa/reg/ipa_reg-v4.9.c
+++ b/drivers/net/ipa/reg/ipa_reg-v4.9.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2022 Linaro Ltd. */
+/* Copyright (C) 2022-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_comp_cfg_fmask[] = {
[RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
@@ -112,19 +114,6 @@ static const u32 reg_qsb_max_reads_fmask[] = {
REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
-static const u32 reg_filt_rout_hash_en_fmask[] = {
- [IPV6_ROUTER_HASH] = BIT(0),
- /* Bits 1-3 reserved */
- [IPV6_FILTER_HASH] = BIT(4),
- /* Bits 5-7 reserved */
- [IPV4_ROUTER_HASH] = BIT(8),
- /* Bits 9-11 reserved */
- [IPV4_FILTER_HASH] = BIT(12),
- /* Bits 13-31 reserved */
-};
-
-REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
-
static const u32 reg_filt_rout_hash_flush_fmask[] = {
[IPV6_ROUTER_HASH] = BIT(0),
/* Bits 1-3 reserved */
@@ -467,7 +456,6 @@ static const struct reg *reg_array[] = {
[SHARED_MEM_SIZE] = &reg_shared_mem_size,
[QSB_MAX_WRITES] = &reg_qsb_max_writes,
[QSB_MAX_READS] = &reg_qsb_max_reads,
- [FILT_ROUT_HASH_EN] = &reg_filt_rout_hash_en,
[FILT_ROUT_HASH_FLUSH] = &reg_filt_rout_hash_flush,
[STATE_AGGR_ACTIVE] = &reg_state_aggr_active,
[LOCAL_PKT_PROC_CNTXT] = &reg_local_pkt_proc_cntxt,
diff --git a/drivers/net/ipa/reg/ipa_reg-v5.0.c b/drivers/net/ipa/reg/ipa_reg-v5.0.c
index 95e0edff4170..b26b5f57ac03 100644
--- a/drivers/net/ipa/reg/ipa_reg-v5.0.c
+++ b/drivers/net/ipa/reg/ipa_reg-v5.0.c
@@ -1,11 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
+#include <linux/array_size.h>
+#include <linux/bits.h>
#include <linux/types.h>
-#include "../ipa.h"
#include "../ipa_reg.h"
+#include "../ipa_version.h"
static const u32 reg_flavor_0_fmask[] = {
[MAX_PIPES] = GENMASK(7, 0),
diff --git a/drivers/net/ipa/reg/ipa_reg-v5.5.c b/drivers/net/ipa/reg/ipa_reg-v5.5.c
index 26ca9c9bac59..abb0c443ef66 100644
--- a/drivers/net/ipa/reg/ipa_reg-v5.5.c
+++ b/drivers/net/ipa/reg/ipa_reg-v5.5.c
@@ -1,10 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2023 Linaro Ltd. */
+/* Copyright (C) 2023-2024 Linaro Ltd. */
-#include <linux/kernel.h>
-#include <linux/types.h>
+#include <linux/array_size.h>
#include <linux/bits.h>
+#include <linux/types.h>
#include "../ipa_reg.h"
#include "../ipa_version.h"
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 2d5b021b4ea6..fef4eff7753a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -439,7 +439,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
- err = ip_local_out(net, skb->sk, skb);
+ err = ip_local_out(net, NULL, skb);
if (unlikely(net_xmit_eval(err)))
DEV_STATS_INC(dev, tx_errors);
else
@@ -494,7 +494,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
- err = ip6_local_out(dev_net(dev), skb->sk, skb);
+ err = ip6_local_out(dev_net(dev), NULL, skb);
if (unlikely(net_xmit_eval(err)))
DEV_STATS_INC(dev, tx_errors);
else
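Both ipvlan hunks stop forwarding skb->sk into the local-output path. The skb being re-transmitted may carry a socket that ipvlan does not own and that is not an inet socket at all (a packet socket, for instance), so the IP output code must not account the second transmit against it; passing NULL detaches the output from any socket. For reference, only the second argument changes:

    /* int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
     * NULL @sk: do not tie this (re)transmit to the originating socket
     */
    err = ip_local_out(net, NULL, skb);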
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 5920f7e63352..094f44dac5c8 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -735,6 +735,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
switch (event) {
case NETDEV_UP:
+ case NETDEV_DOWN:
case NETDEV_CHANGE:
list_for_each_entry(ipvlan, &port->ipvlans, pnode)
netif_stacked_transfer_operstate(ipvlan->phy_dev,
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index f6eab66c2660..2b486e7c749c 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -141,9 +141,6 @@ static const struct ethtool_ops loopback_ethtool_ops = {
static int loopback_dev_init(struct net_device *dev)
{
- dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
- if (!dev->lstats)
- return -ENOMEM;
netdev_lockdep_set_classes(dev);
return 0;
}
@@ -151,7 +148,6 @@ static int loopback_dev_init(struct net_device *dev)
static void loopback_dev_free(struct net_device *dev)
{
dev_net(dev)->loopback_dev = NULL;
- free_percpu(dev->lstats);
}
static const struct net_device_ops loopback_ops = {
@@ -191,6 +187,7 @@ static void gen_lo_setup(struct net_device *dev,
dev->header_ops = hdr_ops;
dev->netdev_ops = dev_ops;
dev->needs_free_netdev = true;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
dev->priv_destructor = dev_destructor;
netif_set_tso_max_size(dev, GSO_MAX_SIZE);
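Setting dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS shifts ownership of the per-CPU lstats to the core: it allocates dev->lstats during device registration and frees them at teardown, which is why the driver-side alloc/free pair above can be deleted. A sketch of a setup hook relying on that (my_setup is illustrative):

    static void my_setup(struct net_device *dev)
    {
        /* the core now allocates and frees dev->lstats for us */
        dev->pcpu_stat_type = NETDEV_PCPU_STAT_LSTATS;
    }

    /* the hot path keeps using the usual accounting helper */
    dev_lstats_add(dev, skb->len);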
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index ff016c11b4a0..2da70bc3dd86 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -3753,7 +3753,7 @@ static int macsec_change_mtu(struct net_device *dev, int new_mtu)
if (macsec->real_dev->mtu - extra < new_mtu)
return -ERANGE;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
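dev->mtu is read locklessly all over the stack, so the store in the MTU setters (here and in the macvlan and net_failover hunks below) is annotated with WRITE_ONCE(); the matching read side uses READ_ONCE(). This documents the intentional data race for KCSAN without changing behavior:

    WRITE_ONCE(dev->mtu, new_mtu);            /* writer: ndo_change_mtu */

    /* illustrative lockless reader elsewhere in the stack */
    if (skb->len > READ_ONCE(dev->mtu))
        goto drop;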
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0cec2783a3e7..24298a33e0e9 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -865,7 +865,7 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
if (vlan->lowerdev->mtu < new_mtu)
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -1084,7 +1084,7 @@ static int macvlan_ethtool_get_link_ksettings(struct net_device *dev,
}
static int macvlan_ethtool_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct net_device *real_dev = macvlan_dev_real_dev(dev);
diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c
index b37a9e4bade4..4dc057c121f5 100644
--- a/drivers/net/mctp/mctp-i2c.c
+++ b/drivers/net/mctp/mctp-i2c.c
@@ -442,6 +442,42 @@ static void mctp_i2c_unlock_reset(struct mctp_i2c_dev *midev)
i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT);
}
+static void mctp_i2c_invalidate_tx_flow(struct mctp_i2c_dev *midev,
+ struct sk_buff *skb)
+{
+ struct mctp_sk_key *key;
+ struct mctp_flow *flow;
+ unsigned long flags;
+ bool release;
+
+ flow = skb_ext_find(skb, SKB_EXT_MCTP);
+ if (!flow)
+ return;
+
+ key = flow->key;
+ if (!key)
+ return;
+
+ spin_lock_irqsave(&key->lock, flags);
+ if (key->manual_alloc) {
+ /* we don't have control over lifetimes for manually-allocated
+ * keys, so cannot assume we can invalidate all future flows
+ * that would use this key.
+ */
+ release = false;
+ } else {
+ release = key->dev_flow_state == MCTP_I2C_FLOW_STATE_ACTIVE;
+ key->dev_flow_state = MCTP_I2C_FLOW_STATE_INVALID;
+ }
+ spin_unlock_irqrestore(&key->lock, flags);
+
+ /* if we have changed state from active, the flow held a reference on
+ * the lock; release that now.
+ */
+ if (release)
+ mctp_i2c_unlock_nest(midev);
+}
+
static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
{
struct net_device_stats *stats = &midev->ndev->stats;
@@ -500,6 +536,11 @@ static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb)
case MCTP_I2C_TX_FLOW_EXISTING:
/* existing flow: we already have the lock; just tx */
rc = __i2c_transfer(midev->adapter, &msg, 1);
+
+ /* on tx errors, the flow can no longer be considered valid */
+ if (rc)
+ mctp_i2c_invalidate_tx_flow(midev, skb);
+
break;
case MCTP_I2C_TX_FLOW_INVALID:
@@ -1042,8 +1083,8 @@ static struct notifier_block mctp_i2c_notifier = {
};
static const struct i2c_device_id mctp_i2c_id[] = {
- { "mctp-i2c-interface", 0 },
- {},
+ { "mctp-i2c-interface" },
+ {}
};
MODULE_DEVICE_TABLE(i2c, mctp_i2c_id);
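mctp_i2c_invalidate_tx_flow() follows the classic decide-under-lock, act-outside-lock split: the flow state is inspected and flipped inside the spinlock, but the heavier bus unlock happens only after the spinlock is dropped. Reduced to its skeleton (the state names mirror the driver; my_unlock_bus is illustrative):

    bool release;
    unsigned long flags;

    spin_lock_irqsave(&key->lock, flags);
    release = (key->dev_flow_state == MCTP_I2C_FLOW_STATE_ACTIVE);
    key->dev_flow_state = MCTP_I2C_FLOW_STATE_INVALID;   /* decide under lock */
    spin_unlock_irqrestore(&key->lock, flags);

    if (release)          /* lock dropped, heavier work is safe now */
        my_unlock_bus();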
diff --git a/drivers/net/mdio/mdio-gpio.c b/drivers/net/mdio/mdio-gpio.c
index 778db310a28d..82088741debd 100644
--- a/drivers/net/mdio/mdio-gpio.c
+++ b/drivers/net/mdio/mdio-gpio.c
@@ -132,8 +132,7 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask;
}
- if (dev->of_node &&
- of_device_is_compatible(dev->of_node, "microchip,mdio-smi0")) {
+ if (device_is_compatible(dev, "microchip,mdio-smi0")) {
bitbang->ctrl.op_c22_read = 0;
bitbang->ctrl.op_c22_write = 0;
bitbang->ctrl.override_op_c22 = 1;
diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c
index c29377c85307..62c47e0dd142 100644
--- a/drivers/net/mdio/mdio-mscc-miim.c
+++ b/drivers/net/mdio/mdio-mscc-miim.c
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#define MSCC_MIIM_REG_STATUS 0x0
#define MSCC_MIIM_STATUS_STAT_PENDING BIT(2)
@@ -271,10 +272,17 @@ static int mscc_miim_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct regmap *mii_regmap, *phy_regmap;
struct device *dev = &pdev->dev;
+ struct reset_control *reset;
struct mscc_miim_dev *miim;
struct mii_bus *bus;
int ret;
+ reset = devm_reset_control_get_optional_shared(dev, "switch");
+ if (IS_ERR(reset))
+ return dev_err_probe(dev, PTR_ERR(reset), "Failed to get reset\n");
+
+ reset_control_reset(reset);
+
mii_regmap = ocelot_regmap_from_resource(pdev, 0,
&mscc_miim_regmap_config);
if (IS_ERR(mii_regmap))
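
The mscc-miim hunk above leans on the kernel's "optional resource" convention: devm_reset_control_get_optional_shared() returns NULL, not an error, when no reset line is described, and reset_control_reset(NULL) is a harmless no-op, so the probe path needs no if-present branching. A userspace analogue of that convention, with hypothetical types:

#include <stddef.h>
#include <stdio.h>

struct reset_control { const char *name; };

static struct reset_control switch_reset = { "switch" };

static struct reset_control *get_optional_reset(int described)
{
	return described ? &switch_reset : NULL;	/* NULL is success */
}

static void reset_control_reset(struct reset_control *rc)
{
	if (!rc)
		return;			/* silently accept the NULL handle */
	printf("resetting %s\n", rc->name);
}

int main(void)
{
	reset_control_reset(get_optional_reset(1));	/* fires */
	reset_control_reset(get_optional_reset(0));	/* harmless no-op */
	return 0;
}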
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index d0c916a53d7c..963d8b4af28d 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -231,7 +231,7 @@ static int net_failover_change_mtu(struct net_device *dev, int new_mtu)
}
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index d7070dd4fe73..9c09293b5258 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -344,7 +344,7 @@ static ssize_t enabled_store(struct config_item *item,
goto out_unlock;
err = -EINVAL;
- if ((bool)enabled == nt->enabled) {
+ if (enabled == nt->enabled) {
pr_info("network logging has already %s\n",
nt->enabled ? "started" : "stopped");
goto out_unlock;
@@ -369,6 +369,7 @@ static ssize_t enabled_store(struct config_item *item,
if (err)
goto out_unlock;
+ nt->enabled = true;
pr_info("network logging started\n");
} else { /* false */
/* We need to disable the netconsole before cleaning it up
@@ -381,8 +382,6 @@ static ssize_t enabled_store(struct config_item *item,
netpoll_cleanup(&nt->np);
}
- nt->enabled = enabled;
-
mutex_unlock(&dynamic_netconsole_mutex);
return strnlen(buf, count);
out_unlock:
@@ -974,6 +973,7 @@ restart:
/* rtnl_lock already held
* we might sleep in __netpoll_cleanup()
*/
+ nt->enabled = false;
spin_unlock_irqrestore(&target_list_lock, flags);
__netpoll_cleanup(&nt->np);
@@ -981,7 +981,6 @@ restart:
spin_lock_irqsave(&target_list_lock, flags);
netdev_put(nt->np.dev, &nt->np.dev_tracker);
nt->np.dev = NULL;
- nt->enabled = false;
stopped = true;
netconsole_target_put(nt);
goto restart;
@@ -1262,6 +1261,8 @@ static int __init init_netconsole(void)
while ((target_config = strsep(&input, ";"))) {
nt = alloc_param_target(target_config, count);
if (IS_ERR(nt)) {
+ if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC))
+ continue;
err = PTR_ERR(nt);
goto fail;
}
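
The netconsole hunks above move the nt->enabled writes so the flag is published only after netpoll setup succeeds, and cleared before teardown begins; concurrent readers then never observe an enabled target with half-initialized netpoll state. A small C11 sketch of that publish-after-init, unpublish-before-teardown ordering, with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct target {
	atomic_bool enabled;
};

static int setup(void)    { return 0; }	/* stand-in for netpoll_setup() */
static void cleanup(void) { }		/* stand-in for __netpoll_cleanup() */

static int target_enable(struct target *t)
{
	int err = setup();

	if (err)
		return err;		/* flag never becomes visible */
	atomic_store(&t->enabled, true);
	return 0;
}

static void target_disable(struct target *t)
{
	atomic_store(&t->enabled, false);	/* unpublish first */
	cleanup();				/* then tear down safely */
}

int main(void)
{
	struct target t = { .enabled = false };

	if (!target_enable(&t))
		printf("enabled: %d\n", atomic_load(&t.enabled));
	target_disable(&t);
	return 0;
}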
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index bd546d4d26c6..1436905bc106 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -140,8 +140,15 @@ nsim_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam)
return 0;
}
+static void
+nsim_get_fec_stats(struct net_device *dev, struct ethtool_fec_stats *fec_stats)
+{
+ fec_stats->corrected_blocks.total = 123;
+ fec_stats->uncorrectable_blocks.total = 4;
+}
+
static int nsim_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct netdevsim *ns = netdev_priv(dev);
@@ -163,6 +170,7 @@ static const struct ethtool_ops nsim_ethtool_ops = {
.set_channels = nsim_set_channels,
.get_fecparam = nsim_get_fecparam,
.set_fecparam = nsim_set_fecparam,
+ .get_fec_stats = nsim_get_fec_stats,
.get_ts_info = nsim_get_ts_info,
};
@@ -182,6 +190,9 @@ void nsim_ethtool_init(struct netdevsim *ns)
nsim_ethtool_ring_init(ns);
+ ns->ethtool.pauseparam.report_stats_rx = true;
+ ns->ethtool.pauseparam.report_stats_tx = true;
+
ns->ethtool.fec.fec = ETHTOOL_FEC_NONE;
ns->ethtool.fec.active_fec = ETHTOOL_FEC_NONE;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 8330bc0bcb7e..017a6102be0a 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -19,6 +19,8 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
+#include <net/netdev_queues.h>
+#include <net/page_pool/helpers.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
#include <net/rtnetlink.h>
@@ -26,11 +28,33 @@
#include "netdevsim.h"
+#define NSIM_RING_SIZE 256
+
+static int nsim_napi_rx(struct nsim_rq *rq, struct sk_buff *skb)
+{
+ if (skb_queue_len(&rq->skb_queue) > NSIM_RING_SIZE) {
+ dev_kfree_skb_any(skb);
+ return NET_RX_DROP;
+ }
+
+ skb_queue_tail(&rq->skb_queue, skb);
+ return NET_RX_SUCCESS;
+}
+
+static int nsim_forward_skb(struct net_device *dev, struct sk_buff *skb,
+ struct nsim_rq *rq)
+{
+ return __dev_forward_skb(dev, skb) ?: nsim_napi_rx(rq, skb);
+}
+
static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdevsim *ns = netdev_priv(dev);
+ struct net_device *peer_dev;
unsigned int len = skb->len;
struct netdevsim *peer_ns;
+ struct nsim_rq *rq;
+ int rxq;
rcu_read_lock();
if (!nsim_ipsec_tx(ns, skb))
@@ -40,10 +64,18 @@ static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (!peer_ns)
goto out_drop_free;
+ peer_dev = peer_ns->netdev;
+ rxq = skb_get_queue_mapping(skb);
+ if (rxq >= peer_dev->num_rx_queues)
+ rxq = rxq % peer_dev->num_rx_queues;
+ rq = &peer_ns->rq[rxq];
+
skb_tx_timestamp(skb);
- if (unlikely(dev_forward_skb(peer_ns->netdev, skb) == NET_RX_DROP))
+ if (unlikely(nsim_forward_skb(peer_dev, skb, rq) == NET_RX_DROP))
goto out_drop_cnt;
+ napi_schedule(&rq->napi);
+
rcu_read_unlock();
u64_stats_update_begin(&ns->syncp);
ns->tx_packets++;
@@ -72,7 +104,7 @@ static int nsim_change_mtu(struct net_device *dev, int new_mtu)
if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU)
return -EBUSY;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -292,12 +324,157 @@ static int nsim_get_iflink(const struct net_device *dev)
rcu_read_lock();
peer = rcu_dereference(nsim->peer);
- iflink = peer ? READ_ONCE(peer->netdev->ifindex) : 0;
+ iflink = peer ? READ_ONCE(peer->netdev->ifindex) :
+ READ_ONCE(dev->ifindex);
rcu_read_unlock();
return iflink;
}
+static int nsim_rcv(struct nsim_rq *rq, int budget)
+{
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < budget; i++) {
+ if (skb_queue_empty(&rq->skb_queue))
+ break;
+
+ skb = skb_dequeue(&rq->skb_queue);
+ netif_receive_skb(skb);
+ }
+
+ return i;
+}
+
+static int nsim_poll(struct napi_struct *napi, int budget)
+{
+ struct nsim_rq *rq = container_of(napi, struct nsim_rq, napi);
+ int done;
+
+ done = nsim_rcv(rq, budget);
+ napi_complete(napi);
+
+ return done;
+}
+
+static int nsim_create_page_pool(struct nsim_rq *rq)
+{
+ struct page_pool_params p = {
+ .order = 0,
+ .pool_size = NSIM_RING_SIZE,
+ .nid = NUMA_NO_NODE,
+ .dev = &rq->napi.dev->dev,
+ .napi = &rq->napi,
+ .dma_dir = DMA_BIDIRECTIONAL,
+ .netdev = rq->napi.dev,
+ };
+
+ rq->page_pool = page_pool_create(&p);
+ if (IS_ERR(rq->page_pool)) {
+ int err = PTR_ERR(rq->page_pool);
+
+ rq->page_pool = NULL;
+ return err;
+ }
+ return 0;
+}
+
+static int nsim_init_napi(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ struct nsim_rq *rq;
+ int err, i;
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ rq = &ns->rq[i];
+
+ netif_napi_add(dev, &rq->napi, nsim_poll);
+ }
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ rq = &ns->rq[i];
+
+ err = nsim_create_page_pool(rq);
+ if (err)
+ goto err_pp_destroy;
+ }
+
+ return 0;
+
+err_pp_destroy:
+ while (i--) {
+ page_pool_destroy(ns->rq[i].page_pool);
+ ns->rq[i].page_pool = NULL;
+ }
+
+ for (i = 0; i < dev->num_rx_queues; i++)
+ __netif_napi_del(&ns->rq[i].napi);
+
+ return err;
+}
+
+static void nsim_enable_napi(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ int i;
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ struct nsim_rq *rq = &ns->rq[i];
+
+ netif_queue_set_napi(dev, i, NETDEV_QUEUE_TYPE_RX, &rq->napi);
+ napi_enable(&rq->napi);
+ }
+}
+
+static int nsim_open(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ int err;
+
+ err = nsim_init_napi(ns);
+ if (err)
+ return err;
+
+ nsim_enable_napi(ns);
+
+ return 0;
+}
+
+static void nsim_del_napi(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ int i;
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ struct nsim_rq *rq = &ns->rq[i];
+
+ napi_disable(&rq->napi);
+ __netif_napi_del(&rq->napi);
+ }
+ synchronize_net();
+
+ for (i = 0; i < dev->num_rx_queues; i++) {
+ page_pool_destroy(ns->rq[i].page_pool);
+ ns->rq[i].page_pool = NULL;
+ }
+}
+
+static int nsim_stop(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct netdevsim *peer;
+
+ netif_carrier_off(dev);
+ peer = rtnl_dereference(ns->peer);
+ if (peer)
+ netif_carrier_off(peer->netdev);
+
+ nsim_del_napi(ns);
+
+ return 0;
+}
+
static const struct net_device_ops nsim_netdev_ops = {
.ndo_start_xmit = nsim_start_xmit,
.ndo_set_rx_mode = nsim_set_rx_mode,
@@ -317,6 +494,8 @@ static const struct net_device_ops nsim_netdev_ops = {
.ndo_set_features = nsim_set_features,
.ndo_get_iflink = nsim_get_iflink,
.ndo_bpf = nsim_bpf,
+ .ndo_open = nsim_open,
+ .ndo_stop = nsim_stop,
};
static const struct net_device_ops nsim_vf_netdev_ops = {
@@ -330,6 +509,107 @@ static const struct net_device_ops nsim_vf_netdev_ops = {
.ndo_set_features = nsim_set_features,
};
+/* We don't have true per-queue stats yet, so do some random fakery here.
+ * Only report stats for queue 0.
+ */
+static void nsim_get_queue_stats_rx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct rtnl_link_stats64 rtstats = {};
+
+ if (!idx)
+ nsim_get_stats64(dev, &rtstats);
+
+ stats->packets = rtstats.rx_packets - !!rtstats.rx_packets;
+ stats->bytes = rtstats.rx_bytes;
+}
+
+static void nsim_get_queue_stats_tx(struct net_device *dev, int idx,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct rtnl_link_stats64 rtstats = {};
+
+ if (!idx)
+ nsim_get_stats64(dev, &rtstats);
+
+ stats->packets = rtstats.tx_packets - !!rtstats.tx_packets;
+ stats->bytes = rtstats.tx_bytes;
+}
+
+static void nsim_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct rtnl_link_stats64 rtstats = {};
+
+ nsim_get_stats64(dev, &rtstats);
+
+ rx->packets = !!rtstats.rx_packets;
+ rx->bytes = 0;
+ tx->packets = !!rtstats.tx_packets;
+ tx->bytes = 0;
+}
+
+static const struct netdev_stat_ops nsim_stat_ops = {
+ .get_queue_stats_tx = nsim_get_queue_stats_tx,
+ .get_queue_stats_rx = nsim_get_queue_stats_rx,
+ .get_base_stats = nsim_get_base_stats,
+};
+
+static ssize_t
+nsim_pp_hold_read(struct file *file, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct netdevsim *ns = file->private_data;
+ char buf[3] = "n\n";
+
+ if (ns->page)
+ buf[0] = 'y';
+
+ return simple_read_from_buffer(data, count, ppos, buf, 2);
+}
+
+static ssize_t
+nsim_pp_hold_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct netdevsim *ns = file->private_data;
+ ssize_t ret;
+ bool val;
+
+ ret = kstrtobool_from_user(data, count, &val);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ ret = count;
+ if (val == !!ns->page)
+ goto exit;
+
+ if (!netif_running(ns->netdev) && val) {
+ ret = -ENETDOWN;
+ } else if (val) {
+ ns->page = page_pool_dev_alloc_pages(ns->rq[0].page_pool);
+ if (!ns->page)
+ ret = -ENOMEM;
+ } else {
+ page_pool_put_full_page(ns->page->pp, ns->page, false);
+ ns->page = NULL;
+ }
+exit:
+	rtnl_unlock();
+
+	return ret;
+}
+
+static const struct file_operations nsim_pp_hold_fops = {
+ .open = simple_open,
+ .read = nsim_pp_hold_read,
+ .write = nsim_pp_hold_write,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
static void nsim_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -349,6 +629,35 @@ static void nsim_setup(struct net_device *dev)
dev->xdp_features = NETDEV_XDP_ACT_HW_OFFLOAD;
}
+static int nsim_queue_init(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ int i;
+
+ ns->rq = kvcalloc(dev->num_rx_queues, sizeof(*ns->rq),
+ GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
+ if (!ns->rq)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->num_rx_queues; i++)
+ skb_queue_head_init(&ns->rq[i].skb_queue);
+
+ return 0;
+}
+
+static void nsim_queue_free(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+ int i;
+
+ for (i = 0; i < dev->num_rx_queues; i++)
+ skb_queue_purge_reason(&ns->rq[i].skb_queue,
+ SKB_DROP_REASON_QUEUE_PURGE);
+
+ kvfree(ns->rq);
+ ns->rq = NULL;
+}
+
static int nsim_init_netdevsim(struct netdevsim *ns)
{
struct mock_phc *phc;
@@ -360,16 +669,21 @@ static int nsim_init_netdevsim(struct netdevsim *ns)
ns->phc = phc;
ns->netdev->netdev_ops = &nsim_netdev_ops;
+ ns->netdev->stat_ops = &nsim_stat_ops;
err = nsim_udp_tunnels_info_create(ns->nsim_dev, ns->netdev);
if (err)
goto err_phc_destroy;
rtnl_lock();
- err = nsim_bpf_init(ns);
+ err = nsim_queue_init(ns);
if (err)
goto err_utn_destroy;
+ err = nsim_bpf_init(ns);
+ if (err)
+ goto err_rq_destroy;
+
nsim_macsec_init(ns);
nsim_ipsec_init(ns);
@@ -383,6 +697,8 @@ err_ipsec_teardown:
nsim_ipsec_teardown(ns);
nsim_macsec_teardown(ns);
nsim_bpf_uninit(ns);
+err_rq_destroy:
+ nsim_queue_free(ns);
err_utn_destroy:
rtnl_unlock();
nsim_udp_tunnels_info_destroy(ns->netdev);
@@ -436,6 +752,10 @@ nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
err = nsim_init_netdevsim_vf(ns);
if (err)
goto err_free_netdev;
+
+ ns->pp_dfs = debugfs_create_file("pp_hold", 0600, nsim_dev_port->ddir,
+ ns, &nsim_pp_hold_fops);
+
return ns;
err_free_netdev:
@@ -448,6 +768,8 @@ void nsim_destroy(struct netdevsim *ns)
struct net_device *dev = ns->netdev;
struct netdevsim *peer;
+ debugfs_remove(ns->pp_dfs);
+
rtnl_lock();
peer = rtnl_dereference(ns->peer);
if (peer)
@@ -458,10 +780,18 @@ void nsim_destroy(struct netdevsim *ns)
nsim_macsec_teardown(ns);
nsim_ipsec_teardown(ns);
nsim_bpf_uninit(ns);
+ nsim_queue_free(ns);
}
rtnl_unlock();
if (nsim_dev_port_is_pf(ns->nsim_dev_port))
nsim_exit_netdevsim(ns);
+
+ /* Put this intentionally late to exercise the orphaning path */
+ if (ns->page) {
+ page_pool_put_full_page(ns->page->pp, ns->page, false);
+ ns->page = NULL;
+ }
+
free_netdev(dev);
}
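
The netdevsim stats callbacks above use a subtle invariant: nsim_get_base_stats() reports one packet iff any were seen, and nsim_get_queue_stats_rx()/_tx() report the device total minus that same one, so base plus per-queue always re-adds to the true total. A standalone worked example of the arithmetic; the real code reads rtnl_link_stats64 rather than taking a plain counter.

#include <assert.h>
#include <stdio.h>

static unsigned long long base_pkts(unsigned long long total)
{
	return !!total;			/* 0 or 1 */
}

static unsigned long long queue0_pkts(unsigned long long total)
{
	return total - !!total;		/* never underflows at total == 0 */
}

int main(void)
{
	unsigned long long totals[] = { 0, 1, 12345 };

	for (int i = 0; i < 3; i++) {
		unsigned long long t = totals[i];

		assert(base_pkts(t) + queue0_pkts(t) == t);
		printf("total=%llu base=%llu queue0=%llu\n",
		       t, base_pkts(t), queue0_pkts(t));
	}
	return 0;
}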
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 553c4b9b4f63..bf02efa10956 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -90,11 +90,18 @@ struct nsim_ethtool {
struct ethtool_fecparam fec;
};
+struct nsim_rq {
+ struct napi_struct napi;
+ struct sk_buff_head skb_queue;
+ struct page_pool *page_pool;
+};
+
struct netdevsim {
struct net_device *netdev;
struct nsim_dev *nsim_dev;
struct nsim_dev_port *nsim_dev_port;
struct mock_phc *phc;
+ struct nsim_rq *rq;
u64 tx_packets;
u64 tx_bytes;
@@ -125,6 +132,9 @@ struct netdevsim {
struct debugfs_u32_array dfs_ports[2];
} udp_ports;
+ struct page *page;
+ struct dentry *pp_dfs;
+
struct nsim_ethtool ethtool;
struct netdevsim __rcu *peer;
};
diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c
index a4d2e76a8d58..16789cd446e9 100644
--- a/drivers/net/netkit.c
+++ b/drivers/net/netkit.c
@@ -55,6 +55,7 @@ static void netkit_prep_forward(struct sk_buff *skb, bool xnet)
skb_scrub_packet(skb, xnet);
skb->priority = 0;
nf_skip_egress(skb, true);
+ skb_reset_mac_header(skb);
}
static struct netkit *netkit_priv(const struct net_device *dev)
@@ -78,6 +79,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
skb_orphan_frags(skb, GFP_ATOMIC)))
goto drop;
netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer)));
+ eth_skb_pkt_type(skb, peer);
skb->dev = peer;
entry = rcu_dereference(nk->active);
if (entry)
@@ -85,7 +87,7 @@ static netdev_tx_t netkit_xmit(struct sk_buff *skb, struct net_device *dev)
switch (ret) {
case NETKIT_NEXT:
case NETKIT_PASS:
- skb->protocol = eth_type_trans(skb, skb->dev);
+ eth_skb_pull_mac(skb);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
dev_sw_netstats_tx_add(dev, 1, len);
@@ -155,6 +157,16 @@ static void netkit_set_multicast(struct net_device *dev)
/* Nothing to do, we receive whatever gets pushed to us! */
}
+static int netkit_set_macaddr(struct net_device *dev, void *sa)
+{
+ struct netkit *nk = netkit_priv(dev);
+
+ if (nk->mode != NETKIT_L2)
+ return -EOPNOTSUPP;
+
+ return eth_mac_addr(dev, sa);
+}
+
static void netkit_set_headroom(struct net_device *dev, int headroom)
{
struct netkit *nk = netkit_priv(dev), *nk2;
@@ -198,6 +210,7 @@ static const struct net_device_ops netkit_netdev_ops = {
.ndo_start_xmit = netkit_xmit,
.ndo_set_rx_mode = netkit_set_multicast,
.ndo_set_rx_headroom = netkit_set_headroom,
+ .ndo_set_mac_address = netkit_set_macaddr,
.ndo_get_iflink = netkit_get_iflink,
.ndo_get_peer_dev = netkit_peer_dev,
.ndo_get_stats64 = netkit_get_stats,
@@ -300,9 +313,11 @@ static int netkit_validate(struct nlattr *tb[], struct nlattr *data[],
if (!attr)
return 0;
- NL_SET_ERR_MSG_ATTR(extack, attr,
- "Setting Ethernet address is not supported");
- return -EOPNOTSUPP;
+ if (nla_len(attr) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(attr)))
+ return -EADDRNOTAVAIL;
+ return 0;
}
static struct rtnl_link_ops netkit_link_ops;
@@ -365,6 +380,9 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
strscpy(ifname, "nk%d", IFNAMSIZ);
ifname_assign_type = NET_NAME_ENUM;
}
+ if (mode != NETKIT_L2 &&
+ (tb[IFLA_ADDRESS] || tbp[IFLA_ADDRESS]))
+ return -EOPNOTSUPP;
net = rtnl_link_get_net(src_net, tbp);
if (IS_ERR(net))
@@ -379,7 +397,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
netif_inherit_tso_max(peer, dev);
- if (mode == NETKIT_L2)
+ if (mode == NETKIT_L2 && !(ifmp && tbp[IFLA_ADDRESS]))
eth_hw_addr_random(peer);
if (ifmp && dev->ifindex)
peer->ifindex = ifmp->ifi_index;
@@ -402,7 +420,7 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev,
if (err < 0)
goto err_configure_peer;
- if (mode == NETKIT_L2)
+ if (mode == NETKIT_L2 && !tb[IFLA_ADDRESS])
eth_hw_addr_random(dev);
if (tb[IFLA_IFNAME])
nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
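
The reworked netkit_validate() above accepts an IFLA_ADDRESS attribute only if it is ETH_ALEN bytes and passes is_valid_ether_addr(). The kernel helper rejects the all-zeros address and any multicast address (bit 0 of the first octet set, which also covers broadcast); the userspace re-implementation below mirrors that logic for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool is_zero_ether_addr(const uint8_t *a)
{
	static const uint8_t zero[ETH_ALEN];

	return !memcmp(a, zero, ETH_ALEN);
}

static bool is_multicast_ether_addr(const uint8_t *a)
{
	return a[0] & 0x01;	/* broadcast ff:.. is multicast too */
}

static bool is_valid_ether_addr(const uint8_t *a)
{
	return !is_multicast_ether_addr(a) && !is_zero_ether_addr(a);
}

int main(void)
{
	const uint8_t ok[ETH_ALEN]    = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	const uint8_t mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("unicast valid: %d\n", is_valid_ether_addr(ok));	/* 1 */
	printf("multicast valid: %d\n", is_valid_ether_addr(mcast));	/* 0 */
	return 0;
}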
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 536bd6564f8b..ef6df0e37bea 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -119,7 +119,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
- if (__netif_rx(skb) == NET_RX_DROP) {
+ if (netif_rx(skb) == NET_RX_DROP) {
ndev->stats.rx_errors++;
ndev->stats.rx_dropped++;
} else {
@@ -306,7 +306,7 @@ static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
return -EINVAL;
if (!netif_running(ndev)) {
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
return 0;
}
@@ -335,7 +335,7 @@ static int ntb_netdev_change_mtu(struct net_device *ndev, int new_mtu)
}
}
- ndev->mtu = new_mtu;
+ WRITE_ONCE(ndev->mtu, new_mtu);
ntb_transport_link_up(dev->qp);
diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig
index 87cf308fc6d8..f6aa437473de 100644
--- a/drivers/net/pcs/Kconfig
+++ b/drivers/net/pcs/Kconfig
@@ -6,11 +6,11 @@
menu "PCS device drivers"
config PCS_XPCS
- tristate
+ tristate "Synopsys DesignWare Ethernet XPCS"
select PHYLINK
help
- This module provides helper functions for Synopsys DesignWare XPCS
- controllers.
+ This module provides a driver and helper functions for Synopsys
+ DesignWare XPCS controllers.
config PCS_LYNX
tristate
diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile
index fb1694192ae6..4f7920618b90 100644
--- a/drivers/net/pcs/Makefile
+++ b/drivers/net/pcs/Makefile
@@ -1,7 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for Linux PCS drivers
-pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o pcs-xpcs-wx.o
+pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-plat.o \
+ pcs-xpcs-nxp.o pcs-xpcs-wx.o
obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o
obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o
diff --git a/drivers/net/pcs/pcs-lynx.c b/drivers/net/pcs/pcs-lynx.c
index 853b8c138718..b79aedad855b 100644
--- a/drivers/net/pcs/pcs-lynx.c
+++ b/drivers/net/pcs/pcs-lynx.c
@@ -61,11 +61,10 @@ static void lynx_pcs_get_state_usxgmii(struct mdio_device *pcs,
static void lynx_pcs_get_state_2500basex(struct mdio_device *pcs,
struct phylink_link_state *state)
{
- int bmsr, lpa;
+ int bmsr;
bmsr = mdiodev_read(pcs, MII_BMSR);
- lpa = mdiodev_read(pcs, MII_LPA);
- if (bmsr < 0 || lpa < 0) {
+ if (bmsr < 0) {
state->link = false;
return;
}
diff --git a/drivers/net/pcs/pcs-rzn1-miic.c b/drivers/net/pcs/pcs-rzn1-miic.c
index 4bd66fdde367..d0a722d43368 100644
--- a/drivers/net/pcs/pcs-rzn1-miic.c
+++ b/drivers/net/pcs/pcs-rzn1-miic.c
@@ -279,10 +279,38 @@ static int miic_validate(struct phylink_pcs *pcs, unsigned long *supported,
return -EINVAL;
}
+static int miic_pre_init(struct phylink_pcs *pcs)
+{
+ struct miic_port *miic_port = phylink_pcs_to_miic_port(pcs);
+ struct miic *miic = miic_port->miic;
+ u32 val, mask;
+
+ /* Start RX clock if required */
+ if (pcs->rxc_always_on) {
+ /* In MII through mode, the clock signals will be driven by the
+ * external PHY, which might not be initialized yet. Set RMII
+ * as default mode to ensure that a reference clock signal is
+ * generated.
+ */
+ miic_port->interface = PHY_INTERFACE_MODE_RMII;
+
+ val = FIELD_PREP(MIIC_CONVCTRL_CONV_MODE, CONV_MODE_RMII) |
+ FIELD_PREP(MIIC_CONVCTRL_CONV_SPEED, CONV_MODE_100MBPS);
+ mask = MIIC_CONVCTRL_CONV_MODE | MIIC_CONVCTRL_CONV_SPEED;
+
+ miic_reg_rmw(miic, MIIC_CONVCTRL(miic_port->port), mask, val);
+
+ miic_converter_enable(miic, miic_port->port, 1);
+ }
+
+ return 0;
+}
+
static const struct phylink_pcs_ops miic_phylink_ops = {
.pcs_validate = miic_validate,
.pcs_config = miic_config,
.pcs_link_up = miic_link_up,
+ .pcs_pre_init = miic_pre_init,
};
struct phylink_pcs *miic_create(struct device *dev, struct device_node *np)
diff --git a/drivers/net/pcs/pcs-xpcs-plat.c b/drivers/net/pcs/pcs-xpcs-plat.c
new file mode 100644
index 000000000000..629315f1e57c
--- /dev/null
+++ b/drivers/net/pcs/pcs-xpcs-plat.c
@@ -0,0 +1,460 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Synopsys DesignWare XPCS platform device driver
+ *
+ * Copyright (C) 2024 Serge Semin
+ */
+
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/pcs/pcs-xpcs.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/property.h>
+#include <linux/sizes.h>
+
+#include "pcs-xpcs.h"
+
+/* Page-select register for indirect MMIO CSR access */
+#define DW_VR_CSR_VIEWPORT 0xff
+
+struct dw_xpcs_plat {
+ struct platform_device *pdev;
+ struct mii_bus *bus;
+ bool reg_indir;
+ int reg_width;
+ void __iomem *reg_base;
+ struct clk *cclk;
+};
+
+static ptrdiff_t xpcs_mmio_addr_format(int dev, int reg)
+{
+ return FIELD_PREP(0x1f0000, dev) | FIELD_PREP(0xffff, reg);
+}
+
+static u16 xpcs_mmio_addr_page(ptrdiff_t csr)
+{
+ return FIELD_GET(0x1fff00, csr);
+}
+
+static ptrdiff_t xpcs_mmio_addr_offset(ptrdiff_t csr)
+{
+ return FIELD_GET(0xff, csr);
+}
+
+static int xpcs_mmio_read_reg_indirect(struct dw_xpcs_plat *pxpcs,
+ int dev, int reg)
+{
+ ptrdiff_t csr, ofs;
+ u16 page;
+ int ret;
+
+ csr = xpcs_mmio_addr_format(dev, reg);
+ page = xpcs_mmio_addr_page(csr);
+ ofs = xpcs_mmio_addr_offset(csr);
+
+ ret = pm_runtime_resume_and_get(&pxpcs->pdev->dev);
+ if (ret)
+ return ret;
+
+ switch (pxpcs->reg_width) {
+ case 4:
+ writel(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 2));
+ ret = readl(pxpcs->reg_base + (ofs << 2));
+ break;
+ default:
+ writew(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 1));
+ ret = readw(pxpcs->reg_base + (ofs << 1));
+ break;
+ }
+
+ pm_runtime_put(&pxpcs->pdev->dev);
+
+ return ret;
+}
+
+static int xpcs_mmio_write_reg_indirect(struct dw_xpcs_plat *pxpcs,
+ int dev, int reg, u16 val)
+{
+ ptrdiff_t csr, ofs;
+ u16 page;
+ int ret;
+
+ csr = xpcs_mmio_addr_format(dev, reg);
+ page = xpcs_mmio_addr_page(csr);
+ ofs = xpcs_mmio_addr_offset(csr);
+
+ ret = pm_runtime_resume_and_get(&pxpcs->pdev->dev);
+ if (ret)
+ return ret;
+
+ switch (pxpcs->reg_width) {
+ case 4:
+ writel(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 2));
+ writel(val, pxpcs->reg_base + (ofs << 2));
+ break;
+ default:
+ writew(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 1));
+ writew(val, pxpcs->reg_base + (ofs << 1));
+ break;
+ }
+
+ pm_runtime_put(&pxpcs->pdev->dev);
+
+ return 0;
+}
+
+static int xpcs_mmio_read_reg_direct(struct dw_xpcs_plat *pxpcs,
+ int dev, int reg)
+{
+ ptrdiff_t csr;
+ int ret;
+
+ csr = xpcs_mmio_addr_format(dev, reg);
+
+ ret = pm_runtime_resume_and_get(&pxpcs->pdev->dev);
+ if (ret)
+ return ret;
+
+ switch (pxpcs->reg_width) {
+ case 4:
+ ret = readl(pxpcs->reg_base + (csr << 2));
+ break;
+ default:
+ ret = readw(pxpcs->reg_base + (csr << 1));
+ break;
+ }
+
+ pm_runtime_put(&pxpcs->pdev->dev);
+
+ return ret;
+}
+
+static int xpcs_mmio_write_reg_direct(struct dw_xpcs_plat *pxpcs,
+ int dev, int reg, u16 val)
+{
+ ptrdiff_t csr;
+ int ret;
+
+ csr = xpcs_mmio_addr_format(dev, reg);
+
+ ret = pm_runtime_resume_and_get(&pxpcs->pdev->dev);
+ if (ret)
+ return ret;
+
+ switch (pxpcs->reg_width) {
+ case 4:
+ writel(val, pxpcs->reg_base + (csr << 2));
+ break;
+ default:
+ writew(val, pxpcs->reg_base + (csr << 1));
+ break;
+ }
+
+ pm_runtime_put(&pxpcs->pdev->dev);
+
+ return 0;
+}
+
+static int xpcs_mmio_read_c22(struct mii_bus *bus, int addr, int reg)
+{
+ struct dw_xpcs_plat *pxpcs = bus->priv;
+
+ if (addr != 0)
+ return -ENODEV;
+
+ if (pxpcs->reg_indir)
+ return xpcs_mmio_read_reg_indirect(pxpcs, MDIO_MMD_VEND2, reg);
+ else
+ return xpcs_mmio_read_reg_direct(pxpcs, MDIO_MMD_VEND2, reg);
+}
+
+static int xpcs_mmio_write_c22(struct mii_bus *bus, int addr, int reg, u16 val)
+{
+ struct dw_xpcs_plat *pxpcs = bus->priv;
+
+ if (addr != 0)
+ return -ENODEV;
+
+ if (pxpcs->reg_indir)
+ return xpcs_mmio_write_reg_indirect(pxpcs, MDIO_MMD_VEND2, reg, val);
+ else
+ return xpcs_mmio_write_reg_direct(pxpcs, MDIO_MMD_VEND2, reg, val);
+}
+
+static int xpcs_mmio_read_c45(struct mii_bus *bus, int addr, int dev, int reg)
+{
+ struct dw_xpcs_plat *pxpcs = bus->priv;
+
+ if (addr != 0)
+ return -ENODEV;
+
+ if (pxpcs->reg_indir)
+ return xpcs_mmio_read_reg_indirect(pxpcs, dev, reg);
+ else
+ return xpcs_mmio_read_reg_direct(pxpcs, dev, reg);
+}
+
+static int xpcs_mmio_write_c45(struct mii_bus *bus, int addr, int dev,
+ int reg, u16 val)
+{
+ struct dw_xpcs_plat *pxpcs = bus->priv;
+
+ if (addr != 0)
+ return -ENODEV;
+
+ if (pxpcs->reg_indir)
+ return xpcs_mmio_write_reg_indirect(pxpcs, dev, reg, val);
+ else
+ return xpcs_mmio_write_reg_direct(pxpcs, dev, reg, val);
+}
+
+static struct dw_xpcs_plat *xpcs_plat_create_data(struct platform_device *pdev)
+{
+ struct dw_xpcs_plat *pxpcs;
+
+ pxpcs = devm_kzalloc(&pdev->dev, sizeof(*pxpcs), GFP_KERNEL);
+ if (!pxpcs)
+ return ERR_PTR(-ENOMEM);
+
+ pxpcs->pdev = pdev;
+
+ dev_set_drvdata(&pdev->dev, pxpcs);
+
+ return pxpcs;
+}
+
+static int xpcs_plat_init_res(struct dw_xpcs_plat *pxpcs)
+{
+ struct platform_device *pdev = pxpcs->pdev;
+ struct device *dev = &pdev->dev;
+ resource_size_t spc_size;
+ struct resource *res;
+
+ if (!device_property_read_u32(dev, "reg-io-width", &pxpcs->reg_width)) {
+ if (pxpcs->reg_width != 2 && pxpcs->reg_width != 4) {
+ dev_err(dev, "Invalid reg-space data width\n");
+ return -EINVAL;
+ }
+ } else {
+ pxpcs->reg_width = 2;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "direct") ?:
+ platform_get_resource_byname(pdev, IORESOURCE_MEM, "indirect");
+ if (!res) {
+ dev_err(dev, "No reg-space found\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(res->name, "indirect"))
+ pxpcs->reg_indir = true;
+
+ if (pxpcs->reg_indir)
+ spc_size = pxpcs->reg_width * SZ_256;
+ else
+ spc_size = pxpcs->reg_width * SZ_2M;
+
+ if (resource_size(res) < spc_size) {
+ dev_err(dev, "Invalid reg-space size\n");
+ return -EINVAL;
+ }
+
+ pxpcs->reg_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pxpcs->reg_base)) {
+ dev_err(dev, "Failed to map reg-space\n");
+ return PTR_ERR(pxpcs->reg_base);
+ }
+
+ return 0;
+}
+
+static int xpcs_plat_init_clk(struct dw_xpcs_plat *pxpcs)
+{
+ struct device *dev = &pxpcs->pdev->dev;
+ int ret;
+
+ pxpcs->cclk = devm_clk_get(dev, "csr");
+ if (IS_ERR(pxpcs->cclk))
+ return dev_err_probe(dev, PTR_ERR(pxpcs->cclk),
+ "Failed to get CSR clock\n");
+
+ pm_runtime_set_active(dev);
+ ret = devm_pm_runtime_enable(dev);
+ if (ret) {
+ dev_err(dev, "Failed to enable runtime-PM\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int xpcs_plat_init_bus(struct dw_xpcs_plat *pxpcs)
+{
+ struct device *dev = &pxpcs->pdev->dev;
+ static atomic_t id = ATOMIC_INIT(-1);
+ int ret;
+
+ pxpcs->bus = devm_mdiobus_alloc_size(dev, 0);
+ if (!pxpcs->bus)
+ return -ENOMEM;
+
+ pxpcs->bus->name = "DW XPCS MCI/APB3";
+ pxpcs->bus->read = xpcs_mmio_read_c22;
+ pxpcs->bus->write = xpcs_mmio_write_c22;
+ pxpcs->bus->read_c45 = xpcs_mmio_read_c45;
+ pxpcs->bus->write_c45 = xpcs_mmio_write_c45;
+ pxpcs->bus->phy_mask = ~0;
+ pxpcs->bus->parent = dev;
+ pxpcs->bus->priv = pxpcs;
+
+ snprintf(pxpcs->bus->id, MII_BUS_ID_SIZE,
+ "dwxpcs-%x", atomic_inc_return(&id));
+
+	/* The MDIO bus here serves as just a back-end engine, abstracting
+	 * the MDIO and MCI/APB3 I/O interfaces used to access the DW XPCS
+	 * CSRs.
+	 */
+ ret = devm_mdiobus_register(dev, pxpcs->bus);
+ if (ret) {
+ dev_err(dev, "Failed to create MDIO bus\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+/* Note there is no need for a teardown counterpart to the next function
+ * because the MDIO-bus de-registration will effectively remove and destroy
+ * all the MDIO devices registered on the bus.
+ */
+static int xpcs_plat_init_dev(struct dw_xpcs_plat *pxpcs)
+{
+ struct device *dev = &pxpcs->pdev->dev;
+ struct mdio_device *mdiodev;
+ int ret;
+
+ /* There is a single memory-mapped DW XPCS device */
+ mdiodev = mdio_device_create(pxpcs->bus, 0);
+ if (IS_ERR(mdiodev))
+ return PTR_ERR(mdiodev);
+
+	/* Associate the fwnode with the device structure so it can be looked
+	 * up later. Make sure the driver core is aware of the OF node being
+	 * re-used.
+	 */
+ device_set_node(&mdiodev->dev, fwnode_handle_get(dev_fwnode(dev)));
+ mdiodev->dev.of_node_reused = true;
+
+	/* Pass the data on so the DW XPCS driver core can use it */
+ mdiodev->dev.platform_data = (void *)device_get_match_data(dev);
+
+ ret = mdio_device_register(mdiodev);
+ if (ret) {
+ dev_err(dev, "Failed to register MDIO device\n");
+ goto err_clean_data;
+ }
+
+ return 0;
+
+err_clean_data:
+ mdiodev->dev.platform_data = NULL;
+
+ fwnode_handle_put(dev_fwnode(&mdiodev->dev));
+ device_set_node(&mdiodev->dev, NULL);
+
+ mdio_device_free(mdiodev);
+
+ return ret;
+}
+
+static int xpcs_plat_probe(struct platform_device *pdev)
+{
+ struct dw_xpcs_plat *pxpcs;
+ int ret;
+
+ pxpcs = xpcs_plat_create_data(pdev);
+ if (IS_ERR(pxpcs))
+ return PTR_ERR(pxpcs);
+
+ ret = xpcs_plat_init_res(pxpcs);
+ if (ret)
+ return ret;
+
+ ret = xpcs_plat_init_clk(pxpcs);
+ if (ret)
+ return ret;
+
+ ret = xpcs_plat_init_bus(pxpcs);
+ if (ret)
+ return ret;
+
+ ret = xpcs_plat_init_dev(pxpcs);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int __maybe_unused xpcs_plat_pm_runtime_suspend(struct device *dev)
+{
+ struct dw_xpcs_plat *pxpcs = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pxpcs->cclk);
+
+ return 0;
+}
+
+static int __maybe_unused xpcs_plat_pm_runtime_resume(struct device *dev)
+{
+ struct dw_xpcs_plat *pxpcs = dev_get_drvdata(dev);
+
+ return clk_prepare_enable(pxpcs->cclk);
+}
+
+static const struct dev_pm_ops xpcs_plat_pm_ops = {
+ SET_RUNTIME_PM_OPS(xpcs_plat_pm_runtime_suspend,
+ xpcs_plat_pm_runtime_resume,
+ NULL)
+};
+
+DW_XPCS_INFO_DECLARE(xpcs_generic, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_ID_NATIVE);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen1_3g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN1_3G_ID);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen2_3g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN2_3G_ID);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen2_6g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN2_6G_ID);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen4_3g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN4_3G_ID);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen4_6g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN4_6G_ID);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen5_10g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN5_10G_ID);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen5_12g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN5_12G_ID);
+
+static const struct of_device_id xpcs_of_ids[] = {
+ { .compatible = "snps,dw-xpcs", .data = &xpcs_generic },
+ { .compatible = "snps,dw-xpcs-gen1-3g", .data = &xpcs_pma_gen1_3g },
+ { .compatible = "snps,dw-xpcs-gen2-3g", .data = &xpcs_pma_gen2_3g },
+ { .compatible = "snps,dw-xpcs-gen2-6g", .data = &xpcs_pma_gen2_6g },
+ { .compatible = "snps,dw-xpcs-gen4-3g", .data = &xpcs_pma_gen4_3g },
+ { .compatible = "snps,dw-xpcs-gen4-6g", .data = &xpcs_pma_gen4_6g },
+ { .compatible = "snps,dw-xpcs-gen5-10g", .data = &xpcs_pma_gen5_10g },
+ { .compatible = "snps,dw-xpcs-gen5-12g", .data = &xpcs_pma_gen5_12g },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, xpcs_of_ids);
+
+static struct platform_driver xpcs_plat_driver = {
+ .probe = xpcs_plat_probe,
+ .driver = {
+ .name = "dwxpcs",
+ .pm = &xpcs_plat_pm_ops,
+ .of_match_table = xpcs_of_ids,
+ },
+};
+module_platform_driver(xpcs_plat_driver);
+
+MODULE_DESCRIPTION("Synopsys DesignWare XPCS platform device driver");
+MODULE_AUTHOR("Signed-off-by: Serge Semin <fancer.lancer@gmail.com>");
+MODULE_LICENSE("GPL");
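
The indirect-access helpers in pcs-xpcs-plat.c above pack a (MMD, register) pair into a 21-bit CSR address, write the upper 13 bits to the DW_VR_CSR_VIEWPORT page register, and use the low 8 bits as the offset within the page. A standalone worked example of that address math, with open-coded stand-ins for FIELD_PREP()/FIELD_GET():

#include <stdint.h>
#include <stdio.h>

static uint32_t addr_format(uint32_t dev, uint32_t reg)
{
	return ((dev & 0x1f) << 16) | (reg & 0xffff);
}

static uint16_t addr_page(uint32_t csr)
{
	return (csr & 0x1fff00) >> 8;	/* upper 13 bits select the page */
}

static uint32_t addr_offset(uint32_t csr)
{
	return csr & 0xff;		/* low 8 bits index within the page */
}

int main(void)
{
	uint32_t csr = addr_format(0x1f /* MDIO_MMD_VEND2 */, 0x8020);

	/* write addr_page(csr) to the viewport, then access addr_offset(csr) */
	printf("csr=0x%06x page=0x%04x offset=0x%02x\n",
	       csr, addr_page(csr), addr_offset(csr));
	return 0;
}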
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 31525fe9c32e..82463f9d50c8 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -6,10 +6,13 @@
* Author: Jose Abreu <Jose.Abreu@synopsys.com>
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pcs/pcs-xpcs.h>
#include <linux/mdio.h>
+#include <linux/phy.h>
#include <linux/phylink.h>
+#include <linux/property.h>
#include "pcs-xpcs.h"
@@ -143,7 +146,7 @@ enum {
DW_XPCS_INTERFACE_MAX,
};
-struct xpcs_compat {
+struct dw_xpcs_compat {
const int *supported;
const phy_interface_t *interface;
int num_interfaces;
@@ -151,19 +154,19 @@ struct xpcs_compat {
int (*pma_config)(struct dw_xpcs *xpcs);
};
-struct xpcs_id {
+struct dw_xpcs_desc {
u32 id;
u32 mask;
- const struct xpcs_compat *compat;
+ const struct dw_xpcs_compat *compat;
};
-static const struct xpcs_compat *xpcs_find_compat(const struct xpcs_id *id,
- phy_interface_t interface)
+static const struct dw_xpcs_compat *
+xpcs_find_compat(const struct dw_xpcs_desc *desc, phy_interface_t interface)
{
int i, j;
for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) {
- const struct xpcs_compat *compat = &id->compat[i];
+ const struct dw_xpcs_compat *compat = &desc->compat[i];
for (j = 0; j < compat->num_interfaces; j++)
if (compat->interface[j] == interface)
@@ -175,9 +178,9 @@ static const struct xpcs_compat *xpcs_find_compat(const struct xpcs_id *id,
int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface)
{
- const struct xpcs_compat *compat;
+ const struct dw_xpcs_compat *compat;
- compat = xpcs_find_compat(xpcs->id, interface);
+ compat = xpcs_find_compat(xpcs->desc, interface);
if (!compat)
return -ENODEV;
@@ -185,7 +188,7 @@ int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface)
}
EXPORT_SYMBOL_GPL(xpcs_get_an_mode);
-static bool __xpcs_linkmode_supported(const struct xpcs_compat *compat,
+static bool __xpcs_linkmode_supported(const struct dw_xpcs_compat *compat,
enum ethtool_link_mode_bit_indices linkmode)
{
int i;
@@ -237,29 +240,6 @@ int xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val)
return xpcs_write_vendor(xpcs, MDIO_MMD_PCS, reg, val);
}
-static int xpcs_dev_flag(struct dw_xpcs *xpcs)
-{
- int ret, oui;
-
- ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID1);
- if (ret < 0)
- return ret;
-
- oui = ret;
-
- ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID2);
- if (ret < 0)
- return ret;
-
- ret = (ret >> 10) & 0x3F;
- oui |= ret << 16;
-
- if (oui == DW_OUI_WX)
- xpcs->dev_flag = DW_DEV_TXGBE;
-
- return 0;
-}
-
static int xpcs_poll_reset(struct dw_xpcs *xpcs, int dev)
{
/* Poll until the reset bit clears (50ms per retry == 0.6 sec) */
@@ -277,7 +257,7 @@ static int xpcs_poll_reset(struct dw_xpcs *xpcs, int dev)
}
static int xpcs_soft_reset(struct dw_xpcs *xpcs,
- const struct xpcs_compat *compat)
+ const struct dw_xpcs_compat *compat)
{
int ret, dev;
@@ -418,7 +398,7 @@ out:
}
static int _xpcs_config_aneg_c73(struct dw_xpcs *xpcs,
- const struct xpcs_compat *compat)
+ const struct dw_xpcs_compat *compat)
{
int ret, adv;
@@ -463,7 +443,7 @@ static int _xpcs_config_aneg_c73(struct dw_xpcs *xpcs,
}
static int xpcs_config_aneg_c73(struct dw_xpcs *xpcs,
- const struct xpcs_compat *compat)
+ const struct dw_xpcs_compat *compat)
{
int ret;
@@ -482,7 +462,7 @@ static int xpcs_config_aneg_c73(struct dw_xpcs *xpcs,
static int xpcs_aneg_done_c73(struct dw_xpcs *xpcs,
struct phylink_link_state *state,
- const struct xpcs_compat *compat, u16 an_stat1)
+ const struct dw_xpcs_compat *compat, u16 an_stat1)
{
int ret;
@@ -607,12 +587,12 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported,
const struct phylink_link_state *state)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported) = { 0, };
- const struct xpcs_compat *compat;
+ const struct dw_xpcs_compat *compat;
struct dw_xpcs *xpcs;
int i;
xpcs = phylink_pcs_to_xpcs(pcs);
- compat = xpcs_find_compat(xpcs->id, state->interface);
+ compat = xpcs_find_compat(xpcs->desc, state->interface);
if (!compat)
return -EINVAL;
@@ -633,7 +613,7 @@ void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces)
int i, j;
for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) {
- const struct xpcs_compat *compat = &xpcs->id->compat[i];
+ const struct dw_xpcs_compat *compat = &xpcs->desc->compat[i];
for (j = 0; j < compat->num_interfaces; j++)
__set_bit(compat->interface[j], interfaces);
@@ -684,7 +664,7 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs,
{
int ret, mdio_ctrl, tx_conf;
- if (xpcs->dev_flag == DW_DEV_TXGBE)
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID)
xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1);
/* For AN for C37 SGMII mode, the settings are :-
@@ -722,7 +702,7 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs,
ret |= (DW_VR_MII_PCS_MODE_C37_SGMII <<
DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT &
DW_VR_MII_PCS_MODE_MASK);
- if (xpcs->dev_flag == DW_DEV_TXGBE) {
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) {
ret |= DW_VR_MII_AN_CTRL_8BIT;
/* Hardware requires it to be PHY side SGMII */
tx_conf = DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII;
@@ -744,7 +724,7 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs,
else
ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW;
- if (xpcs->dev_flag == DW_DEV_TXGBE)
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID)
ret |= DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL;
ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret);
@@ -766,7 +746,7 @@ static int xpcs_config_aneg_c37_1000basex(struct dw_xpcs *xpcs,
int ret, mdio_ctrl, adv;
bool changed = 0;
- if (xpcs->dev_flag == DW_DEV_TXGBE)
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID)
xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1);
/* According to Chap 7.12, to set 1000BASE-X C37 AN, AN must
@@ -850,14 +830,14 @@ static int xpcs_config_2500basex(struct dw_xpcs *xpcs)
int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t interface,
const unsigned long *advertising, unsigned int neg_mode)
{
- const struct xpcs_compat *compat;
+ const struct dw_xpcs_compat *compat;
int ret;
- compat = xpcs_find_compat(xpcs->id, interface);
+ compat = xpcs_find_compat(xpcs->desc, interface);
if (!compat)
return -ENODEV;
- if (xpcs->dev_flag == DW_DEV_TXGBE) {
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) {
ret = txgbe_xpcs_switch_mode(xpcs, interface);
if (ret)
return ret;
@@ -915,7 +895,7 @@ static int xpcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
static int xpcs_get_state_c73(struct dw_xpcs *xpcs,
struct phylink_link_state *state,
- const struct xpcs_compat *compat)
+ const struct dw_xpcs_compat *compat)
{
bool an_enabled;
int pcs_stat1;
@@ -1115,10 +1095,10 @@ static void xpcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs);
- const struct xpcs_compat *compat;
+ const struct dw_xpcs_compat *compat;
int ret;
- compat = xpcs_find_compat(xpcs->id, state->interface);
+ compat = xpcs_find_compat(xpcs->desc, state->interface);
if (!compat)
return;
@@ -1229,47 +1209,73 @@ static void xpcs_an_restart(struct phylink_pcs *pcs)
}
}
-static u32 xpcs_get_id(struct dw_xpcs *xpcs)
+static int xpcs_get_id(struct dw_xpcs *xpcs)
{
int ret;
u32 id;
- /* First, search C73 PCS using PCS MMD */
+	/* First, search for a C73 PCS using the PCS MMD 3. Return -ENODEV if
+	 * the communication failed, indicating that the device couldn't be
+	 * reached.
+	 */
ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID1);
if (ret < 0)
- return 0xffffffff;
+ return -ENODEV;
id = ret << 16;
ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID2);
if (ret < 0)
- return 0xffffffff;
+ return ret;
+
+ id |= ret;
- /* If Device IDs are not all zeros or all ones,
- * we found C73 AN-type device
+	/* If the Device IDs are not all zeros or all ones, a 10GBase-X/R or
+	 * C73 KR/KX4 PCS was found. Otherwise fall back to detecting a
+	 * 1000Base-X or C37 PCS in the MII MMD 31.
*/
- if ((id | ret) && (id | ret) != 0xffffffff)
- return id | ret;
+ if (!id || id == 0xffffffff) {
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID1);
+ if (ret < 0)
+ return ret;
+
+ id = ret << 16;
+
+ ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID2);
+ if (ret < 0)
+ return ret;
- /* Next, search C37 PCS using Vendor-Specific MII MMD */
- ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID1);
+ id |= ret;
+ }
+
+ /* Set the PCS ID if it hasn't been pre-initialized */
+ if (xpcs->info.pcs == DW_XPCS_ID_NATIVE)
+ xpcs->info.pcs = id;
+
+ /* Find out PMA/PMD ID from MMD 1 device ID registers */
+ ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID1);
if (ret < 0)
- return 0xffffffff;
+ return ret;
- id = ret << 16;
+ id = ret;
- ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID2);
+ ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID2);
if (ret < 0)
- return 0xffffffff;
+ return ret;
+
+	/* Note the word order is inverted and the Model/Revision numbers are
+	 * masked out, compared with how the PCS ID is composed above.
+	 */
+ ret = (ret >> 10) & 0x3F;
+ id |= ret << 16;
- /* If Device IDs are not all zeros, we found C37 AN-type device */
- if (id | ret)
- return id | ret;
+ /* Set the PMA ID if it hasn't been pre-initialized */
+ if (xpcs->info.pma == DW_XPCS_PMA_ID_NATIVE)
+ xpcs->info.pma = id;
- return 0xffffffff;
+ return 0;
}
-static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+static const struct dw_xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
[DW_XPCS_USXGMII] = {
.supported = xpcs_usxgmii_features,
.interface = xpcs_usxgmii_interfaces,
@@ -1314,7 +1320,7 @@ static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
},
};
-static const struct xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+static const struct dw_xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
[DW_XPCS_SGMII] = {
.supported = xpcs_sgmii_features,
.interface = xpcs_sgmii_interfaces,
@@ -1324,7 +1330,7 @@ static const struct xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] =
},
};
-static const struct xpcs_compat nxp_sja1110_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
+static const struct dw_xpcs_compat nxp_sja1110_xpcs_compat[DW_XPCS_INTERFACE_MAX] = {
[DW_XPCS_SGMII] = {
.supported = xpcs_sgmii_features,
.interface = xpcs_sgmii_interfaces,
@@ -1341,18 +1347,18 @@ static const struct xpcs_compat nxp_sja1110_xpcs_compat[DW_XPCS_INTERFACE_MAX] =
},
};
-static const struct xpcs_id xpcs_id_list[] = {
+static const struct dw_xpcs_desc xpcs_desc_list[] = {
{
- .id = SYNOPSYS_XPCS_ID,
- .mask = SYNOPSYS_XPCS_MASK,
+ .id = DW_XPCS_ID,
+ .mask = DW_XPCS_ID_MASK,
.compat = synopsys_xpcs_compat,
}, {
.id = NXP_SJA1105_XPCS_ID,
- .mask = SYNOPSYS_XPCS_MASK,
+ .mask = DW_XPCS_ID_MASK,
.compat = nxp_sja1105_xpcs_compat,
}, {
.id = NXP_SJA1110_XPCS_ID,
- .mask = SYNOPSYS_XPCS_MASK,
+ .mask = DW_XPCS_ID_MASK,
.compat = nxp_sja1110_xpcs_compat,
},
};
@@ -1365,12 +1371,9 @@ static const struct phylink_pcs_ops xpcs_phylink_ops = {
.pcs_link_up = xpcs_link_up,
};
-static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
- phy_interface_t interface)
+static struct dw_xpcs *xpcs_create_data(struct mdio_device *mdiodev)
{
struct dw_xpcs *xpcs;
- u32 xpcs_id;
- int i, ret;
xpcs = kzalloc(sizeof(*xpcs), GFP_KERNEL);
if (!xpcs)
@@ -1378,59 +1381,142 @@ static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
mdio_device_get(mdiodev);
xpcs->mdiodev = mdiodev;
+ xpcs->pcs.ops = &xpcs_phylink_ops;
+ xpcs->pcs.neg_mode = true;
+ xpcs->pcs.poll = true;
+
+ return xpcs;
+}
- xpcs_id = xpcs_get_id(xpcs);
+static void xpcs_free_data(struct dw_xpcs *xpcs)
+{
+ mdio_device_put(xpcs->mdiodev);
+ kfree(xpcs);
+}
- for (i = 0; i < ARRAY_SIZE(xpcs_id_list); i++) {
- const struct xpcs_id *entry = &xpcs_id_list[i];
- const struct xpcs_compat *compat;
+static int xpcs_init_clks(struct dw_xpcs *xpcs)
+{
+ static const char *ids[DW_XPCS_NUM_CLKS] = {
+ [DW_XPCS_CORE_CLK] = "core",
+ [DW_XPCS_PAD_CLK] = "pad",
+ };
+ struct device *dev = &xpcs->mdiodev->dev;
+ int ret, i;
- if ((xpcs_id & entry->mask) != entry->id)
- continue;
+ for (i = 0; i < DW_XPCS_NUM_CLKS; ++i)
+ xpcs->clks[i].id = ids[i];
- xpcs->id = entry;
+ ret = clk_bulk_get_optional(dev, DW_XPCS_NUM_CLKS, xpcs->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
- compat = xpcs_find_compat(entry, interface);
- if (!compat) {
- ret = -ENODEV;
- goto out;
- }
+ ret = clk_bulk_prepare_enable(DW_XPCS_NUM_CLKS, xpcs->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to enable clocks\n");
- ret = xpcs_dev_flag(xpcs);
- if (ret)
- goto out;
+ return 0;
+}
- xpcs->pcs.ops = &xpcs_phylink_ops;
- xpcs->pcs.neg_mode = true;
+static void xpcs_clear_clks(struct dw_xpcs *xpcs)
+{
+ clk_bulk_disable_unprepare(DW_XPCS_NUM_CLKS, xpcs->clks);
- if (xpcs->dev_flag != DW_DEV_TXGBE) {
- xpcs->pcs.poll = true;
+ clk_bulk_put(DW_XPCS_NUM_CLKS, xpcs->clks);
+}
- ret = xpcs_soft_reset(xpcs, compat);
- if (ret)
- goto out;
- }
+static int xpcs_init_id(struct dw_xpcs *xpcs)
+{
+ const struct dw_xpcs_info *info;
+ int i, ret;
- return xpcs;
+ info = dev_get_platdata(&xpcs->mdiodev->dev);
+ if (!info) {
+ xpcs->info.pcs = DW_XPCS_ID_NATIVE;
+ xpcs->info.pma = DW_XPCS_PMA_ID_NATIVE;
+ } else {
+ xpcs->info = *info;
}
- ret = -ENODEV;
+ ret = xpcs_get_id(xpcs);
+ if (ret < 0)
+ return ret;
-out:
- mdio_device_put(mdiodev);
- kfree(xpcs);
+ for (i = 0; i < ARRAY_SIZE(xpcs_desc_list); i++) {
+ const struct dw_xpcs_desc *desc = &xpcs_desc_list[i];
- return ERR_PTR(ret);
+ if ((xpcs->info.pcs & desc->mask) != desc->id)
+ continue;
+
+ xpcs->desc = desc;
+
+ break;
+ }
+
+ if (!xpcs->desc)
+ return -ENODEV;
+
+ return 0;
}
-void xpcs_destroy(struct dw_xpcs *xpcs)
+static int xpcs_init_iface(struct dw_xpcs *xpcs, phy_interface_t interface)
{
- if (xpcs)
- mdio_device_put(xpcs->mdiodev);
- kfree(xpcs);
+ const struct dw_xpcs_compat *compat;
+
+ compat = xpcs_find_compat(xpcs->desc, interface);
+ if (!compat)
+ return -EINVAL;
+
+ if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) {
+ xpcs->pcs.poll = false;
+ return 0;
+ }
+
+ return xpcs_soft_reset(xpcs, compat);
}
-EXPORT_SYMBOL_GPL(xpcs_destroy);
+static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev,
+ phy_interface_t interface)
+{
+ struct dw_xpcs *xpcs;
+ int ret;
+
+ xpcs = xpcs_create_data(mdiodev);
+ if (IS_ERR(xpcs))
+ return xpcs;
+
+ ret = xpcs_init_clks(xpcs);
+ if (ret)
+ goto out_free_data;
+
+ ret = xpcs_init_id(xpcs);
+ if (ret)
+ goto out_clear_clks;
+
+ ret = xpcs_init_iface(xpcs, interface);
+ if (ret)
+ goto out_clear_clks;
+
+ return xpcs;
+
+out_clear_clks:
+ xpcs_clear_clks(xpcs);
+
+out_free_data:
+ xpcs_free_data(xpcs);
+
+ return ERR_PTR(ret);
+}
+
+/**
+ * xpcs_create_mdiodev() - create a DW xPCS instance with the MDIO @addr
+ * @bus: pointer to the MDIO-bus descriptor for the device to be looked at
+ * @addr: device MDIO-bus ID
+ * @interface: requested PHY interface
+ *
+ * Return: a pointer to the DW XPCS handle if successful, otherwise -ENODEV if
+ * the PCS device couldn't be found on the bus, or another negative errno for
+ * data-allocation or MDIO-bus communication failures.
+ */
struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr,
phy_interface_t interface)
{
@@ -1455,5 +1541,54 @@ struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr,
}
EXPORT_SYMBOL_GPL(xpcs_create_mdiodev);
+/**
+ * xpcs_create_fwnode() - Create a DW xPCS instance from @fwnode
+ * @fwnode: fwnode handle pointing to the DW XPCS device
+ * @interface: requested PHY interface
+ *
+ * Return: a pointer to the DW XPCS handle if successful, otherwise -ENODEV if
+ * the fwnode device is unavailable or the PCS device couldn't be found on the
+ * bus, -EPROBE_DEFER if the respective MDIO-device instance couldn't be found,
+ * or another negative errno for data-allocation or MDIO-bus communication
+ * failures.
+ */
+struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode,
+ phy_interface_t interface)
+{
+ struct mdio_device *mdiodev;
+ struct dw_xpcs *xpcs;
+
+ if (!fwnode_device_is_available(fwnode))
+ return ERR_PTR(-ENODEV);
+
+ mdiodev = fwnode_mdio_find_device(fwnode);
+ if (!mdiodev)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ xpcs = xpcs_create(mdiodev, interface);
+
+	/* xpcs_create() has taken a refcount on the mdiodev if it was
+	 * successful. If xpcs_create() fails, the put below frees the mdio
+	 * device. In any case, we don't need to hold our reference anymore,
+	 * and dropping it here allows mdio_device_put() in xpcs_destroy()
+	 * to automatically free the mdio device.
+	 */
+ mdio_device_put(mdiodev);
+
+ return xpcs;
+}
+EXPORT_SYMBOL_GPL(xpcs_create_fwnode);
+
+void xpcs_destroy(struct dw_xpcs *xpcs)
+{
+ if (!xpcs)
+ return;
+
+ xpcs_clear_clks(xpcs);
+
+ xpcs_free_data(xpcs);
+}
+EXPORT_SYMBOL_GPL(xpcs_destroy);
+
MODULE_DESCRIPTION("Synopsys DesignWare XPCS library");
MODULE_LICENSE("GPL v2");
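
The reworked xpcs_get_id() above composes two 32-bit IDs in opposite orders: the PCS ID puts PHYSID1 in the upper half-word and PHYSID2 in the lower, while the PMA ID keeps DEVID1 in the lower half-word and folds only the 6-bit fragment of DEVID2 into the upper one, which is what the "inverted word order" comment refers to. A standalone sketch of the arithmetic with sample register values chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

static uint32_t pcs_id(uint16_t physid1, uint16_t physid2)
{
	return ((uint32_t)physid1 << 16) | physid2;
}

static uint32_t pma_id(uint16_t devid1, uint16_t devid2)
{
	uint32_t id = devid1;		/* low word first, unlike the PCS ID */

	return id | (((devid2 >> 10) & 0x3f) << 16);
}

int main(void)
{
	/* Hypothetical register contents, for illustration only */
	printf("PCS ID: 0x%08x\n", pcs_id(0x7996, 0xced0));
	printf("PMA ID: 0x%08x\n", pma_id(0x0018, 0x7400));
	return 0;
}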
diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h
index 96c36b32ca99..fa05adfae220 100644
--- a/drivers/net/pcs/pcs-xpcs.h
+++ b/drivers/net/pcs/pcs-xpcs.h
@@ -6,8 +6,8 @@
* Author: Jose Abreu <Jose.Abreu@synopsys.com>
*/
-#define SYNOPSYS_XPCS_ID 0x7996ced0
-#define SYNOPSYS_XPCS_MASK 0xffffffff
+#include <linux/bits.h>
+#include <linux/pcs/pcs-xpcs.h>
/* Vendor regs access */
#define DW_VENDOR BIT(15)
@@ -120,6 +120,9 @@
/* VR MII EEE Control 1 defines */
#define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */
+#define DW_XPCS_INFO_DECLARE(_name, _pcs, _pma) \
+ static const struct dw_xpcs_info _name = { .pcs = _pcs, .pma = _pma }
+
int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg);
int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val);
int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg);
diff --git a/drivers/net/pfcp.c b/drivers/net/pfcp.c
new file mode 100644
index 000000000000..69434fd13f96
--- /dev/null
+++ b/drivers/net/pfcp.c
@@ -0,0 +1,301 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * PFCP according to 3GPP TS 29.244
+ *
+ * Copyright (C) 2022, Intel Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/rculist.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/pfcp.h>
+
+struct pfcp_dev {
+ struct list_head list;
+
+ struct socket *sock;
+ struct net_device *dev;
+ struct net *net;
+
+ struct gro_cells gro_cells;
+};
+
+static unsigned int pfcp_net_id __read_mostly;
+
+struct pfcp_net {
+ struct list_head pfcp_dev_list;
+};
+
+static void
+pfcp_session_recv(struct pfcp_dev *pfcp, struct sk_buff *skb,
+ struct pfcp_metadata *md)
+{
+ struct pfcphdr_session *unparsed = pfcp_hdr_session(skb);
+
+ md->seid = unparsed->seid;
+ md->type = PFCP_TYPE_SESSION;
+}
+
+static void
+pfcp_node_recv(struct pfcp_dev *pfcp, struct sk_buff *skb,
+ struct pfcp_metadata *md)
+{
+ md->type = PFCP_TYPE_NODE;
+}
+
+static int pfcp_encap_recv(struct sock *sk, struct sk_buff *skb)
+{
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
+ struct metadata_dst *tun_dst;
+ struct pfcp_metadata *md;
+ struct pfcphdr *unparsed;
+ struct pfcp_dev *pfcp;
+
+ if (unlikely(!pskb_may_pull(skb, PFCP_HLEN)))
+ goto drop;
+
+ pfcp = rcu_dereference_sk_user_data(sk);
+ if (unlikely(!pfcp))
+ goto drop;
+
+ unparsed = pfcp_hdr(skb);
+
+ ip_tunnel_flags_zero(flags);
+ tun_dst = udp_tun_rx_dst(skb, sk->sk_family, flags, 0,
+ sizeof(*md));
+ if (unlikely(!tun_dst))
+ goto drop;
+
+ md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
+ if (unlikely(!md))
+ goto drop;
+
+ if (unparsed->flags & PFCP_SEID_FLAG)
+ pfcp_session_recv(pfcp, skb, md);
+ else
+ pfcp_node_recv(pfcp, skb, md);
+
+ __set_bit(IP_TUNNEL_PFCP_OPT_BIT, tun_dst->u.tun_info.key.tun_flags);
+ tun_dst->u.tun_info.options_len = sizeof(*md);
+
+ if (unlikely(iptunnel_pull_header(skb, PFCP_HLEN, skb->protocol,
+ !net_eq(sock_net(sk),
+ dev_net(pfcp->dev)))))
+ goto drop;
+
+ skb_dst_set(skb, (struct dst_entry *)tun_dst);
+
+ skb_reset_network_header(skb);
+ skb_reset_mac_header(skb);
+ skb->dev = pfcp->dev;
+
+ gro_cells_receive(&pfcp->gro_cells, skb);
+
+ return 0;
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+static void pfcp_del_sock(struct pfcp_dev *pfcp)
+{
+ udp_tunnel_sock_release(pfcp->sock);
+ pfcp->sock = NULL;
+}
+
+static void pfcp_dev_uninit(struct net_device *dev)
+{
+ struct pfcp_dev *pfcp = netdev_priv(dev);
+
+ gro_cells_destroy(&pfcp->gro_cells);
+ pfcp_del_sock(pfcp);
+}
+
+static int pfcp_dev_init(struct net_device *dev)
+{
+ struct pfcp_dev *pfcp = netdev_priv(dev);
+
+ pfcp->dev = dev;
+
+ return gro_cells_init(&pfcp->gro_cells, dev);
+}
+
+static const struct net_device_ops pfcp_netdev_ops = {
+ .ndo_init = pfcp_dev_init,
+ .ndo_uninit = pfcp_dev_uninit,
+ .ndo_get_stats64 = dev_get_tstats64,
+};
+
+static const struct device_type pfcp_type = {
+ .name = "pfcp",
+};
+
+static void pfcp_link_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &pfcp_netdev_ops;
+ dev->needs_free_netdev = true;
+ SET_NETDEV_DEVTYPE(dev, &pfcp_type);
+
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->priv_flags |= IFF_NO_QUEUE;
+
+ netif_keep_dst(dev);
+}
+
+static struct socket *pfcp_create_sock(struct pfcp_dev *pfcp)
+{
+ struct udp_tunnel_sock_cfg tuncfg = {};
+ struct udp_port_cfg udp_conf = {
+ .local_ip.s_addr = htonl(INADDR_ANY),
+ .family = AF_INET,
+ };
+ struct net *net = pfcp->net;
+ struct socket *sock;
+ int err;
+
+ udp_conf.local_udp_port = htons(PFCP_PORT);
+
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err)
+ return ERR_PTR(err);
+
+ tuncfg.sk_user_data = pfcp;
+ tuncfg.encap_rcv = pfcp_encap_recv;
+ tuncfg.encap_type = 1;
+
+ setup_udp_tunnel_sock(net, sock, &tuncfg);
+
+ return sock;
+}
+
+static int pfcp_add_sock(struct pfcp_dev *pfcp)
+{
+ pfcp->sock = pfcp_create_sock(pfcp);
+
+ return PTR_ERR_OR_ZERO(pfcp->sock);
+}
+
+static int pfcp_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct pfcp_dev *pfcp = netdev_priv(dev);
+ struct pfcp_net *pn;
+ int err;
+
+ pfcp->net = net;
+
+ err = pfcp_add_sock(pfcp);
+ if (err) {
+ netdev_dbg(dev, "failed to add pfcp socket %d\n", err);
+ goto exit_err;
+ }
+
+ err = register_netdevice(dev);
+ if (err) {
+ netdev_dbg(dev, "failed to register pfcp netdev %d\n", err);
+ goto exit_del_pfcp_sock;
+ }
+
+ pn = net_generic(dev_net(dev), pfcp_net_id);
+ list_add_rcu(&pfcp->list, &pn->pfcp_dev_list);
+
+ netdev_dbg(dev, "registered new PFCP interface\n");
+
+ return 0;
+
+exit_del_pfcp_sock:
+ pfcp_del_sock(pfcp);
+exit_err:
+ pfcp->net = NULL;
+ return err;
+}
+
+static void pfcp_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct pfcp_dev *pfcp = netdev_priv(dev);
+
+ list_del_rcu(&pfcp->list);
+ unregister_netdevice_queue(dev, head);
+}
+
+static struct rtnl_link_ops pfcp_link_ops __read_mostly = {
+ .kind = "pfcp",
+ .priv_size = sizeof(struct pfcp_dev),
+ .setup = pfcp_link_setup,
+ .newlink = pfcp_newlink,
+ .dellink = pfcp_dellink,
+};
+
+static int __net_init pfcp_net_init(struct net *net)
+{
+ struct pfcp_net *pn = net_generic(net, pfcp_net_id);
+
+ INIT_LIST_HEAD(&pn->pfcp_dev_list);
+ return 0;
+}
+
+static void __net_exit pfcp_net_exit(struct net *net)
+{
+ struct pfcp_net *pn = net_generic(net, pfcp_net_id);
+ struct pfcp_dev *pfcp;
+ LIST_HEAD(list);
+
+ rtnl_lock();
+ list_for_each_entry(pfcp, &pn->pfcp_dev_list, list)
+ pfcp_dellink(pfcp->dev, &list);
+
+ unregister_netdevice_many(&list);
+ rtnl_unlock();
+}
+
+static struct pernet_operations pfcp_net_ops = {
+ .init = pfcp_net_init,
+ .exit = pfcp_net_exit,
+ .id = &pfcp_net_id,
+ .size = sizeof(struct pfcp_net),
+};
+
+static int __init pfcp_init(void)
+{
+ int err;
+
+ err = register_pernet_subsys(&pfcp_net_ops);
+ if (err)
+ goto exit_err;
+
+ err = rtnl_link_register(&pfcp_link_ops);
+ if (err)
+ goto exit_unregister_subsys;
+ return 0;
+
+exit_unregister_subsys:
+ unregister_pernet_subsys(&pfcp_net_ops);
+exit_err:
+ pr_err("loading PFCP module failed: err %d\n", err);
+ return err;
+}
+late_initcall(pfcp_init);
+
+static void __exit pfcp_exit(void)
+{
+ rtnl_link_unregister(&pfcp_link_ops);
+ unregister_pernet_subsys(&pfcp_net_ops);
+
+ pr_info("PFCP module unloaded\n");
+}
+module_exit(pfcp_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Wojciech Drewek <wojciech.drewek@intel.com>");
+MODULE_DESCRIPTION("Interface driver for PFCP encapsulated traffic");
+MODULE_ALIAS_RTNL_LINK("pfcp");
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 1df0595c5ba9..7fddc8306d82 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -76,6 +76,11 @@ config SFP
comment "MII PHY device drivers"
+config AIR_EN8811H_PHY
+ tristate "Airoha EN8811H 2.5 Gigabit PHY"
+ help
+ Currently supports the Airoha EN8811H PHY.
+
config AMD_PHY
tristate "AMD and Altima PHYs"
help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 197acfa0b412..202ed7f450da 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -34,6 +34,7 @@ obj-y += $(sfp-obj-y) $(sfp-obj-m)
obj-$(CONFIG_ADIN_PHY) += adin.o
obj-$(CONFIG_ADIN1100_PHY) += adin1100.o
+obj-$(CONFIG_AIR_EN8811H_PHY) += air_en8811h.o
obj-$(CONFIG_AMD_PHY) += amd.o
obj-$(CONFIG_AQUANTIA_PHY) += aquantia/
ifdef CONFIG_AX88796B_RUST_PHY
diff --git a/drivers/net/phy/air_en8811h.c b/drivers/net/phy/air_en8811h.c
new file mode 100644
index 000000000000..3cdc8c6b30b6
--- /dev/null
+++ b/drivers/net/phy/air_en8811h.c
@@ -0,0 +1,1090 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Driver for the Airoha EN8811H 2.5 Gigabit PHY.
+ *
+ * Limitations of the EN8811H:
+ * - Only full duplex supported
+ * - Forced speed (AN off) is not supported by hardware (100Mbps)
+ *
+ * Source originated from Airoha's en8811h.c and en8811h.h v1.2.1
+ *
+ * Copyright (C) 2023 Airoha Technology Corp.
+ */
+
+#include <linux/phy.h>
+#include <linux/firmware.h>
+#include <linux/property.h>
+#include <linux/wordpart.h>
+#include <asm/unaligned.h>
+
+#define EN8811H_PHY_ID 0x03a2a411
+
+#define EN8811H_MD32_DM "airoha/EthMD32.dm.bin"
+#define EN8811H_MD32_DSP "airoha/EthMD32.DSP.bin"
+
+#define AIR_FW_ADDR_DM 0x00000000
+#define AIR_FW_ADDR_DSP 0x00100000
+
+/* MII Registers */
+#define AIR_AUX_CTRL_STATUS 0x1d
+#define AIR_AUX_CTRL_STATUS_SPEED_MASK GENMASK(4, 2)
+#define AIR_AUX_CTRL_STATUS_SPEED_100 0x4
+#define AIR_AUX_CTRL_STATUS_SPEED_1000 0x8
+#define AIR_AUX_CTRL_STATUS_SPEED_2500 0xc
+
+#define AIR_EXT_PAGE_ACCESS 0x1f
+#define AIR_PHY_PAGE_STANDARD 0x0000
+#define AIR_PHY_PAGE_EXTENDED_4 0x0004
+
+/* MII Registers Page 4 */
+#define AIR_BPBUS_MODE 0x10
+#define AIR_BPBUS_MODE_ADDR_FIXED 0x0000
+#define AIR_BPBUS_MODE_ADDR_INCR BIT(15)
+#define AIR_BPBUS_WR_ADDR_HIGH 0x11
+#define AIR_BPBUS_WR_ADDR_LOW 0x12
+#define AIR_BPBUS_WR_DATA_HIGH 0x13
+#define AIR_BPBUS_WR_DATA_LOW 0x14
+#define AIR_BPBUS_RD_ADDR_HIGH 0x15
+#define AIR_BPBUS_RD_ADDR_LOW 0x16
+#define AIR_BPBUS_RD_DATA_HIGH 0x17
+#define AIR_BPBUS_RD_DATA_LOW 0x18
+
+/* Registers on MDIO_MMD_VEND1 */
+#define EN8811H_PHY_FW_STATUS 0x8009
+#define EN8811H_PHY_READY 0x02
+
+#define AIR_PHY_MCU_CMD_1 0x800c
+#define AIR_PHY_MCU_CMD_1_MODE1 0x0
+#define AIR_PHY_MCU_CMD_2 0x800d
+#define AIR_PHY_MCU_CMD_2_MODE1 0x0
+#define AIR_PHY_MCU_CMD_3 0x800e
+#define AIR_PHY_MCU_CMD_3_MODE1 0x1101
+#define AIR_PHY_MCU_CMD_3_DOCMD 0x1100
+#define AIR_PHY_MCU_CMD_4 0x800f
+#define AIR_PHY_MCU_CMD_4_MODE1 0x0002
+#define AIR_PHY_MCU_CMD_4_INTCLR 0x00e4
+
+/* Registers on MDIO_MMD_VEND2 */
+#define AIR_PHY_LED_BCR 0x021
+#define AIR_PHY_LED_BCR_MODE_MASK GENMASK(1, 0)
+#define AIR_PHY_LED_BCR_TIME_TEST BIT(2)
+#define AIR_PHY_LED_BCR_CLK_EN BIT(3)
+#define AIR_PHY_LED_BCR_EXT_CTRL BIT(15)
+
+#define AIR_PHY_LED_DUR_ON 0x022
+
+#define AIR_PHY_LED_DUR_BLINK 0x023
+
+#define AIR_PHY_LED_ON(i) (0x024 + ((i) * 2))
+#define AIR_PHY_LED_ON_MASK (GENMASK(6, 0) | BIT(8))
+#define AIR_PHY_LED_ON_LINK1000 BIT(0)
+#define AIR_PHY_LED_ON_LINK100 BIT(1)
+#define AIR_PHY_LED_ON_LINK10 BIT(2)
+#define AIR_PHY_LED_ON_LINKDOWN BIT(3)
+#define AIR_PHY_LED_ON_FDX BIT(4) /* Full duplex */
+#define AIR_PHY_LED_ON_HDX BIT(5) /* Half duplex */
+#define AIR_PHY_LED_ON_FORCE_ON BIT(6)
+#define AIR_PHY_LED_ON_LINK2500 BIT(8)
+#define AIR_PHY_LED_ON_POLARITY BIT(14)
+#define AIR_PHY_LED_ON_ENABLE BIT(15)
+
+#define AIR_PHY_LED_BLINK(i) (0x025 + ((i) * 2))
+#define AIR_PHY_LED_BLINK_1000TX BIT(0)
+#define AIR_PHY_LED_BLINK_1000RX BIT(1)
+#define AIR_PHY_LED_BLINK_100TX BIT(2)
+#define AIR_PHY_LED_BLINK_100RX BIT(3)
+#define AIR_PHY_LED_BLINK_10TX BIT(4)
+#define AIR_PHY_LED_BLINK_10RX BIT(5)
+#define AIR_PHY_LED_BLINK_COLLISION BIT(6)
+#define AIR_PHY_LED_BLINK_RX_CRC_ERR BIT(7)
+#define AIR_PHY_LED_BLINK_RX_IDLE_ERR BIT(8)
+#define AIR_PHY_LED_BLINK_FORCE_BLINK BIT(9)
+#define AIR_PHY_LED_BLINK_2500TX BIT(10)
+#define AIR_PHY_LED_BLINK_2500RX BIT(11)
+
+/* Registers on BUCKPBUS */
+#define EN8811H_2P5G_LPA 0x3b30
+#define EN8811H_2P5G_LPA_2P5G BIT(0)
+
+#define EN8811H_FW_VERSION 0x3b3c
+
+#define EN8811H_POLARITY 0xca0f8
+#define EN8811H_POLARITY_TX_NORMAL BIT(0)
+#define EN8811H_POLARITY_RX_REVERSE BIT(1)
+
+#define EN8811H_GPIO_OUTPUT 0xcf8b8
+#define EN8811H_GPIO_OUTPUT_345 (BIT(3) | BIT(4) | BIT(5))
+
+#define EN8811H_FW_CTRL_1 0x0f0018
+#define EN8811H_FW_CTRL_1_START 0x0
+#define EN8811H_FW_CTRL_1_FINISH 0x1
+#define EN8811H_FW_CTRL_2 0x800000
+#define EN8811H_FW_CTRL_2_LOADING BIT(11)
+
+/* Led definitions */
+#define EN8811H_LED_COUNT 3
+
+/* Default LED setup:
+ * GPIO5 <-> LED0 On: Link detected, blink Rx/Tx
+ * GPIO4 <-> LED1 On: Link detected at 2500 or 1000 Mbps
+ * GPIO3 <-> LED2 On: Link detected at 2500 or 100 Mbps
+ */
+#define AIR_DEFAULT_TRIGGER_LED0 (BIT(TRIGGER_NETDEV_LINK) | \
+ BIT(TRIGGER_NETDEV_RX) | \
+ BIT(TRIGGER_NETDEV_TX))
+#define AIR_DEFAULT_TRIGGER_LED1 (BIT(TRIGGER_NETDEV_LINK_2500) | \
+ BIT(TRIGGER_NETDEV_LINK_1000))
+#define AIR_DEFAULT_TRIGGER_LED2 (BIT(TRIGGER_NETDEV_LINK_2500) | \
+ BIT(TRIGGER_NETDEV_LINK_100))
+
+struct led {
+ unsigned long rules;
+ unsigned long state;
+};
+
+struct en8811h_priv {
+ u32 firmware_version;
+ bool mcu_needs_restart;
+ struct led led[EN8811H_LED_COUNT];
+};
+
+enum {
+ AIR_PHY_LED_STATE_FORCE_ON,
+ AIR_PHY_LED_STATE_FORCE_BLINK,
+};
+
+enum {
+ AIR_PHY_LED_DUR_BLINK_32MS,
+ AIR_PHY_LED_DUR_BLINK_64MS,
+ AIR_PHY_LED_DUR_BLINK_128MS,
+ AIR_PHY_LED_DUR_BLINK_256MS,
+ AIR_PHY_LED_DUR_BLINK_512MS,
+ AIR_PHY_LED_DUR_BLINK_1024MS,
+};
+
+enum {
+ AIR_LED_DISABLE,
+ AIR_LED_ENABLE,
+};
+
+enum {
+ AIR_ACTIVE_LOW,
+ AIR_ACTIVE_HIGH,
+};
+
+enum {
+ AIR_LED_MODE_DISABLE,
+ AIR_LED_MODE_USER_DEFINE,
+};
+
+#define AIR_PHY_LED_DUR_UNIT 1024
+#define AIR_PHY_LED_DUR (AIR_PHY_LED_DUR_UNIT << AIR_PHY_LED_DUR_BLINK_64MS)
+
+static const unsigned long en8811h_led_trig = BIT(TRIGGER_NETDEV_FULL_DUPLEX) |
+ BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX);
+
+static int air_phy_read_page(struct phy_device *phydev)
+{
+ return __phy_read(phydev, AIR_EXT_PAGE_ACCESS);
+}
+
+static int air_phy_write_page(struct phy_device *phydev, int page)
+{
+ return __phy_write(phydev, AIR_EXT_PAGE_ACCESS, page);
+}
+
+static int __air_buckpbus_reg_write(struct phy_device *phydev,
+ u32 pbus_address, u32 pbus_data)
+{
+ int ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_MODE, AIR_BPBUS_MODE_ADDR_FIXED);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_HIGH,
+ upper_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_LOW,
+ lower_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_HIGH,
+ upper_16_bits(pbus_data));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_LOW,
+ lower_16_bits(pbus_data));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int air_buckpbus_reg_write(struct phy_device *phydev,
+ u32 pbus_address, u32 pbus_data)
+{
+ int saved_page;
+ int ret = 0;
+
+ saved_page = phy_select_page(phydev, AIR_PHY_PAGE_EXTENDED_4);
+
+ if (saved_page >= 0) {
+ ret = __air_buckpbus_reg_write(phydev, pbus_address,
+ pbus_data);
+ if (ret < 0)
+ phydev_err(phydev, "%s 0x%08x failed: %d\n", __func__,
+ pbus_address, ret);
+ }
+
+ return phy_restore_page(phydev, saved_page, ret);
+}
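
The BUCKPBUS accessors move 32-bit addresses and data over 16-bit MDIO registers, so every access is split into an upper and a lower half-word written to the HIGH/LOW register pair. A standalone sketch of that split, with upper_16_bits()/lower_16_bits() re-implemented for illustration:

#include <stdint.h>
#include <stdio.h>

static uint16_t upper_16(uint32_t v) { return v >> 16; }	/* upper_16_bits() stand-in */
static uint16_t lower_16(uint32_t v) { return v & 0xffff; }	/* lower_16_bits() stand-in */

int main(void)
{
	uint32_t addr = 0x000ca0f8;	/* EN8811H_POLARITY */
	uint32_t data = 0x00000002;	/* EN8811H_POLARITY_RX_REVERSE */

	printf("WR_ADDR_HIGH=0x%04x WR_ADDR_LOW=0x%04x\n",
	       upper_16(addr), lower_16(addr));
	printf("WR_DATA_HIGH=0x%04x WR_DATA_LOW=0x%04x\n",
	       upper_16(data), lower_16(data));
	return 0;
}
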
+
+static int __air_buckpbus_reg_read(struct phy_device *phydev,
+ u32 pbus_address, u32 *pbus_data)
+{
+ int pbus_data_low, pbus_data_high;
+ int ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_MODE, AIR_BPBUS_MODE_ADDR_FIXED);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_RD_ADDR_HIGH,
+ upper_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_RD_ADDR_LOW,
+ lower_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ pbus_data_high = __phy_read(phydev, AIR_BPBUS_RD_DATA_HIGH);
+ if (pbus_data_high < 0)
+ return pbus_data_high;
+
+ pbus_data_low = __phy_read(phydev, AIR_BPBUS_RD_DATA_LOW);
+ if (pbus_data_low < 0)
+ return pbus_data_low;
+
+ *pbus_data = pbus_data_low | (pbus_data_high << 16);
+ return 0;
+}
+
+static int air_buckpbus_reg_read(struct phy_device *phydev,
+ u32 pbus_address, u32 *pbus_data)
+{
+ int saved_page;
+ int ret = 0;
+
+ saved_page = phy_select_page(phydev, AIR_PHY_PAGE_EXTENDED_4);
+
+ if (saved_page >= 0) {
+ ret = __air_buckpbus_reg_read(phydev, pbus_address, pbus_data);
+ if (ret < 0)
+ phydev_err(phydev, "%s 0x%08x failed: %d\n", __func__,
+ pbus_address, ret);
+ }
+
+ return phy_restore_page(phydev, saved_page, ret);
+}
+
+static int __air_buckpbus_reg_modify(struct phy_device *phydev,
+ u32 pbus_address, u32 mask, u32 set)
+{
+ int pbus_data_low, pbus_data_high;
+ u32 pbus_data_old, pbus_data_new;
+ int ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_MODE, AIR_BPBUS_MODE_ADDR_FIXED);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_RD_ADDR_HIGH,
+ upper_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_RD_ADDR_LOW,
+ lower_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ pbus_data_high = __phy_read(phydev, AIR_BPBUS_RD_DATA_HIGH);
+ if (pbus_data_high < 0)
+ return pbus_data_high;
+
+ pbus_data_low = __phy_read(phydev, AIR_BPBUS_RD_DATA_LOW);
+ if (pbus_data_low < 0)
+ return pbus_data_low;
+
+ pbus_data_old = pbus_data_low | (pbus_data_high << 16);
+ pbus_data_new = (pbus_data_old & ~mask) | set;
+ if (pbus_data_new == pbus_data_old)
+ return 0;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_HIGH,
+ upper_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_LOW,
+ lower_16_bits(pbus_address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_HIGH,
+ upper_16_bits(pbus_data_new));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_LOW,
+ lower_16_bits(pbus_data_new));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int air_buckpbus_reg_modify(struct phy_device *phydev,
+ u32 pbus_address, u32 mask, u32 set)
+{
+ int saved_page;
+ int ret = 0;
+
+ saved_page = phy_select_page(phydev, AIR_PHY_PAGE_EXTENDED_4);
+
+ if (saved_page >= 0) {
+ ret = __air_buckpbus_reg_modify(phydev, pbus_address, mask,
+ set);
+ if (ret < 0)
+ phydev_err(phydev, "%s 0x%08x failed: %d\n", __func__,
+ pbus_address, ret);
+ }
+
+ return phy_restore_page(phydev, saved_page, ret);
+}
+
+static int __air_write_buf(struct phy_device *phydev, u32 address,
+ const struct firmware *fw)
+{
+ unsigned int offset;
+ int ret;
+ u16 val;
+
+ ret = __phy_write(phydev, AIR_BPBUS_MODE, AIR_BPBUS_MODE_ADDR_INCR);
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_HIGH,
+ upper_16_bits(address));
+ if (ret < 0)
+ return ret;
+
+ ret = __phy_write(phydev, AIR_BPBUS_WR_ADDR_LOW,
+ lower_16_bits(address));
+ if (ret < 0)
+ return ret;
+
+ for (offset = 0; offset < fw->size; offset += 4) {
+ val = get_unaligned_le16(&fw->data[offset + 2]);
+ ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_HIGH, val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_le16(&fw->data[offset]);
+ ret = __phy_write(phydev, AIR_BPBUS_WR_DATA_LOW, val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
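
The download loop streams the image four bytes per iteration with the bus in address-increment mode, writing the upper little-endian half-word before the lower one. A sketch of the resulting byte order for a single 4-byte group, assuming the usual get_unaligned_le16() semantics:

#include <stdint.h>
#include <stdio.h>

static uint16_t le16_at(const uint8_t *p)	/* get_unaligned_le16() stand-in */
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	const uint8_t fw[4] = { 0x11, 0x22, 0x33, 0x44 };

	/* written to AIR_BPBUS_WR_DATA_HIGH first, then ..._WR_DATA_LOW */
	printf("DATA_HIGH=0x%04x DATA_LOW=0x%04x\n",
	       le16_at(&fw[2]), le16_at(&fw[0]));	/* 0x4433, 0x2211 */
	return 0;
}
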
+
+static int air_write_buf(struct phy_device *phydev, u32 address,
+ const struct firmware *fw)
+{
+ int saved_page;
+ int ret = 0;
+
+ saved_page = phy_select_page(phydev, AIR_PHY_PAGE_EXTENDED_4);
+
+ if (saved_page >= 0) {
+ ret = __air_write_buf(phydev, address, fw);
+ if (ret < 0)
+ phydev_err(phydev, "%s 0x%08x failed: %d\n", __func__,
+ address, ret);
+ }
+
+ return phy_restore_page(phydev, saved_page, ret);
+}
+
+static int en8811h_wait_mcu_ready(struct phy_device *phydev)
+{
+ int ret, reg_value;
+
+ /* Because of mdio-lock, may have to wait for multiple loads */
+ ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ EN8811H_PHY_FW_STATUS, reg_value,
+ reg_value == EN8811H_PHY_READY,
+ 20000, 7500000, true);
+ if (ret) {
+ phydev_err(phydev, "MCU not ready: 0x%x\n", reg_value);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int en8811h_load_firmware(struct phy_device *phydev)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ const struct firmware *fw1, *fw2;
+ int ret;
+
+ ret = request_firmware_direct(&fw1, EN8811H_MD32_DM, dev);
+ if (ret < 0)
+ return ret;
+
+ ret = request_firmware_direct(&fw2, EN8811H_MD32_DSP, dev);
+ if (ret < 0)
+ goto en8811h_load_firmware_rel1;
+
+ ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1,
+ EN8811H_FW_CTRL_1_START);
+ if (ret < 0)
+ goto en8811h_load_firmware_out;
+
+ ret = air_buckpbus_reg_modify(phydev, EN8811H_FW_CTRL_2,
+ EN8811H_FW_CTRL_2_LOADING,
+ EN8811H_FW_CTRL_2_LOADING);
+ if (ret < 0)
+ goto en8811h_load_firmware_out;
+
+ ret = air_write_buf(phydev, AIR_FW_ADDR_DM, fw1);
+ if (ret < 0)
+ goto en8811h_load_firmware_out;
+
+ ret = air_write_buf(phydev, AIR_FW_ADDR_DSP, fw2);
+ if (ret < 0)
+ goto en8811h_load_firmware_out;
+
+ ret = air_buckpbus_reg_modify(phydev, EN8811H_FW_CTRL_2,
+ EN8811H_FW_CTRL_2_LOADING, 0);
+ if (ret < 0)
+ goto en8811h_load_firmware_out;
+
+ ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1,
+ EN8811H_FW_CTRL_1_FINISH);
+ if (ret < 0)
+ goto en8811h_load_firmware_out;
+
+ ret = en8811h_wait_mcu_ready(phydev);
+
+ air_buckpbus_reg_read(phydev, EN8811H_FW_VERSION,
+ &priv->firmware_version);
+ phydev_info(phydev, "MD32 firmware version: %08x\n",
+ priv->firmware_version);
+
+en8811h_load_firmware_out:
+ release_firmware(fw2);
+
+en8811h_load_firmware_rel1:
+ release_firmware(fw1);
+
+ if (ret < 0)
+ phydev_err(phydev, "Load firmware failed: %d\n", ret);
+
+ return ret;
+}
+
+static int en8811h_restart_mcu(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1,
+ EN8811H_FW_CTRL_1_START);
+ if (ret < 0)
+ return ret;
+
+ ret = air_buckpbus_reg_write(phydev, EN8811H_FW_CTRL_1,
+ EN8811H_FW_CTRL_1_FINISH);
+ if (ret < 0)
+ return ret;
+
+ return en8811h_wait_mcu_ready(phydev);
+}
+
+static int air_hw_led_on_set(struct phy_device *phydev, u8 index, bool on)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ bool changed;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ if (on)
+ changed = !test_and_set_bit(AIR_PHY_LED_STATE_FORCE_ON,
+ &priv->led[index].state);
+ else
+ changed = !!test_and_clear_bit(AIR_PHY_LED_STATE_FORCE_ON,
+ &priv->led[index].state);
+
+ changed |= (priv->led[index].rules != 0);
+
+ /* clear netdev trigger rules in case LED_OFF has been set */
+ if (!on)
+ priv->led[index].rules = 0;
+
+ if (changed)
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND2,
+ AIR_PHY_LED_ON(index),
+ AIR_PHY_LED_ON_MASK,
+ on ? AIR_PHY_LED_ON_FORCE_ON : 0);
+
+ return 0;
+}
+
+static int air_hw_led_blink_set(struct phy_device *phydev, u8 index,
+ bool blinking)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ bool changed;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ if (blinking)
+ changed = !test_and_set_bit(AIR_PHY_LED_STATE_FORCE_BLINK,
+ &priv->led[index].state);
+ else
+ changed = !!test_and_clear_bit(AIR_PHY_LED_STATE_FORCE_BLINK,
+ &priv->led[index].state);
+
+ changed |= (priv->led[index].rules != 0);
+
+ if (changed)
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2,
+ AIR_PHY_LED_BLINK(index),
+ blinking ?
+ AIR_PHY_LED_BLINK_FORCE_BLINK : 0);
+
+ return 0;
+}
+
+static int air_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ bool blinking = false;
+ int err;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ if (delay_on && delay_off && (*delay_on > 0) && (*delay_off > 0)) {
+ blinking = true;
+ *delay_on = 50;
+ *delay_off = 50;
+ }
+
+ err = air_hw_led_blink_set(phydev, index, blinking);
+ if (err)
+ return err;
+
+ /* led-blink set, so switch led-on off */
+ err = air_hw_led_on_set(phydev, index, false);
+ if (err)
+ return err;
+
+ /* hw-control is off */
+ if (!!test_bit(AIR_PHY_LED_STATE_FORCE_BLINK, &priv->led[index].state))
+ priv->led[index].rules = 0;
+
+ return 0;
+}
+
+static int air_led_brightness_set(struct phy_device *phydev, u8 index,
+ enum led_brightness value)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ int err;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ /* led-on set, so switch led-blink off */
+ err = air_hw_led_blink_set(phydev, index, false);
+ if (err)
+ return err;
+
+ err = air_hw_led_on_set(phydev, index, (value != LED_OFF));
+ if (err)
+ return err;
+
+ /* hw-control is off */
+ if (!!test_bit(AIR_PHY_LED_STATE_FORCE_ON, &priv->led[index].state))
+ priv->led[index].rules = 0;
+
+ return 0;
+}
+
+static int air_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ struct en8811h_priv *priv = phydev->priv;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ *rules = priv->led[index].rules;
+
+ return 0;
+}
+
+static int air_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ u16 on = 0, blink = 0;
+ int ret;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ priv->led[index].rules = rules;
+
+ if (rules & BIT(TRIGGER_NETDEV_FULL_DUPLEX))
+ on |= AIR_PHY_LED_ON_FDX;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_10) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= AIR_PHY_LED_ON_LINK10;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= AIR_PHY_LED_ON_LINK100;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= AIR_PHY_LED_ON_LINK1000;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK)))
+ on |= AIR_PHY_LED_ON_LINK2500;
+
+ if (rules & BIT(TRIGGER_NETDEV_RX)) {
+ blink |= AIR_PHY_LED_BLINK_10RX |
+ AIR_PHY_LED_BLINK_100RX |
+ AIR_PHY_LED_BLINK_1000RX |
+ AIR_PHY_LED_BLINK_2500RX;
+ }
+
+ if (rules & BIT(TRIGGER_NETDEV_TX)) {
+ blink |= AIR_PHY_LED_BLINK_10TX |
+ AIR_PHY_LED_BLINK_100TX |
+ AIR_PHY_LED_BLINK_1000TX |
+ AIR_PHY_LED_BLINK_2500TX;
+ }
+
+ if (blink || on) {
+ /* switch hw-control on, so led-on and led-blink are off */
+ clear_bit(AIR_PHY_LED_STATE_FORCE_ON,
+ &priv->led[index].state);
+ clear_bit(AIR_PHY_LED_STATE_FORCE_BLINK,
+ &priv->led[index].state);
+ } else {
+ priv->led[index].rules = 0;
+ }
+
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_ON(index),
+ AIR_PHY_LED_ON_MASK, on);
+
+ if (ret < 0)
+ return ret;
+
+ return phy_write_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_BLINK(index),
+ blink);
+}
+
+static int air_led_init(struct phy_device *phydev, u8 index, u8 state, u8 pol)
+{
+ int val = 0;
+ int err;
+
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ if (state == AIR_LED_ENABLE)
+ val |= AIR_PHY_LED_ON_ENABLE;
+ else
+ val &= ~AIR_PHY_LED_ON_ENABLE;
+
+ if (pol == AIR_ACTIVE_HIGH)
+ val |= AIR_PHY_LED_ON_POLARITY;
+ else
+ val &= ~AIR_PHY_LED_ON_POLARITY;
+
+ err = phy_modify_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_ON(index),
+ AIR_PHY_LED_ON_ENABLE |
+ AIR_PHY_LED_ON_POLARITY, val);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int air_leds_init(struct phy_device *phydev, int num, int dur, int mode)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ int ret, i;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_DUR_BLINK,
+ dur);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_DUR_ON,
+ dur >> 1);
+ if (ret < 0)
+ return ret;
+
+ switch (mode) {
+ case AIR_LED_MODE_DISABLE:
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_BCR,
+ AIR_PHY_LED_BCR_EXT_CTRL |
+ AIR_PHY_LED_BCR_MODE_MASK, 0);
+ if (ret < 0)
+ return ret;
+ break;
+ case AIR_LED_MODE_USER_DEFINE:
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, AIR_PHY_LED_BCR,
+ AIR_PHY_LED_BCR_EXT_CTRL |
+ AIR_PHY_LED_BCR_CLK_EN,
+ AIR_PHY_LED_BCR_EXT_CTRL |
+ AIR_PHY_LED_BCR_CLK_EN);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ phydev_err(phydev, "LED mode %d is not supported\n", mode);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num; ++i) {
+ ret = air_led_init(phydev, i, AIR_LED_ENABLE, AIR_ACTIVE_HIGH);
+ if (ret < 0) {
+ phydev_err(phydev, "LED%d init failed: %d\n", i, ret);
+ return ret;
+ }
+ air_led_hw_control_set(phydev, i, priv->led[i].rules);
+ }
+
+ return 0;
+}
+
+static int en8811h_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ if (index >= EN8811H_LED_COUNT)
+ return -EINVAL;
+
+ /* All combinations of the supported triggers are allowed */
+ if (rules & ~en8811h_led_trig)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
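
The support check is a pure bitmask test: a rule set is accepted only if every requested trigger is inside the advertised set. A trivial standalone sketch of that filter, with the trigger bits reduced to made-up indices:

#include <stdio.h>

int main(void)
{
	unsigned long supported = (1UL << 0) | (1UL << 1);	/* e.g. LINK, RX (assumed indices) */
	unsigned long rules = (1UL << 1) | (1UL << 7);		/* RX plus an unsupported trigger */

	if (rules & ~supported)
		puts("reject with -EOPNOTSUPP");
	else
		puts("rule set accepted");
	return 0;
}
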
+
+static int en8811h_probe(struct phy_device *phydev)
+{
+ struct en8811h_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(&phydev->mdio.dev, sizeof(struct en8811h_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ phydev->priv = priv;
+
+ ret = en8811h_load_firmware(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* MCU has just restarted after firmware load */
+ priv->mcu_needs_restart = false;
+
+ priv->led[0].rules = AIR_DEFAULT_TRIGGER_LED0;
+ priv->led[1].rules = AIR_DEFAULT_TRIGGER_LED1;
+ priv->led[2].rules = AIR_DEFAULT_TRIGGER_LED2;
+
+ /* MDIO_DEVS1/2 empty, so set mmds_present bits here */
+ phydev->c45_ids.mmds_present |= MDIO_DEVS_PMAPMD | MDIO_DEVS_AN;
+
+ ret = air_leds_init(phydev, EN8811H_LED_COUNT, AIR_PHY_LED_DUR,
+ AIR_LED_MODE_DISABLE);
+ if (ret < 0) {
+ phydev_err(phydev, "Failed to disable leds: %d\n", ret);
+ return ret;
+ }
+
+ /* Configure LED GPIO pins as output */
+ ret = air_buckpbus_reg_modify(phydev, EN8811H_GPIO_OUTPUT,
+ EN8811H_GPIO_OUTPUT_345,
+ EN8811H_GPIO_OUTPUT_345);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int en8811h_config_init(struct phy_device *phydev)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ u32 pbus_value;
+ int ret;
+
+ /* If restart happened in .probe(), no need to restart now */
+ if (priv->mcu_needs_restart) {
+ ret = en8811h_restart_mcu(phydev);
+ if (ret < 0)
+ return ret;
+ } else {
+ /* On subsequent calls to .config_init() the MCU needs to restart */
+ priv->mcu_needs_restart = true;
+ }
+
+ /* Select mode 1, the only mode supported.
+ * Configures the SerDes for 2500Base-X with rate adaptation
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_1,
+ AIR_PHY_MCU_CMD_1_MODE1);
+ if (ret < 0)
+ return ret;
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_2,
+ AIR_PHY_MCU_CMD_2_MODE1);
+ if (ret < 0)
+ return ret;
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_3,
+ AIR_PHY_MCU_CMD_3_MODE1);
+ if (ret < 0)
+ return ret;
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_4,
+ AIR_PHY_MCU_CMD_4_MODE1);
+ if (ret < 0)
+ return ret;
+
+ /* Serdes polarity */
+ pbus_value = 0;
+ if (device_property_read_bool(dev, "airoha,pnswap-rx"))
+ pbus_value |= EN8811H_POLARITY_RX_REVERSE;
+ else
+ pbus_value &= ~EN8811H_POLARITY_RX_REVERSE;
+ if (device_property_read_bool(dev, "airoha,pnswap-tx"))
+ pbus_value &= ~EN8811H_POLARITY_TX_NORMAL;
+ else
+ pbus_value |= EN8811H_POLARITY_TX_NORMAL;
+ ret = air_buckpbus_reg_modify(phydev, EN8811H_POLARITY,
+ EN8811H_POLARITY_RX_REVERSE |
+ EN8811H_POLARITY_TX_NORMAL, pbus_value);
+ if (ret < 0)
+ return ret;
+
+ ret = air_leds_init(phydev, EN8811H_LED_COUNT, AIR_PHY_LED_DUR,
+ AIR_LED_MODE_USER_DEFINE);
+ if (ret < 0) {
+ phydev_err(phydev, "Failed to initialize leds: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int en8811h_get_features(struct phy_device *phydev)
+{
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ phydev->supported);
+
+ return genphy_c45_pma_read_abilities(phydev);
+}
+
+static int en8811h_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ return RATE_MATCH_PAUSE;
+}
+
+static int en8811h_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ int ret;
+ u32 adv;
+
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ phydev_warn(phydev, "Disabling autoneg is not supported\n");
+ return -EINVAL;
+ }
+
+ adv = linkmode_adv_to_mii_10gbt_adv_t(phydev->advertising);
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV2_5G, adv);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return __genphy_config_aneg(phydev, changed);
+}
+
+static int en8811h_read_status(struct phy_device *phydev)
+{
+ struct en8811h_priv *priv = phydev->priv;
+ u32 pbus_value;
+ int ret, val;
+
+ ret = genphy_update_link(phydev);
+ if (ret)
+ return ret;
+
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNSUPPORTED;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNSUPPORTED;
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+ phydev->rate_matching = RATE_MATCH_PAUSE;
+
+ ret = genphy_read_master_slave(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_read_lpa(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Get link partner 2.5GBASE-T ability from vendor register */
+ ret = air_buckpbus_reg_read(phydev, EN8811H_2P5G_LPA, &pbus_value);
+ if (ret < 0)
+ return ret;
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->lp_advertising,
+ pbus_value & EN8811H_2P5G_LPA_2P5G);
+
+ if (phydev->autoneg_complete)
+ phy_resolve_aneg_pause(phydev);
+
+ if (!phydev->link)
+ return 0;
+
+ /* Get real speed from vendor register */
+ val = phy_read(phydev, AIR_AUX_CTRL_STATUS);
+ if (val < 0)
+ return val;
+ switch (val & AIR_AUX_CTRL_STATUS_SPEED_MASK) {
+ case AIR_AUX_CTRL_STATUS_SPEED_2500:
+ phydev->speed = SPEED_2500;
+ break;
+ case AIR_AUX_CTRL_STATUS_SPEED_1000:
+ phydev->speed = SPEED_1000;
+ break;
+ case AIR_AUX_CTRL_STATUS_SPEED_100:
+ phydev->speed = SPEED_100;
+ break;
+ }
+
+ /* Firmware before version 24011202 has no vendor register 2P5G_LPA.
+ * Assume link partner advertised it if connected at 2500Mbps.
+ */
+ if (priv->firmware_version < 0x24011202) {
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ phydev->lp_advertising,
+ phydev->speed == SPEED_2500);
+ }
+
+ /* Only supports full duplex */
+ phydev->duplex = DUPLEX_FULL;
+
+ return 0;
+}
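
Because the PHY always runs rate adaptation toward the 2500Base-X host side, the real wire speed is recovered from the vendor AUX status register rather than from the standard registers. A sketch of the 3-bit field decode, with the constants copied from the defines near the top of this file:

#include <stdint.h>
#include <stdio.h>

#define SPEED_MASK	0x1c	/* AIR_AUX_CTRL_STATUS_SPEED_MASK, GENMASK(4, 2) */
#define SPEED_100	0x4
#define SPEED_1000	0x8
#define SPEED_2500	0xc

static int decode_speed(uint16_t aux)
{
	switch (aux & SPEED_MASK) {
	case SPEED_100:  return 100;
	case SPEED_1000: return 1000;
	case SPEED_2500: return 2500;
	default:         return -1;	/* unknown/invalid */
	}
}

int main(void)
{
	printf("%d Mbps\n", decode_speed(0x0c));	/* prints 2500 */
	return 0;
}
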
+
+static int en8811h_clear_intr(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_3,
+ AIR_PHY_MCU_CMD_3_DOCMD);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, AIR_PHY_MCU_CMD_4,
+ AIR_PHY_MCU_CMD_4_INTCLR);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static irqreturn_t en8811h_handle_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = en8811h_clear_intr(phydev);
+ if (ret < 0) {
+ phy_error(phydev);
+ return IRQ_NONE;
+ }
+
+ phy_trigger_machine(phydev);
+
+ return IRQ_HANDLED;
+}
+
+static struct phy_driver en8811h_driver[] = {
+{
+ PHY_ID_MATCH_MODEL(EN8811H_PHY_ID),
+ .name = "Airoha EN8811H",
+ .probe = en8811h_probe,
+ .get_features = en8811h_get_features,
+ .config_init = en8811h_config_init,
+ .get_rate_matching = en8811h_get_rate_matching,
+ .config_aneg = en8811h_config_aneg,
+ .read_status = en8811h_read_status,
+ .config_intr = en8811h_clear_intr,
+ .handle_interrupt = en8811h_handle_interrupt,
+ .led_hw_is_supported = en8811h_led_hw_is_supported,
+ .read_page = air_phy_read_page,
+ .write_page = air_phy_write_page,
+ .led_blink_set = air_led_blink_set,
+ .led_brightness_set = air_led_brightness_set,
+ .led_hw_control_set = air_led_hw_control_set,
+ .led_hw_control_get = air_led_hw_control_get,
+} };
+
+module_phy_driver(en8811h_driver);
+
+static struct mdio_device_id __maybe_unused en8811h_tbl[] = {
+ { PHY_ID_MATCH_MODEL(EN8811H_PHY_ID) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, en8811h_tbl);
+MODULE_FIRMWARE(EN8811H_MD32_DM);
+MODULE_FIRMWARE(EN8811H_MD32_DSP);
+
+MODULE_DESCRIPTION("Airoha EN8811H PHY drivers");
+MODULE_AUTHOR("Airoha");
+MODULE_AUTHOR("Eric Woudstra <ericwouds@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/aquantia/Makefile b/drivers/net/phy/aquantia/Makefile
index aa77fb63c8ec..c6c4d494ee2a 100644
--- a/drivers/net/phy/aquantia/Makefile
+++ b/drivers/net/phy/aquantia/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-aquantia-objs += aquantia_main.o aquantia_firmware.o
+aquantia-objs += aquantia_main.o aquantia_firmware.o aquantia_leds.o
ifdef CONFIG_HWMON
aquantia-objs += aquantia_hwmon.o
endif
diff --git a/drivers/net/phy/aquantia/aquantia.h b/drivers/net/phy/aquantia/aquantia.h
index 1c19ae74ad2b..2465345081f8 100644
--- a/drivers/net/phy/aquantia/aquantia.h
+++ b/drivers/net/phy/aquantia/aquantia.h
@@ -6,6 +6,9 @@
* Author: Heiner Kallweit <hkallweit1@gmail.com>
*/
+#ifndef AQUANTIA_H
+#define AQUANTIA_H
+
#include <linux/device.h>
#include <linux/phy.h>
@@ -63,6 +66,28 @@
#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD BIT(6)
#define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL BIT(0)
+#define VEND1_GLOBAL_LED_PROV 0xc430
+#define AQR_LED_PROV(x) (VEND1_GLOBAL_LED_PROV + (x))
+#define VEND1_GLOBAL_LED_PROV_LINK2500 BIT(14)
+#define VEND1_GLOBAL_LED_PROV_LINK5000 BIT(15)
+#define VEND1_GLOBAL_LED_PROV_FORCE_ON BIT(8)
+#define VEND1_GLOBAL_LED_PROV_LINK10000 BIT(7)
+#define VEND1_GLOBAL_LED_PROV_LINK1000 BIT(6)
+#define VEND1_GLOBAL_LED_PROV_LINK100 BIT(5)
+#define VEND1_GLOBAL_LED_PROV_RX_ACT BIT(3)
+#define VEND1_GLOBAL_LED_PROV_TX_ACT BIT(2)
+#define VEND1_GLOBAL_LED_PROV_ACT_STRETCH GENMASK(1, 0)
+
+#define VEND1_GLOBAL_LED_PROV_LINK_MASK (VEND1_GLOBAL_LED_PROV_LINK100 | \
+ VEND1_GLOBAL_LED_PROV_LINK1000 | \
+ VEND1_GLOBAL_LED_PROV_LINK10000 | \
+ VEND1_GLOBAL_LED_PROV_LINK5000 | \
+ VEND1_GLOBAL_LED_PROV_LINK2500)
+
+#define VEND1_GLOBAL_LED_DRIVE 0xc438
+#define VEND1_GLOBAL_LED_DRIVE_VDD BIT(1)
+#define AQR_LED_DRIVE(x) (VEND1_GLOBAL_LED_DRIVE + (x))
+
#define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL 0xc421
#define VEND1_THERMAL_PROV_LOW_TEMP_FAIL 0xc422
#define VEND1_THERMAL_PROV_HIGH_TEMP_WARN 0xc423
@@ -87,6 +112,18 @@
#define VEND1_GLOBAL_RSVD_STAT9_MODE GENMASK(7, 0)
#define VEND1_GLOBAL_RSVD_STAT9_1000BT2 0x23
+/* MDIO_MMD_C22EXT */
+#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES 0xd292
+#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES 0xd294
+#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER 0xd297
+#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES 0xd313
+#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES 0xd315
+#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER 0xd317
+#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS 0xd318
+#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS 0xd319
+#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR 0xd31a
+#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES 0xd31b
+
#define VEND1_GLOBAL_INT_STD_STATUS 0xfc00
#define VEND1_GLOBAL_INT_VEND_STATUS 0xfc01
@@ -113,6 +150,35 @@
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1)
#define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0)
+#define AQR_MAX_LEDS 3
+
+struct aqr107_hw_stat {
+ const char *name;
+ int reg;
+ int size;
+};
+
+#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s }
+static const struct aqr107_hw_stat aqr107_hw_stats[] = {
+ SGMII_STAT("sgmii_rx_good_frames", RX_GOOD_FRAMES, 26),
+ SGMII_STAT("sgmii_rx_bad_frames", RX_BAD_FRAMES, 26),
+ SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER, 8),
+ SGMII_STAT("sgmii_tx_good_frames", TX_GOOD_FRAMES, 26),
+ SGMII_STAT("sgmii_tx_bad_frames", TX_BAD_FRAMES, 26),
+ SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER, 8),
+ SGMII_STAT("sgmii_tx_collisions", TX_COLLISIONS, 8),
+ SGMII_STAT("sgmii_tx_line_collisions", TX_LINE_COLLISIONS, 8),
+ SGMII_STAT("sgmii_tx_frame_alignment_err", TX_FRAME_ALIGN_ERR, 16),
+ SGMII_STAT("sgmii_tx_runt_frames", TX_RUNT_FRAMES, 22),
+};
+
+#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
+
+struct aqr107_priv {
+ u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
+ unsigned long leds_active_low;
+};
+
#if IS_REACHABLE(CONFIG_HWMON)
int aqr_hwmon_probe(struct phy_device *phydev);
#else
@@ -120,3 +186,21 @@ static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; }
#endif
int aqr_firmware_load(struct phy_device *phydev);
+
+int aqr_phy_led_blink_set(struct phy_device *phydev, u8 index,
+ unsigned long *delay_on,
+ unsigned long *delay_off);
+int aqr_phy_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value);
+int aqr_phy_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules);
+int aqr_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules);
+int aqr_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules);
+int aqr_phy_led_active_low_set(struct phy_device *phydev, int index, bool enable);
+int aqr_phy_led_polarity_set(struct phy_device *phydev, int index,
+ unsigned long modes);
+int aqr_wait_reset_complete(struct phy_device *phydev);
+
+#endif /* AQUANTIA_H */
diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c
index 0c9640ef153b..524627a36c6f 100644
--- a/drivers/net/phy/aquantia/aquantia_firmware.c
+++ b/drivers/net/phy/aquantia/aquantia_firmware.c
@@ -353,6 +353,10 @@ int aqr_firmware_load(struct phy_device *phydev)
{
int ret;
+ ret = aqr_wait_reset_complete(phydev);
+ if (ret)
+ return ret;
+
/* Check if the firmware is not already loaded by polling
* the current version returned by the PHY. If 0 is returned,
* no firmware is loaded.
diff --git a/drivers/net/phy/aquantia/aquantia_leds.c b/drivers/net/phy/aquantia/aquantia_leds.c
new file mode 100644
index 000000000000..0516ac02c3f8
--- /dev/null
+++ b/drivers/net/phy/aquantia/aquantia_leds.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/* LED driver for Aquantia PHY
+ *
+ * Author: Daniel Golle <daniel@makrotopia.org>
+ */
+
+#include <linux/phy.h>
+
+#include "aquantia.h"
+
+int aqr_phy_led_brightness_set(struct phy_device *phydev,
+ u8 index, enum led_brightness value)
+{
+ if (index >= AQR_MAX_LEDS)
+ return -EINVAL;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_PROV(index),
+ VEND1_GLOBAL_LED_PROV_LINK_MASK |
+ VEND1_GLOBAL_LED_PROV_FORCE_ON |
+ VEND1_GLOBAL_LED_PROV_RX_ACT |
+ VEND1_GLOBAL_LED_PROV_TX_ACT,
+ value ? VEND1_GLOBAL_LED_PROV_FORCE_ON : 0);
+}
+
+static const unsigned long supported_triggers = (BIT(TRIGGER_NETDEV_LINK) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_LINK_2500) |
+ BIT(TRIGGER_NETDEV_LINK_5000) |
+ BIT(TRIGGER_NETDEV_LINK_10000) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX));
+
+int aqr_phy_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ if (index >= AQR_MAX_LEDS)
+ return -EINVAL;
+
+ /* All combinations of the supported triggers are allowed */
+ if (rules & ~supported_triggers)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+int aqr_phy_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ if (index >= AQR_MAX_LEDS)
+ return -EINVAL;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_PROV(index));
+ if (val < 0)
+ return val;
+
+ *rules = 0;
+ if (val & VEND1_GLOBAL_LED_PROV_LINK100)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_100);
+
+ if (val & VEND1_GLOBAL_LED_PROV_LINK1000)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_1000);
+
+ if (val & VEND1_GLOBAL_LED_PROV_LINK2500)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_2500);
+
+ if (val & VEND1_GLOBAL_LED_PROV_LINK5000)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_5000);
+
+ if (val & VEND1_GLOBAL_LED_PROV_LINK10000)
+ *rules |= BIT(TRIGGER_NETDEV_LINK_10000);
+
+ if (val & VEND1_GLOBAL_LED_PROV_RX_ACT)
+ *rules |= BIT(TRIGGER_NETDEV_RX);
+
+ if (val & VEND1_GLOBAL_LED_PROV_TX_ACT)
+ *rules |= BIT(TRIGGER_NETDEV_TX);
+
+ return 0;
+}
+
+int aqr_phy_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ u16 val = 0;
+
+ if (index >= AQR_MAX_LEDS)
+ return -EINVAL;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK)))
+ val |= VEND1_GLOBAL_LED_PROV_LINK100;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK)))
+ val |= VEND1_GLOBAL_LED_PROV_LINK1000;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK)))
+ val |= VEND1_GLOBAL_LED_PROV_LINK2500;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_5000) | BIT(TRIGGER_NETDEV_LINK)))
+ val |= VEND1_GLOBAL_LED_PROV_LINK5000;
+
+ if (rules & (BIT(TRIGGER_NETDEV_LINK_10000) | BIT(TRIGGER_NETDEV_LINK)))
+ val |= VEND1_GLOBAL_LED_PROV_LINK10000;
+
+ if (rules & BIT(TRIGGER_NETDEV_RX))
+ val |= VEND1_GLOBAL_LED_PROV_RX_ACT;
+
+ if (rules & BIT(TRIGGER_NETDEV_TX))
+ val |= VEND1_GLOBAL_LED_PROV_TX_ACT;
+
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_PROV(index),
+ VEND1_GLOBAL_LED_PROV_LINK_MASK |
+ VEND1_GLOBAL_LED_PROV_FORCE_ON |
+ VEND1_GLOBAL_LED_PROV_RX_ACT |
+ VEND1_GLOBAL_LED_PROV_TX_ACT, val);
+}
+
+int aqr_phy_led_active_low_set(struct phy_device *phydev, int index, bool enable)
+{
+ return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_DRIVE(index),
+ VEND1_GLOBAL_LED_DRIVE_VDD,
+ enable ? VEND1_GLOBAL_LED_DRIVE_VDD : 0);
+}
+
+int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long modes)
+{
+ struct aqr107_priv *priv = phydev->priv;
+ bool active_low = false;
+ u32 mode;
+
+ if (index >= AQR_MAX_LEDS)
+ return -EINVAL;
+
+ for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) {
+ switch (mode) {
+ case PHY_LED_ACTIVE_LOW:
+ active_low = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ /* Save LED driver vdd state to restore on SW reset */
+ if (active_low)
+ priv->leds_active_low |= BIT(index);
+
+ return aqr_phy_led_active_low_set(phydev, index, active_low);
+}
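
aqr_phy_led_polarity_set() only records the active-low request in the priv->leds_active_low bitmap; aqr107_config_init() walks that bitmap to re-apply the polarity after a reset has cleared the LED_DRIVE registers. A standalone sketch of the restore walk, with for_each_set_bit() replaced by a plain loop:

#include <stdio.h>

#define AQR_MAX_LEDS 3

int main(void)
{
	unsigned long leds_active_low = 0x5;	/* LED0 and LED2 marked active-low */

	for (int index = 0; index < AQR_MAX_LEDS; index++)
		if (leds_active_low & (1UL << index))
			printf("restore LED%d: set LED_DRIVE_VDD (active low)\n", index);
	return 0;
}
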
diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c
index 71bfddb8f453..e982e9ce44a5 100644
--- a/drivers/net/phy/aquantia/aquantia_main.c
+++ b/drivers/net/phy/aquantia/aquantia_main.c
@@ -28,6 +28,8 @@
#define PHY_ID_AQR412 0x03a1b712
#define PHY_ID_AQR113 0x31c31c40
#define PHY_ID_AQR113C 0x31c31c12
+#define PHY_ID_AQR114C 0x31c31c22
+#define PHY_ID_AQR115C 0x31c31c33
#define PHY_ID_AQR813 0x31c31cb2
#define MDIO_PHYXS_VEND_IF_STATUS 0xe812
@@ -83,49 +85,12 @@
#define MDIO_AN_RX_VEND_STAT3 0xe832
#define MDIO_AN_RX_VEND_STAT3_AFR BIT(0)
-/* MDIO_MMD_C22EXT */
-#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES 0xd292
-#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES 0xd294
-#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER 0xd297
-#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES 0xd313
-#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES 0xd315
-#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER 0xd317
-#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS 0xd318
-#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS 0xd319
-#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR 0xd31a
-#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES 0xd31b
-
/* Sleep and timeout for checking if the Processor-Intensive
* MDIO operation is finished
*/
#define AQR107_OP_IN_PROG_SLEEP 1000
#define AQR107_OP_IN_PROG_TIMEOUT 100000
-struct aqr107_hw_stat {
- const char *name;
- int reg;
- int size;
-};
-
-#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s }
-static const struct aqr107_hw_stat aqr107_hw_stats[] = {
- SGMII_STAT("sgmii_rx_good_frames", RX_GOOD_FRAMES, 26),
- SGMII_STAT("sgmii_rx_bad_frames", RX_BAD_FRAMES, 26),
- SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER, 8),
- SGMII_STAT("sgmii_tx_good_frames", TX_GOOD_FRAMES, 26),
- SGMII_STAT("sgmii_tx_bad_frames", TX_BAD_FRAMES, 26),
- SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER, 8),
- SGMII_STAT("sgmii_tx_collisions", TX_COLLISIONS, 8),
- SGMII_STAT("sgmii_tx_line_collisions", TX_LINE_COLLISIONS, 8),
- SGMII_STAT("sgmii_tx_frame_alignment_err", TX_FRAME_ALIGN_ERR, 16),
- SGMII_STAT("sgmii_tx_runt_frames", TX_RUNT_FRAMES, 22),
-};
-#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats)
-
-struct aqr107_priv {
- u64 sgmii_stats[AQR107_SGMII_STAT_SZ];
-};
-
static int aqr107_get_sset_count(struct phy_device *phydev)
{
return AQR107_SGMII_STAT_SZ;
@@ -477,7 +442,7 @@ static int aqr107_set_tunable(struct phy_device *phydev,
* The chip also provides a "reset completed" bit, but it's cleared after
* read. Therefore function would time out if called again.
*/
-static int aqr107_wait_reset_complete(struct phy_device *phydev)
+int aqr_wait_reset_complete(struct phy_device *phydev)
{
int val;
@@ -511,7 +476,9 @@ static void aqr107_chip_info(struct phy_device *phydev)
static int aqr107_config_init(struct phy_device *phydev)
{
- int ret;
+ struct aqr107_priv *priv = phydev->priv;
+ u32 led_active_low;
+ int ret;
/* Check that the PHY interface type is compatible */
if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
@@ -528,11 +495,23 @@ static int aqr107_config_init(struct phy_device *phydev)
WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII,
"Your devicetree is out of date, please update it. The AQR107 family doesn't support XGMII, maybe you mean USXGMII.\n");
- ret = aqr107_wait_reset_complete(phydev);
+ ret = aqr_wait_reset_complete(phydev);
if (!ret)
aqr107_chip_info(phydev);
- return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+ ret = aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+ if (ret)
+ return ret;
+
+ /* Restore LED polarity state after reset */
+ for_each_set_bit(led_active_low, &priv->leds_active_low, AQR_MAX_LEDS) {
+ ret = aqr_phy_led_active_low_set(phydev, led_active_low, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int aqcs109_config_init(struct phy_device *phydev)
@@ -544,7 +523,7 @@ static int aqcs109_config_init(struct phy_device *phydev)
phydev->interface != PHY_INTERFACE_MODE_2500BASEX)
return -ENODEV;
- ret = aqr107_wait_reset_complete(phydev);
+ ret = aqr_wait_reset_complete(phydev);
if (!ret)
aqr107_chip_info(phydev);
@@ -723,6 +702,25 @@ static int aqr107_fill_interface_modes(struct phy_device *phydev)
return 0;
}
+static int aqr113c_fill_interface_modes(struct phy_device *phydev)
+{
+ int val, ret;
+
+ /* It's been observed on some models that, when coming out of suspend,
+ * the FW signals that the PHY is ready but the GLOBAL_CFG registers
+ * keep returning zeroes for some time. Let's poll the 100M register
+ * until it returns a real value, as both 113c and 115c support this
+ * mode.
+ */
+ ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1,
+ VEND1_GLOBAL_CFG_100M, val, val != 0,
+ 1000, 100000, false);
+ if (ret)
+ return ret;
+
+ return aqr107_fill_interface_modes(phydev);
+}
+
static int aqr113c_config_init(struct phy_device *phydev)
{
int ret;
@@ -740,7 +738,7 @@ static int aqr113c_config_init(struct phy_device *phydev)
if (ret)
return ret;
- return aqr107_fill_interface_modes(phydev);
+ return aqr113c_fill_interface_modes(phydev);
}
static int aqr107_probe(struct phy_device *phydev)
@@ -822,6 +820,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
@@ -841,6 +844,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR111),
@@ -860,6 +868,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0),
@@ -879,6 +892,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR405),
@@ -905,6 +923,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR412),
@@ -942,6 +965,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR113C),
@@ -961,6 +989,59 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR114C),
+ .name = "Aquantia AQR114C",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr111_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_AQR115C),
+ .name = "Aquantia AQR115C",
+ .probe = aqr107_probe,
+ .get_rate_matching = aqr107_get_rate_matching,
+ .config_init = aqr113c_config_init,
+ .config_aneg = aqr_config_aneg,
+ .config_intr = aqr_config_intr,
+ .handle_interrupt = aqr_handle_interrupt,
+ .read_status = aqr107_read_status,
+ .get_tunable = aqr107_get_tunable,
+ .set_tunable = aqr107_set_tunable,
+ .suspend = aqr107_suspend,
+ .resume = aqr107_resume,
+ .get_sset_count = aqr107_get_sset_count,
+ .get_strings = aqr107_get_strings,
+ .get_stats = aqr107_get_stats,
+ .link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
{
PHY_ID_MATCH_MODEL(PHY_ID_AQR813),
@@ -980,6 +1061,11 @@ static struct phy_driver aqr_driver[] = {
.get_strings = aqr107_get_strings,
.get_stats = aqr107_get_stats,
.link_change_notify = aqr107_link_change_notify,
+ .led_brightness_set = aqr_phy_led_brightness_set,
+ .led_hw_is_supported = aqr_phy_led_hw_is_supported,
+ .led_hw_control_set = aqr_phy_led_hw_control_set,
+ .led_hw_control_get = aqr_phy_led_hw_control_get,
+ .led_polarity_set = aqr_phy_led_polarity_set,
},
};
@@ -999,6 +1085,8 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = {
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR412) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR114C) },
+ { PHY_ID_MATCH_MODEL(PHY_ID_AQR115C) },
{ PHY_ID_MATCH_MODEL(PHY_ID_AQR813) },
{ }
};
diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
index 876f28fd8256..6c52f7dda514 100644
--- a/drivers/net/phy/bcm-phy-lib.c
+++ b/drivers/net/phy/bcm-phy-lib.c
@@ -794,6 +794,49 @@ out:
return ret;
}
+static int bcm_setup_lre_forced(struct phy_device *phydev)
+{
+ u16 ctl = 0;
+
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ if (phydev->speed == SPEED_100)
+ ctl |= LRECR_SPEED100;
+
+ if (phydev->duplex != DUPLEX_FULL)
+ return -EOPNOTSUPP;
+
+ return phy_modify(phydev, MII_BCM54XX_LRECR, LRECR_SPEED100, ctl);
+}
+
+/**
+ * bcm_linkmode_adv_to_lre_adv_t - translate linkmode advertisement to LDS
+ * @advertising: the linkmode advertisement settings
+ * Return: LDS Auto-Negotiation Advertised Ability register value
+ *
+ * A small helper function that translates linkmode advertisement
+ * settings to phy LDS autonegotiation advertisements for the
+ * MII_BCM54XX_LREANAA register of Broadcom PHYs capable of LDS
+ */
+static u32 bcm_linkmode_adv_to_lre_adv_t(unsigned long *advertising)
+{
+ u32 result = 0;
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT,
+ advertising))
+ result |= LREANAA_10_1PAIR;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ advertising))
+ result |= LREANAA_100_1PAIR;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising))
+ result |= LRELPA_PAUSE;
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising))
+ result |= LRELPA_PAUSE_ASYM;
+
+ return result;
+}
+
int bcm_phy_cable_test_start(struct phy_device *phydev)
{
return _bcm_phy_cable_test_start(phydev, false);
@@ -1066,6 +1109,78 @@ int bcm_phy_led_brightness_set(struct phy_device *phydev,
}
EXPORT_SYMBOL_GPL(bcm_phy_led_brightness_set);
+int bcm_setup_lre_master_slave(struct phy_device *phydev)
+{
+ u16 ctl = 0;
+
+ switch (phydev->master_slave_set) {
+ case MASTER_SLAVE_CFG_MASTER_PREFERRED:
+ case MASTER_SLAVE_CFG_MASTER_FORCE:
+ ctl = LRECR_MASTER;
+ break;
+ case MASTER_SLAVE_CFG_SLAVE_PREFERRED:
+ case MASTER_SLAVE_CFG_SLAVE_FORCE:
+ break;
+ case MASTER_SLAVE_CFG_UNKNOWN:
+ case MASTER_SLAVE_CFG_UNSUPPORTED:
+ return 0;
+ default:
+ phydev_warn(phydev, "Unsupported Master/Slave mode\n");
+ return -EOPNOTSUPP;
+ }
+
+ return phy_modify_changed(phydev, MII_BCM54XX_LRECR, LRECR_MASTER, ctl);
+}
+EXPORT_SYMBOL_GPL(bcm_setup_lre_master_slave);
+
+int bcm_config_lre_aneg(struct phy_device *phydev, bool changed)
+{
+ int err;
+
+ if (genphy_config_eee_advert(phydev))
+ changed = true;
+
+ err = bcm_setup_lre_master_slave(phydev);
+ if (err < 0)
+ return err;
+ else if (err)
+ changed = true;
+
+ if (phydev->autoneg != AUTONEG_ENABLE)
+ return bcm_setup_lre_forced(phydev);
+
+ err = bcm_config_lre_advert(phydev);
+ if (err < 0)
+ return err;
+ else if (err)
+ changed = true;
+
+ return genphy_check_and_restart_aneg(phydev, changed);
+}
+EXPORT_SYMBOL_GPL(bcm_config_lre_aneg);
+
+/**
+ * bcm_config_lre_advert - sanitize and advertise Long-Distance Signaling
+ * auto-negotiation parameters
+ * @phydev: target phy_device struct
+ * Return: 0 if the PHY's advertisement hasn't changed, < 0 on error,
+ * > 0 if it has changed
+ *
+ * Writes MII_BCM54XX_LREANAA with the appropriate values. The values must be
+ * sanitized beforehand, to make sure we only advertise what is supported.
+ * The sanitization is done already in phy_ethtool_ksettings_set()
+ */
+int bcm_config_lre_advert(struct phy_device *phydev)
+{
+ u32 adv = bcm_linkmode_adv_to_lre_adv_t(phydev->advertising);
+
+ /* Setup BroadR-Reach mode advertisement */
+ return phy_modify_changed(phydev, MII_BCM54XX_LREANAA,
+ LRE_ADVERTISE_ALL | LREANAA_PAUSE |
+ LREANAA_PAUSE_ASYM, adv);
+}
+EXPORT_SYMBOL_GPL(bcm_config_lre_advert);
+
MODULE_DESCRIPTION("Broadcom PHY Library");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Broadcom Corporation");
diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
index b52189e45a84..bceddbc860eb 100644
--- a/drivers/net/phy/bcm-phy-lib.h
+++ b/drivers/net/phy/bcm-phy-lib.h
@@ -121,4 +121,8 @@ irqreturn_t bcm_phy_wol_isr(int irq, void *dev_id);
int bcm_phy_led_brightness_set(struct phy_device *phydev,
u8 index, enum led_brightness value);
+int bcm_setup_lre_master_slave(struct phy_device *phydev);
+int bcm_config_lre_aneg(struct phy_device *phydev, bool changed);
+int bcm_config_lre_advert(struct phy_device *phydev);
+
#endif /* _LINUX_BCM_PHY_LIB_H */
diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c
index 617d384d4551..874a1b64b115 100644
--- a/drivers/net/phy/bcm-phy-ptp.c
+++ b/drivers/net/phy/bcm-phy-ptp.c
@@ -841,7 +841,7 @@ static int bcm_ptp_hwtstamp(struct mii_timestamper *mii_ts,
}
static int bcm_ptp_ts_info(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct bcm_ptp_private *priv = mii2priv(mii_ts);
@@ -931,6 +931,9 @@ struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev)
return ERR_CAST(clock);
priv->ptp_clock = clock;
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
+
priv->phydev = phydev;
bcm_ptp_init(priv);
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 370e4ed45098..ddded162c44c 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -5,6 +5,8 @@
* Broadcom BCM5411, BCM5421 and BCM5461 Gigabit Ethernet
* transceivers.
*
+ * Broadcom BCM54810 and BCM54811 BroadR-Reach transceivers.
+ *
* Copyright (c) 2006 Maciej W. Rozycki
*
* Inspired by code written by Amy Fong.
@@ -36,6 +38,29 @@ struct bcm54xx_phy_priv {
struct bcm_ptp_private *ptp;
int wake_irq;
bool wake_irq_enabled;
+ bool brr_mode;
+};
+
+/* Link modes for BCM54811 PHY */
+static const int bcm54811_linkmodes[] = {
+ ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ ETHTOOL_LINK_MODE_10baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10baseT_Half_BIT
+};
+
+/* Long-Distance Signaling (BroadR-Reach mode aneg) relevant linkmode bits */
+static const int lds_br_bits[] = {
+ ETHTOOL_LINK_MODE_Autoneg_BIT,
+ ETHTOOL_LINK_MODE_Pause_BIT,
+ ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT,
+ ETHTOOL_LINK_MODE_100baseT1_Full_BIT
};
static bool bcm54xx_phy_can_wakeup(struct phy_device *phydev)
@@ -347,6 +372,61 @@ static void bcm54xx_ptp_config_init(struct phy_device *phydev)
bcm_ptp_config_init(phydev);
}
+static int bcm5481x_set_brrmode(struct phy_device *phydev, bool on)
+{
+ int reg;
+ int err;
+ u16 val;
+
+ reg = bcm_phy_read_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL);
+
+ if (reg < 0)
+ return reg;
+
+ if (on)
+ reg |= BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN;
+ else
+ reg &= ~BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN;
+
+ err = bcm_phy_write_exp(phydev,
+ BCM54810_EXP_BROADREACH_LRE_MISC_CTL, reg);
+ if (err)
+ return err;
+
+ /* Ensure that either the LRE or the IEEE register set is accessed
+ * according to the BRR on/off state, so set the override accordingly.
+ */
+ val = BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_EN;
+ if (!on)
+ val |= BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_OVERRIDE_VAL;
+
+ return bcm_phy_write_exp(phydev,
+ BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL, val);
+}
+
+static int bcm54811_config_init(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+ int err, reg;
+
+ /* Enable CLK125 MUX on LED4 if ref clock is enabled. */
+ if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) {
+ reg = bcm_phy_read_exp(phydev, BCM54612E_EXP_SPARE0);
+ if (reg < 0)
+ return reg;
+ err = bcm_phy_write_exp(phydev, BCM54612E_EXP_SPARE0,
+ BCM54612E_LED4_CLK125OUT_EN | reg);
+ if (err < 0)
+ return err;
+ }
+
+ /* With BCM54811, BroadR-Reach implies no autoneg */
+ if (priv->brr_mode)
+ phydev->autoneg = 0;
+
+ return bcm5481x_set_brrmode(phydev, priv->brr_mode);
+}
+
static int bcm54xx_config_init(struct phy_device *phydev)
{
int reg, err, val;
@@ -399,6 +479,9 @@ static int bcm54xx_config_init(struct phy_device *phydev)
BCM54810_EXP_BROADREACH_LRE_MISC_CTL,
val);
break;
+ case PHY_ID_BCM54811:
+ err = bcm54811_config_init(phydev);
+ break;
}
if (err)
return err;
@@ -553,52 +636,117 @@ static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
return -EOPNOTSUPP;
}
-static int bcm54811_config_init(struct phy_device *phydev)
+
+/**
+ * bcm5481x_read_abilities - read PHY abilities from LRESR or Clause 22
+ * (BMSR) registers, based on whether the PHY is in BroadR-Reach or IEEE mode
+ * @phydev: target phy_device struct
+ *
+ * Description: Reads the PHY's abilities and populates phydev->supported
+ * accordingly. The register to read the abilities from is determined by
+ * the brr mode setting of the PHY as read from the device tree.
+ * Note that the LRE and IEEE sets of abilities are disjoint; in other words,
+ * not only do the link modes differ, but the auto-negotiation and
+ * master-slave setup are also controlled differently.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+static int bcm5481x_read_abilities(struct phy_device *phydev)
{
- int err, reg;
+ struct device_node *np = phydev->mdio.dev.of_node;
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+ int i, val, err;
- /* Disable BroadR-Reach function. */
- reg = bcm_phy_read_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL);
- reg &= ~BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN;
- err = bcm_phy_write_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL,
- reg);
- if (err < 0)
+ for (i = 0; i < ARRAY_SIZE(bcm54811_linkmodes); i++)
+ linkmode_clear_bit(bcm54811_linkmodes[i], phydev->supported);
+
+ priv->brr_mode = of_property_read_bool(np, "brr-mode");
+
+ /* Set BroadR-Reach mode as configured in the DT. */
+ err = bcm5481x_set_brrmode(phydev, priv->brr_mode);
+ if (err)
return err;
- err = bcm54xx_config_init(phydev);
+ if (priv->brr_mode) {
+ linkmode_set_bit_array(phy_basic_ports_array,
+ ARRAY_SIZE(phy_basic_ports_array),
+ phydev->supported);
- /* Enable CLK125 MUX on LED4 if ref clock is enabled. */
- if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) {
- reg = bcm_phy_read_exp(phydev, BCM54612E_EXP_SPARE0);
- err = bcm_phy_write_exp(phydev, BCM54612E_EXP_SPARE0,
- BCM54612E_LED4_CLK125OUT_EN | reg);
- if (err < 0)
- return err;
+ val = phy_read(phydev, MII_BCM54XX_LRESR);
+ if (val < 0)
+ return val;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->supported,
+ val & LRESR_LDSABILITY);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ phydev->supported,
+ val & LRESR_100_1PAIR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT,
+ phydev->supported,
+ val & LRESR_10_1PAIR);
+ return 0;
}
- return err;
+ return genphy_read_abilities(phydev);
}
-static int bcm5481_config_aneg(struct phy_device *phydev)
+static int bcm5481x_config_delay_swap(struct phy_device *phydev)
{
struct device_node *np = phydev->mdio.dev.of_node;
- int ret;
-
- /* Aneg firstly. */
- ret = genphy_config_aneg(phydev);
- /* Then we can set up the delay. */
+ /* Set up the delay. */
bcm54xx_config_clock_delay(phydev);
if (of_property_read_bool(np, "enet-phy-lane-swap")) {
/* Lane Swap - Undocumented register...magic! */
- ret = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_SEL_ER + 0x9,
- 0x11B);
+ int ret = bcm_phy_write_exp(phydev,
+ MII_BCM54XX_EXP_SEL_ER + 0x9,
+ 0x11B);
if (ret < 0)
return ret;
}
- return ret;
+ return 0;
+}
+
+static int bcm5481_config_aneg(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Aneg firstly. */
+ if (priv->brr_mode)
+ ret = bcm_config_lre_aneg(phydev, false);
+ else
+ ret = genphy_config_aneg(phydev);
+
+ if (ret)
+ return ret;
+
+ /* Then we can set up the delay and swap. */
+ return bcm5481x_config_delay_swap(phydev);
+}
+
+static int bcm54811_config_aneg(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+ int ret;
+
+ /* Aneg firstly. */
+ if (priv->brr_mode) {
+ /* BCM54811 is only capable of autonegotiation in IEEE mode */
+ phydev->autoneg = 0;
+ ret = bcm_config_lre_aneg(phydev, false);
+ } else {
+ ret = genphy_config_aneg(phydev);
+ }
+
+ if (ret)
+ return ret;
+
+ /* Then we can set up the delay and swap. */
+ return bcm5481x_config_delay_swap(phydev);
}
struct bcm54616s_phy_priv {
@@ -1062,6 +1210,203 @@ static void bcm54xx_link_change_notify(struct phy_device *phydev)
bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP08, ret);
}
+static int lre_read_master_slave(struct phy_device *phydev)
+{
+ int cfg = MASTER_SLAVE_CFG_UNKNOWN, state;
+ int val;
+
+ /* In BroadR-Reach mode we are always capable of master-slave
+ * and there is no preferred master or slave configuration
+ */
+ phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN;
+ phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN;
+
+ val = phy_read(phydev, MII_BCM54XX_LRECR);
+ if (val < 0)
+ return val;
+
+ if ((val & LRECR_LDSEN) == 0) {
+ if (val & LRECR_MASTER)
+ cfg = MASTER_SLAVE_CFG_MASTER_FORCE;
+ else
+ cfg = MASTER_SLAVE_CFG_SLAVE_FORCE;
+ }
+
+ val = phy_read(phydev, MII_BCM54XX_LRELDSE);
+ if (val < 0)
+ return val;
+
+ if (val & LDSE_MASTER)
+ state = MASTER_SLAVE_STATE_MASTER;
+ else
+ state = MASTER_SLAVE_STATE_SLAVE;
+
+ phydev->master_slave_get = cfg;
+ phydev->master_slave_state = state;
+
+ return 0;
+}
+
+/* Read LDS Link Partner Ability in BroadR-Reach mode */
+static int lre_read_lpa(struct phy_device *phydev)
+{
+ int i, lrelpa;
+
+ if (phydev->autoneg != AUTONEG_ENABLE) {
+ if (!phydev->autoneg_complete) {
+ /* aneg not yet done, reset all relevant bits */
+ for (i = 0; i < ARRAY_SIZE(lds_br_bits); i++)
+ linkmode_clear_bit(lds_br_bits[i],
+ phydev->lp_advertising);
+
+ return 0;
+ }
+
+ /* Long-Distance Signaling Link Partner Ability */
+ lrelpa = phy_read(phydev, MII_BCM54XX_LRELPA);
+ if (lrelpa < 0)
+ return lrelpa;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->lp_advertising,
+ lrelpa & LRELPA_PAUSE_ASYM);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ phydev->lp_advertising,
+ lrelpa & LRELPA_PAUSE);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT,
+ phydev->lp_advertising,
+ lrelpa & LRELPA_100_1PAIR);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT,
+ phydev->lp_advertising,
+ lrelpa & LRELPA_10_1PAIR);
+ } else {
+ linkmode_zero(phydev->lp_advertising);
+ }
+
+ return 0;
+}
+
+static int lre_read_status_fixed(struct phy_device *phydev)
+{
+ int lrecr = phy_read(phydev, MII_BCM54XX_LRECR);
+
+ if (lrecr < 0)
+ return lrecr;
+
+ phydev->duplex = DUPLEX_FULL;
+
+ if (lrecr & LRECR_SPEED100)
+ phydev->speed = SPEED_100;
+ else
+ phydev->speed = SPEED_10;
+
+ return 0;
+}
+
+/**
+ * lre_update_link - update link status in @phydev
+ * @phydev: target phy_device struct
+ * Return: 0 on success, < 0 on error
+ *
+ * Description: Update the value in phydev->link to reflect the
+ * current link value. In order to do this, we need to read
+ * the status register twice, keeping the second value.
+ * This is a genphy_update_link modified to work on LRE registers
+ * of BroadR-Reach PHY
+ */
+static int lre_update_link(struct phy_device *phydev)
+{
+ int status = 0, lrecr;
+
+ lrecr = phy_read(phydev, MII_BCM54XX_LRECR);
+ if (lrecr < 0)
+ return lrecr;
+
+ /* Autoneg is being started, therefore disregard the LRESR value and
+ * report link as down.
+ */
+ if (lrecr & BMCR_ANRESTART)
+ goto done;
+
+ /* The link state is latched low so that momentary link
+ * drops can be detected. Do not double-read the status
+ * in polling mode to detect such short link drops, unless
+ * the link was already down.
+ */
+ if (!phy_polling_mode(phydev) || !phydev->link) {
+ status = phy_read(phydev, MII_BCM54XX_LRESR);
+ if (status < 0)
+ return status;
+ else if (status & LRESR_LSTATUS)
+ goto done;
+ }
+
+ /* Read link and autonegotiation status */
+ status = phy_read(phydev, MII_BCM54XX_LRESR);
+ if (status < 0)
+ return status;
+done:
+ phydev->link = status & LRESR_LSTATUS ? 1 : 0;
+ phydev->autoneg_complete = status & LRESR_LDSCOMPLETE ? 1 : 0;
+
+ /* Consider the case that autoneg was started and "aneg complete"
+ * bit has been reset, but "link up" bit not yet.
+ */
+ if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete)
+ phydev->link = 0;
+
+ return 0;
+}
+
+/* Get the status in BroadR-Reach mode just like genphy_read_status does
+ * in normal mode.
+ */
+static int bcm54811_lre_read_status(struct phy_device *phydev)
+{
+ int err, old_link = phydev->link;
+
+ /* Update the link, but return if there was an error */
+ err = lre_update_link(phydev);
+ if (err)
+ return err;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ err = lre_read_master_slave(phydev);
+ if (err < 0)
+ return err;
+
+ /* Read LDS Link Partner Ability */
+ err = lre_read_lpa(phydev);
+ if (err < 0)
+ return err;
+
+ if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
+ phy_resolve_aneg_linkmode(phydev);
+ else if (phydev->autoneg == AUTONEG_DISABLE)
+ err = lre_read_status_fixed(phydev);
+
+ return err;
+}
+
+static int bcm54811_read_status(struct phy_device *phydev)
+{
+ struct bcm54xx_phy_priv *priv = phydev->priv;
+
+ if (priv->brr_mode)
+ return bcm54811_lre_read_status(phydev);
+
+ return genphy_read_status(phydev);
+}
+
static struct phy_driver broadcom_drivers[] = {
{
.phy_id = PHY_ID_BCM5411,
@@ -1211,10 +1556,12 @@ static struct phy_driver broadcom_drivers[] = {
.get_strings = bcm_phy_get_strings,
.get_stats = bcm54xx_get_stats,
.probe = bcm54xx_phy_probe,
- .config_init = bcm54811_config_init,
- .config_aneg = bcm5481_config_aneg,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = bcm54811_config_aneg,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .read_status = bcm54811_read_status,
+ .get_features = bcm5481x_read_abilities,
.suspend = bcm54xx_suspend,
.resume = bcm54xx_resume,
.link_change_notify = bcm54xx_link_change_notify,
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 5c42c47dc564..075d2beea716 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1395,7 +1395,7 @@ static void dp83640_txtstamp(struct mii_timestamper *mii_ts,
}
static int dp83640_ts_info(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct dp83640_private *dp83640 =
container_of(mii_ts, struct dp83640_private, mii_ts);
@@ -1447,6 +1447,8 @@ static int dp83640_probe(struct phy_device *phydev)
for (i = 0; i < MAX_RXTS; i++)
list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool);
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
phydev->mii_ts = &dp83640->mii_ts;
phydev->priv = dp83640;
diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
index c3426a17e6d0..efeb643c1373 100644
--- a/drivers/net/phy/dp83822.c
+++ b/drivers/net/phy/dp83822.c
@@ -140,10 +140,11 @@ struct dp83822_private {
u16 fx_sd_enable;
u8 cfg_dac_minus;
u8 cfg_dac_plus;
+ struct ethtool_wolinfo wol;
};
-static int dp83822_set_wol(struct phy_device *phydev,
- struct ethtool_wolinfo *wol)
+static int dp83822_config_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
{
struct net_device *ndev = phydev->attached_dev;
u16 value;
@@ -197,10 +198,25 @@ static int dp83822_set_wol(struct phy_device *phydev,
MII_DP83822_WOL_CFG, value);
} else {
return phy_clear_bits_mmd(phydev, DP83822_DEVADDR,
- MII_DP83822_WOL_CFG, DP83822_WOL_EN);
+ MII_DP83822_WOL_CFG,
+ DP83822_WOL_EN |
+ DP83822_WOL_MAGIC_EN |
+ DP83822_WOL_SECURE_ON);
}
}
+static int dp83822_set_wol(struct phy_device *phydev,
+ struct ethtool_wolinfo *wol)
+{
+ struct dp83822_private *dp83822 = phydev->priv;
+ int ret;
+
+ ret = dp83822_config_wol(phydev, wol);
+ if (!ret)
+ memcpy(&dp83822->wol, wol, sizeof(*wol));
+ return ret;
+}
+
static void dp83822_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
@@ -346,13 +362,6 @@ static irqreturn_t dp83822_handle_interrupt(struct phy_device *phydev)
return IRQ_HANDLED;
}
-static int dp8382x_disable_wol(struct phy_device *phydev)
-{
- return phy_clear_bits_mmd(phydev, DP83822_DEVADDR, MII_DP83822_WOL_CFG,
- DP83822_WOL_EN | DP83822_WOL_MAGIC_EN |
- DP83822_WOL_SECURE_ON);
-}
-
static int dp83822_read_status(struct phy_device *phydev)
{
struct dp83822_private *dp83822 = phydev->priv;
@@ -496,7 +505,7 @@ static int dp83822_config_init(struct phy_device *phydev)
return err;
}
}
- return dp8382x_disable_wol(phydev);
+ return dp83822_config_wol(phydev, &dp83822->wol);
}
static int dp83826_config_rmii_mode(struct phy_device *phydev)
@@ -575,12 +584,14 @@ static int dp83826_config_init(struct phy_device *phydev)
return ret;
}
- return dp8382x_disable_wol(phydev);
+ return dp83822_config_wol(phydev, &dp83822->wol);
}
static int dp8382x_config_init(struct phy_device *phydev)
{
- return dp8382x_disable_wol(phydev);
+ struct dp83822_private *dp83822 = phydev->priv;
+
+ return dp83822_config_wol(phydev, &dp83822->wol);
}
static int dp83822_phy_reset(struct phy_device *phydev)
diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c
index d7616b13c594..551e37786c2d 100644
--- a/drivers/net/phy/dp83td510.c
+++ b/drivers/net/phy/dp83td510.c
@@ -4,6 +4,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/ethtool_netlink.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/phy.h>
@@ -29,6 +30,10 @@
#define DP83TD510E_INT1_LINK BIT(13)
#define DP83TD510E_INT1_LINK_EN BIT(5)
+#define DP83TD510E_CTRL 0x1f
+#define DP83TD510E_CTRL_HW_RESET BIT(15)
+#define DP83TD510E_CTRL_SW_RESET BIT(14)
+
#define DP83TD510E_AN_STAT_1 0x60c
#define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15)
@@ -53,6 +58,117 @@ static const u16 dp83td510_mse_sqi_map[] = {
0x0000 /* 24dB =< SNR */
};
+/* Time Domain Reflectometry (TDR) Functionality of DP83TD510 PHY
+ *
+ * I assume that this PHY is using a variation of Spread Spectrum Time Domain
+ * Reflectometry (SSTDR) rather than the commonly used TDR found in many PHYs.
+ * Here are the following observations which likely confirm this:
+ * - The DP83TD510 PHY transmits a modulated signal of configurable length
+ * (default 16000 µs) instead of a single pulse pattern, which is typical
+ * for traditional TDR.
+ * - The pulse observed on the wire, triggered by the HW RESET register, is not
+ * part of the cable testing process.
+ *
+ * SSTDR seems to be a logical choice for the 10BaseT1L
+ * environment due to improved noise resistance, making it suitable for
+ * environments with significant electrical noise, such as long 10BaseT1L cable
+ * runs.
+ *
+ * Configuration Variables:
+ * The SSTDR variation used in this PHY involves more configuration variables
+ * that can dramatically affect the functionality and precision of cable
+ * testing. Since most of these configuration options are either not well
+ * documented or documented with minimal details, the following sections
+ * describe my understanding and observations of these variables and their
+ * impact on TDR functionality.
+ *
+ * Timeline:
+ * ,<--cfg_pre_silence_time
+ * | ,<-SSTDR Modulated Transmission
+ * | | ,<--cfg_post_silence_time
+ * | | | ,<--Force Link Mode
+ * |<--'-->|<-------'------->|<--'-->|<--------'------->|
+ *
+ * - cfg_pre_silence_time: Optional silence time before TDR transmission starts.
+ * - SSTDR Modulated Transmission: Transmission duration configured by
+ * cfg_tdr_tx_duration and amplitude configured by cfg_tdr_tx_type.
+ * - cfg_post_silence_time: Silence time after TDR transmission.
+ * - Force Link Mode: If nothing is configured after cfg_post_silence_time,
+ * the PHY continues in force link mode without autonegotiation.
+ */
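Taken together with the configuration applied in dp83td510_cable_test_start() below (10 ms pre-silence, 16000 µs transmission, 1000 ms post-silence), one TDR run occupies roughly:

	10 ms + 16 ms + 1000 ms ≈ 1026 ms

which is why the post-silence window is sized to match the polling cadence of the PHY framework.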
+
+#define DP83TD510E_TDR_CFG 0x1e
+#define DP83TD510E_TDR_START BIT(15)
+#define DP83TD510E_TDR_DONE BIT(1)
+#define DP83TD510E_TDR_FAIL BIT(0)
+
+#define DP83TD510E_TDR_CFG1 0x300
+/* cfg_tdr_tx_type: Transmit voltage level for TDR.
+ * 0 = 1V, 1 = 2.4V
+ * Note: Using different voltage levels may not work
+ * in all configuration variations. For example, setting
+ * 2.4V may give different cable length measurements.
+ * Other settings may be needed to make it work properly.
+ */
+#define DP83TD510E_TDR_TX_TYPE BIT(12)
+#define DP83TD510E_TDR_TX_TYPE_1V 0
+#define DP83TD510E_TDR_TX_TYPE_2_4V 1
+/* cfg_post_silence_time: Time after the TDR sequence. Since we force master
+ * mode for the TDR, the PHY will proceed with a forced link state after this
+ * time. For Linux it is better to set the max value to avoid false link state
+ * detection.
+ */
+#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME GENMASK(3, 2)
+#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_0MS 0
+#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_10MS 1
+#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_100MS 2
+#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_1000MS 3
+/* cfg_pre_silence_time: Time before the TDR sequence. It should be enough to
+ * settle down all pulses and reflections. Since for 10BASE-T1L we have
+ * maximum 2000m cable length, we can set it to 1ms.
+ */
+#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME GENMASK(1, 0)
+#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_0MS 0
+#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_10MS 1
+#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_100MS 2
+#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_1000MS 3
+
+#define DP83TD510E_TDR_CFG2 0x301
+#define DP83TD510E_TDR_END_TAP_INDEX_1 GENMASK(14, 8)
+#define DP83TD510E_TDR_END_TAP_INDEX_1_DEF 36
+#define DP83TD510E_TDR_START_TAP_INDEX_1 GENMASK(6, 0)
+#define DP83TD510E_TDR_START_TAP_INDEX_1_DEF 4
+
+#define DP83TD510E_TDR_CFG3 0x302
+/* cfg_tdr_tx_duration: Duration of the TDR transmission in microseconds.
+ * This value sets the duration of the modulated signal used for TDR
+ * measurements.
+ * - Default: 16000 µs
+ * - Observation: A minimum duration of 6000 µs is recommended to ensure
+ * accurate detection of cable faults. Durations shorter than 6000 µs may
+ * result in incomplete data, especially for shorter cables (e.g., 20 meters),
+ * leading to false "OK" results. Longer durations (e.g., 6000 µs or more)
+ * provide better accuracy, particularly for detecting open circuits.
+ */
+#define DP83TD510E_TDR_TX_DURATION_US GENMASK(15, 0)
+#define DP83TD510E_TDR_TX_DURATION_US_DEF 16000
+
+#define DP83TD510E_TDR_FAULT_CFG1 0x303
+#define DP83TD510E_TDR_FLT_LOC_OFFSET_1 GENMASK(14, 8)
+#define DP83TD510E_TDR_FLT_LOC_OFFSET_1_DEF 4
+#define DP83TD510E_TDR_FLT_INIT_1 GENMASK(7, 0)
+#define DP83TD510E_TDR_FLT_INIT_1_DEF 62
+
+#define DP83TD510E_TDR_FAULT_STAT 0x30c
+#define DP83TD510E_TDR_PEAK_DETECT BIT(11)
+#define DP83TD510E_TDR_PEAK_SIGN BIT(10)
+#define DP83TD510E_TDR_PEAK_LOCATION GENMASK(9, 0)
+
+/* Undocumented registers and values, recommended according to the
+ * "DP83TD510E Cable Diagnostics Toolkit revC"
+ */
+#define DP83TD510E_UNKN_030E 0x30e
+#define DP83TD510E_030E_VAL 0x2520
+
static int dp83td510_config_intr(struct phy_device *phydev)
{
int ret;
@@ -198,6 +314,151 @@ static int dp83td510_get_sqi_max(struct phy_device *phydev)
return DP83TD510_SQI_MAX;
}
+/**
+ * dp83td510_cable_test_start - Start the cable test for the DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * This sequence is implemented according to the "Application Note DP83TD510E
+ * Cable Diagnostics Toolkit revC".
+ *
+ * Returns: 0 on success, a negative error code on failure.
+ */
+static int dp83td510_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_CTRL,
+ DP83TD510E_CTRL_HW_RESET);
+ if (ret)
+ return ret;
+
+ ret = genphy_c45_an_disable_aneg(phydev);
+ if (ret)
+ return ret;
+
+ /* Force master mode */
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL,
+ MDIO_PMA_PMD_BT1_CTRL_CFG_MST);
+ if (ret)
+ return ret;
+
+ /* There is no official recommendation for this register, but it is
+ * better to use 1V for TDR since the other values seem to be optimized
+ * for this amplitude. Apart from the amplitude, it is better to set the
+ * pre-TDR silence time to 10ms to avoid false reflections (the value 0
+ * seems to be too short, otherwise we would need to implement our own
+ * silence time). Also, the post-TDR silence time should be set to 1000ms
+ * to avoid false link state detection; it fits the polling time of the
+ * PHY framework. The idea is to wait until
+ * dp83td510_cable_test_get_status() is called and reconfigure the PHY
+ * to the default state within the post silence time window.
+ */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG1,
+ DP83TD510E_TDR_TX_TYPE |
+ DP83TD510E_TDR_CFG1_POST_SILENCE_TIME |
+ DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME,
+ DP83TD510E_TDR_TX_TYPE_1V |
+ DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_10MS |
+ DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_1000MS);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG2,
+ FIELD_PREP(DP83TD510E_TDR_END_TAP_INDEX_1,
+ DP83TD510E_TDR_END_TAP_INDEX_1_DEF) |
+ FIELD_PREP(DP83TD510E_TDR_START_TAP_INDEX_1,
+ DP83TD510E_TDR_START_TAP_INDEX_1_DEF));
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_FAULT_CFG1,
+ FIELD_PREP(DP83TD510E_TDR_FLT_LOC_OFFSET_1,
+ DP83TD510E_TDR_FLT_LOC_OFFSET_1_DEF) |
+ FIELD_PREP(DP83TD510E_TDR_FLT_INIT_1,
+ DP83TD510E_TDR_FLT_INIT_1_DEF));
+ if (ret)
+ return ret;
+
+ /* Undocumented register, from the "Application Note DP83TD510E Cable
+ * Diagnostics Toolkit revC".
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_UNKN_030E,
+ DP83TD510E_030E_VAL);
+ if (ret)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG3,
+ DP83TD510E_TDR_TX_DURATION_US_DEF);
+ if (ret)
+ return ret;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_CTRL,
+ DP83TD510E_CTRL_SW_RESET);
+ if (ret)
+ return ret;
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG,
+ DP83TD510E_TDR_START);
+}
+
+/**
+ * dp83td510_cable_test_get_status - Get the status of the cable test for the
+ * DP83TD510 PHY.
+ * @phydev: Pointer to the phy_device structure.
+ * @finished: Pointer to a boolean that indicates whether the test is finished.
+ *
+ * The function sets the @finished flag to true if the test is complete.
+ *
+ * Returns: 0 on success or a negative error code on failure.
+ */
+static int dp83td510_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int ret, stat;
+
+ *finished = false;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG);
+ if (ret < 0)
+ return ret;
+
+ if (!(ret & DP83TD510E_TDR_DONE))
+ return 0;
+
+ if (!(ret & DP83TD510E_TDR_FAIL)) {
+ int location;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ DP83TD510E_TDR_FAULT_STAT);
+ if (ret < 0)
+ return ret;
+
+ if (ret & DP83TD510E_TDR_PEAK_DETECT) {
+ if (ret & DP83TD510E_TDR_PEAK_SIGN)
+ stat = ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ else
+ stat = ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+
+ location = FIELD_GET(DP83TD510E_TDR_PEAK_LOCATION,
+ ret) * 100;
+ ethnl_cable_test_fault_length(phydev,
+ ETHTOOL_A_CABLE_PAIR_A,
+ location);
+ } else {
+ stat = ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ }
+ } else {
+ /* Most probably we have an active link partner */
+ stat = ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+
+ *finished = true;
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, stat);
+
+ return phy_init_hw(phydev);
+}
+
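The fault-length conversion above multiplies DP83TD510E_TDR_PEAK_LOCATION by 100, i.e. one metre of cable per location unit, since ethnl_cable_test_fault_length() expects centimetres. The same step as a standalone helper; the metre-per-tap scale is inferred from this code, not from TI documentation:

	static u32 dp83td510_peak_to_cm(u16 fault_stat)
	{
		/* 1 m per peak-location unit, reported in cm */
		return FIELD_GET(DP83TD510E_TDR_PEAK_LOCATION, fault_stat) * 100;
	}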
static int dp83td510_get_features(struct phy_device *phydev)
{
/* This PHY can't respond on MDIO bus if no RMII clock is enabled.
@@ -221,6 +482,7 @@ static struct phy_driver dp83td510_driver[] = {
PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID),
.name = "TI DP83TD510E",
+ .flags = PHY_POLL_CABLE_TEST,
.config_aneg = dp83td510_config_aneg,
.read_status = dp83td510_read_status,
.get_features = dp83td510_get_features,
@@ -228,6 +490,8 @@ static struct phy_driver dp83td510_driver[] = {
.handle_interrupt = dp83td510_handle_interrupt,
.get_sqi = dp83td510_get_sqi,
.get_sqi_max = dp83td510_get_sqi_max,
+ .cable_test_start = dp83td510_cable_test_start,
+ .cable_test_get_status = dp83td510_cable_test_get_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
diff --git a/drivers/net/phy/dp83tg720.c b/drivers/net/phy/dp83tg720.c
index 326c9770a6dc..c706429b225a 100644
--- a/drivers/net/phy/dp83tg720.c
+++ b/drivers/net/phy/dp83tg720.c
@@ -17,6 +17,11 @@
#define DP83TG720S_PHY_RESET 0x1f
#define DP83TG720S_HW_RESET BIT(15)
+#define DP83TG720S_LPS_CFG3 0x18c
+/* Power modes are documented as bit fields but used as values */
+/* Power Mode 0 is Normal mode */
+#define DP83TG720S_LPS_CFG3_PWR_MODE_0 BIT(0)
+
#define DP83TG720S_RGMII_DELAY_CTRL 0x602
/* In RGMII mode, Enable or disable the internal delay for RXD */
#define DP83TG720S_RGMII_RX_CLK_SEL BIT(1)
@@ -31,11 +36,20 @@
static int dp83tg720_config_aneg(struct phy_device *phydev)
{
+ int ret;
+
/* Autoneg is not supported and this PHY supports only one speed.
* We need to care only about master/slave configuration if it was
* changed by user.
*/
- return genphy_c45_pma_baset1_setup_master_slave(phydev);
+ ret = genphy_c45_pma_baset1_setup_master_slave(phydev);
+ if (ret)
+ return ret;
+
+ /* Re-read role configuration to make changes visible even if
+ * the link is in administrative down state.
+ */
+ return genphy_c45_pma_baset1_read_master_slave(phydev);
}
static int dp83tg720_read_status(struct phy_device *phydev)
@@ -64,6 +78,8 @@ static int dp83tg720_read_status(struct phy_device *phydev)
return ret;
/* After HW reset we need to restore master/slave configuration.
+ * genphy_c45_pma_baset1_read_master_slave() call will be done
+ * by the dp83tg720_config_aneg() function.
*/
ret = dp83tg720_config_aneg(phydev);
if (ret)
@@ -154,10 +170,24 @@ static int dp83tg720_config_init(struct phy_device *phydev)
*/
usleep_range(1000, 2000);
- if (phy_interface_is_rgmii(phydev))
- return dp83tg720_config_rgmii_delay(phydev);
+ if (phy_interface_is_rgmii(phydev)) {
+ ret = dp83tg720_config_rgmii_delay(phydev);
+ if (ret)
+ return ret;
+ }
+
+ /* In case the PHY is bootstrapped in managed mode, we need to
+ * wake it.
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TG720S_LPS_CFG3,
+ DP83TG720S_LPS_CFG3_PWR_MODE_0);
+ if (ret)
+ return ret;
- return 0;
+ /* Make role configuration visible for ethtool on init and after
+ * reset.
+ */
+ return genphy_c45_pma_baset1_read_master_slave(phydev);
}
static struct phy_driver dp83tg720_driver[] = {
diff --git a/drivers/net/phy/marvell-88q2xxx.c b/drivers/net/phy/marvell-88q2xxx.c
index 6b4bd9883304..c812f16eaa3a 100644
--- a/drivers/net/phy/marvell-88q2xxx.c
+++ b/drivers/net/phy/marvell-88q2xxx.c
@@ -12,6 +12,8 @@
#include <linux/hwmon.h>
#define PHY_ID_88Q2220_REVB0 (MARVELL_PHY_ID_88Q2220 | 0x1)
+#define PHY_ID_88Q2220_REVB1 (MARVELL_PHY_ID_88Q2220 | 0x2)
+#define PHY_ID_88Q2220_REVB2 (MARVELL_PHY_ID_88Q2220 | 0x3)
#define MDIO_MMD_AN_MV_STAT 32769
#define MDIO_MMD_AN_MV_STAT_ANEG 0x0100
@@ -129,6 +131,49 @@ static const struct mmd_val mv88q222x_revb0_init_seq1[] = {
{ MDIO_MMD_PCS, 0xfe05, 0x755c },
};
+static const struct mmd_val mv88q222x_revb1_init_seq0[] = {
+ { MDIO_MMD_PCS, 0xffe4, 0x0007 },
+ { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 },
+ { MDIO_MMD_PCS, 0xffe3, 0x7000 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1, 0x0840 },
+};
+
+static const struct mmd_val mv88q222x_revb2_init_seq0[] = {
+ { MDIO_MMD_PCS, 0xffe4, 0x0007 },
+ { MDIO_MMD_AN, MDIO_AN_T1_CTRL, 0x0 },
+ { MDIO_MMD_PMAPMD, MDIO_CTRL1, 0x0840 },
+};
+
+static const struct mmd_val mv88q222x_revb1_revb2_init_seq1[] = {
+ { MDIO_MMD_PCS, 0xfe07, 0x125a },
+ { MDIO_MMD_PCS, 0xfe09, 0x1288 },
+ { MDIO_MMD_PCS, 0xfe08, 0x2588 },
+ { MDIO_MMD_PCS, 0xfe72, 0x042c },
+ { MDIO_MMD_PCS, 0xffe4, 0x0071 },
+ { MDIO_MMD_PCS, 0xffe4, 0x0001 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x0048 },
+ { MDIO_MMD_PMAPMD, 0x0000, 0x0000 },
+ { MDIO_MMD_PCS, 0x0000, 0x0000 },
+ { MDIO_MMD_PCS, 0xffdb, 0xfc10 },
+ { MDIO_MMD_PCS, 0xfe1b, 0x58 },
+ { MDIO_MMD_PCS, 0xfcad, 0x030c },
+ { MDIO_MMD_PCS, 0x8032, 0x6001 },
+ { MDIO_MMD_PCS, 0xfdff, 0x05a5 },
+ { MDIO_MMD_PCS, 0xfdec, 0xdbaf },
+ { MDIO_MMD_PCS, 0xfcab, 0x1054 },
+ { MDIO_MMD_PCS, 0xfcac, 0x1483 },
+ { MDIO_MMD_PCS, 0x8033, 0xc801 },
+ { MDIO_MMD_AN, 0x8032, 0x2020 },
+ { MDIO_MMD_AN, 0x8031, 0xa28 },
+ { MDIO_MMD_AN, 0x8031, 0xc28 },
+ { MDIO_MMD_PCS, 0xfbba, 0x0cb2 },
+ { MDIO_MMD_PCS, 0xfbbb, 0x0c4a },
+ { MDIO_MMD_PCS, 0xfe5f, 0xe8 },
+ { MDIO_MMD_PCS, 0xfe05, 0x755c },
+ { MDIO_MMD_PCS, 0xfa20, 0x002a },
+ { MDIO_MMD_PCS, 0xfe11, 0x1105 },
+};
+
static int mv88q2xxx_soft_reset(struct phy_device *phydev)
{
int ret;
@@ -687,31 +732,72 @@ static int mv88q222x_soft_reset(struct phy_device *phydev)
return 0;
}
-static int mv88q222x_revb0_config_init(struct phy_device *phydev)
+static int mv88q222x_write_mmd_vals(struct phy_device *phydev,
+ const struct mmd_val *vals, size_t len)
{
- int ret, i;
+ int ret;
- for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq0); i++) {
- ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq0[i].devad,
- mv88q222x_revb0_init_seq0[i].regnum,
- mv88q222x_revb0_init_seq0[i].val);
+ for (; len; vals++, len--) {
+ ret = phy_write_mmd(phydev, vals->devad, vals->regnum,
+ vals->val);
if (ret < 0)
return ret;
}
+ return 0;
+}
+
+static int mv88q222x_revb0_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb0_init_seq0,
+ ARRAY_SIZE(mv88q222x_revb0_init_seq0));
+ if (ret < 0)
+ return ret;
+
usleep_range(5000, 10000);
- for (i = 0; i < ARRAY_SIZE(mv88q222x_revb0_init_seq1); i++) {
- ret = phy_write_mmd(phydev, mv88q222x_revb0_init_seq1[i].devad,
- mv88q222x_revb0_init_seq1[i].regnum,
- mv88q222x_revb0_init_seq1[i].val);
- if (ret < 0)
- return ret;
- }
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb0_init_seq1,
+ ARRAY_SIZE(mv88q222x_revb0_init_seq1));
+ if (ret < 0)
+ return ret;
+
+ return mv88q2xxx_config_init(phydev);
+}
+
+static int mv88q222x_revb1_revb2_config_init(struct phy_device *phydev)
+{
+ bool is_rev_b1 = phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB1;
+ int ret;
+
+ if (is_rev_b1)
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb1_init_seq0,
+ ARRAY_SIZE(mv88q222x_revb1_init_seq0));
+ else
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb2_init_seq0,
+ ARRAY_SIZE(mv88q222x_revb2_init_seq0));
+ if (ret < 0)
+ return ret;
+
+ usleep_range(3000, 5000);
+
+ ret = mv88q222x_write_mmd_vals(phydev, mv88q222x_revb1_revb2_init_seq1,
+ ARRAY_SIZE(mv88q222x_revb1_revb2_init_seq1));
+ if (ret < 0)
+ return ret;
return mv88q2xxx_config_init(phydev);
}
+static int mv88q222x_config_init(struct phy_device *phydev)
+{
+ if (phydev->c45_ids.device_ids[MDIO_MMD_PMAPMD] == PHY_ID_88Q2220_REVB0)
+ return mv88q222x_revb0_config_init(phydev);
+ else
+ return mv88q222x_revb1_revb2_config_init(phydev);
+}
+
static int mv88q222x_cable_test_start(struct phy_device *phydev)
{
int ret;
@@ -810,14 +896,15 @@ static struct phy_driver mv88q2xxx_driver[] = {
.get_sqi_max = mv88q2xxx_get_sqi_max,
},
{
- PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0),
+ .phy_id = MARVELL_PHY_ID_88Q2220,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88q2220",
.flags = PHY_POLL_CABLE_TEST,
.probe = mv88q2xxx_probe,
.get_features = mv88q2xxx_get_features,
.config_aneg = mv88q2xxx_config_aneg,
.aneg_done = genphy_c45_aneg_done,
- .config_init = mv88q222x_revb0_config_init,
+ .config_init = mv88q222x_config_init,
.read_status = mv88q2xxx_read_status,
.soft_reset = mv88q222x_soft_reset,
.config_intr = mv88q2xxx_config_intr,
@@ -836,7 +923,7 @@ module_phy_driver(mv88q2xxx_driver);
static struct mdio_device_id __maybe_unused mv88q2xxx_tbl[] = {
{ MARVELL_PHY_ID_88Q2110, MARVELL_PHY_ID_MASK },
- { PHY_ID_MATCH_EXACT(PHY_ID_88Q2220_REVB0), },
+ { MARVELL_PHY_ID_88Q2220, MARVELL_PHY_ID_MASK },
{ /*sentinel*/ }
};
MODULE_DEVICE_TABLE(mdio, mv88q2xxx_tbl);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 42ed013385bf..b89fbffa6a93 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -279,10 +279,29 @@
#define MII_VCT7_CTRL_METERS BIT(10)
#define MII_VCT7_CTRL_CENTIMETERS 0
+#define MII_VCT_TXPINS 0x1A
+#define MII_VCT_RXPINS 0x1B
+#define MII_VCT_SR 0x1C
+#define MII_VCT_TXPINS_ENVCT BIT(15)
+#define MII_VCT_TXRXPINS_VCTTST GENMASK(14, 13)
+#define MII_VCT_TXRXPINS_VCTTST_SHIFT 13
+#define MII_VCT_TXRXPINS_VCTTST_OK 0
+#define MII_VCT_TXRXPINS_VCTTST_SHORT 1
+#define MII_VCT_TXRXPINS_VCTTST_OPEN 2
+#define MII_VCT_TXRXPINS_VCTTST_FAIL 3
+#define MII_VCT_TXRXPINS_AMPRFLN GENMASK(12, 8)
+#define MII_VCT_TXRXPINS_AMPRFLN_SHIFT 8
+#define MII_VCT_TXRXPINS_DISTRFLN GENMASK(7, 0)
+#define MII_VCT_TXRXPINS_DISTRFLN_MAX 0xff
+
+#define M88E3082_PAIR_A BIT(0)
+#define M88E3082_PAIR_B BIT(1)
+
#define LPA_PAUSE_FIBER 0x180
#define LPA_PAUSE_ASYM_FIBER 0x100
#define NB_FIBER_STATS 1
+#define NB_STAT_MAX 3
MODULE_DESCRIPTION("Marvell PHY driver");
MODULE_AUTHOR("Andy Fleming");
@@ -295,14 +314,37 @@ struct marvell_hw_stat {
u8 bits;
};
-static struct marvell_hw_stat marvell_hw_stats[] = {
+static const struct marvell_hw_stat marvell_hw_stats[] = {
{ "phy_receive_errors_copper", 0, 21, 16},
{ "phy_idle_errors", 0, 10, 8 },
{ "phy_receive_errors_fiber", 1, 21, 16},
};
+static_assert(ARRAY_SIZE(marvell_hw_stats) <= NB_STAT_MAX);
+
+/* "simple" stat list + corresponding marvell_get_*_simple functions are used
+ * on PHYs without a page register
+ */
+struct marvell_hw_stat_simple {
+ const char *string;
+ u8 reg;
+ u8 bits;
+};
+
+static const struct marvell_hw_stat_simple marvell_hw_stats_simple[] = {
+ { "phy_receive_errors", 21, 16},
+};
+
+static_assert(ARRAY_SIZE(marvell_hw_stats_simple) <= NB_STAT_MAX);
+
+enum {
+ M88E3082_VCT_OFF,
+ M88E3082_VCT_PHASE1,
+ M88E3082_VCT_PHASE2,
+};
+
struct marvell_priv {
- u64 stats[ARRAY_SIZE(marvell_hw_stats)];
+ u64 stats[NB_STAT_MAX];
char *hwmon_name;
struct device *hwmon_dev;
bool cable_test_tdr;
@@ -310,6 +352,7 @@ struct marvell_priv {
u32 last;
u32 step;
s8 pair;
+ u8 vct_phase;
};
static int marvell_read_page(struct phy_device *phydev)
@@ -1953,6 +1996,11 @@ static int marvell_get_sset_count(struct phy_device *phydev)
return ARRAY_SIZE(marvell_hw_stats) - NB_FIBER_STATS;
}
+static int marvell_get_sset_count_simple(struct phy_device *phydev)
+{
+ return ARRAY_SIZE(marvell_hw_stats_simple);
+}
+
static void marvell_get_strings(struct phy_device *phydev, u8 *data)
{
int count = marvell_get_sset_count(phydev);
@@ -1964,6 +2012,17 @@ static void marvell_get_strings(struct phy_device *phydev, u8 *data)
}
}
+static void marvell_get_strings_simple(struct phy_device *phydev, u8 *data)
+{
+ int count = marvell_get_sset_count_simple(phydev);
+ int i;
+
+ for (i = 0; i < count; i++) {
+ strscpy(data + i * ETH_GSTRING_LEN,
+ marvell_hw_stats_simple[i].string, ETH_GSTRING_LEN);
+ }
+}
+
static u64 marvell_get_stat(struct phy_device *phydev, int i)
{
struct marvell_hw_stat stat = marvell_hw_stats[i];
@@ -1983,6 +2042,25 @@ static u64 marvell_get_stat(struct phy_device *phydev, int i)
return ret;
}
+static u64 marvell_get_stat_simple(struct phy_device *phydev, int i)
+{
+ struct marvell_hw_stat_simple stat = marvell_hw_stats_simple[i];
+ struct marvell_priv *priv = phydev->priv;
+ int val;
+ u64 ret;
+
+ val = phy_read(phydev, stat.reg);
+ if (val < 0) {
+ ret = U64_MAX;
+ } else {
+ val = val & ((1 << stat.bits) - 1);
+ priv->stats[i] += val;
+ ret = priv->stats[i];
+ }
+
+ return ret;
+}
+
static void marvell_get_stats(struct phy_device *phydev,
struct ethtool_stats *stats, u64 *data)
{
@@ -1993,6 +2071,16 @@ static void marvell_get_stats(struct phy_device *phydev,
data[i] = marvell_get_stat(phydev, i);
}
+static void marvell_get_stats_simple(struct phy_device *phydev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int count = marvell_get_sset_count_simple(phydev);
+ int i;
+
+ for (i = 0; i < count; i++)
+ data[i] = marvell_get_stat_simple(phydev, i);
+}
+
static int m88e1510_loopback(struct phy_device *phydev, bool enable)
{
int err;
@@ -2417,6 +2505,274 @@ static int marvell_vct7_cable_test_get_status(struct phy_device *phydev,
return 0;
}
+static int m88e3082_vct_cable_test_start(struct phy_device *phydev)
+{
+ struct marvell_priv *priv = phydev->priv;
+ int ret;
+
+ /* This PHY needs some magic workarounds described in the VCT manual.
+ */
+ ret = phy_write(phydev, 29, 0x0003);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x6440);
+ if (ret < 0)
+ return ret;
+
+ if (priv->vct_phase == M88E3082_VCT_PHASE1) {
+ ret = phy_write(phydev, 29, 0x000a);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x0002);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = phy_write(phydev, MII_BMCR,
+ BMCR_RESET | BMCR_SPEED100 | BMCR_FULLDPLX);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, MII_VCT_TXPINS, MII_VCT_TXPINS_ENVCT);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 29, 0x0003);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x0);
+ if (ret < 0)
+ return ret;
+
+ if (priv->vct_phase == M88E3082_VCT_OFF) {
+ priv->vct_phase = M88E3082_VCT_PHASE1;
+ priv->pair = 0;
+
+ return 0;
+ }
+
+ ret = phy_write(phydev, 29, 0x000a);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x0);
+ if (ret < 0)
+ return ret;
+
+ priv->vct_phase = M88E3082_VCT_PHASE2;
+
+ return 0;
+}
+
+static int m88e3082_vct_cable_test_report_trans(int result, u8 distance)
+{
+ switch (result) {
+ case MII_VCT_TXRXPINS_VCTTST_OK:
+ if (distance == MII_VCT_TXRXPINS_DISTRFLN_MAX)
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ return ETHTOOL_A_CABLE_RESULT_CODE_IMPEDANCE_MISMATCH;
+ case MII_VCT_TXRXPINS_VCTTST_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case MII_VCT_TXRXPINS_VCTTST_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static u32 m88e3082_vct_distrfln_2_cm(u8 distrfln)
+{
+ if (distrfln < 24)
+ return 0;
+
+ /* Original function for meters: y = 0.7861x - 18.862 */
+ return (7861 * distrfln - 188620) / 100;
+}
+
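A quick sanity check of the fixed-point conversion above against the quoted formula, for distrfln = 100 taps:

	/* meters: 0.7861 * 100 - 18.862       = 59.748 m
	 * cm:     (7861 * 100 - 188620) / 100 = 5974 cm
	 * The distrfln < 24 guard avoids a negative result: 7861 * 24 = 188664
	 * is the first product above the 188620 offset.
	 */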
+static int m88e3082_vct_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ u8 tx_vcttst_res, rx_vcttst_res, tx_distrfln, rx_distrfln;
+ struct marvell_priv *priv = phydev->priv;
+ int ret, tx_result, rx_result;
+ bool done_phase = true;
+
+ *finished = false;
+
+ ret = phy_read(phydev, MII_VCT_TXPINS);
+ if (ret < 0)
+ return ret;
+ else if (ret & MII_VCT_TXPINS_ENVCT)
+ return 0;
+
+ tx_distrfln = ret & MII_VCT_TXRXPINS_DISTRFLN;
+ tx_vcttst_res = (ret & MII_VCT_TXRXPINS_VCTTST) >>
+ MII_VCT_TXRXPINS_VCTTST_SHIFT;
+
+ ret = phy_read(phydev, MII_VCT_RXPINS);
+ if (ret < 0)
+ return ret;
+
+ rx_distrfln = ret & MII_VCT_TXRXPINS_DISTRFLN;
+ rx_vcttst_res = (ret & MII_VCT_TXRXPINS_VCTTST) >>
+ MII_VCT_TXRXPINS_VCTTST_SHIFT;
+
+ *finished = true;
+
+ switch (priv->vct_phase) {
+ case M88E3082_VCT_PHASE1:
+ tx_result = m88e3082_vct_cable_test_report_trans(tx_vcttst_res,
+ tx_distrfln);
+ rx_result = m88e3082_vct_cable_test_report_trans(rx_vcttst_res,
+ rx_distrfln);
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ tx_result);
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
+ rx_result);
+
+ if (tx_vcttst_res == MII_VCT_TXRXPINS_VCTTST_OPEN) {
+ done_phase = false;
+ priv->pair |= M88E3082_PAIR_A;
+ } else if (tx_distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) {
+ u8 pair = ETHTOOL_A_CABLE_PAIR_A;
+ u32 cm = m88e3082_vct_distrfln_2_cm(tx_distrfln);
+
+ ethnl_cable_test_fault_length(phydev, pair, cm);
+ }
+
+ if (rx_vcttst_res == MII_VCT_TXRXPINS_VCTTST_OPEN) {
+ done_phase = false;
+ priv->pair |= M88E3082_PAIR_B;
+ } else if (rx_distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) {
+ u8 pair = ETHTOOL_A_CABLE_PAIR_B;
+ u32 cm = m88e3082_vct_distrfln_2_cm(rx_distrfln);
+
+ ethnl_cable_test_fault_length(phydev, pair, cm);
+ }
+
+ break;
+ case M88E3082_VCT_PHASE2:
+ if (priv->pair & M88E3082_PAIR_A &&
+ tx_vcttst_res == MII_VCT_TXRXPINS_VCTTST_OPEN &&
+ tx_distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) {
+ u8 pair = ETHTOOL_A_CABLE_PAIR_A;
+ u32 cm = m88e3082_vct_distrfln_2_cm(tx_distrfln);
+
+ ethnl_cable_test_fault_length(phydev, pair, cm);
+ }
+ if (priv->pair & M88E3082_PAIR_B &&
+ rx_vcttst_res == MII_VCT_TXRXPINS_VCTTST_OPEN &&
+ rx_distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) {
+ u8 pair = ETHTOOL_A_CABLE_PAIR_B;
+ u32 cm = m88e3082_vct_distrfln_2_cm(rx_distrfln);
+
+ ethnl_cable_test_fault_length(phydev, pair, cm);
+ }
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!done_phase) {
+ *finished = false;
+ return m88e3082_vct_cable_test_start(phydev);
+ }
+ if (*finished)
+ priv->vct_phase = M88E3082_VCT_OFF;
+ return 0;
+}
+
+static int m88e1111_vct_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = marvell_cable_test_start_common(phydev);
+ if (ret)
+ return ret;
+
+ /* This PHY needs some magic workarounds described in the VCT manual.
+ */
+ ret = phy_write(phydev, 29, 0x0018);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x00c2);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x00ca);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x00c2);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_paged(phydev, MII_MARVELL_COPPER_PAGE, MII_VCT_SR,
+ MII_VCT_TXPINS_ENVCT);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 29, 0x0018);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write(phydev, 30, 0x0042);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static u32 m88e1111_vct_distrfln_2_cm(u8 distrfln)
+{
+ if (distrfln < 36)
+ return 0;
+
+ /* Original function for meters: y = 0.8018x - 28.751 */
+ return (8018 * distrfln - 287510) / 100;
+}
+
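The same check for this PHY's formula, for distrfln = 100 taps:

	/* meters: 0.8018 * 100 - 28.751       = 51.429 m
	 * cm:     (8018 * 100 - 287510) / 100 = 5142 cm
	 * Analogously, 8018 * 36 = 288648 is the first product above the
	 * 287510 offset, hence the distrfln < 36 guard.
	 */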
+static int m88e1111_vct_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ u8 vcttst_res, distrfln;
+ int ret, result;
+
+ *finished = false;
+
+ /* Each pair uses one page: A-0, B-1, C-2, D-3 */
+ for (u8 i = 0; i < 4; i++) {
+ ret = phy_read_paged(phydev, i, MII_VCT_SR);
+ if (ret < 0)
+ return ret;
+ else if (i == 0 && ret & MII_VCT_TXPINS_ENVCT)
+ return 0;
+
+ distrfln = ret & MII_VCT_TXRXPINS_DISTRFLN;
+ vcttst_res = (ret & MII_VCT_TXRXPINS_VCTTST) >>
+ MII_VCT_TXRXPINS_VCTTST_SHIFT;
+
+ result = m88e3082_vct_cable_test_report_trans(vcttst_res,
+ distrfln);
+ ethnl_cable_test_result(phydev, i, result);
+
+ if (distrfln < MII_VCT_TXRXPINS_DISTRFLN_MAX) {
+ u32 cm = m88e1111_vct_distrfln_2_cm(distrfln);
+
+ ethnl_cable_test_fault_length(phydev, i, cm);
+ }
+ }
+
+ *finished = true;
+ return 0;
+}
+
#ifdef CONFIG_HWMON
struct marvell_hwmon_ops {
int (*config)(struct phy_device *phydev);
@@ -3290,6 +3646,20 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
},
{
+ .phy_id = MARVELL_PHY_ID_88E3082,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E308X/88E609X Family",
+ /* PHY_BASIC_FEATURES */
+ .probe = marvell_probe,
+ .config_init = marvell_config_init,
+ .aneg_done = marvell_aneg_done,
+ .read_status = marvell_read_status,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .cable_test_start = m88e3082_vct_cable_test_start,
+ .cable_test_get_status = m88e3082_vct_cable_test_get_status,
+ },
+ {
.phy_id = MARVELL_PHY_ID_88E1112,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1112",
@@ -3314,6 +3684,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1111",
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.probe = marvell_probe,
.config_init = m88e1111gbe_config_init,
.config_aneg = m88e1111_config_aneg,
@@ -3329,6 +3700,8 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1111_get_tunable,
.set_tunable = m88e1111_set_tunable,
+ .cable_test_start = m88e1111_vct_cable_test_start,
+ .cable_test_get_status = m88e1111_vct_cable_test_get_status,
},
{
.phy_id = MARVELL_PHY_ID_88E1111_FINISAR,
@@ -3422,6 +3795,7 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1145",
/* PHY_GBIT_FEATURES */
+ .flags = PHY_POLL_CABLE_TEST,
.probe = marvell_probe,
.config_init = m88e1145_config_init,
.config_aneg = m88e1101_config_aneg,
@@ -3436,6 +3810,8 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
.get_tunable = m88e1111_get_tunable,
.set_tunable = m88e1111_set_tunable,
+ .cable_test_start = m88e1111_vct_cable_test_start,
+ .cable_test_get_status = m88e1111_vct_cable_test_get_status,
},
{
.phy_id = MARVELL_PHY_ID_88E1149R,
@@ -3610,6 +3986,21 @@ static struct phy_driver marvell_drivers[] = {
.get_stats = marvell_get_stats,
},
{
+ .phy_id = MARVELL_PHY_ID_88E6250_FAMILY,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .name = "Marvell 88E6250 Family",
+ /* PHY_BASIC_FEATURES */
+ .probe = marvell_probe,
+ .aneg_done = marvell_aneg_done,
+ .config_intr = marvell_config_intr,
+ .handle_interrupt = marvell_handle_interrupt,
+ .resume = genphy_resume,
+ .suspend = genphy_suspend,
+ .get_sset_count = marvell_get_sset_count_simple,
+ .get_strings = marvell_get_strings_simple,
+ .get_stats = marvell_get_stats_simple,
+ },
+ {
.phy_id = MARVELL_PHY_ID_88E6341_FAMILY,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E6341 Family",
@@ -3742,6 +4133,7 @@ module_phy_driver(marvell_drivers);
static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1101, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E3082, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1112, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1111, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1111_FINISAR, MARVELL_PHY_ID_MASK },
@@ -3756,6 +4148,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
{ MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E1545, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
+ { MARVELL_PHY_ID_88E6250_FAMILY, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E6341_FAMILY, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E6390_FAMILY, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E6393_FAMILY, MARVELL_PHY_ID_MASK },
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 8b9ead76e40e..7e2f10182c0c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -1375,9 +1375,9 @@ EXPORT_SYMBOL_GPL(mdiobus_c45_modify_changed);
* require calling the devices own match function, since different classes
* of MDIO devices have different match criteria.
*/
-static int mdio_bus_match(struct device *dev, struct device_driver *drv)
+static int mdio_bus_match(struct device *dev, const struct device_driver *drv)
{
- struct mdio_driver *mdiodrv = to_mdio_driver(drv);
+ const struct mdio_driver *mdiodrv = to_mdio_driver(drv);
struct mdio_device *mdio = to_mdio_device(dev);
/* Both the driver and device must type-match */
diff --git a/drivers/net/phy/mdio_device.c b/drivers/net/phy/mdio_device.c
index 73f6539b9e50..e747ee63c665 100644
--- a/drivers/net/phy/mdio_device.c
+++ b/drivers/net/phy/mdio_device.c
@@ -35,10 +35,10 @@ static void mdio_device_release(struct device *dev)
kfree(to_mdio_device(dev));
}
-int mdio_device_bus_match(struct device *dev, struct device_driver *drv)
+int mdio_device_bus_match(struct device *dev, const struct device_driver *drv)
{
struct mdio_device *mdiodev = to_mdio_device(dev);
- struct mdio_driver *mdiodrv = to_mdio_driver(drv);
+ const struct mdio_driver *mdiodrv = to_mdio_driver(drv);
if (mdiodrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY)
return 0;
diff --git a/drivers/net/phy/mediatek-ge.c b/drivers/net/phy/mediatek-ge.c
index a493ae01b267..54ea64a37ab3 100644
--- a/drivers/net/phy/mediatek-ge.c
+++ b/drivers/net/phy/mediatek-ge.c
@@ -23,9 +23,6 @@ static int mtk_gephy_write_page(struct phy_device *phydev, int page)
static void mtk_gephy_config_init(struct phy_device *phydev)
{
- /* Disable EEE */
- phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, 0);
-
/* Enable HW auto downshift */
phy_modify_paged(phydev, MTK_PHY_PAGE_EXTENDED, 0x14, 0, BIT(4));
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index ddb50a0e2bc8..65b0a3115e14 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -167,6 +167,9 @@
#define PTP_CMD_CTL_PTP_LTC_STEP_SEC_ BIT(5)
#define PTP_CMD_CTL_PTP_LTC_STEP_NSEC_ BIT(6)
+#define PTP_COMMON_INT_ENA 0x0204
+#define PTP_COMMON_INT_ENA_GPIO_CAP_EN BIT(2)
+
#define PTP_CLOCK_SET_SEC_HI 0x0205
#define PTP_CLOCK_SET_SEC_MID 0x0206
#define PTP_CLOCK_SET_SEC_LO 0x0207
@@ -179,6 +182,27 @@
#define PTP_CLOCK_READ_NS_HI 0x022C
#define PTP_CLOCK_READ_NS_LO 0x022D
+#define PTP_GPIO_SEL 0x0230
+#define PTP_GPIO_SEL_GPIO_SEL(pin) ((pin) << 8)
+#define PTP_GPIO_CAP_MAP_LO 0x0232
+
+#define PTP_GPIO_CAP_EN 0x0233
+#define PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(gpio) BIT(gpio)
+#define PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(gpio) (BIT(gpio) << 8)
+
+#define PTP_GPIO_RE_LTC_SEC_HI_CAP 0x0235
+#define PTP_GPIO_RE_LTC_SEC_LO_CAP 0x0236
+#define PTP_GPIO_RE_LTC_NS_HI_CAP 0x0237
+#define PTP_GPIO_RE_LTC_NS_LO_CAP 0x0238
+#define PTP_GPIO_FE_LTC_SEC_HI_CAP 0x0239
+#define PTP_GPIO_FE_LTC_SEC_LO_CAP 0x023A
+#define PTP_GPIO_FE_LTC_NS_HI_CAP 0x023B
+#define PTP_GPIO_FE_LTC_NS_LO_CAP 0x023C
+
+#define PTP_GPIO_CAP_STS 0x023D
+#define PTP_GPIO_CAP_STS_PTP_GPIO_RE_STS(gpio) BIT(gpio)
+#define PTP_GPIO_CAP_STS_PTP_GPIO_FE_STS(gpio) (BIT(gpio) << 8)
+
#define PTP_OPERATING_MODE 0x0241
#define PTP_OPERATING_MODE_STANDALONE_ BIT(0)
@@ -272,6 +296,67 @@
#define PS_TO_REG 200
#define FIFO_SIZE 8
+#define LAN8814_PTP_GPIO_NUM 24
+#define LAN8814_PTP_PEROUT_NUM 2
+#define LAN8814_PTP_EXTTS_NUM 3
+
+#define LAN8814_BUFFER_TIME 2
+
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_200MS 13
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100MS 12
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50MS 11
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10MS 10
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5MS 9
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1MS 8
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500US 7
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100US 6
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50US 5
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10US 4
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5US 3
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1US 2
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500NS 1
+#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100NS 0
+
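These 4-bit codes are presumably the values fed into LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET() further below to select an event pulse width. A hypothetical mapping helper under that assumption; the function name and the ns-to-code mapping are illustrative, not taken from this patch:

	/* Hypothetical: map a requested pulse width in ns to the 4-bit
	 * LTC event code; only the widths named above are representable.
	 */
	static int lan8841_ptp_event_width_code(u32 pulse_width_ns)
	{
		switch (pulse_width_ns) {
		case 100:       return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100NS;
		case 500:       return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500NS;
		case 1000:      return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1US;
		case 5000:      return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5US;
		case 10000:     return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10US;
		case 50000:     return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50US;
		case 100000:    return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100US;
		case 500000:    return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500US;
		case 1000000:   return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1MS;
		case 5000000:   return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5MS;
		case 10000000:  return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10MS;
		case 50000000:  return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50MS;
		case 100000000: return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100MS;
		case 200000000: return LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_200MS;
		default:        return -EINVAL;
		}
	}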
+#define LAN8814_GPIO_EN1 0x20
+#define LAN8814_GPIO_EN2 0x21
+#define LAN8814_GPIO_DIR1 0x22
+#define LAN8814_GPIO_DIR2 0x23
+#define LAN8814_GPIO_BUF1 0x24
+#define LAN8814_GPIO_BUF2 0x25
+
+#define LAN8814_GPIO_EN_ADDR(pin) \
+ ((pin) > 15 ? LAN8814_GPIO_EN1 : LAN8814_GPIO_EN2)
+#define LAN8814_GPIO_EN_BIT(pin) BIT(pin)
+#define LAN8814_GPIO_DIR_ADDR(pin) \
+ ((pin) > 15 ? LAN8814_GPIO_DIR1 : LAN8814_GPIO_DIR2)
+#define LAN8814_GPIO_DIR_BIT(pin) BIT(pin)
+#define LAN8814_GPIO_BUF_ADDR(pin) \
+ ((pin) > 15 ? LAN8814_GPIO_BUF1 : LAN8814_GPIO_BUF2)
+#define LAN8814_GPIO_BUF_BIT(pin) BIT(pin)
+
+#define LAN8814_EVENT_A 0
+#define LAN8814_EVENT_B 1
+
+#define LAN8814_PTP_GENERAL_CONFIG 0x0201
+#define LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event) \
+ ((event) ? GENMASK(11, 8) : GENMASK(7, 4))
+#define LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, value) \
+ (((value) & GENMASK(3, 0)) << (4 + ((event) << 2)))
+#define LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event) \
+ ((event) ? BIT(2) : BIT(0))
+#define LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event) \
+ ((event) ? BIT(3) : BIT(1))
+
+#define LAN8814_PTP_CLOCK_TARGET_SEC_HI(event) ((event) ? 0x21F : 0x215)
+#define LAN8814_PTP_CLOCK_TARGET_SEC_LO(event) ((event) ? 0x220 : 0x216)
+#define LAN8814_PTP_CLOCK_TARGET_NS_HI(event) ((event) ? 0x221 : 0x217)
+#define LAN8814_PTP_CLOCK_TARGET_NS_LO(event) ((event) ? 0x222 : 0x218)
+
+#define LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_HI(event) ((event) ? 0x223 : 0x219)
+#define LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_LO(event) ((event) ? 0x224 : 0x21A)
+#define LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_HI(event) ((event) ? 0x225 : 0x21B)
+#define LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_LO(event) ((event) ? 0x226 : 0x21C)
+
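A sketch of how an EVENT_A/EVENT_B target time could be programmed through these registers, assuming each register is 16 bits wide and reusing the lanphy_write_page_reg() helper visible later in this patch; the function name is illustrative:

	/* Sketch (assumption): split the target time across the four
	 * 16-bit page-4 registers defined above.
	 */
	static void lan8814_ptp_set_target_time(struct phy_device *phydev,
						int event, u32 sec, u32 nsec)
	{
		lanphy_write_page_reg(phydev, 4,
				      LAN8814_PTP_CLOCK_TARGET_SEC_HI(event),
				      upper_16_bits(sec));
		lanphy_write_page_reg(phydev, 4,
				      LAN8814_PTP_CLOCK_TARGET_SEC_LO(event),
				      lower_16_bits(sec));
		lanphy_write_page_reg(phydev, 4,
				      LAN8814_PTP_CLOCK_TARGET_NS_HI(event),
				      upper_16_bits(nsec));
		lanphy_write_page_reg(phydev, 4,
				      LAN8814_PTP_CLOCK_TARGET_NS_LO(event),
				      lower_16_bits(nsec));
	}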
/* Delay used to get the second part from the LTC */
#define LAN8841_GET_SEC_LTC_DELAY (500 * NSEC_PER_MSEC)
@@ -304,13 +389,9 @@ struct lan8814_shared_priv {
struct phy_device *phydev;
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
+ struct ptp_pin_desc *pin_config;
- /* Reference counter to how many ports in the package are enabling the
- * timestamping
- */
- u8 ref;
-
- /* Lock for ptp_clock and ref */
+ /* Lock for ptp_clock */
struct mutex shared_lock;
};
@@ -785,6 +866,17 @@ static int ksz8061_config_init(struct phy_device *phydev)
{
int ret;
+ /* Chip can be powered down by the bootstrap code. */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ if (ret & BMCR_PDOWN) {
+ ret = phy_write(phydev, MII_BMCR, ret & ~BMCR_PDOWN);
+ if (ret < 0)
+ return ret;
+ usleep_range(1000, 2000);
+ }
+
ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
if (ret)
return ret;
@@ -1297,6 +1389,8 @@ static int ksz9131_config_init(struct phy_device *phydev)
const struct device *dev_walker;
int ret;
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+
dev_walker = &phydev->mdio.dev;
do {
of_node = dev_walker->of_node;
@@ -1346,28 +1440,30 @@ static int ksz9131_config_init(struct phy_device *phydev)
#define MII_KSZ9131_AUTO_MDIX 0x1C
#define MII_KSZ9131_AUTO_MDI_SET BIT(7)
#define MII_KSZ9131_AUTO_MDIX_SWAP_OFF BIT(6)
+#define MII_KSZ9131_DIG_AXAN_STS 0x14
+#define MII_KSZ9131_DIG_AXAN_STS_LINK_DET BIT(14)
+#define MII_KSZ9131_DIG_AXAN_STS_A_SELECT BIT(12)
static int ksz9131_mdix_update(struct phy_device *phydev)
{
int ret;
- ret = phy_read(phydev, MII_KSZ9131_AUTO_MDIX);
- if (ret < 0)
- return ret;
-
- if (ret & MII_KSZ9131_AUTO_MDIX_SWAP_OFF) {
- if (ret & MII_KSZ9131_AUTO_MDI_SET)
- phydev->mdix_ctrl = ETH_TP_MDI;
- else
- phydev->mdix_ctrl = ETH_TP_MDI_X;
+ if (phydev->mdix_ctrl != ETH_TP_MDI_AUTO) {
+ phydev->mdix = phydev->mdix_ctrl;
} else {
- phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
- }
+ ret = phy_read(phydev, MII_KSZ9131_DIG_AXAN_STS);
+ if (ret < 0)
+ return ret;
- if (ret & MII_KSZ9131_AUTO_MDI_SET)
- phydev->mdix = ETH_TP_MDI;
- else
- phydev->mdix = ETH_TP_MDI_X;
+ if (ret & MII_KSZ9131_DIG_AXAN_STS_LINK_DET) {
+ if (ret & MII_KSZ9131_DIG_AXAN_STS_A_SELECT)
+ phydev->mdix = ETH_TP_MDI;
+ else
+ phydev->mdix = ETH_TP_MDI_X;
+ } else {
+ phydev->mdix = ETH_TP_MDI_INVALID;
+ }
+ }
return 0;
}
@@ -1858,7 +1954,7 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
{0x1c, 0x20, 0xeeee},
};
-static int ksz9477_config_init(struct phy_device *phydev)
+static int ksz9477_phy_errata(struct phy_device *phydev)
{
int err;
int i;
@@ -1886,16 +1982,30 @@ static int ksz9477_config_init(struct phy_device *phydev)
return err;
}
+ err = genphy_restart_aneg(phydev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int ksz9477_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ /* Only KSZ9897 family of switches needs this fix. */
+ if ((phydev->phy_id & 0xf) == 1) {
+ err = ksz9477_phy_errata(phydev);
+ if (err)
+ return err;
+ }
+
/* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
* in this switch shall be regarded as broken.
*/
if (phydev->dev_flags & MICREL_NO_EEE)
phydev->eee_broken_modes = -1;
- err = genphy_restart_aneg(phydev);
- if (err)
- return err;
-
return kszphy_config_init(phydev);
}
@@ -2004,6 +2114,71 @@ static int kszphy_resume(struct phy_device *phydev)
return 0;
}
+static int ksz9477_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* No need to initialize registers if not powered down. */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ if (!(ret & BMCR_PDOWN))
+ return 0;
+
+ genphy_resume(phydev);
+
+ /* After switching from power-down to normal mode, an internal global
+ * reset is automatically generated. Wait a minimum of 1 ms before
+ * read/write access to the PHY registers.
+ */
+ usleep_range(1000, 2000);
+
+ /* Only KSZ9897 family of switches needs this fix. */
+ if ((phydev->phy_id & 0xf) == 1) {
+ ret = ksz9477_phy_errata(phydev);
+ if (ret)
+ return ret;
+ }
+
+ /* Enable PHY Interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ if (phydev->drv->config_intr)
+ phydev->drv->config_intr(phydev);
+ }
+
+ return 0;
+}
+
+static int ksz8061_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* This function can be called twice when the Ethernet device is on. */
+ ret = phy_read(phydev, MII_BMCR);
+ if (ret < 0)
+ return ret;
+ if (!(ret & BMCR_PDOWN))
+ return 0;
+
+ genphy_resume(phydev);
+ usleep_range(1000, 2000);
+
+ /* Re-program the value after chip is reset. */
+ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+ if (ret)
+ return ret;
+
+ /* Enable PHY Interrupts */
+ if (phy_interrupt_is_valid(phydev)) {
+ phydev->interrupts = PHY_INTERRUPT_ENABLED;
+ if (phydev->drv->config_intr)
+ phydev->drv->config_intr(phydev);
+ }
+
+ return 0;
+}
+
static int kszphy_probe(struct phy_device *phydev)
{
const struct kszphy_type *type = phydev->drv->driver_data;
@@ -2381,7 +2556,7 @@ static void lan8814_ptp_tx_ts_get(struct phy_device *phydev,
*seq_id = lanphy_read_page_reg(phydev, 5, PTP_TX_MSG_HEADER2);
}
-static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct ethtool_ts_info *info)
+static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct kernel_ethtool_ts_info *info)
{
struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
struct phy_device *phydev = ptp_priv->phydev;
@@ -2426,8 +2601,6 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
{
struct kszphy_ptp_priv *ptp_priv =
container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
- struct phy_device *phydev = ptp_priv->phydev;
- struct lan8814_shared_priv *shared = phydev->shared->priv;
struct lan8814_ptp_rx_ts *rx_ts, *tmp;
int txcfg = 0, rxcfg = 0;
int pkt_ts_enable;
@@ -2492,20 +2665,6 @@ static int lan8814_hwtstamp(struct mii_timestamper *mii_ts,
else
lan8814_config_ts_intr(ptp_priv->phydev, false);
- mutex_lock(&shared->shared_lock);
- if (config->rx_filter != HWTSTAMP_FILTER_NONE)
- shared->ref++;
- else
- shared->ref--;
-
- if (shared->ref)
- lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL,
- PTP_CMD_CTL_PTP_ENABLE_);
- else
- lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL,
- PTP_CMD_CTL_PTP_DISABLE_);
- mutex_unlock(&shared->shared_lock);
-
/* In case of multiple starts and stops, these need to be cleared */
list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) {
list_del(&rx_ts->list);
@@ -2677,6 +2836,29 @@ static int lan8814_ptpci_settime64(struct ptp_clock_info *ptpci,
return 0;
}
+static void lan8814_ptp_set_target(struct phy_device *phydev, int event,
+ s64 start_sec, u32 start_nsec)
+{
+ /* Set the start time */
+ lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_LO(event),
+ lower_16_bits(start_sec));
+ lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_HI(event),
+ upper_16_bits(start_sec));
+
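+ /* Only the low 14 bits of the high nanoseconds word are valid */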
+ lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_LO(event),
+ lower_16_bits(start_nsec));
+ lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_HI(event),
+ upper_16_bits(start_nsec) & 0x3fff);
+}
+
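+/* After the LTC is stepped, move both event targets past the new time so
+ * that a configured periodic output does not fire on a target that is
+ * now in the past.
+ */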
+static void lan8814_ptp_update_target(struct phy_device *phydev, time64_t sec)
+{
+ lan8814_ptp_set_target(phydev, LAN8814_EVENT_A,
+ sec + LAN8814_BUFFER_TIME, 0);
+ lan8814_ptp_set_target(phydev, LAN8814_EVENT_B,
+ sec + LAN8814_BUFFER_TIME, 0);
+}
+
static void lan8814_ptp_clock_step(struct phy_device *phydev,
s64 time_step_ns)
{
@@ -2698,6 +2880,7 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
nano_seconds -= 1000000000;
}
lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
+ lan8814_ptp_update_target(phydev, set_seconds);
return;
} else if (time_step_ns < -15000000000LL) {
/* convert to clock set */
@@ -2713,6 +2896,7 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
}
nano_seconds -= nano_seconds_step;
lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
+ lan8814_ptp_update_target(phydev, set_seconds);
return;
}
@@ -2749,6 +2933,8 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
}
while (seconds) {
+ u32 nsec;
+
if (seconds > 0) {
u32 adjustment_value = (u32)seconds;
u16 adjustment_value_lo, adjustment_value_hi;
@@ -2765,6 +2951,10 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
PTP_LTC_STEP_ADJ_DIR_ |
adjustment_value_hi);
seconds -= ((s32)adjustment_value);
+
+ lan8814_ptp_clock_get(phydev, &set_seconds, &nsec);
+ set_seconds -= adjustment_value;
+ lan8814_ptp_update_target(phydev, set_seconds);
} else {
u32 adjustment_value = (u32)(-seconds);
u16 adjustment_value_lo, adjustment_value_hi;
@@ -2780,6 +2970,10 @@ static void lan8814_ptp_clock_step(struct phy_device *phydev,
lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
adjustment_value_hi);
seconds += ((s32)adjustment_value);
+
+ lan8814_ptp_clock_get(phydev, &set_seconds, &nsec);
+ set_seconds += adjustment_value;
+ lan8814_ptp_update_target(phydev, set_seconds);
}
lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
@@ -2845,6 +3039,335 @@ static int lan8814_ptpci_adjfine(struct ptp_clock_info *ptpci, long scaled_ppm)
return 0;
}
+static void lan8814_ptp_set_reload(struct phy_device *phydev, int event,
+ s64 period_sec, u32 period_nsec)
+{
+ lanphy_write_page_reg(phydev, 4,
+ LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_LO(event),
+ lower_16_bits(period_sec));
+ lanphy_write_page_reg(phydev, 4,
+ LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_HI(event),
+ upper_16_bits(period_sec));
+
+ lanphy_write_page_reg(phydev, 4,
+ LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_LO(event),
+ lower_16_bits(period_nsec));
+ lanphy_write_page_reg(phydev, 4,
+ LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_HI(event),
+ upper_16_bits(period_nsec) & 0x3fff);
+}
+
+static void lan8814_ptp_enable_event(struct phy_device *phydev, int event,
+ int pulse_width)
+{
+ u16 val;
+
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
+ /* Set the pulse width of the event */
+ val &= ~(LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event));
+ /* Make sure that the target clock will be incremented each time the
+ * local time reaches or passes it
+ */
+ val |= LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width);
+ val &= ~(LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event));
+ /* Set the polarity high */
+ val |= LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event);
+ lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
+}
+
+static void lan8814_ptp_disable_event(struct phy_device *phydev, int event)
+{
+ u16 val;
+
+ /* Set the target so far in the future that it is effectively disabled */
+ lan8814_ptp_set_target(phydev, event, 0xFFFFFFFF, 0);
+
+ /* And then reload once the clock reaches the target */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
+ val |= LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event);
+ lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
+}
+
+static void lan8814_ptp_perout_off(struct phy_device *phydev, int pin)
+{
+ u16 val;
+
+ /* Disable gpio alternate function,
+ * 1: select as gpio,
+ * 0: select alt func
+ */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
+ val |= LAN8814_GPIO_EN_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
+
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
+ val &= ~LAN8814_GPIO_DIR_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
+
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
+ val &= ~LAN8814_GPIO_BUF_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
+}
+
+static void lan8814_ptp_perout_on(struct phy_device *phydev, int pin)
+{
+ int val;
+
+ /* Set as gpio output */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
+ val |= LAN8814_GPIO_DIR_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
+
+ /* Select the alternate function, 0: alt func, 1: gpio */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
+ val &= ~LAN8814_GPIO_EN_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
+
+ /* Set buffer type to push pull */
+ val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
+ val |= LAN8814_GPIO_BUF_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
+}
+
+static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+ struct timespec64 ts_on, ts_period;
+ s64 on_nsec, period_nsec;
+ int pulse_width;
+ int pin, event;
+
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags & ~PTP_PEROUT_DUTY_CYCLE)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&shared->shared_lock);
+ event = rq->perout.index;
+ pin = ptp_find_pin(shared->ptp_clock, PTP_PF_PEROUT, event);
+ if (pin < 0 || pin >= LAN8814_PTP_PEROUT_NUM) {
+ mutex_unlock(&shared->shared_lock);
+ return -EBUSY;
+ }
+
+ if (!on) {
+ lan8814_ptp_perout_off(phydev, pin);
+ lan8814_ptp_disable_event(phydev, event);
+ mutex_unlock(&shared->shared_lock);
+ return 0;
+ }
+
+ ts_on.tv_sec = rq->perout.on.sec;
+ ts_on.tv_nsec = rq->perout.on.nsec;
+ on_nsec = timespec64_to_ns(&ts_on);
+
+ ts_period.tv_sec = rq->perout.period.sec;
+ ts_period.tv_nsec = rq->perout.period.nsec;
+ period_nsec = timespec64_to_ns(&ts_period);
+
+ if (period_nsec < 200) {
+ pr_warn_ratelimited("%s: perout period too small, minimum is 200 nsec\n",
+ phydev_name(phydev));
+ mutex_unlock(&shared->shared_lock);
+ return -EOPNOTSUPP;
+ }
+
+ if (on_nsec >= period_nsec) {
+ pr_warn_ratelimited("%s: pulse width must be smaller than period\n",
+ phydev_name(phydev));
+ mutex_unlock(&shared->shared_lock);
+ return -EINVAL;
+ }
+
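+ /* The hardware supports only a fixed set of pulse widths; map the
+ * requested duty cycle onto one of them, falling back to 100 ns
+ * for anything else.
+ */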
+ switch (on_nsec) {
+ case 200000000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_200MS;
+ break;
+ case 100000000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100MS;
+ break;
+ case 50000000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50MS;
+ break;
+ case 10000000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10MS;
+ break;
+ case 5000000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5MS;
+ break;
+ case 1000000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1MS;
+ break;
+ case 500000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500US;
+ break;
+ case 100000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100US;
+ break;
+ case 50000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50US;
+ break;
+ case 10000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10US;
+ break;
+ case 5000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5US;
+ break;
+ case 1000:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1US;
+ break;
+ case 500:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500NS;
+ break;
+ case 100:
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100NS;
+ break;
+ default:
+ pr_warn_ratelimited("%s: Use default duty cycle of 100ns\n",
+ phydev_name(phydev));
+ pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100NS;
+ break;
+ }
+
+ /* Configure to pulse every period */
+ lan8814_ptp_enable_event(phydev, event, pulse_width);
+ lan8814_ptp_set_target(phydev, event, rq->perout.start.sec,
+ rq->perout.start.nsec);
+ lan8814_ptp_set_reload(phydev, event, rq->perout.period.sec,
+ rq->perout.period.nsec);
+ lan8814_ptp_perout_on(phydev, pin);
+ mutex_unlock(&shared->shared_lock);
+
+ return 0;
+}
+
+static void lan8814_ptp_extts_on(struct phy_device *phydev, int pin, u32 flags)
+{
+ u16 tmp;
+
+ /* Set as gpio input */
+ tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
+ tmp &= ~LAN8814_GPIO_DIR_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
+
+ /* Map the pin to ltc pin 0 of the capture map registers */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
+ tmp |= pin;
+ lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
+
+ /* Enable capture on the edges of the ltc pin */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
+ if (flags & PTP_RISING_EDGE)
+ tmp |= PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0);
+ if (flags & PTP_FALLING_EDGE)
+ tmp |= PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0);
+ lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
+
+ /* Enable the gpio capture event in the PTP common interrupt enable */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
+ tmp |= PTP_COMMON_INT_ENA_GPIO_CAP_EN;
+ lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
+}
+
+static void lan8814_ptp_extts_off(struct phy_device *phydev, int pin)
+{
+ u16 tmp;
+
+ /* Set as gpio out */
+ tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
+ tmp |= LAN8814_GPIO_DIR_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
+
+ /* Select the alternate function, 0: alt func, 1: gpio */
+ tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
+ tmp &= ~LAN8814_GPIO_EN_BIT(pin);
+ lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), tmp);
+
+ /* Clear the mapping of the pin to ltc pin 0 in the capture map registers */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
+ tmp &= ~GENMASK(3, 0);
+ lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
+
+ /* Disable capture on both of the edges */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
+ tmp &= ~PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(pin);
+ tmp &= ~PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(pin);
+ lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
+
+ /* Disable the gpio capture event in the PTP common interrupt enable */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
+ tmp &= ~PTP_COMMON_INT_ENA_GPIO_CAP_EN;
+ lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
+}
+
+static int lan8814_ptp_extts(struct ptp_clock_info *ptpci,
+ struct ptp_clock_request *rq, int on)
+{
+ struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
+ ptp_clock_info);
+ struct phy_device *phydev = shared->phydev;
+ int pin;
+
+ if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
+ PTP_EXTTS_EDGES |
+ PTP_STRICT_FLAGS))
+ return -EOPNOTSUPP;
+
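+ /* External timestamping is only wired to the dedicated extts pin */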
+ pin = ptp_find_pin(shared->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin == -1 || pin != LAN8814_PTP_EXTTS_NUM)
+ return -EINVAL;
+
+ mutex_lock(&shared->shared_lock);
+ if (on)
+ lan8814_ptp_extts_on(phydev, pin, rq->extts.flags);
+ else
+ lan8814_ptp_extts_off(phydev, pin);
+
+ mutex_unlock(&shared->shared_lock);
+
+ return 0;
+}
+
+static int lan8814_ptpci_enable(struct ptp_clock_info *ptpci,
+ struct ptp_clock_request *rq, int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ return lan8814_ptp_perout(ptpci, rq, on);
+ case PTP_CLK_REQ_EXTTS:
+ return lan8814_ptp_extts(ptpci, rq, on);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int lan8814_ptpci_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_PEROUT:
+ /* Only pins 0 and 1 can generate perout signals: pin 0 supports
+ * only chan 0 (event A) and pin 1 only chan 1 (event B)
+ */
+ if (pin >= LAN8814_PTP_PEROUT_NUM || pin != chan)
+ return -1;
+ break;
+ case PTP_PF_EXTTS:
+ if (pin != LAN8814_PTP_EXTTS_NUM)
+ return -1;
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
static bool lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
{
struct ptp_header *ptp_header;
@@ -3010,6 +3533,64 @@ static void lan8814_handle_ptp_interrupt(struct phy_device *phydev, u16 status)
}
}
+static int lan8814_gpio_process_cap(struct lan8814_shared_priv *shared)
+{
+ struct phy_device *phydev = shared->phydev;
+ struct ptp_clock_event ptp_event = {0};
+ unsigned long nsec;
+ s64 sec;
+ u16 tmp;
+
+ /* Select ltc gpio pin 0: whichever gpio was the input, it was
+ * mapped to ltc pin 0 when capture was enabled
+ */
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_SEL);
+ tmp |= PTP_GPIO_SEL_GPIO_SEL(0);
+ lanphy_write_page_reg(phydev, 4, PTP_GPIO_SEL, tmp);
+
+ tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_STS);
+ if (!(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_RE_STS(0)) &&
+ !(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_FE_STS(0)))
+ return -1;
+
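+ /* BIT(0) is the rising edge status of ltc pin 0; read the capture
+ * registers of whichever edge was latched.
+ */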
+ if (tmp & BIT(0)) {
+ sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_HI_CAP);
+ sec <<= 16;
+ sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_LO_CAP);
+
+ nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_HI_CAP) & 0x3fff;
+ nsec <<= 16;
+ nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP);
+ } else {
+ sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_HI_CAP);
+ sec <<= 16;
+ sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_LO_CAP);
+
+ nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_NS_HI_CAP) & 0x3fff;
+ nsec <<= 16;
+ nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_NS_LO_CAP);
+ }
+
+ ptp_event.index = 0;
+ ptp_event.timestamp = ktime_set(sec, nsec);
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_clock_event(shared->ptp_clock, &ptp_event);
+
+ return 0;
+}
+
+static int lan8814_handle_gpio_interrupt(struct phy_device *phydev, u16 status)
+{
+ struct lan8814_shared_priv *shared = phydev->shared->priv;
+ int ret;
+
+ mutex_lock(&shared->shared_lock);
+ ret = lan8814_gpio_process_cap(shared);
+ mutex_unlock(&shared->shared_lock);
+
+ return ret;
+}
+
static int lan8804_config_init(struct phy_device *phydev)
{
int val;
@@ -3114,6 +3695,9 @@ static irqreturn_t lan8814_handle_interrupt(struct phy_device *phydev)
ret = IRQ_HANDLED;
}
+ if (!lan8814_handle_gpio_interrupt(phydev, irq_status))
+ ret = IRQ_HANDLED;
+
return ret;
}
@@ -3201,6 +3785,9 @@ static void lan8814_ptp_init(struct phy_device *phydev)
ptp_priv->mii_ts.ts_info = lan8814_ts_info;
phydev->mii_ts = &ptp_priv->mii_ts;
+
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
}
static int lan8814_ptp_probe_once(struct phy_device *phydev)
@@ -3210,19 +3797,39 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
/* Initialise shared lock for clock */
mutex_init(&shared->shared_lock);
+ shared->pin_config = devm_kmalloc_array(&phydev->mdio.dev,
+ LAN8814_PTP_GPIO_NUM,
+ sizeof(*shared->pin_config),
+ GFP_KERNEL);
+ if (!shared->pin_config)
+ return -ENOMEM;
+
+ for (int i = 0; i < LAN8814_PTP_GPIO_NUM; i++) {
+ struct ptp_pin_desc *ptp_pin = &shared->pin_config[i];
+
+ memset(ptp_pin, 0, sizeof(*ptp_pin));
+ snprintf(ptp_pin->name,
+ sizeof(ptp_pin->name), "lan8814_ptp_pin_%02d", i);
+ ptp_pin->index = i;
+ ptp_pin->func = PTP_PF_NONE;
+ }
+
shared->ptp_clock_info.owner = THIS_MODULE;
snprintf(shared->ptp_clock_info.name, 30, "%s", phydev->drv->name);
shared->ptp_clock_info.max_adj = 31249999;
shared->ptp_clock_info.n_alarm = 0;
- shared->ptp_clock_info.n_ext_ts = 0;
- shared->ptp_clock_info.n_pins = 0;
+ shared->ptp_clock_info.n_ext_ts = LAN8814_PTP_EXTTS_NUM;
+ shared->ptp_clock_info.n_pins = LAN8814_PTP_GPIO_NUM;
shared->ptp_clock_info.pps = 0;
- shared->ptp_clock_info.pin_config = NULL;
+ shared->ptp_clock_info.pin_config = shared->pin_config;
+ shared->ptp_clock_info.n_per_out = LAN8814_PTP_PEROUT_NUM;
shared->ptp_clock_info.adjfine = lan8814_ptpci_adjfine;
shared->ptp_clock_info.adjtime = lan8814_ptpci_adjtime;
shared->ptp_clock_info.gettime64 = lan8814_ptpci_gettime64;
shared->ptp_clock_info.settime64 = lan8814_ptpci_settime64;
shared->ptp_clock_info.getcrosststamp = NULL;
+ shared->ptp_clock_info.enable = lan8814_ptpci_enable;
+ shared->ptp_clock_info.verify = lan8814_ptpci_verify;
shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info,
&phydev->mdio.dev);
@@ -3247,6 +3854,9 @@ static int lan8814_ptp_probe_once(struct phy_device *phydev)
lanphy_write_page_reg(phydev, 4, PTP_OPERATING_MODE,
PTP_OPERATING_MODE_STANDALONE_);
+ /* Enable ptp to run LTC clock for ptp and gpio 1PPS operation */
+ lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_);
+
return 0;
}
@@ -3516,7 +4126,7 @@ static int lan8841_config_intr(struct phy_device *phydev)
if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
err = phy_read(phydev, LAN8814_INTS);
- if (err)
+ if (err < 0)
return err;
/* Enable / disable interrupts. It is OK to enable PTP interrupt
@@ -3532,6 +4142,14 @@ static int lan8841_config_intr(struct phy_device *phydev)
return err;
err = phy_read(phydev, LAN8814_INTS);
+ if (err < 0)
+ return err;
+
+ /* A positive value is not an error, it merely reflects the
+ * interrupt status that was read. Clear it so that no error is
+ * reported.
+ */
+ err = 0;
}
return err;
@@ -3703,7 +4321,7 @@ static irqreturn_t lan8841_handle_interrupt(struct phy_device *phydev)
}
static int lan8841_ts_info(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct kszphy_ptp_priv *ptp_priv;
@@ -4668,6 +5286,9 @@ static int lan8841_probe(struct phy_device *phydev)
phydev->mii_ts = &ptp_priv->mii_ts;
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
+
return 0;
}
@@ -4676,7 +5297,8 @@ static int lan8841_suspend(struct phy_device *phydev)
struct kszphy_priv *priv = phydev->priv;
struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
- ptp_cancel_worker_sync(ptp_priv->ptp_clock);
+ if (ptp_priv->ptp_clock)
+ ptp_cancel_worker_sync(ptp_priv->ptp_clock);
return genphy_suspend(phydev);
}
@@ -4813,10 +5435,11 @@ static struct phy_driver ksphy_driver[] = {
/* PHY_BASIC_FEATURES */
.probe = kszphy_probe,
.config_init = ksz8061_config_init,
+ .soft_reset = genphy_soft_reset,
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.suspend = kszphy_suspend,
- .resume = kszphy_resume,
+ .resume = ksz8061_resume,
}, {
.phy_id = PHY_ID_KSZ9021,
.phy_id_mask = 0x000ffffe,
@@ -4970,7 +5593,7 @@ static struct phy_driver ksphy_driver[] = {
.config_intr = kszphy_config_intr,
.handle_interrupt = kszphy_handle_interrupt,
.suspend = genphy_suspend,
- .resume = genphy_resume,
+ .resume = ksz9477_resume,
.get_features = ksz9477_get_features,
} };
@@ -4994,6 +5617,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ8081, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
+ { PHY_ID_KSZ9477, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8841, MICREL_PHY_ID_MASK },
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
index 0b88635f4fbc..d3273bc0da4a 100644
--- a/drivers/net/phy/microchip.c
+++ b/drivers/net/phy/microchip.c
@@ -12,8 +12,14 @@
#include <linux/of.h>
#include <dt-bindings/net/microchip-lan78xx.h>
+#define PHY_ID_LAN937X_TX 0x0007c190
+
+#define LAN937X_MODE_CTRL_STATUS_REG 0x11
+#define LAN937X_AUTOMDIX_EN BIT(7)
+#define LAN937X_MDI_MODE BIT(6)
+
#define DRIVER_AUTHOR "WOOJUNG HUH <woojung.huh@microchip.com>"
-#define DRIVER_DESC "Microchip LAN88XX PHY driver"
+#define DRIVER_DESC "Microchip LAN88XX/LAN937X TX PHY driver"
struct lan88xx_priv {
int chip_id;
@@ -373,6 +379,115 @@ static void lan88xx_link_change_notify(struct phy_device *phydev)
}
}
+/**
+ * lan937x_tx_read_mdix_status - Read the MDIX status for the LAN937x TX PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * This function reads the MDIX status of the LAN937x TX PHY and sets the
+ * mdix_ctrl and mdix fields of the phy_device structure accordingly.
+ * Note that MDIX status is not supported in AUTO mode, and will be set
+ * to invalid in such cases.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+static int lan937x_tx_read_mdix_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, LAN937X_MODE_CTRL_STATUS_REG);
+ if (ret < 0)
+ return ret;
+
+ if (ret & LAN937X_AUTOMDIX_EN) {
+ phydev->mdix_ctrl = ETH_TP_MDI_AUTO;
+ /* MDI/MDIX status is unknown */
+ phydev->mdix = ETH_TP_MDI_INVALID;
+ } else if (ret & LAN937X_MDI_MODE) {
+ phydev->mdix_ctrl = ETH_TP_MDI_X;
+ phydev->mdix = ETH_TP_MDI_X;
+ } else {
+ phydev->mdix_ctrl = ETH_TP_MDI;
+ phydev->mdix = ETH_TP_MDI;
+ }
+
+ return 0;
+}
+
+/**
+ * lan937x_tx_read_status - Read the status for the LAN937x TX PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * This function reads the status of the LAN937x TX PHY and updates the
+ * phy_device structure accordingly.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+static int lan937x_tx_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ return lan937x_tx_read_mdix_status(phydev);
+}
+
+/**
+ * lan937x_tx_set_mdix - Set the MDIX mode for the LAN937x TX PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * This function configures the MDIX mode of the LAN937x TX PHY based on the
+ * mdix_ctrl field of the phy_device structure. The MDIX mode can be set to
+ * MDI (straight-through), MDIX (crossover), or AUTO (auto-MDIX). If the mode
+ * is not recognized, it returns 0 without making any changes.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+static int lan937x_tx_set_mdix(struct phy_device *phydev)
+{
+ u16 val;
+
+ switch (phydev->mdix_ctrl) {
+ case ETH_TP_MDI:
+ val = 0;
+ break;
+ case ETH_TP_MDI_X:
+ val = LAN937X_MDI_MODE;
+ break;
+ case ETH_TP_MDI_AUTO:
+ val = LAN937X_AUTOMDIX_EN;
+ break;
+ default:
+ return 0;
+ }
+
+ return phy_modify(phydev, LAN937X_MODE_CTRL_STATUS_REG,
+ LAN937X_AUTOMDIX_EN | LAN937X_MDI_MODE, val);
+}
+
+/**
+ * lan937x_tx_config_aneg - Configure auto-negotiation and fixed modes for the
+ * LAN937x TX PHY.
+ * @phydev: Pointer to the phy_device structure.
+ *
+ * This function configures the MDIX mode for the LAN937x TX PHY and then
+ * proceeds to configure the auto-negotiation or fixed mode settings
+ * based on the phy_device structure.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+static int lan937x_tx_config_aneg(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = lan937x_tx_set_mdix(phydev);
+ if (ret < 0)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
static struct phy_driver microchip_phy_driver[] = {
{
.phy_id = 0x0007c132,
@@ -400,12 +515,21 @@ static struct phy_driver microchip_phy_driver[] = {
.set_wol = lan88xx_set_wol,
.read_page = lan88xx_read_page,
.write_page = lan88xx_write_page,
+},
+{
+ PHY_ID_MATCH_MODEL(PHY_ID_LAN937X_TX),
+ .name = "Microchip LAN937x TX",
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .config_aneg = lan937x_tx_config_aneg,
+ .read_status = lan937x_tx_read_status,
} };
module_phy_driver(microchip_phy_driver);
static struct mdio_device_id __maybe_unused microchip_tbl[] = {
{ 0x0007c132, 0xfffffff2 },
+ { PHY_ID_MATCH_MODEL(PHY_ID_LAN937X_TX) },
{ }
};
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index a838b61cd844..a35528497a57 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -748,7 +748,7 @@ static int lan87xx_cable_test_report(struct phy_device *phydev)
ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
lan87xx_cable_test_report_trans(detect));
- return 0;
+ return phy_init_hw(phydev);
}
static int lan87xx_cable_test_get_status(struct phy_device *phydev,
diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
index eb0b032cb613..c1ddae36a2ae 100644
--- a/drivers/net/phy/mscc/mscc_ptp.c
+++ b/drivers/net/phy/mscc/mscc_ptp.c
@@ -1134,7 +1134,7 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
}
static int vsc85xx_ts_info(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *info)
+ struct kernel_ethtool_ts_info *info)
{
struct vsc8531_private *vsc8531 =
container_of(mii_ts, struct vsc8531_private, mii_ts);
@@ -1570,6 +1570,9 @@ int vsc8584_ptp_probe(struct phy_device *phydev)
return PTR_ERR(vsc8531->load_save);
}
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
+
vsc8531->ptp->phydev = phydev;
return 0;
diff --git a/drivers/net/phy/mxl-gpy.c b/drivers/net/phy/mxl-gpy.c
index b2d36a3a96f1..e5f8ac4b4604 100644
--- a/drivers/net/phy/mxl-gpy.c
+++ b/drivers/net/phy/mxl-gpy.c
@@ -107,6 +107,7 @@ struct gpy_priv {
u8 fw_major;
u8 fw_minor;
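+ /* Cached ethtool WoL options, also used to keep the matching
+ * interrupt bits enabled when the interrupt mask is rewritten.
+ */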
+ u32 wolopts;
/* It takes 3 seconds to fully switch out of loopback mode before
* it can safely re-enter loopback mode. Record the time when
@@ -221,6 +222,15 @@ static int gpy_hwmon_register(struct phy_device *phydev)
}
#endif
+static int gpy_ack_interrupt(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Clear all pending interrupts */
+ ret = phy_read(phydev, PHY_ISTAT);
+ return ret < 0 ? ret : 0;
+}
+
static int gpy_mbox_read(struct phy_device *phydev, u32 addr)
{
struct gpy_priv *priv = phydev->priv;
@@ -262,16 +272,8 @@ out:
static int gpy_config_init(struct phy_device *phydev)
{
- int ret;
-
- /* Mask all interrupts */
- ret = phy_write(phydev, PHY_IMASK, 0);
- if (ret)
- return ret;
-
- /* Clear all pending interrupts */
- ret = phy_read(phydev, PHY_ISTAT);
- return ret < 0 ? ret : 0;
+ /* Nothing to configure. Configuration Requirement Placeholder */
+ return 0;
}
static int gpy21x_config_init(struct phy_device *phydev)
@@ -627,11 +629,23 @@ static int gpy_read_status(struct phy_device *phydev)
static int gpy_config_intr(struct phy_device *phydev)
{
+ struct gpy_priv *priv = phydev->priv;
u16 mask = 0;
+ int ret;
+
+ ret = gpy_ack_interrupt(phydev);
+ if (ret)
+ return ret;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
mask = PHY_IMASK_MASK;
+ if (priv->wolopts & WAKE_MAGIC)
+ mask |= PHY_IMASK_WOL;
+
+ if (priv->wolopts & WAKE_PHY)
+ mask |= PHY_IMASK_LSTC;
+
return phy_write(phydev, PHY_IMASK, mask);
}
@@ -678,6 +692,7 @@ static int gpy_set_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
struct net_device *attach_dev = phydev->attached_dev;
+ struct gpy_priv *priv = phydev->priv;
int ret;
if (wol->wolopts & WAKE_MAGIC) {
@@ -725,6 +740,8 @@ static int gpy_set_wol(struct phy_device *phydev,
ret = phy_read(phydev, PHY_ISTAT);
if (ret < 0)
return ret;
+
+ priv->wolopts |= WAKE_MAGIC;
} else {
/* Disable magic packet matching */
ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2,
@@ -732,6 +749,13 @@ static int gpy_set_wol(struct phy_device *phydev,
WOL_EN);
if (ret < 0)
return ret;
+
+ /* Disable the WOL interrupt */
+ ret = phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_WOL);
+ if (ret < 0)
+ return ret;
+
+ priv->wolopts &= ~WAKE_MAGIC;
}
if (wol->wolopts & WAKE_PHY) {
@@ -748,9 +772,11 @@ static int gpy_set_wol(struct phy_device *phydev,
if (ret & (PHY_IMASK_MASK & ~PHY_IMASK_LSTC))
phy_trigger_machine(phydev);
+ priv->wolopts |= WAKE_PHY;
return 0;
}
+ priv->wolopts &= ~WAKE_PHY;
/* Disable the link state change interrupt */
return phy_clear_bits(phydev, PHY_IMASK, PHY_IMASK_LSTC);
}
@@ -758,18 +784,10 @@ static int gpy_set_wol(struct phy_device *phydev,
static void gpy_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
- int ret;
+ struct gpy_priv *priv = phydev->priv;
wol->supported = WAKE_MAGIC | WAKE_PHY;
- wol->wolopts = 0;
-
- ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, VPSPEC2_WOL_CTL);
- if (ret & WOL_EN)
- wol->wolopts |= WAKE_MAGIC;
-
- ret = phy_read(phydev, PHY_IMASK);
- if (ret & PHY_IMASK_LSTC)
- wol->wolopts |= WAKE_PHY;
+ wol->wolopts = priv->wolopts;
}
static int gpy_loopback(struct phy_device *phydev, bool enable)
diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c
index 3cf614b4cd52..5af5ade4fc64 100644
--- a/drivers/net/phy/nxp-c45-tja11xx.c
+++ b/drivers/net/phy/nxp-c45-tja11xx.c
@@ -1058,7 +1058,7 @@ nxp_c45_no_ptp_irq:
}
static int nxp_c45_ts_info(struct mii_timestamper *mii_ts,
- struct ethtool_ts_info *ts_info)
+ struct kernel_ethtool_ts_info *ts_info)
{
struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy,
mii_ts);
@@ -1660,6 +1660,9 @@ static int nxp_c45_probe(struct phy_device *phydev)
priv->mii_ts.ts_info = nxp_c45_ts_info;
phydev->mii_ts = &priv->mii_ts;
ret = nxp_c45_init_ptp_clock(priv);
+
+ /* Timestamp selected by default to keep legacy API */
+ phydev->default_timestamp = true;
} else {
phydev_dbg(phydev, "PTP support not enabled even if the phy supports it");
}
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 15f349e5995a..1f98b6a96c15 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -13,7 +13,7 @@
*/
const char *phy_speed_to_str(int speed)
{
- BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 102,
+ BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 103,
"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
"If a speed or mode has been added please update phy_speed_to_str "
"and the PHY settings array.\n");
@@ -141,6 +141,7 @@ int phy_interface_num_ports(phy_interface_t interface)
return 1;
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_QUSGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
return 4;
case PHY_INTERFACE_MODE_PSGMII:
return 5;
@@ -265,6 +266,7 @@ static const struct phy_setting settings[] = {
PHY_SETTING( 10, FULL, 10baseT1S_Full ),
PHY_SETTING( 10, HALF, 10baseT1S_Half ),
PHY_SETTING( 10, HALF, 10baseT1S_P2MP_Half ),
+ PHY_SETTING( 10, FULL, 10baseT1BRR_Full ),
};
#undef PHY_SETTING
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index c4236564c1cd..785182fa5fe0 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1309,7 +1309,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
if (netdev) {
struct device *parent = netdev->dev.parent;
- if (netdev->wol_enabled)
+ if (netdev->ethtool->wol_enabled)
pm_system_wakeup();
else if (device_may_wakeup(&netdev->dev))
pm_wakeup_dev_event(&netdev->dev, 0, true);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 6c6ec9475709..7752e9386b40 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -296,7 +296,7 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (!netdev)
goto out;
- if (netdev->wol_enabled)
+ if (netdev->ethtool->wol_enabled)
return false;
/* As long as not all affected network drivers support the
@@ -533,10 +533,10 @@ static int phy_scan_fixups(struct phy_device *phydev)
return 0;
}
-static int phy_bus_match(struct device *dev, struct device_driver *drv)
+static int phy_bus_match(struct device *dev, const struct device_driver *drv)
{
struct phy_device *phydev = to_phy_device(dev);
- struct phy_driver *phydrv = to_phy_driver(drv);
+ const struct phy_driver *phydrv = to_phy_driver(drv);
const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
int i;
@@ -1980,16 +1980,17 @@ int phy_suspend(struct phy_device *phydev)
const struct phy_driver *phydrv = phydev->drv;
int ret;
- if (phydev->suspended)
+ if (phydev->suspended || !phydrv)
return 0;
phy_ethtool_get_wol(phydev, &wol);
- phydev->wol_enabled = wol.wolopts || (netdev && netdev->wol_enabled);
+ phydev->wol_enabled = wol.wolopts ||
+ (netdev && netdev->ethtool->wol_enabled);
/* If the device has WOL enabled, we cannot suspend the PHY */
if (phydev->wol_enabled && !(phydrv->flags & PHY_ALWAYS_CALL_SUSPEND))
return -EBUSY;
- if (!phydrv || !phydrv->suspend)
+ if (!phydrv->suspend)
return 0;
ret = phydrv->suspend(phydev);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 503fd7c40523..51c526d227fa 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -231,6 +231,7 @@ static int phylink_interface_max_speed(phy_interface_t interface)
return SPEED_1000;
case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
return SPEED_2500;
case PHY_INTERFACE_MODE_5GBASER:
@@ -500,7 +501,11 @@ static unsigned long phylink_get_capabilities(phy_interface_t interface,
switch (interface) {
case PHY_INTERFACE_MODE_USXGMII:
- caps |= MAC_10000FD | MAC_5000FD | MAC_2500FD;
+ caps |= MAC_10000FD | MAC_5000FD;
+ fallthrough;
+
+ case PHY_INTERFACE_MODE_10G_QXGMII:
+ caps |= MAC_2500FD;
fallthrough;
case PHY_INTERFACE_MODE_RGMII_TXID:
@@ -885,26 +890,31 @@ static int phylink_parse_mode(struct phylink *pl,
const char *managed;
unsigned long caps;
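+ /* default_an_inband selects in-band mode up front; a fixed-link
+ * node parsed below still takes precedence.
+ */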
+ if (pl->config->default_an_inband)
+ pl->cfg_link_an_mode = MLO_AN_INBAND;
+
dn = fwnode_get_named_child_node(fwnode, "fixed-link");
if (dn || fwnode_property_present(fwnode, "fixed-link"))
pl->cfg_link_an_mode = MLO_AN_FIXED;
fwnode_handle_put(dn);
if ((fwnode_property_read_string(fwnode, "managed", &managed) == 0 &&
- strcmp(managed, "in-band-status") == 0) ||
- pl->config->ovr_an_inband) {
+ strcmp(managed, "in-band-status") == 0)) {
if (pl->cfg_link_an_mode == MLO_AN_FIXED) {
phylink_err(pl,
"can't use both fixed-link and in-band-status\n");
return -EINVAL;
}
+ pl->cfg_link_an_mode = MLO_AN_INBAND;
+ }
+
+ if (pl->cfg_link_an_mode == MLO_AN_INBAND) {
linkmode_zero(pl->supported);
phylink_set(pl->supported, MII);
phylink_set(pl->supported, Autoneg);
phylink_set(pl->supported, Asym_Pause);
phylink_set(pl->supported, Pause);
- pl->cfg_link_an_mode = MLO_AN_INBAND;
switch (pl->link_config.interface) {
case PHY_INTERFACE_MODE_SGMII:
@@ -921,6 +931,7 @@ static int phylink_parse_mode(struct phylink *pl,
case PHY_INTERFACE_MODE_5GBASER:
case PHY_INTERFACE_MODE_25GBASER:
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
case PHY_INTERFACE_MODE_10GKR:
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XLGMII:
@@ -1042,6 +1053,21 @@ static void phylink_pcs_poll_start(struct phylink *pl)
mod_timer(&pl->link_poll, jiffies + HZ);
}
+int phylink_pcs_pre_init(struct phylink *pl, struct phylink_pcs *pcs)
+{
+ int ret = 0;
+
+ /* Signal to PCS driver that MAC requires RX clock for init */
+ if (pl->config->mac_requires_rxc)
+ pcs->rxc_always_on = true;
+
+ if (pcs->ops->pcs_pre_init)
+ ret = pcs->ops->pcs_pre_init(pcs);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(phylink_pcs_pre_init);
+
static void phylink_mac_config(struct phylink *pl,
const struct phylink_link_state *state)
{
@@ -1104,6 +1130,7 @@ static unsigned int phylink_pcs_neg_mode(unsigned int mode,
case PHY_INTERFACE_MODE_QSGMII:
case PHY_INTERFACE_MODE_QUSGMII:
case PHY_INTERFACE_MODE_USXGMII:
+ case PHY_INTERFACE_MODE_10G_QXGMII:
/* These protocols are designed for use with a PHY which
* communicates its negotiation result back to the MAC via
* inband communication. Note: there exist PHYs that run
@@ -1823,6 +1850,9 @@ static int phylink_validate_phy(struct phylink *pl, struct phy_device *phy,
interfaces);
}
+ phylink_dbg(pl, "PHY %s doesn't supply possible interfaces\n",
+ phydev_name(phy));
+
/* Check whether we would use rate matching for the proposed interface
* mode.
*/
@@ -1923,6 +1953,8 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
phy_interface_t interface)
{
+ u32 flags = 0;
+
if (WARN_ON(pl->cfg_link_an_mode == MLO_AN_FIXED ||
(pl->cfg_link_an_mode == MLO_AN_INBAND &&
phy_interface_mode_is_8023z(interface) && !pl->sfp_bus)))
@@ -1931,7 +1963,10 @@ static int phylink_attach_phy(struct phylink *pl, struct phy_device *phy,
if (pl->phydev)
return -EBUSY;
- return phy_attach_direct(pl->netdev, phy, 0, interface);
+ if (pl->config->mac_requires_rxc)
+ flags |= PHY_F_RXC_ALWAYS_ON;
+
+ return phy_attach_direct(pl->netdev, phy, flags, interface);
}
/**
@@ -2034,6 +2069,9 @@ int phylink_fwnode_phy_connect(struct phylink *pl,
pl->link_config.interface = pl->link_interface;
}
+ if (pl->config->mac_requires_rxc)
+ flags |= PHY_F_RXC_ALWAYS_ON;
+
ret = phy_attach_direct(pl->netdev, phy_dev, flags,
pl->link_interface);
phy_device_free(phy_dev);
@@ -2244,7 +2282,7 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
{
ASSERT_RTNL();
- if (mac_wol && (!pl->netdev || pl->netdev->wol_enabled)) {
+ if (mac_wol && (!pl->netdev || pl->netdev->ethtool->wol_enabled)) {
/* Wake-on-Lan enabled, MAC handling */
mutex_lock(&pl->state_mutex);
diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c
index e79657f76bea..c8f83e5f78ab 100644
--- a/drivers/net/phy/qcom/at803x.c
+++ b/drivers/net/phy/qcom/at803x.c
@@ -426,7 +426,8 @@ static int at803x_hibernation_mode_config(struct phy_device *phydev)
/* The default after hardware reset is hibernation mode enabled. After
* software reset, the value is retained.
*/
- if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE))
+ if (!(priv->flags & AT803X_DISABLE_HIBERNATION_MODE) &&
+ !(phydev->dev_flags & PHY_F_RXC_ALWAYS_ON))
return 0;
return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 1fa70427b2a2..25e5bfbb6f89 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -32,6 +32,15 @@
#define RTL8211F_PHYCR2 0x19
#define RTL8211F_INSR 0x1d
+#define RTL8211F_LEDCR 0x10
+#define RTL8211F_LEDCR_MODE BIT(15)
+#define RTL8211F_LEDCR_ACT_TXRX BIT(4)
+#define RTL8211F_LEDCR_LINK_1000 BIT(3)
+#define RTL8211F_LEDCR_LINK_100 BIT(1)
+#define RTL8211F_LEDCR_LINK_10 BIT(0)
+#define RTL8211F_LEDCR_MASK GENMASK(4, 0)
+#define RTL8211F_LEDCR_SHIFT 5
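+/* Each of the three LEDs has its own 5-bit field in LEDCR */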
+
#define RTL8211F_TX_DELAY BIT(8)
#define RTL8211F_RX_DELAY BIT(3)
@@ -54,6 +63,25 @@
RTL8201F_ISR_LINK)
#define RTL8201F_IER 0x13
+#define RTL822X_VND1_SERDES_OPTION 0x697a
+#define RTL822X_VND1_SERDES_OPTION_MODE_MASK GENMASK(5, 0)
+#define RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX_SGMII 0
+#define RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX 2
+
+#define RTL822X_VND1_SERDES_CTRL3 0x7580
+#define RTL822X_VND1_SERDES_CTRL3_MODE_MASK GENMASK(5, 0)
+#define RTL822X_VND1_SERDES_CTRL3_MODE_SGMII 0x02
+#define RTL822X_VND1_SERDES_CTRL3_MODE_2500BASEX 0x16
+
+/* RTL822X_VND2_XXXXX registers are only accessible when phydev->is_c45
+ * is set; they cannot be accessed via C45-over-C22.
+ */
+#define RTL822X_VND2_GBCR 0xa412
+
+#define RTL822X_VND2_GANLPAR 0xa414
+
+#define RTL822X_VND2_PHYSR 0xa434
+
#define RTL8366RB_POWER_SAVE 0x15
#define RTL8366RB_POWER_SAVE_ON BIT(12)
@@ -64,6 +92,11 @@
#define RTL_GENERIC_PHYID 0x001cc800
#define RTL_8211FVD_PHYID 0x001cc878
+#define RTL_8221B_VB_CG 0x001cc849
+#define RTL_8221B_VN_CG 0x001cc84a
+#define RTL_8251B 0x001cc862
+
+#define RTL8211F_LED_COUNT 3
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
@@ -454,6 +487,99 @@ static int rtl821x_resume(struct phy_device *phydev)
return 0;
}
+static int rtl8211f_led_hw_is_supported(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ const unsigned long mask = BIT(TRIGGER_NETDEV_LINK_10) |
+ BIT(TRIGGER_NETDEV_LINK_100) |
+ BIT(TRIGGER_NETDEV_LINK_1000) |
+ BIT(TRIGGER_NETDEV_RX) |
+ BIT(TRIGGER_NETDEV_TX);
+
+ /* The RTL8211F PHY supports these LED settings on up to three LEDs:
+ * - Link: Configurable subset of 10/100/1000 link rates
+ * - Active: Blink on activity, RX or TX is not differentiated
+ * The Active option has two modes, A and B:
+ * - A: Link and Active indication at configurable, but matching,
+ * subset of 10/100/1000 link rates
+ * - B: Link indication at configurable subset of 10/100/1000 link
+ * rates and Active indication always at all three 10+100+1000
+ * link rates.
+ * This code currently uses mode B only.
+ */
+
+ if (index >= RTL8211F_LED_COUNT)
+ return -EINVAL;
+
+ /* Filter out any other unsupported triggers. */
+ if (rules & ~mask)
+ return -EOPNOTSUPP;
+
+ /* RX and TX are not differentiated, either both are set or not set. */
+ if (!(rules & BIT(TRIGGER_NETDEV_RX)) ^ !(rules & BIT(TRIGGER_NETDEV_TX)))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int rtl8211f_led_hw_control_get(struct phy_device *phydev, u8 index,
+ unsigned long *rules)
+{
+ int val;
+
+ val = phy_read_paged(phydev, 0xd04, RTL8211F_LEDCR);
+ if (val < 0)
+ return val;
+
+ val >>= RTL8211F_LEDCR_SHIFT * index;
+ val &= RTL8211F_LEDCR_MASK;
+
+ if (val & RTL8211F_LEDCR_LINK_10)
+ set_bit(TRIGGER_NETDEV_LINK_10, rules);
+
+ if (val & RTL8211F_LEDCR_LINK_100)
+ set_bit(TRIGGER_NETDEV_LINK_100, rules);
+
+ if (val & RTL8211F_LEDCR_LINK_1000)
+ set_bit(TRIGGER_NETDEV_LINK_1000, rules);
+
+ if (val & RTL8211F_LEDCR_ACT_TXRX) {
+ set_bit(TRIGGER_NETDEV_RX, rules);
+ set_bit(TRIGGER_NETDEV_TX, rules);
+ }
+
+ return 0;
+}
+
+static int rtl8211f_led_hw_control_set(struct phy_device *phydev, u8 index,
+ unsigned long rules)
+{
+ const u16 mask = RTL8211F_LEDCR_MASK << (RTL8211F_LEDCR_SHIFT * index);
+ u16 reg = 0;
+
+ if (index >= RTL8211F_LED_COUNT)
+ return -EINVAL;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_10, &rules))
+ reg |= RTL8211F_LEDCR_LINK_10;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_100, &rules))
+ reg |= RTL8211F_LEDCR_LINK_100;
+
+ if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules))
+ reg |= RTL8211F_LEDCR_LINK_1000;
+
+ if (test_bit(TRIGGER_NETDEV_RX, &rules) ||
+ test_bit(TRIGGER_NETDEV_TX, &rules)) {
+ reg |= RTL8211F_LEDCR_ACT_TXRX;
+ }
+
+ reg <<= RTL8211F_LEDCR_SHIFT * index;
+ reg |= RTL8211F_LEDCR_MODE; /* Mode B */
+
+ return phy_modify_paged(phydev, 0xd04, RTL8211F_LEDCR, mask, reg);
+}
+
static int rtl8211e_config_init(struct phy_device *phydev)
{
int ret = 0, oldpage;
@@ -531,17 +657,8 @@ static int rtl8366rb_config_init(struct phy_device *phydev)
}
/* get actual speed to cover the downshift case */
-static int rtlgen_get_speed(struct phy_device *phydev)
+static void rtlgen_decode_speed(struct phy_device *phydev, int val)
{
- int val;
-
- if (!phydev->link)
- return 0;
-
- val = phy_read_paged(phydev, 0xa43, 0x12);
- if (val < 0)
- return val;
-
switch (val & RTLGEN_SPEED_MASK) {
case 0x0000:
phydev->speed = SPEED_10;
@@ -564,19 +681,26 @@ static int rtlgen_get_speed(struct phy_device *phydev)
default:
break;
}
-
- return 0;
}
static int rtlgen_read_status(struct phy_device *phydev)
{
- int ret;
+ int ret, val;
ret = genphy_read_status(phydev);
if (ret < 0)
return ret;
- return rtlgen_get_speed(phydev);
+ if (!phydev->link)
+ return 0;
+
+ val = phy_read_paged(phydev, 0xa43, 0x12);
+ if (val < 0)
+ return val;
+
+ rtlgen_decode_speed(phydev, val);
+
+ return 0;
}
static int rtlgen_read_mmd(struct phy_device *phydev, int devnum, u16 regnum)
@@ -659,6 +783,84 @@ static int rtl822x_write_mmd(struct phy_device *phydev, int devnum, u16 regnum,
return ret;
}
+static int rtl822xb_config_init(struct phy_device *phydev)
+{
+ bool has_2500, has_sgmii;
+ u16 mode;
+ int ret;
+
+ has_2500 = test_bit(PHY_INTERFACE_MODE_2500BASEX,
+ phydev->host_interfaces) ||
+ phydev->interface == PHY_INTERFACE_MODE_2500BASEX;
+
+ has_sgmii = test_bit(PHY_INTERFACE_MODE_SGMII,
+ phydev->host_interfaces) ||
+ phydev->interface == PHY_INTERFACE_MODE_SGMII;
+
+ /* fill in possible interfaces */
+ __assign_bit(PHY_INTERFACE_MODE_2500BASEX, phydev->possible_interfaces,
+ has_2500);
+ __assign_bit(PHY_INTERFACE_MODE_SGMII, phydev->possible_interfaces,
+ has_sgmii);
+
+ if (!has_2500 && !has_sgmii)
+ return 0;
+
+ /* determine SerDes option mode */
+ if (has_2500 && !has_sgmii) {
+ mode = RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX;
+ phydev->rate_matching = RATE_MATCH_PAUSE;
+ } else {
+ mode = RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX_SGMII;
+ phydev->rate_matching = RATE_MATCH_NONE;
+ }
+
+ /* the following sequence with magic numbers sets up the SerDes
+ * option mode
+ */
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x75f3, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND1,
+ RTL822X_VND1_SERDES_OPTION,
+ RTL822X_VND1_SERDES_OPTION_MODE_MASK,
+ mode);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6a04, 0x0503);
+ if (ret < 0)
+ return ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6f10, 0xd455);
+ if (ret < 0)
+ return ret;
+
+ return phy_write_mmd(phydev, MDIO_MMD_VEND1, 0x6f11, 0x8020);
+}
+
+static int rtl822xb_get_rate_matching(struct phy_device *phydev,
+ phy_interface_t iface)
+{
+ int val;
+
+ /* Only rate matching at 2500base-x */
+ if (iface != PHY_INTERFACE_MODE_2500BASEX)
+ return RATE_MATCH_NONE;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, RTL822X_VND1_SERDES_OPTION);
+ if (val < 0)
+ return val;
+
+ if ((val & RTL822X_VND1_SERDES_OPTION_MODE_MASK) ==
+ RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX)
+ return RATE_MATCH_PAUSE;
+
+ /* RTL822X_VND1_SERDES_OPTION_MODE_2500BASEX_SGMII */
+ return RATE_MATCH_NONE;
+}
+
static int rtl822x_get_features(struct phy_device *phydev)
{
int val;
@@ -695,10 +897,30 @@ static int rtl822x_config_aneg(struct phy_device *phydev)
return __genphy_config_aneg(phydev, ret);
}
-static int rtl822x_read_status(struct phy_device *phydev)
+static void rtl822xb_update_interface(struct phy_device *phydev)
{
- int ret;
+ int val;
+
+ if (!phydev->link)
+ return;
+
+ /* Change interface according to serdes mode */
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND1, RTL822X_VND1_SERDES_CTRL3);
+ if (val < 0)
+ return;
+ switch (val & RTL822X_VND1_SERDES_CTRL3_MODE_MASK) {
+ case RTL822X_VND1_SERDES_CTRL3_MODE_2500BASEX:
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ break;
+ case RTL822X_VND1_SERDES_CTRL3_MODE_SGMII:
+ phydev->interface = PHY_INTERFACE_MODE_SGMII;
+ break;
+ }
+}
+
+static int rtl822x_read_status(struct phy_device *phydev)
+{
if (phydev->autoneg == AUTONEG_ENABLE) {
int lpadv = phy_read_paged(phydev, 0xa5d, 0x13);
@@ -709,11 +931,99 @@ static int rtl822x_read_status(struct phy_device *phydev)
lpadv);
}
- ret = genphy_read_status(phydev);
+ return rtlgen_read_status(phydev);
+}
+
+static int rtl822xb_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = rtl822x_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ rtl822xb_update_interface(phydev);
+
+ return 0;
+}
+
+static int rtl822x_c45_get_features(struct phy_device *phydev)
+{
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT,
+ phydev->supported);
+
+ return genphy_c45_pma_read_abilities(phydev);
+}
+
+static int rtl822x_c45_config_aneg(struct phy_device *phydev)
+{
+ bool changed = false;
+ int ret, val;
+
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ return genphy_c45_pma_setup_forced(phydev);
+
+ ret = genphy_c45_an_config_aneg(phydev);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ val = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising);
+
+ /* Vendor register as C45 has no standardized support for 1000BaseT */
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, RTL822X_VND2_GBCR,
+ ADVERTISE_1000FULL, val);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ changed = true;
+
+ return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int rtl822x_c45_read_status(struct phy_device *phydev)
+{
+ int ret, val;
+
+ ret = genphy_c45_read_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* Vendor register as C45 has no standardized support for 1000BaseT */
+ if (phydev->autoneg == AUTONEG_ENABLE) {
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2,
+ RTL822X_VND2_GANLPAR);
+ if (val < 0)
+ return val;
+
+ mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val);
+ }
+
+ if (!phydev->link)
+ return 0;
+
+ /* Read actual speed from vendor register. */
+ val = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL822X_VND2_PHYSR);
+ if (val < 0)
+ return val;
+
+ rtlgen_decode_speed(phydev, val);
+
+ return 0;
+}
+
+static int rtl822xb_c45_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = rtl822x_c45_read_status(phydev);
if (ret < 0)
return ret;
- return rtlgen_get_speed(phydev);
+ rtl822xb_update_interface(phydev);
+
+ return 0;
}
static bool rtlgen_supports_2_5gbps(struct phy_device *phydev)
@@ -739,6 +1049,35 @@ static int rtl8226_match_phy_device(struct phy_device *phydev)
rtlgen_supports_2_5gbps(phydev);
}
+static int rtlgen_is_c45_match(struct phy_device *phydev, unsigned int id,
+ bool is_c45)
+{
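+ /* On a C45 bus the PHY ID comes from MMD 1 (PMA/PMD), hence
+ * device_ids[1]; otherwise the regular C22 PHY ID is matched.
+ */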
+ if (phydev->is_c45)
+ return is_c45 && (id == phydev->c45_ids.device_ids[1]);
+ else
+ return !is_c45 && (id == phydev->phy_id);
+}
+
+static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev)
+{
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, false);
+}
+
+static int rtl8221b_vb_cg_c45_match_phy_device(struct phy_device *phydev)
+{
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true);
+}
+
+static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev)
+{
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, false);
+}
+
+static int rtl8221b_vn_cg_c45_match_phy_device(struct phy_device *phydev)
+{
+ return rtlgen_is_c45_match(phydev, RTL_8221B_VN_CG, true);
+}
+
static int rtlgen_resume(struct phy_device *phydev)
{
int ret = genphy_resume(phydev);
@@ -749,6 +1088,15 @@ static int rtlgen_resume(struct phy_device *phydev)
return ret;
}
+static int rtlgen_c45_resume(struct phy_device *phydev)
+{
+ int ret = genphy_c45_pma_resume(phydev);
+
+ msleep(20);
+
+ return ret;
+}
+
static int rtl9000a_config_init(struct phy_device *phydev)
{
phydev->autoneg = AUTONEG_DISABLE;
@@ -948,6 +1296,9 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
.flags = PHY_ALWAYS_CALL_SUSPEND,
+ .led_hw_is_supported = rtl8211f_led_hw_is_supported,
+ .led_hw_control_get = rtl8211f_led_hw_control_get,
+ .led_hw_control_set = rtl8211f_led_hw_control_set,
}, {
PHY_ID_MATCH_EXACT(RTL_8211FVD_PHYID),
.name = "RTL8211F-VD Gigabit Ethernet",
@@ -988,7 +1339,9 @@ static struct phy_driver realtek_drvs[] = {
.name = "RTL8226B_RTL8221B 2.5Gbps PHY",
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
- .read_status = rtl822x_read_status,
+ .config_init = rtl822xb_config_init,
+ .get_rate_matching = rtl822xb_get_rate_matching,
+ .read_status = rtl822xb_read_status,
.suspend = genphy_suspend,
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
@@ -1010,32 +1363,58 @@ static struct phy_driver realtek_drvs[] = {
.name = "RTL8226B-CG_RTL8221B-CG 2.5Gbps PHY",
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
- .read_status = rtl822x_read_status,
+ .config_init = rtl822xb_config_init,
+ .get_rate_matching = rtl822xb_get_rate_matching,
+ .read_status = rtl822xb_read_status,
.suspend = genphy_suspend,
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- PHY_ID_MATCH_EXACT(0x001cc849),
- .name = "RTL8221B-VB-CG 2.5Gbps PHY",
+ .match_phy_device = rtl8221b_vb_cg_c22_match_phy_device,
+ .name = "RTL8221B-VB-CG 2.5Gbps PHY (C22)",
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
- .read_status = rtl822x_read_status,
+ .config_init = rtl822xb_config_init,
+ .get_rate_matching = rtl822xb_get_rate_matching,
+ .read_status = rtl822xb_read_status,
.suspend = genphy_suspend,
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
- PHY_ID_MATCH_EXACT(0x001cc84a),
- .name = "RTL8221B-VM-CG 2.5Gbps PHY",
+ .match_phy_device = rtl8221b_vb_cg_c45_match_phy_device,
+ .name = "RTL8221B-VB-CG 2.5Gbps PHY (C45)",
+ .config_init = rtl822xb_config_init,
+ .get_rate_matching = rtl822xb_get_rate_matching,
+ .get_features = rtl822x_c45_get_features,
+ .config_aneg = rtl822x_c45_config_aneg,
+ .read_status = rtl822xb_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = rtlgen_c45_resume,
+ }, {
+ .match_phy_device = rtl8221b_vn_cg_c22_match_phy_device,
+ .name = "RTL8221B-VM-CG 2.5Gbps PHY (C22)",
.get_features = rtl822x_get_features,
.config_aneg = rtl822x_config_aneg,
- .read_status = rtl822x_read_status,
+ .config_init = rtl822xb_config_init,
+ .get_rate_matching = rtl822xb_get_rate_matching,
+ .read_status = rtl822xb_read_status,
.suspend = genphy_suspend,
.resume = rtlgen_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ .match_phy_device = rtl8221b_vn_cg_c45_match_phy_device,
+ .name = "RTL8221B-VN-CG 2.5Gbps PHY (C45)",
+ .config_init = rtl822xb_config_init,
+ .get_rate_matching = rtl822xb_get_rate_matching,
+ .get_features = rtl822x_c45_get_features,
+ .config_aneg = rtl822x_c45_config_aneg,
+ .read_status = rtl822xb_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = rtlgen_c45_resume,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc862),
.name = "RTL8251B 5Gbps PHY",
.get_features = rtl822x_get_features,
@@ -1046,6 +1425,14 @@ static struct phy_driver realtek_drvs[] = {
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
}, {
+ PHY_ID_MATCH_EXACT(0x001ccad0),
+ .name = "RTL8224 2.5Gbps PHY",
+ .get_features = rtl822x_c45_get_features,
+ .config_aneg = rtl822x_c45_config_aneg,
+ .read_status = rtl822x_c45_read_status,
+ .suspend = genphy_c45_pma_suspend,
+ .resume = rtlgen_c45_resume,
+ }, {
PHY_ID_MATCH_EXACT(0x001cc961),
.name = "RTL8366RB Gigabit Ethernet",
.config_init = &rtl8366rb_config_init,
@@ -1079,6 +1466,13 @@ static struct phy_driver realtek_drvs[] = {
.handle_interrupt = genphy_handle_interrupt_no_ack,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc960),
+ .name = "RTL8366S Gigabit Ethernet",
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
},
};
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index db39dec7f247..2f44fc51848f 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -355,7 +355,7 @@ EXPORT_SYMBOL_GPL(sfp_parse_support);
* modes mask.
*/
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
- unsigned long *link_modes)
+ const unsigned long *link_modes)
{
if (phylink_test(link_modes, 25000baseCR_Full) ||
phylink_test(link_modes, 25000baseKR_Full) ||
@@ -373,7 +373,8 @@ phy_interface_t sfp_select_interface(struct sfp_bus *bus,
if (phylink_test(link_modes, 5000baseT_Full))
return PHY_INTERFACE_MODE_5GBASER;
- if (phylink_test(link_modes, 2500baseX_Full))
+ if (phylink_test(link_modes, 2500baseX_Full) ||
+ phylink_test(link_modes, 2500baseT_Full))
return PHY_INTERFACE_MODE_2500BASEX;
if (phylink_test(link_modes, 1000baseT_Half) ||
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index f75c9eb3958e..a5684ef5884b 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -385,18 +385,23 @@ static void sfp_fixup_rollball(struct sfp *sfp)
sfp->phy_t_retry = msecs_to_jiffies(1000);
}
-static void sfp_fixup_fs_10gt(struct sfp *sfp)
+static void sfp_fixup_fs_2_5gt(struct sfp *sfp)
{
- sfp_fixup_10gbaset_30m(sfp);
sfp_fixup_rollball(sfp);
- /* The RollBall fixup is not enough for FS modules, the AQR chip inside
+ /* The RollBall fixup is not enough for FS modules, the PHY chip inside
* them does not return 0xffff for PHY ID registers in all MMDs for the
* while initializing. They need a 4 second wait before accessing PHY.
*/
sfp->module_t_wait = msecs_to_jiffies(4000);
}
+static void sfp_fixup_fs_10gt(struct sfp *sfp)
+{
+ sfp_fixup_10gbaset_30m(sfp);
+ sfp_fixup_fs_2_5gt(sfp);
+}
+
static void sfp_fixup_halny_gsfp(struct sfp *sfp)
{
/* Ignore the TX_FAULT and LOS signals on this module.
@@ -468,10 +473,15 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK("ALCATELLUCENT", "3FE46541AA", sfp_quirk_2500basex,
sfp_fixup_nokia),
- // Fiberstore SFP-10G-T doesn't identify as copper, and uses the
- // Rollball protocol to talk to the PHY.
+ // Fiberstore SFP-10G-T doesn't identify as copper, uses the Rollball
+ // protocol to talk to the PHY and needs a 4 second wait before probing
+ // the PHY.
SFP_QUIRK_F("FS", "SFP-10G-T", sfp_fixup_fs_10gt),
+ // Fiberstore SFP-2.5G-T uses the Rollball protocol to talk to the PHY
+ // and needs a 4 second wait before probing the PHY.
+ SFP_QUIRK_F("FS", "SFP-2.5G-T", sfp_fixup_fs_2_5gt),
+
// Fiberstore GPON-ONU-34-20BI can operate at 2500base-X, but report 1.2GBd
// NRZ in their EEPROM
SFP_QUIRK("FS", "GPON-ONU-34-20BI", sfp_quirk_2500basex,
@@ -488,9 +498,6 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK("HUAWEI", "MA5671A", sfp_quirk_2500basex,
sfp_fixup_ignore_tx_fault),
- // FS 2.5G Base-T
- SFP_QUIRK_M("FS", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
-
// Lantech 8330-262D-E can operate at 2500base-X, but incorrectly report
// 2500MBd NRZ in their EEPROM
SFP_QUIRK_M("Lantech", "8330-262D-E", sfp_quirk_2500basex),
@@ -502,10 +509,14 @@ static const struct sfp_quirk sfp_quirks[] = {
SFP_QUIRK_F("Walsun", "HXSX-ATRC-1", sfp_fixup_fs_10gt),
SFP_QUIRK_F("Walsun", "HXSX-ATRI-1", sfp_fixup_fs_10gt),
+ // OEM SFP-GE-T is a 1000Base-T module with broken TX_FAULT indicator
+ SFP_QUIRK_F("OEM", "SFP-GE-T", sfp_fixup_ignore_tx_fault),
+
SFP_QUIRK_F("OEM", "SFP-10G-T", sfp_fixup_rollball_cc),
SFP_QUIRK_M("OEM", "SFP-2.5G-T", sfp_quirk_oem_2_5g),
SFP_QUIRK_F("OEM", "RTSFP-10", sfp_fixup_rollball_cc),
SFP_QUIRK_F("OEM", "RTSFP-10G", sfp_fixup_rollball_cc),
+ SFP_QUIRK_F("Turris", "RTSFP-2.5G", sfp_fixup_rollball),
SFP_QUIRK_F("Turris", "RTSFP-10", sfp_fixup_rollball),
SFP_QUIRK_F("Turris", "RTSFP-10G", sfp_fixup_rollball),
};
@@ -2418,8 +2429,7 @@ static void sfp_sm_module(struct sfp *sfp, unsigned int event)
/* Handle remove event globally, it resets this state machine */
if (event == SFP_E_REMOVE) {
- if (sfp->sm_mod_state > SFP_MOD_PROBE)
- sfp_sm_mod_remove(sfp);
+ sfp_sm_mod_remove(sfp);
sfp_sm_mod_next(sfp, SFP_MOD_EMPTY, 0);
return;
}
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 897b979ec03c..3b5fcaf0dd36 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -237,16 +237,6 @@ static int vsc739x_config_init(struct phy_device *phydev)
return 0;
}
-static int vsc73xx_config_aneg(struct phy_device *phydev)
-{
- /* The VSC73xx switches does not like to be instructed to
- * do autonegotiation in any way, it prefers that you just go
- * with the power-on/reset defaults. Writing some registers will
- * just make autonegotiation permanently fail.
- */
- return 0;
-}
-
/* This adds a skew for both TX and RX clocks, so the skew should only be
* applied to "rgmii-id" interfaces. It may not work as expected
* on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces.
@@ -444,7 +434,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
- .config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
}, {
@@ -453,7 +442,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc738x_config_init,
- .config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
}, {
@@ -462,7 +450,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
- .config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
}, {
@@ -471,7 +458,6 @@ static struct phy_driver vsc82xx_driver[] = {
.phy_id_mask = 0x000ffff0,
/* PHY_GBIT_FEATURES */
.config_init = vsc739x_config_init,
- .config_aneg = vsc73xx_config_aneg,
.read_page = vsc73xx_read_page,
.write_page = vsc73xx_write_page,
}, {
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 7b1bc5fcef9b..7c51daecf18e 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -15,6 +15,7 @@
#include <linux/mii.h>
#include <linux/mdio.h>
#include <linux/phy.h>
+#include <linux/clk.h>
#include <linux/of_mdio.h>
#define XILINX_GMII2RGMII_REG 0x10
@@ -85,11 +86,17 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev)
struct device *dev = &mdiodev->dev;
struct device_node *np = dev->of_node, *phy_node;
struct gmii2rgmii *priv;
+ struct clk *clkin;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ clkin = devm_clk_get_optional_enabled(dev, NULL);
+ if (IS_ERR(clkin))
+ return dev_err_probe(dev, PTR_ERR(clkin),
+ "Failed to get and enable clock from Device Tree\n");
+
phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!phy_node) {
dev_err(dev, "Couldn't parse phy-handle\n");
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index cc7d1113ece0..e39bfaefe8c5 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -1358,7 +1358,6 @@ static struct parport_driver plip_driver = {
.probe = plip_probe,
.match_port = plip_attach,
.detach = plip_detach,
- .devmodel = true,
};
static void __exit plip_cleanup_module (void)
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fe380fe196e7..eb9acfcaeb09 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -70,6 +70,7 @@
#define MPHDRLEN_SSN 4 /* ditto with short sequence numbers */
#define PPP_PROTO_LEN 2
+#define PPP_LCP_HDRLEN 4
/*
* An instance of /dev/ppp can be associated with either a ppp
@@ -493,6 +494,15 @@ static ssize_t ppp_read(struct file *file, char __user *buf,
return ret;
}
+static bool ppp_check_packet(struct sk_buff *skb, size_t count)
+{
+ /* LCP packets must include the LCP header, which is 4 bytes long:
+ * 1-byte code, 1-byte identifier, and 2-byte length.
+ */
+ return get_unaligned_be16(skb->data) != PPP_LCP ||
+ count >= PPP_PROTO_LEN + PPP_LCP_HDRLEN;
+}
+
static ssize_t ppp_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -515,6 +525,11 @@ static ssize_t ppp_write(struct file *file, const char __user *buf,
kfree_skb(skb);
goto out;
}
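+ /* Reject LCP packets that are shorter than the 4-byte LCP header. */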
+ ret = -EINVAL;
+ if (unlikely(!ppp_check_packet(skb, count))) {
+ kfree_skb(skb);
+ goto out;
+ }
switch (pf->kind) {
case INTERFACE:
@@ -1357,7 +1372,7 @@ static struct net *ppp_nl_get_link_net(const struct net_device *dev)
{
struct ppp *ppp = netdev_priv(dev);
- return ppp->ppp_net;
+ return READ_ONCE(ppp->ppp_net);
}
static struct rtnl_link_ops ppp_link_ops __read_mostly = {
diff --git a/drivers/net/pse-pd/Kconfig b/drivers/net/pse-pd/Kconfig
index 687dec49c1e1..7fab916a7f46 100644
--- a/drivers/net/pse-pd/Kconfig
+++ b/drivers/net/pse-pd/Kconfig
@@ -5,6 +5,7 @@
menuconfig PSE_CONTROLLER
bool "Ethernet Power Sourcing Equipment Support"
+ depends on REGULATOR
help
Generic Power Sourcing Equipment Controller support.
@@ -14,10 +15,30 @@ if PSE_CONTROLLER
config PSE_REGULATOR
tristate "Regulator based PSE controller"
- depends on REGULATOR || COMPILE_TEST
help
This module provides support for simple regulator based Ethernet Power
Sourcing Equipment without automatic classification support. For
example for basic implementation of PoDL (802.3bu) specification.
+config PSE_PD692X0
+ tristate "PD692X0 PSE controller"
+ depends on I2C
+ select FW_LOADER
+ select FW_UPLOAD
+ help
+ This module provides support for PD692x0 regulator based Ethernet
+ Power Sourcing Equipment.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pd692x0.
+
+config PSE_TPS23881
+ tristate "TPS23881 PSE controller"
+ depends on I2C
+ help
+ This module provides support for TPS23881 regulator based Ethernet
+ Power Sourcing Equipment.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tps23881.
endif
diff --git a/drivers/net/pse-pd/Makefile b/drivers/net/pse-pd/Makefile
index 1b8aa4c70f0b..9d2898b36737 100644
--- a/drivers/net/pse-pd/Makefile
+++ b/drivers/net/pse-pd/Makefile
@@ -4,3 +4,5 @@
obj-$(CONFIG_PSE_CONTROLLER) += pse_core.o
obj-$(CONFIG_PSE_REGULATOR) += pse_regulator.o
+obj-$(CONFIG_PSE_PD692X0) += pd692x0.o
+obj-$(CONFIG_PSE_TPS23881) += tps23881.o
diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c
new file mode 100644
index 000000000000..0af7db80b2f8
--- /dev/null
+++ b/drivers/net/pse-pd/pd692x0.c
@@ -0,0 +1,1538 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the Microchip PD692X0 PoE PSE Controller (I2C bus)
+ *
+ * Copyright (c) 2023 Bootlin, Kory Maincent <kory.maincent@bootlin.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pse-pd/pse.h>
+
+#define PD692X0_PSE_NAME "pd692x0_pse"
+
+#define PD692X0_MAX_PIS 48
+#define PD692X0_MAX_MANAGERS 12
+#define PD692X0_MAX_MANAGER_PORTS 8
+#define PD692X0_MAX_HW_PORTS (PD692X0_MAX_MANAGERS * PD692X0_MAX_MANAGER_PORTS)
+
+#define PD69200_BT_PROD_VER 24
+#define PD69210_BT_PROD_VER 26
+#define PD69220_BT_PROD_VER 29
+
+#define PD692X0_FW_MAJ_VER 3
+#define PD692X0_FW_MIN_VER 5
+#define PD692X0_FW_PATCH_VER 5
+
+enum pd692x0_fw_state {
+ PD692X0_FW_UNKNOWN,
+ PD692X0_FW_OK,
+ PD692X0_FW_BROKEN,
+ PD692X0_FW_NEED_UPDATE,
+ PD692X0_FW_PREPARE,
+ PD692X0_FW_WRITE,
+ PD692X0_FW_COMPLETE,
+};
+
+struct pd692x0_msg {
+ u8 key;
+ u8 echo;
+ u8 sub[3];
+ u8 data[8];
+ __be16 chksum;
+} __packed;
+
+struct pd692x0_msg_ver {
+ u8 prod;
+ u8 maj_sw_ver;
+ u8 min_sw_ver;
+ u8 pa_sw_ver;
+ u8 param;
+ u8 build;
+};
+
+enum {
+ PD692X0_KEY_CMD,
+ PD692X0_KEY_PRG,
+ PD692X0_KEY_REQ,
+ PD692X0_KEY_TLM,
+ PD692X0_KEY_TEST,
+ PD692X0_KEY_REPORT = 0x52
+};
+
+enum {
+ PD692X0_MSG_RESET,
+ PD692X0_MSG_GET_SYS_STATUS,
+ PD692X0_MSG_GET_SW_VER,
+ PD692X0_MSG_SET_TMP_PORT_MATRIX,
+ PD692X0_MSG_PRG_PORT_MATRIX,
+ PD692X0_MSG_SET_PORT_PARAM,
+ PD692X0_MSG_GET_PORT_STATUS,
+ PD692X0_MSG_DOWNLOAD_CMD,
+ PD692X0_MSG_GET_PORT_CLASS,
+ PD692X0_MSG_GET_PORT_MEAS,
+ PD692X0_MSG_GET_PORT_PARAM,
+
+ /* add new message above here */
+ PD692X0_MSG_CNT
+};
+
+struct pd692x0_priv {
+ struct i2c_client *client;
+ struct pse_controller_dev pcdev;
+ struct device_node *np;
+
+ enum pd692x0_fw_state fw_state;
+ struct fw_upload *fwl;
+ bool cancel_request;
+
+ u8 msg_id;
+ bool last_cmd_key;
+ unsigned long last_cmd_key_time;
+
+ enum ethtool_c33_pse_admin_state admin_state[PD692X0_MAX_PIS];
+};
+
+/* Template list of communication messages. The non-null bytes defined here
+ * constitute the fixed portion of the messages. The remaining bytes will
+ * be configured later within the functions. Refer to the "PD692x0 BT Serial
+ * Communication Protocol User Guide" for comprehensive details on message
+ * content.
+ */
+static const struct pd692x0_msg pd692x0_msg_template_list[PD692X0_MSG_CNT] = {
+ [PD692X0_MSG_RESET] = {
+ .key = PD692X0_KEY_CMD,
+ .sub = {0x07, 0x55, 0x00},
+ .data = {0x55, 0x00, 0x55, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_GET_SYS_STATUS] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x07, 0xd0, 0x4e},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_GET_SW_VER] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x07, 0x1e, 0x21},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_SET_TMP_PORT_MATRIX] = {
+ .key = PD692X0_KEY_CMD,
+ .sub = {0x05, 0x43},
+ .data = { 0, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_PRG_PORT_MATRIX] = {
+ .key = PD692X0_KEY_CMD,
+ .sub = {0x07, 0x43, 0x4e},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_SET_PORT_PARAM] = {
+ .key = PD692X0_KEY_CMD,
+ .sub = {0x05, 0xc0},
+ .data = { 0xf, 0xff, 0xff, 0xff,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_GET_PORT_STATUS] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x05, 0xc1},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_DOWNLOAD_CMD] = {
+ .key = PD692X0_KEY_PRG,
+ .sub = {0xff, 0x99, 0x15},
+ .data = {0x16, 0x16, 0x99, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_GET_PORT_CLASS] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x05, 0xc4},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_GET_PORT_MEAS] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x05, 0xc5},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+ [PD692X0_MSG_GET_PORT_PARAM] = {
+ .key = PD692X0_KEY_REQ,
+ .sub = {0x05, 0xc0},
+ .data = {0x4e, 0x4e, 0x4e, 0x4e,
+ 0x4e, 0x4e, 0x4e, 0x4e},
+ },
+};
+
+static u8 pd692x0_build_msg(struct pd692x0_msg *msg, u8 echo)
+{
+ u8 *data = (u8 *)msg;
+ u16 chksum = 0;
+ int i;
+
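+ /* Echo 0xff is carried by unsolicited controller messages (e.g. boot
+ * reports), so wrap the counter back to zero before reaching it.
+ */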
+ msg->echo = echo++;
+ if (echo == 0xff)
+ echo = 0;
+
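+ /* The checksum is the 16-bit sum of all preceding message bytes,
+ * stored big-endian.
+ */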
+ for (i = 0; i < sizeof(*msg) - sizeof(msg->chksum); i++)
+ chksum += data[i];
+
+ msg->chksum = cpu_to_be16(chksum);
+
+ return echo;
+}
+
+static int pd692x0_send_msg(struct pd692x0_priv *priv, struct pd692x0_msg *msg)
+{
+ const struct i2c_client *client = priv->client;
+ int ret;
+
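+ /* The protocol mandates a 30ms gap between command messages; sleep
+ * away whatever remains of it since the last command.
+ */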
+ if (msg->key == PD692X0_KEY_CMD && priv->last_cmd_key) {
+ int cmd_msleep;
+
+ cmd_msleep = 30 - jiffies_to_msecs(jiffies - priv->last_cmd_key_time);
+ if (cmd_msleep > 0)
+ msleep(cmd_msleep);
+ }
+
+ /* Add echo and checksum bytes to the message */
+ priv->msg_id = pd692x0_build_msg(msg, priv->msg_id);
+
+ ret = i2c_master_send(client, (u8 *)msg, sizeof(*msg));
+ if (ret != sizeof(*msg))
+ return -EIO;
+
+ return 0;
+}
+
+static int pd692x0_reset(struct pd692x0_priv *priv)
+{
+ const struct i2c_client *client = priv->client;
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
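+ /* Reset sequence: send the reset command, expect a successful report
+ * within 30ms, then an error-free boot status message ~300ms later.
+ */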
+ msg = pd692x0_msg_template_list[PD692X0_MSG_RESET];
+ ret = pd692x0_send_msg(priv, &msg);
+ if (ret) {
+ dev_err(&client->dev,
+ "Failed to reset the controller (%pe)\n", ERR_PTR(ret));
+ return ret;
+ }
+
+ msleep(30);
+
+ ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf));
+ if (ret != sizeof(buf))
+ return ret < 0 ? ret : -EIO;
+
+ /* Is the reply a successful report message */
+ if (buf.key != PD692X0_KEY_REPORT || buf.sub[0] || buf.sub[1])
+ return -EIO;
+
+ msleep(300);
+
+ ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf));
+ if (ret != sizeof(buf))
+ return ret < 0 ? ret : -EIO;
+
+ /* Check that the boot status reports no error */
+ if (buf.key != 0x03 || buf.echo != 0xff || buf.sub[0] & 0x1) {
+ dev_err(&client->dev, "PSE controller error\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static bool pd692x0_try_recv_msg(const struct i2c_client *client,
+ struct pd692x0_msg *msg,
+ struct pd692x0_msg *buf)
+{
+ /* Wait 30ms before readback as mandated by the protocol */
+ msleep(30);
+
+ memset(buf, 0, sizeof(*buf));
+ i2c_master_recv(client, (u8 *)buf, sizeof(*buf));
+ if (buf->key)
+ return 0;
+
+ msleep(100);
+
+ memset(buf, 0, sizeof(*buf));
+ i2c_master_recv(client, (u8 *)buf, sizeof(*buf));
+ if (buf->key)
+ return 0;
+
+ return 1;
+}
+
+/* Implementation of I2C communication, specifically addressing scenarios
+ * involving communication loss. Refer to the "Synchronization During
+ * Communication Loss" section in the Communication Protocol document for
+ * further details.
+ */
+static int pd692x0_recv_msg(struct pd692x0_priv *priv,
+ struct pd692x0_msg *msg,
+ struct pd692x0_msg *buf)
+{
+ const struct i2c_client *client = priv->client;
+ int ret;
+
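+ /* Recovery ladder: retry the read, retransmit the message, back off
+ * for 10s and retransmit again, and reset the controller as a last
+ * resort.
+ */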
+ ret = pd692x0_try_recv_msg(client, msg, buf);
+ if (!ret)
+ goto out_success;
+
+ dev_warn(&client->dev,
+ "Communication lost, rtnl is locked until communication is back!");
+
+ ret = pd692x0_send_msg(priv, msg);
+ if (ret)
+ return ret;
+
+ ret = pd692x0_try_recv_msg(client, msg, buf);
+ if (!ret)
+ goto out_success2;
+
+ msleep(10000);
+
+ ret = pd692x0_send_msg(priv, msg);
+ if (ret)
+ return ret;
+
+ ret = pd692x0_try_recv_msg(client, msg, buf);
+ if (!ret)
+ goto out_success2;
+
+ return pd692x0_reset(priv);
+
+out_success2:
+ dev_warn(&client->dev, "Communication is back, rtnl is unlocked!");
+out_success:
+ if (msg->key == PD692X0_KEY_CMD) {
+ priv->last_cmd_key = true;
+ priv->last_cmd_key_time = jiffies;
+ } else {
+ priv->last_cmd_key = false;
+ }
+
+ return 0;
+}
+
+static int pd692x0_sendrecv_msg(struct pd692x0_priv *priv,
+ struct pd692x0_msg *msg,
+ struct pd692x0_msg *buf)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ ret = pd692x0_send_msg(priv, msg);
+ if (ret)
+ return ret;
+
+ ret = pd692x0_recv_msg(priv, msg, buf);
+ if (ret)
+ return ret;
+
+ if (msg->echo != buf->echo) {
+ dev_err(dev,
+ "Wrong match in message ID, expect %d received %d.\n",
+ msg->echo, buf->echo);
+ return -EIO;
+ }
+
+ /* If the reply is a report message, check that it reports success */
+ if (buf->key == PD692X0_KEY_REPORT &&
+ (buf->sub[0] || buf->sub[1])) {
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static struct pd692x0_priv *to_pd692x0_priv(struct pse_controller_dev *pcdev)
+{
+ return container_of(pcdev, struct pd692x0_priv, pcdev);
+}
+
+static int pd692x0_fw_unavailable(struct pd692x0_priv *priv)
+{
+ switch (priv->fw_state) {
+ case PD692X0_FW_OK:
+ return 0;
+ case PD692X0_FW_PREPARE:
+ case PD692X0_FW_WRITE:
+ case PD692X0_FW_COMPLETE:
+ dev_err(&priv->client->dev, "Firmware update in progress!\n");
+ return -EBUSY;
+ case PD692X0_FW_BROKEN:
+ case PD692X0_FW_NEED_UPDATE:
+ default:
+ dev_err(&priv->client->dev,
+ "Firmware issue. Please update it!\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int pd692x0_pi_enable(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ if (priv->admin_state[id] == ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED)
+ return 0;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
+ msg.data[0] = 0x1;
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+
+ return 0;
+}
+
+static int pd692x0_pi_disable(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ if (priv->admin_state[id] == ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED)
+ return 0;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
+ msg.data[0] = 0x0;
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ return 0;
+}
+
+static int pd692x0_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ if (buf.sub[1]) {
+ priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ return 1;
+ } else {
+ priv->admin_state[id] = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+ return 0;
+ }
+}
+
+struct pd692x0_pse_ext_state_mapping {
+ u32 status_code;
+ enum ethtool_c33_pse_ext_state pse_ext_state;
+ u32 pse_ext_substate;
+};
+
+static const struct pd692x0_pse_ext_state_mapping
+pd692x0_pse_ext_state_map[] = {
+ {0x06, ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE},
+ {0x07, ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE},
+ {0x08, ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE},
+ {0x0C, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT},
+ {0x11, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT},
+ {0x12, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT},
+ {0x1B, ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS},
+ {0x1C, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS},
+ {0x1E, ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_DETECTED_UNDERLOAD},
+ {0x1F, ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD},
+ {0x20, ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED},
+ {0x21, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT},
+ {0x22, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE},
+ {0x24, ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION},
+ {0x25, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS},
+ {0x34, ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION},
+ {0x35, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP},
+ {0x36, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP},
+ {0x37, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS},
+ {0x3C, ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET},
+ {0x3D, ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT},
+ {0x41, ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT},
+ {0x43, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS},
+ {0xA7, ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR},
+ {0xA8, ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID,
+ ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_CONNECTION_OPEN},
+ { /* sentinel */ }
+};
+
+static void
+pd692x0_get_ext_state(struct ethtool_c33_pse_ext_state_info *c33_ext_state_info,
+ u32 status_code)
+{
+ const struct pd692x0_pse_ext_state_mapping *ext_state_map;
+
+ ext_state_map = pd692x0_pse_ext_state_map;
+ while (ext_state_map->status_code) {
+ if (ext_state_map->status_code == status_code) {
+ c33_ext_state_info->c33_pse_ext_state = ext_state_map->pse_ext_state;
+ c33_ext_state_info->__c33_pse_ext_substate = ext_state_map->pse_ext_substate;
+ return;
+ }
+ ext_state_map++;
+ }
+}
+
+struct pd692x0_class_pw {
+ int class;
+ int class_cfg_value;
+ int class_pw;
+ int max_added_class_pw;
+};
+
+#define PD692X0_CLASS_PW_TABLE_SIZE 4
+/* 4/2 pairs class configuration power table in compliance mode.
+ * Need to be arranged in ascending order of power support.
+ */
+static const struct pd692x0_class_pw
+pd692x0_class_pw_table[PD692X0_CLASS_PW_TABLE_SIZE] = {
+ {.class = 3, .class_cfg_value = 0x3, .class_pw = 15000, .max_added_class_pw = 3100},
+ {.class = 4, .class_cfg_value = 0x2, .class_pw = 30000, .max_added_class_pw = 8000},
+ {.class = 6, .class_cfg_value = 0x1, .class_pw = 60000, .max_added_class_pw = 5000},
+ {.class = 8, .class_cfg_value = 0x0, .class_pw = 90000, .max_added_class_pw = 7500},
+};
+
+static int pd692x0_pi_get_pw_from_table(int op_mode, int added_pw)
+{
+ const struct pd692x0_class_pw *pw_table;
+ int i;
+
+ pw_table = pd692x0_class_pw_table;
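+ /* Port power is the class base power plus the added power encoded
+ * in 100mW steps.
+ */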
+ for (i = 0; i < PD692X0_CLASS_PW_TABLE_SIZE; i++, pw_table++) {
+ if (pw_table->class_cfg_value == op_mode)
+ return pw_table->class_pw + added_pw * 100;
+ }
+
+ return -ERANGE;
+}
+
+static int pd692x0_pi_set_pw_from_table(struct device *dev,
+ struct pd692x0_msg *msg, int pw)
+{
+ const struct pd692x0_class_pw *pw_table;
+ int i;
+
+ pw_table = pd692x0_class_pw_table;
+ if (pw < pw_table->class_pw) {
+ dev_err(dev,
+ "Power limit %dmW not supported. Ranges minimal available: [%d-%d]\n",
+ pw,
+ pw_table->class_pw,
+ pw_table->class_pw + pw_table->max_added_class_pw);
+ return -ERANGE;
+ }
+
+ for (i = 0; i < PD692X0_CLASS_PW_TABLE_SIZE; i++, pw_table++) {
+ if (pw > (pw_table->class_pw + pw_table->max_added_class_pw))
+ continue;
+
+ if (pw < pw_table->class_pw) {
+ dev_err(dev,
+ "Power limit %dmW not supported. Ranges available: [%d-%d] or [%d-%d]\n",
+ pw,
+ (pw_table - 1)->class_pw,
+ (pw_table - 1)->class_pw + (pw_table - 1)->max_added_class_pw,
+ pw_table->class_pw,
+ pw_table->class_pw + pw_table->max_added_class_pw);
+ return -ERANGE;
+ }
+
+ msg->data[2] = pw_table->class_cfg_value;
+ msg->data[3] = (pw - pw_table->class_pw) / 100;
+ return 0;
+ }
+
+ pw_table--;
+ dev_warn(dev,
+ "Power limit %dmW not supported. Set to highest power limit %dmW\n",
+ pw, pw_table->class_pw + pw_table->max_added_class_pw);
+ msg->data[2] = pw_table->class_cfg_value;
+ msg->data[3] = pw_table->max_added_class_pw / 100;
+ return 0;
+}
+
+static int
+pd692x0_pi_get_pw_ranges(struct pse_control_status *st)
+{
+ const struct pd692x0_class_pw *pw_table;
+ int i;
+
+ pw_table = pd692x0_class_pw_table;
+ st->c33_pw_limit_ranges = kcalloc(PD692X0_CLASS_PW_TABLE_SIZE,
+ sizeof(struct ethtool_c33_pse_pw_limit_range),
+ GFP_KERNEL);
+ if (!st->c33_pw_limit_ranges)
+ return -ENOMEM;
+
+ for (i = 0; i < PD692X0_CLASS_PW_TABLE_SIZE; i++, pw_table++) {
+ st->c33_pw_limit_ranges[i].min = pw_table->class_pw;
+ st->c33_pw_limit_ranges[i].max = pw_table->class_pw + pw_table->max_added_class_pw;
+ }
+
+ st->c33_pw_limit_nb_ranges = i;
+ return 0;
+}
+
+static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev,
+ unsigned long id,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ u32 class;
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_STATUS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ /* Compare Port Status (Communication Protocol Document par. 7.1) */
+ if ((buf.sub[0] & 0xf0) == 0x80 || (buf.sub[0] & 0xf0) == 0x90)
+ status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ else if (buf.sub[0] == 0x1b || buf.sub[0] == 0x22)
+ status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_SEARCHING;
+ else if (buf.sub[0] == 0x12)
+ status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_FAULT;
+ else
+ status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
+
+ if (buf.sub[1])
+ status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ priv->admin_state[id] = status->c33_admin_state;
+
+ pd692x0_get_ext_state(&status->c33_ext_state_info, buf.sub[0]);
+ status->c33_actual_pw = (buf.data[0] << 4 | buf.data[1]) * 100;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
+ msg.sub[2] = id;
+ memset(&buf, 0, sizeof(buf));
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ ret = pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]);
+ if (ret < 0)
+ return ret;
+ status->c33_avail_pw_limit = ret;
+
+ memset(&buf, 0, sizeof(buf));
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_CLASS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ class = buf.data[3] >> 4;
+ if (class <= 8)
+ status->c33_pw_class = class;
+
+ ret = pd692x0_pi_get_pw_ranges(status);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static struct pd692x0_msg_ver pd692x0_get_sw_version(struct pd692x0_priv *priv)
+{
+ struct device *dev = &priv->client->dev;
+ struct pd692x0_msg msg, buf = {0};
+ struct pd692x0_msg_ver ver = {0};
+ int ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_SW_VER];
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get PSE version (%pe)\n", ERR_PTR(ret));
+ return ver;
+ }
+
+ /* Extract version from the message */
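+ /* The version fields are packed decimally, e.g. raw 355 -> 3.5.5 */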
+ ver.prod = buf.sub[2];
+ ver.maj_sw_ver = (buf.data[0] << 8 | buf.data[1]) / 100;
+ ver.min_sw_ver = ((buf.data[0] << 8 | buf.data[1]) / 10) % 10;
+ ver.pa_sw_ver = (buf.data[0] << 8 | buf.data[1]) % 10;
+ ver.param = buf.data[2];
+ ver.build = buf.data[3];
+
+ return ver;
+}
+
+struct pd692x0_manager {
+ struct device_node *port_node[PD692X0_MAX_MANAGER_PORTS];
+ int nports;
+};
+
+struct pd692x0_matrix {
+ u8 hw_port_a;
+ u8 hw_port_b;
+};
+
+static int
+pd692x0_of_get_ports_manager(struct pd692x0_priv *priv,
+ struct pd692x0_manager *manager,
+ struct device_node *np)
+{
+ struct device_node *node;
+ int ret, nports, i;
+
+ nports = 0;
+ for_each_child_of_node(np, node) {
+ u32 port;
+
+ if (!of_node_name_eq(node, "port"))
+ continue;
+
+ ret = of_property_read_u32(node, "reg", &port);
+ if (ret)
+ goto out;
+
+ if (port >= PD692X0_MAX_MANAGER_PORTS || port != nports) {
+ dev_err(&priv->client->dev,
+ "wrong number or order of manager ports (%d)\n",
+ port);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ of_node_get(node);
+ manager->port_node[port] = node;
+ nports++;
+ }
+
+ manager->nports = nports;
+ return 0;
+
+out:
+ for (i = 0; i < nports; i++) {
+ of_node_put(manager->port_node[i]);
+ manager->port_node[i] = NULL;
+ }
+ of_node_put(node);
+ return ret;
+}
+
+static int
+pd692x0_of_get_managers(struct pd692x0_priv *priv,
+ struct pd692x0_manager manager[PD692X0_MAX_MANAGERS])
+{
+ struct device_node *managers_node, *node;
+ int ret, nmanagers, i, j;
+
+ if (!priv->np)
+ return -EINVAL;
+
+ nmanagers = 0;
+ managers_node = of_get_child_by_name(priv->np, "managers");
+ if (!managers_node)
+ return -EINVAL;
+
+ for_each_child_of_node(managers_node, node) {
+ u32 manager_id;
+
+ if (!of_node_name_eq(node, "manager"))
+ continue;
+
+ ret = of_property_read_u32(node, "reg", &manager_id);
+ if (ret)
+ goto out;
+
+ if (manager_id >= PD692X0_MAX_MANAGERS ||
+ manager_id != nmanagers) {
+ dev_err(&priv->client->dev,
+ "wrong number or order of managers (%d)\n",
+ manager_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = pd692x0_of_get_ports_manager(priv, &manager[manager_id],
+ node);
+ if (ret)
+ goto out;
+
+ nmanagers++;
+ }
+
+ of_node_put(managers_node);
+ return nmanagers;
+
+out:
+ for (i = 0; i < nmanagers; i++) {
+ for (j = 0; j < manager[i].nports; j++) {
+ of_node_put(manager[i].port_node[j]);
+ manager[i].port_node[j] = NULL;
+ }
+ }
+
+ of_node_put(node);
+ of_node_put(managers_node);
+ return ret;
+}
+
+static int
+pd692x0_set_port_matrix(const struct pse_pi_pairset *pairset,
+ const struct pd692x0_manager *manager,
+ int nmanagers, struct pd692x0_matrix *port_matrix)
+{
+ int i, j, port_cnt;
+ bool found = false;
+
+ if (!pairset->np)
+ return 0;
+
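+ /* port_cnt ends up as the global hardware port index: all ports of
+ * the managers scanned before the match, plus the matching port's
+ * offset within its manager.
+ */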
+ /* Look at every manager */
+ port_cnt = 0;
+ for (i = 0; i < nmanagers; i++) {
+ /* Look at every port of the manager */
+ for (j = 0; j < manager[i].nports; j++) {
+ if (pairset->np == manager[i].port_node[j]) {
+ found = true;
+ break;
+ }
+ }
+ port_cnt += j;
+
+ if (found)
+ break;
+ }
+
+ if (!found)
+ return -ENODEV;
+
+ if (pairset->pinout == ALTERNATIVE_A)
+ port_matrix->hw_port_a = port_cnt;
+ else if (pairset->pinout == ALTERNATIVE_B)
+ port_matrix->hw_port_b = port_cnt;
+
+ return 0;
+}
+
+static int
+pd692x0_set_ports_matrix(struct pd692x0_priv *priv,
+ const struct pd692x0_manager *manager,
+ int nmanagers,
+ struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS])
+{
+ struct pse_controller_dev *pcdev = &priv->pcdev;
+ int i, ret;
+
+ /* Init Matrix */
+ for (i = 0; i < PD692X0_MAX_PIS; i++) {
+ port_matrix[i].hw_port_a = 0xff;
+ port_matrix[i].hw_port_b = 0xff;
+ }
+
+ /* Update with values for every PSE PI */
+ for (i = 0; i < pcdev->nr_lines; i++) {
+ ret = pd692x0_set_port_matrix(&pcdev->pi[i].pairset[0],
+ manager, nmanagers,
+ &port_matrix[i]);
+ if (ret) {
+ dev_err(&priv->client->dev,
+ "unable to configure pi %d pairset 0", i);
+ return ret;
+ }
+
+ ret = pd692x0_set_port_matrix(&pcdev->pi[i].pairset[1],
+ manager, nmanagers,
+ &port_matrix[i]);
+ if (ret) {
+ dev_err(&priv->client->dev,
+ "unable to configure pi %d pairset 1", i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int
+pd692x0_write_ports_matrix(struct pd692x0_priv *priv,
+ const struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS])
+{
+ struct pd692x0_msg msg, buf;
+ int ret, i;
+
+ /* Write temporary Matrix */
+ msg = pd692x0_msg_template_list[PD692X0_MSG_SET_TMP_PORT_MATRIX];
+ for (i = 0; i < PD692X0_MAX_PIS; i++) {
+ msg.sub[2] = i;
+ msg.data[0] = port_matrix[i].hw_port_b;
+ msg.data[1] = port_matrix[i].hw_port_a;
+
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Program Matrix */
+ msg = pd692x0_msg_template_list[PD692X0_MSG_PRG_PORT_MATRIX];
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int pd692x0_setup_pi_matrix(struct pse_controller_dev *pcdev)
+{
+ struct pd692x0_manager manager[PD692X0_MAX_MANAGERS] = {0};
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_matrix port_matrix[PD692X0_MAX_PIS];
+ int ret, i, j, nmanagers;
+
+ /* Should we flash the port matrix */
+ if (priv->fw_state != PD692X0_FW_OK &&
+ priv->fw_state != PD692X0_FW_COMPLETE)
+ return 0;
+
+ ret = pd692x0_of_get_managers(priv, manager);
+ if (ret < 0)
+ return ret;
+
+ nmanagers = ret;
+ ret = pd692x0_set_ports_matrix(priv, manager, nmanagers, port_matrix);
+ if (ret)
+ goto out;
+
+ ret = pd692x0_write_ports_matrix(priv, port_matrix);
+ if (ret)
+ goto out;
+
+out:
+ for (i = 0; i < nmanagers; i++) {
+ for (j = 0; j < manager[i].nports; j++)
+ of_node_put(manager[i].port_node[j]);
+ }
+ return ret;
+}
+
+static int pd692x0_pi_get_voltage(struct pse_controller_dev *pcdev, int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int ret;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_MEAS];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ /* Convert 0.1V unit to uV */
+ return (buf.sub[0] << 8 | buf.sub[1]) * 100000;
+}
+
+static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev,
+ int id)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct pd692x0_msg msg, buf = {0};
+ int mW, uV, uA, ret;
+ s64 tmp_64;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM];
+ msg.sub[2] = id;
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0)
+ return ret;
+
+ ret = pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]);
+ if (ret < 0)
+ return ret;
+ mW = ret;
+
+ ret = pd692x0_pi_get_voltage(pcdev, id);
+ if (ret < 0)
+ return ret;
+ uV = ret;
+
+ tmp_64 = mW;
+ tmp_64 *= 1000000000ull;
+ /* uA = mW * 1000000000 / uV */
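+ /* e.g. 30000mW at 54V (54000000uV) -> 555556uA */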
+ uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
+ return uA;
+}
+
+static int pd692x0_pi_set_current_limit(struct pse_controller_dev *pcdev,
+ int id, int max_uA)
+{
+ struct pd692x0_priv *priv = to_pd692x0_priv(pcdev);
+ struct device *dev = &priv->client->dev;
+ struct pd692x0_msg msg, buf = {0};
+ int uV, ret, mW;
+ s64 tmp_64;
+
+ ret = pd692x0_fw_unavailable(priv);
+ if (ret)
+ return ret;
+
+ ret = pd692x0_pi_get_voltage(pcdev, id);
+ if (ret < 0)
+ return ret;
+ uV = ret;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM];
+ msg.sub[2] = id;
+ tmp_64 = uV;
+ tmp_64 *= max_uA;
+ /* mW = uV * uA / 1000000000 */
+ mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000);
+ ret = pd692x0_pi_set_pw_from_table(dev, &msg, mW);
+ if (ret)
+ return ret;
+
+ return pd692x0_sendrecv_msg(priv, &msg, &buf);
+}
+
+static const struct pse_controller_ops pd692x0_ops = {
+ .setup_pi_matrix = pd692x0_setup_pi_matrix,
+ .ethtool_get_status = pd692x0_ethtool_get_status,
+ .pi_enable = pd692x0_pi_enable,
+ .pi_disable = pd692x0_pi_disable,
+ .pi_is_enabled = pd692x0_pi_is_enabled,
+ .pi_get_voltage = pd692x0_pi_get_voltage,
+ .pi_get_current_limit = pd692x0_pi_get_current_limit,
+ .pi_set_current_limit = pd692x0_pi_set_current_limit,
+};
+
+#define PD692X0_FW_LINE_MAX_SZ 0xff
+static int pd692x0_fw_get_next_line(const u8 *data,
+ char *line, size_t size)
+{
+ size_t line_size;
+ int i;
+
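+ /* Copy one CRLF-terminated line out of the firmware blob; fail if no
+ * terminator is found within the maximum line size.
+ */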
+ line_size = min_t(size_t, size, PD692X0_FW_LINE_MAX_SZ);
+
+ memset(line, 0, PD692X0_FW_LINE_MAX_SZ);
+ for (i = 0; i < line_size - 1; i++) {
+ if (*data == '\r' && *(data + 1) == '\n') {
+ line[i] = '\r';
+ line[i + 1] = '\n';
+ return i + 2;
+ }
+ line[i] = *data;
+ data++;
+ }
+
+ return -EIO;
+}
+
+static enum fw_upload_err
+pd692x0_fw_recv_resp(const struct i2c_client *client, unsigned long ms_timeout,
+ const char *msg_ok, unsigned int msg_size)
+{
+ /* Maximum controller response size */
+ char fw_msg_buf[5] = {0};
+ unsigned long timeout;
+ int ret;
+
+ if (msg_size > sizeof(fw_msg_buf))
+ return FW_UPLOAD_ERR_RW_ERROR;
+
+ /* Read until we get something */
+ timeout = msecs_to_jiffies(ms_timeout) + jiffies;
+ while (true) {
+ if (time_is_before_jiffies(timeout))
+ return FW_UPLOAD_ERR_TIMEOUT;
+
+ ret = i2c_master_recv(client, fw_msg_buf, 1);
+ if (ret < 0 || *fw_msg_buf == 0) {
+ usleep_range(1000, 2000);
+ continue;
+ } else {
+ break;
+ }
+ }
+
+ /* Read remaining characters */
+ ret = i2c_master_recv(client, fw_msg_buf + 1, msg_size - 1);
+ if (strncmp(fw_msg_buf, msg_ok, msg_size)) {
+ dev_err(&client->dev,
+ "Wrong FW download process answer (%*pE)\n",
+ msg_size, fw_msg_buf);
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static int pd692x0_fw_write_line(const struct i2c_client *client,
+ const char line[PD692X0_FW_LINE_MAX_SZ],
+ const bool last_line)
+{
+ int ret;
+
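+ /* The bootloader is fed one byte at a time and acknowledges each line
+ * with "T*" (regular record) or "TP" (last record).
+ */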
+ while (*line != 0) {
+ ret = i2c_master_send(client, line, 1);
+ if (ret < 0)
+ return FW_UPLOAD_ERR_RW_ERROR;
+ line++;
+ }
+
+ if (last_line) {
+ ret = pd692x0_fw_recv_resp(client, 100, "TP\r\n",
+ sizeof("TP\r\n") - 1);
+ if (ret)
+ return ret;
+ } else {
+ ret = pd692x0_fw_recv_resp(client, 100, "T*\r\n",
+ sizeof("T*\r\n") - 1);
+ if (ret)
+ return ret;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err pd692x0_fw_reset(const struct i2c_client *client)
+{
+ const struct pd692x0_msg zero = {0};
+ struct pd692x0_msg buf = {0};
+ unsigned long timeout;
+ char cmd[] = "RST";
+ int ret;
+
+ ret = i2c_master_send(client, cmd, strlen(cmd));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to reset the controller (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ timeout = msecs_to_jiffies(10000) + jiffies;
+ while (true) {
+ if (time_is_before_jiffies(timeout))
+ return FW_UPLOAD_ERR_TIMEOUT;
+
+ ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf));
+ if (ret < 0 ||
+ !memcmp(&buf, &zero, sizeof(buf)))
+ usleep_range(1000, 2000);
+ else
+ break;
+ }
+
+ /* Is the reply a successful report message */
+ if (buf.key != PD692X0_KEY_TLM || buf.echo != 0xff ||
+ buf.sub[0] & 0x01) {
+ dev_err(&client->dev, "PSE controller error\n");
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ /* Is the firmware operational */
+ if (buf.sub[0] & 0x02) {
+ dev_err(&client->dev,
+ "PSE firmware error. Please update it.\n");
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static enum fw_upload_err pd692x0_fw_prepare(struct fw_upload *fwl,
+ const u8 *data, u32 size)
+{
+ struct pd692x0_priv *priv = fwl->dd_handle;
+ const struct i2c_client *client = priv->client;
+ enum pd692x0_fw_state last_fw_state;
+ int ret;
+
+ priv->cancel_request = false;
+ last_fw_state = priv->fw_state;
+
+ priv->fw_state = PD692X0_FW_PREPARE;
+
+ /* Enter program mode */
+ if (last_fw_state == PD692X0_FW_BROKEN) {
+ const char *msg = "ENTR";
+ const char *c;
+
+ c = msg;
+ do {
+ ret = i2c_master_send(client, c, 1);
+ if (ret < 0)
+ return FW_UPLOAD_ERR_RW_ERROR;
+ if (*(c + 1))
+ usleep_range(10000, 20000);
+ } while (*(++c));
+ } else {
+ struct pd692x0_msg msg, buf;
+
+ msg = pd692x0_msg_template_list[PD692X0_MSG_DOWNLOAD_CMD];
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to enter programming mode (%pe)\n",
+ ERR_PTR(ret));
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+ }
+
+ ret = pd692x0_fw_recv_resp(client, 100, "TPE\r\n", sizeof("TPE\r\n") - 1);
+ if (ret)
+ goto err_out;
+
+ if (priv->cancel_request) {
+ ret = FW_UPLOAD_ERR_CANCELED;
+ goto err_out;
+ }
+
+ return FW_UPLOAD_ERR_NONE;
+
+err_out:
+ pd692x0_fw_reset(priv->client);
+ priv->fw_state = last_fw_state;
+ return ret;
+}
+
+static enum fw_upload_err pd692x0_fw_write(struct fw_upload *fwl,
+ const u8 *data, u32 offset,
+ u32 size, u32 *written)
+{
+ struct pd692x0_priv *priv = fwl->dd_handle;
+ char line[PD692X0_FW_LINE_MAX_SZ];
+ const struct i2c_client *client;
+ int ret, i;
+ char cmd;
+
+ client = priv->client;
+ priv->fw_state = PD692X0_FW_WRITE;
+
+ /* Erase */
+ cmd = 'E';
+ ret = i2c_master_send(client, &cmd, 1);
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to boot programming mode (%pe)\n",
+ ERR_PTR(ret));
+ return FW_UPLOAD_ERR_RW_ERROR;
+ }
+
+ ret = pd692x0_fw_recv_resp(client, 100, "TOE\r\n", sizeof("TOE\r\n") - 1);
+ if (ret)
+ return ret;
+
+ ret = pd692x0_fw_recv_resp(client, 5000, "TE\r\n", sizeof("TE\r\n") - 1);
+ if (ret)
+ dev_warn(&client->dev,
+ "Failed to erase internal memory, however still try to write Firmware\n");
+
+ ret = pd692x0_fw_recv_resp(client, 100, "TPE\r\n", sizeof("TPE\r\n") - 1);
+ if (ret)
+ dev_warn(&client->dev,
+ "Failed to erase internal memory, however still try to write Firmware\n");
+
+ if (priv->cancel_request)
+ return FW_UPLOAD_ERR_CANCELED;
+
+ /* Program */
+ cmd = 'P';
+ ret = i2c_master_send(client, &cmd, sizeof(char));
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "Failed to boot programming mode (%pe)\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+
+ ret = pd692x0_fw_recv_resp(client, 100, "TOP\r\n", sizeof("TOP\r\n") - 1);
+ if (ret)
+ return ret;
+
+ i = 0;
+ while (i < size) {
+ ret = pd692x0_fw_get_next_line(data, line, size - i);
+ if (ret < 0) {
+ ret = FW_UPLOAD_ERR_FW_INVALID;
+ goto err;
+ }
+
+ i += ret;
+ data += ret;
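+ /* The firmware is a Motorola S-record stream: skip S0 header records
+ * and treat S7, the termination record, as the last line.
+ */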
+ if (line[0] == 'S' && line[1] == '0') {
+ continue;
+ } else if (line[0] == 'S' && line[1] == '7') {
+ ret = pd692x0_fw_write_line(client, line, true);
+ if (ret)
+ goto err;
+ } else {
+ ret = pd692x0_fw_write_line(client, line, false);
+ if (ret)
+ goto err;
+ }
+
+ if (priv->cancel_request) {
+ ret = FW_UPLOAD_ERR_CANCELED;
+ goto err;
+ }
+ }
+ *written = i;
+
+ msleep(400);
+
+ return FW_UPLOAD_ERR_NONE;
+
+err:
+ strscpy_pad(line, "S7\r\n", sizeof(line));
+ pd692x0_fw_write_line(client, line, true);
+ return ret;
+}
+
+static enum fw_upload_err pd692x0_fw_poll_complete(struct fw_upload *fwl)
+{
+ struct pd692x0_priv *priv = fwl->dd_handle;
+ const struct i2c_client *client = priv->client;
+ struct pd692x0_msg_ver ver;
+ int ret;
+
+ priv->fw_state = PD692X0_FW_COMPLETE;
+
+ ret = pd692x0_fw_reset(client);
+ if (ret)
+ return ret;
+
+ ver = pd692x0_get_sw_version(priv);
+ if (ver.maj_sw_ver < PD692X0_FW_MAJ_VER) {
+ dev_err(&client->dev,
+ "Too old firmware version. Please update it\n");
+ priv->fw_state = PD692X0_FW_NEED_UPDATE;
+ return FW_UPLOAD_ERR_FW_INVALID;
+ }
+
+ ret = pd692x0_setup_pi_matrix(&priv->pcdev);
+ if (ret < 0) {
+ dev_err(&client->dev, "Error configuring ports matrix (%pe)\n",
+ ERR_PTR(ret));
+ priv->fw_state = PD692X0_FW_NEED_UPDATE;
+ return FW_UPLOAD_ERR_HW_ERROR;
+ }
+
+ priv->fw_state = PD692X0_FW_OK;
+ return FW_UPLOAD_ERR_NONE;
+}
+
+static void pd692x0_fw_cancel(struct fw_upload *fwl)
+{
+ struct pd692x0_priv *priv = fwl->dd_handle;
+
+ priv->cancel_request = true;
+}
+
+static void pd692x0_fw_cleanup(struct fw_upload *fwl)
+{
+ struct pd692x0_priv *priv = fwl->dd_handle;
+
+ switch (priv->fw_state) {
+ case PD692X0_FW_WRITE:
+ pd692x0_fw_reset(priv->client);
+ fallthrough;
+ case PD692X0_FW_COMPLETE:
+ priv->fw_state = PD692X0_FW_BROKEN;
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct fw_upload_ops pd692x0_fw_ops = {
+ .prepare = pd692x0_fw_prepare,
+ .write = pd692x0_fw_write,
+ .poll_complete = pd692x0_fw_poll_complete,
+ .cancel = pd692x0_fw_cancel,
+ .cleanup = pd692x0_fw_cleanup,
+};
+
+static int pd692x0_i2c_probe(struct i2c_client *client)
+{
+ struct pd692x0_msg msg, buf = {0}, zero = {0};
+ struct device *dev = &client->dev;
+ struct pd692x0_msg_ver ver;
+ struct pd692x0_priv *priv;
+ struct fw_upload *fwl;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "i2c check functionality failed\n");
+ return -ENXIO;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+
+ ret = i2c_master_recv(client, (u8 *)&buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ dev_err(dev, "Failed to get device status\n");
+ return -EIO;
+ }
+
+ /* Probe has already been run and the status dumped */
+ if (!memcmp(&buf, &zero, sizeof(buf))) {
+ /* Ask the controller for its status again */
+ msg = pd692x0_msg_template_list[PD692X0_MSG_GET_SYS_STATUS];
+ ret = pd692x0_sendrecv_msg(priv, &msg, &buf);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get device status\n");
+ return ret;
+ }
+ }
+
+ if (buf.key != 0x03 || buf.sub[0] & 0x01) {
+ dev_err(dev, "PSE controller error\n");
+ return -EIO;
+ }
+ if (buf.sub[0] & 0x02) {
+ dev_err(dev, "PSE firmware error. Please update it.\n");
+ priv->fw_state = PD692X0_FW_BROKEN;
+ } else {
+ ver = pd692x0_get_sw_version(priv);
+ dev_info(&client->dev, "Software version %d.%02d.%d.%d\n",
+ ver.prod, ver.maj_sw_ver, ver.min_sw_ver,
+ ver.pa_sw_ver);
+
+ if (ver.maj_sw_ver < PD692X0_FW_MAJ_VER) {
+ dev_err(dev, "Too old firmware version. Please update it\n");
+ priv->fw_state = PD692X0_FW_NEED_UPDATE;
+ } else {
+ priv->fw_state = PD692X0_FW_OK;
+ }
+ }
+
+ priv->np = dev->of_node;
+ priv->pcdev.nr_lines = PD692X0_MAX_PIS;
+ priv->pcdev.owner = THIS_MODULE;
+ priv->pcdev.ops = &pd692x0_ops;
+ priv->pcdev.dev = dev;
+ priv->pcdev.types = ETHTOOL_PSE_C33;
+ ret = devm_pse_controller_register(dev, &priv->pcdev);
+ if (ret)
+ return dev_err_probe(dev, ret,
+ "failed to register PSE controller\n");
+
+ fwl = firmware_upload_register(THIS_MODULE, dev, dev_name(dev),
+ &pd692x0_fw_ops, priv);
+ if (IS_ERR(fwl))
+ return dev_err_probe(dev, PTR_ERR(fwl),
+ "failed to register to the Firmware Upload API\n");
+ priv->fwl = fwl;
+
+ return 0;
+}
+
+static void pd692x0_i2c_remove(struct i2c_client *client)
+{
+ struct pd692x0_priv *priv = i2c_get_clientdata(client);
+
+ firmware_upload_unregister(priv->fwl);
+}
+
+static const struct i2c_device_id pd692x0_id[] = {
+ { PD692X0_PSE_NAME },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pd692x0_id);
+
+static const struct of_device_id pd692x0_of_match[] = {
+ { .compatible = "microchip,pd69200", },
+ { .compatible = "microchip,pd69210", },
+ { .compatible = "microchip,pd69220", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pd692x0_of_match);
+
+static struct i2c_driver pd692x0_driver = {
+ .probe = pd692x0_i2c_probe,
+ .remove = pd692x0_i2c_remove,
+ .id_table = pd692x0_id,
+ .driver = {
+ .name = PD692X0_PSE_NAME,
+ .of_match_table = pd692x0_of_match,
+ },
+};
+module_i2c_driver(pd692x0_driver);
+
+MODULE_AUTHOR("Kory Maincent <kory.maincent@bootlin.com>");
+MODULE_DESCRIPTION("Microchip PD692x0 PoE PSE Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c
index 146b81f08a89..4f032b16a8a0 100644
--- a/drivers/net/pse-pd/pse_core.c
+++ b/drivers/net/pse-pd/pse_core.c
@@ -8,6 +8,8 @@
#include <linux/device.h>
#include <linux/of.h>
#include <linux/pse-pd/pse.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
static DEFINE_MUTEX(pse_list_mutex);
static LIST_HEAD(pse_controller_list);
@@ -16,67 +18,467 @@ static LIST_HEAD(pse_controller_list);
* struct pse_control - a PSE control
* @pcdev: a pointer to the PSE controller device
* this PSE control belongs to
+ * @ps: PSE PI supply of the PSE control
* @list: list entry for the pcdev's PSE controller list
* @id: ID of the PSE line in the PSE controller device
* @refcnt: Number of gets of this pse_control
*/
struct pse_control {
struct pse_controller_dev *pcdev;
+ struct regulator *ps;
struct list_head list;
unsigned int id;
struct kref refcnt;
};
+static int of_load_single_pse_pi_pairset(struct device_node *node,
+ struct pse_pi *pi,
+ int pairset_num)
+{
+ struct device_node *pairset_np;
+ const char *name;
+ int ret;
+
+ ret = of_property_read_string_index(node, "pairset-names",
+ pairset_num, &name);
+ if (ret)
+ return ret;
+
+ if (!strcmp(name, "alternative-a")) {
+ pi->pairset[pairset_num].pinout = ALTERNATIVE_A;
+ } else if (!strcmp(name, "alternative-b")) {
+ pi->pairset[pairset_num].pinout = ALTERNATIVE_B;
+ } else {
+ pr_err("pse: wrong pairset-names value %s (%pOF)\n",
+ name, node);
+ return -EINVAL;
+ }
+
+ pairset_np = of_parse_phandle(node, "pairsets", pairset_num);
+ if (!pairset_np)
+ return -ENODEV;
+
+ pi->pairset[pairset_num].np = pairset_np;
+
+ return 0;
+}
+
/**
- * of_pse_zero_xlate - dummy function for controllers with one only control
- * @pcdev: a pointer to the PSE controller device
- * @pse_spec: PSE line specifier as found in the device tree
+ * of_load_pse_pi_pairsets - load PSE PI pairsets pinout and polarity
+ * @node: a pointer of the device node
+ * @pi: a pointer of the PSE PI to fill
+ * @npairsets: the number of pairsets (1 or 2) used by the PI
*
- * This static translation function is used by default if of_xlate in
- * :c:type:`pse_controller_dev` is not set. It is useful for all PSE
- * controllers with #pse-cells = <0>.
+ * Return: 0 on success and failure value on error
*/
-static int of_pse_zero_xlate(struct pse_controller_dev *pcdev,
- const struct of_phandle_args *pse_spec)
+static int of_load_pse_pi_pairsets(struct device_node *node,
+ struct pse_pi *pi,
+ int npairsets)
{
- return 0;
+ int i, ret;
+
+ ret = of_property_count_strings(node, "pairset-names");
+ if (ret != npairsets) {
+ pr_err("pse: amount of pairsets and pairset-names is not equal %d != %d (%pOF)\n",
+ npairsets, ret, node);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < npairsets; i++) {
+ ret = of_load_single_pse_pi_pairset(node, pi, i);
+ if (ret)
+ goto out;
+ }
+
+ if (npairsets == 2 &&
+ pi->pairset[0].pinout == pi->pairset[1].pinout) {
+ pr_err("pse: two PI pairsets can not have identical pinout (%pOF)",
+ node);
+ ret = -EINVAL;
+ }
+
+out:
+ /* On error, release all the pairset device node krefs */
+ if (ret) {
+ of_node_put(pi->pairset[0].np);
+ pi->pairset[0].np = NULL;
+ of_node_put(pi->pairset[1].np);
+ pi->pairset[1].np = NULL;
+ }
+
+ return ret;
+}
+
+static void pse_release_pis(struct pse_controller_dev *pcdev)
+{
+ int i;
+
+ for (i = 0; i <= pcdev->nr_lines; i++) {
+ of_node_put(pcdev->pi[i].pairset[0].np);
+ of_node_put(pcdev->pi[i].pairset[1].np);
+ of_node_put(pcdev->pi[i].np);
+ }
+ kfree(pcdev->pi);
}
/**
- * of_pse_simple_xlate - translate pse_spec to the PSE line number
+ * of_load_pse_pis - load all the PSE PIs
* @pcdev: a pointer to the PSE controller device
- * @pse_spec: PSE line specifier as found in the device tree
*
- * This static translation function is used by default if of_xlate in
- * :c:type:`pse_controller_dev` is not set. It is useful for all PSE
- * controllers with 1:1 mapping, where PSE lines can be indexed by number
- * without gaps.
+ * Return: 0 on success and failure value on error
*/
-static int of_pse_simple_xlate(struct pse_controller_dev *pcdev,
- const struct of_phandle_args *pse_spec)
+static int of_load_pse_pis(struct pse_controller_dev *pcdev)
{
- if (pse_spec->args[0] >= pcdev->nr_lines)
- return -EINVAL;
+ struct device_node *np = pcdev->dev->of_node;
+ struct device_node *node, *pis;
+ int ret;
- return pse_spec->args[0];
+ if (!np)
+ return -ENODEV;
+
+ pcdev->pi = kcalloc(pcdev->nr_lines, sizeof(*pcdev->pi), GFP_KERNEL);
+ if (!pcdev->pi)
+ return -ENOMEM;
+
+ pis = of_get_child_by_name(np, "pse-pis");
+ if (!pis) {
+ /* no description of PSE PIs */
+ pcdev->no_of_pse_pi = true;
+ return 0;
+ }
+
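+ /* Each pse-pi child node describes one PI: "reg" gives its index and
+ * the optional "pairsets" phandles list the pairsets feeding it.
+ */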
+ for_each_child_of_node(pis, node) {
+ struct pse_pi pi = {0};
+ u32 id;
+
+ if (!of_node_name_eq(node, "pse-pi"))
+ continue;
+
+ ret = of_property_read_u32(node, "reg", &id);
+ if (ret) {
+ dev_err(pcdev->dev,
+ "can't get reg property for node '%pOF'",
+ node);
+ goto out;
+ }
+
+ if (id >= pcdev->nr_lines) {
+ dev_err(pcdev->dev,
+ "reg value (%u) is out of range (%u) (%pOF)\n",
+ id, pcdev->nr_lines, node);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (pcdev->pi[id].np) {
+ dev_err(pcdev->dev,
+ "other node with same reg value was already registered. %pOF : %pOF\n",
+ pcdev->pi[id].np, node);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = of_count_phandle_with_args(node, "pairsets", NULL);
+ /* npairsets is limited to one or two */
+ if (ret == 1 || ret == 2) {
+ ret = of_load_pse_pi_pairsets(node, &pi, ret);
+ if (ret)
+ goto out;
+ } else if (ret != -ENOENT) {
+ dev_err(pcdev->dev,
+ "error: wrong number of pairsets. Should be 1 or 2, got %d (%pOF)\n",
+ ret, node);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ of_node_get(node);
+ pi.np = node;
+ memcpy(&pcdev->pi[id], &pi, sizeof(pi));
+ }
+
+ of_node_put(pis);
+ return 0;
+
+out:
+ pse_release_pis(pcdev);
+ of_node_put(node);
+ of_node_put(pis);
+ return ret;
+}
+
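+/* Regulator framework callbacks: each PSE PI is exposed as a regulator,
+ * with pcdev->lock serializing access to the controller ops.
+ */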
+static int pse_pi_is_enabled(struct regulator_dev *rdev)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ const struct pse_controller_ops *ops;
+ int id, ret;
+
+ ops = pcdev->ops;
+ if (!ops->pi_is_enabled)
+ return -EOPNOTSUPP;
+
+ id = rdev_get_id(rdev);
+ mutex_lock(&pcdev->lock);
+ ret = ops->pi_is_enabled(pcdev, id);
+ mutex_unlock(&pcdev->lock);
+
+ return ret;
+}
+
+static int pse_pi_enable(struct regulator_dev *rdev)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ const struct pse_controller_ops *ops;
+ int id, ret;
+
+ ops = pcdev->ops;
+ if (!ops->pi_enable)
+ return -EOPNOTSUPP;
+
+ id = rdev_get_id(rdev);
+ mutex_lock(&pcdev->lock);
+ ret = ops->pi_enable(pcdev, id);
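+ /* cache the admin state so pse_ethtool_set_config() keeps the
+ * regulator use count balanced
+ */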
+ if (!ret)
+ pcdev->pi[id].admin_state_enabled = 1;
+ mutex_unlock(&pcdev->lock);
+
+ return ret;
+}
+
+static int pse_pi_disable(struct regulator_dev *rdev)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ const struct pse_controller_ops *ops;
+ int id, ret;
+
+ ops = pcdev->ops;
+ if (!ops->pi_disable)
+ return -EOPNOTSUPP;
+
+ id = rdev_get_id(rdev);
+ mutex_lock(&pcdev->lock);
+ ret = ops->pi_disable(pcdev, id);
+ if (!ret)
+ pcdev->pi[id].admin_state_enabled = 0;
+ mutex_unlock(&pcdev->lock);
+
+ return ret;
+}
+
+static int _pse_pi_get_voltage(struct regulator_dev *rdev)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ const struct pse_controller_ops *ops;
+ int id;
+
+ ops = pcdev->ops;
+ if (!ops->pi_get_voltage)
+ return -EOPNOTSUPP;
+
+ id = rdev_get_id(rdev);
+ return ops->pi_get_voltage(pcdev, id);
+}
+
+static int pse_pi_get_voltage(struct regulator_dev *rdev)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ int ret;
+
+ mutex_lock(&pcdev->lock);
+ ret = _pse_pi_get_voltage(rdev);
+ mutex_unlock(&pcdev->lock);
+
+ return ret;
+}
+
+static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
+ int id,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status);
+
+static int pse_pi_get_current_limit(struct regulator_dev *rdev)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ const struct pse_controller_ops *ops;
+ struct netlink_ext_ack extack = {};
+ struct pse_control_status st = {};
+ int id, uV, ret;
+ s64 tmp_64;
+
+ ops = pcdev->ops;
+ id = rdev_get_id(rdev);
+ mutex_lock(&pcdev->lock);
+ if (ops->pi_get_current_limit) {
+ ret = ops->pi_get_current_limit(pcdev, id);
+ goto out;
+ }
+
+ /* If the pi_get_current_limit() callback is not populated, derive
+ * the current limit from the voltage reported by pi_get_voltage()
+ * and the power limit reported by ethtool_get_status().
+ */
+ ret = _pse_pi_get_voltage(rdev);
+ if (!ret) {
+ dev_err(pcdev->dev, "Voltage null\n");
+ ret = -ERANGE;
+ goto out;
+ }
+ if (ret < 0)
+ goto out;
+ uV = ret;
+
+ ret = _pse_ethtool_get_status(pcdev, id, &extack, &st);
+ if (ret)
+ goto out;
+
+ if (!st.c33_avail_pw_limit) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ tmp_64 = st.c33_avail_pw_limit;
+ tmp_64 *= 1000000000ull;
+ /* uA = mW * 1000000000 / uV */
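+ /* e.g. a 15400 mW limit at 54 V gives ~285185 uA */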
+ ret = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
+
+out:
+ mutex_unlock(&pcdev->lock);
+ return ret;
+}
+
+static int pse_pi_set_current_limit(struct regulator_dev *rdev, int min_uA,
+ int max_uA)
+{
+ struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev);
+ const struct pse_controller_ops *ops;
+ int id, ret;
+
+ ops = pcdev->ops;
+ if (!ops->pi_set_current_limit)
+ return -EOPNOTSUPP;
+
+ id = rdev_get_id(rdev);
+ mutex_lock(&pcdev->lock);
+ ret = ops->pi_set_current_limit(pcdev, id, max_uA);
+ mutex_unlock(&pcdev->lock);
+
+ return ret;
+}
+
+static const struct regulator_ops pse_pi_ops = {
+ .is_enabled = pse_pi_is_enabled,
+ .enable = pse_pi_enable,
+ .disable = pse_pi_disable,
+ .get_voltage = pse_pi_get_voltage,
+ .get_current_limit = pse_pi_get_current_limit,
+ .set_current_limit = pse_pi_set_current_limit,
+};
+
+static int
+devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev,
+ char *name, int id)
+{
+ struct regulator_init_data *rinit_data;
+ struct regulator_config rconfig = {0};
+ struct regulator_desc *rdesc;
+ struct regulator_dev *rdev;
+
+ rinit_data = devm_kzalloc(pcdev->dev, sizeof(*rinit_data),
+ GFP_KERNEL);
+ if (!rinit_data)
+ return -ENOMEM;
+
+ rdesc = devm_kzalloc(pcdev->dev, sizeof(*rdesc), GFP_KERNEL);
+ if (!rdesc)
+ return -ENOMEM;
+
+ /* The regulator descriptor id has to match its associated
+ * PSE PI id for the PSE controls to work properly.
+ */
+ rdesc->id = id;
+ rdesc->name = name;
+ rdesc->type = REGULATOR_VOLTAGE;
+ rdesc->ops = &pse_pi_ops;
+ rdesc->owner = pcdev->owner;
+
+ rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS;
+
+ if (pcdev->ops->pi_set_current_limit) {
+ rinit_data->constraints.valid_ops_mask |=
+ REGULATOR_CHANGE_CURRENT;
+ rinit_data->constraints.max_uA = MAX_PI_CURRENT;
+ }
+
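+ /* every PI regulator is supplied by a parent "vpwr" regulator */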
+ rinit_data->supply_regulator = "vpwr";
+
+ rconfig.dev = pcdev->dev;
+ rconfig.driver_data = pcdev;
+ rconfig.init_data = rinit_data;
+
+ rdev = devm_regulator_register(pcdev->dev, rdesc, &rconfig);
+ if (IS_ERR(rdev)) {
+ dev_err_probe(pcdev->dev, PTR_ERR(rdev),
+ "Failed to register regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ pcdev->pi[id].rdev = rdev;
+
+ return 0;
}
/**
* pse_controller_register - register a PSE controller device
* @pcdev: a pointer to the initialized PSE controller device
+ *
+ * Return: 0 on success and failure value on error
*/
int pse_controller_register(struct pse_controller_dev *pcdev)
{
- if (!pcdev->of_xlate) {
- if (pcdev->of_pse_n_cells == 0)
- pcdev->of_xlate = of_pse_zero_xlate;
- else if (pcdev->of_pse_n_cells == 1)
- pcdev->of_xlate = of_pse_simple_xlate;
- }
+ size_t reg_name_len;
+ int ret, i;
mutex_init(&pcdev->lock);
INIT_LIST_HEAD(&pcdev->pse_control_head);
+ if (!pcdev->nr_lines)
+ pcdev->nr_lines = 1;
+
+ ret = of_load_pse_pis(pcdev);
+ if (ret)
+ return ret;
+
+ if (pcdev->ops->setup_pi_matrix) {
+ ret = pcdev->ops->setup_pi_matrix(pcdev);
+ if (ret)
+ return ret;
+ }
+
+ /* Each regulator name is the pcdev device name plus 7 fixed
+ * characters ("pse-" and "_pi"), up to 10 digits for the id and
+ * a trailing NUL
+ */
+ reg_name_len = strlen(dev_name(pcdev->dev)) + 18;
+
+ /* Register PI regulators */
+ for (i = 0; i < pcdev->nr_lines; i++) {
+ char *reg_name;
+
+ /* Do not register regulator for PIs not described */
+ if (!pcdev->no_of_pse_pi && !pcdev->pi[i].np)
+ continue;
+
+ reg_name = devm_kzalloc(pcdev->dev, reg_name_len, GFP_KERNEL);
+ if (!reg_name)
+ return -ENOMEM;
+
+ snprintf(reg_name, reg_name_len, "pse-%s_pi%d",
+ dev_name(pcdev->dev), i);
+
+ ret = devm_pse_pi_regulator_register(pcdev, reg_name, i);
+ if (ret)
+ return ret;
+ }
+
mutex_lock(&pse_list_mutex);
list_add(&pcdev->list, &pse_controller_list);
mutex_unlock(&pse_list_mutex);
@@ -91,6 +493,7 @@ EXPORT_SYMBOL_GPL(pse_controller_register);
*/
void pse_controller_unregister(struct pse_controller_dev *pcdev)
{
+ pse_release_pis(pcdev);
mutex_lock(&pse_list_mutex);
list_del(&pcdev->list);
mutex_unlock(&pse_list_mutex);
@@ -110,6 +513,8 @@ static void devm_pse_controller_release(struct device *dev, void *res)
* Managed pse_controller_register(). For PSE controllers registered by
* this function, pse_controller_unregister() is automatically called on
* driver detach. See pse_controller_register() for more information.
+ *
+ * Return: 0 on success and failure value on error
*/
int devm_pse_controller_register(struct device *dev,
struct pse_controller_dev *pcdev)
@@ -144,6 +549,10 @@ static void __pse_control_release(struct kref *kref)
lockdep_assert_held(&pse_list_mutex);
+ if (psec->pcdev->pi[psec->id].admin_state_enabled)
+ regulator_disable(psec->ps);
+ devm_regulator_put(psec->ps);
+
module_put(psec->pcdev->owner);
list_del(&psec->list);
@@ -176,6 +585,7 @@ static struct pse_control *
pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
{
struct pse_control *psec;
+ int ret;
lockdep_assert_held(&pse_list_mutex);
@@ -191,20 +601,82 @@ pse_control_get_internal(struct pse_controller_dev *pcdev, unsigned int index)
return ERR_PTR(-ENOMEM);
if (!try_module_get(pcdev->owner)) {
- kfree(psec);
- return ERR_PTR(-ENODEV);
+ ret = -ENODEV;
+ goto free_psec;
}
+ psec->ps = devm_regulator_get_exclusive(pcdev->dev,
+ rdev_get_name(pcdev->pi[index].rdev));
+ if (IS_ERR(psec->ps)) {
+ ret = PTR_ERR(psec->ps);
+ goto put_module;
+ }
+
+ ret = regulator_is_enabled(psec->ps);
+ if (ret < 0)
+ goto regulator_put;
+
+ pcdev->pi[index].admin_state_enabled = ret;
+
psec->pcdev = pcdev;
list_add(&psec->list, &pcdev->pse_control_head);
psec->id = index;
kref_init(&psec->refcnt);
return psec;
+
+regulator_put:
+ devm_regulator_put(psec->ps);
+put_module:
+ module_put(pcdev->owner);
+free_psec:
+ kfree(psec);
+
+ return ERR_PTR(ret);
}
-struct pse_control *
-of_pse_control_get(struct device_node *node)
+/**
+ * of_pse_match_pi - Find the PSE PI id matching the device node phandle
+ * @pcdev: a pointer to the PSE controller device
+ * @np: a pointer to the device node
+ *
+ * Return: id of the PSE PI, -EINVAL if not found
+ */
+static int of_pse_match_pi(struct pse_controller_dev *pcdev,
+ struct device_node *np)
+{
+ int i;
+
+ for (i = 0; i <= pcdev->nr_lines; i++) {
+ if (pcdev->pi[i].np == np)
+ return i;
+ }
+
+ return -EINVAL;
+}
+
+/**
+ * psec_id_xlate - translate pse_spec to a PSE line number based on
+ * #pse-cells when no pse_pi nodes are described
+ * @pcdev: a pointer to the PSE controller device
+ * @pse_spec: PSE line specifier as found in the device tree
+ *
+ * Return: 0 if #pse-cells = <0>. Return PSE line number otherwise.
+ */
+static int psec_id_xlate(struct pse_controller_dev *pcdev,
+ const struct of_phandle_args *pse_spec)
+{
+ if (!pcdev->of_pse_n_cells)
+ return 0;
+
+ if (pcdev->of_pse_n_cells > 1 ||
+ pse_spec->args[0] >= pcdev->nr_lines)
+ return -EINVAL;
+
+ return pse_spec->args[0];
+}
+
+struct pse_control *of_pse_control_get(struct device_node *node)
{
struct pse_controller_dev *r, *pcdev;
struct of_phandle_args args;
@@ -222,7 +694,14 @@ of_pse_control_get(struct device_node *node)
mutex_lock(&pse_list_mutex);
pcdev = NULL;
list_for_each_entry(r, &pse_controller_list, list) {
- if (args.np == r->dev->of_node) {
+ if (!r->no_of_pse_pi) {
+ ret = of_pse_match_pi(r, args.np);
+ if (ret >= 0) {
+ pcdev = r;
+ psec_id = ret;
+ break;
+ }
+ } else if (args.np == r->dev->of_node) {
pcdev = r;
break;
}
@@ -238,10 +717,12 @@ of_pse_control_get(struct device_node *node)
goto out;
}
- psec_id = pcdev->of_xlate(pcdev, &args);
- if (psec_id < 0) {
- psec = ERR_PTR(psec_id);
- goto out;
+ if (pcdev->no_of_pse_pi) {
+ psec_id = psec_id_xlate(pcdev, &args);
+ if (psec_id < 0) {
+ psec = ERR_PTR(psec_id);
+ goto out;
+ }
}
/* pse_list_mutex also protects the pcdev's pse_control list */
@@ -255,60 +736,165 @@ out:
}
EXPORT_SYMBOL_GPL(of_pse_control_get);
+static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev,
+ int id,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ const struct pse_controller_ops *ops;
+
+ ops = pcdev->ops;
+ if (!ops->ethtool_get_status) {
+ NL_SET_ERR_MSG(extack,
+ "PSE driver does not support status report");
+ return -EOPNOTSUPP;
+ }
+
+ return ops->ethtool_get_status(pcdev, id, extack, status);
+}
+
/**
* pse_ethtool_get_status - get status of PSE control
* @psec: PSE control pointer
* @extack: extack for reporting useful error messages
* @status: struct to store PSE status
+ *
+ * Return: 0 on success and failure value on error
*/
int pse_ethtool_get_status(struct pse_control *psec,
struct netlink_ext_ack *extack,
struct pse_control_status *status)
{
- const struct pse_controller_ops *ops;
int err;
- ops = psec->pcdev->ops;
-
- if (!ops->ethtool_get_status) {
- NL_SET_ERR_MSG(extack,
- "PSE driver does not support status report");
- return -EOPNOTSUPP;
- }
-
mutex_lock(&psec->pcdev->lock);
- err = ops->ethtool_get_status(psec->pcdev, psec->id, extack, status);
+ err = _pse_ethtool_get_status(psec->pcdev, psec->id, extack, status);
mutex_unlock(&psec->pcdev->lock);
return err;
}
EXPORT_SYMBOL_GPL(pse_ethtool_get_status);
+static int pse_ethtool_c33_set_config(struct pse_control *psec,
+ const struct pse_control_config *config)
+{
+ int err = 0;
+
+ /* Check admin_state_enabled to avoid calling regulator_enable()
+ * or regulator_disable() twice, which would unbalance the
+ * regulator use count
+ */
+ switch (config->c33_admin_control) {
+ case ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED:
+ if (!psec->pcdev->pi[psec->id].admin_state_enabled)
+ err = regulator_enable(psec->ps);
+ break;
+ case ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED:
+ if (psec->pcdev->pi[psec->id].admin_state_enabled)
+ err = regulator_disable(psec->ps);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int pse_ethtool_podl_set_config(struct pse_control *psec,
+ const struct pse_control_config *config)
+{
+ int err = 0;
+
+ /* Check admin_state_enabled to avoid calling regulator_enable()
+ * or regulator_disable() twice, which would unbalance the
+ * regulator use count
+ */
+ switch (config->podl_admin_control) {
+ case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED:
+ if (!psec->pcdev->pi[psec->id].admin_state_enabled)
+ err = regulator_enable(psec->ps);
+ break;
+ case ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED:
+ if (psec->pcdev->pi[psec->id].admin_state_enabled)
+ err = regulator_disable(psec->ps);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
/**
* pse_ethtool_set_config - set PSE control configuration
* @psec: PSE control pointer
* @extack: extack for reporting useful error messages
* @config: Configuration of the test to run
+ *
+ * Return: 0 on success and failure value on error
*/
int pse_ethtool_set_config(struct pse_control *psec,
struct netlink_ext_ack *extack,
const struct pse_control_config *config)
{
- const struct pse_controller_ops *ops;
- int err;
+ int err = 0;
- ops = psec->pcdev->ops;
-
- if (!ops->ethtool_set_config) {
- NL_SET_ERR_MSG(extack,
- "PSE driver does not configuration");
- return -EOPNOTSUPP;
+ if (pse_has_c33(psec) && config->c33_admin_control) {
+ err = pse_ethtool_c33_set_config(psec, config);
+ if (err)
+ return err;
}
- mutex_lock(&psec->pcdev->lock);
- err = ops->ethtool_set_config(psec->pcdev, psec->id, extack, config);
- mutex_unlock(&psec->pcdev->lock);
+ if (pse_has_podl(psec) && config->podl_admin_control)
+ err = pse_ethtool_podl_set_config(psec, config);
return err;
}
EXPORT_SYMBOL_GPL(pse_ethtool_set_config);
+
+/**
+ * pse_ethtool_set_pw_limit - set PSE control power limit
+ * @psec: PSE control pointer
+ * @extack: extack for reporting useful error messages
+ * @pw_limit: power limit value in mW
+ *
+ * Return: 0 on success and failure value on error
+ */
+int pse_ethtool_set_pw_limit(struct pse_control *psec,
+ struct netlink_ext_ack *extack,
+ const unsigned int pw_limit)
+{
+ int uV, uA, ret;
+ s64 tmp_64;
+
+ ret = regulator_get_voltage(psec->ps);
+ if (!ret) {
+ NL_SET_ERR_MSG(extack,
+ "Can't calculate the current, PSE voltage read is 0");
+ return -ERANGE;
+ }
+ if (ret < 0) {
+ NL_SET_ERR_MSG(extack,
+ "Error reading PSE voltage");
+ return ret;
+ }
+ uV = ret;
+
+ tmp_64 = pw_limit;
+ tmp_64 *= 1000000000ull;
+ /* uA = mW * 1000000000 / uV */
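+ /* e.g. a 30000 mW limit at 50 V gives 600000 uA */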
+ uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV);
+
+ return regulator_set_current_limit(psec->ps, 0, uA);
+}
+EXPORT_SYMBOL_GPL(pse_ethtool_set_pw_limit);
+
+bool pse_has_podl(struct pse_control *psec)
+{
+ return psec->pcdev->types & ETHTOOL_PSE_PODL;
+}
+EXPORT_SYMBOL_GPL(pse_has_podl);
+
+bool pse_has_c33(struct pse_control *psec)
+{
+ return psec->pcdev->types & ETHTOOL_PSE_C33;
+}
+EXPORT_SYMBOL_GPL(pse_has_c33);
diff --git a/drivers/net/pse-pd/pse_regulator.c b/drivers/net/pse-pd/pse_regulator.c
index e2bf8306ca90..64ab36974fe0 100644
--- a/drivers/net/pse-pd/pse_regulator.c
+++ b/drivers/net/pse-pd/pse_regulator.c
@@ -24,38 +24,42 @@ static struct pse_reg_priv *to_pse_reg(struct pse_controller_dev *pcdev)
}
static int
-pse_reg_ethtool_set_config(struct pse_controller_dev *pcdev, unsigned long id,
- struct netlink_ext_ack *extack,
- const struct pse_control_config *config)
+pse_reg_pi_enable(struct pse_controller_dev *pcdev, int id)
{
struct pse_reg_priv *priv = to_pse_reg(pcdev);
int ret;
- if (priv->admin_state == config->admin_cotrol)
- return 0;
-
- switch (config->admin_cotrol) {
- case ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED:
- ret = regulator_enable(priv->ps);
- break;
- case ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED:
- ret = regulator_disable(priv->ps);
- break;
- default:
- dev_err(pcdev->dev, "Unknown admin state %i\n",
- config->admin_cotrol);
- ret = -ENOTSUPP;
- }
-
+ ret = regulator_enable(priv->ps);
if (ret)
return ret;
- priv->admin_state = config->admin_cotrol;
+ priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_ENABLED;
+ return 0;
+}
+
+static int
+pse_reg_pi_disable(struct pse_controller_dev *pcdev, int id)
+{
+ struct pse_reg_priv *priv = to_pse_reg(pcdev);
+ int ret;
+ ret = regulator_disable(priv->ps);
+ if (ret)
+ return ret;
+
+ priv->admin_state = ETHTOOL_PODL_PSE_ADMIN_STATE_DISABLED;
return 0;
}
static int
+pse_reg_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+{
+ struct pse_reg_priv *priv = to_pse_reg(pcdev);
+
+ return regulator_is_enabled(priv->ps);
+}
+
+static int
pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
struct netlink_ext_ack *extack,
struct pse_control_status *status)
@@ -80,7 +84,9 @@ pse_reg_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id,
static const struct pse_controller_ops pse_reg_ops = {
.ethtool_get_status = pse_reg_ethtool_get_status,
- .ethtool_set_config = pse_reg_ethtool_set_config,
+ .pi_enable = pse_reg_pi_enable,
+ .pi_is_enabled = pse_reg_pi_is_enabled,
+ .pi_disable = pse_reg_pi_disable,
};
static int
@@ -116,6 +122,7 @@ pse_reg_probe(struct platform_device *pdev)
priv->pcdev.owner = THIS_MODULE;
priv->pcdev.ops = &pse_reg_ops;
priv->pcdev.dev = dev;
+ priv->pcdev.types = ETHTOOL_PSE_PODL;
ret = devm_pse_controller_register(dev, &priv->pcdev);
if (ret) {
dev_err(dev, "failed to register PSE controller (%pe)\n",
diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c
new file mode 100644
index 000000000000..2ea75686a319
--- /dev/null
+++ b/drivers/net/pse-pd/tps23881.c
@@ -0,0 +1,823 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Driver for the TI TPS23881 PoE PSE Controller (I2C bus)
+ *
+ * Copyright (c) 2023 Bootlin, Kory Maincent <kory.maincent@bootlin.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pse-pd/pse.h>
+
+#define TPS23881_MAX_CHANS 8
+
+#define TPS23881_REG_PW_STATUS 0x10
+#define TPS23881_REG_OP_MODE 0x12
+#define TPS23881_OP_MODE_SEMIAUTO 0xaaaa
+#define TPS23881_REG_DIS_EN 0x13
+#define TPS23881_REG_DET_CLA_EN 0x14
+#define TPS23881_REG_GEN_MASK 0x17
+#define TPS23881_REG_NBITACC BIT(5)
+#define TPS23881_REG_PW_EN 0x19
+#define TPS23881_REG_PORT_MAP 0x26
+#define TPS23881_REG_PORT_POWER 0x29
+#define TPS23881_REG_POEPLUS 0x40
+#define TPS23881_REG_TPON BIT(0)
+#define TPS23881_REG_FWREV 0x41
+#define TPS23881_REG_DEVID 0x43
+#define TPS23881_REG_DEVID_MASK 0xF0
+#define TPS23881_DEVICE_ID 0x02
+#define TPS23881_REG_SRAM_CTRL 0x60
+#define TPS23881_REG_SRAM_DATA 0x61
+
+struct tps23881_port_desc {
+ u8 chan[2];
+ bool is_4p;
+};
+
+struct tps23881_priv {
+ struct i2c_client *client;
+ struct pse_controller_dev pcdev;
+ struct device_node *np;
+ struct tps23881_port_desc port[TPS23881_MAX_CHANS];
+};
+
+static struct tps23881_priv *to_tps23881_priv(struct pse_controller_dev *pcdev)
+{
+ return container_of(pcdev, struct tps23881_priv, pcdev);
+}
+
+static int tps23881_pi_enable(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ u8 chan;
+ u16 val;
+ int ret;
+
+ if (id >= TPS23881_MAX_CHANS)
+ return -ERANGE;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
+ if (ret < 0)
+ return ret;
+
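+ /* In the PW_EN word, power-on bits for channels 0-3 occupy the
+ * low nibble of the low byte; channels 4-7 use the low nibble of
+ * the high byte
+ */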
+ chan = priv->port[id].chan[0];
+ if (chan < 4)
+ val = (u16)(ret | BIT(chan));
+ else
+ val = (u16)(ret | BIT(chan + 4));
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ if (chan < 4)
+ val |= BIT(chan);
+ else
+ val |= BIT(chan + 4);
+ }
+
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps23881_pi_disable(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ u8 chan;
+ u16 val;
+ int ret;
+
+ if (id >= TPS23881_MAX_CHANS)
+ return -ERANGE;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
+ if (ret < 0)
+ return ret;
+
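+ /* Power-off bits sit in the high nibble of each byte: bits 4-7
+ * turn off channels 0-3 and bits 12-15 channels 4-7
+ */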
+ chan = priv->port[id].chan[0];
+ if (chan < 4)
+ val = (u16)(ret | BIT(chan + 4));
+ else
+ val = (u16)(ret | BIT(chan + 8));
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ if (chan < 4)
+ val |= BIT(chan + 4);
+ else
+ val |= BIT(chan + 8);
+ }
+
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_PW_EN, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps23881_pi_is_enabled(struct pse_controller_dev *pcdev, int id)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ bool enabled;
+ u8 chan;
+ int ret;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
+ if (ret < 0)
+ return ret;
+
+ chan = priv->port[id].chan[0];
+ if (chan < 4)
+ enabled = ret & BIT(chan);
+ else
+ enabled = ret & BIT(chan + 4);
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ if (chan < 4)
+ enabled &= !!(ret & BIT(chan));
+ else
+ enabled &= !!(ret & BIT(chan + 4));
+ }
+
+ /* Report enabled only if both channels are in this state */
+ return enabled;
+}
+
+static int tps23881_ethtool_get_status(struct pse_controller_dev *pcdev,
+ unsigned long id,
+ struct netlink_ext_ack *extack,
+ struct pse_control_status *status)
+{
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ struct i2c_client *client = priv->client;
+ bool enabled, delivering;
+ u8 chan;
+ int ret;
+
+ ret = i2c_smbus_read_word_data(client, TPS23881_REG_PW_STATUS);
+ if (ret < 0)
+ return ret;
+
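+ /* In PW_STATUS, the power-good (delivering) bit of a channel
+ * sits 4 bits above its power-enable bit
+ */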
+ chan = priv->port[id].chan[0];
+ if (chan < 4) {
+ enabled = ret & BIT(chan);
+ delivering = ret & BIT(chan + 4);
+ } else {
+ enabled = ret & BIT(chan + 4);
+ delivering = ret & BIT(chan + 8);
+ }
+
+ if (priv->port[id].is_4p) {
+ chan = priv->port[id].chan[1];
+ if (chan < 4) {
+ enabled &= !!(ret & BIT(chan));
+ delivering &= !!(ret & BIT(chan + 4));
+ } else {
+ enabled &= !!(ret & BIT(chan + 4));
+ delivering &= !!(ret & BIT(chan + 8));
+ }
+ }
+
+ /* Report delivering only if both channels are in this state */
+ if (delivering)
+ status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DELIVERING;
+ else
+ status->c33_pw_status = ETHTOOL_C33_PSE_PW_D_STATUS_DISABLED;
+
+ /* Report enabled only if both channels are in this state */
+ if (enabled)
+ status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED;
+ else
+ status->c33_admin_state = ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED;
+
+ return 0;
+}
+
+/* Parse the channels subnode into an array of device nodes */
+static int
+tps23881_get_of_channels(struct tps23881_priv *priv,
+ struct device_node *chan_node[TPS23881_MAX_CHANS])
+{
+ struct device_node *channels_node, *node;
+ int i, ret;
+
+ if (!priv->np)
+ return -EINVAL;
+
+ channels_node = of_find_node_by_name(priv->np, "channels");
+ if (!channels_node)
+ return -EINVAL;
+
+ for_each_child_of_node(channels_node, node) {
+ u32 chan_id;
+
+ if (!of_node_name_eq(node, "channel"))
+ continue;
+
+ ret = of_property_read_u32(node, "reg", &chan_id);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (chan_id >= TPS23881_MAX_CHANS || chan_node[chan_id]) {
+ dev_err(&priv->client->dev,
+ "wrong number of port (%d)\n", chan_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ of_node_get(node);
+ chan_node[chan_id] = node;
+ }
+
+ of_node_put(channels_node);
+ return 0;
+
+out:
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ of_node_put(chan_node[i]);
+ chan_node[i] = NULL;
+ }
+
+ of_node_put(node);
+ of_node_put(channels_node);
+ return ret;
+}
+
+struct tps23881_port_matrix {
+ u8 pi_id;
+ u8 lgcl_chan[2];
+ u8 hw_chan[2];
+ bool is_4p;
+ bool exist;
+};
+
+static int
+tps23881_match_channel(const struct pse_pi_pairset *pairset,
+ struct device_node *chan_node[TPS23881_MAX_CHANS])
+{
+ int i;
+
+ /* Search every channel */
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ if (pairset->np == chan_node[i])
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static bool
+tps23881_is_chan_free(struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS],
+ int chan)
+{
+ int i;
+
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ if (port_matrix[i].exist &&
+ (port_matrix[i].hw_chan[0] == chan ||
+ port_matrix[i].hw_chan[1] == chan))
+ return false;
+ }
+
+ return true;
+}
+
+/* Fill port matrix with the matching channels */
+static int
+tps23881_match_port_matrix(struct pse_pi *pi, int pi_id,
+ struct device_node *chan_node[TPS23881_MAX_CHANS],
+ struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS])
+{
+ int ret;
+
+ if (!pi->pairset[0].np)
+ return 0;
+
+ ret = tps23881_match_channel(&pi->pairset[0], chan_node);
+ if (ret < 0)
+ return ret;
+
+ if (!tps23881_is_chan_free(port_matrix, ret)) {
+ pr_err("tps23881: channel %d already used\n", ret);
+ return -ENODEV;
+ }
+
+ port_matrix[pi_id].hw_chan[0] = ret;
+ port_matrix[pi_id].exist = true;
+
+ if (!pi->pairset[1].np)
+ return 0;
+
+ ret = tps23881_match_channel(&pi->pairset[1], chan_node);
+ if (ret < 0)
+ return ret;
+
+ if (!tps23881_is_chan_free(port_matrix, ret)) {
+ pr_err("tps23881: channel %d already used\n", ret);
+ return -ENODEV;
+ }
+
+ if (port_matrix[pi_id].hw_chan[0] / 4 != ret / 4) {
+ pr_err("tps23881: 4-pair PSE can only be set within the same 4 ports group");
+ return -ENODEV;
+ }
+
+ port_matrix[pi_id].hw_chan[1] = ret;
+ port_matrix[pi_id].is_4p = true;
+
+ return 0;
+}
+
+static int
+tps23881_get_unused_chan(struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS],
+ int port_cnt)
+{
+ bool used;
+ int i, j;
+
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ used = false;
+
+ for (j = 0; j < port_cnt; j++) {
+ if (port_matrix[j].hw_chan[0] == i) {
+ used = true;
+ break;
+ }
+
+ if (port_matrix[j].is_4p &&
+ port_matrix[j].hw_chan[1] == i) {
+ used = true;
+ break;
+ }
+ }
+
+ if (!used)
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+/* Sort the port matrix to follow the tps23881 hardware port matrix
+ * specification. The device has two 4-port groups and each 4-pair
+ * powered device has to use two consecutive logical channels within a
+ * 4-port group (1 and 2, or 3 and 4). The hardware matrix also has to
+ * be fully configured, even for unused channels, to be valid.
+ */
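+/* Example: a 4-pair PI on hw channels 0 and 1 plus a 2-pair PI on hw
+ * channel 2 map to logical channels 0+1 and 2; logical channel 3 is
+ * then padded with the remaining unused hw channel.
+ */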
+static int
+tps23881_sort_port_matrix(struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS])
+{
+ struct tps23881_port_matrix tmp_port_matrix[TPS23881_MAX_CHANS] = {0};
+ int i, ret, port_cnt = 0, cnt_4ch_grp1 = 0, cnt_4ch_grp2 = 4;
+
+ /* Configure 4p port matrix */
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ int *cnt;
+
+ if (!port_matrix[i].exist || !port_matrix[i].is_4p)
+ continue;
+
+ if (port_matrix[i].hw_chan[0] < 4)
+ cnt = &cnt_4ch_grp1;
+ else
+ cnt = &cnt_4ch_grp2;
+
+ tmp_port_matrix[port_cnt].exist = true;
+ tmp_port_matrix[port_cnt].is_4p = true;
+ tmp_port_matrix[port_cnt].pi_id = i;
+ tmp_port_matrix[port_cnt].hw_chan[0] = port_matrix[i].hw_chan[0];
+ tmp_port_matrix[port_cnt].hw_chan[1] = port_matrix[i].hw_chan[1];
+
+ /* 4-pair ports have to be configured with consecutive
+ * logical channels 0 and 1, 2 and 3.
+ */
+ tmp_port_matrix[port_cnt].lgcl_chan[0] = (*cnt)++;
+ tmp_port_matrix[port_cnt].lgcl_chan[1] = (*cnt)++;
+
+ port_cnt++;
+ }
+
+ /* Configure 2p port matrix */
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ int *cnt;
+
+ if (!port_matrix[i].exist || port_matrix[i].is_4p)
+ continue;
+
+ if (port_matrix[i].hw_chan[0] < 4)
+ cnt = &cnt_4ch_grp1;
+ else
+ cnt = &cnt_4ch_grp2;
+
+ tmp_port_matrix[port_cnt].exist = true;
+ tmp_port_matrix[port_cnt].pi_id = i;
+ tmp_port_matrix[port_cnt].lgcl_chan[0] = (*cnt)++;
+ tmp_port_matrix[port_cnt].hw_chan[0] = port_matrix[i].hw_chan[0];
+
+ port_cnt++;
+ }
+
+ /* Complete the rest of the first 4-port group matrix even if
+ * channels are unused
+ */
+ while (cnt_4ch_grp1 < 4) {
+ ret = tps23881_get_unused_chan(tmp_port_matrix, port_cnt);
+ if (ret < 0) {
+ pr_err("tps23881: port matrix issue, no chan available\n");
+ return ret;
+ }
+
+ if (port_cnt >= TPS23881_MAX_CHANS) {
+ pr_err("tps23881: wrong number of channels\n");
+ return -ENODEV;
+ }
+ tmp_port_matrix[port_cnt].lgcl_chan[0] = cnt_4ch_grp1;
+ tmp_port_matrix[port_cnt].hw_chan[0] = ret;
+ cnt_4ch_grp1++;
+ port_cnt++;
+ }
+
+ /* Complete the rest of the second 4-port group matrix even if
+ * channels are unused
+ */
+ while (cnt_4ch_grp2 < 8) {
+ ret = tps23881_get_unused_chan(tmp_port_matrix, port_cnt);
+ if (ret < 0) {
+ pr_err("tps23881: port matrix issue, no chan available\n");
+ return -ENODEV;
+ }
+
+ if (port_cnt >= TPS23881_MAX_CHANS) {
+ pr_err("tps23881: wrong number of channels\n");
+ return -ENODEV;
+ }
+ tmp_port_matrix[port_cnt].lgcl_chan[0] = cnt_4ch_grp2;
+ tmp_port_matrix[port_cnt].hw_chan[0] = ret;
+ cnt_4ch_grp2++;
+ port_cnt++;
+ }
+
+ memcpy(port_matrix, tmp_port_matrix, sizeof(tmp_port_matrix));
+
+ return port_cnt;
+}
+
+/* Write port matrix to the hardware port matrix and the software port
+ * matrix.
+ */
+static int
+tps23881_write_port_matrix(struct tps23881_priv *priv,
+ struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS],
+ int port_cnt)
+{
+ struct i2c_client *client = priv->client;
+ u8 pi_id, lgcl_chan, hw_chan;
+ u16 val = 0;
+ int i, ret;
+
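+ /* PORT_MAP packs one 2-bit hw channel index (within its 4-port
+ * group) per logical channel
+ */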
+ for (i = 0; i < port_cnt; i++) {
+ pi_id = port_matrix[i].pi_id;
+ lgcl_chan = port_matrix[i].lgcl_chan[0];
+ hw_chan = port_matrix[i].hw_chan[0] % 4;
+
+ /* Set software port matrix for existing ports */
+ if (port_matrix[i].exist)
+ priv->port[pi_id].chan[0] = lgcl_chan;
+
+ /* Set hardware port matrix for all ports */
+ val |= hw_chan << (lgcl_chan * 2);
+
+ if (!port_matrix[i].is_4p)
+ continue;
+
+ lgcl_chan = port_matrix[i].lgcl_chan[1];
+ hw_chan = port_matrix[i].hw_chan[1] % 4;
+
+ /* Set software port matrix for existing ports */
+ if (port_matrix[i].exist) {
+ priv->port[pi_id].is_4p = true;
+ priv->port[pi_id].chan[1] = lgcl_chan;
+ }
+
+ /* Set hardware port matrix for all ports */
+ val |= hw_chan << (lgcl_chan * 2);
+ }
+
+ /* Write hardware ports matrix */
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_PORT_MAP, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+tps23881_set_ports_conf(struct tps23881_priv *priv,
+ struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS])
+{
+ struct i2c_client *client = priv->client;
+ int i, ret;
+ u16 val;
+
+ /* Set operating mode */
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_OP_MODE,
+ TPS23881_OP_MODE_SEMIAUTO);
+ if (ret)
+ return ret;
+
+ /* Disable DC disconnect */
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_DIS_EN, 0x0);
+ if (ret)
+ return ret;
+
+ /* Set port power allocation */
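+ /* One nibble per pair of logical channels: 0xf budgets a 4-pair
+ * port, 0x3 a 2-pair port
+ */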
+ val = 0;
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ if (!port_matrix[i].exist)
+ continue;
+
+ if (port_matrix[i].is_4p)
+ val |= 0xf << ((port_matrix[i].lgcl_chan[0] / 2) * 4);
+ else
+ val |= 0x3 << ((port_matrix[i].lgcl_chan[0] / 2) * 4);
+ }
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_PORT_POWER, val);
+ if (ret)
+ return ret;
+
+ /* Enable detection and classification */
+ val = 0;
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ if (!port_matrix[i].exist)
+ continue;
+
+ val |= BIT(port_matrix[i].lgcl_chan[0]) |
+ BIT(port_matrix[i].lgcl_chan[0] + 4);
+ if (port_matrix[i].is_4p)
+ val |= BIT(port_matrix[i].lgcl_chan[1]) |
+ BIT(port_matrix[i].lgcl_chan[1] + 4);
+ }
+ ret = i2c_smbus_write_word_data(client, TPS23881_REG_DET_CLA_EN, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int
+tps23881_set_ports_matrix(struct tps23881_priv *priv,
+ struct device_node *chan_node[TPS23881_MAX_CHANS])
+{
+ struct tps23881_port_matrix port_matrix[TPS23881_MAX_CHANS] = {0};
+ int i, ret;
+
+ /* Update with values for every PSE PIs */
+ for (i = 0; i < TPS23881_MAX_CHANS; i++) {
+ ret = tps23881_match_port_matrix(&priv->pcdev.pi[i], i,
+ chan_node, port_matrix);
+ if (ret)
+ return ret;
+ }
+
+ ret = tps23881_sort_port_matrix(port_matrix);
+ if (ret < 0)
+ return ret;
+
+ ret = tps23881_write_port_matrix(priv, port_matrix, ret);
+ if (ret)
+ return ret;
+
+ ret = tps23881_set_ports_conf(priv, port_matrix);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int tps23881_setup_pi_matrix(struct pse_controller_dev *pcdev)
+{
+ struct device_node *chan_node[TPS23881_MAX_CHANS] = {NULL};
+ struct tps23881_priv *priv = to_tps23881_priv(pcdev);
+ int ret, i;
+
+ ret = tps23881_get_of_channels(priv, chan_node);
+ if (ret < 0) {
+ dev_warn(&priv->client->dev,
+ "Unable to parse port-matrix, default matrix will be used\n");
+ return 0;
+ }
+
+ ret = tps23881_set_ports_matrix(priv, chan_node);
+
+ for (i = 0; i < TPS23881_MAX_CHANS; i++)
+ of_node_put(chan_node[i]);
+
+ return ret;
+}
+
+static const struct pse_controller_ops tps23881_ops = {
+ .setup_pi_matrix = tps23881_setup_pi_matrix,
+ .pi_enable = tps23881_pi_enable,
+ .pi_disable = tps23881_pi_disable,
+ .pi_is_enabled = tps23881_pi_is_enabled,
+ .ethtool_get_status = tps23881_ethtool_get_status,
+};
+
+static const char fw_parity_name[] = "ti/tps23881/tps23881-parity-14.bin";
+static const char fw_sram_name[] = "ti/tps23881/tps23881-sram-14.bin";
+
+struct tps23881_fw_conf {
+ u8 reg;
+ u8 val;
+};
+
+static const struct tps23881_fw_conf tps23881_fw_parity_conf[] = {
+ {.reg = 0x60, .val = 0x01},
+ {.reg = 0x62, .val = 0x00},
+ {.reg = 0x63, .val = 0x80},
+ {.reg = 0x60, .val = 0xC4},
+ {.reg = 0x1D, .val = 0xBC},
+ {.reg = 0xD7, .val = 0x02},
+ {.reg = 0x91, .val = 0x00},
+ {.reg = 0x90, .val = 0x00},
+ {.reg = 0xD7, .val = 0x00},
+ {.reg = 0x1D, .val = 0x00},
+ { /* sentinel */ }
+};
+
+static const struct tps23881_fw_conf tps23881_fw_sram_conf[] = {
+ {.reg = 0x60, .val = 0xC5},
+ {.reg = 0x62, .val = 0x00},
+ {.reg = 0x63, .val = 0x80},
+ {.reg = 0x60, .val = 0xC0},
+ {.reg = 0x1D, .val = 0xBC},
+ {.reg = 0xD7, .val = 0x02},
+ {.reg = 0x91, .val = 0x00},
+ {.reg = 0x90, .val = 0x00},
+ {.reg = 0xD7, .val = 0x00},
+ {.reg = 0x1D, .val = 0x00},
+ { /* sentinel */ }
+};
+
+static int tps23881_flash_sram_fw_part(struct i2c_client *client,
+ const char *fw_name,
+ const struct tps23881_fw_conf *fw_conf)
+{
+ const struct firmware *fw = NULL;
+ int i, ret;
+
+ ret = request_firmware(&fw, fw_name, &client->dev);
+ if (ret)
+ return ret;
+
+ dev_dbg(&client->dev, "Flashing %s\n", fw_name);
+
+ /* Prepare device for RAM download */
+ while (fw_conf->reg) {
+ ret = i2c_smbus_write_byte_data(client, fw_conf->reg,
+ fw_conf->val);
+ if (ret)
+ goto out;
+
+ fw_conf++;
+ }
+
+ /* Flash the firmware file */
+ for (i = 0; i < fw->size; i++) {
+ ret = i2c_smbus_write_byte_data(client,
+ TPS23881_REG_SRAM_DATA,
+ fw->data[i]);
+ if (ret)
+ goto out;
+ }
+
+out:
+ release_firmware(fw);
+ return ret;
+}
+
+static int tps23881_flash_sram_fw(struct i2c_client *client)
+{
+ int ret;
+
+ ret = tps23881_flash_sram_fw_part(client, fw_parity_name,
+ tps23881_fw_parity_conf);
+ if (ret)
+ return ret;
+
+ ret = tps23881_flash_sram_fw_part(client, fw_sram_name,
+ tps23881_fw_sram_conf);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, TPS23881_REG_SRAM_CTRL, 0x18);
+ if (ret)
+ return ret;
+
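+ /* give the freshly started SRAM firmware time to come up */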
+ mdelay(12);
+
+ return 0;
+}
+
+static int tps23881_i2c_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct tps23881_priv *priv;
+ int ret;
+ u8 val;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "i2c check functionality failed\n");
+ return -ENXIO;
+ }
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ ret = i2c_smbus_read_byte_data(client, TPS23881_REG_DEVID);
+ if (ret < 0)
+ return ret;
+
+ if (FIELD_GET(TPS23881_REG_DEVID_MASK, ret) != TPS23881_DEVICE_ID) {
+ dev_err(dev, "Wrong device ID\n");
+ return -ENXIO;
+ }
+
+ ret = tps23881_flash_sram_fw(client);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(client, TPS23881_REG_FWREV);
+ if (ret < 0)
+ return ret;
+
+ dev_info(&client->dev, "Firmware revision 0x%x\n", ret);
+
+ /* Set configuration B, 16 bit access on a single device address */
+ ret = i2c_smbus_read_byte_data(client, TPS23881_REG_GEN_MASK);
+ if (ret < 0)
+ return ret;
+
+ val = ret | TPS23881_REG_NBITACC;
+ ret = i2c_smbus_write_byte_data(client, TPS23881_REG_GEN_MASK, val);
+ if (ret)
+ return ret;
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+ priv->np = dev->of_node;
+
+ priv->pcdev.owner = THIS_MODULE;
+ priv->pcdev.ops = &tps23881_ops;
+ priv->pcdev.dev = dev;
+ priv->pcdev.types = ETHTOOL_PSE_C33;
+ priv->pcdev.nr_lines = TPS23881_MAX_CHANS;
+ ret = devm_pse_controller_register(dev, &priv->pcdev);
+ if (ret) {
+ return dev_err_probe(dev, ret,
+ "failed to register PSE controller\n");
+ }
+
+ return ret;
+}
+
+static const struct i2c_device_id tps23881_id[] = {
+ { "tps23881" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tps23881_id);
+
+static const struct of_device_id tps23881_of_match[] = {
+ { .compatible = "ti,tps23881", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tps23881_of_match);
+
+static struct i2c_driver tps23881_driver = {
+ .probe = tps23881_i2c_probe,
+ .id_table = tps23881_id,
+ .driver = {
+ .name = "tps23881",
+ .of_match_table = tps23881_of_match,
+ },
+};
+module_i2c_driver(tps23881_driver);
+
+MODULE_AUTHOR("Kory Maincent <kory.maincent@bootlin.com>");
+MODULE_DESCRIPTION("TI TPS23881 PoE PSE Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 0aba3569ccc0..fb362ee248ff 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -286,7 +286,7 @@ static int sl_realloc_bufs(struct slip *sl, int mtu)
}
}
sl->mtu = mtu;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
sl->buffsize = len;
err = 0;
diff --git a/drivers/net/tap.c b/drivers/net/tap.c
index 9f0495e8df4d..77574f7a3bd4 100644
--- a/drivers/net/tap.c
+++ b/drivers/net/tap.c
@@ -754,7 +754,7 @@ static ssize_t tap_get_user(struct tap_queue *q, void *msg_control,
skb_zcopy_init(skb, msg_control);
} else if (msg_control) {
struct ubuf_info *uarg = msg_control;
- uarg->callback(NULL, uarg, false);
+ uarg->ops->complete(NULL, uarg, false);
}
dev_queue_xmit(skb);
@@ -1177,6 +1177,11 @@ static int tap_get_user_xdp(struct tap_queue *q, struct xdp_buff *xdp)
struct sk_buff *skb;
int err, depth;
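+ /* reject frames too short to hold an Ethernet header */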
+ if (unlikely(xdp->data_end - xdp->data < ETH_HLEN)) {
+ err = -EINVAL;
+ goto err;
+ }
+
if (q->flags & IFF_VNET_HDR)
vnet_hdr_len = READ_ONCE(q->vnet_hdr_sz);
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
index f582d81a5091..7a5aa20d286b 100644
--- a/drivers/net/team/Makefile
+++ b/drivers/net/team/Makefile
@@ -3,6 +3,7 @@
# Makefile for the network team driver
#
+team-y := team_core.o team_nl.o
obj-$(CONFIG_NET_TEAM) += team.o
obj-$(CONFIG_NET_TEAM_MODE_BROADCAST) += team_mode_broadcast.o
obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team_core.c
index 0a44bbdcfb7b..ab1935a4aa2c 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team_core.c
@@ -27,6 +27,8 @@
#include <net/sch_generic.h>
#include <linux/if_team.h>
+#include "team_nl.h"
+
#define DRV_NAME "team"
@@ -1829,7 +1831,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
team->port_mtu_change_allowed = false;
mutex_unlock(&team->lock);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
@@ -2254,28 +2256,7 @@ static struct rtnl_link_ops team_link_ops __read_mostly = {
static struct genl_family team_nl_family;
-static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
- [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, },
- [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 },
- [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED },
- [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED },
-};
-
-static const struct nla_policy
-team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
- [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, },
- [TEAM_ATTR_OPTION_NAME] = {
- .type = NLA_STRING,
- .len = TEAM_STRING_MAX_LEN,
- },
- [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
- [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
- [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
- [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32 },
- [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32 },
-};
-
-static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+int team_nl_noop_doit(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *msg;
void *hdr;
@@ -2513,7 +2494,7 @@ errout:
return err;
}
-static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
+int team_nl_options_get_doit(struct sk_buff *skb, struct genl_info *info)
{
struct team *team;
struct team_option_inst *opt_inst;
@@ -2538,7 +2519,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
static int team_nl_send_event_options_get(struct team *team,
struct list_head *sel_opt_inst_list);
-static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
+int team_nl_options_set_doit(struct sk_buff *skb, struct genl_info *info)
{
struct team *team;
int err = 0;
@@ -2579,7 +2560,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
err = nla_parse_nested_deprecated(opt_attrs,
TEAM_ATTR_OPTION_MAX,
nl_option,
- team_nl_option_policy,
+ team_attr_option_nl_policy,
info->extack);
if (err)
goto team_put;
@@ -2802,8 +2783,8 @@ errout:
return err;
}
-static int team_nl_cmd_port_list_get(struct sk_buff *skb,
- struct genl_info *info)
+int team_nl_port_list_get_doit(struct sk_buff *skb,
+ struct genl_info *info)
{
struct team *team;
int err;
@@ -2820,32 +2801,6 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
return err;
}
-static const struct genl_small_ops team_nl_ops[] = {
- {
- .cmd = TEAM_CMD_NOOP,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = team_nl_cmd_noop,
- },
- {
- .cmd = TEAM_CMD_OPTIONS_SET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = team_nl_cmd_options_set,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = TEAM_CMD_OPTIONS_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = team_nl_cmd_options_get,
- .flags = GENL_ADMIN_PERM,
- },
- {
- .cmd = TEAM_CMD_PORT_LIST_GET,
- .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
- .doit = team_nl_cmd_port_list_get,
- .flags = GENL_ADMIN_PERM,
- },
-};
-
static const struct genl_multicast_group team_nl_mcgrps[] = {
{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};
@@ -2853,7 +2808,7 @@ static const struct genl_multicast_group team_nl_mcgrps[] = {
static struct genl_family team_nl_family __ro_after_init = {
.name = TEAM_GENL_NAME,
.version = TEAM_GENL_VERSION,
- .maxattr = TEAM_ATTR_MAX,
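+ /* derive maxattr from the generated policy so both stay in sync */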
+ .maxattr = ARRAY_SIZE(team_nl_policy) - 1,
.policy = team_nl_policy,
.netnsok = true,
.module = THIS_MODULE,
diff --git a/drivers/net/team/team_nl.c b/drivers/net/team/team_nl.c
new file mode 100644
index 000000000000..208424ab78f5
--- /dev/null
+++ b/drivers/net/team/team_nl.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/team.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "team_nl.h"
+
+#include <uapi/linux/if_team.h>
+
+/* Common nested types */
+const struct nla_policy team_attr_option_nl_policy[TEAM_ATTR_OPTION_ARRAY_INDEX + 1] = {
+ [TEAM_ATTR_OPTION_NAME] = { .type = NLA_STRING, .len = TEAM_STRING_MAX_LEN, },
+ [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG, },
+ [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8, },
+ [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY, },
+ [TEAM_ATTR_OPTION_REMOVED] = { .type = NLA_FLAG, },
+ [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32, },
+ [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32, },
+};
+
+const struct nla_policy team_item_option_nl_policy[TEAM_ATTR_ITEM_OPTION + 1] = {
+ [TEAM_ATTR_ITEM_OPTION] = NLA_POLICY_NESTED(team_attr_option_nl_policy),
+};
+
+/* Global operation policy for team */
+const struct nla_policy team_nl_policy[TEAM_ATTR_LIST_OPTION + 1] = {
+ [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32, },
+ [TEAM_ATTR_LIST_OPTION] = NLA_POLICY_NESTED(team_item_option_nl_policy),
+};
+
+/* Ops table for team */
+const struct genl_small_ops team_nl_ops[4] = {
+ {
+ .cmd = TEAM_CMD_NOOP,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = team_nl_noop_doit,
+ },
+ {
+ .cmd = TEAM_CMD_OPTIONS_SET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = team_nl_options_set_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TEAM_CMD_OPTIONS_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = team_nl_options_get_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TEAM_CMD_PORT_LIST_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT,
+ .doit = team_nl_port_list_get_doit,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
diff --git a/drivers/net/team/team_nl.h b/drivers/net/team/team_nl.h
new file mode 100644
index 000000000000..c9ec1b22ac4d
--- /dev/null
+++ b/drivers/net/team/team_nl.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/team.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_TEAM_GEN_H
+#define _LINUX_TEAM_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/if_team.h>
+
+/* Common nested types */
+extern const struct nla_policy team_attr_option_nl_policy[TEAM_ATTR_OPTION_ARRAY_INDEX + 1];
+extern const struct nla_policy team_item_option_nl_policy[TEAM_ATTR_ITEM_OPTION + 1];
+
+/* Global operation policy for team */
+extern const struct nla_policy team_nl_policy[TEAM_ATTR_LIST_OPTION + 1];
+
+/* Ops table for team */
+extern const struct genl_small_ops team_nl_ops[4];
+
+int team_nl_noop_doit(struct sk_buff *skb, struct genl_info *info);
+int team_nl_options_set_doit(struct sk_buff *skb, struct genl_info *info);
+int team_nl_options_get_doit(struct sk_buff *skb, struct genl_info *info);
+int team_nl_port_list_get_doit(struct sk_buff *skb, struct genl_info *info);
+
+#endif /* _LINUX_TEAM_GEN_H */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 92da8c03d960..1d06c560c5e6 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1661,6 +1661,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
int len, int *skb_xdp)
{
struct page_frag *alloc_frag = &current->task_frag;
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
struct bpf_prog *xdp_prog;
int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
char *buf;
@@ -1700,6 +1701,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
local_bh_disable();
rcu_read_lock();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
struct xdp_buff xdp;
@@ -1728,12 +1730,14 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
pad = xdp.data - xdp.data_hard_start;
len = xdp.data_end - xdp.data;
}
+ bpf_net_ctx_clear(bpf_net_ctx);
rcu_read_unlock();
local_bh_enable();
return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
out:
+ bpf_net_ctx_clear(bpf_net_ctx);
rcu_read_unlock();
local_bh_enable();
return NULL;
@@ -1906,7 +1910,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
skb_zcopy_init(skb, msg_control);
} else if (msg_control) {
struct ubuf_info *uarg = msg_control;
- uarg->callback(NULL, uarg, false);
+ uarg->ops->complete(NULL, uarg, false);
}
skb_reset_network_header(skb);
@@ -2451,6 +2455,9 @@ static int tun_xdp_one(struct tun_struct *tun,
bool skb_xdp = false;
struct page *page;
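+ /* frames shorter than an Ethernet header are invalid */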
+ if (unlikely(datasize < ETH_HLEN))
+ return -EINVAL;
+
xdp_prog = rcu_dereference(tun->xdp_prog);
if (xdp_prog) {
if (gso->gso_type) {
@@ -2566,6 +2573,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
ctl && ctl->type == TUN_MSG_PTR) {
+ struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
struct tun_page tpage;
int n = ctl->num;
int flush = 0, queued = 0;
@@ -2574,6 +2582,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
local_bh_disable();
rcu_read_lock();
+ bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
for (i = 0; i < n; i++) {
xdp = &((struct xdp_buff *)ctl->ptr)[i];
@@ -2588,6 +2597,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
if (tfile->napi_enabled && queued > 0)
napi_schedule(&tfile->napi);
+ bpf_net_ctx_clear(bpf_net_ctx);
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 7b8afa589a53..ff5be2cbf17b 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -424,7 +424,7 @@ static int aqc111_change_mtu(struct net_device *net, int new_mtu)
u16 reg16 = 0;
u8 buf[5];
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
dev->hard_mtu = net->mtu + net->hard_header_len;
aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
@@ -1141,17 +1141,15 @@ static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
continue;
}
- /* Clone SKB */
- new_skb = skb_clone(skb, GFP_ATOMIC);
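+ /* copy into a right-sized skb so len and truesize stay accurate */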
+ new_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
if (!new_skb)
goto err;
- new_skb->len = pkt_len;
+ skb_put(new_skb, pkt_len);
+ memcpy(new_skb->data, skb->data, pkt_len);
skb_pull(new_skb, AQ_RX_HW_PAD);
- skb_set_tail_pointer(new_skb, new_skb->len);
- new_skb->truesize = SKB_TRUESIZE(new_skb->len);
if (aqc111_data->rx_checksum)
aqc111_rx_checksum(new_skb, pkt_desc);
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index f7cff58fe044..57d6e5abc30e 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -1230,7 +1230,7 @@ static int ax88178_change_mtu(struct net_device *net, int new_mtu)
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
dev->hard_mtu = net->mtu + net->hard_header_len;
ax88178_set_mfb(dev);
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index df9d767cb524..b034ef8a73ea 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -326,7 +326,8 @@ static void ax88179_status(struct usbnet *dev, struct urb *urb)
if (netif_carrier_ok(dev->net) != link) {
usbnet_link_change(dev, link, 1);
- netdev_info(dev->net, "ax88179 - Link status is: %d\n", link);
+ if (!link)
+ netdev_info(dev->net, "ax88179 - Link status is: 0\n");
}
}
@@ -943,7 +944,7 @@ static int ax88179_change_mtu(struct net_device *net, int new_mtu)
struct usbnet *dev = netdev_priv(net);
u16 tmp16;
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
dev->hard_mtu = net->mtu + net->hard_header_len;
if (net->mtu > 1500) {
@@ -1277,7 +1278,6 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
dev->net->addr_assign_type = NET_ADDR_PERM;
} else {
netdev_info(dev->net, "invalid MAC address, using random\n");
- eth_hw_addr_random(dev->net);
}
ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
@@ -1287,8 +1287,11 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct ax88179_data *ax179_data;
+ int ret;
- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret < 0)
+ return ret;
ax179_data = kzalloc(sizeof(*ax179_data), GFP_KERNEL);
if (!ax179_data)
@@ -1540,6 +1543,7 @@ static int ax88179_link_reset(struct usbnet *dev)
GMII_PHY_PHYSR, 2, &tmp16);
if (!(tmp16 & GMII_PHY_PHYSR_LINK)) {
+ netdev_info(dev->net, "ax88179 - Link status is: 0\n");
return 0;
} else if (GMII_PHY_PHYSR_GIGA == (tmp16 & GMII_PHY_PHYSR_SMASK)) {
mode |= AX_MEDIUM_GIGAMODE | AX_MEDIUM_EN_125MHZ;
@@ -1577,6 +1581,8 @@ static int ax88179_link_reset(struct usbnet *dev)
netif_carrier_on(dev->net);
+ netdev_info(dev->net, "ax88179 - Link status is: 1\n");
+
return 0;
}
@@ -1673,6 +1679,27 @@ static int ax88179_reset(struct usbnet *dev)
return 0;
}
+static int ax88179_net_reset(struct usbnet *dev)
+{
+ u16 tmp16;
+
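+ /* If the PHY status register reads non-zero, just make sure RX
+ * is enabled; otherwise fall back to a full chip reset
+ */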
+ ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID, GMII_PHY_PHYSR,
+ 2, &tmp16);
+ if (tmp16) {
+ ax88179_read_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ if (!(tmp16 & AX_MEDIUM_RECEIVE_EN)) {
+ tmp16 |= AX_MEDIUM_RECEIVE_EN;
+ ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
+ 2, 2, &tmp16);
+ }
+ } else {
+ ax88179_reset(dev);
+ }
+
+ return 0;
+}
+
static int ax88179_stop(struct usbnet *dev)
{
u16 tmp16;
@@ -1692,6 +1719,7 @@ static const struct driver_info ax88179_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1704,6 +1732,7 @@ static const struct driver_info ax88178a_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1716,7 +1745,7 @@ static const struct driver_info cypress_GX3_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1729,7 +1758,7 @@ static const struct driver_info dlink_dub1312_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1742,7 +1771,7 @@ static const struct driver_info sitecom_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1755,7 +1784,7 @@ static const struct driver_info samsung_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1768,7 +1797,7 @@ static const struct driver_info lenovo_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1781,7 +1810,7 @@ static const struct driver_info belkin_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1794,7 +1823,7 @@ static const struct driver_info toshiba_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1807,7 +1836,7 @@ static const struct driver_info mct_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1820,7 +1849,7 @@ static const struct driver_info at_umc2000_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1833,7 +1862,7 @@ static const struct driver_info at_umc200_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
@@ -1846,7 +1875,7 @@ static const struct driver_info at_umc2000sp_info = {
.unbind = ax88179_unbind,
.status = ax88179_status,
.link_reset = ax88179_link_reset,
- .reset = ax88179_reset,
+ .reset = ax88179_net_reset,
.stop = ax88179_stop,
.flags = FLAG_ETHER | FLAG_FRAMING_AX,
.rx_fixup = ax88179_rx_fixup,
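
For context on the repetitive hunks above: usbnet drivers describe each device variant with a struct driver_info, and the core invokes the .reset hook from usbnet_open(), so repointing it changes what runs on every interface open. A minimal sketch of the shape of such an entry — my_bind() and my_net_reset() are hypothetical placeholders, not functions from ax88179_178a.c:

	/* Sketch only: hypothetical helpers. */
	static int my_net_reset(struct usbnet *dev)
	{
		/* re-program MAC/PHY state each time the netdev is opened */
		return 0;
	}

	static const struct driver_info my_nic_info = {
		.description = "Example USB NIC",
		.bind        = my_bind,
		.reset       = my_net_reset,	/* invoked by usbnet on open */
		.flags       = FLAG_ETHER | FLAG_FRAMING_AX,
	};
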
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index db05622f1f70..d5c47a2a62dc 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -798,7 +798,7 @@ int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
{
struct usbnet *dev = netdev_priv(net);
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
cdc_ncm_set_dgram_size(dev, new_mtu + cdc_ncm_eth_hlen(dev));
return 0;
@@ -933,7 +933,8 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
cdc_ncm_find_endpoints(dev, ctx->data);
cdc_ncm_find_endpoints(dev, ctx->control);
- if (!dev->in || !dev->out || !dev->status) {
+ if (!dev->in || !dev->out ||
+ (!dev->status && dev->driver_info->flags & FLAG_LINK_INTR)) {
dev_dbg(&intf->dev, "failed to collect endpoints\n");
goto error2;
}
@@ -1925,6 +1926,34 @@ static const struct driver_info cdc_ncm_zlp_info = {
.set_rx_mode = usbnet_cdc_update_filter,
};
+/* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */
+static const struct driver_info apple_tethering_interface_info = {
+ .description = "CDC NCM (Apple Tethering)",
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_LINK_INTR | FLAG_ETHER | FLAG_SEND_ZLP,
+ .bind = cdc_ncm_bind,
+ .unbind = cdc_ncm_unbind,
+ .manage_power = usbnet_manage_power,
+ .status = cdc_ncm_status,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+ .set_rx_mode = usbnet_cdc_update_filter,
+};
+
+/* Same as apple_tethering_interface_info, but without FLAG_LINK_INTR */
+static const struct driver_info apple_private_interface_info = {
+ .description = "CDC NCM (Apple Private)",
+ .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
+ | FLAG_ETHER | FLAG_SEND_ZLP,
+ .bind = cdc_ncm_bind,
+ .unbind = cdc_ncm_unbind,
+ .manage_power = usbnet_manage_power,
+ .status = cdc_ncm_status,
+ .rx_fixup = cdc_ncm_rx_fixup,
+ .tx_fixup = cdc_ncm_tx_fixup,
+ .set_rx_mode = usbnet_cdc_update_filter,
+};
+
/* Same as cdc_ncm_info, but with FLAG_WWAN */
static const struct driver_info wwan_info = {
.description = "Mobile Broadband Network Device",
@@ -1954,6 +1983,22 @@ static const struct driver_info wwan_noarp_info = {
};
static const struct usb_device_id cdc_devs[] = {
+ /* iPhone */
+ { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12a8, 2),
+ .driver_info = (unsigned long)&apple_tethering_interface_info,
+ },
+ { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12a8, 4),
+ .driver_info = (unsigned long)&apple_private_interface_info,
+ },
+
+ /* iPad */
+ { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12ab, 2),
+ .driver_info = (unsigned long)&apple_tethering_interface_info,
+ },
+ { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12ab, 4),
+ .driver_info = (unsigned long)&apple_private_interface_info,
+ },
+
/* Ericsson MBM devices like F5521gw */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_VENDOR,
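
The new Apple entries rely on interface-number matching, which lets two interfaces of the same VID:PID bind different driver_info profiles. A condensed sketch of the idiom, with hypothetical IDs and profile names:

	/* Hypothetical 0x1234:0x5678 device: interfaces 2 and 4 get
	 * different profiles.
	 */
	static const struct usb_device_id example_devs[] = {
		{ USB_DEVICE_INTERFACE_NUMBER(0x1234, 0x5678, 2),
		  .driver_info = (unsigned long)&profile_a_info, },
		{ USB_DEVICE_INTERFACE_NUMBER(0x1234, 0x5678, 4),
		  .driver_info = (unsigned long)&profile_b_info, },
		{ }	/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, example_devs);
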
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 687d70cfc556..46afb95ffabe 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -286,10 +286,11 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
return;
}
- if (urb->actual_length <= IPHETH_IP_ALIGN) {
- dev->net->stats.rx_length_errors++;
- return;
- }
+ /* iPhone may periodically send URBs with no payload
+ * on the "bulk in" endpoint. It is safe to ignore them.
+ */
+ if (urb->actual_length == 0)
+ goto rx_submit;
/* RX URBs starting with 0x00 0x01 do not encapsulate Ethernet frames,
* but rather are control frames. Their purpose is not documented, and
@@ -298,7 +299,8 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
* URB received from the bulk IN endpoint.
*/
if (unlikely
- (((char *)urb->transfer_buffer)[0] == 0 &&
+ (urb->actual_length == 4 &&
+ ((char *)urb->transfer_buffer)[0] == 0 &&
((char *)urb->transfer_buffer)[1] == 1))
goto rx_submit;
@@ -306,7 +308,6 @@ static void ipheth_rcvbulk_callback(struct urb *urb)
if (retval != 0) {
dev_err(&dev->intf->dev, "%s: callback retval: %d\n",
__func__, retval);
- return;
}
rx_submit:
@@ -354,13 +355,14 @@ static int ipheth_carrier_set(struct ipheth_device *dev)
0x02, /* index */
dev->ctrl_buf, IPHETH_CTRL_BUF_SIZE,
IPHETH_CTRL_TIMEOUT);
- if (retval < 0) {
+ if (retval <= 0) {
dev_err(&dev->intf->dev, "%s: usb_control_msg: %d\n",
__func__, retval);
return retval;
}
- if (dev->ctrl_buf[0] == IPHETH_CARRIER_ON) {
+ if ((retval == 1 && dev->ctrl_buf[0] == IPHETH_CARRIER_ON) ||
+ (retval >= 2 && dev->ctrl_buf[1] == IPHETH_CARRIER_ON)) {
netif_carrier_on(dev->net);
if (dev->tx_urb->status != -EINPROGRESS)
netif_wake_queue(dev->net);
@@ -475,8 +477,8 @@ static int ipheth_close(struct net_device *net)
{
struct ipheth_device *dev = netdev_priv(net);
- cancel_delayed_work_sync(&dev->carrier_work);
netif_stop_queue(net);
+ cancel_delayed_work_sync(&dev->carrier_work);
return 0;
}
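
The ipheth_close() reordering matters because the TX error path can reschedule carrier_work: stopping the queue first means no new TX completions can requeue the work after it has been cancelled. A minimal sketch of the safe shutdown order, assuming a driver-private delayed work that may call netif_wake_queue():

	static int example_close(struct net_device *net)
	{
		struct example_priv *priv = netdev_priv(net);

		netif_stop_queue(net);		/* 1: no new TX, no new error callbacks */
		cancel_delayed_work_sync(&priv->carrier_work);	/* 2: nothing left to re-wake us */
		return 0;
	}
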
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 80ee4fcdfb36..8adf77e3557e 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -380,11 +380,6 @@ struct skb_data { /* skb->cb is one of these */
int num_of_packet;
};
-struct usb_context {
- struct usb_ctrlrequest req;
- struct lan78xx_net *dev;
-};
-
#define EVENT_TX_HALT 0
#define EVENT_RX_HALT 1
#define EVENT_RX_MEMORY 2
@@ -1692,15 +1687,10 @@ static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
ret = lan78xx_read_reg(dev, MAC_CR, &buf);
if (buf & MAC_CR_EEE_EN_) {
- edata->eee_enabled = true;
- edata->tx_lpi_enabled = true;
/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
edata->tx_lpi_timer = buf;
} else {
- edata->eee_enabled = false;
- edata->eee_active = false;
- edata->tx_lpi_enabled = false;
edata->tx_lpi_timer = 0;
}
@@ -1721,24 +1711,16 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
if (ret < 0)
return ret;
- if (edata->eee_enabled) {
- ret = lan78xx_read_reg(dev, MAC_CR, &buf);
- buf |= MAC_CR_EEE_EN_;
- ret = lan78xx_write_reg(dev, MAC_CR, buf);
-
- phy_ethtool_set_eee(net->phydev, edata);
-
- buf = (u32)edata->tx_lpi_timer;
- ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
- } else {
- ret = lan78xx_read_reg(dev, MAC_CR, &buf);
- buf &= ~MAC_CR_EEE_EN_;
- ret = lan78xx_write_reg(dev, MAC_CR, buf);
- }
+ ret = phy_ethtool_set_eee(net->phydev, edata);
+ if (ret < 0)
+ goto out;
+ buf = (u32)edata->tx_lpi_timer;
+ ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
+out:
usb_autopm_put_interface(dev->intf);
- return 0;
+ return ret;
}
static u32 lan78xx_get_link(struct net_device *net)
@@ -2114,7 +2096,20 @@ static void lan78xx_remove_mdio(struct lan78xx_net *dev)
static void lan78xx_link_status_change(struct net_device *net)
{
+ struct lan78xx_net *dev = netdev_priv(net);
struct phy_device *phydev = net->phydev;
+ u32 data;
+ int ret;
+
+ ret = lan78xx_read_reg(dev, MAC_CR, &data);
+ if (ret < 0)
+ return;
+
+ if (phydev->enable_tx_lpi)
+ data |= MAC_CR_EEE_EN_;
+ else
+ data &= ~MAC_CR_EEE_EN_;
+ lan78xx_write_reg(dev, MAC_CR, data);
phy_print_status(phydev);
}
@@ -2408,6 +2403,8 @@ static int lan78xx_phy_init(struct lan78xx_net *dev)
mii_adv_to_linkmode_adv_t(fc, mii_adv);
linkmode_or(phydev->advertising, fc, phydev->advertising);
+ phy_support_eee(phydev);
+
if (phydev->mdio.dev.of_node) {
u32 reg;
int len;
@@ -2526,7 +2523,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
if (!ret)
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
usb_autopm_put_interface(dev->intf);
@@ -2944,6 +2941,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
return ret;
buf |= HW_CFG_MEF_;
+ buf |= HW_CFG_CLK125_EN_;
+ buf |= HW_CFG_REFCLK25_EN_;
ret = lan78xx_write_reg(dev, HW_CFG, buf);
if (ret < 0)
@@ -3032,8 +3031,11 @@ static int lan78xx_reset(struct lan78xx_net *dev)
return ret;
/* LAN7801 only has RGMII mode */
- if (dev->chipid == ID_REV_CHIP_ID_7801_)
+ if (dev->chipid == ID_REV_CHIP_ID_7801_) {
buf &= ~MAC_CR_GMII_EN_;
+ /* Enable Auto Duplex and Auto speed */
+ buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
+ }
if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
dev->chipid == ID_REV_CHIP_ID_7850_) {
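
The lan78xx EEE rework delegates policy to phylib: the driver declares capability once with phy_support_eee(), and the link-change callback mirrors phydev->enable_tx_lpi into the MAC. A condensed sketch of the pattern — MY_MAC_EEE_EN and the register accessors are hypothetical stand-ins:

	static void example_link_change(struct net_device *net)
	{
		struct phy_device *phydev = net->phydev;
		u32 cr = example_read_mac_cr(net);	/* hypothetical accessor */

		if (phydev->enable_tx_lpi)	/* phylib resolved EEE for this link */
			cr |= MY_MAC_EEE_EN;
		else
			cr &= ~MY_MAC_EEE_EN;
		example_write_mac_cr(net, cr);
		phy_print_status(phydev);
	}

	/* in probe, after the PHY is connected: */
	phy_support_eee(net->phydev);
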
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index a5469cf5cf67..4823dbdf5465 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -121,7 +121,6 @@ static const struct net_device_ops qmimux_netdev_ops = {
.ndo_open = qmimux_open,
.ndo_stop = qmimux_stop,
.ndo_start_xmit = qmimux_start_xmit,
- .ndo_get_stats64 = dev_get_tstats64,
};
static void qmimux_setup(struct net_device *dev)
@@ -133,6 +132,7 @@ static void qmimux_setup(struct net_device *dev)
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->netdev_ops = &qmimux_netdev_ops;
dev->mtu = 1500;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
dev->needs_free_netdev = true;
}
@@ -201,6 +201,7 @@ static int qmimux_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
break;
default:
/* not ip - do not know what to do */
+ kfree_skb(skbn);
goto skip;
}
@@ -257,12 +258,6 @@ static int qmimux_register_device(struct net_device *real_dev, u8 mux_id)
priv->mux_id = mux_id;
priv->real_dev = real_dev;
- new_dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
- if (!new_dev->tstats) {
- err = -ENOBUFS;
- goto out_free_newdev;
- }
-
new_dev->sysfs_groups[0] = &qmi_wwan_sysfs_qmimux_attr_group;
err = register_netdevice(new_dev);
@@ -295,7 +290,6 @@ static void qmimux_unregister_device(struct net_device *dev,
struct qmimux_priv *priv = netdev_priv(dev);
struct net_device *real_dev = priv->real_dev;
- free_percpu(dev->tstats);
netdev_upper_dev_unlink(real_dev, dev);
unregister_netdevice_queue(dev, head);
@@ -644,7 +638,6 @@ static const struct net_device_ops qmi_wwan_netdev_ops = {
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
.ndo_change_mtu = usbnet_change_mtu,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = qmi_wwan_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -1380,9 +1373,13 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */
{QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x3000, 0)}, /* Telit FN912 series */
+ {QMI_QUIRK_SET_DTR(0x1bc7, 0x3001, 0)}, /* Telit FN912 series */
{QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
{QMI_FIXED_INTF(0x1c9e, 0x9b01, 3)}, /* XS Stick W100-2 from 4G Systems */
+ {QMI_QUIRK_SET_DTR(0x1c9e, 0x9b05, 4)}, /* Longsung U8300 */
+ {QMI_QUIRK_SET_DTR(0x1c9e, 0x9b3c, 4)}, /* Longsung U9300 */
{QMI_FIXED_INTF(0x0b3c, 0xc000, 4)}, /* Olivetti Olicard 100 */
{QMI_FIXED_INTF(0x0b3c, 0xc001, 4)}, /* Olivetti Olicard 120 */
{QMI_FIXED_INTF(0x0b3c, 0xc002, 4)}, /* Olivetti Olicard 140 */
@@ -1435,6 +1432,7 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x1546, 0x1312, 4)}, /* u-blox LARA-R6 01B */
{QMI_QUIRK_SET_DTR(0x1546, 0x1342, 4)}, /* u-blox LARA-L6 */
{QMI_QUIRK_SET_DTR(0x33f8, 0x0104, 4)}, /* Rolling RW101 RMNET */
+ {QMI_FIXED_INTF(0x2dee, 0x4d22, 5)}, /* MeiG Smart SRM825L */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
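
The qmimux stats conversion above is worth calling out: setting dev->pcpu_stat_type before register_netdevice() makes the core allocate and free dev->tstats itself and wires up default tstats-based reporting, so the manual netdev_alloc_pcpu_stats()/free_percpu() pair and the explicit .ndo_get_stats64 = dev_get_tstats64 hook all become redundant. A minimal sketch of the setup side:

	static void example_setup(struct net_device *dev)
	{
		dev->netdev_ops = &example_netdev_ops;
		/* core now owns allocation, freeing and reporting of tstats */
		dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
		dev->needs_free_netdev = true;
	}
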
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 5d6aeb086fc7..15e12f46d0ea 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1774,6 +1774,7 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa)
goto amacout;
}
memcpy(sa->sa_data, buf, 6);
+ tp->netdev->addr_assign_type = NET_ADDR_STOLEN;
netif_info(tp, probe, tp->netdev,
"Using pass-thru MAC addr %pM\n", sa->sa_data);
@@ -5274,7 +5275,7 @@ post_fw:
rtl_reset_ocp_base(tp);
strscpy(rtl_fw->version, fw_hdr->version, RTL_VER_SIZE);
- dev_info(&tp->intf->dev, "load %s successfully\n", rtl_fw->version);
+ dev_dbg(&tp->intf->dev, "load %s successfully\n", rtl_fw->version);
}
static void rtl8152_release_firmware(struct r8152 *tp)
@@ -8554,6 +8555,19 @@ static int rtl8152_system_resume(struct r8152 *tp)
usb_submit_urb(tp->intr_urb, GFP_NOIO);
}
+ /* If the device is RTL8152_INACCESSIBLE here then we should do a
+ * reset. This is important because the usb_lock_device_for_reset()
+ * that happens as a result of usb_queue_reset_device() will silently
+ * fail if the device was suspended or if too much time passed.
+ *
+ * NOTE: The device is locked here so we can directly do the reset.
+ * We don't need usb_lock_device_for_reset() because that's just a
+ * wrapper over device_lock() and device_resume() (which calls us)
+ * does that for us.
+ */
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ usb_reset_device(tp->udev);
+
return 0;
}
@@ -8634,6 +8648,13 @@ static int rtl8152_system_suspend(struct r8152 *tp)
tasklet_enable(&tp->tx_tl);
}
+ /* If we're inaccessible here then some of the work that we did to
+ * get the adapter ready for suspend didn't work. Queue up a wakeup
+ * event so we can try again.
+ */
+ if (test_bit(RTL8152_INACCESSIBLE, &tp->flags))
+ pm_wakeup_event(&tp->udev->dev, 0);
+
return 0;
}
@@ -9365,7 +9386,7 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
case RTL_VER_01:
case RTL_VER_02:
case RTL_VER_07:
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
default:
break;
@@ -9377,7 +9398,7 @@ static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
mutex_lock(&tp->control);
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
if (netif_running(dev)) {
if (tp->rtl_ops.change_mtu)
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 97afd7335d86..01a3b2417a54 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -778,7 +778,8 @@ static int rtl8150_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ecmd)
{
rtl8150_t *dev = netdev_priv(netdev);
- short lpa, bmcr;
+ short lpa = 0;
+ short bmcr = 0;
u32 supported;
supported = (SUPPORTED_10baseT_Half |
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 78ad2da3ee29..78c821349f48 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -61,11 +61,6 @@ struct smsc75xx_priv {
u8 suspend_flags;
};
-struct usb_context {
- struct usb_ctrlrequest req;
- struct usbnet *dev;
-};
-
static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
@@ -2234,27 +2229,23 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
rx_cmd_b);
skb_trim(skb, skb->len - 4); /* remove fcs */
- skb->truesize = size + sizeof(struct sk_buff);
return 1;
}
- ax_skb = skb_clone(skb, GFP_ATOMIC);
+ /* Use "size - 4" to remove fcs */
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, size - 4);
if (unlikely(!ax_skb)) {
netdev_warn(dev->net, "Error allocating skb\n");
return 0;
}
- ax_skb->len = size;
- ax_skb->data = packet;
- skb_set_tail_pointer(ax_skb, size);
+ skb_put(ax_skb, size - 4);
+ memcpy(ax_skb->data, packet, size - 4);
smsc75xx_rx_csum_offload(dev, ax_skb, rx_cmd_a,
rx_cmd_b);
- skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
- ax_skb->truesize = size + sizeof(struct sk_buff);
-
usbnet_skb_return(dev, ax_skb);
}
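
This rx_fixup rewrite (mirrored below in smsc95xx and sr9700) replaces skb_clone() plus manual pointer surgery — which left data sharing and skb->truesize in a fragile state — with a properly aligned copy per packet. A generic sketch of the safe pattern for splitting a USB bulk buffer into per-packet skbs:

	/* Sketch: hand one packet of 'size' bytes at 'packet' up the stack. */
	static int example_deliver(struct usbnet *dev, const void *packet, u32 size)
	{
		struct sk_buff *nskb;

		nskb = netdev_alloc_skb_ip_align(dev->net, size); /* IP-aligned data */
		if (unlikely(!nskb))
			return 0;

		skb_put(nskb, size);
		memcpy(nskb->data, packet, size);	/* private data, honest truesize */
		usbnet_skb_return(dev, nskb);
		return 1;
	}
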
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 2fa46baa589e..8e82184be5e7 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -879,7 +879,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev)
static int smsc95xx_reset(struct usbnet *dev)
{
struct smsc95xx_priv *pdata = dev->driver_priv;
- u32 read_buf, write_buf, burst_cap;
+ u32 read_buf, burst_cap;
int ret = 0, timeout;
netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
@@ -1003,10 +1003,13 @@ static int smsc95xx_reset(struct usbnet *dev)
return ret;
netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
+ ret = smsc95xx_read_reg(dev, LED_GPIO_CFG, &read_buf);
+ if (ret < 0)
+ return ret;
/* Configure GPIO pins as LED outputs */
- write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
- LED_GPIO_CFG_FDX_LED;
- ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
+ read_buf |= LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
+ LED_GPIO_CFG_FDX_LED;
+ ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, read_buf);
if (ret < 0)
return ret;
@@ -1810,9 +1813,11 @@ static int smsc95xx_reset_resume(struct usb_interface *intf)
static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
{
- skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
+ u16 *csum_ptr = (u16 *)(skb_tail_pointer(skb) - 2);
+
+ skb->csum = (__force __wsum)get_unaligned(csum_ptr);
skb->ip_summed = CHECKSUM_COMPLETE;
- skb_trim(skb, skb->len - 2);
+ skb_trim(skb, skb->len - 2); /* remove csum */
}
static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
@@ -1870,25 +1875,22 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if (dev->net->features & NETIF_F_RXCSUM)
smsc95xx_rx_csum_offload(skb);
skb_trim(skb, skb->len - 4); /* remove fcs */
- skb->truesize = size + sizeof(struct sk_buff);
return 1;
}
- ax_skb = skb_clone(skb, GFP_ATOMIC);
+ ax_skb = netdev_alloc_skb_ip_align(dev->net, size);
if (unlikely(!ax_skb)) {
netdev_warn(dev->net, "Error allocating skb\n");
return 0;
}
- ax_skb->len = size;
- ax_skb->data = packet;
- skb_set_tail_pointer(ax_skb, size);
+ skb_put(ax_skb, size);
+ memcpy(ax_skb->data, packet, size);
if (dev->net->features & NETIF_F_RXCSUM)
smsc95xx_rx_csum_offload(ax_skb);
skb_trim(ax_skb, ax_skb->len - 4); /* remove fcs */
- ax_skb->truesize = size + sizeof(struct sk_buff);
usbnet_skb_return(dev, ax_skb);
}
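
The smsc95xx checksum helper additionally stops dereferencing a potentially unaligned u16 directly: get_unaligned() keeps the 2-byte load safe on strict-alignment architectures. The idiom in isolation, assuming hardware that appends the checksum to the frame:

	#include <asm/unaligned.h>

	/* last two bytes of the frame hold the hardware checksum */
	u16 *csum_ptr = (u16 *)(skb_tail_pointer(skb) - 2);

	skb->csum = (__force __wsum)get_unaligned(csum_ptr);
	skb->ip_summed = CHECKSUM_COMPLETE;
	skb_trim(skb, skb->len - 2);	/* strip the trailing checksum */
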
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 3164451e1010..cb7d2f798fb4 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -179,6 +179,7 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
struct usbnet *dev = netdev_priv(netdev);
__le16 res;
int rc = 0;
+ int err;
if (phy_id) {
netdev_dbg(netdev, "Only internal phy supported\n");
@@ -189,11 +190,17 @@ static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
if (loc == MII_BMSR) {
u8 value;
- sr_read_reg(dev, SR_NSR, &value);
+ err = sr_read_reg(dev, SR_NSR, &value);
+ if (err < 0)
+ return err;
+
if (value & NSR_LINKST)
rc = 1;
}
- sr_share_read_word(dev, 1, loc, &res);
+ err = sr_share_read_word(dev, 1, loc, &res);
+ if (err < 0)
+ return err;
+
if (rc == 1)
res = le16_to_cpu(res) | BMSR_LSTATUS;
else
@@ -421,19 +428,15 @@ static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, 3);
skb->len = len;
skb_set_tail_pointer(skb, len);
- skb->truesize = len + sizeof(struct sk_buff);
return 2;
}
- /* skb_clone is used for address align */
- sr_skb = skb_clone(skb, GFP_ATOMIC);
+ sr_skb = netdev_alloc_skb_ip_align(dev->net, len);
if (!sr_skb)
return 0;
- sr_skb->len = len;
- sr_skb->data = skb->data + 3;
- skb_set_tail_pointer(sr_skb, len);
- sr_skb->truesize = len + sizeof(struct sk_buff);
+ skb_put(sr_skb, len);
+ memcpy(sr_skb->data, skb->data + 3, len);
usbnet_skb_return(dev, sr_skb);
skb_pull(skb, len + SR_RX_OVERHEAD);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index e84efa661589..9fd516e8bb10 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -398,7 +398,7 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
// no second zero-length packet read wanted after mtu-sized packets
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
- net->mtu = new_mtu;
+ WRITE_ONCE(net->mtu, new_mtu);
dev->hard_mtu = net->mtu + net->hard_header_len;
if (dev->rx_urb_size == old_hard_mtu) {
@@ -1733,6 +1733,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->hard_mtu = net->mtu + net->hard_header_len;
net->min_mtu = 0;
net->max_mtu = ETH_MAX_MTU;
+ net->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
net->netdev_ops = &usbnet_netdev_ops;
net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
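
The WRITE_ONCE() conversions here and in cdc_ncm, lan78xx and r8152 pair with lockless READ_ONCE(dev->mtu) readers in the core: annotating both sides documents the intentional data race for KCSAN and prevents store/load tearing. The paired idiom in sketch form:

	/* writer: ndo_change_mtu path, under the driver's usual locking */
	WRITE_ONCE(net->mtu, new_mtu);

	/* lockless reader elsewhere */
	unsigned int mtu = READ_ONCE(net->mtu);
	if (skb->len > mtu)
		/* ... drop or fragment ... */;
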
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index bcdfbf61eb66..426e68a95067 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -26,6 +26,7 @@
#include <linux/ptr_ring.h>
#include <linux/bpf_trace.h>
#include <linux/net_tstamp.h>
+#include <linux/skbuff_ref.h>
#include <net/page_pool/helpers.h>
#define DRV_NAME "veth"
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 115c3c5414f2..c6af18948092 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -24,6 +24,8 @@
#include <net/xdp.h>
#include <net/net_failover.h>
#include <net/netdev_rx_queue.h>
+#include <net/netdev_queues.h>
+#include <net/xdp_sock_drv.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
@@ -39,14 +41,12 @@ module_param(napi_tx, bool, 0644);
#define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
-/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
-#define VIRTIO_XDP_HEADROOM 256
-
/* Separating two types of XDP xmit */
#define VIRTIO_XDP_TX BIT(0)
#define VIRTIO_XDP_REDIR BIT(1)
-#define VIRTIO_XDP_FLAG BIT(0)
+#define VIRTIO_XDP_FLAG BIT(0)
+#define VIRTIO_ORPHAN_FLAG BIT(1)
/* RX packet size EWMA. The average packet size is used to determine the packet
* buffer size when refilling RX rings. As the entire RX ring may be refilled
@@ -78,11 +78,14 @@ static const unsigned long guest_offloads[] = {
struct virtnet_stat_desc {
char desc[ETH_GSTRING_LEN];
size_t offset;
+ size_t qstat_offset;
};
struct virtnet_sq_free_stats {
u64 packets;
u64 bytes;
+ u64 napi_packets;
+ u64 napi_bytes;
};
struct virtnet_sq_stats {
@@ -93,6 +96,8 @@ struct virtnet_sq_stats {
u64_stats_t xdp_tx_drops;
u64_stats_t kicks;
u64_stats_t tx_timeouts;
+ u64_stats_t stop;
+ u64_stats_t wake;
};
struct virtnet_rq_stats {
@@ -107,31 +112,159 @@ struct virtnet_rq_stats {
u64_stats_t kicks;
};
-#define VIRTNET_SQ_STAT(m) offsetof(struct virtnet_sq_stats, m)
-#define VIRTNET_RQ_STAT(m) offsetof(struct virtnet_rq_stats, m)
+#define VIRTNET_SQ_STAT(name, m) {name, offsetof(struct virtnet_sq_stats, m), -1}
+#define VIRTNET_RQ_STAT(name, m) {name, offsetof(struct virtnet_rq_stats, m), -1}
+
+#define VIRTNET_SQ_STAT_QSTAT(name, m) \
+ { \
+ name, \
+ offsetof(struct virtnet_sq_stats, m), \
+ offsetof(struct netdev_queue_stats_tx, m), \
+ }
+
+#define VIRTNET_RQ_STAT_QSTAT(name, m) \
+ { \
+ name, \
+ offsetof(struct virtnet_rq_stats, m), \
+ offsetof(struct netdev_queue_stats_rx, m), \
+ }
static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
- { "packets", VIRTNET_SQ_STAT(packets) },
- { "bytes", VIRTNET_SQ_STAT(bytes) },
- { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
- { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
- { "kicks", VIRTNET_SQ_STAT(kicks) },
- { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) },
+ VIRTNET_SQ_STAT("xdp_tx", xdp_tx),
+ VIRTNET_SQ_STAT("xdp_tx_drops", xdp_tx_drops),
+ VIRTNET_SQ_STAT("kicks", kicks),
+ VIRTNET_SQ_STAT("tx_timeouts", tx_timeouts),
};
static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
- { "packets", VIRTNET_RQ_STAT(packets) },
- { "bytes", VIRTNET_RQ_STAT(bytes) },
- { "drops", VIRTNET_RQ_STAT(drops) },
- { "xdp_packets", VIRTNET_RQ_STAT(xdp_packets) },
- { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) },
- { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) },
- { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) },
- { "kicks", VIRTNET_RQ_STAT(kicks) },
+ VIRTNET_RQ_STAT("drops", drops),
+ VIRTNET_RQ_STAT("xdp_packets", xdp_packets),
+ VIRTNET_RQ_STAT("xdp_tx", xdp_tx),
+ VIRTNET_RQ_STAT("xdp_redirects", xdp_redirects),
+ VIRTNET_RQ_STAT("xdp_drops", xdp_drops),
+ VIRTNET_RQ_STAT("kicks", kicks),
+};
+
+static const struct virtnet_stat_desc virtnet_sq_stats_desc_qstat[] = {
+ VIRTNET_SQ_STAT_QSTAT("packets", packets),
+ VIRTNET_SQ_STAT_QSTAT("bytes", bytes),
+ VIRTNET_SQ_STAT_QSTAT("stop", stop),
+ VIRTNET_SQ_STAT_QSTAT("wake", wake),
+};
+
+static const struct virtnet_stat_desc virtnet_rq_stats_desc_qstat[] = {
+ VIRTNET_RQ_STAT_QSTAT("packets", packets),
+ VIRTNET_RQ_STAT_QSTAT("bytes", bytes),
+};
+
+#define VIRTNET_STATS_DESC_CQ(name) \
+ {#name, offsetof(struct virtio_net_stats_cvq, name), -1}
+
+#define VIRTNET_STATS_DESC_RX(class, name) \
+ {#name, offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), -1}
+
+#define VIRTNET_STATS_DESC_TX(class, name) \
+ {#name, offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), -1}
+
+
+static const struct virtnet_stat_desc virtnet_stats_cvq_desc[] = {
+ VIRTNET_STATS_DESC_CQ(command_num),
+ VIRTNET_STATS_DESC_CQ(ok_num),
};
-#define VIRTNET_SQ_STATS_LEN ARRAY_SIZE(virtnet_sq_stats_desc)
-#define VIRTNET_RQ_STATS_LEN ARRAY_SIZE(virtnet_rq_stats_desc)
+static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc[] = {
+ VIRTNET_STATS_DESC_RX(basic, packets),
+ VIRTNET_STATS_DESC_RX(basic, bytes),
+
+ VIRTNET_STATS_DESC_RX(basic, notifications),
+ VIRTNET_STATS_DESC_RX(basic, interrupts),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc[] = {
+ VIRTNET_STATS_DESC_TX(basic, packets),
+ VIRTNET_STATS_DESC_TX(basic, bytes),
+
+ VIRTNET_STATS_DESC_TX(basic, notifications),
+ VIRTNET_STATS_DESC_TX(basic, interrupts),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc[] = {
+ VIRTNET_STATS_DESC_RX(csum, needs_csum),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc[] = {
+ VIRTNET_STATS_DESC_TX(gso, gso_packets_noseg),
+ VIRTNET_STATS_DESC_TX(gso, gso_bytes_noseg),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc[] = {
+ VIRTNET_STATS_DESC_RX(speed, ratelimit_bytes),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc[] = {
+ VIRTNET_STATS_DESC_TX(speed, ratelimit_bytes),
+};
+
+#define VIRTNET_STATS_DESC_RX_QSTAT(class, name, qstat_field) \
+ { \
+ #name, \
+ offsetof(struct virtio_net_stats_rx_ ## class, rx_ ## name), \
+ offsetof(struct netdev_queue_stats_rx, qstat_field), \
+ }
+
+#define VIRTNET_STATS_DESC_TX_QSTAT(class, name, qstat_field) \
+ { \
+ #name, \
+ offsetof(struct virtio_net_stats_tx_ ## class, tx_ ## name), \
+ offsetof(struct netdev_queue_stats_tx, qstat_field), \
+ }
+
+static const struct virtnet_stat_desc virtnet_stats_rx_basic_desc_qstat[] = {
+ VIRTNET_STATS_DESC_RX_QSTAT(basic, drops, hw_drops),
+ VIRTNET_STATS_DESC_RX_QSTAT(basic, drop_overruns, hw_drop_overruns),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_basic_desc_qstat[] = {
+ VIRTNET_STATS_DESC_TX_QSTAT(basic, drops, hw_drops),
+ VIRTNET_STATS_DESC_TX_QSTAT(basic, drop_malformed, hw_drop_errors),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_csum_desc_qstat[] = {
+ VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_valid, csum_unnecessary),
+ VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_none, csum_none),
+ VIRTNET_STATS_DESC_RX_QSTAT(csum, csum_bad, csum_bad),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_csum_desc_qstat[] = {
+ VIRTNET_STATS_DESC_TX_QSTAT(csum, csum_none, csum_none),
+ VIRTNET_STATS_DESC_TX_QSTAT(csum, needs_csum, needs_csum),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_gso_desc_qstat[] = {
+ VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets, hw_gro_packets),
+ VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes, hw_gro_bytes),
+ VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_packets_coalesced, hw_gro_wire_packets),
+ VIRTNET_STATS_DESC_RX_QSTAT(gso, gso_bytes_coalesced, hw_gro_wire_bytes),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_gso_desc_qstat[] = {
+ VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_packets, hw_gso_packets),
+ VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_bytes, hw_gso_bytes),
+ VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments, hw_gso_wire_packets),
+ VIRTNET_STATS_DESC_TX_QSTAT(gso, gso_segments_bytes, hw_gso_wire_bytes),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_rx_speed_desc_qstat[] = {
+ VIRTNET_STATS_DESC_RX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
+};
+
+static const struct virtnet_stat_desc virtnet_stats_tx_speed_desc_qstat[] = {
+ VIRTNET_STATS_DESC_TX_QSTAT(speed, ratelimit_packets, hw_drop_ratelimits),
+};
+
+#define VIRTNET_Q_TYPE_RX 0
+#define VIRTNET_Q_TYPE_TX 1
+#define VIRTNET_Q_TYPE_CQ 2
struct virtnet_interrupt_coalesce {
u32 max_packets;
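
Each descriptor above now carries two offsets: one into the driver's private stats struct and one into the corresponding netdev_queue_stats_{rx,tx} struct, with -1 marking counters that have no qstat equivalent. That lets a single generic loop serve both ethtool -S and the netlink per-queue stats API, as a later hunk in this file does. A sketch of consuming such a table:

	/* Sketch: fold each described counter from 'src_stats' into 'dst_stats'. */
	for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
		size_t offset = virtnet_rq_stats_desc[i].offset;
		u64_stats_t *item, *src_item;

		item = (u64_stats_t *)((u8 *)dst_stats + offset);
		src_item = (u64_stats_t *)((u8 *)src_stats + offset);
		u64_stats_add(item, u64_stats_read(src_item));
	}
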
@@ -184,6 +317,9 @@ struct receive_queue {
/* Is dynamic interrupt moderation enabled? */
bool dim_enabled;
+ /* Used to protect dim_enabled and inter_coal */
+ struct mutex dim_lock;
+
/* Dynamic Interrupt Moderation */
struct dim dim;
@@ -214,8 +350,12 @@ struct receive_queue {
/* Record the last dma info to free after new pages is allocated. */
struct virtnet_rq_dma *last_dma;
- /* Do dma by self */
- bool do_dma;
+ struct xsk_buff_pool *xsk_pool;
+
+ /* xdp rxq used by xsk */
+ struct xdp_rxq_info xsk_rxq_info;
+
+ struct xdp_buff **xsk_buffs;
};
/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -240,15 +380,6 @@ struct virtio_net_ctrl_rss {
struct control_buf {
struct virtio_net_ctrl_hdr hdr;
virtio_net_ctrl_ack status;
- struct virtio_net_ctrl_mq mq;
- u8 promisc;
- u8 allmulti;
- __virtio16 vid;
- __virtio64 offloads;
- struct virtio_net_ctrl_rss rss;
- struct virtio_net_ctrl_coal_tx coal_tx;
- struct virtio_net_ctrl_coal_rx coal_rx;
- struct virtio_net_ctrl_coal_vq coal_vq;
};
struct virtnet_info {
@@ -287,10 +418,14 @@ struct virtnet_info {
u16 rss_indir_table_size;
u32 rss_hash_types_supported;
u32 rss_hash_types_saved;
+ struct virtio_net_ctrl_rss rss;
/* Has control virtqueue */
bool has_cvq;
+ /* Lock to protect the control VQ */
+ struct mutex cvq_lock;
+
/* Host can handle any s/g split between our header and packet data */
bool any_header_sg;
@@ -340,6 +475,8 @@ struct virtnet_info {
/* failover when STANDBY feature enabled */
struct failover *failover;
+
+ u64 device_stats_cap;
};
struct padded_vnet_hdr {
@@ -361,6 +498,16 @@ struct virtio_net_common_hdr {
};
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
+static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
+ struct net_device *dev,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats);
+static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
+ struct sk_buff *skb, u8 flags);
+static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
+ struct sk_buff *curr_skb,
+ struct page *page, void *buf,
+ int len, int truesize);
static bool is_xdp_frame(void *ptr)
{
@@ -377,29 +524,50 @@ static struct xdp_frame *ptr_to_xdp(void *ptr)
return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG);
}
-static void __free_old_xmit(struct send_queue *sq, bool in_napi,
- struct virtnet_sq_free_stats *stats)
+static bool is_orphan_skb(void *ptr)
+{
+ return (unsigned long)ptr & VIRTIO_ORPHAN_FLAG;
+}
+
+static void *skb_to_ptr(struct sk_buff *skb, bool orphan)
+{
+ return (void *)((unsigned long)skb | (orphan ? VIRTIO_ORPHAN_FLAG : 0));
+}
+
+static struct sk_buff *ptr_to_skb(void *ptr)
+{
+ return (struct sk_buff *)((unsigned long)ptr & ~VIRTIO_ORPHAN_FLAG);
+}
+
+static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
+ bool in_napi, struct virtnet_sq_free_stats *stats)
{
unsigned int len;
void *ptr;
while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- ++stats->packets;
-
if (!is_xdp_frame(ptr)) {
- struct sk_buff *skb = ptr;
+ struct sk_buff *skb = ptr_to_skb(ptr);
pr_debug("Sent skb %p\n", skb);
- stats->bytes += skb->len;
+ if (is_orphan_skb(ptr)) {
+ stats->packets++;
+ stats->bytes += skb->len;
+ } else {
+ stats->napi_packets++;
+ stats->napi_bytes += skb->len;
+ }
napi_consume_skb(skb, in_napi);
} else {
struct xdp_frame *frame = ptr_to_xdp(ptr);
+ stats->packets++;
stats->bytes += xdp_get_frame_len(frame);
xdp_return_frame(frame);
}
}
+ netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes);
}
/* Converting between virtqueue no. and kernel tx/rx queue no.
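
VIRTIO_ORPHAN_FLAG extends the existing VIRTIO_XDP_FLAG trick: skb and xdp_frame pointers are at least 4-byte aligned, so the two low bits are free to encode what kind of buffer a virtqueue token refers to, and are masked off before dereferencing. The idiom in isolation:

	#define TAG_XDP		BIT(0)
	#define TAG_ORPHAN	BIT(1)

	static inline void *tag_ptr(void *p, unsigned long tag)
	{
		return (void *)((unsigned long)p | tag);
	}

	static inline bool has_tag(void *p, unsigned long tag)
	{
		return (unsigned long)p & tag;
	}

	static inline void *untag_ptr(void *p)
	{
		/* clear all tag bits before using the pointer */
		return (void *)((unsigned long)p & ~(TAG_XDP | TAG_ORPHAN));
	}
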
@@ -425,6 +593,17 @@ static int rxq2vq(int rxq)
return rxq * 2;
}
+static int vq_type(struct virtnet_info *vi, int qid)
+{
+ if (qid == vi->max_queue_pairs * 2)
+ return VIRTNET_Q_TYPE_CQ;
+
+ if (qid % 2)
+ return VIRTNET_Q_TYPE_TX;
+
+ return VIRTNET_Q_TYPE_RX;
+}
+
static inline struct virtio_net_common_hdr *
skb_vnet_common_hdr(struct sk_buff *skb)
{
@@ -603,7 +782,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- /* copy small packet so we can reuse these pages */
if (!NET_IP_ALIGN && len > GOOD_COPY_LEN && tailroom >= shinfo_size) {
skb = virtnet_build_skb(buf, truesize, p - buf, len);
if (unlikely(!skb))
@@ -707,7 +885,7 @@ static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
void *buf;
buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
- if (buf && rq->do_dma)
+ if (buf)
virtnet_rq_unmap(rq, buf, *len);
return buf;
@@ -720,11 +898,6 @@ static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
u32 offset;
void *head;
- if (!rq->do_dma) {
- sg_init_one(rq->sg, buf, len);
- return;
- }
-
head = page_address(rq->alloc_frag.page);
offset = buf - head;
@@ -750,44 +923,42 @@ static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
head = page_address(alloc_frag->page);
- if (rq->do_dma) {
- dma = head;
-
- /* new pages */
- if (!alloc_frag->offset) {
- if (rq->last_dma) {
- /* Now, the new page is allocated, the last dma
- * will not be used. So the dma can be unmapped
- * if the ref is 0.
- */
- virtnet_rq_unmap(rq, rq->last_dma, 0);
- rq->last_dma = NULL;
- }
+ dma = head;
- dma->len = alloc_frag->size - sizeof(*dma);
+ /* new pages */
+ if (!alloc_frag->offset) {
+ if (rq->last_dma) {
+ /* Now, the new page is allocated, the last dma
+ * will not be used. So the dma can be unmapped
+ * if the ref is 0.
+ */
+ virtnet_rq_unmap(rq, rq->last_dma, 0);
+ rq->last_dma = NULL;
+ }
- addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
- dma->len, DMA_FROM_DEVICE, 0);
- if (virtqueue_dma_mapping_error(rq->vq, addr))
- return NULL;
+ dma->len = alloc_frag->size - sizeof(*dma);
- dma->addr = addr;
- dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+ addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+ dma->len, DMA_FROM_DEVICE, 0);
+ if (virtqueue_dma_mapping_error(rq->vq, addr))
+ return NULL;
- /* Add a reference to dma to prevent the entire dma from
- * being released during error handling. This reference
- * will be freed after the pages are no longer used.
- */
- get_page(alloc_frag->page);
- dma->ref = 1;
- alloc_frag->offset = sizeof(*dma);
+ dma->addr = addr;
+ dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
- rq->last_dma = dma;
- }
+ /* Add a reference to dma to prevent the entire dma from
+ * being released during error handling. This reference
+ * will be freed after the pages are no longer used.
+ */
+ get_page(alloc_frag->page);
+ dma->ref = 1;
+ alloc_frag->offset = sizeof(*dma);
- ++dma->ref;
+ rq->last_dma = dma;
}
+ ++dma->ref;
+
buf = head + alloc_frag->offset;
get_page(alloc_frag->page);
@@ -804,12 +975,9 @@ static void virtnet_rq_set_premapped(struct virtnet_info *vi)
if (!vi->mergeable_rx_bufs && vi->big_packets)
return;
- for (i = 0; i < vi->max_queue_pairs; i++) {
- if (virtqueue_set_dma_premapped(vi->rq[i].vq))
- continue;
-
- vi->rq[i].do_dma = true;
- }
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ /* error should never happen */
+ BUG_ON(virtqueue_set_dma_premapped(vi->rq[i].vq));
}
static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
@@ -820,27 +988,33 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf)
rq = &vi->rq[i];
- if (rq->do_dma)
+ if (rq->xsk_pool) {
+ xsk_buff_free((struct xdp_buff *)buf);
+ return;
+ }
+
+ if (!vi->big_packets || vi->mergeable_rx_bufs)
virtnet_rq_unmap(rq, buf, 0);
virtnet_rq_free_buf(vi, rq, buf);
}
-static void free_old_xmit(struct send_queue *sq, bool in_napi)
+static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq,
+ bool in_napi)
{
struct virtnet_sq_free_stats stats = {0};
- __free_old_xmit(sq, in_napi, &stats);
+ __free_old_xmit(sq, txq, in_napi, &stats);
/* Avoid overhead when no packets have been processed
* happens when called speculatively from start_xmit.
*/
- if (!stats.packets)
+ if (!stats.packets && !stats.napi_packets)
return;
u64_stats_update_begin(&sq->stats.syncp);
- u64_stats_add(&sq->stats.bytes, stats.bytes);
- u64_stats_add(&sq->stats.packets, stats.packets);
+ u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes);
+ u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets);
u64_stats_update_end(&sq->stats.syncp);
}
@@ -874,21 +1048,352 @@ static void check_sq_full_and_disable(struct virtnet_info *vi,
* early means 16 slots are typically wasted.
*/
if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
- netif_stop_subqueue(dev, qnum);
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
+
+ netif_tx_stop_queue(txq);
+ u64_stats_update_begin(&sq->stats.syncp);
+ u64_stats_inc(&sq->stats.stop);
+ u64_stats_update_end(&sq->stats.syncp);
if (use_napi) {
if (unlikely(!virtqueue_enable_cb_delayed(sq->vq)))
virtqueue_napi_schedule(&sq->napi, sq->vq);
} else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
/* More just got used, free them then recheck. */
- free_old_xmit(sq, false);
+ free_old_xmit(sq, txq, false);
if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
netif_start_subqueue(dev, qnum);
+ u64_stats_update_begin(&sq->stats.syncp);
+ u64_stats_inc(&sq->stats.wake);
+ u64_stats_update_end(&sq->stats.syncp);
virtqueue_disable_cb(sq->vq);
}
}
}
}
+static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len)
+{
+ sg->dma_address = addr;
+ sg->length = len;
+}
+
+static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi,
+ struct receive_queue *rq, void *buf, u32 len)
+{
+ struct xdp_buff *xdp;
+ u32 bufsize;
+
+ xdp = (struct xdp_buff *)buf;
+
+ bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len;
+
+ if (unlikely(len > bufsize)) {
+ pr_debug("%s: rx error: len %u exceeds truesize %u\n",
+ vi->dev->name, len, bufsize);
+ DEV_STATS_INC(vi->dev, rx_length_errors);
+ xsk_buff_free(xdp);
+ return NULL;
+ }
+
+ xsk_buff_set_size(xdp, len);
+ xsk_buff_dma_sync_for_cpu(xdp);
+
+ return xdp;
+}
+
+static struct sk_buff *xsk_construct_skb(struct receive_queue *rq,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ struct sk_buff *skb;
+ unsigned int size;
+
+ size = xdp->data_end - xdp->data_hard_start;
+ skb = napi_alloc_skb(&rq->napi, size);
+ if (unlikely(!skb)) {
+ xsk_buff_free(xdp);
+ return NULL;
+ }
+
+ skb_reserve(skb, xdp->data_meta - xdp->data_hard_start);
+
+ size = xdp->data_end - xdp->data_meta;
+ memcpy(__skb_put(skb, size), xdp->data_meta, size);
+
+ if (metasize) {
+ __skb_pull(skb, metasize);
+ skb_metadata_set(skb, metasize);
+ }
+
+ xsk_buff_free(xdp);
+
+ return skb;
+}
+
+static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi,
+ struct receive_queue *rq, struct xdp_buff *xdp,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
+{
+ struct bpf_prog *prog;
+ u32 ret;
+
+ ret = XDP_PASS;
+ rcu_read_lock();
+ prog = rcu_dereference(rq->xdp_prog);
+ if (prog)
+ ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+ rcu_read_unlock();
+
+ switch (ret) {
+ case XDP_PASS:
+ return xsk_construct_skb(rq, xdp);
+
+ case XDP_TX:
+ case XDP_REDIRECT:
+ return NULL;
+
+ default:
+ /* drop packet */
+ xsk_buff_free(xdp);
+ u64_stats_inc(&stats->drops);
+ return NULL;
+ }
+}
+
+static void xsk_drop_follow_bufs(struct net_device *dev,
+ struct receive_queue *rq,
+ u32 num_buf,
+ struct virtnet_rq_stats *stats)
+{
+ struct xdp_buff *xdp;
+ u32 len;
+
+ while (num_buf-- > 1) {
+ xdp = virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!xdp)) {
+ pr_debug("%s: rx error: %d buffers missing\n",
+ dev->name, num_buf);
+ DEV_STATS_INC(dev, rx_length_errors);
+ break;
+ }
+ u64_stats_add(&stats->bytes, len);
+ xsk_buff_free(xdp);
+ }
+}
+
+static int xsk_append_merge_buffer(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ struct sk_buff *head_skb,
+ u32 num_buf,
+ struct virtio_net_hdr_mrg_rxbuf *hdr,
+ struct virtnet_rq_stats *stats)
+{
+ struct sk_buff *curr_skb;
+ struct xdp_buff *xdp;
+ u32 len, truesize;
+ struct page *page;
+ void *buf;
+
+ curr_skb = head_skb;
+
+ while (--num_buf) {
+ buf = virtqueue_get_buf(rq->vq, &len);
+ if (unlikely(!buf)) {
+ pr_debug("%s: rx error: %d buffers out of %d missing\n",
+ vi->dev->name, num_buf,
+ virtio16_to_cpu(vi->vdev,
+ hdr->num_buffers));
+ DEV_STATS_INC(vi->dev, rx_length_errors);
+ return -EINVAL;
+ }
+
+ u64_stats_add(&stats->bytes, len);
+
+ xdp = buf_to_xdp(vi, rq, buf, len);
+ if (!xdp)
+ goto err;
+
+ buf = napi_alloc_frag(len);
+ if (!buf) {
+ xsk_buff_free(xdp);
+ goto err;
+ }
+
+ memcpy(buf, xdp->data - vi->hdr_len, len);
+
+ xsk_buff_free(xdp);
+
+ page = virt_to_page(buf);
+
+ truesize = len;
+
+ curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
+ buf, len, truesize);
+ if (!curr_skb) {
+ put_page(page);
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats);
+ return -EINVAL;
+}
+
+static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi,
+ struct receive_queue *rq, struct xdp_buff *xdp,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
+{
+ struct virtio_net_hdr_mrg_rxbuf *hdr;
+ struct bpf_prog *prog;
+ struct sk_buff *skb;
+ u32 ret, num_buf;
+
+ hdr = xdp->data - vi->hdr_len;
+ num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
+
+ ret = XDP_PASS;
+ rcu_read_lock();
+ prog = rcu_dereference(rq->xdp_prog);
+ /* TODO: support multi buffer. */
+ if (prog && num_buf == 1)
+ ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+ rcu_read_unlock();
+
+ switch (ret) {
+ case XDP_PASS:
+ skb = xsk_construct_skb(rq, xdp);
+ if (!skb)
+ goto drop_bufs;
+
+ if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) {
+ dev_kfree_skb(skb);
+ goto drop;
+ }
+
+ return skb;
+
+ case XDP_TX:
+ case XDP_REDIRECT:
+ return NULL;
+
+ default:
+ /* drop packet */
+ xsk_buff_free(xdp);
+ }
+
+drop_bufs:
+ xsk_drop_follow_bufs(dev, rq, num_buf, stats);
+
+drop:
+ u64_stats_inc(&stats->drops);
+ return NULL;
+}
+
+static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ void *buf, u32 len,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
+{
+ struct net_device *dev = vi->dev;
+ struct sk_buff *skb = NULL;
+ struct xdp_buff *xdp;
+ u8 flags;
+
+ len -= vi->hdr_len;
+
+ u64_stats_add(&stats->bytes, len);
+
+ xdp = buf_to_xdp(vi, rq, buf, len);
+ if (!xdp)
+ return;
+
+ if (unlikely(len < ETH_HLEN)) {
+ pr_debug("%s: short packet %i\n", dev->name, len);
+ DEV_STATS_INC(dev, rx_length_errors);
+ xsk_buff_free(xdp);
+ return;
+ }
+
+ flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags;
+
+ if (!vi->mergeable_rx_bufs)
+ skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats);
+ else
+ skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats);
+
+ if (skb)
+ virtnet_receive_done(vi, rq, skb, flags);
+}
+
+static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq,
+ struct xsk_buff_pool *pool, gfp_t gfp)
+{
+ struct xdp_buff **xsk_buffs;
+ dma_addr_t addr;
+ int err = 0;
+ u32 len, i;
+ int num;
+
+ xsk_buffs = rq->xsk_buffs;
+
+ num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free);
+ if (!num)
+ return -ENOMEM;
+
+ len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len;
+
+ for (i = 0; i < num; ++i) {
+ /* Use part of XDP_PACKET_HEADROOM as the virtnet hdr space.
+ * We assume XDP_PACKET_HEADROOM is larger than vi->hdr_len
+ * (see virtnet_xsk_pool_enable()).
+ */
+ addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len;
+
+ sg_init_table(rq->sg, 1);
+ sg_fill_dma(rq->sg, addr, len);
+
+ err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp);
+ if (err)
+ goto err;
+ }
+
+ return num;
+
+err:
+ for (; i < num; ++i)
+ xsk_buff_free(xsk_buffs[i]);
+
+ return err;
+}
+
+static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct send_queue *sq;
+
+ if (!netif_running(dev))
+ return -ENETDOWN;
+
+ if (qid >= vi->curr_queue_pairs)
+ return -EINVAL;
+
+ sq = &vi->sq[qid];
+
+ if (napi_if_scheduled_mark_missed(&sq->napi))
+ return 0;
+
+ local_bh_disable();
+ virtqueue_napi_schedule(&sq->napi, sq->vq);
+ local_bh_enable();
+
+ return 0;
+}
+
static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
struct send_queue *sq,
struct xdp_frame *xdpf)
@@ -1003,7 +1508,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
/* Free up any pending old buffers before queueing new ones. */
- __free_old_xmit(sq, false, &stats);
+ __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq),
+ false, &stats);
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
@@ -1105,7 +1611,7 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp,
static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
{
- return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
+ return vi->xdp_enabled ? XDP_PACKET_HEADROOM : 0;
}
/* We copy the packet for XDP in the following cases:
@@ -1169,7 +1675,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
}
/* Headroom does not contribute to packet length */
- *len = page_off - VIRTIO_XDP_HEADROOM;
+ *len = page_off - XDP_PACKET_HEADROOM;
return page;
err_buf:
__free_pages(page, 0);
@@ -1225,6 +1731,10 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev,
if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
+ /* Partially checksummed packets must be dropped. */
+ if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ goto err_xdp;
+
buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1452,8 +1962,8 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
void *ctx;
xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq);
- xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM,
- VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
+ xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM,
+ XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
if (!*num_buf)
return 0;
@@ -1542,6 +2052,10 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
if (unlikely(hdr->hdr.gso_type))
return NULL;
+ /* Partially checksummed packets must be dropped. */
+ if (unlikely(hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return NULL;
+
/* Now XDP core assumes frag size is PAGE_SIZE, but buffers
* with headroom may add hole in truesize, which
* make their length exceed PAGE_SIZE. So we disabled the
@@ -1566,12 +2080,12 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
/* linearize data for XDP */
xdp_page = xdp_linearize_page(rq, num_buf,
*page, offset,
- VIRTIO_XDP_HEADROOM,
+ XDP_PACKET_HEADROOM,
len);
if (!xdp_page)
return NULL;
} else {
- xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
+ xdp_room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
sizeof(struct skb_shared_info));
if (*len + xdp_room > PAGE_SIZE)
return NULL;
@@ -1580,7 +2094,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
if (!xdp_page)
return NULL;
- memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM,
+ memcpy(page_address(xdp_page) + XDP_PACKET_HEADROOM,
page_address(*page) + offset, *len);
}
@@ -1590,7 +2104,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
*page = xdp_page;
- return page_address(*page) + VIRTIO_XDP_HEADROOM;
+ return page_address(*page) + XDP_PACKET_HEADROOM;
}
static struct sk_buff *receive_mergeable_xdp(struct net_device *dev,
@@ -1653,6 +2167,49 @@ err_xdp:
return NULL;
}
+static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb,
+ struct sk_buff *curr_skb,
+ struct page *page, void *buf,
+ int len, int truesize)
+{
+ int num_skb_frags;
+ int offset;
+
+ num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
+ if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
+ struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
+
+ if (unlikely(!nskb))
+ return NULL;
+
+ if (curr_skb == head_skb)
+ skb_shinfo(curr_skb)->frag_list = nskb;
+ else
+ curr_skb->next = nskb;
+ curr_skb = nskb;
+ head_skb->truesize += nskb->truesize;
+ num_skb_frags = 0;
+ }
+
+ if (curr_skb != head_skb) {
+ head_skb->data_len += len;
+ head_skb->len += len;
+ head_skb->truesize += truesize;
+ }
+
+ offset = buf - page_address(page);
+ if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
+ put_page(page);
+ skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
+ len, truesize);
+ } else {
+ skb_add_rx_frag(curr_skb, num_skb_frags, page,
+ offset, len, truesize);
+ }
+
+ return curr_skb;
+}
+
static struct sk_buff *receive_mergeable(struct net_device *dev,
struct virtnet_info *vi,
struct receive_queue *rq,
@@ -1702,8 +2259,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
if (unlikely(!curr_skb))
goto err_skb;
while (--num_buf) {
- int num_skb_frags;
-
buf = virtnet_rq_get_buf(rq, &len, &ctx);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
@@ -1728,34 +2283,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
goto err_skb;
}
- num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
- if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
- struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
-
- if (unlikely(!nskb))
- goto err_skb;
- if (curr_skb == head_skb)
- skb_shinfo(curr_skb)->frag_list = nskb;
- else
- curr_skb->next = nskb;
- curr_skb = nskb;
- head_skb->truesize += nskb->truesize;
- num_skb_frags = 0;
- }
- if (curr_skb != head_skb) {
- head_skb->data_len += len;
- head_skb->len += len;
- head_skb->truesize += truesize;
- }
- offset = buf - page_address(page);
- if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
- put_page(page);
- skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
- len, truesize);
- } else {
- skb_add_rx_frag(curr_skb, num_skb_frags, page,
- offset, len, truesize);
- }
+ curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page,
+ buf, len, truesize);
+ if (!curr_skb)
+ goto err_skb;
}
ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
@@ -1800,38 +2331,17 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash,
skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type);
}
-static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
- void *buf, unsigned int len, void **ctx,
- unsigned int *xdp_xmit,
- struct virtnet_rq_stats *stats)
+static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq,
+ struct sk_buff *skb, u8 flags)
{
- struct net_device *dev = vi->dev;
- struct sk_buff *skb;
struct virtio_net_common_hdr *hdr;
-
- if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
- pr_debug("%s: short packet %i\n", dev->name, len);
- DEV_STATS_INC(dev, rx_length_errors);
- virtnet_rq_free_buf(vi, rq, buf);
- return;
- }
-
- if (vi->mergeable_rx_bufs)
- skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
- stats);
- else if (vi->big_packets)
- skb = receive_big(dev, vi, rq, buf, len, stats);
- else
- skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
-
- if (unlikely(!skb))
- return;
+ struct net_device *dev = vi->dev;
hdr = skb_vnet_common_hdr(skb);
if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
virtio_skb_set_hash(&hdr->hash_v1_hdr, skb);
- if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
+ if (flags & VIRTIO_NET_HDR_F_DATA_VALID)
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
@@ -1855,6 +2365,45 @@ frame_err:
dev_kfree_skb(skb);
}
+static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+ void *buf, unsigned int len, void **ctx,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
+{
+ struct net_device *dev = vi->dev;
+ struct sk_buff *skb;
+ u8 flags;
+
+ if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
+ pr_debug("%s: short packet %i\n", dev->name, len);
+ DEV_STATS_INC(dev, rx_length_errors);
+ virtnet_rq_free_buf(vi, rq, buf);
+ return;
+ }
+
+ /* 1. Save the flags early, as the XDP program might overwrite them.
+ * These flags ensure packets marked as VIRTIO_NET_HDR_F_DATA_VALID
+ * stay valid after XDP processing.
+ * 2. XDP doesn't work with partially checksummed packets (refer to
+ * virtnet_xdp_set()), so packets marked as
+ * VIRTIO_NET_HDR_F_NEEDS_CSUM get dropped during XDP processing.
+ */
+ flags = ((struct virtio_net_common_hdr *)buf)->hdr.flags;
+
+ if (vi->mergeable_rx_bufs)
+ skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
+ stats);
+ else if (vi->big_packets)
+ skb = receive_big(dev, vi, rq, buf, len, stats);
+ else
+ skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
+
+ if (unlikely(!skb))
+ return;
+
+ virtnet_receive_done(vi, rq, skb, flags);
+}
+
/* Unlike mergeable buffers, all buffers are allocated to the
* same size, except for the headroom. For this reason we do
* not need to use mergeable_len_to_ctx here - it is enough
@@ -1881,8 +2430,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- if (rq->do_dma)
- virtnet_rq_unmap(rq, buf, 0);
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -1996,8 +2544,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
if (err < 0) {
- if (rq->do_dma)
- virtnet_rq_unmap(rq, buf, 0);
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
}
@@ -2015,7 +2562,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
gfp_t gfp)
{
int err;
- bool oom;
+
+ if (rq->xsk_pool) {
+ err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp);
+ goto kick;
+ }
do {
if (vi->mergeable_rx_bufs)
@@ -2025,10 +2576,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
else
err = add_recvbuf_small(vi, rq, gfp);
- oom = err == -ENOMEM;
if (err)
break;
} while (rq->vq->num_free);
+
+kick:
if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) {
unsigned long flags;
@@ -2037,7 +2589,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
u64_stats_update_end_irqrestore(&rq->stats.syncp, flags);
}
- return !oom;
+ return err != -ENOMEM;
}
static void skb_recv_done(struct virtqueue *rvq)
@@ -2108,32 +2660,68 @@ static void refill_work(struct work_struct *work)
}
}
-static int virtnet_receive(struct receive_queue *rq, int budget,
- unsigned int *xdp_xmit)
+static int virtnet_receive_xsk_bufs(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ int budget,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
+{
+ unsigned int len;
+ int packets = 0;
+ void *buf;
+
+ while (packets < budget) {
+ buf = virtqueue_get_buf(rq->vq, &len);
+ if (!buf)
+ break;
+
+ virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, stats);
+ packets++;
+ }
+
+ return packets;
+}
+
+static int virtnet_receive_packets(struct virtnet_info *vi,
+ struct receive_queue *rq,
+ int budget,
+ unsigned int *xdp_xmit,
+ struct virtnet_rq_stats *stats)
{
- struct virtnet_info *vi = rq->vq->vdev->priv;
- struct virtnet_rq_stats stats = {};
unsigned int len;
int packets = 0;
void *buf;
- int i;
if (!vi->big_packets || vi->mergeable_rx_bufs) {
void *ctx;
-
while (packets < budget &&
(buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
- receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
+ receive_buf(vi, rq, buf, len, ctx, xdp_xmit, stats);
packets++;
}
} else {
while (packets < budget &&
- (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
- receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
+ (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+ receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats);
packets++;
}
}
+ return packets;
+}
+
+static int virtnet_receive(struct receive_queue *rq, int budget,
+ unsigned int *xdp_xmit)
+{
+ struct virtnet_info *vi = rq->vq->vdev->priv;
+ struct virtnet_rq_stats stats = {};
+ int i, packets;
+
+ if (rq->xsk_pool)
+ packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats);
+ else
+ packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats);
+
if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
spin_lock(&vi->refill_lock);
@@ -2145,7 +2733,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
u64_stats_set(&stats.packets, packets);
u64_stats_update_begin(&rq->stats.syncp);
- for (i = 0; i < VIRTNET_RQ_STATS_LEN; i++) {
+ for (i = 0; i < ARRAY_SIZE(virtnet_rq_stats_desc); i++) {
size_t offset = virtnet_rq_stats_desc[i].offset;
u64_stats_t *item, *src;
@@ -2153,12 +2741,16 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
src = (u64_stats_t *)((u8 *)&stats + offset);
u64_stats_add(item, u64_stats_read(src));
}
+
+ u64_stats_add(&rq->stats.packets, u64_stats_read(&stats.packets));
+ u64_stats_add(&rq->stats.bytes, u64_stats_read(&stats.bytes));
+
u64_stats_update_end(&rq->stats.syncp);
return packets;
}
-static void virtnet_poll_cleantx(struct receive_queue *rq)
+static void virtnet_poll_cleantx(struct receive_queue *rq, int budget)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
unsigned int index = vq2rxq(rq->vq);
@@ -2176,11 +2768,17 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
do {
virtqueue_disable_cb(sq->vq);
- free_old_xmit(sq, true);
+ free_old_xmit(sq, txq, !!budget);
} while (unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
- if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+ if (netif_tx_queue_stopped(txq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ u64_stats_inc(&sq->stats.wake);
+ u64_stats_update_end(&sq->stats.syncp);
+ }
netif_tx_wake_queue(txq);
+ }
__netif_tx_unlock(txq);
}
@@ -2193,12 +2791,13 @@ static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue
if (!rq->packets_in_napi)
return;
- u64_stats_update_begin(&rq->stats.syncp);
+ /* No protection is needed when fetching stats, since the fetcher
+ * and updater of the stats run in the same context
+ */

dim_update_sample(rq->calls,
u64_stats_read(&rq->stats.packets),
u64_stats_read(&rq->stats.bytes),
&cur_sample);
- u64_stats_update_end(&rq->stats.syncp);
net_dim(&rq->dim, cur_sample);
rq->packets_in_napi = 0;
@@ -2214,7 +2813,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
unsigned int xdp_xmit = 0;
bool napi_complete;
- virtnet_poll_cleantx(rq);
+ virtnet_poll_cleantx(rq, budget);
received = virtnet_receive(rq, budget, &xdp_xmit);
rq->packets_in_napi += received;
@@ -2225,6 +2824,10 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
/* Out of packets? */
if (received < budget) {
napi_complete = virtqueue_napi_complete(napi, rq->vq, received);
+ /* Intentionally not taking dim_lock here. This may result in a
+ * spurious net_dim call. But if that happens, virtnet_rx_dim_work
+ * will not act on the scheduled work.
+ */
if (napi_complete && rq->dim_enabled)
virtnet_rx_dim_update(vi, rq);
}
@@ -2264,6 +2867,7 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
if (err < 0)
goto err_xdp_reg_mem_model;
+ netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qp_index));
virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
@@ -2274,6 +2878,13 @@ err_xdp_reg_mem_model:
return err;
}
+static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim)
+{
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+ return;
+ net_dim_work_cancel(dim);
+}
+
static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -2300,7 +2911,7 @@ err_enable_qp:
for (i--; i >= 0; i--) {
virtnet_disable_queue_pair(vi, i);
- cancel_work_sync(&vi->rq[i].dim.work);
+ virtnet_cancel_dim(vi, &vi->rq[i].dim);
}
return err;
@@ -2324,10 +2935,16 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
txq = netdev_get_tx_queue(vi->dev, index);
__netif_tx_lock(txq, raw_smp_processor_id());
virtqueue_disable_cb(sq->vq);
- free_old_xmit(sq, true);
+ free_old_xmit(sq, txq, !!budget);
- if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS)
+ if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) {
+ if (netif_tx_queue_stopped(txq)) {
+ u64_stats_update_begin(&sq->stats.syncp);
+ u64_stats_inc(&sq->stats.wake);
+ u64_stats_update_end(&sq->stats.syncp);
+ }
netif_tx_wake_queue(txq);
+ }
opaque = virtqueue_enable_cb_prepare(sq->vq);
@@ -2352,7 +2969,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
-static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
+static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan)
{
struct virtio_net_hdr_mrg_rxbuf *hdr;
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
@@ -2396,7 +3013,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
return num_sg;
num_sg++;
}
- return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
+ return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
+ skb_to_ptr(skb, orphan), GFP_ATOMIC);
}
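/* A minimal sketch (not part of the patch): skb_to_ptr() is introduced
 * elsewhere in this series; presumably it tags the low bits of the skb
 * pointer so the completion path can tell orphaned skbs (no BQL
 * accounting) from normal ones. Illustration, assuming a hypothetical
 * VIRTIO_ORPHAN_FLAG bit:
 */
static void *skb_to_ptr_sketch(struct sk_buff *skb, bool orphan)
{
	/* the pointer is aligned, so the low bit is free for the flag */
	return (void *)((unsigned long)skb |
			(orphan ? VIRTIO_ORPHAN_FLAG : 0));
}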
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -2406,24 +3024,25 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
struct send_queue *sq = &vi->sq[qnum];
int err;
struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
- bool kick = !netdev_xmit_more();
+ bool xmit_more = netdev_xmit_more();
bool use_napi = sq->napi.weight;
+ bool kick;
/* Free up any pending old buffers before queueing new ones. */
do {
if (use_napi)
virtqueue_disable_cb(sq->vq);
- free_old_xmit(sq, false);
+ free_old_xmit(sq, txq, false);
- } while (use_napi && kick &&
+ } while (use_napi && !xmit_more &&
unlikely(!virtqueue_enable_cb_delayed(sq->vq)));
/* timestamp packet in software */
skb_tx_timestamp(skb);
/* Try to transmit */
- err = xmit_skb(sq, skb);
+ err = xmit_skb(sq, skb, !use_napi);
/* This should not happen! */
if (unlikely(err)) {
@@ -2445,7 +3064,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
check_sq_full_and_disable(vi, dev, sq);
- if (kick || netif_xmit_stopped(txq)) {
+ kick = use_napi ? __netdev_tx_sent_queue(txq, skb->len, xmit_more) :
+ !xmit_more || netif_xmit_stopped(txq);
+ if (kick) {
if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
u64_stats_update_begin(&sq->stats.syncp);
u64_stats_inc(&sq->stats.kicks);
@@ -2456,37 +3077,49 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static int virtnet_rx_resize(struct virtnet_info *vi,
- struct receive_queue *rq, u32 ring_num)
+static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq)
{
bool running = netif_running(vi->dev);
- int err, qindex;
-
- qindex = rq - vi->rq;
if (running) {
napi_disable(&rq->napi);
- cancel_work_sync(&rq->dim.work);
+ virtnet_cancel_dim(vi, &rq->dim);
}
+}
- err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
- if (err)
- netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq)
+{
+ bool running = netif_running(vi->dev);
if (!try_fill_recv(vi, rq, GFP_KERNEL))
schedule_delayed_work(&vi->refill, 0);
if (running)
virtnet_napi_enable(rq->vq, &rq->napi);
+}
+
+static int virtnet_rx_resize(struct virtnet_info *vi,
+ struct receive_queue *rq, u32 ring_num)
+{
+ int err, qindex;
+
+ qindex = rq - vi->rq;
+
+ virtnet_rx_pause(vi, rq);
+
+ err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf);
+ if (err)
+ netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+ virtnet_rx_resume(vi, rq);
return err;
}
-static int virtnet_tx_resize(struct virtnet_info *vi,
- struct send_queue *sq, u32 ring_num)
+static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq)
{
bool running = netif_running(vi->dev);
struct netdev_queue *txq;
- int err, qindex;
+ int qindex;
qindex = sq - vi->sq;
@@ -2507,10 +3140,17 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
netif_stop_subqueue(vi->dev, qindex);
__netif_tx_unlock_bh(txq);
+}
- err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
- if (err)
- netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq)
+{
+ bool running = netif_running(vi->dev);
+ struct netdev_queue *txq;
+ int qindex;
+
+ qindex = sq - vi->sq;
+
+ txq = netdev_get_tx_queue(vi->dev, qindex);
__netif_tx_lock_bh(txq);
sq->reset = false;
@@ -2519,6 +3159,23 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
if (running)
virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+}
+
+static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq,
+ u32 ring_num)
+{
+ int qindex, err;
+
+ qindex = sq - vi->sq;
+
+ virtnet_tx_pause(vi, sq);
+
+ err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+ if (err)
+ netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
+ virtnet_tx_resume(vi, sq);
+
return err;
}
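/* A minimal sketch (not part of the patch): the pause/resume split above
 * turns any queue-quiescing operation into a simple bracket;
 * virtnet_rq_bind_xsk_pool() further down composes the rx pair the same
 * way around virtqueue_reset(). Here "op" stands in for the actual
 * reset/resize call, which takes extra arguments in the real code:
 */
static int virtnet_rx_quiesced_op(struct virtnet_info *vi,
				  struct receive_queue *rq,
				  int (*op)(struct virtqueue *vq))
{
	int err;

	virtnet_rx_pause(vi, rq);
	err = op(rq->vq);	/* e.g. a vring reset or resize */
	virtnet_rx_resume(vi, rq);

	return err;
}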
@@ -2527,16 +3184,19 @@ static int virtnet_tx_resize(struct virtnet_info *vi,
* supported by the hypervisor, as indicated by feature bits, should
* never fail unless improperly formatted.
*/
-static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
- struct scatterlist *out)
+static bool virtnet_send_command_reply(struct virtnet_info *vi, u8 class, u8 cmd,
+ struct scatterlist *out,
+ struct scatterlist *in)
{
- struct scatterlist *sgs[4], hdr, stat;
- unsigned out_num = 0, tmp;
+ struct scatterlist *sgs[5], hdr, stat;
+ u32 out_num = 0, tmp, in_num = 0;
+ bool ok;
int ret;
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
+ mutex_lock(&vi->cvq_lock);
vi->ctrl->status = ~0;
vi->ctrl->hdr.class = class;
vi->ctrl->hdr.cmd = cmd;
@@ -2549,18 +3209,22 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
/* Add return status. */
sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
- sgs[out_num] = &stat;
+ sgs[out_num + in_num++] = &stat;
- BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
- ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+ if (in)
+ sgs[out_num + in_num++] = in;
+
+ BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
+ ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC);
if (ret < 0) {
dev_warn(&vi->vdev->dev,
"Failed to add sgs for command vq: %d\n.", ret);
+ mutex_unlock(&vi->cvq_lock);
return false;
}
if (unlikely(!virtqueue_kick(vi->cvq)))
- return vi->ctrl->status == VIRTIO_NET_OK;
+ goto unlock;
/* Spin for a response, the kick causes an ioport write, trapping
* into the hypervisor, so the request should be handled immediately.
@@ -2571,7 +3235,16 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
cpu_relax();
}
- return vi->ctrl->status == VIRTIO_NET_OK;
+unlock:
+ ok = vi->ctrl->status == VIRTIO_NET_OK;
+ mutex_unlock(&vi->cvq_lock);
+ return ok;
+}
+
+static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
+ struct scatterlist *out)
+{
+ return virtnet_send_command_reply(vi, class, cmd, out, NULL);
}
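/* A usage sketch (not part of the patch): a command that expects a
 * device-written reply passes both an out sg and an in sg; the header
 * and ack status are added by virtnet_send_command_reply() itself.
 * __virtnet_get_hw_stats() below uses exactly this shape:
 */
static bool example_stats_query(struct virtnet_info *vi, void *req,
				int req_size, void *reply, int reply_size)
{
	struct scatterlist sg_out, sg_in;

	sg_init_one(&sg_out, req, req_size);
	sg_init_one(&sg_in, reply, reply_size);

	return virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
					  VIRTIO_NET_CTRL_STATS_GET,
					  &sg_out, &sg_in);
}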
static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -2663,23 +3336,26 @@ static void virtnet_stats(struct net_device *dev,
static void virtnet_ack_link_announce(struct virtnet_info *vi)
{
- rtnl_lock();
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
- rtnl_unlock();
}
-static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
{
+ struct virtio_net_ctrl_mq *mq __free(kfree) = NULL;
struct scatterlist sg;
struct net_device *dev = vi->dev;
if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
return 0;
- vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
- sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
+ mq = kzalloc(sizeof(*mq), GFP_KERNEL);
+ if (!mq)
+ return -ENOMEM;
+
+ mq->virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+ sg_init_one(&sg, mq, sizeof(*mq));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
@@ -2696,16 +3372,6 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
return 0;
}
-static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
-{
- int err;
-
- rtnl_lock();
- err = _virtnet_set_queues(vi, queue_pairs);
- rtnl_unlock();
- return err;
-}
-
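/* A minimal sketch (not part of the patch) of the pattern used throughout:
 * the __free(kfree) annotation comes from <linux/cleanup.h>; the buffer is
 * kfree()d automatically when the variable goes out of scope, so the error
 * paths need no explicit cleanup.
 */
static int example_scoped_alloc(struct virtnet_info *vi, u16 val)
{
	__virtio16 *buf __free(kfree) = NULL;
	struct scatterlist sg;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;		/* nothing was allocated */

	*buf = cpu_to_virtio16(vi->vdev, val);
	sg_init_one(&sg, buf, sizeof(*buf));
	/* ... send the command; buf is freed automatically on return */
	return 0;
}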
static int virtnet_close(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
@@ -2718,7 +3384,7 @@ static int virtnet_close(struct net_device *dev)
for (i = 0; i < vi->max_queue_pairs; i++) {
virtnet_disable_queue_pair(vi, i);
- cancel_work_sync(&vi->rq[i].dim.work);
+ virtnet_cancel_dim(vi, &vi->rq[i].dim);
}
return 0;
@@ -2728,6 +3394,7 @@ static void virtnet_rx_mode_work(struct work_struct *work)
{
struct virtnet_info *vi =
container_of(work, struct virtnet_info, rx_mode_work);
+ u8 *promisc_allmulti __free(kfree) = NULL;
struct net_device *dev = vi->dev;
struct scatterlist sg[2];
struct virtio_net_ctrl_mac *mac_data;
@@ -2741,24 +3408,29 @@ static void virtnet_rx_mode_work(struct work_struct *work)
if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
return;
- rtnl_lock();
+ promisc_allmulti = kzalloc(sizeof(*promisc_allmulti), GFP_KERNEL);
+ if (!promisc_allmulti) {
+ dev_warn(&dev->dev, "Failed to set RX mode, no memory.\n");
+ return;
+ }
- vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
- vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+ rtnl_lock();
- sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
+ *promisc_allmulti = !!(dev->flags & IFF_PROMISC);
+ sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_PROMISC, sg))
dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
- vi->ctrl->promisc ? "en" : "dis");
+ *promisc_allmulti ? "en" : "dis");
- sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
+ *promisc_allmulti = !!(dev->flags & IFF_ALLMULTI);
+ sg_init_one(sg, promisc_allmulti, sizeof(*promisc_allmulti));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
- vi->ctrl->allmulti ? "en" : "dis");
+ *promisc_allmulti ? "en" : "dis");
netif_addr_lock_bh(dev);
@@ -2819,10 +3491,15 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
+ __virtio16 *_vid __free(kfree) = NULL;
struct scatterlist sg;
- vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
- sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+ _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+ if (!_vid)
+ return -ENOMEM;
+
+ *_vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, _vid, sizeof(*_vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg))
@@ -2834,10 +3511,15 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
__be16 proto, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
+ __virtio16 *_vid __free(kfree) = NULL;
struct scatterlist sg;
- vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
- sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
+ _vid = kzalloc(sizeof(*_vid), GFP_KERNEL);
+ if (!_vid)
+ return -ENOMEM;
+
+ *_vid = cpu_to_virtio16(vi->vdev, vid);
+ sg_init_one(&sg, _vid, sizeof(*_vid));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg))
@@ -2950,12 +3632,17 @@ static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
u16 vqn, u32 max_usecs, u32 max_packets)
{
+ struct virtio_net_ctrl_coal_vq *coal_vq __free(kfree) = NULL;
struct scatterlist sgs;
- vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
- vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
- vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
- sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
+ coal_vq = kzalloc(sizeof(*coal_vq), GFP_KERNEL);
+ if (!coal_vq)
+ return -ENOMEM;
+
+ coal_vq->vqn = cpu_to_le16(vqn);
+ coal_vq->coal.max_usecs = cpu_to_le32(max_usecs);
+ coal_vq->coal.max_packets = cpu_to_le32(max_packets);
+ sg_init_one(&sgs, coal_vq, sizeof(*coal_vq));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET,
@@ -2971,6 +3658,9 @@ static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
{
int err;
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+ return -EOPNOTSUPP;
+
err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
max_usecs, max_packets);
if (err)
@@ -2988,6 +3678,9 @@ static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi,
{
int err;
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+ return -EOPNOTSUPP;
+
err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
max_usecs, max_packets);
if (err)
@@ -3056,7 +3749,11 @@ static int virtnet_set_ringparam(struct net_device *dev,
err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i,
vi->intr_coal_tx.max_usecs,
vi->intr_coal_tx.max_packets);
- if (err)
+
+ /* Don't break the tx resize action if the vq coalescing is not
+ * supported. The same is true for rx resize below.
+ */
+ if (err && err != -EOPNOTSUPP)
return err;
}
@@ -3066,10 +3763,12 @@ static int virtnet_set_ringparam(struct net_device *dev,
return err;
/* The reason is same as the transmit virtqueue reset */
+ mutex_lock(&vi->rq[i].dim_lock);
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i,
vi->intr_coal_rx.max_usecs,
vi->intr_coal_rx.max_packets);
- if (err)
+ mutex_unlock(&vi->rq[i].dim_lock);
+ if (err && err != -EOPNOTSUPP)
return err;
}
}
@@ -3087,25 +3786,29 @@ static bool virtnet_commit_rss_command(struct virtnet_info *vi)
sg_init_table(sgs, 4);
sg_buf_size = offsetof(struct virtio_net_ctrl_rss, indirection_table);
- sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
+ sg_set_buf(&sgs[0], &vi->rss, sg_buf_size);
- sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
- sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
+ sg_buf_size = sizeof(uint16_t) * (vi->rss.indirection_table_mask + 1);
+ sg_set_buf(&sgs[1], vi->rss.indirection_table, sg_buf_size);
sg_buf_size = offsetof(struct virtio_net_ctrl_rss, key)
- offsetof(struct virtio_net_ctrl_rss, max_tx_vq);
- sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
+ sg_set_buf(&sgs[2], &vi->rss.max_tx_vq, sg_buf_size);
sg_buf_size = vi->rss_key_size;
- sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
+ sg_set_buf(&sgs[3], vi->rss.key, sg_buf_size);
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
- : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs)) {
- dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
- return false;
- }
+ : VIRTIO_NET_CTRL_MQ_HASH_CONFIG, sgs))
+ goto err;
+
return true;
+
+err:
+ dev_warn(&dev->dev, "VIRTIONET issue with committing RSS sgs\n");
+ return false;
}
static void virtnet_init_default_rss(struct virtnet_info *vi)
@@ -3113,21 +3816,21 @@ static void virtnet_init_default_rss(struct virtnet_info *vi)
u32 indir_val = 0;
int i = 0;
- vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
+ vi->rss.hash_types = vi->rss_hash_types_supported;
vi->rss_hash_types_saved = vi->rss_hash_types_supported;
- vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
+ vi->rss.indirection_table_mask = vi->rss_indir_table_size
? vi->rss_indir_table_size - 1 : 0;
- vi->ctrl->rss.unclassified_queue = 0;
+ vi->rss.unclassified_queue = 0;
for (; i < vi->rss_indir_table_size; ++i) {
indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
- vi->ctrl->rss.indirection_table[i] = indir_val;
+ vi->rss.indirection_table[i] = indir_val;
}
- vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
- vi->ctrl->rss.hash_key_length = vi->rss_key_size;
+ vi->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
+ vi->rss.hash_key_length = vi->rss_key_size;
- netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
+ netdev_rss_key_fill(vi->rss.key, vi->rss_key_size);
}
static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
@@ -3238,7 +3941,7 @@ static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *
if (new_hashtypes != vi->rss_hash_types_saved) {
vi->rss_hash_types_saved = new_hashtypes;
- vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss.hash_types = vi->rss_hash_types_saved;
if (vi->dev->features & NETIF_F_RXHASH)
return virtnet_commit_rss_command(vi);
}
@@ -3283,7 +3986,7 @@ static int virtnet_set_channels(struct net_device *dev,
return -EINVAL;
cpus_read_lock();
- err = _virtnet_set_queues(vi, queue_pairs);
+ err = virtnet_set_queues(vi, queue_pairs);
if (err) {
cpus_read_unlock();
goto err;
@@ -3297,25 +4000,654 @@ static int virtnet_set_channels(struct net_device *dev,
return err;
}
+static void virtnet_stats_sprintf(u8 **p, const char *fmt, const char *noq_fmt,
+ int num, int qid, const struct virtnet_stat_desc *desc)
+{
+ int i;
+
+ if (qid < 0) {
+ for (i = 0; i < num; ++i)
+ ethtool_sprintf(p, noq_fmt, desc[i].desc);
+ } else {
+ for (i = 0; i < num; ++i)
+ ethtool_sprintf(p, fmt, qid, desc[i].desc);
+ }
+}
+
+/* qid == -1: for the rx/tx queue total fields */
+static void virtnet_get_stats_string(struct virtnet_info *vi, int type, int qid, u8 **data)
+{
+ const struct virtnet_stat_desc *desc;
+ const char *fmt, *noq_fmt;
+ u8 *p = *data;
+ u32 num;
+
+ if (type == VIRTNET_Q_TYPE_CQ && qid >= 0) {
+ noq_fmt = "cq_hw_%s";
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
+ desc = &virtnet_stats_cvq_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_cvq_desc);
+
+ virtnet_stats_sprintf(&p, NULL, noq_fmt, num, -1, desc);
+ }
+ }
+
+ if (type == VIRTNET_Q_TYPE_RX) {
+ fmt = "rx%u_%s";
+ noq_fmt = "rx_%s";
+
+ desc = &virtnet_rq_stats_desc[0];
+ num = ARRAY_SIZE(virtnet_rq_stats_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+
+ fmt = "rx%u_hw_%s";
+ noq_fmt = "rx_hw_%s";
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
+ desc = &virtnet_stats_rx_basic_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
+ desc = &virtnet_stats_rx_csum_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
+ desc = &virtnet_stats_rx_speed_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+ }
+ }
+
+ if (type == VIRTNET_Q_TYPE_TX) {
+ fmt = "tx%u_%s";
+ noq_fmt = "tx_%s";
+
+ desc = &virtnet_sq_stats_desc[0];
+ num = ARRAY_SIZE(virtnet_sq_stats_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+
+ fmt = "tx%u_hw_%s";
+ noq_fmt = "tx_hw_%s";
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
+ desc = &virtnet_stats_tx_basic_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
+ desc = &virtnet_stats_tx_gso_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
+ desc = &virtnet_stats_tx_speed_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
+
+ virtnet_stats_sprintf(&p, fmt, noq_fmt, num, qid, desc);
+ }
+ }
+
+ *data = p;
+}
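/* For reference (a hedged example; leaf names depend on the desc tables),
 * with e.g. RX_BASIC device stats supported the strings generated above
 * come out as:
 *   totals:    "rx_packets", "rx_hw_<desc>", "tx_packets", ...
 *   cvq:       "cq_hw_<desc>"
 *   per queue: "rx0_packets", "rx0_hw_<desc>", "tx0_packets", ...
 */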
+
+struct virtnet_stats_ctx {
+ /* The stats are written to qstats or ethtool -S */
+ bool to_qstat;
+
+ /* Used to calculate the offset inside the output buffer. */
+ u32 desc_num[3];
+
+ /* The actual supported stat types. */
+ u32 bitmap[3];
+
+ /* Used to calculate the reply buffer size. */
+ u32 size[3];
+
+ /* Record the output buffer. */
+ u64 *data;
+};
+
+static void virtnet_stats_ctx_init(struct virtnet_info *vi,
+ struct virtnet_stats_ctx *ctx,
+ u64 *data, bool to_qstat)
+{
+ u32 queue_type;
+
+ ctx->data = data;
+ ctx->to_qstat = to_qstat;
+
+ if (to_qstat) {
+ ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
+ ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
+
+ queue_type = VIRTNET_Q_TYPE_RX;
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_GSO;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_gso);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed);
+ }
+
+ queue_type = VIRTNET_Q_TYPE_TX;
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_CSUM;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_csum);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed);
+ }
+
+ return;
+ }
+
+ ctx->desc_num[VIRTNET_Q_TYPE_RX] = ARRAY_SIZE(virtnet_rq_stats_desc);
+ ctx->desc_num[VIRTNET_Q_TYPE_TX] = ARRAY_SIZE(virtnet_sq_stats_desc);
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_CVQ) {
+ queue_type = VIRTNET_Q_TYPE_CQ;
+
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_CVQ;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_cvq_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_cvq);
+ }
+
+ queue_type = VIRTNET_Q_TYPE_RX;
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_BASIC;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_basic_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_basic);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_CSUM;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_csum_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_csum);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_RX_SPEED;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_rx_speed_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_rx_speed);
+ }
+
+ queue_type = VIRTNET_Q_TYPE_TX;
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_BASIC;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_basic_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_basic);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_GSO;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_gso_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_gso);
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
+ ctx->bitmap[queue_type] |= VIRTIO_NET_STATS_TYPE_TX_SPEED;
+ ctx->desc_num[queue_type] += ARRAY_SIZE(virtnet_stats_tx_speed_desc);
+ ctx->size[queue_type] += sizeof(struct virtio_net_stats_tx_speed);
+ }
+}
+
+/* stats_sum_queue - Calculate the sum of the same fields across sq or rq.
+ * @sum: where to store the summed values
+ * @num: number of fields per queue
+ * @q_value: pointer to the first queue's fields
+ * @q_num: number of queues
+ */
+static void stats_sum_queue(u64 *sum, u32 num, u64 *q_value, u32 q_num)
+{
+ u32 step = num;
+ int i, j;
+ u64 *p;
+
+ for (i = 0; i < num; ++i) {
+ p = sum + i;
+ *p = 0;
+
+ for (j = 0; j < q_num; ++j)
+ *p += *(q_value + i + j * step);
+ }
+}
+
+static void virtnet_fill_total_fields(struct virtnet_info *vi,
+ struct virtnet_stats_ctx *ctx)
+{
+ u64 *data, *first_rx_q, *first_tx_q;
+ u32 num_cq, num_rx, num_tx;
+
+ num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
+ num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
+ num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
+
+ first_rx_q = ctx->data + num_rx + num_tx + num_cq;
+ first_tx_q = first_rx_q + vi->curr_queue_pairs * num_rx;
+
+ data = ctx->data;
+
+ stats_sum_queue(data, num_rx, first_rx_q, vi->curr_queue_pairs);
+
+ data = ctx->data + num_rx;
+
+ stats_sum_queue(data, num_tx, first_tx_q, vi->curr_queue_pairs);
+}
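/* A worked example of the layout implied by the code above: for
 * curr_queue_pairs == 2 the output buffer at ctx->data is ordered as
 *
 *   [rx totals: num_rx] [tx totals: num_tx] [cq: num_cq]
 *   [rx0: num_rx] [rx1: num_rx] [tx0: num_tx] [tx1: num_tx]
 *
 * so first_rx_q = data + num_rx + num_tx + num_cq and
 * first_tx_q = first_rx_q + 2 * num_rx, as computed above.
 */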
+
+static void virtnet_fill_stats_qstat(struct virtnet_info *vi, u32 qid,
+ struct virtnet_stats_ctx *ctx,
+ const u8 *base, bool drv_stats, u8 reply_type)
+{
+ const struct virtnet_stat_desc *desc;
+ const u64_stats_t *v_stat;
+ u64 offset, bitmap;
+ const __le64 *v;
+ u32 queue_type;
+ int i, num;
+
+ queue_type = vq_type(vi, qid);
+ bitmap = ctx->bitmap[queue_type];
+
+ if (drv_stats) {
+ if (queue_type == VIRTNET_Q_TYPE_RX) {
+ desc = &virtnet_rq_stats_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_rq_stats_desc_qstat);
+ } else {
+ desc = &virtnet_sq_stats_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_sq_stats_desc_qstat);
+ }
+
+ for (i = 0; i < num; ++i) {
+ offset = desc[i].qstat_offset / sizeof(*ctx->data);
+ v_stat = (const u64_stats_t *)(base + desc[i].offset);
+ ctx->data[offset] = u64_stats_read(v_stat);
+ }
+ return;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
+ desc = &virtnet_stats_rx_basic_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_basic_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
+ desc = &virtnet_stats_rx_csum_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_csum_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
+ desc = &virtnet_stats_rx_gso_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_gso_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_GSO)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
+ desc = &virtnet_stats_rx_speed_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_speed_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
+ desc = &virtnet_stats_tx_basic_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_basic_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
+ desc = &virtnet_stats_tx_csum_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_csum_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_CSUM)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
+ desc = &virtnet_stats_tx_gso_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_gso_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
+ goto found;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
+ desc = &virtnet_stats_tx_speed_desc_qstat[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_speed_desc_qstat);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
+ goto found;
+ }
+
+ return;
+
+found:
+ for (i = 0; i < num; ++i) {
+ offset = desc[i].qstat_offset / sizeof(*ctx->data);
+ v = (const __le64 *)(base + desc[i].offset);
+ ctx->data[offset] = le64_to_cpu(*v);
+ }
+}
+
+/* virtnet_fill_stats - copy the stats to qstats or ethtool -S
+ * The stats source is either the device or the driver.
+ *
+ * @vi: virtio net info
+ * @qid: the vq id
+ * @ctx: stats ctx (initialized by virtnet_stats_ctx_init())
+ * @base: pointer to the device reply or the driver stats structure
+ * @drv_stats: true if @base points at driver stats, false for a device reply
+ * @reply_type: the type of the device reply (must be zero if drv_stats is true)
+ */
+static void virtnet_fill_stats(struct virtnet_info *vi, u32 qid,
+ struct virtnet_stats_ctx *ctx,
+ const u8 *base, bool drv_stats, u8 reply_type)
+{
+ u32 queue_type, num_rx, num_tx, num_cq;
+ const struct virtnet_stat_desc *desc;
+ const u64_stats_t *v_stat;
+ u64 offset, bitmap;
+ const __le64 *v;
+ int i, num;
+
+ if (ctx->to_qstat)
+ return virtnet_fill_stats_qstat(vi, qid, ctx, base, drv_stats, reply_type);
+
+ num_cq = ctx->desc_num[VIRTNET_Q_TYPE_CQ];
+ num_rx = ctx->desc_num[VIRTNET_Q_TYPE_RX];
+ num_tx = ctx->desc_num[VIRTNET_Q_TYPE_TX];
+
+ queue_type = vq_type(vi, qid);
+ bitmap = ctx->bitmap[queue_type];
+
+ /* skip the total fields of pairs */
+ offset = num_rx + num_tx;
+
+ if (queue_type == VIRTNET_Q_TYPE_TX) {
+ offset += num_cq + num_rx * vi->curr_queue_pairs + num_tx * (qid / 2);
+
+ num = ARRAY_SIZE(virtnet_sq_stats_desc);
+ if (drv_stats) {
+ desc = &virtnet_sq_stats_desc[0];
+ goto drv_stats;
+ }
+
+ offset += num;
+
+ } else if (queue_type == VIRTNET_Q_TYPE_RX) {
+ offset += num_cq + num_rx * (qid / 2);
+
+ num = ARRAY_SIZE(virtnet_rq_stats_desc);
+ if (drv_stats) {
+ desc = &virtnet_rq_stats_desc[0];
+ goto drv_stats;
+ }
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_CVQ) {
+ desc = &virtnet_stats_cvq_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_cvq_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_CVQ)
+ goto found;
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
+ desc = &virtnet_stats_rx_basic_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_basic_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_BASIC)
+ goto found;
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
+ desc = &virtnet_stats_rx_csum_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_csum_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_CSUM)
+ goto found;
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_RX_SPEED) {
+ desc = &virtnet_stats_rx_speed_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_rx_speed_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_RX_SPEED)
+ goto found;
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
+ desc = &virtnet_stats_tx_basic_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_basic_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_BASIC)
+ goto found;
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
+ desc = &virtnet_stats_tx_gso_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_gso_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_GSO)
+ goto found;
+
+ offset += num;
+ }
+
+ if (bitmap & VIRTIO_NET_STATS_TYPE_TX_SPEED) {
+ desc = &virtnet_stats_tx_speed_desc[0];
+ num = ARRAY_SIZE(virtnet_stats_tx_speed_desc);
+ if (reply_type == VIRTIO_NET_STATS_TYPE_REPLY_TX_SPEED)
+ goto found;
+
+ offset += num;
+ }
+
+ return;
+
+found:
+ for (i = 0; i < num; ++i) {
+ v = (const __le64 *)(base + desc[i].offset);
+ ctx->data[offset + i] = le64_to_cpu(*v);
+ }
+
+ return;
+
+drv_stats:
+ for (i = 0; i < num; ++i) {
+ v_stat = (const u64_stats_t *)(base + desc[i].offset);
+ ctx->data[offset + i] = u64_stats_read(v_stat);
+ }
+}
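/* Note: within one queue's block the driver stats come first, followed by
 * the device-reply groups in the fixed order checked above (CVQ, RX_BASIC,
 * RX_CSUM, RX_SPEED, TX_BASIC, TX_GSO, TX_SPEED); this must match the
 * string order emitted by virtnet_get_stats_string(), since ethtool pairs
 * names and values by position.
 */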
+
+static int __virtnet_get_hw_stats(struct virtnet_info *vi,
+ struct virtnet_stats_ctx *ctx,
+ struct virtio_net_ctrl_queue_stats *req,
+ int req_size, void *reply, int res_size)
+{
+ struct virtio_net_stats_reply_hdr *hdr;
+ struct scatterlist sgs_in, sgs_out;
+ void *p;
+ u32 qid;
+ int ok;
+
+ sg_init_one(&sgs_out, req, req_size);
+ sg_init_one(&sgs_in, reply, res_size);
+
+ ok = virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
+ VIRTIO_NET_CTRL_STATS_GET,
+ &sgs_out, &sgs_in);
+
+ if (!ok)
+ return ok;
+
+ for (p = reply; p - reply < res_size; p += le16_to_cpu(hdr->size)) {
+ hdr = p;
+ qid = le16_to_cpu(hdr->vq_index);
+ virtnet_fill_stats(vi, qid, ctx, p, false, hdr->type);
+ }
+
+ return 0;
+}
+
+static void virtnet_make_stat_req(struct virtnet_info *vi,
+ struct virtnet_stats_ctx *ctx,
+ struct virtio_net_ctrl_queue_stats *req,
+ int qid, int *idx)
+{
+ int qtype = vq_type(vi, qid);
+ u64 bitmap = ctx->bitmap[qtype];
+
+ if (!bitmap)
+ return;
+
+ req->stats[*idx].vq_index = cpu_to_le16(qid);
+ req->stats[*idx].types_bitmap[0] = cpu_to_le64(bitmap);
+ *idx += 1;
+}
+
+/* qid: -1: get stats of all vqs.
+ * >= 0: get the stats for the specified vq. This must not be the cvq.
+ */
+static int virtnet_get_hw_stats(struct virtnet_info *vi,
+ struct virtnet_stats_ctx *ctx, int qid)
+{
+ int qnum, i, j, res_size, qtype, last_vq, first_vq;
+ struct virtio_net_ctrl_queue_stats *req;
+ bool enable_cvq;
+ void *reply;
+ int ok;
+
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS))
+ return 0;
+
+ if (qid == -1) {
+ last_vq = vi->curr_queue_pairs * 2 - 1;
+ first_vq = 0;
+ enable_cvq = true;
+ } else {
+ last_vq = qid;
+ first_vq = qid;
+ enable_cvq = false;
+ }
+
+ qnum = 0;
+ res_size = 0;
+ for (i = first_vq; i <= last_vq ; ++i) {
+ qtype = vq_type(vi, i);
+ if (ctx->bitmap[qtype]) {
+ ++qnum;
+ res_size += ctx->size[qtype];
+ }
+ }
+
+ if (enable_cvq && ctx->bitmap[VIRTNET_Q_TYPE_CQ]) {
+ res_size += ctx->size[VIRTNET_Q_TYPE_CQ];
+ qnum += 1;
+ }
+
+ req = kcalloc(qnum, sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ reply = kmalloc(res_size, GFP_KERNEL);
+ if (!reply) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ j = 0;
+ for (i = first_vq; i <= last_vq ; ++i)
+ virtnet_make_stat_req(vi, ctx, req, i, &j);
+
+ if (enable_cvq)
+ virtnet_make_stat_req(vi, ctx, req, vi->max_queue_pairs * 2, &j);
+
+ ok = __virtnet_get_hw_stats(vi, ctx, req, sizeof(*req) * j, reply, res_size);
+
+ kfree(req);
+ kfree(reply);
+
+ return ok;
+}
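/* A worked example (hedged) of the sizing above: for qid == -1 with two
 * queue pairs and RX_BASIC plus TX_BASIC supported, the request holds one
 * entry per data vq (0..3) and res_size sums the matching per-type reply
 * struct sizes; a cvq entry (vq index max_queue_pairs * 2) is appended
 * only when CVQ stats are also supported.
 */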
+
static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct virtnet_info *vi = netdev_priv(dev);
- unsigned int i, j;
+ unsigned int i;
u8 *p = data;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++)
- ethtool_sprintf(&p, "rx_queue_%u_%s", i,
- virtnet_rq_stats_desc[j].desc);
- }
+ /* Generate the total field names. */
+ virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, -1, &p);
+ virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, -1, &p);
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++)
- ethtool_sprintf(&p, "tx_queue_%u_%s", i,
- virtnet_sq_stats_desc[j].desc);
- }
+ virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_CQ, 0, &p);
+
+ for (i = 0; i < vi->curr_queue_pairs; ++i)
+ virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_RX, i, &p);
+
+ for (i = 0; i < vi->curr_queue_pairs; ++i)
+ virtnet_get_stats_string(vi, VIRTNET_Q_TYPE_TX, i, &p);
break;
}
}
@@ -3323,11 +4655,17 @@ static void virtnet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static int virtnet_get_sset_count(struct net_device *dev, int sset)
{
struct virtnet_info *vi = netdev_priv(dev);
+ struct virtnet_stats_ctx ctx = {0};
+ u32 pair_count;
switch (sset) {
case ETH_SS_STATS:
- return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
- VIRTNET_SQ_STATS_LEN);
+ virtnet_stats_ctx_init(vi, &ctx, NULL, false);
+
+ pair_count = ctx.desc_num[VIRTNET_Q_TYPE_RX] + ctx.desc_num[VIRTNET_Q_TYPE_TX];
+
+ return pair_count + ctx.desc_num[VIRTNET_Q_TYPE_CQ] +
+ vi->curr_queue_pairs * pair_count;
default:
return -EOPNOTSUPP;
}
@@ -3337,40 +4675,32 @@ static void virtnet_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct virtnet_info *vi = netdev_priv(dev);
- unsigned int idx = 0, start, i, j;
+ struct virtnet_stats_ctx ctx = {0};
+ unsigned int start, i;
const u8 *stats_base;
- const u64_stats_t *p;
- size_t offset;
+
+ virtnet_stats_ctx_init(vi, &ctx, data, false);
+ if (virtnet_get_hw_stats(vi, &ctx, -1))
+ dev_warn(&vi->dev->dev, "Failed to get hw stats.\n");
for (i = 0; i < vi->curr_queue_pairs; i++) {
struct receive_queue *rq = &vi->rq[i];
+ struct send_queue *sq = &vi->sq[i];
stats_base = (const u8 *)&rq->stats;
do {
start = u64_stats_fetch_begin(&rq->stats.syncp);
- for (j = 0; j < VIRTNET_RQ_STATS_LEN; j++) {
- offset = virtnet_rq_stats_desc[j].offset;
- p = (const u64_stats_t *)(stats_base + offset);
- data[idx + j] = u64_stats_read(p);
- }
+ virtnet_fill_stats(vi, i * 2, &ctx, stats_base, true, 0);
} while (u64_stats_fetch_retry(&rq->stats.syncp, start));
- idx += VIRTNET_RQ_STATS_LEN;
- }
-
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- struct send_queue *sq = &vi->sq[i];
stats_base = (const u8 *)&sq->stats;
do {
start = u64_stats_fetch_begin(&sq->stats.syncp);
- for (j = 0; j < VIRTNET_SQ_STATS_LEN; j++) {
- offset = virtnet_sq_stats_desc[j].offset;
- p = (const u64_stats_t *)(stats_base + offset);
- data[idx + j] = u64_stats_read(p);
- }
+ virtnet_fill_stats(vi, i * 2 + 1, &ctx, stats_base, true, 0);
} while (u64_stats_fetch_retry(&sq->stats.syncp, start));
- idx += VIRTNET_SQ_STATS_LEN;
}
+
+ virtnet_fill_total_fields(vi, &ctx);
}
static void virtnet_get_channels(struct net_device *dev,
@@ -3410,12 +4740,17 @@ static int virtnet_get_link_ksettings(struct net_device *dev,
static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
+ struct virtio_net_ctrl_coal_tx *coal_tx __free(kfree) = NULL;
struct scatterlist sgs_tx;
int i;
- vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
- vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
- sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
+ coal_tx = kzalloc(sizeof(*coal_tx), GFP_KERNEL);
+ if (!coal_tx)
+ return -ENOMEM;
+
+ coal_tx->tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
+ coal_tx->tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
+ sg_init_one(&sgs_tx, coal_tx, sizeof(*coal_tx));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_TX_SET,
@@ -3435,6 +4770,7 @@ static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi,
static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
struct ethtool_coalesce *ec)
{
+ struct virtio_net_ctrl_coal_rx *coal_rx __free(kfree) = NULL;
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
struct scatterlist sgs_rx;
int i;
@@ -3448,24 +4784,34 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
if (rx_ctrl_dim_on && !vi->rx_dim_enabled) {
vi->rx_dim_enabled = true;
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ mutex_lock(&vi->rq[i].dim_lock);
vi->rq[i].dim_enabled = true;
+ mutex_unlock(&vi->rq[i].dim_lock);
+ }
return 0;
}
+ coal_rx = kzalloc(sizeof(*coal_rx), GFP_KERNEL);
+ if (!coal_rx)
+ return -ENOMEM;
+
if (!rx_ctrl_dim_on && vi->rx_dim_enabled) {
vi->rx_dim_enabled = false;
- for (i = 0; i < vi->max_queue_pairs; i++)
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ mutex_lock(&vi->rq[i].dim_lock);
vi->rq[i].dim_enabled = false;
+ mutex_unlock(&vi->rq[i].dim_lock);
+ }
}
/* Since the per-queue coalescing params can be set,
* we need to apply the new global params even if they
* are not updated.
*/
- vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
- vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
- sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
+ coal_rx->rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
+ coal_rx->rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
+ sg_init_one(&sgs_rx, coal_rx, sizeof(*coal_rx));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
VIRTIO_NET_CTRL_NOTF_COAL_RX_SET,
@@ -3475,8 +4821,10 @@ static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi,
vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
for (i = 0; i < vi->max_queue_pairs; i++) {
+ mutex_lock(&vi->rq[i].dim_lock);
vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
+ mutex_unlock(&vi->rq[i].dim_lock);
}
return 0;
@@ -3503,19 +4851,24 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
u16 queue)
{
bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce;
- bool cur_rx_dim = vi->rq[queue].dim_enabled;
u32 max_usecs, max_packets;
+ bool cur_rx_dim;
int err;
+ mutex_lock(&vi->rq[queue].dim_lock);
+ cur_rx_dim = vi->rq[queue].dim_enabled;
max_usecs = vi->rq[queue].intr_coal.max_usecs;
max_packets = vi->rq[queue].intr_coal.max_packets;
if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs ||
- ec->rx_max_coalesced_frames != max_packets))
+ ec->rx_max_coalesced_frames != max_packets)) {
+ mutex_unlock(&vi->rq[queue].dim_lock);
return -EINVAL;
+ }
if (rx_ctrl_dim_on && !cur_rx_dim) {
vi->rq[queue].dim_enabled = true;
+ mutex_unlock(&vi->rq[queue].dim_lock);
return 0;
}
@@ -3528,10 +4881,8 @@ static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi,
err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue,
ec->rx_coalesce_usecs,
ec->rx_max_coalesced_frames);
- if (err)
- return err;
-
- return 0;
+ mutex_unlock(&vi->rq[queue].dim_lock);
+ return err;
}
static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
@@ -3561,39 +4912,27 @@ static void virtnet_rx_dim_work(struct work_struct *work)
struct virtnet_info *vi = rq->vq->vdev->priv;
struct net_device *dev = vi->dev;
struct dim_cq_moder update_moder;
- int i, qnum, err;
+ int qnum, err;
- if (!rtnl_trylock())
- return;
+ qnum = rq - vi->rq;
- /* Each rxq's work is queued by "net_dim()->schedule_work()"
- * in response to NAPI traffic changes. Note that dim->profile_ix
- * for each rxq is updated prior to the queuing action.
- * So we only need to traverse and update profiles for all rxqs
- * in the work which is holding rtnl_lock.
- */
- for (i = 0; i < vi->curr_queue_pairs; i++) {
- rq = &vi->rq[i];
- dim = &rq->dim;
- qnum = rq - vi->rq;
-
- if (!rq->dim_enabled)
- continue;
-
- update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
- if (update_moder.usec != rq->intr_coal.max_usecs ||
- update_moder.pkts != rq->intr_coal.max_packets) {
- err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
- update_moder.usec,
- update_moder.pkts);
- if (err)
- pr_debug("%s: Failed to send dim parameters on rxq%d\n",
- dev->name, qnum);
- dim->state = DIM_START_MEASURE;
- }
- }
+ mutex_lock(&rq->dim_lock);
+ if (!rq->dim_enabled)
+ goto out;
- rtnl_unlock();
+ update_moder = net_dim_get_rx_irq_moder(dev, dim);
+ if (update_moder.usec != rq->intr_coal.max_usecs ||
+ update_moder.pkts != rq->intr_coal.max_packets) {
+ err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum,
+ update_moder.usec,
+ update_moder.pkts);
+ if (err)
+ pr_debug("%s: Failed to send dim parameters on rxq%d\n",
+ dev->name, qnum);
+ }
+out:
+ dim->state = DIM_START_MEASURE;
+ mutex_unlock(&rq->dim_lock);
}
static int virtnet_coal_params_supported(struct ethtool_coalesce *ec)
@@ -3731,11 +5070,13 @@ static int virtnet_get_per_queue_coalesce(struct net_device *dev,
return -EINVAL;
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
+ mutex_lock(&vi->rq[queue].dim_lock);
ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled;
+ mutex_unlock(&vi->rq[queue].dim_lock);
} else {
ec->rx_max_coalesced_frames = 1;
@@ -3791,11 +5132,11 @@ static int virtnet_get_rxfh(struct net_device *dev,
if (rxfh->indir) {
for (i = 0; i < vi->rss_indir_table_size; ++i)
- rxfh->indir[i] = vi->ctrl->rss.indirection_table[i];
+ rxfh->indir[i] = vi->rss.indirection_table[i];
}
if (rxfh->key)
- memcpy(rxfh->key, vi->ctrl->rss.key, vi->rss_key_size);
+ memcpy(rxfh->key, vi->rss.key, vi->rss_key_size);
rxfh->hfunc = ETH_RSS_HASH_TOP;
@@ -3819,7 +5160,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
return -EOPNOTSUPP;
for (i = 0; i < vi->rss_indir_table_size; ++i)
- vi->ctrl->rss.indirection_table[i] = rxfh->indir[i];
+ vi->rss.indirection_table[i] = rxfh->indir[i];
update = true;
}
@@ -3831,7 +5172,7 @@ static int virtnet_set_rxfh(struct net_device *dev,
if (!vi->has_rss && !vi->has_rss_hash_report)
return -EOPNOTSUPP;
- memcpy(vi->ctrl->rss.key, rxfh->key, vi->rss_key_size);
+ memcpy(vi->rss.key, rxfh->key, vi->rss_key_size);
update = true;
}
@@ -3905,6 +5246,97 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
.set_rxnfc = virtnet_set_rxnfc,
};
+static void virtnet_get_queue_stats_rx(struct net_device *dev, int i,
+ struct netdev_queue_stats_rx *stats)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct receive_queue *rq = &vi->rq[i];
+ struct virtnet_stats_ctx ctx = {0};
+
+ virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
+
+ virtnet_get_hw_stats(vi, &ctx, i * 2);
+ virtnet_fill_stats(vi, i * 2, &ctx, (void *)&rq->stats, true, 0);
+}
+
+static void virtnet_get_queue_stats_tx(struct net_device *dev, int i,
+ struct netdev_queue_stats_tx *stats)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct send_queue *sq = &vi->sq[i];
+ struct virtnet_stats_ctx ctx = {0};
+
+ virtnet_stats_ctx_init(vi, &ctx, (void *)stats, true);
+
+ virtnet_get_hw_stats(vi, &ctx, i * 2 + 1);
+ virtnet_fill_stats(vi, i * 2 + 1, &ctx, (void *)&sq->stats, true, 0);
+}
+
+static void virtnet_get_base_stats(struct net_device *dev,
+ struct netdev_queue_stats_rx *rx,
+ struct netdev_queue_stats_tx *tx)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ /* The queue stats of virtio-net are never reset, so the base
+ * stats reported here are all zero.
+ */
+ rx->bytes = 0;
+ rx->packets = 0;
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC) {
+ rx->hw_drops = 0;
+ rx->hw_drop_overruns = 0;
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_CSUM) {
+ rx->csum_unnecessary = 0;
+ rx->csum_none = 0;
+ rx->csum_bad = 0;
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_GSO) {
+ rx->hw_gro_packets = 0;
+ rx->hw_gro_bytes = 0;
+ rx->hw_gro_wire_packets = 0;
+ rx->hw_gro_wire_bytes = 0;
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_SPEED)
+ rx->hw_drop_ratelimits = 0;
+
+ tx->bytes = 0;
+ tx->packets = 0;
+ tx->stop = 0;
+ tx->wake = 0;
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_BASIC) {
+ tx->hw_drops = 0;
+ tx->hw_drop_errors = 0;
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_CSUM) {
+ tx->csum_none = 0;
+ tx->needs_csum = 0;
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_GSO) {
+ tx->hw_gso_packets = 0;
+ tx->hw_gso_bytes = 0;
+ tx->hw_gso_wire_packets = 0;
+ tx->hw_gso_wire_bytes = 0;
+ }
+
+ if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_TX_SPEED)
+ tx->hw_drop_ratelimits = 0;
+}
+
+static const struct netdev_stat_ops virtnet_stat_ops = {
+ .get_queue_stats_rx = virtnet_get_queue_stats_rx,
+ .get_queue_stats_tx = virtnet_get_queue_stats_tx,
+ .get_base_stats = virtnet_get_base_stats,
+};
+
static void virtnet_freeze_down(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
@@ -3951,10 +5383,16 @@ static int virtnet_restore_up(struct virtio_device *vdev)
static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
+ __virtio64 *_offloads __free(kfree) = NULL;
struct scatterlist sg;
- vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
- sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
+ _offloads = kzalloc(sizeof(*_offloads), GFP_KERNEL);
+ if (!_offloads)
+ return -ENOMEM;
+
+ *_offloads = cpu_to_virtio64(vi->vdev, offloads);
+
+ sg_init_one(&sg, _offloads, sizeof(*_offloads));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
@@ -3985,10 +5423,144 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
return virtnet_set_guest_offloads(vi, offloads);
}
+static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq,
+ struct xsk_buff_pool *pool)
+{
+ int err, qindex;
+
+ qindex = rq - vi->rq;
+
+ if (pool) {
+ err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id);
+ if (err < 0)
+ return err;
+
+ err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info,
+ MEM_TYPE_XSK_BUFF_POOL, NULL);
+ if (err < 0)
+ goto unreg;
+
+ xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info);
+ }
+
+ virtnet_rx_pause(vi, rq);
+
+ err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf);
+ if (err) {
+ netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err);
+
+ pool = NULL;
+ }
+
+ rq->xsk_pool = pool;
+
+ virtnet_rx_resume(vi, rq);
+
+ if (pool)
+ return 0;
+
+unreg:
+ xdp_rxq_info_unreg(&rq->xsk_rxq_info);
+ return err;
+}
+
+static int virtnet_xsk_pool_enable(struct net_device *dev,
+ struct xsk_buff_pool *pool,
+ u16 qid)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct receive_queue *rq;
+ struct device *dma_dev;
+ struct send_queue *sq;
+ int err, size;
+
+ if (vi->hdr_len > xsk_pool_get_headroom(pool))
+ return -EINVAL;
+
+ /* In big_packets mode, XDP cannot work, so there is no need to
+ * initialize the xsk state of the rq.
+ */
+ if (vi->big_packets && !vi->mergeable_rx_bufs)
+ return -ENOENT;
+
+ if (qid >= vi->curr_queue_pairs)
+ return -EINVAL;
+
+ sq = &vi->sq[qid];
+ rq = &vi->rq[qid];
+
+ /* xsk assumes that tx and rx share the same dma device: af-xdp may
+ * receive into a buffer from the rx path and reuse that buffer for a
+ * later tx, so the dma devs of the sq and rq must be the same one.
+ *
+ * But vq->dma_dev allows each vq to have its own dma dev, so check
+ * that the dma devs of the rq and sq are the same device.
+ */
+ if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq))
+ return -EINVAL;
+
+ dma_dev = virtqueue_dma_dev(rq->vq);
+ if (!dma_dev)
+ return -EINVAL;
+
+ size = virtqueue_get_vring_size(rq->vq);
+
+ rq->xsk_buffs = kvcalloc(size, sizeof(*rq->xsk_buffs), GFP_KERNEL);
+ if (!rq->xsk_buffs)
+ return -ENOMEM;
+
+ err = xsk_pool_dma_map(pool, dma_dev, 0);
+ if (err)
+ goto err_xsk_map;
+
+ err = virtnet_rq_bind_xsk_pool(vi, rq, pool);
+ if (err)
+ goto err_rq;
+
+ return 0;
+
+err_rq:
+ xsk_pool_dma_unmap(pool, 0);
+err_xsk_map:
+ return err;
+}
+
+static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct xsk_buff_pool *pool;
+ struct receive_queue *rq;
+ int err;
+
+ if (qid >= vi->curr_queue_pairs)
+ return -EINVAL;
+
+ rq = &vi->rq[qid];
+
+ pool = rq->xsk_pool;
+
+ err = virtnet_rq_bind_xsk_pool(vi, rq, NULL);
+
+ xsk_pool_dma_unmap(pool, 0);
+
+ kvfree(rq->xsk_buffs);
+
+ return err;
+}
+
+static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ if (xdp->xsk.pool)
+ return virtnet_xsk_pool_enable(dev, xdp->xsk.pool,
+ xdp->xsk.queue_id);
+ else
+ return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id);
+}
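/* A userspace sketch (not part of this patch): binding an AF_XDP socket
 * to queue 0 of a virtio-net interface reaches the kernel paths above via
 * XDP_SETUP_XSK_POOL. Assumes libxdp; "eth0" is a placeholder name.
 */
#include <xdp/xsk.h>

static int bind_xsk_sketch(void *umem_area, __u64 size)
{
	struct xsk_ring_prod fill, tx;
	struct xsk_ring_cons comp, rx;
	struct xsk_socket *xsk;
	struct xsk_umem *umem;
	int err;

	err = xsk_umem__create(&umem, umem_area, size, &fill, &comp, NULL);
	if (err)
		return err;

	/* queue_id must be < curr_queue_pairs, per the checks above */
	return xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL);
}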
+
static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
- unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM +
+ unsigned int room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM +
sizeof(struct skb_shared_info));
unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN;
struct virtnet_info *vi = netdev_priv(dev);
@@ -4054,7 +5626,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
synchronize_net();
}
- err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
+ err = virtnet_set_queues(vi, curr_qp + xdp_qp);
if (err)
goto err;
netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
@@ -4110,6 +5682,8 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
switch (xdp->command) {
case XDP_SETUP_PROG:
return virtnet_xdp_set(dev, xdp->prog, xdp->extack);
+ case XDP_SETUP_XSK_POOL:
+ return virtnet_xsk_pool_setup(dev, xdp);
default:
return -EINVAL;
}
@@ -4156,9 +5730,9 @@ static int virtnet_set_features(struct net_device *dev,
if ((dev->features ^ features) & NETIF_F_RXHASH) {
if (features & NETIF_F_RXHASH)
- vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
+ vi->rss.hash_types = vi->rss_hash_types_saved;
else
- vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
+ vi->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
if (!virtnet_commit_rss_command(vi))
return -EINVAL;
@@ -4182,6 +5756,36 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start)));
}
+static int virtnet_init_irq_moder(struct virtnet_info *vi)
+{
+ u8 profile_flags = 0, coal_flags = 0;
+ int ret, i;
+
+ profile_flags |= DIM_PROFILE_RX;
+ coal_flags |= DIM_COALESCE_USEC | DIM_COALESCE_PKTS;
+ ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags,
+ DIM_CQ_PERIOD_MODE_START_FROM_EQE,
+ 0, virtnet_rx_dim_work, NULL);
+
+ if (ret)
+ return ret;
+
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ net_dim_setting(vi->dev, &vi->rq[i].dim, false);
+
+ return 0;
+}
+
+static void virtnet_free_irq_moder(struct virtnet_info *vi)
+{
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
+ return;
+
+ rtnl_lock();
+ net_dim_free_irq_moder(vi->dev);
+ rtnl_unlock();
+}
+
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
@@ -4194,6 +5798,7 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
.ndo_bpf = virtnet_xdp,
.ndo_xdp_xmit = virtnet_xdp_xmit,
+ .ndo_xsk_wakeup = virtnet_xsk_wakeup,
.ndo_features_check = passthru_features_check,
.ndo_get_phys_port_name = virtnet_get_phys_port_name,
.ndo_set_features = virtnet_set_features,
@@ -4287,7 +5892,7 @@ static void free_receive_page_frags(struct virtnet_info *vi)
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
if (vi->rq[i].alloc_frag.page) {
- if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+ if (vi->rq[i].last_dma)
virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
put_page(vi->rq[i].alloc_frag.page);
}
@@ -4351,9 +5956,8 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu
static int virtnet_find_vqs(struct virtnet_info *vi)
{
- vq_callback_t **callbacks;
+ struct virtqueue_info *vqs_info;
struct virtqueue **vqs;
- const char **names;
int ret = -ENOMEM;
int total_vqs;
bool *ctx;
@@ -4370,12 +5974,9 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
if (!vqs)
goto err_vq;
- callbacks = kmalloc_array(total_vqs, sizeof(*callbacks), GFP_KERNEL);
- if (!callbacks)
- goto err_callback;
- names = kmalloc_array(total_vqs, sizeof(*names), GFP_KERNEL);
- if (!names)
- goto err_names;
+ vqs_info = kcalloc(total_vqs, sizeof(*vqs_info), GFP_KERNEL);
+ if (!vqs_info)
+ goto err_vqs_info;
if (!vi->big_packets || vi->mergeable_rx_bufs) {
ctx = kcalloc(total_vqs, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -4386,24 +5987,22 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
/* Parameters for control virtqueue, if any */
if (vi->has_cvq) {
- callbacks[total_vqs - 1] = NULL;
- names[total_vqs - 1] = "control";
+ vqs_info[total_vqs - 1].name = "control";
}
/* Allocate/initialize parameters for send/receive virtqueues */
for (i = 0; i < vi->max_queue_pairs; i++) {
- callbacks[rxq2vq(i)] = skb_recv_done;
- callbacks[txq2vq(i)] = skb_xmit_done;
+ vqs_info[rxq2vq(i)].callback = skb_recv_done;
+ vqs_info[txq2vq(i)].callback = skb_xmit_done;
sprintf(vi->rq[i].name, "input.%u", i);
sprintf(vi->sq[i].name, "output.%u", i);
- names[rxq2vq(i)] = vi->rq[i].name;
- names[txq2vq(i)] = vi->sq[i].name;
+ vqs_info[rxq2vq(i)].name = vi->rq[i].name;
+ vqs_info[txq2vq(i)].name = vi->sq[i].name;
if (ctx)
- ctx[rxq2vq(i)] = true;
+ vqs_info[rxq2vq(i)].ctx = true;
}
- ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
- names, ctx, NULL);
+ ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL);
if (ret)
goto err_find;
@@ -4425,10 +6024,8 @@ static int virtnet_find_vqs(struct virtnet_info *vi)
err_find:
kfree(ctx);
err_ctx:
- kfree(names);
-err_names:
- kfree(callbacks);
-err_callback:
+ kfree(vqs_info);
+err_vqs_info:
kfree(vqs);
err_vq:
return ret;
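
The hunk above collapses the parallel callbacks[]/names[]/ctx[] arrays into a single virtqueue_info array. For reference, the structure this relies on (abridged; field semantics as exercised by this driver, so treat the exact layout as an assumption):

    struct virtqueue_info {
    	const char *name;	 /* NULL name: virtqueue is not allocated */
    	vq_callback_t *callback; /* NULL: no callback, e.g. the control vq */
    	bool ctx;		 /* enable per-buffer context pointers */
    };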
@@ -4461,15 +6058,13 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
virtnet_poll_tx,
napi_tx ? napi_weight : 0);
- INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work);
- vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-
sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
u64_stats_init(&vi->rq[i].stats.syncp);
u64_stats_init(&vi->sq[i].stats.syncp);
+ mutex_init(&vi->rq[i].dim_lock);
}
return 0;
@@ -4637,6 +6232,48 @@ static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
}
}
+#define VIRTIO_NET_HASH_REPORT_MAX_TABLE 10
+static enum xdp_rss_hash_type
+virtnet_xdp_rss_type[VIRTIO_NET_HASH_REPORT_MAX_TABLE] = {
+ [VIRTIO_NET_HASH_REPORT_NONE] = XDP_RSS_TYPE_NONE,
+ [VIRTIO_NET_HASH_REPORT_IPv4] = XDP_RSS_TYPE_L3_IPV4,
+ [VIRTIO_NET_HASH_REPORT_TCPv4] = XDP_RSS_TYPE_L4_IPV4_TCP,
+ [VIRTIO_NET_HASH_REPORT_UDPv4] = XDP_RSS_TYPE_L4_IPV4_UDP,
+ [VIRTIO_NET_HASH_REPORT_IPv6] = XDP_RSS_TYPE_L3_IPV6,
+ [VIRTIO_NET_HASH_REPORT_TCPv6] = XDP_RSS_TYPE_L4_IPV6_TCP,
+ [VIRTIO_NET_HASH_REPORT_UDPv6] = XDP_RSS_TYPE_L4_IPV6_UDP,
+ [VIRTIO_NET_HASH_REPORT_IPv6_EX] = XDP_RSS_TYPE_L3_IPV6_EX,
+ [VIRTIO_NET_HASH_REPORT_TCPv6_EX] = XDP_RSS_TYPE_L4_IPV6_TCP_EX,
+ [VIRTIO_NET_HASH_REPORT_UDPv6_EX] = XDP_RSS_TYPE_L4_IPV6_UDP_EX
+};
+
+static int virtnet_xdp_rx_hash(const struct xdp_md *_ctx, u32 *hash,
+ enum xdp_rss_hash_type *rss_type)
+{
+ const struct xdp_buff *xdp = (void *)_ctx;
+ struct virtio_net_hdr_v1_hash *hdr_hash;
+ struct virtnet_info *vi;
+ u16 hash_report;
+
+ if (!(xdp->rxq->dev->features & NETIF_F_RXHASH))
+ return -ENODATA;
+
+ vi = netdev_priv(xdp->rxq->dev);
+ hdr_hash = (struct virtio_net_hdr_v1_hash *)(xdp->data - vi->hdr_len);
+ hash_report = __le16_to_cpu(hdr_hash->hash_report);
+
+ if (hash_report >= VIRTIO_NET_HASH_REPORT_MAX_TABLE)
+ hash_report = VIRTIO_NET_HASH_REPORT_NONE;
+
+ *rss_type = virtnet_xdp_rss_type[hash_report];
+ *hash = __le32_to_cpu(hdr_hash->hash_value);
+ return 0;
+}
+
+static const struct xdp_metadata_ops virtnet_xdp_metadata_ops = {
+ .xmo_rx_hash = virtnet_xdp_rx_hash,
+};
+
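
With xmo_rx_hash in place, an XDP program can retrieve the device-computed RSS hash through the metadata kfunc that lands in virtnet_xdp_rx_hash() above. A minimal sketch of such a consumer (section and program names are illustrative):

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash,
    					enum xdp_rss_hash_type *rss_type) __ksym;

    SEC("xdp")
    int read_rx_hash(struct xdp_md *ctx)
    {
    	enum xdp_rss_hash_type type;
    	__u32 hash;

    	/* Returns -ENODATA when NETIF_F_RXHASH is off, per the code above. */
    	if (!bpf_xdp_metadata_rx_hash(ctx, &hash, &type))
    		bpf_printk("rx hash 0x%x type %d", hash, type);
    	return XDP_PASS;
    }

    char LICENSE[] SEC("license") = "GPL";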
static int virtnet_probe(struct virtio_device *vdev)
{
int i, err = -ENOMEM;
@@ -4666,6 +6303,7 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE |
IFF_TX_SKB_NO_LINEAR;
dev->netdev_ops = &virtnet_netdev;
+ dev->stat_ops = &virtnet_stat_ops;
dev->features = NETIF_F_HIGHDMA;
dev->ethtool_ops = &virtnet_ethtool_ops;
@@ -4698,8 +6336,16 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
/* (!csum && gso) case will be fixed by register_netdev() */
}
- if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
- dev->features |= NETIF_F_RXCSUM;
+
+ /* 1. With VIRTIO_NET_F_GUEST_CSUM negotiation, the driver doesn't
+ * need to calculate checksums for partially checksummed packets,
+ * as they're considered valid by the upper layer.
+ * 2. Without VIRTIO_NET_F_GUEST_CSUM negotiation, the driver only
+ * receives fully checksummed packets. The device may assist in
+ * validating these packets' checksums, so the driver won't have to.
+ */
+ dev->features |= NETIF_F_RXCSUM;
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
dev->features |= NETIF_F_GRO_HW;
@@ -4765,6 +6411,7 @@ static int virtnet_probe(struct virtio_device *vdev)
VIRTIO_NET_RSS_HASH_TYPE_UDP_EX);
dev->hw_features |= NETIF_F_RXHASH;
+ dev->xdp_metadata_ops = &virtnet_xdp_metadata_ops;
}
if (vi->has_rss_hash_report)
@@ -4782,6 +6429,8 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
vi->has_cvq = true;
+ mutex_init(&vi->cvq_lock);
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
mtu = virtio_cread16(vdev,
offsetof(struct virtio_net_config,
@@ -4837,6 +6486,10 @@ static int virtnet_probe(struct virtio_device *vdev)
for (i = 0; i < vi->max_queue_pairs; i++)
if (vi->sq[i].napi.weight)
vi->sq[i].intr_coal.max_packets = 1;
+
+ err = virtnet_init_irq_moder(vi);
+ if (err)
+ goto free;
}
#ifdef CONFIG_SYSFS
@@ -4873,7 +6526,7 @@ static int virtnet_probe(struct virtio_device *vdev)
virtio_device_ready(vdev);
- _virtnet_set_queues(vi, vi->curr_queue_pairs);
+ virtnet_set_queues(vi, vi->curr_queue_pairs);
/* a random MAC address has been assigned, notify the device.
* We don't fail probe if VIRTIO_NET_F_CTRL_MAC_ADDR is not there
@@ -4893,6 +6546,33 @@ static int virtnet_probe(struct virtio_device *vdev)
}
}
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_DEVICE_STATS)) {
+ struct virtio_net_stats_capabilities *stats_cap __free(kfree) = NULL;
+ struct scatterlist sg;
+ __le64 v;
+
+ stats_cap = kzalloc(sizeof(*stats_cap), GFP_KERNEL);
+ if (!stats_cap) {
+ rtnl_unlock();
+ err = -ENOMEM;
+ goto free_unregister_netdev;
+ }
+
+ sg_init_one(&sg, stats_cap, sizeof(*stats_cap));
+
+ if (!virtnet_send_command_reply(vi, VIRTIO_NET_CTRL_STATS,
+ VIRTIO_NET_CTRL_STATS_QUERY,
+ NULL, &sg)) {
+ pr_debug("virtio_net: fail to get stats capability\n");
+ rtnl_unlock();
+ err = -EINVAL;
+ goto free_unregister_netdev;
+ }
+
+ v = stats_cap->supported_stats_types[0];
+ vi->device_stats_cap = le64_to_cpu(v);
+ }
+
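
The cached device_stats_cap bitmap gates later stats queries so the driver only requests what the device advertised. A hedged consumer sketch (VIRTIO_NET_STATS_TYPE_RX_BASIC is assumed from the uapi stats-type defines added with this feature; the helper is hypothetical):

    /* Only query basic rx counters if the device advertised them. */
    if (vi->device_stats_cap & VIRTIO_NET_STATS_TYPE_RX_BASIC)
    	virtnet_query_rx_basic(vi);	/* hypothetical helper */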
rtnl_unlock();
err = virtnet_cpu_notif_add(vi);
@@ -4961,6 +6641,8 @@ static void virtnet_remove(struct virtio_device *vdev)
disable_rx_mode_work(vi);
flush_work(&vi->rx_mode_work);
+ virtnet_free_irq_moder(vi);
+
unregister_netdev(vi->dev);
net_failover_destroy(vi->failover);
@@ -5021,7 +6703,7 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY, \
VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, VIRTIO_NET_F_NOTF_COAL, \
VIRTIO_NET_F_VQ_NOTF_COAL, \
- VIRTIO_NET_F_GUEST_HDRLEN
+ VIRTIO_NET_F_GUEST_HDRLEN, VIRTIO_NET_F_DEVICE_STATS
static unsigned int features[] = {
VIRTNET_FEATURES,
@@ -5039,7 +6721,6 @@ static struct virtio_driver virtio_net_driver = {
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.validate = virtnet_validate,
.probe = virtnet_probe,
diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile
index f82870c10205..59ef494ce2e0 100644
--- a/drivers/net/vmxnet3/Makefile
+++ b/drivers/net/vmxnet3/Makefile
@@ -2,7 +2,7 @@
#
# Linux driver for VMware's vmxnet3 ethernet NIC.
#
-# Copyright (C) 2007-2022, VMware, Inc. All Rights Reserved.
+# Copyright (C) 2007-2024, VMware, Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 41d6767283a6..5c5148768039 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -80,6 +80,8 @@ enum {
#define VMXNET3_IO_TYPE(addr) ((addr) >> 24)
#define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF)
+#define VMXNET3_PMC_PSEUDO_TSC 0x10003
+
enum {
VMXNET3_CMD_FIRST_SET = 0xCAFE0000,
VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET,
@@ -123,6 +125,8 @@ enum {
VMXNET3_CMD_GET_RESERVED4,
VMXNET3_CMD_GET_MAX_CAPABILITIES,
VMXNET3_CMD_GET_DCR0_REG,
+ VMXNET3_CMD_GET_TSRING_DESC_SIZE,
+ VMXNET3_CMD_GET_DISABLED_OFFLOADS,
};
/*
@@ -254,6 +258,24 @@ struct Vmxnet3_RxDesc {
#define VMXNET3_RCD_HDR_INNER_SHIFT 13
+struct Vmxnet3TSInfo {
+ u64 tsData:56;
+ u64 tsType:4;
+	u64 tsi:1;	/* set when a timestamp has been written */
+ u64 pad:3;
+ u64 pad2;
+};
+
+struct Vmxnet3_TxTSDesc {
+ struct Vmxnet3TSInfo ts;
+ u64 pad[14];
+};
+
+struct Vmxnet3_RxTSDesc {
+ struct Vmxnet3TSInfo ts;
+ u64 pad[14];
+};
+
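
Both TS descriptors are the 16-byte Vmxnet3TSInfo padded out to 128 bytes (16 + 14 * 8). A quick sanity-check sketch of that layout assumption:

    static_assert(sizeof(struct Vmxnet3TSInfo) == 16);	/* 64-bit bitfield + pad2 */
    static_assert(sizeof(struct Vmxnet3_TxTSDesc) == 128);
    static_assert(sizeof(struct Vmxnet3_RxTSDesc) == 128);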
struct Vmxnet3_RxCompDesc {
#ifdef __BIG_ENDIAN_BITFIELD
u32 ext2:1;
@@ -427,6 +449,13 @@ union Vmxnet3_GenericDesc {
#define VMXNET3_RXDATA_DESC_SIZE_ALIGN 64
#define VMXNET3_RXDATA_DESC_SIZE_MASK (VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1)
+/* Rx TS Ring buffer size must be a multiple of 64 bytes */
+#define VMXNET3_RXTS_DESC_SIZE_ALIGN 64
+#define VMXNET3_RXTS_DESC_SIZE_MASK (VMXNET3_RXTS_DESC_SIZE_ALIGN - 1)
+/* Tx TS Ring buffer size must be a multiple of 64 bytes */
+#define VMXNET3_TXTS_DESC_SIZE_ALIGN 64
+#define VMXNET3_TXTS_DESC_SIZE_MASK (VMXNET3_TXTS_DESC_SIZE_ALIGN - 1)
+
/* Max ring size */
#define VMXNET3_TX_RING_MAX_SIZE 4096
#define VMXNET3_TC_RING_MAX_SIZE 4096
@@ -439,6 +468,9 @@ union Vmxnet3_GenericDesc {
#define VMXNET3_RXDATA_DESC_MAX_SIZE 2048
+#define VMXNET3_TXTS_DESC_MAX_SIZE 256
+#define VMXNET3_RXTS_DESC_MAX_SIZE 256
+
/* a list of reasons for queue stop */
enum {
@@ -546,6 +578,24 @@ struct Vmxnet3_RxQueueConf {
};
+struct Vmxnet3_LatencyConf {
+ u16 sampleRate;
+ u16 pad;
+};
+
+struct Vmxnet3_TxQueueTSConf {
+ __le64 txTSRingBasePA;
+ __le16 txTSRingDescSize; /* size of tx timestamp ring buffer */
+ u16 pad;
+ struct Vmxnet3_LatencyConf latencyConf;
+};
+
+struct Vmxnet3_RxQueueTSConf {
+ __le64 rxTSRingBasePA;
+ __le16 rxTSRingDescSize; /* size of rx timestamp ring buffer */
+ u16 pad[3];
+};
+
enum vmxnet3_intr_mask_mode {
VMXNET3_IMM_AUTO = 0,
VMXNET3_IMM_ACTIVE = 1,
@@ -679,7 +729,8 @@ struct Vmxnet3_TxQueueDesc {
/* Driver read after a GET command */
struct Vmxnet3_QueueStatus status;
struct UPT1_TxStats stats;
- u8 _pad[88]; /* 128 aligned */
+ struct Vmxnet3_TxQueueTSConf tsConf;
+ u8 _pad[72]; /* 128 aligned */
};
@@ -689,7 +740,8 @@ struct Vmxnet3_RxQueueDesc {
 /* Driver read after a GET command */
struct Vmxnet3_QueueStatus status;
struct UPT1_RxStats stats;
- u8 __pad[88]; /* 128 aligned */
+ struct Vmxnet3_RxQueueTSConf tsConf;
+ u8 __pad[72]; /* 128 aligned */
};
struct Vmxnet3_SetPolling {
@@ -861,4 +913,7 @@ struct Vmxnet3_DriverShared {
/* when new capability is introduced, update VMXNET3_CAP_MAX */
#define VMXNET3_CAP_MAX VMXNET3_CAP_VERSION_7_MAX
+#define VMXNET3_OFFLOAD_TSO BIT(0)
+#define VMXNET3_OFFLOAD_LRO BIT(1)
+
#endif /* _VMXNET3_DEFS_H_ */
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0578864792b6..b70654c7ad34 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -143,6 +143,32 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}
+static u64
+vmxnet3_get_cycles(int pmc)
+{
+#ifdef CONFIG_X86
+ return native_read_pmc(pmc);
+#else
+ return 0;
+#endif
+}
+
+static bool
+vmxnet3_apply_timestamp(struct vmxnet3_tx_queue *tq, u16 rate)
+{
+#ifdef CONFIG_X86
+ if (rate > 0) {
+ if (tq->tsPktCount == 1) {
+ if (rate != 1)
+ tq->tsPktCount = rate;
+ return true;
+ }
+ tq->tsPktCount--;
+ }
+#endif
+ return false;
+}
+
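
The countdown above stamps the first packet and then every rate-th packet: tsPktCount starts at 1, reloads to rate on a hit, and decrements otherwise, so rate == 1 stamps everything. A stand-alone user-space model of the cadence (names hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    static bool apply_ts(unsigned int *count, unsigned int rate)
    {
    	if (!rate)
    		return false;
    	if (*count == 1) {
    		if (rate != 1)
    			*count = rate;
    		return true;
    	}
    	(*count)--;
    	return false;
    }

    int main(void)
    {
    	unsigned int count = 1;	/* mirrors tq->tsPktCount = 1 at init */
    	int pkt;

    	/* With rate 4, packets 1, 5, 9, ... get a timestamp. */
    	for (pkt = 1; pkt <= 10; pkt++)
    		printf("pkt %d: %s\n", pkt, apply_ts(&count, 4) ? "ts" : "-");
    	return 0;
    }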
/* Check if capability is supported by UPT device or
* UPT is even requested
*/
@@ -498,6 +524,12 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
tq->data_ring.base, tq->data_ring.basePA);
tq->data_ring.base = NULL;
}
+ if (tq->ts_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev,
+ tq->tx_ring.size * tq->tx_ts_desc_size,
+ tq->ts_ring.base, tq->ts_ring.basePA);
+ tq->ts_ring.base = NULL;
+ }
if (tq->comp_ring.base) {
dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc),
@@ -535,6 +567,10 @@ vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
memset(tq->data_ring.base, 0,
tq->data_ring.size * tq->txdata_desc_size);
+ if (tq->ts_ring.base)
+ memset(tq->ts_ring.base, 0,
+ tq->tx_ring.size * tq->tx_ts_desc_size);
+
/* reset the tx comp ring contents to 0 and reset comp ring states */
memset(tq->comp_ring.base, 0, tq->comp_ring.size *
sizeof(struct Vmxnet3_TxCompDesc));
@@ -573,6 +609,18 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
goto err;
}
+ if (tq->tx_ts_desc_size != 0) {
+ tq->ts_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+ tq->tx_ring.size * tq->tx_ts_desc_size,
+ &tq->ts_ring.basePA, GFP_KERNEL);
+ if (!tq->ts_ring.base) {
+ netdev_err(adapter->netdev, "failed to allocate tx ts ring\n");
+ tq->tx_ts_desc_size = 0;
+ }
+ } else {
+ tq->ts_ring.base = NULL;
+ }
+
tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
&tq->comp_ring.basePA, GFP_KERNEL);
@@ -861,6 +909,11 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
/* set the last buf_info for the pkt */
tbi->skb = skb;
tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
+ if (tq->tx_ts_desc_size != 0) {
+ ctx->ts_txd = (struct Vmxnet3_TxTSDesc *)((u8 *)tq->ts_ring.base +
+ tbi->sop_idx * tq->tx_ts_desc_size);
+ ctx->ts_txd->ts.tsi = 0;
+ }
return 0;
}
@@ -968,7 +1021,7 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
skb_headlen(skb));
}
- if (skb->len <= VMXNET3_HDR_COPY_SIZE)
+ if (skb->len <= tq->txdata_desc_size)
ctx->copy_size = skb->len;
/* make sure headers are accessible directly */
@@ -1259,6 +1312,14 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
gdesc->txd.tci = skb_vlan_tag_get(skb);
}
+ if (tq->tx_ts_desc_size != 0 &&
+ adapter->latencyConf->sampleRate != 0) {
+ if (vmxnet3_apply_timestamp(tq, adapter->latencyConf->sampleRate)) {
+ ctx.ts_txd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC);
+ ctx.ts_txd->ts.tsi = 1;
+ }
+ }
+
/* Ensure that the write to (&gdesc->txd)->gen will be observed after
* all other writes to &gdesc->txd.
*/
@@ -1608,6 +1669,15 @@ skip_xdp:
skip_page_frags = false;
ctx->skb = rbi->skb;
+ if (rq->rx_ts_desc_size != 0 && rcd->ext2) {
+ struct Vmxnet3_RxTSDesc *ts_rxd;
+
+ ts_rxd = (struct Vmxnet3_RxTSDesc *)((u8 *)rq->ts_ring.base +
+ idx * rq->rx_ts_desc_size);
+ ts_rxd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC);
+ ts_rxd->ts.tsi = 1;
+ }
+
rxDataRingUsed =
VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
len = rxDataRingUsed ? rcd->len : rbi->len;
@@ -2007,6 +2077,13 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
rq->data_ring.base = NULL;
}
+ if (rq->ts_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev,
+ rq->rx_ring[0].size * rq->rx_ts_desc_size,
+ rq->ts_ring.base, rq->ts_ring.basePA);
+ rq->ts_ring.base = NULL;
+ }
+
if (rq->comp_ring.base) {
dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
* sizeof(struct Vmxnet3_RxCompDesc),
@@ -2034,8 +2111,8 @@ vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
rq->data_ring.base,
rq->data_ring.basePA);
rq->data_ring.base = NULL;
- rq->data_ring.desc_size = 0;
}
+ rq->data_ring.desc_size = 0;
}
}
@@ -2090,6 +2167,10 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
}
vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
+ if (rq->ts_ring.base)
+ memset(rq->ts_ring.base, 0,
+ rq->rx_ring[0].size * rq->rx_ts_desc_size);
+
/* reset the comp ring */
rq->comp_ring.next2proc = 0;
memset(rq->comp_ring.base, 0, rq->comp_ring.size *
@@ -2160,6 +2241,21 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
rq->data_ring.desc_size = 0;
}
+ if (rq->rx_ts_desc_size != 0) {
+ sz = rq->rx_ring[0].size * rq->rx_ts_desc_size;
+ rq->ts_ring.base =
+ dma_alloc_coherent(&adapter->pdev->dev, sz,
+ &rq->ts_ring.basePA,
+ GFP_KERNEL);
+ if (!rq->ts_ring.base) {
+ netdev_err(adapter->netdev,
+ "rx ts ring will be disabled\n");
+ rq->rx_ts_desc_size = 0;
+ }
+ } else {
+ rq->ts_ring.base = NULL;
+ }
+
sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
&rq->comp_ring.basePA,
@@ -2759,6 +2855,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt;
struct Vmxnet3_TxQueueConf *tqc;
struct Vmxnet3_RxQueueConf *rqc;
+ struct Vmxnet3_TxQueueTSConf *tqtsc;
+ struct Vmxnet3_RxQueueTSConf *rqtsc;
int i;
memset(shared, 0, sizeof(*shared));
@@ -2815,6 +2913,11 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
tqc->ddLen = cpu_to_le32(0);
tqc->intrIdx = tq->comp_ring.intr_idx;
+ if (VMXNET3_VERSION_GE_9(adapter)) {
+ tqtsc = &adapter->tqd_start[i].tsConf;
+ tqtsc->txTSRingBasePA = cpu_to_le64(tq->ts_ring.basePA);
+ tqtsc->txTSRingDescSize = cpu_to_le16(tq->tx_ts_desc_size);
+ }
}
/* rx queue settings */
@@ -2837,6 +2940,11 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rqc->rxDataRingDescSize =
cpu_to_le16(rq->data_ring.desc_size);
}
+ if (VMXNET3_VERSION_GE_9(adapter)) {
+ rqtsc = &adapter->rqd_start[i].tsConf;
+ rqtsc->rxTSRingBasePA = cpu_to_le64(rq->ts_ring.basePA);
+ rqtsc->rxTSRingDescSize = cpu_to_le16(rq->rx_ts_desc_size);
+ }
}
#ifdef VMXNET3_RSS
@@ -3299,6 +3407,8 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
tq->stopped = true;
tq->adapter = adapter;
tq->qid = i;
+ tq->tx_ts_desc_size = adapter->tx_ts_desc_size;
+ tq->tsPktCount = 1;
err = vmxnet3_tq_create(tq, adapter);
/*
* Too late to change num_tx_queues. We cannot do away with
@@ -3320,6 +3430,7 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
rq->shared = &adapter->rqd_start[i].ctrl;
rq->adapter = adapter;
rq->data_ring.desc_size = rxdata_desc_size;
+ rq->rx_ts_desc_size = adapter->rx_ts_desc_size;
err = vmxnet3_rq_create(rq, adapter);
if (err) {
if (i == 0) {
@@ -3361,14 +3472,15 @@ vmxnet3_open(struct net_device *netdev)
if (VMXNET3_VERSION_GE_3(adapter)) {
unsigned long flags;
u16 txdata_desc_size;
+ u32 ret;
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
- txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
- VMXNET3_REG_CMD);
+ ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ txdata_desc_size = ret & 0xffff;
if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) ||
(txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) ||
(txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) {
@@ -3377,10 +3489,40 @@ vmxnet3_open(struct net_device *netdev)
} else {
adapter->txdata_desc_size = txdata_desc_size;
}
+ if (VMXNET3_VERSION_GE_9(adapter))
+ adapter->rxdata_desc_size = (ret >> 16) & 0xffff;
} else {
adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
}
+ if (VMXNET3_VERSION_GE_9(adapter)) {
+ unsigned long flags;
+ u16 tx_ts_desc_size = 0;
+ u16 rx_ts_desc_size = 0;
+ u32 ret;
+
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_TSRING_DESC_SIZE);
+ ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ if (ret > 0) {
+ tx_ts_desc_size = (ret & 0xff);
+ rx_ts_desc_size = ((ret >> 16) & 0xff);
+ }
+ if (tx_ts_desc_size > VMXNET3_TXTS_DESC_MAX_SIZE ||
+ tx_ts_desc_size & VMXNET3_TXTS_DESC_SIZE_MASK)
+ tx_ts_desc_size = 0;
+ if (rx_ts_desc_size > VMXNET3_RXTS_DESC_MAX_SIZE ||
+ rx_ts_desc_size & VMXNET3_RXTS_DESC_SIZE_MASK)
+ rx_ts_desc_size = 0;
+ adapter->tx_ts_desc_size = tx_ts_desc_size;
+ adapter->rx_ts_desc_size = rx_ts_desc_size;
+ } else {
+ adapter->tx_ts_desc_size = 0;
+ adapter->rx_ts_desc_size = 0;
+ }
+
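
The single register read packs both sizes: bits 7:0 carry the tx TS descriptor size and bits 23:16 the rx size, each then validated against the 256-byte maximum and the 64-byte alignment mask. A worked decode (the register value is hypothetical):

    u32 ret = 0x00400040;		/* example value from the device */
    u16 tx = ret & 0xff;		/* 0x40 = 64 bytes */
    u16 rx = (ret >> 16) & 0xff;	/* 0x40 = 64 bytes */
    /* 64 <= 256 and (64 & 0x3f) == 0, so both sizes are accepted. */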
err = vmxnet3_create_queues(adapter,
adapter->tx_ring_size,
adapter->rx_ring_size,
@@ -3457,7 +3599,7 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
int err = 0;
- netdev->mtu = new_mtu;
+ WRITE_ONCE(netdev->mtu, new_mtu);
/*
* Reset_work may be in the middle of resetting the device, wait for its
@@ -3503,6 +3645,15 @@ static void
vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ unsigned long flags;
+
+ if (VMXNET3_VERSION_GE_9(adapter)) {
+ spin_lock_irqsave(&adapter->cmd_lock, flags);
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+ VMXNET3_CMD_GET_DISABLED_OFFLOADS);
+ adapter->disabledOffloads = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
+ spin_unlock_irqrestore(&adapter->cmd_lock, flags);
+ }
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
@@ -3520,6 +3671,16 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
+ if (adapter->disabledOffloads & VMXNET3_OFFLOAD_TSO) {
+ netdev->hw_features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ netdev->hw_enc_features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ }
+
+ if (adapter->disabledOffloads & VMXNET3_OFFLOAD_LRO) {
+ netdev->hw_features &= ~(NETIF_F_LRO);
+ netdev->hw_enc_features &= ~(NETIF_F_LRO);
+ }
+
if (VMXNET3_VERSION_GE_7(adapter)) {
unsigned long flags;
@@ -3790,7 +3951,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
struct net_device *netdev;
struct vmxnet3_adapter *adapter;
u8 mac[ETH_ALEN];
- int size;
+ int size, i;
int num_tx_queues;
int num_rx_queues;
int queues;
@@ -3857,42 +4018,14 @@ vmxnet3_probe_device(struct pci_dev *pdev,
goto err_alloc_pci;
ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
- if (ver & (1 << VMXNET3_REV_7)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_7);
- adapter->version = VMXNET3_REV_7 + 1;
- } else if (ver & (1 << VMXNET3_REV_6)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_6);
- adapter->version = VMXNET3_REV_6 + 1;
- } else if (ver & (1 << VMXNET3_REV_5)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_5);
- adapter->version = VMXNET3_REV_5 + 1;
- } else if (ver & (1 << VMXNET3_REV_4)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_4);
- adapter->version = VMXNET3_REV_4 + 1;
- } else if (ver & (1 << VMXNET3_REV_3)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_3);
- adapter->version = VMXNET3_REV_3 + 1;
- } else if (ver & (1 << VMXNET3_REV_2)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_2);
- adapter->version = VMXNET3_REV_2 + 1;
- } else if (ver & (1 << VMXNET3_REV_1)) {
- VMXNET3_WRITE_BAR1_REG(adapter,
- VMXNET3_REG_VRRS,
- 1 << VMXNET3_REV_1);
- adapter->version = VMXNET3_REV_1 + 1;
- } else {
+ for (i = VMXNET3_REV_9; i >= VMXNET3_REV_1; i--) {
+ if (ver & (1 << i)) {
+ VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1 << i);
+ adapter->version = i + 1;
+ break;
+ }
+ }
+ if (i < VMXNET3_REV_1) {
dev_err(&pdev->dev,
"Incompatible h/w version (0x%x) for adapter\n", ver);
err = -EBUSY;
@@ -3992,6 +4125,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
}
adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
adapter->num_tx_queues);
+ if (VMXNET3_VERSION_GE_9(adapter))
+ adapter->latencyConf = &adapter->tqd_start->tsConf.latencyConf;
adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
sizeof(struct Vmxnet3_PMConf),
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 7e8008d5378a..471f91c4204a 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 915aaf18c409..9f24d66dbb27 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -72,18 +72,20 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.7.0.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.9.0.0-k"
/* Each byte of this 32-bit integer encodes a version number in
* VMXNET3_DRIVER_VERSION_STRING.
*/
-#define VMXNET3_DRIVER_VERSION_NUM 0x01070000
+#define VMXNET3_DRIVER_VERSION_NUM 0x01090000
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
#define VMXNET3_RSS
#endif
+#define VMXNET3_REV_9 8 /* Vmxnet3 Rev. 9 */
+#define VMXNET3_REV_8 7 /* Vmxnet3 Rev. 8 */
#define VMXNET3_REV_7 6 /* Vmxnet3 Rev. 7 */
#define VMXNET3_REV_6 5 /* Vmxnet3 Rev. 6 */
#define VMXNET3_REV_5 4 /* Vmxnet3 Rev. 5 */
@@ -191,6 +193,11 @@ struct vmxnet3_tx_data_ring {
dma_addr_t basePA;
};
+struct vmxnet3_tx_ts_ring {
+ struct Vmxnet3_TxTSDesc *base;
+ dma_addr_t basePA;
+};
+
#define VMXNET3_MAP_NONE 0
#define VMXNET3_MAP_SINGLE BIT(0)
#define VMXNET3_MAP_PAGE BIT(1)
@@ -243,6 +250,7 @@ struct vmxnet3_tx_ctx {
u32 copy_size; /* # of bytes copied into the data ring */
union Vmxnet3_GenericDesc *sop_txd;
union Vmxnet3_GenericDesc *eop_txd;
+ struct Vmxnet3_TxTSDesc *ts_txd;
};
struct vmxnet3_tx_queue {
@@ -252,6 +260,7 @@ struct vmxnet3_tx_queue {
struct vmxnet3_cmd_ring tx_ring;
struct vmxnet3_tx_buf_info *buf_info;
struct vmxnet3_tx_data_ring data_ring;
+ struct vmxnet3_tx_ts_ring ts_ring;
struct vmxnet3_comp_ring comp_ring;
struct Vmxnet3_TxQueueCtrl *shared;
struct vmxnet3_tq_driver_stats stats;
@@ -260,6 +269,8 @@ struct vmxnet3_tx_queue {
* stopped */
int qid;
u16 txdata_desc_size;
+ u16 tx_ts_desc_size;
+ u16 tsPktCount;
} ____cacheline_aligned;
enum vmxnet3_rx_buf_type {
@@ -307,6 +318,11 @@ struct vmxnet3_rx_data_ring {
u16 desc_size;
};
+struct vmxnet3_rx_ts_ring {
+ struct Vmxnet3_RxTSDesc *base;
+ dma_addr_t basePA;
+};
+
struct vmxnet3_rx_queue {
char name[IFNAMSIZ + 8]; /* To identify interrupt */
struct vmxnet3_adapter *adapter;
@@ -314,6 +330,7 @@ struct vmxnet3_rx_queue {
struct vmxnet3_cmd_ring rx_ring[2];
struct vmxnet3_rx_data_ring data_ring;
struct vmxnet3_comp_ring comp_ring;
+ struct vmxnet3_rx_ts_ring ts_ring;
struct vmxnet3_rx_ctx rx_ctx;
u32 qid; /* rqID in RCD for buffer from 1st ring */
u32 qid2; /* rqID in RCD for buffer from 2nd ring */
@@ -323,6 +340,7 @@ struct vmxnet3_rx_queue {
struct vmxnet3_rq_driver_stats stats;
struct page_pool *page_pool;
struct xdp_rxq_info xdp_rxq;
+ u16 rx_ts_desc_size;
} ____cacheline_aligned;
#define VMXNET3_DEVICE_MAX_TX_QUEUES 32
@@ -432,6 +450,11 @@ struct vmxnet3_adapter {
u16 rx_prod_offset;
u16 rx_prod2_offset;
struct bpf_prog __rcu *xdp_bpf_prog;
+ struct Vmxnet3_LatencyConf *latencyConf;
+ /* Size of buffer in the ts ring */
+ u16 tx_ts_desc_size;
+ u16 rx_ts_desc_size;
+ u32 disabledOffloads;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
@@ -463,6 +486,10 @@ struct vmxnet3_adapter {
(adapter->version >= VMXNET3_REV_6 + 1)
#define VMXNET3_VERSION_GE_7(adapter) \
(adapter->version >= VMXNET3_REV_7 + 1)
+#define VMXNET3_VERSION_GE_8(adapter) \
+ (adapter->version >= VMXNET3_REV_8 + 1)
+#define VMXNET3_VERSION_GE_9(adapter) \
+ (adapter->version >= VMXNET3_REV_9 + 1)
/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE 512
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index bb95ce43cd97..040f0bb36c0e 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -126,8 +126,8 @@ static void vrf_rx_stats(struct net_device *dev, int len)
struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
u64_stats_update_begin(&dstats->syncp);
- dstats->rx_packets++;
- dstats->rx_bytes += len;
+ u64_stats_inc(&dstats->rx_packets);
+ u64_stats_add(&dstats->rx_bytes, len);
u64_stats_update_end(&dstats->syncp);
}
@@ -137,33 +137,6 @@ static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
kfree_skb(skb);
}
-static void vrf_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
-{
- int i;
-
- for_each_possible_cpu(i) {
- const struct pcpu_dstats *dstats;
- u64 tbytes, tpkts, tdrops, rbytes, rpkts;
- unsigned int start;
-
- dstats = per_cpu_ptr(dev->dstats, i);
- do {
- start = u64_stats_fetch_begin(&dstats->syncp);
- tbytes = dstats->tx_bytes;
- tpkts = dstats->tx_packets;
- tdrops = dstats->tx_drops;
- rbytes = dstats->rx_bytes;
- rpkts = dstats->rx_packets;
- } while (u64_stats_fetch_retry(&dstats->syncp, start));
- stats->tx_bytes += tbytes;
- stats->tx_packets += tpkts;
- stats->tx_dropped += tdrops;
- stats->rx_bytes += rbytes;
- stats->rx_packets += rpkts;
- }
-}
-
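
Dropping the open-coded aggregation is safe because the netdev core can sum per-CPU dstats generically once the driver declares them (via pcpu_stat_type; that declaration is outside this hunk, so treat it as an assumption). The per-CPU reader side the core performs, sketched:

    const struct pcpu_dstats *d = per_cpu_ptr(dev->dstats, cpu);
    unsigned int start;
    u64 pkts, bytes;

    do {
    	start = u64_stats_fetch_begin(&d->syncp);
    	pkts  = u64_stats_read(&d->tx_packets);	/* fields are u64_stats_t */
    	bytes = u64_stats_read(&d->tx_bytes);
    } while (u64_stats_fetch_retry(&d->syncp, start));

    stats->tx_packets += pkts;
    stats->tx_bytes   += bytes;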
static struct vrf_map *netns_vrf_map(struct net *net)
{
struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
@@ -408,10 +381,15 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
skb->protocol = eth_type_trans(skb, dev);
- if (likely(__netif_rx(skb) == NET_RX_SUCCESS))
+ if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) {
vrf_rx_stats(dev, len);
- else
- this_cpu_inc(dev->dstats->rx_drops);
+ } else {
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
+ u64_stats_update_begin(&dstats->syncp);
+ u64_stats_inc(&dstats->rx_drops);
+ u64_stats_update_end(&dstats->syncp);
+ }
return NETDEV_TX_OK;
}
@@ -599,19 +577,20 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
+
int len = skb->len;
netdev_tx_t ret = is_ip_tx_frame(skb, dev);
+ u64_stats_update_begin(&dstats->syncp);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
- struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);
- u64_stats_update_begin(&dstats->syncp);
- dstats->tx_packets++;
- dstats->tx_bytes += len;
- u64_stats_update_end(&dstats->syncp);
+ u64_stats_inc(&dstats->tx_packets);
+ u64_stats_add(&dstats->tx_bytes, len);
} else {
- this_cpu_inc(dev->dstats->tx_drops);
+ u64_stats_inc(&dstats->tx_drops);
}
+ u64_stats_update_end(&dstats->syncp);
return ret;
}
@@ -653,7 +632,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
skb->dev = dev;
rcu_read_lock();
- nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
+ nexthop = rt6_nexthop(dst_rt6_info(dst), &ipv6_hdr(skb)->daddr);
neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
@@ -860,7 +839,7 @@ static int vrf_rt6_create(struct net_device *dev)
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
- struct rtable *rt = (struct rtable *)dst;
+ struct rtable *rt = dst_rtable(dst);
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
@@ -1195,7 +1174,6 @@ static const struct net_device_ops vrf_netdev_ops = {
.ndo_uninit = vrf_dev_uninit,
.ndo_start_xmit = vrf_xmit,
.ndo_set_mac_address = eth_mac_addr,
- .ndo_get_stats64 = vrf_get_stats64,
.ndo_add_slave = vrf_add_slave,
.ndo_del_slave = vrf_del_slave,
};
@@ -1908,7 +1886,7 @@ unlock:
return res;
}
-static int vrf_shared_table_handler(struct ctl_table *table, int write,
+static int vrf_shared_table_handler(const struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
struct net *net = (struct net *)table->extra1;
@@ -1971,7 +1949,7 @@ static int vrf_netns_init_sysctl(struct net *net, struct netns_vrf *nn_vrf)
static void vrf_netns_exit_sysctl(struct net *net)
{
struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id);
- struct ctl_table *table;
+ const struct ctl_table *table;
table = nn_vrf->ctl_hdr->ctl_table_arg;
unregister_net_sysctl_table(nn_vrf->ctl_hdr);
diff --git a/drivers/net/vsockmon.c b/drivers/net/vsockmon.c
index a1ba5169ed5d..4c260074c091 100644
--- a/drivers/net/vsockmon.c
+++ b/drivers/net/vsockmon.c
@@ -58,7 +58,7 @@ static int vsockmon_change_mtu(struct net_device *dev, int new_mtu)
if (!vsockmon_is_valid_mtu(new_mtu))
return -EINVAL;
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index 3a9148fb1422..ba59e92ab941 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -1446,6 +1446,10 @@ static bool vxlan_snoop(struct net_device *dev,
struct vxlan_fdb *f;
u32 ifindex = 0;
+ /* Ignore packets from invalid src-address */
+ if (!is_valid_ether_addr(src_mac))
+ return true;
+
#if IS_ENABLED(CONFIG_IPV6)
if (src_ip->sa.sa_family == AF_INET6 &&
(ipv6_addr_type(&src_ip->sin6.sin6_addr) & IPV6_ADDR_LINKLOCAL))
@@ -1584,7 +1588,8 @@ static void vxlan_parse_gbp_hdr(struct vxlanhdr *unparsed,
tun_dst = (struct metadata_dst *)skb_dst(skb);
if (tun_dst) {
- tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
+ __set_bit(IP_TUNNEL_VXLAN_OPT_BIT,
+ tun_dst->u.tun_info.key.tun_flags);
tun_dst->u.tun_info.options_len = sizeof(*md);
}
if (gbp->dont_learn)
@@ -1615,10 +1620,6 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;
- /* Ignore packets from invalid src-address */
- if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
- return false;
-
/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
@@ -1721,9 +1722,11 @@ static int vxlan_rcv(struct sock *sk, struct sk_buff *skb)
goto drop;
if (vxlan_collect_metadata(vs)) {
+ IP_TUNNEL_DECLARE_FLAGS(flags) = { };
struct metadata_dst *tun_dst;
- tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), TUNNEL_KEY,
+ __set_bit(IP_TUNNEL_KEY_BIT, flags);
+ tun_dst = udp_tun_rx_dst(skb, vxlan_get_sk_family(vs), flags,
key32_to_tunnel_id(vni), sizeof(*md));
if (!tun_dst)
@@ -2336,7 +2339,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct ip_tunnel_key *pkey;
struct ip_tunnel_key key;
struct vxlan_dev *vxlan = netdev_priv(dev);
- const struct iphdr *old_iph = ip_hdr(skb);
+ const struct iphdr *old_iph;
struct vxlan_metadata _md;
struct vxlan_metadata *md = &_md;
unsigned int pkt_len = skb->len;
@@ -2350,8 +2353,15 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
bool use_cache;
bool udp_sum = false;
bool xnet = !net_eq(vxlan->net, dev_net(vxlan->dev));
+ bool no_eth_encap;
__be32 vni = 0;
+ no_eth_encap = flags & VXLAN_F_GPE && skb->protocol != htons(ETH_P_TEB);
+ if (!skb_vlan_inet_prepare(skb, no_eth_encap))
+ goto drop;
+
+ old_iph = ip_hdr(skb);
+
info = skb_tunnel_info(skb);
use_cache = ip_tunnel_dst_cache_usable(skb, info);
@@ -2426,14 +2436,14 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
vni = tunnel_id_to_key32(info->key.tun_id);
ifindex = 0;
dst_cache = &info->dst_cache;
- if (info->key.tun_flags & TUNNEL_VXLAN_OPT) {
+ if (test_bit(IP_TUNNEL_VXLAN_OPT_BIT, info->key.tun_flags)) {
if (info->options_len < sizeof(*md))
goto drop;
md = ip_tunnel_info_opts(info);
}
ttl = info->key.ttl;
tos = info->key.tos;
- udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
+ udp_sum = test_bit(IP_TUNNEL_CSUM_BIT, info->key.tun_flags);
}
src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->cfg.port_min,
vxlan->cfg.port_max, true);
@@ -2474,7 +2484,8 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
old_iph->frag_off & htons(IP_DF)))
df = htons(IP_DF);
}
- } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) {
+ } else if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT,
+ info->key.tun_flags)) {
df = htons(IP_DF);
}
@@ -2528,7 +2539,7 @@ void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
if (!info) {
- u32 rt6i_flags = ((struct rt6_info *)ndst)->rt6i_flags;
+ u32 rt6i_flags = dst_rt6_info(ndst)->rt6i_flags;
err = encap_bypass_if_local(skb, dev, vxlan, AF_INET6,
dst_port, ifindex, vni,
@@ -3177,7 +3188,7 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
- dev->mtu = new_mtu;
+ WRITE_ONCE(dev->mtu, new_mtu);
return 0;
}
@@ -4565,7 +4576,7 @@ static struct net *vxlan_get_link_net(const struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- return vxlan->net;
+ return READ_ONCE(vxlan->net);
}
static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
index 31ab2136cdf1..67be9857c86c 100644
--- a/drivers/net/wan/Kconfig
+++ b/drivers/net/wan/Kconfig
@@ -180,7 +180,7 @@ config C101
config FARSYNC
tristate "FarSync T-Series support"
- depends on HDLC && PCI
+ depends on HDLC && PCI && HAS_IOPORT
help
Support for the FarSync T-Series X.21 (and V.35/V.24) cards by
FarSite Communications Ltd.
diff --git a/drivers/net/wan/fsl_qmc_hdlc.c b/drivers/net/wan/fsl_qmc_hdlc.c
index f69b1f579a0c..8fcfbde31a1c 100644
--- a/drivers/net/wan/fsl_qmc_hdlc.c
+++ b/drivers/net/wan/fsl_qmc_hdlc.c
@@ -18,6 +18,7 @@
#include <linux/hdlc.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
@@ -37,7 +38,7 @@ struct qmc_hdlc {
struct qmc_chan *qmc_chan;
struct net_device *netdev;
struct framer *framer;
- spinlock_t carrier_lock; /* Protect carrier detection */
+ struct mutex carrier_lock; /* Protect carrier detection */
struct notifier_block nb;
bool is_crc32;
spinlock_t tx_lock; /* Protect tx descriptors */
@@ -60,7 +61,7 @@ static int qmc_hdlc_framer_set_carrier(struct qmc_hdlc *qmc_hdlc)
if (!qmc_hdlc->framer)
return 0;
- guard(spinlock_irqsave)(&qmc_hdlc->carrier_lock);
+ guard(mutex)(&qmc_hdlc->carrier_lock);
ret = framer_get_status(qmc_hdlc->framer, &framer_status);
if (ret) {
@@ -249,6 +250,7 @@ static void qmc_hcld_recv_complete(void *context, size_t length, unsigned int fl
struct qmc_hdlc_desc *desc = context;
struct net_device *netdev;
struct qmc_hdlc *qmc_hdlc;
+ size_t crc_size;
int ret;
netdev = desc->netdev;
@@ -267,15 +269,26 @@ static void qmc_hcld_recv_complete(void *context, size_t length, unsigned int fl
if (flags & QMC_RX_FLAG_HDLC_CRC) /* CRC error */
netdev->stats.rx_crc_errors++;
kfree_skb(desc->skb);
- } else {
- netdev->stats.rx_packets++;
- netdev->stats.rx_bytes += length;
+ goto re_queue;
+ }
- skb_put(desc->skb, length);
- desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
- netif_rx(desc->skb);
+ /* Discard the CRC */
+ crc_size = qmc_hdlc->is_crc32 ? 4 : 2;
+ if (length < crc_size) {
+ netdev->stats.rx_length_errors++;
+ kfree_skb(desc->skb);
+ goto re_queue;
}
+ length -= crc_size;
+
+ netdev->stats.rx_packets++;
+ netdev->stats.rx_bytes += length;
+ skb_put(desc->skb, length);
+ desc->skb->protocol = hdlc_type_trans(desc->skb, netdev);
+ netif_rx(desc->skb);
+
+re_queue:
/* Re-queue a transfer using the same descriptor */
ret = qmc_hdlc_recv_queue(qmc_hdlc, desc, desc->dma_size);
if (ret) {
@@ -706,7 +719,7 @@ static int qmc_hdlc_probe(struct platform_device *pdev)
qmc_hdlc->dev = dev;
spin_lock_init(&qmc_hdlc->tx_lock);
- spin_lock_init(&qmc_hdlc->carrier_lock);
+ mutex_init(&qmc_hdlc->carrier_lock);
qmc_hdlc->qmc_chan = devm_qmc_chan_get_bychild(dev, dev->of_node);
if (IS_ERR(qmc_hdlc->qmc_chan))
@@ -765,15 +778,13 @@ framer_exit:
return ret;
}
-static int qmc_hdlc_remove(struct platform_device *pdev)
+static void qmc_hdlc_remove(struct platform_device *pdev)
{
struct qmc_hdlc *qmc_hdlc = platform_get_drvdata(pdev);
unregister_hdlc_device(qmc_hdlc->netdev);
free_netdev(qmc_hdlc->netdev);
qmc_hdlc_framer_exit(qmc_hdlc);
-
- return 0;
}
static const struct of_device_id qmc_hdlc_id_table[] = {
@@ -788,7 +799,7 @@ static struct platform_driver qmc_hdlc_driver = {
.of_match_table = qmc_hdlc_id_table,
},
.probe = qmc_hdlc_probe,
- .remove = qmc_hdlc_remove,
+ .remove_new = qmc_hdlc_remove,
};
module_platform_driver(qmc_hdlc_driver);
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 0ba714ca5185..4b8528206cc8 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -15,8 +15,8 @@ static void swap_endian(u8 *dst, const u8 *src, u8 bits)
if (bits == 32) {
*(u32 *)dst = be32_to_cpu(*(const __be32 *)src);
} else if (bits == 128) {
- ((u64 *)dst)[0] = be64_to_cpu(((const __be64 *)src)[0]);
- ((u64 *)dst)[1] = be64_to_cpu(((const __be64 *)src)[1]);
+ ((u64 *)dst)[0] = get_unaligned_be64(src);
+ ((u64 *)dst)[1] = get_unaligned_be64(src + 8);
}
}
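
get_unaligned_be64() both byte-swaps and tolerates a source pointer of any alignment, which the old (const __be64 *)src casts did not guarantee on strict-alignment architectures. A minimal illustration:

    #include <asm/unaligned.h>	/* <linux/unaligned.h> on newer trees */

    u8 addr[16];				/* may sit at any byte offset */
    u64 hi = get_unaligned_be64(addr);	/* bytes 0..7, big-endian */
    u64 lo = get_unaligned_be64(addr + 8);	/* bytes 8..15 */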
diff --git a/drivers/net/wireguard/main.c b/drivers/net/wireguard/main.c
index ee4da9ab8013..a00671b58701 100644
--- a/drivers/net/wireguard/main.c
+++ b/drivers/net/wireguard/main.c
@@ -14,7 +14,7 @@
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/genetlink.h>
+#include <net/genetlink.h>
#include <net/rtnetlink.h>
static int __init wg_mod_init(void)
diff --git a/drivers/net/wireguard/queueing.h b/drivers/net/wireguard/queueing.h
index 1ea4f874e367..7eb76724b3ed 100644
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -124,10 +124,10 @@ static inline int wg_cpumask_choose_online(int *stored_cpu, unsigned int id)
*/
static inline int wg_cpumask_next_online(int *last_cpu)
{
- int cpu = cpumask_next(*last_cpu, cpu_online_mask);
+ int cpu = cpumask_next(READ_ONCE(*last_cpu), cpu_online_mask);
if (cpu >= nr_cpu_ids)
cpu = cpumask_first(cpu_online_mask);
- *last_cpu = cpu;
+ WRITE_ONCE(*last_cpu, cpu);
return cpu;
}
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index 0d48e0f4a1ba..26e09c30d596 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -222,7 +222,7 @@ void wg_packet_send_keepalive(struct wg_peer *peer)
{
struct sk_buff *skb;
- if (skb_queue_empty(&peer->staged_packet_queue)) {
+ if (skb_queue_empty_lockless(&peer->staged_packet_queue)) {
skb = alloc_skb(DATA_PACKET_HEAD_ROOM + MESSAGE_MINIMUM_LENGTH,
GFP_ATOMIC);
if (unlikely(!skb))
diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
index e3fd48dd3909..a2d87c3ad196 100644
--- a/drivers/net/wireless/admtek/adm8211.c
+++ b/drivers/net/wireless/admtek/adm8211.c
@@ -1550,7 +1550,7 @@ fail:
return retval;
}
-static void adm8211_stop(struct ieee80211_hw *dev)
+static void adm8211_stop(struct ieee80211_hw *dev, bool suspend)
{
struct adm8211_priv *priv = dev->priv;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 815f8f599f5d..156f3650c006 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1061,7 +1061,7 @@ err:
return error;
}
-static void ar5523_stop(struct ieee80211_hw *hw)
+static void ar5523_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ar5523 *ar = hw->priv;
@@ -1594,6 +1594,20 @@ static int ar5523_probe(struct usb_interface *intf,
struct ar5523 *ar;
int error = -ENOMEM;
+ static const u8 bulk_ep_addr[] = {
+ AR5523_CMD_TX_PIPE | USB_DIR_OUT,
+ AR5523_DATA_TX_PIPE | USB_DIR_OUT,
+ AR5523_CMD_RX_PIPE | USB_DIR_IN,
+ AR5523_DATA_RX_PIPE | USB_DIR_IN,
+ 0};
+
+ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr)) {
+ dev_err(&dev->dev,
+ "Could not find all expected endpoints\n");
+ error = -ENODEV;
+ goto out;
+ }
+
/*
* Load firmware if the device requires it. This will return
 * -ENXIO on success and we'll get called back after the usb
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index f02a308a9ffc..34654f710d8a 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -171,8 +171,10 @@ struct ath_common {
unsigned int clockrate;
spinlock_t cc_lock;
- struct ath_cycle_counters cc_ani;
- struct ath_cycle_counters cc_survey;
+ struct_group(cc,
+ struct ath_cycle_counters cc_ani;
+ struct ath_cycle_counters cc_survey;
+ );
struct ath_regulatory regulatory;
struct ath_regulatory reg_world_copy;
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index e6ea884cafc1..876aed765833 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -45,6 +45,7 @@ config ATH10K_SNOC
depends on ATH10K
depends on ARCH_QCOM || COMPILE_TEST
depends on QCOM_SMEM
+ depends on QCOM_RPROC_COMMON || QCOM_RPROC_COMMON=n
select QCOM_SCM
select QCOM_QMI_HELPERS
help
@@ -67,6 +68,12 @@ config ATH10K_DEBUGFS
If unsure, say Y to make it easier to debug problems.
+config ATH10K_LEDS
+ bool
+ depends on ATH10K
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
+ default y
+
config ATH10K_SPECTRAL
bool "Atheros ath10k spectral scan support"
depends on ATH10K_DEBUGFS
diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile
index 142c777b287f..02bf9b629038 100644
--- a/drivers/net/wireless/ath/ath10k/Makefile
+++ b/drivers/net/wireless/ath/ath10k/Makefile
@@ -19,6 +19,7 @@ ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o
+ath10k_core-$(CONFIG_ATH10K_LEDS) += leds.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
ath10k_core-$(CONFIG_PM) += wow.o
ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o
diff --git a/drivers/net/wireless/ath/ath10k/ahb.c b/drivers/net/wireless/ath/ath10k/ahb.c
index a378bc48b1d2..f0441b3d7dcb 100644
--- a/drivers/net/wireless/ath/ath10k/ahb.c
+++ b/drivers/net/wireless/ath/ath10k/ahb.c
@@ -394,14 +394,14 @@ static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
if (!ath10k_pci_irq_pending(ar))
return IRQ_NONE;
- ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_disable_and_clear_intx_irq(ar);
ath10k_pci_irq_msi_fw_mask(ar);
napi_schedule(&ar->napi);
return IRQ_HANDLED;
}
-static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
+static int ath10k_ahb_request_irq_intx(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
@@ -415,12 +415,12 @@ static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
ar_ahb->irq, ret);
return ret;
}
- ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_INTX;
return 0;
}
-static void ath10k_ahb_release_irq_legacy(struct ath10k *ar)
+static void ath10k_ahb_release_irq_intx(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
@@ -430,7 +430,7 @@ static void ath10k_ahb_release_irq_legacy(struct ath10k *ar)
static void ath10k_ahb_irq_disable(struct ath10k *ar)
{
ath10k_ce_disable_interrupts(ar);
- ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_disable_and_clear_intx_irq(ar);
}
static int ath10k_ahb_resource_init(struct ath10k *ar)
@@ -621,7 +621,7 @@ static int ath10k_ahb_hif_start(struct ath10k *ar)
ath10k_core_napi_enable(ar);
ath10k_ce_enable_interrupts(ar);
- ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_enable_intx_irq(ar);
ath10k_pci_rx_post(ar);
@@ -775,7 +775,7 @@ static int ath10k_ahb_probe(struct platform_device *pdev)
ath10k_pci_init_napi(ar);
- ret = ath10k_ahb_request_irq_legacy(ar);
+ ret = ath10k_ahb_request_irq_intx(ar);
if (ret)
goto err_free_pipes;
@@ -806,7 +806,7 @@ err_halt_device:
ath10k_ahb_clock_disable(ar);
err_free_irq:
- ath10k_ahb_release_irq_legacy(ar);
+ ath10k_ahb_release_irq_intx(ar);
err_free_pipes:
ath10k_pci_release_resource(ar);
@@ -828,7 +828,7 @@ static void ath10k_ahb_remove(struct platform_device *pdev)
ath10k_core_unregister(ar);
ath10k_ahb_irq_disable(ar);
- ath10k_ahb_release_irq_legacy(ar);
+ ath10k_ahb_release_irq_intx(ar);
ath10k_pci_release_resource(ar);
ath10k_ahb_halt_chip(ar);
ath10k_ahb_clock_disable(ar);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 9ce6f49ab261..b3294287bce1 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -27,6 +27,7 @@
#include "testmode.h"
#include "wmi-ops.h"
#include "coredump.h"
+#include "leds.h"
unsigned int ath10k_debug_mask;
EXPORT_SYMBOL(ath10k_debug_mask);
@@ -68,6 +69,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca988x hw2.0",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 1,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
@@ -75,7 +77,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 2116,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
- .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
@@ -109,6 +110,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca988x hw2.0 ubiquiti",
.patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 0,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
@@ -116,7 +118,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 2116,
.fw = {
.dir = QCA988X_HW_2_0_FW_DIR,
- .board = QCA988X_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA988X_BOARD_DATA_SZ,
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
},
@@ -151,6 +152,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9887 hw1.0",
.patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 1,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
@@ -158,7 +160,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 2116,
.fw = {
.dir = QCA9887_HW_1_0_FW_DIR,
- .board = QCA9887_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9887_BOARD_DATA_SZ,
.board_ext_size = QCA9887_BOARD_EXT_DATA_SZ,
},
@@ -193,13 +194,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.2 sdio",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 19,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.cal_data_len = 0,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
- .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
@@ -230,13 +231,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6164 hw2.1",
.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
- .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
@@ -271,13 +272,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw2.1",
.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
- .board = QCA6174_HW_2_1_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
@@ -312,13 +313,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.0",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.cal_data_len = 8124,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
- .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
@@ -353,6 +354,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca6174 hw3.2",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
@@ -360,7 +362,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.fw = {
/* uses same binaries as hw3.0 */
.dir = QCA6174_HW_3_0_FW_DIR,
- .board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
@@ -398,6 +399,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca99x0 hw2.0",
.patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 17,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
.cck_rate_map_rev2 = true,
@@ -409,7 +411,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 12064,
.fw = {
.dir = QCA99X0_HW_2_0_FW_DIR,
- .board = QCA99X0_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
@@ -445,6 +446,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9984/qca9994 hw1.0",
.patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 17,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
@@ -457,8 +459,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 12064,
.fw = {
.dir = QCA9984_HW_1_0_FW_DIR,
- .board = QCA9984_HW_1_0_BOARD_DATA_FILE,
- .eboard = QCA9984_HW_1_0_EBOARD_DATA_FILE,
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
.ext_board_size = QCA99X0_EXT_BOARD_DATA_SZ,
@@ -499,6 +499,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9888 hw2.0",
.patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 17,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
.otp_exe_param = 0x00000700,
.continuous_frag_desc = true,
@@ -510,7 +511,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 12064,
.fw = {
.dir = QCA9888_HW_2_0_FW_DIR,
- .board = QCA9888_HW_2_0_BOARD_DATA_FILE,
.board_size = QCA99X0_BOARD_DATA_SZ,
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
@@ -550,13 +550,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9377 hw1.0",
.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.cal_data_len = 8124,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
- .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
@@ -591,13 +591,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9377 hw1.1",
.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.cal_data_len = 8124,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
- .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
@@ -634,13 +634,13 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca9377 hw1.1 sdio",
.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 19,
+ .led_pin = 0,
.otp_exe_param = 0,
.channel_counters_freq_hz = 88000,
.max_probe_resp_desc_thres = 0,
.cal_data_len = 8124,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
- .board = QCA9377_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
@@ -668,6 +668,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.name = "qca4019 hw1.0",
.patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
+ .led_pin = 0,
.cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH,
.otp_exe_param = 0x0010000,
.continuous_frag_desc = true,
@@ -680,7 +681,6 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.cal_data_len = 12064,
.fw = {
.dir = QCA4019_HW_1_0_FW_DIR,
- .board = QCA4019_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA4019_BOARD_DATA_SZ,
.board_ext_size = QCA4019_BOARD_EXT_DATA_SZ,
},
@@ -714,12 +714,15 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.dev_id = 0,
.bus = ATH10K_BUS_SNOC,
.name = "wcn3990 hw1.0",
+ .led_pin = 0,
.continuous_frag_desc = true,
.tx_chain_mask = 0x7,
.rx_chain_mask = 0x7,
.max_spatial_stream = 4,
.fw = {
.dir = WCN3990_HW_1_0_FW_DIR,
+ .board_size = WCN3990_BOARD_DATA_SZ,
+ .board_ext_size = WCN3990_BOARD_EXT_DATA_SZ,
},
.sw_decrypt_mcast_mgmt = true,
.rx_desc_ops = &wcn3990_rx_desc_ops,
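[Editor's note: every hw_params entry above now carries a .led_pin, and a value of 0 serves as the "no LED wired" sentinel that the new leds code checks before touching WMI GPIO. A minimal illustrative check; the helper name is ours, not the patch's:

static bool example_has_led(const struct ath10k_hw_params *hw)
{
	/* 0 means no LED GPIO on this chip; cf. ath10k_leds_start() */
	return hw->led_pin != 0;
}
]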
@@ -942,11 +945,20 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
if (dir == NULL)
dir = ".";
+ if (ar->board_name) {
+ snprintf(filename, sizeof(filename), "%s/%s/%s",
+ dir, ar->board_name, file);
+ ret = firmware_request_nowarn(&fw, filename, ar->dev);
+ ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n",
+ filename, ret);
+ if (!ret)
+ return fw;
+ }
+
snprintf(filename, sizeof(filename), "%s/%s", dir, file);
ret = firmware_request_nowarn(&fw, filename, ar->dev);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot fw request '%s': %d\n",
filename, ret);
-
if (ret)
return ERR_PTR(ret);
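[Editor's note: with ar->board_name populated from the DT "firmware-name" property (see the snoc.c hunk further down), ath10k_fetch_fw_file() now prefers a per-board subdirectory before falling back to the flat path. A condensed sketch of the resulting lookup order, mirroring the hunk above; the function name is illustrative:

static const struct firmware *example_fetch(struct ath10k *ar,
					    const char *dir, const char *file)
{
	char path[100];
	const struct firmware *fw;
	int ret;

	if (ar->board_name) {
		/* first try <fw dir>/<board_name>/<file> */
		snprintf(path, sizeof(path), "%s/%s/%s",
			 dir, ar->board_name, file);
		ret = firmware_request_nowarn(&fw, path, ar->dev);
		if (!ret)
			return fw;
	}

	/* then fall back to the flat <fw dir>/<file> layout used so far */
	snprintf(path, sizeof(path), "%s/%s", dir, file);
	ret = firmware_request_nowarn(&fw, path, ar->dev);
	return ret ? ERR_PTR(ret) : fw;
}
]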
@@ -1288,11 +1300,6 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type)
char boardname[100];
if (bd_ie_type == ATH10K_BD_IE_BOARD) {
- if (!ar->hw_params.fw.board) {
- ath10k_err(ar, "failed to find board file fw entry\n");
- return -EINVAL;
- }
-
scnprintf(boardname, sizeof(boardname), "board-%s-%s.bin",
ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
@@ -1302,7 +1309,7 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type)
if (IS_ERR(ar->normal_mode_fw.board)) {
fw = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
+ ATH10K_BOARD_DATA_FILE);
ar->normal_mode_fw.board = fw;
}
@@ -1312,13 +1319,8 @@ static int ath10k_core_fetch_board_data_api_1(struct ath10k *ar, int bd_ie_type)
ar->normal_mode_fw.board_data = ar->normal_mode_fw.board->data;
ar->normal_mode_fw.board_len = ar->normal_mode_fw.board->size;
} else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) {
- if (!ar->hw_params.fw.eboard) {
- ath10k_err(ar, "failed to find eboard file fw entry\n");
- return -EINVAL;
- }
-
fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
- ar->hw_params.fw.eboard);
+ ATH10K_EBOARD_DATA_FILE);
ar->normal_mode_fw.ext_board = fw;
if (IS_ERR(ar->normal_mode_fw.ext_board))
return PTR_ERR(ar->normal_mode_fw.ext_board);
@@ -3239,6 +3241,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode,
goto err_hif_stop;
}
+ status = ath10k_leds_start(ar);
+ if (status)
+ goto err_hif_stop;
+
return 0;
err_hif_stop:
@@ -3497,9 +3503,18 @@ static void ath10k_core_register_work(struct work_struct *work)
goto err_spectral_destroy;
}
+ status = ath10k_leds_register(ar);
+ if (status) {
+ ath10k_err(ar, "could not register leds: %d\n",
+ status);
+ goto err_thermal_unregister;
+ }
+
set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags);
return;
+err_thermal_unregister:
+ ath10k_thermal_unregister(ar);
err_spectral_destroy:
ath10k_spectral_destroy(ar);
err_debug_destroy:
@@ -3535,6 +3550,8 @@ void ath10k_core_unregister(struct ath10k *ar)
if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
return;
+ ath10k_leds_unregister(ar);
+
ath10k_thermal_unregister(ar);
/* Stop spectral before unregistering from mac80211 to remove the
* relayfs debugfs file cleanly. Otherwise the parent debugfs tree
@@ -3673,11 +3690,13 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
INIT_WORK(&ar->set_coverage_class_work,
ath10k_core_set_coverage_class_work);
- init_dummy_netdev(&ar->napi_dev);
+ ar->napi_dev = alloc_netdev_dummy(0);
+ if (!ar->napi_dev)
+ goto err_free_tx_complete;
ret = ath10k_coredump_create(ar);
if (ret)
- goto err_free_tx_complete;
+ goto err_free_netdev;
ret = ath10k_debug_create(ar);
if (ret)
@@ -3687,6 +3706,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
err_free_coredump:
ath10k_coredump_destroy(ar);
+err_free_netdev:
+ free_netdev(ar->napi_dev);
err_free_tx_complete:
destroy_workqueue(ar->workqueue_tx_complete);
err_free_aux_wq:
@@ -3708,6 +3729,7 @@ void ath10k_core_destroy(struct ath10k *ar)
destroy_workqueue(ar->workqueue_tx_complete);
+ free_netdev(ar->napi_dev);
ath10k_debug_destroy(ar);
ath10k_coredump_destroy(ar);
ath10k_htt_tx_destroy(&ar->htt);
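[Editor's note: the embedded dummy netdev becomes a heap allocation in this file, so creation can now fail and both the error unwind and ath10k_core_destroy() must free it. A compact sketch of the new lifetime rules, with illustrative function names:

static int example_napi_setup(struct ath10k *ar)
{
	/* may fail, unlike the old init_dummy_netdev() on an embedded struct */
	ar->napi_dev = alloc_netdev_dummy(0);
	if (!ar->napi_dev)
		return -ENOMEM;
	return 0;
}

static void example_napi_teardown(struct ath10k *ar)
{
	free_netdev(ar->napi_dev);	/* needed on both error and destroy paths */
}
]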
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index c110d15528bd..446dca74f06a 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -15,6 +15,7 @@
#include <linux/pci.h>
#include <linux/uuid.h>
#include <linux/time.h>
+#include <linux/leds.h>
#include "htt.h"
#include "htc.h"
@@ -1081,6 +1082,8 @@ struct ath10k {
*/
const struct ath10k_fw_components *running_fw;
+ const char *board_name;
+
const struct firmware *pre_cal_file;
const struct firmware *cal_file;
@@ -1257,6 +1260,13 @@ struct ath10k {
} testmode;
struct {
+ struct gpio_led wifi_led;
+ struct led_classdev cdev;
+ char label[48];
+ u32 gpio_state_pin;
+ } leds;
+
+ struct {
/* protected by data_lock */
u32 rx_crc_err_drop;
u32 fw_crash_counter;
@@ -1269,7 +1279,7 @@ struct ath10k {
struct ath10k_per_peer_tx_stats peer_tx_stats;
/* NAPI */
- struct net_device napi_dev;
+ struct net_device *napi_dev;
struct napi_struct napi;
struct work_struct set_coverage_class_work;
diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
index 394bf3c32abf..0f6de862c3a9 100644
--- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
@@ -439,7 +439,7 @@ ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
}
out:
mutex_unlock(&ar->conf_mutex);
- return count;
+ return ret ?: count;
}
static const struct file_operations fops_peer_debug_trigger = {
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index 93c073091996..442091c6dfd2 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -39,14 +39,12 @@ enum ath10k_bus {
#define QCA988X_HW_2_0_VERSION 0x4100016c
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0"
-#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
/* QCA9887 1.0 definitions */
#define QCA9887_HW_1_0_VERSION 0x4100016d
#define QCA9887_HW_1_0_CHIP_ID_REV 0
#define QCA9887_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9887/hw1.0"
-#define QCA9887_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA9887_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA6174 target BMI version signatures */
@@ -85,11 +83,9 @@ enum qca9377_chip_id_rev {
};
#define QCA6174_HW_2_1_FW_DIR ATH10K_FW_DIR "/QCA6174/hw2.1"
-#define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin"
#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
#define QCA6174_HW_3_0_FW_DIR ATH10K_FW_DIR "/QCA6174/hw3.0"
-#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
/* QCA99X0 1.0 definitions (unsupported) */
@@ -99,7 +95,6 @@ enum qca9377_chip_id_rev {
#define QCA99X0_HW_2_0_DEV_VERSION 0x01000000
#define QCA99X0_HW_2_0_CHIP_ID_REV 0x1
#define QCA99X0_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA99X0/hw2.0"
-#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
/* QCA9984 1.0 defines */
@@ -107,8 +102,6 @@ enum qca9377_chip_id_rev {
#define QCA9984_HW_DEV_TYPE 0xa
#define QCA9984_HW_1_0_CHIP_ID_REV 0x0
#define QCA9984_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9984/hw1.0"
-#define QCA9984_HW_1_0_BOARD_DATA_FILE "board.bin"
-#define QCA9984_HW_1_0_EBOARD_DATA_FILE "eboard.bin"
#define QCA9984_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA9888 2.0 defines */
@@ -116,18 +109,15 @@ enum qca9377_chip_id_rev {
#define QCA9888_HW_DEV_TYPE 0xc
#define QCA9888_HW_2_0_CHIP_ID_REV 0x0
#define QCA9888_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA9888/hw2.0"
-#define QCA9888_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA9888_HW_2_0_PATCH_LOAD_ADDR 0x1234
/* QCA9377 1.0 definitions */
#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
-#define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* QCA4019 1.0 definitions */
#define QCA4019_HW_1_0_DEV_VERSION 0x01000000
#define QCA4019_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA4019/hw1.0"
-#define QCA4019_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA4019_HW_1_0_PATCH_LOAD_ADDR 0x1234
/* WCN3990 1.0 definitions */
@@ -159,7 +149,9 @@ enum qca9377_chip_id_rev {
#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
#define ATH10K_BOARD_MAGIC "QCA-ATH10K-BOARD"
+#define ATH10K_BOARD_DATA_FILE "board.bin"
#define ATH10K_BOARD_API2_FILE "board-2.bin"
+#define ATH10K_EBOARD_DATA_FILE "eboard.bin"
#define REG_DUMP_COUNT_QCA988X 60
@@ -520,6 +512,7 @@ struct ath10k_hw_params {
const char *name;
u32 patch_load_addr;
int uart_pin;
+ int led_pin;
u32 otp_exe_param;
/* Type of hw cycle counter wraparound logic, for more info
@@ -553,9 +546,7 @@ struct ath10k_hw_params {
struct ath10k_hw_params_fw {
const char *dir;
- const char *board;
size_t board_size;
- const char *eboard;
size_t ext_board_size;
size_t board_ext_size;
} fw;
diff --git a/drivers/net/wireless/ath/ath10k/leds.c b/drivers/net/wireless/ath/ath10k/leds.c
new file mode 100644
index 000000000000..9b1d04eb4265
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/leds.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: ISC
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 Sebastian Gottschall <s.gottschall@dd-wrt.com>
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/leds.h>
+
+#include "core.h"
+#include "wmi.h"
+#include "wmi-ops.h"
+
+#include "leds.h"
+
+static int ath10k_leds_set_brightness_blocking(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath10k *ar = container_of(led_cdev, struct ath10k,
+ leds.cdev);
+ struct gpio_led *led = &ar->leds.wifi_led;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON)
+ goto out;
+
+ ar->leds.gpio_state_pin = (brightness != LED_OFF) ^ led->active_low;
+ ath10k_wmi_gpio_output(ar, led->gpio, ar->leds.gpio_state_pin);
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+int ath10k_leds_start(struct ath10k *ar)
+{
+ if (ar->hw_params.led_pin == 0)
+ /* leds not supported */
+ return 0;
+
+	/* Under some circumstances the firmware reconfigures the GPIO
+	 * pin to its default state, so we need to reconfigure it.
+	 * This behaviour has only been seen on QCA9984 and QCA99XX
+	 * devices so far.
+	 */
+ ath10k_wmi_gpio_config(ar, ar->hw_params.led_pin, 0,
+ WMI_GPIO_PULL_NONE, WMI_GPIO_INTTYPE_DISABLE);
+ ath10k_wmi_gpio_output(ar, ar->hw_params.led_pin, 1);
+
+ return 0;
+}
+
+int ath10k_leds_register(struct ath10k *ar)
+{
+ int ret;
+
+ if (ar->hw_params.led_pin == 0)
+ /* leds not supported */
+ return 0;
+
+ snprintf(ar->leds.label, sizeof(ar->leds.label), "ath10k-%s",
+ wiphy_name(ar->hw->wiphy));
+ ar->leds.wifi_led.active_low = 1;
+ ar->leds.wifi_led.gpio = ar->hw_params.led_pin;
+ ar->leds.wifi_led.name = ar->leds.label;
+ ar->leds.wifi_led.default_state = LEDS_GPIO_DEFSTATE_KEEP;
+
+ ar->leds.cdev.name = ar->leds.label;
+ ar->leds.cdev.brightness_set_blocking = ath10k_leds_set_brightness_blocking;
+ ar->leds.cdev.default_trigger = ar->leds.wifi_led.default_trigger;
+
+ ret = led_classdev_register(wiphy_dev(ar->hw->wiphy), &ar->leds.cdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void ath10k_leds_unregister(struct ath10k *ar)
+{
+ if (ar->hw_params.led_pin == 0)
+ /* leds not supported */
+ return;
+
+ led_classdev_unregister(&ar->leds.cdev);
+}
+
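[Editor's note: the brightness callback above is the blocking variant because the GPIO write travels over WMI and may sleep; the active_low flag is folded in with a single XOR. The mapping it computes, in isolation:

/* Illustrative only: LED_OFF with active_low = 1 drives the pin high. */
static u32 example_gpio_level(enum led_brightness b, unsigned int active_low)
{
	return (b != LED_OFF) ^ active_low;
}
]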
diff --git a/drivers/net/wireless/ath/ath10k/leds.h b/drivers/net/wireless/ath/ath10k/leds.h
new file mode 100644
index 000000000000..56325b0875e5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath10k/leds.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: ISC */
+/*
+ * Copyright (c) 2005-2011 Atheros Communications Inc.
+ * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018 Sebastian Gottschall <s.gottschall@dd-wrt.com>
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _LEDS_H_
+#define _LEDS_H_
+
+#include "core.h"
+
+#ifdef CONFIG_ATH10K_LEDS
+void ath10k_leds_unregister(struct ath10k *ar);
+int ath10k_leds_start(struct ath10k *ar);
+int ath10k_leds_register(struct ath10k *ar);
+#else
+static inline void ath10k_leds_unregister(struct ath10k *ar)
+{
+}
+
+static inline int ath10k_leds_start(struct ath10k *ar)
+{
+ return 0;
+}
+
+static inline int ath10k_leds_register(struct ath10k *ar)
+{
+ return 0;
+}
+
+#endif
+#endif /* _LEDS_H_ */
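[Editor's note: the #ifdef stubs keep every call site unconditional; with CONFIG_ATH10K_LEDS disabled the static inlines collapse to no-ops and leds.c is never built. A sketch of how a caller stays ifdef-free:

static int example_core_register_leds(struct ath10k *ar)
{
	/* no #ifdef needed here: the stub returns 0 when LEDS is disabled */
	return ath10k_leds_register(ar);
}
]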
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e322b528baaf..a5da32e87106 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -25,6 +25,7 @@
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"
+#include "leds.h"
/*********/
/* Rates */
@@ -5362,7 +5363,7 @@ err:
return ret;
}
-static void ath10k_stop(struct ieee80211_hw *hw)
+static void ath10k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath10k *ar = hw->priv;
u32 opt;
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 5c34b156b4ff..c52a16f8078f 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -721,7 +721,7 @@ bool ath10k_pci_irq_pending(struct ath10k *ar)
return false;
}
-void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
+void ath10k_pci_disable_and_clear_intx_irq(struct ath10k *ar)
{
/* IMPORTANT: INTR_CLR register has to be set after
* INTR_ENABLE is set to 0, otherwise interrupt can not be
@@ -739,7 +739,7 @@ void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
PCIE_INTR_ENABLE_ADDRESS);
}
-void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
+void ath10k_pci_enable_intx_irq(struct ath10k *ar)
{
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_ENABLE_ADDRESS,
@@ -1935,7 +1935,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
static void ath10k_pci_irq_disable(struct ath10k *ar)
{
ath10k_ce_disable_interrupts(ar);
- ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_disable_and_clear_intx_irq(ar);
ath10k_pci_irq_msi_fw_mask(ar);
}
@@ -1949,7 +1949,7 @@ static void ath10k_pci_irq_sync(struct ath10k *ar)
static void ath10k_pci_irq_enable(struct ath10k *ar)
{
ath10k_ce_enable_interrupts(ar);
- ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_enable_intx_irq(ar);
ath10k_pci_irq_msi_fw_unmask(ar);
}
@@ -3111,11 +3111,11 @@ static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
return IRQ_NONE;
}
- if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY) &&
+ if ((ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX) &&
!ath10k_pci_irq_pending(ar))
return IRQ_NONE;
- ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_disable_and_clear_intx_irq(ar);
ath10k_pci_irq_msi_fw_mask(ar);
napi_schedule(&ar->napi);
@@ -3152,7 +3152,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
napi_schedule(ctx);
goto out;
}
- ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_enable_intx_irq(ar);
ath10k_pci_irq_msi_fw_unmask(ar);
}
@@ -3177,7 +3177,7 @@ static int ath10k_pci_request_irq_msi(struct ath10k *ar)
return 0;
}
-static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
+static int ath10k_pci_request_irq_intx(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ret;
@@ -3199,8 +3199,8 @@ static int ath10k_pci_request_irq(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
switch (ar_pci->oper_irq_mode) {
- case ATH10K_PCI_IRQ_LEGACY:
- return ath10k_pci_request_irq_legacy(ar);
+ case ATH10K_PCI_IRQ_INTX:
+ return ath10k_pci_request_irq_intx(ar);
case ATH10K_PCI_IRQ_MSI:
return ath10k_pci_request_irq_msi(ar);
default:
@@ -3217,7 +3217,7 @@ static void ath10k_pci_free_irq(struct ath10k *ar)
void ath10k_pci_init_napi(struct ath10k *ar)
{
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);
+ netif_napi_add(ar->napi_dev, &ar->napi, ath10k_pci_napi_poll);
}
static int ath10k_pci_init_irq(struct ath10k *ar)
@@ -3232,7 +3232,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
ath10k_pci_irq_mode);
/* Try MSI */
- if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
+ if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_INTX) {
ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_MSI;
ret = pci_enable_msi(ar_pci->pdev);
if (ret == 0)
@@ -3250,7 +3250,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
* For now, fix the race by repeating the write in below
* synchronization checking.
*/
- ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
+ ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_INTX;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
@@ -3258,7 +3258,7 @@ static int ath10k_pci_init_irq(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
+static void ath10k_pci_deinit_irq_intx(struct ath10k *ar)
{
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
0);
@@ -3269,8 +3269,8 @@ static int ath10k_pci_deinit_irq(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
switch (ar_pci->oper_irq_mode) {
- case ATH10K_PCI_IRQ_LEGACY:
- ath10k_pci_deinit_irq_legacy(ar);
+ case ATH10K_PCI_IRQ_INTX:
+ ath10k_pci_deinit_irq_intx(ar);
break;
default:
pci_disable_msi(ar_pci->pdev);
@@ -3307,14 +3307,14 @@ int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (val & FW_IND_INITIALIZED)
break;
- if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_LEGACY)
+ if (ar_pci->oper_irq_mode == ATH10K_PCI_IRQ_INTX)
/* Fix potential race by repeating CORE_BASE writes */
- ath10k_pci_enable_legacy_irq(ar);
+ ath10k_pci_enable_intx_irq(ar);
mdelay(10);
} while (time_before(jiffies, timeout));
- ath10k_pci_disable_and_clear_legacy_irq(ar);
+ ath10k_pci_disable_and_clear_intx_irq(ar);
ath10k_pci_irq_msi_fw_mask(ar);
if (val == 0xffffffff) {
@@ -3826,28 +3826,28 @@ MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API5_FILE);
-MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
/* QCA9887 1.0 firmware files */
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
-MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" QCA9887_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA9887_HW_1_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
/* QCA6174 2.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_FW_API5_FILE);
-MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" QCA6174_HW_2_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_2_1_FW_DIR "/" ATH10K_BOARD_API2_FILE);
/* QCA6174 3.1 firmware files */
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API6_FILE);
-MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
/* QCA9377 1.0 firmware files */
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API6_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
-MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index 27bb4cf2dfea..4c3f536f2ea1 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -101,7 +101,7 @@ struct ath10k_pci_supp_chip {
enum ath10k_pci_irq_mode {
ATH10K_PCI_IRQ_AUTO = 0,
- ATH10K_PCI_IRQ_LEGACY = 1,
+ ATH10K_PCI_IRQ_INTX = 1,
ATH10K_PCI_IRQ_MSI = 2,
};
@@ -243,9 +243,9 @@ int ath10k_pci_init_pipes(struct ath10k *ar);
int ath10k_pci_init_config(struct ath10k *ar);
void ath10k_pci_rx_post(struct ath10k *ar);
void ath10k_pci_flush(struct ath10k *ar);
-void ath10k_pci_enable_legacy_irq(struct ath10k *ar);
+void ath10k_pci_enable_intx_irq(struct ath10k *ar);
bool ath10k_pci_irq_pending(struct ath10k *ar);
-void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar);
+void ath10k_pci_disable_and_clear_intx_irq(struct ath10k *ar);
void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar);
int ath10k_pci_wait_for_target_init(struct ath10k *ar);
int ath10k_pci_setup_resource(struct ath10k *ar);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 38e939f572a9..f1f33af0170a 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -1040,6 +1040,10 @@ static void ath10k_qmi_driver_event_work(struct work_struct *work)
switch (event->type) {
case ATH10K_QMI_EVENT_SERVER_ARRIVE:
ath10k_qmi_event_server_arrive(qmi);
+ if (qmi->no_msa_ready_indicator) {
+ ath10k_info(ar, "qmi not waiting for msa_ready indicator");
+ ath10k_qmi_event_msa_ready(qmi);
+ }
break;
case ATH10K_QMI_EVENT_SERVER_EXIT:
ath10k_qmi_event_server_exit(qmi);
@@ -1048,6 +1052,10 @@ static void ath10k_qmi_driver_event_work(struct work_struct *work)
ath10k_qmi_event_fw_ready_ind(qmi);
break;
case ATH10K_QMI_EVENT_MSA_READY_IND:
+ if (qmi->no_msa_ready_indicator) {
+ ath10k_warn(ar, "qmi unexpected msa_ready indicator");
+ break;
+ }
ath10k_qmi_event_msa_ready(qmi);
break;
default:
@@ -1077,6 +1085,9 @@ int ath10k_qmi_init(struct ath10k *ar, u32 msa_size)
if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm"))
qmi->msa_fixed_perm = true;
+ if (of_property_read_bool(dev->of_node, "qcom,no-msa-ready-indicator"))
+ qmi->no_msa_ready_indicator = true;
+
ret = qmi_handle_init(&qmi->qmi_hdl,
WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN,
&ath10k_qmi_ops, qmi_msg_handler);
diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h
index 89464239fe96..0816eb4e4a18 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.h
+++ b/drivers/net/wireless/ath/ath10k/qmi.h
@@ -107,6 +107,7 @@ struct ath10k_qmi {
char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1];
struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01];
bool msa_fixed_perm;
+ bool no_msa_ready_indicator;
enum ath10k_qmi_state state;
};
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index 0ab5433f6cf6..08a6f36a6be9 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -2532,7 +2532,7 @@ static int ath10k_sdio_probe(struct sdio_func *func,
return -ENOMEM;
}
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll);
+ netif_napi_add(ar->napi_dev, &ar->napi, ath10k_sdio_napi_poll);
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
@@ -2667,29 +2667,10 @@ static struct sdio_driver ath10k_sdio_driver = {
.probe = ath10k_sdio_probe,
.remove = ath10k_sdio_remove,
.drv = {
- .owner = THIS_MODULE,
.pm = ATH10K_SDIO_PM_OPS,
},
};
-
-static int __init ath10k_sdio_init(void)
-{
- int ret;
-
- ret = sdio_register_driver(&ath10k_sdio_driver);
- if (ret)
- pr_err("sdio driver registration failed: %d\n", ret);
-
- return ret;
-}
-
-static void __exit ath10k_sdio_exit(void)
-{
- sdio_unregister_driver(&ath10k_sdio_driver);
-}
-
-module_init(ath10k_sdio_init);
-module_exit(ath10k_sdio_exit);
+module_sdio_driver(ath10k_sdio_driver);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Qualcomm Atheros 802.11ac WLAN SDIO devices");
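[Editor's note: module_sdio_driver() replaces the hand-rolled init/exit pair; it is defined in terms of module_driver(), so the expansion is equivalent to the removed code minus the extra pr_err(). The .owner assignment can go because the SDIO core now records the module owner at registration time. Roughly:

/* approximate expansion of module_sdio_driver(ath10k_sdio_driver) */
module_driver(ath10k_sdio_driver, sdio_register_driver, sdio_unregister_driver);
]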
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index 2c39bad7ebfb..0fe47d51013c 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -935,7 +935,7 @@ static int ath10k_snoc_hif_start(struct ath10k *ar)
bitmap_clear(ar_snoc->pending_ce_irqs, 0, CE_COUNT_MAX);
- dev_set_threaded(&ar->napi_dev, true);
+ dev_set_threaded(ar->napi_dev, true);
ath10k_core_napi_enable(ar);
ath10k_snoc_irq_enable(ar);
ath10k_snoc_rx_post(ar);
@@ -1253,7 +1253,7 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget)
static void ath10k_snoc_init_napi(struct ath10k *ar)
{
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll);
+ netif_napi_add(ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll);
}
static int ath10k_snoc_request_irq(struct ath10k *ar)
@@ -1338,6 +1338,9 @@ static void ath10k_snoc_quirks_init(struct ath10k *ar)
struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
struct device *dev = &ar_snoc->dev->dev;
+ /* ignore errors, keep NULL if there is no property */
+ of_property_read_string(dev->of_node, "firmware-name", &ar->board_name);
+
if (of_property_read_bool(dev->of_node, "qcom,snoc-host-cap-8bit-quirk"))
set_bit(ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK, &ar_snoc->flags);
}
@@ -1632,10 +1635,10 @@ static int ath10k_fw_init(struct ath10k *ar)
ar_snoc->fw.dev = &pdev->dev;
- iommu_dom = iommu_domain_alloc(&platform_bus_type);
- if (!iommu_dom) {
+ iommu_dom = iommu_paging_domain_alloc(ar_snoc->fw.dev);
+ if (IS_ERR(iommu_dom)) {
ath10k_err(ar, "failed to allocate iommu domain\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(iommu_dom);
goto err_unregister;
}
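[Editor's note: the new allocator also changes the error convention: iommu_domain_alloc() returned NULL on failure, while iommu_paging_domain_alloc() returns an ERR_PTR carrying the reason, hence the IS_ERR()/PTR_ERR() pair above. The pattern in isolation, with an illustrative wrapper:

static int example_alloc_domain(struct device *dev, struct iommu_domain **out)
{
	struct iommu_domain *dom;

	dom = iommu_paging_domain_alloc(dev);	/* dev-based, replaces bus-based alloc */
	if (IS_ERR(dom))
		return PTR_ERR(dom);		/* a NULL check would miss the error */

	*out = dom;
	return 0;
}
]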
diff --git a/drivers/net/wireless/ath/ath10k/targaddrs.h b/drivers/net/wireless/ath/ath10k/targaddrs.h
index ec556bb88d65..ba37e6c7ced0 100644
--- a/drivers/net/wireless/ath/ath10k/targaddrs.h
+++ b/drivers/net/wireless/ath/ath10k/targaddrs.h
@@ -491,4 +491,7 @@ struct host_interest {
#define QCA4019_BOARD_DATA_SZ 12064
#define QCA4019_BOARD_EXT_DATA_SZ 0
+#define WCN3990_BOARD_DATA_SZ 26328
+#define WCN3990_BOARD_EXT_DATA_SZ 0
+
#endif /* __TARGADDRS_H__ */
diff --git a/drivers/net/wireless/ath/ath10k/thermal.c b/drivers/net/wireless/ath/ath10k/thermal.c
index 31c8d7fbb095..8b15ec07b107 100644
--- a/drivers/net/wireless/ath/ath10k/thermal.c
+++ b/drivers/net/wireless/ath/ath10k/thermal.c
@@ -100,7 +100,7 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
spin_unlock_bh(&ar->data_lock);
/* display in millidegree celsius */
- ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
+ ret = sysfs_emit(buf, "%d\n", temperature * 1000);
out:
mutex_unlock(&ar->conf_mutex);
return ret;
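[Editor's note: sysfs_emit() is the preferred helper for sysfs/hwmon show() callbacks: it bounds the write to PAGE_SIZE and sanity-checks the page-aligned buffer the sysfs core hands in, which a bare snprintf() does not. Shape of a converted callback; the value here is a placeholder:

static ssize_t example_temp_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	int temperature = 42;	/* placeholder; the driver reads this from fw */

	return sysfs_emit(buf, "%d\n", temperature * 1000);	/* millidegree C */
}
]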
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index 64e7a767d963..68b78ca17eaa 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -55,8 +55,8 @@ DECLARE_EVENT_CLASS(ath10k_log_event,
__vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
@@ -92,8 +92,8 @@ TRACE_EVENT(ath10k_log_dbg,
__vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->level = level;
__assign_vstr(msg, vaf->fmt, vaf->va);
),
@@ -121,10 +121,10 @@ TRACE_EVENT(ath10k_log_dbg_dump,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
- __assign_str(msg, msg);
- __assign_str(prefix, prefix);
+ __assign_str(device);
+ __assign_str(driver);
+ __assign_str(msg);
+ __assign_str(prefix);
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
@@ -152,8 +152,8 @@ TRACE_EVENT(ath10k_wmi_cmd,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->id = id;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
@@ -182,8 +182,8 @@ TRACE_EVENT(ath10k_wmi_event,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->id = id;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
@@ -211,8 +211,8 @@ TRACE_EVENT(ath10k_htt_stats,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
@@ -239,8 +239,8 @@ TRACE_EVENT(ath10k_wmi_dbglog,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->hw_type = ar->hw_rev;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
@@ -269,8 +269,8 @@ TRACE_EVENT(ath10k_htt_pktlog,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->hw_type = ar->hw_rev;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(pktlog), buf, buf_len);
@@ -301,8 +301,8 @@ TRACE_EVENT(ath10k_htt_tx,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->msdu_id = msdu_id;
__entry->msdu_len = msdu_len;
__entry->vdev_id = vdev_id;
@@ -332,8 +332,8 @@ TRACE_EVENT(ath10k_txrx_tx_unref,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->msdu_id = msdu_id;
),
@@ -358,8 +358,8 @@ DECLARE_EVENT_CLASS(ath10k_hdr_event,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->len = ath10k_frm_hdr_len(data, len);
memcpy(__get_dynamic_array(data), data, __entry->len);
),
@@ -386,8 +386,8 @@ DECLARE_EVENT_CLASS(ath10k_payload_event,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->len = len - ath10k_frm_hdr_len(data, len);
memcpy(__get_dynamic_array(payload),
data + ath10k_frm_hdr_len(data, len), __entry->len);
@@ -435,8 +435,8 @@ TRACE_EVENT(ath10k_htt_rx_desc,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->hw_type = ar->hw_rev;
__entry->len = len;
memcpy(__get_dynamic_array(rxdesc), data, len);
@@ -472,8 +472,8 @@ TRACE_EVENT(ath10k_wmi_diag_container,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->type = type;
__entry->timestamp = timestamp;
__entry->code = code;
@@ -505,8 +505,8 @@ TRACE_EVENT(ath10k_wmi_diag,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->dev));
- __assign_str(driver, dev_driver_string(ar->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->len = len;
memcpy(__get_dynamic_array(data), data, len);
),
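[Editor's note: these hunks track the treewide tracepoint change in which __assign_str() lost its second argument: the source expression is already recorded by the matching __string()/__vstring() in TP_STRUCT__entry, so repeating it in TP_fast_assign was redundant. The resulting pattern, as an excerpt-style sketch (only the two macro blocks shown):

TP_STRUCT__entry(
	__string(device, dev_name(ar->dev))	/* source named once, here */
),
TP_fast_assign(
	__assign_str(device);			/* copies from the __string() above */
),
]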
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index 3c482baacec1..3b51b7f52130 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -1014,7 +1014,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
return -ENOMEM;
}
- netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_usb_napi_poll);
+ netif_napi_add(ar->napi_dev, &ar->napi, ath10k_usb_napi_poll);
usb_get_dev(dev);
vendor_id = le16_to_cpu(dev->descriptor.idVendor);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index aa57d807491c..f3f6b5954b27 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -226,7 +226,10 @@ struct wmi_ops {
const struct wmi_bb_timing_cfg_arg *arg);
struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar,
const struct wmi_per_peer_per_tid_cfg_arg *arg);
+ struct sk_buff *(*gen_gpio_config)(struct ath10k *ar, u32 gpio_num,
+ u32 input, u32 pull_type, u32 intr_mode);
+ struct sk_buff *(*gen_gpio_output)(struct ath10k *ar, u32 gpio_num, u32 set);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
@@ -1122,6 +1125,35 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar,
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}
+static inline int ath10k_wmi_gpio_config(struct ath10k *ar, u32 gpio_num,
+ u32 input, u32 pull_type, u32 intr_mode)
+{
+ struct sk_buff *skb;
+
+ if (!ar->wmi.ops->gen_gpio_config)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_gpio_config(ar, gpio_num, input, pull_type, intr_mode);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_config_cmdid);
+}
+
+static inline int ath10k_wmi_gpio_output(struct ath10k *ar, u32 gpio_num, u32 set)
+{
+ struct sk_buff *skb;
+
+	if (!ar->wmi.ops->gen_gpio_output)
+ return -EOPNOTSUPP;
+
+ skb = ar->wmi.ops->gen_gpio_output(ar, gpio_num, set);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_output_cmdid);
+}
+
static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level)
{
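[Editor's note: the two wrappers follow the established wmi-ops shape: bail out with -EOPNOTSUPP when the firmware family lacks the op, build the command skb, then send it with the matching cmdid. A condensed sketch of the call sequence the LED code uses, under the assumption that unsupported ops should be tolerated:

static int example_led_init(struct ath10k *ar)
{
	int ret;

	/* configure the pin as output (input = 0), then drive it high */
	ret = ath10k_wmi_gpio_config(ar, ar->hw_params.led_pin, 0,
				     WMI_GPIO_PULL_NONE,
				     WMI_GPIO_INTTYPE_DISABLE);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	return ath10k_wmi_gpio_output(ar, ar->hw_params.led_pin, 1);
}
]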
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index aed97fd121ba..dbaf26d6a7a6 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -4606,6 +4606,8 @@ static const struct wmi_ops wmi_tlv_ops = {
.gen_echo = ath10k_wmi_tlv_op_gen_echo,
.gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf,
.gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable,
+ /* .gen_gpio_config not implemented */
+ /* .gen_gpio_output not implemented */
};
static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = {
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 2e9661f4bea8..fe2344598364 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1763,12 +1763,32 @@ void ath10k_wmi_put_wmi_channel(struct ath10k *ar, struct wmi_channel *ch,
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
- unsigned long time_left;
+ unsigned long time_left, i;
time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
WMI_SERVICE_READY_TIMEOUT_HZ);
- if (!time_left)
- return -ETIMEDOUT;
+ if (!time_left) {
+		/* Sometimes the PCI HIF doesn't receive an interrupt
+		 * for the service ready message even though the buffer
+		 * was completed. A PCIe sniffer shows that it's because
+		 * the corresponding CE ring doesn't fire it. Work around
+		 * this by polling the CE rings once.
+ */
+ ath10k_warn(ar, "failed to receive service ready completion, polling..\n");
+
+ for (i = 0; i < CE_COUNT; i++)
+ ath10k_hif_send_complete_check(ar, i, 1);
+
+ time_left = wait_for_completion_timeout(&ar->wmi.service_ready,
+ WMI_SERVICE_READY_TIMEOUT_HZ);
+ if (!time_left) {
+ ath10k_warn(ar, "polling timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ ath10k_warn(ar, "service ready completion received, continuing normally\n");
+ }
+
return 0;
}
@@ -7473,6 +7493,49 @@ ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id,
return skb;
}
+static struct sk_buff *ath10k_wmi_op_gen_gpio_config(struct ath10k *ar,
+ u32 gpio_num, u32 input,
+ u32 pull_type, u32 intr_mode)
+{
+ struct wmi_gpio_config_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_gpio_config_cmd *)skb->data;
+ cmd->pull_type = __cpu_to_le32(pull_type);
+ cmd->gpio_num = __cpu_to_le32(gpio_num);
+ cmd->input = __cpu_to_le32(input);
+ cmd->intr_mode = __cpu_to_le32(intr_mode);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_config gpio_num 0x%08x input 0x%08x pull_type 0x%08x intr_mode 0x%08x\n",
+ gpio_num, input, pull_type, intr_mode);
+
+ return skb;
+}
+
+static struct sk_buff *ath10k_wmi_op_gen_gpio_output(struct ath10k *ar,
+ u32 gpio_num, u32 set)
+{
+ struct wmi_gpio_output_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_gpio_output_cmd *)skb->data;
+ cmd->gpio_num = __cpu_to_le32(gpio_num);
+ cmd->set = __cpu_to_le32(set);
+
+ ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_output gpio_num 0x%08x set 0x%08x\n",
+ gpio_num, set);
+
+ return skb;
+}
+
static struct sk_buff *
ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id,
enum wmi_sta_ps_mode psmode)
@@ -9137,6 +9200,9 @@ static const struct wmi_ops wmi_ops = {
.fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_op_gen_echo,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
+
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -9207,6 +9273,8 @@ static const struct wmi_ops wmi_10_1_ops = {
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
.gen_echo = ath10k_wmi_op_gen_echo,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -9279,6 +9347,8 @@ static const struct wmi_ops wmi_10_2_ops = {
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
.get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
/* .gen_pdev_enable_adaptive_cca not implemented */
};
@@ -9350,6 +9420,8 @@ static const struct wmi_ops wmi_10_2_4_ops = {
ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
.get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype,
.gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
@@ -9431,6 +9503,8 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info,
.gen_echo = ath10k_wmi_op_gen_echo,
.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
+ .gen_gpio_config = ath10k_wmi_op_gen_gpio_config,
+ .gen_gpio_output = ath10k_wmi_op_gen_gpio_output,
};
int ath10k_wmi_attach(struct ath10k *ar)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 2379501225a4..0faefc0a9a40 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -3034,6 +3034,41 @@ enum wmi_10_4_feature_mask {
};
+/* WMI_GPIO_CONFIG_CMDID */
+enum {
+ WMI_GPIO_PULL_NONE,
+ WMI_GPIO_PULL_UP,
+ WMI_GPIO_PULL_DOWN,
+};
+
+enum {
+ WMI_GPIO_INTTYPE_DISABLE,
+ WMI_GPIO_INTTYPE_RISING_EDGE,
+ WMI_GPIO_INTTYPE_FALLING_EDGE,
+ WMI_GPIO_INTTYPE_BOTH_EDGE,
+ WMI_GPIO_INTTYPE_LEVEL_LOW,
+ WMI_GPIO_INTTYPE_LEVEL_HIGH
+};
+
+/* WMI_GPIO_CONFIG_CMDID */
+struct wmi_gpio_config_cmd {
+ __le32 gpio_num; /* GPIO number to be setup */
+	__le32 input; /* 0 - Output / 1 - Input */
+ __le32 pull_type; /* Pull type defined above */
+ __le32 intr_mode; /* Interrupt mode defined above (Input) */
+} __packed;
+
+/* WMI_GPIO_OUTPUT_CMDID */
+struct wmi_gpio_output_cmd {
+ __le32 gpio_num; /* GPIO number to be setup */
+	__le32 set; /* Set the GPIO pin */
+} __packed;
+
+/* WMI_GPIO_INPUT_EVENTID */
+struct wmi_gpio_input_event {
+ __le32 gpio_num; /* GPIO number which changed state */
+} __packed;
+
struct wmi_ext_resource_config_10_4_cmd {
/* contains enum wmi_host_platform_type */
__le32 host_platform_config;
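[Editor's note: all WMI command fields are little-endian on the wire, so the host fills them through __cpu_to_le32() exactly as the wmi.c generators above do. Building the two-word output command by hand, purely for illustration (the pin number is an example value):

static const struct wmi_gpio_output_cmd example_cmd = {
	.gpio_num = __cpu_to_le32(17),	/* e.g. the QCA99X0 LED pin */
	.set      = __cpu_to_le32(1),	/* drive the pin high */
};
]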
diff --git a/drivers/net/wireless/ath/ath11k/Kconfig b/drivers/net/wireless/ath/ath11k/Kconfig
index 27f0523bf967..2e935d381b6b 100644
--- a/drivers/net/wireless/ath/ath11k/Kconfig
+++ b/drivers/net/wireless/ath/ath11k/Kconfig
@@ -24,6 +24,7 @@ config ATH11K_PCI
select MHI_BUS
select QRTR
select QRTR_MHI
+ select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
help
This module adds support for PCIE bus
diff --git a/drivers/net/wireless/ath/ath11k/Makefile b/drivers/net/wireless/ath/ath11k/Makefile
index 2c94d50ae36f..43d2d8ddcdc0 100644
--- a/drivers/net/wireless/ath/ath11k/Makefile
+++ b/drivers/net/wireless/ath/ath11k/Makefile
@@ -18,7 +18,8 @@ ath11k-y += core.o \
dbring.o \
hw.o \
pcic.o \
- fw.o
+ fw.o \
+ p2p.o
ath11k-$(CONFIG_ATH11K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
ath11k-$(CONFIG_NL80211_TESTMODE) += testmode.o
diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c
index 7c0a23517949..634d385fd9ad 100644
--- a/drivers/net/wireless/ath/ath11k/ahb.c
+++ b/drivers/net/wireless/ath/ath11k/ahb.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/module.h>
@@ -413,7 +413,7 @@ static int ath11k_ahb_power_up(struct ath11k_base *ab)
return ret;
}
-static void ath11k_ahb_power_down(struct ath11k_base *ab)
+static void ath11k_ahb_power_down(struct ath11k_base *ab, bool is_suspend)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
@@ -442,6 +442,7 @@ static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
+ free_netdev(irq_grp->napi_ndev);
}
}
@@ -533,8 +534,12 @@ static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab)
irq_grp->ab = ab;
irq_grp->grp_id = i;
- init_dummy_netdev(&irq_grp->napi_ndev);
- netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev)
+ return -ENOMEM;
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
ath11k_ahb_ext_grp_napi_poll);
for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) {
@@ -949,6 +954,36 @@ static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab)
return 0;
}
+static int ath11k_ahb_ce_remap(struct ath11k_base *ab)
+{
+ const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
+ struct platform_device *pdev = ab->pdev;
+
+ if (!ce_remap) {
+ /* no separate CE register space */
+ ab->mem_ce = ab->mem;
+ return 0;
+ }
+
+ /* ce register space is moved out of wcss unlike ipq8074 or ipq6018
+ * and the space is not contiguous, hence remapping the CE registers
+ * to a new space for accessing them.
+ */
+ ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
+ if (!ab->mem_ce) {
+ dev_err(&pdev->dev, "ce ioremap error\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void ath11k_ahb_ce_unmap(struct ath11k_base *ab)
+{
+ if (ab->hw_params.ce_remap)
+ iounmap(ab->mem_ce);
+}
+
static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
{
struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab);
@@ -996,10 +1031,10 @@ static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab)
ab_ahb->fw.dev = &pdev->dev;
- iommu_dom = iommu_domain_alloc(&platform_bus_type);
- if (!iommu_dom) {
+ iommu_dom = iommu_paging_domain_alloc(ab_ahb->fw.dev);
+ if (IS_ERR(iommu_dom)) {
ath11k_err(ab, "failed to allocate iommu domain\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(iommu_dom);
goto err_unregister;
}
@@ -1141,25 +1176,13 @@ static int ath11k_ahb_probe(struct platform_device *pdev)
if (ret)
goto err_core_free;
- ab->mem_ce = ab->mem;
-
- if (ab->hw_params.ce_remap) {
- const struct ce_remap *ce_remap = ab->hw_params.ce_remap;
- /* ce register space is moved out of wcss unlike ipq8074 or ipq6018
- * and the space is not contiguous, hence remapping the CE registers
- * to a new space for accessing them.
- */
- ab->mem_ce = ioremap(ce_remap->base, ce_remap->size);
- if (!ab->mem_ce) {
- dev_err(&pdev->dev, "ce ioremap error\n");
- ret = -ENOMEM;
- goto err_core_free;
- }
- }
+ ret = ath11k_ahb_ce_remap(ab);
+ if (ret)
+ goto err_core_free;
ret = ath11k_ahb_fw_resources_init(ab);
if (ret)
- goto err_core_free;
+ goto err_ce_unmap;
ret = ath11k_ahb_setup_smp2p_handle(ab);
if (ret)
@@ -1211,6 +1234,9 @@ err_release_smp2p_handle:
err_fw_deinit:
ath11k_ahb_fw_resource_deinit(ab);
+err_ce_unmap:
+ ath11k_ahb_ce_unmap(ab);
+
err_core_free:
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
@@ -1243,9 +1269,7 @@ static void ath11k_ahb_free_resources(struct ath11k_base *ab)
ath11k_ahb_release_smp2p_handle(ab);
ath11k_ahb_fw_resource_deinit(ab);
ath11k_ce_free_pipes(ab);
-
- if (ab->hw_params.ce_remap)
- iounmap(ab->mem_ce);
+ ath11k_ahb_ce_unmap(ab);
ath11k_core_free(ab);
platform_set_drvdata(pdev, NULL);
@@ -1256,7 +1280,7 @@ static void ath11k_ahb_remove(struct platform_device *pdev)
struct ath11k_base *ab = platform_get_drvdata(pdev);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_ahb_power_down(ab);
+ ath11k_ahb_power_down(ab, false);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
goto qmi_fail;
diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h
index 69946fc70077..bcde2fcf02cf 100644
--- a/drivers/net/wireless/ath/ath11k/ce.h
+++ b/drivers/net/wireless/ath/ath11k/ce.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_CE_H
@@ -146,7 +146,7 @@ struct ath11k_ce_ring {
/* Host address space */
void *base_addr_owner_space_unaligned;
/* CE address space */
- u32 base_addr_ce_space_unaligned;
+ dma_addr_t base_addr_ce_space_unaligned;
/* Actual start of descriptors.
* Aligned to descriptor-size boundary.
@@ -156,7 +156,7 @@ struct ath11k_ce_ring {
void *base_addr_owner_space;
/* CE address space */
- u32 base_addr_ce_space;
+ dma_addr_t base_addr_ce_space;
/* HAL ring id */
u32 hal_ring_id;
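[Editor's note: widening the CE ring base addresses matters because they come straight from the DMA API: dma_alloc_coherent() returns the device-visible address through a dma_addr_t, which is 64-bit on many of these platforms, so storing it in a u32 silently truncated. The allocation shape, as a sketch:

static void *example_alloc_ring(struct device *dev, size_t sz,
				dma_addr_t *ce_space)	/* was u32: truncation risk */
{
	/* *ce_space receives the bus address the CE hardware will use */
	return dma_alloc_coherent(dev, sz, ce_space, GFP_KERNEL);
}
]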
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index c78bce19bd75..03187df26000 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -62,7 +62,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = false,
.rxdma1_enable = true,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
@@ -148,7 +148,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = false,
.rxdma1_enable = true,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
@@ -232,7 +232,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -247,7 +247,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.full_monitor_mode = false,
.supports_shadow_regs = true,
@@ -317,7 +320,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.svc_to_ce_map_len = 18,
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.rxdma1_enable = true,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
@@ -401,7 +404,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -416,7 +419,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.full_monitor_mode = false,
.supports_shadow_regs = true,
@@ -486,7 +492,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.svc_to_ce_map_len = 14,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -501,7 +507,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.supports_shadow_regs = true,
.idle_ps = true,
@@ -571,7 +580,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -595,7 +604,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.coldboot_cal_ftm = true,
.cbcal_restart_fw = false,
.fw_mem_mode = 0,
- .num_vdevs = 16 + 1,
+ .num_vdevs = 3,
.num_peers = 512,
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
@@ -664,7 +673,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq5018,
.ce_remap = &ath11k_ce_remap_ipq5018,
.rxdma1_enable = true,
- .num_rxmda_per_pdev = RXDMA_PER_PDEV_5018,
+ .num_rxdma_per_pdev = RXDMA_PER_PDEV_5018,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
@@ -735,7 +744,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
.single_pdev_only = true,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
@@ -750,7 +759,10 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
},
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP),
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.supports_monitor = false,
.full_monitor_mode = false,
.supports_shadow_regs = true,
@@ -894,12 +906,6 @@ int ath11k_core_suspend(struct ath11k_base *ab)
return ret;
}
- ret = ath11k_wow_enable(ab);
- if (ret) {
- ath11k_warn(ab, "failed to enable wow during suspend: %d\n", ret);
- return ret;
- }
-
ret = ath11k_dp_rx_pktlog_stop(ab, false);
if (ret) {
ath11k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
@@ -910,29 +916,85 @@ int ath11k_core_suspend(struct ath11k_base *ab)
ath11k_ce_stop_shadow_timers(ab);
ath11k_dp_stop_shadow_timers(ab);
+ /* PM framework skips suspend_late/resume_early callbacks
+ * if other devices report errors in their suspend callbacks.
+	 * However, ath11k_core_resume() would still be called because
+	 * we return success here, so the kernel puts us on the
+	 * dpm_suspended_list. Since we won't go through a power down/up
+	 * cycle, there is no chance to call complete(&ab->restart_completed)
+	 * in ath11k_core_restart(), making ath11k_core_resume() time out.
+	 * So call it here to avoid this issue. This also works when no
+	 * error happens and suspend_late/resume_early do get called,
+	 * because the completion is reinitialized in ath11k_core_resume_early().
+ */
+ complete(&ab->restart_completed);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath11k_core_suspend);
+
+int ath11k_core_suspend_late(struct ath11k_base *ab)
+{
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ if (!ab->hw_params.supports_suspend)
+ return -EOPNOTSUPP;
+
+ /* so far single_pdev_only chips have supports_suspend as true
+ * and only the first pdev is valid.
+ */
+ pdev = ath11k_core_get_single_pdev(ab);
+ ar = pdev->ar;
+ if (!ar || ar->state != ATH11K_STATE_OFF)
+ return 0;
+
ath11k_hif_irq_disable(ab);
ath11k_hif_ce_irq_disable(ab);
- ret = ath11k_hif_suspend(ab);
- if (ret) {
- ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
- return ret;
- }
+ ath11k_hif_power_down(ab, true);
return 0;
}
-EXPORT_SYMBOL(ath11k_core_suspend);
+EXPORT_SYMBOL(ath11k_core_suspend_late);
+
+int ath11k_core_resume_early(struct ath11k_base *ab)
+{
+ int ret;
+ struct ath11k_pdev *pdev;
+ struct ath11k *ar;
+
+ if (!ab->hw_params.supports_suspend)
+ return -EOPNOTSUPP;
+
+ /* so far single_pdev_only chips have supports_suspend as true
+ * and only the first pdev is valid.
+ */
+ pdev = ath11k_core_get_single_pdev(ab);
+ ar = pdev->ar;
+ if (!ar || ar->state != ATH11K_STATE_OFF)
+ return 0;
+
+ reinit_completion(&ab->restart_completed);
+ ret = ath11k_hif_power_up(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to power up hif during resume: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(ath11k_core_resume_early);
int ath11k_core_resume(struct ath11k_base *ab)
{
int ret;
struct ath11k_pdev *pdev;
struct ath11k *ar;
+ long time_left;
if (!ab->hw_params.supports_suspend)
return -EOPNOTSUPP;
- /* so far signle_pdev_only chips have supports_suspend as true
+ /* so far single_pdev_only chips have supports_suspend as true
* and only the first pdev is valid.
*/
pdev = ath11k_core_get_single_pdev(ab);
@@ -940,29 +1002,29 @@ int ath11k_core_resume(struct ath11k_base *ab)
if (!ar || ar->state != ATH11K_STATE_OFF)
return 0;
- ret = ath11k_hif_resume(ab);
- if (ret) {
- ath11k_warn(ab, "failed to resume hif during resume: %d\n", ret);
- return ret;
+ time_left = wait_for_completion_timeout(&ab->restart_completed,
+ ATH11K_RESET_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ab, "timeout while waiting for restart complete");
+ return -ETIMEDOUT;
}
- ath11k_hif_ce_irq_enable(ab);
- ath11k_hif_irq_enable(ab);
+ if (ab->hw_params.current_cc_support &&
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
+ ret = ath11k_reg_set_cc(ar);
+ if (ret) {
+ ath11k_warn(ab, "failed to set country code during resume: %d\n",
+ ret);
+ return ret;
+ }
+ }
ret = ath11k_dp_rx_pktlog_start(ab);
- if (ret) {
+ if (ret)
ath11k_warn(ab, "failed to start rx pktlog during resume: %d\n",
ret);
- return ret;
- }
- ret = ath11k_wow_wakeup(ab);
- if (ret) {
- ath11k_warn(ab, "failed to wakeup wow during resume: %d\n", ret);
- return ret;
- }
-
- return 0;
+ return ret;
}
EXPORT_SYMBOL(ath11k_core_resume);
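The suspend path is now split across the four dev_pm_ops stages: suspend() quiesces the data path and completes restart_completed, suspend_late() powers the bus down, resume_early() powers it back up and re-arms the completion, and resume() waits for the firmware restart to finish. A minimal sketch of the expected bus-glue wiring (function names are illustrative and -EOPNOTSUPP handling is abbreviated):

/* Sketch only: how the four core hooks line up with dev_pm_ops */
static int ath11k_pci_pm_suspend(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);

	return ath11k_core_suspend(ab);		/* quiesce, complete()    */
}

static int ath11k_pci_pm_suspend_late(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);

	return ath11k_core_suspend_late(ab);	/* power the bus down     */
}

static int ath11k_pci_pm_resume_early(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);

	return ath11k_core_resume_early(ab);	/* power up, FW restarts  */
}

static int ath11k_pci_pm_resume(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);

	return ath11k_core_resume(ab);		/* wait restart_completed */
}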
@@ -1749,7 +1811,7 @@ static int ath11k_core_start(struct ath11k_base *ab)
}
/* put hardware to DBS mode */
- if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1) {
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1) {
ret = ath11k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS);
if (ret) {
ath11k_err(ab, "failed to send dbs mode: %d\n", ret);
@@ -1926,23 +1988,20 @@ static void ath11k_update_11d(struct work_struct *work)
struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work);
struct ath11k *ar;
struct ath11k_pdev *pdev;
- struct wmi_set_current_country_params set_current_param = {};
int ret, i;
- spin_lock_bh(&ab->base_lock);
- memcpy(&set_current_param.alpha2, &ab->new_alpha2, 2);
- spin_unlock_bh(&ab->base_lock);
-
- ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c\n",
- set_current_param.alpha2[0],
- set_current_param.alpha2[1]);
-
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
- memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
- ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ spin_lock_bh(&ab->base_lock);
+ memcpy(&ar->alpha2, &ab->new_alpha2, 2);
+ spin_unlock_bh(&ab->base_lock);
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c for pdev %d\n",
+ ar->alpha2[0], ar->alpha2[1], i);
+
+ ret = ath11k_reg_set_cc(ar);
if (ret)
ath11k_warn(ar->ab,
"pdev id %d failed set current country code: %d\n",
@@ -2060,6 +2119,8 @@ static void ath11k_core_restart(struct work_struct *work)
if (!ab->is_reset)
ath11k_core_post_reconfigure_recovery(ab);
+
+ complete(&ab->restart_completed);
}
static void ath11k_core_reset(struct work_struct *work)
@@ -2129,7 +2190,7 @@ static void ath11k_core_reset(struct work_struct *work)
ath11k_hif_irq_disable(ab);
ath11k_hif_ce_irq_disable(ab);
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "reset started\n");
@@ -2202,7 +2263,7 @@ void ath11k_core_deinit(struct ath11k_base *ab)
mutex_unlock(&ab->core_lock);
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_mac_destroy(ab);
ath11k_core_soc_destroy(ab);
ath11k_fw_destroy(ab);
@@ -2255,6 +2316,7 @@ struct ath11k_base *ath11k_core_alloc(struct device *dev, size_t priv_size,
timer_setup(&ab->rx_replenish_retry, ath11k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
init_completion(&ab->wow.wakeup_completed);
+ init_completion(&ab->restart_completed);
ab->dev = dev;
ab->hif.bus = bus;
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index b3fb74a226fb..df24f0e409af 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_CORE_H
@@ -174,7 +174,7 @@ struct ath11k_ext_irq_grp {
u64 timestamp;
bool napi_enabled;
struct napi_struct napi;
- struct net_device napi_ndev;
+ struct net_device *napi_ndev;
};
enum ath11k_smbios_cc_type {
@@ -330,6 +330,9 @@ struct ath11k_chan_power_info {
s8 tx_power;
};
+/* ath11k supports at most 160 MHz of bandwidth, i.e. 8 subchannels */
+#define ATH11K_NUM_PWR_LEVELS 8
+
/**
* struct ath11k_reg_tpc_power_info - regulatory TPC power info
* @is_psd_power: is PSD power or not
@@ -346,10 +349,10 @@ struct ath11k_reg_tpc_power_info {
u8 eirp_power;
enum wmi_reg_6ghz_ap_type ap_power_type;
u8 num_pwr_levels;
- u8 reg_max[IEEE80211_MAX_NUM_PWR_LEVEL];
+ u8 reg_max[ATH11K_NUM_PWR_LEVELS];
u8 ap_constraint_power;
- s8 tpe[IEEE80211_MAX_NUM_PWR_LEVEL];
- struct ath11k_chan_power_info chan_power_info[IEEE80211_MAX_NUM_PWR_LEVEL];
+ s8 tpe[ATH11K_NUM_PWR_LEVELS];
+ struct ath11k_chan_power_info chan_power_info[ATH11K_NUM_PWR_LEVELS];
};
struct ath11k_vif {
@@ -1033,6 +1036,8 @@ struct ath11k_base {
DECLARE_BITMAP(fw_features, ATH11K_FW_FEATURE_COUNT);
} fw;
+ struct completion restart_completed;
+
#ifdef CONFIG_NL80211_TESTMODE
struct {
u32 data_pos;
@@ -1232,8 +1237,10 @@ void ath11k_core_free_bdf(struct ath11k_base *ab, struct ath11k_board_data *bd);
int ath11k_core_check_dt(struct ath11k_base *ath11k);
int ath11k_core_check_smbios(struct ath11k_base *ab);
void ath11k_core_halt(struct ath11k *ar);
+int ath11k_core_resume_early(struct ath11k_base *ab);
int ath11k_core_resume(struct ath11k_base *ab);
int ath11k_core_suspend(struct ath11k_base *ab);
+int ath11k_core_suspend_late(struct ath11k_base *ab);
void ath11k_core_pre_reconfigure_recovery(struct ath11k_base *ab);
bool ath11k_core_coldboot_cal_support(struct ath11k_base *ab);
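The napi_ndev member turning into a pointer implies the dummy net_device is now allocated dynamically rather than embedded. A hedged sketch of the allocation side, assuming the alloc_netdev_dummy() helper available in the kernels this diff targets (error unwinding abbreviated; the poll callback name is borrowed from ath11k's pcic code):

static int ath11k_ext_irq_grp_napi_init(struct ath11k_ext_irq_grp *irq_grp)
{
	irq_grp->napi_ndev = alloc_netdev_dummy(0);
	if (!irq_grp->napi_ndev)
		return -ENOMEM;

	netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
		       ath11k_pcic_ext_grp_napi_poll);
	return 0;
}

/* teardown: netif_napi_del(&irq_grp->napi); free_netdev(irq_grp->napi_ndev); */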
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index a48e737ef35d..57281a135dd7 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/vmalloc.h>
@@ -668,7 +668,7 @@ static ssize_t ath11k_write_extd_rx_stats(struct file *file,
ar->debug.rx_filter = tlv_filter.rx_filter;
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
HAL_RXDMA_MONITOR_STATUS,
@@ -980,7 +980,7 @@ int ath11k_debugfs_pdev_create(struct ath11k_base *ab)
debugfs_create_file("simulate_fw_crash", 0600, ab->debugfs_soc, ab,
&fops_simulate_fw_crash);
- debugfs_create_file("soc_dp_stats", 0600, ab->debugfs_soc, ab,
+ debugfs_create_file("soc_dp_stats", 0400, ab->debugfs_soc, ab,
&fops_soc_dp_stats);
if (ab->hw_params.sram_dump.start != 0)
@@ -1112,7 +1112,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
}
/* Clear rx filter set for monitor mode and rx status */
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
HAL_RXDMA_MONITOR_STATUS,
@@ -1171,7 +1171,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
HTT_RX_FP_DATA_FILTER_FLASG3;
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
ar->dp.mac_id + i,
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index 1a62407e5a9f..fbf666d0ecf1 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <crypto/hash.h>
@@ -830,8 +830,8 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params.num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxdma_per_pdev + j;
if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
BIT(id)) {
@@ -853,8 +853,8 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
ath11k_dp_process_reo_status(ab);
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params.num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params.num_rxdma_per_pdev + j;
if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
@@ -913,7 +913,7 @@ void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
atomic_set(&dp->num_tx_pending, 0);
init_waitqueue_head(&dp->tx_empty_waitq);
- for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
+ for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
}
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index afd481f5858f..86485580dd89 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/ieee80211.h>
@@ -311,7 +311,7 @@ static void ath11k_dp_service_mon_ring(struct timer_list *t)
struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
int i;
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
mod_timer(&ab->mon_reap_timer, jiffies +
@@ -324,7 +324,7 @@ static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
do {
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
reaped += ath11k_dp_rx_process_mon_rings(ab, i,
NULL,
DP_MON_SERVICE_BUDGET);
@@ -468,7 +468,7 @@ static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
rx_ring = &dp->rxdma_mon_buf_ring;
ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
rx_ring = &dp->rx_mon_status_refill_ring[i];
ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
}
@@ -506,7 +506,7 @@ static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
rx_ring = &dp->rx_mon_status_refill_ring[i];
ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
}
@@ -522,7 +522,7 @@ static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
if (ab->hw_params.rx_mac_buf_ring)
ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
@@ -585,7 +585,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
}
if (ar->ab->hw_params.rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ret = ath11k_dp_srng_setup(ar->ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
@@ -598,7 +598,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
}
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
HAL_RXDMA_DST, 0, dp->mac_id + i,
DP_RXDMA_ERR_DST_RING_SIZE);
@@ -608,7 +608,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
}
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
ret = ath11k_dp_srng_setup(ar->ab,
srng,
@@ -1877,8 +1877,7 @@ static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
}
-static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
- enum hal_encrypt_type enctype)
+int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype)
{
switch (enctype) {
case HAL_ENCRYPT_TYPE_OPEN:
@@ -2990,11 +2989,52 @@ ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
}
}
+static enum dp_mon_status_buf_state
+ath11k_dp_rx_mon_buf_done(struct ath11k_base *ab, struct hal_srng *srng,
+ struct dp_rxdma_ring *rx_ring)
+{
+ struct ath11k_skb_rxcb *rxcb;
+ struct hal_tlv_hdr *tlv;
+ struct sk_buff *skb;
+ void *status_desc;
+ dma_addr_t paddr;
+ u32 cookie;
+ int buf_id;
+ u8 rbm;
+
+ status_desc = ath11k_hal_srng_src_next_peek(ab, srng);
+ if (!status_desc)
+ return DP_MON_STATUS_NO_DMA;
+
+ ath11k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);
+
+ buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ skb = idr_find(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ if (!skb)
+ return DP_MON_STATUS_NO_DMA;
+
+ rxcb = ATH11K_SKB_RXCB(skb);
+ dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ tlv = (struct hal_tlv_hdr *)skb->data;
+ if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE)
+ return DP_MON_STATUS_NO_DMA;
+
+ return DP_MON_STATUS_REPLINISH;
+}
+
static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
int *budget, struct sk_buff_head *skb_list)
{
struct ath11k *ar;
const struct ath11k_hw_hal_params *hal_params;
+ enum dp_mon_status_buf_state reap_status;
struct ath11k_pdev_dp *dp;
struct dp_rxdma_ring *rx_ring;
struct ath11k_mon_data *pmon;
@@ -3057,15 +3097,38 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
FIELD_GET(HAL_TLV_HDR_TAG,
tlv->tl), buf_id);
- /* If done status is missing, hold onto status
- * ring until status is done for this status
- * ring buffer.
- * Keep HP in mon_status_ring unchanged,
- * and break from here.
- * Check status for same buffer for next time
+ /* The RxDMA status done bit might not be set even though the
+ * TP has already been moved by the HW.
+ */
- pmon->buf_state = DP_MON_STATUS_NO_DMA;
- break;
+
+ /* If the done status is missing:
+ * 1. As per the MAC team's suggestion, when the HP + 1 entry
+ * has been peeked and its DMA is not done but the HP + 2
+ * entry's DMA done bit is set, skip the HP + 1 entry and
+ * start processing it in the next interrupt.
+ * 2. If the HP + 2 entry's DMA done bit is not set either,
+ * keep polling the HP + 1 entry until its DMA done bit is
+ * set, i.e. check the same buffer again on the next
+ * dp_rx_mon_status_srng_process() pass.
+ */
+
+ reap_status = ath11k_dp_rx_mon_buf_done(ab, srng,
+ rx_ring);
+ if (reap_status == DP_MON_STATUS_NO_DMA)
+ continue;
+
+ spin_lock_bh(&rx_ring->idr_lock);
+ idr_remove(&rx_ring->bufs_idr, buf_id);
+ spin_unlock_bh(&rx_ring->idr_lock);
+
+ dma_unmap_single(ab->dev, rxcb->paddr,
+ skb->len + skb_tailroom(skb),
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ pmon->buf_state = DP_MON_STATUS_REPLINISH;
+ goto move_next;
}
spin_lock_bh(&rx_ring->idr_lock);
@@ -4391,7 +4454,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
}
if (ab->hw_params.rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i, HAL_RXDMA_BUF);
@@ -4403,7 +4466,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
}
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rxdma_err_dst_ring[i].ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i, HAL_RXDMA_DST);
@@ -4443,7 +4506,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
}
config_refill_ring:
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
HAL_RXDMA_MONITOR_STATUS);
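The monitor-status change above amounts to one extra decision when the DONE tag is missing on the current buffer: peek the next ring entry, and only discard the stuck buffer when its successor has completed. Condensed (not a drop-in; locking, the surrounding reap loop, and the move_next label are elided):

/* Condensed from the hunk above */
reap_status = ath11k_dp_rx_mon_buf_done(ab, srng, rx_ring);
if (reap_status == DP_MON_STATUS_NO_DMA)
	continue;	/* HP + 2 not done either: re-check next pass */

/* HP + 2 is done: drop the stuck HP + 1 buffer and move on */
idr_remove(&rx_ring->bufs_idr, buf_id);
dma_unmap_single(ab->dev, rxcb->paddr,
		 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
pmon->buf_state = DP_MON_STATUS_REPLINISH;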
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h
index 623da3bf9dc8..c322e30caa96 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_RX_H
#define ATH11K_DP_RX_H
@@ -95,4 +96,6 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id
int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab);
int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer);
+int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype);
+
#endif /* ATH11K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 272b1c35f98d..8522c67baabf 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -353,8 +353,12 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
if (ts->acked) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;
+
info->status.flags |=
IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
@@ -584,8 +588,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;
+
info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
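Both completion paths now apply the same rule: firmware that advertises WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT reports ack_rssi directly in dBm, while older firmware reports dB relative to the noise floor. A hypothetical helper capturing the conversion (the diff open-codes it at each call site):

/* Hypothetical helper; the driver open-codes this logic instead */
static int ath11k_ack_signal_dbm(struct ath11k_base *ab, s8 ack_rssi)
{
	int signal = ack_rssi;

	if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
		      ab->wmi_ab.svc_map))
		signal += ATH11K_DEFAULT_NOISE_FLOOR;	/* dB -> dBm */

	return signal;
}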
@@ -1035,7 +1043,7 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
int ret;
int i;
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
skb = ath11k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
@@ -1218,7 +1226,7 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
&tlv_filter);
} else if (!reset) {
/* set in monitor mode only */
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
dp->mac_id + i,
@@ -1231,7 +1239,7 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
if (ret)
return ret;
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
if (!reset) {
tlv_filter.rx_filter =
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
index 61be2265e09f..795fe3b8fa0d 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_DP_TX_H
@@ -13,7 +13,7 @@
struct ath11k_dp_htt_wbm_tx_status {
u32 msdu_id;
bool acked;
- int ack_rssi;
+ s8 ack_rssi;
u16 peer_id;
};
diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c
index f3d04568c221..f02599bd1c36 100644
--- a/drivers/net/wireless/ath/ath11k/hal.c
+++ b/drivers/net/wireless/ath/ath11k/hal.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/dma-mapping.h>
#include "hal_tx.h"
@@ -796,6 +796,20 @@ u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
return desc;
}
+u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab, struct hal_srng *srng)
+{
+ u32 next_hp;
+
+ lockdep_assert_held(&srng->lock);
+
+ next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
+
+ if (next_hp != srng->u.src_ring.cached_tp)
+ return srng->ring_base_vaddr + next_hp;
+
+ return NULL;
+}
+
u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
{
lockdep_assert_held(&srng->lock);
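The new peek helper is what ath11k_dp_rx_mon_buf_done() builds on: it returns the descriptor one entry past the head pointer, or NULL when that slot would collide with the cached tail. The index math is modular arithmetic over u32 words; with illustrative numbers:

/* Illustrative values only; all counts are in u32 words */
u32 entry_size = 2, ring_size = 8;
u32 hp = 6, cached_tp = 4;
u32 next_hp = (hp + entry_size) % ring_size;	/* (6 + 2) % 8 = 0 */

/* peekable because next_hp (0) != cached_tp (4); the descriptor
 * is then ring_base_vaddr + next_hp, addressed in words
 */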
diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h
index 65e8f244ebb9..dc8bbe073017 100644
--- a/drivers/net/wireless/ath/ath11k/hal.h
+++ b/drivers/net/wireless/ath/ath11k/hal.h
@@ -664,7 +664,7 @@ struct hal_srng_config {
};
/**
- * enum hal_rx_buf_return_buf_manager
+ * enum hal_rx_buf_return_buf_manager - manager for returned rx buffers
*
* @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
* @HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST: Descriptor returned to WBM idle
@@ -947,6 +947,8 @@ u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng);
int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
bool sync_hw_ptr);
u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng);
+u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab,
+ struct hal_srng *srng);
u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
struct hal_srng *srng);
u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h
index c5e88364afe5..46d17abd808b 100644
--- a/drivers/net/wireless/ath/ath11k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath11k/hal_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HAL_TX_H
@@ -54,7 +54,7 @@ struct hal_tx_info {
struct hal_tx_status {
enum hal_wbm_rel_src_module buf_rel_source;
enum hal_wbm_tqm_rel_reason status;
- u8 ack_rssi;
+ s8 ack_rssi;
u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
u32 ppdu_id;
u8 try_cnt;
diff --git a/drivers/net/wireless/ath/ath11k/hif.h b/drivers/net/wireless/ath/ath11k/hif.h
index 877a4073fed6..c4c6cc09c7c1 100644
--- a/drivers/net/wireless/ath/ath11k/hif.h
+++ b/drivers/net/wireless/ath/ath11k/hif.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _HIF_H_
@@ -18,7 +18,7 @@ struct ath11k_hif_ops {
int (*start)(struct ath11k_base *ab);
void (*stop)(struct ath11k_base *ab);
int (*power_up)(struct ath11k_base *ab);
- void (*power_down)(struct ath11k_base *ab);
+ void (*power_down)(struct ath11k_base *ab, bool is_suspend);
int (*suspend)(struct ath11k_base *ab);
int (*resume)(struct ath11k_base *ab);
int (*map_service_to_pipe)(struct ath11k_base *ab, u16 service_id,
@@ -67,12 +67,18 @@ static inline void ath11k_hif_irq_disable(struct ath11k_base *ab)
static inline int ath11k_hif_power_up(struct ath11k_base *ab)
{
+ if (!ab->hif.ops->power_up)
+ return -EOPNOTSUPP;
+
return ab->hif.ops->power_up(ab);
}
-static inline void ath11k_hif_power_down(struct ath11k_base *ab)
+static inline void ath11k_hif_power_down(struct ath11k_base *ab, bool is_suspend)
{
- ab->hif.ops->power_down(ab);
+ if (!ab->hif.ops->power_down)
+ return;
+
+ ab->hif.ops->power_down(ab, is_suspend);
}
static inline int ath11k_hif_suspend(struct ath11k_base *ab)
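Guarding the ops like this makes power_up/power_down optional, so a bus backend may leave them unset. The same defensive shape works for any optional op (example_op below is hypothetical, not a real ath11k_hif_ops member):

static inline int ath11k_hif_example_op(struct ath11k_base *ab)
{
	if (!ab->hif.ops->example_op)	/* op not provided by this bus */
		return -EOPNOTSUPP;

	return ab->hif.ops->example_op(ab);
}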
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 14ef4eb48f80..300322535766 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_HW_H
@@ -167,7 +167,7 @@ struct ath11k_hw_params {
bool single_pdev_only;
bool rxdma1_enable;
- int num_rxmda_per_pdev;
+ int num_rxdma_per_pdev;
bool rx_mac_buf_ring;
bool vdev_start_delay;
bool htt_peer_map_v2;
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index 9f4bf41a3d41..ba910ae2c676 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -1231,14 +1231,7 @@ static int ath11k_mac_vif_setup_ps(struct ath11k_vif *arvif)
enable_ps = arvif->ps;
- if (!arvif->is_started) {
- /* mac80211 can update vif powersave state while disconnected.
- * Firmware doesn't behave nicely and consumes more power than
- * necessary if PS is disabled on a non-started vdev. Hence
- * force-enable PS for non-running vdevs.
- */
- psmode = WMI_STA_PS_MODE_ENABLED;
- } else if (enable_ps) {
+ if (enable_ps) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
@@ -1430,10 +1423,67 @@ static bool ath11k_mac_set_nontx_vif_params(struct ath11k_vif *tx_arvif,
return false;
}
-static void ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
- struct sk_buff *bcn)
+static int ath11k_mac_setup_bcn_p2p_ie(struct ath11k_vif *arvif,
+ struct sk_buff *bcn)
{
+ struct ath11k *ar = arvif->ar;
struct ieee80211_mgmt *mgmt;
+ const u8 *p2p_ie;
+ int ret;
+
+ mgmt = (void *)bcn->data;
+ p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
+ mgmt->u.beacon.variable,
+ bcn->len - (mgmt->u.beacon.variable -
+ bcn->data));
+ if (!p2p_ie)
+ return -ENOENT;
+
+ ret = ath11k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to submit P2P GO bcn ie for vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath11k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
+ u8 oui_type, size_t ie_offset)
+{
+ size_t len;
+ const u8 *next, *end;
+ u8 *ie;
+
+ if (WARN_ON(skb->len < ie_offset))
+ return -EINVAL;
+
+ ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
+ skb->data + ie_offset,
+ skb->len - ie_offset);
+ if (!ie)
+ return -ENOENT;
+
+ len = ie[1] + 2;
+ end = skb->data + skb->len;
+ next = ie + len;
+
+ if (WARN_ON(next > end))
+ return -EINVAL;
+
+ memmove(ie, next, end - next);
+ skb_trim(skb, skb->len - len);
+
+ return 0;
+}
+
+static int ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
+ struct sk_buff *bcn)
+{
+ struct ath11k_base *ab = arvif->ar->ab;
+ struct ieee80211_mgmt *mgmt;
+ int ret = 0;
u8 *ies;
ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
@@ -1451,6 +1501,32 @@ static void ath11k_mac_set_vif_params(struct ath11k_vif *arvif,
arvif->wpaie_present = true;
else
arvif->wpaie_present = false;
+
+ if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO)
+ return ret;
+
+ ret = ath11k_mac_setup_bcn_p2p_ie(arvif, bcn);
+ if (ret) {
+ ath11k_warn(ab, "failed to setup P2P GO bcn ie: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* P2P IE is inserted by firmware automatically (as
+ * configured above) so remove it from the base beacon
+ * template to avoid duplicate P2P IEs in beacon frames.
+ */
+ ret = ath11k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA,
+ WLAN_OUI_TYPE_WFA_P2P,
+ offsetof(struct ieee80211_mgmt,
+ u.beacon.variable));
+ if (ret) {
+ ath11k_warn(ab, "failed to remove P2P vendor ie: %d\n",
+ ret);
+ return ret;
+ }
+
+ return ret;
}
static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
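The len = ie[1] + 2 in the removal helper above reflects the TLV encoding of information elements: one octet of element ID, one octet of payload length, then the payload. A worked splice on a hypothetical buffer:

/* Hypothetical vendor IE (P2P OUI/type + 2 payload bytes) followed
 * by one trailing byte; buf is not real frame data.
 */
u8 buf[] = { 0xdd, 0x06, 0x50, 0x6f, 0x9a, 0x09, 0xaa, 0xbb, 0x00 };
u8 *ie = buf;
size_t len = ie[1] + 2;			/* 6 + 2 = 8 bytes total  */
u8 *end = buf + sizeof(buf);
u8 *next = ie + len;			/* the lone trailing byte */

memmove(ie, next, end - next);		/* splice the element out */
/* the helper then calls skb_trim() to shrink the frame */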
@@ -1472,10 +1548,12 @@ static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
return -EPERM;
}
- if (tx_arvif == arvif)
- ath11k_mac_set_vif_params(tx_arvif, beacons->bcn[0].skb);
- else
+ if (tx_arvif == arvif) {
+ if (ath11k_mac_set_vif_params(tx_arvif, beacons->bcn[0].skb))
+ return -EINVAL;
+ } else {
arvif->wpaie_present = tx_arvif->wpaie_present;
+ }
for (i = 0; i < beacons->cnt; i++) {
if (tx_arvif != arvif && !nontx_vif_params_set)
@@ -1534,10 +1612,12 @@ static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif)
return -EPERM;
}
- if (tx_arvif == arvif)
- ath11k_mac_set_vif_params(tx_arvif, bcn);
- else if (!ath11k_mac_set_nontx_vif_params(tx_arvif, arvif, bcn))
+ if (tx_arvif == arvif) {
+ if (ath11k_mac_set_vif_params(tx_arvif, bcn))
+ return -EINVAL;
+ } else if (!ath11k_mac_set_nontx_vif_params(tx_arvif, arvif, bcn)) {
return -EINVAL;
+ }
ret = ath11k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, 0);
kfree_skb(bcn);
@@ -1579,7 +1659,7 @@ void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
if (vif->bss_conf.color_change_active &&
ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
arvif->bcca_zero_sent = true;
- ieee80211_color_change_finish(vif);
+ ieee80211_color_change_finish(vif, 0);
return;
}
@@ -3996,6 +4076,9 @@ static int ath11k_mac_op_hw_scan(struct ieee80211_hw *hw,
arg->vdev_id = arvif->vdev_id;
arg->scan_id = ATH11K_SCAN_ID;
+ if (ar->ab->hw_params.single_pdev_only)
+ arg->scan_f_filter_prb_req = 1;
+
if (req->ie_len) {
arg->extraie.ptr = kmemdup(req->ie, req->ie_len, GFP_KERNEL);
if (!arg->extraie.ptr) {
@@ -4146,6 +4229,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
arg.key_cipher = WMI_CIPHER_AES_CCM;
/* TODO: Re-check if flag is valid */
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
@@ -4155,12 +4239,10 @@ static int ath11k_install_key(struct ath11k_vif *arvif,
arg.key_txmic_len = 8;
arg.key_rxmic_len = 8;
break;
- case WLAN_CIPHER_SUITE_CCMP_256:
- arg.key_cipher = WMI_CIPHER_AES_CCM;
- break;
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
arg.key_cipher = WMI_CIPHER_AES_GCM;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
break;
default:
ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
@@ -5820,7 +5902,10 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
{
struct ath11k_base *ab = ar->ab;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
struct ieee80211_tx_info *info;
+ enum hal_encrypt_type enctype;
+ unsigned int mic_len;
dma_addr_t paddr;
int buf_id;
int ret;
@@ -5844,7 +5929,12 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif,
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control)) {
- skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+ if (!(skb_cb->flags & ATH11K_SKB_CIPHER_SET))
+ ath11k_warn(ab, "WMI management tx frame without ATH11K_SKB_CIPHER_SET");
+
+ enctype = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);
+ mic_len = ath11k_dp_rx_crypto_mic_len(ar, enctype);
+ skb_put(skb, mic_len);
}
}
@@ -6025,7 +6115,7 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable)
tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
}
- for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
ar->dp.mac_id + i,
@@ -6195,7 +6285,7 @@ err:
return ret;
}
-static void ath11k_mac_op_stop(struct ieee80211_hw *hw)
+static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath11k *ar = hw->priv;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
@@ -6570,17 +6660,26 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_UNSPECIFIED:
case NL80211_IFTYPE_STATION:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT;
break;
case NL80211_IFTYPE_MESH_POINT:
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_MESH_11S;
fallthrough;
case NL80211_IFTYPE_AP:
arvif->vdev_type = WMI_VDEV_TYPE_AP;
+ if (vif->p2p)
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO;
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
ar->monitor_vdev_id = bit;
break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ arvif->vdev_type = WMI_VDEV_TYPE_STA;
+ arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
+ break;
+
default:
WARN_ON(1);
break;
@@ -7415,32 +7514,6 @@ static int ath11k_mac_stop_vdev_early(struct ieee80211_hw *hw,
return 0;
}
-static u8 ath11k_mac_get_tpe_count(u8 txpwr_intrprt, u8 txpwr_cnt)
-{
- switch (txpwr_intrprt) {
- /* Refer "Table 9-276-Meaning of Maximum Transmit Power Count subfield
- * if the Maximum Transmit Power Interpretation subfield is 0 or 2" of
- * "IEEE Std 802.11ax 2021".
- */
- case IEEE80211_TPE_LOCAL_EIRP:
- case IEEE80211_TPE_REG_CLIENT_EIRP:
- txpwr_cnt = txpwr_cnt <= 3 ? txpwr_cnt : 3;
- txpwr_cnt = txpwr_cnt + 1;
- break;
- /* Refer "Table 9-277-Meaning of Maximum Transmit Power Count subfield
- * if Maximum Transmit Power Interpretation subfield is 1 or 3" of
- * "IEEE Std 802.11ax 2021".
- */
- case IEEE80211_TPE_LOCAL_EIRP_PSD:
- case IEEE80211_TPE_REG_CLIENT_EIRP_PSD:
- txpwr_cnt = txpwr_cnt <= 4 ? txpwr_cnt : 4;
- txpwr_cnt = txpwr_cnt ? (BIT(txpwr_cnt - 1)) : 1;
- break;
- }
-
- return txpwr_cnt;
-}
-
static u8 ath11k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def)
{
if (chan_def->chan->flags & IEEE80211_CHAN_PSD) {
@@ -7596,7 +7669,7 @@ void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
struct ieee80211_channel *chan, *temp_chan;
u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction;
bool is_psd_power = false, is_tpe_present = false;
- s8 max_tx_power[IEEE80211_MAX_NUM_PWR_LEVEL],
+ s8 max_tx_power[ATH11K_NUM_PWR_LEVELS],
psd_power, tx_power;
s8 eirp_power = 0;
u16 start_freq, center_freq;
@@ -7609,7 +7682,8 @@ void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar,
is_tpe_present = true;
num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels;
} else {
- num_pwr_levels = ath11k_mac_get_num_pwr_levels(&ctx->def);
+ num_pwr_levels =
+ ath11k_mac_get_num_pwr_levels(&bss_conf->chanreq.oper);
}
for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) {
@@ -7766,33 +7840,23 @@ static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- struct ieee80211_tx_pwr_env *single_tpe;
+ struct ieee80211_parsed_tpe_eirp *non_psd = NULL;
+ struct ieee80211_parsed_tpe_psd *psd = NULL;
enum wmi_reg_6ghz_client_type client_type;
struct cur_regulatory_info *reg_info;
+ u8 local_tpe_count, reg_tpe_count;
+ bool use_local_tpe;
int i;
- u8 pwr_count, pwr_interpret, pwr_category;
- u8 psd_index = 0, non_psd_index = 0, local_tpe_count = 0, reg_tpe_count = 0;
- bool use_local_tpe, non_psd_set = false, psd_set = false;
reg_info = &ab->reg_info_store[ar->pdev_idx];
client_type = reg_info->client_type;
- for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
- single_tpe = &bss_conf->tx_pwr_env[i];
- pwr_category = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
- pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
-
- if (pwr_category == client_type) {
- if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP ||
- pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD)
- local_tpe_count++;
- else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP ||
- pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD)
- reg_tpe_count++;
- }
- }
+ local_tpe_count =
+ bss_conf->tpe.max_local[client_type].valid +
+ bss_conf->tpe.psd_local[client_type].valid;
+ reg_tpe_count =
+ bss_conf->tpe.max_reg_client[client_type].valid +
+ bss_conf->tpe.psd_reg_client[client_type].valid;
if (!reg_tpe_count && !local_tpe_count) {
ath11k_warn(ab,
@@ -7805,83 +7869,44 @@ static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar,
use_local_tpe = false;
}
- for (i = 0; i < bss_conf->tx_pwr_env_num; i++) {
- single_tpe = &bss_conf->tx_pwr_env[i];
- pwr_category = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_CATEGORY);
- pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
-
- if (pwr_category != client_type)
- continue;
-
- /* get local transmit power envelope */
- if (use_local_tpe) {
- if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP) {
- non_psd_index = i;
- non_psd_set = true;
- } else if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD) {
- psd_index = i;
- psd_set = true;
- }
- /* get regulatory transmit power envelope */
- } else {
- if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP) {
- non_psd_index = i;
- non_psd_set = true;
- } else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD) {
- psd_index = i;
- psd_set = true;
- }
- }
+ if (use_local_tpe) {
+ psd = &bss_conf->tpe.psd_local[client_type];
+ if (!psd->valid)
+ psd = NULL;
+ non_psd = &bss_conf->tpe.max_local[client_type];
+ if (!non_psd->valid)
+ non_psd = NULL;
+ } else {
+ psd = &bss_conf->tpe.psd_reg_client[client_type];
+ if (!psd->valid)
+ psd = NULL;
+ non_psd = &bss_conf->tpe.max_reg_client[client_type];
+ if (!non_psd->valid)
+ non_psd = NULL;
}
- if (non_psd_set && !psd_set) {
- single_tpe = &bss_conf->tx_pwr_env[non_psd_index];
- pwr_count = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_COUNT);
- pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
+ if (non_psd && !psd) {
arvif->reg_tpc_info.is_psd_power = false;
arvif->reg_tpc_info.eirp_power = 0;
- arvif->reg_tpc_info.num_pwr_levels =
- ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
+ arvif->reg_tpc_info.num_pwr_levels = non_psd->count;
for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
ath11k_dbg(ab, ATH11K_DBG_MAC,
"non PSD power[%d] : %d\n",
- i, single_tpe->tx_power[i]);
- arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
+ i, non_psd->power[i]);
+ arvif->reg_tpc_info.tpe[i] = non_psd->power[i] / 2;
}
}
- if (psd_set) {
- single_tpe = &bss_conf->tx_pwr_env[psd_index];
- pwr_count = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_COUNT);
- pwr_interpret = u8_get_bits(single_tpe->tx_power_info,
- IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
- arvif->reg_tpc_info.is_psd_power = true;
+ if (psd) {
+ arvif->reg_tpc_info.num_pwr_levels = psd->count;
- if (pwr_count == 0) {
+ for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
ath11k_dbg(ab, ATH11K_DBG_MAC,
- "TPE PSD power : %d\n", single_tpe->tx_power[0]);
- arvif->reg_tpc_info.num_pwr_levels =
- ath11k_mac_get_num_pwr_levels(&ctx->def);
-
- for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++)
- arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[0] / 2;
- } else {
- arvif->reg_tpc_info.num_pwr_levels =
- ath11k_mac_get_tpe_count(pwr_interpret, pwr_count);
-
- for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) {
- ath11k_dbg(ab, ATH11K_DBG_MAC,
- "TPE PSD power[%d] : %d\n",
- i, single_tpe->tx_power[i]);
- arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2;
- }
+ "TPE PSD power[%d] : %d\n",
+ i, psd->power[i]);
+ arvif->reg_tpc_info.tpe[i] = psd->power[i] / 2;
}
}
}
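The repeated /2 when filling reg_tpc_info.tpe[] is a unit conversion: Transmit Power Envelope octets carry power in 0.5 dBm steps, while the driver tracks whole dBm. Made explicit (hypothetical helper; the diff divides inline):

static inline s8 ath11k_tpe_to_dbm(s8 tpe_half_dbm)
{
	return tpe_half_dbm / 2;	/* TPE units are 0.5 dBm */
}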
@@ -7896,8 +7921,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ath11k_base *ab = ar->ab;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
int ret;
- struct cur_regulatory_info *reg_info;
- enum ieee80211_ap_reg_power power_type;
mutex_lock(&ar->conf_mutex);
@@ -7908,17 +7931,6 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
if (ath11k_wmi_supports_6ghz_cc_ext(ar) &&
ctx->def.chan->band == NL80211_BAND_6GHZ &&
arvif->vdev_type == WMI_VDEV_TYPE_STA) {
- reg_info = &ab->reg_info_store[ar->pdev_idx];
- power_type = vif->bss_conf.power_type;
-
- ath11k_dbg(ab, ATH11K_DBG_MAC, "chanctx power type %d\n", power_type);
-
- if (power_type == IEEE80211_REG_UNSET_AP) {
- ret = -EINVAL;
- goto out;
- }
-
- ath11k_reg_handle_chan_list(ab, reg_info, power_type);
arvif->chanctx = *ctx;
ath11k_mac_parse_tx_pwr_env(ar, vif, ctx);
}
@@ -8772,12 +8784,8 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
ieee80211_wake_queues(ar->hw);
if (ar->ab->hw_params.current_cc_support &&
- ar->alpha2[0] != 0 && ar->alpha2[1] != 0) {
- struct wmi_set_current_country_params set_current_param = {};
-
- memcpy(&set_current_param.alpha2, ar->alpha2, 2);
- ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
- }
+ ar->alpha2[0] != 0 && ar->alpha2[1] != 0)
+ ath11k_reg_set_cc(ar);
if (ab->is_reset) {
recovery_count = atomic_inc_return(&ab->recovery_count);
@@ -8976,8 +8984,11 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
}
- sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi) +
- ATH11K_DEFAULT_NOISE_FLOOR;
+ sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi);
+
+ if (!db2dbm)
+ sinfo->signal_avg += ATH11K_DEFAULT_NOISE_FLOOR;
+
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
}
@@ -9012,7 +9023,6 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct inet6_ifaddr *ifa6;
struct ifacaddr6 *ifaca6;
- struct list_head *p;
u32 count, scope;
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "op ipv6 changed\n");
@@ -9020,7 +9030,12 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
offload = &arvif->arp_ns_offload;
count = 0;
- /* Note: read_lock_bh() calls rcu_read_lock() */
+ /* This _ipv6_changed() callback is invoked with the RCU read lock
+ * already held by atomic_notifier_call_chain(), so we don't need
+ * to call rcu_read_lock() again here. Note that with
+ * CONFIG_PREEMPT_RT enabled, read_lock_bh() also takes the RCU
+ * read lock; this is fine because RCU read-side critical sections
+ * are allowed to nest.
+ */
read_lock_bh(&idev->lock);
memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr));
@@ -9028,11 +9043,10 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw,
memcpy(offload->mac_addr, vif->addr, ETH_ALEN);
/* get unicast address */
- list_for_each(p, &idev->addr_list) {
+ list_for_each_entry(ifa6, &idev->addr_list, if_list) {
if (count >= ATH11K_IPV6_MAX_COUNT)
goto generate;
- ifa6 = list_entry(p, struct inet6_ifaddr, if_list);
if (ifa6->flags & IFA_F_DADFAILED)
continue;
scope = ipv6_addr_src_scope(&ifa6->addr);
@@ -9255,9 +9269,11 @@ static int ath11k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
arg->dwell_time_passive = scan_time_msec;
arg->max_scan_time = scan_time_msec;
arg->scan_f_passive = 1;
- arg->scan_f_filter_prb_req = 1;
arg->burst_duration = duration;
+ if (!ar->ab->hw_params.single_pdev_only)
+ arg->scan_f_filter_prb_req = 1;
+
ret = ath11k_start_scan(ar, arg);
if (ret) {
ath11k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
@@ -9532,6 +9548,8 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
struct ath11k *ar = hw->priv;
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ath11k_sta *arsta = ath11k_sta_to_arsta(sta);
+ enum ieee80211_ap_reg_power power_type;
+ struct cur_regulatory_info *reg_info;
struct ath11k_peer *peer;
int ret = 0;
@@ -9611,6 +9629,29 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
ath11k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
sta->addr, arvif->vdev_id, ret);
}
+
+ if (!ret &&
+ ath11k_wmi_supports_6ghz_cc_ext(ar) &&
+ arvif->vdev_type == WMI_VDEV_TYPE_STA &&
+ arvif->chanctx.def.chan &&
+ arvif->chanctx.def.chan->band == NL80211_BAND_6GHZ) {
+ reg_info = &ar->ab->reg_info_store[ar->pdev_idx];
+ power_type = vif->bss_conf.power_type;
+
+ if (power_type == IEEE80211_REG_UNSET_AP) {
+ ath11k_warn(ar->ab, "invalid power type %d\n",
+ power_type);
+ ret = -EINVAL;
+ } else {
+ ret = ath11k_reg_handle_chan_list(ar->ab,
+ reg_info,
+ power_type);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to handle chan list with power type %d\n",
+ power_type);
+ }
+ }
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
spin_lock_bh(&ar->ab->base_lock);
@@ -9859,12 +9900,18 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
struct ieee80211_iface_combination *combinations;
struct ieee80211_iface_limit *limits;
int n_limits;
+ bool p2p;
+
+ p2p = ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE);
combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
if (!combinations)
return -ENOMEM;
- n_limits = 2;
+ if (p2p)
+ n_limits = 3;
+ else
+ n_limits = 2;
limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
if (!limits) {
@@ -9872,39 +9919,29 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
return -ENOMEM;
}
+ limits[0].types |= BIT(NL80211_IFTYPE_STATION);
+ limits[1].types |= BIT(NL80211_IFTYPE_AP);
+ if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
+ ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
+ limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
+
+ combinations[0].limits = limits;
+ combinations[0].n_limits = n_limits;
+ combinations[0].beacon_int_infra_match = true;
+ combinations[0].beacon_int_min_gcd = 100;
+
if (ab->hw_params.support_dual_stations) {
limits[0].max = 2;
- limits[0].types |= BIT(NL80211_IFTYPE_STATION);
-
limits[1].max = 1;
- limits[1].types |= BIT(NL80211_IFTYPE_AP);
- if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
- limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
- combinations[0].limits = limits;
- combinations[0].n_limits = 2;
combinations[0].max_interfaces = ab->hw_params.num_vdevs;
combinations[0].num_different_channels = 2;
- combinations[0].beacon_int_infra_match = true;
- combinations[0].beacon_int_min_gcd = 100;
} else {
limits[0].max = 1;
- limits[0].types |= BIT(NL80211_IFTYPE_STATION);
-
limits[1].max = 16;
- limits[1].types |= BIT(NL80211_IFTYPE_AP);
-
- if (IS_ENABLED(CONFIG_MAC80211_MESH) &&
- ab->hw_params.interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
- limits[1].types |= BIT(NL80211_IFTYPE_MESH_POINT);
- combinations[0].limits = limits;
- combinations[0].n_limits = 2;
combinations[0].max_interfaces = 16;
combinations[0].num_different_channels = 1;
- combinations[0].beacon_int_infra_match = true;
- combinations[0].beacon_int_min_gcd = 100;
combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
@@ -9913,6 +9950,13 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
BIT(NL80211_CHAN_WIDTH_160);
}
+ if (p2p) {
+ limits[1].types |= BIT(NL80211_IFTYPE_P2P_CLIENT) |
+ BIT(NL80211_IFTYPE_P2P_GO);
+ limits[2].max = 1;
+ limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE);
+ }
+
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = 1;
@@ -10029,6 +10073,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
if (ret)
goto err;
+ wiphy_read_of_freq_limits(ar->hw->wiphy);
ath11k_mac_setup_ht_vht_cap(ar, cap, &ht_cap);
ath11k_mac_setup_he_cap(ar, cap);
@@ -10215,11 +10260,8 @@ static int __ath11k_mac_register(struct ath11k *ar)
}
if (ab->hw_params.current_cc_support && ab->new_alpha2[0]) {
- struct wmi_set_current_country_params set_current_param = {};
-
- memcpy(&set_current_param.alpha2, ab->new_alpha2, 2);
memcpy(&ar->alpha2, ab->new_alpha2, 2);
- ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ ret = ath11k_reg_set_cc(ar);
if (ret)
ath11k_warn(ar->ab,
"failed set cc code for mac register: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath11k/mhi.c b/drivers/net/wireless/ath/ath11k/mhi.c
index fb4ecf9a103e..ab182690aed3 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.c
+++ b/drivers/net/wireless/ath/ath11k/mhi.c
@@ -19,6 +19,7 @@
#define MHI_TIMEOUT_DEFAULT_MS 20000
#define RDDM_DUMP_SIZE 0x420000
+#define MHI_CB_INVALID 0xff
static const struct mhi_channel_config ath11k_mhi_channels_qca6390[] = {
{
@@ -158,9 +159,8 @@ void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_PCI, "mhistatus 0x%x\n", val);
- /* Observed on QCA6390 that after SOC_GLOBAL_RESET, MHISTATUS
- * has SYSERR bit set and thus need to set MHICTRL_RESET
- * to clear SYSERR.
+ /* After SOC_GLOBAL_RESET, MHISTATUS may still have the SYSERR bit
+ * set, thus MHICTRL_RESET needs to be set to clear SYSERR.
*/
ath11k_pcic_write32(ab, MHICTRL, MHICTRL_RESET_MASK);
@@ -269,6 +269,7 @@ static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
struct ath11k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
+ struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
ath11k_dbg(ab, ATH11K_DBG_BOOT, "notify status reason %s\n",
ath11k_mhi_op_callback_to_str(cb));
@@ -279,12 +280,21 @@ static void ath11k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
break;
case MHI_CB_EE_RDDM:
ath11k_warn(ab, "firmware crashed: MHI_CB_EE_RDDM\n");
+ if (ab_pci->mhi_pre_cb == MHI_CB_EE_RDDM) {
+ ath11k_dbg(ab, ATH11K_DBG_BOOT,
+ "do not queue again for consecutive RDDM event\n");
+ break;
+ }
+
if (!(test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)))
queue_work(ab->workqueue_aux, &ab->reset_work);
+
break;
default:
break;
}
+
+ ab_pci->mhi_pre_cb = cb;
}
static int ath11k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
@@ -397,6 +407,7 @@ int ath11k_mhi_register(struct ath11k_pci *ab_pci)
goto free_controller;
}
+ ab_pci->mhi_pre_cb = MHI_CB_INVALID;
ret = mhi_register_controller(mhi_ctrl, ath11k_mhi_config);
if (ret) {
ath11k_err(ab, "failed to register to mhi bus, err = %d\n", ret);
@@ -442,9 +453,17 @@ int ath11k_mhi_start(struct ath11k_pci *ab_pci)
return 0;
}
-void ath11k_mhi_stop(struct ath11k_pci *ab_pci)
+void ath11k_mhi_stop(struct ath11k_pci *ab_pci, bool is_suspend)
{
- mhi_power_down(ab_pci->mhi_ctrl, true);
+ /* During suspend we need the mhi_power_down_keep_dev()
+ * workaround, otherwise ath11k_core_resume() will time out
+ * during resume.
+ */
+ if (is_suspend)
+ mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true);
+ else
+ mhi_power_down(ab_pci->mhi_ctrl, true);
+
mhi_unprepare_after_power_down(ab_pci->mhi_ctrl);
}
diff --git a/drivers/net/wireless/ath/ath11k/mhi.h b/drivers/net/wireless/ath/ath11k/mhi.h
index f81fba2644a4..2d567705e732 100644
--- a/drivers/net/wireless/ath/ath11k/mhi.h
+++ b/drivers/net/wireless/ath/ath11k/mhi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2020 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH11K_MHI_H
#define _ATH11K_MHI_H
@@ -18,7 +18,7 @@
#define MHICTRL_RESET_MASK 0x2
int ath11k_mhi_start(struct ath11k_pci *ar_pci);
-void ath11k_mhi_stop(struct ath11k_pci *ar_pci);
+void ath11k_mhi_stop(struct ath11k_pci *ar_pci, bool is_suspend);
int ath11k_mhi_register(struct ath11k_pci *ar_pci);
void ath11k_mhi_unregister(struct ath11k_pci *ar_pci);
void ath11k_mhi_set_mhictrl_reset(struct ath11k_base *ab);
@@ -26,5 +26,4 @@ void ath11k_mhi_clear_vector(struct ath11k_base *ab);
int ath11k_mhi_suspend(struct ath11k_pci *ar_pci);
int ath11k_mhi_resume(struct ath11k_pci *ar_pci);
-
#endif
diff --git a/drivers/net/wireless/ath/ath11k/p2p.c b/drivers/net/wireless/ath/ath11k/p2p.c
new file mode 100644
index 000000000000..01e14523f1fe
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/p2p.c
@@ -0,0 +1,149 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "wmi.h"
+#include "mac.h"
+#include "p2p.h"
+
+static void ath11k_p2p_noa_ie_fill(u8 *data, size_t len,
+ const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ struct ieee80211_p2p_noa_attr *noa_attr;
+ u8 noa_descriptors, ctwindow;
+ bool oppps;
+ __le16 *noa_attr_len;
+ u16 attr_len;
+ int i;
+
+ ctwindow = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU);
+ oppps = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS);
+ noa_descriptors = u32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_DESC_NUM);
+
+ /* P2P IE */
+ data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ data[1] = len - 2;
+ data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ data[5] = WLAN_OUI_TYPE_WFA_P2P;
+
+ /* NOA ATTR */
+ data[6] = IEEE80211_P2P_ATTR_ABSENCE_NOTICE;
+ noa_attr_len = (__le16 *)&data[7]; /* 2 bytes */
+ noa_attr = (struct ieee80211_p2p_noa_attr *)&data[9];
+
+ noa_attr->index = u32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_INDEX);
+ noa_attr->oppps_ctwindow = ctwindow;
+ if (oppps)
+ noa_attr->oppps_ctwindow |= IEEE80211_P2P_OPPPS_ENABLE_BIT;
+
+ for (i = 0; i < noa_descriptors; i++) {
+ noa_attr->desc[i].count = noa->descriptors[i].type_count;
+ noa_attr->desc[i].duration =
+ cpu_to_le32(noa->descriptors[i].duration);
+ noa_attr->desc[i].interval =
+ cpu_to_le32(noa->descriptors[i].interval);
+ noa_attr->desc[i].start_time =
+ cpu_to_le32(noa->descriptors[i].start_time);
+ }
+
+ attr_len = 2; /* index + oppps_ctwindow */
+ attr_len += noa_descriptors * sizeof(struct ieee80211_p2p_noa_desc);
+ *noa_attr_len = __cpu_to_le16(attr_len);
+}
+
+static size_t
+ath11k_p2p_noa_ie_len_compute(const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ size_t len = 0;
+ u8 noa_descriptors = u32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_DESC_NUM);
+
+ if (!(noa_descriptors) &&
+ !(u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS)))
+ return 0;
+
+ len += 1 + 1 + 4; /* EID + len + OUI */
+ len += 1 + 2; /* noa attr + attr len */
+ len += 1 + 1; /* index + oppps_ctwindow */
+ len += noa_descriptors *
+ sizeof(struct ieee80211_p2p_noa_desc);
+
+ return len;
+}
+
+static void ath11k_p2p_noa_ie_assign(struct ath11k_vif *arvif, void *ie,
+ size_t len)
+{
+ struct ath11k *ar = arvif->ar;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ kfree(arvif->u.ap.noa_data);
+
+ arvif->u.ap.noa_data = ie;
+ arvif->u.ap.noa_len = len;
+}
+
+static void __ath11k_p2p_noa_update(struct ath11k_vif *arvif,
+ const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ struct ath11k *ar = arvif->ar;
+ void *ie;
+ size_t len;
+
+ lockdep_assert_held(&ar->data_lock);
+
+ ath11k_p2p_noa_ie_assign(arvif, NULL, 0);
+
+ len = ath11k_p2p_noa_ie_len_compute(noa);
+ if (!len)
+ return;
+
+ ie = kmalloc(len, GFP_ATOMIC);
+ if (!ie)
+ return;
+
+ ath11k_p2p_noa_ie_fill(ie, len, noa);
+ ath11k_p2p_noa_ie_assign(arvif, ie, len);
+}
+
+void ath11k_p2p_noa_update(struct ath11k_vif *arvif,
+ const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ struct ath11k *ar = arvif->ar;
+
+ spin_lock_bh(&ar->data_lock);
+ __ath11k_p2p_noa_update(arvif, noa);
+ spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath11k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+ struct ath11k_p2p_noa_arg *arg = data;
+
+ if (arvif->vdev_id != arg->vdev_id)
+ return;
+
+ ath11k_p2p_noa_update(arvif, arg->noa);
+}
+
+void ath11k_p2p_noa_update_by_vdev_id(struct ath11k *ar, u32 vdev_id,
+ const struct ath11k_wmi_p2p_noa_info *noa)
+{
+ struct ath11k_p2p_noa_arg arg = {
+ .vdev_id = vdev_id,
+ .noa = noa,
+ };
+
+ ieee80211_iterate_active_interfaces_atomic(ar->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath11k_p2p_noa_update_vdev_iter,
+ &arg);
+}
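
For reference, the NoA IE built by ath11k_p2p_noa_ie_fill() above is a fixed 11-byte header followed by the descriptors, so its size can be sanity-checked by hand. A minimal sketch of the arithmetic, assuming struct ieee80211_p2p_noa_desc has the usual packed 13-byte mac80211 layout (a u8 count plus three __le32 fields):

	/* Sketch only, not driver code: expected NoA IE length for n
	 * descriptors, mirroring ath11k_p2p_noa_ie_len_compute() above.
	 */
	static size_t noa_ie_len(u8 n)
	{
		return 6 +	/* EID + len + 3-byte OUI + OUI type */
		       3 +	/* attribute ID + 2-byte attribute length */
		       2 +	/* index + oppps_ctwindow */
		       n * sizeof(struct ieee80211_p2p_noa_desc);
	}

For a single descriptor this gives 24 bytes, matching what the fill routine writes into data[].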
diff --git a/drivers/net/wireless/ath/ath11k/p2p.h b/drivers/net/wireless/ath/ath11k/p2p.h
new file mode 100644
index 000000000000..d907940a9b09
--- /dev/null
+++ b/drivers/net/wireless/ath/ath11k/p2p.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH11K_P2P_H
+#define ATH11K_P2P_H
+
+#include "wmi.h"
+
+struct ath11k_wmi_p2p_noa_info;
+
+struct ath11k_p2p_noa_arg {
+ u32 vdev_id;
+ const struct ath11k_wmi_p2p_noa_info *noa;
+};
+
+void ath11k_p2p_noa_update(struct ath11k_vif *arvif,
+ const struct ath11k_wmi_p2p_noa_info *noa);
+void ath11k_p2p_noa_update_by_vdev_id(struct ath11k *ar, u32 vdev_id,
+ const struct ath11k_wmi_p2p_noa_info *noa);
+#endif
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index be9d2c69cc41..8d63b84d1261 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -638,7 +638,7 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
return 0;
}
-static void ath11k_pci_power_down(struct ath11k_base *ab)
+static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend)
{
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -649,7 +649,7 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
ath11k_pci_msi_disable(ab_pci);
- ath11k_mhi_stop(ab_pci);
+ ath11k_mhi_stop(ab_pci, is_suspend);
clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
ath11k_pci_sw_reset(ab_pci->ab, false);
}
@@ -970,7 +970,7 @@ static void ath11k_pci_remove(struct pci_dev *pdev)
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath11k_pci_power_down(ab);
+ ath11k_pci_power_down(ab, false);
ath11k_debugfs_soc_destroy(ab);
ath11k_qmi_deinit_service(ab);
goto qmi_fail;
@@ -998,7 +998,7 @@ static void ath11k_pci_shutdown(struct pci_dev *pdev)
struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
- ath11k_pci_power_down(ab);
+ ath11k_pci_power_down(ab, false);
}
static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
@@ -1035,9 +1035,39 @@ static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
return ret;
}
-static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops,
- ath11k_pci_pm_suspend,
- ath11k_pci_pm_resume);
+static __maybe_unused int ath11k_pci_pm_suspend_late(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath11k_core_suspend_late(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to late suspend core: %d\n", ret);
+
+ /* Similar to ath11k_pci_pm_suspend(), we return success here
+ * even if an error happens, to allow system suspend/hibernation
+ * to survive.
+ */
+ return 0;
+}
+
+static __maybe_unused int ath11k_pci_pm_resume_early(struct device *dev)
+{
+ struct ath11k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath11k_core_resume_early(ab);
+ if (ret)
+ ath11k_warn(ab, "failed to early resume core: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops __maybe_unused ath11k_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend,
+ ath11k_pci_pm_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend_late,
+ ath11k_pci_pm_resume_early)
+};
static struct pci_driver ath11k_pci_driver = {
.name = "ath11k_pci",
diff --git a/drivers/net/wireless/ath/ath11k/pci.h b/drivers/net/wireless/ath/ath11k/pci.h
index 6be73333d90b..c33c7865145c 100644
--- a/drivers/net/wireless/ath/ath11k/pci.h
+++ b/drivers/net/wireless/ath/ath11k/pci.h
@@ -64,6 +64,7 @@ struct ath11k_pci {
char amss_path[100];
struct mhi_controller *mhi_ctrl;
const struct ath11k_msi_config *msi_config;
+ enum mhi_callback mhi_pre_cb;
u32 register_window;
/* protects register_window above */
diff --git a/drivers/net/wireless/ath/ath11k/pcic.c b/drivers/net/wireless/ath/ath11k/pcic.c
index add4db4c50bc..debe7c5919ef 100644
--- a/drivers/net/wireless/ath/ath11k/pcic.c
+++ b/drivers/net/wireless/ath/ath11k/pcic.c
@@ -316,6 +316,7 @@ static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
+ free_netdev(irq_grp->napi_ndev);
}
}
@@ -558,8 +559,9 @@ ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
{
- int i, j, ret, num_vectors = 0;
+ int i, j, n, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0;
+ struct ath11k_ext_irq_grp *irq_grp;
unsigned long irq_flags;
ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
@@ -573,13 +575,18 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
irq_flags |= IRQF_NOBALANCING;
for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ irq_grp = &ab->ext_irq_grp[i];
u32 num_irq = 0;
irq_grp->ab = ab;
irq_grp->grp_id = i;
- init_dummy_netdev(&irq_grp->napi_ndev);
- netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev) {
+ ret = -ENOMEM;
+ goto fail_allocate;
+ }
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
ath11k_pcic_ext_grp_napi_poll);
if (ab->hw_params.ring_mask->tx[i] ||
@@ -601,8 +608,10 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
int vector = (i % num_vectors) + base_vector;
int irq = ath11k_pcic_get_msi_irq(ab, vector);
- if (irq < 0)
- return irq;
+ if (irq < 0) {
+ ret = irq;
+ goto fail_irq;
+ }
ab->irq_num[irq_idx] = irq;
@@ -615,6 +624,10 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
if (ret) {
ath11k_err(ab, "failed request irq %d: %d\n",
vector, ret);
+ for (n = 0; n <= i; n++) {
+ irq_grp = &ab->ext_irq_grp[n];
+ free_netdev(irq_grp->napi_ndev);
+ }
return ret;
}
}
@@ -622,6 +635,15 @@ static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
}
return 0;
+fail_irq:
+ /* the i-th napi_ndev was allocated successfully, so free it too */
+ i += 1;
+fail_allocate:
+ for (n = 0; n < i; n++) {
+ irq_grp = &ab->ext_irq_grp[n];
+ free_netdev(irq_grp->napi_ndev);
+ }
+ return ret;
}
int ath11k_pcic_config_irq(struct ath11k_base *ab)
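
The error paths added above follow the usual partial-unwind idiom: on an allocation failure only groups 0..i-1 own a napi_ndev, while on a request-IRQ failure group i's ndev is already allocated as well, hence the i += 1 before falling through. A generic sketch of the same pattern (alloc_resource()/free_resource() are hypothetical placeholders):

	/* Sketch: acquire n resources, unwinding those already acquired */
	static int acquire_all(void **res, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			res[i] = alloc_resource();
			if (!res[i])
				goto unwind;	/* res[0..i-1] are valid */
		}
		return 0;

	unwind:
		while (i--)
			free_resource(res[i]);
		return -ENOMEM;
	}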
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index 5006f81f779b..1bc648920ab6 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -2293,7 +2293,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
struct qmi_txn txn;
const u8 *temp = data;
void __iomem *bdf_addr = NULL;
- int ret;
+ int ret = 0;
u32 remaining = len;
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -2859,7 +2859,7 @@ int ath11k_qmi_firmware_start(struct ath11k_base *ab,
int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
{
- int timeout;
+ long time_left;
if (!ath11k_core_coldboot_cal_support(ab) ||
ab->hw_params.cbcal_restart_fw == 0)
@@ -2867,17 +2867,17 @@ int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_QMI, "wait for cold boot done\n");
- timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
- (ab->qmi.cal_done == 1),
- ATH11K_COLD_BOOT_FW_RESET_DELAY);
+ time_left = wait_event_timeout(ab->qmi.cold_boot_waitq,
+ (ab->qmi.cal_done == 1),
+ ATH11K_COLD_BOOT_FW_RESET_DELAY);
- if (timeout <= 0) {
+ if (time_left <= 0) {
ath11k_warn(ab, "Coldboot Calibration timed out\n");
return -ETIMEDOUT;
}
/* reset the firmware */
- ath11k_hif_power_down(ab);
+ ath11k_hif_power_down(ab, false);
ath11k_hif_power_up(ab);
ath11k_dbg(ab, ATH11K_DBG_QMI, "exit wait for cold boot done\n");
return 0;
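
The int-to-long change above matches the return type of wait_event_timeout(), which yields 0 on timeout and otherwise the remaining jiffies (at least 1), so the <= 0 test is effectively a timeout check. A sketch of the usual pattern, assuming a wait queue and a condition flag:

	/* Sketch: typical wait_event_timeout() result handling */
	long time_left;

	time_left = wait_event_timeout(wq, cond == 1,
				       msecs_to_jiffies(5000));
	if (!time_left)
		return -ETIMEDOUT;	/* condition never became true */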
@@ -2886,7 +2886,7 @@ EXPORT_SYMBOL(ath11k_qmi_fwreset_from_cold_boot);
static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
{
- int timeout;
+ long time_left;
int ret;
ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT);
@@ -2897,10 +2897,10 @@ static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration wait started\n");
- timeout = wait_event_timeout(ab->qmi.cold_boot_waitq,
- (ab->qmi.cal_done == 1),
- ATH11K_COLD_BOOT_FW_RESET_DELAY);
- if (timeout <= 0) {
+ time_left = wait_event_timeout(ab->qmi.cold_boot_waitq,
+ (ab->qmi.cal_done == 1),
+ ATH11K_COLD_BOOT_FW_RESET_DELAY);
+ if (time_left <= 0) {
ath11k_warn(ab, "coldboot calibration timed out\n");
return 0;
}
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index 737fcd450d4b..b0f289784dd3 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/rtnetlink.h>
@@ -49,7 +49,6 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wmi_init_country_params init_country_param;
- struct wmi_set_current_country_params set_current_param = {};
struct ath11k *ar = hw->priv;
int ret;
@@ -83,9 +82,8 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
* reg info
*/
if (ar->ab->hw_params.current_cc_support) {
- memcpy(&set_current_param.alpha2, request->alpha2, 2);
- memcpy(&ar->alpha2, &set_current_param.alpha2, 2);
- ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+ memcpy(&ar->alpha2, request->alpha2, 2);
+ ret = ath11k_reg_set_cc(ar);
if (ret)
ath11k_warn(ar->ab,
"failed set current country code: %d\n", ret);
@@ -878,7 +876,7 @@ int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
ath11k_reg_reset_info(reg_info);
if (ab->hw_params.single_pdev_only &&
- pdev_idx < ab->hw_params.num_rxmda_per_pdev)
+ pdev_idx < ab->hw_params.num_rxdma_per_pdev)
return 0;
goto fallback;
}
@@ -1017,3 +1015,11 @@ void ath11k_reg_free(struct ath11k_base *ab)
kfree(ab->new_regd[i]);
}
}
+
+int ath11k_reg_set_cc(struct ath11k *ar)
+{
+ struct wmi_set_current_country_params set_current_param = {};
+
+ memcpy(&set_current_param.alpha2, ar->alpha2, 2);
+ return ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param);
+}
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index 64edb794260a..263ea9061948 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_REG_H
@@ -45,5 +45,5 @@ ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type);
int ath11k_reg_handle_chan_list(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info,
enum ieee80211_ap_reg_power power_type);
-
+int ath11k_reg_set_cc(struct ath11k *ar);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/thermal.c b/drivers/net/wireless/ath/ath11k/thermal.c
index 41e7499f075f..18d6eab5cce3 100644
--- a/drivers/net/wireless/ath/ath11k/thermal.c
+++ b/drivers/net/wireless/ath/ath11k/thermal.c
@@ -101,7 +101,7 @@ static ssize_t ath11k_thermal_show_temp(struct device *dev,
spin_unlock_bh(&ar->data_lock);
/* display in millidegree Celsius */
- ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
+ ret = sysfs_emit(buf, "%d\n", temperature * 1000);
out:
mutex_unlock(&ar->conf_mutex);
return ret;
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index 235ab8ea715f..75246b0a82e3 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -48,8 +48,8 @@ TRACE_EVENT(ath11k_htt_pktlog,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->ab->dev));
- __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->buf_len = buf_len;
__entry->pktlog_checksum = pktlog_checksum;
memcpy(__get_dynamic_array(pktlog), buf, buf_len);
@@ -77,8 +77,8 @@ TRACE_EVENT(ath11k_htt_ppdu_stats,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->ab->dev));
- __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->len = len;
memcpy(__get_dynamic_array(ppdu), data, len);
),
@@ -105,8 +105,8 @@ TRACE_EVENT(ath11k_htt_rxdesc,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->ab->dev));
- __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->len = len;
__entry->log_type = log_type;
memcpy(__get_dynamic_array(rxdesc), data, len);
@@ -130,8 +130,8 @@ DECLARE_EVENT_CLASS(ath11k_log_event,
__vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- __assign_str(device, dev_name(ab->dev));
- __assign_str(driver, dev_driver_string(ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk(
@@ -171,8 +171,8 @@ TRACE_EVENT(ath11k_wmi_cmd,
),
TP_fast_assign(
- __assign_str(device, dev_name(ab->dev));
- __assign_str(driver, dev_driver_string(ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->id = id;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
@@ -201,8 +201,8 @@ TRACE_EVENT(ath11k_wmi_event,
),
TP_fast_assign(
- __assign_str(device, dev_name(ab->dev));
- __assign_str(driver, dev_driver_string(ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->id = id;
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
@@ -230,8 +230,8 @@ TRACE_EVENT(ath11k_log_dbg,
),
TP_fast_assign(
- __assign_str(device, dev_name(ab->dev));
- __assign_str(driver, dev_driver_string(ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->level = level;
WARN_ON_ONCE(vsnprintf(__get_dynamic_array(msg),
ATH11K_MSG_MAX, vaf->fmt,
@@ -262,10 +262,10 @@ TRACE_EVENT(ath11k_log_dbg_dump,
),
TP_fast_assign(
- __assign_str(device, dev_name(ab->dev));
- __assign_str(driver, dev_driver_string(ab->dev));
- __assign_str(msg, msg);
- __assign_str(prefix, prefix);
+ __assign_str(device);
+ __assign_str(driver);
+ __assign_str(msg);
+ __assign_str(prefix);
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
@@ -292,8 +292,8 @@ TRACE_EVENT(ath11k_wmi_diag,
),
TP_fast_assign(
- __assign_str(device, dev_name(ab->dev));
- __assign_str(driver, dev_driver_string(ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->len = len;
memcpy(__get_dynamic_array(data), data, len);
),
@@ -318,8 +318,8 @@ TRACE_EVENT(ath11k_ps_timekeeper,
__field(u32, peer_ps_timestamp)
),
- TP_fast_assign(__assign_str(device, dev_name(ar->ab->dev));
- __assign_str(driver, dev_driver_string(ar->ab->dev));
+ TP_fast_assign(__assign_str(device);
+ __assign_str(driver);
memcpy(__get_dynamic_array(peer_addr), peer_addr,
ETH_ALEN);
__entry->peer_ps_state = peer_ps_state;
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 34ab9631ff36..38f175dd1557 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -20,6 +20,7 @@
#include "hw.h"
#include "peer.h"
#include "testmode.h"
+#include "p2p.h"
struct wmi_tlv_policy {
size_t min_len;
@@ -154,6 +155,10 @@ static const struct wmi_tlv_policy wmi_tlv_policies[] = {
.min_len = sizeof(struct wmi_per_chain_rssi_stats) },
[WMI_TAG_TWT_ADD_DIALOG_COMPLETE_EVENT] = {
.min_len = sizeof(struct wmi_twt_add_dialog_event) },
+ [WMI_TAG_P2P_NOA_INFO] = {
+ .min_len = sizeof(struct ath11k_wmi_p2p_noa_info) },
+ [WMI_TAG_P2P_NOA_EVENT] = {
+ .min_len = sizeof(struct wmi_p2p_noa_event) },
};
#define PRIMAP(_hw_mode_) \
@@ -981,7 +986,7 @@ int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
FIELD_PREP(WMI_TLV_LEN, 0);
/* Note: This is a nested TLV containing:
- * [wmi_tlv][wmi_p2p_noa_descriptor][wmi_tlv]..
+ * [wmi_tlv][ath11k_wmi_p2p_noa_descriptor][wmi_tlv]..
*/
ptr += sizeof(*tlv);
@@ -1704,6 +1709,45 @@ int ath11k_wmi_send_bcn_offload_control_cmd(struct ath11k *ar,
return ret;
}
+int ath11k_wmi_p2p_go_bcn_ie(struct ath11k *ar, u32 vdev_id,
+ const u8 *p2p_ie)
+{
+ struct ath11k_pdev_wmi *wmi = ar->wmi;
+ struct wmi_p2p_go_set_beacon_ie_cmd *cmd;
+ size_t p2p_ie_len, aligned_len;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret, len;
+
+ p2p_ie_len = p2p_ie[1] + 2;
+ aligned_len = roundup(p2p_ie_len, 4);
+
+ len = sizeof(*cmd) + TLV_HDR_SIZE + aligned_len;
+
+ skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_p2p_go_set_beacon_ie_cmd *)skb->data;
+ cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_P2P_GO_SET_BEACON_IE) |
+ FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
+ cmd->vdev_id = vdev_id;
+ cmd->ie_buf_len = p2p_ie_len;
+
+ tlv = (struct wmi_tlv *)cmd->tlv;
+ tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
+ FIELD_PREP(WMI_TLV_LEN, aligned_len);
+ memcpy(tlv->value, p2p_ie, p2p_ie_len);
+
+ ret = ath11k_wmi_cmd_send(wmi, skb, WMI_P2P_GO_SET_BEACON_IE);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to send WMI_P2P_GO_SET_BEACON_IE\n");
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn, u32 ema_params)
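
One subtlety in ath11k_wmi_p2p_go_bcn_ie() above: ie_buf_len carries the real IE length (p2p_ie[1] + 2) while the byte-array TLV is sized to the 4-byte-aligned length, because WMI TLV payloads are padded. A quick sketch of that sizing, assuming p2p_ie points at a well-formed element (p2p_ie[0] is the EID, p2p_ie[1] the body length):

	/* Sketch: buffer sizing for an IE carried in a WMI byte-array TLV */
	size_t ie_len = p2p_ie[1] + 2;		/* 2-byte header + body */
	size_t padded = roundup(ie_len, 4);	/* TLV payload alignment */
	size_t total = sizeof(struct wmi_p2p_go_set_beacon_ie_cmd) +
		       TLV_HDR_SIZE + padded;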
@@ -4020,7 +4064,8 @@ ath11k_wmi_obss_color_collision_event(struct ath11k_base *ab, struct sk_buff *sk
switch (ev->evt_type) {
case WMI_BSS_COLOR_COLLISION_DETECTION:
- ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap);
+ ieee80211_obss_color_collision_notify(arvif->vif, ev->obss_color_bitmap,
+ 0);
ath11k_dbg(ab, ATH11K_DBG_WMI,
"OBSS color collision detected vdev:%d, event:%d, bitmap:%08llx\n",
ev->vdev_id, ev->evt_type, ev->obss_color_bitmap);
@@ -8606,6 +8651,58 @@ exit:
kfree(tb);
}
+static void ath11k_wmi_p2p_noa_event(struct ath11k_base *ab,
+ struct sk_buff *skb)
+{
+ const void **tb;
+ const struct wmi_p2p_noa_event *ev;
+ const struct ath11k_wmi_p2p_noa_info *noa;
+ struct ath11k *ar;
+ int vdev_id;
+ u8 noa_descriptors;
+
+ tb = ath11k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ath11k_warn(ab, "failed to parse tlv: %ld\n", PTR_ERR(tb));
+ return;
+ }
+
+ ev = tb[WMI_TAG_P2P_NOA_EVENT];
+ noa = tb[WMI_TAG_P2P_NOA_INFO];
+
+ if (!ev || !noa)
+ goto out;
+
+ vdev_id = ev->vdev_id;
+ noa_descriptors = u32_get_bits(noa->noa_attr,
+ WMI_P2P_NOA_INFO_DESC_NUM);
+
+ if (noa_descriptors > WMI_P2P_MAX_NOA_DESCRIPTORS) {
+ ath11k_warn(ab, "invalid descriptor num %d in P2P NoA event\n",
+ noa_descriptors);
+ goto out;
+ }
+
+ ath11k_dbg(ab, ATH11K_DBG_WMI,
+ "wmi tlv p2p noa vdev_id %i descriptors %u\n",
+ vdev_id, noa_descriptors);
+
+ rcu_read_lock();
+ ar = ath11k_mac_get_ar_by_vdev_id(ab, vdev_id);
+ if (!ar) {
+ ath11k_warn(ab, "invalid vdev id %d in P2P NoA event\n",
+ vdev_id);
+ goto unlock;
+ }
+
+ ath11k_p2p_noa_update_by_vdev_id(ar, vdev_id, noa);
+
+unlock:
+ rcu_read_unlock();
+out:
+ kfree(tb);
+}
+
static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -8733,6 +8830,9 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_GTK_OFFLOAD_STATUS_EVENTID:
ath11k_wmi_gtk_offload_status_event(ab, skb);
break;
+ case WMI_P2P_NOA_EVENTID:
+ ath11k_wmi_p2p_noa_event(ab, skb);
+ break;
default:
ath11k_dbg(ab, ATH11K_DBG_WMI, "unsupported event id 0x%x\n", id);
break;
@@ -8982,7 +9082,7 @@ int ath11k_wmi_attach(struct ath11k_base *ab)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX;
/* It's overwritten when service_ext_ready is handled */
- if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1)
+ if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1)
ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE;
/* TODO: Init remaining wmi soc resources required */
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index bb419e3abb00..8982b909c821 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH11K_WMI_H
@@ -60,13 +60,9 @@ struct wmi_tlv {
#define WLAN_SCAN_MAX_HINT_BSSID 10
#define MAX_RNR_BSS 5
-#define WLAN_SCAN_MAX_HINT_S_SSID 10
-#define WLAN_SCAN_MAX_HINT_BSSID 10
-#define MAX_RNR_BSS 5
-
#define WLAN_SCAN_PARAMS_MAX_SSID 16
#define WLAN_SCAN_PARAMS_MAX_BSSID 4
-#define WLAN_SCAN_PARAMS_MAX_IE_LEN 256
+#define WLAN_SCAN_PARAMS_MAX_IE_LEN 512
#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
@@ -3444,34 +3440,6 @@ struct wmi_bssid_arg {
const u8 *bssid;
};
-struct wmi_start_scan_arg {
- u32 scan_id;
- u32 scan_req_id;
- u32 vdev_id;
- u32 scan_priority;
- u32 notify_scan_events;
- u32 dwell_time_active;
- u32 dwell_time_passive;
- u32 min_rest_time;
- u32 max_rest_time;
- u32 repeat_probe_time;
- u32 probe_spacing_time;
- u32 idle_time;
- u32 max_scan_time;
- u32 probe_delay;
- u32 scan_ctrl_flags;
-
- u32 ie_len;
- u32 n_channels;
- u32 n_ssids;
- u32 n_bssids;
-
- u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
- u32 channels[64];
- struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
- struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
-};
-
#define WMI_SCAN_STOP_ONE 0x00000000
#define WMI_SCN_STOP_VAP_ALL 0x01000000
#define WMI_SCAN_STOP_ALL 0x04000000
@@ -3630,6 +3598,37 @@ struct wmi_ftm_event_msg {
u8 data[];
} __packed;
+#define WMI_P2P_MAX_NOA_DESCRIPTORS 4
+
+struct wmi_p2p_noa_event {
+ u32 vdev_id;
+} __packed;
+
+struct ath11k_wmi_p2p_noa_descriptor {
+ u32 type_count; /* 255: continuous schedule, 0: reserved */
+ u32 duration; /* Absent period duration in microseconds */
+ u32 interval; /* Absent period interval in microseconds */
+ u32 start_time; /* 32 bit TSF time when it starts */
+} __packed;
+
+#define WMI_P2P_NOA_INFO_CHANGED_FLAG BIT(0)
+#define WMI_P2P_NOA_INFO_INDEX GENMASK(15, 8)
+#define WMI_P2P_NOA_INFO_OPP_PS BIT(16)
+#define WMI_P2P_NOA_INFO_CTWIN_TU GENMASK(23, 17)
+#define WMI_P2P_NOA_INFO_DESC_NUM GENMASK(31, 24)
+
+struct ath11k_wmi_p2p_noa_info {
+ /* Bit 0 - Flag to indicate an update in NOA schedule
+ * Bits 7-1 - Reserved
+ * Bits 15-8 - Index (identifies the instance of NOA sub element)
+ * Bit 16 - Opp PS state of the AP
+ * Bits 23-17 - Ctwindow in TUs
+ * Bits 31-24 - Number of NOA descriptors
+ */
+ u32 noa_attr;
+ struct ath11k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
+} __packed;
+
#define WMI_BEACON_TX_BUFFER_SIZE 512
#define WMI_EMA_TMPL_IDX_SHIFT 8
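
The noa_attr word packs all of the NoA bookkeeping into a single u32, decoded with the GENMASK/BIT fields defined above. A sketch of the unpacking as the event handler and IE builder read it (u32_get_bits() comes from <linux/bitfield.h>):

	/* Sketch: decoding the packed noa_attr word */
	bool changed = u32_get_bits(noa->noa_attr,
				    WMI_P2P_NOA_INFO_CHANGED_FLAG);
	u8 index = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_INDEX);
	bool opp_ps = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_OPP_PS);
	u8 ctwindow = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_CTWIN_TU);
	u8 num_desc = u32_get_bits(noa->noa_attr, WMI_P2P_NOA_INFO_DESC_NUM);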
@@ -3653,6 +3652,13 @@ struct wmi_bcn_tmpl_cmd {
u32 ema_params;
} __packed;
+struct wmi_p2p_go_set_beacon_ie_cmd {
+ u32 tlv_header;
+ u32 vdev_id;
+ u32 ie_buf_len;
+ u8 tlv[];
+} __packed;
+
struct wmi_key_seq_counter {
u32 key_seq_counter_l;
u32 key_seq_counter_h;
@@ -5740,8 +5746,6 @@ struct wmi_debug_log_config_cmd_fixed_param {
u32 value;
} __packed;
-#define WMI_MAX_MEM_REQS 32
-
#define MAX_RADIOS 3
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
@@ -6349,6 +6353,8 @@ int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
struct sk_buff *ath11k_wmi_alloc_skb(struct ath11k_wmi_base *wmi_sc, u32 len);
int ath11k_wmi_mgmt_send(struct ath11k *ar, u32 vdev_id, u32 buf_id,
struct sk_buff *frame);
+int ath11k_wmi_p2p_go_bcn_ie(struct ath11k *ar, u32 vdev_id,
+ const u8 *p2p_ie);
int ath11k_wmi_bcn_tmpl(struct ath11k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
struct sk_buff *bcn, u32 ema_param);
diff --git a/drivers/net/wireless/ath/ath12k/Kconfig b/drivers/net/wireless/ath/ath12k/Kconfig
index e135d2b1b61d..f64e7c322216 100644
--- a/drivers/net/wireless/ath/ath12k/Kconfig
+++ b/drivers/net/wireless/ath/ath12k/Kconfig
@@ -7,6 +7,7 @@ config ATH12K
select MHI_BUS
select QRTR
select QRTR_MHI
+ select PCI_PWRCTL_PWRSEQ if HAVE_PWRCTL
help
Enable support for Qualcomm Technologies Wi-Fi 7 (IEEE
802.11be) family of chipsets, for example WCN7850 and
@@ -24,6 +25,15 @@ config ATH12K_DEBUG
If unsure, say Y to make it easier to debug problems. But if
you want optimal performance choose N.
+config ATH12K_DEBUGFS
+ bool "QTI ath12k debugfs support"
+ depends on ATH12K && MAC80211_DEBUGFS
+ help
+ Enable ath12k debugfs support
+
+ If unsure, say Y to make it easier to debug problems. But if
+ you want optimal performance choose N.
+
config ATH12K_TRACING
bool "ath12k tracing support"
depends on ATH12K && EVENT_TRACING
diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile
index 71669f94ff75..5a1ed20d730e 100644
--- a/drivers/net/wireless/ath/ath12k/Makefile
+++ b/drivers/net/wireless/ath/ath12k/Makefile
@@ -23,7 +23,10 @@ ath12k-y += core.o \
fw.o \
p2p.o
+ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o
+ath12k-$(CONFIG_ACPI) += acpi.o
ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
+ath12k-$(CONFIG_PM) += wow.o
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c
new file mode 100644
index 000000000000..0555d35aab47
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/acpi.c
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "acpi.h"
+#include "debug.h"
+
+static int ath12k_acpi_dsm_get_data(struct ath12k_base *ab, int func)
+{
+ union acpi_object *obj;
+ acpi_handle root_handle;
+ int ret;
+
+ root_handle = ACPI_HANDLE(ab->dev);
+ if (!root_handle) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "invalid acpi handler\n");
+ return -EOPNOTSUPP;
+ }
+
+ obj = acpi_evaluate_dsm(root_handle, ab->hw_params->acpi_guid, 0, func,
+ NULL);
+
+ if (!obj) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi_evaluate_dsm() failed\n");
+ return -ENOENT;
+ }
+
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ ab->acpi.func_bit = obj->integer.value;
+ } else if (obj->type == ACPI_TYPE_BUFFER) {
+ switch (func) {
+ case ATH12K_ACPI_DSM_FUNC_TAS_CFG:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_CFG_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM TAS config size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.tas_cfg, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_TAS_DATA:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_TAS_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM TAS data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.tas_sar_power_table, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_BIOS_SAR:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI BIOS SAR data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.bios_sar_data, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_GEO_OFFSET:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI GEO OFFSET data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.geo_offset_data, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_INDEX_CCA:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_CCA_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM CCA data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.cca_data, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ case ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE:
+ if (obj->buffer.length != ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE) {
+ ath12k_warn(ab, "invalid ACPI DSM band edge data size: %d\n",
+ obj->buffer.length);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ memcpy(&ab->acpi.band_edge_power, obj->buffer.pointer,
+ obj->buffer.length);
+
+ break;
+ }
+ } else {
+ ath12k_warn(ab, "ACPI DSM method returned an unsupported object type: %d\n",
+ obj->type);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = 0;
+
+out:
+ ACPI_FREE(obj);
+ return ret;
+}
+
+static int ath12k_acpi_set_power_limit(struct ath12k_base *ab)
+{
+ const u8 *tas_sar_power_table = ab->acpi.tas_sar_power_table;
+ int ret;
+
+ if (tas_sar_power_table[0] != ATH12K_ACPI_TAS_DATA_VERSION ||
+ tas_sar_power_table[1] != ATH12K_ACPI_TAS_DATA_ENABLE) {
+ ath12k_warn(ab, "latest ACPI TAS data is invalid\n");
+ return -EINVAL;
+ }
+
+ ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_DATA_TYPE,
+ tas_sar_power_table,
+ ATH12K_ACPI_DSM_TAS_DATA_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS data table: %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int ath12k_acpi_set_bios_sar_power(struct ath12k_base *ab)
+{
+ int ret;
+
+ if (ab->acpi.bios_sar_data[0] != ATH12K_ACPI_POWER_LIMIT_VERSION ||
+ ab->acpi.bios_sar_data[1] != ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG) {
+ ath12k_warn(ab, "invalid latest ACPI BIOS SAR data\n");
+ return -EINVAL;
+ }
+
+ ret = ath12k_wmi_set_bios_sar_cmd(ab, ab->acpi.bios_sar_data);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI BIOS SAR table: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ath12k_acpi_dsm_notify(acpi_handle handle, u32 event, void *data)
+{
+ int ret;
+ struct ath12k_base *ab = data;
+
+ if (event != ATH12K_ACPI_NOTIFY_EVENT) {
+ ath12k_warn(ab, "unknown acpi notify %u\n", event);
+ return;
+ }
+
+ if (!ab->acpi.acpi_tas_enable) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi_tas_enable is false\n");
+ return;
+ }
+
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_DATA);
+ if (ret) {
+ ath12k_warn(ab, "failed to update ACPI TAS data table: %d\n", ret);
+ return;
+ }
+
+ ret = ath12k_acpi_set_power_limit(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI TAS power limit data: %d", ret);
+ return;
+ }
+
+ if (!ab->acpi.acpi_bios_sar_enable)
+ return;
+
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BIOS_SAR);
+ if (ret) {
+ ath12k_warn(ab, "failed to update BIOS SAR: %d\n", ret);
+ return;
+ }
+
+ ret = ath12k_acpi_set_bios_sar_power(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to set BIOS SAR power limit: %d\n", ret);
+ return;
+ }
+}
+
+static int ath12k_acpi_set_bios_sar_params(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_wmi_set_bios_sar_cmd(ab, ab->acpi.bios_sar_data);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI BIOS SAR table: %d\n", ret);
+ return ret;
+ }
+
+ ret = ath12k_wmi_set_bios_geo_cmd(ab, ab->acpi.geo_offset_data);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI BIOS GEO table: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_acpi_set_tas_params(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_CONFIG_TYPE,
+ ab->acpi.tas_cfg,
+ ATH12K_ACPI_DSM_TAS_CFG_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS config table parameter: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = ath12k_wmi_set_bios_cmd(ab, WMI_BIOS_PARAM_TAS_DATA_TYPE,
+ ab->acpi.tas_sar_power_table,
+ ATH12K_ACPI_DSM_TAS_DATA_SIZE);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI TAS data table parameter: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_acpi_start(struct ath12k_base *ab)
+{
+ acpi_status status;
+ u8 *buf;
+ int ret;
+
+ if (!ab->hw_params->acpi_guid)
+ /* not supported with this hardware */
+ return 0;
+
+ ab->acpi.acpi_tas_enable = false;
+
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS);
+ if (ret) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "failed to get ACPI DSM data: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_CFG);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI TAS config table: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_DATA)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_TAS_DATA);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI TAS data table: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_TAS_CFG) &&
+ ab->acpi.tas_sar_power_table[0] == ATH12K_ACPI_TAS_DATA_VERSION &&
+ ab->acpi.tas_sar_power_table[1] == ATH12K_ACPI_TAS_DATA_ENABLE)
+ ab->acpi.acpi_tas_enable = true;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BIOS_SAR)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_BIOS_SAR);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI bios sar data: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_GEO_OFFSET)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_GEO_OFFSET);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI geo offset data: %d\n", ret);
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_BIOS_SAR) &&
+ ab->acpi.bios_sar_data[0] == ATH12K_ACPI_POWER_LIMIT_VERSION &&
+ ab->acpi.bios_sar_data[1] == ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG &&
+ !ab->acpi.acpi_tas_enable)
+ ab->acpi.acpi_bios_sar_enable = true;
+ }
+
+ if (ab->acpi.acpi_tas_enable) {
+ ret = ath12k_acpi_set_tas_params(ab);
+ if (ret) {
+ ath12k_warn(ab, "failed to send ACPI parameters: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (ab->acpi.acpi_bios_sar_enable) {
+ ret = ath12k_acpi_set_bios_sar_params(ab);
+ if (ret)
+ return ret;
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi, ATH12K_ACPI_FUNC_BIT_CCA)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_CCA);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI DSM CCA threshold configuration: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ab->acpi.cca_data[0] == ATH12K_ACPI_CCA_THR_VERSION &&
+ ab->acpi.cca_data[ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET] ==
+ ATH12K_ACPI_CCA_THR_ENABLE_FLAG) {
+ buf = ab->acpi.cca_data + ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET;
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE,
+ buf,
+ ATH12K_ACPI_CCA_THR_OFFSET_LEN);
+ if (ret) {
+ ath12k_warn(ab, "failed to set ACPI DSM CCA threshold: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ if (ATH12K_ACPI_FUNC_BIT_VALID(ab->acpi,
+ ATH12K_ACPI_FUNC_BIT_BAND_EDGE_CHAN_POWER)) {
+ ret = ath12k_acpi_dsm_get_data(ab, ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE);
+ if (ret) {
+ ath12k_warn(ab, "failed to get ACPI DSM band edge channel power: %d\n",
+ ret);
+ return ret;
+ }
+
+ if (ab->acpi.band_edge_power[0] == ATH12K_ACPI_BAND_EDGE_VERSION &&
+ ab->acpi.band_edge_power[1] == ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG) {
+ ret = ath12k_wmi_set_bios_cmd(ab,
+ WMI_BIOS_PARAM_TYPE_BANDEDGE,
+ ab->acpi.band_edge_power,
+ sizeof(ab->acpi.band_edge_power));
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to set ACPI DSM band edge channel power: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ status = acpi_install_notify_handler(ACPI_HANDLE(ab->dev),
+ ACPI_DEVICE_NOTIFY,
+ ath12k_acpi_dsm_notify, ab);
+ if (ACPI_FAILURE(status)) {
+ ath12k_warn(ab, "failed to install DSM notify callback: %d\n", status);
+ return -EIO;
+ }
+
+ ab->acpi.started = true;
+
+ return 0;
+}
+
+void ath12k_acpi_stop(struct ath12k_base *ab)
+{
+ if (!ab->acpi.started)
+ return;
+
+ acpi_remove_notify_handler(ACPI_HANDLE(ab->dev),
+ ACPI_DEVICE_NOTIFY,
+ ath12k_acpi_dsm_notify);
+
+ memset(&ab->acpi, 0, sizeof(ab->acpi));
+}
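
ath12k_acpi_start() and ath12k_acpi_stop() above pair acpi_install_notify_handler() with acpi_remove_notify_handler() on the same handle, handler type, and callback, which is what the ACPI core expects. A bare-bones sketch of that lifecycle (the demo_* names are hypothetical):

	/* Sketch: minimal ACPI notify registration/teardown pairing */
	static void demo_notify(acpi_handle handle, u32 event, void *data)
	{
		/* react to the firmware notify event */
	}

	static int demo_start(struct device *dev, void *priv)
	{
		acpi_status status;

		status = acpi_install_notify_handler(ACPI_HANDLE(dev),
						     ACPI_DEVICE_NOTIFY,
						     demo_notify, priv);
		return ACPI_FAILURE(status) ? -EIO : 0;
	}

	static void demo_stop(struct device *dev)
	{
		acpi_remove_notify_handler(ACPI_HANDLE(dev),
					   ACPI_DEVICE_NOTIFY, demo_notify);
	}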
diff --git a/drivers/net/wireless/ath/ath12k/acpi.h b/drivers/net/wireless/ath/ath12k/acpi.h
new file mode 100644
index 000000000000..39e003d86a48
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/acpi.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_ACPI_H
+#define ATH12K_ACPI_H
+
+#include <linux/acpi.h>
+
+#define ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS 0
+#define ATH12K_ACPI_DSM_FUNC_BIOS_SAR 4
+#define ATH12K_ACPI_DSM_FUNC_GEO_OFFSET 5
+#define ATH12K_ACPI_DSM_FUNC_INDEX_CCA 6
+#define ATH12K_ACPI_DSM_FUNC_TAS_CFG 8
+#define ATH12K_ACPI_DSM_FUNC_TAS_DATA 9
+#define ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE 10
+
+#define ATH12K_ACPI_FUNC_BIT_BIOS_SAR BIT(3)
+#define ATH12K_ACPI_FUNC_BIT_GEO_OFFSET BIT(4)
+#define ATH12K_ACPI_FUNC_BIT_CCA BIT(5)
+#define ATH12K_ACPI_FUNC_BIT_TAS_CFG BIT(7)
+#define ATH12K_ACPI_FUNC_BIT_TAS_DATA BIT(8)
+#define ATH12K_ACPI_FUNC_BIT_BAND_EDGE_CHAN_POWER BIT(9)
+
+#define ATH12K_ACPI_NOTIFY_EVENT 0x86
+#define ATH12K_ACPI_FUNC_BIT_VALID(_acdata, _func) (((_acdata).func_bit) & (_func))
+
+#define ATH12K_ACPI_TAS_DATA_VERSION 0x1
+#define ATH12K_ACPI_TAS_DATA_ENABLE 0x1
+#define ATH12K_ACPI_POWER_LIMIT_VERSION 0x1
+#define ATH12K_ACPI_POWER_LIMIT_ENABLE_FLAG 0x1
+#define ATH12K_ACPI_CCA_THR_VERSION 0x1
+#define ATH12K_ACPI_CCA_THR_ENABLE_FLAG 0x1
+#define ATH12K_ACPI_BAND_EDGE_VERSION 0x1
+#define ATH12K_ACPI_BAND_EDGE_ENABLE_FLAG 0x1
+
+#define ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET 1
+#define ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET 2
+#define ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET 5
+#define ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN 10
+#define ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET 12
+#define ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN 18
+#define ATH12K_ACPI_BIOS_SAR_TABLE_LEN 22
+#define ATH12K_ACPI_CCA_THR_OFFSET_LEN 36
+
+#define ATH12K_ACPI_DSM_TAS_DATA_SIZE 69
+#define ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE 100
+#define ATH12K_ACPI_DSM_TAS_CFG_SIZE 108
+
+#define ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE (ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET + \
+ ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN)
+#define ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE (ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET + \
+ ATH12K_ACPI_BIOS_SAR_TABLE_LEN)
+#define ATH12K_ACPI_DSM_CCA_DATA_SIZE (ATH12K_ACPI_CCA_THR_OFFSET_DATA_OFFSET + \
+ ATH12K_ACPI_CCA_THR_OFFSET_LEN)
+
+#ifdef CONFIG_ACPI
+
+int ath12k_acpi_start(struct ath12k_base *ab);
+void ath12k_acpi_stop(struct ath12k_base *ab);
+
+#else
+
+static inline int ath12k_acpi_start(struct ath12k_base *ab)
+{
+ return 0;
+}
+
+static inline void ath12k_acpi_stop(struct ath12k_base *ab)
+{
+}
+
+#endif /* CONFIG_ACPI */
+
+#endif /* ATH12K_ACPI_H */
diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h
index 79af3b6159f1..857bc5f9e946 100644
--- a/drivers/net/wireless/ath/ath12k/ce.h
+++ b/drivers/net/wireless/ath/ath12k/ce.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_CE_H
@@ -119,7 +119,7 @@ struct ath12k_ce_ring {
/* Host address space */
void *base_addr_owner_space_unaligned;
/* CE address space */
- u32 base_addr_ce_space_unaligned;
+ dma_addr_t base_addr_ce_space_unaligned;
/* Actual start of descriptors.
* Aligned to descriptor-size boundary.
@@ -129,7 +129,7 @@ struct ath12k_ce_ring {
void *base_addr_owner_space;
/* CE address space */
- u32 base_addr_ce_space;
+ dma_addr_t base_addr_ce_space;
/* HAL ring id */
u32 hal_ring_id;
diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c
index 391b6fb2bd42..51252e8bc1ae 100644
--- a/drivers/net/wireless/ath/ath12k/core.c
+++ b/drivers/net/wireless/ath/ath12k/core.c
@@ -15,6 +15,8 @@
#include "debug.h"
#include "hif.h"
#include "fw.h"
+#include "debugfs.h"
+#include "wow.h"
unsigned int ath12k_debug_mask;
module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
@@ -41,69 +43,121 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab)
return ret;
}
-int ath12k_core_suspend(struct ath12k_base *ab)
+/* Check if we need to continue with the suspend/resume operation.
+ * Return:
+ * a negative value: an error happened; don't continue.
+ * 0: no error, but don't continue.
+ * a positive value: no error; do continue.
+ */
+static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab)
{
- int ret;
+ struct ath12k *ar;
if (!ab->hw_params->supports_suspend)
return -EOPNOTSUPP;
- /* TODO: there can frames in queues so for now add delay as a hack.
- * Need to implement to handle and remove this delay.
+ /* So far, single_pdev_only chips have supports_suspend set to true,
+ * so pass 0 as a dummy pdev_id here.
*/
- msleep(500);
+ ar = ab->pdevs[0].ar;
+ if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF)
+ return 0;
- ret = ath12k_dp_rx_pktlog_stop(ab, true);
- if (ret) {
- ath12k_warn(ab, "failed to stop dp rx (and timer) pktlog during suspend: %d\n",
- ret);
+ return 1;
+}
+
+int ath12k_core_suspend(struct ath12k_base *ab)
+{
+ struct ath12k *ar;
+ int ret, i;
+
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
return ret;
+
+ for (i = 0; i < ab->num_radios; i++) {
+ ar = ab->pdevs[i].ar;
+ if (!ar)
+ continue;
+ ret = ath12k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
+ return ret;
+ }
}
- ret = ath12k_dp_rx_pktlog_stop(ab, false);
- if (ret) {
- ath12k_warn(ab, "failed to stop dp rx pktlog during suspend: %d\n",
- ret);
+ /* The PM framework skips the suspend_late/resume_early callbacks
+ * if other devices report errors in their suspend callbacks.
+ * However ath12k_core_resume() would still be called, because we
+ * return success here and the kernel thus puts us on the
+ * dpm_suspended_list. Since we won't go through a power down/up
+ * cycle, there is no chance to call complete(&ab->restart_completed)
+ * in ath12k_core_restart(), making ath12k_core_resume() time out.
+ * So call it here to avoid this issue. This also works in the case
+ * where no error happens and suspend_late/resume_early do get called,
+ * because the completion is reinitialized in
+ * ath12k_core_resume_early().
+ */
+ complete(&ab->restart_completed);
+
+ return 0;
+}
+EXPORT_SYMBOL(ath12k_core_suspend);
+
+int ath12k_core_suspend_late(struct ath12k_base *ab)
+{
+ int ret;
+
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
return ret;
- }
+
+ ath12k_acpi_stop(ab);
ath12k_hif_irq_disable(ab);
ath12k_hif_ce_irq_disable(ab);
- ret = ath12k_hif_suspend(ab);
- if (ret) {
- ath12k_warn(ab, "failed to suspend hif: %d\n", ret);
- return ret;
- }
+ ath12k_hif_power_down(ab, true);
return 0;
}
+EXPORT_SYMBOL(ath12k_core_suspend_late);
-int ath12k_core_resume(struct ath12k_base *ab)
+int ath12k_core_resume_early(struct ath12k_base *ab)
{
int ret;
- if (!ab->hw_params->supports_suspend)
- return -EOPNOTSUPP;
-
- ret = ath12k_hif_resume(ab);
- if (ret) {
- ath12k_warn(ab, "failed to resume hif during resume: %d\n", ret);
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
return ret;
- }
- ath12k_hif_ce_irq_enable(ab);
- ath12k_hif_irq_enable(ab);
+ reinit_completion(&ab->restart_completed);
+ ret = ath12k_hif_power_up(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to power up hif during resume: %d\n", ret);
- ret = ath12k_dp_rx_pktlog_start(ab);
- if (ret) {
- ath12k_warn(ab, "failed to start rx pktlog during resume: %d\n",
- ret);
+ return ret;
+}
+EXPORT_SYMBOL(ath12k_core_resume_early);
+
+int ath12k_core_resume(struct ath12k_base *ab)
+{
+ long time_left;
+ int ret;
+
+ ret = ath12k_core_continue_suspend_resume(ab);
+ if (ret <= 0)
return ret;
+
+ time_left = wait_for_completion_timeout(&ab->restart_completed,
+ ATH12K_RESET_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath12k_warn(ab, "timeout while waiting for restart complete");
+ return -ETIMEDOUT;
}
return 0;
}
+EXPORT_SYMBOL(ath12k_core_resume);
static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
size_t name_len, bool with_variant,
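
The reworked suspend/resume above hinges on a single completion: resume_early re-arms it before powering the hif back up, the restart worker completes it once reconfiguration is done, and resume blocks on it with a timeout. A compressed model of that handshake (illustrative only, not the driver's exact call graph):

	/* Sketch: the restart_completed handshake */
	static struct completion restart_done;

	static void resume_early_path(void)
	{
		reinit_completion(&restart_done);	/* arm before power-up */
		/* power up hif; firmware boot later runs the restart worker */
	}

	static void restart_worker_path(void)
	{
		/* ... reconfigure after power-up ... */
		complete(&restart_done);	/* unblocks resume below */
	}

	static int resume_path(void)
	{
		if (!wait_for_completion_timeout(&restart_done,
						 ATH12K_RESET_TIMEOUT_HZ))
			return -ETIMEDOUT;
		return 0;
	}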
@@ -542,6 +596,8 @@ static void ath12k_core_stop(struct ath12k_base *ab)
if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
ath12k_qmi_firmware_stop(ab);
+ ath12k_acpi_stop(ab);
+
ath12k_hif_stop(ab);
ath12k_wmi_detach(ab);
ath12k_dp_rx_pdev_reo_cleanup(ab);
@@ -628,6 +684,8 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
return ret;
}
+ ath12k_debugfs_soc_create(ab);
+
ret = ath12k_hif_power_up(ab);
if (ret) {
ath12k_err(ab, "failed to power up :%d\n", ret);
@@ -637,6 +695,7 @@ static int ath12k_core_soc_create(struct ath12k_base *ab)
return 0;
err_qmi_deinit:
+ ath12k_debugfs_soc_destroy(ab);
ath12k_qmi_deinit_service(ab);
return ret;
}
@@ -645,6 +704,7 @@ static void ath12k_core_soc_destroy(struct ath12k_base *ab)
{
ath12k_dp_free(ab);
ath12k_reg_free(ab);
+ ath12k_debugfs_soc_destroy(ab);
ath12k_qmi_deinit_service(ab);
}
@@ -779,6 +839,11 @@ static int ath12k_core_start(struct ath12k_base *ab,
goto err_reo_cleanup;
}
+ ret = ath12k_acpi_start(ab);
+ if (ret)
+ /* ACPI is optional so continue in case of an error */
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
+
return 0;
err_reo_cleanup:
@@ -874,9 +939,8 @@ static int ath12k_core_reconfigure_on_crash(struct ath12k_base *ab)
int ret;
mutex_lock(&ab->core_lock);
- ath12k_hif_irq_disable(ab);
ath12k_dp_pdev_free(ab);
- ath12k_hif_stop(ab);
+ ath12k_ce_cleanup_pipes(ab);
ath12k_wmi_detach(ab);
ath12k_dp_rx_pdev_reo_cleanup(ab);
mutex_unlock(&ab->core_lock);
@@ -960,9 +1024,8 @@ void ath12k_core_halt(struct ath12k *ar)
static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
{
struct ath12k *ar;
- struct ath12k_pdev *pdev;
struct ath12k_hw *ah;
- int i;
+ int i, j;
spin_lock_bh(&ab->base_lock);
ab->stats.fw_crash_counter++;
@@ -972,35 +1035,32 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
for (i = 0; i < ab->num_hw; i++) {
- if (!ab->ah[i])
+ ah = ab->ah[i];
+ if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
- ah = ab->ah[i];
ieee80211_stop_queues(ah->hw);
- }
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar || ar->state == ATH12K_STATE_OFF)
- continue;
+ for (j = 0; j < ah->num_radio; j++) {
+ ar = &ah->radio[j];
- ath12k_mac_drain_tx(ar);
- complete(&ar->scan.started);
- complete(&ar->scan.completed);
- complete(&ar->scan.on_channel);
- complete(&ar->peer_assoc_done);
- complete(&ar->peer_delete_done);
- complete(&ar->install_key_done);
- complete(&ar->vdev_setup_done);
- complete(&ar->vdev_delete_done);
- complete(&ar->bss_survey_done);
-
- wake_up(&ar->dp.tx_empty_waitq);
- idr_for_each(&ar->txmgmt_idr,
- ath12k_mac_tx_mgmt_pending_free, ar);
- idr_destroy(&ar->txmgmt_idr);
- wake_up(&ar->txmgmt_empty_waitq);
+ ath12k_mac_drain_tx(ar);
+ complete(&ar->scan.started);
+ complete(&ar->scan.completed);
+ complete(&ar->scan.on_channel);
+ complete(&ar->peer_assoc_done);
+ complete(&ar->peer_delete_done);
+ complete(&ar->install_key_done);
+ complete(&ar->vdev_setup_done);
+ complete(&ar->vdev_delete_done);
+ complete(&ar->bss_survey_done);
+
+ wake_up(&ar->dp.tx_empty_waitq);
+ idr_for_each(&ar->txmgmt_idr,
+ ath12k_mac_tx_mgmt_pending_free, ar);
+ idr_destroy(&ar->txmgmt_idr);
+ wake_up(&ar->txmgmt_empty_waitq);
+ }
}
wake_up(&ab->wmi_ab.tx_credits_wq);
@@ -1009,51 +1069,57 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
{
+ struct ath12k_hw *ah;
struct ath12k *ar;
- struct ath12k_pdev *pdev;
- int i;
+ int i, j;
- for (i = 0; i < ab->num_radios; i++) {
- pdev = &ab->pdevs[i];
- ar = pdev->ar;
- if (!ar || ar->state == ATH12K_STATE_OFF)
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+ if (!ah || ah->state == ATH12K_HW_STATE_OFF)
continue;
- mutex_lock(&ar->conf_mutex);
+ mutex_lock(&ah->hw_mutex);
+
+ switch (ah->state) {
+ case ATH12K_HW_STATE_ON:
+ ah->state = ATH12K_HW_STATE_RESTARTING;
+
+ for (j = 0; j < ah->num_radio; j++) {
+ ar = &ah->radio[j];
+
+ mutex_lock(&ar->conf_mutex);
+ ath12k_core_halt(ar);
+ mutex_unlock(&ar->conf_mutex);
+ }
- switch (ar->state) {
- case ATH12K_STATE_ON:
- ar->state = ATH12K_STATE_RESTARTING;
- ath12k_core_halt(ar);
- ieee80211_restart_hw(ath12k_ar_to_hw(ar));
break;
- case ATH12K_STATE_OFF:
+ case ATH12K_HW_STATE_OFF:
ath12k_warn(ab,
- "cannot restart radio %d that hasn't been started\n",
+ "cannot restart hw %d that hasn't been started\n",
i);
break;
- case ATH12K_STATE_RESTARTING:
+ case ATH12K_HW_STATE_RESTARTING:
break;
- case ATH12K_STATE_RESTARTED:
- ar->state = ATH12K_STATE_WEDGED;
+ case ATH12K_HW_STATE_RESTARTED:
+ ah->state = ATH12K_HW_STATE_WEDGED;
fallthrough;
- case ATH12K_STATE_WEDGED:
+ case ATH12K_HW_STATE_WEDGED:
ath12k_warn(ab,
- "device is wedged, will not restart radio %d\n", i);
+ "device is wedged, will not restart hw %d\n", i);
break;
}
- mutex_unlock(&ar->conf_mutex);
+
+ mutex_unlock(&ah->hw_mutex);
}
+
complete(&ab->driver_recovery);
}
static void ath12k_core_restart(struct work_struct *work)
{
struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
- int ret;
-
- if (!ab->is_reset)
- ath12k_core_pre_reconfigure_recovery(ab);
+ struct ath12k_hw *ah;
+ int ret, i;
ret = ath12k_core_reconfigure_on_crash(ab);
if (ret) {
@@ -1061,11 +1127,14 @@ static void ath12k_core_restart(struct work_struct *work)
return;
}
- if (ab->is_reset)
- complete_all(&ab->reconfigure_complete);
+ if (ab->is_reset) {
+ for (i = 0; i < ab->num_hw; i++) {
+ ah = ab->ah[i];
+ ieee80211_restart_hw(ah->hw);
+ }
+ }
- if (!ab->is_reset)
- ath12k_core_post_reconfigure_recovery(ab);
+ complete(&ab->restart_completed);
}
static void ath12k_core_reset(struct work_struct *work)
@@ -1117,22 +1186,18 @@ static void ath12k_core_reset(struct work_struct *work)
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n");
ab->is_reset = true;
- atomic_set(&ab->recovery_start_count, 0);
- reinit_completion(&ab->recovery_start);
atomic_set(&ab->recovery_count, 0);
ath12k_core_pre_reconfigure_recovery(ab);
- reinit_completion(&ab->reconfigure_complete);
ath12k_core_post_reconfigure_recovery(ab);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n");
- time_left = wait_for_completion_timeout(&ab->recovery_start,
- ATH12K_RECOVER_START_TIMEOUT_HZ);
+ ath12k_hif_irq_disable(ab);
+ ath12k_hif_ce_irq_disable(ab);
- ath12k_hif_power_down(ab);
- ath12k_qmi_free_resource(ab);
+ ath12k_hif_power_down(ab, false);
ath12k_hif_power_up(ab);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
@@ -1153,6 +1218,29 @@ int ath12k_core_pre_init(struct ath12k_base *ab)
return 0;
}
+static int ath12k_core_panic_handler(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct ath12k_base *ab = container_of(nb, struct ath12k_base,
+ panic_nb);
+
+ return ath12k_hif_panic_handler(ab);
+}
+
+static int ath12k_core_panic_notifier_register(struct ath12k_base *ab)
+{
+ ab->panic_nb.notifier_call = ath12k_core_panic_handler;
+
+ return atomic_notifier_chain_register(&panic_notifier_list,
+ &ab->panic_nb);
+}
+
+static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
+{
+ atomic_notifier_chain_unregister(&panic_notifier_list,
+ &ab->panic_nb);
+}
+
int ath12k_core_init(struct ath12k_base *ab)
{
int ret;
@@ -1163,11 +1251,17 @@ int ath12k_core_init(struct ath12k_base *ab)
return ret;
}
+ ret = ath12k_core_panic_notifier_register(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
+
return 0;
}
void ath12k_core_deinit(struct ath12k_base *ab)
{
+ ath12k_core_panic_notifier_unregister(ab);
+
mutex_lock(&ab->core_lock);
ath12k_core_pdev_destroy(ab);
@@ -1175,7 +1269,7 @@ void ath12k_core_deinit(struct ath12k_base *ab)
mutex_unlock(&ab->core_lock);
- ath12k_hif_power_down(ab);
+ ath12k_hif_power_down(ab, false);
ath12k_mac_destroy(ab);
ath12k_core_soc_destroy(ab);
ath12k_fw_unmap(ab);
@@ -1211,8 +1305,6 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
mutex_init(&ab->core_lock);
spin_lock_init(&ab->base_lock);
init_completion(&ab->reset_complete);
- init_completion(&ab->reconfigure_complete);
- init_completion(&ab->recovery_start);
INIT_LIST_HEAD(&ab->peers);
init_waitqueue_head(&ab->peer_mapping_wq);
@@ -1223,11 +1315,23 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size,
timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0);
init_completion(&ab->htc_suspend);
+ init_completion(&ab->restart_completed);
+ init_completion(&ab->wow.wakeup_completed);
ab->dev = dev;
ab->hif.bus = bus;
ab->qmi.num_radios = U8_MAX;
- ab->slo_capable = true;
+ ab->mlo_capable_flags = ATH12K_INTRA_DEVICE_MLO_SUPPORT;
+
+ /* Device index used to identify the devices in a group.
+ *
+ * In intra-device MLO, only one device is present in a group,
+ * so it is always zero.
+ *
+ * In inter-device MLO, multiple devices are present in a group,
+ * so expect a non-zero value.
+ */
+ ab->device_id = 0;
return ab;
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 97e5a0ccd233..cdfd43a7321a 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -14,6 +14,7 @@
#include <linux/dmi.h>
#include <linux/ctype.h>
#include <linux/firmware.h>
+#include <linux/panic_notifier.h>
#include "qmi.h"
#include "htc.h"
#include "wmi.h"
@@ -26,6 +27,9 @@
#include "reg.h"
#include "dbring.h"
#include "fw.h"
+#include "acpi.h"
+#include "wow.h"
+#include "debugfs_htt_stats.h"
#define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK)
@@ -46,6 +50,7 @@
#define ATH12K_SMBIOS_BDF_EXT_MAGIC "BDF_"
#define ATH12K_INVALID_HW_MAC_ID 0xFF
+#define ATH12K_CONNECTION_LOSS_HZ (3 * HZ)
#define ATH12K_RX_RATE_TABLE_NUM 320
#define ATH12K_RX_RATE_TABLE_11AX_NUM 576
@@ -144,7 +149,7 @@ struct ath12k_ext_irq_grp {
u32 grp_id;
u64 timestamp;
struct napi_struct napi;
- struct net_device napi_ndev;
+ struct net_device *napi_ndev;
};
struct ath12k_smbios_bdf {
@@ -178,8 +183,6 @@ struct ath12k_he {
u32 heop_param;
};
-#define MAX_RADIOS 3
-
enum {
WMI_HOST_TP_SCALE_MAX = 0,
WMI_HOST_TP_SCALE_50 = 1,
@@ -210,8 +213,29 @@ enum ath12k_dev_flags {
ATH12K_FLAG_EXT_IRQ_ENABLED,
};
-enum ath12k_monitor_flags {
- ATH12K_FLAG_MONITOR_ENABLED,
+struct ath12k_tx_conf {
+ bool changed;
+ u16 ac;
+ struct ieee80211_tx_queue_params tx_queue_params;
+};
+
+struct ath12k_key_conf {
+ bool changed;
+ enum set_key_cmd cmd;
+ struct ieee80211_key_conf *key;
+};
+
+struct ath12k_vif_cache {
+ struct ath12k_tx_conf tx_conf;
+ struct ath12k_key_conf key_conf;
+ u32 bss_conf_changed;
+};
+
+struct ath12k_rekey_data {
+ u8 kck[NL80211_KCK_LEN];
+ u8 kek[NL80211_KCK_LEN];
+ u64 replay_ctr;
+ bool enable_offload;
};
struct ath12k_vif {
@@ -251,11 +275,13 @@ struct ath12k_vif {
} ap;
} u;
+ bool is_created;
bool is_started;
bool is_up;
u32 aid;
u8 bssid[ETH_ALEN];
struct cfg80211_bitrate_mask bitrate_mask;
+ struct delayed_work connection_loss_work;
int num_legacy_stations;
int rtscts_prot_mode;
int txpower;
@@ -267,10 +293,13 @@ struct ath12k_vif {
u8 vdev_stats_id;
u32 punct_bitmap;
bool ps;
+ struct ath12k_vif_cache *cache;
+ struct ath12k_rekey_data rekey_data;
};
struct ath12k_vif_iter {
u32 vdev_id;
+ struct ath12k *ar;
struct ath12k_vif *arvif;
};
@@ -430,15 +459,15 @@ struct ath12k_sta {
#define ATH12K_MIN_5G_FREQ 4150
#define ATH12K_MIN_6G_FREQ 5925
#define ATH12K_MAX_6G_FREQ 7115
-#define ATH12K_NUM_CHANS 100
+#define ATH12K_NUM_CHANS 101
#define ATH12K_MAX_5G_CHAN 173
-enum ath12k_state {
- ATH12K_STATE_OFF,
- ATH12K_STATE_ON,
- ATH12K_STATE_RESTARTING,
- ATH12K_STATE_RESTARTED,
- ATH12K_STATE_WEDGED,
+enum ath12k_hw_state {
+ ATH12K_HW_STATE_OFF,
+ ATH12K_HW_STATE_ON,
+ ATH12K_HW_STATE_RESTARTING,
+ ATH12K_HW_STATE_RESTARTED,
+ ATH12K_HW_STATE_WEDGED,
/* Add other states as required */
};
@@ -453,6 +482,19 @@ struct ath12k_fw_stats {
struct list_head bcn;
};
+struct ath12k_dbg_htt_stats {
+ enum ath12k_dbg_htt_ext_stats_type type;
+ u32 cfg_param[4];
+ u8 reset;
+ struct debug_htt_stats_req *stats_req;
+};
+
+struct ath12k_debug {
+ struct dentry *debugfs_pdev;
+ struct dentry *debugfs_pdev_symlink;
+ struct ath12k_dbg_htt_stats htt_stats;
+};
+
struct ath12k_per_peer_tx_stats {
u32 succ_bytes;
u32 retry_bytes;
@@ -483,7 +525,6 @@ struct ath12k {
u32 ht_cap_info;
u32 vht_cap_info;
struct ath12k_he ar_he;
- enum ath12k_state state;
bool supports_6ghz;
struct {
struct completion started;
@@ -505,7 +546,6 @@ struct ath12k {
unsigned long dev_flags;
unsigned int filter_flags;
- unsigned long monitor_flags;
u32 min_tx_power;
u32 max_tx_power;
u32 txpower_limit_2g;
@@ -585,6 +625,9 @@ struct ath12k {
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
+ struct ath12k_wow wow;
+ struct completion target_suspend;
+ bool target_suspend_ack;
struct ath12k_per_peer_tx_stats peer_tx_stats;
struct list_head ppdu_stats_info;
u32 ppdu_stat_list_depth;
@@ -592,18 +635,34 @@ struct ath12k {
struct ath12k_per_peer_tx_stats cached_stats;
u32 last_ppdu_id;
u32 cached_ppdu_id;
+#ifdef CONFIG_ATH12K_DEBUGFS
+ struct ath12k_debug debug;
+#endif
bool dfs_block_radar_events;
bool monitor_conf_enabled;
bool monitor_vdev_created;
bool monitor_started;
int monitor_vdev_id;
+
+ u32 freq_low;
+ u32 freq_high;
+
+ bool nlo_enabled;
};
struct ath12k_hw {
struct ieee80211_hw *hw;
-
+ /* Protects writes to the hardware state ath12k_hw::state
+ * across hardware start<=>reconfigure<=>stop transitions.
+ */
+ struct mutex hw_mutex;
+ enum ath12k_hw_state state;
+ bool regd_updated;
+ bool use_6ghz_regd;
u8 num_radio;
+
+ /* Keep last */
struct ath12k radio[] __aligned(sizeof(void *));
};
@@ -653,6 +712,7 @@ struct mlo_timestamp {
struct ath12k_pdev {
struct ath12k *ar;
u32 pdev_id;
+ u32 hw_link_id;
struct ath12k_pdev_cap cap;
u8 mac_addr[ETH_ALEN];
struct mlo_timestamp timestamp;
@@ -688,6 +748,21 @@ struct ath12k_soc_dp_stats {
struct ath12k_soc_dp_tx_err_stats tx_err;
};
+/**
+ * enum ath12k_link_capable_flags - link capable flags
+ *
+ * Single/multi-link capability information
+ *
+ * @ATH12K_INTRA_DEVICE_MLO_SUPPORT: SLO/MLO formed between radios, where all
+ * the links (radios) are present within a single device.
+ * @ATH12K_INTER_DEVICE_MLO_SUPPORT: SLO/MLO formed between radios, where the
+ * links (radios) are spread across devices.
+ */
+enum ath12k_link_capable_flags {
+ ATH12K_INTRA_DEVICE_MLO_SUPPORT = BIT(0),
+ ATH12K_INTER_DEVICE_MLO_SUPPORT = BIT(1),
+};
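
An illustrative consumer of these flags, shown only to make the intended usage concrete; the helper itself is hypothetical:

static bool ath12k_mlo_spans_devices(struct ath12k_base *ab)
{
	/* inter-device MLO means links in the group live on different devices */
	return ab->mlo_capable_flags & ATH12K_INTER_DEVICE_MLO_SUPPORT;
}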
+
/* Master structure to hold the hw data which may be used in core module */
struct ath12k_base {
enum ath12k_hw_rev hw_rev;
@@ -696,6 +771,7 @@ struct ath12k_base {
struct ath12k_qmi qmi;
struct ath12k_wmi_base wmi_ab;
struct completion fw_ready;
+ u8 device_id;
int num_radios;
/* HW channel counters frequency value in hertz common to all MACs */
u32 cc_freq_hz;
@@ -712,6 +788,11 @@ struct ath12k_base {
const struct ath12k_hif_ops *ops;
} hif;
+ struct {
+ struct completion wakeup_completed;
+ u32 wmi_conf_rx_decap_mode;
+ } wow;
+
struct ath12k_ce ce;
struct timer_list rx_replenish_retry;
struct ath12k_hal hal;
@@ -782,6 +863,9 @@ struct ath12k_base {
/* Current DFS Regulatory */
enum ath12k_dfs_region dfs_region;
struct ath12k_soc_dp_stats soc_stats;
+#ifdef CONFIG_ATH12K_DEBUGFS
+ struct dentry *debugfs_soc;
+#endif
unsigned long dev_flags;
struct completion driver_recovery;
@@ -791,11 +875,8 @@ struct ath12k_base {
struct work_struct reset_work;
atomic_t reset_count;
atomic_t recovery_count;
- atomic_t recovery_start_count;
bool is_reset;
struct completion reset_complete;
- struct completion reconfigure_complete;
- struct completion recovery_start;
/* continuous recovery fail count */
atomic_t fail_cont_count;
unsigned long reset_fail_timeout;
@@ -843,10 +924,33 @@ struct ath12k_base {
const struct hal_rx_ops *hal_rx_ops;
- /* slo_capable denotes if the single/multi link operation
- * is supported within the same chip (SoC).
+ /* mlo_capable_flags denotes the single/multi-link operation
+ * capabilities of the device.
+ *
+ * See enum ath12k_link_capable_flags.
*/
- bool slo_capable;
+ u8 mlo_capable_flags;
+
+ struct completion restart_completed;
+
+#ifdef CONFIG_ACPI
+
+ struct {
+ bool started;
+ u32 func_bit;
+ bool acpi_tas_enable;
+ bool acpi_bios_sar_enable;
+ u8 tas_cfg[ATH12K_ACPI_DSM_TAS_CFG_SIZE];
+ u8 tas_sar_power_table[ATH12K_ACPI_DSM_TAS_DATA_SIZE];
+ u8 bios_sar_data[ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE];
+ u8 geo_offset_data[ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE];
+ u8 cca_data[ATH12K_ACPI_DSM_CCA_DATA_SIZE];
+ u8 band_edge_power[ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE];
+ } acpi;
+
+#endif /* CONFIG_ACPI */
+
+ struct notifier_block panic_nb;
/* must be last */
u8 drv_priv[] __aligned(sizeof(void *));
@@ -874,8 +978,10 @@ int ath12k_core_fetch_regdb(struct ath12k_base *ab, struct ath12k_board_data *bd
int ath12k_core_check_dt(struct ath12k_base *ath12k);
int ath12k_core_check_smbios(struct ath12k_base *ab);
void ath12k_core_halt(struct ath12k *ar);
+int ath12k_core_resume_early(struct ath12k_base *ab);
int ath12k_core_resume(struct ath12k_base *ab);
int ath12k_core_suspend(struct ath12k_base *ab);
+int ath12k_core_suspend_late(struct ath12k_base *ab);
const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
const char *filename);
@@ -951,13 +1057,26 @@ static inline struct ath12k_hw *ath12k_hw_to_ah(struct ieee80211_hw *hw)
return hw->priv;
}
-static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah)
+static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah, u8 hw_link_id)
+{
+ if (WARN(hw_link_id >= ah->num_radio,
+ "bad hw link id %d, so switch to default link\n", hw_link_id))
+ hw_link_id = 0;
+
+ return &ah->radio[hw_link_id];
+}
+
+static inline struct ath12k_hw *ath12k_ar_to_ah(struct ath12k *ar)
{
- return ah->radio;
+ return ar->ah;
}
static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar)
{
return ar->ah->hw;
}
+
+#define for_each_ar(ah, ar, index) \
+ for ((index) = 0; ((index) < (ah)->num_radio && \
+ ((ar) = &(ah)->radio[(index)])); (index)++)
#endif /* _CORE_H_ */
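
A minimal sketch of how the new for_each_ar() iterator is meant to be used to walk every radio under one ath12k_hw; ath12k_mac_radio_start() is a hypothetical per-radio operation:

static int ath12k_hw_start_all_radios(struct ath12k_hw *ah)
{
	struct ath12k *ar;
	int i, ret;

	for_each_ar(ah, ar, i) {
		ret = ath12k_mac_radio_start(ar);	/* hypothetical */
		if (ret)
			return ret;
	}

	return 0;
}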
diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h
index aa685295f8a4..f7005917362c 100644
--- a/drivers/net/wireless/ath/ath12k/debug.h
+++ b/drivers/net/wireless/ath/ath12k/debug.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_DEBUG_H_
@@ -25,6 +25,7 @@ enum ath12k_debug_mask {
ATH12K_DBG_PCI = 0x00001000,
ATH12K_DBG_DP_TX = 0x00002000,
ATH12K_DBG_DP_RX = 0x00004000,
+ ATH12K_DBG_WOW = 0x00008000,
ATH12K_DBG_ANY = 0xffffffff,
};
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c
new file mode 100644
index 000000000000..2a977c36af00
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "debugfs.h"
+#include "debugfs_htt_stats.h"
+
+static ssize_t ath12k_write_simulate_radar(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_wmi_simulate_radar(ar);
+ if (ret)
+ goto exit;
+
+ ret = count;
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_simulate_radar = {
+ .write = ath12k_write_simulate_radar,
+ .open = simple_open,
+};
+
+void ath12k_debugfs_soc_create(struct ath12k_base *ab)
+{
+ bool dput_needed;
+ char soc_name[64] = { 0 };
+ struct dentry *debugfs_ath12k;
+
+ debugfs_ath12k = debugfs_lookup("ath12k", NULL);
+ if (debugfs_ath12k) {
+ /* a dentry from debugfs_lookup() must be dput() once we are done with it */
+ dput_needed = true;
+ } else {
+ debugfs_ath12k = debugfs_create_dir("ath12k", NULL);
+ if (IS_ERR_OR_NULL(debugfs_ath12k))
+ return;
+ dput_needed = false;
+ }
+
+ scnprintf(soc_name, sizeof(soc_name), "%s-%s", ath12k_bus_str(ab->hif.bus),
+ dev_name(ab->dev));
+
+ ab->debugfs_soc = debugfs_create_dir(soc_name, debugfs_ath12k);
+
+ if (dput_needed)
+ dput(debugfs_ath12k);
+}
+
+void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
+{
+ debugfs_remove_recursive(ab->debugfs_soc);
+ ab->debugfs_soc = NULL;
+ /* The ath12k directory is deliberately left in place even when it
+ * ends up empty. This simplifies the directory handling, and an
+ * empty ath12k directory in debugfs is only a minor cosmetic issue.
+ */
+}
+
+void ath12k_debugfs_register(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ar->ah->hw;
+ char pdev_name[5];
+ char buf[100] = {0};
+
+ scnprintf(pdev_name, sizeof(pdev_name), "%s%d", "mac", ar->pdev_idx);
+
+ ar->debug.debugfs_pdev = debugfs_create_dir(pdev_name, ab->debugfs_soc);
+
+ /* Create a symlink under ieee80211/phy* */
+ scnprintf(buf, sizeof(buf), "../../ath12k/%pd2", ar->debug.debugfs_pdev);
+ ar->debug.debugfs_pdev_symlink = debugfs_create_symlink("ath12k",
+ hw->wiphy->debugfsdir,
+ buf);
+
+ if (ar->mac.sbands[NL80211_BAND_5GHZ].channels) {
+ debugfs_create_file("dfs_simulate_radar", 0200,
+ ar->debug.debugfs_pdev, ar,
+ &fops_simulate_radar);
+ }
+
+ ath12k_debugfs_htt_stats_register(ar);
+}
+
+void ath12k_debugfs_unregister(struct ath12k *ar)
+{
+ if (!ar->debug.debugfs_pdev)
+ return;
+
+ /* Remove symlink under ieee80211/phy* */
+ debugfs_remove(ar->debug.debugfs_pdev_symlink);
+ debugfs_remove_recursive(ar->debug.debugfs_pdev);
+ ar->debug.debugfs_pdev_symlink = NULL;
+ ar->debug.debugfs_pdev = NULL;
+}
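
A hedged sketch of the expected bring-up ordering for these hooks; the real call sites live elsewhere in this series, and the pdev array layout used here is an assumption:

static void ath12k_example_debugfs_bringup(struct ath12k_base *ab)
{
	int i;

	/* one SoC-level directory first, then one subdirectory per radio */
	ath12k_debugfs_soc_create(ab);
	for (i = 0; i < ab->num_radios; i++)
		ath12k_debugfs_register(ab->pdevs[i].ar);
}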
diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h
new file mode 100644
index 000000000000..8d64ba03aa9a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH12K_DEBUGFS_H_
+#define _ATH12K_DEBUGFS_H_
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+void ath12k_debugfs_soc_create(struct ath12k_base *ab);
+void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
+void ath12k_debugfs_register(struct ath12k *ar);
+void ath12k_debugfs_unregister(struct ath12k *ar);
+#else
+static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab)
+{
+}
+
+static inline void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
+{
+}
+
+static inline void ath12k_debugfs_register(struct ath12k *ar)
+{
+}
+
+static inline void ath12k_debugfs_unregister(struct ath12k *ar)
+{
+}
+
+#endif /* CONFIG_ATH12K_DEBUGFS */
+
+#endif /* _ATH12K_DEBUGFS_H_ */
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
new file mode 100644
index 000000000000..ce80e7b5175b
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
@@ -0,0 +1,1540 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+#include "core.h"
+#include "debug.h"
+#include "debugfs_htt_stats.h"
+#include "dp_tx.h"
+#include "dp_rx.h"
+
+static u32
+print_array_to_buf(u8 *buf, u32 offset, const char *header,
+ const __le32 *array, u32 array_len, const char *footer)
+{
+ int index = 0;
+ u32 i;
+
+ if (header) {
+ index += scnprintf(buf + offset,
+ ATH12K_HTT_STATS_BUF_SIZE - offset,
+ "%s = ", header);
+ }
+ for (i = 0; i < array_len; i++) {
+ index += scnprintf(buf + offset + index,
+ (ATH12K_HTT_STATS_BUF_SIZE - offset) - index,
+ " %u:%u,", i, le32_to_cpu(array[i]));
+ }
+ /* To overwrite the last trailing comma */
+ index--;
+ *(buf + offset + index) = '\0';
+
+ if (footer) {
+ index += scnprintf(buf + offset + index,
+ (ATH12K_HTT_STATS_BUF_SIZE - offset) - index,
+ "%s", footer);
+ }
+ return index;
+}
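
For reference, given a header of "urrn_stats", a three-element array {1, 2, 3} and a "\n\n" footer, the helper above produces index:value pairs with the trailing comma trimmed before the footer is appended:

urrn_stats =  0:1, 1:2, 2:3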
+
+static void
+htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "comp_delivered = %u\n",
+ le32_to_cpu(htt_stats_buf->comp_delivered));
+ len += scnprintf(buf + len, buf_len - len, "self_triggers = %u\n",
+ le32_to_cpu(htt_stats_buf->self_triggers));
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_queued));
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_reaped));
+ len += scnprintf(buf + len, buf_len - len, "underrun = %u\n",
+ le32_to_cpu(htt_stats_buf->underrun));
+ len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_paused));
+ len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_flush));
+ len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n",
+ le32_to_cpu(htt_stats_buf->hw_filt));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_abort));
+ len += scnprintf(buf + len, buf_len - len, "ppdu_ok = %u\n",
+ le32_to_cpu(htt_stats_buf->ppdu_ok));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_requed));
+ len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_xretry));
+ len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n",
+ le32_to_cpu(htt_stats_buf->data_rc));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_dropped_xretry));
+ len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n",
+ le32_to_cpu(htt_stats_buf->illgl_rate_phy_err));
+ len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n",
+ le32_to_cpu(htt_stats_buf->cont_xretry));
+ len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_timeout));
+ len += scnprintf(buf + len, buf_len - len, "tx_time_dur_data = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_time_dur_data));
+ len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n",
+ le32_to_cpu(htt_stats_buf->pdev_resets));
+ len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n",
+ le32_to_cpu(htt_stats_buf->phy_underrun));
+ len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n",
+ le32_to_cpu(htt_stats_buf->txop_ovf));
+ len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_failed_queueing));
+ len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_completed));
+ len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_restarted));
+ len += scnprintf(buf + len, buf_len - len, "seq_txop_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_txop_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "next_seq_cancel = %u\n",
+ le32_to_cpu(htt_stats_buf->next_seq_cancel));
+ len += scnprintf(buf + len, buf_len - len, "dl_mu_mimo_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "dl_mu_ofdma_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_ofdma_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "ul_mu_mimo_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_mumimo_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "ul_mu_ofdma_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->ul_ofdma_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_peer_blacklisted = %u\n",
+ le32_to_cpu(htt_stats_buf->num_mu_peer_blacklisted));
+ len += scnprintf(buf + len, buf_len - len, "seq_qdepth_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_qdepth_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "seq_min_msdu_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_min_msdu_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "mu_seq_min_msdu_repost_stop = %u\n",
+ le32_to_cpu(htt_stats_buf->mu_seq_min_msdu_repost_stop));
+ len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_switch_hw_paused));
+ len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n",
+ le32_to_cpu(htt_stats_buf->next_seq_posted_dsr));
+ len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_posted_isr));
+ len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n",
+ le32_to_cpu(htt_stats_buf->seq_ctrl_cached));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_count_tqm));
+ len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_count_tqm));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_removed_tqm));
+ len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_removed_tqm));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdus_max_retries = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdus_max_retries));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_sw_flush));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_hw_filter));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_truncated));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_ack_failed));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_expired));
+ len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdus_seq_hw_retry));
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->ack_tlv_proc));
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n",
+ le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt_valid));
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt));
+ len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n",
+ le32_to_cpu(htt_stats_buf->num_total_ppdus_tried_ota));
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n",
+ le32_to_cpu(htt_stats_buf->num_data_ppdus_tried_ota));
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n",
+ le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_enqued));
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n",
+ le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_freed));
+ len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n",
+ le32_to_cpu(htt_stats_buf->local_data_enqued));
+ len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n",
+ le32_to_cpu(htt_stats_buf->local_data_freed));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_tried));
+ len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n",
+ le32_to_cpu(htt_stats_buf->isr_wait_seq_posted));
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_active_dur_us_low));
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n",
+ le32_to_cpu(htt_stats_buf->tx_active_dur_us_high));
+ len += scnprintf(buf + len, buf_len - len, "fes_offsets_err_cnt = %u\n\n",
+ le32_to_cpu(htt_stats_buf->fes_offsets_err_cnt));
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_urrn_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_urrn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ HTT_TX_PDEV_MAX_URRN_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_URRN_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "urrn_stats", htt_stats_buf->urrn_stats,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_flush_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_flush_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_FLUSH_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "flush_errs", htt_stats_buf->flush_errs,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_sifs_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_sifs_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "sifs_status", htt_stats_buf->sifs_status,
+ num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv *htt_stats_buf = tag_buf;
+ char *mode;
+ u8 j, hw_mode, i, str_buf_len;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 stats_value;
+ u8 max_ppdu = ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST;
+ u8 max_sched = ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS;
+ char str_buf[ATH12K_HTT_MAX_STRING_LEN];
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ hw_mode = le32_to_cpu(htt_stats_buf->hw_mode);
+
+ switch (hw_mode) {
+ case ATH12K_HTT_STATS_HWMODE_AC:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AC_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "ac";
+ break;
+ case ATH12K_HTT_STATS_HWMODE_AX:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "ax";
+ break;
+ case ATH12K_HTT_STATS_HWMODE_BE:
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_BE_MU_PPDU_DISTRIBUTION_STATS:\n");
+ mode = "be";
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < ATH12K_HTT_STATS_NUM_NR_BINS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_seq_posted_nr%u = %u\n", mode,
+ ((i + 1) * 4),
+ le32_to_cpu(htt_stats_buf->num_seq_posted[i]));
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_posted_per_burst
+ [i * max_ppdu + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_ppdu_posted_per_burst_nr%u = %s\n",
+ mode, ((i + 1) * 4), str_buf);
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_cmpl_per_burst
+ [i * max_ppdu + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_ppdu_completed_per_burst_nr%u = %s\n",
+ mode, ((i + 1) * 4), str_buf);
+ str_buf_len = 0;
+ memset(str_buf, 0x0, sizeof(str_buf));
+ for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS; j++) {
+ stats_value = le32_to_cpu(htt_stats_buf->num_seq_term_status
+ [i * max_sched + j]);
+ str_buf_len += scnprintf(&str_buf[str_buf_len],
+ ATH12K_HTT_MAX_STRING_LEN - str_buf_len,
+ " %u:%u,", j, stats_value);
+ }
+ /* To overwrite the last trailing comma */
+ str_buf[str_buf_len - 1] = '\0';
+ len += scnprintf(buf + len, buf_len - len,
+ "%s_mu_mimo_num_seq_term_status_nr%u = %s\n\n",
+ mode, ((i + 1) * 4), str_buf);
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_tx_pdev_stats_sifs_hist_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_HIST_TLV:\n");
+
+ len += print_array_to_buf(buf, len, "sifs_hist_status",
+ htt_stats_buf->sifs_hist_status, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+htt_print_pdev_ctrl_path_tx_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_CTRL_PATH_TX_STATS:\n");
+ len += print_array_to_buf(buf, len, "fw_tx_mgmt_subtype",
+ htt_stats_buf->fw_tx_mgmt_subtype,
+ ATH12K_HTT_STATS_SUBTYPE_MAX, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n",
+ le32_to_cpu(htt_stats_buf->current_timestamp));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word,
+ ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "txq_id = %u\n",
+ u32_get_bits(mac_id_word,
+ ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID));
+ len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_policy));
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_posted_timestamp = %u\n",
+ le32_to_cpu(htt_stats_buf->last_sched_cmd_posted_timestamp));
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_compl_timestamp = %u\n",
+ le32_to_cpu(htt_stats_buf->last_sched_cmd_compl_timestamp));
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_2_tac_lwm_count));
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_2_tac_ring_full));
+ len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_cmd_post_failure));
+ len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n",
+ le32_to_cpu(htt_stats_buf->num_active_tids));
+ len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n",
+ le32_to_cpu(htt_stats_buf->num_ps_schedules));
+ len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_cmds_pending));
+ len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tid_register));
+ len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tid_unregister));
+ len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n",
+ le32_to_cpu(htt_stats_buf->num_qstats_queried));
+ len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n",
+ le32_to_cpu(htt_stats_buf->qstats_update_pending));
+ len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n",
+ le32_to_cpu(htt_stats_buf->last_qstats_query_timestamp));
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tqm_cmdq_full));
+ len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->num_de_sched_algo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->num_rt_sched_algo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->num_tqm_sched_algo_trigger));
+ len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_sched));
+ len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n",
+ le32_to_cpu(htt_stats_buf->dur_based_sendn_term));
+ len += scnprintf(buf + len, buf_len - len, "su_notify2_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->su_notify2_sched));
+ len += scnprintf(buf + len, buf_len - len, "su_optimal_queued_msdus_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->su_optimal_queued_msdus_sched));
+ len += scnprintf(buf + len, buf_len - len, "su_delay_timeout_sched = %u\n",
+ le32_to_cpu(htt_stats_buf->su_delay_timeout_sched));
+ len += scnprintf(buf + len, buf_len - len, "su_min_txtime_sched_delay = %u\n",
+ le32_to_cpu(htt_stats_buf->su_min_txtime_sched_delay));
+ len += scnprintf(buf + len, buf_len - len, "su_no_delay = %u\n",
+ le32_to_cpu(htt_stats_buf->su_no_delay));
+ len += scnprintf(buf + len, buf_len - len, "num_supercycles = %u\n",
+ le32_to_cpu(htt_stats_buf->num_supercycles));
+ len += scnprintf(buf + len, buf_len - len, "num_subcycles_with_sort = %u\n",
+ le32_to_cpu(htt_stats_buf->num_subcycles_with_sort));
+ len += scnprintf(buf + len, buf_len - len, "num_subcycles_no_sort = %u\n\n",
+ le32_to_cpu(htt_stats_buf->num_subcycles_no_sort));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_cmd_posted_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_cmd_posted_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV:\n");
+ len += print_array_to_buf(buf, len, "sched_cmd_posted",
+ htt_stats_buf->sched_cmd_posted, num_elements, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_cmd_reaped_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_cmd_reaped_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV:\n");
+ len += print_array_to_buf(buf, len, "sched_cmd_reaped",
+ htt_stats_buf->sched_cmd_reaped, num_elements, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_sched_order_su_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_sched_order_su_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 sched_order_su_num_entries = min_t(u32, (tag_len >> 2),
+ ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV:\n");
+ len += print_array_to_buf(buf, len, "sched_order_su",
+ htt_stats_buf->sched_order_su,
+ sched_order_su_num_entries, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_sched_ineligibility_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_sched_ineligibility_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 sched_ineligibility_num_entries = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_INELIGIBILITY:\n");
+ len += print_array_to_buf(buf, len, "sched_ineligibility",
+ htt_stats_buf->sched_ineligibility,
+ sched_ineligibility_num_entries, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_sched_txq_supercycle_trigger_tlv(const void *tag_buf,
+ u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_sched_txq_supercycle_triggers_tlv *htt_stats_buf =
+ tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SUPERCYCLE_TRIGGER:\n");
+ len += print_array_to_buf(buf, len, "supercycle_triggers",
+ htt_stats_buf->supercycle_triggers, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_stats_pdev_errs_tlv *htt_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_buf))
+ return;
+
+ mac_id_word = le32_to_cpu(htt_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ le32_to_cpu(htt_buf->tx_abort));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n",
+ le32_to_cpu(htt_buf->tx_abort_fail_count));
+ len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n",
+ le32_to_cpu(htt_buf->rx_abort));
+ len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n",
+ le32_to_cpu(htt_buf->rx_abort_fail_count));
+ len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n",
+ le32_to_cpu(htt_buf->rx_flush_cnt));
+ len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n",
+ le32_to_cpu(htt_buf->warm_reset));
+ len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n",
+ le32_to_cpu(htt_buf->cold_reset));
+ len += scnprintf(buf + len, buf_len - len, "mac_cold_reset_restore_cal = %u\n",
+ le32_to_cpu(htt_buf->mac_cold_reset_restore_cal));
+ len += scnprintf(buf + len, buf_len - len, "mac_cold_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_cold_reset));
+ len += scnprintf(buf + len, buf_len - len, "mac_warm_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_warm_reset));
+ len += scnprintf(buf + len, buf_len - len, "mac_only_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_only_reset));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_ucode_trig = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_ucode_trig));
+ len += scnprintf(buf + len, buf_len - len, "mac_warm_reset_restore_cal = %u\n",
+ le32_to_cpu(htt_buf->mac_warm_reset_restore_cal));
+ len += scnprintf(buf + len, buf_len - len, "mac_sfm_reset = %u\n",
+ le32_to_cpu(htt_buf->mac_sfm_reset));
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_m3_ssr = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_m3_ssr));
+ len += scnprintf(buf + len, buf_len - len, "fw_rx_rings_reset = %u\n",
+ le32_to_cpu(htt_buf->fw_rx_rings_reset));
+ len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n",
+ le32_to_cpu(htt_buf->tx_flush));
+ len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n",
+ le32_to_cpu(htt_buf->tx_glb_reset));
+ len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n",
+ le32_to_cpu(htt_buf->tx_txq_reset));
+ len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n",
+ le32_to_cpu(htt_buf->rx_timeout_reset));
+
+ len += scnprintf(buf + len, buf_len - len, "PDEV_PHY_WARM_RESET_REASONS:\n");
+ len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_reason_phy_m3 = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_phy_m3));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_hw_stuck = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hw_stuck));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_num_cca_rx_frame_stuck = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_num_rx_frame_stuck));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_wal_rx_recovery_rst_rx_busy = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_rx_busy));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_wal_rx_recovery_rst_mac_hang = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_mac_hng));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_mac_reset_converted_phy_reset = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_mac_conv_phy_reset));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_lifetime_expiry_cca_stuck = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_exp_cca_stuck));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_consecutive_flush9_war = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_consec_flsh_war));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_tx_hwsch_reset_war = %u\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hwsch_reset_war));
+ len += scnprintf(buf + len, buf_len - len,
+ "phy_warm_reset_reason_hwsch_wdog_or_cca_wdog_war = %u\n\n",
+ le32_to_cpu(htt_buf->phy_warm_reset_reason_hwsch_cca_wdog_war));
+
+ len += scnprintf(buf + len, buf_len - len, "WAL_RX_RECOVERY_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_mac_hang_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_mac_hang_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_known_sig_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_known_sig_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_no_rx_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_no_rx_consecutive_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_consec_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_rx_busy_count = %u\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_rx_busy_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "wal_rx_recovery_rst_phy_mac_hang_count = %u\n\n",
+ le32_to_cpu(htt_buf->wal_rx_recovery_rst_phy_mac_hang_cnt));
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_DEST_DRAIN_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rx_descs_leak_prevention_done = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_leak_prevented));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rx_descs_saved_cnt = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_saved_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma2reo_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma2reo_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma2fw_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma2fw_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma2wbm_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma2wbm_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rxdma1_2sw_leak_detected = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rxdma1_2sw_leak_detected));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_rx_drain_ok_mac_idle = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_rx_drain_ok_mac_idle));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_ok_mac_not_idle = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_ok_mac_not_idle));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_prerequisite_invld = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_prerequisite_invld));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_skip_for_non_lmac_reset = %u\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_skip_non_lmac_reset));
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_dest_drain_hw_fifo_not_empty_post_drain_wait = %u\n\n",
+ le32_to_cpu(htt_buf->rx_dest_drain_hw_fifo_notempty_post_wait));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_stats_intr_misc_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n",
+ htt_stats_buf->hw_intr_name);
+ len += scnprintf(buf + len, buf_len - len, "mask = %u\n",
+ le32_to_cpu(htt_stats_buf->mask));
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ le32_to_cpu(htt_stats_buf->count));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_stats_whal_tx_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n",
+ le32_to_cpu(htt_stats_buf->last_unpause_ppdu_id));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_unpause_wait_tqm_write));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_dummy_tlv_skipped));
+ len += scnprintf(buf + len, buf_len - len,
+ "hwsch_misaligned_offset_received = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_misaligned_offset_received));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_reset_count));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_dev_reset_war));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_delayed_pause));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_long_delayed_pause));
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n",
+ le32_to_cpu(htt_stats_buf->sch_rx_ppdu_no_response));
+ len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n",
+ le32_to_cpu(htt_stats_buf->sch_selfgen_response));
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n\n",
+ le32_to_cpu(htt_stats_buf->sch_rx_sifs_resp_trigger));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_hw_war_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_hw_war_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 fixed_len, array_len;
+ u8 i, array_words;
+ u32 mac_id;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);
+ fixed_len = sizeof(*htt_stats_buf);
+ array_len = tag_len - fixed_len;
+ array_words = array_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_WAR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID));
+
+ for (i = 0; i < array_words; i++) {
+ len += scnprintf(buf + len, buf_len - len, "hw_war %u = %u\n\n",
+ i, le32_to_cpu(htt_stats_buf->hw_wars[i]));
+ }
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u32 mac_id_word;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+ u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+ len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n",
+ le32_to_cpu(htt_stats_buf->max_cmdq_id));
+ len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n",
+ le32_to_cpu(htt_stats_buf->list_mpdu_cnt_hist_intvl));
+ len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n",
+ le32_to_cpu(htt_stats_buf->add_msdu));
+ len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n",
+ le32_to_cpu(htt_stats_buf->q_empty));
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n",
+ le32_to_cpu(htt_stats_buf->q_not_empty));
+ len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n",
+ le32_to_cpu(htt_stats_buf->drop_notification));
+ len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n",
+ le32_to_cpu(htt_stats_buf->desc_threshold));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_tqm_invalid_status = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_tqm_invalid_status));
+ len += scnprintf(buf + len, buf_len - len, "missed_tqm_gen_mpdus = %u\n",
+ le32_to_cpu(htt_stats_buf->missed_tqm_gen_mpdus));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_msduq_timestamp_updates = %u\n",
+ le32_to_cpu(htt_stats_buf->msduq_timestamp_updates));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_msduq_timestamp_updates_by_get_mpdu_head_info_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->msduq_updates_mpdu_head_info_cmd));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_msduq_timestamp_updates_by_emp_to_nonemp_status = %u\n",
+ le32_to_cpu(htt_stats_buf->msduq_updates_emp_to_nonemp_status));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_get_mpdu_head_info_cmds_by_sched_algo_la_query = %u\n",
+ le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_query));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_get_mpdu_head_info_cmds_by_tac = %u\n",
+ le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_tac));
+ len += scnprintf(buf + len, buf_len - len,
+ "total_gen_mpdu_cmds_by_sched_algo_la_query = %u\n",
+ le32_to_cpu(htt_stats_buf->gen_mpdu_cmds_by_query));
+ len += scnprintf(buf + len, buf_len - len, "active_tqm_tids = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_active_tids));
+ len += scnprintf(buf + len, buf_len - len, "inactive_tqm_tids = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_inactive_tids));
+ len += scnprintf(buf + len, buf_len - len, "tqm_active_msduq_flows = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_active_msduq_flows));
+ len += scnprintf(buf + len, buf_len - len, "hi_prio_q_not_empty = %u\n\n",
+ le32_to_cpu(htt_stats_buf->high_prio_q_not_empty));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_error_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->q_empty_failure));
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n",
+ le32_to_cpu(htt_stats_buf->q_not_empty_failure));
+ len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n",
+ le32_to_cpu(htt_stats_buf->add_msdu_failure));
+
+ len += scnprintf(buf + len, buf_len - len, "TQM_ERROR_RESET_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len, "tqm_cache_ctl_err = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_cache_ctl_err));
+ len += scnprintf(buf + len, buf_len - len, "tqm_soft_reset = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_soft_reset));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_total_num_in_use_link_descs = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_link_descs));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_worst_case_num_lost_link_descs = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_link_descs));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_worst_case_num_lost_host_tx_bufs_count = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_host_tx_buf_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_num_in_use_link_descs_internal_tqm = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_internal_tqm));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_num_in_use_link_descs_wbm_idle_link_ring = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_idle_link_rng));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_time_to_tqm_hang_delta_ms = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_time_to_tqm_hang_delta_ms));
+ len += scnprintf(buf + len, buf_len - len, "tqm_reset_recovery_time_ms = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_recovery_time_ms));
+ len += scnprintf(buf + len, buf_len - len, "tqm_reset_num_peers_hdl = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_num_peers_hdl));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_cumm_dirty_hw_mpduq_proc_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_mpduq_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_cumm_dirty_hw_msduq_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_msduq_proc));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_su_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_su_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_other_cnt = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_other_cnt));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_trig_type = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_type));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_trig_cfg = %u\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_cfg));
+ len += scnprintf(buf + len, buf_len - len,
+ "tqm_reset_flush_cache_cmd_skip_cmd_status_null = %u\n\n",
+ le32_to_cpu(htt_stats_buf->tqm_reset_flush_cmd_skp_status_null));
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elements = tag_len >> 2;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "gen_mpdu_end_reason",
+ htt_stats_buf->gen_mpdu_end_reason, num_elements,
+ "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_STATS_TLV:\n");
+ len += print_array_to_buf(buf, len, "list_mpdu_end_reason",
+ htt_stats_buf->list_mpdu_end_reason, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_list_mpdu_cnt_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+ u16 num_elems = min_t(u16, (tag_len >> 2),
+ ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n");
+ len += print_array_to_buf(buf, len, "list_mpdu_cnt_hist",
+ htt_stats_buf->list_mpdu_cnt_hist, num_elems, "\n\n");
+
+ stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_tqm_pdev_stats_tlv(const void *tag_buf, u16 tag_len,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct ath12k_htt_tx_tqm_pdev_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+
+ if (tag_len < sizeof(*htt_stats_buf))
+ return;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_count));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_count));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdu));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu_ttl));
+ len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n",
+ le32_to_cpu(htt_stats_buf->send_bar));
+ len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n",
+ le32_to_cpu(htt_stats_buf->bar_sync));
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_mpdu));
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->sync_cmd));
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->write_cmd));
+ len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n",
+ le32_to_cpu(htt_stats_buf->hwsch_trigger));
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ le32_to_cpu(htt_stats_buf->ack_tlv_proc));
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->gen_mpdu_cmd));
+ len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->gen_list_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdu_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_mpdu_tried_cmd));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_queue_stats_cmd));
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->mpdu_head_info_cmd));
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->msdu_flow_stats_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu_cmd));
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->remove_msdu_ttl_cmd));
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->flush_cache_cmd));
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ le32_to_cpu(htt_stats_buf->update_mpduq_cmd));
+ len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n",
+ le32_to_cpu(htt_stats_buf->enqueue));
+ len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n",
+ le32_to_cpu(htt_stats_buf->enqueue_notify));
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_mpdu_at_head));
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n",
+ le32_to_cpu(htt_stats_buf->notify_mpdu_state_valid));
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_udp_notify1));
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_udp_notify2));
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n",
+ le32_to_cpu(htt_stats_buf->sched_nonudp_notify1));
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n",
+ le32_to_cpu(htt_stats_buf->sched_nonudp_notify2));
+
+ stats_req->buf_len = len;
+}
+
+static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
+ u16 tag, u16 len, const void *tag_buf,
+ void *user_data)
+{
+ struct debug_htt_stats_req *stats_req = user_data;
+
+ switch (tag) {
+ case HTT_STATS_TX_PDEV_CMN_TAG:
+ htt_print_tx_pdev_stats_cmn_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_UNDERRUN_TAG:
+ htt_print_tx_pdev_stats_urrn_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_TAG:
+ htt_print_tx_pdev_stats_sifs_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_FLUSH_TAG:
+ htt_print_tx_pdev_stats_flush_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SIFS_HIST_TAG:
+ htt_print_tx_pdev_stats_sifs_hist_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG:
+ htt_print_pdev_ctrl_path_tx_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_MU_PPDU_DIST_TAG:
+ htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_SCHED_CMN_TAG:
+ ath12k_htt_print_stats_tx_sched_cmn_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG:
+ ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG:
+ ath12k_htt_print_sched_txq_cmd_posted_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG:
+ ath12k_htt_print_sched_txq_cmd_reaped_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG:
+ ath12k_htt_print_sched_txq_sched_order_su_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG:
+ ath12k_htt_print_sched_txq_sched_ineligibility_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG:
+ ath12k_htt_print_sched_txq_supercycle_trigger_tlv(tag_buf, len,
+ stats_req);
+ break;
+ case HTT_STATS_HW_PDEV_ERRS_TAG:
+ ath12k_htt_print_hw_stats_pdev_errs_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_HW_INTR_MISC_TAG:
+ ath12k_htt_print_hw_stats_intr_misc_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_WHAL_TX_TAG:
+ ath12k_htt_print_hw_stats_whal_tx_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_HW_WAR_TAG:
+ ath12k_htt_print_hw_war_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_CMN_TAG:
+ ath12k_htt_print_tx_tqm_cmn_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_ERROR_STATS_TAG:
+ ath12k_htt_print_tx_tqm_error_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_GEN_MPDU_TAG:
+ ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_LIST_MPDU_TAG:
+ ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG:
+ ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(tag_buf, len, stats_req);
+ break;
+ case HTT_STATS_TX_TQM_PDEV_TAG:
+ ath12k_htt_print_tx_tqm_pdev_stats_tlv(tag_buf, len, stats_req);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ struct ath12k_htt_extd_stats_msg *msg;
+ struct debug_htt_stats_req *stats_req;
+ struct ath12k *ar;
+ u32 len, pdev_id, stats_info;
+ u64 cookie;
+ int ret;
+ bool send_completion = false;
+
+ msg = (struct ath12k_htt_extd_stats_msg *)skb->data;
+ cookie = le64_to_cpu(msg->cookie);
+
+ if (u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_MSB) !=
+ ATH12K_HTT_STATS_MAGIC_VALUE) {
+ ath12k_warn(ab, "received invalid htt ext stats event\n");
+ return;
+ }
+
+ pdev_id = u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_LSB);
+ rcu_read_lock();
+ ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
+ if (!ar) {
+ ath12k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id);
+ goto exit;
+ }
+
+ stats_req = ar->debug.htt_stats.stats_req;
+ if (!stats_req)
+ goto exit;
+
+ spin_lock_bh(&ar->data_lock);
+
+ stats_info = le32_to_cpu(msg->info1);
+ stats_req->done = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE);
+ if (stats_req->done)
+ send_completion = true;
+
+ spin_unlock_bh(&ar->data_lock);
+
+ len = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH);
+ if (len > skb->len) {
+ ath12k_warn(ab, "invalid length %d for HTT stats", len);
+ goto exit;
+ }
+
+ ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
+ ath12k_dbg_htt_ext_stats_parse,
+ stats_req);
+ if (ret)
+ ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
+
+ if (send_completion)
+ complete(&stats_req->htt_stats_rcvd);
+exit:
+ rcu_read_unlock();
+}
+
+static ssize_t ath12k_read_htt_stats_type(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ char buf[32];
+ size_t len;
+
+ mutex_lock(&ar->conf_mutex);
+ type = ar->debug.htt_stats.type;
+ mutex_unlock(&ar->conf_mutex);
+
+ len = scnprintf(buf, sizeof(buf), "%u\n", type);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath12k_write_htt_stats_type(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ unsigned int cfg_param[4] = {0};
+ const int size = 32;
+ int num_args;
+
+ char *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ num_args = sscanf(buf, "%u %u %u %u %u\n", &type, &cfg_param[0],
+ &cfg_param[1], &cfg_param[2], &cfg_param[3]);
+ if (!num_args || num_args > 5)
+ return -EINVAL;
+
+ if (type == ATH12K_DBG_HTT_EXT_STATS_RESET ||
+ type >= ATH12K_DBG_HTT_NUM_EXT_STATS)
+ return -EINVAL;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ar->debug.htt_stats.type = type;
+ ar->debug.htt_stats.cfg_param[0] = cfg_param[0];
+ ar->debug.htt_stats.cfg_param[1] = cfg_param[1];
+ ar->debug.htt_stats.cfg_param[2] = cfg_param[2];
+ ar->debug.htt_stats.cfg_param[3] = cfg_param[3];
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_htt_stats_type = {
+ .read = ath12k_read_htt_stats_type,
+ .write = ath12k_write_htt_stats_type,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static int ath12k_debugfs_htt_stats_req(struct ath12k *ar)
+{
+ struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req;
+ enum ath12k_dbg_htt_ext_stats_type type = stats_req->type;
+ u64 cookie;
+ int ret, pdev_id;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ init_completion(&stats_req->htt_stats_rcvd);
+
+ pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ stats_req->done = false;
+ stats_req->pdev_id = pdev_id;
+
+ cookie = u64_encode_bits(ATH12K_HTT_STATS_MAGIC_VALUE,
+ ATH12K_HTT_STATS_COOKIE_MSB);
+ cookie |= u64_encode_bits(pdev_id, ATH12K_HTT_STATS_COOKIE_LSB);
+
+ if (stats_req->override_cfg_param) {
+ cfg_params.cfg0 = stats_req->cfg_param[0];
+ cfg_params.cfg1 = stats_req->cfg_param[1];
+ cfg_params.cfg2 = stats_req->cfg_param[2];
+ cfg_params.cfg3 = stats_req->cfg_param[3];
+ }
+
+ ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ return ret;
+ }
+ if (!wait_for_completion_timeout(&stats_req->htt_stats_rcvd, 3 * HZ)) {
+ spin_lock_bh(&ar->data_lock);
+ if (!stats_req->done) {
+ stats_req->done = true;
+ spin_unlock_bh(&ar->data_lock);
+ ath12k_warn(ar->ab, "stats request timed out\n");
+ return -ETIMEDOUT;
+ }
+ spin_unlock_bh(&ar->data_lock);
+ }
+
+ return 0;
+}
+
+static int ath12k_open_htt_stats(struct inode *inode,
+ struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+ struct debug_htt_stats_req *stats_req;
+ enum ath12k_dbg_htt_ext_stats_type type = ar->debug.htt_stats.type;
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ int ret;
+
+ if (type == ATH12K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ah->state != ATH12K_HW_STATE_ON) {
+ ret = -ENETDOWN;
+ goto err_unlock;
+ }
+
+ if (ar->debug.htt_stats.stats_req) {
+ ret = -EAGAIN;
+ goto err_unlock;
+ }
+
+ stats_req = kzalloc(sizeof(*stats_req) + ATH12K_HTT_STATS_BUF_SIZE, GFP_KERNEL);
+ if (!stats_req) {
+ ret = -ENOMEM;
+ goto err_unlock;
+ }
+
+ ar->debug.htt_stats.stats_req = stats_req;
+ stats_req->type = type;
+ stats_req->cfg_param[0] = ar->debug.htt_stats.cfg_param[0];
+ stats_req->cfg_param[1] = ar->debug.htt_stats.cfg_param[1];
+ stats_req->cfg_param[2] = ar->debug.htt_stats.cfg_param[2];
+ stats_req->cfg_param[3] = ar->debug.htt_stats.cfg_param[3];
+ stats_req->override_cfg_param = !!stats_req->cfg_param[0] ||
+ !!stats_req->cfg_param[1] ||
+ !!stats_req->cfg_param[2] ||
+ !!stats_req->cfg_param[3];
+
+ ret = ath12k_debugfs_htt_stats_req(ar);
+ if (ret < 0)
+ goto out;
+
+ file->private_data = stats_req;
+
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+out:
+ kfree(stats_req);
+ ar->debug.htt_stats.stats_req = NULL;
+err_unlock:
+ mutex_unlock(&ar->conf_mutex);
+
+ return ret;
+}
+
+static int ath12k_release_htt_stats(struct inode *inode,
+ struct file *file)
+{
+ struct ath12k *ar = inode->i_private;
+
+ mutex_lock(&ar->conf_mutex);
+ kfree(file->private_data);
+ ar->debug.htt_stats.stats_req = NULL;
+ mutex_unlock(&ar->conf_mutex);
+
+ return 0;
+}
+
+static ssize_t ath12k_read_htt_stats(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct debug_htt_stats_req *stats_req = file->private_data;
+ char *buf;
+ u32 length;
+
+ buf = stats_req->buf;
+ length = min_t(u32, stats_req->buf_len, ATH12K_HTT_STATS_BUF_SIZE);
+ return simple_read_from_buffer(user_buf, count, ppos, buf, length);
+}
+
+static const struct file_operations fops_dump_htt_stats = {
+ .open = ath12k_open_htt_stats,
+ .release = ath12k_release_htt_stats,
+ .read = ath12k_read_htt_stats,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
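+/* Note (illustrative, not part of the firmware interface definition):
+ * the reset handler below spreads stats types across three config
+ * bitmap words. param_pos = (type >> 5) + 1, so types 0-31 select
+ * cfg1, types 32-63 select cfg2 and types 64-95 select cfg3. For
+ * example, resetting ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM (type 6) gives
+ * param_pos 1 and sets bit (HTT_STAT_DEFAULT_RESET_START_OFFSET + 6)
+ * in cfg1.
+ */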
+static ssize_t ath12k_write_htt_stats_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath12k *ar = file->private_data;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ struct htt_ext_stats_cfg_params cfg_params = { 0 };
+ u8 param_pos;
+ int ret;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &type);
+ if (ret)
+ return ret;
+
+ if (type >= ATH12K_DBG_HTT_NUM_EXT_STATS ||
+ type == ATH12K_DBG_HTT_EXT_STATS_RESET)
+ return -E2BIG;
+
+ mutex_lock(&ar->conf_mutex);
+ cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET;
+ param_pos = (type >> 5) + 1;
+
+ switch (param_pos) {
+ case ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES:
+ cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type);
+ break;
+ case ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES:
+ cfg_params.cfg2 = ATH12K_HTT_STATS_RESET_BITMAP32_BIT(cfg_params.cfg0 +
+ type);
+ break;
+ case ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES:
+ cfg_params.cfg3 = ATH12K_HTT_STATS_RESET_BITMAP64_BIT(cfg_params.cfg0 +
+ type);
+ break;
+ default:
+ break;
+ }
+
+ ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar,
+ ATH12K_DBG_HTT_EXT_STATS_RESET,
+ &cfg_params,
+ 0ULL);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+ }
+
+ ar->debug.htt_stats.reset = type;
+ mutex_unlock(&ar->conf_mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_htt_stats_reset = {
+ .write = ath12k_write_htt_stats_reset,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath12k_debugfs_htt_stats_register(struct ath12k *ar)
+{
+ debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_type);
+ debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev,
+ ar, &fops_dump_htt_stats);
+ debugfs_create_file("htt_stats_reset", 0200, ar->debug.debugfs_pdev,
+ ar, &fops_htt_stats_reset);
+}
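
For reference, the debugfs interface added above can be driven from user space
roughly as follows. This is a minimal illustrative sketch, not part of the
patch: the debugfs mount point and the per-radio directory (shown here as
"mac0" under a hypothetical PCI device node) depend on the running system.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Hypothetical paths; adjust to the actual ath12k debugfs layout. */
	#define TYPE_PATH  "/sys/kernel/debug/ath12k/pci-0000:01:00.0/mac0/htt_stats_type"
	#define STATS_PATH "/sys/kernel/debug/ath12k/pci-0000:01:00.0/mac0/htt_stats"

	int main(void)
	{
		char buf[4096];
		ssize_t n;
		int fd;

		/* Select type 6 (ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM), no cfg params. */
		fd = open(TYPE_PATH, O_WRONLY);
		if (fd < 0 || write(fd, "6", 1) != 1)
			return 1;
		close(fd);

		/* open() issues the HTT request and waits (up to 3 s in the
		 * driver) for the upload to complete; read() then returns the
		 * formatted TLV dump.
		 */
		fd = open(STATS_PATH, O_RDONLY);
		if (fd < 0)
			return 1;
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			fwrite(buf, 1, n, stdout);
		close(fd);
		return 0;
	}
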
diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
new file mode 100644
index 000000000000..6294a082cf8a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
@@ -0,0 +1,567 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef DEBUG_HTT_STATS_H
+#define DEBUG_HTT_STATS_H
+
+#define ATH12K_HTT_STATS_BUF_SIZE (1024 * 512)
+#define ATH12K_HTT_STATS_COOKIE_LSB GENMASK_ULL(31, 0)
+#define ATH12K_HTT_STATS_COOKIE_MSB GENMASK_ULL(63, 32)
+#define ATH12K_HTT_STATS_MAGIC_VALUE 0xF0F0F0F0
+#define ATH12K_HTT_STATS_SUBTYPE_MAX 16
+#define ATH12K_HTT_MAX_STRING_LEN 256
+
+#define ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx) ((_idx) & 0x1f)
+#define ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx) ((_idx) & 0x3f)
+#define ATH12K_HTT_STATS_RESET_BITMAP32_BIT(_idx) (1 << \
+ ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx))
+#define ATH12K_HTT_STATS_RESET_BITMAP64_BIT(_idx) (1 << \
+ ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx))
+
+void ath12k_debugfs_htt_stats_register(struct ath12k *ar);
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+ struct sk_buff *skb);
+#else /* CONFIG_ATH12K_DEBUGFS */
+static inline void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+}
+#endif
+
+/**
+ * DOC: target -> host extended statistics upload
+ *
+ * The following field definitions describe the format of the HTT
+ * target to host stats upload confirmation message.
+ * The message contains a cookie echoed from the HTT host->target stats
+ * upload request, which identifies which request the confirmation is
+ * for. A single stats request can span multiple HTT stats indications
+ * due to the HTT message size limitation, so every HTT ext stats
+ * indication carries tag-length-value stats information elements.
+ * The tag-length header for each HTT stats IND message also includes a
+ * status field, to indicate whether the request for the stat type in
+ * question was fully met, partially met, unable to be met, or invalid
+ * (if the stat type in question is disabled in the target).
+ * A Done bit set to 1 indicates the end of the stats info elements.
+ *
+ *
+ * |31 16|15 12|11|10 8|7 5|4 0|
+ * |--------------------------------------------------------------|
+ * | reserved | msg type |
+ * |--------------------------------------------------------------|
+ * | cookie LSBs |
+ * |--------------------------------------------------------------|
+ * | cookie MSBs |
+ * |--------------------------------------------------------------|
+ * | stats entry length | rsvd | D| S | stat type |
+ * |--------------------------------------------------------------|
+ * | type-specific stats info |
+ * | (see debugfs_htt_stats.h) |
+ * |--------------------------------------------------------------|
+ * Header fields:
+ * - MSG_TYPE
+ * Bits 7:0
+ * Purpose: Identifies this as an extended statistics upload confirmation
+ * message.
+ * Value: 0x1c
+ * - COOKIE_LSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: LSBs of the opaque cookie specified by the host-side requestor
+ * - COOKIE_MSBS
+ * Bits 31:0
+ * Purpose: Provide a mechanism to match a target->host stats confirmation
+ * message with its preceding host->target stats request message.
+ * Value: MSBs of the opaque cookie specified by the host-side requestor
+ *
+ * Stats Information Element tag-length header fields:
+ * - STAT_TYPE
+ * Bits 7:0
+ * Purpose: identifies the type of statistics info held in the
+ * following information element
+ * Value: ath12k_dbg_htt_ext_stats_type
+ * - STATUS
+ * Bits 10:8
+ * Purpose: indicate whether the requested stats are present
+ * Value:
+ * 0 -> The requested stats have been delivered in full
+ * 1 -> The requested stats have been delivered in part
+ * 2 -> The requested stats could not be delivered (error case)
+ * 3 -> The requested stat type is not recognized (invalid)
+ * - DONE
+ * Bit 11
+ * Purpose:
+ * Indicates the completion of the stats entry; this will be the last
+ * stats conf HTT segment for the requested stats type.
+ * Value:
+ * 0 -> the stats retrieval is ongoing
+ * 1 -> the stats retrieval is complete
+ * - LENGTH
+ * Bits 31:16
+ * Purpose: indicate the stats information size
+ * Value: This field specifies the number of bytes of stats information
+ * that follows the element tag-length header.
+ * It is expected but not required that this length is a multiple of
+ * 4 bytes.
+ */
+
+#define ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE BIT(11)
+#define ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH GENMASK(31, 16)
+
+struct ath12k_htt_extd_stats_msg {
+ __le32 info0;
+ __le64 cookie;
+ __le32 info1;
+ u8 data[];
+} __packed;
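+
+/* Example (illustrative only) of unpacking the trailer word above with
+ * the INFO1 masks, mirroring ath12k_debugfs_htt_ext_stats_handler():
+ *
+ *	u32 info = le32_to_cpu(msg->info1);
+ *	bool done = u32_get_bits(info, ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE);
+ *	u32 length = u32_get_bits(info, ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH);
+ */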
+
+/* htt_dbg_ext_stats_type */
+enum ath12k_dbg_htt_ext_stats_type {
+ ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5,
+ ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6,
+
+ /* keep this last */
+ ATH12K_DBG_HTT_NUM_EXT_STATS,
+};
+
+enum ath12k_dbg_htt_tlv_tag {
+ HTT_STATS_TX_PDEV_CMN_TAG = 0,
+ HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1,
+ HTT_STATS_TX_PDEV_SIFS_TAG = 2,
+ HTT_STATS_TX_PDEV_FLUSH_TAG = 3,
+ HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11,
+ HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12,
+ HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13,
+ HTT_STATS_TX_TQM_CMN_TAG = 14,
+ HTT_STATS_TX_TQM_PDEV_TAG = 15,
+ HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
+ HTT_STATS_TX_SCHED_CMN_TAG = 37,
+ HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
+ HTT_STATS_TX_TQM_ERROR_STATS_TAG = 43,
+ HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44,
+ HTT_STATS_HW_INTR_MISC_TAG = 54,
+ HTT_STATS_HW_PDEV_ERRS_TAG = 56,
+ HTT_STATS_WHAL_TX_TAG = 66,
+ HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67,
+ HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
+ HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
+ HTT_STATS_HW_WAR_TAG = 89,
+ HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100,
+ HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102,
+ HTT_STATS_MU_PPDU_DIST_TAG = 129,
+
+ HTT_STATS_MAX_TAG,
+};
+
+#define ATH12K_HTT_STATS_MAC_ID GENMASK(7, 0)
+
+#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9
+#define ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 150
+
+/* MU MIMO distribution stats is a 2-dimensional array
+ * with the first dimension denoting stats for nr4 ([0]) or nr8 ([1])
+ */
+#define ATH12K_HTT_STATS_NUM_NR_BINS 2
+#define ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST 10
+#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS 10
+#define ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS 9
+#define ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS \
+ (ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS)
+#define ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS \
+ (ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST)
+
+enum ath12k_htt_tx_pdev_underrun_enum {
+ HTT_STATS_TX_PDEV_NO_DATA_UNDERRUN = 0,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_BETWEEN_MPDU = 1,
+ HTT_STATS_TX_PDEV_DATA_UNDERRUN_WITHIN_MPDU = 2,
+ HTT_TX_PDEV_MAX_URRN_STATS = 3,
+};
+
+enum ath12k_htt_stats_reset_cfg_param_alloc_pos {
+ ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES = 1,
+ ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES,
+ ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES,
+};
+
+struct debug_htt_stats_req {
+ bool done;
+ bool override_cfg_param;
+ u8 pdev_id;
+ enum ath12k_dbg_htt_ext_stats_type type;
+ u32 cfg_param[4];
+ u8 peer_addr[ETH_ALEN];
+ struct completion htt_stats_rcvd;
+ u32 buf_len;
+ u8 buf[];
+};
+
+struct ath12k_htt_tx_pdev_stats_cmn_tlv {
+ __le32 mac_id__word;
+ __le32 hw_queued;
+ __le32 hw_reaped;
+ __le32 underrun;
+ __le32 hw_paused;
+ __le32 hw_flush;
+ __le32 hw_filt;
+ __le32 tx_abort;
+ __le32 mpdu_requed;
+ __le32 tx_xretry;
+ __le32 data_rc;
+ __le32 mpdu_dropped_xretry;
+ __le32 illgl_rate_phy_err;
+ __le32 cont_xretry;
+ __le32 tx_timeout;
+ __le32 pdev_resets;
+ __le32 phy_underrun;
+ __le32 txop_ovf;
+ __le32 seq_posted;
+ __le32 seq_failed_queueing;
+ __le32 seq_completed;
+ __le32 seq_restarted;
+ __le32 mu_seq_posted;
+ __le32 seq_switch_hw_paused;
+ __le32 next_seq_posted_dsr;
+ __le32 seq_posted_isr;
+ __le32 seq_ctrl_cached;
+ __le32 mpdu_count_tqm;
+ __le32 msdu_count_tqm;
+ __le32 mpdu_removed_tqm;
+ __le32 msdu_removed_tqm;
+ __le32 mpdus_sw_flush;
+ __le32 mpdus_hw_filter;
+ __le32 mpdus_truncated;
+ __le32 mpdus_ack_failed;
+ __le32 mpdus_expired;
+ __le32 mpdus_seq_hw_retry;
+ __le32 ack_tlv_proc;
+ __le32 coex_abort_mpdu_cnt_valid;
+ __le32 coex_abort_mpdu_cnt;
+ __le32 num_total_ppdus_tried_ota;
+ __le32 num_data_ppdus_tried_ota;
+ __le32 local_ctrl_mgmt_enqued;
+ __le32 local_ctrl_mgmt_freed;
+ __le32 local_data_enqued;
+ __le32 local_data_freed;
+ __le32 mpdu_tried;
+ __le32 isr_wait_seq_posted;
+
+ __le32 tx_active_dur_us_low;
+ __le32 tx_active_dur_us_high;
+ __le32 remove_mpdus_max_retries;
+ __le32 comp_delivered;
+ __le32 ppdu_ok;
+ __le32 self_triggers;
+ __le32 tx_time_dur_data;
+ __le32 seq_qdepth_repost_stop;
+ __le32 mu_seq_min_msdu_repost_stop;
+ __le32 seq_min_msdu_repost_stop;
+ __le32 seq_txop_repost_stop;
+ __le32 next_seq_cancel;
+ __le32 fes_offsets_err_cnt;
+ __le32 num_mu_peer_blacklisted;
+ __le32 mu_ofdma_seq_posted;
+ __le32 ul_mumimo_seq_posted;
+ __le32 ul_ofdma_seq_posted;
+
+ __le32 thermal_suspend_cnt;
+ __le32 dfs_suspend_cnt;
+ __le32 tx_abort_suspend_cnt;
+ __le32 tgt_specific_opaque_txq_suspend_info;
+ __le32 last_suspend_reason;
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_urrn_tlv {
+ DECLARE_FLEX_ARRAY(__le32, urrn_stats);
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_flush_tlv {
+ DECLARE_FLEX_ARRAY(__le32, flush_errs);
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_phy_err_tlv {
+ DECLARE_FLEX_ARRAY(__le32, phy_errs);
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_sifs_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sifs_status);
+} __packed;
+
+struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv {
+ __le32 fw_tx_mgmt_subtype[ATH12K_HTT_STATS_SUBTYPE_MAX];
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sifs_hist_status);
+} __packed;
+
+enum ath12k_htt_stats_hw_mode {
+ ATH12K_HTT_STATS_HWMODE_AC = 0,
+ ATH12K_HTT_STATS_HWMODE_AX = 1,
+ ATH12K_HTT_STATS_HWMODE_BE = 2,
+};
+
+struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv {
+ __le32 hw_mode;
+ __le32 num_seq_term_status[ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS];
+ __le32 num_ppdu_cmpl_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS];
+ __le32 num_seq_posted[ATH12K_HTT_STATS_NUM_NR_BINS];
+ __le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS];
+} __packed;
+
+#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
+#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8)
+
+#define ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG 20
+
+struct ath12k_htt_stats_tx_sched_cmn_tlv {
+ __le32 mac_id__word;
+ __le32 current_timestamp;
+} __packed;
+
+struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv {
+ __le32 mac_id__word;
+ __le32 sched_policy;
+ __le32 last_sched_cmd_posted_timestamp;
+ __le32 last_sched_cmd_compl_timestamp;
+ __le32 sched_2_tac_lwm_count;
+ __le32 sched_2_tac_ring_full;
+ __le32 sched_cmd_post_failure;
+ __le32 num_active_tids;
+ __le32 num_ps_schedules;
+ __le32 sched_cmds_pending;
+ __le32 num_tid_register;
+ __le32 num_tid_unregister;
+ __le32 num_qstats_queried;
+ __le32 qstats_update_pending;
+ __le32 last_qstats_query_timestamp;
+ __le32 num_tqm_cmdq_full;
+ __le32 num_de_sched_algo_trigger;
+ __le32 num_rt_sched_algo_trigger;
+ __le32 num_tqm_sched_algo_trigger;
+ __le32 notify_sched;
+ __le32 dur_based_sendn_term;
+ __le32 su_notify2_sched;
+ __le32 su_optimal_queued_msdus_sched;
+ __le32 su_delay_timeout_sched;
+ __le32 su_min_txtime_sched_delay;
+ __le32 su_no_delay;
+ __le32 num_supercycles;
+ __le32 num_subcycles_with_sort;
+ __le32 num_subcycles_no_sort;
+} __packed;
+
+struct ath12k_htt_sched_txq_cmd_posted_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_cmd_posted);
+} __packed;
+
+struct ath12k_htt_sched_txq_cmd_reaped_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_cmd_reaped);
+} __packed;
+
+struct ath12k_htt_sched_txq_sched_order_su_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_order_su);
+} __packed;
+
+struct ath12k_htt_sched_txq_sched_ineligibility_tlv {
+ DECLARE_FLEX_ARRAY(__le32, sched_ineligibility);
+} __packed;
+
+enum ath12k_htt_sched_txq_supercycle_triggers_tlv_enum {
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_NONE = 0,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_FORCED,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_TIDQ_ENTRIES,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_ACTIVE_TIDS,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX_ITR_REACHED,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_DUR_THRESHOLD_REACHED,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_TWT_TRIGGER,
+ ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX,
+};
+
+struct ath12k_htt_sched_txq_supercycle_triggers_tlv {
+ DECLARE_FLEX_ARRAY(__le32, supercycle_triggers);
+} __packed;
+
+struct ath12k_htt_hw_stats_pdev_errs_tlv {
+ __le32 mac_id__word;
+ __le32 tx_abort;
+ __le32 tx_abort_fail_count;
+ __le32 rx_abort;
+ __le32 rx_abort_fail_count;
+ __le32 warm_reset;
+ __le32 cold_reset;
+ __le32 tx_flush;
+ __le32 tx_glb_reset;
+ __le32 tx_txq_reset;
+ __le32 rx_timeout_reset;
+ __le32 mac_cold_reset_restore_cal;
+ __le32 mac_cold_reset;
+ __le32 mac_warm_reset;
+ __le32 mac_only_reset;
+ __le32 phy_warm_reset;
+ __le32 phy_warm_reset_ucode_trig;
+ __le32 mac_warm_reset_restore_cal;
+ __le32 mac_sfm_reset;
+ __le32 phy_warm_reset_m3_ssr;
+ __le32 phy_warm_reset_reason_phy_m3;
+ __le32 phy_warm_reset_reason_tx_hw_stuck;
+ __le32 phy_warm_reset_reason_num_rx_frame_stuck;
+ __le32 phy_warm_reset_reason_wal_rx_rec_rx_busy;
+ __le32 phy_warm_reset_reason_wal_rx_rec_mac_hng;
+ __le32 phy_warm_reset_reason_mac_conv_phy_reset;
+ __le32 wal_rx_recovery_rst_mac_hang_cnt;
+ __le32 wal_rx_recovery_rst_known_sig_cnt;
+ __le32 wal_rx_recovery_rst_no_rx_cnt;
+ __le32 wal_rx_recovery_rst_no_rx_consec_cnt;
+ __le32 wal_rx_recovery_rst_rx_busy_cnt;
+ __le32 wal_rx_recovery_rst_phy_mac_hang_cnt;
+ __le32 rx_flush_cnt;
+ __le32 phy_warm_reset_reason_tx_exp_cca_stuck;
+ __le32 phy_warm_reset_reason_tx_consec_flsh_war;
+ __le32 phy_warm_reset_reason_tx_hwsch_reset_war;
+ __le32 phy_warm_reset_reason_hwsch_cca_wdog_war;
+ __le32 fw_rx_rings_reset;
+ __le32 rx_dest_drain_rx_descs_leak_prevented;
+ __le32 rx_dest_drain_rx_descs_saved_cnt;
+ __le32 rx_dest_drain_rxdma2reo_leak_detected;
+ __le32 rx_dest_drain_rxdma2fw_leak_detected;
+ __le32 rx_dest_drain_rxdma2wbm_leak_detected;
+ __le32 rx_dest_drain_rxdma1_2sw_leak_detected;
+ __le32 rx_dest_drain_rx_drain_ok_mac_idle;
+ __le32 rx_dest_drain_ok_mac_not_idle;
+ __le32 rx_dest_drain_prerequisite_invld;
+ __le32 rx_dest_drain_skip_non_lmac_reset;
+ __le32 rx_dest_drain_hw_fifo_notempty_post_wait;
+} __packed;
+
+#define ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN 8
+struct ath12k_htt_hw_stats_intr_misc_tlv {
+ u8 hw_intr_name[ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN];
+ __le32 mask;
+ __le32 count;
+} __packed;
+
+struct ath12k_htt_hw_stats_whal_tx_tlv {
+ __le32 mac_id__word;
+ __le32 last_unpause_ppdu_id;
+ __le32 hwsch_unpause_wait_tqm_write;
+ __le32 hwsch_dummy_tlv_skipped;
+ __le32 hwsch_misaligned_offset_received;
+ __le32 hwsch_reset_count;
+ __le32 hwsch_dev_reset_war;
+ __le32 hwsch_delayed_pause;
+ __le32 hwsch_long_delayed_pause;
+ __le32 sch_rx_ppdu_no_response;
+ __le32 sch_selfgen_response;
+ __le32 sch_rx_sifs_resp_trigger;
+} __packed;
+
+struct ath12k_htt_hw_war_stats_tlv {
+ __le32 mac_id__word;
+ DECLARE_FLEX_ARRAY(__le32, hw_wars);
+} __packed;
+
+struct ath12k_htt_tx_tqm_cmn_stats_tlv {
+ __le32 mac_id__word;
+ __le32 max_cmdq_id;
+ __le32 list_mpdu_cnt_hist_intvl;
+ __le32 add_msdu;
+ __le32 q_empty;
+ __le32 q_not_empty;
+ __le32 drop_notification;
+ __le32 desc_threshold;
+ __le32 hwsch_tqm_invalid_status;
+ __le32 missed_tqm_gen_mpdus;
+ __le32 tqm_active_tids;
+ __le32 tqm_inactive_tids;
+ __le32 tqm_active_msduq_flows;
+ __le32 msduq_timestamp_updates;
+ __le32 msduq_updates_mpdu_head_info_cmd;
+ __le32 msduq_updates_emp_to_nonemp_status;
+ __le32 get_mpdu_head_info_cmds_by_query;
+ __le32 get_mpdu_head_info_cmds_by_tac;
+ __le32 gen_mpdu_cmds_by_query;
+ __le32 high_prio_q_not_empty;
+} __packed;
+
+struct ath12k_htt_tx_tqm_error_stats_tlv {
+ __le32 q_empty_failure;
+ __le32 q_not_empty_failure;
+ __le32 add_msdu_failure;
+ __le32 tqm_cache_ctl_err;
+ __le32 tqm_soft_reset;
+ __le32 tqm_reset_num_in_use_link_descs;
+ __le32 tqm_reset_num_lost_link_descs;
+ __le32 tqm_reset_num_lost_host_tx_buf_cnt;
+ __le32 tqm_reset_num_in_use_internal_tqm;
+ __le32 tqm_reset_num_in_use_idle_link_rng;
+ __le32 tqm_reset_time_to_tqm_hang_delta_ms;
+ __le32 tqm_reset_recovery_time_ms;
+ __le32 tqm_reset_num_peers_hdl;
+ __le32 tqm_reset_cumm_dirty_hw_mpduq_cnt;
+ __le32 tqm_reset_cumm_dirty_hw_msduq_proc;
+ __le32 tqm_reset_flush_cache_cmd_su_cnt;
+ __le32 tqm_reset_flush_cache_cmd_other_cnt;
+ __le32 tqm_reset_flush_cache_cmd_trig_type;
+ __le32 tqm_reset_flush_cache_cmd_trig_cfg;
+ __le32 tqm_reset_flush_cmd_skp_status_null;
+} __packed;
+
+struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv {
+ DECLARE_FLEX_ARRAY(__le32, gen_mpdu_end_reason);
+} __packed;
+
+#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON 16
+#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS 16
+
+struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv {
+ DECLARE_FLEX_ARRAY(__le32, list_mpdu_end_reason);
+} __packed;
+
+struct ath12k_htt_tx_tqm_list_mpdu_cnt_tlv {
+ DECLARE_FLEX_ARRAY(__le32, list_mpdu_cnt_hist);
+} __packed;
+
+struct ath12k_htt_tx_tqm_pdev_stats_tlv {
+ __le32 msdu_count;
+ __le32 mpdu_count;
+ __le32 remove_msdu;
+ __le32 remove_mpdu;
+ __le32 remove_msdu_ttl;
+ __le32 send_bar;
+ __le32 bar_sync;
+ __le32 notify_mpdu;
+ __le32 sync_cmd;
+ __le32 write_cmd;
+ __le32 hwsch_trigger;
+ __le32 ack_tlv_proc;
+ __le32 gen_mpdu_cmd;
+ __le32 gen_list_cmd;
+ __le32 remove_mpdu_cmd;
+ __le32 remove_mpdu_tried_cmd;
+ __le32 mpdu_queue_stats_cmd;
+ __le32 mpdu_head_info_cmd;
+ __le32 msdu_flow_stats_cmd;
+ __le32 remove_msdu_cmd;
+ __le32 remove_msdu_ttl_cmd;
+ __le32 flush_cache_cmd;
+ __le32 update_mpduq_cmd;
+ __le32 enqueue;
+ __le32 enqueue_notify;
+ __le32 notify_mpdu_at_head;
+ __le32 notify_mpdu_state_valid;
+ __le32 sched_udp_notify1;
+ __le32 sched_udp_notify2;
+ __le32 sched_nonudp_notify1;
+ __le32 sched_nonudp_notify2;
+} __packed;
+
+#endif
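
The TLV elements described above are walked by ath12k_dp_htt_tlv_iter(), whose
signature is touched in the dp_rx.c hunk further down. A minimal standalone
sketch of such a walk follows; it assumes the usual single-word HTT TLV header
(tag in bits 11:0, length in bits 23:12), which is defined elsewhere in the
driver and is an assumption here, not part of this patch.

	#include <linux/bitfield.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Assumed layout of the 32-bit HTT TLV header word. */
	#define HTT_TLV_TAG	GENMASK(11, 0)
	#define HTT_TLV_LEN	GENMASK(23, 12)

	struct htt_tlv {
		__le32 header;
	} __packed;

	/* Hand each tag-length-value element to iter(), the way
	 * ath12k_dbg_htt_ext_stats_parse() is driven for HTT ext stats.
	 */
	static int htt_tlv_walk(const u8 *ptr, size_t len,
				int (*iter)(u16 tag, u16 len, const void *ptr))
	{
		const struct htt_tlv *tlv;
		u16 tag, tlen;
		int ret;

		while (len >= sizeof(*tlv)) {
			tlv = (const struct htt_tlv *)ptr;
			tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
			tlen = le32_get_bits(tlv->header, HTT_TLV_LEN);
			ptr += sizeof(*tlv);
			len -= sizeof(*tlv);

			if (tlen > len)
				return -EINVAL;	/* truncated value */

			ret = iter(tag, tlen, ptr);
			if (ret)
				return ret;

			ptr += tlen;
			len -= tlen;
		}
		return 0;
	}
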
diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c
index c8e1b244b69e..61aa78d8bd8c 100644
--- a/drivers/net/wireless/ath/ath12k/dp.c
+++ b/drivers/net/wireless/ath/ath12k/dp.c
@@ -14,6 +14,11 @@
#include "peer.h"
#include "dp_mon.h"
+enum ath12k_dp_desc_type {
+ ATH12K_DP_TX_DESC,
+ ATH12K_DP_RX_DESC,
+};
+
static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
struct sk_buff *skb)
{
@@ -127,7 +132,9 @@ static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
enum hal_ring_type type, int ring_num)
{
+ const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
const u8 *grp_mask;
+ int i;
switch (type) {
case HAL_WBM2SW_RELEASE:
@@ -135,6 +142,14 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
ring_num = 0;
} else {
+ map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
+ for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
+ if (ring_num == map[i].wbm_ring_num) {
+ ring_num = i;
+ break;
+ }
+ }
+
grp_mask = &ab->hw_params->ring_mask->tx[0];
}
break;
@@ -452,8 +467,6 @@ static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
}
- ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
- ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
}
@@ -474,20 +487,6 @@ static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
goto err;
}
- ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
- DP_TCL_CMD_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
- goto err;
- }
-
- ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
- 0, 0, DP_TCL_STATUS_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
- goto err;
- }
-
for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
tx_comp_ring_num = map[i].wbm_ring_num;
@@ -611,6 +610,7 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
int i;
int ret = 0;
u32 end_offset, cookie;
+ enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm;
n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
@@ -641,7 +641,8 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
paddr = link_desc_banks[i].paddr;
while (n_entries) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
- ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
+ ath12k_hal_set_link_desc_addr(scatter_buf, cookie,
+ paddr, rbm);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
if (rem_entries) {
@@ -785,6 +786,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
u32 paddr;
int i, ret;
u32 cookie;
+ enum hal_rx_buf_return_buf_manager rbm = ab->dp.idle_link_rbm;
tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
tot_mem_sz += HAL_LINK_DESC_ALIGN;
@@ -845,8 +847,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
while (n_entries &&
(desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
- ath12k_hal_set_link_desc_addr(desc,
- cookie, paddr);
+ ath12k_hal_set_link_desc_addr(desc, cookie, paddr, rbm);
n_entries--;
paddr += HAL_LINK_DESC_SIZE;
}
@@ -876,11 +877,9 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
enum dp_monitor_mode monitor_mode;
u8 ring_mask;
- while (i < ab->hw_params->max_tx_ring) {
- if (ab->hw_params->ring_mask->tx[grp_id] &
- BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
- ath12k_dp_tx_completion_handler(ab, i);
- i++;
+ if (ab->hw_params->ring_mask->tx[grp_id]) {
+ i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1;
+ ath12k_dp_tx_completion_handler(ab, i);
}
if (ab->hw_params->ring_mask->rx_err[grp_id]) {
@@ -916,8 +915,8 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
@@ -937,8 +936,8 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
for (i = 0; i < ab->num_radios; i++) {
- for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
- int id = i * ab->hw_params->num_rxmda_per_pdev + j;
+ for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) {
+ int id = i * ab->hw_params->num_rxdma_per_pdev + j;
if (ring_mask & BIT(id)) {
work_done =
@@ -960,8 +959,9 @@ int ath12k_dp_service_srng(struct ath12k_base *ab,
if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ LIST_HEAD(list);
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, 0);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
}
/* TODO: Implement handler for other interrupts */
@@ -1025,7 +1025,7 @@ static void ath12k_dp_service_mon_ring(struct timer_list *t)
struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
ATH12K_DP_RX_MONITOR_MODE);
@@ -1146,11 +1146,13 @@ void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
{
- struct ath12k_rx_desc_info *desc_info, *tmp;
+ struct ath12k_rx_desc_info *desc_info;
struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
struct ath12k_dp *dp = &ab->dp;
+ struct ath12k_skb_cb *skb_cb;
struct sk_buff *skb;
- int i;
+ struct ath12k *ar;
+ int i, j;
u32 pool_id, tx_spt_page;
if (!dp->spt_info)
@@ -1159,16 +1161,23 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
/* RX Descriptor cleanup */
spin_lock_bh(&dp->rx_desc_lock);
- list_for_each_entry_safe(desc_info, tmp, &dp->rx_desc_used_list, list) {
- list_del(&desc_info->list);
- skb = desc_info->skb;
+ for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
+ desc_info = dp->spt_info->rxbaddr[i];
- if (!skb)
- continue;
+ for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
+ if (!desc_info[j].in_use) {
+ list_del(&desc_info[j].list);
+ continue;
+ }
- dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
- skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
- dev_kfree_skb_any(skb);
+ skb = desc_info[j].skb;
+ if (!skb)
+ continue;
+
+ dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
+ skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
}
for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
@@ -1193,6 +1202,11 @@ static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
if (!skb)
continue;
+ skb_cb = ATH12K_SKB_CB(skb);
+ ar = skb_cb->ar;
+ if (atomic_dec_and_test(&ar->dp.num_tx_pending))
+ wake_up(&ar->dp.tx_empty_waitq);
+
dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
skb->len, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
@@ -1335,16 +1349,22 @@ static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
u32 cookie)
{
+ struct ath12k_dp *dp = &ab->dp;
struct ath12k_rx_desc_info **desc_addr_ptr;
- u16 ppt_idx, spt_idx;
+ u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
- spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
+ spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
+
+ start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET;
+ end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;
- if (ppt_idx > ATH12K_NUM_RX_SPT_PAGES ||
+ if (ppt_idx < start_ppt_idx ||
+ ppt_idx >= end_ppt_idx ||
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
+ ppt_idx = ppt_idx - dp->rx_ppt_base;
desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
return *desc_addr_ptr;
@@ -1354,13 +1374,17 @@ struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
u32 cookie)
{
struct ath12k_tx_desc_info **desc_addr_ptr;
- u16 ppt_idx, spt_idx;
+ u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
- spt_idx = u32_get_bits(cookie, ATH12k_DP_CC_COOKIE_SPT);
+ spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
- if (ppt_idx < ATH12K_NUM_RX_SPT_PAGES ||
- ppt_idx > ab->dp.num_spt_pages ||
+ start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
+ end_ppt_idx = start_ppt_idx +
+ (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES);
+
+ if (ppt_idx < start_ppt_idx ||
+ ppt_idx >= end_ppt_idx ||
spt_idx > ATH12K_MAX_SPT_ENTRIES)
return NULL;
@@ -1375,7 +1399,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
u32 i, j, pool_id, tx_spt_page;
- u32 ppt_idx;
+ u32 ppt_idx, cookie_ppt_idx;
spin_lock_bh(&dp->rx_desc_lock);
@@ -1389,15 +1413,17 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
return -ENOMEM;
}
+ ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
+ cookie_ppt_idx = dp->rx_ppt_base + ppt_idx;
dp->spt_info->rxbaddr[i] = &rx_descs[0];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
- rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(i, j);
+ rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
/* Update descriptor VA in SPT */
- rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, i, j);
+ rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
*rx_desc_addr = &rx_descs[j];
}
}
@@ -1417,10 +1443,11 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
}
tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
+ ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;
+
dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0];
for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
- ppt_idx = ATH12K_NUM_RX_SPT_PAGES + tx_spt_page;
tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
tx_descs[j].pool_id = pool_id;
list_add_tail(&tx_descs[j].list,
@@ -1437,14 +1464,44 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
return 0;
}
+static int ath12k_dp_cmem_init(struct ath12k_base *ab,
+ struct ath12k_dp *dp,
+ enum ath12k_dp_desc_type type)
+{
+ u32 cmem_base;
+ int i, start, end;
+
+ cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+
+ switch (type) {
+ case ATH12K_DP_TX_DESC:
+ start = ATH12K_TX_SPT_PAGE_OFFSET;
+ end = start + ATH12K_NUM_TX_SPT_PAGES;
+ break;
+ case ATH12K_DP_RX_DESC:
+ cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base);
+ start = ATH12K_RX_SPT_PAGE_OFFSET;
+ end = start + ATH12K_NUM_RX_SPT_PAGES;
+ break;
+ default:
+ ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type);
+ return -EINVAL;
+ }
+
+ /* Write to PPT in CMEM */
+ for (i = start; i < end; i++)
+ ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
+ dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+
+ return 0;
+}
+
static int ath12k_dp_cc_init(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
int i, ret = 0;
- u32 cmem_base;
INIT_LIST_HEAD(&dp->rx_desc_free_list);
- INIT_LIST_HEAD(&dp->rx_desc_used_list);
spin_lock_init(&dp->rx_desc_lock);
for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
@@ -1465,7 +1522,7 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
return -ENOMEM;
}
- cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
+ dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES;
for (i = 0; i < dp->num_spt_pages; i++) {
dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
@@ -1483,10 +1540,18 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab)
ret = -EINVAL;
goto free;
}
+ }
- /* Write to PPT in CMEM */
- ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
- dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
+ ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_TX_DESC);
+ if (ret) {
+ ath12k_warn(ab, "HW CC Tx cmem init failed %d", ret);
+ goto free;
+ }
+
+ ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_RX_DESC);
+ if (ret) {
+ ath12k_warn(ab, "HW CC Rx cmem init failed %d", ret);
+ goto free;
}
ret = ath12k_dp_cc_desc_init(ab);
@@ -1522,6 +1587,24 @@ static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
return 0;
}
+static enum hal_rx_buf_return_buf_manager
+ath12k_dp_get_idle_link_rbm(struct ath12k_base *ab)
+{
+ switch (ab->device_id) {
+ case 0:
+ return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+ case 1:
+ return HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST;
+ case 2:
+ return HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST;
+ default:
+ ath12k_warn(ab, "invalid %d device id, so choose default rbm\n",
+ ab->device_id);
+ WARN_ON(1);
+ return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST;
+ }
+}
+
int ath12k_dp_alloc(struct ath12k_base *ab)
{
struct ath12k_dp *dp = &ab->dp;
@@ -1538,6 +1621,7 @@ int ath12k_dp_alloc(struct ath12k_base *ab)
spin_lock_init(&dp->reo_cmd_lock);
dp->reo_cmd_cache_flush_count = 0;
+ dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);
ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
if (ret) {
diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h
index eb2dd408e081..b77497c14ac4 100644
--- a/drivers/net/wireless/ath/ath12k/dp.h
+++ b/drivers/net/wireless/ath/ath12k/dp.h
@@ -223,6 +223,9 @@ struct ath12k_pdev_dp {
#define ATH12K_NUM_TX_SPT_PAGES (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES)
#define ATH12K_NUM_SPT_PAGES (ATH12K_NUM_RX_SPT_PAGES + ATH12K_NUM_TX_SPT_PAGES)
+#define ATH12K_TX_SPT_PAGE_OFFSET 0
+#define ATH12K_RX_SPT_PAGE_OFFSET ATH12K_NUM_TX_SPT_PAGES
+
/* The SPT pages are divided for RX and TX, first block for RX
* and remaining for TX
*/
@@ -245,7 +248,7 @@ struct ath12k_pdev_dp {
#define ATH12K_CC_SPT_MSB 8
#define ATH12K_CC_PPT_MSB 19
#define ATH12K_CC_PPT_SHIFT 9
-#define ATH12k_DP_CC_COOKIE_SPT GENMASK(8, 0)
+#define ATH12K_DP_CC_COOKIE_SPT GENMASK(8, 0)
#define ATH12K_DP_CC_COOKIE_PPT GENMASK(19, 9)
#define DP_REO_QREF_NUM GENMASK(31, 16)
@@ -282,6 +285,8 @@ struct ath12k_rx_desc_info {
struct sk_buff *skb;
u32 cookie;
u32 magic;
+ u8 in_use : 1,
+ reserved : 7;
};
struct ath12k_tx_desc_info {
@@ -320,15 +325,15 @@ struct ath12k_dp {
u8 htt_tgt_ver_major;
u8 htt_tgt_ver_minor;
struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX];
+ enum hal_rx_buf_return_buf_manager idle_link_rbm;
struct dp_srng wbm_idle_ring;
struct dp_srng wbm_desc_rel_ring;
- struct dp_srng tcl_cmd_ring;
- struct dp_srng tcl_status_ring;
struct dp_srng reo_reinject_ring;
struct dp_srng rx_rel_ring;
struct dp_srng reo_except_ring;
struct dp_srng reo_cmd_ring;
struct dp_srng reo_status_ring;
+ enum ath12k_peer_metadata_version peer_metadata_ver;
struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
@@ -346,9 +351,9 @@ struct ath12k_dp {
struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
struct ath12k_spt_info *spt_info;
u32 num_spt_pages;
+ u32 rx_ppt_base;
struct list_head rx_desc_free_list;
- struct list_head rx_desc_used_list;
- /* protects the free and used desc list */
+ /* protects the free desc list */
spinlock_t rx_desc_lock;
struct list_head tx_desc_free_list[ATH12K_HW_MAX_QUEUES];
@@ -377,8 +382,6 @@ struct ath12k_dp {
/* peer meta data */
#define HTT_TCL_META_DATA_PEER_ID GENMASK(15, 2)
-#define HTT_TX_WBM_COMP_STATUS_OFFSET 8
-
/* HTT tx completion is overlaid in wbm_release_ring */
#define HTT_TX_WBM_COMP_INFO0_STATUS GENMASK(16, 13)
#define HTT_TX_WBM_COMP_INFO1_REINJECT_REASON GENMASK(3, 0)
diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c
index 2d56913a75d0..5c6749bc4039 100644
--- a/drivers/net/wireless/ath/ath12k/dp_mon.c
+++ b/drivers/net/wireless/ath/ath12k/dp_mon.c
@@ -944,7 +944,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
goto err_merge_fail;
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "mpdu_buf %pK mpdu_buf->len %u",
+ "mpdu_buf %p mpdu_buf->len %u",
prev_buf, prev_buf->len);
} else {
ath12k_dbg(ab, ATH12K_DBG_DATA,
@@ -958,7 +958,7 @@ ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
err_merge_fail:
if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "err_merge_fail mpdu_buf %pK", mpdu_buf);
+ "err_merge_fail mpdu_buf %p", mpdu_buf);
/* Free the head buffer */
dev_kfree_skb_any(mpdu_buf);
}
@@ -1092,7 +1092,7 @@ static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct
spin_unlock_bh(&ar->ab->base_lock);
ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
- "rx skb %pK len %u peer %pM %u %s %s%s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %u %s %s%s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
@@ -1903,43 +1903,6 @@ ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
break;
}
- case HAL_MON_BUF_ADDR: {
- struct dp_rxdma_mon_ring *buf_ring = &ab->dp.tx_mon_buf_ring;
- struct dp_mon_packet_info *packet_info =
- (struct dp_mon_packet_info *)tlv_data;
- int buf_id = u32_get_bits(packet_info->cookie,
- DP_RXDMA_BUF_COOKIE_BUF_ID);
- struct sk_buff *msdu;
- struct dp_mon_mpdu *mon_mpdu = tx_ppdu_info->tx_mon_mpdu;
- struct ath12k_skb_rxcb *rxcb;
-
- spin_lock_bh(&buf_ring->idr_lock);
- msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
- spin_unlock_bh(&buf_ring->idr_lock);
-
- if (unlikely(!msdu)) {
- ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
- buf_id);
- return DP_MON_TX_STATUS_PPDU_NOT_DONE;
- }
-
- rxcb = ATH12K_SKB_RXCB(msdu);
- dma_unmap_single(ab->dev, rxcb->paddr,
- msdu->len + skb_tailroom(msdu),
- DMA_FROM_DEVICE);
-
- if (!mon_mpdu->head)
- mon_mpdu->head = msdu;
- else if (mon_mpdu->tail)
- mon_mpdu->tail->next = msdu;
-
- mon_mpdu->tail = msdu;
-
- ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
- status = DP_MON_TX_BUFFER_ADDR;
- break;
- }
-
case HAL_TX_MPDU_END:
list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list,
&tx_ppdu_info->dp_tx_mon_mpdu_list);
@@ -2088,8 +2051,7 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
buf_ring = &dp->rxdma_mon_buf_ring;
} else {
- mon_dst_ring = &pdev_dp->tx_mon_dst_ring[srng_id];
- buf_ring = &dp->tx_mon_buf_ring;
+ return 0;
}
srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c
index ca76c018dd0c..14236d0a0c89 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.c
@@ -17,6 +17,7 @@
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"
+#include "debugfs_htt_stats.h"
#define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
@@ -239,31 +240,61 @@ static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
}
-static int ath12k_dp_purge_mon_ring(struct ath12k_base *ab)
+static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list)
{
- int i, reaped = 0;
- unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
+ struct sk_buff *skb;
- do {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
- reaped += ath12k_dp_mon_process_ring(ab, i, NULL,
- DP_MON_SERVICE_BUDGET,
- ATH12K_DP_RX_MONITOR_MODE);
+ while ((skb = __skb_dequeue(skb_list)))
+ dev_kfree_skb_any(skb);
+}
+
+static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
+ struct list_head *head,
+ size_t count)
+{
+ struct list_head *cur;
+ struct ath12k_rx_desc_info *rx_desc;
+ size_t nodes = 0;
- /* nothing more to reap */
- if (reaped < DP_MON_SERVICE_BUDGET)
- return 0;
+ if (!count) {
+ INIT_LIST_HEAD(list);
+ goto out;
+ }
- } while (time_before(jiffies, timeout));
+ list_for_each(cur, head) {
+ if (!count)
+ break;
- ath12k_warn(ab, "dp mon ring purge timeout");
+ rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
+ rx_desc->in_use = true;
- return -ETIMEDOUT;
+ count--;
+ nodes++;
+ }
+
+ list_cut_before(list, head, cur);
+out:
+ return nodes;
+}
+
+static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
+ struct list_head *used_list)
+{
+ struct ath12k_rx_desc_info *rx_desc, *safe;
+
+ /* Reset the use flag */
+ list_for_each_entry_safe(rx_desc, safe, used_list, list)
+ rx_desc->in_use = false;
+
+ spin_lock_bh(&dp->rx_desc_lock);
+ list_splice_tail(used_list, &dp->rx_desc_free_list);
+ spin_unlock_bh(&dp->rx_desc_lock);
}
/* Returns number of Rx buffers replenished */
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring,
+ struct list_head *used_list,
int req_entries)
{
struct ath12k_buffer_addr *desc;
@@ -292,6 +323,19 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
req_entries = min(num_free, req_entries);
num_remain = req_entries;
+ if (!num_remain)
+ goto out;
+
+ /* Get the descriptor from free list */
+ if (list_empty(used_list)) {
+ spin_lock_bh(&dp->rx_desc_lock);
+ req_entries = ath12k_dp_list_cut_nodes(used_list,
+ &dp->rx_desc_free_list,
+ num_remain);
+ spin_unlock_bh(&dp->rx_desc_lock);
+ num_remain = req_entries;
+ }
+
while (num_remain > 0) {
skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
DP_RX_BUFFER_ALIGN_SIZE);
@@ -311,33 +355,20 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
if (dma_mapping_error(ab->dev, paddr))
goto fail_free_skb;
- spin_lock_bh(&dp->rx_desc_lock);
-
- /* Get desc from free list and store in used list
- * for cleanup purposes
- *
- * TODO: pass the removed descs rather than
- * add/read to optimize
- */
- rx_desc = list_first_entry_or_null(&dp->rx_desc_free_list,
+ rx_desc = list_first_entry_or_null(used_list,
struct ath12k_rx_desc_info,
list);
- if (!rx_desc) {
- spin_unlock_bh(&dp->rx_desc_lock);
+ if (!rx_desc)
goto fail_dma_unmap;
- }
rx_desc->skb = skb;
cookie = rx_desc->cookie;
- list_del(&rx_desc->list);
- list_add_tail(&rx_desc->list, &dp->rx_desc_used_list);
-
- spin_unlock_bh(&dp->rx_desc_lock);
desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
if (!desc)
- goto fail_buf_unassign;
+ goto fail_dma_unmap;
+ list_del(&rx_desc->list);
ATH12K_SKB_RXCB(skb)->paddr = paddr;
num_remain--;
@@ -345,26 +376,19 @@ int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
}
- ath12k_hal_srng_access_end(ab, srng);
-
- spin_unlock_bh(&srng->lock);
-
- return req_entries - num_remain;
+ goto out;
-fail_buf_unassign:
- spin_lock_bh(&dp->rx_desc_lock);
- list_del(&rx_desc->list);
- list_add_tail(&rx_desc->list, &dp->rx_desc_free_list);
- rx_desc->skb = NULL;
- spin_unlock_bh(&dp->rx_desc_lock);
fail_dma_unmap:
dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
fail_free_skb:
dev_kfree_skb_any(skb);
-
+out:
ath12k_hal_srng_access_end(ab, srng);
+ if (!list_empty(used_list))
+ ath12k_dp_rx_enqueue_free(dp, used_list);
+
spin_unlock_bh(&srng->lock);
return req_entries - num_remain;
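
The reworked API shifts recycling to the caller: reaped descriptors are
collected on a stack list and handed back through the replenish call, which
splices them onto the free pool under a single rx_desc_lock round-trip. A
minimal caller sketch, illustrative only and simplified from
ath12k_dp_rx_process():

	static void example_reap_and_replenish(struct ath12k_base *ab,
					       struct dp_rxdma_ring *rx_ring,
					       struct ath12k_rx_desc_info *desc_info)
	{
		LIST_HEAD(used);	/* stack list, no locking needed */

		/* per reaped buffer: just queue the descriptor locally */
		list_add_tail(&desc_info->list, &used);

		/* one call refills the ring and returns &used to
		 * dp->rx_desc_free_list in a single lock round-trip
		 */
		ath12k_dp_rx_bufs_replenish(ab, rx_ring, &used, 1);
	}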
@@ -399,8 +423,6 @@ static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
- ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->tx_mon_buf_ring);
-
return 0;
}
@@ -422,13 +444,12 @@ static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring)
{
- int num_entries;
+ LIST_HEAD(list);
- num_entries = rx_ring->refill_buf_ring.size /
- ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
+ rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
+ ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
- rx_ring->bufs_max = num_entries;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_entries);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
return 0;
}
@@ -454,15 +475,6 @@ static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
"failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
-
- ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
- &dp->tx_mon_buf_ring,
- HAL_TX_MONITOR_BUF);
- if (ret) {
- ath12k_warn(ab,
- "failed to setup HAL_TX_MONITOR_BUF\n");
- return ret;
- }
}
return 0;
@@ -474,10 +486,8 @@ static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
struct ath12k_base *ab = ar->ab;
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
- ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]);
- }
}
void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
@@ -521,7 +531,7 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
int ret;
u32 mac_id = dp->mac_id;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ret = ath12k_dp_srng_setup(ar->ab,
&dp->rxdma_mon_dst_ring[i],
HAL_RXDMA_MONITOR_DST,
@@ -532,17 +542,6 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
"failed to setup HAL_RXDMA_MONITOR_DST\n");
return ret;
}
-
- ret = ath12k_dp_srng_setup(ar->ab,
- &dp->tx_mon_dst_ring[i],
- HAL_TX_MONITOR_DST,
- 0, mac_id + i,
- DP_TX_MONITOR_DEST_RING_SIZE);
- if (ret) {
- ath12k_warn(ar->ab,
- "failed to setup HAL_TX_MONITOR_DST\n");
- return ret;
- }
}
return 0;
@@ -1270,10 +1269,10 @@ static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
return 0;
}
-static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
- int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
- const void *ptr, void *data),
- void *data)
+int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data)
{
const struct htt_tlv *tlv;
const void *begin = ptr;
@@ -1743,6 +1742,7 @@ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
ath12k_htt_pull_ppdu_stats(ab, skb);
break;
case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
+ ath12k_debugfs_htt_ext_stats_handler(ab, skb);
break;
case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
ath12k_htt_mlo_offset_event_handler(ab, skb);
@@ -2361,8 +2361,10 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
channel_num = meta_data;
center_freq = meta_data >> 16;
- if (center_freq >= 5935 && center_freq <= 7105) {
+ if (center_freq >= ATH12K_MIN_6G_FREQ &&
+ center_freq <= ATH12K_MAX_6G_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
+ rx_status->freq = center_freq;
} else if (channel_num >= 1 && channel_num <= 14) {
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
@@ -2380,8 +2382,9 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
rx_desc, sizeof(*rx_desc));
}
- rx_status->freq = ieee80211_channel_to_frequency(channel_num,
- rx_status->band);
+ if (rx_status->band != NL80211_BAND_6GHZ)
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
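
The 6 GHz branch has to take the frequency straight from the RX metadata
because 6 GHz channel numbers overlap the 2 GHz and 5 GHz numbering, so the
mapping is ambiguous until the band is already known. A quick illustration
with the cfg80211 helper:

	int f2g = ieee80211_channel_to_frequency(1, NL80211_BAND_2GHZ);	/* 2412 MHz */
	int f6g = ieee80211_channel_to_frequency(1, NL80211_BAND_6GHZ);	/* 5955 MHz */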
@@ -2423,7 +2426,7 @@ static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *nap
spin_unlock_bh(&ab->base_lock);
ath12k_dbg(ab, ATH12K_DBG_DATA,
- "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
peer ? peer->addr : NULL,
@@ -2582,9 +2585,33 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
rcu_read_unlock();
}
+static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab,
+ enum ath12k_peer_metadata_version ver,
+ __le32 peer_metadata)
+{
+ switch (ver) {
+ default:
+ ath12k_warn(ab, "Unknown peer metadata version: %d", ver);
+ fallthrough;
+ case ATH12K_PEER_METADATA_V0:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V0_PEER_ID);
+ case ATH12K_PEER_METADATA_V1:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1_PEER_ID);
+ case ATH12K_PEER_METADATA_V1A:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1A_PEER_ID);
+ case ATH12K_PEER_METADATA_V1B:
+ return le32_get_bits(peer_metadata,
+ RX_MPDU_DESC_META_DATA_V1B_PEER_ID);
+ }
+}
+
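A worked decode of the version-specific extraction, using a hypothetical
metadata value and the RX_MPDU_DESC_META_DATA_V1_* masks added to hal_desc.h
below:

	/* 0x00c02185 under the V1 layout:
	 *   bits 13:0  (PEER_ID) -> 0x2185
	 *   bits 23:16 (VDEV_ID) -> 0xc0
	 */
	u16 peer_id = le32_get_bits(cpu_to_le32(0x00c02185),
				    RX_MPDU_DESC_META_DATA_V1_PEER_ID);
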
int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
struct napi_struct *napi, int budget)
{
+ LIST_HEAD(rx_desc_used_list);
struct ath12k_rx_desc_info *desc_info;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
@@ -2609,6 +2636,8 @@ try_again:
ath12k_hal_srng_access_begin(ab, srng);
while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ struct rx_mpdu_desc *mpdu_info;
+ struct rx_msdu_desc *msdu_info;
enum hal_reo_dest_ring_push_reason push_reason;
u32 cookie;
@@ -2626,7 +2655,8 @@ try_again:
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
+ cookie);
continue;
}
}
@@ -2637,9 +2667,7 @@ try_again:
msdu = desc_info->skb;
desc_info->skb = NULL;
- spin_lock_bh(&dp->rx_desc_lock);
- list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
- spin_unlock_bh(&dp->rx_desc_lock);
+ list_add_tail(&desc_info->list, &rx_desc_used_list);
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ab->dev, rxcb->paddr,
@@ -2657,16 +2685,19 @@ try_again:
continue;
}
- rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ msdu_info = &desc->rx_msdu_info;
+ mpdu_info = &desc->rx_mpdu_info;
+
+ rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
- rxcb->is_last_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
- rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) &
+ rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
rxcb->mac_id = mac_id;
- rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data,
- RX_MPDU_DESC_META_DATA_PEER_ID);
- rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0,
+ rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver,
+ mpdu_info->peer_meta_data);
+ rxcb->tid = le32_get_bits(mpdu_info->info0,
RX_MPDU_DESC_INFO0_TID);
__skb_queue_tail(&msdu_list, msdu);
@@ -2700,7 +2731,8 @@ try_again:
if (!total_msdu_reaped)
goto exit;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
+ num_buffs_reaped);
ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
ring_id);
@@ -2740,6 +2772,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev
peer = ath12k_peer_find(ab, vdev_id, peer_mac);
if (!peer) {
spin_unlock_bh(&ab->base_lock);
+ crypto_free_shash(tfm);
ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
return -ENOENT;
}
@@ -2969,9 +3002,10 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
struct hal_srng *srng;
dma_addr_t link_paddr, buf_paddr;
u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
- u32 cookie, hal_rx_desc_sz, dest_ring_info0;
+ u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi;
int ret;
struct ath12k_rx_desc_info *desc_info;
+ enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm;
u8 dst_ind;
hal_rx_desc_sz = ab->hal.hal_desc_sz;
@@ -3005,7 +3039,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, buf_paddr))
return -ENOMEM;
@@ -3021,9 +3055,9 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
}
desc_info->skb = defrag_skb;
+ desc_info->in_use = true;
list_del(&desc_info->list);
- list_add_tail(&desc_info->list, &dp->rx_desc_used_list);
spin_unlock_bh(&dp->rx_desc_lock);
ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
@@ -3049,7 +3083,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
cookie,
- HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST);
+ idle_link_rbm);
mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
@@ -3061,13 +3095,11 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
reo_ent_ring->rx_mpdu_info.peer_meta_data =
reo_dest_ring->rx_mpdu_info.peer_meta_data;
- /* Firmware expects physical address to be filled in queue_addr_lo in
- * the MLO scenario and in case of non MLO peer meta data needs to be
- * filled.
- * TODO: Need to handle for MLO scenario.
- */
- reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
- reo_ent_ring->info0 = le32_encode_bits(dst_ind,
+ reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr));
+ queue_addr_hi = upper_32_bits(rx_tid->paddr);
+ reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
+ HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
+ le32_encode_bits(dst_ind,
HAL_REO_ENTR_RING_INFO0_DEST_IND);
reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
@@ -3085,13 +3117,13 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
err_free_desc:
spin_lock_bh(&dp->rx_desc_lock);
- list_del(&desc_info->list);
- list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
+ desc_info->in_use = false;
desc_info->skb = NULL;
+ list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
spin_unlock_bh(&dp->rx_desc_lock);
err_unmap_dma:
dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
return ret;
}
@@ -3304,6 +3336,7 @@ out_unlock:
static int
ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
+ struct list_head *used_list,
bool drop, u32 cookie)
{
struct ath12k_base *ab = ar->ab;
@@ -3323,7 +3356,8 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
+ cookie);
return -EINVAL;
}
}
@@ -3333,9 +3367,8 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
msdu = desc_info->skb;
desc_info->skb = NULL;
- spin_lock_bh(&ab->dp.rx_desc_lock);
- list_move_tail(&desc_info->list, &ab->dp.rx_desc_free_list);
- spin_unlock_bh(&ab->dp.rx_desc_lock);
+
+ list_add_tail(&desc_info->list, used_list);
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ar->ab->dev, rxcb->paddr,
@@ -3391,6 +3424,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
struct hal_reo_dest_ring *reo_desc;
struct dp_rxdma_ring *rx_ring;
struct dp_srng *reo_except;
+ LIST_HEAD(rx_desc_used_list);
u32 desc_bank, num_msdus;
struct hal_srng *srng;
struct ath12k_dp *dp;
@@ -3398,7 +3432,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
struct ath12k *ar;
dma_addr_t paddr;
bool is_frag;
- bool drop = false;
+ bool drop;
int pdev_id;
tot_n_bufs_reaped = 0;
@@ -3416,7 +3450,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
while (budget &&
(reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
+ drop = false;
ab->soc_stats.err_ring_pkts++;
+
ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
&desc_bank);
if (ret) {
@@ -3428,7 +3464,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
(paddr - link_desc_banks[desc_bank].paddr);
ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
- if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST &&
+ if (rbm != dp->idle_link_rbm &&
rbm != HAL_RX_BUF_RBM_SW3_BM &&
rbm != ab->hw_params->hal_params->rx_buf_rbm) {
ab->soc_stats.invalid_rbm++;
@@ -3458,7 +3494,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
ar = ab->pdevs[pdev_id].ar;
- if (!ath12k_dp_process_rx_err_buf(ar, reo_desc, drop,
+ if (!ath12k_dp_process_rx_err_buf(ar, reo_desc,
+ &rx_desc_used_list,
+ drop,
msdu_cookies[i]))
tot_n_bufs_reaped++;
}
@@ -3478,7 +3516,8 @@ exit:
rx_ring = &dp->rx_refill_buf_ring;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, tot_n_bufs_reaped);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
+ tot_n_bufs_reaped);
return tot_n_bufs_reaped;
}
@@ -3695,25 +3734,27 @@ static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
struct napi_struct *napi, int budget)
{
+ LIST_HEAD(rx_desc_used_list);
struct ath12k *ar;
struct ath12k_dp *dp = &ab->dp;
struct dp_rxdma_ring *rx_ring;
struct hal_rx_wbm_rel_info err_info;
struct hal_srng *srng;
struct sk_buff *msdu;
- struct sk_buff_head msdu_list;
+ struct sk_buff_head msdu_list, scatter_msdu_list;
struct ath12k_skb_rxcb *rxcb;
void *rx_desc;
u8 mac_id;
int num_buffs_reaped = 0;
struct ath12k_rx_desc_info *desc_info;
int ret, pdev_id;
+ struct hal_rx_desc *msdu_data;
__skb_queue_head_init(&msdu_list);
+ __skb_queue_head_init(&scatter_msdu_list);
srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
rx_ring = &dp->rx_refill_buf_ring;
-
spin_lock_bh(&srng->lock);
ath12k_hal_srng_access_begin(ab, srng);
@@ -3737,7 +3778,8 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
if (!desc_info) {
desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
if (!desc_info) {
- ath12k_warn(ab, "Invalid cookie in manual desc retrieval");
+ ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
+ err_info.cookie);
continue;
}
}
@@ -3748,9 +3790,7 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
msdu = desc_info->skb;
desc_info->skb = NULL;
- spin_lock_bh(&dp->rx_desc_lock);
- list_move_tail(&desc_info->list, &dp->rx_desc_free_list);
- spin_unlock_bh(&dp->rx_desc_lock);
+ list_add_tail(&desc_info->list, &rx_desc_used_list);
rxcb = ATH12K_SKB_RXCB(msdu);
dma_unmap_single(ab->dev, rxcb->paddr,
@@ -3768,17 +3808,53 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
continue;
}
+ msdu_data = (struct hal_rx_desc *)msdu->data;
rxcb->err_rel_src = err_info.err_rel_src;
rxcb->err_code = err_info.err_code;
- rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
-
- __skb_queue_tail(&msdu_list, msdu);
-
rxcb->is_first_msdu = err_info.first_msdu;
rxcb->is_last_msdu = err_info.last_msdu;
rxcb->is_continuation = err_info.continuation;
+ rxcb->rx_desc = msdu_data;
+
+ if (err_info.continuation) {
+ __skb_queue_tail(&scatter_msdu_list, msdu);
+ continue;
+ }
+
+ mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
+ msdu_data);
+ if (mac_id >= MAX_RADIOS) {
+ dev_kfree_skb_any(msdu);
+
+ /* If the continuation bit was set in the
+ * previous record, clean up scatter_msdu_list
+ */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+ continue;
+ }
+
+ if (!skb_queue_empty(&scatter_msdu_list)) {
+ struct sk_buff *msdu;
+
+ skb_queue_walk(&scatter_msdu_list, msdu) {
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->mac_id = mac_id;
+ }
+
+ skb_queue_splice_tail_init(&scatter_msdu_list,
+ &msdu_list);
+ }
+
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ rxcb->mac_id = mac_id;
+ __skb_queue_tail(&msdu_list, msdu);
}
+ /* If the continuation bit was set in the
+ * last record, clean up scatter_msdu_list
+ */
+ ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+
ath12k_hal_srng_access_end(ab, srng);
spin_unlock_bh(&srng->lock);
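
Continuation fragments are parked on scatter_msdu_list until the terminating
record arrives, since only that record yields a usable source link. The
mark-then-splice step could be factored as below; stamp_and_merge() is a
hypothetical helper using the same skb queue APIs:

	static void stamp_and_merge(struct sk_buff_head *scatter,
				    struct sk_buff_head *done, u8 mac_id)
	{
		struct sk_buff *skb;

		/* every parked fragment inherits the mac_id carried by
		 * the final, non-continuation record of the MSDU
		 */
		skb_queue_walk(scatter, skb)
			ATH12K_SKB_RXCB(skb)->mac_id = mac_id;

		skb_queue_splice_tail_init(scatter, done);
	}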
@@ -3786,12 +3862,14 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
if (!num_buffs_reaped)
goto done;
- ath12k_dp_rx_bufs_replenish(ab, rx_ring, num_buffs_reaped);
+ ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
+ num_buffs_reaped);
rcu_read_lock();
while ((msdu = __skb_dequeue(&msdu_list))) {
- mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
- (struct hal_rx_desc *)msdu->data);
+ rxcb = ATH12K_SKB_RXCB(msdu);
+ mac_id = rxcb->mac_id;
+
pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
ar = ab->pdevs[pdev_id].ar;
@@ -3897,7 +3975,7 @@ void ath12k_dp_rx_free(struct ath12k_base *ab)
ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
if (ab->hw_params->rx_mac_buf_ring)
ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
}
@@ -3906,7 +3984,6 @@ void ath12k_dp_rx_free(struct ath12k_base *ab)
ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
- ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring);
ath12k_dp_rxdma_buf_free(ab);
}
@@ -3964,7 +4041,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
struct ath12k_dp *dp = &ab->dp;
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 ring_id;
- int ret;
+ int ret = 0;
u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
int i;
@@ -3990,7 +4067,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
* and modify the rx_desc struct
*/
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
HAL_RXDMA_BUF,
@@ -4017,7 +4094,7 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
}
if (ab->hw_params->rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rx_mac_buf_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
i, HAL_RXDMA_BUF);
@@ -4049,15 +4126,6 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
ret);
return ret;
}
-
- ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
- ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
- 0, HAL_TX_MONITOR_BUF);
- if (ret) {
- ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
- ret);
- return ret;
- }
}
ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
@@ -4077,9 +4145,6 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
- idr_init(&dp->tx_mon_buf_ring.bufs_idr);
- spin_lock_init(&dp->tx_mon_buf_ring.idr_lock);
-
ret = ath12k_dp_srng_setup(ab,
&dp->rx_refill_buf_ring.refill_buf_ring,
HAL_RXDMA_BUF, 0, 0,
@@ -4090,7 +4155,7 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
}
if (ab->hw_params->rx_mac_buf_ring) {
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ret = ath12k_dp_srng_setup(ab,
&dp->rx_mac_buf_ring[i],
HAL_RXDMA_BUF, 1,
@@ -4122,15 +4187,6 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab)
ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
return ret;
}
-
- ret = ath12k_dp_srng_setup(ab,
- &dp->tx_mon_buf_ring.refill_buf_ring,
- HAL_TX_MONITOR_BUF, 0, 0,
- DP_TX_MONITOR_BUF_RING_SIZE);
- if (ret) {
- ath12k_warn(ab, "failed to setup DP_TX_MONITOR_BUF_RING_SIZE\n");
- return ret;
- }
}
ret = ath12k_dp_rxdma_buf_setup(ab);
@@ -4159,7 +4215,7 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
return ret;
}
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
mac_id + i,
@@ -4170,17 +4226,6 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
i, ret);
return ret;
}
-
- ring_id = dp->tx_mon_dst_ring[i].ring_id;
- ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
- mac_id + i,
- HAL_TX_MONITOR_DST);
- if (ret) {
- ath12k_warn(ab,
- "failed to configure tx_mon_dst_ring %d %d\n",
- i, ret);
- return ret;
- }
}
out:
return 0;
@@ -4224,29 +4269,3 @@ int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
return 0;
}
-
-int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab)
-{
- /* start reap timer */
- mod_timer(&ab->mon_reap_timer,
- jiffies + msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
-
- return 0;
-}
-
-int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer)
-{
- int ret;
-
- if (stop_timer)
- del_timer_sync(&ab->mon_reap_timer);
-
- /* reap all the monitor related rings */
- ret = ath12k_dp_purge_mon_ring(ab);
- if (ret) {
- ath12k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h
index 05b3d5581dbe..eb1f92559179 100644
--- a/drivers/net/wireless/ath/ath12k/dp_rx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_rx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_RX_H
#define ATH12K_DP_RX_H
@@ -118,12 +118,11 @@ int ath12k_dp_rx_process(struct ath12k_base *ab, int mac_id,
int budget);
int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
struct dp_rxdma_ring *rx_ring,
+ struct list_head *used_list,
int req_entries);
int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar);
int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id);
-int ath12k_dp_rx_pktlog_start(struct ath12k_base *ab);
-int ath12k_dp_rx_pktlog_stop(struct ath12k_base *ab, bool stop_timer);
u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
struct hal_rx_desc *desc);
struct ath12k_peer *
@@ -140,4 +139,8 @@ ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
+int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
+ int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
+ const void *ptr, void *data),
+ void *data);
#endif /* ATH12K_DP_RX_H */
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c
index 572b87153647..44406e0b4a34 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.c
@@ -124,6 +124,98 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
}
+#define HTT_META_DATA_ALIGNMENT 0x8
+
+static void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len)
+{
+ struct sk_buff *tail;
+ void *metadata;
+
+ if (unlikely(skb_cow_data(skb, tail_len, &tail) < 0))
+ return NULL;
+
+ metadata = pskb_put(skb, tail, tail_len);
+ memset(metadata, 0, tail_len);
+ return metadata;
+}
+
+/* Prepare HTT metadata when used with an ext MSDU */
+static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb)
+{
+ struct hal_tx_msdu_metadata *desc_ext;
+ u8 htt_desc_size;
+ /* Size rounded up to a multiple of 8 bytes */
+ u8 htt_desc_size_aligned;
+
+ htt_desc_size = sizeof(struct hal_tx_msdu_metadata);
+ htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT);
+
+ desc_ext = ath12k_dp_metadata_align_skb(skb, htt_desc_size_aligned);
+ if (!desc_ext)
+ return -ENOMEM;
+
+ desc_ext->info0 = le32_encode_bits(1, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG) |
+ le32_encode_bits(0, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE) |
+ le32_encode_bits(1,
+ HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL);
+
+ return 0;
+}
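
For reference, struct hal_tx_msdu_metadata is 28 bytes (info0 plus six
reserved words), so the tail reservation rounds up to 32:

	u8 len = ALIGN(sizeof(struct hal_tx_msdu_metadata),
		       HTT_META_DATA_ALIGNMENT);	/* 28 -> 32 bytes */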
+
+static void ath12k_dp_tx_move_payload(struct sk_buff *skb,
+ unsigned long delta,
+ bool head)
+{
+ unsigned long len = skb->len;
+
+ if (head) {
+ skb_push(skb, delta);
+ memmove(skb->data, skb->data + delta, len);
+ skb_trim(skb, len);
+ } else {
+ skb_put(skb, delta);
+ memmove(skb->data + delta, skb->data, len);
+ skb_pull(skb, delta);
+ }
+}
+
+static int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
+ struct sk_buff **pskb)
+{
+ u32 iova_mask = ab->hw_params->iova_mask;
+ unsigned long offset, delta1, delta2;
+ struct sk_buff *skb2, *skb = *pskb;
+ unsigned int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+ int ret = 0;
+
+ offset = (unsigned long)skb->data & iova_mask;
+ delta1 = offset;
+ delta2 = iova_mask - offset + 1;
+
+ if (headroom >= delta1) {
+ ath12k_dp_tx_move_payload(skb, delta1, true);
+ } else if (tailroom >= delta2) {
+ ath12k_dp_tx_move_payload(skb, delta2, false);
+ } else {
+ skb2 = skb_realloc_headroom(skb, iova_mask);
+ if (!skb2) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dev_kfree_skb_any(skb);
+
+ offset = (unsigned long)skb2->data & iova_mask;
+ if (offset)
+ ath12k_dp_tx_move_payload(skb2, offset, true);
+ *pskb = skb2;
+ }
+
+out:
+ return ret;
+}
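
A worked example with illustrative numbers: on WCN7850, iova_mask is
ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1 = 127, so for an skb->data address ending
in 0x2c:

	unsigned long offset = 0x2c & 127;	/* 44: straddles a 128 B boundary */
	unsigned long delta1 = offset;		/* 44: shift left if headroom fits */
	unsigned long delta2 = 128 - offset;	/* 84: shift right if tailroom fits */

Only if neither direction fits is the skb reallocated via
skb_realloc_headroom().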
+
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
struct sk_buff *skb)
{
@@ -145,6 +237,8 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
u8 ring_selector, ring_map = 0;
bool tcl_ring_retry;
bool msdu_ext_desc = false;
+ bool add_htt_metadata = false;
+ u32 iova_mask = ab->hw_params->iova_mask;
if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
return -ESHUTDOWN;
@@ -240,6 +334,23 @@ tcl_ring_sel:
goto fail_remove_tx_buf;
}
+ if (iova_mask &&
+ (unsigned long)skb->data & iova_mask) {
+ ret = ath12k_dp_tx_align_payload(ab, &skb);
+ if (ret) {
+ ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
+ /* don't bail out; give the original buffer a
+ * chance even though it is unaligned.
+ */
+ goto map;
+ }
+
+ /* hdr points to a stale location after the payload
+ * move, so refresh it for later use.
+ */
+ hdr = (void *)skb->data;
+ }
+map:
ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, ti.paddr)) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
@@ -248,6 +359,18 @@ tcl_ring_sel:
goto fail_remove_tx_buf;
}
+ if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+ !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
+ !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+ ieee80211_has_protected(hdr->frame_control)) {
+ /* Add metadata for sw encrypted vlan group traffic */
+ add_htt_metadata = true;
+ msdu_ext_desc = true;
+ ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
+ ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
+ ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+ }
+
tx_desc->skb = skb;
tx_desc->mac_id = ar->pdev_idx;
ti.desc_id = tx_desc->desc_id;
@@ -269,6 +392,15 @@ tcl_ring_sel:
msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
+ if (add_htt_metadata) {
+ ret = ath12k_dp_prepare_htt_metadata(skb_ext_desc);
+ if (ret < 0) {
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+ "Failed to add HTT meta data, dropping packet\n");
+ goto fail_unmap_dma;
+ }
+ }
+
ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
skb_ext_desc->len, DMA_TO_DEVICE);
ret = dma_mapping_error(ab->dev, ti.paddr);
@@ -352,15 +484,15 @@ static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
skb_cb = ATH12K_SKB_CB(msdu);
+ ar = ab->pdevs[pdev_id].ar;
dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
if (skb_cb->paddr_ext_desc)
dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ar->ah->hw, msdu);
- ar = ab->pdevs[pdev_id].ar;
if (atomic_dec_and_test(&ar->dp.num_tx_pending))
wake_up(&ar->dp.tx_empty_waitq);
}
@@ -393,8 +525,12 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
if (ts->acked) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
+ info->status.ack_signal = ts->ack_rssi;
+
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;
+
info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
} else {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
@@ -414,7 +550,7 @@ ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
struct ath12k_dp_htt_wbm_tx_status ts = {0};
enum hal_wbm_htt_tx_comp_status wbm_status;
- status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
+ status_desc = desc;
wbm_status = le32_get_bits(status_desc->info0,
HTT_TX_WBM_COMP_INFO0_STATUS);
@@ -448,6 +584,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
struct hal_tx_status *ts)
{
struct ath12k_base *ab = ar->ab;
+ struct ath12k_hw *ah = ar->ah;
struct ieee80211_tx_info *info;
struct ath12k_skb_cb *skb_cb;
@@ -466,12 +603,12 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
rcu_read_lock();
if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ah->hw, msdu);
goto exit;
}
if (!skb_cb->vif) {
- dev_kfree_skb_any(msdu);
+ ieee80211_free_txskb(ah->hw, msdu);
goto exit;
}
@@ -481,17 +618,39 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
/* skip tx rate update from ieee80211_status */
info->status.rates[0].idx = -1;
- if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
- !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
- ts->ack_rssi;
- info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
- }
+ switch (ts->status) {
+ case HAL_WBM_TQM_REL_REASON_FRAME_ACKED:
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->status.ack_signal = ts->ack_rssi;
- if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
- (info->flags & IEEE80211_TX_CTL_NO_ACK))
- info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+ ab->wmi_ab.svc_map))
+ info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR;
+
+ info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
+ }
+ break;
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX:
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ break;
+ }
+ fallthrough;
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU:
+ case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD:
+ case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES:
+ /* These statuses indicate an internal firmware tx
+ * failure; drop the frame and do not report its
+ * status to the upper layer
+ */
+ ieee80211_free_txskb(ah->hw, msdu);
+ goto exit;
+ default:
+ ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n",
+ ts->status);
+ break;
+ }
/* NOTE: Tx rate status reporting. Tx completion status does not have
* necessary information (for example nss) to build the tx rate.
@@ -669,14 +828,6 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
*htt_ring_type = HTT_SW_TO_HW_RING;
break;
- case HAL_TX_MONITOR_BUF:
- *htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
- *htt_ring_type = HTT_SW_TO_HW_RING;
- break;
- case HAL_TX_MONITOR_DST:
- *htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
- *htt_ring_type = HTT_HW_TO_SW_RING;
- break;
default:
ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
ret = -EINVAL;
@@ -854,7 +1005,7 @@ int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
int ret;
int i;
- for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
+ for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
return -ENOMEM;
@@ -1007,6 +1158,7 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
struct htt_ext_stats_cfg_cmd *cmd;
int len = sizeof(*cmd);
int ret;
+ u32 pdev_id;
skb = ath12k_htc_alloc_skb(ab, len);
if (!skb)
@@ -1018,7 +1170,8 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
memset(cmd, 0, sizeof(*cmd));
cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
- cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
+ pdev_id = ath12k_mac_get_target_pdev_id(ar);
+ cmd->hdr.pdev_mask = 1 << pdev_id;
cmd->hdr.stats_type = type;
cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
@@ -1044,13 +1197,7 @@ int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
struct ath12k_base *ab = ar->ab;
int ret;
- ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
- if (ret) {
- ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret);
- return ret;
- }
-
- ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
+ ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
if (ret) {
ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
return ret;
@@ -1209,31 +1356,3 @@ err_free:
dev_kfree_skb_any(skb);
return ret;
}
-
-int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
-{
- struct ath12k_base *ab = ar->ab;
- struct ath12k_dp *dp = &ab->dp;
- struct htt_tx_ring_tlv_filter tlv_filter = {0};
- int ret, ring_id;
-
- ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;
-
- /* TODO: Need to set upstream/downstream tlv filters
- * here
- */
-
- if (ab->hw_params->rxdma1_enable) {
- ret = ath12k_dp_tx_htt_tx_filter_setup(ar->ab, ring_id, 0,
- HAL_TX_MONITOR_BUF,
- DP_RXDMA_REFILL_RING_SIZE,
- &tlv_filter);
- if (ret) {
- ath12k_err(ab,
- "failed to setup filter for monitor buf %d\n", ret);
- return ret;
- }
- }
-
- return 0;
-}
diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h
index 436d77e5e9ee..55ff8cc721e3 100644
--- a/drivers/net/wireless/ath/ath12k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath12k/dp_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_DP_TX_H
@@ -12,7 +12,7 @@
struct ath12k_dp_htt_wbm_tx_status {
bool acked;
- int ack_rssi;
+ s8 ack_rssi;
};
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
@@ -36,6 +36,5 @@ int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
int mac_id, enum hal_ring_type ring_type,
int tx_buf_size,
struct htt_tx_ring_tlv_filter *htt_tlv_filter);
-int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset);
int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset);
#endif
diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c
index 78310da8cfe8..ca04bfae8bdc 100644
--- a/drivers/net/wireless/ath/ath12k/hal.c
+++ b/drivers/net/wireless/ath/ath12k/hal.c
@@ -1969,14 +1969,15 @@ u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc
}
void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
- dma_addr_t paddr)
+ dma_addr_t paddr,
+ enum hal_rx_buf_return_buf_manager rbm)
{
desc->buf_addr_info.info0 = le32_encode_bits((paddr & HAL_ADDR_LSB_REG_MASK),
BUFFER_ADDR_INFO0_ADDR);
desc->buf_addr_info.info1 =
le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT),
BUFFER_ADDR_INFO1_ADDR) |
- le32_encode_bits(1, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
+ le32_encode_bits(rbm, BUFFER_ADDR_INFO1_RET_BUF_MGR) |
le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE);
}
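
Callers now pass the return buffer manager explicitly; on multi-device
configurations the per-device idle list is chosen via dp->idle_link_rbm (set
elsewhere in this series) instead of the previously hard-coded value 1. An
illustrative call site:

	ath12k_hal_set_link_desc_addr(desc, cookie, paddr,
				      ab->dp.idle_link_rbm);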
diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h
index 107927d64bbb..8a78bb9a10bc 100644
--- a/drivers/net/wireless/ath/ath12k/hal.h
+++ b/drivers/net/wireless/ath/ath12k/hal.h
@@ -767,15 +767,15 @@ struct hal_srng_config {
};
/**
- * enum hal_rx_buf_return_buf_manager
+ * enum hal_rx_buf_return_buf_manager - manager for returned rx buffers
*
* @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list
- * @HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST: Descriptor returned to WBM idle
- * descriptor list, where the chip 0 WBM is chosen in case of a multi-chip config
- * @HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST: Descriptor returned to WBM idle
- * descriptor list, where the chip 1 WBM is chosen in case of a multi-chip config
- * @HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST: Descriptor returned to WBM idle
- * descriptor list, where the chip 2 WBM is chosen in case of a multi-chip config
+ * @HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the device 0 WBM is chosen in case of a multi-device config
+ * @HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the device 1 WBM is chosen in case of a multi-device config
+ * @HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST: Descriptor returned to WBM idle
+ * descriptor list, where the device 2 WBM is chosen in case of a multi-device config
* @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW
* @HAL_RX_BUF_RBM_SW0_BM: For ring 0 -- returned to host
* @HAL_RX_BUF_RBM_SW1_BM: For ring 1 -- returned to host
@@ -788,9 +788,9 @@ struct hal_srng_config {
enum hal_rx_buf_return_buf_manager {
HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST,
- HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST,
- HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST,
- HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST,
+ HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST,
HAL_RX_BUF_RBM_FW_BM,
HAL_RX_BUF_RBM_SW0_BM,
HAL_RX_BUF_RBM_SW1_BM,
@@ -1113,7 +1113,8 @@ dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab,
dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab,
struct hal_srng *srng);
void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
- dma_addr_t paddr);
+ dma_addr_t paddr,
+ enum hal_rx_buf_return_buf_manager rbm);
u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type);
void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr,
u32 len, u32 id, u8 byte_swap_data);
diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h
index 63340256d3f6..739f73370015 100644
--- a/drivers/net/wireless/ath/ath12k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath12k/hal_desc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
@@ -597,8 +597,30 @@ struct hal_tlv_64_hdr {
#define RX_MPDU_DESC_INFO0_MPDU_QOS_CTRL_VALID BIT(27)
#define RX_MPDU_DESC_INFO0_TID GENMASK(31, 28)
-/* TODO revisit after meta data is concluded */
-#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0)
+/* Peer Metadata classification */
+
+/* Version 0 */
+#define RX_MPDU_DESC_META_DATA_V0_PEER_ID GENMASK(15, 0)
+#define RX_MPDU_DESC_META_DATA_V0_VDEV_ID GENMASK(23, 16)
+
+/* Version 1 */
+#define RX_MPDU_DESC_META_DATA_V1_PEER_ID GENMASK(13, 0)
+#define RX_MPDU_DESC_META_DATA_V1_LOGICAL_LINK_ID GENMASK(15, 14)
+#define RX_MPDU_DESC_META_DATA_V1_VDEV_ID GENMASK(23, 16)
+#define RX_MPDU_DESC_META_DATA_V1_LMAC_ID GENMASK(25, 24)
+#define RX_MPDU_DESC_META_DATA_V1_DEVICE_ID GENMASK(28, 26)
+
+/* Version 1A */
+#define RX_MPDU_DESC_META_DATA_V1A_PEER_ID GENMASK(13, 0)
+#define RX_MPDU_DESC_META_DATA_V1A_VDEV_ID GENMASK(21, 14)
+#define RX_MPDU_DESC_META_DATA_V1A_LOGICAL_LINK_ID GENMASK(25, 22)
+#define RX_MPDU_DESC_META_DATA_V1A_DEVICE_ID GENMASK(28, 26)
+
+/* Version 1B */
+#define RX_MPDU_DESC_META_DATA_V1B_PEER_ID GENMASK(13, 0)
+#define RX_MPDU_DESC_META_DATA_V1B_VDEV_ID GENMASK(21, 14)
+#define RX_MPDU_DESC_META_DATA_V1B_HW_LINK_ID GENMASK(25, 22)
+#define RX_MPDU_DESC_META_DATA_V1B_DEVICE_ID GENMASK(28, 26)
struct rx_mpdu_desc {
__le32 info0; /* %RX_MPDU_DESC_INFO */
@@ -2048,6 +2070,19 @@ struct hal_wbm_release_ring {
* fw with fw_reason2.
* @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by
* fw with fw_reason3.
+ * @HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE: Remove command initiated by
+ * fw with disable queue.
+ * @HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING: Remove command initiated by
+ * fw to remove all mpdu until 1st non-match.
+ * @HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD: Dropped due to drop threshold
+ * criteria
+ * @HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL: Dropped due to link desc
+ * not available
+ * @HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU: Dropped due to the drop bit
+ * being set or a null flow
+ * @HAL_WBM_TQM_REL_REASON_MULTICAST_DROP: Dropped due to the mcast drop bit
+ * being set for the VDEV
+ * @HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP: Dropped due to being set with
+ * 'TCL_drop_reason'
*/
enum hal_wbm_tqm_rel_reason {
HAL_WBM_TQM_REL_REASON_FRAME_ACKED,
@@ -2058,6 +2093,13 @@ enum hal_wbm_tqm_rel_reason {
HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1,
HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2,
HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3,
+ HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE,
+ HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING,
+ HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD,
+ HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL,
+ HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU,
+ HAL_WBM_TQM_REL_REASON_MULTICAST_DROP,
+ HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP,
};
struct hal_wbm_buffer_ring {
@@ -2964,4 +3006,29 @@ struct hal_mon_dest_desc {
* updated by SRNG.
*/
+#define HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG BIT(8)
+#define HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE GENMASK(16, 15)
+#define HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL BIT(31)
+
+struct hal_tx_msdu_metadata {
+ __le32 info0;
+ __le32 rsvd0[6];
+} __packed;
+
+/* hal_tx_msdu_metadata
+ * valid_encrypt_type
+ * if set, encrypt type is valid
+ * encrypt_type
+ * 0 = NO_ENCRYPT,
+ * 1 = ENCRYPT,
+ * 2 ~ 3 - Reserved
+ * host_tx_desc_pool
+ * If set, firmware allocates tx_descriptors
+ * in WAL_BUFFERID_TX_HOST_DATA_EXP, instead
+ * of WAL_BUFFERID_TX_TCL_DATA_EXP.
+ * Use cases:
+ * Any time firmware uses TQM-BYPASS for a data
+ * TID, firmware expects the host to set this bit.
+ */
+
#endif /* ATH12K_HAL_DESC_H */
diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.h b/drivers/net/wireless/ath/ath12k/hal_tx.h
index 7c837094a6f7..3cf5973771d7 100644
--- a/drivers/net/wireless/ath/ath12k/hal_tx.h
+++ b/drivers/net/wireless/ath/ath12k/hal_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HAL_TX_H
@@ -57,7 +57,7 @@ struct hal_tx_info {
struct hal_tx_status {
enum hal_wbm_rel_src_module buf_rel_source;
enum hal_wbm_tqm_rel_reason status;
- u8 ack_rssi;
+ s8 ack_rssi;
u32 flags; /* %HAL_TX_STATUS_FLAGS_ */
u32 ppdu_id;
u8 try_cnt;
diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h
index c653ca1f59b2..0e53ec269fa4 100644
--- a/drivers/net/wireless/ath/ath12k/hif.h
+++ b/drivers/net/wireless/ath/ath12k/hif.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef ATH12K_HIF_H
@@ -17,7 +17,7 @@ struct ath12k_hif_ops {
int (*start)(struct ath12k_base *ab);
void (*stop)(struct ath12k_base *ab);
int (*power_up)(struct ath12k_base *ab);
- void (*power_down)(struct ath12k_base *ab);
+ void (*power_down)(struct ath12k_base *ab, bool is_suspend);
int (*suspend)(struct ath12k_base *ab);
int (*resume)(struct ath12k_base *ab);
int (*map_service_to_pipe)(struct ath12k_base *ab, u16 service_id,
@@ -30,6 +30,7 @@ struct ath12k_hif_ops {
void (*ce_irq_enable)(struct ath12k_base *ab);
void (*ce_irq_disable)(struct ath12k_base *ab);
void (*get_ce_msi_idx)(struct ath12k_base *ab, u32 ce_id, u32 *msi_idx);
+ int (*panic_handler)(struct ath12k_base *ab);
};
static inline int ath12k_hif_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
@@ -133,12 +134,26 @@ static inline void ath12k_hif_write32(struct ath12k_base *ab, u32 address,
static inline int ath12k_hif_power_up(struct ath12k_base *ab)
{
+ if (!ab->hif.ops->power_up)
+ return -EOPNOTSUPP;
+
return ab->hif.ops->power_up(ab);
}
-static inline void ath12k_hif_power_down(struct ath12k_base *ab)
+static inline void ath12k_hif_power_down(struct ath12k_base *ab, bool is_suspend)
+{
+ if (!ab->hif.ops->power_down)
+ return;
+
+ ab->hif.ops->power_down(ab, is_suspend);
+}
+
+static inline int ath12k_hif_panic_handler(struct ath12k_base *ab)
{
- ab->hif.ops->power_down(ab);
+ if (!ab->hif.ops->panic_handler)
+ return NOTIFY_DONE;
+
+ return ab->hif.ops->panic_handler(ab);
}
#endif /* ATH12K_HIF_H */
diff --git a/drivers/net/wireless/ath/ath12k/htc.c b/drivers/net/wireless/ath/ath12k/htc.c
index 23f7428abd95..d13616bf07f4 100644
--- a/drivers/net/wireless/ath/ath12k/htc.c
+++ b/drivers/net/wireless/ath/ath12k/htc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include <linux/skbuff.h>
#include <linux/ctype.h>
@@ -244,6 +244,11 @@ static void ath12k_htc_suspend_complete(struct ath12k_base *ab, bool ack)
complete(&ab->htc_suspend);
}
+static void ath12k_htc_wakeup_from_suspend(struct ath12k_base *ab)
+{
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot wakeup from suspend is received\n");
+}
+
void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
struct sk_buff *skb)
{
@@ -349,6 +354,7 @@ void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
ath12k_htc_suspend_complete(ab, false);
break;
case ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID:
+ ath12k_htc_wakeup_from_suspend(ab);
break;
default:
ath12k_warn(ab, "ignoring unsolicited htc ep0 event %u\n",
@@ -358,7 +364,7 @@ void ath12k_htc_rx_completion_handler(struct ath12k_base *ab,
goto out;
}
- ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
+ ath12k_dbg(ab, ATH12K_DBG_HTC, "htc rx completion ep %d skb %p\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ab, skb);
diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c
index 0b17dfd47856..7b0b6a7f4701 100644
--- a/drivers/net/wireless/ath/ath12k/hw.c
+++ b/drivers/net/wireless/ath/ath12k/hw.c
@@ -15,6 +15,10 @@
#include "mhi.h"
#include "dp_rx.h"
+static const guid_t wcn7850_uuid = GUID_INIT(0xf634f534, 0x6147, 0x11ec,
+ 0x90, 0xd6, 0x02, 0x42,
+ 0xac, 0x12, 0x00, 0x03);
+
static u8 ath12k_hw_qcn9274_mac_from_pdev_id(int pdev_idx)
{
return pdev_idx;
@@ -540,9 +544,6 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
},
.rx_mon_dest = {
0, 0, 0,
- ATH12K_RX_MON_RING_MASK_0,
- ATH12K_RX_MON_RING_MASK_1,
- ATH12K_RX_MON_RING_MASK_2,
},
.rx = {
0, 0, 0, 0,
@@ -568,16 +569,15 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = {
ATH12K_HOST2RXDMA_RING_MASK_0,
},
.tx_mon_dest = {
- ATH12K_TX_MON_RING_MASK_0,
- ATH12K_TX_MON_RING_MASK_1,
+ 0, 0, 0,
},
};
static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = {
.tx = {
ATH12K_TX_RING_MASK_0,
+ ATH12K_TX_RING_MASK_1,
ATH12K_TX_RING_MASK_2,
- ATH12K_TX_RING_MASK_4,
},
.rx_mon_dest = {
},
@@ -880,14 +880,15 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_qcn9274,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.num_rxdma_dst_ring = 0,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT),
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_AP_VLAN),
.supports_monitor = false,
.idle_ps = false,
@@ -920,6 +921,11 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
.supports_sta_ps = false,
+
+ .acpi_guid = NULL,
+ .supports_dynamic_smps_6ghz = true,
+
+ .iova_mask = 0,
},
{
.name = "wcn7850 hw2.0",
@@ -950,7 +956,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_wcn7850,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 2,
+ .num_rxdma_per_pdev = 2,
.num_rxdma_dst_ring = 1,
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
@@ -964,7 +970,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.idle_ps = true,
.download_calib = false,
- .supports_suspend = false,
+ .supports_suspend = true,
.tcl_ring_retry = false,
.reoq_lut_support = false,
.supports_shadow_regs = true,
@@ -993,6 +999,11 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.otp_board_id_register = 0,
.supports_sta_ps = true,
+
+ .acpi_guid = &wcn7850_uuid,
+ .supports_dynamic_smps_6ghz = false,
+
+ .iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1,
},
{
.name = "qcn9274 hw2.0",
@@ -1021,14 +1032,15 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.hal_params = &ath12k_hw_hal_params_qcn9274,
.rxdma1_enable = false,
- .num_rxmda_per_pdev = 1,
+ .num_rxdma_per_pdev = 1,
.num_rxdma_dst_ring = 0,
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_MESH_POINT),
+ BIT(NL80211_IFTYPE_MESH_POINT) |
+ BIT(NL80211_IFTYPE_AP_VLAN),
.supports_monitor = false,
.idle_ps = false,
@@ -1061,6 +1073,11 @@ static const struct ath12k_hw_params ath12k_hw_params[] = {
.otp_board_id_register = QCN9274_QFPROM_RAW_RFA_PDET_ROW13_LSB,
.supports_sta_ps = false,
+
+ .acpi_guid = NULL,
+ .supports_dynamic_smps_6ghz = true,
+
+ .iova_mask = 0,
},
};
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index 87965980b938..b1d302c48326 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -8,6 +8,7 @@
#define ATH12K_HW_H
#include <linux/mhi.h>
+#include <linux/uuid.h>
#include "wmi.h"
#include "hal.h"
@@ -77,9 +78,9 @@
#define TARGET_NUM_WDS_ENTRIES 32
#define TARGET_DMA_BURST_SIZE 1
#define TARGET_RX_BATCHMODE 1
-#define TARGET_RX_PEER_METADATA_VER_V1A 2
-#define TARGET_RX_PEER_METADATA_VER_V1B 3
+#define TARGET_EMA_MAX_PROFILE_PERIOD 8
+#define ATH12K_HW_DEFAULT_QUEUE 0
#define ATH12K_HW_MAX_QUEUES 4
#define ATH12K_QUEUE_LEN 4096
@@ -95,6 +96,8 @@
#define ATH12K_M3_FILE "m3.bin"
#define ATH12K_REGDB_FILE_NAME "regdb.bin"
+#define ATH12K_PCIE_MAX_PAYLOAD_SIZE 128
+
enum ath12k_hw_rate_cck {
ATH12K_HW_RATE_CCK_LP_11M = 0,
ATH12K_HW_RATE_CCK_LP_5_5M,
@@ -172,7 +175,7 @@ struct ath12k_hw_params {
const struct ath12k_hw_hal_params *hal_params;
bool rxdma1_enable:1;
- int num_rxmda_per_pdev;
+ int num_rxdma_per_pdev;
int num_rxdma_dst_ring;
bool rx_mac_buf_ring:1;
bool vdev_start_delay:1;
@@ -211,6 +214,11 @@ struct ath12k_hw_params {
u32 otp_board_id_register;
bool supports_sta_ps;
+
+ const guid_t *acpi_guid;
+ bool supports_dynamic_smps_6ghz;
+
+ u32 iova_mask;
};
struct ath12k_hw_ops {
diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c
index 52a5fb8b03e9..ce41c8153080 100644
--- a/drivers/net/wireless/ath/ath12k/mac.c
+++ b/drivers/net/wireless/ath/ath12k/mac.c
@@ -6,6 +6,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
+
#include "mac.h"
#include "core.h"
#include "debug.h"
@@ -14,6 +15,9 @@
#include "dp_tx.h"
#include "dp_rx.h"
#include "peer.h"
+#include "debugfs.h"
+#include "hif.h"
+#include "wow.h"
#define CHAN2G(_channel, _freq, _flags) { \
.band = NL80211_BAND_2GHZ, \
@@ -90,6 +94,10 @@ static const struct ieee80211_channel ath12k_5ghz_channels[] = {
};
static const struct ieee80211_channel ath12k_6ghz_channels[] = {
+ /* Operating Class 136 */
+ CHAN6G(2, 5935, 0),
+
+ /* Operating Classes 131-135 */
CHAN6G(1, 5955, 0),
CHAN6G(5, 5975, 0),
CHAN6G(9, 5995, 0),
@@ -243,6 +251,9 @@ static const u32 ath12k_smps_map[] = {
static int ath12k_start_vdev_delay(struct ath12k *ar,
struct ath12k_vif *arvif);
+static void ath12k_mac_stop(struct ath12k *ar);
+static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif);
+static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ieee80211_vif *vif);
static const char *ath12k_mac_phymode_str(enum wmi_phy_mode mode)
{
@@ -530,7 +541,8 @@ static void ath12k_get_arvif_iter(void *data, u8 *mac,
struct ath12k_vif_iter *arvif_iter = data;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- if (arvif->vdev_id == arvif_iter->vdev_id)
+ if (arvif->vdev_id == arvif_iter->vdev_id &&
+ arvif->ar == arvif_iter->ar)
arvif_iter->arvif = arvif;
}
@@ -540,6 +552,7 @@ struct ath12k_vif *ath12k_mac_get_arvif(struct ath12k *ar, u32 vdev_id)
u32 flags;
arvif_iter.vdev_id = vdev_id;
+ arvif_iter.ar = ar;
flags = IEEE80211_IFACE_ITER_RESUME_ALL;
ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
@@ -613,6 +626,129 @@ struct ath12k *ath12k_mac_get_ar_by_pdev_id(struct ath12k_base *ab, u32 pdev_id)
return NULL;
}
+static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw,
+ struct ieee80211_channel *channel)
+{
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k *ar;
+ int i;
+
+ ar = ah->radio;
+
+ if (ah->num_radio == 1)
+ return ar;
+
+ for_each_ar(ah, ar, i) {
+ if (channel->center_freq >= ar->freq_low &&
+ channel->center_freq <= ar->freq_high)
+ return ar;
+ }
+ return NULL;
+}
+
+static struct ath12k *ath12k_get_ar_by_ctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ if (!ctx)
+ return NULL;
+
+ return ath12k_mac_get_ar_by_chan(hw, ctx->def.chan);
+}
+
+static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+
+ /* If there is only one pdev within ah, return
+ * ar directly.
+ */
+ if (ah->num_radio == 1)
+ return ah->radio;
+
+ if (arvif->is_created)
+ return arvif->ar;
+
+ return NULL;
+}
+
+static struct ath12k_vif *ath12k_mac_get_vif_up(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+
+ lockdep_assert_held(&ar->conf_mutex);
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->is_up)
+ return arvif;
+ }
+
+ return NULL;
+}
+
+static bool ath12k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2)
+{
+ switch (band1) {
+ case NL80211_BAND_2GHZ:
+ if (band2 & WMI_HOST_WLAN_2G_CAP)
+ return true;
+ break;
+ case NL80211_BAND_5GHZ:
+ case NL80211_BAND_6GHZ:
+ if (band2 & WMI_HOST_WLAN_5G_CAP)
+ return true;
+ break;
+ default:
+ return false;
+ }
+
+ return false;
+}
+
+static u8 ath12k_mac_get_target_pdev_id_from_vif(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_vif *vif = arvif->vif;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 pdev_id = ab->fw_pdev[0].pdev_id;
+ int i;
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return pdev_id;
+
+ band = def.chan->band;
+
+ for (i = 0; i < ab->fw_pdev_count; i++) {
+ if (ath12k_mac_band_match(band, ab->fw_pdev[i].supported_bands))
+ return ab->fw_pdev[i].pdev_id;
+ }
+
+ return pdev_id;
+}
+
+u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+ struct ath12k_base *ab = ar->ab;
+
+ if (!ab->hw_params->single_pdev_only)
+ return ar->pdev->pdev_id;
+
+ arvif = ath12k_mac_get_vif_up(ar);
+
+	/* The fw_pdev array has pdev ids derived from the phy capability
+	 * service ready event (pdev_and_hw_link_ids).
+	 * If no vif is active, return the default, i.e. the first index.
+	 */
+ if (!arvif)
+ return ar->ab->fw_pdev[0].pdev_id;
+
+ /* If active vif is found, return the pdev id matching chandef band */
+ return ath12k_mac_get_target_pdev_id_from_vif(arvif);
+}
+
static void ath12k_pdev_caps_update(struct ath12k *ar)
{
struct ath12k_base *ab = ar->ab;
@@ -810,9 +946,12 @@ static int ath12k_mac_vdev_setup_sync(struct ath12k *ar)
static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id)
{
+ struct ath12k_wmi_vdev_up_params params = {};
int ret;
- ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ params.vdev_id = vdev_id;
+ params.bssid = ar->mac_addr;
+ ret = ath12k_wmi_vdev_up(ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
@@ -829,6 +968,7 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id,
{
struct ieee80211_channel *channel;
struct wmi_vdev_start_req_arg arg = {};
+ struct ath12k_wmi_vdev_up_params params = {};
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -869,7 +1009,9 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id,
return ret;
}
- ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
+ params.vdev_id = vdev_id;
+ params.bssid = ar->mac_addr;
+ ret = ath12k_wmi_vdev_up(ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
@@ -1169,7 +1311,7 @@ static int ath12k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
struct ath12k *ar;
int ret;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, 0);
ret = ath12k_mac_config(ar, changed);
if (ret)
@@ -1236,37 +1378,188 @@ static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
return 0;
}
+static void ath12k_mac_set_arvif_ies(struct ath12k_vif *arvif, struct sk_buff *bcn,
+ u8 bssid_index, bool *nontx_profile_found)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)bcn->data;
+ const struct element *elem, *nontx, *index, *nie;
+ const u8 *start, *tail;
+ u16 rem_len;
+ u8 i;
+
+ start = bcn->data + ieee80211_get_hdrlen_from_skb(bcn) + sizeof(mgmt->u.beacon);
+ tail = skb_tail_pointer(bcn);
+ rem_len = tail - start;
+
+ arvif->rsnie_present = false;
+ arvif->wpaie_present = false;
+
+ if (cfg80211_find_ie(WLAN_EID_RSN, start, rem_len))
+ arvif->rsnie_present = true;
+ if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA,
+ start, rem_len))
+ arvif->wpaie_present = true;
+
+ /* Return from here for the transmitted profile */
+ if (!bssid_index)
+ return;
+
+	/* The initial rsnie_present for the nontransmitted profile is set to be
+	 * the same as that of the transmitted profile. It is changed below if
+	 * the security configurations differ.
+	 */
+ *nontx_profile_found = false;
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, rem_len) {
+		/* The fixed minimum MBSSID element length with at least one
+		 * nontransmitted BSSID profile is 12 bytes, as given below:
+		 * 1 (max BSSID indicator) +
+		 * 2 (Nontransmitted BSSID profile: subelement ID + length) +
+		 * 4 (Nontransmitted BSSID Capabilities: tag + length + info) +
+		 * 2 (Nontransmitted BSSID SSID: tag + length) +
+		 * 3 (Nontransmitted BSSID Index: tag + length + BSSID index)
+		 */
+ if (elem->datalen < 12 || elem->data[0] < 1)
+ continue; /* Max BSSID indicator must be >=1 */
+
+ for_each_element(nontx, elem->data + 1, elem->datalen - 1) {
+ start = nontx->data;
+
+ if (nontx->id != 0 || nontx->datalen < 4)
+ continue; /* Invalid nontransmitted profile */
+
+ if (nontx->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
+ nontx->data[1] != 2) {
+ continue; /* Missing nontransmitted BSS capabilities */
+ }
+
+ if (nontx->data[4] != WLAN_EID_SSID)
+ continue; /* Missing SSID for nontransmitted BSS */
+
+ index = cfg80211_find_elem(WLAN_EID_MULTI_BSSID_IDX,
+ start, nontx->datalen);
+ if (!index || index->datalen < 1 || index->data[0] == 0)
+ continue; /* Invalid MBSSID Index element */
+
+ if (index->data[0] == bssid_index) {
+ *nontx_profile_found = true;
+ if (cfg80211_find_ie(WLAN_EID_RSN,
+ nontx->data,
+ nontx->datalen)) {
+ arvif->rsnie_present = true;
+ return;
+ } else if (!arvif->rsnie_present) {
+ return; /* Both tx and nontx BSS are open */
+ }
+
+ nie = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+ nontx->data,
+ nontx->datalen);
+ if (!nie || nie->datalen < 2)
+ return; /* Invalid non-inheritance element */
+
+ for (i = 1; i < nie->datalen - 1; i++) {
+ if (nie->data[i] == WLAN_EID_RSN) {
+ arvif->rsnie_present = false;
+ break;
+ }
+ }
+
+ return;
+ }
+ }
+ }
+}
+
+static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_vif *arvif)
+{
+ struct ieee80211_bss_conf *bss_conf = &arvif->vif->bss_conf;
+ struct ath12k_wmi_bcn_tmpl_ema_arg ema_args;
+ struct ieee80211_ema_beacons *beacons;
+ struct ath12k_vif *tx_arvif;
+ bool nontx_profile_found = false;
+ int ret = 0;
+ u8 i;
+
+ tx_arvif = ath12k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
+ beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar),
+ tx_arvif->vif, 0);
+ if (!beacons || !beacons->cnt) {
+ ath12k_warn(arvif->ar->ab,
+ "failed to get ema beacon templates from mac80211\n");
+ return -EPERM;
+ }
+
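+	/* The EMA templates are owned by the transmitting vif; for a
+	 * nontransmitted vif only its profile IEs are parsed from them below.
+	 */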
+ if (tx_arvif == arvif)
+ ath12k_mac_set_arvif_ies(arvif, beacons->bcn[0].skb, 0, NULL);
+
+ for (i = 0; i < beacons->cnt; i++) {
+ if (tx_arvif != arvif && !nontx_profile_found)
+ ath12k_mac_set_arvif_ies(arvif, beacons->bcn[i].skb,
+ bss_conf->bssid_index,
+ &nontx_profile_found);
+
+ ema_args.bcn_cnt = beacons->cnt;
+ ema_args.bcn_index = i;
+ ret = ath12k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id,
+ &beacons->bcn[i].offs,
+ beacons->bcn[i].skb, &ema_args);
+ if (ret) {
+ ath12k_warn(tx_arvif->ar->ab,
+ "failed to set ema beacon template id %i error %d\n",
+ i, ret);
+ break;
+ }
+ }
+
+ if (tx_arvif != arvif && !nontx_profile_found)
+ ath12k_warn(arvif->ar->ab,
+ "nontransmitted bssid index %u not found in beacon template\n",
+ bss_conf->bssid_index);
+
+ ieee80211_beacon_free_ema_list(beacons);
+ return ret;
+}
+
static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
{
+ struct ath12k_vif *tx_arvif = arvif;
struct ath12k *ar = arvif->ar;
struct ath12k_base *ab = ar->ab;
- struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_mutable_offsets offs = {};
+ bool nontx_profile_found = false;
struct sk_buff *bcn;
- struct ieee80211_mgmt *mgmt;
- u8 *ies;
int ret;
if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
return 0;
- bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
+ if (vif->mbssid_tx_vif) {
+ tx_arvif = ath12k_vif_to_arvif(vif->mbssid_tx_vif);
+ if (tx_arvif != arvif && arvif->is_up)
+ return 0;
+
+ if (vif->bss_conf.ema_ap)
+ return ath12k_mac_setup_bcn_tmpl_ema(arvif);
+ }
+
+ bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_arvif->vif,
+ &offs, 0);
if (!bcn) {
ath12k_warn(ab, "failed to get beacon template from mac80211\n");
return -EPERM;
}
- ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn);
- ies += sizeof(mgmt->u.beacon);
-
- if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies)))
- arvif->rsnie_present = true;
-
- if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
- WLAN_OUI_TYPE_MICROSOFT_WPA,
- ies, (skb_tail_pointer(bcn) - ies)))
- arvif->wpaie_present = true;
+ if (tx_arvif == arvif) {
+ ath12k_mac_set_arvif_ies(arvif, bcn, 0, NULL);
+ } else {
+ ath12k_mac_set_arvif_ies(arvif, bcn,
+ arvif->vif->bss_conf.bssid_index,
+ &nontx_profile_found);
+ if (!nontx_profile_found)
+ ath12k_warn(ab,
+ "nontransmitted profile not found in beacon template\n");
+ }
if (arvif->vif->type == NL80211_IFTYPE_AP && arvif->vif->p2p) {
ret = ath12k_mac_setup_bcn_p2p_ie(arvif, bcn);
@@ -1291,7 +1584,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif)
}
}
- ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn);
+ ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, NULL);
if (ret)
ath12k_warn(ab, "failed to submit beacon template command: %d\n",
@@ -1305,6 +1598,7 @@ free_bcn_skb:
static void ath12k_control_beaconing(struct ath12k_vif *arvif,
struct ieee80211_bss_conf *info)
{
+ struct ath12k_wmi_vdev_up_params params = {};
struct ath12k *ar = arvif->ar;
int ret;
@@ -1332,8 +1626,15 @@ static void ath12k_control_beaconing(struct ath12k_vif *arvif,
ether_addr_copy(arvif->bssid, info->bssid);
- ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
- arvif->bssid);
+ params.vdev_id = arvif->vdev_id;
+ params.aid = arvif->aid;
+ params.bssid = arvif->bssid;
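+	/* For a nontransmitted profile, also pass the transmitting BSSID and
+	 * the profile index/count (2^bssid_indicator) to firmware.
+	 */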
+ if (arvif->vif->mbssid_tx_vif) {
+ params.tx_bssid = ath12k_vif_to_arvif(arvif->vif->mbssid_tx_vif)->bssid;
+ params.nontx_profile_idx = info->bssid_index;
+ params.nontx_profile_cnt = 1 << info->bssid_indicator;
+ }
+ ret = ath12k_wmi_vdev_up(arvif->ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n",
arvif->vdev_id, ret);
@@ -1345,6 +1646,75 @@ static void ath12k_control_beaconing(struct ath12k_vif *arvif,
ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}
+static void ath12k_mac_handle_beacon_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct sk_buff *skb = data;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
+ return;
+
+ cancel_delayed_work(&arvif->connection_loss_work);
+}
+
+void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb)
+{
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_mac_handle_beacon_iter,
+ skb);
+}
+
+static void ath12k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ u32 *vdev_id = data;
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k *ar = arvif->ar;
+ struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+
+ if (arvif->vdev_id != *vdev_id)
+ return;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_beacon_loss(vif);
+
+	/* Firmware doesn't report beacon loss events repeatedly. If AP probing
+	 * (done by mac80211) succeeds but beacons do not resume, then it
+	 * doesn't make sense to continue operation. Queue the connection loss
+	 * work, which is cancelled when a beacon is received.
+	 */
+ ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
+ ATH12K_CONNECTION_LOSS_HZ);
+}
+
+void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id)
+{
+ ieee80211_iterate_active_interfaces_atomic(ath12k_ar_to_hw(ar),
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath12k_mac_handle_beacon_miss_iter,
+ &vdev_id);
+}
+
+static void ath12k_mac_vif_sta_connection_loss_work(struct work_struct *work)
+{
+ struct ath12k_vif *arvif = container_of(work, struct ath12k_vif,
+ connection_loss_work.work);
+ struct ieee80211_vif *vif = arvif->vif;
+
+ if (!arvif->is_up)
+ return;
+
+ ieee80211_connection_loss(vif);
+}
+
static void ath12k_peer_assoc_h_basic(struct ath12k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -1759,7 +2129,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
{
const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
int i;
- u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss;
+ u8 ampdu_factor, max_nss;
+ u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
u16 mcs_160_map, mcs_80_map;
bool support_160;
u16 v;
@@ -1906,17 +2278,88 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar,
}
}
+static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ath12k_wmi_peer_assoc_arg *arg)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 ampdu_factor, mpdu_density;
+
+ if (WARN_ON(ath12k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa)
+ return;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+ arg->bw_40 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
+ arg->bw_320 = true;
+
+ arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa);
+
+ mpdu_density = u32_get_bits(arg->peer_he_caps_6ghz,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+ arg->peer_mpdu_density = ath12k_parse_mpdudensity(mpdu_density);
+
+ /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of
+ * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value
+ * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE
+ * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz
+ * Band Capabilities element in the 6 GHz band.
+ *
+	 * Here, we extract the Maximum A-MPDU Length Exponent Extension from
+	 * the HE caps; the factor is the Maximum A-MPDU Length Exponent from
+	 * the HE 6 GHz Band Capabilities element.
+ */
+ ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3],
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) +
+ u32_get_bits(arg->peer_he_caps_6ghz,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+
+ arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
+}
+
+static int ath12k_get_smps_from_capa(const struct ieee80211_sta_ht_cap *ht_cap,
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa,
+ int *smps)
+{
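+	/* HT caps carry SM PS for 2.4/5 GHz STAs; for 6 GHz (no HT) it comes
+	 * from the HE 6 GHz band capabilities instead.
+	 */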
+ if (ht_cap->ht_supported)
+ *smps = u16_get_bits(ht_cap->cap, IEEE80211_HT_CAP_SM_PS);
+ else
+ *smps = le16_get_bits(he_6ghz_capa->capa,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
+
+ if (*smps >= ARRAY_SIZE(ath12k_smps_map))
+ return -EINVAL;
+
+ return 0;
+}
+
static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta,
struct ath12k_wmi_peer_assoc_arg *arg)
{
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa = &sta->deflink.he_6ghz_capa;
const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
int smps;
- if (!ht_cap->ht_supported)
+ if (!ht_cap->ht_supported && !he_6ghz_capa->capa)
return;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ if (ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps))
+ return;
switch (smps) {
case WLAN_HT_CAP_SM_PS_STATIC:
@@ -2378,6 +2821,7 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar,
ath12k_peer_assoc_h_ht(ar, vif, sta, arg);
ath12k_peer_assoc_h_vht(ar, vif, sta, arg);
ath12k_peer_assoc_h_he(ar, vif, sta, arg);
+ ath12k_peer_assoc_h_he_6ghz(ar, vif, sta, arg);
ath12k_peer_assoc_h_eht(ar, vif, sta, arg);
ath12k_peer_assoc_h_qos(ar, vif, sta, arg);
ath12k_peer_assoc_h_phymode(ar, vif, sta, arg);
@@ -2388,18 +2832,17 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar,
static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif,
const u8 *addr,
- const struct ieee80211_sta_ht_cap *ht_cap)
+ const struct ieee80211_sta_ht_cap *ht_cap,
+ const struct ieee80211_he_6ghz_capa *he_6ghz_capa)
{
- int smps;
+ int smps, ret = 0;
- if (!ht_cap->ht_supported)
+ if (!ht_cap->ht_supported && !he_6ghz_capa)
return 0;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
-
- if (smps >= ARRAY_SIZE(ath12k_smps_map))
- return -EINVAL;
+ ret = ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps);
+ if (ret < 0)
+ return ret;
return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id,
WMI_PEER_MIMO_PS_STATE,
@@ -2411,6 +2854,7 @@ static void ath12k_bss_assoc(struct ath12k *ar,
struct ieee80211_bss_conf *bss_conf)
{
struct ieee80211_vif *vif = arvif->vif;
+ struct ath12k_wmi_vdev_up_params params = {};
struct ath12k_wmi_peer_assoc_arg peer_arg;
struct ieee80211_sta *ap_sta;
struct ath12k_peer *peer;
@@ -2450,7 +2894,8 @@ static void ath12k_bss_assoc(struct ath12k *ar,
}
ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid,
- &ap_sta->deflink.ht_cap);
+ &ap_sta->deflink.ht_cap,
+ &ap_sta->deflink.he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -2462,7 +2907,10 @@ static void ath12k_bss_assoc(struct ath12k *ar,
arvif->aid = vif->cfg.aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
- ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
+ params.vdev_id = arvif->vdev_id;
+ params.aid = arvif->aid;
+ params.bssid = arvif->bssid;
+ ret = ath12k_wmi_vdev_up(ar, &params);
if (ret) {
ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n",
arvif->vdev_id, ret);
@@ -2470,6 +2918,7 @@ static void ath12k_bss_assoc(struct ath12k *ar,
}
arvif->is_up = true;
+ arvif->rekey_data.enable_offload = false;
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
@@ -2517,7 +2966,9 @@ static void ath12k_bss_disassoc(struct ath12k *ar,
arvif->is_up = false;
- /* TODO: cancel connection_loss_work */
+ memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data));
+
+ cancel_delayed_work(&arvif->connection_loss_work);
}
static u32 ath12k_mac_get_rate_hw_value(int bitrate)
@@ -2961,16 +3412,42 @@ static void ath12k_mac_bss_info_changed(struct ath12k *ar,
}
}
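+/* Lazily allocate the per-vif cache used to stash configuration
+ * (keys, tx queue params, bss_conf changes) received before the
+ * vdev has been created on any radio.
+ */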
+static struct ath12k_vif_cache *ath12k_arvif_get_cache(struct ath12k_vif *arvif)
+{
+ if (!arvif->cache)
+ arvif->cache = kzalloc(sizeof(*arvif->cache), GFP_KERNEL);
+
+ return arvif->cache;
+}
+
+static void ath12k_arvif_put_cache(struct ath12k_vif *arvif)
+{
+ kfree(arvif->cache);
+ arvif->cache = NULL;
+}
+
static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
u64 changed)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif_cache *cache;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_vif(hw, vif);
+
+	/* If the vdev is not created on a certain radio yet,
+	 * cache the info to be applied later, on vdev creation.
+	 */
+
+ if (!ar) {
+ cache = ath12k_arvif_get_cache(arvif);
+ if (!cache)
+ return;
+ arvif->cache->bss_conf_changed |= changed;
+ return;
+ }
mutex_lock(&ar->conf_mutex);
@@ -2979,6 +3456,42 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
+static struct ath12k*
+ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 center_freq)
+{
+ struct ath12k_hw *ah = hw->priv;
+ enum nl80211_band band;
+ struct ath12k *ar;
+ int i;
+
+ if (ah->num_radio == 1)
+ return ah->radio;
+
+	/* Currently mac80211 supports splitting scan requests into
+	 * multiple scan requests per band.
+	 * Look at the first channel to determine the scan radio.
+	 * TODO: There could be 5 GHz low/high channels; in that case,
+	 * split the hw request and perform multiple scans.
+	 */
+
+ if (center_freq < ATH12K_MIN_5G_FREQ)
+ band = NL80211_BAND_2GHZ;
+ else if (center_freq < ATH12K_MIN_6G_FREQ)
+ band = NL80211_BAND_5GHZ;
+ else
+ band = NL80211_BAND_6GHZ;
+
+ for_each_ar(ah, ar, i) {
+ /* TODO 5 GHz low high split changes */
+ if (ar->mac.sbands[band].channels)
+ return ar;
+ }
+
+ return NULL;
+}
+
void __ath12k_mac_scan_finish(struct ath12k *ar)
{
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
@@ -3148,15 +3661,68 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_scan_request *hw_req)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
+ struct ath12k *ar, *prev_ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct cfg80211_scan_request *req = &hw_req->req;
struct ath12k_wmi_scan_req_arg arg = {};
int ret;
int i;
+ bool create = true;
+
+ if (ah->num_radio == 1) {
+ WARN_ON(!arvif->is_created);
+ ar = ath12k_ah_to_ar(ah, 0);
+ goto scan;
+ }
- ar = ath12k_ah_to_ar(ah);
+ /* Since the targeted scan device could depend on the frequency
+	 * requested in the hw_req, select the corresponding radio.
+ */
+ ar = ath12k_mac_select_scan_device(hw, vif, hw_req->req.channels[0]->center_freq);
+ if (!ar)
+ return -EINVAL;
+	/* If the vif is already assigned to a specific vdev of an ar,
+	 * check whether it is already started; a vdev which is started
+	 * is not allowed to switch to a new radio.
+	 * If the vdev is not started but was earlier created on a
+	 * different ar, delete that vdev and create a new one. We don't
+	 * delete at scan stop, as an optimization, to avoid redundant
+	 * delete-create cycles of the vdev for the same ar in case the
+	 * requests for the vif are always on the same band.
+	 */
+ if (arvif->is_created) {
+ if (WARN_ON(!arvif->ar))
+ return -EINVAL;
+
+ if (ar != arvif->ar && arvif->is_started)
+ return -EINVAL;
+
+ if (ar != arvif->ar) {
+			/* Back up the previously used ar ptr, since the vdev
+			 * delete assigns arvif->ar to NULL after the call.
+			 */
+ prev_ar = arvif->ar;
+ mutex_lock(&prev_ar->conf_mutex);
+ ret = ath12k_mac_vdev_delete(prev_ar, vif);
+ mutex_unlock(&prev_ar->conf_mutex);
+ if (ret)
+ ath12k_warn(prev_ar->ab,
+					"unable to delete scan vdev: %d\n", ret);
+ } else {
+ create = false;
+ }
+ }
+ if (create) {
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_vdev_create(ar, vif);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret) {
+			ath12k_warn(ar->ab, "unable to create scan vdev: %d\n", ret);
+ return -EINVAL;
+ }
+ }
+scan:
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
@@ -3242,10 +3808,13 @@ exit:
static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k *ar;
- ar = ath12k_ah_to_ar(ah);
+ if (!arvif->is_created)
+ return;
+
+ ar = arvif->ar;
mutex_lock(&ar->conf_mutex);
ath12k_scan_abort(ar);
@@ -3369,13 +3938,11 @@ static int ath12k_clear_peer_keys(struct ath12k_vif *arvif,
return first_errno;
}
-static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+static int ath12k_mac_set_key(struct ath12k *ar, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- struct ath12k_base *ab;
+ struct ath12k_base *ab = ar->ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_peer *peer;
struct ath12k_sta *arsta;
@@ -3383,24 +3950,11 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
int ret = 0;
u32 flags = 0;
- /* BIP needs to be done in software */
- if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
- key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
- return 1;
-
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
+ lockdep_assert_held(&ar->conf_mutex);
- if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ if (test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
return 1;
- if (key->keyidx > WMI_MAX_KEY_INDEX)
- return -ENOSPC;
-
- mutex_lock(&ar->conf_mutex);
-
if (sta)
peer_addr = sta->addr;
else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
@@ -3492,6 +4046,47 @@ static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
spin_unlock_bh(&ab->base_lock);
exit:
+ return ret;
+}
+
+static int ath12k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif_cache *cache;
+ struct ath12k *ar;
+ int ret;
+
+ /* BIP needs to be done in software */
+ if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
+ key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
+ return 1;
+
+ if (key->keyidx > WMI_MAX_KEY_INDEX)
+ return -ENOSPC;
+
+ ar = ath12k_get_ar_by_vif(hw, vif);
+ if (!ar) {
+ /* ar is expected to be valid when sta ptr is available */
+ if (sta) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
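+		/* No radio assigned yet; stash the request and replay it
+		 * from ath12k_mac_vif_cache_flush() once the vdev is created.
+		 */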
+ cache = ath12k_arvif_get_cache(arvif);
+ if (!cache)
+ return -ENOSPC;
+ cache->key_conf.cmd = cmd;
+ cache->key_conf.key = key;
+ cache->key_conf.changed = true;
+ return 0;
+ }
+
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_set_key(ar, cmd, vif, sta, key);
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -3579,6 +4174,11 @@ static int ath12k_station_assoc(struct ath12k *ar,
ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc);
+ if (peer_arg.peer_nss < 1) {
+ ath12k_warn(ar->ab,
+ "invalid peer NSS %d\n", peer_arg.peer_nss);
+ return -EINVAL;
+ }
ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
if (ret) {
ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
@@ -3613,7 +4213,8 @@ static int ath12k_station_assoc(struct ath12k *ar,
return 0;
ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->deflink.ht_cap);
+ &sta->deflink.ht_cap,
+ &sta->deflink.he_6ghz_capa);
if (ret) {
ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3968,7 +4569,6 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
@@ -3980,7 +4580,11 @@ static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
new_state == IEEE80211_STA_NOTEXIST))
cancel_work_sync(&arsta->update_wk);
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_vif(hw, vif);
+ if (!ar) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
mutex_lock(&ar->conf_mutex);
@@ -4109,7 +4713,7 @@ static int ath12k_mac_op_sta_set_txpwr(struct ieee80211_hw *hw,
if (txpwr > ATH12K_TX_POWER_MAX_VAL || txpwr < ATH12K_TX_POWER_MIN_VAL)
return -EINVAL;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, 0);
mutex_lock(&ar->conf_mutex);
@@ -4131,14 +4735,17 @@ static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u32 changed)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_peer *peer;
u32 bw, smps;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_vif(hw, vif);
+ if (!ar) {
+ WARN_ON_ONCE(1);
+ return;
+ }
spin_lock_bh(&ar->ab->base_lock);
@@ -4314,12 +4921,22 @@ static int ath12k_mac_op_conf_tx(struct ieee80211_hw *hw,
unsigned int link_id, u16 ac,
const struct ieee80211_tx_queue_params *params)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif_cache *cache = arvif->cache;
int ret;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_vif(hw, vif);
+ if (!ar) {
+ /* cache the info and apply after vdev is created */
+ cache = ath12k_arvif_get_cache(arvif);
+ if (!cache)
+ return -ENOSPC;
+ cache->tx_conf.changed = true;
+ cache->tx_conf.ac = ac;
+ cache->tx_conf.tx_queue_params = *params;
+ return 0;
+ }
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_conf_tx(arvif, link_id, ac, params);
@@ -4987,6 +5604,7 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar,
static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
{
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -4997,11 +5615,18 @@ static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant)
if (ath12k_check_chain_mask(ar, rx_ant, false))
return -EINVAL;
+ /* Since we advertised the max cap of all radios combined during wiphy
+ * registration, ensure we don't set the antenna config higher than the
+ * limits
+ */
+ tx_ant = min_t(u32, tx_ant, ar->pdev->cap.tx_chain_mask);
+ rx_ant = min_t(u32, rx_ant, ar->pdev->cap.rx_chain_mask);
+
ar->cfg_tx_chainmask = tx_ant;
ar->cfg_rx_chainmask = rx_ant;
- if (ar->state != ATH12K_STATE_ON &&
- ar->state != ATH12K_STATE_RESTARTED)
+ if (ah->state != ATH12K_HW_STATE_ON &&
+ ah->state != ATH12K_HW_STATE_RESTARTED)
return 0;
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK,
@@ -5301,51 +5926,16 @@ static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
/* TODO: Need to support new monitor mode */
}
-static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab)
-{
- int recovery_start_count;
-
- if (!ab->is_reset)
- return;
-
- recovery_start_count = atomic_inc_return(&ab->recovery_start_count);
-
- ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count);
-
- if (recovery_start_count == ab->num_radios) {
- complete(&ab->recovery_start);
- ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started success\n");
- }
-
- ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting reconfigure...\n");
-
- wait_for_completion_timeout(&ab->reconfigure_complete,
- ATH12K_RECONFIGURE_TIMEOUT_HZ);
-}
-
static int ath12k_mac_start(struct ath12k *ar)
{
+ struct ath12k_hw *ah = ar->ah;
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev *pdev = ar->pdev;
int ret;
- mutex_lock(&ar->conf_mutex);
+ lockdep_assert_held(&ah->hw_mutex);
- switch (ar->state) {
- case ATH12K_STATE_OFF:
- ar->state = ATH12K_STATE_ON;
- break;
- case ATH12K_STATE_RESTARTING:
- ar->state = ATH12K_STATE_RESTARTED;
- ath12k_mac_wait_reconfigure(ab);
- break;
- case ATH12K_STATE_RESTARTED:
- case ATH12K_STATE_WEDGED:
- case ATH12K_STATE_ON:
- WARN_ON(1);
- ret = -EINVAL;
- goto err;
- }
+ mutex_lock(&ar->conf_mutex);
ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS,
1, pdev->pdev_id);
@@ -5437,29 +6027,66 @@ static int ath12k_mac_start(struct ath12k *ar)
return 0;
err:
- ar->state = ATH12K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
return ret;
}
+static void ath12k_drain_tx(struct ath12k_hw *ah)
+{
+ struct ath12k *ar;
+ int i;
+
+ for_each_ar(ah, ar, i)
+ ath12k_mac_drain_tx(ar);
+}
+
static int ath12k_mac_op_start(struct ieee80211_hw *hw)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar = ath12k_ah_to_ar(ah);
- struct ath12k_base *ab = ar->ab;
- int ret;
+ struct ath12k *ar;
+ int ret, i;
- ath12k_mac_drain_tx(ar);
+ ath12k_drain_tx(ah);
- ret = ath12k_mac_start(ar);
- if (ret) {
- ath12k_err(ab, "fail to start mac operations in pdev idx %d ret %d\n",
- ar->pdev_idx, ret);
- return ret;
+ guard(mutex)(&ah->hw_mutex);
+
+ switch (ah->state) {
+ case ATH12K_HW_STATE_OFF:
+ ah->state = ATH12K_HW_STATE_ON;
+ break;
+ case ATH12K_HW_STATE_RESTARTING:
+ ah->state = ATH12K_HW_STATE_RESTARTED;
+ break;
+ case ATH12K_HW_STATE_RESTARTED:
+ case ATH12K_HW_STATE_WEDGED:
+ case ATH12K_HW_STATE_ON:
+ ah->state = ATH12K_HW_STATE_OFF;
+
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ for_each_ar(ah, ar, i) {
+ ret = ath12k_mac_start(ar);
+ if (ret) {
+ ah->state = ATH12K_HW_STATE_OFF;
+
+			ath12k_err(ar->ab, "failed to start mac operations in pdev idx %d ret %d\n",
+ ar->pdev_idx, ret);
+ goto fail_start;
+ }
}
return 0;
+
+fail_start:
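+	/* Unwind the radios that were already started, in reverse order */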
+ for (; i > 0; i--) {
+ ar = ath12k_ah_to_ar(ah, i - 1);
+ ath12k_mac_stop(ar);
+ }
+
+ return ret;
}
int ath12k_mac_rfkill_config(struct ath12k *ar)
@@ -5521,9 +6148,12 @@ int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable)
static void ath12k_mac_stop(struct ath12k *ar)
{
+ struct ath12k_hw *ah = ar->ah;
struct htt_ppdu_stats_info *ppdu_stats, *tmp;
int ret;
+ lockdep_assert_held(&ah->hw_mutex);
+
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_config_mon_status_default(ar, false);
if (ret && (ret != -EOPNOTSUPP))
@@ -5531,7 +6161,6 @@ static void ath12k_mac_stop(struct ath12k *ar)
ret);
clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
- ar->state = ATH12K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
cancel_delayed_work_sync(&ar->scan.timeout);
@@ -5552,14 +6181,22 @@ static void ath12k_mac_stop(struct ath12k *ar)
atomic_set(&ar->num_pending_mgmt_tx, 0);
}
-static void ath12k_mac_op_stop(struct ieee80211_hw *hw)
+static void ath12k_mac_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar;
+ int i;
- ath12k_mac_drain_tx(ar);
+ ath12k_drain_tx(ah);
+
+ mutex_lock(&ah->hw_mutex);
+
+ ah->state = ATH12K_HW_STATE_OFF;
+
+ for_each_ar(ah, ar, i)
+ ath12k_mac_stop(ar);
- ath12k_mac_stop(ar);
+ mutex_unlock(&ah->hw_mutex);
}
static u8
@@ -5585,17 +6222,59 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif)
return vdev_stats_id;
}
-static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
- struct ath12k_wmi_vdev_create_arg *arg)
+static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_vif *arvif,
+ u32 *flags, u32 *tx_vdev_id)
+{
+ struct ieee80211_vif *tx_vif = arvif->vif->mbssid_tx_vif;
+ struct ath12k *ar = arvif->ar;
+ struct ath12k_vif *tx_arvif;
+
+ if (!tx_vif)
+ return 0;
+
+ tx_arvif = ath12k_vif_to_arvif(tx_vif);
+
+ if (arvif->vif->bss_conf.nontransmitted) {
+ if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
+ return -EINVAL;
+
+ *flags = WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP;
+ *tx_vdev_id = tx_arvif->vdev_id;
+ } else if (tx_arvif == arvif) {
+ *flags = WMI_VDEV_MBSSID_FLAGS_TRANSMIT_AP;
+ } else {
+ return -EINVAL;
+ }
+
+ if (arvif->vif->bss_conf.ema_ap)
+ *flags |= WMI_VDEV_MBSSID_FLAGS_EMA_MODE;
+
+ return 0;
+}
+
+static int ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
+ struct ath12k_wmi_vdev_create_arg *arg)
{
struct ath12k *ar = arvif->ar;
struct ath12k_pdev *pdev = ar->pdev;
+ int ret;
arg->if_id = arvif->vdev_id;
arg->type = arvif->vdev_type;
arg->subtype = arvif->vdev_subtype;
arg->pdev_id = pdev->pdev_id;
+ arg->mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
+ arg->mbssid_tx_vdev_id = 0;
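+	/* Without the MBSS-params-in-vdev-start service, the MBSSID flags
+	 * must be passed at vdev create time instead.
+	 */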
+ if (!test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ ret = ath12k_mac_setup_vdev_params_mbssid(arvif,
+ &arg->mbssid_flags,
+ &arg->mbssid_tx_vdev_id);
+ if (ret)
+ return ret;
+ }
+
if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
@@ -5611,6 +6290,7 @@ static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif,
}
arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif);
+ return 0;
}
static u32
@@ -5732,64 +6412,24 @@ static void ath12k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
ath12k_mac_update_vif_offload(arvif);
}
-static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
- struct ath12k_base *ab;
+ struct ath12k_hw *ah = ar->ah;
+ struct ath12k_base *ab = ar->ab;
+ struct ieee80211_hw *hw = ah->hw;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
struct ath12k_wmi_peer_create_arg peer_param;
u32 param_id, param_value;
u16 nss;
int i;
- int ret;
- int bit;
-
- vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
-
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
-
- mutex_lock(&ar->conf_mutex);
-
- if (vif->type == NL80211_IFTYPE_AP &&
- ar->num_peers > (ar->max_num_peers - 1)) {
- ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
- ret = -ENOBUFS;
- goto err;
- }
-
- if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
- ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
- TARGET_NUM_VDEVS);
- ret = -EBUSY;
- goto err;
- }
+ int ret, vdev_id;
- memset(arvif, 0, sizeof(*arvif));
+ lockdep_assert_held(&ar->conf_mutex);
arvif->ar = ar;
- arvif->vif = vif;
-
- INIT_LIST_HEAD(&arvif->list);
-
- /* Should we initialize any worker to handle connection loss indication
- * from firmware in sta mode?
- */
-
- for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
- arvif->bitrate_mask.control[i].legacy = 0xffffffff;
- memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].ht_mcs));
- memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
- sizeof(arvif->bitrate_mask.control[i].vht_mcs));
- }
-
- bit = __ffs64(ab->free_vdev_map);
-
- arvif->vdev_id = bit;
+ vdev_id = __ffs64(ab->free_vdev_map);
+ arvif->vdev_id = vdev_id;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
switch (vif->type) {
@@ -5813,7 +6453,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
- ar->monitor_vdev_id = bit;
+ ar->monitor_vdev_id = vdev_id;
break;
case NL80211_IFTYPE_P2P_DEVICE:
arvif->vdev_type = WMI_VDEV_TYPE_STA;
@@ -5824,7 +6464,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
break;
}
- ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac add interface id %d type %d subtype %d map %llx\n",
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev create id %d type %d subtype %d map %llx\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
ab->free_vdev_map);
@@ -5832,7 +6472,12 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
vif->hw_queue[i] = i % (ATH12K_HW_MAX_QUEUES - 1);
- ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg);
+ ret = ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg);
+ if (ret) {
+		ath12k_warn(ab, "failed to set create parameters for vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ goto err;
+ }
ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg);
if (ret) {
@@ -5842,6 +6487,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
}
ar->num_created_vdevs++;
+ arvif->is_created = true;
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM created, vdev_id %d\n",
vif->addr, arvif->vdev_id);
ar->allocated_vdev_map |= 1LL << arvif->vdev_id;
@@ -5942,8 +6588,7 @@ static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
if (vif->type != NL80211_IFTYPE_MONITOR && ar->monitor_conf_enabled)
ath12k_mac_monitor_vdev_create(ar);
- mutex_unlock(&ar->conf_mutex);
-
+ arvif->ar = ar;
return ret;
err_peer_del:
@@ -5969,6 +6614,8 @@ err_peer_del:
err_vdev_del:
ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
ar->num_created_vdevs--;
+ arvif->is_created = false;
+ arvif->ar = NULL;
ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
ab->free_vdev_map |= 1LL << arvif->vdev_id;
ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
@@ -5977,9 +6624,171 @@ err_vdev_del:
spin_unlock_bh(&ar->data_lock);
err:
+ arvif->ar = NULL;
+ return ret;
+}
+
+static void ath12k_mac_vif_cache_flush(struct ath12k *ar, struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_vif_cache *cache = arvif->cache;
+ struct ath12k_base *ab = ar->ab;
+
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!cache)
+ return;
+
+ if (cache->tx_conf.changed) {
+ ret = ath12k_mac_conf_tx(arvif, 0, cache->tx_conf.ac,
+ &cache->tx_conf.tx_queue_params);
+ if (ret)
+ ath12k_warn(ab,
+				    "unable to apply tx config parameters to vdev: %d\n",
+ ret);
+ }
+
+ if (cache->bss_conf_changed) {
+ ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf,
+ cache->bss_conf_changed);
+ }
+
+ if (cache->key_conf.changed) {
+ ret = ath12k_mac_set_key(ar, cache->key_conf.cmd, vif, NULL,
+ cache->key_conf.key);
+ if (ret)
+ ath12k_warn(ab, "unable to apply set key param to vdev %d ret %d\n",
+ arvif->vdev_id, ret);
+ }
+ ath12k_arvif_put_cache(arvif);
+}
+
+static struct ath12k *ath12k_mac_assign_vif_to_vdev(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_hw *ah = hw->priv;
+ struct ath12k *ar, *prev_ar;
+ struct ath12k_base *ab;
+ int ret;
+
+ if (ah->num_radio == 1)
+ ar = ah->radio;
+ else if (ctx)
+ ar = ath12k_get_ar_by_ctx(hw, ctx);
+ else
+ return NULL;
+
+ if (!ar)
+ return NULL;
+
+ if (arvif->ar) {
+		/* This is really not expected */
+ if (WARN_ON(!arvif->is_created)) {
+ arvif->ar = NULL;
+ return NULL;
+ }
+
+ if (ah->num_radio == 1)
+ return arvif->ar;
+
+		/* This can happen as the scan vdev gets created during multiple
+		 * scans across different radios before a vdev is brought up on
+		 * a certain radio.
+		 */
+ if (ar != arvif->ar) {
+ if (WARN_ON(arvif->is_started))
+ return NULL;
+
+			/* Back up the previously used ar ptr, since arvif->ar
+			 * is set to NULL once the vdev delete is done.
+			 */
+ prev_ar = arvif->ar;
+ mutex_lock(&prev_ar->conf_mutex);
+ ret = ath12k_mac_vdev_delete(prev_ar, vif);
+
+ if (ret)
+				ath12k_warn(prev_ar->ab, "unable to delete vdev: %d\n",
+ ret);
+ mutex_unlock(&prev_ar->conf_mutex);
+ }
+ }
+
+ ab = ar->ab;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (arvif->is_created)
+ goto flush;
+
+ if (vif->type == NL80211_IFTYPE_AP &&
+ ar->num_peers > (ar->max_num_peers - 1)) {
+ ath12k_warn(ab, "failed to create vdev due to insufficient peer entry resource in firmware\n");
+ goto unlock;
+ }
+
+ if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
+ ath12k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
+ TARGET_NUM_VDEVS);
+ goto unlock;
+ }
+
+ ret = ath12k_mac_vdev_create(ar, vif);
+ if (ret) {
+		ath12k_warn(ab, "failed to create vdev %pM ret %d\n", vif->addr, ret);
+ goto unlock;
+ }
+
+flush:
+	/* If the vdev is created during channel assign and not during
+	 * add_interface(), apply any parameters for the vdev which were
+	 * received after add_interface and which correspond to this vif.
+	 */
+ ath12k_mac_vif_cache_flush(ar, vif);
+unlock:
mutex_unlock(&ar->conf_mutex);
+ return arvif->ar;
+}
- return ret;
+static int ath12k_mac_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ int i;
+
+ memset(arvif, 0, sizeof(*arvif));
+
+ arvif->vif = vif;
+
+ INIT_LIST_HEAD(&arvif->list);
+ INIT_DELAYED_WORK(&arvif->connection_loss_work,
+ ath12k_mac_vif_sta_connection_loss_work);
+
+ for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+ arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+ memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ }
+
+ /* Allocate Default Queue now and reassign during actual vdev create */
+ vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE;
+ for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
+ vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
+
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
+
+	/* For a single radio wiphy (i.e. ah->num_radio is 1), create the vdev
+	 * during add_interface itself. For a multi radio wiphy, defer the vdev
+	 * creation until channel assign, to determine the radio on which the
+	 * vdev needs to be created.
+	 */
+ ath12k_mac_assign_vif_to_vdev(hw, vif, NULL);
+ return 0;
}
static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif)
@@ -6007,31 +6816,14 @@ static void ath12k_mac_vif_unref(struct ath12k_dp *dp, struct ieee80211_vif *vif
}
}
-static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int ath12k_mac_vdev_delete(struct ath12k *ar, struct ieee80211_vif *vif)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
- struct ath12k_base *ab;
+ struct ath12k_base *ab = ar->ab;
unsigned long time_left;
int ret;
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
-
- mutex_lock(&ar->conf_mutex);
-
- ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
- arvif->vdev_id);
-
- if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
- ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr);
- if (ret)
- ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
- arvif->vdev_id, ret);
- }
-
+ lockdep_assert_held(&ar->conf_mutex);
reinit_completion(&ar->vdev_delete_done);
ret = ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
@@ -6048,6 +6840,10 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
goto err_vdev_del;
}
+ ab->free_vdev_map |= 1LL << arvif->vdev_id;
+ ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
+ ar->num_created_vdevs--;
+
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ar->monitor_vdev_id = -1;
ar->monitor_vdev_created = false;
@@ -6055,11 +6851,6 @@ static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
ret = ath12k_mac_monitor_vdev_delete(ar);
}
- ab->free_vdev_map |= 1LL << (arvif->vdev_id);
- ar->allocated_vdev_map &= ~(1LL << arvif->vdev_id);
- ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
- ar->num_created_vdevs--;
-
ath12k_dbg(ab, ATH12K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
vif->addr, arvif->vdev_id);
@@ -6069,6 +6860,7 @@ err_vdev_del:
spin_unlock_bh(&ar->data_lock);
ath12k_peer_cleanup(ar, arvif->vdev_id);
+ ath12k_arvif_put_cache(arvif);
idr_for_each(&ar->txmgmt_idr,
ath12k_mac_vif_txmgmt_idr_remove, vif);
@@ -6078,9 +6870,48 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath12k_mac_txpower_recalc(ar);
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
/* TODO: recal traffic pause state based on the available vdevs */
+ arvif->is_created = false;
+ arvif->ar = NULL;
+
+ return ret;
+}
+
+static void ath12k_mac_op_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_base *ab;
+ struct ath12k *ar;
+ int ret;
+
+ if (!arvif->is_created) {
+ /* if we cached some config but never received assign chanctx,
+ * free the allocated cache.
+ */
+ ath12k_arvif_put_cache(arvif);
+ return;
+ }
+
+ ar = arvif->ar;
+ ab = ar->ab;
+
+ cancel_delayed_work_sync(&arvif->connection_loss_work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ ath12k_dbg(ab, ATH12K_DBG_MAC, "mac remove interface (vdev %d)\n",
+ arvif->vdev_id);
+
+ if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
+ ret = ath12k_peer_delete(ar, arvif->vdev_id, vif->addr);
+ if (ret)
+ ath12k_warn(ab, "failed to submit AP self-peer removal on vdev %d: %d\n",
+ arvif->vdev_id, ret);
+ }
+
+ ath12k_mac_vdev_delete(ar, vif);
mutex_unlock(&ar->conf_mutex);
}
@@ -6109,15 +6940,9 @@ static void ath12k_mac_configure_filter(struct ath12k *ar,
reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
- if (!ret) {
- if (!reset_flag)
- set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- else
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- } else {
+ if (ret)
ath12k_warn(ar->ab,
"fail to set monitor filter: %d\n", ret);
- }
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"total_flags:0x%x, reset_flag:%d\n",
@@ -6132,7 +6957,7 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, 0);
mutex_lock(&ar->conf_mutex);
@@ -6145,16 +6970,19 @@ static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ int antennas_rx = 0, antennas_tx = 0;
struct ath12k *ar;
+ int i;
- ar = ath12k_ah_to_ar(ah);
-
- mutex_lock(&ar->conf_mutex);
-
- *tx_ant = ar->cfg_tx_chainmask;
- *rx_ant = ar->cfg_rx_chainmask;
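+	/* Report the maximum configured chainmask across all radios */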
+ for_each_ar(ah, ar, i) {
+ mutex_lock(&ar->conf_mutex);
+ antennas_rx = max_t(u32, antennas_rx, ar->cfg_rx_chainmask);
+ antennas_tx = max_t(u32, antennas_tx, ar->cfg_tx_chainmask);
+ mutex_unlock(&ar->conf_mutex);
+ }
- mutex_unlock(&ar->conf_mutex);
+ *tx_ant = antennas_tx;
+ *rx_ant = antennas_rx;
return 0;
}
@@ -6163,13 +6991,16 @@ static int ath12k_mac_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- int ret;
-
- ar = ath12k_ah_to_ar(ah);
+ int ret = 0;
+ int i;
- mutex_lock(&ar->conf_mutex);
- ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
- mutex_unlock(&ar->conf_mutex);
+ for_each_ar(ah, ar, i) {
+ mutex_lock(&ar->conf_mutex);
+ ret = __ath12k_set_antenna(ar, tx_ant, rx_ant);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret)
+ break;
+ }
return ret;
}
@@ -6213,7 +7044,11 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret = -EINVAL;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_vif(hw, vif);
+ if (!ar)
+ return -EINVAL;
+
+ ar = ath12k_ah_to_ar(ah, 0);
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_ampdu_action(arvif, params);
@@ -6229,15 +7064,17 @@ static int ath12k_mac_op_ampdu_action(struct ieee80211_hw *hw,
static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_ctx(hw, ctx);
+ if (!ar)
+ return -EINVAL;
+
ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx add freq %u width %d ptr %pK\n",
+ "mac chanctx add freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -6257,15 +7094,17 @@ static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
static void ath12k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_ctx(hw, ctx);
+ if (!ar)
+ return;
+
ab = ar->ab;
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx remove freq %u width %d ptr %pK\n",
+ "mac chanctx remove freq %u width %d ptr %p\n",
ctx->def.chan->center_freq, ctx->def.width, ctx);
mutex_lock(&ar->conf_mutex);
@@ -6286,14 +7125,24 @@ ath12k_mac_check_down_grade_phy_mode(struct ath12k *ar,
enum nl80211_band band,
enum nl80211_iftype type)
{
- struct ieee80211_sta_eht_cap *eht_cap;
+ struct ieee80211_sta_eht_cap *eht_cap = NULL;
enum wmi_phy_mode down_mode;
+ int n = ar->mac.sbands[band].n_iftype_data;
+ int i;
+ struct ieee80211_sband_iftype_data *data;
if (mode < MODE_11BE_EHT20)
return mode;
- eht_cap = &ar->mac.iftype[band][type].eht_cap;
- if (eht_cap->has_eht)
+ data = ar->mac.iftype[band];
+ for (i = 0; i < n; i++) {
+ if (data[i].types_mask & BIT(type)) {
+ eht_cap = &data[i].eht_cap;
+ break;
+ }
+ }
+
+ if (eht_cap && eht_cap->has_eht)
return mode;
switch (mode) {
@@ -6370,10 +7219,16 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif,
arg.pref_tx_streams = ar->num_tx_chains;
arg.pref_rx_streams = ar->num_rx_chains;
- /* Fill the MBSSID flags to indicate AP is non MBSSID by default
- * Corresponding flags would be updated with MBSSID support.
- */
arg.mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP;
+ arg.mbssid_tx_vdev_id = 0;
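+	/* With the MBSS-params-in-vdev-start service, the MBSSID flags are
+	 * instead passed here at vdev start/restart time.
+	 */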
+ if (test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT,
+ ar->ab->wmi_ab.svc_map)) {
+ ret = ath12k_mac_setup_vdev_params_mbssid(arvif,
+ &arg.mbssid_flags,
+ &arg.mbssid_tx_vdev_id);
+ if (ret)
+ return ret;
+ }
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
@@ -6468,14 +7323,19 @@ struct ath12k_mac_change_chanctx_arg {
struct ieee80211_vif_chanctx_switch *vifs;
int n_vifs;
int next_vif;
+ struct ath12k *ar;
};
static void
ath12k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_mac_change_chanctx_arg *arg = data;
+ if (arvif->ar != arg->ar)
+ return;
+
if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
return;
@@ -6486,9 +7346,13 @@ static void
ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_mac_change_chanctx_arg *arg = data;
struct ieee80211_chanctx_conf *ctx;
+ if (arvif->ar != arg->ar)
+ return;
+
ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
if (ctx != arg->ctx)
return;
@@ -6502,12 +7366,65 @@ ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
arg->next_vif++;
}
+static u32 ath12k_mac_nlwidth_to_wmiwidth(enum nl80211_chan_width width)
+{
+ switch (width) {
+ case NL80211_CHAN_WIDTH_20:
+ return WMI_CHAN_WIDTH_20;
+ case NL80211_CHAN_WIDTH_40:
+ return WMI_CHAN_WIDTH_40;
+ case NL80211_CHAN_WIDTH_80:
+ return WMI_CHAN_WIDTH_80;
+ case NL80211_CHAN_WIDTH_160:
+ return WMI_CHAN_WIDTH_160;
+ case NL80211_CHAN_WIDTH_80P80:
+ return WMI_CHAN_WIDTH_80P80;
+ case NL80211_CHAN_WIDTH_5:
+ return WMI_CHAN_WIDTH_5;
+ case NL80211_CHAN_WIDTH_10:
+ return WMI_CHAN_WIDTH_10;
+ case NL80211_CHAN_WIDTH_320:
+ return WMI_CHAN_WIDTH_320;
+ default:
+ WARN_ON(1);
+ return WMI_CHAN_WIDTH_20;
+ }
+}
+
+static int ath12k_mac_update_peer_puncturing_width(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct cfg80211_chan_def def)
+{
+ u32 param_id, param_value;
+ int ret;
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
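+	/* cfg80211 sets bits for punctured 20 MHz subchannels, while firmware
+	 * expects bits for active subchannels, hence the bitmap inversion.
+	 */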
+ param_id = WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP;
+ param_value = ath12k_mac_nlwidth_to_wmiwidth(def.width) |
+ u32_encode_bits((~def.punctured),
+ WMI_PEER_PUNCTURE_BITMAP);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+ "punctured bitmap %02x width %d vdev %d\n",
+ def.punctured, def.width, arvif->vdev_id);
+
+ ret = ath12k_wmi_set_peer_param(ar, arvif->bssid,
+ arvif->vdev_id, param_id,
+ param_value);
+
+ return ret;
+}
+
static void
ath12k_mac_update_vif_chan(struct ath12k *ar,
struct ieee80211_vif_chanctx_switch *vifs,
int n_vifs)
{
+ struct ath12k_wmi_vdev_up_params params = {};
struct ath12k_base *ab = ar->ab;
+ struct ieee80211_vif *vif;
struct ath12k_vif *arvif;
int ret;
int i;
@@ -6516,9 +7433,10 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
lockdep_assert_held(&ar->conf_mutex);
for (i = 0; i < n_vifs; i++) {
- arvif = ath12k_vif_to_arvif(vifs[i].vif);
+ vif = vifs[i].vif;
+ arvif = ath12k_vif_to_arvif(vif);
- if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
+ if (vif->type == NL80211_IFTYPE_MONITOR)
monitor_vif = true;
ath12k_dbg(ab, ATH12K_DBG_MAC,
@@ -6532,29 +7450,6 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
if (WARN_ON(!arvif->is_started))
continue;
- if (WARN_ON(!arvif->is_up))
- continue;
-
- ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id);
- if (ret) {
- ath12k_warn(ab, "failed to down vdev %d: %d\n",
- arvif->vdev_id, ret);
- continue;
- }
- }
-
- /* All relevant vdevs are downed and associated channel resources
- * should be available for the channel switch now.
- */
-
- /* TODO: Update ar->rx_channel */
-
- for (i = 0; i < n_vifs; i++) {
- arvif = ath12k_vif_to_arvif(vifs[i].vif);
-
- if (WARN_ON(!arvif->is_started))
- continue;
-
arvif->punct_bitmap = vifs[i].new_ctx->def.punctured;
	/* Firmware expects vdev_restart only if vdev is up.
@@ -6587,13 +7482,31 @@ ath12k_mac_update_vif_chan(struct ath12k *ar,
ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
ret);
- ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
- arvif->bssid);
+ memset(&params, 0, sizeof(params));
+ params.vdev_id = arvif->vdev_id;
+ params.aid = arvif->aid;
+ params.bssid = arvif->bssid;
+ if (vif->mbssid_tx_vif) {
+ params.tx_bssid = ath12k_vif_to_arvif(vif->mbssid_tx_vif)->bssid;
+ params.nontx_profile_idx = vif->bss_conf.bssid_index;
+ params.nontx_profile_cnt = 1 << vif->bss_conf.bssid_indicator;
+ }
+ ret = ath12k_wmi_vdev_up(arvif->ar, &params);
if (ret) {
ath12k_warn(ab, "failed to bring vdev up %d: %d\n",
arvif->vdev_id, ret);
continue;
}
+
+ ret = ath12k_mac_update_peer_puncturing_width(arvif->ar, arvif,
+ vifs[i].new_ctx->def);
+ if (ret) {
+ ath12k_warn(ar->ab,
+ "failed to update puncturing bitmap %02x and width %d: %d\n",
+ vifs[i].new_ctx->def.punctured,
+ vifs[i].new_ctx->def.width, ret);
+ continue;
+ }
}
/* Restart the internal monitor vdev on new channel */
@@ -6607,7 +7520,7 @@ static void
ath12k_mac_update_active_vif_chan(struct ath12k *ar,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx };
+ struct ath12k_mac_change_chanctx_arg arg = { .ctx = ctx, .ar = ar };
struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
lockdep_assert_held(&ar->conf_mutex);
@@ -6637,17 +7550,19 @@ static void ath12k_mac_op_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_ctx(hw, ctx);
+ if (!ar)
+ return;
+
ab = ar->ab;
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx change freq %u width %d ptr %pK changed %x\n",
+ "mac chanctx change freq %u width %d ptr %p changed %x\n",
ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
/* This shouldn't really happen because channel switching should use
@@ -6705,20 +7620,26 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
- struct ath12k_wmi_peer_create_arg param;
- ar = ath12k_ah_to_ar(ah);
+ /* For a multi radio wiphy, the vdev was not created during add_interface();
+ * create it now since we have a channel ctx to assign to a specific ar/fw.
+ */
+ ar = ath12k_mac_assign_vif_to_vdev(hw, vif, ctx);
+ if (!ar) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
ab = ar->ab;
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx assign ptr %pK vdev_id %i\n",
+ "mac chanctx assign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
arvif->punct_bitmap = ctx->def.punctured;
@@ -6738,21 +7659,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
goto out;
}
- if (ab->hw_params->vdev_start_delay &&
- arvif->vdev_type != WMI_VDEV_TYPE_AP &&
- arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
- param.vdev_id = arvif->vdev_id;
- param.peer_type = WMI_PEER_TYPE_DEFAULT;
- param.peer_addr = ar->mac_addr;
-
- ret = ath12k_peer_create(ar, arvif, NULL, &param);
- if (ret) {
- ath12k_warn(ab, "failed to create peer after vdev start delay: %d",
- ret);
- goto out;
- }
- }
-
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_mac_monitor_start(ar);
if (ret)
@@ -6788,28 +7694,32 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ath12k_base *ab;
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
int ret;
- ar = ath12k_ah_to_ar(ah);
+ /* The vif is expected to be attached to an ar's vdev.
+ * We leave the vif/vdev in place here rather than deleting the vdev
+ * symmetrically to assign_vif_chanctx(); the vdev will be deleted
+ * and unassigned either during remove_interface() or when a channel
+ * change moves the vif to a new ar.
+ */
+ if (!arvif->is_created)
+ return;
+
+ ar = arvif->ar;
ab = ar->ab;
mutex_lock(&ar->conf_mutex);
ath12k_dbg(ab, ATH12K_DBG_MAC,
- "mac chanctx unassign ptr %pK vdev_id %i\n",
+ "mac chanctx unassign ptr %p vdev_id %i\n",
ctx, arvif->vdev_id);
WARN_ON(!arvif->is_started);
- if (ab->hw_params->vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_MONITOR &&
- ath12k_peer_find_by_addr(ab, ar->mac_addr))
- ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
-
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
ret = ath12k_mac_monitor_stop(ar);
if (ret) {
@@ -6820,7 +7730,8 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->is_started = false;
}
- if (arvif->vdev_type != WMI_VDEV_TYPE_STA) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA &&
+ arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
ath12k_bss_disassoc(ar, arvif);
ret = ath12k_mac_vdev_stop(arvif);
if (ret)
@@ -6829,10 +7740,6 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
}
arvif->is_started = false;
- if (ab->hw_params->vdev_start_delay &&
- arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
- ath12k_wmi_vdev_down(ar, arvif->vdev_id);
-
if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
ath12k_mac_monitor_stop(ar);
@@ -6846,13 +7753,20 @@ ath12k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
int n_vifs,
enum ieee80211_chanctx_switch_mode mode)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_get_ar_by_ctx(hw, vifs->old_ctx);
+ if (!ar)
+ return -EINVAL;
mutex_lock(&ar->conf_mutex);
+ /* Switching channels across radios is not allowed */
+ if (ar != ath12k_get_ar_by_ctx(hw, vifs->new_ctx)) {
+ mutex_unlock(&ar->conf_mutex);
+ return -EINVAL;
+ }
+
ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
"mac chanctx switch n_vifs %d mode %d\n",
n_vifs, mode);
@@ -6893,11 +7807,21 @@ static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret;
+ int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD, ret = 0, i;
- ar = ath12k_ah_to_ar(ah);
-
- ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+ /* Currently we set the RTS threshold value on all the vifs across
+ * all radios of the single wiphy.
+ * TODO: Once support for a vif specific RTS threshold is available
+ * in mac80211, ath12k can make use of it.
+ */
+ for_each_ar(ah, ar, i) {
+ ret = ath12k_set_vdev_param_to_all_vifs(ar, param_id, value);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set RTS config for all vdevs of pdev %d",
+ ar->pdev->pdev_id);
+ break;
+ }
+ }
return ret;
}
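The handler above is one of several call sites in this patch that switch from a single ath12k_ah_to_ar(ah) lookup to iterating every radio under the hw. The real for_each_ar() macro is defined elsewhere in the driver; a minimal sketch of the iteration pattern it implies, with hypothetical type and helper names:

```c
#include <stddef.h>

struct radio;						/* opaque per-radio state */
int radio_set_param(struct radio *r, int param, int value); /* hypothetical */

struct hw_group {					/* mirrors struct ath12k_hw */
	int num_radio;
	struct radio *radio[4];
};

/* Sketch of a for_each_ar()-style iterator: walk every radio under the
 * hw, binding the cursor variable on each pass.
 */
#define for_each_radio(grp, r, i) \
	for ((i) = 0; (i) < (grp)->num_radio && ((r) = (grp)->radio[(i)]); (i)++)

static int set_param_all(struct hw_group *grp, int param, int value)
{
	struct radio *r;
	int i, ret = 0;

	for_each_radio(grp, r, i) {
		ret = radio_set_param(r, param, value);
		if (ret)
			break;			/* stop at the first failure */
	}
	return ret;
}
```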
@@ -6917,33 +7841,62 @@ static int ath12k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
return -EOPNOTSUPP;
}
-static void ath12k_mac_flush(struct ath12k *ar)
+static int ath12k_mac_flush(struct ath12k *ar)
{
long time_left;
+ int ret = 0;
time_left = wait_event_timeout(ar->dp.tx_empty_waitq,
(atomic_read(&ar->dp.num_tx_pending) == 0),
ATH12K_FLUSH_TIMEOUT);
- if (time_left == 0)
- ath12k_warn(ar->ab, "failed to flush transmit queue %ld\n", time_left);
+ if (time_left == 0) {
+ ath12k_warn(ar->ab,
+ "failed to flush transmit queue, data pkts pending %d\n",
+ atomic_read(&ar->dp.num_tx_pending));
+ ret = -ETIMEDOUT;
+ }
time_left = wait_event_timeout(ar->txmgmt_empty_waitq,
(atomic_read(&ar->num_pending_mgmt_tx) == 0),
ATH12K_FLUSH_TIMEOUT);
- if (time_left == 0)
- ath12k_warn(ar->ab, "failed to flush mgmt transmit queue %ld\n",
- time_left);
+ if (time_left == 0) {
+ ath12k_warn(ar->ab,
+ "failed to flush mgmt transmit queue, mgmt pkts pending %d\n",
+ atomic_read(&ar->num_pending_mgmt_tx));
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+int ath12k_mac_wait_tx_complete(struct ath12k *ar)
+{
+ ath12k_mac_drain_tx(ar);
+ return ath12k_mac_flush(ar);
}
static void ath12k_mac_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar;
+ int i;
if (drop)
return;
+ /* vif can be NULL when flush() is requested for the whole hw */
+ if (!vif) {
+ for_each_ar(ah, ar, i)
+ ath12k_mac_flush(ar);
+ return;
+ }
+
+ ar = ath12k_get_ar_by_vif(hw, vif);
+
+ if (!ar)
+ return;
+
ath12k_mac_flush(ar);
}
@@ -7145,6 +8098,9 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data,
struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k *ar = arvif->ar;
+ if (arsta->arvif != arvif)
+ return;
+
spin_lock_bh(&ar->data_lock);
arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
spin_unlock_bh(&ar->data_lock);
@@ -7155,10 +8111,14 @@ static void ath12k_mac_set_bitrate_mask_iter(void *data,
static void ath12k_mac_disable_peer_fixed_rate(void *data,
struct ieee80211_sta *sta)
{
+ struct ath12k_sta *arsta = ath12k_sta_to_arsta(sta);
struct ath12k_vif *arvif = data;
struct ath12k *ar = arvif->ar;
int ret;
+ if (arsta->arvif != arvif)
+ return;
+
ret = ath12k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_PARAM_FIXED_RATE,
@@ -7301,26 +8261,33 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
struct ath12k *ar;
struct ath12k_base *ab;
struct ath12k_vif *arvif;
- int recovery_count;
+ int recovery_count, i;
if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
return;
- ar = ath12k_ah_to_ar(ah);
- ab = ar->ab;
+ guard(mutex)(&ah->hw_mutex);
- mutex_lock(&ar->conf_mutex);
+ if (ah->state != ATH12K_HW_STATE_RESTARTED)
+ return;
+
+ ah->state = ATH12K_HW_STATE_ON;
+ ieee80211_wake_queues(hw);
+
+ for_each_ar(ah, ar, i) {
+ mutex_lock(&ar->conf_mutex);
+
+ ab = ar->ab;
- if (ar->state == ATH12K_STATE_RESTARTED) {
ath12k_warn(ar->ab, "pdev %d successfully recovered\n",
ar->pdev->pdev_id);
- ar->state = ATH12K_STATE_ON;
- ieee80211_wake_queues(hw);
if (ab->is_reset) {
recovery_count = atomic_inc_return(&ab->recovery_count);
+
ath12k_dbg(ab, ATH12K_DBG_BOOT, "recovery count %d\n",
recovery_count);
+
/* When there are multiple radios in an SOC,
* the recovery has to be done for each radio
*/
@@ -7339,6 +8306,7 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
arvif->key_cipher,
arvif->is_up,
arvif->vdev_type);
+
/* After trigger disconnect, then upper layer will
* trigger connect again, then the PN number of
* upper layer will be reset to keep up with AP
@@ -7348,13 +8316,14 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw,
arvif->vdev_type == WMI_VDEV_TYPE_STA &&
arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) {
ieee80211_hw_restart_disconnect(arvif->vif);
+
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"restart disconnect\n");
}
}
- }
- mutex_unlock(&ar->conf_mutex);
+ mutex_unlock(&ar->conf_mutex);
+ }
}
static void
@@ -7392,21 +8361,13 @@ ath12k_mac_update_bss_chan_survey(struct ath12k *ar,
static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
- struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
struct ieee80211_supported_band *sband;
struct survey_info *ar_survey;
- int ret = 0;
if (idx >= ATH12K_NUM_CHANS)
return -ENOENT;
- ar = ath12k_ah_to_ar(ah);
-
- ar_survey = &ar->survey[idx];
-
- mutex_lock(&ar->conf_mutex);
-
sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
if (sband && idx >= sband->n_channels) {
idx -= sband->n_channels;
@@ -7415,12 +8376,30 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (!sband)
sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
- if (!sband || idx >= sband->n_channels) {
- ret = -ENOENT;
- goto exit;
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_6GHZ];
+
+ if (!sband || idx >= sband->n_channels)
+ return -ENOENT;
+
+ ar = ath12k_mac_get_ar_by_chan(hw, &sband->channels[idx]);
+ if (!ar) {
+ if (sband->channels[idx].flags & IEEE80211_CHAN_DISABLED) {
+ memset(survey, 0, sizeof(*survey));
+ return 0;
+ }
+ return -ENOENT;
}
+ ar_survey = &ar->survey[idx];
+
+ mutex_lock(&ar->conf_mutex);
+
ath12k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
spin_lock_bh(&ar->data_lock);
@@ -7432,10 +8411,8 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (ar->rx_channel == survey->channel)
survey->filled |= SURVEY_INFO_IN_USE;
-exit:
mutex_unlock(&ar->conf_mutex);
-
- return ret;
+ return 0;
}
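get_survey above resolves mac80211's flat channel index by walking the 2 GHz, 5 GHz, and 6 GHz bands in order and subtracting each band's channel count. A standalone sketch of that resolution, assuming simple band tables:

```c
#include <stddef.h>

struct band {
	const char *name;
	int n_channels;
};

/* Resolve a flat survey index into a band by walking the bands in the
 * same 2 GHz -> 5 GHz -> 6 GHz order as the hunk above; on return *idx
 * is the channel offset within the matched band.
 */
static const struct band *resolve_idx(const struct band *bands, size_t n,
				      int *idx)
{
	size_t b;

	for (b = 0; b < n; b++) {
		if (*idx < bands[b].n_channels)
			return &bands[b];
		*idx -= bands[b].n_channels; /* skip past this band */
	}
	return NULL; /* out of range: the driver returns -ENOENT */
}
```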
static void ath12k_mac_op_sta_statistics(struct ieee80211_hw *hw,
@@ -7478,7 +8455,7 @@ static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k *ar;
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, 0);
mutex_lock(&ar->conf_mutex);
@@ -7504,12 +8481,68 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw,
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
struct ath12k_wmi_scan_req_arg arg;
- struct ath12k *ar;
+ struct ath12k *ar, *prev_ar;
u32 scan_time_msec;
+ bool create = true;
int ret;
- ar = ath12k_ah_to_ar(ah);
+ if (ah->num_radio == 1) {
+ WARN_ON(!arvif->is_created);
+ ar = ath12k_ah_to_ar(ah, 0);
+ goto scan;
+ }
+
+ ar = ath12k_mac_select_scan_device(hw, vif, chan->center_freq);
+ if (!ar)
+ return -EINVAL;
+
+ /* If the vif is already assigned to a specific vdev of an ar,
+ * check whether it is already started; a started vdev is not
+ * allowed to switch to a new radio.
+ * If the vdev is not started but was earlier created on a
+ * different ar, delete that vdev and create a new one. We don't
+ * delete at scan stop as an optimization, to avoid redundant
+ * vdev delete/create cycles for the same ar in case requests for
+ * the vif always stay on the same band.
+ */
+ if (arvif->is_created) {
+ if (WARN_ON(!arvif->ar))
+ return -EINVAL;
+
+ if (ar != arvif->ar && arvif->is_started)
+ return -EBUSY;
+
+ if (ar != arvif->ar) {
+ /* back up the previously used ar pointer, since the vdev
+ * delete would set arvif->ar to NULL after the call
+ */
+ prev_ar = arvif->ar;
+ mutex_lock(&prev_ar->conf_mutex);
+ ret = ath12k_mac_vdev_delete(prev_ar, vif);
+ mutex_unlock(&prev_ar->conf_mutex);
+ if (ret) {
+ ath12k_warn(prev_ar->ab,
+ "unable to delete scan vdev for roc: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ create = false;
+ }
+ }
+
+ if (create) {
+ mutex_lock(&ar->conf_mutex);
+ ret = ath12k_mac_vdev_create(ar, vif);
+ mutex_unlock(&ar->conf_mutex);
+ if (ret) {
+ ath12k_warn(ar->ab, "unable to create scan vdev for roc: %d\n",
+ ret);
+ return -EINVAL;
+ }
+ }
+scan:
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
@@ -7591,6 +8624,40 @@ exit:
return ret;
}
+static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
+ struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set rekey data vdev %d\n",
+ arvif->vdev_id);
+
+ mutex_lock(&ar->conf_mutex);
+
+ memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN);
+ memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN);
+
+ /* The supplicant works in big endian, while the firmware expects
+ * little endian.
+ */
+ rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr);
+
+ arvif->rekey_data.enable_offload = true;
+
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kck", NULL,
+ rekey_data->kck, NL80211_KCK_LEN);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kek", NULL,
+ rekey_data->kek, NL80211_KEK_LEN);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "replay ctr", NULL,
+ &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr));
+
+ mutex_unlock(&ar->conf_mutex);
+}
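set_rekey_data converts the supplicant's big-endian replay counter into a host integer via get_unaligned_be64() before storing it. A standalone sketch of that byte-order conversion; the kernel helper does effectively this on a possibly unaligned buffer:

```c
#include <stdint.h>

/* Read a big-endian 64-bit replay counter from a possibly unaligned byte
 * buffer, as get_unaligned_be64() does in the kernel.
 */
static uint64_t be64_from_bytes(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)
		v = (v << 8) | p[i]; /* most significant byte first */
	return v;
}
```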
+
static const struct ieee80211_ops ath12k_ops = {
.tx = ath12k_mac_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
@@ -7606,6 +8673,7 @@ static const struct ieee80211_ops ath12k_ops = {
.hw_scan = ath12k_mac_op_hw_scan,
.cancel_hw_scan = ath12k_mac_op_cancel_hw_scan,
.set_key = ath12k_mac_op_set_key,
+ .set_rekey_data = ath12k_mac_op_set_rekey_data,
.sta_state = ath12k_mac_op_sta_state,
.sta_set_txpwr = ath12k_mac_op_sta_set_txpwr,
.sta_rc_update = ath12k_mac_op_sta_rc_update,
@@ -7627,6 +8695,12 @@ static const struct ieee80211_ops ath12k_ops = {
.sta_statistics = ath12k_mac_op_sta_statistics,
.remain_on_channel = ath12k_mac_op_remain_on_channel,
.cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel,
+
+#ifdef CONFIG_PM
+ .suspend = ath12k_wow_op_suspend,
+ .resume = ath12k_wow_op_resume,
+ .set_wakeup = ath12k_wow_op_set_wakeup,
+#endif
};
static void ath12k_mac_update_ch_list(struct ath12k *ar,
@@ -7643,6 +8717,9 @@ static void ath12k_mac_update_ch_list(struct ath12k *ar,
band->channels[i].center_freq > freq_high)
band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
}
+
+ ar->freq_low = freq_low;
+ ar->freq_high = freq_high;
}
static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
@@ -7667,6 +8744,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
{
struct ieee80211_supported_band *band;
struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
+ struct ath12k_hw *ah = ar->ah;
void *channels;
u32 phy_id;
@@ -7721,6 +8799,7 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
ath12k_mac_update_ch_list(ar, band,
reg_cap->low_5ghz_chan,
reg_cap->high_5ghz_chan);
+ ah->use_6ghz_regd = true;
}
if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) {
@@ -7757,10 +8836,12 @@ static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah)
{
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar;
+ int i;
u16 interface_modes = U16_MAX;
- interface_modes &= ar->ab->hw_params->interface_modes;
+ for_each_ar(ah, ar, i)
+ interface_modes &= ar->ab->hw_params->interface_modes;
return interface_modes == U16_MAX ? 0 : interface_modes;
}
@@ -7768,15 +8849,19 @@ static u16 ath12k_mac_get_ifmodes(struct ath12k_hw *ah)
static bool ath12k_mac_is_iface_mode_enable(struct ath12k_hw *ah,
enum nl80211_iftype type)
{
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar;
+ int i;
u16 interface_modes, mode;
bool is_enable = true;
mode = BIT(type);
-
- interface_modes = ar->ab->hw_params->interface_modes;
- if (!(interface_modes & mode))
- is_enable = false;
+ for_each_ar(ah, ar, i) {
+ interface_modes = ar->ab->hw_params->interface_modes;
+ if (!(interface_modes & mode)) {
+ is_enable = false;
+ break;
+ }
+ }
return is_enable;
}
@@ -7857,19 +8942,23 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
static const u8 ath12k_if_types_ext_capa[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
};
static const u8 ath12k_if_types_ext_capa_sta[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};
static const u8 ath12k_if_types_ext_capa_ap[] = {
[0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
+ [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT,
[7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
[9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT,
+ [10] = WLAN_EXT_CAPA11_EMA_SUPPORT,
};
static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
@@ -7906,13 +8995,18 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
{
struct ieee80211_hw *hw = ah->hw;
struct wiphy *wiphy = hw->wiphy;
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar;
+ int i;
- cancel_work_sync(&ar->regd_update_work);
+ for_each_ar(ah, ar, i) {
+ cancel_work_sync(&ar->regd_update_work);
+ ath12k_debugfs_unregister(ar);
+ }
ieee80211_unregister_hw(hw);
- ath12k_mac_cleanup_unregister(ar);
+ for_each_ar(ah, ar, i)
+ ath12k_mac_cleanup_unregister(ar);
kfree(wiphy->iface_combinations[0].limits);
kfree(wiphy->iface_combinations);
@@ -7952,7 +9046,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
{
struct ieee80211_hw *hw = ah->hw;
struct wiphy *wiphy = hw->wiphy;
- struct ath12k *ar = ath12k_ah_to_ar(ah);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
struct ath12k_base *ab = ar->ab;
struct ath12k_pdev *pdev;
struct ath12k_pdev_cap *cap;
@@ -7967,39 +9061,74 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
WLAN_CIPHER_SUITE_GCMP_256,
WLAN_CIPHER_SUITE_CCMP_256,
};
- int ret;
- u32 ht_cap = 0;
+ int ret, i, j;
+ u32 ht_cap = U32_MAX, antennas_rx = 0, antennas_tx = 0;
+ bool is_6ghz = false, is_raw_mode = false, is_monitor_disable = false;
+ u8 *mac_addr = NULL;
+ u8 mbssid_max_interfaces = 0;
- pdev = ar->pdev;
+ wiphy->max_ap_assoc_sta = 0;
- if (ab->pdevs_macaddr_valid)
- ether_addr_copy(ar->mac_addr, pdev->mac_addr);
- else
- ether_addr_copy(ar->mac_addr, ab->mac_addr);
+ for_each_ar(ah, ar, i) {
+ u32 ht_cap_info = 0;
- ret = ath12k_mac_setup_register(ar, &ht_cap, hw->wiphy->bands);
- if (ret)
- goto out;
+ pdev = ar->pdev;
+ if (ar->ab->pdevs_macaddr_valid) {
+ ether_addr_copy(ar->mac_addr, pdev->mac_addr);
+ } else {
+ ether_addr_copy(ar->mac_addr, ar->ab->mac_addr);
+ ar->mac_addr[4] += ar->pdev_idx;
+ }
- wiphy->max_ap_assoc_sta = ar->max_num_stations;
+ ret = ath12k_mac_setup_register(ar, &ht_cap_info, hw->wiphy->bands);
+ if (ret)
+ goto err_cleanup_unregister;
+
+ ht_cap &= ht_cap_info;
+ wiphy->max_ap_assoc_sta += ar->max_num_stations;
+
+ /* Advertise the maximum antenna support across all radios; the
+ * driver can handle per pdev specific antenna settings based on
+ * the pdev cap when antenna changes are made.
+ */
+ cap = &pdev->cap;
- cap = &pdev->cap;
+ antennas_rx = max_t(u32, antennas_rx, cap->rx_chain_mask);
+ antennas_tx = max_t(u32, antennas_tx, cap->tx_chain_mask);
- wiphy->available_antennas_rx = cap->rx_chain_mask;
- wiphy->available_antennas_tx = cap->tx_chain_mask;
+ if (ar->supports_6ghz)
+ is_6ghz = true;
- SET_IEEE80211_PERM_ADDR(hw, ar->mac_addr);
+ if (test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags))
+ is_raw_mode = true;
+
+ if (!ar->ab->hw_params->supports_monitor)
+ is_monitor_disable = true;
+
+ if (i == 0)
+ mac_addr = ar->mac_addr;
+ else
+ mac_addr = ab->mac_addr;
+
+ mbssid_max_interfaces += TARGET_NUM_VDEVS;
+ }
+
+ wiphy->available_antennas_rx = antennas_rx;
+ wiphy->available_antennas_tx = antennas_tx;
+
+ SET_IEEE80211_PERM_ADDR(hw, mac_addr);
SET_IEEE80211_DEV(hw, ab->dev);
ret = ath12k_mac_setup_iface_combinations(ah);
if (ret) {
ath12k_err(ab, "failed to setup interface combinations: %d\n", ret);
- goto err_cleanup_unregister;
+ goto err_complete_cleanup_unregister;
}
wiphy->interface_modes = ath12k_mac_get_ifmodes(ah);
- if (wiphy->bands[NL80211_BAND_2GHZ] &&
+ if (ah->num_radio == 1 &&
+ wiphy->bands[NL80211_BAND_2GHZ] &&
wiphy->bands[NL80211_BAND_5GHZ] &&
wiphy->bands[NL80211_BAND_6GHZ])
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
@@ -8019,7 +9148,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(hw, REPORTS_LOW_ACK);
- if (ht_cap & WMI_HT_CAP_ENABLED) {
+ if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) {
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
@@ -8034,7 +9163,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
* for each band for a dual band capable radio. It will be tricky to
* handle it when the ht capability different for each band.
*/
- if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS ||
+ (ar->supports_6ghz && ab->hw_params->supports_dynamic_smps_6ghz))
wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@@ -8050,6 +9180,12 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
NL80211_FEATURE_AP_SCAN;
+ /* MLO is not yet supported so disable Wireless Extensions for now
+ * to make sure ath12k users don't use it. This flag can be removed
+ * once WIPHY_FLAG_SUPPORTS_MLO is enabled.
+ */
+ wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+
hw->queues = ATH12K_HW_MAX_QUEUES;
wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
@@ -8057,6 +9193,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
hw->vif_data_size = sizeof(struct ath12k_vif);
hw->sta_data_size = sizeof(struct ath12k_sta);
+ hw->extra_tx_headroom = ab->hw_params->iova_mask;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
@@ -8067,7 +9204,10 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa;
wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa);
- if (ar->supports_6ghz) {
+ wiphy->mbssid_max_interfaces = mbssid_max_interfaces;
+ wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD;
+
+ if (is_6ghz) {
wiphy_ext_feature_set(wiphy,
NL80211_EXT_FEATURE_FILS_DISCOVERY);
wiphy_ext_feature_set(wiphy,
@@ -8078,19 +9218,37 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
ath12k_reg_init(hw);
- if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
+ if (!is_raw_mode) {
hw->netdev_features = NETIF_F_HW_CSUM;
ieee80211_hw_set(hw, SW_CRYPTO_CONTROL);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
}
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
+ wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
+ wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
+ wiphy->max_sched_scan_plan_interval =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
+ wiphy->max_sched_scan_plan_iterations =
+ WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
+ wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
+ }
+
+ ret = ath12k_wow_init(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to init wow: %d\n", ret);
+ goto err_free_if_combs;
+ }
+
ret = ieee80211_register_hw(hw);
if (ret) {
ath12k_err(ab, "ieee80211 registration failed: %d\n", ret);
goto err_free_if_combs;
}
- if (!ab->hw_params->supports_monitor)
+ if (is_monitor_disable)
/* There's a race between calling ieee80211_register_hw()
* and here where the monitor mode is enabled for a little
* while. But that time is so short and in practice it makes
@@ -8098,26 +9256,38 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah)
*/
wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
- /* Apply the regd received during initialization */
- ret = ath12k_regd_update(ar, true);
- if (ret) {
- ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
- goto err_unregister_hw;
+ for_each_ar(ah, ar, i) {
+ /* Apply the regd received during initialization */
+ ret = ath12k_regd_update(ar, true);
+ if (ret) {
+ ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret);
+ goto err_unregister_hw;
+ }
+
+ ath12k_debugfs_register(ar);
}
return 0;
err_unregister_hw:
+ for_each_ar(ah, ar, i)
+ ath12k_debugfs_unregister(ar);
+
ieee80211_unregister_hw(hw);
err_free_if_combs:
kfree(wiphy->iface_combinations[0].limits);
kfree(wiphy->iface_combinations);
+err_complete_cleanup_unregister:
+ i = ah->num_radio;
+
err_cleanup_unregister:
- ath12k_mac_cleanup_unregister(ar);
+ for (j = 0; j < i; j++) {
+ ar = ath12k_ah_to_ar(ah, j);
+ ath12k_mac_cleanup_unregister(ar);
+ }
-out:
SET_IEEE80211_DEV(hw, NULL);
return ret;
@@ -8161,7 +9331,6 @@ static void ath12k_mac_setup(struct ath12k *ar)
INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
- clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
}
int ath12k_mac_register(struct ath12k_base *ab)
@@ -8238,15 +9407,17 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
ah->hw = hw;
ah->num_radio = num_pdev_map;
+ mutex_init(&ah->hw_mutex);
+
for (i = 0; i < num_pdev_map; i++) {
ab = pdev_map[i].ab;
pdev_idx = pdev_map[i].pdev_idx;
pdev = &ab->pdevs[pdev_idx];
- ar = ath12k_ah_to_ar(ah);
+ ar = ath12k_ah_to_ar(ah, i);
ar->ah = ah;
ar->ab = ab;
- ar->hw_link_id = i;
+ ar->hw_link_id = pdev->hw_link_id;
ar->pdev = pdev;
ar->pdev_idx = pdev_idx;
pdev->ar = ar;
@@ -8324,3 +9495,34 @@ err:
return ret;
}
+
+int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct wmi_sta_keepalive_arg arg = {};
+ struct ath12k *ar = arvif->ar;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ return 0;
+
+ if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map))
+ return 0;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enabled = 1;
+ arg.method = method;
+ arg.interval = interval;
+
+ ret = ath12k_wmi_sta_keepalive(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
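A hedged call-site sketch for the new keepalive helper; the method constant and the 30-second interval here are illustrative assumptions, not taken from this diff:

```c
/* Illustrative call site only: WMI_STA_KEEPALIVE_METHOD_NULL_FRAME and
 * the 30 s interval are assumptions. The helper itself asserts that
 * conf_mutex is held and no-ops for non-STA vdevs.
 */
mutex_lock(&ar->conf_mutex);
ret = ath12k_mac_vif_set_keepalive(arvif,
				   WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
				   30 /* seconds */);
mutex_unlock(&ar->conf_mutex);
```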
diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h
index 3f5e1be0dff9..5efbb6822628 100644
--- a/drivers/net/wireless/ath/ath12k/mac.h
+++ b/drivers/net/wireless/ath/ath12k/mac.h
@@ -9,6 +9,7 @@
#include <net/mac80211.h>
#include <net/cfg80211.h>
+#include "wmi.h"
struct ath12k;
struct ath12k_base;
@@ -78,4 +79,12 @@ enum ath12k_supported_bw ath12k_mac_mac80211_bw_to_ath12k_bw(enum rate_info_bw b
enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher);
int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable);
int ath12k_mac_rfkill_config(struct ath12k *ar);
+int ath12k_mac_wait_tx_complete(struct ath12k *ar);
+void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb);
+void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id);
+int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif,
+ enum wmi_sta_keepalive_method method,
+ u32 interval);
+u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar);
+
#endif
diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c
index adb8c3ec1950..df96b0f91f54 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.c
+++ b/drivers/net/wireless/ath/ath12k/mhi.c
@@ -16,37 +16,10 @@
#define MHI_TIMEOUT_DEFAULT_MS 90000
#define OTP_INVALID_BOARD_ID 0xFFFF
#define OTP_VALID_DUALMAC_BOARD_ID_MASK 0x1000
+#define MHI_CB_INVALID 0xff
static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = {
{
- .num = 0,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 1,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 1,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
.num = 20,
.name = "IPCR",
.num_elements = 32,
@@ -112,34 +85,6 @@ const struct mhi_controller_config ath12k_mhi_config_qcn9274 = {
static const struct mhi_channel_config ath12k_mhi_channels_wcn7850[] = {
{
- .num = 0,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 0,
- .dir = DMA_TO_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
- .num = 1,
- .name = "LOOPBACK",
- .num_elements = 32,
- .event_ring = 0,
- .dir = DMA_FROM_DEVICE,
- .ee_mask = 0x4,
- .pollcfg = 0,
- .doorbell = MHI_DB_BRST_DISABLE,
- .lpm_notify = false,
- .offload_channel = false,
- .doorbell_mode_switch = false,
- .auto_queue = false,
- },
- {
.num = 20,
.name = "IPCR",
.num_elements = 64,
@@ -196,7 +141,7 @@ const struct mhi_controller_config ath12k_mhi_config_wcn7850 = {
.max_channels = 128,
.timeout_ms = 2000,
.use_bounce_buf = false,
- .buf_len = 0,
+ .buf_len = 8192,
.num_channels = ARRAY_SIZE(ath12k_mhi_channels_wcn7850),
.ch_cfg = ath12k_mhi_channels_wcn7850,
.num_events = ARRAY_SIZE(ath12k_mhi_events_wcn7850),
@@ -324,6 +269,7 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
enum mhi_callback cb)
{
struct ath12k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev);
+ struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
ath12k_dbg(ab, ATH12K_DBG_BOOT, "mhi notify status reason %s\n",
ath12k_mhi_op_callback_to_str(cb));
@@ -333,12 +279,20 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl,
ath12k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n");
break;
case MHI_CB_EE_RDDM:
+ if (ab_pci->mhi_pre_cb == MHI_CB_EE_RDDM) {
+ ath12k_dbg(ab, ATH12K_DBG_BOOT,
+ "do not queue again for consecutive RDDM event\n");
+ break;
+ }
+
if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags)))
queue_work(ab->workqueue_aux, &ab->reset_work);
break;
default:
break;
}
+
+ ab_pci->mhi_pre_cb = cb;
}
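The RDDM handling above records the previous MHI callback in mhi_pre_cb so that back-to-back RDDM events queue only one reset. A generic standalone sketch of that edge-trigger pattern:

```c
#include <stdbool.h>

enum cb_id { CB_INVALID = 0xff, CB_SYS_ERROR = 1, CB_EE_RDDM = 2 };

struct cb_state {
	enum cb_id prev; /* initialised to CB_INVALID, like mhi_pre_cb */
};

/* Queue work only on the transition into CB_EE_RDDM, never on a repeat
 * of the same event.
 */
static bool rddm_should_queue(struct cb_state *st, enum cb_id cb)
{
	bool queue = (cb == CB_EE_RDDM && st->prev != CB_EE_RDDM);

	st->prev = cb; /* remember for the next callback */
	return queue;
}
```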
static int ath12k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl,
@@ -369,6 +323,7 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
if (!mhi_ctrl)
return -ENOMEM;
+ ab_pci->mhi_pre_cb = MHI_CB_INVALID;
ab_pci->mhi_ctrl = mhi_ctrl;
mhi_ctrl->cntrl_dev = ab->dev;
mhi_ctrl->regs = ab->mem;
@@ -385,7 +340,6 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci)
"failed to read board id\n");
} else if (board_id & OTP_VALID_DUALMAC_BOARD_ID_MASK) {
dualmac = true;
- ab->slo_capable = false;
ath12k_dbg(ab, ATH12K_DBG_BOOT,
"dualmac fw selected for board id: %x\n", board_id);
}
@@ -470,6 +424,8 @@ static char *ath12k_mhi_state_to_str(enum ath12k_mhi_state mhi_state)
return "POWER_ON";
case ATH12K_MHI_POWER_OFF:
return "POWER_OFF";
+ case ATH12K_MHI_POWER_OFF_KEEP_DEV:
+ return "POWER_OFF_KEEP_DEV";
case ATH12K_MHI_FORCE_POWER_OFF:
return "FORCE_POWER_OFF";
case ATH12K_MHI_SUSPEND:
@@ -501,6 +457,7 @@ static void ath12k_mhi_set_state_bit(struct ath12k_pci *ab_pci,
set_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state);
break;
case ATH12K_MHI_POWER_OFF:
+ case ATH12K_MHI_POWER_OFF_KEEP_DEV:
case ATH12K_MHI_FORCE_POWER_OFF:
clear_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state);
clear_bit(ATH12K_MHI_TRIGGER_RDDM, &ab_pci->mhi_state);
@@ -544,6 +501,7 @@ static int ath12k_mhi_check_state_bit(struct ath12k_pci *ab_pci,
return 0;
break;
case ATH12K_MHI_POWER_OFF:
+ case ATH12K_MHI_POWER_OFF_KEEP_DEV:
case ATH12K_MHI_SUSPEND:
if (test_bit(ATH12K_MHI_POWER_ON, &ab_pci->mhi_state) &&
!test_bit(ATH12K_MHI_SUSPEND, &ab_pci->mhi_state))
@@ -594,12 +552,27 @@ static int ath12k_mhi_set_state(struct ath12k_pci *ab_pci,
ret = 0;
break;
case ATH12K_MHI_POWER_ON:
- ret = mhi_async_power_up(ab_pci->mhi_ctrl);
+ /* In case of resume, QRTR's resume_early() is called
+ * right after ath12k's resume_early(). Since QRTR requires
+ * the MHI mission mode state when preparing IPCR channels
+ * (see the ee_mask of that channel), we need to use the 'sync'
+ * version here to make sure MHI is in that state when we
+ * return. Otherwise QRTR might resume before that state is
+ * reached and fail as a result.
+ *
+ * The 'sync' version works for the non-resume (normal power on)
+ * case as well.
+ */
+ ret = mhi_sync_power_up(ab_pci->mhi_ctrl);
break;
case ATH12K_MHI_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, true);
ret = 0;
break;
+ case ATH12K_MHI_POWER_OFF_KEEP_DEV:
+ mhi_power_down_keep_dev(ab_pci->mhi_ctrl, true);
+ ret = 0;
+ break;
case ATH12K_MHI_FORCE_POWER_OFF:
mhi_power_down(ab_pci->mhi_ctrl, false);
ret = 0;
@@ -653,9 +626,17 @@ out:
return ret;
}
-void ath12k_mhi_stop(struct ath12k_pci *ab_pci)
+void ath12k_mhi_stop(struct ath12k_pci *ab_pci, bool is_suspend)
{
- ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF);
+ /* During suspend we need to use the mhi_power_down_keep_dev()
+ * workaround, otherwise ath12k_core_resume() will time out
+ * during resume.
+ */
+ if (is_suspend)
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF_KEEP_DEV);
+ else
+ ath12k_mhi_set_state(ab_pci, ATH12K_MHI_POWER_OFF);
+
ath12k_mhi_set_state(ab_pci, ATH12K_MHI_DEINIT);
}
diff --git a/drivers/net/wireless/ath/ath12k/mhi.h b/drivers/net/wireless/ath/ath12k/mhi.h
index ebc23640ce7a..9362ad1958c3 100644
--- a/drivers/net/wireless/ath/ath12k/mhi.h
+++ b/drivers/net/wireless/ath/ath12k/mhi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: BSD-3-Clause-Clear */
/*
* Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _ATH12K_MHI_H
#define _ATH12K_MHI_H
@@ -22,6 +22,7 @@ enum ath12k_mhi_state {
ATH12K_MHI_DEINIT,
ATH12K_MHI_POWER_ON,
ATH12K_MHI_POWER_OFF,
+ ATH12K_MHI_POWER_OFF_KEEP_DEV,
ATH12K_MHI_FORCE_POWER_OFF,
ATH12K_MHI_SUSPEND,
ATH12K_MHI_RESUME,
@@ -34,7 +35,7 @@ extern const struct mhi_controller_config ath12k_mhi_config_qcn9274;
extern const struct mhi_controller_config ath12k_mhi_config_wcn7850;
int ath12k_mhi_start(struct ath12k_pci *ar_pci);
-void ath12k_mhi_stop(struct ath12k_pci *ar_pci);
+void ath12k_mhi_stop(struct ath12k_pci *ar_pci, bool is_suspend);
int ath12k_mhi_register(struct ath12k_pci *ar_pci);
void ath12k_mhi_unregister(struct ath12k_pci *ar_pci);
void ath12k_mhi_set_mhictrl_reset(struct ath12k_base *ab);
diff --git a/drivers/net/wireless/ath/ath12k/p2p.c b/drivers/net/wireless/ath/ath12k/p2p.c
index d334df720032..3a851ee15b2f 100644
--- a/drivers/net/wireless/ath/ath12k/p2p.c
+++ b/drivers/net/wireless/ath/ath12k/p2p.c
@@ -121,7 +121,7 @@ static void ath12k_p2p_noa_update_vdev_iter(void *data, u8 *mac,
struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif);
struct ath12k_p2p_noa_arg *arg = data;
- if (arvif->vdev_id != arg->vdev_id)
+ if (arvif->ar != arg->ar || arvif->vdev_id != arg->vdev_id)
return;
ath12k_p2p_noa_update(arvif, arg->noa);
@@ -132,6 +132,7 @@ void ath12k_p2p_noa_update_by_vdev_id(struct ath12k *ar, u32 vdev_id,
{
struct ath12k_p2p_noa_arg arg = {
.vdev_id = vdev_id,
+ .ar = ar,
.noa = noa,
};
diff --git a/drivers/net/wireless/ath/ath12k/p2p.h b/drivers/net/wireless/ath/ath12k/p2p.h
index 5768139a7844..b2eec51a9900 100644
--- a/drivers/net/wireless/ath/ath12k/p2p.h
+++ b/drivers/net/wireless/ath/ath12k/p2p.h
@@ -12,6 +12,7 @@ struct ath12k_wmi_p2p_noa_info;
struct ath12k_p2p_noa_arg {
u32 vdev_id;
+ struct ath12k *ar;
const struct ath12k_wmi_p2p_noa_info *noa;
};
diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c
index 14954bc05144..9e0b9e329bda 100644
--- a/drivers/net/wireless/ath/ath12k/pci.c
+++ b/drivers/net/wireless/ath/ath12k/pci.c
@@ -350,6 +350,7 @@ static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
netif_napi_del(&irq_grp->napi);
+ free_netdev(irq_grp->napi_ndev);
}
}
@@ -472,7 +473,8 @@ static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
int i;
- clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
+ if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+ return;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
@@ -560,8 +562,9 @@ static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
- int i, j, ret, num_vectors = 0;
+ int i, j, n, ret, num_vectors = 0;
u32 user_base_data = 0, base_vector = 0, base_idx;
+ struct ath12k_ext_irq_grp *irq_grp;
base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
@@ -572,13 +575,18 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
return ret;
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
- struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
+ irq_grp = &ab->ext_irq_grp[i];
u32 num_irq = 0;
irq_grp->ab = ab;
irq_grp->grp_id = i;
- init_dummy_netdev(&irq_grp->napi_ndev);
- netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
+ irq_grp->napi_ndev = alloc_netdev_dummy(0);
+ if (!irq_grp->napi_ndev) {
+ ret = -ENOMEM;
+ goto fail_allocate;
+ }
+
+ netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
ath12k_pci_ext_grp_napi_poll);
if (ab->hw_params->ring_mask->tx[i] ||
@@ -611,13 +619,23 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
if (ret) {
ath12k_err(ab, "failed request irq %d: %d\n",
vector, ret);
- return ret;
+ goto fail_request;
}
}
ath12k_pci_ext_grp_disable(irq_grp);
}
return 0;
+
+fail_request:
+ /* the i-th napi_ndev was allocated successfully, so free it too */
+ i += 1;
+fail_allocate:
+ for (n = 0; n < i; n++) {
+ irq_grp = &ab->ext_irq_grp[n];
+ free_netdev(irq_grp->napi_ndev);
+ }
+ return ret;
}
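The ext IRQ setup above gains a proper unwind path: on partial failure, the dummy netdevs allocated so far are freed, and the "i += 1" before the fail_allocate label also covers the index whose allocation succeeded before a later request_irq() failed. A standalone sketch of the pattern, with plain malloc/free standing in for the kernel helpers:

```c
#include <stdlib.h>

#define NGRP 4

/* Unwind pattern from the hunk above: on a failed allocation at index i,
 * free indices 0..i-1; when a later step (request_irq() in the driver)
 * fails instead, the driver bumps i first so that index i is freed too.
 */
static int setup_groups(void *grp[NGRP])
{
	int i, n;

	for (i = 0; i < NGRP; i++) {
		grp[i] = malloc(64); /* stands in for alloc_netdev_dummy() */
		if (!grp[i])
			goto fail;
	}
	return 0;

fail:
	for (n = 0; n < i; n++) {
		free(grp[n]);
		grp[n] = NULL;
	}
	return -1;
}
```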
static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci,
@@ -872,7 +890,7 @@ static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
goto release_region;
}
- ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
+ ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%p\n", ab->mem);
return 0;
release_region:
@@ -1090,14 +1108,14 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
{
int i;
- set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
-
for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
napi_enable(&irq_grp->napi);
ath12k_pci_ext_grp_enable(irq_grp);
}
+
+ set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
}
void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
@@ -1271,7 +1289,7 @@ int ath12k_pci_power_up(struct ath12k_base *ab)
return 0;
}
-void ath12k_pci_power_down(struct ath12k_base *ab)
+void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend)
{
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
@@ -1280,11 +1298,18 @@ void ath12k_pci_power_down(struct ath12k_base *ab)
ath12k_pci_force_wake(ab_pci->ab);
ath12k_pci_msi_disable(ab_pci);
- ath12k_mhi_stop(ab_pci);
+ ath12k_mhi_stop(ab_pci, is_suspend);
clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath12k_pci_sw_reset(ab_pci->ab, false);
}
+static int ath12k_pci_panic_handler(struct ath12k_base *ab)
+{
+ ath12k_pci_sw_reset(ab, false);
+
+ return NOTIFY_OK;
+}
+
static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
.start = ath12k_pci_start,
.stop = ath12k_pci_stop,
@@ -1302,6 +1327,7 @@ static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
.ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
.ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
.get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
+ .panic_handler = ath12k_pci_panic_handler,
};
static
@@ -1503,7 +1529,7 @@ static void ath12k_pci_remove(struct pci_dev *pdev)
ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
- ath12k_pci_power_down(ab);
+ ath12k_pci_power_down(ab, false);
ath12k_qmi_deinit_service(ab);
goto qmi_fail;
}
@@ -1531,7 +1557,7 @@ static void ath12k_pci_shutdown(struct pci_dev *pdev)
struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
- ath12k_pci_power_down(ab);
+ ath12k_pci_power_down(ab, false);
}
static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
@@ -1558,9 +1584,36 @@ static __maybe_unused int ath12k_pci_pm_resume(struct device *dev)
return ret;
}
-static SIMPLE_DEV_PM_OPS(ath12k_pci_pm_ops,
- ath12k_pci_pm_suspend,
- ath12k_pci_pm_resume);
+static __maybe_unused int ath12k_pci_pm_suspend_late(struct device *dev)
+{
+ struct ath12k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath12k_core_suspend_late(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to late suspend core: %d\n", ret);
+
+ return ret;
+}
+
+static __maybe_unused int ath12k_pci_pm_resume_early(struct device *dev)
+{
+ struct ath12k_base *ab = dev_get_drvdata(dev);
+ int ret;
+
+ ret = ath12k_core_resume_early(ab);
+ if (ret)
+ ath12k_warn(ab, "failed to early resume core: %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops __maybe_unused ath12k_pci_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend,
+ ath12k_pci_pm_resume)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend_late,
+ ath12k_pci_pm_resume_early)
+};
static struct pci_driver ath12k_pci_driver = {
.name = "ath12k_pci",
diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h
index ca93693ba4e9..31584a7ad80e 100644
--- a/drivers/net/wireless/ath/ath12k/pci.h
+++ b/drivers/net/wireless/ath/ath12k/pci.h
@@ -104,6 +104,7 @@ struct ath12k_pci {
struct mhi_controller *mhi_ctrl;
const struct ath12k_msi_config *msi_config;
unsigned long mhi_state;
+ enum mhi_callback mhi_pre_cb;
u32 register_window;
/* protects register_window above */
@@ -143,5 +144,5 @@ int ath12k_pci_hif_resume(struct ath12k_base *ab);
void ath12k_pci_stop(struct ath12k_base *ab);
int ath12k_pci_start(struct ath12k_base *ab);
int ath12k_pci_power_up(struct ath12k_base *ab);
-void ath12k_pci_power_down(struct ath12k_base *ab);
+void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend);
#endif /* ATH12K_PCI_H */
diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c
index 92845ffff44a..b93ce9f87f61 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.c
+++ b/drivers/net/wireless/ath/ath12k/qmi.c
@@ -583,6 +583,24 @@ static const struct qmi_elem_info qmi_wlanfw_phy_cap_resp_msg_v01_ei[] = {
board_id),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ single_chip_mlo_support_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_wlanfw_phy_cap_resp_msg_v01,
+ single_chip_mlo_support),
+ },
+ {
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -2005,7 +2023,15 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
u8 hw_link_id = 0;
int i;
+ if (!(ab->mlo_capable_flags & ATH12K_INTRA_DEVICE_MLO_SUPPORT)) {
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "intra device MLO is disabled hence skip QMI MLO cap");
+ return;
+ }
+
if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
+ ab->mlo_capable_flags = 0;
+
ath12k_dbg(ab, ATH12K_DBG_QMI,
"skip QMI MLO cap due to invalid num_radio %d\n",
ab->qmi.num_radios);
@@ -2015,7 +2041,7 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
req->mlo_capable_valid = 1;
req->mlo_capable = 1;
req->mlo_chip_id_valid = 1;
- req->mlo_chip_id = 0;
+ req->mlo_chip_id = ab->device_id;
req->mlo_group_id_valid = 1;
req->mlo_group_id = 0;
req->max_mlo_peer_valid = 1;
@@ -2027,7 +2053,7 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
req->mlo_num_chips = 1;
info = &req->mlo_chip_info[0];
- info->chip_id = 0;
+ info->chip_id = ab->device_id;
info->num_local_links = ab->qmi.num_radios;
for (i = 0; i < info->num_local_links; i++) {
@@ -2124,9 +2150,6 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
struct qmi_txn txn;
int ret;
- if (!ab->slo_capable)
- goto out;
-
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_phy_cap_resp_msg_v01_ei, &resp);
if (ret < 0)
@@ -2151,6 +2174,13 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
goto out;
}
+ if (resp.single_chip_mlo_support_valid) {
+ if (resp.single_chip_mlo_support)
+ ab->mlo_capable_flags |= ATH12K_INTRA_DEVICE_MLO_SUPPORT;
+ else
+ ab->mlo_capable_flags &= ~ATH12K_INTRA_DEVICE_MLO_SUPPORT;
+ }
+
if (!resp.num_phy_valid) {
ret = -ENODATA;
goto out;
@@ -2158,9 +2188,11 @@ static void ath12k_qmi_phy_cap_send(struct ath12k_base *ab)
ab->qmi.num_radios = resp.num_phy;
- ath12k_dbg(ab, ATH12K_DBG_QMI, "phy capability resp valid %d num_phy %d valid %d board_id %d\n",
+ ath12k_dbg(ab, ATH12K_DBG_QMI,
+ "phy capability resp valid %d num_phy %d valid %d board_id %d valid %d single_chip_mlo_support %d\n",
resp.num_phy_valid, resp.num_phy,
- resp.board_id_valid, resp.board_id);
+ resp.board_id_valid, resp.board_id,
+ resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support);
return;
@@ -2325,8 +2357,9 @@ static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
if (!ab->qmi.target_mem[i].v.addr)
continue;
+
dma_free_coherent(ab->dev,
- ab->qmi.target_mem[i].size,
+ ab->qmi.target_mem[i].prev_size,
ab->qmi.target_mem[i].v.addr,
ab->qmi.target_mem[i].paddr);
ab->qmi.target_mem[i].v.addr = NULL;
@@ -2352,6 +2385,20 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
case M3_DUMP_REGION_TYPE:
case PAGEABLE_MEM_REGION_TYPE:
case CALDB_MEM_REGION_TYPE:
+ /* Firmware reloads in recovery/resume.
+ * In such cases, no need to allocate memory for FW again.
+ */
+ if (chunk->v.addr) {
+ if (chunk->prev_type == chunk->type &&
+ chunk->prev_size == chunk->size)
+ goto this_chunk_done;
+
+ /* cannot reuse the existing chunk */
+ dma_free_coherent(ab->dev, chunk->prev_size,
+ chunk->v.addr, chunk->paddr);
+ chunk->v.addr = NULL;
+ }
+
chunk->v.addr = dma_alloc_coherent(ab->dev,
chunk->size,
&chunk->paddr,
@@ -2370,6 +2417,10 @@ static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
chunk->type, chunk->size);
return -ENOMEM;
}
+
+ chunk->prev_type = chunk->type;
+ chunk->prev_size = chunk->size;
+this_chunk_done:
break;
default:
ath12k_warn(ab, "memory type %u not supported\n",
@@ -2452,7 +2503,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
ab->qmi.dev_mem[i].size =
resp.dev_mem[i].size;
ath12k_dbg(ab, ATH12K_DBG_QMI,
- "devmem [%d] start ox%llx size %llu\n", i,
+ "devmem [%d] start 0x%llx size %llu\n", i,
ab->qmi.dev_mem[i].start,
ab->qmi.dev_mem[i].size);
}
@@ -2487,7 +2538,7 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab,
struct qmi_wlanfw_bdf_download_resp_msg_v01 resp = {};
struct qmi_txn txn;
const u8 *temp = data;
- int ret;
+ int ret = 0;
u32 remaining = len;
req = kzalloc(sizeof(*req), GFP_KERNEL);
@@ -2666,6 +2717,19 @@ out:
return ret;
}
+static void ath12k_qmi_m3_free(struct ath12k_base *ab)
+{
+ struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
+
+ if (!m3_mem->vaddr)
+ return;
+
+ dma_free_coherent(ab->dev, m3_mem->size,
+ m3_mem->vaddr, m3_mem->paddr);
+ m3_mem->vaddr = NULL;
+ m3_mem->size = 0;
+}
+
static int ath12k_qmi_m3_load(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
@@ -2675,10 +2739,6 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
size_t m3_len;
int ret;
- if (m3_mem->vaddr)
- /* m3 firmware buffer is already available in the DMA buffer */
- return 0;
-
if (ab->fw.m3_data && ab->fw.m3_len > 0) {
/* firmware-N.bin had a m3 firmware file so use that */
m3_data = ab->fw.m3_data;
@@ -2700,6 +2760,15 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
m3_len = fw->size;
}
+ /* In recovery/resume cases the M3 buffer is not freed; try to reuse it */
+ if (m3_mem->vaddr) {
+ if (m3_mem->size >= m3_len)
+ goto skip_m3_alloc;
+
+ /* Old buffer is too small, free and reallocate */
+ ath12k_qmi_m3_free(ab);
+ }
+
m3_mem->vaddr = dma_alloc_coherent(ab->dev,
m3_len, &m3_mem->paddr,
GFP_KERNEL);
@@ -2710,6 +2779,7 @@ static int ath12k_qmi_m3_load(struct ath12k_base *ab)
goto out;
}
+skip_m3_alloc:
memcpy(m3_mem->vaddr, m3_data, m3_len);
m3_mem->size = m3_len;
@@ -2721,19 +2791,6 @@ out:
return ret;
}
-static void ath12k_qmi_m3_free(struct ath12k_base *ab)
-{
- struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
-
- if (!m3_mem->vaddr)
- return;
-
- dma_free_coherent(ab->dev, m3_mem->size,
- m3_mem->vaddr, m3_mem->paddr);
- m3_mem->vaddr = NULL;
- m3_mem->size = 0;
-}
-
static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
{
struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
@@ -3178,6 +3235,9 @@ static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = {
.decoded_size = sizeof(struct qmi_wlanfw_fw_ready_ind_msg_v01),
.fn = ath12k_qmi_msg_fw_ready_cb,
},
+
+ /* end of list */
+ {},
};
static int ath12k_qmi_ops_new_server(struct qmi_handle *qmi_hdl,
@@ -3261,7 +3321,8 @@ static void ath12k_qmi_driver_event_work(struct work_struct *work)
case ATH12K_QMI_EVENT_FW_READY:
clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
- ath12k_hal_dump_srng_stats(ab);
+ if (ab->is_reset)
+ ath12k_hal_dump_srng_stats(ab);
queue_work(ab->workqueue, &ab->restart_work);
break;
}
diff --git a/drivers/net/wireless/ath/ath12k/qmi.h b/drivers/net/wireless/ath/ath12k/qmi.h
index 6ee33c9851c6..0dfcbd8cb59b 100644
--- a/drivers/net/wireless/ath/ath12k/qmi.h
+++ b/drivers/net/wireless/ath/ath12k/qmi.h
@@ -96,6 +96,8 @@ struct ath12k_qmi_event_msg {
struct target_mem_chunk {
u32 size;
u32 type;
+ u32 prev_size;
+ u32 prev_type;
dma_addr_t paddr;
union {
void __iomem *ioaddr;
@@ -265,6 +267,8 @@ struct qmi_wlanfw_phy_cap_resp_msg_v01 {
u8 num_phy;
u8 board_id_valid;
u32 board_id;
+ u8 single_chip_mlo_support_valid;
+ u8 single_chip_mlo_support;
};
#define QMI_WLANFW_IND_REGISTER_REQ_MSG_V01_MAX_LEN 54
diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c
index f308e9a6ed55..439d61f284d8 100644
--- a/drivers/net/wireless/ath/ath12k/reg.c
+++ b/drivers/net/wireless/ath/ath12k/reg.c
@@ -49,8 +49,8 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath12k_wmi_init_country_arg arg;
struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
- struct ath12k *ar = ath12k_ah_to_ar(ah);
- int ret;
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+ int ret, i;
ath12k_dbg(ar->ab, ATH12K_DBG_REG,
"Regulatory Notification received for %s\n", wiphy_name(wiphy));
@@ -85,10 +85,16 @@ ath12k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
memcpy(&arg.cc_info.alpha2, request->alpha2, 2);
arg.cc_info.alpha2[2] = 0;
- ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
- if (ret)
- ath12k_warn(ar->ab,
- "INIT Country code set to fw failed : %d\n", ret);
+ /* Allow fresh updates to wiphy regd */
+ ah->regd_updated = false;
+
+ /* Send the reg change request to all the radios */
+ for_each_ar(ah, ar, i) {
+ ret = ath12k_wmi_send_init_country_cmd(ar, &arg);
+ if (ret)
+ ath12k_warn(ar->ab,
+ "INIT Country code set to fw failed : %d\n", ret);
+ }
}
int ath12k_reg_update_chan_list(struct ath12k *ar)
@@ -200,12 +206,34 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig,
int ath12k_regd_update(struct ath12k *ar, bool init)
{
- struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+ struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+ struct ieee80211_hw *hw = ah->hw;
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath12k_base *ab;
+ int i;
ab = ar->ab;
+
+ /* If one of the radios within ah has already updated the regd for
+ * the wiphy, then avoid setting regd again
+ */
+ if (ah->regd_updated)
+ return 0;
+
+ /* Firmware provides reg rules which are similar for the 2 GHz and
+ * 5 GHz pdevs, but the 6 GHz pdev has a superset of all rules,
+ * including rules for all bands, so we prefer the 6 GHz pdev's
+ * rules for setting up the wiphy regd.
+ * If a 6 GHz pdev is part of the ath12k_hw, wait for it; else pick
+ * the first pdev that calls this function and use its regd to
+ * update the global hw regd.
+ * The regd_updated flag set at the end will not allow any further
+ * updates.
+ */
+ if (ah->use_6ghz_regd && !ar->supports_6ghz)
+ return 0;
+
pdev_id = ar->pdev_idx;
spin_lock_bh(&ab->base_lock);
@@ -258,12 +286,20 @@ int ath12k_regd_update(struct ath12k *ar, bool init)
if (ret)
goto err;
- if (ar->state == ATH12K_STATE_ON) {
+ if (ah->state != ATH12K_HW_STATE_ON)
+ goto skip;
+
+ ah->regd_updated = true;
+ /* Apply the new regd to all the radios; this is expected to happen
+ * only once, since we check ah->regd_updated and allow it here only
+ * once.
+ */
+ for_each_ar(ah, ar, i) {
+ ab = ar->ab;
ret = ath12k_reg_update_chan_list(ar);
if (ret)
goto err;
}
-
+skip:
return 0;
err:
ath12k_warn(ab, "failed to perform regd update : %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath12k/trace.h b/drivers/net/wireless/ath/ath12k/trace.h
index 240737e1542d..253c67accb0e 100644
--- a/drivers/net/wireless/ath/ath12k/trace.h
+++ b/drivers/net/wireless/ath/ath12k/trace.h
@@ -36,8 +36,8 @@ TRACE_EVENT(ath12k_htt_pktlog,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->ab->dev));
- __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->buf_len = buf_len;
__entry->pktlog_checksum = pktlog_checksum;
memcpy(__get_dynamic_array(pktlog), buf, buf_len);
@@ -73,8 +73,8 @@ TRACE_EVENT(ath12k_htt_ppdu_stats,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->ab->dev));
- __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->len = len;
__entry->info = ar->pdev->timestamp.info;
__entry->sync_tstmp_lo_us = ar->pdev->timestamp.sync_timestamp_hi_us;
@@ -117,8 +117,8 @@ TRACE_EVENT(ath12k_htt_rxdesc,
),
TP_fast_assign(
- __assign_str(device, dev_name(ar->ab->dev));
- __assign_str(driver, dev_driver_string(ar->ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->len = len;
__entry->type = type;
__entry->info = ar->pdev->timestamp.info;
@@ -153,8 +153,8 @@ TRACE_EVENT(ath12k_wmi_diag,
),
TP_fast_assign(
- __assign_str(device, dev_name(ab->dev));
- __assign_str(driver, dev_driver_string(ab->dev));
+ __assign_str(device);
+ __assign_str(driver);
__entry->len = len;
memcpy(__get_dynamic_array(data), data, len);
),
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 9d69a1769926..9f6be557365e 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -228,9 +228,12 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
config->peer_map_unmap_version = 0x32;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
+ config->ema_max_vap_cnt = ab->num_radios;
+ config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD;
+ config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt;
if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map))
- config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B;
+ config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B;
}
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -497,6 +500,7 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle,
mac_caps = wmi_mac_phy_caps + phy_idx;
pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps);
+ pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps);
pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands);
pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density);
@@ -841,6 +845,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
cmd->vdev_subtype = cpu_to_le32(args->subtype);
cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX);
cmd->pdev_id = cpu_to_le32(args->pdev_id);
+ cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags);
+ cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id);
cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id);
ether_addr_copy(cmd->vdev_macaddr.addr, macaddr);
@@ -858,20 +864,20 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
len = sizeof(*txrx_streams);
txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
len);
- txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_2G;
+ txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_2G);
txrx_streams->supported_tx_streams =
- args->chains[NL80211_BAND_2GHZ].tx;
+ cpu_to_le32(args->chains[NL80211_BAND_2GHZ].tx);
txrx_streams->supported_rx_streams =
- args->chains[NL80211_BAND_2GHZ].rx;
+ cpu_to_le32(args->chains[NL80211_BAND_2GHZ].rx);
txrx_streams++;
txrx_streams->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_TXRX_STREAMS,
len);
- txrx_streams->band = WMI_TPC_CHAINMASK_CONFIG_BAND_5G;
+ txrx_streams->band = cpu_to_le32(WMI_TPC_CHAINMASK_CONFIG_BAND_5G);
txrx_streams->supported_tx_streams =
- args->chains[NL80211_BAND_5GHZ].tx;
+ cpu_to_le32(args->chains[NL80211_BAND_5GHZ].tx);
txrx_streams->supported_rx_streams =
- args->chains[NL80211_BAND_5GHZ].rx;
+ cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
@@ -1046,6 +1052,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
cmd->he_ops = cpu_to_le32(arg->he_ops);
cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap);
cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags);
+ cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id);
if (!restart) {
if (arg->ssid) {
@@ -1097,7 +1104,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
return ret;
}
-int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
+int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_vdev_up_cmd *cmd;
@@ -1112,14 +1119,20 @@ int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD,
sizeof(*cmd));
- cmd->vdev_id = cpu_to_le32(vdev_id);
- cmd->vdev_assoc_id = cpu_to_le32(aid);
+ cmd->vdev_id = cpu_to_le32(params->vdev_id);
+ cmd->vdev_assoc_id = cpu_to_le32(params->aid);
+
+ ether_addr_copy(cmd->vdev_bssid.addr, params->bssid);
- ether_addr_copy(cmd->vdev_bssid.addr, bssid);
+ if (params->tx_bssid) {
+ ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid);
+ cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx);
+ cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt);
+ }
ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
"WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
- vdev_id, aid, bssid);
+ params->vdev_id, params->aid, params->bssid);
ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID);
if (ret) {
@@ -1776,13 +1789,15 @@ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
- struct sk_buff *bcn)
+ struct sk_buff *bcn,
+ struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
{
struct ath12k_wmi_pdev *wmi = ar->wmi;
struct wmi_bcn_tmpl_cmd *cmd;
struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
struct wmi_tlv *tlv;
struct sk_buff *skb;
+ u32 ema_params = 0;
void *ptr;
int ret, len;
size_t aligned_len = roundup(bcn->len, 4);
@@ -1801,6 +1816,16 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
cmd->buf_len = cpu_to_le32(bcn->len);
+ cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
+ if (ema_args) {
+ u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT);
+ u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX);
+ if (ema_args->bcn_index == 0)
+ u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST);
+ if (ema_args->bcn_index + 1 == ema_args->bcn_cnt)
+ u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
+ cmd->ema_params = cpu_to_le32(ema_params);
+ }
ptr = skb->data + sizeof(*cmd);
@@ -1900,7 +1925,7 @@ static void ath12k_wmi_copy_peer_flags(struct wmi_peer_assoc_complete_cmd *cmd,
if (arg->bw_160)
cmd->peer_flags |= cpu_to_le32(WMI_PEER_160MHZ);
if (arg->bw_320)
- cmd->peer_flags |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
+ cmd->peer_flags_ext |= cpu_to_le32(WMI_PEER_EXT_320MHZ);
/* Typically if STBC is enabled for VHT it should be enabled
* for HT as well
@@ -2723,6 +2748,149 @@ int ath12k_wmi_send_dfs_phyerr_offload_enable_cmd(struct ath12k *ar,
return ret;
}
+int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
+ const u8 *buf, size_t buf_len)
+{
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_pdev_set_bios_interface_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ u8 *ptr;
+ u32 len, len_aligned;
+ int ret;
+
+ len_aligned = roundup(buf_len, sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_bios_interface_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
+ cmd->param_type_id = cpu_to_le32(param_id);
+ cmd->length = cpu_to_le32(buf_len);
+
+ ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, len_aligned);
+ ptr += TLV_HDR_SIZE;
+ memcpy(ptr, buf, buf_len);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
+ skb,
+ WMI_PDEV_SET_BIOS_INTERFACE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_PDEV_SET_BIOS_INTERFACE_CMDID parameter id %d: %d\n",
+ param_id, ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
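A worked example of the TLV sizing used above (a sketch, not driver
code): the byte-array payload is padded to a 4-byte boundary so the TLV
stream stays 32-bit aligned, while cmd->length still reports the real
blob size.

	size_t buf_len = 10;	/* raw BIOS blob length */
	size_t len_aligned = roundup(buf_len, sizeof(u32));	/* 12 */
	size_t total = sizeof(struct wmi_pdev_set_bios_interface_cmd) +
		       TLV_HDR_SIZE + len_aligned;	/* skb allocation */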
+
+int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table)
+{
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_pdev_set_bios_sar_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret;
+ u8 *buf_ptr;
+ u32 len, sar_table_len_aligned, sar_dbs_backoff_len_aligned;
+ const u8 *psar_value = psar_table + ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET;
+ const u8 *pdbs_value = psar_table + ATH12K_ACPI_DBS_BACKOFF_DATA_OFFSET;
+
+ sar_table_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_TABLE_LEN, sizeof(u32));
+ sar_dbs_backoff_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN,
+ sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sar_table_len_aligned +
+ TLV_HDR_SIZE + sar_dbs_backoff_len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_bios_sar_table_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
+ cmd->sar_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
+ cmd->dbs_backoff_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
+ sar_table_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, psar_value, ATH12K_ACPI_BIOS_SAR_TABLE_LEN);
+
+ buf_ptr += sar_table_len_aligned;
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE,
+ sar_dbs_backoff_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, pdbs_value, ATH12K_ACPI_BIOS_SAR_DBS_BACKOFF_LEN);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
+ skb,
+ WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
+int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table)
+{
+ struct ath12k_wmi_base *wmi_ab = &ab->wmi_ab;
+ struct wmi_pdev_set_bios_geo_table_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ int ret;
+ u8 *buf_ptr;
+ u32 len, sar_geo_len_aligned;
+ const u8 *pgeo_value = pgeo_table + ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET;
+
+ sar_geo_len_aligned = roundup(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN, sizeof(u32));
+ len = sizeof(*cmd) + TLV_HDR_SIZE + sar_geo_len_aligned;
+
+ skb = ath12k_wmi_alloc_skb(wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_pdev_set_bios_geo_table_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD,
+ sizeof(*cmd));
+ cmd->pdev_id = cpu_to_le32(WMI_PDEV_ID_SOC);
+ cmd->geo_len = cpu_to_le32(ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
+
+ buf_ptr = skb->data + sizeof(*cmd);
+ tlv = (struct wmi_tlv *)buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, sar_geo_len_aligned);
+ buf_ptr += TLV_HDR_SIZE;
+ memcpy(buf_ptr, pgeo_value, ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN);
+
+ ret = ath12k_wmi_cmd_send(&wmi_ab->wmi[0],
+ skb,
+ WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID);
+ if (ret) {
+ ath12k_warn(ab,
+ "failed to send WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID %d\n",
+ ret);
+ dev_kfree_skb(skb);
+ }
+
+ return ret;
+}
+
int ath12k_wmi_delba_send(struct ath12k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 initiator, u32 reason)
{
@@ -3324,16 +3492,19 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf
wmi_cfg->bpf_instruction_size = cpu_to_le32(tg_cfg->bpf_instruction_size);
wmi_cfg->max_bssid_rx_filters = cpu_to_le32(tg_cfg->max_bssid_rx_filters);
wmi_cfg->use_pdev_id = cpu_to_le32(tg_cfg->use_pdev_id);
- wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config);
+ wmi_cfg->flag1 = cpu_to_le32(tg_cfg->atf_config |
+ WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64);
wmi_cfg->peer_map_unmap_version = cpu_to_le32(tg_cfg->peer_map_unmap_version);
wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params);
wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count);
wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count);
- wmi_cfg->flags2 = le32_encode_bits(tg_cfg->dp_peer_meta_data_ver,
+ wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver,
WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION);
-
wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported <<
WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT);
+ wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt);
+ wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period);
+ wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET);
}
static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi,
@@ -3546,6 +3717,7 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab)
arg.res_cfg.is_reg_cc_ext_event_supported = true;
ab->hw_params->wmi_init(ab, &arg.res_cfg);
+ ab->wow.wmi_conf_rx_decap_mode = arg.res_cfg.rx_decap_mode;
arg.num_mem_chunks = wmi_ab->num_mem_chunks;
arg.hw_mode_id = wmi_ab->preferred_hw_mode;
@@ -3557,6 +3729,8 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab)
arg.num_band_to_mac = ab->num_radios;
ath12k_fill_band_to_mac_param(ab, arg.band_to_mac);
+ ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver;
+
return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg);
}
@@ -3664,7 +3838,7 @@ int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar,
cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ,
sizeof(*cmd));
- cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id));
+ cmd->pdev_id = cpu_to_le32(arg->pdev_id);
cmd->module_id = cpu_to_le32(arg->module_id);
cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo);
cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi);
@@ -4041,6 +4215,7 @@ static void ath12k_wmi_free_dbring_caps(struct ath12k_base *ab)
{
kfree(ab->db_caps);
ab->db_caps = NULL;
+ ab->num_db_cap = 0;
}
static int ath12k_wmi_dma_ring_caps(struct ath12k_base *ab,
@@ -5548,7 +5723,7 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk
* event. Otherwise, it goes to fallback.
*/
if (ab->hw_params->single_pdev_only &&
- pdev_idx < ab->hw_params->num_rxmda_per_pdev)
+ pdev_idx < ab->hw_params->num_rxdma_per_pdev)
goto mem_free;
else
goto fallback;
@@ -5877,8 +6052,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) {
+ if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ &&
+ rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) {
status->band = NL80211_BAND_6GHZ;
+ status->freq = rx_ev.chan_freq;
} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
status->band = NL80211_BAND_2GHZ;
} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
@@ -5899,8 +6076,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
sband = &ar->mac.sbands[status->band];
- status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
- status->band);
+ if (status->band != NL80211_BAND_6GHZ)
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+
status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR;
status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
@@ -5927,13 +6106,11 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb)
}
}
- /* TODO: Pending handle beacon implementation
- *if (ieee80211_is_beacon(hdr->frame_control))
- * ath12k_mac_handle_beacon(ar, skb);
- */
+ if (ieee80211_is_beacon(hdr->frame_control))
+ ath12k_mac_handle_beacon(ar, skb);
ath12k_dbg(ab, ATH12K_DBG_MGMT,
- "event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
+ "event mgmt rx skb %p len %d ftype %02x stype %02x\n",
skb, skb->len,
fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE);
@@ -6137,42 +6314,44 @@ static void ath12k_roam_event(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_roam_event roam_ev = {};
struct ath12k *ar;
+ u32 vdev_id;
+ u8 roam_reason;
if (ath12k_pull_roam_ev(ab, skb, &roam_ev) != 0) {
ath12k_warn(ab, "failed to extract roam event");
return;
}
+ vdev_id = le32_to_cpu(roam_ev.vdev_id);
+ roam_reason = u32_get_bits(le32_to_cpu(roam_ev.reason),
+ WMI_ROAM_REASON_MASK);
+
ath12k_dbg(ab, ATH12K_DBG_WMI,
- "wmi roam event vdev %u reason 0x%08x rssi %d\n",
- roam_ev.vdev_id, roam_ev.reason, roam_ev.rssi);
+ "wmi roam event vdev %u reason %d rssi %d\n",
+ vdev_id, roam_reason, roam_ev.rssi);
rcu_read_lock();
- ar = ath12k_mac_get_ar_by_vdev_id(ab, le32_to_cpu(roam_ev.vdev_id));
+ ar = ath12k_mac_get_ar_by_vdev_id(ab, vdev_id);
if (!ar) {
- ath12k_warn(ab, "invalid vdev id in roam ev %d",
- roam_ev.vdev_id);
+ ath12k_warn(ab, "invalid vdev id in roam ev %d", vdev_id);
rcu_read_unlock();
return;
}
- if (le32_to_cpu(roam_ev.reason) >= WMI_ROAM_REASON_MAX)
+ if (roam_reason >= WMI_ROAM_REASON_MAX)
ath12k_warn(ab, "ignoring unknown roam event reason %d on vdev %i\n",
- roam_ev.reason, roam_ev.vdev_id);
+ roam_reason, vdev_id);
- switch (le32_to_cpu(roam_ev.reason)) {
+ switch (roam_reason) {
case WMI_ROAM_REASON_BEACON_MISS:
- /* TODO: Pending beacon miss and connection_loss_work
- * implementation
- * ath12k_mac_handle_beacon_miss(ar, vdev_id);
- */
+ ath12k_mac_handle_beacon_miss(ar, vdev_id);
break;
case WMI_ROAM_REASON_BETTER_AP:
case WMI_ROAM_REASON_LOW_RSSI:
case WMI_ROAM_REASON_SUITABLE_AP_FOUND:
case WMI_ROAM_REASON_HO_FAILED:
ath12k_warn(ab, "ignoring not implemented roam event reason %d on vdev %i\n",
- roam_ev.reason, roam_ev.vdev_id);
+ roam_reason, vdev_id);
break;
}
@@ -6854,6 +7033,116 @@ exit:
kfree(tb);
}
+static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab,
+ u16 tag, u16 len,
+ const void *ptr, void *data)
+{
+ const struct wmi_wow_ev_pg_fault_param *pf_param;
+ const struct wmi_wow_ev_param *param;
+ struct wmi_wow_ev_arg *arg = data;
+ int pf_len;
+
+ switch (tag) {
+ case WMI_TAG_WOW_EVENT_INFO:
+ param = ptr;
+ arg->wake_reason = le32_to_cpu(param->wake_reason);
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n",
+ arg->wake_reason, wow_reason(arg->wake_reason));
+ break;
+
+ case WMI_TAG_ARRAY_BYTE:
+ if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) {
+ pf_param = ptr;
+ pf_len = le32_to_cpu(pf_param->len);
+ if (pf_len > len - sizeof(pf_len) ||
+ pf_len < 0) {
+ ath12k_warn(ab, "invalid wo reason page fault buffer len %d\n",
+ pf_len);
+ return -EINVAL;
+ }
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n",
+ pf_len);
+ ath12k_dbg_dump(ab, ATH12K_DBG_WMI,
+ "wow_reason_page_fault packet present",
+ "wow_pg_fault ",
+ pf_param->data,
+ pf_len);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
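The page-fault branch above guards a length-prefixed blob; the same
invariant as a standalone sketch (hypothetical helper, not driver code):

	/* A payload of pf_len bytes only fits if the buffer of 'len'
	 * bytes has room for the 4-byte length prefix plus the payload.
	 */
	static bool pf_buf_valid(u16 len, int pf_len)
	{
		return len >= sizeof(__le32) && pf_len >= 0 &&
		       (size_t)pf_len <= len - sizeof(__le32);
	}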
+
+static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb)
+{
+ struct wmi_wow_ev_arg arg = { };
+ int ret;
+
+ ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+ ath12k_wmi_wow_wakeup_host_parse,
+ &arg);
+ if (ret) {
+ ath12k_warn(ab, "failed to parse wmi wow wakeup host event tlv: %d\n",
+ ret);
+ return;
+ }
+
+ complete(&ab->wow.wakeup_completed);
+}
+
+static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab,
+ struct sk_buff *skb)
+{
+ const struct wmi_gtk_offload_status_event *ev;
+ struct ath12k_vif *arvif;
+ __be64 replay_ctr_be;
+ u64 replay_ctr;
+ const void **tb;
+ int ret;
+
+ tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+ if (IS_ERR(tb)) {
+ ret = PTR_ERR(tb);
+ ath12k_warn(ab, "failed to parse tlv: %d\n", ret);
+ return;
+ }
+
+ ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT];
+ if (!ev) {
+ ath12k_warn(ab, "failed to fetch gtk offload status ev");
+ kfree(tb);
+ return;
+ }
+
+ rcu_read_lock();
+ arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id));
+ if (!arvif) {
+ rcu_read_unlock();
+ ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n",
+ le32_to_cpu(ev->vdev_id));
+ kfree(tb);
+ return;
+ }
+
+ replay_ctr = le64_to_cpu(ev->replay_ctr);
+ arvif->rekey_data.replay_ctr = replay_ctr;
+ ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n",
+ le32_to_cpu(ev->refresh_cnt), replay_ctr);
+
+ /* supplicant expects big-endian replay counter */
+ replay_ctr_be = cpu_to_be64(replay_ctr);
+
+ ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid,
+ (void *)&replay_ctr_be, GFP_ATOMIC);
+
+ rcu_read_unlock();
+
+ kfree(tb);
+}
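A short sketch of the byte-order handling above (value assumed for
illustration): the firmware event carries the replay counter
little-endian, while the supplicant notification path expects it
big-endian.

	u64 ctr = 0x0102030405060708ULL;	/* host order */
	__be64 ctr_be = cpu_to_be64(ctr);
	/* memory now holds 01 02 03 04 05 06 07 08, the byte order
	 * ieee80211_gtk_rekey_notify() hands to the supplicant.
	 */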
+
static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
@@ -6974,6 +7263,12 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
case WMI_DIAG_EVENTID:
ath12k_wmi_diag_event(ab, skb);
break;
+ case WMI_WOW_WAKEUP_HOST_EVENTID:
+ ath12k_wmi_event_wow_wakeup_host(ab, skb);
+ break;
+ case WMI_GTK_OFFLOAD_STATUS_EVENTID:
+ ath12k_wmi_gtk_offload_status_event(ab, skb);
+ break;
/* TODO: Add remaining events */
default:
ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
@@ -7183,3 +7478,608 @@ void ath12k_wmi_detach(struct ath12k_base *ab)
ath12k_wmi_free_dbring_caps(ab);
}
+
+int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg)
+{
+ struct wmi_hw_data_filter_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_hw_data_filter_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->enable = cpu_to_le32(arg->enable ? 1 : 0);
+
+ /* Set all modes in case of disable */
+ if (arg->enable)
+ cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap);
+ else
+ cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi hw data filter enable %d filter_bitmap 0x%x\n",
+ arg->enable, arg->hw_filter_bitmap);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID);
+}
+
+int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar)
+{
+ struct wmi_wow_host_wakeup_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD,
+ sizeof(*cmd));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n");
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID);
+}
+
+int ath12k_wmi_wow_enable(struct ath12k *ar)
+{
+ struct wmi_wow_enable_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_enable_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD,
+ sizeof(*cmd));
+
+ cmd->enable = cpu_to_le32(1);
+ cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED);
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n");
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID);
+}
+
+int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable)
+{
+ struct wmi_wow_add_del_event_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_add_del_event_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->is_add = cpu_to_le32(enable);
+ cmd->event_bitmap = cpu_to_le32((1 << event));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n",
+ wow_wakeup_event(event), enable, vdev_id);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID);
+}
+
+int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset)
+{
+ struct wmi_wow_add_pattern_cmd *cmd;
+ struct wmi_wow_bitmap_pattern_params *bitmap;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ size_t len;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) + /* array struct */
+ sizeof(*bitmap) + /* bitmap */
+ sizeof(*tlv) + /* empty ipv4 sync */
+ sizeof(*tlv) + /* empty ipv6 sync */
+ sizeof(*tlv) + /* empty magic */
+ sizeof(*tlv) + /* empty info timeout */
+ sizeof(*tlv) + sizeof(u32); /* ratelimit interval */
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ /* cmd */
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->pattern_id = cpu_to_le32(pattern_id);
+ cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ptr += sizeof(*cmd);
+
+ /* bitmap */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap));
+
+ ptr += sizeof(*tlv);
+
+ bitmap = ptr;
+ bitmap->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T,
+ sizeof(*bitmap));
+ memcpy(bitmap->patternbuf, pattern, pattern_len);
+ memcpy(bitmap->bitmaskbuf, mask, pattern_len);
+ bitmap->pattern_offset = cpu_to_le32(pattern_offset);
+ bitmap->pattern_len = cpu_to_le32(pattern_len);
+ bitmap->bitmask_len = cpu_to_le32(pattern_len);
+ bitmap->pattern_id = cpu_to_le32(pattern_id);
+
+ ptr += sizeof(*bitmap);
+
+ /* ipv4 sync */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ipv6 sync */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* magic */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* pattern info timeout */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+
+ ptr += sizeof(*tlv);
+
+ /* ratelimit interval */
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n",
+ vdev_id, pattern_id, pattern_offset, pattern_len);
+
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ",
+ bitmap->patternbuf, pattern_len);
+ ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ",
+ bitmap->bitmaskbuf, pattern_len);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID);
+}
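Because patternbuf and bitmaskbuf are fixed at
WOW_DEFAULT_BITMAP_PATTERN_SIZE (148) bytes, pattern_len must be bounded
before this function memcpy()s into them. A hypothetical caller-side
check using the limits defined in wmi.h:

	if (pattern_len < WOW_MIN_PATTERN_SIZE ||
	    pattern_len > WOW_MAX_PATTERN_SIZE ||
	    pattern_offset > WOW_MAX_PKT_OFFSET)
		return -EINVAL;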
+
+int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id)
+{
+ struct wmi_wow_del_pattern_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_wow_del_pattern_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD,
+ sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->pattern_id = cpu_to_le32(pattern_id);
+ cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n",
+ vdev_id, pattern_id);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID);
+}
+
+static struct sk_buff *
+ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno)
+{
+ struct nlo_configured_params *nlo_list;
+ size_t len, nlo_list_len, channel_list_len;
+ struct wmi_wow_nlo_config_cmd *cmd;
+ __le32 *channel_list;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *ptr;
+ u32 i;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ /* TLV placeholder for array of structures
+ * nlo_configured_params(nlo_list)
+ */
+ sizeof(*tlv);
+ /* TLV placeholder for array of uint32 channel_list */
+
+ channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count;
+ len += channel_list_len;
+
+ nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count;
+ len += nlo_list_len;
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ ptr = skb->data;
+ cmd = ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd));
+
+ cmd->vdev_id = cpu_to_le32(pno->vdev_id);
+ cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN);
+
+ /* current FW does not support min-max range for dwell time */
+ cmd->active_dwell_time = cpu_to_le32(pno->active_max_time);
+ cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time);
+
+ if (pno->do_passive_scan)
+ cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE);
+
+ cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period);
+ cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period);
+ cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles);
+ cmd->delay_start_time = cpu_to_le32(pno->delay_start_time);
+
+ if (pno->enable_pno_scan_randomization) {
+ cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ |
+ WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ);
+ ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr);
+ ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask);
+ }
+
+ ptr += sizeof(*cmd);
+
+ /* nlo_configured_params(nlo_list) */
+ cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len);
+
+ ptr += sizeof(*tlv);
+ nlo_list = ptr;
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header);
+ tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE,
+ sizeof(*nlo_list));
+
+ nlo_list[i].ssid.valid = cpu_to_le32(1);
+ nlo_list[i].ssid.ssid.ssid_len =
+ cpu_to_le32(pno->a_networks[i].ssid.ssid_len);
+ memcpy(nlo_list[i].ssid.ssid.ssid,
+ pno->a_networks[i].ssid.ssid,
+ le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len));
+
+ if (pno->a_networks[i].rssi_threshold &&
+ pno->a_networks[i].rssi_threshold > -300) {
+ nlo_list[i].rssi_cond.valid = cpu_to_le32(1);
+ nlo_list[i].rssi_cond.rssi =
+ cpu_to_le32(pno->a_networks[i].rssi_threshold);
+ }
+
+ nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1);
+ nlo_list[i].bcast_nw_type.bcast_nw_type =
+ cpu_to_le32(pno->a_networks[i].bcast_nw_type);
+ }
+
+ ptr += nlo_list_len;
+ cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count);
+ tlv = ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len);
+ ptr += sizeof(*tlv);
+ channel_list = ptr;
+
+ for (i = 0; i < pno->a_networks[0].channel_count; i++)
+ channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n",
+ vdev_id);
+
+ return skb;
+}
+
+static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar,
+ u32 vdev_id)
+{
+ struct wmi_wow_nlo_config_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return ERR_PTR(-ENOMEM);
+
+ cmd = (struct wmi_wow_nlo_config_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len);
+
+ cmd->vdev_id = cpu_to_le32(vdev_id);
+ cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi tlv stop pno config vdev_id %d\n", vdev_id);
+ return skb;
+}
+
+int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno_scan)
+{
+ struct sk_buff *skb;
+
+ if (pno_scan->enable)
+ skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan);
+ else
+ skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id);
+
+ if (IS_ERR_OR_NULL(skb))
+ return -ENOMEM;
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID);
+}
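A minimal usage sketch for the PNO entry point above (all values
assumed, including the millisecond unit for the scan period): match one
hidden SSID on 5180 MHz, scanning every 10 seconds.

	struct wmi_pno_scan_req_arg pno = {
		.enable = 1,
		.vdev_id = 0,
		.uc_networks_count = 1,
		.fast_scan_period = 10000,	/* ms, assumed unit */
		.fast_scan_max_cycles = 3,
	};
	int ret;

	pno.a_networks[0].channel_count = 1;
	pno.a_networks[0].channels[0] = 5180;
	pno.a_networks[0].bcast_nw_type = BCAST_HIDDEN;

	ret = ath12k_wmi_wow_config_pno(ar, pno.vdev_id, &pno);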
+
+static void ath12k_wmi_fill_ns_offload(struct ath12k *ar,
+ struct wmi_arp_ns_offload_arg *offload,
+ void **ptr,
+ bool enable,
+ bool ext)
+{
+ struct wmi_ns_offload_params *ns;
+ struct wmi_tlv *tlv;
+ void *buf_ptr = *ptr;
+ u32 ns_cnt, ns_ext_tuples;
+ int i, max_offloads;
+
+ ns_cnt = offload->ipv6_count;
+
+ tlv = buf_ptr;
+
+ if (ext) {
+ ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ ns_ext_tuples * sizeof(*ns));
+ i = WMI_MAX_NS_OFFLOADS;
+ max_offloads = offload->ipv6_count;
+ } else {
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ WMI_MAX_NS_OFFLOADS * sizeof(*ns));
+ i = 0;
+ max_offloads = WMI_MAX_NS_OFFLOADS;
+ }
+
+ buf_ptr += sizeof(*tlv);
+
+ for (; i < max_offloads; i++) {
+ ns = buf_ptr;
+ ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE,
+ sizeof(*ns));
+
+ if (enable) {
+ if (i < ns_cnt)
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID);
+
+ memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16);
+ memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16);
+
+ if (offload->ipv6_type[i])
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST);
+
+ memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN);
+
+ if (!is_zero_ether_addr(ns->target_mac.addr))
+ ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi index %d ns_solicited %pI6 target %pI6",
+ i, ns->solicitation_ipaddr,
+ ns->target_ipaddr[0]);
+ }
+
+ buf_ptr += sizeof(*ns);
+ }
+
+ *ptr = buf_ptr;
+}
+
+static void ath12k_wmi_fill_arp_offload(struct ath12k *ar,
+ struct wmi_arp_ns_offload_arg *offload,
+ void **ptr,
+ bool enable)
+{
+ struct wmi_arp_offload_params *arp;
+ struct wmi_tlv *tlv;
+ void *buf_ptr = *ptr;
+ int i;
+
+ /* fill arp tuple */
+ tlv = buf_ptr;
+ tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+ WMI_MAX_ARP_OFFLOADS * sizeof(*arp));
+ buf_ptr += sizeof(*tlv);
+
+ for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) {
+ arp = buf_ptr;
+ arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE,
+ sizeof(*arp));
+
+ if (enable && i < offload->ipv4_count) {
+ /* Copy the target ip addr and flags */
+ arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID);
+ memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4",
+ arp->target_ipaddr);
+ }
+
+ buf_ptr += sizeof(*arp);
+ }
+
+ *ptr = buf_ptr;
+}
+
+int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload,
+ bool enable)
+{
+ struct wmi_set_arp_ns_offload_cmd *cmd;
+ struct wmi_tlv *tlv;
+ struct sk_buff *skb;
+ void *buf_ptr;
+ size_t len;
+ u8 ns_cnt, ns_ext_tuples = 0;
+
+ ns_cnt = offload->ipv6_count;
+
+ len = sizeof(*cmd) +
+ sizeof(*tlv) +
+ WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) +
+ sizeof(*tlv) +
+ WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params);
+
+ if (ns_cnt > WMI_MAX_NS_OFFLOADS) {
+ ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS;
+ len += sizeof(*tlv) +
+ ns_ext_tuples * sizeof(struct wmi_ns_offload_params);
+ }
+
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ buf_ptr = skb->data;
+ cmd = buf_ptr;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD,
+ sizeof(*cmd));
+ cmd->flags = cpu_to_le32(0);
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+ cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples);
+
+ buf_ptr += sizeof(*cmd);
+
+ ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, false);
+ ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable);
+
+ if (ns_ext_tuples)
+ ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, true);
+
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID);
+}
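A worked example of the extension-tuple split handled above: with
WMI_MAX_NS_OFFLOADS == 2 and five configured IPv6 addresses, the base
TLV carries tuples 0-1 and the extension TLV the remaining three.

	u8 ns_cnt = 5;
	u8 ns_ext_tuples = ns_cnt > WMI_MAX_NS_OFFLOADS ?
			   ns_cnt - WMI_MAX_NS_OFFLOADS : 0;	/* 3 */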
+
+int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
+ struct ath12k_vif *arvif, bool enable)
+{
+ struct ath12k_rekey_data *rekey_data = &arvif->rekey_data;
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct sk_buff *skb;
+ __le64 replay_ctr;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+
+ if (enable) {
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE);
+
+ /* the lengths in rekey_data and cmd are equal */
+ memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck));
+ memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek));
+
+ replay_ctr = cpu_to_le64(rekey_data->replay_ctr);
+ memcpy(cmd->replay_ctr, &replay_ctr,
+ sizeof(replay_ctr));
+ } else {
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n",
+ arvif->vdev_id, enable);
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
+ struct ath12k_vif *arvif)
+{
+ struct wmi_gtk_rekey_offload_cmd *cmd;
+ struct sk_buff *skb;
+ int len;
+
+ len = sizeof(*cmd);
+ skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arvif->vdev_id);
+ cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE);
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n",
+ arvif->vdev_id);
+ return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID);
+}
+
+int ath12k_wmi_sta_keepalive(struct ath12k *ar,
+ const struct wmi_sta_keepalive_arg *arg)
+{
+ struct wmi_sta_keepalive_arp_resp_params *arp;
+ struct ath12k_wmi_pdev *wmi = ar->wmi;
+ struct wmi_sta_keepalive_cmd *cmd;
+ struct sk_buff *skb;
+ size_t len;
+
+ len = sizeof(*cmd) + sizeof(*arp);
+ skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_sta_keepalive_cmd *)skb->data;
+ cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd));
+ cmd->vdev_id = cpu_to_le32(arg->vdev_id);
+ cmd->enabled = cpu_to_le32(arg->enabled);
+ cmd->interval = cpu_to_le32(arg->interval);
+ cmd->method = cpu_to_le32(arg->method);
+
+ arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1);
+ arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE,
+ sizeof(*arp));
+ if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE ||
+ arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) {
+ arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr);
+ arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr);
+ ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr);
+ }
+
+ ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+ "wmi sta keepalive vdev %d enabled %d method %d interval %d\n",
+ arg->vdev_id, arg->enabled, arg->method, arg->interval);
+
+ return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
+}
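A minimal usage sketch for the keepalive command above (values assumed):
enable NULL-frame keepalives on vdev 0 at the default 30 second
interval.

	struct wmi_sta_keepalive_arg arg = {
		.vdev_id = 0,
		.enabled = 1,
		.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
		.interval = WMI_STA_KEEPALIVE_INTERVAL_DEFAULT,
	};

	int ret = ath12k_wmi_sta_keepalive(ar, &arg);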
diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h
index 103462feb935..f1f52175a52b 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.h
+++ b/drivers/net/wireless/ath/ath12k/wmi.h
@@ -24,6 +24,7 @@
struct ath12k_base;
struct ath12k;
+struct ath12k_vif;
/* There is no signed version of __le32, so for a temporary solution come
* up with our own version. The idea is from fs/ntfs/endian.h.
@@ -164,10 +165,6 @@ struct wmi_tlv {
#define WLAN_SCAN_MAX_HINT_BSSID 10
#define MAX_RNR_BSS 5
-#define WLAN_SCAN_MAX_HINT_S_SSID 10
-#define WLAN_SCAN_MAX_HINT_BSSID 10
-#define MAX_RNR_BSS 5
-
#define WMI_APPEND_TO_EXISTING_CHAN_LIST_FLAG 1
#define WMI_BA_MODE_BUFFER_SIZE_256 3
@@ -357,6 +354,9 @@ enum wmi_tlv_cmd_id {
WMI_PDEV_DMA_RING_CFG_REQ_CMDID,
WMI_PDEV_HE_TB_ACTION_FRM_CMDID,
WMI_PDEV_PKTLOG_FILTER_CMDID,
+ WMI_PDEV_SET_BIOS_SAR_TABLE_CMDID = 0x4044,
+ WMI_PDEV_SET_BIOS_GEO_TABLE_CMDID = 0x4045,
+ WMI_PDEV_SET_BIOS_INTERFACE_CMDID = 0x404A,
WMI_VDEV_CREATE_CMDID = WMI_TLV_CMD(WMI_GRP_VDEV),
WMI_VDEV_DELETE_CMDID,
WMI_VDEV_START_REQUEST_CMDID,
@@ -1929,6 +1929,9 @@ enum wmi_tlv_tag {
WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
WMI_TAG_EHT_RATE_SET = 0x3C4,
+ WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
+ WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9,
+ WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB,
WMI_TAG_MAX
};
@@ -2152,6 +2155,7 @@ enum wmi_tlv_service {
WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
WMI_TLV_SERVICE_EXT2_MSG = 220,
+ WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253,
WMI_MAX_EXT_SERVICE = 256,
@@ -2199,8 +2203,11 @@ enum wmi_peer_param {
WMI_PEER_SET_MAX_TX_RATE = 17,
WMI_PEER_SET_MIN_TX_RATE = 18,
WMI_PEER_SET_DEFAULT_ROUTING = 19,
+ WMI_PEER_CHWIDTH_PUNCTURE_20MHZ_BITMAP = 39,
};
+#define WMI_PEER_PUNCTURE_BITMAP GENMASK(23, 8)
+
enum wmi_slot_time {
WMI_VDEV_SLOT_TIME_LONG = 1,
WMI_VDEV_SLOT_TIME_SHORT = 2,
@@ -2287,6 +2294,13 @@ struct ath12k_wmi_host_mem_chunk_arg {
u32 req_id;
};
+enum ath12k_peer_metadata_version {
+ ATH12K_PEER_METADATA_V0,
+ ATH12K_PEER_METADATA_V1,
+ ATH12K_PEER_METADATA_V1A,
+ ATH12K_PEER_METADATA_V1B
+};
+
struct ath12k_wmi_resource_config_arg {
u32 num_vdevs;
u32 num_peers;
@@ -2349,8 +2363,10 @@ struct ath12k_wmi_resource_config_arg {
u32 sched_params;
u32 twt_ap_pdev_count;
u32 twt_ap_sta_count;
+ enum ath12k_peer_metadata_version peer_metadata_ver;
+ u32 ema_max_vap_cnt;
+ u32 ema_max_profile_period;
bool is_reg_cc_ext_event_supported;
- u8 dp_peer_meta_data_ver;
};
struct ath12k_wmi_init_cmd_arg {
@@ -2404,6 +2420,8 @@ struct wmi_init_cmd {
#define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4
#define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4)
+#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
+#define WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET BIT(9)
struct ath12k_wmi_resource_config_params {
__le32 tlv_header;
@@ -2604,6 +2622,19 @@ struct ath12k_wmi_soc_hal_reg_caps_params {
__le32 num_phy;
} __packed;
+enum wmi_channel_width {
+ WMI_CHAN_WIDTH_20 = 0,
+ WMI_CHAN_WIDTH_40 = 1,
+ WMI_CHAN_WIDTH_80 = 2,
+ WMI_CHAN_WIDTH_160 = 3,
+ WMI_CHAN_WIDTH_80P80 = 4,
+ WMI_CHAN_WIDTH_5 = 5,
+ WMI_CHAN_WIDTH_10 = 6,
+ WMI_CHAN_WIDTH_165 = 7,
+ WMI_CHAN_WIDTH_160P160 = 8,
+ WMI_CHAN_WIDTH_320 = 9,
+};
+
#define WMI_MAX_EHTCAP_MAC_SIZE 2
#define WMI_MAX_EHTCAP_PHY_SIZE 3
#define WMI_MAX_EHTCAP_RATE_SET 3
@@ -2707,6 +2738,8 @@ struct ath12k_wmi_vdev_create_arg {
} chains[NUM_NL80211_BANDS];
u32 pdev_id;
u8 if_stats_id;
+ u32 mbssid_flags;
+ u32 mbssid_tx_vdev_id;
};
#define ATH12K_MAX_VDEV_STATS_ID 0x30
@@ -2728,9 +2761,9 @@ struct wmi_vdev_create_cmd {
struct ath12k_wmi_vdev_txrx_streams_params {
__le32 tlv_header;
- u32 band;
- u32 supported_tx_streams;
- u32 supported_rx_streams;
+ __le32 band;
+ __le32 supported_tx_streams;
+ __le32 supported_rx_streams;
} __packed;
struct wmi_vdev_delete_cmd {
@@ -2738,14 +2771,23 @@ struct wmi_vdev_delete_cmd {
__le32 vdev_id;
} __packed;
+struct ath12k_wmi_vdev_up_params {
+ u32 vdev_id;
+ u32 aid;
+ const u8 *bssid;
+ const u8 *tx_bssid;
+ u32 nontx_profile_idx;
+ u32 nontx_profile_cnt;
+};
+
struct wmi_vdev_up_cmd {
__le32 tlv_header;
__le32 vdev_id;
__le32 vdev_assoc_id;
struct ath12k_wmi_mac_addr_params vdev_bssid;
- struct ath12k_wmi_mac_addr_params trans_bssid;
- __le32 profile_idx;
- __le32 profile_num;
+ struct ath12k_wmi_mac_addr_params tx_vdev_bssid;
+ __le32 nontx_profile_idx;
+ __le32 nontx_profile_cnt;
} __packed;
struct wmi_vdev_stop_cmd {
@@ -2773,6 +2815,10 @@ struct ath12k_wmi_ssid_params {
enum wmi_vdev_mbssid_flags {
WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP = BIT(0),
+ WMI_VDEV_MBSSID_FLAGS_TRANSMIT_AP = BIT(1),
+ WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP = BIT(2),
+ WMI_VDEV_MBSSID_FLAGS_EMA_MODE = BIT(3),
+ WMI_VDEV_MBSSID_FLAGS_SCAN_MODE_VAP = BIT(4),
};
struct wmi_vdev_start_request_cmd {
@@ -3357,34 +3403,6 @@ struct wmi_bssid_arg {
const u8 *bssid;
};
-struct wmi_start_scan_arg {
- u32 scan_id;
- u32 scan_req_id;
- u32 vdev_id;
- u32 scan_priority;
- u32 notify_scan_events;
- u32 dwell_time_active;
- u32 dwell_time_passive;
- u32 min_rest_time;
- u32 max_rest_time;
- u32 repeat_probe_time;
- u32 probe_spacing_time;
- u32 idle_time;
- u32 max_scan_time;
- u32 probe_delay;
- u32 scan_ctrl_flags;
-
- u32 ie_len;
- u32 n_channels;
- u32 n_ssids;
- u32 n_bssids;
-
- u8 ie[WLAN_SCAN_PARAMS_MAX_IE_LEN];
- u32 channels[64];
- struct wmi_ssid_arg ssids[WLAN_SCAN_PARAMS_MAX_SSID];
- struct wmi_bssid_arg bssids[WLAN_SCAN_PARAMS_MAX_BSSID];
-};
-
#define WMI_SCAN_STOP_ONE 0x00000000
#define WMI_SCAN_STOP_VAP_ALL 0x01000000
#define WMI_SCAN_STOP_ALL 0x04000000
@@ -3523,6 +3541,16 @@ struct ath12k_wmi_p2p_noa_info {
#define WMI_BEACON_TX_BUFFER_SIZE 512
+#define WMI_EMA_BEACON_CNT GENMASK(7, 0)
+#define WMI_EMA_BEACON_IDX GENMASK(15, 8)
+#define WMI_EMA_BEACON_FIRST GENMASK(23, 16)
+#define WMI_EMA_BEACON_LAST GENMASK(31, 24)
+
+struct ath12k_wmi_bcn_tmpl_ema_arg {
+ u8 bcn_cnt;
+ u8 bcn_index;
+};
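A worked example of how ath12k_wmi_bcn_tmpl() packs these fields: for
the first of three EMA beacons (bcn_cnt = 3, bcn_index = 0) the packed
word is 0x00010003, with the count in bits 7:0, index in 15:8, FIRST
flag in 23:16 and LAST flag in 31:24.

	u32 ema_params = 0;

	u32p_replace_bits(&ema_params, 3, WMI_EMA_BEACON_CNT);	 /* 0x00000003 */
	u32p_replace_bits(&ema_params, 0, WMI_EMA_BEACON_IDX);
	u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST); /* 0x00010003 */
	/* LAST stays clear since bcn_index + 1 != bcn_cnt */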
+
struct wmi_bcn_tmpl_cmd {
__le32 tlv_header;
__le32 vdev_id;
@@ -3533,6 +3561,11 @@ struct wmi_bcn_tmpl_cmd {
__le32 csa_event_bitmap;
__le32 mbssid_ie_offset;
__le32 esp_ie_offset;
+ __le32 csc_switch_count_offset;
+ __le32 csc_event_bitmap;
+ __le32 mu_edca_ie_offset;
+ __le32 feature_enable_bitmap;
+ __le32 ema_params;
} __packed;
struct wmi_p2p_go_set_beacon_ie_cmd {
@@ -4214,6 +4247,9 @@ struct wmi_peer_sta_kickout_event {
struct ath12k_wmi_mac_addr_params peer_macaddr;
} __packed;
+#define WMI_ROAM_REASON_MASK GENMASK(3, 0)
+#define WMI_ROAM_SUBNET_STATUS_MASK GENMASK(5, 4)
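A short sketch of how ath12k_roam_event() consumes these masks: the
firmware packs the roam reason into bits 3:0 and the subnet change
status into bits 5:4 of one word (the subnet extraction is shown for
illustration only; the handler above uses just the reason).

	u32 word = le32_to_cpu(roam_ev.reason);
	u8 reason = u32_get_bits(word, WMI_ROAM_REASON_MASK);
	u8 subnet = u32_get_bits(word, WMI_ROAM_SUBNET_STATUS_MASK);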
+
enum wmi_roam_reason {
WMI_ROAM_REASON_BETTER_AP = 1,
WMI_ROAM_REASON_BEACON_MISS = 2,
@@ -4776,9 +4812,7 @@ struct wmi_probe_tmpl_cmd {
__le32 buf_len;
} __packed;
-#define WMI_MAX_MEM_REQS 32
-
-#define MAX_RADIOS 3
+#define MAX_RADIOS 2
#define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
#define WMI_SEND_TIMEOUT_HZ (3 * HZ)
@@ -4808,6 +4842,37 @@ struct ath12k_wmi_base {
struct ath12k_wmi_target_cap_arg *targ_cap;
};
+struct wmi_pdev_set_bios_interface_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 param_type_id;
+ __le32 length;
+} __packed;
+
+enum wmi_bios_param_type {
+ WMI_BIOS_PARAM_CCA_THRESHOLD_TYPE = 0,
+ WMI_BIOS_PARAM_TAS_CONFIG_TYPE = 1,
+ WMI_BIOS_PARAM_TAS_DATA_TYPE = 2,
+
+ /* bandedge control power */
+ WMI_BIOS_PARAM_TYPE_BANDEDGE = 3,
+
+ WMI_BIOS_PARAM_TYPE_MAX,
+};
+
+struct wmi_pdev_set_bios_sar_table_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 sar_len;
+ __le32 dbs_backoff_len;
+} __packed;
+
+struct wmi_pdev_set_bios_geo_table_cmd {
+ __le32 tlv_header;
+ __le32 pdev_id;
+ __le32 geo_len;
+} __packed;
+
#define ATH12K_FW_STATS_BUF_SIZE (1024 * 1024)
enum wmi_sys_cap_info_flags {
@@ -4845,6 +4910,556 @@ struct wmi_twt_disable_event {
__le32 status;
} __packed;
+/* WOW structures */
+enum wmi_wow_wakeup_event {
+ WOW_BMISS_EVENT = 0,
+ WOW_BETTER_AP_EVENT,
+ WOW_DEAUTH_RECVD_EVENT,
+ WOW_MAGIC_PKT_RECVD_EVENT,
+ WOW_GTK_ERR_EVENT,
+ WOW_FOURWAY_HSHAKE_EVENT,
+ WOW_EAPOL_RECVD_EVENT,
+ WOW_NLO_DETECTED_EVENT,
+ WOW_DISASSOC_RECVD_EVENT,
+ WOW_PATTERN_MATCH_EVENT,
+ WOW_CSA_IE_EVENT,
+ WOW_PROBE_REQ_WPS_IE_EVENT,
+ WOW_AUTH_REQ_EVENT,
+ WOW_ASSOC_REQ_EVENT,
+ WOW_HTT_EVENT,
+ WOW_RA_MATCH_EVENT,
+ WOW_HOST_AUTO_SHUTDOWN_EVENT,
+ WOW_IOAC_MAGIC_EVENT,
+ WOW_IOAC_SHORT_EVENT,
+ WOW_IOAC_EXTEND_EVENT,
+ WOW_IOAC_TIMER_EVENT,
+ WOW_DFS_PHYERR_RADAR_EVENT,
+ WOW_BEACON_EVENT,
+ WOW_CLIENT_KICKOUT_EVENT,
+ WOW_EVENT_MAX,
+};
+
+enum wmi_wow_interface_cfg {
+ WOW_IFACE_PAUSE_ENABLED,
+ WOW_IFACE_PAUSE_DISABLED
+};
+
+#define C2S(x) case x: return #x
+
+static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev)
+{
+ switch (ev) {
+ C2S(WOW_BMISS_EVENT);
+ C2S(WOW_BETTER_AP_EVENT);
+ C2S(WOW_DEAUTH_RECVD_EVENT);
+ C2S(WOW_MAGIC_PKT_RECVD_EVENT);
+ C2S(WOW_GTK_ERR_EVENT);
+ C2S(WOW_FOURWAY_HSHAKE_EVENT);
+ C2S(WOW_EAPOL_RECVD_EVENT);
+ C2S(WOW_NLO_DETECTED_EVENT);
+ C2S(WOW_DISASSOC_RECVD_EVENT);
+ C2S(WOW_PATTERN_MATCH_EVENT);
+ C2S(WOW_CSA_IE_EVENT);
+ C2S(WOW_PROBE_REQ_WPS_IE_EVENT);
+ C2S(WOW_AUTH_REQ_EVENT);
+ C2S(WOW_ASSOC_REQ_EVENT);
+ C2S(WOW_HTT_EVENT);
+ C2S(WOW_RA_MATCH_EVENT);
+ C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT);
+ C2S(WOW_IOAC_MAGIC_EVENT);
+ C2S(WOW_IOAC_SHORT_EVENT);
+ C2S(WOW_IOAC_EXTEND_EVENT);
+ C2S(WOW_IOAC_TIMER_EVENT);
+ C2S(WOW_DFS_PHYERR_RADAR_EVENT);
+ C2S(WOW_BEACON_EVENT);
+ C2S(WOW_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_EVENT_MAX);
+ default:
+ return NULL;
+ }
+}
+
+enum wmi_wow_wake_reason {
+ WOW_REASON_UNSPECIFIED = -1,
+ WOW_REASON_NLOD = 0,
+ WOW_REASON_AP_ASSOC_LOST,
+ WOW_REASON_LOW_RSSI,
+ WOW_REASON_DEAUTH_RECVD,
+ WOW_REASON_DISASSOC_RECVD,
+ WOW_REASON_GTK_HS_ERR,
+ WOW_REASON_EAP_REQ,
+ WOW_REASON_FOURWAY_HS_RECV,
+ WOW_REASON_TIMER_INTR_RECV,
+ WOW_REASON_PATTERN_MATCH_FOUND,
+ WOW_REASON_RECV_MAGIC_PATTERN,
+ WOW_REASON_P2P_DISC,
+ WOW_REASON_WLAN_HB,
+ WOW_REASON_CSA_EVENT,
+ WOW_REASON_PROBE_REQ_WPS_IE_RECV,
+ WOW_REASON_AUTH_REQ_RECV,
+ WOW_REASON_ASSOC_REQ_RECV,
+ WOW_REASON_HTT_EVENT,
+ WOW_REASON_RA_MATCH,
+ WOW_REASON_HOST_AUTO_SHUTDOWN,
+ WOW_REASON_IOAC_MAGIC_EVENT,
+ WOW_REASON_IOAC_SHORT_EVENT,
+ WOW_REASON_IOAC_EXTEND_EVENT,
+ WOW_REASON_IOAC_TIMER_EVENT,
+ WOW_REASON_ROAM_HO,
+ WOW_REASON_DFS_PHYERR_RADADR_EVENT,
+ WOW_REASON_BEACON_RECV,
+ WOW_REASON_CLIENT_KICKOUT_EVENT,
+ WOW_REASON_PAGE_FAULT = 0x3a,
+ WOW_REASON_DEBUG_TEST = 0xFF,
+};
+
+static inline const char *wow_reason(enum wmi_wow_wake_reason reason)
+{
+ switch (reason) {
+ C2S(WOW_REASON_UNSPECIFIED);
+ C2S(WOW_REASON_NLOD);
+ C2S(WOW_REASON_AP_ASSOC_LOST);
+ C2S(WOW_REASON_LOW_RSSI);
+ C2S(WOW_REASON_DEAUTH_RECVD);
+ C2S(WOW_REASON_DISASSOC_RECVD);
+ C2S(WOW_REASON_GTK_HS_ERR);
+ C2S(WOW_REASON_EAP_REQ);
+ C2S(WOW_REASON_FOURWAY_HS_RECV);
+ C2S(WOW_REASON_TIMER_INTR_RECV);
+ C2S(WOW_REASON_PATTERN_MATCH_FOUND);
+ C2S(WOW_REASON_RECV_MAGIC_PATTERN);
+ C2S(WOW_REASON_P2P_DISC);
+ C2S(WOW_REASON_WLAN_HB);
+ C2S(WOW_REASON_CSA_EVENT);
+ C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV);
+ C2S(WOW_REASON_AUTH_REQ_RECV);
+ C2S(WOW_REASON_ASSOC_REQ_RECV);
+ C2S(WOW_REASON_HTT_EVENT);
+ C2S(WOW_REASON_RA_MATCH);
+ C2S(WOW_REASON_HOST_AUTO_SHUTDOWN);
+ C2S(WOW_REASON_IOAC_MAGIC_EVENT);
+ C2S(WOW_REASON_IOAC_SHORT_EVENT);
+ C2S(WOW_REASON_IOAC_EXTEND_EVENT);
+ C2S(WOW_REASON_IOAC_TIMER_EVENT);
+ C2S(WOW_REASON_ROAM_HO);
+ C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT);
+ C2S(WOW_REASON_BEACON_RECV);
+ C2S(WOW_REASON_CLIENT_KICKOUT_EVENT);
+ C2S(WOW_REASON_PAGE_FAULT);
+ C2S(WOW_REASON_DEBUG_TEST);
+ default:
+ return NULL;
+ }
+}
+
+#undef C2S
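Usage of the stringizing helpers above is straightforward; note that
values outside the enum return NULL, so callers should guard before
printing:

	const char *name = wow_reason(WOW_REASON_RECV_MAGIC_PATTERN);
	/* name == "WOW_REASON_RECV_MAGIC_PATTERN"; an unknown value
	 * yields NULL.
	 */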
+
+#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148
+#define WOW_DEFAULT_BITMASK_SIZE 148
+
+#define WOW_MIN_PATTERN_SIZE 1
+#define WOW_MAX_PATTERN_SIZE 148
+#define WOW_MAX_PKT_OFFSET 128
+#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \
+ sizeof(struct rfc1042_hdr))
+#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \
+ offsetof(struct ieee80211_hdr_3addr, addr1))
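The arithmetic behind these two derived constants, assuming the usual
struct layouts (a worked sketch, not normative):

	/* WOW_HDR_LEN    = 24 (ieee80211_hdr_3addr) + 8 (rfc1042_hdr) = 32
	 * WOW_MAX_REDUCE = 32 - 14 (ethhdr) - 4 (offsetof addr1)      = 14
	 * i.e. translating an 802.11 data frame to Ethernet framing can
	 * shrink the header by at most 14 bytes, which bounds how far a
	 * wake pattern offset may need to be pulled forward.
	 */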
+
+struct wmi_wow_bitmap_pattern_params {
+ __le32 tlv_header;
+ u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE];
+ u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE];
+ __le32 pattern_offset;
+ __le32 pattern_len;
+ __le32 bitmask_len;
+ __le32 pattern_id;
+} __packed;
+
+struct wmi_wow_add_pattern_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+struct wmi_wow_del_pattern_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 pattern_id;
+ __le32 pattern_type;
+} __packed;
+
+enum wmi_tlv_pattern_type {
+ WOW_PATTERN_MIN = 0,
+ WOW_BITMAP_PATTERN = WOW_PATTERN_MIN,
+ WOW_IPV4_SYNC_PATTERN,
+ WOW_IPV6_SYNC_PATTERN,
+ WOW_WILD_CARD_PATTERN,
+ WOW_TIMER_PATTERN,
+ WOW_MAGIC_PATTERN,
+ WOW_IPV6_RA_PATTERN,
+ WOW_IOAC_PKT_PATTERN,
+ WOW_IOAC_TMR_PATTERN,
+ WOW_PATTERN_MAX
+};
+
+struct wmi_wow_add_del_event_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 is_add;
+ __le32 event_bitmap;
+} __packed;
+
+struct wmi_wow_enable_cmd {
+ __le32 tlv_header;
+ __le32 enable;
+ __le32 pause_iface_config;
+ __le32 flags;
+} __packed;
+
+struct wmi_wow_host_wakeup_cmd {
+ __le32 tlv_header;
+ __le32 reserved;
+} __packed;
+
+struct wmi_wow_ev_param {
+ __le32 vdev_id;
+ __le32 flag;
+ __le32 wake_reason;
+ __le32 data_len;
+} __packed;
+
+struct wmi_wow_ev_pg_fault_param {
+ __le32 len;
+ u8 data[];
+} __packed;
+
+struct wmi_wow_ev_arg {
+ enum wmi_wow_wake_reason wake_reason;
+};
+
+#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200
+#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100
+#define WMI_PNO_MAX_NETW_CHANNELS 26
+#define WMI_PNO_MAX_NETW_CHANNELS_EX 60
+#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID
+#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN
+
+/* size based on the dot11 declaration without extra IEs, as we will not carry those for PNO */
+#define WMI_PNO_MAX_PB_REQ_SIZE 450
+
+#define WMI_PNO_24GHZ_DEFAULT_CH 1
+#define WMI_PNO_5GHZ_DEFAULT_CH 36
+
+#define WMI_ACTIVE_MAX_CHANNEL_TIME 40
+#define WMI_PASSIVE_MAX_CHANNEL_TIME 110
+
+/* SSID broadcast type */
+enum wmi_ssid_bcast_type {
+ BCAST_UNKNOWN = 0,
+ BCAST_NORMAL = 1,
+ BCAST_HIDDEN = 2,
+};
+
+#define WMI_NLO_MAX_SSIDS 16
+#define WMI_NLO_MAX_CHAN 48
+
+#define WMI_NLO_CONFIG_STOP BIT(0)
+#define WMI_NLO_CONFIG_START BIT(1)
+#define WMI_NLO_CONFIG_RESET BIT(2)
+#define WMI_NLO_CONFIG_SLOW_SCAN BIT(4)
+#define WMI_NLO_CONFIG_FAST_SCAN BIT(5)
+#define WMI_NLO_CONFIG_SSID_HIDE_EN BIT(6)
+
+/* This bit is used to indicate if EPNO or supplicant PNO is enabled.
+ * Only one of them can be enabled at a given time
+ */
+#define WMI_NLO_CONFIG_ENLO BIT(7)
+#define WMI_NLO_CONFIG_SCAN_PASSIVE BIT(8)
+#define WMI_NLO_CONFIG_ENLO_RESET BIT(9)
+#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ BIT(10)
+#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ BIT(11)
+#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12)
+#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG BIT(13)
+
+struct wmi_nlo_ssid_params {
+ __le32 valid;
+ struct ath12k_wmi_ssid_params ssid;
+} __packed;
+
+struct wmi_nlo_enc_params {
+ __le32 valid;
+ __le32 enc_type;
+} __packed;
+
+struct wmi_nlo_auth_params {
+ __le32 valid;
+ __le32 auth_type;
+} __packed;
+
+struct wmi_nlo_bcast_nw_params {
+ __le32 valid;
+ __le32 bcast_nw_type;
+} __packed;
+
+struct wmi_nlo_rssi_params {
+ __le32 valid;
+ __le32 rssi;
+} __packed;
+
+struct nlo_configured_params {
+ /* TLV tag and len;*/
+ __le32 tlv_header;
+ struct wmi_nlo_ssid_params ssid;
+ struct wmi_nlo_enc_params enc_type;
+ struct wmi_nlo_auth_params auth_type;
+ struct wmi_nlo_rssi_params rssi_cond;
+
+ /* indicates if the SSID is hidden or not */
+ struct wmi_nlo_bcast_nw_params bcast_nw_type;
+} __packed;
+
+struct wmi_network_type_arg {
+ struct cfg80211_ssid ssid;
+ u32 authentication;
+ u32 encryption;
+ u32 bcast_nw_type;
+ u8 channel_count;
+ u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX];
+ s32 rssi_threshold;
+};
+
+struct wmi_pno_scan_req_arg {
+ u8 enable;
+ u8 vdev_id;
+ u8 uc_networks_count;
+ struct wmi_network_type_arg a_networks[WMI_PNO_MAX_SUPP_NETWORKS];
+ u32 fast_scan_period;
+ u32 slow_scan_period;
+ u8 fast_scan_max_cycles;
+
+ bool do_passive_scan;
+
+ u32 delay_start_time;
+ u32 active_min_time;
+ u32 active_max_time;
+ u32 passive_min_time;
+ u32 passive_max_time;
+
+ /* mac address randomization attributes */
+ u32 enable_pno_scan_randomization;
+ u8 mac_addr[ETH_ALEN];
+ u8 mac_addr_mask[ETH_ALEN];
+};
+
+struct wmi_wow_nlo_config_cmd {
+ __le32 tlv_header;
+ __le32 flags;
+ __le32 vdev_id;
+ __le32 fast_scan_max_cycles;
+ __le32 active_dwell_time;
+ __le32 passive_dwell_time;
+ __le32 probe_bundle_size;
+
+ /* ART = IRT */
+ __le32 rest_time;
+
+ /* max value that can be reached after scan_backoff_multiplier */
+ __le32 max_rest_time;
+
+ __le32 scan_backoff_multiplier;
+ __le32 fast_scan_period;
+
+ /* specific to windows */
+ __le32 slow_scan_period;
+
+ __le32 no_of_ssids;
+
+ __le32 num_of_channels;
+
+ /* NLO scan start delay time in milliseconds */
+ __le32 delay_start_time;
+
+ /* MAC Address to use in Probe Req as SA */
+ struct ath12k_wmi_mac_addr_params mac_addr;
+
+ /* Mask on which MAC has to be randomized */
+ struct ath12k_wmi_mac_addr_params mac_mask;
+
+ /* IE bitmap to use in Probe Req */
+ __le32 ie_bitmap[8];
+
+ /* Number of vendor OUIs. In the TLV vendor_oui[] */
+ __le32 num_vendor_oui;
+
+ /* Number of connected NLO band preferences */
+ __le32 num_cnlo_band_pref;
+
+ /* The TLVs will follow.
+ * nlo_configured_params nlo_list[];
+ * u32 channel_list[num_of_channels];
+ */
+} __packed;
+
+/* Definition of HW data filtering */
+enum hw_data_filter_type {
+ WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0),
+ WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1),
+};
+
+struct wmi_hw_data_filter_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 enable;
+ __le32 hw_filter_bitmap;
+} __packed;
+
+struct wmi_hw_data_filter_arg {
+ u32 vdev_id;
+ bool enable;
+ u32 hw_filter_bitmap;
+};
+
+#define WMI_IPV6_UC_TYPE 0
+#define WMI_IPV6_AC_TYPE 1
+
+#define WMI_IPV6_MAX_COUNT 16
+#define WMI_IPV4_MAX_COUNT 2
+
+struct wmi_arp_ns_offload_arg {
+ u8 ipv4_addr[WMI_IPV4_MAX_COUNT][4];
+ u32 ipv4_count;
+ u32 ipv6_count;
+ u8 ipv6_addr[WMI_IPV6_MAX_COUNT][16];
+ u8 self_ipv6_addr[WMI_IPV6_MAX_COUNT][16];
+ u8 ipv6_type[WMI_IPV6_MAX_COUNT];
+ bool ipv6_valid[WMI_IPV6_MAX_COUNT];
+ u8 mac_addr[ETH_ALEN];
+};
+
+#define WMI_MAX_NS_OFFLOADS 2
+#define WMI_MAX_ARP_OFFLOADS 2
+
+#define WMI_ARPOL_FLAGS_VALID BIT(0)
+#define WMI_ARPOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID BIT(2)
+
+struct wmi_arp_offload_params {
+ __le32 tlv_header;
+ __le32 flags;
+ u8 target_ipaddr[4];
+ u8 remote_ipaddr[4];
+ struct ath12k_wmi_mac_addr_params target_mac;
+} __packed;
+
+#define WMI_NSOL_FLAGS_VALID BIT(0)
+#define WMI_NSOL_FLAGS_MAC_VALID BIT(1)
+#define WMI_NSOL_FLAGS_REMOTE_IP_VALID BIT(2)
+#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST BIT(3)
+
+#define WMI_NSOL_MAX_TARGET_IPS 2
+
+struct wmi_ns_offload_params {
+ __le32 tlv_header;
+ __le32 flags;
+ u8 target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16];
+ u8 solicitation_ipaddr[16];
+ u8 remote_ipaddr[16];
+ struct ath12k_wmi_mac_addr_params target_mac;
+} __packed;
+
+struct wmi_set_arp_ns_offload_cmd {
+ __le32 tlv_header;
+ __le32 flags;
+ __le32 vdev_id;
+ __le32 num_ns_ext_tuples;
+ /* The TLVs follow:
+ * wmi_ns_offload_params ns[WMI_MAX_NS_OFFLOADS];
+ * wmi_arp_offload_params arp[WMI_MAX_ARP_OFFLOADS];
+ * wmi_ns_offload_params ns_ext[num_ns_ext_tuples];
+ */
+} __packed;
+
+#define GTK_OFFLOAD_OPCODE_MASK 0xFF000000
+#define GTK_OFFLOAD_ENABLE_OPCODE 0x01000000
+#define GTK_OFFLOAD_DISABLE_OPCODE 0x02000000
+#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE 0x04000000
+
+#define GTK_OFFLOAD_KEK_BYTES 16
+#define GTK_OFFLOAD_KCK_BYTES 16
+#define GTK_REPLAY_COUNTER_BYTES 8
+#define WMI_MAX_KEY_LEN 32
+#define IGTK_PN_SIZE 6
+
+struct wmi_gtk_offload_status_event {
+ __le32 vdev_id;
+ __le32 flags;
+ __le32 refresh_cnt;
+ __le64 replay_ctr;
+ u8 igtk_key_index;
+ u8 igtk_key_length;
+ u8 igtk_key_rsc[IGTK_PN_SIZE];
+ u8 igtk_key[WMI_MAX_KEY_LEN];
+ u8 gtk_key_index;
+ u8 gtk_key_length;
+ u8 gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES];
+ u8 gtk_key[WMI_MAX_KEY_LEN];
+} __packed;
+
+struct wmi_gtk_rekey_offload_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 flags;
+ u8 kek[GTK_OFFLOAD_KEK_BYTES];
+ u8 kck[GTK_OFFLOAD_KCK_BYTES];
+ u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES];
+} __packed;
+
+struct wmi_sta_keepalive_cmd {
+ __le32 tlv_header;
+ __le32 vdev_id;
+ __le32 enabled;
+
+ /* WMI_STA_KEEPALIVE_METHOD_ */
+ __le32 method;
+
+ /* in seconds */
+ __le32 interval;
+
+ /* following this structure is the TLV for struct
+ * wmi_sta_keepalive_arp_resp_params
+ */
+} __packed;
+
+struct wmi_sta_keepalive_arp_resp_params {
+ __le32 tlv_header;
+ __le32 src_ip4_addr;
+ __le32 dest_ip4_addr;
+ struct ath12k_wmi_mac_addr_params dest_mac_addr;
+} __packed;
+
+struct wmi_sta_keepalive_arg {
+ u32 vdev_id;
+ u32 enabled;
+ u32 method;
+ u32 interval;
+ u32 src_ip4_addr;
+ u32 dest_ip4_addr;
+ const u8 dest_mac_addr[ETH_ALEN];
+};
+
+enum wmi_sta_keepalive_method {
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1,
+ WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE = 2,
+ WMI_STA_KEEPALIVE_METHOD_ETHERNET_LOOPBACK = 3,
+ WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST = 4,
+ WMI_STA_KEEPALIVE_METHOD_MGMT_VENDOR_ACTION = 5,
+};
+
+#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30
+#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0
+
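+/* Illustrative sketch (editorial addition, not part of this patch):
+ * a caller enabling the NULL-frame keepalive with the default 30 s
+ * interval would fill the arg struct roughly as
+ *
+ *	struct wmi_sta_keepalive_arg arg = {
+ *		.vdev_id = arvif->vdev_id,
+ *		.enabled = 1,
+ *		.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ *		.interval = WMI_STA_KEEPALIVE_INTERVAL_DEFAULT,
+ *	};
+ *
+ *	ret = ath12k_wmi_sta_keepalive(ar, &arg);
+ *
+ * where ar/arvif are assumed to come from the caller's context; the
+ * helper itself is declared further below.
+ */
+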
void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
struct ath12k_wmi_resource_config_arg *config);
void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -4858,10 +5473,10 @@ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
const u8 *p2p_ie);
int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
struct ieee80211_mutable_offsets *offs,
- struct sk_buff *bcn);
+ struct sk_buff *bcn,
+ struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args);
int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id);
-int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid,
- const u8 *bssid);
+int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params);
int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id);
int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
bool restart);
@@ -4966,6 +5581,10 @@ int ath12k_wmi_probe_resp_tmpl(struct ath12k *ar, u32 vdev_id,
struct sk_buff *tmpl);
int ath12k_wmi_set_hw_mode(struct ath12k_base *ab,
enum wmi_host_hw_mode_config_type mode);
+int ath12k_wmi_set_bios_cmd(struct ath12k_base *ab, u32 param_id,
+ const u8 *buf, size_t buf_len);
+int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table);
+int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table);
static inline u32
ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param)
@@ -4993,4 +5612,28 @@ ath12k_wmi_mac_phy_get_hw_link_id(const struct ath12k_wmi_mac_phy_caps_params *p
WMI_CAPS_PARAMS_HW_LINK_ID);
}
+int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar);
+int ath12k_wmi_wow_enable(struct ath12k *ar);
+int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id);
+int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id,
+ const u8 *pattern, const u8 *mask,
+ int pattern_len, int pattern_offset);
+int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id,
+ enum wmi_wow_wakeup_event event,
+ u32 enable);
+int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id,
+ struct wmi_pno_scan_req_arg *pno_scan);
+int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar,
+ struct wmi_hw_data_filter_arg *arg);
+int ath12k_wmi_arp_ns_offload(struct ath12k *ar,
+ struct ath12k_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload,
+ bool enable);
+int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar,
+ struct ath12k_vif *arvif, bool enable);
+int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar,
+ struct ath12k_vif *arvif);
+int ath12k_wmi_sta_keepalive(struct ath12k *ar,
+ const struct wmi_sta_keepalive_arg *arg);
+
#endif
diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c
new file mode 100644
index 000000000000..9b8684abbe40
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wow.c
@@ -0,0 +1,1026 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/delay.h>
+#include <linux/inetdevice.h>
+#include <net/addrconf.h>
+#include <net/if_inet6.h>
+#include <net/ipv6.h>
+
+#include "mac.h"
+
+#include <net/mac80211.h>
+#include "core.h"
+#include "hif.h"
+#include "debug.h"
+#include "wmi.h"
+#include "wow.h"
+
+static const struct wiphy_wowlan_support ath12k_wowlan_support = {
+ .flags = WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE,
+ .pattern_min_len = WOW_MIN_PATTERN_SIZE,
+ .pattern_max_len = WOW_MAX_PATTERN_SIZE,
+ .max_pkt_offset = WOW_MAX_PKT_OFFSET,
+};
+
+static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *arvif)
+{
+ return (arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE ||
+ arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT ||
+ arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO);
+}
+
+int ath12k_wow_enable(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ int i, ret;
+
+ clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags);
+
+ /* The firmware might be busy and unable to enter WoW immediately.
+ * In that case the firmware notifies the host with an
+ * ATH12K_HTC_MSG_NACK_SUSPEND message, asking the host to try
+ * again later. Per the firmware team there can be up to 10 retries.
+ */
+ for (i = 0; i < ATH12K_WOW_RETRY_NUM; i++) {
+ reinit_completion(&ab->htc_suspend);
+
+ ret = ath12k_wmi_wow_enable(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to issue wow enable: %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ab,
+ "timed out while waiting for htc suspend completion\n");
+ return -ETIMEDOUT;
+ }
+
+ if (test_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags))
+ /* success, suspend complete received */
+ return 0;
+
+ ath12k_warn(ab, "htc suspend not complete, retrying (try %d)\n",
+ i);
+ msleep(ATH12K_WOW_RETRY_WAIT_MS);
+ }
+
+ ath12k_warn(ab, "htc suspend not complete, failing after %d tries\n", i);
+
+ return -ETIMEDOUT;
+}
+
+int ath12k_wow_wakeup(struct ath12k *ar)
+{
+ struct ath12k_base *ab = ar->ab;
+ int ret;
+
+ reinit_completion(&ab->wow.wakeup_completed);
+
+ ret = ath12k_wmi_wow_host_wakeup_ind(ar);
+ if (ret) {
+ ath12k_warn(ab, "failed to send wow wakeup indication: %d\n",
+ ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ);
+ if (ret == 0) {
+ ath12k_warn(ab, "timed out while waiting for wow wakeup completion\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_vif_cleanup(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+ int i, ret;
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ for (i = 0; i < ar->wow.max_num_patterns; i++) {
+ ret = ath12k_wmi_wow_del_pattern(ar, arvif->vdev_id, i);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n",
+ i, arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_cleanup(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath12k_wow_vif_cleanup(arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Convert an 802.3 frame to the 802.11 format.
+ *          +------------+-----------+--------+----------------+
+ * 802.3:   |dest mac(6B)|src mac(6B)|type(2B)|     body...    |
+ *          +------------+-----------+--------+----------------+
+ *                |__           |_______         |____________    |________
+ *                   |                  |                     |            |
+ *          +--+------------+----+-----------+----+--------+----------------+
+ * 802.11:  |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)|     body...    |
+ *          +--+------------+----+-----------+----+--------+----------------+
+ */
+static void
+ath12k_wow_convert_8023_to_80211(struct ath12k *ar,
+ const struct cfg80211_pkt_pattern *eth_pattern,
+ struct ath12k_pkt_pattern *i80211_pattern)
+{
+ size_t r1042_eth_ofs = offsetof(struct rfc1042_hdr, eth_type);
+ size_t a1_ofs = offsetof(struct ieee80211_hdr_3addr, addr1);
+ size_t a3_ofs = offsetof(struct ieee80211_hdr_3addr, addr3);
+ size_t i80211_hdr_len = sizeof(struct ieee80211_hdr_3addr);
+ size_t prot_ofs = offsetof(struct ethhdr, h_proto);
+ size_t src_ofs = offsetof(struct ethhdr, h_source);
+ u8 eth_bytemask[WOW_MAX_PATTERN_SIZE] = {};
+ const u8 *eth_pat = eth_pattern->pattern;
+ size_t eth_pat_len = eth_pattern->pattern_len;
+ size_t eth_pkt_ofs = eth_pattern->pkt_offset;
+ u8 *bytemask = i80211_pattern->bytemask;
+ u8 *pat = i80211_pattern->pattern;
+ size_t pat_len = 0;
+ size_t pkt_ofs = 0;
+ size_t delta;
+ int i;
+
+ /* convert bitmask to bytemask */
+ for (i = 0; i < eth_pat_len; i++)
+ if (eth_pattern->mask[i / 8] & BIT(i % 8))
+ eth_bytemask[i] = 0xff;
+
+ if (eth_pkt_ofs < ETH_ALEN) {
+ pkt_ofs = eth_pkt_ofs + a1_ofs;
+
+ if (size_add(eth_pkt_ofs, eth_pat_len) < ETH_ALEN) {
+ memcpy(pat, eth_pat, eth_pat_len);
+ memcpy(bytemask, eth_bytemask, eth_pat_len);
+
+ pat_len = eth_pat_len;
+ } else if (eth_pkt_ofs + eth_pat_len < prot_ofs) {
+ memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
+ memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);
+
+ delta = eth_pkt_ofs + eth_pat_len - src_ofs;
+ memcpy(pat + a3_ofs - pkt_ofs,
+ eth_pat + ETH_ALEN - eth_pkt_ofs,
+ delta);
+ memcpy(bytemask + a3_ofs - pkt_ofs,
+ eth_bytemask + ETH_ALEN - eth_pkt_ofs,
+ delta);
+
+ pat_len = a3_ofs - pkt_ofs + delta;
+ } else {
+ memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs);
+ memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs);
+
+ memcpy(pat + a3_ofs - pkt_ofs,
+ eth_pat + ETH_ALEN - eth_pkt_ofs,
+ ETH_ALEN);
+ memcpy(bytemask + a3_ofs - pkt_ofs,
+ eth_bytemask + ETH_ALEN - eth_pkt_ofs,
+ ETH_ALEN);
+
+ delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
+ memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_pat + prot_ofs - eth_pkt_ofs,
+ delta);
+ memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_bytemask + prot_ofs - eth_pkt_ofs,
+ delta);
+
+ pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
+ }
+ } else if (eth_pkt_ofs < prot_ofs) {
+ pkt_ofs = eth_pkt_ofs - ETH_ALEN + a3_ofs;
+
+ if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) {
+ memcpy(pat, eth_pat, eth_pat_len);
+ memcpy(bytemask, eth_bytemask, eth_pat_len);
+
+ pat_len = eth_pat_len;
+ } else {
+ memcpy(pat, eth_pat, prot_ofs - eth_pkt_ofs);
+ memcpy(bytemask, eth_bytemask, prot_ofs - eth_pkt_ofs);
+
+ delta = eth_pkt_ofs + eth_pat_len - prot_ofs;
+ memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_pat + prot_ofs - eth_pkt_ofs,
+ delta);
+ memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs,
+ eth_bytemask + prot_ofs - eth_pkt_ofs,
+ delta);
+
+ pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta;
+ }
+ } else {
+ pkt_ofs = eth_pkt_ofs - prot_ofs + i80211_hdr_len + r1042_eth_ofs;
+
+ memcpy(pat, eth_pat, eth_pat_len);
+ memcpy(bytemask, eth_bytemask, eth_pat_len);
+
+ pat_len = eth_pat_len;
+ }
+
+ i80211_pattern->pattern_len = pat_len;
+ i80211_pattern->pkt_offset = pkt_ofs;
+}
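+
+/* Worked example (editorial addition, not part of this patch): with
+ * a1_ofs = 4, i80211_hdr_len = 24 and r1042_eth_ofs = 6, a pattern on
+ * the destination MAC (eth_pkt_ofs = 0) simply moves to addr1, i.e.
+ * pkt_ofs = 0 + 4 = 4, while a pattern starting at the EtherType
+ * (eth_pkt_ofs = prot_ofs = 12) lands behind the 802.11 header plus
+ * the LLC/SNAP prefix: pkt_ofs = 12 - 12 + 24 + 6 = 30.
+ */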
+
+static int
+ath12k_wow_pno_check_and_convert(struct ath12k *ar, u32 vdev_id,
+ const struct cfg80211_sched_scan_request *nd_config,
+ struct wmi_pno_scan_req_arg *pno)
+{
+ int i, j;
+ u8 ssid_len;
+
+ pno->enable = 1;
+ pno->vdev_id = vdev_id;
+ pno->uc_networks_count = nd_config->n_match_sets;
+
+ if (!pno->uc_networks_count ||
+ pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS)
+ return -EINVAL;
+
+ if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX)
+ return -EINVAL;
+
+ /* Filling per profile params */
+ for (i = 0; i < pno->uc_networks_count; i++) {
+ ssid_len = nd_config->match_sets[i].ssid.ssid_len;
+
+ if (ssid_len == 0 || ssid_len > 32)
+ return -EINVAL;
+
+ pno->a_networks[i].ssid.ssid_len = ssid_len;
+
+ memcpy(pno->a_networks[i].ssid.ssid,
+ nd_config->match_sets[i].ssid.ssid,
+ ssid_len);
+ pno->a_networks[i].authentication = 0;
+ pno->a_networks[i].encryption = 0;
+ pno->a_networks[i].bcast_nw_type = 0;
+
+ /* Copy the list of valid channels into the request */
+ pno->a_networks[i].channel_count = nd_config->n_channels;
+ pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold;
+
+ for (j = 0; j < nd_config->n_channels; j++) {
+ pno->a_networks[i].channels[j] =
+ nd_config->channels[j]->center_freq;
+ }
+ }
+
+ /* set scan to passive if no SSIDs are specified in the request */
+ if (nd_config->n_ssids == 0)
+ pno->do_passive_scan = true;
+ else
+ pno->do_passive_scan = false;
+
+ for (i = 0; i < nd_config->n_ssids; i++) {
+ for (j = 0; j < pno->uc_networks_count; j++) {
+ if (pno->a_networks[j].ssid.ssid_len ==
+ nd_config->ssids[i].ssid_len &&
+ !memcmp(pno->a_networks[j].ssid.ssid,
+ nd_config->ssids[i].ssid,
+ pno->a_networks[j].ssid.ssid_len)) {
+ pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN;
+ break;
+ }
+ }
+ }
+
+ if (nd_config->n_scan_plans == 2) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations;
+ pno->slow_scan_period =
+ nd_config->scan_plans[1].interval * MSEC_PER_SEC;
+ } else if (nd_config->n_scan_plans == 1) {
+ pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ pno->fast_scan_max_cycles = 1;
+ pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC;
+ } else {
+ ath12k_warn(ar->ab, "Invalid number of PNO scan plans: %d",
+ nd_config->n_scan_plans);
+ }
+
+ if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
+ /* enable mac randomization */
+ pno->enable_pno_scan_randomization = 1;
+ memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN);
+ memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN);
+ }
+
+ pno->delay_start_time = nd_config->delay;
+
+ /* Current FW does not support min-max range for dwell time */
+ pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME;
+ pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME;
+
+ return 0;
+}
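+
+/* Example (editorial addition, not part of this patch): two scan
+ * plans of {interval = 10 s, iterations = 3} and {interval = 60 s}
+ * translate to fast_scan_period = 10000 ms, fast_scan_max_cycles = 3
+ * and slow_scan_period = 60000 ms; with a single plan the fast and
+ * slow periods are identical and only one fast cycle runs.
+ */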
+
+static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif,
+ struct cfg80211_wowlan *wowlan)
+{
+ const struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
+ struct ath12k *ar = arvif->ar;
+ unsigned long wow_mask = 0;
+ int pattern_id = 0;
+ int ret, i, j;
+
+ /* Setup requested WOW features */
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_IBSS:
+ __set_bit(WOW_BEACON_EVENT, &wow_mask);
+ fallthrough;
+ case WMI_VDEV_TYPE_AP:
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask);
+ __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask);
+ __set_bit(WOW_HTT_EVENT, &wow_mask);
+ __set_bit(WOW_RA_MATCH_EVENT, &wow_mask);
+ break;
+ case WMI_VDEV_TYPE_STA:
+ if (wowlan->disconnect) {
+ __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask);
+ __set_bit(WOW_BMISS_EVENT, &wow_mask);
+ __set_bit(WOW_CSA_IE_EVENT, &wow_mask);
+ }
+
+ if (wowlan->magic_pkt)
+ __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask);
+
+ if (wowlan->nd_config) {
+ struct wmi_pno_scan_req_arg *pno;
+ int ret;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ ar->nlo_enabled = true;
+
+ ret = ath12k_wow_pno_check_and_convert(ar, arvif->vdev_id,
+ wowlan->nd_config, pno);
+ if (!ret) {
+ ath12k_wmi_wow_config_pno(ar, arvif->vdev_id, pno);
+ __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask);
+ }
+
+ kfree(pno);
+ }
+ break;
+ default:
+ break;
+ }
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ const struct cfg80211_pkt_pattern *eth_pattern = &patterns[i];
+ struct ath12k_pkt_pattern new_pattern = {};
+
+ if (WARN_ON(eth_pattern->pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
+
+ if (ar->ab->wow.wmi_conf_rx_decap_mode ==
+ ATH12K_HW_TXRX_NATIVE_WIFI) {
+ ath12k_wow_convert_8023_to_80211(ar, eth_pattern,
+ &new_pattern);
+
+ if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE))
+ return -EINVAL;
+ } else {
+ memcpy(new_pattern.pattern, eth_pattern->pattern,
+ eth_pattern->pattern_len);
+
+ /* convert bitmask to bytemask */
+ for (j = 0; j < eth_pattern->pattern_len; j++)
+ if (eth_pattern->mask[j / 8] & BIT(j % 8))
+ new_pattern.bytemask[j] = 0xff;
+
+ new_pattern.pattern_len = eth_pattern->pattern_len;
+ new_pattern.pkt_offset = eth_pattern->pkt_offset;
+ }
+
+ ret = ath12k_wmi_wow_add_pattern(ar, arvif->vdev_id,
+ pattern_id,
+ new_pattern.pattern,
+ new_pattern.bytemask,
+ new_pattern.pattern_len,
+ new_pattern.pkt_offset);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n",
+ pattern_id,
+ arvif->vdev_id, ret);
+ return ret;
+ }
+
+ pattern_id++;
+ __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask);
+ }
+
+ for (i = 0; i < WOW_EVENT_MAX; i++) {
+ if (!test_bit(i, &wow_mask))
+ continue;
+ ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n",
+ wow_wakeup_event(i), arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_set_wakeups(struct ath12k *ar,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (ath12k_wow_is_p2p_vdev(arvif))
+ continue;
+ ret = ath12k_wow_vif_set_wakeups(arvif, wowlan);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_vdev_clean_nlo(struct ath12k *ar, u32 vdev_id)
+{
+ struct wmi_pno_scan_req_arg *pno;
+ int ret;
+
+ if (!ar->nlo_enabled)
+ return 0;
+
+ pno = kzalloc(sizeof(*pno), GFP_KERNEL);
+ if (!pno)
+ return -ENOMEM;
+
+ pno->enable = 0;
+ ret = ath12k_wmi_wow_config_pno(ar, vdev_id, pno);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to disable PNO: %d", ret);
+ goto out;
+ }
+
+ ar->nlo_enabled = false;
+
+out:
+ kfree(pno);
+ return ret;
+}
+
+static int ath12k_wow_vif_clean_nlo(struct ath12k_vif *arvif)
+{
+ struct ath12k *ar = arvif->ar;
+
+ switch (arvif->vdev_type) {
+ case WMI_VDEV_TYPE_STA:
+ return ath12k_wow_vdev_clean_nlo(ar, arvif->vdev_id);
+ default:
+ return 0;
+ }
+}
+
+static int ath12k_wow_nlo_cleanup(struct ath12k *ar)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (ath12k_wow_is_p2p_vdev(arvif))
+ continue;
+
+ ret = ath12k_wow_vif_clean_nlo(arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_set_hw_filter(struct ath12k *ar)
+{
+ struct wmi_hw_data_filter_arg arg;
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enable = true;
+ arg.hw_filter_bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC;
+ ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_clear_hw_filter(struct ath12k *ar)
+{
+ struct wmi_hw_data_filter_arg arg;
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ arg.vdev_id = arvif->vdev_id;
+ arg.enable = false;
+ arg.hw_filter_bitmap = 0;
+ ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg);
+
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void ath12k_wow_generate_ns_mc_addr(struct ath12k_base *ab,
+ struct wmi_arp_ns_offload_arg *offload)
+{
+ int i;
+
+ for (i = 0; i < offload->ipv6_count; i++) {
+ offload->self_ipv6_addr[i][0] = 0xff;
+ offload->self_ipv6_addr[i][1] = 0x02;
+ offload->self_ipv6_addr[i][11] = 0x01;
+ offload->self_ipv6_addr[i][12] = 0xff;
+ offload->self_ipv6_addr[i][13] =
+ offload->ipv6_addr[i][13];
+ offload->self_ipv6_addr[i][14] =
+ offload->ipv6_addr[i][14];
+ offload->self_ipv6_addr[i][15] =
+ offload->ipv6_addr[i][15];
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "NS solicited addr %pI6\n",
+ offload->self_ipv6_addr[i]);
+ }
+}
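+
+/* Example (editorial addition, not part of this patch): for the
+ * unicast address 2001:db8::1:2:3 only the last three bytes are
+ * kept, so the generated solicited-node group is ff02::1:ff02:3
+ * (bytes 0-1 set to ff02, byte 11 to 01, byte 12 to ff, bytes 13-15
+ * copied verbatim).
+ */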
+
+static void ath12k_wow_prepare_ns_offload(struct ath12k_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload)
+{
+ struct net_device *ndev = ieee80211_vif_to_wdev(arvif->vif)->netdev;
+ struct ath12k_base *ab = arvif->ar->ab;
+ struct inet6_ifaddr *ifa6;
+ struct ifacaddr6 *ifaca6;
+ struct inet6_dev *idev;
+ u32 count = 0, scope;
+
+ if (!ndev)
+ return;
+
+ idev = in6_dev_get(ndev);
+ if (!idev)
+ return;
+
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare ns offload\n");
+
+ read_lock_bh(&idev->lock);
+
+ /* get unicast address */
+ list_for_each_entry(ifa6, &idev->addr_list, if_list) {
+ if (count >= WMI_IPV6_MAX_COUNT)
+ goto unlock;
+
+ if (ifa6->flags & IFA_F_DADFAILED)
+ continue;
+
+ scope = ipv6_addr_src_scope(&ifa6->addr);
+ if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
+ scope != IPV6_ADDR_SCOPE_GLOBAL) {
+ ath12k_dbg(ab, ATH12K_DBG_WOW,
+ "Unsupported ipv6 scope: %d\n", scope);
+ continue;
+ }
+
+ memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr,
+ sizeof(ifa6->addr.s6_addr));
+ offload->ipv6_type[count] = WMI_IPV6_UC_TYPE;
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 uc %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ }
+
+ /* get anycast address */
+ rcu_read_lock();
+
+ for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6;
+ ifaca6 = rcu_dereference(ifaca6->aca_next)) {
+ if (count >= WMI_IPV6_MAX_COUNT) {
+ rcu_read_unlock();
+ goto unlock;
+ }
+
+ scope = ipv6_addr_src_scope(&ifaca6->aca_addr);
+ if (scope != IPV6_ADDR_SCOPE_LINKLOCAL &&
+ scope != IPV6_ADDR_SCOPE_GLOBAL) {
+ ath12k_dbg(ab, ATH12K_DBG_WOW,
+ "Unsupported ipv scope: %d\n", scope);
+ continue;
+ }
+
+ memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr,
+ sizeof(ifaca6->aca_addr));
+ offload->ipv6_type[count] = WMI_IPV6_AC_TYPE;
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 ac %pI6 scope %d\n",
+ count, offload->ipv6_addr[count],
+ scope);
+ count++;
+ }
+
+ rcu_read_unlock();
+
+unlock:
+ read_unlock_bh(&idev->lock);
+
+ in6_dev_put(idev);
+
+ offload->ipv6_count = count;
+ ath12k_wow_generate_ns_mc_addr(ab, offload);
+}
+
+static void ath12k_wow_prepare_arp_offload(struct ath12k_vif *arvif,
+ struct wmi_arp_ns_offload_arg *offload)
+{
+ struct ieee80211_vif *vif = arvif->vif;
+ struct ieee80211_vif_cfg vif_cfg = vif->cfg;
+ struct ath12k_base *ab = arvif->ar->ab;
+ u32 ipv4_cnt;
+
+ ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare arp offload\n");
+
+ ipv4_cnt = min(vif_cfg.arp_addr_cnt, WMI_IPV4_MAX_COUNT);
+ memcpy(offload->ipv4_addr, vif_cfg.arp_addr_list, ipv4_cnt * sizeof(u32));
+ offload->ipv4_count = ipv4_cnt;
+
+ ath12k_dbg(ab, ATH12K_DBG_WOW,
+ "wow arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n",
+ vif_cfg.arp_addr_cnt, vif->addr, offload->ipv4_addr);
+}
+
+static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable)
+{
+ struct wmi_arp_ns_offload_arg *offload;
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ offload = kmalloc(sizeof(*offload), GFP_KERNEL);
+ if (!offload)
+ return -ENOMEM;
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
+ continue;
+
+ memset(offload, 0, sizeof(*offload));
+
+ memcpy(offload->mac_addr, arvif->vif->addr, ETH_ALEN);
+ ath12k_wow_prepare_ns_offload(arvif, offload);
+ ath12k_wow_prepare_arp_offload(arvif, offload);
+
+ ret = ath12k_wmi_arp_ns_offload(ar, arvif, offload, enable);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ kfree(offload);
+ return ret;
+ }
+ }
+
+ kfree(offload);
+
+ return 0;
+}
+
+static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ if (arvif->vdev_type != WMI_VDEV_TYPE_STA ||
+ !arvif->is_up ||
+ !arvif->rekey_data.enable_offload)
+ continue;
+
+ /* get rekey info before disabling rekey offload */
+ if (!enable) {
+ ret = ath12k_wmi_gtk_rekey_getinfo(ar, arvif);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n",
+ arvif->vdev_id, ret);
+ return ret;
+ }
+ }
+
+ ret = ath12k_wmi_gtk_rekey_offload(ar, arvif, enable);
+
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to offload gtk reky vdev %i: enable %d, ret %d\n",
+ arvif->vdev_id, enable, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_protocol_offload(struct ath12k *ar, bool enable)
+{
+ int ret;
+
+ ret = ath12k_wow_arp_ns_offload(ar, enable);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ ret = ath12k_gtk_rekey_offload(ar, enable);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
+ enable, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ath12k_wow_set_keepalive(struct ath12k *ar,
+ enum wmi_sta_keepalive_method method,
+ u32 interval)
+{
+ struct ath12k_vif *arvif;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ list_for_each_entry(arvif, &ar->arvifs, list) {
+ ret = ath12k_mac_vif_set_keepalive(arvif, method, interval);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath12k_wow_cleanup(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_set_wakeups(ar, wowlan);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_protocol_offload(ar, true);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_mac_wait_tx_complete(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_set_hw_filter(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to set hw filter: %d\n",
+ ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
+ goto cleanup;
+ }
+
+ ret = ath12k_wow_enable(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to start wow: %d\n", ret);
+ goto cleanup;
+ }
+
+ ath12k_hif_irq_disable(ar->ab);
+ ath12k_hif_ce_irq_disable(ar->ab);
+
+ ret = ath12k_hif_suspend(ar->ab);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
+ goto wakeup;
+ }
+
+ goto exit;
+
+wakeup:
+ ath12k_wow_wakeup(ar);
+
+cleanup:
+ ath12k_wow_cleanup(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret ? 1 : 0;
+}
+
+void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+
+ mutex_lock(&ar->conf_mutex);
+ device_set_wakeup_enable(ar->ab->dev, enabled);
+ mutex_unlock(&ar->conf_mutex);
+}
+
+int ath12k_wow_op_resume(struct ieee80211_hw *hw)
+{
+ struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+ struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ ret = ath12k_hif_resume(ar->ab);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to resume hif: %d\n", ret);
+ goto exit;
+ }
+
+ ath12k_hif_ce_irq_enable(ar->ab);
+ ath12k_hif_irq_enable(ar->ab);
+
+ ret = ath12k_wow_wakeup(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_nlo_cleanup(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_clear_hw_filter(ar);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear hw filter: %d\n", ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_protocol_offload(ar, false);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n",
+ ret);
+ goto exit;
+ }
+
+ ret = ath12k_wow_set_keepalive(ar,
+ WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+ WMI_STA_KEEPALIVE_INTERVAL_DISABLE);
+ if (ret) {
+ ath12k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret);
+ goto exit;
+ }
+
+exit:
+ if (ret) {
+ switch (ah->state) {
+ case ATH12K_HW_STATE_ON:
+ ah->state = ATH12K_HW_STATE_RESTARTING;
+ ret = 1;
+ break;
+ case ATH12K_HW_STATE_OFF:
+ case ATH12K_HW_STATE_RESTARTING:
+ case ATH12K_HW_STATE_RESTARTED:
+ case ATH12K_HW_STATE_WEDGED:
+ ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n",
+ ah->state);
+ ret = -EIO;
+ break;
+ }
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+int ath12k_wow_init(struct ath12k *ar)
+{
+ if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map))
+ return 0;
+
+ ar->wow.wowlan_support = ath12k_wowlan_support;
+
+ if (ar->ab->wow.wmi_conf_rx_decap_mode == ATH12K_HW_TXRX_NATIVE_WIFI) {
+ ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE;
+ ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE;
+ }
+
+ if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
+ ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT;
+ ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
+ }
+
+ ar->wow.max_num_patterns = ATH12K_WOW_PATTERNS;
+ ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns;
+ ar->ah->hw->wiphy->wowlan = &ar->wow.wowlan_support;
+
+ device_set_wakeup_capable(ar->ab->dev, true);
+
+ return 0;
+}
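+
+/* Editorial sketch (assumed wiring, not part of this patch): the ops
+ * above are expected to be plugged into the driver's ieee80211_ops
+ * under CONFIG_PM, roughly as
+ *
+ *	.suspend = ath12k_wow_op_suspend,
+ *	.resume = ath12k_wow_op_resume,
+ *	.set_wakeup = ath12k_wow_op_set_wakeup,
+ *
+ * with ath12k_wow_init() called once during MAC registration.
+ */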
diff --git a/drivers/net/wireless/ath/ath12k/wow.h b/drivers/net/wireless/ath/ath12k/wow.h
new file mode 100644
index 000000000000..af9be5fadcc3
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/wow.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef ATH12K_WOW_H
+#define ATH12K_WOW_H
+
+#define ATH12K_WOW_RETRY_NUM 10
+#define ATH12K_WOW_RETRY_WAIT_MS 200
+#define ATH12K_WOW_PATTERNS 22
+
+struct ath12k_wow {
+ u32 max_num_patterns;
+ struct completion wakeup_completed;
+ struct wiphy_wowlan_support wowlan_support;
+};
+
+struct ath12k_pkt_pattern {
+ u8 pattern[WOW_MAX_PATTERN_SIZE];
+ u8 bytemask[WOW_MAX_PATTERN_SIZE];
+ int pattern_len;
+ int pkt_offset;
+};
+
+struct rfc1042_hdr {
+ u8 llc_dsap;
+ u8 llc_ssap;
+ u8 llc_ctrl;
+ u8 snap_oui[3];
+ __be16 eth_type;
+} __packed;
+
+#ifdef CONFIG_PM
+
+int ath12k_wow_init(struct ath12k *ar);
+int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan);
+int ath12k_wow_op_resume(struct ieee80211_hw *hw);
+void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled);
+int ath12k_wow_enable(struct ath12k *ar);
+int ath12k_wow_wakeup(struct ath12k *ar);
+
+#else
+
+static inline int ath12k_wow_init(struct ath12k *ar)
+{
+ return 0;
+}
+
+static inline int ath12k_wow_enable(struct ath12k *ar)
+{
+ return 0;
+}
+
+static inline int ath12k_wow_wakeup(struct ath12k *ar)
+{
+ return 0;
+}
+#endif /* CONFIG_PM */
+#endif /* ATH12K_WOW_H */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 9f534ed2fbb3..abe41330fb69 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2847,7 +2847,7 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah)
* if another thread does a system call and the thread doing the
* stop is preempted).
*/
-void ath5k_stop(struct ieee80211_hw *hw)
+void ath5k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath5k_hw *ah = hw->priv;
int ret;
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 97469d0fbad7..594e5b945cb7 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -92,7 +92,7 @@ void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif);
bool ath5k_any_vif_assoc(struct ath5k_hw *ah);
int ath5k_start(struct ieee80211_hw *hw);
-void ath5k_stop(struct ieee80211_hw *hw);
+void ath5k_stop(struct ieee80211_hw *hw, bool suspend);
void ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf);
int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
diff --git a/drivers/net/wireless/ath/ath6kl/htc_mbox.c b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
index 1963d3145481..fb5144e2d86c 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_mbox.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_mbox.c
@@ -364,8 +364,7 @@ static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
packet->buf -= HTC_HDR_LENGTH;
hdr = (struct htc_frame_hdr *)packet->buf;
- /* Endianess? */
- put_unaligned((u16)packet->act_len, &hdr->payld_len);
+ put_unaligned_le16(packet->act_len, &hdr->payld_len);
hdr->flags = flags;
hdr->eid = packet->endpoint;
hdr->ctrl[0] = ctrl0;
diff --git a/drivers/net/wireless/ath/ath6kl/htc_pipe.c b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
index 9b88d96bfe96..2f2edfe43761 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_pipe.c
+++ b/drivers/net/wireless/ath/ath6kl/htc_pipe.c
@@ -237,8 +237,7 @@ static int htc_issue_packets(struct htc_target *target,
packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;
- /* Endianess? */
- put_unaligned((u16) payload_len, &htc_hdr->payld_len);
+ put_unaligned_le16(payload_len, &htc_hdr->payld_len);
htc_hdr->flags = packet->info.tx.flags;
htc_hdr->eid = (u8) packet->endpoint;
htc_hdr->ctrl[0] = 0;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 8a43c48ec1cf..9ab091044706 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -1427,25 +1427,7 @@ static struct sdio_driver ath6kl_sdio_driver = {
.remove = ath6kl_sdio_remove,
.drv.pm = ATH6KL_SDIO_PM_OPS,
};
-
-static int __init ath6kl_sdio_init(void)
-{
- int ret;
-
- ret = sdio_register_driver(&ath6kl_sdio_driver);
- if (ret)
- ath6kl_err("sdio driver registration failed: %d\n", ret);
-
- return ret;
-}
-
-static void __exit ath6kl_sdio_exit(void)
-{
- sdio_unregister_driver(&ath6kl_sdio_driver);
-}
-
-module_init(ath6kl_sdio_init);
-module_exit(ath6kl_sdio_exit);
+module_sdio_driver(ath6kl_sdio_driver);
MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
diff --git a/drivers/net/wireless/ath/ath6kl/trace.h b/drivers/net/wireless/ath/ath6kl/trace.h
index 231a94769ddb..8577aa459c58 100644
--- a/drivers/net/wireless/ath/ath6kl/trace.h
+++ b/drivers/net/wireless/ath/ath6kl/trace.h
@@ -304,8 +304,8 @@ TRACE_EVENT(ath6kl_log_dbg_dump,
),
TP_fast_assign(
- __assign_str(msg, msg);
- __assign_str(prefix, prefix);
+ __assign_str(msg);
+ __assign_str(prefix);
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 668fc07b3073..29ca65a732a6 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -39,6 +39,7 @@ extern int ath9k_modparam_nohwcrypt;
extern int ath9k_led_blink;
extern bool is_ath9k_unloaded;
extern int ath9k_use_chanctx;
+extern int ath9k_use_msi;
/*************************/
/* Descriptor Management */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index e8c2cc03be0c..27b860b0c769 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -76,7 +76,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
static u32 ath9k_dump_4k_modal_eeprom(char *buf, u32 len, u32 size,
struct modal_eep_4k_header *modal_hdr)
{
- PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
+ PR_EEP("Chain0 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[0]));
PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
PR_EEP("Switch Settle", modal_hdr->switchSettling);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index fd5312c2a7e3..d85472ee4d85 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -79,8 +79,8 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
static u32 ar9287_dump_modal_eeprom(char *buf, u32 len, u32 size,
struct modal_eep_ar9287_header *modal_hdr)
{
- PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
- PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1]));
+ PR_EEP("Chain0 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[0]));
+ PR_EEP("Chain1 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[1]));
PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 7685f8ab371e..84b31caf8ca6 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -135,9 +135,9 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
static u32 ath9k_def_dump_modal_eeprom(char *buf, u32 len, u32 size,
struct modal_eep_header *modal_hdr)
{
- PR_EEP("Chain0 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[0]));
- PR_EEP("Chain1 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[1]));
- PR_EEP("Chain2 Ant. Control", le16_to_cpu(modal_hdr->antCtrlChain[2]));
+ PR_EEP("Chain0 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[0]));
+ PR_EEP("Chain1 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[1]));
+ PR_EEP("Chain2 Ant. Control", le32_to_cpu(modal_hdr->antCtrlChain[2]));
PR_EEP("Ant. Common Control", le32_to_cpu(modal_hdr->antCtrlCommon));
PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]);
PR_EEP("Chain1 Ant. Gain", modal_hdr->antennaGainCh[1]);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index b389e19381c4..8a03bcc2789e 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -973,7 +973,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
return ret;
}
-static void ath9k_htc_stop(struct ieee80211_hw *hw)
+static void ath9k_htc_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath9k_htc_priv *priv = hw->priv;
struct ath_hw *ah = priv->ah;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a2943aaecb20..b92c89dad8de 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -135,8 +135,7 @@ void ath9k_ps_wakeup(struct ath_softc *sc)
if (power_mode != ATH9K_PM_AWAKE) {
spin_lock(&common->cc_lock);
ath_hw_cycle_counters_update(common);
- memset(&common->cc_survey, 0, sizeof(common->cc_survey));
- memset(&common->cc_ani, 0, sizeof(common->cc_ani));
+ memset(&common->cc, 0, sizeof(common->cc));
spin_unlock(&common->cc_lock);
}
@@ -896,7 +895,7 @@ static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix)
ath_key_delete(common, keyix);
}
-static void ath9k_stop(struct ieee80211_hw *hw)
+static void ath9k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index e655cd8bbf94..1ff53520f0a3 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -21,8 +21,6 @@
#include <linux/module.h>
#include "ath9k.h"
-extern int ath9k_use_msi;
-
static const struct pci_device_id ath_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index d519b676a109..35aa47a9db90 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1674,8 +1674,14 @@ static void
ath9k_set_moredata(struct ath_softc *sc, struct ath_buf *bf, bool val)
{
struct ieee80211_hdr *hdr;
- u16 mask = cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- u16 mask_val = mask * val;
+ __le16 mask, mask_val;
+
+ mask = cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+
+ if (val)
+ mask_val = mask;
+ else
+ mask_val = 0;
hdr = (struct ieee80211_hdr *) bf->bf_mpdu->data;
if ((hdr->frame_control & mask) != mask_val) {
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 7e7797bf44b7..755c068e4197 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -439,7 +439,7 @@ static void carl9170_cancel_worker(struct ar9170 *ar)
cancel_work_sync(&ar->ampdu_work);
}
-static void carl9170_op_stop(struct ieee80211_hw *hw)
+static void carl9170_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct ar9170 *ar = hw->priv;
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index e902ca80eba7..0226c31a6cae 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -280,7 +280,8 @@ static void carl9170_tx_release(struct kref *ref)
* carl9170_tx_fill_rateinfo() has filled the rate information
* before we get to this point.
*/
- memset_after(&txinfo->status, 0, rates);
+ memset(&txinfo->pad, 0, sizeof(txinfo->pad));
+ memset(&txinfo->rate_driver_data, 0, sizeof(txinfo->rate_driver_data));
if (atomic_read(&ar->tx_total_queued))
ar->tx_schedule = true;
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index c4edf8355941..a3e03580cd9f 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -1069,6 +1069,38 @@ static int carl9170_usb_probe(struct usb_interface *intf,
ar->usb_ep_cmd_is_bulk = true;
}
+ /* Verify that all expected endpoints are present */
+ if (ar->usb_ep_cmd_is_bulk) {
+ u8 bulk_ep_addr[] = {
+ AR9170_USB_EP_RX | USB_DIR_IN,
+ AR9170_USB_EP_TX | USB_DIR_OUT,
+ AR9170_USB_EP_CMD | USB_DIR_OUT,
+ 0};
+ u8 int_ep_addr[] = {
+ AR9170_USB_EP_IRQ | USB_DIR_IN,
+ 0};
+ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
+ !usb_check_int_endpoints(intf, int_ep_addr))
+ err = -ENODEV;
+ } else {
+ u8 bulk_ep_addr[] = {
+ AR9170_USB_EP_RX | USB_DIR_IN,
+ AR9170_USB_EP_TX | USB_DIR_OUT,
+ 0};
+ u8 int_ep_addr[] = {
+ AR9170_USB_EP_IRQ | USB_DIR_IN,
+ AR9170_USB_EP_CMD | USB_DIR_OUT,
+ 0};
+ if (!usb_check_bulk_endpoints(intf, bulk_ep_addr) ||
+ !usb_check_int_endpoints(intf, int_ep_addr))
+ err = -ENODEV;
+ }
+
+ if (err) {
+ carl9170_free(ar);
+ return err;
+ }
+
usb_set_intfdata(intf, ar);
SET_IEEE80211_DEV(ar->hw, &intf->dev);
diff --git a/drivers/net/wireless/ath/trace.h b/drivers/net/wireless/ath/trace.h
index 9935cf475b6d..82aac0a4baff 100644
--- a/drivers/net/wireless/ath/trace.h
+++ b/drivers/net/wireless/ath/trace.h
@@ -44,8 +44,8 @@ TRACE_EVENT(ath_log,
),
TP_fast_assign(
- __assign_str(device, wiphy_name(wiphy));
- __assign_str(driver, KBUILD_MODNAME);
+ __assign_str(device);
+ __assign_str(driver);
__assign_vstr(msg, vaf->fmt, vaf->va);
),
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index bfbd3c7a70b3..408776562a7e 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -278,7 +278,7 @@ out_err:
return ret;
}
-static void wcn36xx_stop(struct ieee80211_hw *hw)
+static void wcn36xx_stop(struct ieee80211_hw *hw, bool suspend)
{
struct wcn36xx *wcn = hw->priv;
@@ -756,9 +756,9 @@ static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
if (sta->deflink.vht_cap.vht_supported) {
sta_priv->supported_rates.op_rate_mode = STA_11ac;
sta_priv->supported_rates.vht_rx_mcs_map =
- sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
+ le16_to_cpu(sta->deflink.vht_cap.vht_mcs.rx_mcs_map);
sta_priv->supported_rates.vht_tx_mcs_map =
- sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
+ le16_to_cpu(sta->deflink.vht_cap.vht_mcs.tx_mcs_map);
}
}
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 0802ed728824..8826998797d6 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -318,7 +318,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
memset(&status, 0, sizeof(status));
bd = (struct wcn36xx_rx_bd *)skb->data;
- buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
+ buff_to_be(bd, sizeof(*bd)/sizeof(u32));
wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP,
"BD <<< ", (char *)bd,
sizeof(struct wcn36xx_rx_bd));
@@ -692,7 +692,7 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
/* MGMT and CTRL frames are handled here */
wcn36xx_set_tx_mgmt(&bd, wcn, &vif_priv, skb, bcast);
- buff_to_be((u32 *)&bd, sizeof(bd)/sizeof(u32));
+ buff_to_be(&bd, sizeof(bd)/sizeof(u32));
bd.tx_bd_sign = 0xbdbdbdbd;
ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low);
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index ff4a8e5d7209..bccc27de848d 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -100,11 +100,14 @@ enum wcn36xx_ampdu_state {
#define RF_IRIS_WCN3660 0x3660
#define RF_IRIS_WCN3680 0x3680
-static inline void buff_to_be(u32 *buf, size_t len)
+static inline void buff_to_be(void *buf, size_t len)
{
+ __be32 *to = buf;
+ u32 *from = buf;
int i;
+
for (i = 0; i < len; i++)
- buf[i] = cpu_to_be32(buf[i]);
+ to[i] = cpu_to_be32(from[i]);
}
struct nv_data {
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index dbe4b3478f03..e8f1d30a8d73 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -892,10 +892,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
struct wireless_dev *wdev = request->wdev;
struct wil6210_vif *vif = wdev_to_vif(wil, wdev);
- struct {
- struct wmi_start_scan_cmd cmd;
- u16 chnl[4];
- } __packed cmd;
+ DEFINE_FLEX(struct wmi_start_scan_cmd, cmd,
+ channel_list, num_channels, 4);
uint i, n;
int rc;
@@ -977,9 +975,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
vif->scan_request = request;
mod_timer(&vif->scan_timer, jiffies + WIL6210_SCAN_TO);
- memset(&cmd, 0, sizeof(cmd));
- cmd.cmd.scan_type = WMI_ACTIVE_SCAN;
- cmd.cmd.num_channels = 0;
+ cmd->scan_type = WMI_ACTIVE_SCAN;
+ cmd->num_channels = 0;
n = min(request->n_channels, 4U);
for (i = 0; i < n; i++) {
int ch = request->channels[i]->hw_value;
@@ -991,7 +988,8 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
continue;
}
/* 0-based channel indexes */
- cmd.cmd.channel_list[cmd.cmd.num_channels++].channel = ch - 1;
+ cmd->num_channels++;
+ cmd->channel_list[cmd->num_channels - 1].channel = ch - 1;
wil_dbg_misc(wil, "Scan for ch %d : %d MHz\n", ch,
request->channels[i]->center_freq);
}
@@ -1007,16 +1005,15 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
if (rc)
goto out_restore;
- if (wil->discovery_mode && cmd.cmd.scan_type == WMI_ACTIVE_SCAN) {
- cmd.cmd.discovery_mode = 1;
+ if (wil->discovery_mode && cmd->scan_type == WMI_ACTIVE_SCAN) {
+ cmd->discovery_mode = 1;
wil_dbg_misc(wil, "active scan with discovery_mode=1\n");
}
if (vif->mid == 0)
wil->radio_wdev = wdev;
rc = wmi_send(wil, WMI_START_SCAN_CMDID, vif->mid,
- &cmd, sizeof(cmd.cmd) +
- cmd.cmd.num_channels * sizeof(cmd.cmd.channel_list[0]));
+ cmd, struct_size(cmd, channel_list, cmd->num_channels));
out_restore:
if (rc) {
@@ -2735,7 +2732,7 @@ int wil_cfg80211_iface_combinations_from_fw(
return 0;
}
- combo = conc->combos;
+ combo = (const struct wil_fw_concurrency_combo *)(conc + 1);
n_combos = le16_to_cpu(conc->n_combos);
for (i = 0; i < n_combos; i++) {
total_limits += combo->n_limits;
@@ -2751,7 +2748,7 @@ int wil_cfg80211_iface_combinations_from_fw(
return -ENOMEM;
iface_limit = (struct ieee80211_iface_limit *)(iface_combinations +
n_combos);
- combo = conc->combos;
+ combo = (const struct wil_fw_concurrency_combo *)(conc + 1);
for (i = 0; i < n_combos; i++) {
iface_combinations[i].max_interfaces = combo->max_interfaces;
iface_combinations[i].num_different_channels =
diff --git a/drivers/net/wireless/ath/wil6210/fw.h b/drivers/net/wireless/ath/wil6210/fw.h
index aa1620e0d24f..2079a90ec260 100644
--- a/drivers/net/wireless/ath/wil6210/fw.h
+++ b/drivers/net/wireless/ath/wil6210/fw.h
@@ -93,7 +93,6 @@ struct wil_fw_record_concurrency { /* type == wil_fw_type_comment */
/* number of concurrency combinations that follow */
__le16 n_combos;
/* keep last - combinations, variable size by n_combos */
- struct wil_fw_concurrency_combo combos[];
} __packed;
/* brd file info encoded inside a comment record */
diff --git a/drivers/net/wireless/ath/wil6210/fw_inc.c b/drivers/net/wireless/ath/wil6210/fw_inc.c
index fbc84c03406b..c3c0b289dcf3 100644
--- a/drivers/net/wireless/ath/wil6210/fw_inc.c
+++ b/drivers/net/wireless/ath/wil6210/fw_inc.c
@@ -212,8 +212,8 @@ fw_handle_concurrency(struct wil6210_priv *wil, const void *data,
}
n_combos = le16_to_cpu(rec->n_combos);
- remain = size - offsetof(struct wil_fw_record_concurrency, combos);
- combo = rec->combos;
+ remain = size - sizeof(struct wil_fw_record_concurrency);
+ combo = (const struct wil_fw_concurrency_combo *)(rec + 1);
for (i = 0; i < n_combos; i++) {
if (remain < sizeof(*combo))
goto out_short;
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index ee7d7e9c2718..d5d364683c0e 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -453,16 +453,21 @@ int wil_if_add(struct wil6210_priv *wil)
return rc;
}
- init_dummy_netdev(&wil->napi_ndev);
+ wil->napi_ndev = alloc_netdev_dummy(0);
+ if (!wil->napi_ndev) {
+ wil_err(wil, "failed to allocate dummy netdev");
+ rc = -ENOMEM;
+ goto out_wiphy;
+ }
if (wil->use_enhanced_dma_hw) {
- netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ netif_napi_add(wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx_edma);
- netif_napi_add_tx(&wil->napi_ndev,
+ netif_napi_add_tx(wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx_edma);
} else {
- netif_napi_add(&wil->napi_ndev, &wil->napi_rx,
+ netif_napi_add(wil->napi_ndev, &wil->napi_rx,
wil6210_netdev_poll_rx);
- netif_napi_add_tx(&wil->napi_ndev,
+ netif_napi_add_tx(wil->napi_ndev,
&wil->napi_tx, wil6210_netdev_poll_tx);
}
@@ -474,10 +479,12 @@ int wil_if_add(struct wil6210_priv *wil)
wiphy_unlock(wiphy);
rtnl_unlock();
if (rc < 0)
- goto out_wiphy;
+ goto free_dummy;
return 0;
+free_dummy:
+ free_netdev(wil->napi_ndev);
out_wiphy:
wiphy_unregister(wiphy);
return rc;
@@ -554,5 +561,7 @@ void wil_if_remove(struct wil6210_priv *wil)
netif_napi_del(&wil->napi_tx);
netif_napi_del(&wil->napi_rx);
+ free_netdev(wil->napi_ndev);
+
wiphy_unregister(wiphy);
}
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 22a6eb3e12b7..9bd1286d2857 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -983,7 +983,7 @@ struct wil6210_priv {
spinlock_t eap_lock; /* guarding access to eap rekey fields */
struct napi_struct napi_rx;
struct napi_struct napi_tx;
- struct net_device napi_ndev; /* dummy net_device serving all VIFs */
+ struct net_device *napi_ndev; /* dummy net_device serving all VIFs */
/* DMA related */
struct wil_ring ring_rx;
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 6fdb77d4c59e..8ff69dc72fb9 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -4015,27 +4015,22 @@ int wmi_set_cqm_rssi_config(struct wil6210_priv *wil,
struct wil6210_vif *vif = ndev_to_vif(ndev);
int rc;
struct {
- struct wmi_set_link_monitor_cmd cmd;
- s8 rssi_thold;
- } __packed cmd = {
- .cmd = {
- .rssi_hyst = rssi_hyst,
- .rssi_thresholds_list_size = 1,
- },
- .rssi_thold = rssi_thold,
- };
- struct {
struct wmi_cmd_hdr hdr;
struct wmi_set_link_monitor_event evt;
} __packed reply = {
.evt = {.status = WMI_FW_STATUS_FAILURE},
};
+ DEFINE_FLEX(struct wmi_set_link_monitor_cmd, cmd,
+ rssi_thresholds_list, rssi_thresholds_list_size, 1);
+
+ cmd->rssi_hyst = rssi_hyst;
+ cmd->rssi_thresholds_list[0] = rssi_thold;
if (rssi_thold > S8_MAX || rssi_thold < S8_MIN || rssi_hyst > U8_MAX)
return -EINVAL;
- rc = wmi_call(wil, WMI_SET_LINK_MONITOR_CMDID, vif->mid, &cmd,
- sizeof(cmd), WMI_SET_LINK_MONITOR_EVENTID,
+ rc = wmi_call(wil, WMI_SET_LINK_MONITOR_CMDID, vif->mid, cmd,
+ __struct_size(cmd), WMI_SET_LINK_MONITOR_EVENTID,
&reply, sizeof(reply), WIL_WMI_CALL_GENERAL_TO_MS);
if (rc) {
wil_err(wil, "WMI_SET_LINK_MONITOR_CMDID failed, rc %d\n", rc);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 71bf2ae27a98..38f64524019e 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -474,7 +474,7 @@ struct wmi_start_scan_cmd {
struct {
u8 channel;
u8 reserved;
- } channel_list[];
+ } channel_list[] __counted_by(num_channels);
} __packed;
#define WMI_MAX_PNO_SSID_NUM (16)
@@ -3320,7 +3320,7 @@ struct wmi_set_link_monitor_cmd {
u8 rssi_hyst;
u8 reserved[12];
u8 rssi_thresholds_list_size;
- s8 rssi_thresholds_list[];
+ s8 rssi_thresholds_list[] __counted_by(rssi_thresholds_list_size);
} __packed;
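+/* Editorial note (not part of this patch): __counted_by() ties the
+ * flexible array to its counter field for compiler bounds checking,
+ * and the DEFINE_FLEX() uses in cfg80211.c/wmi.c above declare an
+ * on-stack, zero-initialized buffer sized for N trailing elements
+ * with the counter pre-set, e.g.
+ *
+ *	DEFINE_FLEX(struct wmi_set_link_monitor_cmd, cmd,
+ *		    rssi_thresholds_list, rssi_thresholds_list_size, 1);
+ *	cmd->rssi_thresholds_list[0] = rssi_thold;
+ */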
/* wmi_link_monitor_event_type */
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
index 0b55a272bfd6..504e05ea30f2 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.c
+++ b/drivers/net/wireless/atmel/at76c50x-usb.c
@@ -332,7 +332,7 @@ static int at76_dfu_get_status(struct usb_device *udev,
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), DFU_GETSTATUS,
USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
- 0, 0, status, sizeof(struct dfu_status),
+ 0, 0, status, sizeof(*status),
USB_CTRL_GET_TIMEOUT);
return ret;
}
@@ -366,7 +366,7 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
u32 dfu_timeout = 0;
int bsize = 0;
int blockno = 0;
- struct dfu_status *dfu_stat_buf = NULL;
+ struct dfu_status *dfu_stat_buf;
u8 *dfu_state = NULL;
u8 *block = NULL;
@@ -378,7 +378,7 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
return -EINVAL;
}
- dfu_stat_buf = kmalloc(sizeof(struct dfu_status), GFP_KERNEL);
+ dfu_stat_buf = kmalloc(sizeof(*dfu_stat_buf), GFP_KERNEL);
if (!dfu_stat_buf) {
ret = -ENOMEM;
goto exit;
@@ -721,9 +721,11 @@ static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf,
int buf_size)
{
int ret;
- struct at76_command *cmd_buf = kmalloc(sizeof(struct at76_command) +
- buf_size, GFP_KERNEL);
+ size_t total_size;
+ struct at76_command *cmd_buf;
+ total_size = struct_size(cmd_buf, data, buf_size);
+ cmd_buf = kmalloc(total_size, GFP_KERNEL);
if (!cmd_buf)
return -ENOMEM;
@@ -732,15 +734,13 @@ static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf,
cmd_buf->size = cpu_to_le16(buf_size);
memcpy(cmd_buf->data, buf, buf_size);
- at76_dbg_dump(DBG_CMD, cmd_buf, sizeof(struct at76_command) + buf_size,
+ at76_dbg_dump(DBG_CMD, cmd_buf, total_size,
"issuing command %s (0x%02x)",
at76_get_cmd_string(cmd), cmd);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0e,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
- 0, 0, cmd_buf,
- sizeof(struct at76_command) + buf_size,
- USB_CTRL_GET_TIMEOUT);
+ 0, 0, cmd_buf, total_size, USB_CTRL_GET_TIMEOUT);
kfree(cmd_buf);
return ret;
}
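
struct_size() computes sizeof(*cmd_buf) plus buf_size trailing elements of data[] with overflow checking: if the arithmetic would wrap, it saturates to SIZE_MAX, so the kmalloc() above fails cleanly instead of returning an undersized buffer for the later memcpy() to overrun. In essence:

    size_t total_size = struct_size(cmd_buf, data, buf_size);
    /* on overflow, total_size == SIZE_MAX and kmalloc() returns NULL */
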
@@ -931,14 +931,12 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv)
{
int i;
int ret;
- struct mib_mac_addr *m = kmalloc(sizeof(struct mib_mac_addr),
- GFP_KERNEL);
+ struct mib_mac_addr *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
- ret = at76_get_mib(priv->udev, MIB_MAC_ADDR, m,
- sizeof(struct mib_mac_addr));
+ ret = at76_get_mib(priv->udev, MIB_MAC_ADDR, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC_ADDR) failed: %d\n", ret);
@@ -961,13 +959,12 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv)
int i;
int ret;
int key_len;
- struct mib_mac_wep *m = kmalloc(sizeof(struct mib_mac_wep), GFP_KERNEL);
+ struct mib_mac_wep *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
- ret = at76_get_mib(priv->udev, MIB_MAC_WEP, m,
- sizeof(struct mib_mac_wep));
+ ret = at76_get_mib(priv->udev, MIB_MAC_WEP, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC_WEP) failed: %d\n", ret);
@@ -997,14 +994,12 @@ exit:
static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
{
int ret;
- struct mib_mac_mgmt *m = kmalloc(sizeof(struct mib_mac_mgmt),
- GFP_KERNEL);
+ struct mib_mac_mgmt *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
- ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, m,
- sizeof(struct mib_mac_mgmt));
+ ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC_MGMT) failed: %d\n", ret);
@@ -1035,12 +1030,12 @@ exit:
static void at76_dump_mib_mac(struct at76_priv *priv)
{
int ret;
- struct mib_mac *m = kmalloc(sizeof(struct mib_mac), GFP_KERNEL);
+ struct mib_mac *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
- ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac));
+ ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC) failed: %d\n", ret);
@@ -1072,12 +1067,12 @@ exit:
static void at76_dump_mib_phy(struct at76_priv *priv)
{
int ret;
- struct mib_phy *m = kmalloc(sizeof(struct mib_phy), GFP_KERNEL);
+ struct mib_phy *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
- ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy));
+ ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (PHY) failed: %d\n", ret);
@@ -1130,13 +1125,12 @@ exit:
static void at76_dump_mib_mdomain(struct at76_priv *priv)
{
int ret;
- struct mib_mdomain *m = kmalloc(sizeof(struct mib_mdomain), GFP_KERNEL);
+ struct mib_mdomain *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
- ret = at76_get_mib(priv->udev, MIB_MDOMAIN, m,
- sizeof(struct mib_mdomain));
+ ret = at76_get_mib(priv->udev, MIB_MDOMAIN, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MDOMAIN) failed: %d\n", ret);
@@ -1375,7 +1369,7 @@ static int at76_startup_device(struct at76_priv *priv)
priv->scan_min_time, priv->scan_max_time,
priv->scan_mode == SCAN_TYPE_ACTIVE ? "active" : "passive");
- memset(ccfg, 0, sizeof(struct at76_card_config));
+ memset(ccfg, 0, sizeof(*ccfg));
ccfg->promiscuous_mode = 0;
ccfg->short_retry_limit = priv->short_retry_limit;
@@ -1411,7 +1405,7 @@ static int at76_startup_device(struct at76_priv *priv)
ccfg->beacon_period = cpu_to_le16(priv->beacon_period);
ret = at76_set_card_command(priv->udev, CMD_STARTUP, &priv->card_config,
- sizeof(struct at76_card_config));
+ sizeof(*ccfg));
if (ret < 0) {
wiphy_err(priv->hw->wiphy, "at76_set_card_command failed: %d\n",
ret);
@@ -1856,7 +1850,7 @@ error:
return 0;
}
-static void at76_mac80211_stop(struct ieee80211_hw *hw)
+static void at76_mac80211_stop(struct ieee80211_hw *hw, bool suspend)
{
struct at76_priv *priv = hw->priv;
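
Several hunks in this section (at76, b43, b43legacy, brcmsmac, iwlegacy) track the same mac80211 API change: the .stop callback in struct ieee80211_ops gained a bool suspend argument so a driver can tell a suspend-initiated stop from an ordinary interface-down. A hedged sketch with hypothetical driver names:

    static void foo_op_stop(struct ieee80211_hw *hw, bool suspend)
    {
            struct foo_priv *priv = hw->priv;

            /* 'suspend' distinguishes system suspend from ifdown;
             * drivers without a suspend-specific path ignore it */
            foo_power_down(priv);
    }

    static const struct ieee80211_ops foo_ops = {
            .stop = foo_op_stop,
            /* other callbacks elided */
    };
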
@@ -2443,7 +2437,7 @@ static int at76_probe(struct usb_interface *interface,
struct usb_device *udev;
int op_mode;
int need_ext_fw = 0;
- struct mib_fw_version *fwv = NULL;
+ struct mib_fw_version *fwv;
int board_type = (int)id->driver_info;
udev = usb_get_dev(interface_to_usbdev(interface));
@@ -2531,7 +2525,7 @@ static int at76_probe(struct usb_interface *interface,
usb_set_intfdata(interface, priv);
- memcpy(&priv->fw_version, fwv, sizeof(struct mib_fw_version));
+ memcpy(&priv->fw_version, fwv, sizeof(*fwv));
priv->board_type = board_type;
ret = at76_init_new_device(priv, interface);
diff --git a/drivers/net/wireless/atmel/at76c50x-usb.h b/drivers/net/wireless/atmel/at76c50x-usb.h
index 746e64dfd8aa..843146a0de64 100644
--- a/drivers/net/wireless/atmel/at76c50x-usb.h
+++ b/drivers/net/wireless/atmel/at76c50x-usb.h
@@ -151,7 +151,7 @@ struct at76_command {
u8 cmd;
u8 reserved;
__le16 size;
- u8 data[];
+ u8 data[] __counted_by_le(size);
} __packed;
/* Length of Atmel-specific Rx header before 802.11 frame */
diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c
index badb2f494035..8e56dcf9309d 100644
--- a/drivers/net/wireless/broadcom/b43/main.c
+++ b/drivers/net/wireless/broadcom/b43/main.c
@@ -5078,7 +5078,7 @@ static int b43_op_start(struct ieee80211_hw *hw)
return err;
}
-static void b43_op_stop(struct ieee80211_hw *hw)
+static void b43_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;
diff --git a/drivers/net/wireless/broadcom/b43/sysfs.c b/drivers/net/wireless/broadcom/b43/sysfs.c
index 0679d132968f..261b2b746a9c 100644
--- a/drivers/net/wireless/broadcom/b43/sysfs.c
+++ b/drivers/net/wireless/broadcom/b43/sysfs.c
@@ -53,19 +53,14 @@ static ssize_t b43_attr_interfmode_show(struct device *dev,
switch (wldev->phy.g->interfmode) {
case B43_INTERFMODE_NONE:
- count =
- snprintf(buf, PAGE_SIZE,
- "0 (No Interference Mitigation)\n");
+ count = sysfs_emit(buf, "0 (No Interference Mitigation)\n");
break;
case B43_INTERFMODE_NONWLAN:
- count =
- snprintf(buf, PAGE_SIZE,
- "1 (Non-WLAN Interference Mitigation)\n");
+ count = sysfs_emit(buf,
+ "1 (Non-WLAN Interference Mitigation)\n");
break;
case B43_INTERFMODE_MANUALWLAN:
- count =
- snprintf(buf, PAGE_SIZE,
- "2 (WLAN Interference Mitigation)\n");
+ count = sysfs_emit(buf, "2 (WLAN Interference Mitigation)\n");
break;
default:
B43_WARN_ON(1);
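
sysfs_emit() is the preferred formatter for sysfs show() callbacks: it knows the output buffer is exactly one page, bounds the write itself, and warns on misuse, whereas open-coded snprintf(buf, PAGE_SIZE, ...) depends on each call site getting the length right. A sketch with hypothetical names:

    static ssize_t mode_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
            return sysfs_emit(buf, "%d (%s)\n", mode, mode_name(mode));
    }
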
diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c
index 18eb610f600a..441d6440671b 100644
--- a/drivers/net/wireless/broadcom/b43legacy/main.c
+++ b/drivers/net/wireless/broadcom/b43legacy/main.c
@@ -3485,7 +3485,7 @@ out_mutex_unlock:
return err;
}
-static void b43legacy_op_stop(struct ieee80211_hw *hw)
+static void b43legacy_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
diff --git a/drivers/net/wireless/broadcom/b43legacy/sysfs.c b/drivers/net/wireless/broadcom/b43legacy/sysfs.c
index eec087ca30e6..d988fe541bf7 100644
--- a/drivers/net/wireless/broadcom/b43legacy/sysfs.c
+++ b/drivers/net/wireless/broadcom/b43legacy/sysfs.c
@@ -75,16 +75,14 @@ static ssize_t b43legacy_attr_interfmode_show(struct device *dev,
switch (wldev->phy.interfmode) {
case B43legacy_INTERFMODE_NONE:
- count = snprintf(buf, PAGE_SIZE, "0 (No Interference"
- " Mitigation)\n");
+ count = sysfs_emit(buf, "0 (No Interference Mitigation)\n");
break;
case B43legacy_INTERFMODE_NONWLAN:
- count = snprintf(buf, PAGE_SIZE, "1 (Non-WLAN Interference"
- " Mitigation)\n");
+ count = sysfs_emit(buf,
+ "1 (Non-WLAN Interference Mitigation)\n");
break;
case B43legacy_INTERFMODE_MANUALWLAN:
- count = snprintf(buf, PAGE_SIZE, "2 (WLAN Interference"
- " Mitigation)\n");
+ count = sysfs_emit(buf, "2 (WLAN Interference Mitigation)\n");
break;
default:
B43legacy_WARN_ON(1);
@@ -155,11 +153,9 @@ static ssize_t b43legacy_attr_preamble_show(struct device *dev,
mutex_lock(&wldev->wl->mutex);
if (wldev->short_preamble)
- count = snprintf(buf, PAGE_SIZE, "1 (Short Preamble"
- " enabled)\n");
+ count = sysfs_emit(buf, "1 (Short Preamble enabled)\n");
else
- count = snprintf(buf, PAGE_SIZE, "0 (Short Preamble"
- " disabled)\n");
+ count = sysfs_emit(buf, "0 (Short Preamble disabled)\n");
mutex_unlock(&wldev->wl->mutex);
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
index dc6d27a36faa..e5ca0f511822 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile
@@ -6,8 +6,8 @@
#
ccflags-y += \
- -I $(srctree)/$(src) \
- -I $(srctree)/$(src)/../include
+ -I $(src) \
+ -I $(src)/../include
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/Makefile
index 46098705e236..5e37c638f966 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bca/Makefile
@@ -3,9 +3,9 @@
# Copyright (c) 2022 Broadcom Corporation
ccflags-y += \
- -I $(srctree)/$(src) \
- -I $(srctree)/$(src)/.. \
- -I $(srctree)/$(src)/../../include
+ -I $(src) \
+ -I $(src)/.. \
+ -I $(src)/../../include
obj-m += brcmfmac-bca.o
brcmfmac-bca-objs += \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
index 00679a990e3d..d35262335eaf 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c
@@ -1061,10 +1061,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
if (func->num != 2)
return -ENODEV;
- bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+ bus_if = kzalloc(sizeof(*bus_if), GFP_KERNEL);
if (!bus_if)
return -ENOMEM;
- sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
+ sdiodev = kzalloc(sizeof(*sdiodev), GFP_KERNEL);
if (!sdiodev) {
kfree(bus_if);
return -ENOMEM;
@@ -1238,7 +1238,6 @@ static struct sdio_driver brcmf_sdmmc_driver = {
.name = KBUILD_MODNAME,
.id_table = brcmf_sdmmc_ids,
.drv = {
- .owner = THIS_MODULE,
.pm = pm_sleep_ptr(&brcmf_sdio_pm_ops),
.coredump = brcmf_dev_coredump,
},
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
index 7ea2631b8069..0c3d119d1219 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c
@@ -358,10 +358,10 @@ idle:
*/
int brcmf_btcoex_attach(struct brcmf_cfg80211_info *cfg)
{
- struct brcmf_btcoex_info *btci = NULL;
+ struct brcmf_btcoex_info *btci;
brcmf_dbg(TRACE, "enter\n");
- btci = kmalloc(sizeof(struct brcmf_btcoex_info), GFP_KERNEL);
+ btci = kmalloc(sizeof(*btci), GFP_KERNEL);
if (!btci)
return -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index b99aa66dc5a9..d4cc5fa92341 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -4071,7 +4071,7 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp)
struct cfg80211_wowlan_wakeup *wakeup;
u32 wakeind;
s32 err;
- int timeout;
+ long time_left;
err = brcmf_fil_iovar_data_get(ifp, "wowl_wakeind", &wake_ind_le,
sizeof(wake_ind_le));
@@ -4113,10 +4113,10 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp)
}
if (wakeind & BRCMF_WOWL_PFN_FOUND) {
brcmf_dbg(INFO, "WOWL Wake indicator: BRCMF_WOWL_PFN_FOUND\n");
- timeout = wait_event_timeout(cfg->wowl.nd_data_wait,
- cfg->wowl.nd_data_completed,
- BRCMF_ND_INFO_TIMEOUT);
- if (!timeout)
+ time_left = wait_event_timeout(cfg->wowl.nd_data_wait,
+ cfg->wowl.nd_data_completed,
+ BRCMF_ND_INFO_TIMEOUT);
+ if (!time_left)
bphy_err(drvr, "No result for wowl net detect\n");
else
wakeup_data.net_detect = cfg->wowl.nd_info;
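
The rename from int timeout to long time_left matches wait_event_timeout()'s actual return type: the remaining jiffies (at least 1) when the condition became true, or 0 on timeout. A sketch assuming a wait queue wq and a flag data_ready:

    long time_left;

    time_left = wait_event_timeout(wq, data_ready,
                                   msecs_to_jiffies(2000));
    if (!time_left)
            return -ETIMEDOUT;  /* condition never became true */
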
@@ -4320,9 +4320,16 @@ brcmf_pmksa_v3_op(struct brcmf_if *ifp, struct cfg80211_pmksa *pmksa,
/* Single PMK operation */
pmk_op->count = cpu_to_le16(1);
length += sizeof(struct brcmf_pmksa_v3);
- memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN);
- memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
- pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN;
+ if (pmksa->bssid)
+ memcpy(pmk_op->pmk[0].bssid, pmksa->bssid, ETH_ALEN);
+ if (pmksa->pmkid) {
+ memcpy(pmk_op->pmk[0].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
+ pmk_op->pmk[0].pmkid_len = WLAN_PMKID_LEN;
+ }
+ if (pmksa->ssid && pmksa->ssid_len) {
+ memcpy(pmk_op->pmk[0].ssid.SSID, pmksa->ssid, pmksa->ssid_len);
+ pmk_op->pmk[0].ssid.SSID_len = pmksa->ssid_len;
+ }
pmk_op->pmk[0].time_left = cpu_to_le32(alive ? BRCMF_PMKSA_NO_EXPIRY : 0);
}
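
The added NULL and length checks matter because cfg80211 may hand over SSID-keyed PMKSA entries (e.g. for FILS caching) in which pmksa->bssid or pmksa->pmkid is absent; copying unconditionally would dereference a NULL pointer. Reduced to its core (entry is illustrative):

    if (pmksa->bssid)
            memcpy(entry->bssid, pmksa->bssid, ETH_ALEN);
    if (pmksa->ssid && pmksa->ssid_len)
            memcpy(entry->ssid, pmksa->ssid, pmksa->ssid_len);
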
@@ -4549,7 +4556,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
err = -EINVAL;
- bphy_err(drvr, "ivalid OUI\n");
+ bphy_err(drvr, "invalid OUI\n");
goto exit;
}
offset += TLV_OUI_LEN;
@@ -4588,7 +4595,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
for (i = 0; i < count; i++) {
if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
err = -EINVAL;
- bphy_err(drvr, "ivalid OUI\n");
+ bphy_err(drvr, "invalid OUI\n");
goto exit;
}
offset += TLV_OUI_LEN;
@@ -4622,7 +4629,7 @@ brcmf_configure_wpaie(struct brcmf_if *ifp,
for (i = 0; i < count; i++) {
if (!brcmf_valid_wpa_oui(&data[offset], is_rsn_ie)) {
err = -EINVAL;
- bphy_err(drvr, "ivalid OUI\n");
+ bphy_err(drvr, "invalid OUI\n");
goto exit;
}
offset += TLV_OUI_LEN;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/Makefile
index 5e1fddaff79e..33e86724ba14 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cyw/Makefile
@@ -3,9 +3,9 @@
# Copyright (c) 2022 Broadcom Corporation
ccflags-y += \
- -I $(srctree)/$(src) \
- -I $(srctree)/$(src)/.. \
- -I $(srctree)/$(src)/../../include
+ -I $(src) \
+ -I $(src)/.. \
+ -I $(src)/../../include
obj-m += brcmfmac-cyw.o
brcmfmac-cyw-objs += \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index e406e11481a6..fe4f65756105 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -70,6 +70,7 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
{
struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio;
struct device_node *root, *np = dev->of_node;
+ struct of_phandle_args oirq;
const char *prop;
int irq;
int err;
@@ -129,10 +130,10 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type,
sdio->drive_strength = val;
/* make sure there are interrupts defined in the node */
- if (!of_property_present(np, "interrupts"))
+ if (of_irq_parse_one(np, 0, &oirq))
return;
- irq = irq_of_parse_and_map(np, 0);
+ irq = irq_create_of_mapping(&oirq);
if (!irq) {
brcmf_err("interrupt could not be mapped\n");
return;
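
The of.c change stops checking only the "interrupts" property before mapping: of_irq_parse_one() both validates that an interrupt specifier exists (covering interrupts-extended as well) and fills the of_phandle_args that irq_create_of_mapping() consumes, so the presence test and the mapping can no longer disagree. Condensed:

    struct of_phandle_args oirq;
    int irq;

    if (of_irq_parse_one(np, 0, &oirq))
            return;         /* no usable interrupt specifier */

    irq = irq_create_of_mapping(&oirq);
    if (!irq)
            return;         /* specifier present but mapping failed */
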
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
index d7fb88bb6ae1..ce482a3877e9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c
@@ -313,11 +313,6 @@ struct brcmf_pcie_shared_info {
u8 version;
};
-struct brcmf_pcie_core_info {
- u32 base;
- u32 wrapbase;
-};
-
#define BRCMF_OTP_MAX_PARAM_LEN 16
struct brcmf_otp_params {
@@ -1675,6 +1670,15 @@ struct brcmf_random_seed_footer {
#define BRCMF_RANDOM_SEED_MAGIC 0xfeedc0de
#define BRCMF_RANDOM_SEED_LENGTH 0x100
+static noinline_for_stack void
+brcmf_pcie_provide_random_bytes(struct brcmf_pciedev_info *devinfo, u32 address)
+{
+ u8 randbuf[BRCMF_RANDOM_SEED_LENGTH];
+
+ get_random_bytes(randbuf, BRCMF_RANDOM_SEED_LENGTH);
+ memcpy_toio(devinfo->tcm + address, randbuf, BRCMF_RANDOM_SEED_LENGTH);
+}
+
static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
const struct firmware *fw, void *nvram,
u32 nvram_len)
@@ -1717,7 +1721,6 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
.length = cpu_to_le32(rand_len),
.magic = cpu_to_le32(BRCMF_RANDOM_SEED_MAGIC),
};
- void *randbuf;
/* Some Apple chips/firmwares expect a buffer of random
* data to be present before NVRAM
@@ -1729,10 +1732,7 @@ static int brcmf_pcie_download_fw_nvram(struct brcmf_pciedev_info *devinfo,
sizeof(footer));
address -= rand_len;
- randbuf = kzalloc(rand_len, GFP_KERNEL);
- get_random_bytes(randbuf, rand_len);
- memcpy_toio(devinfo->tcm + address, randbuf, rand_len);
- kfree(randbuf);
+ brcmf_pcie_provide_random_bytes(devinfo, address);
}
} else {
brcmf_dbg(PCIE, "No matching NVRAM file found %s\n",
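
Factoring the seed copy into a noinline_for_stack helper confines the 256-byte buffer to its own short-lived frame instead of inflating the caller's stack, and drops the kzalloc()/kfree() round-trip; since get_random_bytes() overwrites the whole buffer, the old zeroing allocation was redundant anyway. The general pattern:

    static noinline_for_stack void fill_io_with_random(void __iomem *dst)
    {
            u8 tmp[256];    /* confined to this frame */

            get_random_bytes(tmp, sizeof(tmp));
            memcpy_toio(dst, tmp, sizeof(tmp));
    }
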
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index 6b38d9de71af..1461dc453ac2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4450,7 +4450,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
brcmf_dbg(TRACE, "Enter\n");
/* Allocate private bus interface state */
- bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
+ bus = kzalloc(sizeof(*bus), GFP_ATOMIC);
if (!bus)
goto fail;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
index 5d66e94c806d..96032322b165 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/tracepoint.h
@@ -41,7 +41,7 @@ TRACE_EVENT(brcmf_err,
__vstring(msg, vaf->fmt, vaf->va)
),
TP_fast_assign(
- __assign_str(func, func);
+ __assign_str(func);
__assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
@@ -57,7 +57,7 @@ TRACE_EVENT(brcmf_dbg,
),
TP_fast_assign(
__entry->level = level;
- __assign_str(func, func);
+ __assign_str(func);
__assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
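
The two-argument __assign_str(dst, src) form went away once the tracing macros started taking the source expression from the matching __string()/__vstring() declaration; naming the source twice invited mismatches. A fragment of a TRACE_EVENT() definition showing the pairing:

    TP_STRUCT__entry(
            __string(func, func)    /* source expression declared once */
    ),
    TP_fast_assign(
            __assign_str(func);     /* copies from the __string() source */
    ),
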
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
index 0ccf735316c2..8afbf529c745 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
@@ -117,13 +117,6 @@ struct bootrom_id_le {
__le32 boardrev; /* Board revision */
};
-struct brcmf_usb_image {
- struct list_head list;
- s8 *fwname;
- u8 *image;
- int image_len;
-};
-
struct brcmf_usbdev_info {
struct brcmf_usbdev bus_pub; /* MUST BE FIRST */
spinlock_t qlock;
@@ -1243,8 +1236,8 @@ brcmf_usb_prepare_fw_request(struct brcmf_usbdev_info *devinfo)
static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
enum brcmf_fwvendor fwvid)
{
- struct brcmf_bus *bus = NULL;
- struct brcmf_usbdev *bus_pub = NULL;
+ struct brcmf_bus *bus;
+ struct brcmf_usbdev *bus_pub;
struct device *dev = devinfo->dev;
struct brcmf_fw_request *fwreq;
int ret;
@@ -1254,7 +1247,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo,
if (!bus_pub)
return -ENODEV;
- bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
+ bus = kzalloc(sizeof(*bus), GFP_ATOMIC);
if (!bus) {
ret = -ENOMEM;
goto fail;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/Makefile
index 7f455a19a2b1..3db4cce44f42 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/wcc/Makefile
@@ -3,9 +3,9 @@
# Copyright (c) 2022 Broadcom Corporation
ccflags-y += \
- -I $(srctree)/$(src) \
- -I $(srctree)/$(src)/.. \
- -I $(srctree)/$(src)/../../include
+ -I $(src) \
+ -I $(src)/.. \
+ -I $(src)/../../include
obj-m += brcmfmac-wcc.o
brcmfmac-wcc-objs += \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
index 090757730ba6..ffdd17eabe1d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/Makefile
@@ -16,9 +16,9 @@
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ccflags-y := \
- -I $(srctree)/$(src) \
- -I $(srctree)/$(src)/phy \
- -I $(srctree)/$(src)/../include
+ -I $(src) \
+ -I $(src)/phy \
+ -I $(src)/../include
brcmsmac-y := \
mac80211_if.o \
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
index 2084b506a450..50d817485cf9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c
@@ -512,7 +512,7 @@ ai_attach(struct bcma_bus *pbus)
struct si_info *sii;
/* alloc struct si_info */
- sii = kzalloc(sizeof(struct si_info), GFP_ATOMIC);
+ sii = kzalloc(sizeof(*sii), GFP_ATOMIC);
if (sii == NULL)
return NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
index e859075db716..33d17b779201 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c
@@ -143,12 +143,6 @@ struct ampdu_info {
struct brcms_fifo_info fifo_tb[NUM_FFPLD_FIFO];
};
-/* used for flushing ampdu packets */
-struct cb_del_ampdu_pars {
- struct ieee80211_sta *sta;
- u16 tid;
-};
-
static void brcms_c_scb_ampdu_update_max_txlen(struct ampdu_info *ampdu, u8 dur)
{
u32 rate, mcs;
@@ -225,7 +219,7 @@ struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc)
struct ampdu_info *ampdu;
int i;
- ampdu = kzalloc(sizeof(struct ampdu_info), GFP_ATOMIC);
+ ampdu = kzalloc(sizeof(*ampdu), GFP_ATOMIC);
if (!ampdu)
return NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c
index 54c616919590..f411bc6d795d 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c
@@ -111,7 +111,7 @@ struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc)
struct antsel_info *asi;
struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom;
- asi = kzalloc(sizeof(struct antsel_info), GFP_ATOMIC);
+ asi = kzalloc(sizeof(*asi), GFP_ATOMIC);
if (!asi)
return NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h
index a0da3248b942..53b3dba50737 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac.h
@@ -81,7 +81,7 @@ TRACE_EVENT(brcms_macintstatus,
__field(u32, mask)
),
TP_fast_assign(
- __assign_str(dev, dev_name(dev));
+ __assign_str(dev);
__entry->in_isr = in_isr;
__entry->macintstatus = macintstatus;
__entry->mask = mask;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
index 42b0a91656c4..908ce3c864fe 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_msg.h
@@ -71,7 +71,7 @@ TRACE_EVENT(brcms_dbg,
),
TP_fast_assign(
__entry->level = level;
- __assign_str(func, func);
+ __assign_str(func);
__assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s: %s", __get_str(func), __get_str(msg))
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h
index cf2cc070f1e5..24ac34fa0207 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/brcms_trace_brcmsmac_tx.h
@@ -31,7 +31,7 @@ TRACE_EVENT(brcms_txdesc,
__dynamic_array(u8, txh, txh_len)
),
TP_fast_assign(
- __assign_str(dev, dev_name(dev));
+ __assign_str(dev);
memcpy(__get_dynamic_array(txh), txh, txh_len);
),
TP_printk("[%s] txdesc", __get_str(dev))
@@ -54,7 +54,7 @@ TRACE_EVENT(brcms_txstatus,
__field(u16, ackphyrxsh)
),
TP_fast_assign(
- __assign_str(dev, dev_name(dev));
+ __assign_str(dev);
__entry->framelen = framelen;
__entry->frameid = frameid;
__entry->status = status;
@@ -85,7 +85,7 @@ TRACE_EVENT(brcms_ampdu_session,
__field(u16, dma_len)
),
TP_fast_assign(
- __assign_str(dev, dev_name(dev));
+ __assign_str(dev);
__entry->max_ampdu_len = max_ampdu_len;
__entry->max_ampdu_frames = max_ampdu_frames;
__entry->ampdu_len = ampdu_len;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
index f6962e558d7c..d1b9a18d0374 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c
@@ -331,7 +331,7 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc)
const char *ccode = sprom->alpha2;
int ccode_len = sizeof(sprom->alpha2);
- wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC);
+ wlc_cm = kzalloc(sizeof(*wlc_cm), GFP_ATOMIC);
if (wlc_cm == NULL)
return NULL;
wlc_cm->pub = pub;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
index 3d5c1ef8f7f2..bd480239368a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c
@@ -558,7 +558,7 @@ struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc,
struct si_info *sii = container_of(sih, struct si_info, pub);
/* allocate private info structure */
- di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC);
+ di = kzalloc(sizeof(*di), GFP_ATOMIC);
if (di == NULL)
return NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
index 92860dc0a92e..d86f28b8bc60 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c
@@ -457,7 +457,7 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
return err;
}
-static void brcms_ops_stop(struct ieee80211_hw *hw)
+static void brcms_ops_stop(struct ieee80211_hw *hw, bool suspend)
{
struct brcms_info *wl = hw->priv;
int status;
@@ -1090,6 +1090,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
hw->extra_tx_headroom = brcms_c_get_header_len();
hw->queues = N_TX_QUEUES;
@@ -1496,7 +1497,7 @@ struct brcms_timer *brcms_init_timer(struct brcms_info *wl,
{
struct brcms_timer *t;
- t = kzalloc(sizeof(struct brcms_timer), GFP_ATOMIC);
+ t = kzalloc(sizeof(*t), GFP_ATOMIC);
if (!t)
return NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
index 34460b5815d0..2738d4d6c60a 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c
@@ -234,12 +234,6 @@
/* max # tx status to process in wlc_txstatus() */
#define TXSBND 8
-/* brcmu_format_flags() bit description structure */
-struct brcms_c_bit_desc {
- u32 bit;
- const char *name;
-};
-
/*
* The following table lists the buffer memory allocated to xmt fifos in HW.
* the size is in units of 256bytes(one block), total size is HW dependent
@@ -463,11 +457,11 @@ static struct brcms_bss_cfg *brcms_c_bsscfg_malloc(uint unit)
{
struct brcms_bss_cfg *cfg;
- cfg = kzalloc(sizeof(struct brcms_bss_cfg), GFP_ATOMIC);
+ cfg = kzalloc(sizeof(*cfg), GFP_ATOMIC);
if (cfg == NULL)
goto fail;
- cfg->current_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC);
+ cfg->current_bss = kzalloc(sizeof(*cfg->current_bss), GFP_ATOMIC);
if (cfg->current_bss == NULL)
goto fail;
@@ -483,14 +477,14 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
{
struct brcms_c_info *wlc;
- wlc = kzalloc(sizeof(struct brcms_c_info), GFP_ATOMIC);
+ wlc = kzalloc(sizeof(*wlc), GFP_ATOMIC);
if (wlc == NULL) {
*err = 1002;
goto fail;
}
/* allocate struct brcms_c_pub state structure */
- wlc->pub = kzalloc(sizeof(struct brcms_pub), GFP_ATOMIC);
+ wlc->pub = kzalloc(sizeof(*wlc->pub), GFP_ATOMIC);
if (wlc->pub == NULL) {
*err = 1003;
goto fail;
@@ -499,7 +493,7 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
/* allocate struct brcms_hardware state structure */
- wlc->hw = kzalloc(sizeof(struct brcms_hardware), GFP_ATOMIC);
+ wlc->hw = kzalloc(sizeof(*wlc->hw), GFP_ATOMIC);
if (wlc->hw == NULL) {
*err = 1005;
goto fail;
@@ -528,7 +522,7 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
goto fail;
}
- wlc->default_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC);
+ wlc->default_bss = kzalloc(sizeof(*wlc->default_bss), GFP_ATOMIC);
if (wlc->default_bss == NULL) {
*err = 1010;
goto fail;
@@ -540,21 +534,20 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
goto fail;
}
- wlc->protection = kzalloc(sizeof(struct brcms_protection),
- GFP_ATOMIC);
+ wlc->protection = kzalloc(sizeof(*wlc->protection), GFP_ATOMIC);
if (wlc->protection == NULL) {
*err = 1016;
goto fail;
}
- wlc->stf = kzalloc(sizeof(struct brcms_stf), GFP_ATOMIC);
+ wlc->stf = kzalloc(sizeof(*wlc->stf), GFP_ATOMIC);
if (wlc->stf == NULL) {
*err = 1017;
goto fail;
}
wlc->bandstate[0] =
- kcalloc(MAXBANDS, sizeof(struct brcms_band), GFP_ATOMIC);
+ kcalloc(MAXBANDS, sizeof(*wlc->bandstate[0]), GFP_ATOMIC);
if (wlc->bandstate[0] == NULL) {
*err = 1025;
goto fail;
@@ -567,14 +560,14 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
+ (sizeof(struct brcms_band)*i));
}
- wlc->corestate = kzalloc(sizeof(struct brcms_core), GFP_ATOMIC);
+ wlc->corestate = kzalloc(sizeof(*wlc->corestate), GFP_ATOMIC);
if (wlc->corestate == NULL) {
*err = 1026;
goto fail;
}
wlc->corestate->macstat_snapshot =
- kzalloc(sizeof(struct macstat), GFP_ATOMIC);
+ kzalloc(sizeof(*wlc->corestate->macstat_snapshot), GFP_ATOMIC);
if (wlc->corestate->macstat_snapshot == NULL) {
*err = 1027;
goto fail;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
index a27d6f0b8819..c3d7aa570b4e 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -355,7 +355,7 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
{
struct shared_phy *sh;
- sh = kzalloc(sizeof(struct shared_phy), GFP_ATOMIC);
+ sh = kzalloc(sizeof(*sh), GFP_ATOMIC);
if (sh == NULL)
return NULL;
@@ -442,7 +442,7 @@ wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core,
return &pi->pubpi_ro;
}
- pi = kzalloc(sizeof(struct brcms_phy), GFP_ATOMIC);
+ pi = kzalloc(sizeof(*pi), GFP_ATOMIC);
if (pi == NULL)
return NULL;
pi->wiphy = wiphy;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
index aae2cf95fe95..d0faba240561 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -2567,7 +2567,6 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
struct lcnphy_txgains cal_gains, temp_gains;
u16 hash;
- u8 band_idx;
int j;
u16 ncorr_override[5];
u16 syst_coeffs[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
@@ -2599,6 +2598,9 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
u16 *values_to_save;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
+ if (WARN_ON(CHSPEC_IS5G(pi->radio_chanspec)))
+ return;
+
values_to_save = kmalloc_array(20, sizeof(u16), GFP_ATOMIC);
if (NULL == values_to_save)
return;
@@ -2662,20 +2664,18 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
hash = (target_gains->gm_gain << 8) |
(target_gains->pga_gain << 4) | (target_gains->pad_gain);
- band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0);
-
cal_gains = *target_gains;
memset(ncorr_override, 0, sizeof(ncorr_override));
- for (j = 0; j < iqcal_gainparams_numgains_lcnphy[band_idx]; j++) {
- if (hash == tbl_iqcal_gainparams_lcnphy[band_idx][j][0]) {
+ for (j = 0; j < iqcal_gainparams_numgains_lcnphy[0]; j++) {
+ if (hash == tbl_iqcal_gainparams_lcnphy[0][j][0]) {
cal_gains.gm_gain =
- tbl_iqcal_gainparams_lcnphy[band_idx][j][1];
+ tbl_iqcal_gainparams_lcnphy[0][j][1];
cal_gains.pga_gain =
- tbl_iqcal_gainparams_lcnphy[band_idx][j][2];
+ tbl_iqcal_gainparams_lcnphy[0][j][2];
cal_gains.pad_gain =
- tbl_iqcal_gainparams_lcnphy[band_idx][j][3];
+ tbl_iqcal_gainparams_lcnphy[0][j][3];
memcpy(ncorr_override,
- &tbl_iqcal_gainparams_lcnphy[band_idx][j][3],
+ &tbl_iqcal_gainparams_lcnphy[0][j][3],
sizeof(ncorr_override));
break;
}
@@ -4968,11 +4968,11 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
{
struct brcms_phy_lcnphy *pi_lcn;
- pi->u.pi_lcnphy = kzalloc(sizeof(struct brcms_phy_lcnphy), GFP_ATOMIC);
- if (pi->u.pi_lcnphy == NULL)
+ pi_lcn = kzalloc(sizeof(*pi_lcn), GFP_ATOMIC);
+ if (!pi_lcn)
return false;
- pi_lcn = pi->u.pi_lcnphy;
+ pi->u.pi_lcnphy = pi_lcn;
if (0 == (pi->sh->boardflags & BFL_NOPA)) {
pi->hwpwrctrl = true;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
index b72381791536..8b852581c4e4 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c
@@ -38,9 +38,9 @@ struct phy_shim_info {
struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw,
struct brcms_info *wl,
struct brcms_c_info *wlc) {
- struct phy_shim_info *physhim = NULL;
+ struct phy_shim_info *physhim;
- physhim = kzalloc(sizeof(struct phy_shim_info), GFP_ATOMIC);
+ physhim = kzalloc(sizeof(*physhim), GFP_ATOMIC);
if (!physhim)
return NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
index 7a82d615ba2a..f9b40cab766c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/Makefile
@@ -4,7 +4,7 @@
#
# Copyright (c) 2011 Broadcom Corporation
#
-ccflags-y := -I $(srctree)/$(src)/../include
+ccflags-y := -I $(src)/../include
obj-$(CONFIG_BRCMUTIL) += brcmutil.o
brcmutil-objs = utils.o d11.o
diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
index 4aec1fce1ae2..e22a6732a4c3 100644
--- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
+++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c
@@ -180,11 +180,10 @@ static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size,
struct libipw_txb *txb;
int i;
- txb = kmalloc(struct_size(txb, fragments, nr_frags), gfp_mask);
+ txb = kzalloc(struct_size(txb, fragments, nr_frags), gfp_mask);
if (!txb)
return NULL;
- memset(txb, 0, sizeof(struct libipw_txb));
txb->nr_frags = nr_frags;
txb->frag_size = txb_size;
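
Beyond brevity, the kzalloc() conversion changes what gets cleared: the removed memset() zeroed only the fixed sizeof(struct libipw_txb) header, while the allocation is struct_size() bytes, so the flexible fragments[] area previously started uninitialized; kzalloc() zeroes the entire allocation:

    txb = kzalloc(struct_size(txb, fragments, nr_frags), gfp_mask);
    /* header and all nr_frags pointer slots start zeroed */
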
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 075b705a8d7b..74fc76c00ebc 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -2813,7 +2813,7 @@ out_release_irq:
}
static void
-il3945_mac_stop(struct ieee80211_hw *hw)
+il3945_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct il_priv *il = hw->priv;
diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c
index a773939b8c2a..1fab7849f56d 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945.c
@@ -566,7 +566,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
- return;
+ rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* Convert 3945's rssi indicator to dBm */
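
Rather than silently discarding frames with CRC or FIFO errors, the iwlegacy hunks now tag them with RX_FLAG_FAILED_FCS_CRC: mac80211 still drops such frames for ordinary interfaces but can pass them to monitor interfaces that asked for failed-FCS frames. The receive-side pattern (names abbreviated from the driver):

    if (bad_crc)
            rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;   /* mark, don't drop */

    memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
    ieee80211_rx(il->hw, skb);
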
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 4beb7be6d51d..1600c344edbb 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -664,7 +664,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
!(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
- return;
+ rx_status.flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* This will be used in several places later */
@@ -5820,7 +5820,7 @@ out:
}
void
-il4965_mac_stop(struct ieee80211_hw *hw)
+il4965_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct il_priv *il = hw->priv;
diff --git a/drivers/net/wireless/intel/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h
index 863e3792d153..951f2245fefb 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965.h
+++ b/drivers/net/wireless/intel/iwlegacy/4965.h
@@ -151,7 +151,7 @@ void il4965_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb);
int il4965_mac_start(struct ieee80211_hw *hw);
-void il4965_mac_stop(struct ieee80211_hw *hw);
+void il4965_mac_stop(struct ieee80211_hw *hw, bool suspend);
void il4965_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast);
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index 8bb94a4c12cd..64c123314245 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -4,7 +4,7 @@ obj-$(CONFIG_IWLWIFI) += iwlwifi.o
iwlwifi-objs += iwl-io.o
iwlwifi-objs += iwl-drv.o
iwlwifi-objs += iwl-debug.o
-iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
+iwlwifi-objs += iwl-nvm-utils.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
@@ -14,7 +14,6 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o
iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
-iwlwifi-objs += queue/tx.o
iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index d594694206b3..2e2fcb3807ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/module.h>
#include <linux/stringify.h>
@@ -13,7 +13,7 @@
#define IWL_22000_UCODE_API_MAX 77
/* Lowest firmware API version supported */
-#define IWL_22000_UCODE_API_MIN 50
+#define IWL_22000_UCODE_API_MIN 77
/* NVM versions */
#define IWL_22000_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index 25952d0bea99..975e8aed1526 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -13,7 +13,7 @@
#define IWL_AX210_UCODE_API_MAX 89
/* Lowest firmware API version supported */
-#define IWL_AX210_UCODE_API_MIN 59
+#define IWL_AX210_UCODE_API_MIN 77
/* NVM versions */
#define IWL_AX210_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index eca1457caa0c..3b6b8b410be5 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX 89
+#define IWL_BZ_UCODE_API_MAX 92
/* Lowest firmware API version supported */
-#define IWL_BZ_UCODE_API_MIN 80
+#define IWL_BZ_UCODE_API_MIN 90
/* NVM versions */
#define IWL_BZ_NVM_VERSION 0x0a1d
@@ -149,6 +149,8 @@ const struct iwl_cfg_trans_params iwl_bz_trans_cfg = {
};
const char iwl_bz_name[] = "Intel(R) TBD Bz device";
+const char iwl_fm_name[] = "Intel(R) Wi-Fi 7 BE201 320MHz";
+const char iwl_gl_name[] = "Intel(R) Wi-Fi 7 BE200 320MHz";
const char iwl_mtp_name[] = "Intel(R) Wi-Fi 7 BE202 160MHz";
const struct iwl_cfg iwl_cfg_bz = {
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index dbbcb2d0968c..4ccb0b7bdc20 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -10,10 +10,10 @@
#include "fw/api/txq.h"
/* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX 89
+#define IWL_SC_UCODE_API_MAX 92
/* Lowest firmware API version supported */
-#define IWL_SC_UCODE_API_MIN 82
+#define IWL_SC_UCODE_API_MIN 90
/* NVM versions */
#define IWL_SC_NVM_VERSION 0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
index 0486b17d7c41..abcf8aeb010d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
@@ -2,7 +2,7 @@
# DVM
obj-$(CONFIG_IWLDVM) += iwldvm.o
iwldvm-objs += main.o rs.o mac80211.o ucode.o tx.o
-iwldvm-objs += lib.o calib.o tt.o sta.o rx.o
+iwldvm-objs += lib.o calib.o tt.o sta.o rx.o eeprom.o
iwldvm-objs += power.o
iwldvm-objs += scan.o
@@ -11,4 +11,4 @@ iwldvm-objs += rxon.o devices.o
iwldvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwldvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
-ccflags-y += -I $(srctree)/$(src)/../
+ccflags-y += -I $(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index fefaa414272b..a13add556a7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2021, 2024 Intel Corporation
*/
#ifndef __iwl_agn_h__
#define __iwl_agn_h__
@@ -385,6 +385,25 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
iwl_trans_set_pmi(priv->trans, state);
}
+/**
+ * iwl_parse_eeprom_data - parse EEPROM data and return values
+ *
+ * @trans: transport we're parsing for, for debug only
+ * @cfg: device configuration for parsing and overrides
+ * @eeprom: the EEPROM data
+ * @eeprom_size: length of the EEPROM data
+ *
+ * This function parses all EEPROM values we need and then
+ * returns a (newly allocated) struct containing all the
+ * relevant values for driver use. The struct must be freed
+ * later with iwl_free_nvm_data().
+ */
+struct iwl_nvm_data *
+iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
+ const u8 *eeprom, size_t eeprom_size);
+
+int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir);
#else
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
index 04864d3fda63..3f49c0bccb28 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2023-2024 Intel Corporation
*/
/*
* Please use this file (commands.h) only for uCode API definitions.
@@ -177,7 +177,7 @@ enum {
*
*****************************************************************************/
-/**
+/*
* iwlagn rate_n_flags bit fields
*
* rate_n_flags format is used in following iwlagn commands:
@@ -251,7 +251,7 @@ enum {
#define RATE_MCS_SGI_POS 13
#define RATE_MCS_SGI_MSK 0x2000
-/**
+/*
* rate_n_flags Tx antenna masks
* bit14:16
*/
@@ -2767,7 +2767,7 @@ struct iwl_missed_beacon_notif {
*
*****************************************************************************/
-/**
+/*
* SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
*
* This command sets up the Rx signal detector for a sensitivity level that
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index 25283e4b849f..4ac8b862ad41 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -19,7 +19,7 @@
#include <linux/mutex.h>
#include "fw/img.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "iwl-csr.h"
#include "iwl-debug.h"
#include "iwl-agn-hw.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index 39e40901fa46..48a8349680fc 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -12,7 +12,7 @@
*/
#include "iwl-io.h"
#include "iwl-prph.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "agn.h"
#include "dev.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
index 2b290fab1ef2..931aa3f5798d 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c
@@ -1,16 +1,18 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation
- * Copyright (C) 2015 Intel Mobile Communications GmbH
+ * Copyright (C) 2005-2014, 2018-2019, 2021, 2024 Intel Corporation
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/export.h>
+
#include "iwl-drv.h"
-#include "iwl-modparams.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-debug.h"
+#include "iwl-io.h"
+#include "iwl-prph.h"
+#include "iwl-csr.h"
+#include "agn.h"
-#if IS_ENABLED(CONFIG_IWLDVM)
/* EEPROM offset definitions */
/* indirect access definitions */
@@ -79,7 +81,6 @@ enum eeprom_sku_bits {
#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
/*
* EEPROM bands
* These are the channel numbers from each band in the order
@@ -257,7 +258,6 @@ struct iwl_eeprom_channel {
s8 max_power_avg;
} __packed;
-
enum iwl_eeprom_enhanced_txpwr_flags {
IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
@@ -648,114 +648,385 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
return n_channels;
}
-#endif
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
-int iwl_init_sband_channels(struct iwl_nvm_data *data,
- struct ieee80211_supported_band *sband,
- int n_channels, enum nl80211_band band)
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
+#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
+
+
+static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
{
- struct ieee80211_channel *chan = &data->channels[0];
- int n = 0, idx = 0;
+ u16 count;
+ int ret;
+
+ for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
+ /* Request semaphore */
+ iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+ /* See if we got it */
+ ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ IWL_EEPROM_SEM_TIMEOUT);
+ if (ret >= 0) {
+ IWL_DEBUG_EEPROM(trans->dev,
+ "Acquired semaphore after %d tries.\n",
+ count+1);
+ return ret;
+ }
+ }
- while (idx < n_channels && chan->band != band)
- chan = &data->channels[++idx];
+ return ret;
+}
- sband->channels = &data->channels[idx];
+static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
+{
+ iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+}
- while (idx < n_channels && chan->band == band) {
- chan = &data->channels[++idx];
- n++;
- }
+static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
+{
+ u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
- sband->n_channels = n;
+ IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
- return n;
+ switch (gp) {
+ case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
+ if (!nvm_is_otp) {
+ IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
+ gp);
+ return -ENOENT;
+ }
+ return 0;
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+ if (nvm_is_otp) {
+ IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
+ return -ENOENT;
+ }
+ return 0;
+ case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
+ default:
+ IWL_ERR(trans,
+ "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
+ nvm_is_otp ? "OTP" : "EEPROM", gp);
+ return -ENOENT;
+ }
}
-#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
-#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+/******************************************************************************
+ *
+ * OTP related functions
+ *
+******************************************************************************/
-void iwl_init_ht_hw_capab(struct iwl_trans *trans,
- struct iwl_nvm_data *data,
- struct ieee80211_sta_ht_cap *ht_info,
- enum nl80211_band band,
- u8 tx_chains, u8 rx_chains)
+static void iwl_set_otp_access_absolute(struct iwl_trans *trans)
{
- const struct iwl_cfg *cfg = trans->cfg;
- int max_bit_rate = 0;
-
- tx_chains = hweight8(tx_chains);
- if (cfg->rx_with_siso_diversity)
- rx_chains = 1;
- else
- rx_chains = hweight8(rx_chains);
-
- if (!(data->sku_cap_11n_enable) ||
- (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) ||
- !cfg->ht_params) {
- ht_info->ht_supported = false;
- return;
+ iwl_read32(trans, CSR_OTP_GP_REG);
+
+ iwl_clear_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_OTP_ACCESS_MODE);
+}
+
+static int iwl_nvm_is_otp(struct iwl_trans *trans)
+{
+ u32 otpgp;
+
+ /* OTP only valid for CP/PP and after */
+ switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
+ case CSR_HW_REV_TYPE_NONE:
+ IWL_ERR(trans, "Unknown hardware type\n");
+ return -EIO;
+ case CSR_HW_REV_TYPE_5300:
+ case CSR_HW_REV_TYPE_5350:
+ case CSR_HW_REV_TYPE_5100:
+ case CSR_HW_REV_TYPE_5150:
+ return 0;
+ default:
+ otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
+ if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
+ return 1;
+ return 0;
}
+}
+
+static int iwl_init_otp_access(struct iwl_trans *trans)
+{
+ int ret;
+
+ ret = iwl_finish_nic_init(trans);
+ if (ret)
+ return ret;
+
+ iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
+ udelay(5);
+ iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_RESET_REQ);
- if (data->sku_cap_mimo_disabled)
- rx_chains = 1;
+ /*
+ * CSR auto clock gate disable bit -
+ * this is only applicable for HW with OTP shadow RAM
+ */
+ if (trans->trans_cfg->base_params->shadow_ram_support)
+ iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
+ CSR_RESET_LINK_PWR_MGMT_DISABLED);
- ht_info->ht_supported = true;
- ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
+ return 0;
+}
- if (cfg->ht_params->stbc) {
- ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
+ __le16 *eeprom_data)
+{
+ int ret = 0;
+ u32 r;
+ u32 otpgp;
+
+ iwl_write32(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+ ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IWL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
+ return ret;
+ }
+ r = iwl_read32(trans, CSR_EEPROM_REG);
+ /* check for ECC errors: */
+ otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
+ if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
+ /* stop in this case */
+ /* set the uncorrectable OTP ECC bit for acknowledgment */
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
+ IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
+ return -EINVAL;
+ }
+ if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
+ /* continue in this case */
+ /* set the correctable OTP ECC bit for acknowledgment */
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
+ IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
+ }
+ *eeprom_data = cpu_to_le16(r >> 16);
+ return 0;
+}
- if (tx_chains > 1)
- ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+/*
+ * iwl_is_otp_empty: check for empty OTP
+ */
+static bool iwl_is_otp_empty(struct iwl_trans *trans)
+{
+ u16 next_link_addr = 0;
+ __le16 link_value;
+ bool is_empty = false;
+
+ /* locate the beginning of OTP link list */
+ if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
+ if (!link_value) {
+ IWL_ERR(trans, "OTP is empty\n");
+ is_empty = true;
+ }
+ } else {
+ IWL_ERR(trans, "Unable to read first block of OTP list.\n");
+ is_empty = true;
}
- if (cfg->ht_params->ldpc)
- ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ return is_empty;
+}
- if (trans->trans_cfg->mq_rx_supported ||
- iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
- ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+/*
+ * iwl_find_otp_image: find the EEPROM image in OTP
+ * by locating the OTP block that contains it.
+ * The last valid block on the link list (the block _before_ the last block)
+ * is the block we should read and use to configure the device.
+ * If all the available OTP blocks are full, the last block is the one
+ * we should read and use to configure the device.
+ * Only perform this operation if shadow RAM is disabled.
+ */
+static int iwl_find_otp_image(struct iwl_trans *trans,
+ u16 *validblockaddr)
+{
+ u16 next_link_addr = 0, valid_addr;
+ __le16 link_value = 0;
+ int usedblocks = 0;
- ht_info->mcs.rx_mask[0] = 0xFF;
- ht_info->mcs.rx_mask[1] = 0x00;
- ht_info->mcs.rx_mask[2] = 0x00;
+ /* set addressing mode to absolute to traverse the link list */
+ iwl_set_otp_access_absolute(trans);
- if (rx_chains >= 2)
- ht_info->mcs.rx_mask[1] = 0xFF;
- if (rx_chains >= 3)
- ht_info->mcs.rx_mask[2] = 0xFF;
+ /* checking for empty OTP or error */
+ if (iwl_is_otp_empty(trans))
+ return -EINVAL;
- if (cfg->ht_params->ht_greenfield_support)
- ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+ /*
+ * start traverse link list
+ * until reach the max number of OTP blocks
+ * different devices have different number of OTP blocks
+ */
+ do {
+ /* save the current valid block address and
+ * check for more blocks on the link list
+ */
+ valid_addr = next_link_addr;
+ next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
+ IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n",
+ usedblocks, next_link_addr);
+ if (iwl_read_otp_word(trans, next_link_addr, &link_value))
+ return -EINVAL;
+ if (!link_value) {
+ /*
+ * reached the end of the link list; return success
+ * and set the address to point to the starting
+ * address of the image
+ */
+ *validblockaddr = valid_addr;
+ /* skip first 2 bytes (link list pointer) */
+ *validblockaddr += 2;
+ return 0;
+ }
+ /* more in the link list, continue */
+ usedblocks++;
+ } while (usedblocks <= trans->trans_cfg->base_params->max_ll_items);
- max_bit_rate = MAX_BIT_RATE_20_MHZ;
+ /* OTP has no valid blocks */
+ IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
+ return -EINVAL;
+}
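
The traversal above is easier to follow in isolation: every OTP block begins with a 16-bit link word (an index, in 16-bit words, of the next block; zero terminates the list), and the image to load starts just past the link word of the block _before_ the terminator. A self-contained user-space sketch of the same walk, with the OTP modelled as a plain array (read_word() and the sample contents are hypothetical stand-ins for iwl_read_otp_word() and real OTP data):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical OTP: an array of 16-bit words. Each block starts with a
 * link word (word index of the next block); 0 terminates the list. */
static uint16_t otp[64] = { 4, 0xAAAA, 0xBBBB, 0xCCCC, 0, 0x1111 };

static int read_word(uint16_t byte_addr, uint16_t *val)
{
	if (byte_addr / 2 >= 64)
		return -1;
	*val = otp[byte_addr / 2];
	return 0;
}

/* Mirror of the traversal above: 'valid' trails one block behind; when
 * the link word read at 'next' is 0, the image starts right after the
 * 2-byte link word of that previous block. */
static int find_image_start(int max_blocks, uint16_t *image_start)
{
	uint16_t next = 0, valid = 0, link = 0;
	int used = 0;

	do {
		valid = next;
		next = link * sizeof(uint16_t); /* word index -> byte address */
		if (read_word(next, &link))
			return -1;
		if (!link) {
			*image_start = valid + 2; /* skip the link word */
			return 0;
		}
		used++;
	} while (used <= max_blocks);

	return -1; /* ran past the block budget: no valid image */
}

int main(void)
{
	uint16_t start;

	if (!find_image_start(8, &start))
		printf("image starts at byte 0x%x\n", start); /* 0x2 here */
	return 0;
}
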
- if (cfg->ht_params->ht40_bands & BIT(band)) {
- ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
- max_bit_rate = MAX_BIT_RATE_40_MHZ;
+/*
+ * iwl_read_eeprom - read EEPROM contents
+ *
+ * Load the EEPROM contents from the adapter and return them
+ * together with their size.
+ *
+ * NOTE: This routine uses the non-debug IO access functions.
+ */
+int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
+{
+ __le16 *e;
+ u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
+ int sz;
+ int ret;
+ u16 addr;
+ u16 validblockaddr = 0;
+ u16 cache_addr = 0;
+ int nvm_is_otp;
+
+ if (!eeprom || !eeprom_size)
+ return -EINVAL;
+
+ nvm_is_otp = iwl_nvm_is_otp(trans);
+ if (nvm_is_otp < 0)
+ return nvm_is_otp;
+
+ sz = trans->trans_cfg->base_params->eeprom_size;
+ IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
+
+ e = kmalloc(sz, GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
+ if (ret < 0) {
+ IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+ goto err_free;
}
- /* Highest supported Rx data rate */
- max_bit_rate *= rx_chains;
- WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
- ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
-
- /* Tx MCS capabilities */
- ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- if (tx_chains != rx_chains) {
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_chains - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+ ret = iwl_eeprom_acquire_semaphore(trans);
+ if (ret < 0) {
+ IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
+ goto err_free;
}
+
+ if (nvm_is_otp) {
+ ret = iwl_init_otp_access(trans);
+ if (ret) {
+ IWL_ERR(trans, "Failed to initialize OTP access.\n");
+ goto err_unlock;
+ }
+
+ iwl_write32(trans, CSR_EEPROM_GP,
+ iwl_read32(trans, CSR_EEPROM_GP) &
+ ~CSR_EEPROM_GP_IF_OWNER_MSK);
+
+ iwl_set_bit(trans, CSR_OTP_GP_REG,
+ CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
+ CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
+ /* traverse the linked list if shadow RAM is not supported */
+ if (!trans->trans_cfg->base_params->shadow_ram_support) {
+ ret = iwl_find_otp_image(trans, &validblockaddr);
+ if (ret)
+ goto err_unlock;
+ }
+ for (addr = validblockaddr; addr < validblockaddr + sz;
+ addr += sizeof(u16)) {
+ __le16 eeprom_data;
+
+ ret = iwl_read_otp_word(trans, addr, &eeprom_data);
+ if (ret)
+ goto err_unlock;
+ e[cache_addr / 2] = eeprom_data;
+ cache_addr += sizeof(u16);
+ }
+ } else {
+ /* the EEPROM is an array of 16-bit values */
+ for (addr = 0; addr < sz; addr += sizeof(u16)) {
+ u32 r;
+
+ iwl_write32(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+ ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IWL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IWL_ERR(trans,
+ "Time out reading EEPROM[%d]\n", addr);
+ goto err_unlock;
+ }
+ r = iwl_read32(trans, CSR_EEPROM_REG);
+ e[addr / 2] = cpu_to_le16(r >> 16);
+ }
+ }
+
+ IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n",
+ nvm_is_otp ? "OTP" : "EEPROM");
+
+ iwl_eeprom_release_semaphore(trans);
+
+ *eeprom_size = sz;
+ *eeprom = (u8 *)e;
+ return 0;
+
+ err_unlock:
+ iwl_eeprom_release_semaphore(trans);
+ err_free:
+ kfree(e);
+
+ return ret;
}
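
For reference, a sketch of how a caller would use the routine above, based only on the signatures visible in this patch: on success it hands back a kmalloc()ed image the caller owns and must kfree(); on failure nothing is left allocated. The helper name is hypothetical:

/* Sketch of a caller; based only on the signatures visible here. */
static int sketch_load_nvm(struct iwl_trans *trans, const struct iwl_cfg *cfg)
{
	struct iwl_nvm_data *data;
	u8 *eeprom;
	size_t eeprom_size;
	int ret;

	ret = iwl_read_eeprom(trans, &eeprom, &eeprom_size);
	if (ret)
		return ret;		/* nothing allocated on failure */

	/* parse the raw image, as in the hunks below */
	data = iwl_parse_eeprom_data(trans, cfg, eeprom, eeprom_size);

	kfree(eeprom);			/* caller owns the raw image */
	if (!data)
		return -EINVAL;
	/* a real caller would keep 'data'; dropped here for brevity */
	kfree(data);
	return 0;
}
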
-#if IS_ENABLED(CONFIG_IWLDVM)
static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const u8 *eeprom, size_t eeprom_size)
@@ -790,7 +1061,6 @@ static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
/* EEPROM data functions */
-
struct iwl_nvm_data *
iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const u8 *eeprom, size_t eeprom_size)
@@ -837,8 +1107,8 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
data->kelvin_temperature = *(__le16 *)tmp;
data->kelvin_voltage = *((__le16 *)tmp + 1);
- radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size,
- EEPROM_RADIO_CONFIG);
+ radio_cfg =
+ iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_RADIO_CONFIG);
data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg);
data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg);
data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg);
@@ -878,5 +1148,3 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
kfree(data);
return NULL;
}
-IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data);
-#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 52b008ce53bd..74d163e56511 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -2,7 +2,7 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(C) 2018 - 2019, 2022 - 2023 Intel Corporation
+ * Copyright(C) 2018 - 2019, 2022 - 2024 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -145,8 +145,6 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
#ifdef CONFIG_PM_SLEEP
if (priv->fw->img[IWL_UCODE_WOWLAN].num_sec &&
- priv->trans->ops->d3_suspend &&
- priv->trans->ops->d3_resume &&
device_can_wakeup(priv->trans->dev)) {
priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
@@ -302,7 +300,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw)
return ret;
}
-static void iwlagn_mac_stop(struct ieee80211_hw *hw)
+static void iwlagn_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
@@ -730,8 +728,6 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
ret = iwl_sta_rx_agg_stop(priv, sta, tid);
break;
case IEEE80211_AMPDU_TX_START:
- if (!priv->trans->ops->txq_enable)
- break;
if (!iwl_enable_tx_ampdu())
break;
IWL_DEBUG_HT(priv, "start Tx\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index 8774dd7b921e..65b7c68e5ca7 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -1,7 +1,8 @@
// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*
- * Copyright(c) 2003 - 2014, 2018 - 2022 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014, 2018 - 2022 Intel Corporation. All rights reserved.
+ * Copyright(c) 2024 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
@@ -25,8 +26,7 @@
#include <asm/div64.h>
-#include "iwl-eeprom-read.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "iwl-io.h"
#include "iwl-trans.h"
#include "iwl-op-mode.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index f4a6f76cf193..8879e668ef0d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -2673,20 +2673,16 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
/* Get max rate if user set max rate */
- if (lq_sta) {
- lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1;
- if ((sband->band == NL80211_BAND_5GHZ) &&
- (lq_sta->max_rate_idx != -1))
- lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
- if ((lq_sta->max_rate_idx < 0) ||
- (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
- lq_sta->max_rate_idx = -1;
- }
+ lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1;
+ if (sband->band == NL80211_BAND_5GHZ && lq_sta->max_rate_idx != -1)
+ lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
+ if (lq_sta->max_rate_idx < 0 || lq_sta->max_rate_idx >= IWL_RATE_COUNT)
+ lq_sta->max_rate_idx = -1;
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (lq_sta && !lq_sta->drv) {
+ if (!lq_sta->drv) {
IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
- priv_sta = NULL;
+ /* mac80211 already set up the data for using low rates */
+ return;
}
rate_idx = lq_sta->last_txrate_idx;
@@ -2756,7 +2752,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
lq_sta = &sta_priv->lq_sta;
sband = hw->wiphy->bands[conf->chandef.chan->band];
-
lq_sta->lq.sta_id = sta_id;
for (j = 0; j < LQ_SIZE; j++)
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
index 23dfcda0dd86..4af792d41dce 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
*
- * Copyright(c) 2007 - 2014, 2023 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014, 2023-2024 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -64,7 +64,7 @@ struct iwl_tt_trans {
};
/**
- * struct iwl_tt_mgnt - Thermal Throttling Management structure
+ * struct iwl_tt_mgmt - Thermal Throttling Management structure
* @advanced_tt: advanced thermal throttle required
* @state: current Thermal Throttling state
* @tt_power_mode: Thermal Throttling power mode index
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 4caf2e25a297..79774c8c7ff4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2023 Intel Corporation
+ * Copyright (C) 2019-2024 Intel Corporation
*/
#include <linux/uuid.h>
#include "iwl-drv.h"
@@ -27,6 +27,7 @@ static const size_t acpi_dsm_size[DSM_FUNC_NUM_FUNCS] = {
[DSM_FUNC_FORCE_DISABLE_CHANNELS] = sizeof(u32),
[DSM_FUNC_ENERGY_DETECTION_THRESHOLD] = sizeof(u32),
[DSM_FUNC_RFI_CONFIG] = sizeof(u32),
+ [DSM_FUNC_ENABLE_11BE] = sizeof(u32),
};
static int iwl_acpi_get_handle(struct device *dev, acpi_string method,
@@ -960,3 +961,37 @@ out_free:
kfree(data);
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_guid_lock_status);
+
+int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ union acpi_object *wifi_pkg, *data;
+ int ret = -ENOENT;
+ int tbl_rev;
+
+ data = iwl_acpi_get_object(fwrt->dev, ACPI_WBEM_METHOD);
+ if (IS_ERR(data))
+ return ret;
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
+ ACPI_WBEM_WIFI_DATA_SIZE,
+ &tbl_rev);
+ if (IS_ERR(wifi_pkg))
+ goto out_free;
+
+ if (tbl_rev != IWL_ACPI_WBEM_REVISION) {
+ IWL_DEBUG_RADIO(fwrt, "Unsupported ACPI WBEM revision:%d\n",
+ tbl_rev);
+ goto out_free;
+ }
+
+ if (wifi_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
+ goto out_free;
+
+ *value = wifi_pkg->package.elements[1].integer.value &
+ IWL_ACPI_WBEM_REV0_MASK;
+ IWL_DEBUG_RADIO(fwrt, "Loaded WBEM config from ACPI\n");
+ ret = 0;
+out_free:
+ kfree(data);
+ return ret;
+}
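
A sketch of a plausible consumer of the new helper, derived only from its signature (-ENOENT when the ACPI object or revision is unusable, 0 with the masked value on success); the names are illustrative:

/* Sketch: absence of the WBEM method is not an error; the 320 MHz
 * enablement bitmap simply stays empty. Names are illustrative. */
static u32 sketch_read_wbem(struct iwl_fw_runtime *fwrt)
{
	u32 wbem_320mhz = 0;

	if (iwl_acpi_get_wbem(fwrt, &wbem_320mhz))
		wbem_320mhz = 0;	/* -ENOENT etc.: leave disabled */

	return wbem_320mhz;	/* already masked with IWL_ACPI_WBEM_REV0_MASK */
}
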
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 1d32b82f73db..bb88398a6987 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -27,6 +27,7 @@
#define ACPI_WTAS_METHOD "WTAS"
#define ACPI_WPFC_METHOD "WPFC"
#define ACPI_GLAI_METHOD "GLAI"
+#define ACPI_WBEM_METHOD "WBEM"
#define ACPI_WIFI_DOMAIN (0x07)
@@ -67,6 +68,12 @@
#define ACPI_WRDD_WIFI_DATA_SIZE 2
#define ACPI_SPLC_WIFI_DATA_SIZE 2
#define ACPI_ECKV_WIFI_DATA_SIZE 2
+
+/*
+ * One element for domain type,
+ * and one for enablement of Wi-Fi 320MHz per MCC
+ */
+#define ACPI_WBEM_WIFI_DATA_SIZE 2
/*
* One element for domain type,
* and one for the status
@@ -94,6 +101,9 @@
#define ACPI_DSM_REV 0
+#define IWL_ACPI_WBEM_REV0_MASK (BIT(0) | BIT(1))
+#define IWL_ACPI_WBEM_REVISION 0
+
#ifdef CONFIG_ACPI
struct iwl_fw_runtime;
@@ -142,6 +152,7 @@ void iwl_acpi_get_guid_lock_status(struct iwl_fw_runtime *fwrt);
int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
enum iwl_dsm_funcs func, u32 *value);
+int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
#else /* CONFIG_ACPI */
static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev,
@@ -205,6 +216,11 @@ static inline int iwl_acpi_get_dsm(struct iwl_fw_runtime *fwrt,
{
return -ENOENT;
}
+
+static inline int iwl_acpi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ return -ENOENT;
+}
#endif /* CONFIG_ACPI */
#endif /* __iwl_fw_acpi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
index e00ab21e7358..ebe85fdf08d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2021, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -113,7 +113,7 @@ struct iwl_alive_ntf_v6 {
} __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */
/**
- * enum iwl_extended_cfg_flag - commands driver may send before
+ * enum iwl_extended_cfg_flags - commands driver may send before
* finishing init flow
* @IWL_INIT_DEBUG_CFG: driver is going to send debug config command
* @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands
@@ -126,7 +126,7 @@ enum iwl_extended_cfg_flags {
};
/**
- * struct iwl_extended_cfg_cmd - mark what commands ucode should wait for
+ * struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait for
* before finishing init flows
* @init_flags: values from iwl_extended_cfg_flags
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
index d9044ada6a43..2397fdc37fc5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2020, 2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2020, 2022, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -77,7 +77,7 @@ struct iwl_time_quota_data_v1 {
} __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */
/**
- * struct iwl_time_quota_cmd - configuration of time quota between bindings
+ * struct iwl_time_quota_cmd_v1 - configuration of time quota between bindings
* ( TIME_QUOTA_CMD = 0x2c )
* @quotas: allocations per binding
 * Note: on non-CDB the fourth one is the auxiliary mac and is
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
index bc27e15488f5..b97a43353779 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
* Copyright (C) 2013-2014, 2018-2019 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
@@ -77,73 +77,6 @@ struct iwl_bt_coex_ci_cmd {
__le32 secondary_ch_phy_id;
} __packed; /* BT_CI_MSG_API_S_VER_2 */
-#define BT_MBOX(n_dw, _msg, _pos, _nbits) \
- BT_MBOX##n_dw##_##_msg##_POS = (_pos), \
- BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS
-
-enum iwl_bt_mxbox_dw0 {
- BT_MBOX(0, LE_SLAVE_LAT, 0, 3),
- BT_MBOX(0, LE_PROF1, 3, 1),
- BT_MBOX(0, LE_PROF2, 4, 1),
- BT_MBOX(0, LE_PROF_OTHER, 5, 1),
- BT_MBOX(0, CHL_SEQ_N, 8, 4),
- BT_MBOX(0, INBAND_S, 13, 1),
- BT_MBOX(0, LE_MIN_RSSI, 16, 4),
- BT_MBOX(0, LE_SCAN, 20, 1),
- BT_MBOX(0, LE_ADV, 21, 1),
- BT_MBOX(0, LE_MAX_TX_POWER, 24, 4),
- BT_MBOX(0, OPEN_CON_1, 28, 2),
-};
-
-enum iwl_bt_mxbox_dw1 {
- BT_MBOX(1, BR_MAX_TX_POWER, 0, 4),
- BT_MBOX(1, IP_SR, 4, 1),
- BT_MBOX(1, LE_MSTR, 5, 1),
- BT_MBOX(1, AGGR_TRFC_LD, 8, 6),
- BT_MBOX(1, MSG_TYPE, 16, 3),
- BT_MBOX(1, SSN, 19, 2),
-};
-
-enum iwl_bt_mxbox_dw2 {
- BT_MBOX(2, SNIFF_ACT, 0, 3),
- BT_MBOX(2, PAG, 3, 1),
- BT_MBOX(2, INQUIRY, 4, 1),
- BT_MBOX(2, CONN, 5, 1),
- BT_MBOX(2, SNIFF_INTERVAL, 8, 5),
- BT_MBOX(2, DISC, 13, 1),
- BT_MBOX(2, SCO_TX_ACT, 16, 2),
- BT_MBOX(2, SCO_RX_ACT, 18, 2),
- BT_MBOX(2, ESCO_RE_TX, 20, 2),
- BT_MBOX(2, SCO_DURATION, 24, 6),
-};
-
-enum iwl_bt_mxbox_dw3 {
- BT_MBOX(3, SCO_STATE, 0, 1),
- BT_MBOX(3, SNIFF_STATE, 1, 1),
- BT_MBOX(3, A2DP_STATE, 2, 1),
- BT_MBOX(3, ACL_STATE, 3, 1),
- BT_MBOX(3, MSTR_STATE, 4, 1),
- BT_MBOX(3, OBX_STATE, 5, 1),
- BT_MBOX(3, A2DP_SRC, 6, 1),
- BT_MBOX(3, OPEN_CON_2, 8, 2),
- BT_MBOX(3, TRAFFIC_LOAD, 10, 2),
- BT_MBOX(3, CHL_SEQN_LSB, 12, 1),
- BT_MBOX(3, INBAND_P, 13, 1),
- BT_MBOX(3, MSG_TYPE_2, 16, 3),
- BT_MBOX(3, SSN_2, 19, 2),
- BT_MBOX(3, UPDATE_REQUEST, 21, 1),
-};
-
-#define BT_MBOX_MSG(_notif, _num, _field) \
- ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\
- >> BT_MBOX##_num##_##_field##_POS)
-
-#define BT_MBOX_PRINT(_num, _field, _end) \
- pos += scnprintf(buf + pos, bufsz - pos, \
- "\t%s: %d%s", \
- #_field, \
- BT_MBOX_MSG(notif, _num, _field), \
- true ? "\n" : ", ")
enum iwl_bt_activity_grading {
BT_OFF = 0,
BT_ON_NO_CONNECTION = 1,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
index 4419631604b4..1fc65469990e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2019, 2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2023-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -76,7 +76,7 @@ struct iwl_phy_specific_cfg {
} __packed; /* PHY_SPECIFIC_CONFIGURATION_API_VER_1*/
/**
- * struct iwl_phy_cfg_cmd - Phy configuration command
+ * struct iwl_phy_cfg_cmd_v1 - Phy configuration command
*
* @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg
* @calib_control: calibration control data
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index d2a74beed3a1..ffee7927cf26 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -42,7 +42,7 @@ struct iwl_d3_manager_config {
/* TODO: OFFLOADS_QUERY_API_S_VER_1 */
/**
- * enum iwl_d3_proto_offloads - enabled protocol offloads
+ * enum iwl_proto_offloads - enabled protocol offloads
* @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled
* @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled
* @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid
@@ -195,7 +195,7 @@ struct iwl_wowlan_pattern_v1 {
#define IWL_WOWLAN_MAX_PATTERNS 20
/**
- * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns
+ * struct iwl_wowlan_patterns_cmd_v1 - WoWLAN wakeup patterns
*/
struct iwl_wowlan_patterns_cmd_v1 {
/**
@@ -843,6 +843,52 @@ struct iwl_wowlan_info_notif_v2 {
u8 reserved2[2];
} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_2 */
+/* MAX MLO keys of non-active links that can arrive in the notification */
+#define WOWLAN_MAX_MLO_KEYS 18
+
+/**
+ * enum iwl_wowlan_mlo_gtk_type - GTK types
+ * @WOWLAN_MLO_GTK_KEY_TYPE_GTK: GTK
+ * @WOWLAN_MLO_GTK_KEY_TYPE_IGTK: IGTK
+ * @WOWLAN_MLO_GTK_KEY_TYPE_BIGTK: BIGTK
+ * @WOWLAN_MLO_GTK_KEY_NUM_TYPES: number of key types
+ */
+enum iwl_wowlan_mlo_gtk_type {
+ WOWLAN_MLO_GTK_KEY_TYPE_GTK,
+ WOWLAN_MLO_GTK_KEY_TYPE_IGTK,
+ WOWLAN_MLO_GTK_KEY_TYPE_BIGTK,
+ WOWLAN_MLO_GTK_KEY_NUM_TYPES
+}; /* WOWLAN_MLO_GTK_KEY_TYPE_API_E_VER_1 */
+
+/**
+ * enum iwl_wowlan_mlo_gtk_flag - MLO GTK flags
+ * @WOWLAN_MLO_GTK_FLAG_KEY_LEN_MSK: 0 for len 16, 1 for len 32
+ * @WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK: key id (ranges from 0 to 7)
+ * @WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK: spec link id of the key
+ * @WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK: &enum iwl_wowlan_mlo_gtk_type
+ * @WOWLAN_MLO_GTK_FLAG_LAST_KEY_MSK: is this the last given key per
+ * key-type / link-id - the currently used key
+ */
+enum iwl_wowlan_mlo_gtk_flag {
+ WOWLAN_MLO_GTK_FLAG_KEY_LEN_MSK = 0x0001,
+ WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK = 0x000E,
+ WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK = 0x00F0,
+ WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK = 0x0300,
+ WOWLAN_MLO_GTK_FLAG_LAST_KEY_MSK = 0x0400
+}; /* WOWLAN_MLO_GTK_FLAG_API_E_VER_1 */
+
+/**
+ * struct iwl_wowlan_mlo_gtk - MLO GTK info
+ * @key: key material
+ * @flags: &enum iwl_wowlan_mlo_gtk_flag
+ * @pn: packet number
+ */
+struct iwl_wowlan_mlo_gtk {
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ __le16 flags;
+ u8 pn[6];
+} __packed; /* WOWLAN_MLO_GTK_KEY_API_S_VER_1 */
+
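The masks above pack five fields into the 16-bit @flags word; a sketch of unpacking one entry with the standard bitfield helpers, assuming nothing beyond what the flag documentation states:

#include <linux/bitfield.h>
#include <linux/printk.h>

/* Sketch only: unpack the packed fields of one MLO GTK entry. */
static void sketch_decode_mlo_gtk(const struct iwl_wowlan_mlo_gtk *gtk)
{
	u16 flags = le16_to_cpu(gtk->flags);
	/* 0 -> 16-byte key, 1 -> 32-byte key, per the flag documentation */
	size_t key_len = u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_LEN_MSK) ?
			 32 : 16;
	u8 key_id = u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK);
	u8 link_id = u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK);
	enum iwl_wowlan_mlo_gtk_type type =
		u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK);

	pr_debug("MLO GTK: type %d key_id %u link %u len %zu last %d\n",
		 type, key_id, link_id, key_len,
		 !!(flags & WOWLAN_MLO_GTK_FLAG_LAST_KEY_MSK));
}
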
/**
* struct iwl_wowlan_info_notif - WoWLAN information notification
* @gtk: GTK data
@@ -859,7 +905,10 @@ struct iwl_wowlan_info_notif_v2 {
* @tid_tear_down: bit mask of tids whose BA sessions were closed
* in suspend state
* @station_id: station id
+ * @num_mlo_link_keys: number of &struct iwl_wowlan_mlo_gtk structs
+ * following this notif, or reserved in version < 4
* @reserved2: reserved
+ * @mlo_gtks: array of GTKs of size num_mlo_link_keys for version >= 4
*/
struct iwl_wowlan_info_notif {
struct iwl_wowlan_gtk_status_v3 gtk[WOWLAN_GTK_KEYS_NUM];
@@ -875,8 +924,10 @@ struct iwl_wowlan_info_notif {
__le32 received_beacons;
u8 tid_tear_down;
u8 station_id;
- u8 reserved2[2];
-} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3 */
+ u8 num_mlo_link_keys;
+ u8 reserved2;
+ struct iwl_wowlan_mlo_gtk mlo_gtks[];
+} __packed; /* WOWLAN_INFO_NTFY_API_S_VER_3, _VER_4 */
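
Since _VER_4 appends @num_mlo_link_keys entries after the fixed part, a receiver walks the trailing flexible array roughly as below (a sketch; validating the array length against the actual notification size is elided):

/* Sketch: iterate the trailing MLO GTK array of a _VER_4 notification. */
static void sketch_walk_mlo_keys(const struct iwl_wowlan_info_notif *notif)
{
	int i;

	for (i = 0; i < notif->num_mlo_link_keys; i++)
		sketch_decode_mlo_gtk(&notif->mlo_gtks[i]); /* sketch above */
}
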
/**
* struct iwl_wowlan_wake_pkt_notif - WoWLAN wake packet notification
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
index 0f7903c5a4df..2ab38eaeb290 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
+ * Copyright (C) 2024 Intel Corporation
* Copyright (C) 2012-2014, 2018-2022 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
@@ -90,6 +91,12 @@ enum iwl_data_path_subcmd_ids {
SEC_KEY_CMD = 0x18,
/**
+ * @ESR_MODE_NOTIF: notification to recommend/force a wanted esr mode,
+ * uses &struct iwl_mvm_esr_mode_notif
+ */
+ ESR_MODE_NOTIF = 0xF3,
+
+ /**
* @MONITOR_NOTIF: Datapath monitoring notification, using
* &struct iwl_datapath_monitor_notif
*/
@@ -224,28 +231,33 @@ struct iwl_synced_time_rsp {
#define PTP_CTX_MAX_DATA_SIZE 128
/**
- * struct iwl_time_msmt_ptp_ctx - Vendor specific information element
+ * struct iwl_time_msmt_ptp_ctx - Vendor specific element
* to allow a space for flexibility for the userspace App
*
- * @element_id: element id of vendor specific ie
- * @length: length of vendor specific ie
- * @reserved: for alignment
- * @data: vendor specific data blob
+ * @ftm: FTM specific vendor element
+ * @ftm.element_id: element id of vendor specific ie
+ * @ftm.length: length of vendor specific ie
+ * @ftm.reserved: for alignment
+ * @ftm.data: vendor specific data blob
+ * @tm: TM specific vendor element
+ * @tm.element_id: element id of vendor specific ie
+ * @tm.length: length of vendor specific ie
+ * @tm.data: vendor specific data blob
*/
struct iwl_time_msmt_ptp_ctx {
- /* Differentiate between FTM and TM specific Vendor IEs */
+ /* Differentiate between FTM and TM specific Vendor elements */
union {
struct {
u8 element_id;
u8 length;
__le16 reserved;
u8 data[PTP_CTX_MAX_DATA_SIZE];
- } ftm; /* FTM specific vendor IE */
+ } ftm;
struct {
u8 element_id;
u8 length;
u8 data[PTP_CTX_MAX_DATA_SIZE];
- } tm; /* TM specific vendor IE */
+ } tm;
};
} __packed /* PTP_CTX_VER_1 */;
@@ -524,6 +536,10 @@ struct iwl_rx_baid_cfg_cmd_remove {
/**
* struct iwl_rx_baid_cfg_cmd - BAID allocation/config command
* @action: the action, from &enum iwl_rx_baid_action
+ * @alloc: allocation data
+ * @modify: modify data
+ * @remove_v1: remove data (version 1)
+ * @remove: remove data
*/
struct iwl_rx_baid_cfg_cmd {
__le32 action;
@@ -558,6 +574,7 @@ enum iwl_scd_queue_cfg_operation {
/**
* struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command
* @operation: the operation, see &enum iwl_scd_queue_cfg_operation
+ * @u: union depending on command usage
* @u.add.sta_mask: station mask
* @u.add.tid: TID
* @u.add.reserved: reserved
@@ -627,6 +644,7 @@ enum iwl_sec_key_flags {
/**
* struct iwl_sec_key_cmd - security key command
* @action: action from &enum iwl_ctxt_action
+ * @u: union depending on command type
* @u.add.sta_mask: station mask for the new key
* @u.add.key_id: key ID (0-7) for the new key
* @u.add.key_flags: key flags per &enum iwl_sec_key_flags
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index 47c914de2992..855cd13a181e 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -147,32 +147,34 @@ struct iwl_fw_ini_region_internal_buffer {
* Configures parameters for region data collection
*
* @hdr: debug header
- * @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID
+ * @id: region id. Max id is %IWL_FW_INI_MAX_REGION_ID
* @type: region type. One of &enum iwl_fw_ini_region_type
* @sub_type: region sub type
* @sub_type_ver: region sub type version
* @reserved: not in use
* @name: region name
* @dev_addr: device address configuration. Used by
- * &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC,
- * &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX,
- * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR,
- * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG
- * &IWL_FW_INI_REGION_DBGI_SRAM, &FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM,
- * &IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP,
+ * %IWL_FW_INI_REGION_DEVICE_MEMORY, %IWL_FW_INI_REGION_PERIPHERY_MAC,
+ * %IWL_FW_INI_REGION_PERIPHERY_PHY, %IWL_FW_INI_REGION_PERIPHERY_AUX,
+ * %IWL_FW_INI_REGION_PAGING, %IWL_FW_INI_REGION_CSR,
+ * %IWL_FW_INI_REGION_DRAM_IMR and %IWL_FW_INI_REGION_PCI_IOSF_CONFIG
+ * %IWL_FW_INI_REGION_DBGI_SRAM, %FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM,
+ * %IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP,
* @dev_addr_range: device address range configuration. Used by
- * &IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE and
- * &IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE
- * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and
- * &IWL_FW_INI_REGION_RXF
+ * %IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE and
+ * %IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE
+ * @fifos: fifos configuration. Used by %IWL_FW_INI_REGION_TXF and
+ * %IWL_FW_INI_REGION_RXF
* @err_table: error table configuration. Used by
- * IWL_FW_INI_REGION_LMAC_ERROR_TABLE and
- * IWL_FW_INI_REGION_UMAC_ERROR_TABLE
+ * %IWL_FW_INI_REGION_LMAC_ERROR_TABLE and
+ * %IWL_FW_INI_REGION_UMAC_ERROR_TABLE
* @internal_buffer: internal monitor buffer configuration. Used by
- * &IWL_FW_INI_REGION_INTERNAL_BUFFER
+ * %IWL_FW_INI_REGION_INTERNAL_BUFFER
+ * @special_mem: special device memory region, used by
+ * %IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY
* @dram_alloc_id: dram allocation id. One of &enum iwl_fw_ini_allocation_id.
- * Used by &IWL_FW_INI_REGION_DRAM_BUFFER
- * @tlv_mask: tlv collection mask. Used by &IWL_FW_INI_REGION_TLV
+ * Used by %IWL_FW_INI_REGION_DRAM_BUFFER
+ * @tlv_mask: tlv collection mask. Used by %IWL_FW_INI_REGION_TLV
* @addrs: array of addresses attached to the end of the region tlv
*/
struct iwl_fw_ini_region_tlv {
@@ -291,7 +293,7 @@ struct iwl_fw_ini_addr_val {
} __packed; /* FW_TLV_DEBUG_ADDR_VALUE_VER_1 */
/**
- * struct iwl_fw_ini_conf_tlv - configuration TLV to set register/memory.
+ * struct iwl_fw_ini_conf_set_tlv - configuration TLV to set register/memory.
*
* @hdr: debug header
* @time_point: time point to apply config. One of &enum iwl_fw_ini_time_point
@@ -470,6 +472,10 @@ enum iwl_fw_ini_region_device_memory_subtype {
* @IWL_FW_INI_TIME_POINT_EAPOL_FAILED: EAPOL failed
* @IWL_FW_INI_TIME_POINT_FAKE_TX: fake Tx
* @IWL_FW_INI_TIME_POINT_DEASSOC: de association
+ * @IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ: request to override preset
+ * @IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START: start handling override preset
+ * request
+ * @IWL_FW_INI_TIME_SCAN_FAILURE: failed scan channel list
* @IWL_FW_INI_TIME_POINT_NUM: number of time points
*/
enum iwl_fw_ini_time_point {
@@ -500,6 +506,9 @@ enum iwl_fw_ini_time_point {
IWL_FW_INI_TIME_POINT_EAPOL_FAILED,
IWL_FW_INI_TIME_POINT_FAKE_TX,
IWL_FW_INI_TIME_POINT_DEASSOC,
+ IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ,
+ IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START,
+ IWL_FW_INI_TIME_SCAN_FAILURE,
IWL_FW_INI_TIME_POINT_NUM,
}; /* FW_TLV_DEBUG_TIME_POINT_API_E */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index b31ae6889bd0..bea0f4668cc8 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_debug_h__
#define __iwl_fw_api_debug_h__
+#include "dbg-tlv.h"
/**
* enum iwl_debug_cmds - debug commands
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index 25530a29317e..30a54c7fa001 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -2,6 +2,7 @@
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
* Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2024 Intel Corporation
*/
#ifndef __iwl_fw_api_location_h__
#define __iwl_fw_api_location_h__
@@ -390,10 +391,62 @@ struct iwl_tof_responder_config_cmd_v9 {
__le16 max_time_between_msr;
} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */
+/**
+ * struct iwl_tof_responder_config_cmd - ToF AP mode
+ * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field
+ * @responder_cfg_flags: &iwl_tof_responder_cfg_flags
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @bss_color: current AP bss_color
+ * @channel_num: current AP Channel
+ * @ctrl_ch_position: coding of the control channel position relative to
+ * the center frequency, see iwl_mvm_get_ctrl_pos()
+ * @sta_id: index of the AP STA when in AP mode
+ * @band: current AP band
+ * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug
+ * purposes, simulating station movement by adding various values
+ * to this field
+ * @common_calib: XVT: common calibration value
+ * @specific_calib: XVT: specific calibration value
+ * @bssid: Current AP BSSID
+ * @r2i_ndp_params: parameters for R2I NDP.
+ * bits 0 - 2: max number of LTF repetitions
+ * bits 3 - 5: max number of spatial streams (supported values are < 2)
+ * bits 6 - 7: max number of total LTFs see
+ * &enum ieee80211_range_params_max_total_ltf
+ * @i2r_ndp_params: parameters for I2R NDP.
+ * bits 0 - 2: max number of LTF repetitions
+ * bits 3 - 5: max number of spatial streams
+ * bits 6 - 7: max number of total LTFs see
+ * &enum ieee80211_range_params_max_total_ltf
+ * @min_time_between_msr: for non trigger based NDP ranging, minimum time
+ * between measurements in milliseconds.
+ * @max_time_between_msr: for non trigger based NDP ranging, maximum time
+ * between measurements in milliseconds.
+ */
+struct iwl_tof_responder_config_cmd {
+ __le32 cmd_valid_fields;
+ __le32 responder_cfg_flags;
+ u8 format_bw;
+ u8 bss_color;
+ u8 channel_num;
+ u8 ctrl_ch_position;
+ u8 sta_id;
+ u8 band;
+ __le16 toa_offset;
+ __le16 common_calib;
+ __le16 specific_calib;
+ u8 bssid[ETH_ALEN];
+ u8 r2i_ndp_params;
+ u8 i2r_ndp_params;
+ __le16 min_time_between_msr;
+ __le16 max_time_between_msr;
+} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_10 */
+
#define IWL_LCI_CIVIC_IE_MAX_SIZE 400
/**
- * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings
+ * struct iwl_tof_responder_dyn_config_cmd_v2 - Dynamic responder settings
* @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer
* @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer
* @lci_civic: The LCI/CIVIC buffer. LCI data (if exists) comes first, then, if
@@ -561,6 +614,8 @@ struct iwl_tof_range_req_ap_entry_v2 {
* the responder asked for LMR feedback although the initiator did not set
* the LMR feedback bit in the FTM request. If not set, the initiator will
* continue with the session and will provide the LMR feedback.
+ * @IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC: send an incorrect SAC in the
+ * first NDP exchange. This is used for testing.
*/
enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1),
@@ -577,6 +632,7 @@ enum iwl_initiator_ap_flags {
IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13),
IWL_INITIATOR_AP_FLAGS_PMF = BIT(14),
IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK = BIT(15),
+ IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC = BIT(16),
};
/**
@@ -797,6 +853,7 @@ struct iwl_tof_range_req_ap_entry_v7 {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_7 */
#define IWL_LOCATION_MAX_STS_POS 3
+#define IWL_LOCATION_TOTAL_LTF_POS 6
/**
* struct iwl_tof_range_req_ap_entry_v8 - AP configuration parameters
@@ -954,6 +1011,78 @@ struct iwl_tof_range_req_ap_entry_v9 {
} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */
/**
+ * struct iwl_tof_range_req_ap_entry_v10 - AP configuration parameters
+ * @initiator_ap_flags: see &enum iwl_initiator_ap_flags.
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6GHz
+ * @channel_num: AP Channel number
+ * @format_bw: bits 0 - 3: &enum iwl_location_frame_format.
+ * bits 4 - 7: &enum iwl_location_bw.
+ * @ctrl_ch_position: Coding of the control channel position relative to the
+ * center frequency, see iwl_mvm_get_ctrl_pos().
+ * @bssid: AP's BSSID
+ * @burst_period: For EDCA based ranging: Recommended value to be sent to the
+ * AP. Measurement periodicity In units of 100ms. ignored if
+ * num_of_bursts_exp = 0.
+ * For non trigger based NDP ranging, the maximum time between
+ * measurements in units of milliseconds.
+ * @samples_per_burst: the number of FTMs pairs in single Burst (1-31);
+ * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of
+ * the number of measurement iterations (min 2^0 = 1, max 2^14)
+ * @sta_id: the station id of the AP. Only relevant when associated to the AP,
+ * otherwise should be set to &IWL_MVM_INVALID_STA.
+ * @cipher: pairwise cipher suite for secured measurement.
+ * &enum iwl_location_cipher.
+ * @hltk: HLTK to be used for secured 11az measurement
+ * @tk: TK to be used for secured 11az measurement
+ * @calib: An array of calibration values per FTM rx bandwidth.
+ * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the
+ * calibration value that corresponds to the rx bandwidth of the FTM
+ * frame.
+ * @beacon_interval: beacon interval of the AP in TUs. Only required if
+ * &IWL_INITIATOR_AP_FLAGS_TB is set.
+ * @rx_pn: the next expected PN for protected management frames Rx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @tx_pn: the next PN to use for protected management frames Tx. LE byte
+ * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id
+ * is set to &IWL_MVM_INVALID_STA.
+ * @r2i_ndp_params: parameters for R2I NDP ranging negotiation.
+ * bits 0 - 2: max LTF repetitions
+ * bits 3 - 5: max number of spatial streams
+ * bits 6 - 7: max total LTFs. One of
+ * &enum ieee80211_range_params_max_total_ltf.
+ * @i2r_ndp_params: parameters for I2R NDP ranging negotiation.
+ * bits 0 - 2: max LTF repetitions
+ * bits 3 - 5: max number of spatial streams (supported values are < 2)
+ * bits 6 - 7: max total LTFs. One of
+ * &enum ieee80211_range_params_max_total_ltf.
+ * @min_time_between_msr: For non trigger based NDP ranging, the minimum time
+ * between measurements in units of milliseconds
+ */
+struct iwl_tof_range_req_ap_entry_v10 {
+ __le32 initiator_ap_flags;
+ u8 band;
+ u8 channel_num;
+ u8 format_bw;
+ u8 ctrl_ch_position;
+ u8 bssid[ETH_ALEN];
+ __le16 burst_period;
+ u8 samples_per_burst;
+ u8 num_of_bursts;
+ u8 sta_id;
+ u8 cipher;
+ u8 hltk[HLTK_11AZ_LEN];
+ u8 tk[TK_11AZ_LEN];
+ __le16 calib[IWL_TOF_BW_NUM];
+ __le16 beacon_interval;
+ u8 rx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 tx_pn[IEEE80211_CCMP_PN_LEN];
+ u8 r2i_ndp_params;
+ u8 i2r_ndp_params;
+ __le16 min_time_between_msr;
+} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_10 */
+
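With IWL_LOCATION_MAX_STS_POS (3) and IWL_LOCATION_TOTAL_LTF_POS (6) defined above, the @r2i_ndp_params/@i2r_ndp_params bytes documented for these structs pack as bits 0-2 LTF repetitions, bits 3-5 spatial streams, bits 6-7 total LTFs; a sketch of building such a byte from that documented layout:

/* Sketch: pack an NDP params byte from the documented field layout. */
static u8 sketch_ndp_params(u8 ltf_reps, u8 max_sts, u8 total_ltf)
{
	return (ltf_reps & 0x7) |
	       ((max_sts & 0x7) << IWL_LOCATION_MAX_STS_POS) |
	       ((total_ltf & 0x3) << IWL_LOCATION_TOTAL_LTF_POS);
}
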
+/**
* enum iwl_tof_response_mode
* @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as
* possible (not supported for this release)
@@ -1230,6 +1359,34 @@ struct iwl_tof_range_req_cmd_v13 {
struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS];
} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */
+/**
+ * struct iwl_tof_range_req_cmd_v14 - start measurement cmd
+ * @initiator_flags: see flags @ iwl_tof_initiator_flags
+ * @request_id: A Token incremented per request. The same Token will be
+ * sent back in the range response
+ * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS)
+ * @range_req_bssid: ranging request BSSID
+ * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template.
+ * Bits set to 1 shall be randomized by the UMAC
+ * @macaddr_template: MAC address template to use for non-randomized bits
+ * @req_timeout_ms: Requested timeout of the response in units of milliseconds.
+ * This is the session time for completing the measurement.
+ * @tsf_mac_id: report the measurement start time for each ap in terms of the
+ * TSF of this mac id. 0xff to disable TSF reporting.
+ * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v10.
+ */
+struct iwl_tof_range_req_cmd_v14 {
+ __le32 initiator_flags;
+ u8 request_id;
+ u8 num_of_ap;
+ u8 range_req_bssid[ETH_ALEN];
+ u8 macaddr_mask[ETH_ALEN];
+ u8 macaddr_template[ETH_ALEN];
+ __le32 req_timeout_ms;
+ __le32 tsf_mac_id;
+ struct iwl_tof_range_req_ap_entry_v10 ap[IWL_MVM_TOF_MAX_APS];
+} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_14 */
+
/*
* enum iwl_tof_range_request_status - status of the sent request
* @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index c6d1f5644638..ca6fa66d1917 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -144,7 +144,7 @@ struct iwl_missed_vap_notif {
} __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */
/**
- * struct iwl_channel_switch_start_notif - Channel switch start notification
+ * struct iwl_channel_switch_start_notif_v1 - Channel switch start notification
*
* @id_and_color: ID and color of the MAC
*/
@@ -642,4 +642,25 @@ struct iwl_mvm_sta_disable_tx_cmd {
__le32 disable;
} __packed; /* STA_DISABLE_TX_API_S_VER_1 */
+/**
+ * enum iwl_mvm_fw_esr_recommendation - FW recommendation code
+ * @ESR_RECOMMEND_LEAVE: recommendation to leave esr
+ * @ESR_FORCE_LEAVE: force exiting esr
+ * @ESR_RECOMMEND_ENTER: recommendation to enter esr
+ */
+enum iwl_mvm_fw_esr_recommendation {
+ ESR_RECOMMEND_LEAVE,
+ ESR_FORCE_LEAVE,
+ ESR_RECOMMEND_ENTER,
+}; /* ESR_MODE_RECOMMENDATION_CODE_API_E_VER_1 */
+
+/**
+ * struct iwl_mvm_esr_mode_notif - FW's recommendation/force for esr mode
+ *
+ * @action: the action to apply to the esr state. See
+ *	&enum iwl_mvm_fw_esr_recommendation
+ */
+struct iwl_mvm_esr_mode_notif {
+ __le32 action;
+} __packed; /* ESR_MODE_RECOMMENDATION_NTFY_API_S_VER_1 */
+
#endif /* __iwl_fw_api_mac_cfg_h__ */
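
A handler for the new notification would dispatch on the 32-bit action code; a sketch (the EMLSR state transitions are placeholders, not the driver's actual logic):

/* Sketch: react to ESR_MODE_NOTIF; the leave/enter steps are placeholders
 * for whatever the driver's EMLSR state machine actually does. */
static void sketch_handle_esr_notif(const struct iwl_mvm_esr_mode_notif *notif)
{
	switch (le32_to_cpu(notif->action)) {
	case ESR_FORCE_LEAVE:
	case ESR_RECOMMEND_LEAVE:
		/* exit EMLSR; for ESR_FORCE_LEAVE this is mandatory */
		break;
	case ESR_RECOMMEND_ENTER:
		/* consider entering EMLSR if other constraints allow */
		break;
	}
}
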
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 545826973a80..bcbbf8c4a297 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -310,6 +310,13 @@ struct iwl_ac_qos {
* @filter_flags: combination of &enum iwl_mac_filter_flags
* @qos_flags: from &enum iwl_mac_qos_flags
* @ac: one iwl_mac_qos configuration for each AC
+ * @ap: AP specific config data, see &struct iwl_mac_data_ap
+ * @go: GO specific config data, see &struct iwl_mac_data_go
+ * @sta: BSS client specific config data, see &struct iwl_mac_data_sta
+ * @p2p_sta: P2P client specific config data, see &struct iwl_mac_data_p2p_sta
+ * @p2p_dev: P2P-device specific config data, see &struct iwl_mac_data_p2p_dev
+ * @pibss: Pseudo-IBSS specific data, unused; see struct iwl_mac_data_pibss
+ * @ibss: IBSS specific config data, see &struct iwl_mac_data_ibss
*/
struct iwl_mac_ctx_cmd {
/* COMMON_INDEX_HDR_API_S_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index 58034dfa7e70..d424d0126367 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -7,7 +7,6 @@
#ifndef __iwl_fw_api_nvm_reg_h__
#define __iwl_fw_api_nvm_reg_h__
-#include "fw/regulatory.h"
/**
* enum iwl_regulatory_and_nvm_subcmd_ids - regulatory/NVM commands
*/
@@ -23,8 +22,10 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
* &struct iwl_lari_config_change_cmd_v3,
* &struct iwl_lari_config_change_cmd_v4,
* &struct iwl_lari_config_change_cmd_v5,
- * &struct iwl_lari_config_change_cmd_v6 or
- * &struct iwl_lari_config_change_cmd_v7
+ * &struct iwl_lari_config_change_cmd_v6,
+ * &struct iwl_lari_config_change_cmd_v7,
+ * &struct iwl_lari_config_change_cmd_v10 or
+ * &struct iwl_lari_config_change_cmd
*/
LARI_CONFIG_CHANGE = 0x1,
@@ -46,9 +47,9 @@ enum iwl_regulatory_and_nvm_subcmd_ids {
SAR_OFFSET_MAPPING_TABLE_CMD = 0x4,
/**
- * @UATS_TABLE_CMD: &struct iwl_uats_table_cmd
+ * @MCC_ALLOWED_AP_TYPE_CMD: &struct iwl_mcc_allowed_ap_type_cmd
*/
- UATS_TABLE_CMD = 0x5,
+ MCC_ALLOWED_AP_TYPE_CMD = 0x5,
/**
* @PNVM_INIT_COMPLETE_NTFY: &struct iwl_pnvm_init_complete_ntfy
@@ -119,7 +120,7 @@ struct iwl_nvm_access_cmd {
} __packed; /* NVM_ACCESS_CMD_API_S_VER_2 */
/**
- * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD
+ * struct iwl_nvm_access_resp - response to NVM_ACCESS_CMD
* @offset: offset in bytes into the section
* @length: in bytes, either how much was written or read
* @type: NVM_SECTION_TYPE_*
@@ -211,7 +212,7 @@ struct iwl_nvm_get_info_phy {
#define IWL_NUM_CHANNELS 110
/**
- * struct iwl_nvm_get_info_regulatory - regulatory information
+ * struct iwl_nvm_get_info_regulatory_v1 - regulatory information
* @lar_enabled: is LAR enabled
* @channel_profile: regulatory data of this channel
* @reserved: reserved
@@ -439,6 +440,7 @@ enum iwl_mcc_source {
MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
};
+#define IWL_WTAS_BLACK_LIST_MAX 16
/**
* struct iwl_tas_config_cmd_common - configures the TAS.
* This is also the v2 structure.
@@ -609,7 +611,7 @@ struct iwl_lari_config_change_cmd_v6 {
/**
* struct iwl_lari_config_change_cmd_v7 - change LARI configuration
- * This structure is used also for lari cmd version 8.
+ * This structure is also used for lari cmd versions 8 and 9.
* @config_bitmap: Bitmap of the config commands. Each bit will trigger a
* different predefined FW config operation.
* @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
@@ -619,6 +621,8 @@ struct iwl_lari_config_change_cmd_v6 {
* @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits
* per country, one to indicate whether to override and the other to
* indicate allow/disallow unii4 channels.
+ * For LARI cmd version 4 to 8 - bits 0:3 are supported.
+ * For LARI cmd version 9 - bits 0:5 are supported.
* @chan_state_active_bitmap: Bitmap to enable different bands per country
* or region.
* Each bit represents a country or region, and a band to activate
@@ -642,9 +646,98 @@ struct iwl_lari_config_change_cmd_v7 {
} __packed;
/* LARI_CHANGE_CONF_CMD_S_VER_7 */
/* LARI_CHANGE_CONF_CMD_S_VER_8 */
+/* LARI_CHANGE_CONF_CMD_S_VER_9 */
+
+/**
+ * struct iwl_lari_config_change_cmd_v10 - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ * For LARI cmd version 10 - bits 0:5 are supported.
+ * @chan_state_active_bitmap: Bitmap to enable different bands per country
+ * or region.
+ * Each bit represents a country or region, and a band to activate
+ * according to the BIOS definitions.
+ * For LARI cmd version 10 - bits 0:4 are supported.
+ * @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
+ * Each bit represents a set of channels in a specific band that should be
+ * disabled
+ * @edt_bitmap: Bitmap of energy detection threshold table.
+ * Disable/enable the EDT optimization method for different band.
+ * @oem_320mhz_allow_bitmap: 320Mhz bandwidth enablement bitmap per MCC.
+ * bit0: enable 320Mhz in Japan.
+ * bit1: enable 320Mhz in South Korea.
+ * bit 2 - 31: reserved.
+ */
+struct iwl_lari_config_change_cmd_v10 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+ __le32 chan_state_active_bitmap;
+ __le32 force_disable_channels_bitmap;
+ __le32 edt_bitmap;
+ __le32 oem_320mhz_allow_bitmap;
+} __packed;
+/* LARI_CHANGE_CONF_CMD_S_VER_10 */
+
+/**
+ * struct iwl_lari_config_change_cmd - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ * For LARI cmd version 11 - bits 0:5 are supported.
+ * @chan_state_active_bitmap: Bitmap to enable different bands per country
+ * or region.
+ * Each bit represents a country or region, and a band to activate
+ * according to the BIOS definitions.
+ * For LARI cmd version 11 - bits 0:4 are supported.
+ * For LARI cmd version 12 - bits 0:6 are supported and bits 7:31 are
+ * reserved. No need to mask out the reserved bits.
+ * @force_disable_channels_bitmap: Bitmap of disabled bands/channels.
+ * Each bit represents a set of channels in a specific band that should be
+ * disabled
+ * @edt_bitmap: Bitmap of energy detection threshold table.
+ * Disable/enable the EDT optimization method for different band.
+ * @oem_320mhz_allow_bitmap: 320Mhz bandwidth enablement bitmap per MCC.
+ * bit0: enable 320Mhz in Japan.
+ * bit1: enable 320Mhz in South Korea.
+ * bit 2 - 31: reserved.
+ * @oem_11be_allow_bitmap: Bitmap of 11be allowed MCCs. No need to mask out the
+ * unsupported bits
+ * bit0: enable 11be in China(CB/CN).
+ * bit1: enable 11be in South Korea.
+ * bit 2 - 31: reserved.
+ */
+struct iwl_lari_config_change_cmd {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+ __le32 chan_state_active_bitmap;
+ __le32 force_disable_channels_bitmap;
+ __le32 edt_bitmap;
+ __le32 oem_320mhz_allow_bitmap;
+ __le32 oem_11be_allow_bitmap;
+} __packed;
+/* LARI_CHANGE_CONF_CMD_S_VER_11 */
+/* LARI_CHANGE_CONF_CMD_S_VER_12 */
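
The WBEM value read by iwl_acpi_get_wbem() earlier in this series (two enable bits: Japan and South Korea) has exactly the shape @oem_320mhz_allow_bitmap documents; a sketch of that wiring, which is an assumption here rather than a quote of the driver:

/* Sketch: that the ACPI WBEM value feeds oem_320mhz_allow_bitmap is an
 * assumption in this sketch, not a quote of the driver. */
static void sketch_fill_320mhz(struct iwl_fw_runtime *fwrt,
			       struct iwl_lari_config_change_cmd *cmd)
{
	u32 wbem;

	if (!iwl_acpi_get_wbem(fwrt, &wbem))
		cmd->oem_320mhz_allow_bitmap = cpu_to_le32(wbem);
}
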
/* Activate UNII-1 (5.2GHz) for World Wide */
-#define ACTIVATE_5G2_IN_WW_MASK BIT(4)
+#define ACTIVATE_5G2_IN_WW_MASK BIT(4)
+#define CHAN_STATE_ACTIVE_BITMAP_CMD_V11 0x1F
/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
@@ -658,13 +751,13 @@ struct iwl_pnvm_init_complete_ntfy {
#define UATS_TABLE_COL_SIZE 13
/**
- * struct iwl_uats_table_cmd - struct for UATS_TABLE_CMD
+ * struct iwl_mcc_allowed_ap_type_cmd - struct for MCC_ALLOWED_AP_TYPE_CMD
* @offset_map: mapping a mcc to UHB AP type support (UATS) allowed
* @reserved: reserved
*/
-struct iwl_uats_table_cmd {
+struct iwl_mcc_allowed_ap_type_cmd {
u8 offset_map[UATS_TABLE_ROW_SIZE][UATS_TABLE_COL_SIZE];
__le16 reserved;
-} __packed; /* UATS_TABLE_CMD_S_VER_1 */
+} __packed; /* MCC_ALLOWED_AP_TYPE_CMD_API_S_VER_1 */
#endif /* __iwl_fw_api_nvm_reg_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
index 2d2b9c8c36ea..6a7bbfd6b2b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h
@@ -3,7 +3,7 @@
* Copyright (C) 2012-2014 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2021-2023 Intel Corporation
+ * Copyright (C) 2021-2024 Intel Corporation
*/
#ifndef __iwl_fw_api_offload_h__
#define __iwl_fw_api_offload_h__
@@ -20,7 +20,7 @@ enum iwl_prot_offload_subcmd_ids {
/**
* @WOWLAN_INFO_NOTIFICATION: Notification in
* &struct iwl_wowlan_info_notif_v1, &struct iwl_wowlan_info_notif_v2,
- * or iwl_wowlan_info_notif
+ * or &struct iwl_wowlan_info_notif
*/
WOWLAN_INFO_NOTIFICATION = 0xFD,
@@ -60,7 +60,7 @@ struct iwl_stored_beacon_notif_common {
} __packed;
/**
- * struct iwl_stored_beacon_notif - Stored beacon notification
+ * struct iwl_stored_beacon_notif_v2 - Stored beacon notification
*
* @common: fields common for all versions
* @data: beacon data, length in @byte_count
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
index 08a2c416ce60..4d8a12799c4d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018, 2020-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018, 2020-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -113,7 +113,7 @@ struct iwl_phy_context_cmd_tail {
} __packed;
/**
- * struct iwl_phy_context_cmd - config of the PHY context
+ * struct iwl_phy_context_cmd_v1 - config of the PHY context
* ( PHY_CONTEXT_CMD = 0x8 )
* @id_and_color: ID and color of the relevant Binding
* @action: action to perform, see &enum iwl_ctxt_action
@@ -144,6 +144,7 @@ struct iwl_phy_context_cmd_v1 {
* @rxchain_info: ???
* @sbb_bandwidth: 0 disabled, 1 - 40Mhz ... 4 - 320MHz
* @sbb_ctrl_channel_loc: location of the control channel
+ * @puncture_mask: bitmap of punctured subchannels
* @dsp_cfg_flags: set to 0
* @reserved: reserved to align to 64 bit
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
index 5a3f30e5e06d..c73d4d597857 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2019-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -43,6 +43,11 @@ enum iwl_phy_ops_subcmd_ids {
PER_PLATFORM_ANT_GAIN_CMD = 0x07,
/**
+ * @AP_TX_POWER_CONSTRAINTS_CMD: &struct iwl_txpower_constraints_cmd
+ */
+ AP_TX_POWER_CONSTRAINTS_CMD = 0x0C,
+
+ /**
* @CT_KILL_NOTIFICATION: &struct ct_kill_notif
*/
CT_KILL_NOTIFICATION = 0xFE,
@@ -190,7 +195,7 @@ struct ct_kill_notif {
} __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */
/**
-* enum ctdp_cmd_operation - CTDP command operations
+* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations
* @CTDP_CMD_OPERATION_START: update the current budget
* @CTDP_CMD_OPERATION_STOP: stop ctdp
* @CTDP_CMD_OPERATION_REPORT: get the average budget
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 0bf38243f88a..6e6a92d173cc 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -385,6 +385,33 @@ struct iwl_dev_tx_power_cmd_v7 {
__le32 timer_period;
__le32 flags;
} __packed; /* TX_REDUCED_POWER_API_S_VER_7 */
+
+/**
+ * struct iwl_dev_tx_power_cmd_v8 - TX power reduction command version 8
+ * @per_chain: per chain restrictions
+ * @enable_ack_reduction: enable or disable close range ack TX power
+ * reduction.
+ * @per_chain_restriction_changed: whether per_chain_restriction has changed
+ * since the last command. Used if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * Note: if not changed, the command is used for keep-alive only.
+ * @reserved: reserved (padding)
+ * @timer_period: timer in milliseconds. If it expires, the FW will revert to
+ * the default BIOS values. Relevant if set_mode is
+ * IWL_TX_POWER_MODE_SET_SAR_TIMER.
+ * @flags: reduce power flags.
+ * @tpc_vlp_backoff_level: user backoff of UNII5,7 VLP channels in USA.
+ * Not in use.
+ */
+struct iwl_dev_tx_power_cmd_v8 {
+ __le16 per_chain[IWL_NUM_CHAIN_TABLES_V2][IWL_NUM_CHAIN_LIMITS][IWL_NUM_SUB_BANDS_V2];
+ u8 enable_ack_reduction;
+ u8 per_chain_restriction_changed;
+ u8 reserved[2];
+ __le32 timer_period;
+ __le32 flags;
+ __le32 tpc_vlp_backoff_level;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_8 */
+
/**
* struct iwl_dev_tx_power_cmd - TX power reduction command (multiversion)
* @common: common part of the command
@@ -392,6 +419,8 @@ struct iwl_dev_tx_power_cmd_v7 {
* @v4: version 4 part of the command
* @v5: version 5 part of the command
* @v6: version 6 part of the command
+ * @v7: version 7 part of the command
+ * @v8: version 8 part of the command
*/
struct iwl_dev_tx_power_cmd {
struct iwl_dev_tx_power_common common;
@@ -401,6 +430,7 @@ struct iwl_dev_tx_power_cmd {
struct iwl_dev_tx_power_cmd_v5 v5;
struct iwl_dev_tx_power_cmd_v6 v6;
struct iwl_dev_tx_power_cmd_v7 v7;
+ struct iwl_dev_tx_power_cmd_v8 v8;
};
};
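
A sketch of selecting the v8 payload through the multiversion union above; that the driver sends sizeof(common) plus sizeof(vN) bytes is an assumption, and the field values are illustrative:

/* Sketch: one buffer serves every API version; only the negotiated
 * version's bytes would be sent. Sizing as common + v8 is an assumption. */
static void sketch_build_v8(void)
{
	struct iwl_dev_tx_power_cmd cmd = {};
	size_t len = sizeof(cmd.common) + sizeof(cmd.v8);

	cmd.v8.flags = cpu_to_le32(0);		/* illustrative value */
	cmd.v8.timer_period = cpu_to_le32(0);	/* illustrative value */
	(void)len;	/* would become the host-command length */
}
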
@@ -432,7 +462,7 @@ struct iwl_per_chain_offset {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
*/
@@ -442,7 +472,7 @@ struct iwl_geo_tx_power_profiles_cmd_v1 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_1 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -454,7 +484,7 @@ struct iwl_geo_tx_power_profiles_cmd_v2 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_2 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -466,7 +496,7 @@ struct iwl_geo_tx_power_profiles_cmd_v3 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_3 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -478,7 +508,7 @@ struct iwl_geo_tx_power_profiles_cmd_v4 {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_4 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * struct iwl_geo_tx_power_profiles_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading)
@@ -537,11 +567,14 @@ enum iwl_ppag_flags {
* union iwl_ppag_table_cmd - union for all versions of PPAG command
* @v1: version 1
* @v2: version 2
- * version 3, 4 and 5 are the same structure as v2,
+ * version 3, 4, 5 and 6 are the same structure as v2,
 * but have a different format of the flags bitmap
- * @flags: values from &enum iwl_ppag_flags
- * @gain: table of antenna gain values per chain and sub-band
- * @reserved: reserved
+ * @v1.flags: values from &enum iwl_ppag_flags
+ * @v1.gain: table of antenna gain values per chain and sub-band
+ * @v1.reserved: reserved
+ * @v2.flags: values from &enum iwl_ppag_flags
+ * @v2.gain: table of antenna gain values per chain and sub-band
+ * @v2.reserved: reserved
*/
union iwl_ppag_table_cmd {
struct {
@@ -702,4 +735,44 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
+
+#define DEFAULT_TPE_TX_POWER 0x7F
+
+/*
+ * Bandwidth: 20/40/80/(160/80+80)/320
+ */
+#define IWL_MAX_TX_EIRP_PWR_MAX_SIZE 5
+#define IWL_MAX_TX_EIRP_PSD_PWR_MAX_SIZE 16
+
+enum iwl_6ghz_ap_type {
+ IWL_6GHZ_AP_TYPE_LPI,
+ IWL_6GHZ_AP_TYPE_SP,
+ IWL_6GHZ_AP_TYPE_VLP,
+}; /* PHY_AP_TYPE_API_E_VER_1 */
+
+/**
+ * struct iwl_txpower_constraints_cmd - TX power constraints command
+ *	(AP_TX_POWER_CONSTRAINTS_CMD)
+ * Used for VLP/LPI/AFC Access Point power constraints for 6GHz channels
+ * @link_id: link ID
+ * @ap_type: see &enum iwl_6ghz_ap_type
+ * @eirp_pwr: 8-bit 2s complement signed integer in the range
+ * -64 dBm to 63 dBm with a 0.5 dB step;
+ * default %DEFAULT_TPE_TX_POWER (no maximum limit)
+ * @psd_pwr: 8-bit 2s complement signed integer in the range
+ * -63.5 to +63 dBm/MHz with a 0.5 dB step;
+ * value -128 indicates that the corresponding 20 MHz
+ * channel cannot be used for transmission;
+ * value +127 indicates that no maximum PSD limit
+ * is specified for the corresponding 20 MHz channel;
+ * default %DEFAULT_TPE_TX_POWER (no maximum limit)
+ * @reserved: reserved (padding)
+ */
+struct iwl_txpower_constraints_cmd {
+ __le16 link_id;
+ __le16 ap_type;
+ __s8 eirp_pwr[IWL_MAX_TX_EIRP_PWR_MAX_SIZE];
+ __s8 psd_pwr[IWL_MAX_TX_EIRP_PSD_PWR_MAX_SIZE];
+ u8 reserved[3];
+} __packed; /* PHY_AP_TX_POWER_CONSTRAINTS_CMD_API_S_VER_1 */
#endif /* __iwl_fw_api_power_h__ */
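The 0.5 dB-step, 2s-complement encoding above is easy to misread, so here is a standalone sketch (plain C, not driver code; the helper names are illustrative) of decoding a TPE limit field:

#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_TPE_TX_POWER 0x7F	/* +127 as s8: no maximum limit */

static bool tpe_limit_present(signed char raw)
{
	return raw != (signed char)DEFAULT_TPE_TX_POWER;
}

static void tpe_print_limit(const char *name, signed char raw)
{
	if (!tpe_limit_present(raw))
		printf("%s: no maximum limit\n", name);
	else
		printf("%s: %.1f dBm\n", name, raw / 2.0);	/* 0.5 dB units */
}

For example, eirp_pwr == 80 decodes as 40.0 dBm, while psd_pwr == -128 marks the corresponding 20 MHz channel unusable, as documented above.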
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index a1a272433b09..1a60f0cdf972 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation
* Copyright (C) 2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_rs_h__
@@ -9,7 +9,7 @@
#include "mac.h"
/**
- * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags
+ * enum iwl_tlc_mng_cfg_flags - options for TLC config flags
* @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. For HE this enables STBC for
* bandwidths <= 80MHz
* @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index e71f29d0c694..691c879cb90d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -710,7 +710,15 @@ struct iwl_rx_mpdu_desc {
__le32 reorder_data;
union {
+ /**
+ * @v1: version 1 of the remaining RX descriptor,
+ * see &struct iwl_rx_mpdu_desc_v1
+ */
struct iwl_rx_mpdu_desc_v1 v1;
+ /**
+ * @v3: version 3 of the remaining RX descriptor,
+ * see &struct iwl_rx_mpdu_desc_v3
+ */
struct iwl_rx_mpdu_desc_v3 v3;
};
} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
@@ -976,7 +984,7 @@ struct iwl_ba_window_status_notif {
} __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
/**
- * struct iwl_rfh_queue_config - RX queue configuration
+ * struct iwl_rfh_queue_data - RX queue configuration
* @q_num: Q num
* @enable: enable queue
* @reserved: alignment
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
index 93078f8cc08c..8598031567bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -14,6 +14,10 @@
*/
enum iwl_scan_subcmd_ids {
/**
+ * @CHANNEL_SURVEY_NOTIF: &struct iwl_umac_scan_channel_survey_notif
+ */
+ CHANNEL_SURVEY_NOTIF = 0xFB,
+ /**
* @OFFLOAD_MATCH_INFO_NOTIF: &struct iwl_scan_offload_match_info
*/
OFFLOAD_MATCH_INFO_NOTIF = 0xFC,
@@ -62,6 +66,8 @@ struct iwl_ssid_ie {
#define IWL_FAST_SCHED_SCAN_ITERATIONS 3
#define IWL_MAX_SCHED_SCAN_PLANS 2
+#define IWL_MAX_NUM_NOISE_RESULTS 22
+
enum scan_framework_client {
SCAN_CLIENT_SCHED_SCAN = BIT(0),
SCAN_CLIENT_NETDETECT = BIT(1),
@@ -143,7 +149,7 @@ struct iwl_scan_offload_profile_cfg_data {
} __packed;
/**
- * struct iwl_scan_offload_profile_cfg
+ * struct iwl_scan_offload_profile_cfg_v1 - scan offload profile config
* @profiles: profiles to search for match
* @data: the rest of the data for profile_cfg
*/
@@ -417,7 +423,7 @@ struct iwl_lmac_scan_complete_notif {
} __packed;
/**
- * struct iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
+ * struct iwl_periodic_scan_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
* @last_schedule_line: last schedule line executed (fast or regular)
* @last_schedule_iteration: last scan iteration executed before scan abort
* @status: &enum iwl_scan_offload_complete_status
@@ -437,10 +443,10 @@ struct iwl_periodic_scan_complete {
/* UMAC Scan API */
/* The maximum of either of these cannot exceed 8, because we use an
- * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h).
+ * 8-bit mask (see enum iwl_scan_status).
*/
-#define IWL_MVM_MAX_UMAC_SCANS 4
-#define IWL_MVM_MAX_LMAC_SCANS 1
+#define IWL_MAX_UMAC_SCANS 4
+#define IWL_MAX_LMAC_SCANS 1
enum scan_config_flags {
SCAN_CONFIG_FLAG_ACTIVATE = BIT(0),
@@ -642,10 +648,13 @@ enum iwl_umac_scan_general_flags {
* notification per channel or not.
* @IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER: Whether to allow channel
* reorder optimization or not.
+ * @IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS: Enable channel statistics
+ * collection when %IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE is set.
*/
enum iwl_umac_scan_general_flags2 {
IWL_UMAC_SCAN_GEN_FLAGS2_NOTIF_PER_CHNL = BIT(0),
IWL_UMAC_SCAN_GEN_FLAGS2_ALLOW_CHNL_REORDER = BIT(1),
+ IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS = BIT(3),
};
/**
@@ -780,7 +789,7 @@ struct iwl_scan_req_umac_tail_v1 {
} __packed;
/**
- * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command
+ * struct iwl_scan_req_umac_tail_v2 - the rest of the UMAC scan request command
* parameters following channels configuration array.
* @schedule: two scheduling plans.
* @delay: delay in TUs before starting the first scan iteration
@@ -1076,7 +1085,7 @@ struct iwl_scan_req_params_v12 {
} __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */
/**
- * struct iwl_scan_req_params_v16
+ * struct iwl_scan_req_params_v17 - scan request parameters (v17)
* @general_params: &struct iwl_scan_general_params_v11
* @channel_params: &struct iwl_scan_channel_params_v7
* @periodic_params: &struct iwl_scan_periodic_parms_v1
@@ -1102,7 +1111,7 @@ struct iwl_scan_req_umac_v12 {
} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */
/**
- * struct iwl_scan_req_umac_v16
+ * struct iwl_scan_req_umac_v17 - scan request command (v17)
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @scan_params: scan parameters
@@ -1258,4 +1267,26 @@ struct iwl_umac_scan_iter_complete_notif {
struct iwl_scan_results_notif results[];
} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_2 */
+/**
+ * struct iwl_umac_scan_channel_survey_notif - data for survey
+ * @channel: the channel scanned
+ * @band: band of channel
+ * @noise: noise floor measurements in negative dBm; 0xff marks an invalid entry
+ * @reserved: for future use and alignment
+ * @active_time: time in ms the radio was turned on (on the channel)
+ * @busy_time: time in ms the channel was sensed busy, 0 for a clean channel
+ * @tx_time: time the radio spent transmitting data
+ * @rx_time: time the radio spent receiving data
+ */
+struct iwl_umac_scan_channel_survey_notif {
+ __le32 channel;
+ __le32 band;
+ u8 noise[IWL_MAX_NUM_NOISE_RESULTS];
+ u8 reserved[2];
+ __le32 active_time;
+ __le32 busy_time;
+ __le32 tx_time;
+ __le32 rx_time;
+} __packed; /* SCAN_CHANNEL_SURVEY_NTF_API_S_VER_1 */
+
#endif /* __iwl_fw_api_scan_h__ */
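A consumer of this notification would typically fold the counters into a channel-load estimate; a hedged sketch (assuming the times are in ms, as documented above):

static u32 survey_busy_percent(const struct iwl_umac_scan_channel_survey_notif *n)
{
	u32 active = le32_to_cpu(n->active_time);

	if (!active)
		return 0;	/* radio never dwelled on this channel */

	/* busy_time <= active_time, so the result stays within 0..100 */
	return le32_to_cpu(n->busy_time) * 100 / active;
}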
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
index 2e15be71c957..f4b827b58bd3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2020, 2022-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -340,11 +340,13 @@ struct iwl_hs20_roc_res {
* @ROC_ACTIVITY_HOTSPOT: ROC for hs20 activity
* @ROC_ACTIVITY_P2P_DISC: ROC for p2p discoverability activity
* @ROC_ACTIVITY_P2P_TXRX: ROC for p2p action frames activity
+ * @ROC_ACTIVITY_P2P_NEG: ROC for p2p negotiation (used also for TX)
*/
enum iwl_roc_activity {
ROC_ACTIVITY_HOTSPOT,
ROC_ACTIVITY_P2P_DISC,
ROC_ACTIVITY_P2P_TXRX,
+ ROC_ACTIVITY_P2P_NEG,
ROC_NUM_ACTIVITIES
}; /* ROC_ACTIVITY_API_E_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index d9e4c75403b8..c5277e2f8cd4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_tx_h__
@@ -698,6 +698,7 @@ enum iwl_mvm_ba_resp_flags {
* @query_frame_cnt: SCD query frame count
* @txed: number of frames sent in the aggregation (all-TIDs)
* @done: number of frames that were Acked by the BA (all-TIDs)
+ * @rts_retry_cnt: RTS retry count
* @reserved: reserved (for alignment)
* @wireless_time: Wireless-media time
* @tx_rate: the rate the aggregation was sent at
@@ -718,7 +719,8 @@ struct iwl_mvm_compressed_ba_notif {
__le16 query_frame_cnt;
__le16 txed;
__le16 done;
- __le16 reserved;
+ u8 rts_retry_cnt;
+ u8 reserved;
__le32 wireless_time;
__le32 tx_rate;
__le16 tfd_cnt;
@@ -793,7 +795,8 @@ enum iwl_mac_beacon_flags {
* @reserved: reserved
* @link_id: the firmware id of the link that will use this beacon
* @tim_idx: the offset of the tim IE in the beacon
- * @tim_size: the length of the tim IE
+ * @tim_size: the length of the tim IE (version < 14)
+ * @btwt_offset: offset to the broadcast TWT IE if present (version >= 14)
* @ecsa_offset: offset to the ECSA IE if present
* @csa_offset: offset to the CSA IE if present
* @frame: the template of the beacon frame
@@ -805,14 +808,18 @@ struct iwl_mac_beacon_cmd {
__le32 reserved;
__le32 link_id;
__le32 tim_idx;
- __le32 tim_size;
+ union {
+ __le32 tim_size;
+ __le32 btwt_offset;
+ };
__le32 ecsa_offset;
__le32 csa_offset;
struct ieee80211_hdr frame[];
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10,
* BEACON_TEMPLATE_CMD_API_S_VER_11,
* BEACON_TEMPLATE_CMD_API_S_VER_12,
- * BEACON_TEMPLATE_CMD_API_S_VER_13
+ * BEACON_TEMPLATE_CMD_API_S_VER_13,
+ * BEACON_TEMPLATE_CMD_API_S_VER_14
*/
struct iwl_beacon_notif {
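Since the two fields now share storage, a caller has to pick one by command version; a sketch following the kernel-doc above (the helper name is hypothetical):

static void iwl_beacon_cmd_set_tim(struct iwl_mac_beacon_cmd *cmd, u8 cmd_ver,
				   u32 tim_size, u32 btwt_offset)
{
	if (cmd_ver >= 14)	/* v14+ repurposes the word for broadcast TWT */
		cmd->btwt_offset = cpu_to_le32(btwt_offset);
	else
		cmd->tim_size = cpu_to_le32(tim_size);
}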
@@ -859,7 +866,7 @@ enum iwl_dump_control {
};
/**
- * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command
+ * struct iwl_tx_path_flush_cmd_v1 -- queue/FIFO flush command
* @queues_ctl: bitmap of queues to flush
* @flush_ctl: control flags
* @reserved: reserved
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index c3bdf433d8f7..fa57df336785 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1026,17 +1026,12 @@ static int iwl_dump_ini_prph_mac_iter_common(struct iwl_fw_runtime *fwrt,
{
struct iwl_fw_ini_error_dump_range *range = range_ptr;
__le32 *val = range->data;
- u32 prph_val;
int i;
range->internal_base_addr = cpu_to_le32(addr);
range->range_data_size = size;
- for (i = 0; i < le32_to_cpu(size); i += 4) {
- prph_val = iwl_read_prph(fwrt->trans, addr + i);
- if (iwl_trans_is_hw_error_value(prph_val))
- return -EBUSY;
- *val++ = cpu_to_le32(prph_val);
- }
+ for (i = 0; i < le32_to_cpu(size); i += 4)
+ *val++ = cpu_to_le32(iwl_read_prph(fwrt->trans, addr + i));
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
@@ -1173,17 +1168,13 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt,
le32_to_cpu(reg->dev_addr.offset);
int i;
- /* we shouldn't get here if the trans doesn't have read_config32 */
- if (WARN_ON_ONCE(!trans->ops->read_config32))
- return -EOPNOTSUPP;
-
range->internal_base_addr = cpu_to_le32(addr);
range->range_data_size = reg->dev_addr.size;
for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) {
int ret;
u32 tmp;
- ret = trans->ops->read_config32(trans, addr + i, &tmp);
+ ret = iwl_trans_read_config32(trans, addr + i, &tmp);
if (ret < 0)
return ret;
@@ -3084,6 +3075,7 @@ static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
if (!test_bit(wk_idx, &fwrt->dump.active_wks))
return;
+ /* also checks 'desc' for pre-ini mode, since it shares the union with 'trig' */
if (!dump_data->trig) {
IWL_ERR(fwrt, "dump trigger data is not set\n");
goto out;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
index 751a125a1566..893b21fcaf87 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c
@@ -230,8 +230,7 @@ static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf,
.data = { NULL, },
};
- if (fwrt->ops && fwrt->ops->fw_running &&
- !fwrt->ops->fw_running(fwrt->ops_ctx))
+ if (!iwl_trans_fw_running(fwrt->trans))
return -EIO;
if (count < header_size + 1 || count > 1024 * 4)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 5c76e3b94968..e63b08b7d336 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -248,7 +248,7 @@ struct iwl_fw_error_dump_mem {
#define IWL_INI_DUMP_NAME_TYPE (BIT(31) | BIT(24))
/**
- * struct iwl_fw_error_dump_data - data for one type
+ * struct iwl_fw_ini_error_dump_data - data for one type
* @type: &enum iwl_fw_ini_region_type
* @sub_type: sub type id
* @sub_type_ver: sub type version
@@ -278,7 +278,7 @@ struct iwl_fw_ini_dump_entry {
} __packed;
/**
- * struct iwl_fw_error_dump_file - header of dump file
+ * struct iwl_fw_ini_dump_file_hdr - header of dump file
* @barker: must be %IWL_FW_INI_ERROR_DUMP_BARKER
* @file_len: the length of all the file including the header
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index f69d29e531c8..ae05227b6153 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -395,6 +395,8 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
* @IWL_UCODE_TLV_CAPA_SPP_AMSDU_SUPPORT: Support SPP (signaling and payload
* protected) A-MSDU.
* @IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT: Support secure LTF measurement.
+ * @IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS: Support monitor mode on otherwise
+ * passive channels
*
* @NUM_IWL_UCODE_TLV_CAPA: number of bits used
*/
@@ -494,6 +496,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_SNIFF_VALIDATE_SUPPORT = (__force iwl_ucode_tlv_capa_t)116,
IWL_UCODE_TLV_CAPA_CHINA_22_REG_SUPPORT = (__force iwl_ucode_tlv_capa_t)117,
IWL_UCODE_TLV_CAPA_SECURE_LTF_SUPPORT = (__force iwl_ucode_tlv_capa_t)121,
+ IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS = (__force iwl_ucode_tlv_capa_t)122,
NUM_IWL_UCODE_TLV_CAPA
/*
* This construction make both sparse (which cannot increment the previous
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 135bd48bfe9f..d8b083be5b6b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021 Intel Corporation
+ * Copyright (C) 2019-2021, 2024 Intel Corporation
*/
#include "iwl-drv.h"
#include "runtime.h"
@@ -135,7 +135,9 @@ int iwl_configure_rxq(struct iwl_fw_runtime *fwrt)
struct iwl_trans_rxq_dma_data data;
cmd->data[i].q_num = i + 1;
- iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data);
+ ret = iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data);
+ if (ret)
+ goto out;
cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
cmd->data[i].urbd_stts_wrptr =
@@ -149,6 +151,7 @@ int iwl_configure_rxq(struct iwl_fw_runtime *fwrt)
ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
+out:
kfree(cmd);
if (ret)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
index 36d506463e0e..560a91998cc4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c
@@ -38,6 +38,7 @@ IWL_BIOS_TABLE_LOADER_DATA(tas_table, struct iwl_tas_data);
IWL_BIOS_TABLE_LOADER_DATA(pwr_limit, u64);
IWL_BIOS_TABLE_LOADER_DATA(mcc, char);
IWL_BIOS_TABLE_LOADER_DATA(eckv, u32);
+IWL_BIOS_TABLE_LOADER_DATA(wbem, u32);
static const struct dmi_system_id dmi_ppag_approved_list[] = {
@@ -347,7 +348,7 @@ int iwl_fill_ppag_table(struct iwl_fw_runtime *fwrt,
"PPAG table rev is %d, send truncated table\n",
fwrt->ppag_ver);
}
- } else if (cmd_ver >= 2 && cmd_ver <= 5) {
+ } else if (cmd_ver >= 2 && cmd_ver <= 6) {
num_sub_bands = IWL_NUM_SUB_BANDS_V2;
gain = cmd->v2.gain[0];
*cmd_size = sizeof(cmd->v2);
@@ -443,7 +444,7 @@ int iwl_parse_tas_selection(struct iwl_fw_runtime *fwrt,
return enabled;
}
-__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
+static __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
{
int ret;
u32 val;
@@ -490,7 +491,142 @@ __le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt)
return config_bitmap;
}
-IWL_EXPORT_SYMBOL(iwl_get_lari_config_bitmap);
+
+static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver)
+{
+ size_t cmd_size;
+
+ switch (cmd_ver) {
+ case 12:
+ case 11:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd);
+ break;
+ case 10:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v10);
+ break;
+ case 9:
+ case 8:
+ case 7:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7);
+ break;
+ case 6:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
+ break;
+ case 5:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
+ break;
+ case 4:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
+ break;
+ case 3:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
+ break;
+ case 2:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
+ break;
+ default:
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
+ break;
+ }
+ return cmd_size;
+}
+
+int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
+ struct iwl_lari_config_change_cmd *cmd,
+ size_t *cmd_size)
+{
+ int ret;
+ u32 value;
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(fwrt->fw,
+ WIDE_ID(REGULATORY_AND_NVM_GROUP,
+ LARI_CONFIG_CHANGE), 1);
+
+ memset(cmd, 0, sizeof(*cmd));
+ *cmd_size = iwl_get_lari_config_cmd_size(cmd_ver);
+
+ cmd->config_bitmap = iwl_get_lari_config_bitmap(fwrt);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
+ if (!ret)
+ cmd->oem_11ax_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
+ if (!ret) {
+ if (cmd_ver < 9)
+ value &= DSM_UNII4_ALLOW_BITMAP_CMD_V8;
+ else
+ value &= DSM_UNII4_ALLOW_BITMAP;
+
+ cmd->oem_unii4_allow_bitmap = cpu_to_le32(value);
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
+ if (!ret) {
+ if (cmd_ver < 8)
+ value &= ~ACTIVATE_5G2_IN_WW_MASK;
+ if (cmd_ver < 12)
+ value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V11;
+
+ cmd->chan_state_active_bitmap = cpu_to_le32(value);
+ }
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_6E, &value);
+ if (!ret)
+ cmd->oem_uhb_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS, &value);
+ if (!ret)
+ cmd->force_disable_channels_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
+ &value);
+ if (!ret)
+ cmd->edt_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_wbem(fwrt, &value);
+ if (!ret)
+ cmd->oem_320mhz_allow_bitmap = cpu_to_le32(value);
+
+ ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value);
+ if (!ret)
+ cmd->oem_11be_allow_bitmap = cpu_to_le32(value);
+
+ if (cmd->config_bitmap ||
+ cmd->oem_uhb_allow_bitmap ||
+ cmd->oem_11ax_allow_bitmap ||
+ cmd->oem_unii4_allow_bitmap ||
+ cmd->chan_state_active_bitmap ||
+ cmd->force_disable_channels_bitmap ||
+ cmd->edt_bitmap ||
+ cmd->oem_320mhz_allow_bitmap ||
+ cmd->oem_11be_allow_bitmap) {
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd->config_bitmap),
+ le32_to_cpu(cmd->oem_11ax_allow_bitmap));
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
+ le32_to_cpu(cmd->oem_unii4_allow_bitmap),
+ le32_to_cpu(cmd->chan_state_active_bitmap),
+ cmd_ver);
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
+ le32_to_cpu(cmd->oem_uhb_allow_bitmap),
+ le32_to_cpu(cmd->force_disable_channels_bitmap));
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd->edt_bitmap),
+ le32_to_cpu(cmd->oem_320mhz_allow_bitmap));
+ IWL_DEBUG_RADIO(fwrt,
+ "sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd->oem_11be_allow_bitmap));
+ } else {
+ return 1;
+ }
+
+ return 0;
+}
+IWL_EXPORT_SYMBOL(iwl_fill_lari_config);
int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
index 28e774766847..e2c056f483c1 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#ifndef __fw_regulatory_h__
@@ -11,6 +11,7 @@
#include "fw/api/power.h"
#include "fw/api/phy.h"
#include "fw/api/config.h"
+#include "fw/api/nvm-reg.h"
#include "fw/img.h"
#include "iwl-trans.h"
@@ -39,7 +40,6 @@
#define IWL_PPAG_ETSI_CHINA_MASK 3
#define IWL_PPAG_REV3_MASK 0x7FF
-#define IWL_WTAS_BLACK_LIST_MAX 16
#define IWL_WTAS_ENABLED_MSK 0x1
#define IWL_WTAS_OVERRIDE_IEC_MSK 0x2
#define IWL_WTAS_ENABLE_IEC_MSK 0x4
@@ -115,7 +115,8 @@ enum iwl_dsm_funcs {
DSM_FUNC_FORCE_DISABLE_CHANNELS = 9,
DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10,
DSM_FUNC_RFI_CONFIG = 11,
- DSM_FUNC_NUM_FUNCS = 12,
+ DSM_FUNC_ENABLE_11BE = 12,
+ DSM_FUNC_NUM_FUNCS = 13,
};
enum iwl_dsm_values_srd {
@@ -132,6 +133,23 @@ enum iwl_dsm_values_indonesia {
DSM_VALUE_INDONESIA_MAX
};
+enum iwl_dsm_unii4_bitmap {
+ DSM_VALUE_UNII4_US_OVERRIDE_MSK = BIT(0),
+ DSM_VALUE_UNII4_US_EN_MSK = BIT(1),
+ DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK = BIT(2),
+ DSM_VALUE_UNII4_ETSI_EN_MSK = BIT(3),
+ DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK = BIT(4),
+ DSM_VALUE_UNII4_CANADA_EN_MSK = BIT(5),
+};
+
+#define DSM_UNII4_ALLOW_BITMAP_CMD_V8 (DSM_VALUE_UNII4_US_OVERRIDE_MSK | \
+ DSM_VALUE_UNII4_US_EN_MSK | \
+ DSM_VALUE_UNII4_ETSI_OVERRIDE_MSK | \
+ DSM_VALUE_UNII4_ETSI_EN_MSK)
+#define DSM_UNII4_ALLOW_BITMAP (DSM_UNII4_ALLOW_BITMAP_CMD_V8 | \
+ DSM_VALUE_UNII4_CANADA_OVERRIDE_MSK | \
+ DSM_VALUE_UNII4_CANADA_EN_MSK)
+
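The two masks exist because firmware older than LARI command v9 does not know the Canada bits; a one-line sketch mirroring the cmd_ver check in iwl_fill_lari_config() above (helper name is illustrative):

static u32 dsm_unii4_allowed_bits(u32 value, u8 cmd_ver)
{
	/* LARI command v9 added the Canada override/enable bits */
	return value & (cmd_ver < 9 ? DSM_UNII4_ALLOW_BITMAP_CMD_V8 :
				      DSM_UNII4_ALLOW_BITMAP);
}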
enum iwl_dsm_values_rfi {
DSM_VALUE_RFI_DLVR_DISABLE = BIT(0),
DSM_VALUE_RFI_DDR_DISABLE = BIT(1),
@@ -184,8 +202,11 @@ int iwl_bios_get_pwr_limit(struct iwl_fw_runtime *fwrt,
int iwl_bios_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
int iwl_bios_get_eckv(struct iwl_fw_runtime *fwrt, u32 *ext_clk);
+int iwl_bios_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
-__le32 iwl_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt);
+int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt,
+ struct iwl_lari_config_change_cmd *cmd,
+ size_t *cmd_size);
int iwl_bios_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index b2bc4fd37abf..048877fa7c71 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -12,14 +12,13 @@
#include "fw/api/debug.h"
#include "fw/api/paging.h"
#include "fw/api/power.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "fw/acpi.h"
#include "fw/regulatory.h"
struct iwl_fw_runtime_ops {
void (*dump_start)(void *ctx);
void (*dump_end)(void *ctx);
- bool (*fw_running)(void *ctx);
int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd);
bool (*d3_debug_enable)(void *ctx);
};
@@ -46,6 +45,10 @@ struct iwl_fwrt_shared_mem_cfg {
* struct iwl_fwrt_dump_data - dump data
* @trig: trigger the worker was scheduled upon
* @fw_pkt: packet received from FW
+ *
+ * Note that the decision of which part of the union is used
+ * is based on iwl_trans_dbg_ini_valid(): the 'trig' part
+ * is used if it returns %true, the 'desc' part otherwise.
*/
struct iwl_fwrt_dump_data {
union {
@@ -54,6 +57,7 @@ struct iwl_fwrt_dump_data {
struct iwl_rx_packet *fw_pkt;
};
struct {
+ /* must be first to be same as 'trig' */
const struct iwl_fw_dump_desc *desc;
bool monitor_only;
};
@@ -99,7 +103,6 @@ struct iwl_txf_iter_data {
* @cur_fw_img: current firmware image, must be maintained by
* the driver by calling &iwl_fw_set_current_image()
* @dump: debug dump data
- * @uats_enabled: VLP or AFC AP is enabled
* @uats_table: AP type table
* @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock:
* 0: Unlocked, 1 and 2: Locked.
@@ -177,9 +180,8 @@ struct iwl_fw_runtime {
u8 ppag_ver;
struct iwl_sar_offset_mapping_cmd sgom_table;
bool sgom_enabled;
- struct iwl_uats_table_cmd uats_table;
+ struct iwl_mcc_allowed_ap_type_cmd uats_table;
u8 uefi_tables_lock_status;
- bool uats_enabled;
};
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
index e81fc0129b9d..fb982d4fe851 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.c
@@ -674,6 +674,29 @@ out:
return ret;
}
+int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ struct uefi_cnv_wlan_wbem_data *data;
+ int ret = 0;
+
+ data = iwl_uefi_get_verified_variable(fwrt->trans, IWL_UEFI_WBEM_NAME,
+ "WBEM", sizeof(*data), NULL);
+ if (IS_ERR(data))
+ return -EINVAL;
+
+ if (data->revision != IWL_UEFI_WBEM_REVISION) {
+ ret = -EINVAL;
+ IWL_DEBUG_RADIO(fwrt, "Unsupported UEFI WBEM revision:%d\n",
+ data->revision);
+ goto out;
+ }
+ *value = data->wbem_320mhz_per_mcc & IWL_UEFI_WBEM_REV0_MASK;
+ IWL_DEBUG_RADIO(fwrt, "Loaded WBEM config from UEFI\n");
+out:
+ kfree(data);
+ return ret;
+}
+
int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index 303cc299d1bc..1f8884ca8997 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright(c) 2021-2023 Intel Corporation
+ * Copyright(c) 2021-2024 Intel Corporation
*/
#ifndef __iwl_fw_uefi__
#define __iwl_fw_uefi__
@@ -21,6 +21,7 @@
#define IWL_UEFI_WRDD_NAME L"UefiCnvWlanWRDD"
#define IWL_UEFI_ECKV_NAME L"UefiCnvWlanECKV"
#define IWL_UEFI_DSM_NAME L"UefiCnvWlanGeneralCfg"
+#define IWL_UEFI_WBEM_NAME L"UefiCnvWlanWBEM"
#define IWL_SGOM_MAP_SIZE 339
@@ -35,6 +36,7 @@
#define IWL_UEFI_SPLC_REVISION 0
#define IWL_UEFI_WRDD_REVISION 0
#define IWL_UEFI_ECKV_REVISION 0
+#define IWL_UEFI_WBEM_REVISION 0
#define IWL_UEFI_DSM_REVISION 4
struct pnvm_sku_package {
@@ -178,6 +180,20 @@ struct uefi_cnv_var_general_cfg {
u32 functions[UEFI_MAX_DSM_FUNCS];
} __packed;
+#define IWL_UEFI_WBEM_REV0_MASK (BIT(0) | BIT(1))
+/* struct uefi_cnv_wlan_wbem_data - Bandwidth enablement per MCC as defined
+ * in UEFI
+ * @revision: the revision of the table
+ * @wbem_320mhz_per_mcc: enablement of 320MHz bandwidth per MCC
+ * bit 0 - if set, 320MHz is enabled for Japan
+ * bit 1 - if set, 320MHz is enabled for South Korea
+ * bits 2-31: reserved
+ */
+struct uefi_cnv_wlan_wbem_data {
+ u8 revision;
+ u32 wbem_320mhz_per_mcc;
+} __packed;
+
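A sketch decoding the rev-0 bits (the BIT() macro names are assumptions for illustration; only IWL_UEFI_WBEM_REV0_MASK comes from this header):

#define WBEM_320MHZ_JAPAN	BIT(0)	/* assumed name */
#define WBEM_320MHZ_KOREA	BIT(1)	/* assumed name */

static bool wbem_320mhz_enabled(u32 wbem, u32 mcc_bit)
{
	return !!(wbem & IWL_UEFI_WBEM_REV0_MASK & mcc_bit);
}

e.g. wbem_320mhz_enabled(value, WBEM_320MHZ_JAPAN) after iwl_uefi_get_wbem() succeeds.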
/*
* This is known to be broken on v4.19 and to work on v5.4. Until we
* figure out why this is the case and how to make it work, simply
@@ -202,6 +218,7 @@ int iwl_uefi_get_pwr_limit(struct iwl_fw_runtime *fwrt,
u64 *dflt_pwr_limit);
int iwl_uefi_get_mcc(struct iwl_fw_runtime *fwrt, char *mcc);
int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk);
+int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value);
int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt, enum iwl_dsm_funcs func,
u32 *value);
void iwl_uefi_get_sgom_table(struct iwl_trans *trans, struct iwl_fw_runtime *fwrt);
@@ -281,6 +298,11 @@ static inline int iwl_uefi_get_eckv(struct iwl_fw_runtime *fwrt, u32 *extl_clk)
return -ENOENT;
}
+static inline int iwl_uefi_get_wbem(struct iwl_fw_runtime *fwrt, u32 *value)
+{
+ return -ENOENT;
+}
+
static inline int iwl_uefi_get_dsm(struct iwl_fw_runtime *fwrt,
enum iwl_dsm_funcs func, u32 *value)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 6aa4f7f9c708..b2abd4fd1944 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -11,6 +11,7 @@
#include <linux/netdevice.h>
#include <linux/ieee80211.h>
#include <linux/nl80211.h>
+#include <linux/mod_devicetable.h>
#include "iwl-csr.h"
#include "iwl-drv.h"
@@ -240,7 +241,7 @@ enum iwl_cfg_trans_ltr_delay {
};
/**
- * struct iwl_cfg_trans - information needed to start the trans
+ * struct iwl_cfg_trans_params - information needed to start the trans
*
* These values are specific to the device ID and do not change when
 * multiple configs are used for a single device ID. The values are
@@ -257,6 +258,7 @@ enum iwl_cfg_trans_ltr_delay {
* @mq_rx_supported: multi-queue rx support
* @integrated: discrete or integrated
* @low_latency_xtal: use the low latency xtal if supported
+ * @bisr_workaround: BISR hardware workaround (for 22260 series devices)
* @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay.
* @imr_enabled: use the IMR if supported.
*/
@@ -421,6 +423,7 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_SC 0x48
#define IWL_CFG_MAC_TYPE_SC2 0x49
#define IWL_CFG_MAC_TYPE_SC2F 0x4A
+#define IWL_CFG_MAC_TYPE_BZ_W 0x4B
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -429,8 +432,6 @@ struct iwl_cfg {
#define IWL_CFG_RF_TYPE_HR2 0x10A
#define IWL_CFG_RF_TYPE_HR1 0x10C
#define IWL_CFG_RF_TYPE_GF 0x10D
-#define IWL_CFG_RF_TYPE_MR 0x110
-#define IWL_CFG_RF_TYPE_MS 0x111
#define IWL_CFG_RF_TYPE_FM 0x112
#define IWL_CFG_RF_TYPE_WH 0x113
@@ -484,6 +485,7 @@ const struct iwl_dev_info *
iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
u16 mac_type, u8 mac_step, u16 rf_type, u8 cdb,
u8 jacket, u8 rf_id, u8 no_160, u8 cores, u8 rf_step);
+extern const struct pci_device_id iwl_hw_card_ids[];
#endif
/*
@@ -541,6 +543,8 @@ extern const char iwl_ax221_name[];
extern const char iwl_ax231_name[];
extern const char iwl_ax411_name[];
extern const char iwl_bz_name[];
+extern const char iwl_fm_name[];
+extern const char iwl_gl_name[];
extern const char iwl_mtp_name[];
extern const char iwl_sc_name[];
extern const char iwl_sc2_name[];
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index 1379dc2d231b..5b62933134cf 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018, 2020-2023 Intel Corporation
+ * Copyright (C) 2018, 2020-2024 Intel Corporation
*/
#ifndef __iwl_context_info_file_gen3_h__
#define __iwl_context_info_file_gen3_h__
@@ -56,6 +56,8 @@ enum iwl_prph_scratch_mtr_format {
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K: 8kB RB size
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K: 12kB RB size
* @IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K: 16kB RB size
+ * @IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE: instruct the fw to set SCU_FORCE_ACTIVE
+ * upon reset.
*/
enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_IMR_DEBUG_EN = BIT(1),
@@ -71,6 +73,7 @@ enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_RB_SIZE_EXT_8K = 8 << 20,
IWL_PRPH_SCRATCH_RB_SIZE_EXT_12K = 9 << 20,
IWL_PRPH_SCRATCH_RB_SIZE_EXT_16K = 10 << 20,
+ IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE = BIT(29),
};
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index 4511d7fb2279..98563757ce2c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -304,9 +304,7 @@
#define CSR_HW_RFID_IS_CDB(_val) (((_val) & 0x10000000) >> 28)
#define CSR_HW_RFID_IS_JACKET(_val) (((_val) & 0x20000000) >> 29)
-/**
- * hw_rev values
- */
+/* hw_rev values */
enum {
SILICON_A_STEP = 0,
SILICON_B_STEP,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 561d0c261123..08d990ba8a79 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -223,12 +223,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
return -EINVAL;
}
- if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
- !trans->ops->read_config32) {
- IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
- return -EOPNOTSUPP;
- }
-
if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) {
trans->dbg.imr_data.sram_addr =
le32_to_cpu(reg->internal_buffer.base_addr);
@@ -1246,12 +1240,6 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
}
fwrt->trans->dbg.restart_required = false;
- IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n",
- tp, dump_data.trig->reset_fw);
- IWL_DEBUG_FW(fwrt,
- "WRT: restart_required %d, last_tp_resetfw %d\n",
- fwrt->trans->dbg.restart_required,
- fwrt->trans->dbg.last_tp_resetfw);
if (fwrt->trans->trans_cfg->device_family ==
IWL_DEVICE_FAMILY_9000) {
@@ -1261,22 +1249,17 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync,
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw = 0xFF;
- IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n");
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) {
- IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n");
fwrt->trans->dbg.restart_required = true;
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) {
- IWL_DEBUG_FW(fwrt,
- "WRT: stop only and no reload firmware\n");
fwrt->trans->dbg.restart_required = false;
fwrt->trans->dbg.last_tp_resetfw =
le32_to_cpu(dump_data.trig->reset_fw);
} else if (le32_to_cpu(dump_data.trig->reset_fw) ==
IWL_FW_INI_RESET_FW_MODE_NOTHING) {
- IWL_DEBUG_FW(fwrt,
- "WRT: nothing need to be done after debug collection\n");
+ /* nothing */
} else {
IWL_ERR(fwrt, "WRT: wrong resetfw %d\n",
le32_to_cpu(dump_data.trig->reset_fw));
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
index 2c280a2fe3df..0d4a0896a2c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
@@ -3,7 +3,7 @@
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
- * Copyright(c) 2018 - 2019, 2023 Intel Corporation
+ * Copyright(c) 2018 - 2019, 2023-2024 Intel Corporation
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE_DATA) || defined(TRACE_HEADER_MULTI_READ)
@@ -28,7 +28,7 @@ TRACE_EVENT(iwlwifi_dev_tx_tb,
TP_fast_assign(
DEV_ASSIGN;
__entry->phys = phys;
- if (iwl_trace_data(skb))
+ if (__get_dynamic_array_len(data))
memcpy(__get_dynamic_array(data), data_src, data_len);
),
TP_printk("[%s] TX frame data", __get_str(dev))
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
index e656bf6bc003..ead72c3d33bd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -4,7 +4,7 @@
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018, 2023 Intel Corporation
+ * Copyright(c) 2018, 2023-2024 Intel Corporation
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI) || defined(TRACE_HEADER_MULTI_READ)
@@ -88,8 +88,8 @@ TRACE_EVENT(iwlwifi_dev_tx,
* for the possible padding).
*/
__dynamic_array(u8, buf0, buf0_len)
- __dynamic_array(u8, buf1, hdr_len > 0 && iwl_trace_data(skb) ?
- 0 : skb->len - hdr_len)
+ __dynamic_array(u8, buf1, hdr_len > 0 && !iwl_trace_data(skb) ?
+ skb->len - hdr_len : 0)
),
TP_fast_assign(
DEV_ASSIGN;
@@ -99,7 +99,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
__entry->framelen += skb->len - hdr_len;
memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
- if (hdr_len > 0 && !iwl_trace_data(skb))
+ if (__get_dynamic_array_len(buf1))
skb_copy_bits(skb, hdr_len,
__get_dynamic_array(buf1),
skb->len - hdr_len);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
index 1d6c292cf545..0db1fa5477af 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
@@ -57,7 +57,7 @@ TRACE_EVENT(iwlwifi_dbg,
),
TP_fast_assign(
__entry->level = level;
- __assign_str(function, function);
+ __assign_str(function);
__assign_vstr(msg, vaf->fmt, vaf->va);
),
TP_printk("%s", __get_str(msg))
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
index c3e09f4fefeb..76166e1b10e5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -87,7 +87,7 @@ static inline void trace_ ## name(proto) {}
#endif
#define DEV_ENTRY __string(dev, dev_name(dev))
-#define DEV_ASSIGN __assign_str(dev, dev_name(dev))
+#define DEV_ASSIGN __assign_str(dev)
#include "iwl-devtrace-io.h"
#include "iwl-devtrace-ucode.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 4696d73c8971..aaaabd67f959 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -192,12 +192,6 @@ const char *iwl_drv_get_fwname_pre(struct iwl_trans *trans, char *buf)
case IWL_CFG_RF_TYPE_GF:
rf = "gf";
break;
- case IWL_CFG_RF_TYPE_MR:
- rf = "mr";
- break;
- case IWL_CFG_RF_TYPE_MS:
- rf = "ms";
- break;
case IWL_CFG_RF_TYPE_FM:
rf = "fm";
break;
@@ -988,16 +982,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
minor = le32_to_cpup(ptr++);
local_comp = le32_to_cpup(ptr);
- if (major >= 35)
- snprintf(drv->fw.fw_version,
- sizeof(drv->fw.fw_version),
- "%u.%08x.%u %s", major, minor,
- local_comp, iwl_reduced_fw_name(drv));
- else
- snprintf(drv->fw.fw_version,
- sizeof(drv->fw.fw_version),
- "%u.%u.%u %s", major, minor,
- local_comp, iwl_reduced_fw_name(drv));
+ snprintf(drv->fw.fw_version,
+ sizeof(drv->fw.fw_version),
+ "%u.%08x.%u %s", major, minor,
+ local_comp, iwl_reduced_fw_name(drv));
break;
}
case IWL_UCODE_TLV_FW_DBG_DEST: {
@@ -1484,7 +1472,6 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
size_t trigger_tlv_sz[FW_DBG_TRIGGER_MAX];
u32 api_ver;
int i;
- bool load_module = false;
bool usniffer_images = false;
bool failure = true;
@@ -1732,19 +1719,12 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
goto out_unbind;
}
} else {
- load_module = true;
+ request_module_nowait("%s", op->name);
}
mutex_unlock(&iwlwifi_opmode_table_mtx);
complete(&drv->request_firmware_complete);
- /*
- * Load the module last so we don't block anything
- * else from proceeding if the module fails to load
- * or hangs loading.
- */
- if (load_module)
- request_module("%s", op->name);
failure = false;
goto free;
@@ -1829,8 +1809,8 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans)
err_fw:
#ifdef CONFIG_IWLWIFI_DEBUGFS
debugfs_remove_recursive(drv->dbgfs_drv);
- iwl_dbg_tlv_free(drv->trans);
#endif
+ iwl_dbg_tlv_free(drv->trans);
kfree(drv);
err:
return ERR_PTR(ret);
@@ -1856,7 +1836,7 @@ void iwl_drv_stop(struct iwl_drv *drv)
mutex_unlock(&iwlwifi_opmode_table_mtx);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- drv->trans->ops->debugfs_cleanup(drv->trans);
+ iwl_trans_debugfs_cleanup(drv->trans);
debugfs_remove_recursive(drv->dbgfs_drv);
#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
deleted file mode 100644
index 5f386bb1a353..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
+++ /dev/null
@@ -1,394 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/*
- * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
- */
-#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include "iwl-drv.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom-read.h"
-#include "iwl-io.h"
-#include "iwl-prph.h"
-#include "iwl-csr.h"
-
-/*
- * EEPROM access time values:
- *
- * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
- * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
- * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
- * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
- */
-#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
-
-/*
- * The device's EEPROM semaphore prevents conflicts between driver and uCode
- * when accessing the EEPROM; each access is a series of pulses to/from the
- * EEPROM chip, not a single event, so even reads could conflict if they
- * weren't arbitrated by the semaphore.
- */
-#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-
-static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans)
-{
- u16 count;
- int ret;
-
- for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) {
- /* Request semaphore */
- iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
- /* See if we got it */
- ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- IWL_EEPROM_SEM_TIMEOUT);
- if (ret >= 0) {
- IWL_DEBUG_EEPROM(trans->dev,
- "Acquired semaphore after %d tries.\n",
- count+1);
- return ret;
- }
- }
-
- return ret;
-}
-
-static void iwl_eeprom_release_semaphore(struct iwl_trans *trans)
-{
- iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-}
-
-static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp)
-{
- u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
-
- IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp);
-
- switch (gp) {
- case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
- if (!nvm_is_otp) {
- IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
- gp);
- return -ENOENT;
- }
- return 0;
- case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
- case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
- if (nvm_is_otp) {
- IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
- return -ENOENT;
- }
- return 0;
- case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
- default:
- IWL_ERR(trans,
- "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n",
- nvm_is_otp ? "OTP" : "EEPROM", gp);
- return -ENOENT;
- }
-}
-
-/******************************************************************************
- *
- * OTP related functions
- *
-******************************************************************************/
-
-static void iwl_set_otp_access_absolute(struct iwl_trans *trans)
-{
- iwl_read32(trans, CSR_OTP_GP_REG);
-
- iwl_clear_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_OTP_ACCESS_MODE);
-}
-
-static int iwl_nvm_is_otp(struct iwl_trans *trans)
-{
- u32 otpgp;
-
- /* OTP only valid for CP/PP and after */
- switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) {
- case CSR_HW_REV_TYPE_NONE:
- IWL_ERR(trans, "Unknown hardware type\n");
- return -EIO;
- case CSR_HW_REV_TYPE_5300:
- case CSR_HW_REV_TYPE_5350:
- case CSR_HW_REV_TYPE_5100:
- case CSR_HW_REV_TYPE_5150:
- return 0;
- default:
- otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
- if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
- return 1;
- return 0;
- }
-}
-
-static int iwl_init_otp_access(struct iwl_trans *trans)
-{
- int ret;
-
- ret = iwl_finish_nic_init(trans);
- if (ret)
- return ret;
-
- iwl_set_bits_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
- udelay(5);
- iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
-
- /*
- * CSR auto clock gate disable bit -
- * this is only applicable for HW with OTP shadow RAM
- */
- if (trans->trans_cfg->base_params->shadow_ram_support)
- iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
- CSR_RESET_LINK_PWR_MGMT_DISABLED);
-
- return 0;
-}
-
-static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr,
- __le16 *eeprom_data)
-{
- int ret = 0;
- u32 r;
- u32 otpgp;
-
- iwl_write32(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(trans, "Time out reading OTP[%d]\n", addr);
- return ret;
- }
- r = iwl_read32(trans, CSR_EEPROM_REG);
- /* check for ECC errors: */
- otpgp = iwl_read32(trans, CSR_OTP_GP_REG);
- if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
- /* stop in this case */
- /* set the uncorrectable OTP ECC bit for acknowledgment */
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n");
- return -EINVAL;
- }
- if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
- /* continue in this case */
- /* set the correctable OTP ECC bit for acknowledgment */
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
- IWL_ERR(trans, "Correctable OTP ECC error, continue read\n");
- }
- *eeprom_data = cpu_to_le16(r >> 16);
- return 0;
-}
-
-/*
- * iwl_is_otp_empty: check for empty OTP
- */
-static bool iwl_is_otp_empty(struct iwl_trans *trans)
-{
- u16 next_link_addr = 0;
- __le16 link_value;
- bool is_empty = false;
-
- /* locate the beginning of OTP link list */
- if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) {
- if (!link_value) {
- IWL_ERR(trans, "OTP is empty\n");
- is_empty = true;
- }
- } else {
- IWL_ERR(trans, "Unable to read first block of OTP list.\n");
- is_empty = true;
- }
-
- return is_empty;
-}
-
-
-/*
- * iwl_find_otp_image: find EEPROM image in OTP
- * finding the OTP block that contains the EEPROM image.
- * the last valid block on the link list (the block _before_ the last block)
- * is the block we should read and used to configure the device.
- * If all the available OTP blocks are full, the last block will be the block
- * we should read and used to configure the device.
- * only perform this operation if shadow RAM is disabled
- */
-static int iwl_find_otp_image(struct iwl_trans *trans,
- u16 *validblockaddr)
-{
- u16 next_link_addr = 0, valid_addr;
- __le16 link_value = 0;
- int usedblocks = 0;
-
- /* set addressing mode to absolute to traverse the link list */
- iwl_set_otp_access_absolute(trans);
-
- /* checking for empty OTP or error */
- if (iwl_is_otp_empty(trans))
- return -EINVAL;
-
- /*
- * start traverse link list
- * until reach the max number of OTP blocks
- * different devices have different number of OTP blocks
- */
- do {
- /* save current valid block address
- * check for more block on the link list
- */
- valid_addr = next_link_addr;
- next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
- IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n",
- usedblocks, next_link_addr);
- if (iwl_read_otp_word(trans, next_link_addr, &link_value))
- return -EINVAL;
- if (!link_value) {
- /*
- * reach the end of link list, return success and
- * set address point to the starting address
- * of the image
- */
- *validblockaddr = valid_addr;
- /* skip first 2 bytes (link list pointer) */
- *validblockaddr += 2;
- return 0;
- }
- /* more in the link list, continue */
- usedblocks++;
- } while (usedblocks <= trans->trans_cfg->base_params->max_ll_items);
-
- /* OTP has no valid blocks */
- IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n");
- return -EINVAL;
-}
-
-/*
- * iwl_read_eeprom - read EEPROM contents
- *
- * Load the EEPROM contents from adapter and return it
- * and its size.
- *
- * NOTE: This routine uses the non-debug IO access functions.
- */
-int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size)
-{
- __le16 *e;
- u32 gp = iwl_read32(trans, CSR_EEPROM_GP);
- int sz;
- int ret;
- u16 addr;
- u16 validblockaddr = 0;
- u16 cache_addr = 0;
- int nvm_is_otp;
-
- if (!eeprom || !eeprom_size)
- return -EINVAL;
-
- nvm_is_otp = iwl_nvm_is_otp(trans);
- if (nvm_is_otp < 0)
- return nvm_is_otp;
-
- sz = trans->trans_cfg->base_params->eeprom_size;
- IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz);
-
- e = kmalloc(sz, GFP_KERNEL);
- if (!e)
- return -ENOMEM;
-
- ret = iwl_eeprom_verify_signature(trans, nvm_is_otp);
- if (ret < 0) {
- IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
- goto err_free;
- }
-
- /* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = iwl_eeprom_acquire_semaphore(trans);
- if (ret < 0) {
- IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n");
- goto err_free;
- }
-
- if (nvm_is_otp) {
- ret = iwl_init_otp_access(trans);
- if (ret) {
- IWL_ERR(trans, "Failed to initialize OTP access.\n");
- goto err_unlock;
- }
-
- iwl_write32(trans, CSR_EEPROM_GP,
- iwl_read32(trans, CSR_EEPROM_GP) &
- ~CSR_EEPROM_GP_IF_OWNER_MSK);
-
- iwl_set_bit(trans, CSR_OTP_GP_REG,
- CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
- CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- /* traversing the linked list if no shadow ram supported */
- if (!trans->trans_cfg->base_params->shadow_ram_support) {
- ret = iwl_find_otp_image(trans, &validblockaddr);
- if (ret)
- goto err_unlock;
- }
- for (addr = validblockaddr; addr < validblockaddr + sz;
- addr += sizeof(u16)) {
- __le16 eeprom_data;
-
- ret = iwl_read_otp_word(trans, addr, &eeprom_data);
- if (ret)
- goto err_unlock;
- e[cache_addr / 2] = eeprom_data;
- cache_addr += sizeof(u16);
- }
- } else {
- /* eeprom is an array of 16bit values */
- for (addr = 0; addr < sz; addr += sizeof(u16)) {
- u32 r;
-
- iwl_write32(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-
- ret = iwl_poll_bit(trans, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(trans,
- "Time out reading EEPROM[%d]\n", addr);
- goto err_unlock;
- }
- r = iwl_read32(trans, CSR_EEPROM_REG);
- e[addr / 2] = cpu_to_le16(r >> 16);
- }
- }
-
- IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n",
- nvm_is_otp ? "OTP" : "EEPROM");
-
- iwl_eeprom_release_semaphore(trans);
-
- *eeprom_size = sz;
- *eeprom = (u8 *)e;
- return 0;
-
- err_unlock:
- iwl_eeprom_release_semaphore(trans);
- err_free:
- kfree(e);
-
- return ret;
-}
-IWL_EXPORT_SYMBOL(iwl_read_eeprom);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h
deleted file mode 100644
index 63b8e6c6659b..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/*
- * Copyright (C) 2005-2014 Intel Corporation
- */
-#ifndef __iwl_eeprom_h__
-#define __iwl_eeprom_h__
-
-#include "iwl-trans.h"
-
-int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size);
-
-#endif /* __iwl_eeprom_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
index 6ba374efaacb..5c8f1868db64 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h
@@ -15,7 +15,7 @@
/* Flow Handler Definitions */
/****************************/
-/**
+/*
* This I/O area is directly read/writable by driver (e.g. Linux uses writel())
* Addresses are offsets from device's PCI hardware base address.
*/
@@ -24,7 +24,7 @@
#define FH_MEM_LOWER_BOUND_GEN2 (0xa06000)
#define FH_MEM_UPPER_BOUND_GEN2 (0xa08000)
-/**
+/*
* Keep-Warm (KW) buffer base address.
*
* Driver must allocate a 4KByte buffer that is for keeping the
@@ -44,7 +44,7 @@
#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
-/**
+/*
* TFD Circular Buffers Base (CBBC) addresses
*
* Device has 16 base pointer registers, one for each of 16 host-DRAM-resident
@@ -143,7 +143,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
*/
#define TFH_SRV_DMA_CHNL0_BC (0x1F70)
-/**
+/*
* Rx SRAM Control and Status Registers (RSCSR)
*
* These registers provide handshake between driver and device for the Rx queue
@@ -216,21 +216,21 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
-/**
+/*
* Physical base address of 8-byte Rx Status buffer.
* Bit fields:
 * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
*/
#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
-/**
+/*
* Physical base address of Rx Buffer Descriptor Circular Buffer.
* Bit fields:
 * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
*/
#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
-/**
+/*
* Rx write pointer (index, really!).
* Bit fields:
* 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
@@ -242,7 +242,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x00c)
#define FH_RSCSR_CHNL0_RDPTR FW_RSCSR_CHNL0_RXDCB_RDPTR_REG
-/**
+/*
* Rx Config/Status Registers (RCSR)
* Rx Config Reg for channel 0 (only channel used)
*
@@ -300,7 +300,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
-/**
+/*
* Rx Shared Status Registers (RSSR)
*
* After stopping Rx DMA channel (writing 0 to
@@ -356,7 +356,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define RFH_RBDBUF_RBD0_LSB 0xA08300
#define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8)
-/**
+/*
* RFH Status Register
*
* Bit fields:
@@ -440,7 +440,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
-/**
+/*
* Transmit DMA Channel Control/Status Registers (TCSR)
*
* Device has one configuration register for each of 8 Tx DMA/FIFO channels
@@ -501,7 +501,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
-/**
+/*
* Tx Shared Status Registers (TSSR)
*
* After stopping Tx DMA channel (writing 0 to
@@ -518,7 +518,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
-/**
+/*
 * Bit fields for TSSR (Tx Shared Status & Control) error status register:
 * 31: Indicates an address error when accessing internal memory
 * uCode/driver must write "1" in order to clear this flag
@@ -634,7 +634,7 @@ enum iwl_tfd_tb_hi_n_len {
};
/**
- * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor
+ * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
*
 * This structure contains the DMA address and length of the transmit buffer
*
@@ -648,7 +648,7 @@ struct iwl_tfd_tb {
} __packed;
/**
- * struct iwl_tfh_tb transmit buffer descriptor within transmit frame descriptor
+ * struct iwl_tfh_tb - transmit buffer descriptor within transmit frame descriptor
*
 * This structure contains the DMA address and length of the transmit buffer
*
@@ -717,7 +717,7 @@ struct iwl_tfh_tfd {
/* Fixed (non-configurable) rx data from phy */
/**
- * struct iwlagn_schedq_bc_tbl scheduler byte count table
+ * struct iwlagn_scd_bc_tbl - scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
* For devices up to 22000:
* @tfd_offset:
@@ -734,7 +734,7 @@ struct iwlagn_scd_bc_tbl {
} __packed;
/**
- * struct iwl_gen3_bc_tbl_entry scheduler byte count table entry gen3
+ * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3
* For AX210 and on:
* @tfd_offset: 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index c60f9466c5fd..060becfd64f3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2022, 2024 Intel Corporation
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
#include <linux/delay.h>
@@ -460,7 +460,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans)
*/
if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_BZ) {
iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ |
CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
} else {
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
index 1cf26ab4f488..21eabfc3ffc8 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2022 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2022, 2024 Intel Corporation
*/
#ifndef __iwl_modparams_h__
#define __iwl_modparams_h__
@@ -106,4 +106,23 @@ static inline bool iwl_enable_tx_ampdu(void)
return true;
}
+/* Verify amsdu_size module parameter and convert it to a rxb size */
+static inline enum iwl_amsdu_size
+iwl_amsdu_size_to_rxb_size(void)
+{
+ switch (iwlwifi_mod_params.amsdu_size) {
+ case IWL_AMSDU_8K:
+ return IWL_AMSDU_8K;
+ case IWL_AMSDU_12K:
+ return IWL_AMSDU_12K;
+ default:
+ pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
+ iwlwifi_mod_params.amsdu_size);
+ fallthrough;
+ case IWL_AMSDU_DEF:
+ case IWL_AMSDU_4K:
+ return IWL_AMSDU_4K;
+ }
+}
+
#endif /* #__iwl_modparams_h__ */
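
The helper above funnels every module-parameter value into one of the three RX buffer sizes the driver actually supports, logging and degrading anything else to 4K. A standalone sketch of the same mapping; the enum values are stubbed here as assumptions rather than taken from iwl-config.h:

#include <stdio.h>

/* Stub of enum iwl_amsdu_size; the real values live in iwl-config.h. */
enum iwl_amsdu_size {
	IWL_AMSDU_DEF = 0,
	IWL_AMSDU_4K  = 1,
	IWL_AMSDU_8K  = 2,
	IWL_AMSDU_12K = 3,
};

/* Mirrors iwl_amsdu_size_to_rxb_size(): 8K/12K pass through, anything
 * unrecognized is reported and degraded to the safe 4K default. */
static enum iwl_amsdu_size to_rxb_size(int amsdu_size)
{
	switch (amsdu_size) {
	case IWL_AMSDU_8K:
		return IWL_AMSDU_8K;
	case IWL_AMSDU_12K:
		return IWL_AMSDU_12K;
	default:
		fprintf(stderr, "unsupported amsdu_size: %d\n", amsdu_size);
		/* fall through to the safe default */
	case IWL_AMSDU_DEF:
	case IWL_AMSDU_4K:
		return IWL_AMSDU_4K;
	}
}

int main(void)
{
	printf("%d %d %d\n", to_rxb_size(IWL_AMSDU_8K),
	       to_rxb_size(IWL_AMSDU_DEF), to_rxb_size(42));
	return 0;
}
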
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index baa39a18087a..d902121da009 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -38,16 +38,13 @@ enum nvm_offsets {
N_HW_ADDRS = 3,
NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION,
- /* NVM calibration section offset (in words) definitions */
- NVM_CALIB_SECTION = 0x2B8,
- XTAL_CALIB = 0x316 - NVM_CALIB_SECTION,
-
/* NVM REGULATORY -Section offset (in words) definitions */
NVM_CHANNELS_SDP = 0,
};
enum ext_nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
+
MAC_ADDRESS_OVERRIDE_EXT_NVM = 1,
/* NVM SW-Section offset (in words) definitions */
@@ -373,7 +370,9 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band,
flags |= IEEE80211_CHAN_IR_CONCURRENT;
/* Set the AP type for the UHB case. */
- if (!(nvm_flags & NVM_CHANNEL_VLP))
+ if (nvm_flags & NVM_CHANNEL_VLP)
+ flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP;
+ else
flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT;
if (!(nvm_flags & NVM_CHANNEL_AFC))
flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT;
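
With this change the UHB bits are evaluated unconditionally: the VLP bit selects between permitting VLP AP operation and forbidding VLP clients, and a missing AFC bit forbids AFC clients. A minimal model of that mapping; the bit positions and flag values below are placeholders, not the real NVM_CHANNEL_*/IEEE80211_CHAN_* constants:

#include <stdio.h>

#define NVM_VLP			(1U << 0)	/* placeholder for NVM_CHANNEL_VLP */
#define NVM_AFC			(1U << 1)	/* placeholder for NVM_CHANNEL_AFC */

#define CHAN_ALLOW_VLP_AP	(1U << 0)	/* stands in for IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP */
#define CHAN_NO_VLP_CLIENT	(1U << 1)	/* stands in for IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT */
#define CHAN_NO_AFC_CLIENT	(1U << 2)	/* stands in for IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT */

static unsigned int uhb_chan_flags(unsigned int nvm_flags)
{
	unsigned int flags = 0;

	if (nvm_flags & NVM_VLP)
		flags |= CHAN_ALLOW_VLP_AP;	/* VLP AP explicitly allowed */
	else
		flags |= CHAN_NO_VLP_CLIENT;	/* no VLP at all on this channel */

	if (!(nvm_flags & NVM_AFC))
		flags |= CHAN_NO_AFC_CLIENT;

	return flags;
}

int main(void)
{
	/* VLP set, AFC clear: AP use of VLP allowed, AFC clients blocked */
	printf("0x%x\n", uhb_chan_flags(NVM_VLP));
	return 0;
}
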
@@ -392,11 +391,14 @@ static enum nl80211_band iwl_nl80211_band_from_channel_idx(int ch_idx)
return NL80211_BAND_2GHZ;
}
-static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
+static int iwl_init_channel_map(struct iwl_trans *trans,
+ const struct iwl_fw *fw,
struct iwl_nvm_data *data,
const void * const nvm_ch_flags,
u32 sbands_flags, bool v4)
{
+ const struct iwl_cfg *cfg = trans->cfg;
+ struct device *dev = trans->dev;
int ch_idx;
int n_channels = 0;
struct ieee80211_channel *channel;
@@ -478,11 +480,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
else
channel->flags = 0;
- /* TODO: Don't put limitations on UHB devices as we still don't
- * have NVM for them
- */
- if (cfg->uhb_supported)
- channel->flags = 0;
+ if (fw_has_capa(&fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_MONITOR_PASSIVE_CHANS))
+ channel->flags |= IEEE80211_CHAN_CAN_MONITOR;
+
iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM,
channel->hw_value, ch_flags);
IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n",
@@ -597,7 +598,8 @@ static const u8 iwl_vendor_caps[] = {
static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
{
- .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .types_mask = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -753,7 +755,8 @@ static const struct ieee80211_sband_iftype_data iwl_he_eht_capa[] = {
},
},
{
- .types_mask = BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -906,7 +909,8 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
u8 tx_chains, u8 rx_chains,
const struct iwl_fw *fw)
{
- bool is_ap = iftype_data->types_mask & BIT(NL80211_IFTYPE_AP);
+ bool is_ap = iftype_data->types_mask & (BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO));
bool no_320;
no_320 = (!trans->trans_cfg->integrated &&
@@ -1023,8 +1027,6 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
switch (CSR_HW_RFID_TYPE(trans->hw_rf_id)) {
case IWL_CFG_RF_TYPE_GF:
- case IWL_CFG_RF_TYPE_MR:
- case IWL_CFG_RF_TYPE_MS:
case IWL_CFG_RF_TYPE_FM:
case IWL_CFG_RF_TYPE_WH:
iftype_data->he_cap.he_cap_elem.phy_cap_info[9] |=
@@ -1176,12 +1178,11 @@ static void iwl_init_sbands(struct iwl_trans *trans,
const struct iwl_fw *fw)
{
struct device *dev = trans->dev;
- const struct iwl_cfg *cfg = trans->cfg;
int n_channels;
int n_used = 0;
struct ieee80211_supported_band *sband;
- n_channels = iwl_init_channel_map(dev, cfg, data, nvm_ch_flags,
+ n_channels = iwl_init_channel_map(trans, fw, data, nvm_ch_flags,
sbands_flags, v4);
sband = &data->bands[NL80211_BAND_2GHZ];
sband->band = NL80211_BAND_2GHZ;
@@ -1572,9 +1573,6 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
&regulatory[NVM_CHANNELS_SDP] :
&nvm_sw[NVM_CHANNELS];
- /* in family 8000 Xtal calibration values moved to OTP */
- data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB);
- data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1);
lar_enabled = true;
} else {
u16 lar_offset = data->nvm_version < 0xE39 ?
@@ -1612,8 +1610,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data);
static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
int ch_idx, u16 nvm_flags,
struct iwl_reg_capa reg_capa,
- const struct iwl_cfg *cfg,
- bool uats_enabled)
+ const struct iwl_cfg *cfg)
{
u32 flags = NL80211_RRF_NO_HT40;
@@ -1623,11 +1620,15 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
flags &= ~NL80211_RRF_NO_HT40PLUS;
if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS)
flags &= ~NL80211_RRF_NO_HT40MINUS;
- } else if (nvm_flags & NVM_CHANNEL_40MHZ) {
+ } else if (ch_idx < NUM_2GHZ_CHANNELS + NUM_5GHZ_CHANNELS &&
+ nvm_flags & NVM_CHANNEL_40MHZ) {
if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0)
flags &= ~NL80211_RRF_NO_HT40PLUS;
else
flags &= ~NL80211_RRF_NO_HT40MINUS;
+ } else if (nvm_flags & NVM_CHANNEL_40MHZ) {
+ flags &= ~NL80211_RRF_NO_HT40PLUS;
+ flags &= ~NL80211_RRF_NO_HT40MINUS;
}
if (!(nvm_flags & NVM_CHANNEL_80MHZ))
@@ -1660,13 +1661,13 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan,
}
/* Set the AP type for the UHB case. */
- if (uats_enabled) {
- if (!(nvm_flags & NVM_CHANNEL_VLP))
- flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT;
+ if (nvm_flags & NVM_CHANNEL_VLP)
+ flags |= NL80211_RRF_ALLOW_6GHZ_VLP_AP;
+ else
+ flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT;
- if (!(nvm_flags & NVM_CHANNEL_AFC))
- flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;
- }
+ if (!(nvm_flags & NVM_CHANNEL_AFC))
+ flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT;
/*
* reg_capa is per regulatory domain so apply it for every channel
@@ -1722,7 +1723,7 @@ static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver)
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled)
+ u16 geo_info, u32 cap, u8 resp_ver)
{
int ch_idx;
u16 ch_flags;
@@ -1730,7 +1731,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
const u16 *nvm_chan;
struct ieee80211_regdomain *regd, *copy_rd;
struct ieee80211_reg_rule *rule;
- enum nl80211_band band;
int center_freq, prev_center_freq = 0;
int valid_rules = 0;
bool new_rule;
@@ -1774,8 +1774,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
reg_capa = iwl_get_reg_capa(cap, resp_ver);
for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) {
+ enum nl80211_band band =
+ iwl_nl80211_band_from_channel_idx(ch_idx);
+
ch_flags = (u16)__le32_to_cpup(channels + ch_idx);
- band = iwl_nl80211_band_from_channel_idx(ch_idx);
center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx],
band);
new_rule = false;
@@ -1788,7 +1790,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx,
ch_flags, reg_capa,
- cfg, uats_enabled);
+ cfg);
/* we can't continue the same rule */
if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags ||
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
index fd9c3bed9407..0c6c3fb8c6dd 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h
@@ -1,13 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2015, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2015, 2018-2024 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_nvm_parse_h__
#define __iwl_nvm_parse_h__
#include <net/cfg80211.h>
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "mei/iwl-mei.h"
/**
@@ -38,7 +38,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
u8 tx_chains, u8 rx_chains);
/**
- * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW
+ * iwl_parse_nvm_mcc_info - parse MCC (mobile country code) info coming from FW
*
* This function parses the regulatory channel data received as a
 * MCC_UPDATE_CMD command. It returns a newly allocated regulatory domain,
@@ -50,7 +50,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
struct ieee80211_regdomain *
iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg,
int num_of_ch, __le32 *channels, u16 fw_mcc,
- u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled);
+ u16 geo_info, u32 cap, u8 resp_ver);
/**
* struct iwl_nvm_section - describes an NVM section in memory.
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c
new file mode 100644
index 000000000000..b3c25acd3691
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2015 Intel Mobile Communications GmbH
+ */
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include "iwl-drv.h"
+#include "iwl-modparams.h"
+#include "iwl-nvm-utils.h"
+
+int iwl_init_sband_channels(struct iwl_nvm_data *data,
+ struct ieee80211_supported_band *sband,
+ int n_channels, enum nl80211_band band)
+{
+ struct ieee80211_channel *chan = &data->channels[0];
+ int n = 0, idx = 0;
+
+ while (idx < n_channels && chan->band != band)
+ chan = &data->channels[++idx];
+
+ sband->channels = &data->channels[idx];
+
+ while (idx < n_channels && chan->band == band) {
+ chan = &data->channels[++idx];
+ n++;
+ }
+
+ sband->n_channels = n;
+
+ return n;
+}
+IWL_EXPORT_SYMBOL(iwl_init_sband_channels);
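
iwl_init_sband_channels() depends on data->channels[] being sorted so each band forms one contiguous run: the first loop skips ahead to the run and points sband->channels at it, the second measures its length. The same two-loop slice, modeled standalone on a plain array of band tags (the tag values are arbitrary):

#include <stdio.h>

/* Find the [*start, *start + n) slice of 'bands' whose tag equals 'band'.
 * Mirrors the two while-loops in iwl_init_sband_channels(). */
static int band_slice(const int *bands, int n_channels, int band, int *start)
{
	int idx = 0, n = 0;

	while (idx < n_channels && bands[idx] != band)
		idx++;
	*start = idx;
	while (idx < n_channels && bands[idx] == band) {
		idx++;
		n++;
	}
	return n;
}

int main(void)
{
	int bands[] = { 0, 0, 0, 1, 1, 2 };	/* 2 GHz, 5 GHz, 6 GHz runs */
	int start;
	int n = band_slice(bands, 6, 1, &start);

	printf("band 1: %d channels starting at index %d\n", n, start);
	return 0;
}
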
+
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+
+void iwl_init_ht_hw_capab(struct iwl_trans *trans,
+ struct iwl_nvm_data *data,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum nl80211_band band,
+ u8 tx_chains, u8 rx_chains)
+{
+ const struct iwl_cfg *cfg = trans->cfg;
+ int max_bit_rate = 0;
+
+ tx_chains = hweight8(tx_chains);
+ if (cfg->rx_with_siso_diversity)
+ rx_chains = 1;
+ else
+ rx_chains = hweight8(rx_chains);
+
+ if (!(data->sku_cap_11n_enable) ||
+ (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) ||
+ !cfg->ht_params) {
+ ht_info->ht_supported = false;
+ return;
+ }
+
+ if (data->sku_cap_mimo_disabled)
+ rx_chains = 1;
+
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
+
+ if (cfg->ht_params->stbc) {
+ ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+
+ if (tx_chains > 1)
+ ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
+ }
+
+ if (cfg->ht_params->ldpc)
+ ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+
+ if (trans->trans_cfg->mq_rx_supported ||
+ iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
+ ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4;
+
+ ht_info->mcs.rx_mask[0] = 0xFF;
+ ht_info->mcs.rx_mask[1] = 0x00;
+ ht_info->mcs.rx_mask[2] = 0x00;
+
+ if (rx_chains >= 2)
+ ht_info->mcs.rx_mask[1] = 0xFF;
+ if (rx_chains >= 3)
+ ht_info->mcs.rx_mask[2] = 0xFF;
+
+ if (cfg->ht_params->ht_greenfield_support)
+ ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+
+ max_bit_rate = MAX_BIT_RATE_20_MHZ;
+
+ if (cfg->ht_params->ht40_bands & BIT(band)) {
+ ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+ max_bit_rate = MAX_BIT_RATE_40_MHZ;
+ }
+
+ /* Highest supported Rx data rate */
+ max_bit_rate *= rx_chains;
+ WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+ ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+ /* Tx MCS capabilities */
+ ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (tx_chains != rx_chains) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_chains - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+}
+IWL_EXPORT_SYMBOL(iwl_init_ht_hw_capab);
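
rx_highest ends up being simple arithmetic: 72 Mbps per chain at 20 MHz, or 150 Mbps per chain when the band allows HT40, multiplied by the number of Rx chains. A worked check of that computation using the two constants defined above:

#include <assert.h>
#include <stdio.h>

#define MAX_BIT_RATE_40_MHZ 150	/* Mbps, per chain */
#define MAX_BIT_RATE_20_MHZ 72	/* Mbps, per chain */

static int ht_rx_highest(int rx_chains, int ht40)
{
	int max_bit_rate = ht40 ? MAX_BIT_RATE_40_MHZ : MAX_BIT_RATE_20_MHZ;

	return max_bit_rate * rx_chains;
}

int main(void)
{
	/* 2x2 device on an HT40-capable band: MCS15 short-GI rate */
	assert(ht_rx_highest(2, 1) == 300);
	/* SISO-diversity device limited to 20 MHz: MCS7 short-GI rate */
	assert(ht_rx_highest(1, 0) == 72);
	printf("ok\n");
	return 0;
}
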
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h
index 34a178a2eb5d..ac0a29a1c31f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h
@@ -58,23 +58,6 @@ struct iwl_nvm_data {
struct ieee80211_channel channels[];
};
-/**
- * iwl_parse_eeprom_data - parse EEPROM data and return values
- *
- * @trans: ransport we're parsing for, for debug only
- * @cfg: device configuration for parsing and overrides
- * @eeprom: the EEPROM data
- * @eeprom_size: length of the EEPROM data
- *
- * This function parses all EEPROM values we need and then
- * returns a (newly allocated) struct containing all the
- * relevant values for driver use. The struct must be freed
- * later with iwl_free_nvm_data().
- */
-struct iwl_nvm_data *
-iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg,
- const u8 *eeprom, size_t eeprom_size);
-
int iwl_init_sband_channels(struct iwl_nvm_data *data,
struct ieee80211_supported_band *sband,
int n_channels, enum nl80211_band band);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
index 1ca82f3e4ebf..595fa6ddf084 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2021, 2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -185,7 +185,8 @@ static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
{
might_sleep();
- op_mode->ops->nic_config(op_mode);
+ if (op_mode->ops->nic_config)
+ op_mode->ops->nic_config(op_mode);
}
static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
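
The nic_config hook is now optional: the wrapper guards the call, so op modes with nothing to configure can leave the member NULL instead of supplying an empty stub. A reduced sketch of the pattern with a hypothetical ops table:

#include <stdio.h>

struct op_mode_ops {
	/* optional: may be NULL if the op mode has nothing to configure */
	void (*nic_config)(void *ctx);
};

static void call_nic_config(const struct op_mode_ops *ops, void *ctx)
{
	if (ops->nic_config)	/* guard replaces a mandatory no-op stub */
		ops->nic_config(ctx);
}

static void mvm_like_nic_config(void *ctx)
{
	printf("configured %p\n", ctx);
}

int main(void)
{
	struct op_mode_ops with = { .nic_config = mvm_like_nic_config };
	struct op_mode_ops without = { 0 };	/* nothing to configure */
	int dummy;

	call_nic_config(&with, &dummy);
	call_nic_config(&without, &dummy);	/* safely does nothing */
	return 0;
}
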
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index a7d44df06eab..dc171c29eb7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016 Intel Deutschland GmbH
*/
@@ -96,7 +96,7 @@
#define DTSC_PTAT_AVG (0x00a10650)
-/**
+/*
* Tx Scheduler
*
* The Tx Scheduler selects the next frame to be transmitted, choosing TFDs
@@ -169,7 +169,7 @@
*/
#define SCD_MEM_LOWER_BOUND (0x0000)
-/**
+/*
* Max Tx window size is the max number of contiguous TFDs that the scheduler
* can keep track of at one time when creating block-ack chains of frames.
* Note that "64" matches the number of ack bits in a block-ack packet.
@@ -371,7 +371,10 @@ enum {
#define CNVI_AUX_MISC_CHIP 0xA200B0
#define CNVI_AUX_MISC_CHIP_MAC_STEP(_val) (((_val) & 0xf000000) >> 24)
#define CNVI_AUX_MISC_CHIP_PROD_TYPE(_val) ((_val) & 0xfff)
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_GL 0x910
#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U 0x930
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_I 0x900
+#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_W 0x901
#define CNVR_AUX_MISC_CHIP 0xA2B800
#define CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM 0xA29890
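
CNVI_AUX_MISC_CHIP carries the MAC step in bits 27:24 and the product type in bits 11:0, which is how the new GL/BZ_I/BZ_W values get distinguished from BZ_U with a single register read. A quick standalone check of the extraction macros against a made-up register value:

#include <assert.h>

#define CNVI_AUX_MISC_CHIP_MAC_STEP(_val)	(((_val) & 0xf000000) >> 24)
#define CNVI_AUX_MISC_CHIP_PROD_TYPE(_val)	((_val) & 0xfff)
#define CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U	0x930

int main(void)
{
	unsigned int val = 0x1000930;	/* hypothetical register read */

	assert(CNVI_AUX_MISC_CHIP_MAC_STEP(val) == 1);
	assert(CNVI_AUX_MISC_CHIP_PROD_TYPE(val) ==
	       CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U);
	return 0;
}
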
@@ -453,11 +456,7 @@ enum {
#define REG_CRF_ID_TYPE_HR_NONE_CDB_1X1 0x501
#define REG_CRF_ID_TYPE_HR_NONE_CDB_CCP 0x532
#define REG_CRF_ID_TYPE_GF 0x410
-#define REG_CRF_ID_TYPE_GF_TC 0xF08
-#define REG_CRF_ID_TYPE_MR 0x810
#define REG_CRF_ID_TYPE_FM 0x910
-#define REG_CRF_ID_TYPE_FMI 0x930
-#define REG_CRF_ID_TYPE_FMR 0x900
#define REG_CRF_ID_TYPE_WHP 0xA10
#define HPM_DEBUG 0xA03440
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
index f95098c21c7d..3c9d91496c82 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2021, 2023 Intel Corporation
+ * Copyright (C) 2019-2021, 2023-2024 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/bsearch.h>
@@ -11,13 +11,13 @@
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"
-#include "queue/tx.h"
#include <linux/dmapool.h>
#include "fw/api/commands.h"
+#include "pcie/internal.h"
+#include "iwl-context-info-gen3.h"
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops,
const struct iwl_cfg_trans_params *cfg_trans)
{
struct iwl_trans *trans;
@@ -37,22 +37,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
#endif
trans->dev = dev;
- trans->ops = ops;
trans->num_rx_queues = 1;
- WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty);
-
- if (trans->trans_cfg->gen2) {
- trans->txqs.tfd.addr_size = 64;
- trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
- trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
- } else {
- trans->txqs.tfd.addr_size = 36;
- trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
- trans->txqs.tfd.size = sizeof(struct iwl_tfd);
- }
- trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans);
-
return trans;
}
@@ -78,31 +64,6 @@ int iwl_trans_init(struct iwl_trans *trans)
if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align))
return -EINVAL;
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- trans->txqs.bc_tbl_size =
- sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
- else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- trans->txqs.bc_tbl_size =
- sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
- else
- trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
- /*
- * For gen2 devices, we use a single allocation for each byte-count
- * table, but they're pretty small (1k) so use a DMA pool that we
- * allocate here.
- */
- if (trans->trans_cfg->gen2) {
- trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev,
- trans->txqs.bc_tbl_size,
- 256, 0);
- if (!trans->txqs.bc_pool)
- return -ENOMEM;
- }
-
- /* Some things must not change even if the config does */
- WARN_ON(trans->txqs.tfd.addr_size !=
- (trans->trans_cfg->gen2 ? 64 : 36));
-
snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
"iwl_cmd_pool:%s", dev_name(trans->dev));
trans->dev_cmd_pool =
@@ -112,12 +73,6 @@ int iwl_trans_init(struct iwl_trans *trans)
if (!trans->dev_cmd_pool)
return -ENOMEM;
- trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
- if (!trans->txqs.tso_hdr_page) {
- kmem_cache_destroy(trans->dev_cmd_pool);
- return -ENOMEM;
- }
-
/* Initialize the wait queue for commands */
init_waitqueue_head(&trans->wait_command_queue);
@@ -126,20 +81,6 @@ int iwl_trans_init(struct iwl_trans *trans)
void iwl_trans_free(struct iwl_trans *trans)
{
- int i;
-
- if (trans->txqs.tso_hdr_page) {
- for_each_possible_cpu(i) {
- struct iwl_tso_hdr_page *p =
- per_cpu_ptr(trans->txqs.tso_hdr_page, i);
-
- if (p && p->page)
- __free_page(p->page);
- }
-
- free_percpu(trans->txqs.tso_hdr_page);
- }
-
kmem_cache_destroy(trans->dev_cmd_pool);
}
@@ -167,10 +108,9 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
return -EIO;
- if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
return -EIO;
- }
if (!(cmd->flags & CMD_ASYNC))
lock_map_acquire_read(&trans->sync_cmd_lockdep_map);
@@ -180,7 +120,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
cmd->id = DEF_ID(cmd->id);
}
- ret = iwl_trans_txq_send_hcmd(trans, cmd);
+ ret = iwl_trans_pcie_send_hcmd(trans, cmd);
if (!(cmd->flags & CMD_ASYNC))
lock_map_release(&trans->sync_cmd_lockdep_map);
@@ -247,3 +187,379 @@ int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans)
return 0;
}
IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted);
+
+void iwl_trans_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg)
+{
+ trans->op_mode = trans_cfg->op_mode;
+
+ iwl_trans_pcie_configure(trans, trans_cfg);
+ WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
+}
+IWL_EXPORT_SYMBOL(iwl_trans_configure);
+
+int iwl_trans_start_hw(struct iwl_trans *trans)
+{
+ might_sleep();
+
+ return iwl_trans_pcie_start_hw(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_start_hw);
+
+void iwl_trans_op_mode_leave(struct iwl_trans *trans)
+{
+ might_sleep();
+
+ iwl_trans_pcie_op_mode_leave(trans);
+
+ trans->op_mode = NULL;
+
+ trans->state = IWL_TRANS_NO_FW;
+}
+IWL_EXPORT_SYMBOL(iwl_trans_op_mode_leave);
+
+void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+{
+ iwl_trans_pcie_write8(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write8);
+
+void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ iwl_trans_pcie_write32(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write32);
+
+u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
+{
+ return iwl_trans_pcie_read32(trans, ofs);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_read32);
+
+u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
+{
+ return iwl_trans_pcie_read_prph(trans, ofs);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_read_prph);
+
+void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
+{
+ return iwl_trans_pcie_write_prph(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write_prph);
+
+int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
+{
+ return iwl_trans_pcie_read_mem(trans, addr, buf, dwords);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_read_mem);
+
+int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords)
+{
+ return iwl_trans_pcie_write_mem(trans, addr, buf, dwords);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write_mem);
+
+void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
+{
+ if (state)
+ set_bit(STATUS_TPOWER_PMI, &trans->status);
+ else
+ clear_bit(STATUS_TPOWER_PMI, &trans->status);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_pmi);
+
+int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership)
+{
+ return iwl_trans_pcie_sw_reset(trans, retake_ownership);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_sw_reset);
+
+struct iwl_trans_dump_data *
+iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx)
+{
+ return iwl_trans_pcie_dump_data(trans, dump_mask,
+ sanitize_ops, sanitize_ctx);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_dump_data);
+
+int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
+{
+ might_sleep();
+
+ return iwl_trans_pcie_d3_suspend(trans, test, reset);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend);
+
+int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
+ bool test, bool reset)
+{
+ might_sleep();
+
+ return iwl_trans_pcie_d3_resume(trans, status, test, reset);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_d3_resume);
+
+void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
+{
+ iwl_trans_pci_interrupts(trans, enable);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_interrupts);
+
+void iwl_trans_sync_nmi(struct iwl_trans *trans)
+{
+ iwl_trans_pcie_sync_nmi(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_sync_nmi);
+
+int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
+ u64 src_addr, u32 byte_cnt)
+{
+ return iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, byte_cnt);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_write_imr_mem);
+
+void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value)
+{
+ iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_bits_mask);
+
+int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val)
+{
+ return iwl_trans_pcie_read_config32(trans, ofs, val);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_read_config32);
+
+bool _iwl_trans_grab_nic_access(struct iwl_trans *trans)
+{
+ return iwl_trans_pcie_grab_nic_access(trans);
+}
+IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access);
+
+void __releases(nic_access)
+iwl_trans_release_nic_access(struct iwl_trans *trans)
+{
+ iwl_trans_pcie_release_nic_access(trans);
+ __release(nic_access);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access);
+
+void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+{
+ might_sleep();
+
+ trans->state = IWL_TRANS_FW_ALIVE;
+
+ if (trans->trans_cfg->gen2)
+ iwl_trans_pcie_gen2_fw_alive(trans);
+ else
+ iwl_trans_pcie_fw_alive(trans, scd_addr);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_fw_alive);
+
+int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
+ bool run_in_rfkill)
+{
+ int ret;
+
+ might_sleep();
+
+ WARN_ON_ONCE(!trans->rx_mpdu_cmd);
+
+ clear_bit(STATUS_FW_ERROR, &trans->status);
+
+ if (trans->trans_cfg->gen2)
+ ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill);
+ else
+ ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill);
+
+ if (ret == 0)
+ trans->state = IWL_TRANS_FW_STARTED;
+
+ return ret;
+}
+IWL_EXPORT_SYMBOL(iwl_trans_start_fw);
+
+void iwl_trans_stop_device(struct iwl_trans *trans)
+{
+ might_sleep();
+
+ if (trans->trans_cfg->gen2)
+ iwl_trans_pcie_gen2_stop_device(trans);
+ else
+ iwl_trans_pcie_stop_device(trans);
+
+ trans->state = IWL_TRANS_NO_FW;
+}
+IWL_EXPORT_SYMBOL(iwl_trans_stop_device);
+
+int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int queue)
+{
+ if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
+ return -EIO;
+
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return -EIO;
+
+ if (trans->trans_cfg->gen2)
+ return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue);
+
+ return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_tx);
+
+void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
+ struct sk_buff_head *skbs, bool is_flush)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return;
+
+ iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_reclaim);
+
+void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
+ bool configure_scd)
+{
+ iwl_trans_pcie_txq_disable(trans, queue, configure_scd);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_disable);
+
+bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int queue_wdg_timeout)
+{
+ might_sleep();
+
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return false;
+
+ return iwl_trans_pcie_txq_enable(trans, queue, ssn,
+ cfg, queue_wdg_timeout);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg);
+
+int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return -EIO;
+
+ return iwl_trans_pcie_wait_txq_empty(trans, queue);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_wait_txq_empty);
+
+int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return -EIO;
+
+ return iwl_trans_pcie_wait_txqs_empty(trans, txqs);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_wait_tx_queues_empty);
+
+void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return;
+
+ iwl_pcie_freeze_txq_timer(trans, txqs, freeze);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_freeze_txq_timer);
+
+void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
+ int txq_id, bool shared_mode)
+{
+ iwl_trans_pcie_txq_set_shared_mode(trans, txq_id, shared_mode);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_set_shared_mode);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_trans_debugfs_cleanup(struct iwl_trans *trans)
+{
+ iwl_trans_pcie_debugfs_cleanup(trans);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_debugfs_cleanup);
+#endif
+
+void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr)
+{
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return;
+
+ iwl_pcie_set_q_ptrs(trans, queue, ptr);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_q_ptrs);
+
+int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+ u8 tid, int size, unsigned int wdg_timeout)
+{
+ might_sleep();
+
+ if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
+ "bad state = %d\n", trans->state))
+ return -EIO;
+
+ return iwl_txq_dyn_alloc(trans, flags, sta_mask, tid,
+ size, wdg_timeout);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_alloc);
+
+void iwl_trans_txq_free(struct iwl_trans *trans, int queue)
+{
+ iwl_txq_dyn_free(trans, queue);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_txq_free);
+
+int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
+{
+ return iwl_trans_pcie_rxq_dma_data(trans, queue, data);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_get_rxq_dma_data);
+
+int iwl_trans_load_pnvm(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_data,
+ const struct iwl_ucode_capabilities *capa)
+{
+ return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm);
+
+void iwl_trans_set_pnvm(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
+{
+ iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm);
+
+int iwl_trans_load_reduce_power(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa)
+{
+ return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads,
+ capa);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power);
+
+void iwl_trans_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
+{
+ iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power);
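
All of these wrappers replace the old iwl_trans_ops indirection with direct calls, branching on trans_cfg->gen2 where the gen1 and gen2 PCIe paths diverge. A reduced model of that refactor; the function names here are stand-ins, not the real PCIe entry points:

#include <stdio.h>
#include <stdbool.h>

struct trans { bool gen2; };

static int pcie_tx(struct trans *t)      { (void)t; return 1; }	/* stand-in for iwl_trans_pcie_tx() */
static int pcie_gen2_tx(struct trans *t) { (void)t; return 2; }	/* stand-in for iwl_txq_gen2_tx() */

/* Before: t->ops->tx(t, ...) through a function-pointer table.
 * After: one exported wrapper picks the implementation itself. */
static int trans_tx(struct trans *t)
{
	if (t->gen2)
		return pcie_gen2_tx(t);
	return pcie_tx(t);
}

int main(void)
{
	struct trans gen1 = { .gen2 = false }, gen2 = { .gen2 = true };

	printf("%d %d\n", trans_tx(&gen1), trans_tx(&gen2));
	return 0;
}
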
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index b93cef7b2330..6148acbac6af 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -26,11 +26,9 @@
* DOC: Transport layer - what is it ?
*
* The transport layer is the layer that deals with the HW directly. It provides
- * an abstraction of the underlying HW to the upper layer. The transport layer
- * doesn't provide any policy, algorithm or anything of this kind, but only
- * mechanisms to make the HW do something. It is not completely stateless but
- * close to it.
- * We will have an implementation for each different supported bus.
+ * the PCIe access to the underlying hardware. The transport layer doesn't
+ * provide any policy, algorithm or anything of this kind, but only mechanisms
+ * to make the HW do something. It is not completely stateless but close to it.
*/
/**
@@ -122,6 +120,7 @@ enum CMD_MODE {
CMD_BLOCK_TXQS = BIT(3),
CMD_SEND_IN_D3 = BIT(4),
};
+#define CMD_MODE_BITS 5
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -131,6 +130,11 @@ enum CMD_MODE {
* For allocation of the command and tx queues, this establishes the overall
* size of the largest command we send to uCode, except for commands that
* aren't fully copied and use other TFD space.
+ *
+ * @hdr: command header
+ * @payload: payload for the command
+ * @hdr_wide: wide command header
+ * @payload_wide: payload for the wide command
*/
struct iwl_device_cmd {
union {
@@ -167,12 +171,6 @@ struct iwl_device_tx_cmd {
*/
#define IWL_MAX_CMD_TBS_PER_TFD 2
-/* We need 2 entries for the TX command and header, and another one might
- * be needed for potential data in the SKB's head. The remaining ones can
- * be used for frags.
- */
-#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)
-
/**
* enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
*
@@ -281,7 +279,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
#define IWL_9000_MAX_RX_HW_QUEUES 1
/**
- * enum iwl_wowlan_status - WoWLAN image/device status
+ * enum iwl_d3_status - WoWLAN image/device status
* @IWL_D3_STATUS_ALIVE: firmware is still running after resume
* @IWL_D3_STATUS_RESET: device was reset while suspended
*/
@@ -299,9 +297,6 @@ enum iwl_d3_status {
* @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
* @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
* @STATUS_FW_ERROR: the fw is in error state
- * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
- * are sent
- * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
* @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
* e.g. for testing
@@ -314,8 +309,6 @@ enum iwl_trans_status {
STATUS_RFKILL_HW,
STATUS_RFKILL_OPMODE,
STATUS_FW_ERROR,
- STATUS_TRANS_GOING_IDLE,
- STATUS_TRANS_IDLE,
STATUS_TRANS_DEAD,
STATUS_SUPPRESS_CMD_ERROR_ONCE,
};
@@ -482,183 +475,6 @@ struct iwl_pnvm_image {
};
/**
- * struct iwl_trans_ops - transport specific operations
- *
- * All the handlers MUST be implemented
- *
- * @start_hw: starts the HW. From that point on, the HW can send interrupts.
- * May sleep.
- * @op_mode_leave: Turn off the HW RF kill indication if on
- * May sleep
- * @start_fw: allocates and inits all the resources for the transport
- * layer. Also kick a fw image.
- * May sleep
- * @fw_alive: called when the fw sends alive notification. If the fw provides
- * the SCD base address in SRAM, then provide it here, or 0 otherwise.
- * May sleep
- * @stop_device: stops the whole device (embedded CPU put to reset) and stops
- * the HW. From that point on, the HW will be stopped but will still issue
- * an interrupt if the HW RF kill switch is triggered.
- * This callback must do the right thing and not crash even if %start_hw()
- * was called but not &start_fw(). May sleep.
- * @d3_suspend: put the device into the correct mode for WoWLAN during
- * suspend. This is optional, if not implemented WoWLAN will not be
- * supported. This callback may sleep.
- * @d3_resume: resume the device after WoWLAN, enabling the opmode to
- * talk to the WoWLAN image to get its status. This is optional, if not
- * implemented WoWLAN will not be supported. This callback may sleep.
- * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted.
- * If RFkill is asserted in the middle of a SYNC host command, it must
- * return -ERFKILL straight away.
- * May sleep only if CMD_ASYNC is not set
- * @tx: send an skb. The transport relies on the op_mode to zero the
- * the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
- * the CSUM will be taken care of (TCP CSUM and IP header in case of
- * IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
- * header if it is IPv4.
- * Must be atomic
- * @reclaim: free packet until ssn. Returns a list of freed packets.
- * Must be atomic
- * @set_q_ptrs: set queue pointers internally, after D3 when HW state changed
- * @txq_enable: setup a queue. To setup an AC queue, use the
- * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
- * this one. The op_mode must not configure the HCMD queue. The scheduler
- * configuration may be %NULL, in which case the hardware will not be
- * configured. If true is returned, the operation mode needs to increment
- * the sequence number of the packets routed to this queue because of a
- * hardware scheduler bug. May sleep.
- * @txq_disable: de-configure a Tx queue to send AMPDUs
- * Must be atomic
- * @txq_alloc: Allocate a new TX queue, may sleep.
- * @txq_free: Free a previously allocated TX queue.
- * @txq_set_shared_mode: change Tx queue shared/unshared marking
- * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
- * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
- * @freeze_txq_timer: prevents the timer of the queue from firing until the
- * queue is set to awake. Must be atomic.
- * @write8: write a u8 to a register at offset ofs from the BAR
- * @write32: write a u32 to a register at offset ofs from the BAR
- * @read32: read a u32 register at offset ofs from the BAR
- * @read_prph: read a DWORD from a periphery register
- * @write_prph: write a DWORD to a periphery register
- * @read_mem: read device's SRAM in DWORD
- * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
- * will be zeroed.
- * @read_config32: read a u32 value from the device's config space at
- * the given offset.
- * @configure: configure parameters required by the transport layer from
- * the op_mode. May be called several times before start_fw, can't be
- * called after that.
- * @set_pmi: set the power pmi state
- * @sw_reset: trigger software reset of the NIC
- * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
- * Sleeping is not allowed between grab_nic_access and
- * release_nic_access.
- * @release_nic_access: let the NIC go to sleep. The "flags" parameter
- * must be the same one that was sent before to the grab_nic_access.
- * @set_bits_mask: set SRAM register according to value and mask.
- * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
- * TX'ed commands and similar. The buffer will be vfree'd by the caller.
- * Note that the transport must fill in the proper file headers.
- * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
- * of the trans debugfs
- * @sync_nmi: trigger a firmware NMI and wait for it to complete
- * @load_pnvm: save the pnvm data in DRAM
- * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
- * context info.
- * @load_reduce_power: copy reduce power table to the corresponding DRAM memory
- * @set_reduce_power: set reduce power table addresses in the sratch buffer
- * @interrupts: disable/enable interrupts to transport
- * @imr_dma_data: set up IMR DMA
- * @rxq_dma_data: retrieve RX queue DMA data, see @struct iwl_trans_rxq_dma_data
- */
-struct iwl_trans_ops {
-
- int (*start_hw)(struct iwl_trans *iwl_trans);
- void (*op_mode_leave)(struct iwl_trans *iwl_trans);
- int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
- bool run_in_rfkill);
- void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
- void (*stop_device)(struct iwl_trans *trans);
-
- int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
- int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
- bool test, bool reset);
-
- int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-
- int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int queue);
- void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
- struct sk_buff_head *skbs, bool is_flush);
-
- void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);
-
- bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int queue_wdg_timeout);
- void (*txq_disable)(struct iwl_trans *trans, int queue,
- bool configure_scd);
- /* 22000 functions */
- int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
- u32 sta_mask, u8 tid,
- int size, unsigned int queue_wdg_timeout);
- void (*txq_free)(struct iwl_trans *trans, int queue);
- int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
- struct iwl_trans_rxq_dma_data *data);
-
- void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
- bool shared);
-
- int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
- int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
- void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
- bool freeze);
-
- void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
- void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
- u32 (*read32)(struct iwl_trans *trans, u32 ofs);
- u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
- void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
- int (*read_mem)(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords);
- int (*write_mem)(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords);
- int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
- void (*configure)(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg);
- void (*set_pmi)(struct iwl_trans *trans, bool state);
- int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership);
- bool (*grab_nic_access)(struct iwl_trans *trans);
- void (*release_nic_access)(struct iwl_trans *trans);
- void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
- u32 value);
-
- struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
- u32 dump_mask,
- const struct iwl_dump_sanitize_ops *sanitize_ops,
- void *sanitize_ctx);
- void (*debugfs_cleanup)(struct iwl_trans *trans);
- void (*sync_nmi)(struct iwl_trans *trans);
- int (*load_pnvm)(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_payloads,
- const struct iwl_ucode_capabilities *capa);
- void (*set_pnvm)(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa);
- int (*load_reduce_power)(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads,
- const struct iwl_ucode_capabilities *capa);
- void (*set_reduce_power)(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa);
-
- void (*interrupts)(struct iwl_trans *trans, bool enable);
- int (*imr_dma_data)(struct iwl_trans *trans,
- u32 dst_addr, u64 src_addr,
- u32 byte_cnt);
-
-};
-
-/**
* enum iwl_trans_state - state of the transport layer
*
* @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
@@ -897,7 +713,9 @@ struct iwl_dma_ptr {
struct iwl_cmd_meta {
/* only for SYNC commands, iff the reply skb is wanted */
struct iwl_host_cmd *source;
- u32 flags;
+ u32 flags: CMD_MODE_BITS;
+ /* sg_offset is valid if it is non-zero */
+ u32 sg_offset: PAGE_SHIFT;
u32 tbs;
};
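
Narrowing flags to CMD_MODE_BITS and adding a PAGE_SHIFT-wide sg_offset lets both share one 32-bit word: five mode bits plus an in-page offset that can never reach PAGE_SIZE, with zero reserved to mean no offset. A minimal sketch of the packing, taking CMD_MODE_BITS = 5 from the header change above and assuming PAGE_SHIFT = 12 (4 KiB pages):

#include <assert.h>

#define CMD_MODE_BITS	5	/* from the enum CMD_MODE change above */
#define PAGE_SHIFT	12	/* assumption: 4 KiB pages */

struct cmd_meta {
	unsigned int flags : CMD_MODE_BITS;
	unsigned int sg_offset : PAGE_SHIFT;	/* valid iff non-zero */
};

int main(void)
{
	struct cmd_meta meta = { .flags = 0x1f, .sg_offset = 4095 };

	assert(meta.flags == 0x1f);	/* all five CMD_MODE bits fit */
	assert(meta.sg_offset == 4095);	/* largest in-page offset */
	/* 5 + 12 = 17 bits pack into a single word on common ABIs */
	assert(sizeof(meta) == sizeof(unsigned int));
	return 0;
}
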
@@ -934,6 +752,7 @@ struct iwl_pcie_first_tb_buf {
* @first_tb_dma: DMA address for the first_tb_bufs start
* @entries: transmit entries (driver state)
* @lock: queue lock
+ * @reclaim_lock: lock to prevent concurrent reclaim
* @stuck_timer: timer that fires if queue gets stuck
* @trans: pointer back to transport (for timer)
* @need_update: indicates need to update read/write index
@@ -976,6 +795,8 @@ struct iwl_txq {
struct iwl_pcie_txq_entry *entries;
/* lock for syncing changes on the queue */
spinlock_t lock;
+ /* lock to prevent concurrent reclaim */
+ spinlock_t reclaim_lock;
unsigned long frozen_expiry_remainder;
struct timer_list stuck_timer;
struct iwl_trans *trans;
@@ -999,58 +820,9 @@ struct iwl_txq {
};
/**
- * struct iwl_trans_txqs - transport tx queues data
- *
- * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
- * @page_offs: offset from skb->cb to mac header page pointer
- * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
- * @queue_used: bit mask of used queues
- * @queue_stopped: bit mask of stopped queues
- * @txq: array of TXQ data structures representing the TXQs
- * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
- * @queue_alloc_cmd_ver: queue allocation command version
- * @bc_pool: bytecount DMA allocations pool
- * @bc_tbl_size: bytecount table size
- * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
- * (and similar usage)
- * @tfd: TFD data
- * @tfd.max_tbs: max number of buffers per TFD
- * @tfd.size: TFD size
- * @tfd.addr_size: TFD/TB address size
- */
-struct iwl_trans_txqs {
- unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
- struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
- struct dma_pool *bc_pool;
- size_t bc_tbl_size;
- bool bc_table_dword;
- u8 page_offs;
- u8 dev_cmd_offs;
- struct iwl_tso_hdr_page __percpu *tso_hdr_page;
-
- struct {
- u8 fifo;
- u8 q_id;
- unsigned int wdg_timeout;
- } cmd;
-
- struct {
- u8 max_tbs;
- u16 size;
- u8 addr_size;
- } tfd;
-
- struct iwl_dma_ptr scd_bc_tbls;
-
- u8 queue_alloc_cmd_ver;
-};
-
-/**
* struct iwl_trans - transport common data
*
* @csme_own: true if we couldn't get ownership on the device
- * @ops: pointer to iwl_trans_ops
* @op_mode: pointer to the op_mode
* @trans_cfg: the trans-specific configuration part
* @cfg: pointer to the configuration
@@ -1099,7 +871,6 @@ struct iwl_trans_txqs {
* This mode is set dynamically, depending on the WoWLAN values
* configured from the userspace at runtime.
* @name: the device name
- * @txqs: transport tx queues data.
* @mbx_addr_0_step: step address data 0
* @mbx_addr_1_step: step address data 1
* @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
@@ -1112,7 +883,6 @@ struct iwl_trans_txqs {
*/
struct iwl_trans {
bool csme_own;
- const struct iwl_trans_ops *ops;
struct iwl_op_mode *op_mode;
const struct iwl_cfg_trans_params *trans_cfg;
const struct iwl_cfg *cfg;
@@ -1169,7 +939,6 @@ struct iwl_trans {
enum iwl_plat_pm_mode system_pm_mode;
const char *name;
- struct iwl_trans_txqs txqs;
u32 mbx_addr_0_step;
u32 mbx_addr_1_step;
@@ -1185,101 +954,29 @@ struct iwl_trans {
const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);
-static inline void iwl_trans_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg)
-{
- trans->op_mode = trans_cfg->op_mode;
+void iwl_trans_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg);
- trans->ops->configure(trans, trans_cfg);
- WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
-}
+int iwl_trans_start_hw(struct iwl_trans *trans);
-static inline int iwl_trans_start_hw(struct iwl_trans *trans)
-{
- might_sleep();
+void iwl_trans_op_mode_leave(struct iwl_trans *trans);
- return trans->ops->start_hw(trans);
-}
+void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr);
-static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
-{
- might_sleep();
+int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw,
+ bool run_in_rfkill);
- if (trans->ops->op_mode_leave)
- trans->ops->op_mode_leave(trans);
+void iwl_trans_stop_device(struct iwl_trans *trans);
- trans->op_mode = NULL;
-
- trans->state = IWL_TRANS_NO_FW;
-}
-
-static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
-{
- might_sleep();
-
- trans->state = IWL_TRANS_FW_ALIVE;
-
- trans->ops->fw_alive(trans, scd_addr);
-}
-
-static inline int iwl_trans_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw,
- bool run_in_rfkill)
-{
- int ret;
+int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
- might_sleep();
+int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status,
+ bool test, bool reset);
- WARN_ON_ONCE(!trans->rx_mpdu_cmd);
-
- clear_bit(STATUS_FW_ERROR, &trans->status);
- ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
- if (ret == 0)
- trans->state = IWL_TRANS_FW_STARTED;
-
- return ret;
-}
-
-static inline void iwl_trans_stop_device(struct iwl_trans *trans)
-{
- might_sleep();
-
- trans->ops->stop_device(trans);
-
- trans->state = IWL_TRANS_NO_FW;
-}
-
-static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
- bool reset)
-{
- might_sleep();
- if (!trans->ops->d3_suspend)
- return -EOPNOTSUPP;
-
- return trans->ops->d3_suspend(trans, test, reset);
-}
-
-static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset)
-{
- might_sleep();
- if (!trans->ops->d3_resume)
- return -EOPNOTSUPP;
-
- return trans->ops->d3_resume(trans, status, test, reset);
-}
-
-static inline struct iwl_trans_dump_data *
+struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
- void *sanitize_ctx)
-{
- if (!trans->ops->dump_data)
- return NULL;
- return trans->ops->dump_data(trans, dump_mask,
- sanitize_ops, sanitize_ctx);
-}
+ void *sanitize_ctx);
static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
@@ -1295,109 +992,31 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}
-static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int queue)
-{
- if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
- return -EIO;
-
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return -EIO;
- }
-
- return trans->ops->tx(trans, skb, dev_cmd, queue);
-}
-
-static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
- int ssn, struct sk_buff_head *skbs,
- bool is_flush)
-{
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
- }
-
- trans->ops->reclaim(trans, queue, ssn, skbs, is_flush);
-}
-
-static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
- int ptr)
-{
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
- }
-
- trans->ops->set_q_ptrs(trans, queue, ptr);
-}
-
-static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
- bool configure_scd)
-{
- trans->ops->txq_disable(trans, queue, configure_scd);
-}
+int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int queue);
-static inline bool
-iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
- const struct iwl_trans_txq_scd_cfg *cfg,
- unsigned int queue_wdg_timeout)
-{
- might_sleep();
+void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn,
+ struct sk_buff_head *skbs, bool is_flush);
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return false;
- }
+void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr);
- return trans->ops->txq_enable(trans, queue, ssn,
- cfg, queue_wdg_timeout);
-}
+void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
+ bool configure_scd);
-static inline int
-iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
- struct iwl_trans_rxq_dma_data *data)
-{
- if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
- return -EOPNOTSUPP;
+bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
+ const struct iwl_trans_txq_scd_cfg *cfg,
+ unsigned int queue_wdg_timeout);
- return trans->ops->rxq_dma_data(trans, queue, data);
-}
+int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data);
-static inline void
-iwl_trans_txq_free(struct iwl_trans *trans, int queue)
-{
- if (WARN_ON_ONCE(!trans->ops->txq_free))
- return;
+void iwl_trans_txq_free(struct iwl_trans *trans, int queue);
- trans->ops->txq_free(trans, queue);
-}
-
-static inline int
-iwl_trans_txq_alloc(struct iwl_trans *trans,
- u32 flags, u32 sta_mask, u8 tid,
- int size, unsigned int wdg_timeout)
-{
- might_sleep();
-
- if (WARN_ON_ONCE(!trans->ops->txq_alloc))
- return -EOPNOTSUPP;
-
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return -EIO;
- }
-
- return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
- size, wdg_timeout);
-}
+int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+ u8 tid, int size, unsigned int wdg_timeout);
-static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
- int queue, bool shared_mode)
-{
- if (trans->ops->txq_set_shared_mode)
- trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
-}
+void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
+ int txq_id, bool shared_mode);
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
int fifo, int sta_id, int tid,
@@ -1430,78 +1049,32 @@ void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}
-static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
- unsigned long txqs,
- bool freeze)
-{
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return;
- }
+void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze);
- if (trans->ops->freeze_txq_timer)
- trans->ops->freeze_txq_timer(trans, txqs, freeze);
-}
+int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs);
-static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
- u32 txqs)
-{
- if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
- return -EOPNOTSUPP;
+int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue);
- /* No need to wait if the firmware is not alive */
- if (trans->state != IWL_TRANS_FW_ALIVE) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return -EIO;
- }
+void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val);
- return trans->ops->wait_tx_queues_empty(trans, txqs);
-}
+void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val);
-static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
-{
- if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
- return -EOPNOTSUPP;
+u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs);
- if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
- IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
- return -EIO;
- }
+u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs);
- return trans->ops->wait_txq_empty(trans, queue);
-}
+void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
-static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
-{
- trans->ops->write8(trans, ofs, val);
-}
+int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
-static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
-{
- trans->ops->write32(trans, ofs, val);
-}
-
-static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
-{
- return trans->ops->read32(trans, ofs);
-}
+int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val);
-static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
-{
- return trans->ops->read_prph(trans, ofs);
-}
-
-static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
- u32 val)
-{
- return trans->ops->write_prph(trans, ofs, val);
-}
-
-static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
-{
- return trans->ops->read_mem(trans, addr, buf, dwords);
-}
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+void iwl_trans_debugfs_cleanup(struct iwl_trans *trans);
+#endif
#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \
do { \
@@ -1510,14 +1083,8 @@ static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
} while (0)
-static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
- u32 dst_addr, u64 src_addr,
- u32 byte_cnt)
-{
- if (trans->ops->imr_dma_data)
- return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt);
- return 0;
-}
+int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr,
+ u64 src_addr, u32 byte_cnt);
static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
@@ -1529,11 +1096,8 @@ static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
return value;
}
-static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords)
-{
- return trans->ops->write_mem(trans, addr, buf, dwords);
-}
+int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords);
static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
u32 val)
@@ -1541,36 +1105,21 @@ static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
return iwl_trans_write_mem(trans, addr, &val, 1);
}
-static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
-{
- if (trans->ops->set_pmi)
- trans->ops->set_pmi(trans, state);
-}
+void iwl_trans_set_pmi(struct iwl_trans *trans, bool state);
-static inline int iwl_trans_sw_reset(struct iwl_trans *trans,
- bool retake_ownership)
-{
- if (trans->ops->sw_reset)
- return trans->ops->sw_reset(trans, retake_ownership);
- return 0;
-}
+int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership);
-static inline void
-iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
-{
- trans->ops->set_bits_mask(trans, reg, mask, value);
-}
+void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value);
+
+bool _iwl_trans_grab_nic_access(struct iwl_trans *trans);
#define iwl_trans_grab_nic_access(trans) \
__cond_lock(nic_access, \
- likely((trans)->ops->grab_nic_access(trans)))
+ likely(_iwl_trans_grab_nic_access(trans)))
-static inline void __releases(nic_access)
-iwl_trans_release_nic_access(struct iwl_trans *trans)
-{
- trans->ops->release_nic_access(trans);
- __release(nic_access);
-}
+void __releases(nic_access)
+iwl_trans_release_nic_access(struct iwl_trans *trans);
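
The grab/release pair keeps its sparse context tracking across the move out of line: only the condition macro and the __releases() annotation need to stay in the header. A minimal sketch of the pattern with a hypothetical context name (the annotations are the standard sparse ones from linux/compiler_types.h):

#include <linux/compiler.h>	/* __cond_lock(), __releases() under sparse */

struct demo;				/* hypothetical; stands in for iwl_trans */

bool _demo_grab(struct demo *d);	/* does the real work; true = acquired */

/* Tell sparse that a true return acquires the context... */
#define demo_grab(d) \
	__cond_lock(demo_ctx, likely(_demo_grab(d)))

/* ...and that this call releases it, keeping the checker balanced. */
void __releases(demo_ctx)
demo_release(struct demo *d);
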
static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
{
@@ -1589,44 +1138,24 @@ static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
return trans->state == IWL_TRANS_FW_ALIVE;
}
-static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
-{
- if (trans->ops->sync_nmi)
- trans->ops->sync_nmi(trans);
-}
+void iwl_trans_sync_nmi(struct iwl_trans *trans);
void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
u32 sw_err_bit);
-static inline int iwl_trans_load_pnvm(struct iwl_trans *trans,
- const struct iwl_pnvm_image *pnvm_data,
- const struct iwl_ucode_capabilities *capa)
-{
- return trans->ops->load_pnvm(trans, pnvm_data, capa);
-}
+int iwl_trans_load_pnvm(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *pnvm_data,
+ const struct iwl_ucode_capabilities *capa);
-static inline void iwl_trans_set_pnvm(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
-{
- if (trans->ops->set_pnvm)
- trans->ops->set_pnvm(trans, capa);
-}
+void iwl_trans_set_pnvm(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
-static inline int iwl_trans_load_reduce_power
- (struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads,
- const struct iwl_ucode_capabilities *capa)
-{
- return trans->ops->load_reduce_power(trans, payloads, capa);
-}
+int iwl_trans_load_reduce_power(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa);
-static inline void
-iwl_trans_set_reduce_power(struct iwl_trans *trans,
- const struct iwl_ucode_capabilities *capa)
-{
- if (trans->ops->set_reduce_power)
- trans->ops->set_reduce_power(trans, capa);
-}
+void iwl_trans_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
@@ -1634,18 +1163,13 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}
-static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
-{
- if (trans->ops->interrupts)
- trans->ops->interrupts(trans, enable);
-}
+void iwl_trans_interrupts(struct iwl_trans *trans, bool enable);
/*****************************************************
* transport helper functions
*****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
- const struct iwl_trans_ops *ops,
const struct iwl_cfg_trans_params *cfg_trans);
int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);
@@ -1656,10 +1180,13 @@ static inline bool iwl_trans_is_hw_error_value(u32 val)
}
/*****************************************************
-* driver (transport) register/unregister functions
-******************************************************/
+ * PCIe handling
+ *****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd);
+
#endif /* __iwl_trans_h__ */
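
Most of the iwl-trans.h churn above is one mechanical transformation: each static inline wrapper that dispatched through trans->ops becomes a bare declaration, and the body (state checks, might_sleep(), optional-op fallbacks) moves into a .c file. A before/after sketch with hypothetical names, not the driver's real types:

#include <linux/errno.h>

struct demo_dev;
struct demo_ops { int (*start)(struct demo_dev *d); };
struct demo_dev { const struct demo_ops *ops; };

/* Before: every caller inlines the ops-table dispatch. */
static inline int demo_start_inline(struct demo_dev *d)
{
	if (!d->ops->start)		/* optional op */
		return -EOPNOTSUPP;
	return d->ops->start(d);
}

/* After: the header only declares... */
int demo_start(struct demo_dev *d);

/* ...and the definition lives in one .c file, so the dispatch (or a
 * direct call, once the ops table itself goes away, as the changed
 * iwl_trans_alloc() signature suggests) can change without touching
 * any caller. */
int demo_start(struct demo_dev *d)
{
	if (!d->ops->start)
		return -EOPNOTSUPP;
	return d->ops->start(d);
}
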
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/Makefile b/drivers/net/wireless/intel/iwlwifi/mei/Makefile
index 8e3ef0347db7..e05f9421be4a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mei/Makefile
@@ -5,4 +5,4 @@ iwlmei-y += net.o
iwlmei-$(CONFIG_IWLWIFI_DEVICE_TRACING) += trace.o
CFLAGS_trace.o := -I$(src)
-ccflags-y += -I $(srctree)/$(src)/../
+ccflags-y += -I $(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
index 1f3c885aeb65..4900de3cc0d3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
+++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (C) 2021-2023 Intel Corporation
+ * Copyright (C) 2021-2024 Intel Corporation
*/
#ifndef __iwl_mei_h__
@@ -456,8 +456,11 @@ void iwl_mei_device_state(bool up);
/**
* iwl_mei_pldr_req() - must be called before loading the fw
*
- * Return: 0 if the PLDR flow was successful and the fw can be loaded, negative
- * value otherwise.
+ * Requests that the ME release its potential bus access to
+ * the WiFi NIC so that the device can safely undergo product reset.
+ *
+ * Return: 0 if the request was successful and the device can be
+ * reset, a negative error value otherwise
*/
int iwl_mei_pldr_req(void);
@@ -488,7 +491,7 @@ static inline void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_add
static inline void iwl_mei_set_country_code(u16 mcc)
{}
-static inline void iwl_mei_set_power_limit(__le16 *power_limit)
+static inline void iwl_mei_set_power_limit(const __le16 *power_limit)
{}
static inline int iwl_mei_register(void *priv,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
index 593fe28d89cf..8873904f51ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_IWLMVM) += iwlmvm.o
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += tests/
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o rxmq.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o rs-fw.o
@@ -15,4 +16,4 @@ iwlmvm-$(CONFIG_IWLWIFI_LEDS) += led.o
iwlmvm-$(CONFIG_PM) += d3.o
iwlmvm-$(CONFIG_IWLMEI) += vendor-cmd.o
-ccflags-y += -I $(srctree)/$(src)/../
+subdir-ccflags-y += -I $(src)/../
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
index 535edb51d1c0..ad3e14a0d043 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/coex.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2013-2014, 2018-2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2020, 2022-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
*/
#include <linux/ieee80211.h>
@@ -219,15 +219,13 @@ struct iwl_bt_iterator_data {
static inline
void iwl_mvm_bt_coex_enable_rssi_event(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
+ struct iwl_mvm_vif_link_info *link_info,
bool enable, int rssi)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- mvmvif->bf_data.last_bt_coex_event = rssi;
- mvmvif->bf_data.bt_coex_max_thold =
+ link_info->bf_data.last_bt_coex_event = rssi;
+ link_info->bf_data.bt_coex_max_thold =
enable ? -IWL_MVM_BT_COEX_EN_RED_TXP_THRESH : 0;
- mvmvif->bf_data.bt_coex_min_thold =
+ link_info->bf_data.bt_coex_min_thold =
enable ? -IWL_MVM_BT_COEX_DIS_RED_TXP_THRESH : 0;
}
@@ -255,44 +253,6 @@ static void iwl_mvm_bt_coex_tcm_based_ci(struct iwl_mvm *mvm,
swap(data->primary, data->secondary);
}
-static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, bool enable)
-{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int link_id;
-
- lockdep_assert_held(&mvm->mutex);
-
- if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
- return;
-
- /* Done already */
- if (mvmvif->bt_coex_esr_disabled == !enable)
- return;
-
- mvmvif->bt_coex_esr_disabled = !enable;
-
- /* Nothing to do */
- if (mvmvif->esr_active == enable)
- return;
-
- if (enable) {
- /* Try to re-enable eSR*/
- iwl_mvm_mld_select_links(mvm, vif, false);
- return;
- }
-
- /*
- * Find the primary link, as we want to switch to it and drop the
- * secondary one.
- */
- link_id = iwl_mvm_mld_get_primary_link(mvm, vif, vif->active_links);
- WARN_ON(link_id < 0);
-
- ieee80211_set_active_links_async(vif,
- vif->active_links & BIT(link_id));
-}
-
/*
* This function receives the LB link id and checks if eSR should be
* enabled or disabled (due to BT coex)
@@ -300,35 +260,25 @@ static void iwl_mvm_bt_coex_enable_esr(struct iwl_mvm *mvm,
bool
iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- int link_id, int primary_link)
+ s32 link_rssi,
+ bool primary)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm_vif_link_info *link_info = mvmvif->link[link_id];
bool have_wifi_loss_rate =
iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
BT_PROFILE_NOTIFICATION, 0) > 4;
- s8 link_rssi = 0;
u8 wifi_loss_rate;
- lockdep_assert_held(&mvm->mutex);
-
if (mvm->last_bt_notif.wifi_loss_low_rssi == BT_OFF)
return true;
- /* If LB link is the primary one we should always disable eSR */
- if (link_id == primary_link)
+ if (primary)
return false;
/* The feature is not supported */
if (!have_wifi_loss_rate)
return true;
- /*
- * We might not have a link_info when checking whether we can
- * (re)enable eSR - the LB link might not exist yet
- */
- if (link_info)
- link_rssi = (s8)link_info->beacon_stats.avg_signal;
/*
* In case we don't know the RSSI - take the lower wifi loss,
@@ -338,7 +288,7 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
if (!link_rssi)
wifi_loss_rate = mvm->last_bt_notif.wifi_loss_mid_high_rssi;
- else if (!mvmvif->bt_coex_esr_disabled)
+ else if (mvmvif->esr_active)
/* RSSI needs to get really low to disable eSR... */
wifi_loss_rate =
link_rssi <= -IWL_MVM_BT_COEX_DISABLE_ESR_THRESH ?
@@ -354,23 +304,24 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
}
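
The reworked check reduces to a short decision ladder; a standalone restatement (hypothetical helper, same check order as above, where "primary" means the low-band link under evaluation is the primary one):

/* true = eSR may stay enabled despite BT */
static bool demo_esr_allowed(bool bt_off, bool primary,
			     bool have_loss_rate, u8 wifi_loss_rate)
{
	if (bt_off)
		return true;	/* BT off: nothing to protect */
	if (primary)
		return false;	/* LB link is primary: always drop eSR */
	if (!have_loss_rate)
		return true;	/* old firmware: feature unsupported */

	/* wifi_loss_rate was picked from the low/mid-high RSSI bucket */
	return wifi_loss_rate <= IWL_MVM_BT_COEX_WIFI_LOSS_THRESH;
}
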
-void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id)
+void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id)
{
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
- usable_links);
- bool enable;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link = mvmvif->link[link_id];
- /* Not assoc, not MLD vif or only one usable link */
- if (primary_link < 0)
+ if (!ieee80211_vif_is_mld(vif) ||
+ !iwl_mvm_vif_from_mac80211(vif)->authorized ||
+ WARN_ON(!link))
return;
- enable = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
- primary_link);
-
- iwl_mvm_bt_coex_enable_esr(mvm, vif, enable);
+ if (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif,
+ (s8)link->beacon_stats.avg_signal,
+ link_id == iwl_mvm_get_primary_link(vif)))
+ /* In case we decided to exit eSR, stay on the primary link */
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_COEX,
+ iwl_mvm_get_primary_link(vif));
}
static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
@@ -412,13 +363,13 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
smps_mode, link_id);
iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id,
false);
- /* FIXME: should this be per link? */
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false,
+ 0);
}
return;
}
- iwl_mvm_bt_coex_update_vif_esr(mvm, vif, link_id);
+ iwl_mvm_bt_coex_update_link_esr(mvm, vif, link_id);
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_COEX_SCHEMA_2))
min_ag_for_static_smps = BT_VERY_HIGH_TRAFFIC;
@@ -508,13 +459,12 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
le32_to_cpu(mvm->last_bt_notif.bt_activity_grading) == BT_OFF ||
!vif->cfg.assoc) {
iwl_mvm_bt_coex_reduced_txp(mvm, link_info->ap_sta_id, false);
- /* FIXME: should this be per link? */
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, false, 0);
return;
}
/* try to get the avg rssi from fw */
- ave_rssi = mvmvif->bf_data.ave_beacon_signal;
+ ave_rssi = link_info->bf_data.ave_beacon_signal;
/* if the RSSI isn't valid, fake it is very low */
if (!ave_rssi)
@@ -530,7 +480,7 @@ static void iwl_mvm_bt_notif_per_link(struct iwl_mvm *mvm,
}
/* Begin to monitor the RSSI: it may influence the reduced Tx power */
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, true, ave_rssi);
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, link_info, true, ave_rssi);
}
/* must be called under rcu_read_lock */
@@ -555,10 +505,6 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
return;
}
- /* When BT is off this will be 0 */
- if (data->notif->wifi_loss_low_rssi == BT_OFF)
- iwl_mvm_bt_coex_enable_esr(mvm, vif, true);
-
for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
iwl_mvm_bt_notif_per_link(mvm, vif, data, link_id);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
index f5122c4678a1..c4c1e67b9ac7 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
- * Copyright (C) 2013-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2013-2014, 2018-2024 Intel Corporation
* Copyright (C) 2015 Intel Deutschland GmbH
*/
#ifndef __MVM_CONSTANTS_H
@@ -14,6 +14,8 @@
#define IWL_MVM_BT_COEX_DISABLE_ESR_THRESH 69
#define IWL_MVM_BT_COEX_ENABLE_ESR_THRESH 63
#define IWL_MVM_BT_COEX_WIFI_LOSS_THRESH 0
+#define IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC 30
+#define IWL_MVM_TPT_COUNT_WINDOW_SEC 5
#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
@@ -21,7 +23,7 @@
#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
#define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */
#define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */
-#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0
+#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 1
#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
#define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
@@ -54,7 +56,6 @@
#define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1
#define IWL_MVM_TOF_IS_RESPONDER 0
#define IWL_MVM_HW_CSUM_DISABLE 0
-#define IWL_MVM_PARSE_NVM 0
#define IWL_MVM_ADWELL_ENABLE 1
#define IWL_MVM_ADWELL_MAX_BUDGET 0
#define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */
@@ -98,6 +99,7 @@
#define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE
#define IWL_MVM_FTM_INITIATOR_DYNACK true
#define IWL_MVM_FTM_LMR_FEEDBACK_TERMINATE false
+#define IWL_MVM_FTM_TEST_INCORRECT_SAC false
#define IWL_MVM_FTM_R2I_MAX_REP 7
#define IWL_MVM_FTM_I2R_MAX_REP 7
#define IWL_MVM_FTM_R2I_MAX_STS 1
@@ -112,7 +114,6 @@
#define IWL_MVM_D3_DEBUG false
#define IWL_MVM_USE_TWT true
#define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 20
-#define IWL_MVM_USE_NSSN_SYNC 0
#define IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH false
#define IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA 40
/* 20016 pSec is 6 meter RTT, meaning 3 meter range */
@@ -122,6 +123,18 @@
#define IWL_MVM_DISABLE_AP_FILS false
#define IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */
#define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */
+#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16
#define IWL_MVM_AUTO_EML_ENABLE true
+#define IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH 7
+#define IWL_MVM_HIGH_RSSI_THRESH_20MHZ -67
+#define IWL_MVM_LOW_RSSI_THRESH_20MHZ -71
+#define IWL_MVM_HIGH_RSSI_THRESH_40MHZ -64
+#define IWL_MVM_LOW_RSSI_THRESH_40MHZ -67
+#define IWL_MVM_HIGH_RSSI_THRESH_80MHZ -61
+#define IWL_MVM_LOW_RSSI_THRESH_80MHZ -74
+#define IWL_MVM_HIGH_RSSI_THRESH_160MHZ -58
+#define IWL_MVM_LOW_RSSI_THRESH_160MHZ -61
+
+#define IWL_MVM_ENTER_ESR_TPT_THRESH 400
#endif /* __MVM_CONSTANTS_H */
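
The new RSSI constants come in high/low pairs per channel width, which reads as hysteresis: a higher bar for entering EMLSR than for staying in it. A hypothetical selector to make the pairing explicit (the pairing itself is an inference; this header only defines the values):

static int demo_esr_rssi_thresh(int bw_mhz, bool entering)
{
	switch (bw_mhz) {
	case 20:
		return entering ? IWL_MVM_HIGH_RSSI_THRESH_20MHZ
				: IWL_MVM_LOW_RSSI_THRESH_20MHZ;
	case 40:
		return entering ? IWL_MVM_HIGH_RSSI_THRESH_40MHZ
				: IWL_MVM_LOW_RSSI_THRESH_40MHZ;
	case 80:
		return entering ? IWL_MVM_HIGH_RSSI_THRESH_80MHZ
				: IWL_MVM_LOW_RSSI_THRESH_80MHZ;
	case 160:
		return entering ? IWL_MVM_HIGH_RSSI_THRESH_160MHZ
				: IWL_MVM_LOW_RSSI_THRESH_160MHZ;
	default:
		return -128;	/* unknown width: threshold low enough to pass */
	}
}
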
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 52518a47554e..b4d650583ac2 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -595,6 +595,12 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
void *_data)
{
struct wowlan_key_gtk_type_iter *data = _data;
+ __le32 *cipher = NULL;
+
+ if (key->keyidx == 4 || key->keyidx == 5)
+ cipher = &data->kek_kck_cmd->igtk_cipher;
+ if (key->keyidx == 6 || key->keyidx == 7)
+ cipher = &data->kek_kck_cmd->bigtk_cipher;
switch (key->cipher) {
default:
@@ -606,10 +612,13 @@ static void iwl_mvm_wowlan_gtk_type_iter(struct ieee80211_hw *hw,
return;
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
- data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
+ if (cipher)
+ *cipher = cpu_to_le32(STA_KEY_FLG_GCMP);
return;
case WLAN_CIPHER_SUITE_AES_CMAC:
- data->kek_kck_cmd->igtk_cipher = cpu_to_le32(STA_KEY_FLG_CCM);
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ if (cipher)
+ *cipher = cpu_to_le32(STA_KEY_FLG_CCM);
return;
case WLAN_CIPHER_SUITE_CCMP:
if (!sta)
@@ -1152,7 +1161,8 @@ iwl_mvm_wowlan_config(struct iwl_mvm *mvm,
if (ret)
return ret;
- return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0);
+ return iwl_mvm_send_proto_offload(mvm, vif, false, true, 0,
+ mvm_link->ap_sta_id);
}
static int
@@ -1242,7 +1252,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
.data[0] = &d3_cfg_cmd_data,
.len[0] = sizeof(d3_cfg_cmd_data),
};
- int ret, primary_link;
+ int ret;
int len __maybe_unused;
bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);
@@ -1260,28 +1270,9 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (IS_ERR_OR_NULL(vif))
return 1;
- if (hweight16(vif->active_links) > 1) {
- /*
- * Select the 'best' link.
- * May need to revisit, it seems better to not optimize
- * for throughput but rather range, reliability and
- * power here - and select 2.4 GHz ...
- */
- primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
- vif->active_links);
-
- if (WARN_ONCE(primary_link < 0, "no primary link in 0x%x\n",
- vif->active_links))
- primary_link = __ffs(vif->active_links);
-
- ret = ieee80211_set_active_links(vif, BIT(primary_link));
- if (ret)
- return ret;
- } else if (vif->active_links) {
- primary_link = __ffs(vif->active_links);
- } else {
- primary_link = 0;
- }
+ ret = iwl_mvm_block_esr_sync(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
+ if (ret)
+ return ret;
mutex_lock(&mvm->mutex);
@@ -1291,7 +1282,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvmvif = iwl_mvm_vif_from_mac80211(vif);
- mvm_link = mvmvif->link[primary_link];
+ mvm_link = mvmvif->link[iwl_mvm_get_primary_link(vif)];
if (WARN_ON_ONCE(!mvm_link)) {
ret = -EINVAL;
goto out_noreset;
@@ -1472,6 +1463,9 @@ struct iwl_wowlan_status_data {
struct iwl_multicast_key_data igtk;
struct iwl_multicast_key_data bigtk[WOWLAN_BIGTK_KEYS_NUM];
+ int num_mlo_keys;
+ struct iwl_wowlan_mlo_gtk mlo_keys[WOWLAN_MAX_MLO_KEYS];
+
u8 *wake_packet;
};
@@ -1830,6 +1824,10 @@ static void iwl_mvm_d3_find_last_keys(struct ieee80211_hw *hw,
void *_data)
{
struct iwl_mvm_d3_gtk_iter_data *data = _data;
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+
+ if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
+ return;
if (data->unhandled_cipher)
return;
@@ -1918,6 +1916,10 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
struct iwl_mvm_d3_gtk_iter_data *data = _data;
struct iwl_wowlan_status_data *status = data->status;
s8 keyidx;
+ int link_id = vif->active_links ? __ffs(vif->active_links) : -1;
+
+ if (link_id >= 0 && key->link_id >= 0 && link_id != key->link_id)
+ return;
if (data->unhandled_cipher)
return;
@@ -1973,6 +1975,169 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
}
}
+struct iwl_mvm_d3_mlo_old_keys {
+ u32 cipher[IEEE80211_MLD_MAX_NUM_LINKS][WOWLAN_MLO_GTK_KEY_NUM_TYPES];
+ struct ieee80211_key_conf *key[IEEE80211_MLD_MAX_NUM_LINKS][8];
+};
+
+static void iwl_mvm_mlo_key_ciphers(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct iwl_mvm_d3_mlo_old_keys *old_keys = data;
+ enum iwl_wowlan_mlo_gtk_type key_type;
+
+ if (key->link_id < 0)
+ return;
+
+ if (WARN_ON(key->link_id >= IEEE80211_MLD_MAX_NUM_LINKS ||
+ key->keyidx >= 8))
+ return;
+
+ if (WARN_ON(old_keys->key[key->link_id][key->keyidx]))
+ return;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ key_type = WOWLAN_MLO_GTK_KEY_TYPE_GTK;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ if (key->keyidx == 4 || key->keyidx == 5) {
+ key_type = WOWLAN_MLO_GTK_KEY_TYPE_IGTK;
+ break;
+ } else if (key->keyidx == 6 || key->keyidx == 7) {
+ key_type = WOWLAN_MLO_GTK_KEY_TYPE_BIGTK;
+ break;
+ }
+ return;
+ default:
+ /* ignore WEP/TKIP or unknown ciphers */
+ return;
+ }
+
+ old_keys->cipher[key->link_id][key_type] = key->cipher;
+ old_keys->key[key->link_id][key->keyidx] = key;
+}
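
iwl_mvm_mlo_key_ciphers() is a single bucketing pass: one ieee80211_iter_keys() walk files each installed key into the per-(link, type) cipher table and the per-(link, keyidx) pointer table, so the rekey loop below can resolve both in O(1). The classification rule on its own (hypothetical helper mirroring the switch above):

static int demo_mlo_key_type(u32 cipher, s8 keyidx)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		return WOWLAN_MLO_GTK_KEY_TYPE_GTK;
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
	case WLAN_CIPHER_SUITE_AES_CMAC:
		if (keyidx == 4 || keyidx == 5)
			return WOWLAN_MLO_GTK_KEY_TYPE_IGTK;
		if (keyidx == 6 || keyidx == 7)
			return WOWLAN_MLO_GTK_KEY_TYPE_BIGTK;
		return -EINVAL;	/* management cipher at a data key index */
	default:
		return -EINVAL;	/* WEP/TKIP/unknown: ignored */
	}
}
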
+
+static bool iwl_mvm_mlo_gtk_rekey(struct iwl_wowlan_status_data *status,
+ struct ieee80211_vif *vif,
+ struct iwl_mvm *mvm)
+{
+ int i;
+ struct iwl_mvm_d3_mlo_old_keys *old_keys;
+ bool ret = true;
+
+ IWL_DEBUG_WOWLAN(mvm, "Num of MLO Keys: %d\n", status->num_mlo_keys);
+ if (!status->num_mlo_keys)
+ return true;
+
+ old_keys = kzalloc(sizeof(*old_keys), GFP_KERNEL);
+ if (!old_keys)
+ return false;
+
+ /* find the cipher for each mlo key */
+ ieee80211_iter_keys(mvm->hw, vif, iwl_mvm_mlo_key_ciphers, old_keys);
+
+ for (i = 0; i < status->num_mlo_keys; i++) {
+ struct iwl_wowlan_mlo_gtk *mlo_key = &status->mlo_keys[i];
+ struct ieee80211_key_conf *key, *old_key;
+ struct ieee80211_key_seq seq;
+ struct {
+ struct ieee80211_key_conf conf;
+ u8 key[32];
+ } conf = {};
+ u16 flags = le16_to_cpu(mlo_key->flags);
+ int j, link_id, key_id, key_type;
+
+ link_id = u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_LINK_ID_MSK);
+ key_id = u16_get_bits(flags, WOWLAN_MLO_GTK_FLAG_KEY_ID_MSK);
+ key_type = u16_get_bits(flags,
+ WOWLAN_MLO_GTK_FLAG_KEY_TYPE_MSK);
+
+ if (!(vif->valid_links & BIT(link_id)))
+ continue;
+
+ if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS ||
+ key_id >= 8 ||
+ key_type >= WOWLAN_MLO_GTK_KEY_NUM_TYPES))
+ continue;
+
+ conf.conf.cipher = old_keys->cipher[link_id][key_type];
+ /* no cipher recorded for this link/type; arguably a WARN_ON candidate */
+ if (!conf.conf.cipher)
+ continue;
+
+ conf.conf.keylen = 0;
+ switch (conf.conf.cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ conf.conf.keylen = WLAN_KEY_LEN_CCMP;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_128;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_GMAC_256;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ conf.conf.keylen = WLAN_KEY_LEN_AES_CMAC;
+ break;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+ conf.conf.keylen = WLAN_KEY_LEN_BIP_CMAC_256;
+ break;
+ }
+
+ if (WARN_ON(!conf.conf.keylen ||
+ conf.conf.keylen > sizeof(conf.key)))
+ continue;
+
+ memcpy(conf.conf.key, mlo_key->key, conf.conf.keylen);
+ conf.conf.keyidx = key_id;
+
+ old_key = old_keys->key[link_id][key_id];
+ if (old_key) {
+ IWL_DEBUG_WOWLAN(mvm,
+ "Remove MLO key id %d, link id %d\n",
+ key_id, link_id);
+ ieee80211_remove_key(old_key);
+ }
+
+ IWL_DEBUG_WOWLAN(mvm, "Add MLO key id %d, link id %d\n",
+ key_id, link_id);
+ key = ieee80211_gtk_rekey_add(vif, &conf.conf, link_id);
+ if (WARN_ON(IS_ERR(key))) {
+ ret = false;
+ goto out;
+ }
+
+ /*
+ * mac80211 expects the PN in big-endian. Also note that seq
+ * is a union of all cipher types (CCMP, GCMP, CMAC, GMAC),
+ * and they all have the same 6-byte PN field, so just copy
+ * it to ccmp.pn.
+ */
+ for (j = 5; j >= 0; j--)
+ seq.ccmp.pn[5 - j] = mlo_key->pn[j];
+
+ /* group keys are non-QoS and use TID 0 */
+ ieee80211_set_key_rx_seq(key, 0, &seq);
+ }
+
+out:
+ kfree(old_keys);
+ return ret;
+}
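
The reversal loop near the end is easy to misread, so here it is in isolation. The implied assumption (the loop itself, not any stated firmware contract, is the evidence) is that the notification carries the 6-byte PN in the opposite byte order from what mac80211's key_seq expects:

/* dst[0] = src[5], dst[1] = src[4], ..., dst[5] = src[0] */
static void demo_copy_pn_reversed(u8 dst[6], const u8 src[6])
{
	int j;

	for (j = 5; j >= 0; j--)
		dst[5 - j] = src[j];
}
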
+
static bool iwl_mvm_gtk_rekey(struct iwl_wowlan_status_data *status,
struct ieee80211_vif *vif,
struct iwl_mvm *mvm, u32 gtk_cipher)
@@ -2176,13 +2341,17 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
return false;
}
+ if (!iwl_mvm_mlo_gtk_rekey(status, vif, mvm))
+ return false;
+
ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
(void *)&replay_ctr, GFP_KERNEL);
}
out:
if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP,
- WOWLAN_GET_STATUSES, 0) < 10) {
+ WOWLAN_GET_STATUSES,
+ IWL_FW_CMD_VER_UNKNOWN) < 10) {
mvmvif->seqno_valid = true;
/* +0x10 because the set API expects next-to-use, not last-used */
mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
@@ -2303,9 +2472,10 @@ static void iwl_mvm_convert_bigtk(struct iwl_wowlan_status_data *status,
static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
struct iwl_wowlan_info_notif *data,
struct iwl_wowlan_status_data *status,
- u32 len)
+ u32 len, bool has_mlo_keys)
{
u32 i;
+ u32 expected_len = sizeof(*data);
if (!data) {
IWL_ERR(mvm, "iwl_wowlan_info_notif data is NULL\n");
@@ -2313,12 +2483,19 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
return;
}
- if (len < sizeof(*data)) {
+ if (has_mlo_keys)
+ expected_len += (data->num_mlo_link_keys *
+ sizeof(status->mlo_keys[0]));
+
+ if (len < expected_len) {
IWL_ERR(mvm, "Invalid WoWLAN info notification!\n");
status = NULL;
return;
}
+ if (mvm->fast_resume)
+ return;
+
iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc);
iwl_mvm_convert_gtk_v3(status, data->gtk);
iwl_mvm_convert_igtk(status, &data->igtk[0]);
@@ -2333,6 +2510,17 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm,
le32_to_cpu(data->num_of_gtk_rekeys);
status->received_beacons = le32_to_cpu(data->received_beacons);
status->tid_tear_down = data->tid_tear_down;
+
+ if (has_mlo_keys && data->num_mlo_link_keys) {
+ status->num_mlo_keys = data->num_mlo_link_keys;
+ if (IWL_FW_CHECK(mvm,
+ status->num_mlo_keys > WOWLAN_MAX_MLO_KEYS,
+ "Too many mlo keys: %d, max %d\n",
+ status->num_mlo_keys, WOWLAN_MAX_MLO_KEYS))
+ status->num_mlo_keys = WOWLAN_MAX_MLO_KEYS;
+ memcpy(status->mlo_keys, data->mlo_gtks,
+ status->num_mlo_keys * sizeof(status->mlo_keys[0]));
+ }
}
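
The length handling above is the usual pattern for firmware notifications with a fixed header plus N trailing records: validate the header, compute the expected length from the advertised count before trusting the tail, then clamp the count to the local array bound before copying. A generic sketch (hypothetical types; IWL_FW_CHECK additionally logs and triggers a dump in the real code):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_notif {
	u8 nrec;		/* number of trailing records */
	u8 rec[];
} __packed;

static int demo_parse_notif(const struct demo_notif *n, size_t len,
			    void *out, size_t rec_sz, u8 max_rec)
{
	size_t expected;
	u8 count;

	if (len < sizeof(*n))
		return -EINVAL;		/* not even a full header */

	count = n->nrec;
	expected = sizeof(*n) + (size_t)count * rec_sz;
	if (len < expected)
		return -EINVAL;		/* truncated record tail */

	if (count > max_rec)
		count = max_rec;	/* clamp before copying, as above */
	memcpy(out, n->rec, (size_t)count * rec_sz);
	return count;
}
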
static void
@@ -2553,6 +2741,12 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
int i;
bool keep = false;
struct iwl_mvm_sta *mvm_ap_sta;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct iwl_mvm_vif_link_info *mvm_link = mvmvif->link[link_id];
+
+ if (WARN_ON(!mvm_link))
+ goto out_unlock;
if (!status)
goto out_unlock;
@@ -2560,8 +2754,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n",
status->wakeup_reasons);
- /* still at hard-coded place 0 for D3 image */
- mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
+ mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, mvm_link->ap_sta_id);
if (!mvm_ap_sta)
goto out_unlock;
@@ -2859,7 +3052,7 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm,
if (iwl_mvm_rt_status(mvm->trans,
mvm->trans->dbg.lmac_error_event_table[0],
&err_id)) {
- if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) {
struct cfg80211_wowlan_wakeup wakeup = {
.rfkill_release = true,
};
@@ -3074,7 +3267,8 @@ static bool iwl_mvm_wait_d3_notif(struct iwl_notif_wait_data *notif_wait,
(void *)pkt->data;
iwl_mvm_parse_wowlan_info_notif(mvm, notif,
- d3_data->status, len);
+ d3_data->status, len,
+ wowlan_info_ver > 3);
}
d3_data->notif_received |= IWL_D3_NOTIF_WOWLAN_INFO;
@@ -3175,7 +3369,7 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test)
return ret;
}
-#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 5)
+#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 3)
static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
struct iwl_d3_data *d3_data)
@@ -3186,12 +3380,22 @@ static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm,
WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF),
WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
};
+ static const u16 d3_fast_resume_notif[] = {
+ WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION)
+ };
struct iwl_notification_wait wait_d3_notif;
int ret;
- iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
- d3_resume_notif, ARRAY_SIZE(d3_resume_notif),
- iwl_mvm_wait_d3_notif, d3_data);
+ if (mvm->fast_resume)
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
+ d3_fast_resume_notif,
+ ARRAY_SIZE(d3_fast_resume_notif),
+ iwl_mvm_wait_d3_notif, d3_data);
+ else
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif,
+ d3_resume_notif,
+ ARRAY_SIZE(d3_resume_notif),
+ iwl_mvm_wait_d3_notif, d3_data);
ret = iwl_mvm_resume_firmware(mvm, d3_data->test);
if (ret) {
@@ -3272,6 +3476,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
goto err;
}
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_WOWLAN);
+
/* after the successful handshake, we're out of D3 */
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
@@ -3374,6 +3580,68 @@ void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled)
device_set_wakeup_enable(mvm->trans->dev, enabled);
}
+void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
+{
+ struct iwl_d3_manager_config d3_cfg_cmd_data = {};
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_WOWLAN(mvm, "Starting fast suspend flow\n");
+
+ mvm->fast_resume = true;
+ set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+
+ WARN_ON(iwl_mvm_power_update_device(mvm));
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3;
+ ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SEND_IN_D3,
+ sizeof(d3_cfg_cmd_data), &d3_cfg_cmd_data);
+ if (ret)
+ IWL_ERR(mvm,
+ "fast suspend: couldn't send D3_CONFIG_CMD %d\n", ret);
+
+ WARN_ON(iwl_mvm_power_update_mac(mvm));
+
+ ret = iwl_trans_d3_suspend(mvm->trans, false, false);
+ if (ret)
+ IWL_ERR(mvm, "fast suspend: trans_d3_suspend failed %d\n", ret);
+}
+
+int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
+{
+ struct iwl_d3_data d3_data = {
+ .notif_expected =
+ IWL_D3_NOTIF_D3_END_NOTIF,
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_WOWLAN(mvm, "Starting the fast resume flow\n");
+
+ mvm->last_reset_or_resume_time_jiffies = jiffies;
+ iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt);
+
+ if (iwl_mvm_check_rt_status(mvm, NULL)) {
+ set_bit(STATUS_FW_ERROR, &mvm->trans->status);
+ iwl_mvm_dump_nic_error_log(mvm);
+ iwl_dbg_tlv_time_point(&mvm->fwrt,
+ IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL);
+ iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
+ false, 0);
+ return -ENODEV;
+ }
+ ret = iwl_mvm_d3_notif_wait(mvm, &d3_data);
+ clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+ mvm->fast_resume = false;
+
+ if (ret)
+ IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret);
+
+ return ret;
+}
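
Both new entry points assert mvm->mutex held, and the resume side waits only for D3_END_NOTIFICATION rather than the full resume notification set. A hypothetical caller, only to show the intended bracketing:

static int demo_fast_power_cycle(struct iwl_mvm *mvm)
{
	int ret;

	mutex_lock(&mvm->mutex);
	iwl_mvm_fast_suspend(mvm);	/* D3_CONFIG_CMD, then trans D3 entry */
	mutex_unlock(&mvm->mutex);

	/* ...the platform suspends and later resumes here... */

	mutex_lock(&mvm->mutex);
	ret = iwl_mvm_fast_resume(mvm);	/* waits for D3_END_NOTIFICATION only */
	mutex_unlock(&mvm->mutex);

	return ret;
}
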
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
index 7fe57ecd0682..25f07e00db42 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c
@@ -407,7 +407,7 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
};
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- if (mvmvif->bf_data.bf_enabled)
+ if (mvmvif->bf_enabled)
cmd.bf_enable_beacon_filter = cpu_to_le32(1);
else
cmd.bf_enable_beacon_filter = 0;
@@ -692,6 +692,132 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
+static ssize_t iwl_dbgfs_max_tx_op_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u16 value;
+ int ret;
+
+ ret = kstrtou16(buf, 0, &value);
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+ mvmvif->max_tx_op = value;
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_max_tx_op_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ char buf[10];
+ int len;
+
+ mutex_lock(&mvm->mutex);
+ len = scnprintf(buf, sizeof(buf), "%hu\n", mvmvif->max_tx_op);
+ mutex_unlock(&mvm->mutex);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t iwl_dbgfs_int_mlo_scan_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 action;
+ int ret;
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return -EINVAL;
+
+ if (kstrtou32(buf, 0, &action))
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ if (!action) {
+ ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
+ } else if (action == 1) {
+ ret = iwl_mvm_int_mlo_scan(mvm, vif);
+ } else {
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret ?: count;
+}
+
+static ssize_t iwl_dbgfs_esr_disable_reason_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ unsigned long esr_mask;
+ char *buf;
+ int bufsz, pos, i;
+ ssize_t rv;
+
+ mutex_lock(&mvm->mutex);
+ esr_mask = mvmvif->esr_disable_reason;
+ mutex_unlock(&mvm->mutex);
+
+ bufsz = hweight32(esr_mask) * 32 + 40;
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos = scnprintf(buf, bufsz, "EMLSR state: '0x%lx'\nreasons:\n",
+ esr_mask);
+ for_each_set_bit(i, &esr_mask, BITS_PER_LONG)
+ pos += scnprintf(buf + pos, bufsz - pos, " - %s\n",
+ iwl_get_esr_state_string(BIT(i)));
+
+ rv = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return rv;
+}
+
+static ssize_t iwl_dbgfs_esr_disable_reason_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u32 reason;
+ u8 block;
+ int ret;
+
+ ret = sscanf(buf, "%u %hhu", &reason, &block);
+ if (ret < 0)
+ return ret;
+
+ if (hweight16(reason) != 1 || !(reason & IWL_MVM_BLOCK_ESR_REASONS))
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ if (block)
+ iwl_mvm_block_esr(mvm, vif, reason,
+ iwl_mvm_get_primary_link(vif));
+ else
+ iwl_mvm_unblock_esr(mvm, vif, reason);
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -711,6 +837,9 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32);
MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(max_tx_op, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(esr_disable_reason, 32);
void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
@@ -738,6 +867,11 @@ void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400);
+ MVM_DEBUGFS_ADD_FILE_VIF(max_tx_op, mvmvif->dbgfs_dir, 0600);
+ debugfs_create_bool("ftm_unprotected", 0200, mvmvif->dbgfs_dir,
+ &mvmvif->ftm_unprotected);
+ MVM_DEBUGFS_ADD_FILE_VIF(int_mlo_scan, mvmvif->dbgfs_dir, 0200);
+ MVM_DEBUGFS_ADD_FILE_VIF(esr_disable_reason, mvmvif->dbgfs_dir, 0600);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 79f4ac8cbc72..91ca830a7b60 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -151,37 +151,6 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
return ret;
}
-static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm_sta *mvmsta;
- int sta_id, drain, ret;
-
- if (!iwl_mvm_firmware_running(mvm) ||
- mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
- return -EIO;
-
- if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
- return -EINVAL;
- if (sta_id < 0 || sta_id >= mvm->fw->ucode_capa.num_stations)
- return -EINVAL;
- if (drain < 0 || drain > 1)
- return -EINVAL;
-
- mutex_lock(&mvm->mutex);
-
- mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
-
- if (!mvmsta)
- ret = -ENOENT;
- else
- ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
-
- mutex_unlock(&mvm->mutex);
-
- return ret;
-}
-
static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -568,193 +537,12 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
return ret ?: count;
}
-static
-int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf,
- int pos, int bufsz)
-{
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n");
-
- BT_MBOX_PRINT(0, LE_SLAVE_LAT, false);
- BT_MBOX_PRINT(0, LE_PROF1, false);
- BT_MBOX_PRINT(0, LE_PROF2, false);
- BT_MBOX_PRINT(0, LE_PROF_OTHER, false);
- BT_MBOX_PRINT(0, CHL_SEQ_N, false);
- BT_MBOX_PRINT(0, INBAND_S, false);
- BT_MBOX_PRINT(0, LE_MIN_RSSI, false);
- BT_MBOX_PRINT(0, LE_SCAN, false);
- BT_MBOX_PRINT(0, LE_ADV, false);
- BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false);
- BT_MBOX_PRINT(0, OPEN_CON_1, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n");
-
- BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false);
- BT_MBOX_PRINT(1, IP_SR, false);
- BT_MBOX_PRINT(1, LE_MSTR, false);
- BT_MBOX_PRINT(1, AGGR_TRFC_LD, false);
- BT_MBOX_PRINT(1, MSG_TYPE, false);
- BT_MBOX_PRINT(1, SSN, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n");
-
- BT_MBOX_PRINT(2, SNIFF_ACT, false);
- BT_MBOX_PRINT(2, PAG, false);
- BT_MBOX_PRINT(2, INQUIRY, false);
- BT_MBOX_PRINT(2, CONN, false);
- BT_MBOX_PRINT(2, SNIFF_INTERVAL, false);
- BT_MBOX_PRINT(2, DISC, false);
- BT_MBOX_PRINT(2, SCO_TX_ACT, false);
- BT_MBOX_PRINT(2, SCO_RX_ACT, false);
- BT_MBOX_PRINT(2, ESCO_RE_TX, false);
- BT_MBOX_PRINT(2, SCO_DURATION, true);
-
- pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n");
-
- BT_MBOX_PRINT(3, SCO_STATE, false);
- BT_MBOX_PRINT(3, SNIFF_STATE, false);
- BT_MBOX_PRINT(3, A2DP_STATE, false);
- BT_MBOX_PRINT(3, A2DP_SRC, false);
- BT_MBOX_PRINT(3, ACL_STATE, false);
- BT_MBOX_PRINT(3, MSTR_STATE, false);
- BT_MBOX_PRINT(3, OBX_STATE, false);
- BT_MBOX_PRINT(3, OPEN_CON_2, false);
- BT_MBOX_PRINT(3, TRAFFIC_LOAD, false);
- BT_MBOX_PRINT(3, CHL_SEQN_LSB, false);
- BT_MBOX_PRINT(3, INBAND_P, false);
- BT_MBOX_PRINT(3, MSG_TYPE_2, false);
- BT_MBOX_PRINT(3, SSN_2, false);
- BT_MBOX_PRINT(3, UPDATE_REQUEST, true);
-
- return pos;
-}
-
-static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
- char *buf;
- int ret, pos = 0, bufsz = sizeof(char) * 1024;
-
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- mutex_lock(&mvm->mutex);
-
- pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz);
-
- pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n",
- notif->bt_ci_compliance);
- pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n",
- le32_to_cpu(notif->primary_ch_lut));
- pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n",
- le32_to_cpu(notif->secondary_ch_lut));
- pos += scnprintf(buf + pos,
- bufsz - pos, "bt_activity_grading = %d\n",
- le32_to_cpu(notif->bt_activity_grading));
- pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n",
- notif->rrc_status & 0xF);
- pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n",
- notif->ttc_status & 0xF);
-
- pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n",
- IWL_MVM_BT_COEX_SYNC2SCO);
- pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n",
- IWL_MVM_BT_COEX_MPLUT);
-
- mutex_unlock(&mvm->mutex);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
-
- return ret;
-}
-#undef BT_MBOX_PRINT
-
-static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_mvm *mvm = file->private_data;
- struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd;
- char buf[256];
- int bufsz = sizeof(buf);
- int pos = 0;
-
- mutex_lock(&mvm->mutex);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tPrimary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_primary_ci));
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tSecondary Channel Bitmap 0x%016llx\n",
- le64_to_cpu(cmd->bt_secondary_ci));
-
- mutex_unlock(&mvm->mutex);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t
-iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- u32 bt_tx_prio;
-
- if (sscanf(buf, "%u", &bt_tx_prio) != 1)
- return -EINVAL;
- if (bt_tx_prio > 4)
- return -EINVAL;
-
- mvm->bt_tx_prio = bt_tx_prio;
-
- return count;
-}
-
-static ssize_t
-iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf,
- size_t count, loff_t *ppos)
-{
- static const char * const modes_str[BT_FORCE_ANT_MAX] = {
- [BT_FORCE_ANT_DIS] = "dis",
- [BT_FORCE_ANT_AUTO] = "auto",
- [BT_FORCE_ANT_BT] = "bt",
- [BT_FORCE_ANT_WIFI] = "wifi",
- };
- int ret, bt_force_ant_mode;
-
- ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf);
- if (ret < 0)
- return ret;
-
- bt_force_ant_mode = ret;
- ret = 0;
- mutex_lock(&mvm->mutex);
- if (mvm->bt_force_ant_mode == bt_force_ant_mode)
- goto out;
-
- mvm->bt_force_ant_mode = bt_force_ant_mode;
- IWL_DEBUG_COEX(mvm, "Force mode: %s\n",
- modes_str[mvm->bt_force_ant_mode]);
-
- if (iwl_mvm_firmware_running(mvm))
- ret = iwl_mvm_send_bt_init_conf(mvm);
- else
- ret = 0;
-
-out:
- mutex_unlock(&mvm->mutex);
- return ret ?: count;
-}
-
static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
char *buff, *pos, *endpos;
static const size_t bufsz = 1024;
- char _fw_name_pre[FW_NAME_PRE_BUFSIZE];
int ret;
buff = kmalloc(bufsz, GFP_KERNEL);
@@ -764,8 +552,8 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf,
pos = buff;
endpos = pos + bufsz;
- pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n",
- iwl_drv_get_fwname_pre(mvm->trans, _fw_name_pre));
+ pos += scnprintf(pos, endpos - pos, "FW id: %s\n",
+ mvm->fwrt.fw->fw_version);
pos += scnprintf(pos, endpos - pos, "FW: %s\n",
mvm->fwrt.fw->human_readable);
pos += scnprintf(pos, endpos - pos, "Device: %s\n",
@@ -1396,6 +1184,8 @@ static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf,
if (!iwl_mvm_firmware_running(mvm))
return -EIO;
+ IWL_ERR(mvm, "Triggering an NMI from debugfs\n");
+
if (count == 6 && !strcmp(buf, "nolog\n"))
set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status);
@@ -1617,6 +1407,15 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
&beacon_cmd.tim_size,
beacon->data, beacon->len);
+ if (iwl_fw_lookup_cmd_ver(mvm->fw,
+ BEACON_TEMPLATE_CMD, 0) >= 14) {
+ u32 offset = iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len);
+
+ beacon_cmd.btwt_offset = cpu_to_le32(offset);
+ }
+
iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
}
@@ -2155,15 +1954,12 @@ MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(start_ctdp, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
-MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64);
MVM_DEBUGFS_READ_FILE_OPS(nic_temp);
MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_LINK_STA_FILE_OPS(rs_data);
-MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
-MVM_DEBUGFS_READ_FILE_OPS(bt_cmd);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64);
MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
@@ -2173,8 +1969,6 @@ MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver);
MVM_DEBUGFS_READ_FILE_OPS(tas_get_status);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
-MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64);
@@ -2361,7 +2155,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
spin_lock_init(&mvm->drv_stats_lock);
MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400);
@@ -2370,8 +2163,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(start_ctdp, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400);
- MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400);
- MVM_DEBUGFS_ADD_FILE(bt_cmd, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400);
@@ -2379,8 +2170,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
MVM_DEBUGFS_ADD_FILE(fw_system_stats, mvm->debugfs_dir, 0400);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200);
- MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600);
MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600);
@@ -2439,6 +2228,9 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm)
debugfs_create_file("mem", 0600, mvm->debugfs_dir, mvm,
&iwl_dbgfs_mem_ops);
+ debugfs_create_bool("rx_ts_ptp", 0600, mvm->debugfs_dir,
+ &mvm->rx_ts_ptp);
+
/*
* Create a symlink with mac80211. It will be removed when mac80211
* exists (before the opmode exists which removes the target.)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index d84d7e955bb0..afd90a52d4ec 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <linux/etherdevice.h>
#include <linux/math64.h>
@@ -40,6 +40,12 @@ struct iwl_mvm_ftm_pasn_entry {
u32 flags;
};
+struct iwl_mvm_ftm_iter_data {
+ u8 *cipher;
+ u8 *bssid;
+ u8 *tk;
+};
+
int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
u8 *addr, u32 cipher, u8 *tk, u32 tk_len,
u8 *hltk, u32 hltk_len)
@@ -431,47 +437,55 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm,
return 0;
}
-#define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \
+#define FTM_SET_FLAG(flag) (*flags |= \
cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag))
static void
-iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
- struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry_v6 *target)
+iwl_mvm_ftm_set_target_flags(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ __le32 *flags)
{
- memcpy(target->bssid, peer->addr, ETH_ALEN);
- target->burst_period =
- cpu_to_le16(peer->ftm.burst_period);
- target->samples_per_burst = peer->ftm.ftms_per_burst;
- target->num_of_bursts = peer->ftm.num_bursts_exp;
- target->ftmr_max_retries = peer->ftm.ftmr_retries;
- target->initiator_ap_flags = cpu_to_le32(0);
+ *flags = cpu_to_le32(0);
if (peer->ftm.asap)
- FTM_PUT_FLAG(ASAP);
+ FTM_SET_FLAG(ASAP);
if (peer->ftm.request_lci)
- FTM_PUT_FLAG(LCI_REQUEST);
+ FTM_SET_FLAG(LCI_REQUEST);
if (peer->ftm.request_civicloc)
- FTM_PUT_FLAG(CIVIC_REQUEST);
+ FTM_SET_FLAG(CIVIC_REQUEST);
if (IWL_MVM_FTM_INITIATOR_DYNACK)
- FTM_PUT_FLAG(DYN_ACK);
+ FTM_SET_FLAG(DYN_ACK);
if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG)
- FTM_PUT_FLAG(ALGO_LR);
+ FTM_SET_FLAG(ALGO_LR);
else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT)
- FTM_PUT_FLAG(ALGO_FFT);
+ FTM_SET_FLAG(ALGO_FFT);
if (peer->ftm.trigger_based)
- FTM_PUT_FLAG(TB);
+ FTM_SET_FLAG(TB);
else if (peer->ftm.non_trigger_based)
- FTM_PUT_FLAG(NON_TB);
+ FTM_SET_FLAG(NON_TB);
if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) &&
peer->ftm.lmr_feedback)
- FTM_PUT_FLAG(LMR_FEEDBACK);
+ FTM_SET_FLAG(LMR_FEEDBACK);
+}
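For reference, the renamed FTM_SET_FLAG() only ORs a little-endian flag constant into the caller-supplied word; one invocation expands roughly to the line sketched below (ASAP chosen for illustration):

	/* FTM_SET_FLAG(ASAP), per the macro definition above */
	*flags |= cpu_to_le32(IWL_INITIATOR_AP_FLAGS_ASAP);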
+
+static void
+iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v6 *target)
+{
+ memcpy(target->bssid, peer->addr, ETH_ALEN);
+ target->burst_period =
+ cpu_to_le16(peer->ftm.burst_period);
+ target->samples_per_burst = peer->ftm.ftms_per_burst;
+ target->num_of_bursts = peer->ftm.num_bursts_exp;
+ target->ftmr_max_retries = peer->ftm.ftmr_retries;
+ iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
}
static int
@@ -514,21 +528,10 @@ iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm,
return 0;
}
-static int
-iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct cfg80211_pmsr_request_peer *peer,
- struct iwl_tof_range_req_ap_entry_v6 *target)
+static int iwl_mvm_ftm_set_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ u8 *sta_id, __le32 *flags)
{
- int ret;
-
- ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
- &target->format_bw,
- &target->ctrl_ch_position);
- if (ret)
- return ret;
-
- iwl_mvm_ftm_put_target_common(mvm, peer, target);
-
if (vif->cfg.assoc) {
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_sta *sta;
@@ -540,8 +543,8 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN))
continue;
- target->sta_id = mvmvif->link[link_id]->ap_sta_id;
- sta = rcu_dereference(mvm->fw_id_to_mac_id[target->sta_id]);
+ *sta_id = mvmvif->link[link_id]->ap_sta_id;
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[*sta_id]);
if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
rcu_read_unlock();
return PTR_ERR_OR_ZERO(sta);
@@ -549,14 +552,42 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (sta->mfp && (peer->ftm.trigger_based ||
peer->ftm.non_trigger_based))
- FTM_PUT_FLAG(PMF);
+ FTM_SET_FLAG(PMF);
break;
}
rcu_read_unlock();
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->ftm_unprotected) {
+ *sta_id = IWL_MVM_INVALID_STA;
+ *flags &= ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF);
+ }
+#endif
} else {
- target->sta_id = IWL_MVM_INVALID_STA;
+ *sta_id = IWL_MVM_INVALID_STA;
}
+ return 0;
+}
+
+static int
+iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v6 *target)
+{
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+ &target->format_bw,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ iwl_mvm_ftm_put_target_common(mvm, peer, target);
+
+ iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
+ &target->initiator_ap_flags);
+
/*
* TODO: Beacon interval is currently unknown, so use the common value
* of 100 TUs.
@@ -694,57 +725,62 @@ static void iter(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key,
void *data)
{
- struct iwl_tof_range_req_ap_entry_v6 *target = data;
+ struct iwl_mvm_ftm_iter_data *target = data;
if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN))
return;
WARN_ON(!sta->mfp);
- if (WARN_ON(key->keylen > sizeof(target->tk)))
- return;
-
- memcpy(target->tk, key->key, key->keylen);
- target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
- WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID);
+ target->tk = key->key;
+ *target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher);
+ WARN_ON(*target->cipher == IWL_LOCATION_CIPHER_INVALID);
}
static void
iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_tof_range_req_ap_entry_v7 *target)
+ u8 *bssid, u8 *cipher, u8 *hltk, u8 *tk,
+ u8 *rx_pn, u8 *tx_pn, __le32 *flags)
{
struct iwl_mvm_ftm_pasn_entry *entry;
- u32 flags = le32_to_cpu(target->initiator_ap_flags);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif->ftm_unprotected)
+ return;
+#endif
- if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB |
+ if (!(le32_to_cpu(*flags) & (IWL_INITIATOR_AP_FLAGS_NON_TB |
IWL_INITIATOR_AP_FLAGS_TB)))
return;
lockdep_assert_held(&mvm->mutex);
list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) {
- if (memcmp(entry->addr, target->bssid, sizeof(entry->addr)))
+ if (memcmp(entry->addr, bssid, sizeof(entry->addr)))
continue;
- target->cipher = entry->cipher;
+ *cipher = entry->cipher;
if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK)
- memcpy(target->hltk, entry->hltk, sizeof(target->hltk));
+ memcpy(hltk, entry->hltk, sizeof(entry->hltk));
else
- memset(target->hltk, 0, sizeof(target->hltk));
+ memset(hltk, 0, sizeof(entry->hltk));
if (vif->cfg.assoc &&
- !memcmp(vif->bss_conf.bssid, target->bssid,
- sizeof(target->bssid)))
- ieee80211_iter_keys(mvm->hw, vif, iter, target);
- else
- memcpy(target->tk, entry->tk, sizeof(target->tk));
+ !memcmp(vif->bss_conf.bssid, bssid, ETH_ALEN)) {
+ struct iwl_mvm_ftm_iter_data target;
- memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn));
- memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn));
+ target.bssid = bssid;
+ ieee80211_iter_keys(mvm->hw, vif, iter, &target);
+ } else {
+ memcpy(tk, entry->tk, sizeof(entry->tk));
+ }
- target->initiator_ap_flags |=
- cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED);
+ memcpy(rx_pn, entry->rx_pn, sizeof(entry->rx_pn));
+ memcpy(tx_pn, entry->tx_pn, sizeof(entry->tx_pn));
+
+ FTM_SET_FLAG(SECURED);
return;
}
}
@@ -758,7 +794,11 @@ iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (err)
return err;
- iwl_mvm_ftm_set_secured_ranging(mvm, vif, target);
+ iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
+ &target->cipher, target->hltk,
+ target->tk, target->rx_pn,
+ target->tx_pn,
+ &target->initiator_ap_flags);
return err;
}
@@ -905,6 +945,105 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm,
return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
}
+static int
+iwl_mvm_ftm_put_target_v10(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request_peer *peer,
+ struct iwl_tof_range_req_ap_entry_v10 *target)
+{
+ u32 i2r_max_sts, flags;
+ int ret;
+
+ ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num,
+ &target->format_bw,
+ &target->ctrl_ch_position);
+ if (ret)
+ return ret;
+
+ memcpy(target->bssid, peer->addr, ETH_ALEN);
+ target->burst_period =
+ cpu_to_le16(peer->ftm.burst_period);
+ target->samples_per_burst = peer->ftm.ftms_per_burst;
+ target->num_of_bursts = peer->ftm.num_bursts_exp;
+ iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags);
+ iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id,
+ &target->initiator_ap_flags);
+ iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid,
+ &target->cipher, target->hltk,
+ target->tk, target->rx_pn,
+ target->tx_pn,
+ &target->initiator_ap_flags);
+
+ i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 :
+ IWL_MVM_FTM_I2R_MAX_STS;
+
+ target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP |
+ (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) |
+ (IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+ target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP |
+ (i2r_max_sts << IWL_LOCATION_MAX_STS_POS) |
+ (IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS);
+
+ if (peer->ftm.non_trigger_based) {
+ target->min_time_between_msr =
+ cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR);
+ target->burst_period =
+ cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR);
+ } else {
+ target->min_time_between_msr = cpu_to_le16(0);
+ }
+
+ target->band =
+ iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band);
+
+ /*
+ * TODO: Beacon interval is currently unknown, so use the common value
+ * of 100 TUs.
+ */
+ target->beacon_interval = cpu_to_le16(100);
+
+ /*
+ * If secure LTF is turned off, replace the flag with PMF only
+ */
+ flags = le32_to_cpu(target->initiator_ap_flags);
+ if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) {
+ if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF)
+ flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED;
+
+ flags |= IWL_INITIATOR_AP_FLAGS_PMF;
+ target->initiator_ap_flags = cpu_to_le32(flags);
+ }
+
+ return 0;
+}
+
+static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_pmsr_request *req)
+{
+ struct iwl_tof_range_req_cmd_v14 cmd;
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD),
+ .dataflags[0] = IWL_HCMD_DFL_DUP,
+ .data[0] = &cmd,
+ .len[0] = sizeof(cmd),
+ };
+ u8 i;
+ int err;
+
+ iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req);
+
+ for (i = 0; i < cmd.num_of_ap; i++) {
+ struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
+ struct iwl_tof_range_req_ap_entry_v10 *target = &cmd.ap[i];
+
+ err = iwl_mvm_ftm_put_target_v10(mvm, vif, peer, target);
+ if (err)
+ return err;
+ }
+
+ return iwl_mvm_ftm_send_cmd(mvm, &hcmd);
+}
+
int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *req)
{
@@ -923,6 +1062,9 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_FW_CMD_VER_UNKNOWN);
switch (cmd_ver) {
+ case 14:
+ err = iwl_mvm_ftm_start_v14(mvm, vif, req);
+ break;
case 13:
err = iwl_mvm_ftm_start_v13(mvm, vif, req);
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index 8e760300a1ab..e4caa362f597 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include <net/cfg80211.h>
#include <linux/etherdevice.h>
@@ -88,7 +88,7 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
static void
iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm,
- struct iwl_tof_responder_config_cmd_v9 *cmd)
+ struct iwl_tof_responder_config_cmd *cmd)
{
/* Up to 2 R2I STS are allowed on the responder */
u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ?
@@ -117,7 +117,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
 * field interpretation is different), so the same struct can be used
* for all cases.
*/
- struct iwl_tof_responder_config_cmd_v9 cmd = {
+ struct iwl_tof_responder_config_cmd cmd = {
.channel_num = chandef->chan->hw_value,
.cmd_valid_fields =
cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO |
@@ -131,8 +131,13 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
+ if (cmd_ver == 10) {
+ cmd.band =
+ iwl_mvm_phy_band_from_nl80211(chandef->chan->band);
+ }
+
/* Use a default of bss_color=1 for now */
- if (cmd_ver == 9) {
+ if (cmd_ver >= 9) {
cmd.cmd_valid_fields |=
cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR |
IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR);
@@ -148,7 +153,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
}
if (cmd_ver >= 8)
- iwl_mvm_ftm_responder_set_ndp(mvm, &cmd);
+ iwl_mvm_ftm_responder_set_ndp(mvm, (void *)&cmd);
if (cmd_ver >= 7)
err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index e1c2b7fc92ab..08c4898c8f1a 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -28,9 +28,6 @@
#define MVM_UCODE_ALIVE_TIMEOUT (2 * HZ)
#define MVM_UCODE_CALIB_TIMEOUT (2 * HZ)
-#define IWL_UATS_VLP_AP_SUPPORTED BIT(29)
-#define IWL_UATS_AFC_AP_SUPPORTED BIT(30)
-
struct iwl_mvm_alive_data {
bool valid;
u32 scd_base_addr;
@@ -94,20 +91,10 @@ void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;
- __le32 *dump_data = mfu_dump_notif->data;
- int n_words = le32_to_cpu(mfu_dump_notif->data_size) / sizeof(__le32);
- int i;
if (mfu_dump_notif->index_num == 0)
IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
le32_to_cpu(mfu_dump_notif->assert_id));
-
- for (i = 0; i < n_words; i++)
- IWL_DEBUG_INFO(mvm,
- "MFUART assert dump, dword %u: 0x%08x\n",
- le16_to_cpu(mfu_dump_notif->index_num) *
- n_words + i,
- le32_to_cpu(dump_data[i]));
}
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
@@ -418,7 +405,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
UREG_LMAC2_CURRENT_PC));
}
- if (ret == -ETIMEDOUT && !mvm->pldr_sync)
+ if (ret == -ETIMEDOUT && !mvm->fw_product_reset)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_ALIVE_TIMEOUT);
@@ -470,12 +457,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
#endif
/*
+ * For pre-MLD API (MLD API doesn't use the timestamps):
* All the BSSes in the BSS table include the GP2 in the system
 * at the beacon Rx time; this is of course no longer relevant
 * since we are resetting the firmware.
 * Purge the entire BSS table.
*/
- cfg80211_bss_flush(mvm->hw->wiphy);
+ if (!mvm->mld_api_is_used)
+ cfg80211_bss_flush(mvm->hw->wiphy);
return 0;
}
@@ -494,44 +483,39 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
int ret;
struct iwl_host_cmd cmd = {
.id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
- UATS_TABLE_CMD),
+ MCC_ALLOWED_AP_TYPE_CMD),
.flags = 0,
.data[0] = &mvm->fwrt.uats_table,
.len[0] = sizeof(mvm->fwrt.uats_table),
.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
};
- if (!(mvm->trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_AX210)) {
+ if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n");
return;
}
- if (!mvm->fwrt.uats_enabled) {
- IWL_DEBUG_RADIO(mvm, "UATS feature is disabled\n");
- return;
- }
-
cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
IWL_FW_CMD_VER_UNKNOWN);
if (cmd_ver != 1) {
IWL_DEBUG_RADIO(mvm,
- "UATS_TABLE_CMD ver %d not supported\n",
+ "MCC_ALLOWED_AP_TYPE_CMD ver %d not supported\n",
cmd_ver);
return;
}
ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt);
if (ret < 0) {
- IWL_ERR(mvm, "failed to read UATS table (%d)\n", ret);
+ IWL_DEBUG_FW(mvm, "failed to read UATS table (%d)\n", ret);
return;
}
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret < 0)
- IWL_ERR(mvm, "failed to send UATS_TABLE_CMD (%d)\n", ret);
+ IWL_ERR(mvm, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n",
+ ret);
else
- IWL_DEBUG_RADIO(mvm, "UATS_TABLE_CMD sent to FW\n");
+ IWL_DEBUG_RADIO(mvm, "MCC_ALLOWED_AP_TYPE_CMD sent to FW\n");
}
static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
@@ -636,8 +620,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
/* if needed, we'll reset this on our way out later */
- mvm->pldr_sync = sb_cfg == SB_CFG_RESIDES_IN_ROM;
- if (mvm->pldr_sync && iwl_mei_pldr_req())
+ mvm->fw_product_reset = sb_cfg == SB_CFG_RESIDES_IN_ROM;
+ if (mvm->fw_product_reset && iwl_mei_pldr_req())
return -EBUSY;
}
@@ -656,7 +640,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
/* if we needed reset then fail here, but notify and remove */
- if (mvm->pldr_sync) {
+ if (mvm->fw_product_reset) {
iwl_mei_alive_notif(false);
iwl_trans_pcie_remove(mvm->trans, true);
}
@@ -666,7 +650,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
NULL);
- if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ)
+ if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans,
CNVI_PMU_STEP_FLOW) &
CNVI_PMU_STEP_FLOW_FORCE_URM);
@@ -695,14 +679,6 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
goto error;
}
- if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
- ret = iwl_nvm_init(mvm);
- if (ret) {
- IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
- goto error;
- }
- }
-
ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
NVM_ACCESS_COMPLETE),
CMD_SEND_IN_RFKILL,
@@ -727,7 +703,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
return ret;
/* Read the NVM only at driver load time, no need to do this twice */
- if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) {
+ if (!mvm->nvm_data) {
mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw,
mvm->set_tx_ant, mvm->set_rx_ant);
if (IS_ERR(mvm->nvm_data)) {
@@ -852,7 +828,7 @@ remove_notif:
iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
mvm->rfkill_safe_init_done = false;
- if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
+ if (!mvm->nvm_data) {
/* we want to debug INIT and we have no NVM - fake */
mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
sizeof(struct ieee80211_channel) +
@@ -894,13 +870,15 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
int ret;
u16 len = 0;
u32 n_subbands;
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
- if (cmd_ver == 7) {
+ u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
+
+ if (cmd_ver >= 7) {
len = sizeof(cmd.v7);
n_subbands = IWL_NUM_SUB_BANDS_V2;
per_chain = cmd.v7.per_chain[0][0];
cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);
+ if (cmd_ver == 8)
+ len = sizeof(cmd.v8);
} else if (cmd_ver == 6) {
len = sizeof(cmd.v6);
n_subbands = IWL_NUM_SUB_BANDS_V2;
@@ -1223,94 +1201,12 @@ static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
+ struct iwl_lari_config_change_cmd cmd;
+ size_t cmd_size;
int ret;
- u32 value;
- struct iwl_lari_config_change_cmd_v7 cmd = {};
- u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(REGULATORY_AND_NVM_GROUP,
- LARI_CONFIG_CHANGE), 1);
-
- cmd.config_bitmap = iwl_get_lari_config_bitmap(&mvm->fwrt);
- ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_11AX_ENABLEMENT, &value);
- if (!ret)
- cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
-
- ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_UNII4_CHAN, &value);
- if (!ret)
- cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
-
- ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ACTIVATE_CHANNEL, &value);
+ ret = iwl_fill_lari_config(&mvm->fwrt, &cmd, &cmd_size);
if (!ret) {
- if (cmd_ver < 8)
- value &= ~ACTIVATE_5G2_IN_WW_MASK;
- cmd.chan_state_active_bitmap = cpu_to_le32(value);
- }
-
- ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENABLE_6E, &value);
- if (!ret)
- cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
-
- ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_FORCE_DISABLE_CHANNELS,
- &value);
- if (!ret)
- cmd.force_disable_channels_bitmap = cpu_to_le32(value);
-
- ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_ENERGY_DETECTION_THRESHOLD,
- &value);
- if (!ret)
- cmd.edt_bitmap = cpu_to_le32(value);
-
- if (cmd.config_bitmap ||
- cmd.oem_uhb_allow_bitmap ||
- cmd.oem_11ax_allow_bitmap ||
- cmd.oem_unii4_allow_bitmap ||
- cmd.chan_state_active_bitmap ||
- cmd.force_disable_channels_bitmap ||
- cmd.edt_bitmap) {
- size_t cmd_size;
-
- switch (cmd_ver) {
- case 8:
- case 7:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v7);
- break;
- case 6:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v6);
- break;
- case 5:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
- break;
- case 4:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
- break;
- case 3:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
- break;
- case 2:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v2);
- break;
- default:
- cmd_size = sizeof(struct iwl_lari_config_change_cmd_v1);
- break;
- }
-
- IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n",
- le32_to_cpu(cmd.config_bitmap),
- le32_to_cpu(cmd.oem_11ax_allow_bitmap));
- IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
- le32_to_cpu(cmd.oem_unii4_allow_bitmap),
- le32_to_cpu(cmd.chan_state_active_bitmap),
- cmd_ver);
- IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x, force_disable_channels_bitmap=0x%x\n",
- le32_to_cpu(cmd.oem_uhb_allow_bitmap),
- le32_to_cpu(cmd.force_disable_channels_bitmap));
- IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x\n",
- le32_to_cpu(cmd.edt_bitmap));
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE),
@@ -1320,10 +1216,6 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
"Failed to send LARI_CONFIG_CHANGE (%d)\n",
ret);
}
-
- if (le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_VLP_AP_SUPPORTED ||
- le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_AFC_AP_SUPPORTED)
- mvm->fwrt.uats_enabled = true;
}
void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm)
@@ -1465,9 +1357,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
if (ret) {
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
-
- if (iwlmvm_mod_params.init_dbg)
- return 0;
return ret;
}
@@ -1504,14 +1393,14 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
ret = iwl_mvm_load_rt_fw(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
- if (ret != -ERFKILL && !mvm->pldr_sync)
+ if (ret != -ERFKILL && !mvm->fw_product_reset)
iwl_fw_dbg_error_collect(&mvm->fwrt,
FW_DBG_TRIGGER_DRIVER);
goto error;
}
/* FW loaded successfully */
- mvm->pldr_sync = false;
+ mvm->fw_product_reset = false;
iwl_fw_disable_dbg_asserts(&mvm->fwrt);
iwl_get_shared_mem_conf(&mvm->fwrt);
@@ -1578,8 +1467,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
for (i = 0; i < IWL_MVM_FW_MAX_LINK_ID + 1; i++)
RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL);
- memset(&mvm->fw_link_ids_map, 0, sizeof(mvm->fw_link_ids_map));
-
mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
/* reset quota debouncing buffer - 0xff will yield invalid data */
@@ -1708,8 +1595,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
- if (!iwlmvm_mod_params.init_dbg || !ret)
- iwl_mvm_stop_device(mvm);
+ iwl_mvm_stop_device(mvm);
return ret;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
index fe5bba8561d0..a9929aa49913 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c
@@ -5,29 +5,61 @@
#include "mvm.h"
#include "time-event.h"
-static u32 iwl_mvm_get_free_fw_link_id(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvm_vif)
+#define HANDLE_ESR_REASONS(HOW) \
+ HOW(BLOCKED_PREVENTION) \
+ HOW(BLOCKED_WOWLAN) \
+ HOW(BLOCKED_TPT) \
+ HOW(BLOCKED_FW) \
+ HOW(BLOCKED_NON_BSS) \
+ HOW(BLOCKED_ROC) \
+ HOW(EXIT_MISSED_BEACON) \
+ HOW(EXIT_LOW_RSSI) \
+ HOW(EXIT_COEX) \
+ HOW(EXIT_BANDWIDTH) \
+ HOW(EXIT_CSA) \
+ HOW(EXIT_LINK_USAGE)
+
+static const char *const iwl_mvm_esr_states_names[] = {
+#define NAME_ENTRY(x) [ilog2(IWL_MVM_ESR_##x)] = #x,
+ HANDLE_ESR_REASONS(NAME_ENTRY)
+};
+
+const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state)
{
- u32 link_id;
-
- lockdep_assert_held(&mvm->mutex);
+ int offs = ilog2(state);
- link_id = ffz(mvm->fw_link_ids_map);
+ if (offs >= ARRAY_SIZE(iwl_mvm_esr_states_names) ||
+ !iwl_mvm_esr_states_names[offs])
+ return "UNKNOWN";
- /* this case can happen if there're deactivated but not removed links */
- if (link_id > IWL_MVM_FW_MAX_LINK_ID)
- return IWL_MVM_FW_LINK_ID_INVALID;
+ return iwl_mvm_esr_states_names[offs];
+}
- mvm->fw_link_ids_map |= BIT(link_id);
- return link_id;
+static void iwl_mvm_print_esr_state(struct iwl_mvm *mvm, u32 mask)
+{
+#define NAME_FMT(x) "%s"
+#define NAME_PR(x) (mask & IWL_MVM_ESR_##x) ? "[" #x "]" : "",
+ IWL_DEBUG_INFO(mvm,
+ "EMLSR state = " HANDLE_ESR_REASONS(NAME_FMT)
+ " (0x%x)\n",
+ HANDLE_ESR_REASONS(NAME_PR)
+ mask);
+#undef NAME_FMT
+#undef NAME_PR
}
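To make the X-macro idiom above concrete: passing NAME_ENTRY as HOW makes HANDLE_ESR_REASONS emit one designated initializer per reason, so the names array is generated roughly as sketched here:

	static const char *const iwl_mvm_esr_states_names[] = {
		[ilog2(IWL_MVM_ESR_BLOCKED_PREVENTION)] = "BLOCKED_PREVENTION",
		[ilog2(IWL_MVM_ESR_BLOCKED_WOWLAN)] = "BLOCKED_WOWLAN",
		/* ... one entry per reason in HANDLE_ESR_REASONS ... */
		[ilog2(IWL_MVM_ESR_EXIT_LINK_USAGE)] = "EXIT_LINK_USAGE",
	};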
-static void iwl_mvm_release_fw_link_id(struct iwl_mvm *mvm, u32 link_id)
+static u32 iwl_mvm_get_free_fw_link_id(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvm_vif)
{
+ u32 i;
+
lockdep_assert_held(&mvm->mutex);
- if (!WARN_ON(link_id > IWL_MVM_FW_MAX_LINK_ID))
- mvm->fw_link_ids_map &= ~BIT(link_id);
+ for (i = 0; i < ARRAY_SIZE(mvm->link_id_to_link_conf); i++)
+ if (!rcu_access_pointer(mvm->link_id_to_link_conf[i]))
+ return i;
+
+ return IWL_MVM_FW_LINK_ID_INVALID;
}
static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm,
@@ -108,6 +140,65 @@ int iwl_mvm_add_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_link_cmd_send(mvm, &cmd, FW_CTXT_ACTION_ADD);
}
+struct iwl_mvm_esr_iter_data {
+ struct ieee80211_vif *vif;
+ unsigned int link_id;
+ bool lift_block;
+};
+
+static void iwl_mvm_esr_vif_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_esr_iter_data *data = _data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int link_id;
+
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_STATION)
+ return;
+
+ for_each_mvm_vif_valid_link(mvmvif, link_id) {
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_id];
+ if (vif == data->vif && link_id == data->link_id)
+ continue;
+ if (link_info->active)
+ data->lift_block = false;
+ }
+}
+
+int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ unsigned int link_id, bool active)
+{
+ /* An active link of a non-station vif blocks EMLSR. Upon activation
+ * block EMLSR on the bss vif. Upon deactivation, check if this link
+	 * was the last active non-station link, and if so unblock the bss vif.
+ */
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
+ struct iwl_mvm_esr_iter_data data = {
+ .vif = vif,
+ .link_id = link_id,
+ .lift_block = true,
+ };
+
+ if (IS_ERR_OR_NULL(bss_vif))
+ return 0;
+
+ if (active)
+ return iwl_mvm_block_esr_sync(mvm, bss_vif,
+ IWL_MVM_ESR_BLOCKED_NON_BSS);
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_esr_vif_iterator, &data);
+ if (data.lift_block) {
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_NON_BSS);
+ mutex_unlock(&mvm->mutex);
+ }
+
+ return 0;
+}
+
int iwl_mvm_link_changed(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
u32 changes, bool active)
@@ -279,7 +370,6 @@ int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id],
NULL);
- iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id);
return 0;
}
@@ -329,3 +419,732 @@ int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return ret;
}
+
+struct iwl_mvm_rssi_to_grade {
+ s8 rssi[2];
+ u16 grade;
+};
+
+#define RSSI_TO_GRADE_LINE(_lb, _hb_uhb, _grade) \
+ { \
+ .rssi = {_lb, _hb_uhb}, \
+ .grade = _grade \
+ }
+
+/*
+ * This array must be sorted by increasing RSSI for proper functionality.
+ * The grades are actually estimated throughput, represented as fixed-point
+ * with a scale factor of 1/10.
+ */
+static const struct iwl_mvm_rssi_to_grade rssi_to_grade_map[] = {
+ RSSI_TO_GRADE_LINE(-85, -89, 177),
+ RSSI_TO_GRADE_LINE(-83, -86, 344),
+ RSSI_TO_GRADE_LINE(-82, -85, 516),
+ RSSI_TO_GRADE_LINE(-80, -83, 688),
+ RSSI_TO_GRADE_LINE(-77, -79, 1032),
+ RSSI_TO_GRADE_LINE(-73, -76, 1376),
+ RSSI_TO_GRADE_LINE(-70, -74, 1548),
+ RSSI_TO_GRADE_LINE(-69, -72, 1750),
+ RSSI_TO_GRADE_LINE(-65, -68, 2064),
+ RSSI_TO_GRADE_LINE(-61, -66, 2294),
+ RSSI_TO_GRADE_LINE(-58, -61, 2580),
+ RSSI_TO_GRADE_LINE(-55, -58, 2868),
+ RSSI_TO_GRADE_LINE(-46, -55, 3098),
+ RSSI_TO_GRADE_LINE(-43, -54, 3442)
+};
+
+#define MAX_GRADE (rssi_to_grade_map[ARRAY_SIZE(rssi_to_grade_map) - 1].grade)
+
+#define DEFAULT_CHAN_LOAD_LB 30
+#define DEFAULT_CHAN_LOAD_HB 15
+#define DEFAULT_CHAN_LOAD_UHB 0
+
+/* Factors calculation is done with fixed-point with a scaling factor of 1/256 */
+#define SCALE_FACTOR 256
+
+/* Convert a percentage from [0,100] to [0,255] */
+#define NORMALIZE_PERCENT_TO_255(percentage) ((percentage) * SCALE_FACTOR / 100)
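A quick worked example of the conversion, using the 2.4 GHz default from above:

	/* NORMALIZE_PERCENT_TO_255(DEFAULT_CHAN_LOAD_LB): */
	/* 30 * 256 / 100 = 76 on the 0-255 scale of the BSS Load channel_util */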
+
+static unsigned int
+iwl_mvm_get_puncturing_factor(const struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_chan_width chan_width =
+ link_conf->chanreq.oper.width;
+ int mhz = nl80211_chan_width_to_mhz(chan_width);
+ unsigned int n_subchannels, n_punctured, puncturing_penalty;
+
+ if (WARN_ONCE(mhz < 20 || mhz > 320,
+ "Invalid channel width : (%d)\n", mhz))
+ return SCALE_FACTOR;
+
+ /* No puncturing, no penalty */
+ if (mhz < 80)
+ return SCALE_FACTOR;
+
+ /* total number of subchannels */
+ n_subchannels = mhz / 20;
+ /* how many of these are punctured */
+ n_punctured = hweight16(link_conf->chanreq.oper.punctured);
+
+ puncturing_penalty = n_punctured * SCALE_FACTOR / n_subchannels;
+ return SCALE_FACTOR - puncturing_penalty;
+}
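To illustrate the penalty arithmetic (channel parameters assumed for the example):

	/*
	 * 80 MHz channel with one punctured 20 MHz subchannel:
	 *   n_subchannels      = 80 / 20 = 4
	 *   n_punctured        = 1
	 *   puncturing_penalty = 1 * 256 / 4 = 64
	 *   returned factor    = 256 - 64 = 192, i.e. ~75% of the grade is kept
	 */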
+
+static unsigned int
+iwl_mvm_get_chan_load(struct ieee80211_bss_conf *link_conf)
+{
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct iwl_mvm_vif_link_info *mvm_link =
+ iwl_mvm_vif_from_mac80211(link_conf->vif)->link[link_conf->link_id];
+ const struct element *bss_load_elem;
+ const struct ieee80211_bss_load_elem *bss_load;
+ enum nl80211_band band = link_conf->chanreq.oper.chan->band;
+ const struct cfg80211_bss_ies *ies;
+ unsigned int chan_load;
+ u32 chan_load_by_us;
+
+ rcu_read_lock();
+ if (ieee80211_vif_link_active(vif, link_conf->link_id))
+ ies = rcu_dereference(link_conf->bss->beacon_ies);
+ else
+ ies = rcu_dereference(link_conf->bss->ies);
+
+ if (ies)
+ bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD,
+ ies->data, ies->len);
+ else
+ bss_load_elem = NULL;
+
+	/* If there is no BSS Load element, take the defaults */
+ if (!bss_load_elem ||
+ bss_load_elem->datalen != sizeof(*bss_load)) {
+ rcu_read_unlock();
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ chan_load = DEFAULT_CHAN_LOAD_LB;
+ break;
+ case NL80211_BAND_5GHZ:
+ chan_load = DEFAULT_CHAN_LOAD_HB;
+ break;
+ case NL80211_BAND_6GHZ:
+ chan_load = DEFAULT_CHAN_LOAD_UHB;
+ break;
+ default:
+ chan_load = 0;
+ break;
+ }
+ /* The defaults are given in percentage */
+ return NORMALIZE_PERCENT_TO_255(chan_load);
+ }
+
+ bss_load = (const void *)bss_load_elem->data;
+ /* Channel util is in range 0-255 */
+ chan_load = bss_load->channel_util;
+ rcu_read_unlock();
+
+ if (!mvm_link || !mvm_link->active)
+ return chan_load;
+
+ if (WARN_ONCE(!mvm_link->phy_ctxt,
+ "Active link (%u) without phy ctxt assigned!\n",
+ link_conf->link_id))
+ return chan_load;
+
+ /* channel load by us is given in percentage */
+ chan_load_by_us =
+ NORMALIZE_PERCENT_TO_255(mvm_link->phy_ctxt->channel_load_by_us);
+
+	/* Only use firmware-reported values that can possibly be valid */
+ if (chan_load_by_us <= chan_load)
+ chan_load -= chan_load_by_us;
+
+ return chan_load;
+}
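A worked example of the load computation (both inputs assumed):

	/*
	 * The AP advertises channel_util = 128 in its BSS Load element and
	 * the firmware attributes 10% of the load to our own traffic:
	 *   chan_load_by_us = 10 * 256 / 100 = 25
	 *   returned load   = 128 - 25 = 103 (still on the 0-255 scale)
	 */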
+
+static unsigned int
+iwl_mvm_get_chan_load_factor(struct ieee80211_bss_conf *link_conf)
+{
+ return SCALE_FACTOR - iwl_mvm_get_chan_load(link_conf);
+}
+
+/* This function calculates the grade of a link. Returns 0 in case of error. */
+VISIBLE_IF_IWLWIFI_KUNIT
+unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf)
+{
+ enum nl80211_band band;
+ int i, rssi_idx;
+ s32 link_rssi;
+ unsigned int grade = MAX_GRADE;
+
+ if (WARN_ON_ONCE(!link_conf))
+ return 0;
+
+ band = link_conf->chanreq.oper.chan->band;
+ if (WARN_ONCE(band != NL80211_BAND_2GHZ &&
+ band != NL80211_BAND_5GHZ &&
+ band != NL80211_BAND_6GHZ,
+ "Invalid band (%u)\n", band))
+ return 0;
+
+ link_rssi = MBM_TO_DBM(link_conf->bss->signal);
+ /*
+ * For 6 GHz the RSSI of the beacons is lower than
+ * the RSSI of the data.
+ */
+ if (band == NL80211_BAND_6GHZ)
+ link_rssi += 4;
+
+ rssi_idx = band == NL80211_BAND_2GHZ ? 0 : 1;
+
+ /* No valid RSSI - take the lowest grade */
+ if (!link_rssi)
+ link_rssi = rssi_to_grade_map[0].rssi[rssi_idx];
+
+ /* Get grade based on RSSI */
+ for (i = 0; i < ARRAY_SIZE(rssi_to_grade_map); i++) {
+ const struct iwl_mvm_rssi_to_grade *line =
+ &rssi_to_grade_map[i];
+
+ if (link_rssi > line->rssi[rssi_idx])
+ continue;
+ grade = line->grade;
+ break;
+ }
+
+ /* apply the channel load and puncturing factors */
+ grade = grade * iwl_mvm_get_chan_load_factor(link_conf) / SCALE_FACTOR;
+ grade = grade * iwl_mvm_get_puncturing_factor(link_conf) / SCALE_FACTOR;
+ return grade;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_get_link_grade);
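Putting the pieces together, a worked example of the grade calculation (all inputs assumed for illustration):

	/*
	 * A 5 GHz link (rssi_idx = 1) with RSSI -70 dBm: the first table line
	 * whose rssi[1] is >= -70 is RSSI_TO_GRADE_LINE(-65, -68, 2064), so
	 * the base grade is 2064. With a channel load factor of 192/256 and
	 * no puncturing (factor 256/256):
	 *   grade = 2064 * 192 / 256 = 1548
	 */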
+
+static
+u8 iwl_mvm_set_link_selection_data(struct ieee80211_vif *vif,
+ struct iwl_mvm_link_sel_data *data,
+ unsigned long usable_links,
+ u8 *best_link_idx)
+{
+ u8 n_data = 0;
+ u16 max_grade = 0;
+ unsigned long link_id;
+
+ /* TODO: don't select links that weren't discovered in the last scan */
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ link_conf_dereference_protected(vif, link_id);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ data[n_data].link_id = link_id;
+ data[n_data].chandef = &link_conf->chanreq.oper;
+ data[n_data].signal = link_conf->bss->signal / 100;
+ data[n_data].grade = iwl_mvm_get_link_grade(link_conf);
+
+ if (data[n_data].grade > max_grade) {
+ max_grade = data[n_data].grade;
+ *best_link_idx = n_data;
+ }
+ n_data++;
+ }
+
+ return n_data;
+}
+
+struct iwl_mvm_bw_to_rssi_threshs {
+ s8 low;
+ s8 high;
+};
+
+#define BW_TO_RSSI_THRESHOLDS(_bw) \
+ [IWL_PHY_CHANNEL_MODE ## _bw] = { \
+ .low = IWL_MVM_LOW_RSSI_THRESH_##_bw##MHZ, \
+ .high = IWL_MVM_HIGH_RSSI_THRESH_##_bw##MHZ \
+ }
+
+s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
+ const struct cfg80211_chan_def *chandef,
+ bool low)
+{
+ const struct iwl_mvm_bw_to_rssi_threshs bw_to_rssi_threshs_map[] = {
+ BW_TO_RSSI_THRESHOLDS(20),
+ BW_TO_RSSI_THRESHOLDS(40),
+ BW_TO_RSSI_THRESHOLDS(80),
+ BW_TO_RSSI_THRESHOLDS(160)
+ /* 320 MHz has the same thresholds as 20 MHz */
+ };
+ const struct iwl_mvm_bw_to_rssi_threshs *threshs;
+ u8 chan_width = iwl_mvm_get_channel_width(chandef);
+
+ if (WARN_ON(chandef->chan->band != NL80211_BAND_2GHZ &&
+ chandef->chan->band != NL80211_BAND_5GHZ &&
+ chandef->chan->band != NL80211_BAND_6GHZ))
+ return S8_MAX;
+
+	/* 320 MHz (6 GHz only) has the same thresholds as 20 MHz */
+ if (chan_width == IWL_PHY_CHANNEL_MODE320)
+ chan_width = IWL_PHY_CHANNEL_MODE20;
+
+ threshs = &bw_to_rssi_threshs_map[chan_width];
+
+ return low ? threshs->low : threshs->high;
+}
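For reference, one entry of the thresholds table expands per the macro above roughly as:

	[IWL_PHY_CHANNEL_MODE20] = {
		.low = IWL_MVM_LOW_RSSI_THRESH_20MHZ,
		.high = IWL_MVM_HIGH_RSSI_THRESH_20MHZ
	},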
+
+static u32
+iwl_mvm_esr_disallowed_with_link(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *link,
+ bool primary)
+{
+ struct wiphy *wiphy = mvm->hw->wiphy;
+ struct ieee80211_bss_conf *conf;
+ enum iwl_mvm_esr_state ret = 0;
+ s8 thresh;
+
+ conf = wiphy_dereference(wiphy, vif->link_conf[link->link_id]);
+ if (WARN_ON_ONCE(!conf))
+ return false;
+
+	/* BT Coex affects eSR mode only if one of the links is on LB */
+ if (link->chandef->chan->band == NL80211_BAND_2GHZ &&
+ (!iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link->signal,
+ primary)))
+ ret |= IWL_MVM_ESR_EXIT_COEX;
+
+ thresh = iwl_mvm_get_esr_rssi_thresh(mvm, link->chandef,
+ false);
+
+ if (link->signal < thresh)
+ ret |= IWL_MVM_ESR_EXIT_LOW_RSSI;
+
+ if (conf->csa_active)
+ ret |= IWL_MVM_ESR_EXIT_CSA;
+
+ if (ret) {
+ IWL_DEBUG_INFO(mvm,
+ "Link %d is not allowed for esr\n",
+ link->link_id);
+ iwl_mvm_print_esr_state(mvm, ret);
+ }
+ return ret;
+}
+
+VISIBLE_IF_IWLWIFI_KUNIT
+bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *a,
+ const struct iwl_mvm_link_sel_data *b)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ enum iwl_mvm_esr_state ret = 0;
+
+ /* Per-link considerations */
+ if (iwl_mvm_esr_disallowed_with_link(mvm, vif, a, true) ||
+ iwl_mvm_esr_disallowed_with_link(mvm, vif, b, false))
+ return false;
+
+ if (a->chandef->width != b->chandef->width ||
+ !(a->chandef->chan->band == NL80211_BAND_6GHZ &&
+ b->chandef->chan->band == NL80211_BAND_5GHZ))
+ ret |= IWL_MVM_ESR_EXIT_BANDWIDTH;
+
+ if (ret) {
+ IWL_DEBUG_INFO(mvm,
+ "Links %d and %d are not a valid pair for EMLSR\n",
+ a->link_id, b->link_id);
+ iwl_mvm_print_esr_state(mvm, ret);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_mld_valid_link_pair);
+
+/*
+ * Returns the combined eSR grade of two given links.
+ * Returns 0 if eSR is not allowed with these 2 links.
+ */
+static
+unsigned int iwl_mvm_get_esr_grade(struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *a,
+ const struct iwl_mvm_link_sel_data *b,
+ u8 *primary_id)
+{
+ struct ieee80211_bss_conf *primary_conf;
+ struct wiphy *wiphy = ieee80211_vif_to_wdev(vif)->wiphy;
+ unsigned int primary_load;
+
+ lockdep_assert_wiphy(wiphy);
+
+ /* a is always primary, b is always secondary */
+ if (b->grade > a->grade)
+ swap(a, b);
+
+ *primary_id = a->link_id;
+
+ if (!iwl_mvm_mld_valid_link_pair(vif, a, b))
+ return 0;
+
+ primary_conf = wiphy_dereference(wiphy, vif->link_conf[*primary_id]);
+
+ if (WARN_ON_ONCE(!primary_conf))
+ return 0;
+
+ primary_load = iwl_mvm_get_chan_load(primary_conf);
+
+ return a->grade +
+ ((b->grade * primary_load) / SCALE_FACTOR);
+}
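A numeric sketch of the combined grade (grades and load assumed for illustration):

	/*
	 * a->grade = 2064, b->grade = 1548, primary channel load = 76/256:
	 *   esr_grade = 2064 + (1548 * 76) / 256 = 2064 + 459 = 2523
	 * The secondary link contributes in proportion to how busy the
	 * primary channel is; on a fully idle primary it adds nothing.
	 */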
+
+void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
+ struct iwl_mvm_link_sel_data *best_link;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 max_active_links = iwl_mvm_max_active_links(mvm, vif);
+ u16 usable_links = ieee80211_vif_usable_links(vif);
+ u8 best, primary_link, best_in_pair, n_data;
+ u16 max_esr_grade = 0, new_active_links;
+
+ lockdep_assert_wiphy(mvm->hw->wiphy);
+
+ if (!mvmvif->authorized || !ieee80211_vif_is_mld(vif))
+ return;
+
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
+ /* The logic below is a simple version that doesn't suit more than 2
+ * links
+ */
+ WARN_ON_ONCE(max_active_links > 2);
+
+ n_data = iwl_mvm_set_link_selection_data(vif, data, usable_links,
+ &best);
+
+ if (WARN(!n_data, "Couldn't find a valid grade for any link!\n"))
+ return;
+
+ best_link = &data[best];
+ primary_link = best_link->link_id;
+ new_active_links = BIT(best_link->link_id);
+
+ /* eSR is not supported/blocked, or only one usable link */
+ if (max_active_links == 1 || !iwl_mvm_vif_has_esr_cap(mvm, vif) ||
+ mvmvif->esr_disable_reason || n_data == 1)
+ goto set_active;
+
+ for (u8 a = 0; a < n_data; a++)
+ for (u8 b = a + 1; b < n_data; b++) {
+ u16 esr_grade = iwl_mvm_get_esr_grade(vif, &data[a],
+ &data[b],
+ &best_in_pair);
+
+ if (esr_grade <= max_esr_grade)
+ continue;
+
+ max_esr_grade = esr_grade;
+ primary_link = best_in_pair;
+ new_active_links = BIT(data[a].link_id) |
+ BIT(data[b].link_id);
+ }
+
+ /* No valid pair was found, go with the best link */
+ if (hweight16(new_active_links) <= 1)
+ goto set_active;
+
+ /* For equal grade - prefer EMLSR */
+ if (best_link->grade > max_esr_grade) {
+ primary_link = best_link->link_id;
+ new_active_links = BIT(best_link->link_id);
+ }
+set_active:
+ IWL_DEBUG_INFO(mvm, "Link selection result: 0x%x. Primary = %d\n",
+ new_active_links, primary_link);
+ ieee80211_set_active_links_async(vif, new_active_links);
+ mvmvif->link_selection_res = new_active_links;
+ mvmvif->link_selection_primary = primary_link;
+}
+
+u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ /* relevant data is written with both locks held, so read with either */
+ lockdep_assert(lockdep_is_held(&mvmvif->mvm->mutex) ||
+ lockdep_is_held(&mvmvif->mvm->hw->wiphy->mtx));
+
+ if (!ieee80211_vif_is_mld(vif))
+ return 0;
+
+ /* In AP mode, there is no primary link */
+ if (vif->type == NL80211_IFTYPE_AP)
+ return __ffs(vif->active_links);
+
+ if (mvmvif->esr_active &&
+ !WARN_ON(!(BIT(mvmvif->primary_link) & vif->active_links)))
+ return mvmvif->primary_link;
+
+ return __ffs(vif->active_links);
+}
+
+/*
+ * For non-MLO/single link, this will return the deflink/single active link,
+ * respectively
+ */
+u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id)
+{
+ switch (hweight16(vif->active_links)) {
+ case 0:
+ return 0;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case 1:
+ return __ffs(vif->active_links);
+ case 2:
+ return __ffs(vif->active_links & ~BIT(link_id));
+ }
+}
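Example lookup (bitmap assumed):

	/*
	 * vif->active_links = 0x5 (links 0 and 2 active), link_id = 0:
	 *   __ffs(0x5 & ~BIT(0)) = __ffs(0x4) = 2
	 */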
+
+/* Reasons that can cause esr prevention */
+#define IWL_MVM_ESR_PREVENT_REASONS IWL_MVM_ESR_EXIT_MISSED_BEACON
+#define IWL_MVM_PREVENT_ESR_TIMEOUT (HZ * 400)
+#define IWL_MVM_ESR_PREVENT_SHORT (HZ * 300)
+#define IWL_MVM_ESR_PREVENT_LONG (HZ * 600)
+
+static bool iwl_mvm_check_esr_prevention(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif,
+ enum iwl_mvm_esr_state reason)
+{
+ bool timeout_expired = time_after(jiffies,
+ mvmvif->last_esr_exit.ts +
+ IWL_MVM_PREVENT_ESR_TIMEOUT);
+ unsigned long delay;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ /* Only handle reasons that can cause prevention */
+ if (!(reason & IWL_MVM_ESR_PREVENT_REASONS))
+ return false;
+
+ /*
+ * Reset the counter if more than 400 seconds have passed between one
+ * exit and the other, or if we exited due to a different reason.
+ * Will also reset the counter after the long prevention is done.
+ */
+ if (timeout_expired || mvmvif->last_esr_exit.reason != reason) {
+ mvmvif->exit_same_reason_count = 1;
+ return false;
+ }
+
+ mvmvif->exit_same_reason_count++;
+ if (WARN_ON(mvmvif->exit_same_reason_count < 2 ||
+ mvmvif->exit_same_reason_count > 3))
+ return false;
+
+ mvmvif->esr_disable_reason |= IWL_MVM_ESR_BLOCKED_PREVENTION;
+
+ /*
+ * For the second exit, use a short prevention, and for the third one,
+ * use a long prevention.
+ */
+ delay = mvmvif->exit_same_reason_count == 2 ?
+ IWL_MVM_ESR_PREVENT_SHORT :
+ IWL_MVM_ESR_PREVENT_LONG;
+
+ IWL_DEBUG_INFO(mvm,
+ "Preventing EMLSR for %ld seconds due to %u exits with the reason = %s (0x%x)\n",
+ delay / HZ, mvmvif->exit_same_reason_count,
+ iwl_get_esr_state_string(reason), reason);
+
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->prevent_esr_done_wk, delay);
+ return true;
+}
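An illustrative timeline of the escalation (timestamps assumed):

	/*
	 * t =   0 s: exit for EXIT_MISSED_BEACON, count = 1, no prevention
	 * t = 100 s: same reason, < 400 s later, count = 2, prevent for 300 s
	 * t = 450 s: same reason, < 400 s since t = 100, count = 3,
	 *            prevent for 600 s
	 * An exit more than 400 s after the previous one resets the count.
	 */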
+
+#define IWL_MVM_TRIGGER_LINK_SEL_TIME (IWL_MVM_TRIGGER_LINK_SEL_TIME_SEC * HZ)
+
+/* API to exit eSR mode */
+void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 new_active_links;
+ bool prevented;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
+ /* Nothing to do */
+ if (!mvmvif->esr_active)
+ return;
+
+ if (WARN_ON(!ieee80211_vif_is_mld(vif) || !mvmvif->authorized))
+ return;
+
+ if (WARN_ON(!(vif->active_links & BIT(link_to_keep))))
+ link_to_keep = __ffs(vif->active_links);
+
+ new_active_links = BIT(link_to_keep);
+ IWL_DEBUG_INFO(mvm,
+ "Exiting EMLSR. reason = %s (0x%x). Current active links=0x%x, new active links = 0x%x\n",
+ iwl_get_esr_state_string(reason), reason,
+ vif->active_links, new_active_links);
+
+ ieee80211_set_active_links_async(vif, new_active_links);
+
+ /* Prevent EMLSR if needed */
+ prevented = iwl_mvm_check_esr_prevention(mvm, mvmvif, reason);
+
+ /* Remember why and when we exited EMLSR */
+ mvmvif->last_esr_exit.ts = jiffies;
+ mvmvif->last_esr_exit.reason = reason;
+
+ /*
+ * If EMLSR is prevented now - don't try to get back to EMLSR.
+ * If we exited due to a blocking event, we will try to get back to
+ * EMLSR when the corresponding unblocking event will happen.
+ */
+ if (prevented || reason & IWL_MVM_BLOCK_ESR_REASONS)
+ return;
+
+ /* If EMLSR is not blocked - try enabling it again in 30 seconds */
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk,
+ round_jiffies_relative(IWL_MVM_TRIGGER_LINK_SEL_TIME));
+}
+
+void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
+ /* This should be called only with disable reasons */
+ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
+ return;
+
+ if (mvmvif->esr_disable_reason & reason)
+ return;
+
+ IWL_DEBUG_INFO(mvm,
+ "Blocking EMLSR mode. reason = %s (0x%x)\n",
+ iwl_get_esr_state_string(reason), reason);
+
+ mvmvif->esr_disable_reason |= reason;
+
+ iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
+
+ iwl_mvm_exit_esr(mvm, vif, reason, link_to_keep);
+}
+
+int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason)
+{
+ int primary_link = iwl_mvm_get_primary_link(vif);
+ int ret;
+
+ if (!IWL_MVM_AUTO_EML_ENABLE || !ieee80211_vif_is_mld(vif))
+ return 0;
+
+ /* This should be called only with blocking reasons */
+ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
+ return 0;
+
+ /* leave ESR immediately, not only async with iwl_mvm_block_esr() */
+ ret = ieee80211_set_active_links(vif, BIT(primary_link));
+ if (ret)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+ /* only additionally block for consistency and to avoid concurrency */
+ iwl_mvm_block_esr(mvm, vif, reason, primary_link);
+ mutex_unlock(&mvm->mutex);
+
+ return 0;
+}
+
+static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ bool need_new_sel = time_after(jiffies, mvmvif->last_esr_exit.ts +
+ IWL_MVM_TRIGGER_LINK_SEL_TIME);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!ieee80211_vif_is_mld(vif) || !mvmvif->authorized ||
+ mvmvif->esr_active)
+ return;
+
+ IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n");
+
+ /* If we exited due to an EXIT reason, and the exit was in less than
+	 * 30 seconds, then an MLO scan was already scheduled.
+ */
+ if (!need_new_sel &&
+ !(mvmvif->last_esr_exit.reason & IWL_MVM_BLOCK_ESR_REASONS)) {
+ IWL_DEBUG_INFO(mvm, "Wait for MLO scan\n");
+ return;
+ }
+
+ /*
+ * If EMLSR was blocked for more than 30 seconds, or the last link
+ * selection decided to not enter EMLSR, trigger a new scan.
+ */
+ if (need_new_sel || hweight16(mvmvif->link_selection_res) < 2) {
+ IWL_DEBUG_INFO(mvm, "Trigger MLO scan\n");
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk, 0);
+ /*
+ * If EMLSR was blocked for less than 30 seconds, and the last link
+ * selection decided to use EMLSR, activate EMLSR using the previous
+ * link selection result.
+ */
+ } else {
+ IWL_DEBUG_INFO(mvm,
+ "Use the latest link selection result: 0x%x\n",
+ mvmvif->link_selection_res);
+ ieee80211_set_active_links_async(vif,
+ mvmvif->link_selection_res);
+ }
+}
+
+void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!IWL_MVM_AUTO_EML_ENABLE)
+ return;
+
+ /* This should be called only with disable reasons */
+ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS)))
+ return;
+
+ /* No Change */
+ if (!(mvmvif->esr_disable_reason & reason))
+ return;
+
+ mvmvif->esr_disable_reason &= ~reason;
+
+ IWL_DEBUG_INFO(mvm,
+ "Unblocking EMLSR mode. reason = %s (0x%x)\n",
+ iwl_get_esr_state_string(reason), reason);
+ iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason);
+
+ if (!mvmvif->esr_disable_reason)
+ iwl_mvm_esr_unblocked(mvm, vif);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index 228ede7b8957..dfcc96f18b4f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -296,6 +296,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
INIT_LIST_HEAD(&mvmvif->time_event_data.list);
mvmvif->time_event_data.id = TE_MAX;
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA;
mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA;
@@ -873,7 +874,7 @@ void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
}
}
-static u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
+u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size)
{
struct ieee80211_mgmt *mgmt = (void *)beacon;
const u8 *ie;
@@ -1010,12 +1011,13 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
tx->tx_flags = cpu_to_le32(tx_flags);
if (!fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION))
+ IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) {
iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
- tx->rate_n_flags =
- cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
- RATE_MCS_ANT_POS);
+ tx->rate_n_flags =
+ cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) <<
+ RATE_MCS_ANT_POS);
+ }
rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif);
@@ -1163,6 +1165,13 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
WLAN_EID_EXT_CHANSWITCH_ANN,
beacon->len));
+ if (vif->type == NL80211_IFTYPE_AP &&
+ iwl_fw_lookup_cmd_ver(mvm->fw, BEACON_TEMPLATE_CMD, 0) >= 14)
+ beacon_cmd.btwt_offset =
+ cpu_to_le32(iwl_mvm_find_ie_offset(beacon->data,
+ WLAN_EID_S1G_TWT,
+ beacon->len));
+
return iwl_mvm_mac_ctxt_send_beacon_cmd(mvm, beacon, &beacon_cmd,
sizeof(beacon_cmd));
}
@@ -1591,23 +1600,23 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
u32 id = le32_to_cpu(mb->link_id);
union iwl_dbg_tlv_tp_data tp_data = { .fw_pkt = pkt };
u32 mac_type;
+ int link_id = -1;
u8 notif_ver = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
MISSED_BEACONS_NOTIFICATION,
0);
- rcu_read_lock();
-
/* before version four the ID in the notification refers to mac ID */
if (notif_ver < 4) {
- vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, true);
+ vif = iwl_mvm_rcu_dereference_vif_id(mvm, id, false);
} else {
struct ieee80211_bss_conf *bss_conf =
- iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, true);
+ iwl_mvm_rcu_fw_link_id_to_link_conf(mvm, id, false);
if (!bss_conf)
- goto out;
+ return;
vif = bss_conf->vif;
+ link_id = bss_conf->link_id;
}
IWL_DEBUG_INFO(mvm,
@@ -1620,7 +1629,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
le32_to_cpu(mb->num_expected_beacons));
if (!vif)
- goto out;
+ return;
mac_type = iwl_mvm_get_mac_type(vif);
@@ -1647,6 +1656,10 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
"missed_beacons:%d, missed_beacons_since_rx:%d\n",
rx_missed_bcon, rx_missed_bcon_since_rx);
}
+ } else if (rx_missed_bcon >= IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH &&
+ link_id >= 0 && hweight16(vif->active_links) > 1) {
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_MISSED_BEACON,
+ iwl_mvm_get_other_link(vif, link_id));
} else if (rx_missed_bcon_since_rx > IWL_MVM_MISSED_BEACONS_THRESHOLD) {
if (!iwl_mvm_has_new_tx_api(mvm))
ieee80211_beacon_loss(vif);
@@ -1660,7 +1673,7 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
trigger = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
FW_DBG_TRIGGER_MISSED_BEACONS);
if (!trigger)
- goto out;
+ return;
bcon_trig = (void *)trigger->data;
stop_trig_missed_bcon = le32_to_cpu(bcon_trig->stop_consec_missed_bcon);
@@ -1672,9 +1685,6 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm,
if (rx_missed_bcon_since_rx >= stop_trig_missed_bcon_since_rx ||
rx_missed_bcon >= stop_trig_missed_bcon)
iwl_fw_dbg_collect_trig(&mvm->fwrt, trigger, NULL);
-
-out:
- rcu_read_unlock();
}
void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 8f4b063d6243..835a05b91833 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -22,7 +22,7 @@
#include "mvm.h"
#include "sta.h"
#include "time-event.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "iwl-phy-db.h"
#include "testmode.h"
#include "fw/error-dump.h"
@@ -30,21 +30,28 @@
#include "iwl-nvm-parse.h"
#include "time-sync.h"
+#define IWL_MVM_LIMITS(ap) \
+ { \
+ .max = 1, \
+ .types = BIT(NL80211_IFTYPE_STATION), \
+ }, \
+ { \
+ .max = 1, \
+ .types = ap | \
+ BIT(NL80211_IFTYPE_P2P_CLIENT) | \
+ BIT(NL80211_IFTYPE_P2P_GO), \
+ }, \
+ { \
+ .max = 1, \
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE), \
+ }
+
static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
- },
+ IWL_MVM_LIMITS(0)
+};
+
+static const struct ieee80211_iface_limit iwl_mvm_limits_ap[] = {
+ IWL_MVM_LIMITS(BIT(NL80211_IFTYPE_AP))
};
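Expanded, the two limit arrays differ only in the middle entry; IWL_MVM_LIMITS(0) produces there (a sketch):

	{
		.max = 1,
		.types = 0 | /* no AP */
			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
			 BIT(NL80211_IFTYPE_P2P_GO),
	},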
static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
@@ -54,6 +61,12 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
.limits = iwl_mvm_limits,
.n_limits = ARRAY_SIZE(iwl_mvm_limits),
},
+ {
+ .num_different_channels = 1,
+ .max_interfaces = 3,
+ .limits = iwl_mvm_limits_ap,
+ .n_limits = ARRAY_SIZE(iwl_mvm_limits_ap),
+ },
};
static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
@@ -138,8 +151,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
resp->channels,
__le16_to_cpu(resp->mcc),
__le16_to_cpu(resp->geo_info),
- le32_to_cpu(resp->cap), resp_ver,
- mvm->fwrt.uats_enabled);
+ le32_to_cpu(resp->cap), resp_ver);
/* Store the return source id */
src_id = resp->source_id;
if (IS_ERR_OR_NULL(regd)) {
@@ -359,8 +371,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
/* Set this early since we need to have it for the check below */
if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable &&
!iwlwifi_mod_params.disable_11ax &&
- !iwlwifi_mod_params.disable_11be)
- hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+ !iwlwifi_mod_params.disable_11be) {
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ /* we handle this already earlier, but need it for MLO */
+ ieee80211_hw_set(hw, HANDLES_QUIET_CSA);
+ }
/* With MLD FW API, it tracks timing by itself,
* no need for any timing from the host
@@ -368,12 +383,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (!mvm->mld_api_is_used)
ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
- /* We should probably have this, but mac80211
- * currently doesn't support it for MLO.
- */
- if (!(hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO))
- ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
-
/*
* On older devices, enabling TX A-MSDU occasionally leads to
* something getting messed up, the command read from the FIFO
@@ -576,13 +585,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
- BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
- IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
+ BUILD_BUG_ON(IWL_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
+ IWL_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
- mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
+ mvm->max_scans = IWL_MAX_UMAC_SCANS;
else
- mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;
+ mvm->max_scans = IWL_MAX_LMAC_SCANS;
if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
hw->wiphy->bands[NL80211_BAND_2GHZ] =
@@ -651,7 +660,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;
if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL,
- IWL_FW_CMD_VER_UNKNOWN) == 3)
+ IWL_FW_CMD_VER_UNKNOWN) >= 3)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
if (fw_has_api(&mvm->fw->ucode_capa,
@@ -720,10 +729,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
+ ieee80211_hw_set(hw, DISALLOW_PUNCTURING_5GHZ);
+
#ifdef CONFIG_PM_SLEEP
if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) &&
- mvm->trans->ops->d3_suspend &&
- mvm->trans->ops->d3_resume &&
device_can_wakeup(mvm->trans->dev)) {
mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_DISCONNECT |
@@ -818,7 +827,7 @@ void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
}
if (offchannel &&
- !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
+ !test_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status) &&
!test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
goto drop;
@@ -903,6 +912,8 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
&mvmtxq->state) &&
!test_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT,
&mvmtxq->state) &&
+ !test_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA,
+ &mvmtxq->state) &&
!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) {
skb = ieee80211_tx_dequeue(hw, txq);
@@ -1097,15 +1108,23 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
spin_unlock_bh(&mvm->time_event_lock);
- memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+
+ mvmvif->bf_enabled = false;
+ mvmvif->ba_enabled = false;
mvmvif->ap_sta = NULL;
+ mvmvif->esr_active = false;
+ vif->driver_flags &= ~IEEE80211_VIF_EML_ACTIVE;
+
for_each_mvm_vif_valid_link(mvmvif, link_id) {
mvmvif->link[link_id]->ap_sta_id = IWL_MVM_INVALID_STA;
mvmvif->link[link_id]->fw_link_id = IWL_MVM_FW_LINK_ID_INVALID;
mvmvif->link[link_id]->phy_ctxt = NULL;
mvmvif->link[link_id]->active = 0;
mvmvif->link[link_id]->igtk = NULL;
+ memset(&mvmvif->link[link_id]->bf_data, 0,
+ sizeof(mvmvif->link[link_id]->bf_data));
}
probe_data = rcu_dereference_protected(mvmvif->deflink.probe_resp_data,
@@ -1115,6 +1134,39 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
RCU_INIT_POINTER(mvmvif->deflink.probe_resp_data, NULL);
}
+static void iwl_mvm_cleanup_sta_iterator(void *data, struct ieee80211_sta *sta)
+{
+ struct iwl_mvm *mvm = data;
+ struct iwl_mvm_sta *mvm_sta;
+ struct ieee80211_vif *vif;
+ int link_id;
+
+ mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvm_sta->vif;
+
+ if (!sta->valid_links)
+ return;
+
+ for (link_id = 0; link_id < ARRAY_SIZE((sta)->link); link_id++) {
+ struct iwl_mvm_link_sta *mvm_link_sta;
+
+ mvm_link_sta =
+ rcu_dereference_check(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ if (mvm_link_sta && !(vif->active_links & BIT(link_id))) {
+ /*
+ * We have a link STA but the link is inactive in
+ * mac80211. This will happen if we failed to
+ * deactivate the link but mac80211 roll back the
+ * deactivation of the link.
+ * Delete the stale data to avoid issues later on.
+ */
+ iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
+ link_id, false);
+ }
+ }
+}
+
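The station iterator added above sweeps every per-link entry and frees the ones whose bit is clear in mac80211's active_links bitmap, covering the rollback case described in the comment. A minimal standalone C model of that mask-driven sweep, where struct sta, struct link_sta and cleanup_stale_links are invented stand-ins for the mvm types, not kernel API:

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_LINKS 15

    struct link_sta {
            int id; /* stand-in for per-link driver state */
    };

    struct sta {
            unsigned int active_links;        /* bitmap kept by mac80211 */
            struct link_sta *link[MAX_LINKS]; /* driver's per-link data  */
    };

    /* free any per-link entry whose bit is clear in active_links */
    static void cleanup_stale_links(struct sta *sta)
    {
            for (int link_id = 0; link_id < MAX_LINKS; link_id++) {
                    if (sta->link[link_id] &&
                        !(sta->active_links & (1U << link_id))) {
                            free(sta->link[link_id]);
                            sta->link[link_id] = NULL;
                    }
            }
    }

    int main(void)
    {
            struct sta s = { .active_links = 1U << 0 };

            s.link[0] = calloc(1, sizeof(*s.link[0]));
            s.link[1] = calloc(1, sizeof(*s.link[1])); /* stale after rollback */
            cleanup_stale_links(&s);
            printf("link 1 stale entry freed: %s\n", s.link[1] ? "no" : "yes");
            free(s.link[0]);
            return 0;
    }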
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
iwl_mvm_stop_device(mvm);
@@ -1137,6 +1189,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
*/
ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);
+ /* clean up stations, as links may be gone after the restart */
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ iwl_mvm_cleanup_sta_iterator, mvm);
+
mvm->p2p_device_vif = NULL;
iwl_mvm_reset_phy_ctxts(mvm);
@@ -1159,6 +1215,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
{
+ bool fast_resume = false;
int ret;
lockdep_assert_held(&mvm->mutex);
@@ -1184,6 +1241,30 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
mvm->nvm_data = NULL;
}
+#ifdef CONFIG_PM
+ /* fast_resume will be cleared by iwl_mvm_fast_resume */
+ fast_resume = mvm->fast_resume;
+
+ if (fast_resume) {
+ ret = iwl_mvm_fast_resume(mvm);
+ if (ret) {
+ iwl_mvm_stop_device(mvm);
+ /* iwl_mvm_up() will be called further down */
+ } else {
+ /*
+ * We clear IWL_MVM_STATUS_FIRMWARE_RUNNING upon
+ * mac_down() so that debugfs will stop honoring
+ * requests after we flush all the workers.
+ * Set the IWL_MVM_STATUS_FIRMWARE_RUNNING bit again
+ * now that we are back. This abuses the flag a
+ * bit, since the firmware was never actually
+ * stopped, but it still serves the purpose.
+ */
+ set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
+ }
+ }
+#endif /* CONFIG_PM */
+
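The shape of this start path is: try the fast path first, and if it fails, stop the device so the code below falls through to a clean full firmware load. A compact model of that fallback, with the firmware calls mocked out (fast_resume, stop_device and full_up are invented names for this sketch):

    #include <stdio.h>

    static int fast_resume(void)  { return -1; } /* pretend the fast path failed */
    static void stop_device(void) { puts("device stopped"); }
    static int full_up(void)      { puts("full firmware load"); return 0; }

    static int mac_start(int have_fast_resume)
    {
            int ret = 0;

            if (have_fast_resume) {
                    ret = fast_resume();
                    if (ret)
                            stop_device(); /* clean slate for the full load */
            }

            /* load the firmware if there was no fast path or it failed */
            if (!have_fast_resume || ret)
                    ret = full_up();

            return ret;
    }

    int main(void) { return mac_start(1); }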
if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
/*
* Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
@@ -1194,7 +1275,10 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
/* Clean up some internal and mac80211 state on restart */
iwl_mvm_restart_cleanup(mvm);
}
- ret = iwl_mvm_up(mvm);
+
+ /* we also want to load the firmware if fast_resume failed */
+ if (!fast_resume || ret)
+ ret = iwl_mvm_up(mvm);
iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT,
NULL);
@@ -1245,7 +1329,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
{
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
@@ -1261,8 +1345,6 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
* of packets the FW sent out, so we must reconnect.
*/
iwl_mvm_teardown_tdls_peers(mvm);
-
- mutex_unlock(&mvm->mutex);
}
void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
@@ -1279,7 +1361,7 @@ void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
}
}
-void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
+void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend)
{
lockdep_assert_held(&mvm->mutex);
@@ -1295,7 +1377,11 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
if (!iwl_mvm_has_new_station_api(mvm->fw))
iwl_mvm_rm_aux_sta(mvm);
- iwl_mvm_stop_device(mvm);
+ if (suspend &&
+ mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ iwl_mvm_fast_suspend(mvm);
+ else
+ iwl_mvm_stop_device(mvm);
iwl_mvm_async_handlers_purge(mvm);
/* async_handlers_list is empty and will stay empty: HW is stopped */
@@ -1328,10 +1414,17 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
}
}
-void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
+void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ /* Stop internal MLO scan, if running */
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_INT_MLO, false);
+ mutex_unlock(&mvm->mutex);
+
+ wiphy_work_cancel(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
+ wiphy_work_flush(mvm->hw->wiphy, &mvm->async_handlers_wiphy_wk);
flush_work(&mvm->async_handlers_wk);
flush_work(&mvm->add_stream_wk);
@@ -1357,7 +1450,7 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
iwl_mvm_mei_set_sw_rfkill_state(mvm);
mutex_lock(&mvm->mutex);
- __iwl_mvm_mac_stop(mvm);
+ __iwl_mvm_mac_stop(mvm, suspend);
mutex_unlock(&mvm->mutex);
/*
@@ -1399,7 +1492,9 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
- if (cmd_ver == 7)
+ if (cmd_ver == 8)
+ len = sizeof(cmd.v8);
+ else if (cmd_ver == 7)
len = sizeof(cmd.v7);
else if (cmd_ver == 6)
len = sizeof(cmd.v6);
@@ -1418,6 +1513,20 @@ int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd);
}
+static void iwl_mvm_post_csa_tx(void *data, struct ieee80211_sta *sta)
+{
+ struct ieee80211_hw *hw = data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+ iwl_mvm_mac_itxq_xmit(hw, sta->txq[i]);
+ }
+}
+
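iwl_mvm_post_csa_tx is the unblock half of the AP quiet-CSA handling: for each of the station's queues it clears the stop bit and immediately re-kicks the transmit path. A small C model of that clear-then-kick pattern (struct txq, xmit and post_csa_tx are illustrative stand-ins):

    #include <stdio.h>

    #define STOP_AP_CSA (1UL << 0)

    struct txq { unsigned long state; int id; };

    static void xmit(struct txq *q)
    {
            if (q->state & STOP_AP_CSA)
                    return; /* still blocked, nothing to do */
            printf("queue %d flushed\n", q->id);
    }

    /* mirror of the post-CSA unblock: clear the bit, then kick the queue */
    static void post_csa_tx(struct txq *q, int n)
    {
            for (int i = 0; i < n; i++) {
                    q[i].state &= ~STOP_AP_CSA;
                    xmit(&q[i]);
            }
    }

    int main(void)
    {
            struct txq qs[2] = { { STOP_AP_CSA, 0 }, { STOP_AP_CSA, 1 } };

            post_csa_tx(qs, 2);
            return 0;
    }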
int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf)
@@ -1434,6 +1543,7 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
u8 ap_sta_id = mvmvif->link[link_id]->ap_sta_id;
mvmvif->csa_bcn_pending = false;
+ mvmvif->csa_blocks_tx = false;
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, ap_sta_id);
if (WARN_ON(!mvmsta)) {
@@ -1455,6 +1565,18 @@ int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
iwl_mvm_stop_session_protection(mvm, vif);
}
+ } else if (vif->type == NL80211_IFTYPE_AP && mvmvif->csa_blocks_tx) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(vif->txq);
+
+ clear_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+
+ local_bh_disable();
+ iwl_mvm_mac_itxq_xmit(hw, vif->txq);
+ ieee80211_iterate_stations_atomic(hw, iwl_mvm_post_csa_tx, hw);
+ local_bh_enable();
+
+ mvmvif->csa_blocks_tx = false;
}
mvmvif->ps_disabled = false;
@@ -1564,6 +1686,62 @@ static int iwl_mvm_alloc_bcast_mcast_sta(struct iwl_mvm *mvm,
IWL_STA_MULTICAST);
}
+static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm_vif *mvmvif =
+ container_of(wk, struct iwl_mvm_vif, prevent_esr_done_wk.work);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct ieee80211_vif *vif =
+ container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+
+ guard(mvm)(mvm);
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_PREVENTION);
+}
+
+static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mvm_vif *mvmvif = container_of(wk, struct iwl_mvm_vif,
+ mlo_int_scan_wk.work);
+ struct ieee80211_vif *vif =
+ container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+
+ guard(mvm)(mvmvif->mvm);
+ iwl_mvm_int_mlo_scan(mvmvif->mvm, vif);
+}
+
+static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk)
+{
+ struct iwl_mvm_vif *mvmvif =
+ container_of(wk, struct iwl_mvm_vif, unblock_esr_tpt_wk);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct ieee80211_vif *vif =
+ container_of((void *)mvmvif, struct ieee80211_vif, drv_priv);
+
+ guard(mvm)(mvm);
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT);
+}
+
+void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return;
+
+ INIT_DELAYED_WORK(&mvmvif->csa_work,
+ iwl_mvm_channel_switch_disconnect_wk);
+
+ wiphy_delayed_work_init(&mvmvif->prevent_esr_done_wk,
+ iwl_mvm_prevent_esr_done_wk);
+
+ wiphy_delayed_work_init(&mvmvif->mlo_int_scan_wk,
+ iwl_mvm_mlo_int_scan_wk);
+
+ wiphy_work_init(&mvmvif->unblock_esr_tpt_wk,
+ iwl_mvm_unblock_esr_tpt);
+}
+
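Note the early return on IWL_MVM_STATUS_IN_HW_RESTART: a restart reuses the existing vif, so re-initializing live work structs would clobber anything pending. A trivial sketch of that initialize-once guard, under the assumption that the restart flag is passed in (init_vif_works and struct vif are invented for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    struct vif { bool works_ready; };

    /* initialize per-vif work items exactly once; a HW restart reuses
     * the existing vif, so re-initializing would clobber pending work */
    static void init_vif_works(struct vif *vif, bool in_hw_restart)
    {
            if (in_hw_restart)
                    return;

            vif->works_ready = true;
            puts("work items initialized");
    }

    int main(void)
    {
            struct vif v = { 0 };

            init_vif_works(&v, false); /* first add: initialize      */
            init_vif_works(&v, true);  /* restart: keep them as-is   */
            return 0;
    }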
static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -1574,6 +1752,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
+ iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
+
mvmvif->mvm = mvm;
/* the first link always points to the default one */
@@ -1651,15 +1831,10 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
- if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
- vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
-
if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
mvm->p2p_device_vif = vif;
iwl_mvm_tcm_add_vif(mvm, vif);
- INIT_DELAYED_WORK(&mvmvif->csa_work,
- iwl_mvm_channel_switch_disconnect_wk);
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvm->monitor_on = true;
@@ -1697,6 +1872,8 @@ out:
void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
/*
* Flush the ROC worker which will flush the OFFCHANNEL queue.
@@ -1705,14 +1882,20 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
*/
flush_work(&mvm->roc_done_wk);
}
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->prevent_esr_done_wk);
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk);
+
+ wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
+
+ cancel_delayed_work_sync(&mvmvif->csa_work);
}
-/* This function is doing the common part of removing the interface for
- * both - MLD and non-MLD modes. Returns true if removing the interface
- * is done
- */
-static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -1760,21 +1943,10 @@ static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw,
mvm->noa_duration = 0;
}
#endif
- return true;
+ goto out;
}
iwl_mvm_power_update_mac(mvm);
- return false;
-}
-
-static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- if (iwl_mvm_mac_remove_interface_common(hw, vif))
- goto out;
/* Before the interface removal, mac80211 would cancel the ROC, and the
* ROC worker would be scheduled if needed. The worker would be flushed
@@ -1922,7 +2094,7 @@ void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* replace previous configuration */
kfree(mvm->mcast_filter_cmd);
@@ -1939,7 +2111,6 @@ void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
iwl_mvm_recalc_multicast(mvm);
out:
- mutex_unlock(&mvm->mutex);
*total_flags = 0;
}
@@ -1959,9 +2130,8 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
!vif->p2p)
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
- mutex_unlock(&mvm->mutex);
}
int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
@@ -2545,6 +2715,7 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
+ int link_id;
/* The firmware tracks the MU-MIMO group on its own.
* However, on HW restart we should restore this data.
@@ -2560,7 +2731,8 @@ void iwl_mvm_bss_info_changed_station_assoc(struct iwl_mvm *mvm,
iwl_mvm_recalc_multicast(mvm);
/* reset rssi values */
- mvmvif->bf_data.ave_beacon_signal = 0;
+ for_each_mvm_vif_valid_link(mvmvif, link_id)
+ mvmvif->link[link_id]->bf_data.ave_beacon_signal = 0;
iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_update_smps_on_active_links(mvm, vif, IWL_MVM_SMPS_REQ_TT,
@@ -2601,10 +2773,14 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
}
if (changes & BSS_CHANGED_CQM) {
- IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n");
- /* reset cqm events tracking */
- mvmvif->bf_data.last_cqm_event = 0;
- if (mvmvif->bf_data.bf_enabled) {
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[link_conf->link_id];
+
+ IWL_DEBUG_MAC80211(mvm, "CQM info_changed\n");
+ if (link_info)
+ link_info->bf_data.last_cqm_event = 0;
+
+ if (mvmvif->bf_enabled) {
/* FIXME: need to update per link when FW API will
* support it
*/
@@ -2617,6 +2793,13 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_BANDWIDTH)
iwl_mvm_update_link_smps(vif, link_conf);
+
+ if (changes & BSS_CHANGED_TPE) {
+ IWL_DEBUG_CALIB(mvm, "Changing TPE\n");
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf,
+ false);
+ }
}
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
@@ -2666,6 +2849,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc) {
+ mvmvif->session_prot_connection_loss = false;
+
/* clear statistics to get clean beacon counter */
iwl_mvm_request_statistics(mvm, true);
for_each_mvm_vif_valid_link(mvmvif, i)
@@ -3000,7 +3185,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_stop_ap_ibss_common(mvm, vif);
@@ -3030,8 +3215,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_power_update_mac(mvm);
iwl_mvm_mac_ctxt_remove(mvm, vif);
-
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_stop_ap(struct ieee80211_hw *hw,
@@ -3086,7 +3269,7 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle)
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
@@ -3113,25 +3296,19 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->txpower);
iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
}
-
- mutex_unlock(&mvm->mutex);
}
int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
if (hw_req->req.n_channels == 0 ||
hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels)
return -EINVAL;
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies);
}
void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
@@ -3139,7 +3316,7 @@ void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* Due to a race condition, it's possible that mac80211 asks
* us to stop a hw_scan when it's already stopped. This can
@@ -3150,8 +3327,6 @@ void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
*/
if (mvm->scan_status & IWL_MVM_SCAN_REGULAR)
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true);
-
- mutex_unlock(&mvm->mutex);
}
void
@@ -3320,7 +3495,7 @@ void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
* Since there's mvm->mutex here, no need to have RCU lock for
* mvm_sta->link access.
*/
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
for (link_id = 0; link_id < ARRAY_SIZE(mvm_sta->link); link_id++) {
struct iwl_mvm_link_sta *link_sta;
u32 sta_id;
@@ -3337,7 +3512,6 @@ void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw,
RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL);
}
}
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -3613,8 +3787,6 @@ static void iwl_mvm_rs_rate_init_all_links(struct iwl_mvm *mvm,
}
}
-#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16
-
static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -3816,15 +3988,29 @@ iwl_mvm_sta_state_assoc_to_authorized(struct iwl_mvm *mvm,
mvmvif->authorized = 1;
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ mvmvif->link_selection_res = vif->active_links;
+ mvmvif->link_selection_primary =
+ vif->active_links ? __ffs(vif->active_links) : 0;
+ }
+
callbacks->mac_ctxt_changed(mvm, vif, false);
iwl_mvm_mei_host_associated(mvm, vif, mvm_sta);
+ memset(&mvmvif->last_esr_exit, 0,
+ sizeof(mvmvif->last_esr_exit));
+
+ iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT, 0);
+
+ /* Block until the FW notification arrives */
+ iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW, 0);
+
/* when client is authorized (AP station marked as such),
- * try to enable more links
+ * try to enable the best link(s).
*/
if (vif->type == NL80211_IFTYPE_STATION &&
!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- iwl_mvm_mld_select_links(mvm, vif, false);
+ iwl_mvm_select_links(mvm, vif);
}
mvm_sta->authorized = true;
@@ -3868,9 +4054,22 @@ iwl_mvm_sta_state_authorized_to_assoc(struct iwl_mvm *mvm,
* time.
*/
mvmvif->authorized = 0;
+ mvmvif->link_selection_res = 0;
/* disable beacon filtering */
iwl_mvm_disable_beacon_filter(mvm, vif);
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->prevent_esr_done_wk);
+
+ wiphy_delayed_work_cancel(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk);
+
+ wiphy_work_cancel(mvm->hw->wiphy, &mvmvif->unblock_esr_tpt_wk);
+
+ /* No need for the periodic statistics anymore */
+ if (ieee80211_vif_is_mld(vif) && mvmvif->esr_active)
+ iwl_mvm_request_periodic_system_statistics(mvm, false);
}
return 0;
@@ -4057,12 +4256,8 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw,
* The exception is P2P_DEVICE interface which needs immediate update.
*/
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- int ret;
-
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
- mutex_unlock(&mvm->mutex);
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
}
return 0;
}
@@ -4071,11 +4266,14 @@ void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_prep_tx_info *info)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ if (info->was_assoc && !mvmvif->session_prot_connection_loss)
+ return;
+
+ guard(mvm)(mvm);
iwl_mvm_protect_assoc(mvm, vif, info->duration, info->link_id);
- mutex_unlock(&mvm->mutex);
}
void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw,
@@ -4088,9 +4286,8 @@ void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw,
if (info->success)
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_stop_session_protection(mvm, vif);
- mutex_unlock(&mvm->mutex);
}
int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
@@ -4100,20 +4297,12 @@ int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
-
- mutex_lock(&mvm->mutex);
-
- if (!vif->cfg.idle) {
- ret = -EBUSY;
- goto out;
- }
+ guard(mvm)(mvm);
- ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
+ if (!vif->cfg.idle)
+ return -EBUSY;
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED);
}
int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
@@ -4391,13 +4580,9 @@ int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
- mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key);
}
void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw,
@@ -4608,7 +4793,7 @@ static int iwl_mvm_roc_station(struct iwl_mvm *mvm,
if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) {
ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, vif, duration);
- } else if (fw_ver == 3) {
+ } else if (fw_ver >= 3) {
ret = iwl_mvm_roc_add_cmd(mvm, channel, vif, duration,
ROC_ACTIVITY_HOTSPOT);
} else {
@@ -4619,6 +4804,37 @@ static int iwl_mvm_roc_station(struct iwl_mvm *mvm,
return ret;
}
+static int iwl_mvm_roc_p2p(struct iwl_mvm *mvm,
+ struct ieee80211_channel *channel,
+ struct ieee80211_vif *vif,
+ int duration,
+ enum ieee80211_roc_type type)
+{
+ enum iwl_roc_activity activity;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ switch (type) {
+ case IEEE80211_ROC_TYPE_NORMAL:
+ activity = ROC_ACTIVITY_P2P_DISC;
+ break;
+ case IEEE80211_ROC_TYPE_MGMT_TX:
+ activity = ROC_ACTIVITY_P2P_NEG;
+ break;
+ default:
+ WARN_ONCE(1, "Got an invalid P2P ROC type\n");
+ return -EINVAL;
+ }
+
+ ret = iwl_mvm_mld_add_aux_sta(mvm,
+ iwl_mvm_get_lmac_id(mvm, channel->band));
+ if (ret)
+ return ret;
+
+ return iwl_mvm_roc_add_cmd(mvm, channel, vif, duration, activity);
+}
+
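The switch in iwl_mvm_roc_p2p is a straight mapping from the mac80211 ROC type onto a firmware activity id, with the default arm turning an unexpected type into -EINVAL. The same shape in a standalone C sketch (the enum names here are invented, not the kernel's):

    #include <stdio.h>

    enum roc_type     { ROC_TYPE_NORMAL, ROC_TYPE_MGMT_TX };
    enum roc_activity { ACT_P2P_DISC, ACT_P2P_NEG, ACT_INVALID };

    /* map the mac80211-style ROC type onto a firmware activity id */
    static enum roc_activity roc_activity_for(enum roc_type type)
    {
            switch (type) {
            case ROC_TYPE_NORMAL:
                    return ACT_P2P_DISC;  /* plain remain-on-channel */
            case ROC_TYPE_MGMT_TX:
                    return ACT_P2P_NEG;   /* negotiation frames follow */
            default:
                    return ACT_INVALID;   /* caller turns this into -EINVAL */
            }
    }

    int main(void)
    {
            printf("mgmt-tx maps to activity %d\n",
                   roc_activity_for(ROC_TYPE_MGMT_TX));
            return 0;
    }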
static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_channel *channel)
@@ -4672,6 +4888,7 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct iwl_mvm_roc_ops *ops)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
u32 lmac_id;
int ret;
@@ -4684,7 +4901,14 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
*/
flush_work(&mvm->roc_done_wk);
- mutex_lock(&mvm->mutex);
+ if (!IS_ERR_OR_NULL(bss_vif)) {
+ ret = iwl_mvm_block_esr_sync(mvm, bss_vif,
+ IWL_MVM_ESR_BLOCKED_ROC);
+ if (ret)
+ return ret;
+ }
+
+ guard(mvm)(mvm);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@@ -4694,30 +4918,29 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ret = ops->add_aux_sta_for_hs20(mvm, lmac_id);
if (!ret)
ret = iwl_mvm_roc_station(mvm, channel, vif, duration);
- goto out_unlock;
+ return ret;
case NL80211_IFTYPE_P2P_DEVICE:
/* handle below */
break;
default:
IWL_ERR(mvm, "ROC: Invalid vif type=%u\n", vif->type);
- ret = -EINVAL;
- goto out_unlock;
+ return -EINVAL;
}
+ if (iwl_mvm_has_p2p_over_aux(mvm)) {
+ ret = iwl_mvm_roc_p2p(mvm, channel, vif, duration, type);
+ return ret;
+ }
ret = iwl_mvm_p2p_find_phy_ctxt(mvm, vif, channel);
if (ret)
- goto out_unlock;
+ return ret;
ret = ops->link(mvm, vif);
if (ret)
- goto out_unlock;
+ return ret;
- ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
-out_unlock:
- mutex_unlock(&mvm->mutex);
- IWL_DEBUG_MAC80211(mvm, "leave\n");
- return ret;
+ return iwl_mvm_start_p2p_roc(mvm, vif, duration, type);
}
int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
@@ -4727,9 +4950,7 @@ int iwl_mvm_cancel_roc(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(mvm, "enter\n");
- mutex_lock(&mvm->mutex);
iwl_mvm_stop_roc(mvm, vif);
- mutex_unlock(&mvm->mutex);
IWL_DEBUG_MAC80211(mvm, "leave\n");
return 0;
@@ -4800,13 +5021,9 @@ int iwl_mvm_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
-
- mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_add_chanctx(mvm, ctx);
- mutex_unlock(&mvm->mutex);
- return ret;
+ guard(mvm)(mvm);
+ return __iwl_mvm_add_chanctx(mvm, ctx);
}
static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm,
@@ -4825,9 +5042,8 @@ void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
__iwl_mvm_remove_chanctx(mvm, ctx);
- mutex_unlock(&mvm->mutex);
}
void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
@@ -4847,26 +5063,23 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
phy_ctxt->ref, changed))
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* we are only changing the min_width, may be a noop */
if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) {
if (phy_ctxt->width == def->width)
- goto out_unlock;
+ return;
/* we are just toggling between 20_NOHT and 20 */
if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 &&
def->width <= NL80211_CHAN_WIDTH_20)
- goto out_unlock;
+ return;
}
iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, &ctx->ap,
ctx->rx_chains_static,
ctx->rx_chains_dynamic);
-
-out_unlock:
- mutex_unlock(&mvm->mutex);
}
/*
@@ -4986,6 +5199,10 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm,
}
iwl_mvm_update_quotas(mvm, false, NULL);
+
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf,
+ false);
}
goto out;
@@ -5005,13 +5222,9 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
-
- mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
- mutex_unlock(&mvm->mutex);
- return ret;
+ guard(mvm)(mvm);
+ return __iwl_mvm_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
}
/*
@@ -5099,9 +5312,8 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
__iwl_mvm_unassign_vif_chanctx(mvm, vif, link_conf, ctx, false);
- mutex_unlock(&mvm->mutex);
}
static int
@@ -5111,7 +5323,7 @@ iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
{
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
vifs[0].old_ctx, true);
__iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx);
@@ -5134,7 +5346,7 @@ iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm,
if (iwl_mvm_phy_ctx_count(mvm) > 1)
iwl_mvm_teardown_tdls_peers(mvm);
- goto out;
+ return 0;
out_remove:
__iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx);
@@ -5151,15 +5363,11 @@ out_reassign:
goto out_restart;
}
- goto out;
+ return ret;
out_restart:
/* things keep failing, better restart the hw */
iwl_mvm_nic_restart(mvm, false);
-
-out:
- mutex_unlock(&mvm->mutex);
-
return ret;
}
@@ -5170,7 +5378,7 @@ iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
{
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
vifs[0].old_ctx, true);
@@ -5182,7 +5390,7 @@ iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm,
goto out_reassign;
}
- goto out;
+ return 0;
out_reassign:
if (ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf,
@@ -5191,15 +5399,11 @@ out_reassign:
goto out_restart;
}
- goto out;
+ return ret;
out_restart:
/* things keep failing, better restart the hw */
iwl_mvm_nic_restart(mvm, false);
-
-out:
- mutex_unlock(&mvm->mutex);
-
return ret;
}
@@ -5326,13 +5530,9 @@ int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw,
void *data, int len)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int err;
-
- mutex_lock(&mvm->mutex);
- err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
- mutex_unlock(&mvm->mutex);
- return err;
+ guard(mvm)(mvm);
+ return __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len);
}
#endif
@@ -5398,7 +5598,7 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm,
if (chsw->block_tx)
iwl_mvm_csa_client_absent(mvm, vif);
- if (mvmvif->bf_data.bf_enabled) {
+ if (mvmvif->bf_enabled) {
int ret = iwl_mvm_disable_beacon_filter(mvm, vif);
if (ret)
@@ -5411,19 +5611,32 @@ static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm,
return 0;
}
+static void iwl_mvm_csa_block_txqs(void *data, struct ieee80211_sta *sta)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
+ struct iwl_mvm_txq *mvmtxq =
+ iwl_mvm_txq_from_mac80211(sta->txq[i]);
+
+ set_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+ }
+}
+
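iwl_mvm_csa_block_txqs is the blocking half that pairs with iwl_mvm_post_csa_tx above: it sets the stop bit on every queue of a station ahead of the quiet period. A minimal model of that per-station sweep (struct sta and csa_block_txqs below are illustrative stand-ins):

    #include <stdio.h>

    #define STOP_AP_CSA (1UL << 0)
    #define NQ 4

    struct sta { unsigned long txq_state[NQ]; };

    /* mark every queue of a station as blocked for the CSA quiet period */
    static void csa_block_txqs(struct sta *sta)
    {
            for (int i = 0; i < NQ; i++)
                    sta->txq_state[i] |= STOP_AP_CSA;
    }

    int main(void)
    {
            struct sta s = { 0 };

            csa_block_txqs(&s);
            printf("queue 0 blocked: %lu\n", s.txq_state[0] & STOP_AP_CSA);
            return 0;
    }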
#define IWL_MAX_CSA_BLOCK_TX 1500
-int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
+int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw)
{
- struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct ieee80211_vif *csa_vif;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_txq *mvmtxq;
int ret;
- mutex_lock(&mvm->mutex);
+ lockdep_assert_held(&mvm->mutex);
mvmvif->csa_failed = false;
+ mvmvif->csa_blocks_tx = false;
IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n",
chsw->chandef.center_freq1);
@@ -5438,30 +5651,38 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
rcu_dereference_protected(mvm->csa_vif,
lockdep_is_held(&mvm->mutex));
if (WARN_ONCE(csa_vif && csa_vif->bss_conf.csa_active,
- "Another CSA is already in progress")) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ "Another CSA is already in progress"))
+ return -EBUSY;
/* we still didn't unblock tx. prevent new CS meanwhile */
if (rcu_dereference_protected(mvm->csa_tx_blocked_vif,
- lockdep_is_held(&mvm->mutex))) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ lockdep_is_held(&mvm->mutex)))
+ return -EBUSY;
rcu_assign_pointer(mvm->csa_vif, vif);
if (WARN_ONCE(mvmvif->csa_countdown,
- "Previous CSA countdown didn't complete")) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ "Previous CSA countdown didn't complete"))
+ return -EBUSY;
mvmvif->csa_target_freq = chsw->chandef.chan->center_freq;
+ if (!chsw->block_tx)
+ break;
+ /* no need to block in the driver otherwise - mac80211 will do it */
+ if (!ieee80211_hw_check(mvm->hw, HANDLES_QUIET_CSA))
+ break;
+
+ mvmvif->csa_blocks_tx = true;
+ mvmtxq = iwl_mvm_txq_from_mac80211(vif->txq);
+ set_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state);
+ ieee80211_iterate_stations_atomic(mvm->hw,
+ iwl_mvm_csa_block_txqs,
+ NULL);
break;
case NL80211_IFTYPE_STATION:
+ mvmvif->csa_blocks_tx = chsw->block_tx;
+
/*
* In the new flow FW is in charge of timing the switch so there
* is no need for all of this
@@ -5476,10 +5697,8 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
* we don't know the dtim period. In this case, the firmware can't
* track the beacons.
*/
- if (!vif->cfg.assoc || !vif->bss_conf.dtim_period) {
- ret = -EBUSY;
- goto out_unlock;
- }
+ if (!vif->cfg.assoc || !vif->bss_conf.dtim_period)
+ return -EBUSY;
if (chsw->delay > IWL_MAX_CSA_BLOCK_TX &&
hweight16(vif->valid_links) <= 1)
@@ -5501,7 +5720,7 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) {
ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw);
if (ret)
- goto out_unlock;
+ return ret;
} else {
iwl_mvm_schedule_client_csa(mvm, vif, chsw);
}
@@ -5517,17 +5736,24 @@ int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
ret = iwl_mvm_power_update_ps(mvm);
if (ret)
- goto out_unlock;
+ return ret;
/* we won't be on this channel any longer */
iwl_mvm_teardown_tdls_peers(mvm);
-out_unlock:
- mutex_unlock(&mvm->mutex);
-
return ret;
}
+static int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ guard(mvm)(mvm);
+ return iwl_mvm_pre_channel_switch(mvm, vif, chsw);
+}
+
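This wrapper shows the pattern repeated throughout the patch: mutex_lock/unlock pairs become guard(mvm)(mvm), which releases the lock automatically when the scope ends, so every early return path stays correct. In the kernel this comes from DEFINE_GUARD on top of __attribute__((cleanup)); a freestanding illustration of the underlying mechanism using pthreads, where guard_t and scoped_lock are invented names for this sketch:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    typedef pthread_mutex_t *guard_t;

    static void guard_release(guard_t *g)
    {
            pthread_mutex_unlock(*g);
            puts("lock released at scope exit");
    }

    /* acquire now, release automatically when the variable leaves scope */
    #define scoped_lock(m) \
            __attribute__((cleanup(guard_release))) guard_t _g = (m); \
            pthread_mutex_lock(_g)

    static int do_work(void)
    {
            scoped_lock(&lock);
            puts("working under the lock");
            return 0; /* early returns no longer leak the lock */
    }

    int main(void) { return do_work(); }

Compile with -pthread; the cleanup handler runs after the return value is computed, which is exactly why the converted functions above can return directly from inside the locked region.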
void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw)
@@ -5577,16 +5803,14 @@ void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
}
mvmvif->csa_count = chsw->count;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (mvmvif->csa_failed)
- goto out_unlock;
+ return;
WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP,
CHANNEL_SWITCH_TIME_EVENT_CMD),
0, sizeof(cmd), &cmd));
-out_unlock:
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
@@ -5595,17 +5819,16 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
if (!iwl_mvm_has_new_tx_api(mvm)) {
if (drop) {
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_flush_tx_path(mvm,
iwl_mvm_flushable_queues(mvm) & queues);
- mutex_unlock(&mvm->mutex);
} else {
iwl_trans_wait_tx_queues_empty(mvm->trans, queues);
}
return;
}
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
struct ieee80211_sta *sta;
@@ -5620,14 +5843,13 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop)
iwl_mvm_wait_sta_queues_empty(mvm,
iwl_mvm_sta_from_mac80211(sta));
}
- mutex_unlock(&mvm->mutex);
}
void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- struct iwl_mvm_vif *mvmvif;
struct iwl_mvm_sta *mvmsta;
struct ieee80211_sta *sta;
bool ap_sta_done = false;
@@ -5639,11 +5861,22 @@ void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return;
}
+ if (!drop && hweight16(vif->active_links) <= 1) {
+ int link_id = vif->active_links ? __ffs(vif->active_links) : 0;
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = wiphy_dereference(hw->wiphy,
+ vif->link_conf[link_id]);
+ if (WARN_ON(!link_conf))
+ return;
+ if (link_conf->csa_active && mvmvif->csa_blocks_tx)
+ drop = true;
+ }
+
/* Make sure we're done with the deferred traffic before flushing */
flush_work(&mvm->add_stream_wk);
mutex_lock(&mvm->mutex);
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
/* flush the AP-station and all TDLS peers */
for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
@@ -5692,7 +5925,7 @@ void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_link_sta *link_sta;
int link_id;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
for_each_sta_active_link(vif, sta, link_sta, link_id) {
mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_id],
lockdep_is_held(&mvm->mutex));
@@ -5703,14 +5936,71 @@ void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mvmsta->tfd_queue_msk))
IWL_ERR(mvm, "flush request fail\n");
}
+}
+
+static int iwl_mvm_mac_get_acs_survey(struct iwl_mvm *mvm, int idx,
+ struct survey_info *survey)
+{
+ int chan_idx;
+ enum nl80211_band band;
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+
+ if (!mvm->acs_survey) {
+ ret = -ENOENT;
+ goto out;
+ }
+
+ /* Find and return the next entry that has a non-zero active time */
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband =
+ mvm->hw->wiphy->bands[band];
+
+ if (!sband)
+ continue;
+
+ for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
+ struct iwl_mvm_acs_survey_channel *info =
+ &mvm->acs_survey->bands[band][chan_idx];
+
+ if (!info->time)
+ continue;
+
+ /* Found (the next) channel to report */
+ survey->channel = &sband->channels[chan_idx];
+ survey->filled = SURVEY_INFO_TIME |
+ SURVEY_INFO_TIME_BUSY |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_TX;
+ survey->time = info->time;
+ survey->time_busy = info->time_busy;
+ survey->time_rx = info->time_rx;
+ survey->time_tx = info->time_tx;
+ survey->noise = info->noise;
+ if (survey->noise < 0)
+ survey->filled |= SURVEY_INFO_NOISE_DBM;
+
+ /* Clear time so that channel is only reported once */
+ info->time = 0;
+
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ret = -ENOENT;
+
+out:
mutex_unlock(&mvm->mutex);
+
+ return ret;
}
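The ACS survey getter works as a destructive cursor: each call walks the bands, returns the first channel with accumulated airtime, and zeroes that channel's time so the next call advances past it; when nothing is left it returns -ENOENT. A self-contained C model of that walk (struct chan_stats and next_survey_entry are invented for this sketch):

    #include <stdio.h>

    #define NCH 4

    struct chan_stats { unsigned time, busy; };

    /* return the next channel with accumulated airtime, then zero it so
     * successive calls walk forward - mirroring the idx > 0 survey path */
    static int next_survey_entry(struct chan_stats *st, unsigned *time_out)
    {
            for (int i = 0; i < NCH; i++) {
                    if (!st[i].time)
                            continue;
                    *time_out = st[i].time;
                    st[i].time = 0; /* report each channel only once */
                    return i;
            }
            return -1; /* -ENOENT equivalent: iteration finished */
    }

    int main(void)
    {
            struct chan_stats st[NCH] = { {0, 0}, {120, 30}, {0, 0}, {80, 10} };
            unsigned t;
            int idx;

            while ((idx = next_survey_entry(st, &t)) >= 0)
                    printf("chan %d: time %u\n", idx, t);
            return 0;
    }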
int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret = 0;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
WIDE_ID(SYSTEM_GROUP,
SYSTEM_STATISTICS_CMD),
@@ -5718,20 +6008,25 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
memset(survey, 0, sizeof(*survey));
- /* only support global statistics right now */
- if (idx != 0)
- return -ENOENT;
-
if (!fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS))
return -ENOENT;
- mutex_lock(&mvm->mutex);
+ /*
+ * Return the beacon stats at index zero and pass subsequent indices
+ * on to the function returning the full survey, most likely for ACS
+ * (Automatic Channel Selection).
+ */
+ if (idx > 0)
+ return iwl_mvm_mac_get_acs_survey(mvm, idx - 1, survey);
+
+ guard(mvm)(mvm);
if (iwl_mvm_firmware_running(mvm)) {
- ret = iwl_mvm_request_statistics(mvm, false);
+ int ret = iwl_mvm_request_statistics(mvm, false);
+
if (ret)
- goto out;
+ return ret;
}
survey->filled = SURVEY_INFO_TIME_RX |
@@ -5747,7 +6042,7 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
/* the new fw api doesn't support the following fields */
if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
- goto out;
+ return 0;
survey->filled |= SURVEY_INFO_TIME |
SURVEY_INFO_TIME_SCAN;
@@ -5759,9 +6054,7 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
mvm->radio_stats.on_time_scan;
do_div(survey->time_scan, USEC_PER_MSEC);
- out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return 0;
}
static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
@@ -5928,13 +6221,13 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
if (!vif->cfg.assoc)
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (mvmvif->deflink.ap_sta_id != mvmsta->deflink.sta_id)
- goto unlock;
+ return;
if (iwl_mvm_request_statistics(mvm, false))
- goto unlock;
+ return;
sinfo->rx_beacon = 0;
for_each_mvm_vif_valid_link(mvmvif, i)
@@ -5948,8 +6241,6 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
mvmvif->deflink.beacon_stats.avg_signal;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG);
}
- unlock:
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm,
@@ -6092,7 +6383,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
.len[0] = sizeof(cmd),
.data[1] = data,
.len[1] = size,
- .flags = sync ? 0 : CMD_ASYNC,
+ .flags = CMD_SEND_IN_RFKILL | (sync ? 0 : CMD_ASYNC),
};
int ret;
@@ -6117,11 +6408,9 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
if (sync) {
lockdep_assert_held(&mvm->mutex);
ret = wait_event_timeout(mvm->rx_sync_waitq,
- READ_ONCE(mvm->queue_sync_state) == 0 ||
- iwl_mvm_is_radio_hw_killed(mvm),
+ READ_ONCE(mvm->queue_sync_state) == 0,
SYNC_RX_QUEUE_TIMEOUT);
- WARN_ONCE(!ret && !iwl_mvm_is_radio_hw_killed(mvm),
- "queue sync: failed to sync, state is 0x%lx, cookie %d\n",
+ WARN_ONCE(!ret, "queue sync: failed to sync, state is 0x%lx, cookie %d\n",
mvm->queue_sync_state,
mvm->queue_sync_cookie);
}
@@ -6137,9 +6426,8 @@ void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0);
- mutex_unlock(&mvm->mutex);
}
int
@@ -6175,13 +6463,9 @@ int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct cfg80211_pmsr_request *request)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_ftm_start(mvm, vif, request);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_ftm_start(mvm, vif, request);
}
void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -6189,9 +6473,8 @@ void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_ftm_abort(mvm, request);
- mutex_unlock(&mvm->mutex);
}
static bool iwl_mvm_can_hw_csum(struct sk_buff *skb)
@@ -6226,7 +6509,6 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
u32 protocols = 0;
- int ret;
/* HW timestamping is only supported for a specific station */
if (!hwts->macaddr)
@@ -6236,11 +6518,8 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw,
protocols =
IWL_TIME_SYNC_PROTOCOL_TM | IWL_TIME_SYNC_PROTOCOL_FTM;
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_time_sync_config(mvm, hwts->macaddr, protocols);
- mutex_unlock(&mvm->mutex);
-
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_time_sync_config(mvm, hwts->macaddr, protocols);
}
const struct ieee80211_ops iwl_mvm_hw_ops = {
@@ -6297,7 +6576,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.set_tim = iwl_mvm_set_tim,
.channel_switch = iwl_mvm_channel_switch,
- .pre_channel_switch = iwl_mvm_pre_channel_switch,
+ .pre_channel_switch = iwl_mvm_mac_pre_channel_switch,
.post_channel_switch = iwl_mvm_post_channel_switch,
.abort_channel_switch = iwl_mvm_abort_channel_switch,
.channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 084314bf6f36..3c99396ad369 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -12,7 +12,9 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
int ret;
int i;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
+
+ iwl_mvm_mac_init_mvmvif(mvm, mvmvif);
mvmvif->mvm = mvm;
@@ -30,7 +32,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
/* Allocate resources for the MAC context, and add it to the fw */
ret = iwl_mvm_mac_ctxt_init(mvm, vif);
if (ret)
- goto out_unlock;
+ return ret;
rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);
@@ -44,7 +46,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
ret = iwl_mvm_mld_mac_ctxt_add(mvm, vif);
if (ret)
- goto out_unlock;
+ return ret;
/* beacon filtering */
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
@@ -73,8 +75,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
goto out_free_bf;
iwl_mvm_tcm_add_vif(mvm, vif);
- INIT_DELAYED_WORK(&mvmvif->csa_work,
- iwl_mvm_channel_switch_disconnect_wk);
if (vif->type == NL80211_IFTYPE_MONITOR) {
mvm->monitor_on = true;
@@ -92,7 +92,10 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
mvm->csme_vif = vif;
}
- goto out_unlock;
+ if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5)
+ vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW;
+
+ return 0;
out_free_bf:
if (mvm->bf_allowed_vif == mvmvif) {
@@ -103,9 +106,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw,
out_remove_mac:
mvmvif->link[0] = NULL;
iwl_mvm_mld_mac_ctxt_remove(mvm, vif);
- out_unlock:
- mutex_unlock(&mvm->mutex);
-
return ret;
}
@@ -122,7 +122,7 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC))
iwl_mvm_tcm_rm_vif(mvm, vif);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (vif == mvm->csme_vif) {
iwl_mei_set_netdev(NULL);
@@ -185,27 +185,47 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw,
mvm->monitor_on = false;
__clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags);
}
-
- mutex_unlock(&mvm->mutex);
}
-static unsigned int iwl_mvm_mld_count_active_links(struct ieee80211_vif *vif)
+static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif)
{
unsigned int n_active = 0;
int i;
for (i = 0; i < IEEE80211_MLD_MAX_NUM_LINKS; i++) {
- struct ieee80211_bss_conf *link_conf;
-
- link_conf = link_conf_dereference_protected(vif, i);
- if (link_conf &&
- rcu_access_pointer(link_conf->chanctx_conf))
+ if (mvmvif->link[i] && mvmvif->link[i]->phy_ctxt)
n_active++;
}
return n_active;
}
+static void iwl_mvm_restart_mpdu_count(struct iwl_mvm *mvm,
+ struct iwl_mvm_vif *mvmvif)
+{
+ struct ieee80211_sta *ap_sta = mvmvif->ap_sta;
+ struct iwl_mvm_sta *mvmsta;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!ap_sta)
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
+ if (!mvmsta->mpdu_counters)
+ return;
+
+ for (int q = 0; q < mvm->trans->num_rx_queues; q++) {
+ spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
+ memset(mvmsta->mpdu_counters[q].per_link, 0,
+ sizeof(mvmsta->mpdu_counters[q].per_link));
+ mvmsta->mpdu_counters[q].window_start = jiffies;
+ spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
+ }
+
+ IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n");
+}
+
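iwl_mvm_restart_mpdu_count resets the per-queue, per-link counters and restamps the window start, taking each queue's lock so it doesn't race with RX accounting. A minimal pthread model of that reset-under-lock loop (struct mpdu_counter and restart_mpdu_count are stand-ins, not the kernel structures):

    #include <pthread.h>
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    #define NQ 2
    #define NLINK 2

    struct mpdu_counter {
            pthread_mutex_t lock;
            unsigned long per_link[NLINK];
            time_t window_start;
    };

    /* zero every queue's per-link counters and restart the window, taking
     * each queue's lock so concurrent RX accounting stays consistent */
    static void restart_mpdu_count(struct mpdu_counter *c, int nq)
    {
            for (int q = 0; q < nq; q++) {
                    pthread_mutex_lock(&c[q].lock);
                    memset(c[q].per_link, 0, sizeof(c[q].per_link));
                    c[q].window_start = time(NULL);
                    pthread_mutex_unlock(&c[q].lock);
            }
    }

    int main(void)
    {
            struct mpdu_counter c[NQ] = {
                    { PTHREAD_MUTEX_INITIALIZER, { 5, 7 }, 0 },
                    { PTHREAD_MUTEX_INITIALIZER, { 3, 1 }, 0 },
            };

            restart_mpdu_count(c, NQ);
            printf("q0 link0 after reset: %lu\n", c[0].per_link[0]);
            return 0;
    }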
static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
@@ -233,6 +253,22 @@ static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm,
link->phy_ctxt->rlc_disabled = true;
}
+ if (vif->active_links == mvmvif->link_selection_res &&
+ !WARN_ON(!(vif->active_links & BIT(mvmvif->link_selection_primary))))
+ mvmvif->primary_link = mvmvif->link_selection_primary;
+ else
+ mvmvif->primary_link = __ffs(vif->active_links);
+
+ /* Needed for tracking RSSI */
+ iwl_mvm_request_periodic_system_statistics(mvm, true);
+
+ /*
+ * Restart the MPDU counters and the counting window, so when the
+ * statistics arrive (which is when we look at the counters) we
+ * will be at the end of the window.
+ */
+ iwl_mvm_restart_mpdu_count(mvm, mvmvif);
+
return ret;
}
@@ -245,18 +281,18 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
{
u16 *phy_ctxt_id = (u16 *)ctx->drv_priv;
struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id];
- unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
unsigned int link_id = link_conf->link_id;
int ret;
- /* if the assigned one was not counted yet, count it now */
- if (!rcu_access_pointer(link_conf->chanctx_conf))
- n_active++;
-
if (WARN_ON_ONCE(!mvmvif->link[link_id]))
return -EINVAL;
+ /* if the assigned one was not counted yet, count it now */
+ if (!mvmvif->link[link_id]->phy_ctxt)
+ n_active++;
+
/* mac parameters such as HE support can change at this stage
* For sta, need first to configure correct state from drv_sta_state
* and only after that update mac config.
@@ -276,6 +312,7 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
ret = iwl_mvm_esr_mode_active(mvm, vif);
if (ret) {
IWL_ERR(mvm, "failed to activate ESR mode (%d)\n", ret);
+ iwl_mvm_request_periodic_system_statistics(mvm, false);
goto out;
}
}
@@ -296,13 +333,8 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
* this needs the phy context assigned (and in FW?), and we cannot
* do it later because it needs to be initialized as soon as we're
* able to TX on the link, i.e. when active.
- *
- * Firmware restart isn't quite correct yet for MLO, but we don't
- * need to do it in that case anyway since it will happen from the
- * normal station state callback.
*/
- if (mvmvif->ap_sta &&
- !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
+ if (mvmvif->ap_sta) {
struct ieee80211_link_sta *link_sta;
rcu_read_lock();
@@ -315,6 +347,11 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm,
rcu_read_unlock();
}
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf,
+ false);
+
/* then activate */
ret = iwl_mvm_link_changed(mvm, vif, link_conf,
LINK_CONTEXT_MODIFY_ACTIVE |
@@ -352,13 +389,23 @@ static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
- mutex_lock(&mvm->mutex);
- ret = __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
- mutex_unlock(&mvm->mutex);
+ /* update EMLSR mode */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) {
+ int ret;
- return ret;
+ ret = iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id,
+ true);
+ /*
+ * Don't activate this link if we failed to exit
+ * EMLSR on the BSS interface
+ */
+ if (ret)
+ return ret;
+ }
+
+ guard(mvm)(mvm);
+ return __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false);
}
static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm,
@@ -404,6 +451,11 @@ static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm,
break;
}
+ iwl_mvm_request_periodic_system_statistics(mvm, false);
+
+ /* Start a new counting window */
+ iwl_mvm_restart_mpdu_count(mvm, mvmvif);
+
return ret;
}
@@ -416,7 +468,7 @@ __iwl_mvm_mld_unassign_vif_chanctx(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- unsigned int n_active = iwl_mvm_mld_count_active_links(vif);
+ unsigned int n_active = iwl_mvm_mld_count_active_links(mvmvif);
unsigned int link_id = link_conf->link_id;
/* shouldn't happen, but verify link_id is valid before accessing */
@@ -472,6 +524,87 @@ static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mvm_add_link(mvm, vif, link_conf);
}
mutex_unlock(&mvm->mutex);
+
+ /* update EMLSR mode */
+ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION)
+ iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id, false);
+}
+
+static void
+iwl_mvm_tpe_sta_cmd_data(struct iwl_txpower_constraints_cmd *cmd,
+ const struct ieee80211_bss_conf *bss_info)
+{
+ u8 i;
+
+ /*
+ * NOTE: the 0 here is IEEE80211_TPE_CAT_6GHZ_DEFAULT,
+ * we fully ignore IEEE80211_TPE_CAT_6GHZ_SUBORDINATE
+ */
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd->psd_pwr) !=
+ ARRAY_SIZE(bss_info->tpe.psd_local[0].power));
+
+ /* if not valid, mac80211 puts default (max value) */
+ for (i = 0; i < ARRAY_SIZE(cmd->psd_pwr); i++)
+ cmd->psd_pwr[i] = min(bss_info->tpe.psd_local[0].power[i],
+ bss_info->tpe.psd_reg_client[0].power[i]);
+
+ BUILD_BUG_ON(ARRAY_SIZE(cmd->eirp_pwr) !=
+ ARRAY_SIZE(bss_info->tpe.max_local[0].power));
+
+ for (i = 0; i < ARRAY_SIZE(cmd->eirp_pwr); i++)
+ cmd->eirp_pwr[i] = min(bss_info->tpe.max_local[0].power[i],
+ bss_info->tpe.max_reg_client[0].power[i]);
+}
+
+void
+iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ bool is_ap)
+{
+ struct iwl_txpower_constraints_cmd cmd = {};
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif_link_info *link_info =
+ mvmvif->link[bss_conf->link_id];
+ u32 cmd_id = WIDE_ID(PHY_OPS_GROUP, AP_TX_POWER_CONSTRAINTS_CMD);
+ u32 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
+ IWL_FW_CMD_VER_UNKNOWN);
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (cmd_ver == IWL_FW_CMD_VER_UNKNOWN)
+ return;
+
+ if (!link_info->active ||
+ link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID)
+ return;
+
+ if (bss_conf->chanreq.oper.chan->band != NL80211_BAND_6GHZ)
+ return;
+
+ cmd.link_id = cpu_to_le16(link_info->fw_link_id);
+ memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr));
+ memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr));
+
+ if (is_ap) {
+ cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP);
+ } else if (bss_conf->power_type == IEEE80211_REG_UNSET_AP) {
+ return;
+ } else {
+ cmd.ap_type = cpu_to_le16(bss_conf->power_type - 1);
+ iwl_mvm_tpe_sta_cmd_data(&cmd, bss_conf);
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(PHY_OPS_GROUP,
+ AP_TX_POWER_CONSTRAINTS_CMD),
+ 0, sizeof(cmd), &cmd);
+ if (ret)
+ IWL_ERR(mvm,
+ "failed to send AP_TX_POWER_CONSTRAINTS_CMD (%d)\n",
+ ret);
}
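The STA branch of the TPE command fills each power entry with the element-wise minimum of the locally-advertised and regulatory-client limits, after both arrays were pre-seeded with the "no constraint" default. A short sketch of that combine step (fill_tpe, min8 and DEFAULT_TPE are invented names; the real arrays come from bss_conf->tpe):

    #include <stdio.h>

    #define N 8
    #define DEFAULT_TPE 127 /* stand-in for the "no constraint" default */

    static signed char min8(signed char a, signed char b)
    {
            return a < b ? a : b;
    }

    /* per-entry minimum of the local TPE and the regulatory-client TPE,
     * as the STA branch computes for psd_pwr and eirp_pwr */
    static void fill_tpe(signed char *out, const signed char *local,
                         const signed char *reg_client)
    {
            for (int i = 0; i < N; i++)
                    out[i] = min8(local[i], reg_client[i]);
    }

    int main(void)
    {
            signed char local[N], reg[N], out[N];

            for (int i = 0; i < N; i++) {
                    local[i] = DEFAULT_TPE; /* default when unset */
                    reg[i] = 20;            /* tighter regulatory limit */
            }
            fill_tpe(out, local, reg);
            printf("effective power[0] = %d\n", out[0]);
            return 0;
    }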
static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
@@ -482,11 +615,16 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
+
+ if (vif->type == NL80211_IFTYPE_AP)
+ iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif,
+ link_conf, true);
+
/* Send the beacon template */
ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf);
if (ret)
- goto out_unlock;
+ return ret;
/* the link should be already activated when assigning chan context */
ret = iwl_mvm_link_changed(mvm, vif, link_conf,
@@ -494,11 +632,11 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
~LINK_CONTEXT_MODIFY_ACTIVE,
true);
if (ret)
- goto out_unlock;
+ return ret;
ret = iwl_mvm_mld_add_mcast_sta(mvm, vif, link_conf);
if (ret)
- goto out_unlock;
+ return ret;
/* Send the bcast station. At this stage the TBTT and DTIM time
* events are added and applied to the scheduler
@@ -522,7 +660,7 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_ftm_restart_responder(mvm, vif, link_conf);
- goto out_unlock;
+ return 0;
out_failed:
iwl_mvm_power_update_mac(mvm);
@@ -530,8 +668,6 @@ out_failed:
iwl_mvm_mld_rm_bcast_sta(mvm, vif, link_conf);
out_rm_mcast:
iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf);
-out_unlock:
- mutex_unlock(&mvm->mutex);
return ret;
}
@@ -554,7 +690,7 @@ static void iwl_mvm_mld_stop_ap_ibss(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_stop_ap_ibss_common(mvm, vif);
@@ -568,7 +704,6 @@ static void iwl_mvm_mld_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf);
iwl_mvm_power_update_mac(mvm);
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_mld_stop_ap(struct ieee80211_hw *hw,
@@ -601,126 +736,23 @@ static int iwl_mvm_mld_mac_sta_state(struct ieee80211_hw *hw,
&callbacks);
}
-struct iwl_mvm_link_sel_data {
- u8 link_id;
- enum nl80211_band band;
- enum nl80211_chan_width width;
- bool active;
-};
-
-static bool iwl_mvm_mld_valid_link_pair(struct iwl_mvm_link_sel_data *a,
- struct iwl_mvm_link_sel_data *b)
+static bool iwl_mvm_esr_bw_criteria(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf)
{
- return a->band != b->band;
-}
-
-void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- bool valid_links_changed)
-{
- struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
- unsigned long usable_links = ieee80211_vif_usable_links(vif);
- u32 max_active_links = iwl_mvm_max_active_links(mvm, vif);
- u16 new_active_links;
- u8 link_id, n_data = 0, i, j;
-
- if (!IWL_MVM_AUTO_EML_ENABLE)
- return;
-
- if (!ieee80211_vif_is_mld(vif) || usable_links == 1)
- return;
-
- /* The logic below is a simple version that doesn't suit more than 2
- * links
- */
- WARN_ON_ONCE(max_active_links > 2);
-
- /* if only a single active link is supported, assume that the one
- * selected by higher layer for connection establishment is the best.
- */
- if (max_active_links == 1 && !valid_links_changed)
- return;
-
- /* If we are already using the maximal number of active links, don't do
- * any change. This can later be optimized to pick a 'better' link pair.
- */
- if (hweight16(vif->active_links) == max_active_links)
- return;
-
- rcu_read_lock();
-
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- rcu_dereference(vif->link_conf[link_id]);
+ struct ieee80211_bss_conf *other_link;
+ int link_id;
- if (WARN_ON_ONCE(!link_conf))
+ /* Exit EMLSR if links don't have equal bandwidths */
+ for_each_vif_active_link(vif, other_link, link_id) {
+ if (link_id == link_conf->link_id)
continue;
-
- data[n_data].link_id = link_id;
- data[n_data].band = link_conf->chanreq.oper.chan->band;
- data[n_data].width = link_conf->chanreq.oper.width;
- data[n_data].active = vif->active_links & BIT(link_id);
- n_data++;
- }
-
- rcu_read_unlock();
-
- /* this is expected to be the current active link */
- if (n_data == 1)
- return;
-
- new_active_links = 0;
-
- /* Assume that after association only a single link is active, thus,
- * select only the 2nd link
- */
- if (!valid_links_changed) {
- for (i = 0; i < n_data; i++) {
- if (data[i].active)
- break;
- }
-
- if (WARN_ON_ONCE(i == n_data))
- return;
-
- for (j = 0; j < n_data; j++) {
- if (i == j)
- continue;
-
- if (iwl_mvm_mld_valid_link_pair(&data[i], &data[j]))
- break;
- }
-
- if (j != n_data)
- new_active_links = BIT(data[i].link_id) |
- BIT(data[j].link_id);
- } else {
- /* Try to find a valid link pair for EMLSR operation. If a pair
- * is not found continue using the current active link.
- */
- for (i = 0; i < n_data; i++) {
- for (j = 0; j < n_data; j++) {
- if (i == j)
- continue;
-
- if (iwl_mvm_mld_valid_link_pair(&data[i],
- &data[j]))
- break;
- }
-
- /* found a valid pair for EMLSR, use it */
- if (j != n_data) {
- new_active_links = BIT(data[i].link_id) |
- BIT(data[j].link_id);
- break;
- }
- }
+ if (link_conf->chanreq.oper.width ==
+ other_link->chanreq.oper.width)
+ return true;
}
- if (!new_active_links)
- return;
-
- if (vif->active_links != new_active_links)
- ieee80211_set_active_links_async(vif, new_active_links);
+ return false;
}
static void
@@ -752,6 +784,14 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
link_changes |= LINK_CONTEXT_MODIFY_HE_PARAMS;
}
+ if ((changes & BSS_CHANGED_BANDWIDTH) &&
+ ieee80211_vif_link_active(vif, link_conf->link_id) &&
+ mvmvif->esr_active &&
+ !iwl_mvm_esr_bw_criteria(mvm, vif, link_conf))
+ iwl_mvm_exit_esr(mvm, vif,
+ IWL_MVM_ESR_EXIT_BANDWIDTH,
+ iwl_mvm_get_primary_link(vif));
+
/* if associated, maybe puncturing changed - we'll check later */
if (vif->cfg.assoc)
link_changes |= LINK_CONTEXT_MODIFY_EHT_PARAMS;
@@ -767,9 +807,6 @@ iwl_mvm_mld_link_info_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
- if (changes & BSS_CHANGED_MLD_VALID_LINKS)
- iwl_mvm_mld_select_links(mvm, vif, true);
-
memcpy(mvmvif->link[link_conf->link_id]->bssid, link_conf->bssid,
ETH_ALEN);
@@ -836,6 +873,8 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
if (changes & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc) {
+ mvmvif->session_prot_connection_loss = false;
+
/* clear statistics to get clean beacon counter */
iwl_mvm_request_statistics(mvm, true);
iwl_mvm_sf_update(mvm, vif, false);
@@ -912,6 +951,11 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
+
+ if (changes & (BSS_CHANGED_MLD_VALID_LINKS | BSS_CHANGED_MLD_TTLM) &&
+ ieee80211_vif_is_mld(vif) && mvmvif->authorized)
+ wiphy_delayed_work_queue(mvm->hw->wiphy,
+ &mvmvif->mlo_int_scan_wk, 0);
}
static void
@@ -964,7 +1008,7 @@ static void iwl_mvm_mld_link_info_changed(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
@@ -990,8 +1034,6 @@ static void iwl_mvm_mld_link_info_changed(struct ieee80211_hw *hw,
link_conf->txpower);
iwl_mvm_set_tx_power(mvm, vif, link_conf->txpower);
}
-
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_mld_vif_cfg_changed(struct ieee80211_hw *hw,
@@ -1000,15 +1042,13 @@ static void iwl_mvm_mld_vif_cfg_changed(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle)
iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true);
if (vif->type == NL80211_IFTYPE_STATION)
iwl_mvm_mld_vif_cfg_changed_station(mvm, vif, changes);
-
- mutex_unlock(&mvm->mutex);
}
static int
@@ -1041,9 +1081,8 @@ static void iwl_mvm_mld_config_iface_filter(struct ieee80211_hw *hw,
!vif->p2p)
return;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false);
- mutex_unlock(&mvm->mutex);
}
static int
@@ -1065,14 +1104,10 @@ iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw,
* The exception is P2P_DEVICE interface which needs immediate update.
*/
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- int ret;
-
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
- LINK_CONTEXT_MODIFY_QOS_PARAMS,
- true);
- mutex_unlock(&mvm->mutex);
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_link_changed(mvm, vif, &vif->bss_conf,
+ LINK_CONTEXT_MODIFY_QOS_PARAMS,
+ true);
}
return 0;
}
@@ -1190,6 +1225,14 @@ iwl_mvm_mld_change_vif_links(struct ieee80211_hw *hw,
if (new_links == 0) {
mvmvif->link[0] = &mvmvif->deflink;
err = iwl_mvm_add_link(mvm, vif, &vif->bss_conf);
+ if (err == 0)
+ mvmvif->primary_link = 0;
+ } else if (!(new_links & BIT(mvmvif->primary_link))) {
+ /*
+ * Ensure we always have a valid primary_link, the real
+ * decision happens later when PHY is activated.
+ */
+ mvmvif->primary_link = __ffs(new_links);
}
out_err:
@@ -1209,77 +1252,19 @@ iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw,
u16 old_links, u16 new_links)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- int ret;
-
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links);
- mutex_unlock(&mvm->mutex);
-
- return ret;
-}
-
-/*
- * This function receives a subset of the usable links bitmap and
- * returns the primary link id, and -1 if such link doesn't exist
- * (e.g. non-MLO connection) or wasn't found.
- */
-int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- unsigned long usable_links)
-{
- struct iwl_mvm_link_sel_data data[IEEE80211_MLD_MAX_NUM_LINKS];
- u8 link_id, n_data = 0;
-
- if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc)
- return -1;
-
- for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- link_conf_dereference_protected(vif, link_id);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- data[n_data].link_id = link_id;
- data[n_data].band = link_conf->chanreq.oper.chan->band;
- data[n_data].width = link_conf->chanreq.oper.width;
- data[n_data].active = true;
- n_data++;
- }
-
- if (n_data <= 1)
- return -1;
-
- /* The logic should be modified to handle more than 2 links */
- WARN_ON_ONCE(n_data > 2);
-
- /* Primary link is the link with the wider bandwidth or higher band */
- if (data[0].width > data[1].width)
- return data[0].link_id;
- if (data[0].width < data[1].width)
- return data[1].link_id;
- if (data[0].band >= data[1].band)
- return data[0].link_id;
- return data[1].link_id;
+ guard(mvm)(mvm);
+ return iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links);
}
-/*
- * This function receives a bitmap of usable links and check if we can enter
- * eSR on those links.
- */
-static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- unsigned long desired_links)
+bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- int primary_link = iwl_mvm_mld_get_primary_link(mvm, vif,
- desired_links);
const struct wiphy_iftype_ext_capab *ext_capa;
- bool ret = true;
- int link_id;
- if (primary_link < 0)
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!ieee80211_vif_is_mld(vif) || !vif->cfg.assoc ||
+ hweight16(ieee80211_vif_usable_links(vif)) == 1)
return false;
if (!(vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP))
@@ -1287,30 +1272,8 @@ static bool iwl_mvm_can_enter_esr(struct iwl_mvm *mvm,
ext_capa = cfg80211_get_iftype_ext_capa(mvm->hw->wiphy,
ieee80211_vif_type_p2p(vif));
- if (!ext_capa ||
- !(ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP))
- return false;
-
- for_each_set_bit(link_id, &desired_links, IEEE80211_MLD_MAX_NUM_LINKS) {
- struct ieee80211_bss_conf *link_conf =
- link_conf_dereference_protected(vif, link_id);
-
- if (WARN_ON_ONCE(!link_conf))
- continue;
-
- /* BT Coex effects eSR mode only if one of the link is on LB */
- if (link_conf->chanreq.oper.chan->band != NL80211_BAND_2GHZ)
- continue;
-
- ret = iwl_mvm_bt_coex_calculate_esr_mode(mvm, vif, link_id,
- primary_link);
- // Mark eSR as disabled for the next time
- if (!ret)
- mvmvif->bt_coex_esr_disabled = true;
- break;
- }
-
- return ret;
+ return (ext_capa &&
+ (ext_capa->eml_capabilities & IEEE80211_EML_CAP_EMLSR_SUPP));
}
static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
@@ -1319,25 +1282,19 @@ static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int n_links = hweight16(desired_links);
- bool ret = true;
if (n_links <= 1)
return true;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* Check if HW supports the wanted number of links */
- if (n_links > iwl_mvm_max_active_links(mvm, vif)) {
- ret = false;
- goto unlock;
- }
+ if (n_links > iwl_mvm_max_active_links(mvm, vif))
+ return false;
/* If it is an eSR device, check that we can enter eSR */
- if (iwl_mvm_is_esr_supported(mvm->fwrt.trans))
- ret = iwl_mvm_can_enter_esr(mvm, vif, desired_links);
-unlock:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return iwl_mvm_is_esr_supported(mvm->fwrt.trans) &&
+ iwl_mvm_vif_has_esr_cap(mvm, vif);
}
static enum ieee80211_neg_ttlm_res
@@ -1358,6 +1315,45 @@ iwl_mvm_mld_can_neg_ttlm(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return NEG_TTLM_RES_ACCEPT;
}
+static int
+iwl_mvm_mld_mac_pre_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel_switch *chsw)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ int ret;
+
+ mutex_lock(&mvm->mutex);
+ if (mvmvif->esr_active) {
+ u8 primary = iwl_mvm_get_primary_link(vif);
+ int selected;
+
+ /* prefer primary unless quiet CSA on it */
+ if (chsw->link_id == primary && chsw->block_tx)
+ selected = iwl_mvm_get_other_link(vif, primary);
+ else
+ selected = primary;
+
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_CSA, selected);
+ mutex_unlock(&mvm->mutex);
+
+ /*
+ * If we've not kept the link active that's doing the CSA
+ * then we don't need to do anything else, just return.
+ */
+ if (selected != chsw->link_id)
+ return 0;
+
+ mutex_lock(&mvm->mutex);
+ }
+
+ ret = iwl_mvm_pre_channel_switch(mvm, vif, chsw);
+ mutex_unlock(&mvm->mutex);
+
+ return ret;
+}
+
const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx = iwl_mvm_mac_tx,
.wake_tx_queue = iwl_mvm_mac_wake_tx_queue,
@@ -1411,7 +1407,7 @@ const struct ieee80211_ops iwl_mvm_mld_hw_ops = {
.tx_last_beacon = iwl_mvm_tx_last_beacon,
.channel_switch = iwl_mvm_channel_switch,
- .pre_channel_switch = iwl_mvm_pre_channel_switch,
+ .pre_channel_switch = iwl_mvm_mld_mac_pre_channel_switch,
.post_channel_switch = iwl_mvm_post_channel_switch,
.abort_channel_switch = iwl_mvm_abort_channel_switch,
.channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
index 23e64a757cfe..d5a204e52076 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2022-2023 Intel Corporation
+ * Copyright (C) 2022-2024 Intel Corporation
*/
#include "mvm.h"
#include "time-sync.h"
@@ -9,7 +9,9 @@
u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int filter_link_id)
{
+ struct ieee80211_link_sta *link_sta;
struct iwl_mvm_sta *mvmsta;
+ struct ieee80211_vif *vif;
unsigned int link_id;
u32 result = 0;
@@ -17,26 +19,27 @@ u32 iwl_mvm_sta_fw_id_mask(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return 0;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ vif = mvmsta->vif;
/* it's easy when the STA is not an MLD */
if (!sta->valid_links)
return BIT(mvmsta->deflink.sta_id);
/* but if it is an MLD, get the mask of all the FW STAs it has ... */
- for (link_id = 0; link_id < ARRAY_SIZE(mvmsta->link); link_id++) {
- struct iwl_mvm_link_sta *link_sta;
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ struct iwl_mvm_link_sta *mvm_link_sta;
/* unless we have a specific link in mind */
if (filter_link_id >= 0 && link_id != filter_link_id)
continue;
- link_sta =
+ mvm_link_sta =
rcu_dereference_check(mvmsta->link[link_id],
lockdep_is_held(&mvm->mutex));
- if (!link_sta)
+ if (!mvm_link_sta)
continue;
- result |= BIT(link_sta->sta_id);
+ result |= BIT(mvm_link_sta->sta_id);
}
return result;
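/*
 * Editorial usage sketch (not part of the patch): a negative
 * filter_link_id collects the FW station IDs of every active link,
 * while a non-negative value restricts the mask to that one link:
 *
 *	u32 all_links = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
 *	u32 link2_only = iwl_mvm_sta_fw_id_mask(mvm, sta, 2);
 */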
@@ -238,7 +241,7 @@ int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
IWL_MAX_TID_COUNT, &wdg_timeout);
}
-/* Allocate a new station entry for the broadcast station to the given vif,
+/* Allocate a new station entry for the multicast station to the given vif,
* and send it to the FW.
* Note that each AP/GO mac should have its own multicast station.
*/
@@ -467,7 +470,7 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
- switch (sta->deflink.smps_mode) {
+ switch (link_sta->smps_mode) {
case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_NUM_MODES:
WARN_ON(1);
@@ -512,11 +515,11 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return iwl_mvm_mld_send_sta_cmd(mvm, &cmd);
}
-static void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
- struct iwl_mvm_sta *mvm_sta,
- struct iwl_mvm_link_sta *mvm_sta_link,
- unsigned int link_id,
- bool is_in_fw)
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ struct iwl_mvm_link_sta *mvm_sta_link,
+ unsigned int link_id,
+ bool is_in_fw)
{
RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta_link->sta_id],
is_in_fw ? ERR_PTR(-EINVAL) : NULL);
@@ -582,14 +585,14 @@ static int iwl_mvm_mld_alloc_sta_links(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+ struct ieee80211_link_sta *link_sta;
unsigned int link_id;
int ret;
lockdep_assert_held(&mvm->mutex);
- for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
- if (!rcu_access_pointer(sta->link[link_id]) ||
- mvm_sta->link[link_id])
+ for_each_sta_active_link(vif, sta, link_sta, link_id) {
+ if (WARN_ON(mvm_sta->link[link_id]))
continue;
ret = iwl_mvm_mld_alloc_sta_link(mvm, vif, sta, link_id);
@@ -616,9 +619,6 @@ static void iwl_mvm_mld_set_ap_sta_id(struct ieee80211_sta *sta,
}
}
-/* FIXME: consider waiting for mac80211 to add the STA instead of allocating
- * queues here
- */
static int iwl_mvm_alloc_sta_after_restart(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -723,7 +723,6 @@ int iwl_mvm_mld_add_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_mld_set_ap_sta_id(sta, mvm_vif->link[link_id],
mvm_link_sta);
}
-
return 0;
err:
@@ -849,6 +848,8 @@ int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_mld_free_sta_link(mvm, mvm_sta, mvm_link_sta,
link_id, stay_in_fw);
}
+ kfree(mvm_sta->mpdu_counters);
+ mvm_sta->mpdu_counters = NULL;
return ret;
}
@@ -989,6 +990,10 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
int baid;
+ /* mac80211 will remove sessions later, but we ignore all that */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return 0;
+
BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
for (baid = 0; baid < ARRAY_SIZE(mvm->baid_map); baid++) {
@@ -1009,7 +1014,8 @@ static int iwl_mvm_mld_update_sta_baids(struct iwl_mvm *mvm,
cmd.modify.tid = cpu_to_le32(data->tid);
- ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_SEND_IN_RFKILL,
+ sizeof(cmd), &cmd);
data->sta_mask = new_sta_mask;
if (ret)
return ret;
@@ -1122,10 +1128,21 @@ int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
}
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
- if (WARN_ON(!mvm_sta->link[link_id])) {
+ struct iwl_mvm_link_sta *mvm_link_sta =
+ rcu_dereference_protected(mvm_sta->link[link_id],
+ lockdep_is_held(&mvm->mutex));
+ u32 sta_id;
+
+ if (WARN_ON(!mvm_link_sta)) {
ret = -EINVAL;
goto err;
}
+
+ sta_id = mvm_link_sta->sta_id;
+
+ rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
+ rcu_assign_pointer(mvm->fw_id_to_link_sta[sta_id],
+ link_sta);
} else {
if (WARN_ON(mvm_sta->link[link_id])) {
ret = -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index f0b24f00938b..22f48b66d79c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -9,6 +9,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/cleanup.h>
#include <linux/leds.h>
#include <linux/in6.h>
@@ -23,7 +24,7 @@
#include "iwl-op-mode.h"
#include "iwl-trans.h"
#include "fw/notif-wait.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "fw/file.h"
#include "iwl-config.h"
#include "sta.h"
@@ -82,14 +83,9 @@ extern const struct ieee80211_ops iwl_mvm_mld_hw_ops;
/**
* struct iwl_mvm_mod_params - module parameters for iwlmvm
- * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
- * We will register to mac80211 to have testmode working. The NIC must not
- * be up'ed after the INIT fw asserted. This is useful to be able to use
- * proprietary tools over testmode to debug the INIT fw.
* @power_scheme: one of enum iwl_power_scheme
*/
struct iwl_mvm_mod_params {
- bool init_dbg;
int power_scheme;
};
extern struct iwl_mvm_mod_params iwlmvm_mod_params;
@@ -255,18 +251,14 @@ enum iwl_mvm_low_latency_cause {
};
/**
-* struct iwl_mvm_vif_bf_data - beacon filtering related data
-* @bf_enabled: indicates if beacon filtering is enabled
-* @ba_enabled: indicated if beacon abort is enabled
+* struct iwl_mvm_link_bf_data - beacon filtering related data
* @ave_beacon_signal: average beacon signal
* @last_cqm_event: rssi of the last cqm event
* @bt_coex_min_thold: minimum threshold for BT coex
* @bt_coex_max_thold: maximum threshold for BT coex
* @last_bt_coex_event: rssi of the last BT coex event
*/
-struct iwl_mvm_vif_bf_data {
- bool bf_enabled;
- bool ba_enabled;
+struct iwl_mvm_link_bf_data {
int ave_beacon_signal;
int last_cqm_event;
int bt_coex_min_thold;
@@ -309,6 +301,7 @@ struct iwl_probe_resp_data {
* @listen_lmac: indicates this link is allocated to the listen LMAC
* @mcast_sta: multicast station
* @phy_ctxt: phy context allocated to this link, if any
+ * @bf_data: beacon filtering data
*/
struct iwl_mvm_vif_link_info {
u8 bssid[ETH_ALEN];
@@ -344,6 +337,66 @@ struct iwl_mvm_vif_link_info {
struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
u16 mgmt_queue;
+
+ struct iwl_mvm_link_bf_data bf_data;
+};
+
+/**
+ * enum iwl_mvm_esr_state - defines reasons for which the EMLSR is exited or
+ * blocked.
+ * The low 16 bits are used for blocking reasons, and the 16 higher bits
+ * are used for exit reasons.
+ * For the blocking reasons - use iwl_mvm_(un)block_esr(), and for the exit
+ * reasons - use iwl_mvm_exit_esr().
+ *
+ * Note: new reasons shall be added to HANDLE_ESR_REASONS as well (for logs)
+ *
+ * @IWL_MVM_ESR_BLOCKED_PREVENTION: Prevent EMLSR to avoid entering and exiting
+ * in a loop.
+ * @IWL_MVM_ESR_BLOCKED_WOWLAN: WOWLAN is preventing the enablement of EMLSR
+ * @IWL_MVM_ESR_BLOCKED_TPT: block EMLSR when there is not enough traffic
+ * @IWL_MVM_ESR_BLOCKED_FW: FW didn't recommend EMLSR, or forced an exit from it
+ * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-BSS interface's link is
+ * preventing EMLSR
+ * @IWL_MVM_ESR_BLOCKED_ROC: remain-on-channel is preventing EMLSR
+ * @IWL_MVM_ESR_EXIT_MISSED_BEACON: exited EMLSR due to missed beacons
+ * @IWL_MVM_ESR_EXIT_LOW_RSSI: link is deactivated/not allowed for EMLSR
+ * due to low RSSI.
+ * @IWL_MVM_ESR_EXIT_COEX: link is deactivated/not allowed for EMLSR
+ * due to BT Coex.
+ * @IWL_MVM_ESR_EXIT_BANDWIDTH: Bandwidths of primary and secondary links
+ * preventing the enablement of EMLSR
+ * @IWL_MVM_ESR_EXIT_CSA: CSA happened, so exit EMLSR
+ * @IWL_MVM_ESR_EXIT_LINK_USAGE: Exit EMLSR due to low tpt on secondary link
+ */
+enum iwl_mvm_esr_state {
+ IWL_MVM_ESR_BLOCKED_PREVENTION = 0x1,
+ IWL_MVM_ESR_BLOCKED_WOWLAN = 0x2,
+ IWL_MVM_ESR_BLOCKED_TPT = 0x4,
+ IWL_MVM_ESR_BLOCKED_FW = 0x8,
+ IWL_MVM_ESR_BLOCKED_NON_BSS = 0x10,
+ IWL_MVM_ESR_BLOCKED_ROC = 0x20,
+ IWL_MVM_ESR_EXIT_MISSED_BEACON = 0x10000,
+ IWL_MVM_ESR_EXIT_LOW_RSSI = 0x20000,
+ IWL_MVM_ESR_EXIT_COEX = 0x40000,
+ IWL_MVM_ESR_EXIT_BANDWIDTH = 0x80000,
+ IWL_MVM_ESR_EXIT_CSA = 0x100000,
+ IWL_MVM_ESR_EXIT_LINK_USAGE = 0x200000,
+};
+
+#define IWL_MVM_BLOCK_ESR_REASONS 0xffff
+
+const char *iwl_get_esr_state_string(enum iwl_mvm_esr_state state);
+
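/*
 * Editorial sketch (not part of the patch): with blocking reasons in
 * the low 16 bits and exit reasons above them, a reason can be
 * classified with a single mask test:
 *
 *	static bool iwl_mvm_esr_reason_is_blocking(enum iwl_mvm_esr_state r)
 *	{
 *		return r & IWL_MVM_BLOCK_ESR_REASONS;
 *	}
 */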
+/**
+ * struct iwl_mvm_esr_exit - details of the last exit from EMLSR mode.
+ * @reason: the reason for the last exit from EMLSR, a value from
+ * &enum iwl_mvm_esr_state. Will be 0 before the first exit from EMLSR.
+ * @ts: the timestamp of the last time we exited EMLSR.
+ */
+struct iwl_mvm_esr_exit {
+ unsigned long ts;
+ enum iwl_mvm_esr_state reason;
};
/**
@@ -361,7 +414,6 @@ struct iwl_mvm_vif_link_info {
* @pm_enabled - indicate if MAC power management is allowed
* @monitor_active: indicates that monitor context is configured, and that the
* interface should get quota etc.
- * @bt_coex_esr_disabled: indicates if esr is disabled due to bt coex
* @low_latency: bit flags for low latency
* see enum &iwl_mvm_low_latency_cause for causes.
* @low_latency_actual: boolean, indicates low latency is set,
@@ -371,14 +423,65 @@ struct iwl_mvm_vif_link_info {
* @csa_countdown: indicates that CSA countdown may be started
* @csa_failed: CSA failed to schedule time event, report an error later
* @csa_bcn_pending: indicates that we are waiting for a beacon on a new channel
+ * @csa_blocks_tx: CSA is blocking TX
* @features: hw features active for this vif
+ * @max_tx_op: max TXOP in usecs for all ACs, zero for no limit.
* @ap_beacon_time: AP beacon time for synchronisation (on older FW)
+ * @bf_enabled: indicates if beacon filtering is enabled
+ * @ba_enabled: indicates if beacon abort is enabled
* @bcn_prot: beacon protection data (keys; FIXME: needs to be per link)
- * @bf_data: beacon filtering data
* @deflink: default link data for use in non-MLO
* @link: link data for each link in MLO
* @esr_active: indicates eSR mode is active
+ * @esr_disable_reason: a bitmap of &enum iwl_mvm_esr_state
* @pm_enabled: indicates powersave is enabled
+ * @link_selection_res: bitmap of active links as it was decided in the last
+ * link selection. Valid only for an MLO vif after assoc. 0 if there wasn't
+ * any link selection yet.
+ * @link_selection_primary: primary link selected by link selection
+ * @primary_link: primary link in eSR. Valid only for an associated MLD vif,
+ * and in eSR mode. Valid only for a STA.
+ * @last_esr_exit: Details of the last exit from EMLSR.
+ * @exit_same_reason_count: The number of times we exited due to the specified
+ * @last_esr_exit::reason, only counting exits due to
+ * &IWL_MVM_ESR_PREVENT_REASONS.
+ * @prevent_esr_done_wk: work that should be done when esr prevention ends.
+ * @mlo_int_scan_wk: work for the internal MLO scan.
+ * @unblock_esr_tpt_wk: work for unblocking EMLSR when tpt is high enough.
+ * @roc_activity: currently running ROC activity for this vif (or
+ * ROC_NUM_ACTIVITIES if no activity is running).
+ * @session_prot_connection_loss: the connection was lost due to session
+ * protection ending without receiving a beacon, so we need to now
+ * protect the deauth separately
+ * @ap_early_keys: The firmware cannot install keys before stations etc.,
+ * but higher layers work differently, so we store the keys here for
+ * later installation.
+ * @ap_sta: pointer to the AP STA data structure
+ * @csa_count: CSA counter (old CSA implementation w/o firmware)
+ * @csa_misbehave: CSA AP misbehaviour flag (old implementation)
+ * @csa_target_freq: CSA target channel frequency (old implementation)
+ * @csa_work: CSA work (old implementation)
+ * @dbgfs_bf: beacon filtering debugfs data
+ * @dbgfs_dir: debugfs directory for this vif
+ * @dbgfs_pm: power management debugfs data
+ * @dbgfs_quota_min: debugfs value for minimal quota
+ * @dbgfs_slink: debugfs symlink for this interface
+ * @ftm_unprotected: unprotected FTM debugfs override
+ * @hs_time_event_data: hotspot/AUX ROC time event data
+ * @mac_pwr_cmd: debugfs override for MAC power command
+ * @target_ipv6_addrs: IPv6 addresses on this interface for offload
+ * @num_target_ipv6_addrs: number of @target_ipv6_addrs
+ * @tentative_addrs: bitmap of tentative IPv6 addresses in @target_ipv6_addrs
+ * @rekey_data: rekeying data for WoWLAN GTK rekey offload
+ * @seqno: storage for seqno for older firmware D0/D3 transition
+ * @seqno_valid: indicates @seqno is valid
+ * @time_event_data: session protection time event data
+ * @tsf_id: the TSF resource ID assigned in firmware (for firmware needing that)
+ * @tx_key_idx: WEP transmit key index for D3
+ * @uapsd_misbehaving_ap_addr: MLD address/BSSID of U-APSD misbehaving AP, to
+ * not use U-APSD on reconnection
+ * @uapsd_nonagg_detected_wk: worker for handling detection of no aggregation
+ * in U-APSD
*/
struct iwl_mvm_vif {
struct iwl_mvm *mvm;
@@ -392,7 +495,7 @@ struct iwl_mvm_vif {
bool pm_enabled;
bool monitor_active;
bool esr_active;
- bool bt_coex_esr_disabled;
+ bool session_prot_connection_loss;
u8 low_latency: 6;
u8 low_latency_actual: 1;
@@ -400,8 +503,10 @@ struct iwl_mvm_vif {
u8 authorized:1;
bool ps_disabled;
+ u32 esr_disable_reason;
u32 ap_beacon_time;
- struct iwl_mvm_vif_bf_data bf_data;
+ bool bf_enabled;
+ bool ba_enabled;
#ifdef CONFIG_PM
/* WoWLAN GTK rekey data */
@@ -435,6 +540,7 @@ struct iwl_mvm_vif {
struct iwl_dbgfs_bf dbgfs_bf;
struct iwl_mac_power_cmd mac_pwr_cmd;
int dbgfs_quota_min;
+ bool ftm_unprotected;
#endif
/* FW identified misbehaving AP */
@@ -444,6 +550,7 @@ struct iwl_mvm_vif {
bool csa_countdown;
bool csa_failed;
bool csa_bcn_pending;
+ bool csa_blocks_tx;
u16 csa_target_freq;
u16 csa_count;
u16 csa_misbehave;
@@ -453,6 +560,7 @@ struct iwl_mvm_vif {
struct iwl_mvm_time_event_data time_event_data;
struct iwl_mvm_time_event_data hs_time_event_data;
+ enum iwl_roc_activity roc_activity;
/* TCP Checksum Offload */
netdev_features_t features;
@@ -466,6 +574,17 @@ struct iwl_mvm_vif {
struct ieee80211_key_conf __rcu *keys[2];
} bcn_prot;
+ u16 max_tx_op;
+
+ u16 link_selection_res;
+ u8 link_selection_primary;
+ u8 primary_link;
+ struct iwl_mvm_esr_exit last_esr_exit;
+ u8 exit_same_reason_count;
+ struct wiphy_delayed_work prevent_esr_done_wk;
+ struct wiphy_delayed_work mlo_int_scan_wk;
+ struct wiphy_work unblock_esr_tpt_wk;
+
struct iwl_mvm_vif_link_info deflink;
struct iwl_mvm_vif_link_info *link[IEEE80211_MLD_MAX_NUM_LINKS];
};
@@ -490,10 +609,12 @@ enum iwl_scan_status {
IWL_MVM_SCAN_REGULAR = BIT(0),
IWL_MVM_SCAN_SCHED = BIT(1),
IWL_MVM_SCAN_NETDETECT = BIT(2),
+ IWL_MVM_SCAN_INT_MLO = BIT(3),
IWL_MVM_SCAN_STOPPING_REGULAR = BIT(8),
IWL_MVM_SCAN_STOPPING_SCHED = BIT(9),
IWL_MVM_SCAN_STOPPING_NETDETECT = BIT(10),
+ IWL_MVM_SCAN_STOPPING_INT_MLO = BIT(11),
IWL_MVM_SCAN_REGULAR_MASK = IWL_MVM_SCAN_REGULAR |
IWL_MVM_SCAN_STOPPING_REGULAR,
@@ -501,6 +622,8 @@ enum iwl_scan_status {
IWL_MVM_SCAN_STOPPING_SCHED,
IWL_MVM_SCAN_NETDETECT_MASK = IWL_MVM_SCAN_NETDETECT |
IWL_MVM_SCAN_STOPPING_NETDETECT,
+ IWL_MVM_SCAN_INT_MLO_MASK = IWL_MVM_SCAN_INT_MLO |
+ IWL_MVM_SCAN_STOPPING_INT_MLO,
IWL_MVM_SCAN_STOPPING_MASK = 0xff << IWL_MVM_SCAN_STOPPING_SHIFT,
IWL_MVM_SCAN_MASK = 0xff,
@@ -522,7 +645,7 @@ enum iwl_mvm_sched_scan_pass_all_states {
};
/**
- * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure
+ * struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure
* @ct_kill_exit: worker to exit thermal kill
* @dynamic_smps: is dynamic SMPS enabled by thermal throttling?
* @tx_backoff: the current thermal throttling tx backoff in uSec.
@@ -645,24 +768,20 @@ struct iwl_mvm_tcm {
* struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer
* @head_sn: reorder window head sn
* @num_stored: number of mpdus stored in the buffer
- * @buf_size: the reorder buffer size as set by the last addba request
* @queue: queue of this reorder buffer
* @last_amsdu: track last A-MSDU SN for duplication detection
* @last_sub_index: track A-MSDU subframe index for duplication detection
* @valid: reordering is valid for this queue
* @lock: protect reorder buffer internal state
- * @mvm: mvm pointer, needed for frame timer context
*/
struct iwl_mvm_reorder_buffer {
u16 head_sn;
u16 num_stored;
- u16 buf_size;
int queue;
u16 last_amsdu;
u8 last_sub_index;
bool valid;
spinlock_t lock;
- struct iwl_mvm *mvm;
} ____cacheline_aligned_in_smp;
/**
@@ -684,6 +803,7 @@ __aligned(roundup_pow_of_two(sizeof(struct sk_buff_head)))
* @tid: tid of the session
* @baid: baid of the session
* @timeout: the timeout set in the addba request
+ * @buf_size: the reorder buffer size as set by the last addba request
* @entries_per_queue: # of buffers per queue, this actually gets
* aligned up to avoid cache line sharing between queues
* @last_rx: last rx jiffies, updated only if timeout passed from last update
@@ -700,13 +820,14 @@ struct iwl_mvm_baid_data {
u8 tid;
u8 baid;
u16 timeout;
+ u16 buf_size;
u16 entries_per_queue;
unsigned long last_rx;
struct timer_list session_timer;
struct iwl_mvm_baid_data __rcu **rcu_ptr;
struct iwl_mvm *mvm;
struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES];
- struct iwl_mvm_reorder_buf_entry entries[];
+ struct iwl_mvm_reorder_buf_entry entries[] ____cacheline_aligned_in_smp;
};
static inline struct iwl_mvm_baid_data *
@@ -754,9 +875,10 @@ struct iwl_mvm_txq {
struct list_head list;
u16 txq_id;
atomic_t tx_request;
-#define IWL_MVM_TXQ_STATE_STOP_FULL 0
-#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 1
-#define IWL_MVM_TXQ_STATE_READY 2
+#define IWL_MVM_TXQ_STATE_READY 0
+#define IWL_MVM_TXQ_STATE_STOP_FULL 1
+#define IWL_MVM_TXQ_STATE_STOP_REDIRECT 2
+#define IWL_MVM_TXQ_STATE_STOP_AP_CSA 3
unsigned long state;
};
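/*
 * Editorial sketch (assumed usage, not part of the patch): the defines
 * above are bit numbers into @state and are driven with the regular
 * bitops, so e.g. a TX path could skip a queue held during an AP CSA:
 *
 *	if (test_bit(IWL_MVM_TXQ_STATE_STOP_AP_CSA, &mvmtxq->state))
 *		return;
 */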
@@ -834,6 +956,35 @@ struct iwl_mei_scan_filter {
struct work_struct scan_work;
};
+/**
+ * struct iwl_mvm_acs_survey_channel - per-channel survey information
+ *
+ * Stripped down version of &struct survey_info.
+ *
+ * @time: time in ms the radio was on the channel
+ * @time_busy: time in ms the channel was sensed busy
+ * @time_tx: time in ms spent transmitting data
+ * @time_rx: time in ms spent receiving data
+ * @noise: channel noise in dBm
+ */
+struct iwl_mvm_acs_survey_channel {
+ u32 time;
+ u32 time_busy;
+ u32 time_tx;
+ u32 time_rx;
+ s8 noise;
+};
+
+struct iwl_mvm_acs_survey {
+ struct iwl_mvm_acs_survey_channel *bands[NUM_NL80211_BANDS];
+
+ /* Overall number of channels */
+ int n_channels;
+
+ /* Storage space for per-channel information follows */
+ struct iwl_mvm_acs_survey_channel channels[] __counted_by(n_channels);
+};
+
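/*
 * Editorial sketch (hypothetical helper, not part of the patch): the
 * survey counters allow a simple per-channel load estimate, e.g. busy
 * time as a percentage of total on-channel time:
 *
 *	static u32 acs_channel_load(const struct iwl_mvm_acs_survey_channel *c)
 *	{
 *		return c->time ? (c->time_busy * 100) / c->time : 0;
 *	}
 */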
struct iwl_mvm {
/* for logger access */
struct device *dev;
@@ -853,6 +1004,8 @@ struct iwl_mvm {
/* For async rx handlers that require the wiphy lock */
struct wiphy_work async_handlers_wiphy_wk;
+ struct wiphy_work trig_link_selection_wk;
+
struct work_struct roc_done_wk;
unsigned long init_status;
@@ -923,7 +1076,6 @@ struct iwl_mvm {
struct iwl_rx_phy_info last_phy_info;
struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX];
struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_MVM_STATION_COUNT_MAX];
- unsigned long fw_link_ids_map;
u8 rx_ba_sessions;
/* configured by mac80211 */
@@ -945,7 +1097,7 @@ struct iwl_mvm {
unsigned int max_scans;
/* UMAC scan tracking */
- u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS];
+ u32 scan_uid_status[IWL_MAX_UMAC_SCANS];
/* start time of last scan in TSF of the mac that requested the scan */
u64 scan_start;
@@ -1035,6 +1187,7 @@ struct iwl_mvm {
struct ieee80211_channel **nd_channels;
int n_nd_channels;
bool net_detect;
+ bool fast_resume;
u8 offload_tid;
#ifdef CONFIG_IWLWIFI_DEBUGFS
bool d3_wake_sysassert;
@@ -1189,18 +1342,28 @@ struct iwl_mvm {
struct iwl_phy_specific_cfg phy_filters;
#endif
+ /* report rx timestamp in ptp clock time */
+ bool rx_ts_ptp;
+
unsigned long last_6ghz_passive_scan_jiffies;
unsigned long last_reset_or_resume_time_jiffies;
bool sta_remove_requires_queue_remove;
bool mld_api_is_used;
- bool pldr_sync;
+ /*
+ * Indicates that firmware will do a product reset (and
+ * therefore fail to load) when we start it (due to OTP burn);
+ * if so, don't dump errors etc. since this is expected.
+ */
+ bool fw_product_reset;
struct iwl_time_sync_data time_sync;
struct iwl_mei_scan_filter mei_scan_filter;
+ struct iwl_mvm_acs_survey *acs_survey;
+
bool statistics_clear;
};
@@ -1211,11 +1374,14 @@ struct iwl_mvm {
#define IWL_MAC80211_GET_MVM(_hw) \
IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv))
+DEFINE_GUARD(mvm, struct iwl_mvm *, mutex_lock(&_T->mutex), mutex_unlock(&_T->mutex))
+
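/*
 * Editorial sketch (not part of the patch): the guard defined above
 * takes mvm->mutex for the rest of the enclosing scope and releases it
 * automatically on every return path:
 *
 *	static int example_locked_op(struct iwl_mvm *mvm)
 *	{
 *		guard(mvm)(mvm);	// mutex_lock(&mvm->mutex)
 *		if (!iwl_mvm_firmware_running(mvm))
 *			return -EIO;	// unlocked automatically
 *		return 0;		// unlocked automatically
 *	}
 */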
/**
* enum iwl_mvm_status - MVM status bits
* @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted
* @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active
- * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running
+ * @IWL_MVM_STATUS_ROC_P2P_RUNNING: remain-on-channel on P2P is running (when
+ * P2P is not over AUX)
* @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested
* @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active
* @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running
@@ -1229,7 +1395,7 @@ struct iwl_mvm {
enum iwl_mvm_status {
IWL_MVM_STATUS_HW_RFKILL,
IWL_MVM_STATUS_HW_CTKILL,
- IWL_MVM_STATUS_ROC_RUNNING,
+ IWL_MVM_STATUS_ROC_P2P_RUNNING,
IWL_MVM_STATUS_HW_RESTART_REQUESTED,
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_ROC_AUX_RUNNING,
@@ -1320,7 +1486,8 @@ iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu)
static inline struct ieee80211_bss_conf *
iwl_mvm_rcu_fw_link_id_to_link_conf(struct iwl_mvm *mvm, u8 link_id, bool rcu)
{
- if (WARN_ON(link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf)))
+ if (IWL_FW_CHECK(mvm, link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf),
+ "erroneous FW link ID: %d\n", link_id))
return NULL;
if (rcu)
@@ -1570,17 +1737,14 @@ static inline int iwl_mvm_max_active_links(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_trans *trans = mvm->fwrt.trans;
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-
- lockdep_assert_held(&mvm->mutex);
if (vif->type == NL80211_IFTYPE_AP)
return mvm->fw->ucode_capa.num_beacons;
- if ((iwl_mvm_is_esr_supported(trans) &&
- !mvmvif->bt_coex_esr_disabled) ||
- ((CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
- CSR_HW_RFID_IS_CDB(trans->hw_rf_id))))
+ /* Check if HW supports eSR or STR */
+ if (iwl_mvm_is_esr_supported(trans) ||
+ (CSR_HW_RFID_TYPE(trans->hw_rf_id) == IWL_CFG_RF_TYPE_FM &&
+ CSR_HW_RFID_IS_CDB(trans->hw_rf_id)))
return IWL_MVM_FW_MAX_ACTIVE_LINKS_NUM;
return 1;
@@ -1608,7 +1772,7 @@ struct iwl_rate_info {
u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
};
-void __iwl_mvm_mac_stop(struct iwl_mvm *mvm);
+void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend);
int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
/******************
@@ -1642,6 +1806,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
void iwl_mvm_get_sync_time(struct iwl_mvm *mvm, int clock_type, u32 *gp2,
u64 *boottime, ktime_t *realtime);
u32 iwl_mvm_get_systime(struct iwl_mvm *mvm);
+u32 iwl_mvm_find_ie_offset(u8 *beacon, u8 eid, u32 frame_size);
/* Tx / Host Commands */
int __must_check iwl_mvm_send_cmd(struct iwl_mvm *mvm,
@@ -1720,6 +1885,8 @@ void iwl_mvm_handle_rx_statistics(struct iwl_mvm *mvm,
void iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear);
+int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm,
+ bool enable);
void iwl_mvm_accu_radio_stats(struct iwl_mvm *mvm);
/* NVM */
@@ -1741,10 +1908,10 @@ static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm)
static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm)
{
- u8 rx_ant = mvm->fw->valid_tx_ant;
+ u8 rx_ant = mvm->fw->valid_rx_ant;
if (mvm->nvm_data && mvm->nvm_data->valid_rx_ant)
- rx_ant &= mvm->nvm_data->valid_tx_ant;
+ rx_ant &= mvm->nvm_data->valid_rx_ant;
if (mvm->set_rx_ant)
rx_ant &= mvm->set_rx_ant;
@@ -1776,6 +1943,8 @@ int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif);
+
/*
* FW notifications / CMD responses handlers
* Convention: iwl_mvm_rx_<NAME OF THE CMD>
@@ -1930,6 +2099,26 @@ int iwl_mvm_remove_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_disable_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf);
+void iwl_mvm_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+u8 iwl_mvm_get_primary_link(struct ieee80211_vif *vif);
+u8 iwl_mvm_get_other_link(struct ieee80211_vif *vif, u8 link_id);
+
+struct iwl_mvm_link_sel_data {
+ u8 link_id;
+ const struct cfg80211_chan_def *chandef;
+ s32 signal;
+ u16 grade;
+};
+
+#if IS_ENABLED(CONFIG_IWLWIFI_KUNIT_TESTS)
+unsigned int iwl_mvm_get_link_grade(struct ieee80211_bss_conf *link_conf);
+bool iwl_mvm_mld_valid_link_pair(struct ieee80211_vif *vif,
+ const struct iwl_mvm_link_sel_data *a,
+ const struct iwl_mvm_link_sel_data *b);
+
+s8 iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif);
+#endif
+
/* AP and IBSS */
bool iwl_mvm_start_ap_ibss_common(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int *ret);
@@ -2005,9 +2194,13 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_scan_ies *ies);
size_t iwl_mvm_scan_size(struct iwl_mvm *mvm);
int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify);
+
int iwl_mvm_max_scan_ie_len(struct iwl_mvm *mvm);
void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm);
void iwl_mvm_scan_timeout_wk(struct work_struct *work);
+int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* Scheduled scan */
void iwl_mvm_rx_lmac_scan_complete_notif(struct iwl_mvm *mvm,
@@ -2101,11 +2294,22 @@ extern const struct file_operations iwl_dbgfs_d3_test_ops;
#ifdef CONFIG_PM
void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
+void iwl_mvm_fast_suspend(struct iwl_mvm *mvm);
+int iwl_mvm_fast_resume(struct iwl_mvm *mvm);
#else
static inline void
iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
+
+static inline void iwl_mvm_fast_suspend(struct iwl_mvm *mvm)
+{
+}
+
+static inline int iwl_mvm_fast_resume(struct iwl_mvm *mvm)
+{
+ return 0;
+}
#endif
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
struct iwl_wowlan_config_cmd *cmd);
@@ -2113,7 +2317,8 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,
bool offload_ns,
- u32 cmd_flags);
+ u32 cmd_flags,
+ u8 sta_id);
/* BT Coex */
int iwl_mvm_send_bt_init_conf(struct iwl_mvm *mvm);
@@ -2133,12 +2338,6 @@ bool iwl_mvm_bt_coex_is_tpc_allowed(struct iwl_mvm *mvm,
u8 iwl_mvm_bt_coex_get_single_ant_msk(struct iwl_mvm *mvm, u8 enabled_ants);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
-bool iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id, int primary_link);
-void iwl_mvm_bt_coex_update_vif_esr(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- int link_id);
/* beacon filtering */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -2450,6 +2649,21 @@ static inline u8 iwl_mvm_phy_band_from_nl80211(enum nl80211_band band)
}
}
+static inline u8 iwl_mvm_nl80211_band_from_phy(u8 phy_band)
+{
+ switch (phy_band) {
+ case PHY_BAND_24:
+ return NL80211_BAND_2GHZ;
+ case PHY_BAND_5:
+ return NL80211_BAND_5GHZ;
+ case PHY_BAND_6:
+ return NL80211_BAND_6GHZ;
+ default:
+ WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
+ return NL80211_BAND_5GHZ;
+ }
+}
+
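/*
 * Editorial usage sketch (not part of the patch): mapping a FW PHY
 * band value from a notification into mac80211's enumeration:
 *
 *	u8 phy_band = PHY_BAND_6;
 *	enum nl80211_band band = iwl_mvm_nl80211_band_from_phy(phy_band);
 *	// band == NL80211_BAND_6GHZ
 */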
/* Channel Switch */
void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk);
int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw,
@@ -2607,6 +2821,13 @@ static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm)
sw_rfkill);
}
+static inline bool iwl_mvm_has_p2p_over_aux(struct iwl_mvm *mvm)
+{
+ u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
+
+ return iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) >= 4;
+}
+
static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm,
struct sk_buff *skb)
{
@@ -2641,7 +2862,7 @@ int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant);
int iwl_mvm_mac_start(struct ieee80211_hw *hw);
void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type);
-void iwl_mvm_mac_stop(struct ieee80211_hw *hw);
+void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend);
static inline int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
{
return 0;
@@ -2711,7 +2932,7 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw,
int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_mvm_channel_switch(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
-int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw,
+int iwl_mvm_pre_channel_switch(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *chsw);
void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw,
@@ -2753,12 +2974,6 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw,
int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
bool iwl_mvm_enable_fils(struct iwl_mvm *mvm,
struct ieee80211_chanctx_conf *ctx);
-void iwl_mvm_mld_select_links(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- bool valid_links_changed);
-int iwl_mvm_mld_get_primary_link(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- unsigned long usable_links);
-
bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm,
struct ieee80211_chanctx_conf *ctx);
@@ -2778,5 +2993,37 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
- int duration, u32 activity);
+ int duration, enum iwl_roc_activity activity);
+
+/* EMLSR */
+bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep);
+int iwl_mvm_block_esr_sync(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason);
+void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason);
+void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ enum iwl_mvm_esr_state reason,
+ u8 link_to_keep);
+s8 iwl_mvm_get_esr_rssi_thresh(struct iwl_mvm *mvm,
+ const struct cfg80211_chan_def *chandef,
+ bool low);
+void iwl_mvm_bt_coex_update_link_esr(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ int link_id);
+bool
+iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ s32 link_rssi,
+ bool primary);
+int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ unsigned int link_id, bool active);
+
+void
+iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ bool is_ap);
#endif /* __IWL_MVM_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index ae8177222881..836ca22597bc 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -9,8 +9,7 @@
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "mvm.h"
-#include "iwl-eeprom-parse.h"
-#include "iwl-eeprom-read.h"
+#include "iwl-nvm-utils.h"
#include "iwl-nvm-parse.h"
#include "iwl-prph.h"
#include "fw/acpi.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
index dfb16ca5b438..1eb21fe861e5 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/offloading.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2021-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2021-2022, 2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 Intel Deutschland GmbH
*/
@@ -30,7 +30,8 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,
bool offload_ns,
- u32 cmd_flags)
+ u32 cmd_flags,
+ u8 sta_id)
{
union {
struct iwl_proto_offload_cmd_v1 v1;
@@ -205,6 +206,9 @@ int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
if (!disable_offloading)
common->enabled = cpu_to_le32(enabled);
+ if (ver >= 4)
+ cmd.v4.sta_id = cpu_to_le32(sta_id);
+
hcmd.len[0] = size;
return iwl_mvm_send_cmd(mvm, &hcmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index a93981cb9714..b7dcae76a05d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -18,7 +18,7 @@
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-phy-db.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "iwl-prph.h"
@@ -41,12 +41,8 @@ static const struct iwl_op_mode_ops iwl_mvm_ops_mq;
struct iwl_mvm_mod_params iwlmvm_mod_params = {
.power_scheme = IWL_POWER_SCHEME_BPS,
- /* rest of fields are 0 by default */
};
-module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444);
-MODULE_PARM_DESC(init_dbg,
- "set to true to debug an ASSERT in INIT fw (default: false");
module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444);
MODULE_PARM_DESC(power_scheme,
"power management scheme: 1-active, 2-balanced, 3-low power, default: 2");
@@ -145,6 +141,24 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
+static void iwl_mvm_rx_esr_mode_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_esr_mode_notif *notif = (void *)pkt->data;
+ struct ieee80211_vif *vif = iwl_mvm_get_bss_vif(mvm);
+
+ /* FW recommendation is only for entering EMLSR */
+ if (IS_ERR_OR_NULL(vif) || iwl_mvm_vif_from_mac80211(vif)->esr_active)
+ return;
+
+ if (le32_to_cpu(notif->action) == ESR_RECOMMEND_ENTER)
+ iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW);
+ else
+ iwl_mvm_block_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_FW,
+ iwl_mvm_get_primary_link(vif));
+}
+
static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -365,13 +379,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_rx_scan_match_found,
RX_HANDLER_SYNC),
RX_HANDLER(SCAN_COMPLETE_UMAC, iwl_mvm_rx_umac_scan_complete_notif,
- RX_HANDLER_ASYNC_LOCKED, struct iwl_umac_scan_complete),
+ RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_umac_scan_complete),
RX_HANDLER(SCAN_ITERATION_COMPLETE_UMAC,
iwl_mvm_rx_umac_scan_iter_complete_notif, RX_HANDLER_SYNC,
struct iwl_umac_scan_iter_complete_notif),
RX_HANDLER(MISSED_BEACONS_NOTIFICATION, iwl_mvm_rx_missed_beacons_notif,
- RX_HANDLER_SYNC, struct iwl_missed_beacons_notif),
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_missed_beacons_notif),
RX_HANDLER(REPLY_ERROR, iwl_mvm_rx_fw_error, RX_HANDLER_SYNC,
struct iwl_error_resp),
@@ -423,6 +439,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_channel_switch_error_notif,
RX_HANDLER_ASYNC_UNLOCKED,
struct iwl_channel_switch_error_notif),
+
+ RX_HANDLER_GRP(DATA_PATH_GROUP, ESR_MODE_NOTIF,
+ iwl_mvm_rx_esr_mode_notif,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_mvm_esr_mode_notif),
+
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
@@ -445,8 +467,11 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC,
struct iwl_time_msmt_cfm_notify),
RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF,
- iwl_mvm_rx_roc_notif, RX_HANDLER_SYNC,
+ iwl_mvm_rx_roc_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_roc_notif),
+ RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
+ iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED,
+ struct iwl_umac_scan_channel_survey_notif),
};
#undef RX_HANDLER
#undef RX_HANDLER_GRP
@@ -543,6 +568,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
HCMD_NAME(D0I3_END_CMD),
HCMD_NAME(LTR_CONFIG),
HCMD_NAME(LDBG_CONFIG_CMD),
+ HCMD_NAME(DEBUG_LOG_MSG),
};
/* Please keep this array *SORTED* by hex value.
@@ -550,6 +576,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = {
*/
static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
HCMD_NAME(SHARED_MEM_CFG_CMD),
+ HCMD_NAME(SOC_CONFIGURATION_CMD),
HCMD_NAME(INIT_EXTENDED_CFG_CMD),
HCMD_NAME(FW_ERROR_RECOVERY_CMD),
HCMD_NAME(RFI_CONFIG_CMD),
@@ -564,8 +591,10 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = {
* Access is done through binary search
*/
static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
+ HCMD_NAME(LOW_LATENCY_CMD),
HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
HCMD_NAME(SESSION_PROTECTION_CMD),
+ HCMD_NAME(CANCEL_CHANNEL_SWITCH_CMD),
HCMD_NAME(MAC_CONFIG_CMD),
HCMD_NAME(LINK_CONFIG_CMD),
HCMD_NAME(STA_CONFIG_CMD),
@@ -574,7 +603,10 @@ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
HCMD_NAME(STA_DISABLE_TX_CMD),
HCMD_NAME(ROC_CMD),
HCMD_NAME(ROC_NOTIF),
+ HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF),
+ HCMD_NAME(MISSED_VAP_NOTIF),
HCMD_NAME(SESSION_PROTECTION_NOTIF),
+ HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF),
HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
};
@@ -586,6 +618,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
HCMD_NAME(CTDP_CONFIG_CMD),
HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
+ HCMD_NAME(AP_TX_POWER_CONSTRAINTS_CMD),
HCMD_NAME(CT_KILL_NOTIFICATION),
HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};
@@ -597,6 +630,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(DQA_ENABLE_CMD),
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
+ HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD),
+ HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD),
HCMD_NAME(STA_HE_CTXT_CMD),
HCMD_NAME(RLC_CONFIG_CMD),
HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
@@ -604,6 +639,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(CHEST_COLLECTOR_FILTER_CONFIG_CMD),
HCMD_NAME(SCD_QUEUE_CONFIG_CMD),
HCMD_NAME(SEC_KEY_CMD),
+ HCMD_NAME(ESR_MODE_NOTIF),
HCMD_NAME(MONITOR_NOTIF),
HCMD_NAME(THERMAL_DUAL_CHAIN_REQUEST),
HCMD_NAME(STA_PM_NOTIF),
@@ -622,7 +658,23 @@ static const struct iwl_hcmd_names iwl_mvm_statistics_names[] = {
/* Please keep this array *SORTED* by hex value.
* Access is done through binary search
*/
+static const struct iwl_hcmd_names iwl_mvm_debug_names[] = {
+ HCMD_NAME(LMAC_RD_WR),
+ HCMD_NAME(UMAC_RD_WR),
+ HCMD_NAME(HOST_EVENT_CFG),
+ HCMD_NAME(DBGC_SUSPEND_RESUME),
+ HCMD_NAME(BUFFER_ALLOCATION),
+ HCMD_NAME(GET_TAS_STATUS),
+ HCMD_NAME(FW_DUMP_COMPLETE_CMD),
+ HCMD_NAME(FW_CLEAR_BUFFER),
+ HCMD_NAME(MFU_ASSERT_DUMP_NTF),
+};
+
+/* Please keep this array *SORTED* by hex value.
+ * Access is done through binary search
+ */
static const struct iwl_hcmd_names iwl_mvm_scan_names[] = {
+ HCMD_NAME(CHANNEL_SURVEY_NOTIF),
HCMD_NAME(OFFLOAD_MATCH_INFO_NOTIF),
};
@@ -673,6 +725,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
[PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names),
[REGULATORY_AND_NVM_GROUP] =
HCMD_ARR(iwl_mvm_regulatory_and_nvm_names),
+ [DEBUG_GROUP] = HCMD_ARR(iwl_mvm_debug_names),
[STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names),
};
@@ -708,20 +761,18 @@ static void iwl_mvm_tx_unblock_dwork(struct work_struct *work)
struct ieee80211_vif *tx_blocked_vif;
struct iwl_mvm_vif *mvmvif;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
tx_blocked_vif =
rcu_dereference_protected(mvm->csa_tx_blocked_vif,
lockdep_is_held(&mvm->mutex));
if (!tx_blocked_vif)
- goto unlock;
+ return;
mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif);
iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false);
RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL);
-unlock:
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_fwrt_dump_start(void *ctx)
@@ -738,21 +789,12 @@ static void iwl_mvm_fwrt_dump_end(void *ctx)
mutex_unlock(&mvm->mutex);
}
-static bool iwl_mvm_fwrt_fw_running(void *ctx)
-{
- return iwl_mvm_firmware_running(ctx);
-}
-
static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd)
{
struct iwl_mvm *mvm = (struct iwl_mvm *)ctx;
- int ret;
-
- mutex_lock(&mvm->mutex);
- ret = iwl_mvm_send_cmd(mvm, host_cmd);
- mutex_unlock(&mvm->mutex);
- return ret;
+ guard(mvm)(mvm);
+ return iwl_mvm_send_cmd(mvm, host_cmd);
}
static bool iwl_mvm_d3_debug_enable(void *ctx)
@@ -763,7 +805,6 @@ static bool iwl_mvm_d3_debug_enable(void *ctx)
static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = {
.dump_start = iwl_mvm_fwrt_dump_start,
.dump_end = iwl_mvm_fwrt_dump_end,
- .fw_running = iwl_mvm_fwrt_fw_running,
.send_hcmd = iwl_mvm_fwrt_send_hcmd,
.d3_debug_enable = iwl_mvm_d3_debug_enable,
};
@@ -819,8 +860,7 @@ get_nvm_from_fw:
ret = iwl_mvm_init_mcc(mvm);
}
- if (!iwlmvm_mod_params.init_dbg || !ret)
- iwl_mvm_stop_device(mvm);
+ iwl_mvm_stop_device(mvm);
mutex_unlock(&mvm->mutex);
wiphy_unlock(mvm->hw->wiphy);
@@ -830,7 +870,7 @@ get_nvm_from_fw:
IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
/* no longer need this regardless of failure or not */
- mvm->pldr_sync = false;
+ mvm->fw_product_reset = false;
return ret;
}
@@ -1143,6 +1183,27 @@ static const struct iwl_mei_ops mei_ops = {
.nic_stolen = iwl_mvm_mei_nic_stolen,
};
+static void iwl_mvm_find_link_selection_vif(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (ieee80211_vif_is_mld(vif) && mvmvif->authorized)
+ iwl_mvm_select_links(mvmvif->mvm, vif);
+}
+
+static void iwl_mvm_trig_link_selection(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, trig_link_selection_wk);
+
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_find_link_selection_vif,
+ NULL);
+}
+
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -1274,6 +1335,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
wiphy_work_init(&mvm->async_handlers_wiphy_wk,
iwl_mvm_async_handlers_wiphy_wk);
+
+ wiphy_work_init(&mvm->trig_link_selection_wk,
+ iwl_mvm_trig_link_selection);
+
init_waitqueue_head(&mvm->rx_sync_waitq);
mvm->queue_sync_state = 0;
@@ -1303,24 +1368,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
trans_cfg.no_reclaim_cmds = no_reclaim_cmds;
trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds);
- switch (iwlwifi_mod_params.amsdu_size) {
- case IWL_AMSDU_DEF:
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
- break;
- case IWL_AMSDU_4K:
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
- break;
- case IWL_AMSDU_8K:
- trans_cfg.rx_buf_size = IWL_AMSDU_8K;
- break;
- case IWL_AMSDU_12K:
- trans_cfg.rx_buf_size = IWL_AMSDU_12K;
- break;
- default:
- pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME,
- iwlwifi_mod_params.amsdu_size);
- trans_cfg.rx_buf_size = IWL_AMSDU_4K;
- }
+ trans_cfg.rx_buf_size = iwl_amsdu_size_to_rxb_size();
trans->wide_cmd_header = true;
trans_cfg.bc_table_dword =
@@ -1380,9 +1428,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
goto out_free;
}
- IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
- mvm->trans->name, mvm->trans->hw_rev);
-
if (iwlwifi_mod_params.nvm_file)
mvm->nvm_file_name = iwlwifi_mod_params.nvm_file;
else
@@ -1450,8 +1495,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_fw_flush_dumps(&mvm->fwrt);
iwl_fw_runtime_free(&mvm->fwrt);
- if (iwlmvm_mod_params.init_dbg)
- return op_mode;
iwl_phy_db_free(mvm->phy_db);
kfree(mvm->scan_cmd);
iwl_trans_op_mode_leave(trans);
@@ -1528,6 +1571,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
kfree(mvm->temp_nvm_data);
for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
+ kfree(mvm->acs_survey);
cancel_delayed_work_sync(&mvm->tcm.work);
@@ -1854,12 +1898,10 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
bool unified = iwl_mvm_has_unified_ucode(mvm);
- if (state) {
+ if (state)
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
- wake_up(&mvm->rx_sync_waitq);
- } else {
+ else
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
- }
iwl_mvm_set_rfkill_state(mvm);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index 41e68aa6bec8..bc363e8427e4 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -79,7 +79,7 @@ void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
cmd->bf_roaming_state =
cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
}
- cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
+ cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->ba_enabled);
}
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
@@ -211,19 +211,37 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
}
-static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+struct iwl_allow_uapsd_iface_iterator_data {
+ struct ieee80211_vif *current_vif;
+ bool allow_uapsd;
+};
+
+static void iwl_mvm_allow_uapsd_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
- bool *is_p2p_standalone = _data;
+ struct iwl_allow_uapsd_iface_iterator_data *data = _data;
+ struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_vif *curr_mvmvif =
+ iwl_mvm_vif_from_mac80211(data->current_vif);
- switch (ieee80211_vif_type_p2p(vif)) {
- case NL80211_IFTYPE_P2P_GO:
+ /* exclude the given vif */
+ if (vif == data->current_vif)
+ return;
+
+ switch (vif->type) {
case NL80211_IFTYPE_AP:
- *is_p2p_standalone = false;
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_NAN:
+ data->allow_uapsd = false;
break;
case NL80211_IFTYPE_STATION:
- if (vif->cfg.assoc)
- *is_p2p_standalone = false;
+ /* Allow U-APSD only if the P2P interface and the BSS station
+ * interface share the same channel (i.e. they are not in DCM).
+ */
+ if (vif->cfg.assoc && other_mvmvif->deflink.phy_ctxt &&
+ curr_mvmvif->deflink.phy_ctxt &&
+ other_mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id)
+ data->allow_uapsd = false;
break;
default:
@@ -235,6 +253,10 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_allow_uapsd_iface_iterator_data data = {
+ .current_vif = vif,
+ .allow_uapsd = true,
+ };
if (ether_addr_equal(mvmvif->uapsd_misbehaving_ap_addr,
vif->cfg.ap_addr))
@@ -249,88 +271,75 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm,
IEEE80211_P2P_OPPPS_ENABLE_BIT))
return false;
- /*
- * Avoid using uAPSD if client is in DCM -
- * low latency issue in Miracast
- */
- if (iwl_mvm_phy_ctx_count(mvm) >= 2)
+ if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm))
return false;
- if (vif->p2p) {
- /* Allow U-APSD only if p2p is stand alone */
- bool is_p2p_standalone = true;
-
- if (!iwl_mvm_is_p2p_scm_uapsd_supported(mvm))
- return false;
-
- ieee80211_iterate_active_interfaces_atomic(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_p2p_standalone_iterator,
- &is_p2p_standalone);
-
- if (!is_p2p_standalone)
- return false;
- }
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_allow_uapsd_iterator,
+ &data);
- return true;
+ return data.allow_uapsd;
}
-static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif)
+static bool iwl_mvm_power_is_radar(struct ieee80211_bss_conf *link_conf)
{
struct ieee80211_chanctx_conf *chanctx_conf;
- struct ieee80211_bss_conf *link_conf;
- bool radar_detect = false;
- unsigned int link_id;
- rcu_read_lock();
- for_each_vif_active_link(vif, link_conf, link_id) {
- chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
- /* this happens on link switching, just ignore inactive ones */
- if (!chanctx_conf)
- continue;
+ chanctx_conf = rcu_dereference(link_conf->chanctx_conf);
- radar_detect = !!(chanctx_conf->def.chan->flags &
- IEEE80211_CHAN_RADAR);
- if (radar_detect)
- goto out;
- }
+ /* this happens on link switching, just ignore an inactive link */
+ if (!chanctx_conf)
+ return false;
-out:
- rcu_read_unlock();
- return radar_detect;
+ return chanctx_conf->def.chan->flags & IEEE80211_CHAN_RADAR;
}
static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mac_power_cmd *cmd)
{
- int dtimper = vif->bss_conf.dtim_period ?: 1;
- int skip;
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int min_link_skip = ~0;
+ unsigned int link_id;
/* disable, in case we're supposed to override */
cmd->skip_dtim_periods = 0;
cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- if (iwl_mvm_power_is_radar(vif))
+ if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
+ return;
+ cmd->skip_dtim_periods = 2;
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
return;
+ }
- if (dtimper >= 10)
- return;
+ rcu_read_lock();
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ unsigned int dtimper = link_conf->dtim_period ?: 1;
+ unsigned int dtimper_tu = dtimper * link_conf->beacon_int;
+ unsigned int skip;
- if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) {
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP)
+ if (dtimper >= 10 || iwl_mvm_power_is_radar(link_conf)) {
+ rcu_read_unlock();
return;
- skip = 2;
- } else {
- int dtimper_tu = dtimper * vif->bss_conf.beacon_int;
+ }
if (WARN_ON(!dtimper_tu))
- return;
+ continue;
+
/* configure skip over dtim up to 900 TU DTIM interval */
- skip = max_t(u8, 1, 900 / dtimper_tu);
+ skip = max_t(int, 1, 900 / dtimper_tu);
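+ /* e.g. (illustrative): a DTIM period of 3 with a 100 TU beacon
+ * interval gives dtimper_tu = 300, so skip = max(1, 900 / 300) = 3
+ * DTIM periods for this link.
+ */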
+ min_link_skip = min(min_link_skip, skip);
}
+ rcu_read_unlock();
+
+ /* no WARN_ON here - this can only happen if the WARN_ON above fired */
+ if (min_link_skip == ~0)
+ return;
- cmd->skip_dtim_periods = skip;
+ cmd->skip_dtim_periods = min_link_skip;
cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
}
@@ -826,7 +835,7 @@ static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd);
if (!ret)
- mvmvif->bf_data.bf_enabled = true;
+ mvmvif->bf_enabled = true;
return ret;
}
@@ -855,7 +864,7 @@ static int _iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
if (!ret)
- mvmvif->bf_data.bf_enabled = false;
+ mvmvif->bf_enabled = false;
return ret;
}
@@ -903,16 +912,16 @@ static int iwl_mvm_power_set_ba(struct iwl_mvm *mvm,
.bf_enable_beacon_filter = cpu_to_le32(1),
};
- if (!mvmvif->bf_data.bf_enabled)
+ if (!mvmvif->bf_enabled)
return 0;
if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))
cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
- mvmvif->bf_data.ba_enabled = !(!mvmvif->pm_enabled ||
- mvm->ps_disabled ||
- !vif->cfg.ps ||
- iwl_mvm_vif_low_latency(mvmvif));
+ mvmvif->ba_enabled = !(!mvmvif->pm_enabled ||
+ mvm->ps_disabled ||
+ !vif->cfg.ps ||
+ iwl_mvm_vif_low_latency(mvmvif));
return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd);
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 00860feefa7a..05715e5af6ab 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -514,6 +514,8 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
link_sta->agg.max_tid_amsdu_len[i] = 1;
}
+ ieee80211_sta_recalc_aggregates(sta);
+
IWL_DEBUG_RATE(mvm,
"AMSDU update. AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n",
le32_to_cpu(notif->amsdu_size), size,
@@ -609,6 +611,7 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm,
cpu_to_le16(max_amsdu_len) : 0,
};
unsigned int link_id = link_conf->link_id;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
int cmd_ver;
int ret;
@@ -652,12 +655,12 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm,
* since TLC offload works with one mode we can assume
* that only vht/ht is used and also set it as station max amsdu
*/
- sta->deflink.agg.max_amsdu_len = max_amsdu_len;
+ link_sta->agg.max_amsdu_len = max_amsdu_len;
+ ieee80211_sta_recalc_aggregates(sta);
+
+ cfg_cmd.max_tx_op = cpu_to_le16(mvmvif->max_tx_op);
- cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD),
- 0);
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n",
cfg_cmd.sta_id, cfg_cmd.max_ch_width, cfg_cmd.mode);
IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, chains=0x%X, ch_wid_supp=%d, flags=0x%X\n",
@@ -693,9 +696,7 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm,
u16 cmd_size = sizeof(cfg_cmd_v3);
/* In old versions of the API the struct is 4 bytes smaller */
- if (iwl_fw_lookup_cmd_ver(mvm->fw,
- WIDE_ID(DATA_PATH_GROUP,
- TLC_MNG_CONFIG_CMD), 0) < 3)
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) < 3)
cmd_size -= 4;
ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, CMD_ASYNC, cmd_size,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 376b23b409dc..ea81cb236d5c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -3,7 +3,7 @@
*
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright (C) 2003 - 2014, 2018 - 2023 Intel Corporation
+ * Copyright (C) 2003 - 2014, 2018 - 2024 Intel Corporation
*****************************************************************************/
#ifndef __rs_h__
@@ -122,13 +122,8 @@ enum {
#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
-/*
- * FIXME - various places in firmware API still use u8,
- * e.g. LQ command and SCD config command.
- * This should be 256 instead.
- */
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255)
-#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64)
+#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64)
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
@@ -203,11 +198,12 @@ struct rs_rate {
/**
* struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW
* @last_rate_n_flags: last rate reported by FW
+ * @pers: persistent fields
* @pers.sta_id: the id of the station
- * @chains: bitmask of chains reported in %chain_signal
- * @chain_signal: per chain signal strength
- * @last_rssi: last rssi reported
- * @drv: pointer back to the driver data
+ * @pers.chains: bitmask of chains reported in %chain_signal
+ * @pers.chain_signal: per chain signal strength
+ * @pers.last_rssi: last rssi reported
+ * @pers.drv: pointer back to the driver data
*/
struct iwl_lq_sta_rs_fw {
/* last tx rate_n_flags */
@@ -218,11 +214,11 @@ struct iwl_lq_sta_rs_fw {
u32 sta_id;
#ifdef CONFIG_MAC80211_DEBUGFS
/**
- * @dbg_fixed_rate: for debug, use fixed rate if not 0
+ * @pers.dbg_fixed_rate: for debug, use fixed rate if not 0
*/
u32 dbg_fixed_rate;
/**
- * @dbg_agg_frame_count_lim: for debug, max number of
+ * @pers.dbg_agg_frame_count_lim: for debug, max number of
* frames in A-MPDU
*/
u16 dbg_agg_frame_count_lim;
@@ -407,7 +403,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, struct ieee80211_tx_info *info, bool ndp);
/**
- * iwl_rate_control_register - Register the rate control algorithm callbacks
+ * iwl_mvm_rate_control_register - Register the rate control algorithm callbacks
*
* Since the rate control algorithm is hardware specific, there is no need
* or reason to place it as a stand alone module. The driver can call
@@ -419,7 +415,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int iwl_mvm_rate_control_register(void);
/**
- * iwl_rate_control_unregister - Unregister the rate control callbacks
+ * iwl_mvm_rate_control_unregister - Unregister the rate control callbacks
*
* This should be called after calling ieee80211_unregister_hw, but before
* the driver is unloaded.
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index b1add7942c5b..151289e13308 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -556,36 +556,38 @@ struct iwl_mvm_stat_data_all_macs {
struct iwl_stats_ntfy_per_mac *per_mac;
};
-static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
+static void iwl_mvm_update_link_sig(struct ieee80211_vif *vif, int sig,
+ struct iwl_mvm_vif_link_info *link_info,
+ struct ieee80211_bss_conf *bss_conf)
{
- struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = mvmvif->mvm;
- int thold = vif->bss_conf.cqm_rssi_thold;
- int hyst = vif->bss_conf.cqm_rssi_hyst;
+ struct iwl_mvm *mvm = iwl_mvm_vif_from_mac80211(vif)->mvm;
+ int thold = bss_conf->cqm_rssi_thold;
+ int hyst = bss_conf->cqm_rssi_hyst;
int last_event;
+ s8 exit_esr_thresh;
if (sig == 0) {
IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
return;
}
- mvmvif->bf_data.ave_beacon_signal = sig;
+ link_info->bf_data.ave_beacon_signal = sig;
/* BT Coex */
- if (mvmvif->bf_data.bt_coex_min_thold !=
- mvmvif->bf_data.bt_coex_max_thold) {
- last_event = mvmvif->bf_data.last_bt_coex_event;
- if (sig > mvmvif->bf_data.bt_coex_max_thold &&
- (last_event <= mvmvif->bf_data.bt_coex_min_thold ||
+ if (link_info->bf_data.bt_coex_min_thold !=
+ link_info->bf_data.bt_coex_max_thold) {
+ last_event = link_info->bf_data.last_bt_coex_event;
+ if (sig > link_info->bf_data.bt_coex_max_thold &&
+ (last_event <= link_info->bf_data.bt_coex_min_thold ||
last_event == 0)) {
- mvmvif->bf_data.last_bt_coex_event = sig;
+ link_info->bf_data.last_bt_coex_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator bt coex high %d\n",
sig);
iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_HIGH);
- } else if (sig < mvmvif->bf_data.bt_coex_min_thold &&
- (last_event >= mvmvif->bf_data.bt_coex_max_thold ||
+ } else if (sig < link_info->bf_data.bt_coex_min_thold &&
+ (last_event >= link_info->bf_data.bt_coex_max_thold ||
last_event == 0)) {
- mvmvif->bf_data.last_bt_coex_event = sig;
+ link_info->bf_data.last_bt_coex_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator bt coex low %d\n",
sig);
iwl_mvm_bt_rssi_event(mvm, vif, RSSI_EVENT_LOW);
@@ -596,10 +598,10 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
return;
/* CQM Notification */
- last_event = mvmvif->bf_data.last_cqm_event;
+ last_event = link_info->bf_data.last_cqm_event;
if (thold && sig < thold && (last_event == 0 ||
sig < last_event - hyst)) {
- mvmvif->bf_data.last_cqm_event = sig;
+ link_info->bf_data.last_cqm_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n",
sig);
ieee80211_cqm_rssi_notify(
@@ -609,7 +611,7 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
GFP_KERNEL);
} else if (sig > thold &&
(last_event == 0 || sig > last_event + hyst)) {
- mvmvif->bf_data.last_cqm_event = sig;
+ link_info->bf_data.last_cqm_event = sig;
IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n",
sig);
ieee80211_cqm_rssi_notify(
@@ -618,6 +620,20 @@ static void iwl_mvm_update_vif_sig(struct ieee80211_vif *vif, int sig)
sig,
GFP_KERNEL);
}
+
+ /* ESR recalculation */
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return;
+
+ exit_esr_thresh =
+ iwl_mvm_get_esr_rssi_thresh(mvm,
+ &bss_conf->chanreq.oper,
+ true);
+
+ if (sig < exit_esr_thresh)
+ iwl_mvm_exit_esr(mvm, vif, IWL_MVM_ESR_EXIT_LOW_RSSI,
+ iwl_mvm_get_other_link(vif,
+ bss_conf->link_id));
}
static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
@@ -651,7 +667,8 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
mvmvif->deflink.beacon_stats.accu_num_beacons +=
mvmvif->deflink.beacon_stats.num_beacons;
- iwl_mvm_update_vif_sig(vif, sig);
+ /* This is used by the pre-MLO API, so use deflink */
+ iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink, &vif->bss_conf);
}
static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
@@ -684,7 +701,9 @@ static void iwl_mvm_stat_iterator_all_macs(void *_data, u8 *mac,
mvmvif->deflink.beacon_stats.num_beacons;
sig = -le32_to_cpu(mac_stats->beacon_filter_average_energy);
- iwl_mvm_update_vif_sig(vif, sig);
+
+ /* This is used by the pre-MLO API, so use deflink */
+ iwl_mvm_update_link_sig(vif, sig, &mvmvif->deflink, &vif->bss_conf);
}
static inline void
@@ -889,8 +908,8 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
if (link_info->phy_ctxt &&
link_info->phy_ctxt->channel->band == NL80211_BAND_2GHZ)
- iwl_mvm_bt_coex_update_vif_esr(mvm, bss_conf->vif,
- link_id);
+ iwl_mvm_bt_coex_update_link_esr(mvm, bss_conf->vif,
+ link_id);
/* make sure that beacon statistics don't go backwards with TCM
* request to clear statistics
@@ -900,7 +919,8 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
mvmvif->link[link_id]->beacon_stats.num_beacons;
sig = -le32_to_cpu(link_stats->beacon_filter_average_energy);
- iwl_mvm_update_vif_sig(bss_conf->vif, sig);
+ iwl_mvm_update_link_sig(bss_conf->vif, sig, link_info,
+ bss_conf);
if (WARN_ONCE(mvmvif->id >= MAC_INDEX_AUX,
"invalid mvmvif id: %d", mvmvif->id))
@@ -930,6 +950,92 @@ iwl_mvm_stat_iterator_all_links(struct iwl_mvm *mvm,
}
}
+#define SEC_LINK_MIN_PERC 10
+#define SEC_LINK_MIN_TX 3000
+#define SEC_LINK_MIN_RX 400
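+
+/*
+ * Illustrative numbers (not from this patch): with 10000 TX MPDUs in
+ * total, of which 500 were sent on the secondary link, the secondary
+ * link carries 5%. Total TX exceeds SEC_LINK_MIN_TX and 5% is below
+ * SEC_LINK_MIN_PERC, so EMLSR is exited for bad link usage.
+ */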
+
+static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm)
+{
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
+ struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_sta *mvmsta;
+ unsigned long total_tx = 0, total_rx = 0;
+ unsigned long sec_link_tx = 0, sec_link_rx = 0;
+ u8 sec_link_tx_perc, sec_link_rx_perc;
+ u8 sec_link;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (IS_ERR_OR_NULL(bss_vif))
+ return;
+
+ mvmvif = iwl_mvm_vif_from_mac80211(bss_vif);
+
+ if (!mvmvif->esr_active || !mvmvif->ap_sta)
+ return;
+
+ mvmsta = iwl_mvm_sta_from_mac80211(mvmvif->ap_sta);
+ /* We only count for the AP sta in a MLO connection */
+ if (!mvmsta->mpdu_counters)
+ return;
+
+ /* Get the FW ID of the secondary link */
+ sec_link = iwl_mvm_get_other_link(bss_vif,
+ iwl_mvm_get_primary_link(bss_vif));
+ if (WARN_ON(!mvmvif->link[sec_link]))
+ return;
+ sec_link = mvmvif->link[sec_link]->fw_link_id;
+
+ /* Sum up RX and TX MPDUs from the different queues/links */
+ for (int q = 0; q < mvm->trans->num_rx_queues; q++) {
+ spin_lock_bh(&mvmsta->mpdu_counters[q].lock);
+
+ /* Link IDs that don't exist will contain 0 */
+ for (int link = 0; link < IWL_MVM_FW_MAX_LINK_ID; link++) {
+ total_tx += mvmsta->mpdu_counters[q].per_link[link].tx;
+ total_rx += mvmsta->mpdu_counters[q].per_link[link].rx;
+ }
+
+ sec_link_tx += mvmsta->mpdu_counters[q].per_link[sec_link].tx;
+ sec_link_rx += mvmsta->mpdu_counters[q].per_link[sec_link].rx;
+
+ /*
+ * In EMLSR we have statistics every 5 seconds, so we can reset
+ * the counters upon every statistics notification.
+ */
+ memset(mvmsta->mpdu_counters[q].per_link, 0,
+ sizeof(mvmsta->mpdu_counters[q].per_link));
+
+ spin_unlock_bh(&mvmsta->mpdu_counters[q].lock);
+ }
+
+ IWL_DEBUG_STATS(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n",
+ total_tx, total_rx);
+
+ /* If we don't have enough MPDUs - exit EMLSR */
+ if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH &&
+ total_rx < IWL_MVM_ENTER_ESR_TPT_THRESH) {
+ iwl_mvm_block_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_TPT,
+ iwl_mvm_get_primary_link(bss_vif));
+ return;
+ }
+
+ /* Calculate the percentage of the secondary link TX/RX */
+ sec_link_tx_perc = total_tx ? sec_link_tx * 100 / total_tx : 0;
+ sec_link_rx_perc = total_rx ? sec_link_rx * 100 / total_rx : 0;
+
+ /*
+ * The TX/RX percentage is checked only when the total TX/RX traffic
+ * exceeds the required minimum; due to short-circuiting, RX is checked
+ * only if the TX condition did not already trigger an exit.
+ */
+ if ((total_tx > SEC_LINK_MIN_TX &&
+ sec_link_tx_perc < SEC_LINK_MIN_PERC) ||
+ (total_rx > SEC_LINK_MIN_RX &&
+ sec_link_rx_perc < SEC_LINK_MIN_PERC))
+ iwl_mvm_exit_esr(mvm, bss_vif, IWL_MVM_ESR_EXIT_LINK_USAGE,
+ iwl_mvm_get_primary_link(bss_vif));
+}
+
void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
@@ -957,6 +1063,8 @@ void iwl_mvm_handle_rx_system_oper_stats(struct iwl_mvm *mvm,
ieee80211_iterate_stations_atomic(mvm->hw, iwl_mvm_stats_energy_iter,
average_energy);
iwl_mvm_handle_per_phy_stats(mvm, stats->per_phy);
+
+ iwl_mvm_update_esr_mode_tpt(mvm);
}
void iwl_mvm_handle_rx_system_oper_part1_stats(struct iwl_mvm *mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index ce8d83c771a7..1a210d0c22b3 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -566,7 +566,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
lockdep_assert_held(&reorder_buf->lock);
while (ieee80211_sn_less(ssn, nssn)) {
- int index = ssn % reorder_buf->buf_size;
+ int index = ssn % baid_data->buf_size;
struct sk_buff_head *skb_list = &entries[index].frames;
struct sk_buff *skb;
@@ -617,7 +617,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
spin_lock_bh(&reorder_buf->lock);
iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf,
ieee80211_sn_add(reorder_buf->head_sn,
- reorder_buf->buf_size));
+ ba_data->buf_size));
spin_unlock_bh(&reorder_buf->lock);
out:
@@ -839,7 +839,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
}
/* put in reorder buffer */
- index = sn % buffer->buf_size;
+ index = sn % baid_data->buf_size;
__skb_queue_tail(&entries[index].frames, skb);
buffer->num_stored++;
@@ -1890,21 +1890,6 @@ static void iwl_mvm_decode_lsig(struct sk_buff *skb,
}
}
-static inline u8 iwl_mvm_nl80211_band_from_rx_msdu(u8 phy_band)
-{
- switch (phy_band) {
- case PHY_BAND_24:
- return NL80211_BAND_2GHZ;
- case PHY_BAND_5:
- return NL80211_BAND_5GHZ;
- case PHY_BAND_6:
- return NL80211_BAND_6GHZ;
- default:
- WARN_ONCE(1, "Unsupported phy band (%u)\n", phy_band);
- return NL80211_BAND_5GHZ;
- }
-}
-
struct iwl_rx_sta_csa {
bool all_sta_unblocked;
struct ieee80211_vif *vif;
@@ -1969,6 +1954,16 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm,
iwl_mvm_decode_lsig(skb, phy_data);
rx_status->device_timestamp = phy_data->gp2_on_air_rise;
+
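+ /*
+ * In monitor mode with PTP-based RX timestamps, report mactime as a
+ * 64-bit radiotap timestamp: convert the GP2 on-air-rise time to ns,
+ * adjust it against the PTP clock, then convert back to us.
+ */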
+ if (mvm->rx_ts_ptp && mvm->monitor_on) {
+ u64 adj_time =
+ iwl_mvm_ptp_get_adj_time(mvm, phy_data->gp2_on_air_rise * NSEC_PER_USEC);
+
+ rx_status->mactime = div64_u64(adj_time, NSEC_PER_USEC);
+ rx_status->flag |= RX_FLAG_MACTIME_IS_RTAP_TS64;
+ rx_status->flag &= ~RX_FLAG_MACTIME;
+ }
+
rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel,
rx_status->band);
iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags,
@@ -2047,9 +2042,9 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
u32 len;
u32 pkt_len = iwl_rx_packet_payload_len(pkt);
struct ieee80211_sta *sta = NULL;
- struct ieee80211_link_sta *link_sta = NULL;
struct sk_buff *skb;
u8 crypt_len = 0;
+ u8 sta_id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
size_t desc_size;
struct iwl_mvm_rx_phy_data phy_data = {};
u32 format;
@@ -2168,7 +2163,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (iwl_mvm_is_band_in_rx_supported(mvm)) {
u8 band = BAND_IN_RX_STATUS(desc->mac_phy_idx);
- rx_status->band = iwl_mvm_nl80211_band_from_rx_msdu(band);
+ rx_status->band = iwl_mvm_nl80211_band_from_phy(band);
} else {
rx_status->band = phy_data.channel > 14 ? NL80211_BAND_5GHZ :
NL80211_BAND_2GHZ;
@@ -2198,13 +2193,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rcu_read_lock();
if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) {
- u8 id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID);
+ if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) {
+ struct ieee80211_link_sta *link_sta;
- if (!WARN_ON_ONCE(id >= mvm->fw->ucode_capa.num_stations)) {
- sta = rcu_dereference(mvm->fw_id_to_mac_id[id]);
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (IS_ERR(sta))
sta = NULL;
- link_sta = rcu_dereference(mvm->fw_id_to_link_sta[id]);
+ link_sta = rcu_dereference(mvm->fw_id_to_link_sta[sta_id]);
if (sta && sta->valid_links && link_sta) {
rx_status->link_valid = 1;
@@ -2325,6 +2320,16 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
iwl_mvm_agg_rx_received(mvm, reorder_data, baid);
}
+
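+ /*
+ * Count each data MPDU once for the EMLSR throughput tracking; for
+ * an A-MSDU, only the first subframe increments the counter.
+ */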
+ if (ieee80211_is_data(hdr->frame_control)) {
+ u8 sub_frame_idx = desc->amsdu_info &
+ IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+
+ /* 0 means not an A-MSDU, and 1 means a new A-MSDU */
+ if (!sub_frame_idx || sub_frame_idx == 1)
+ iwl_mvm_count_mpdu(mvmsta, sta_id, 1, false,
+ queue);
+ }
}
/* management stuff on default queue */
@@ -2366,7 +2371,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_rx_no_data_ver_3 *desc = (void *)pkt->data;
u32 rssi;
- u32 info_type;
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
struct iwl_mvm_rx_phy_data phy_data;
@@ -2379,7 +2383,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
return;
rssi = le32_to_cpu(desc->rssi);
- info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK;
phy_data.d0 = desc->phy_info[0];
phy_data.d1 = desc->phy_info[1];
phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD;
@@ -2431,7 +2434,12 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
/* 0-length PSDU */
rx_status->flag |= RX_FLAG_NO_PSDU;
- switch (info_type) {
+ /* mark as failed PLCP on any errors to skip checks in mac80211 */
+ if (le32_get_bits(desc->info, RX_NO_DATA_INFO_ERR_MSK) !=
+ RX_NO_DATA_INFO_ERR_NONE)
+ rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC;
+
+ switch (le32_get_bits(desc->info, RX_NO_DATA_INFO_TYPE_MSK)) {
case RX_NO_DATA_INFO_TYPE_NDP:
rx_status->zero_length_psdu_type =
IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING;
@@ -2456,8 +2464,11 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
*
* We mark it as mac header, for upper layers to know where
* all radio tap header ends.
+ *
+ * Since putting data on the skb doesn't move the data already there,
+ * and that is the only way we add data here, data + len is where the
+ * next header would be placed.
*/
- skb_reset_mac_header(skb);
+ skb_set_mac_header(skb, skb->len);
/*
* Override the nss from the rx_vec since the rate_n_flags has
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 11559563ae38..8e0df31f1b3e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -208,7 +208,7 @@ static void iwl_mvm_scan_iterator(void *_data, u8 *mac,
curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif);
- if (vif->type == NL80211_IFTYPE_AP && vif->p2p &&
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO &&
mvmvif->deflink.phy_ctxt && curr_mvmvif->deflink.phy_ctxt &&
mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id)
data->is_dcm_with_p2p_go = true;
@@ -226,6 +226,14 @@ iwl_mvm_scan_type _iwl_mvm_get_scan_type(struct iwl_mvm *mvm,
.global_cnt = 0,
};
+ /*
+ * A scanning AP interface probably wants to generate a survey to do
+ * ACS (automatic channel selection).
+ * Force a non-fragmented scan in that case.
+ */
+ if (vif && ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP)
+ return IWL_SCAN_TYPE_WILD;
+
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_iterator,
@@ -852,11 +860,13 @@ static inline bool iwl_mvm_scan_use_ebs(struct iwl_mvm *mvm,
* 4. it's not a p2p find operation.
* 5. we are not in low latency mode,
* or if fragmented ebs is supported by the FW
+ * 6. the VIF is not an AP interface (scan wants survey results)
*/
return ((capa->flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT) &&
mvm->last_ebs_successful && IWL_MVM_ENABLE_EBS &&
vif->type != NL80211_IFTYPE_P2P_DEVICE &&
- (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)));
+ (!low_latency || iwl_mvm_is_frag_ebs_supported(mvm)) &&
+ ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_AP);
}
static inline bool iwl_mvm_is_regular_scan(struct iwl_mvm_scan_params *params)
@@ -1303,7 +1313,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
if (IWL_MVM_ADWELL_MAX_BUDGET)
cmd->v7.adwell_max_budget =
cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
- else if (params->ssids && params->ssids[0].ssid_len)
+ else if (params->n_ssids && params->ssids[0].ssid_len)
cmd->v7.adwell_max_budget =
cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
else
@@ -1377,11 +1387,14 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
-static u32 iwl_mvm_scan_umac_ooc_priority(struct iwl_mvm_scan_params *params)
+static u32 iwl_mvm_scan_umac_ooc_priority(int type)
{
- return iwl_mvm_is_regular_scan(params) ?
- IWL_SCAN_PRIORITY_EXT_6 :
- IWL_SCAN_PRIORITY_EXT_2;
+ if (type == IWL_MVM_SCAN_REGULAR)
+ return IWL_SCAN_PRIORITY_EXT_6;
+ if (type == IWL_MVM_SCAN_INT_MLO)
+ return IWL_SCAN_PRIORITY_EXT_4;
+
+ return IWL_SCAN_PRIORITY_EXT_2;
}
static void
@@ -1405,7 +1418,7 @@ iwl_mvm_scan_umac_dwell_v11(struct iwl_mvm *mvm,
if (IWL_MVM_ADWELL_MAX_BUDGET)
general_params->adwell_max_budget =
cpu_to_le16(IWL_MVM_ADWELL_MAX_BUDGET);
- else if (params->ssids && params->ssids[0].ssid_len)
+ else if (params->n_ssids && params->ssids[0].ssid_len)
general_params->adwell_max_budget =
cpu_to_le16(IWL_SCAN_ADWELL_MAX_BUDGET_DIRECTED_SCAN);
else
@@ -1717,7 +1730,10 @@ iwl_mvm_umac_scan_fill_6g_chan_list(struct iwl_mvm *mvm,
break;
}
- if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE) {
+ if (k == idex_b && idex_b < SCAN_BSSID_MAX_SIZE &&
+ !WARN_ONCE(!is_valid_ether_addr(scan_6ghz_params[j].bssid),
+ "scan: invalid BSSID at index %u, index_b=%u\n",
+ j, idex_b)) {
memcpy(&pp->bssid_array[idex_b++],
scan_6ghz_params[j].bssid, ETH_ALEN);
}
@@ -1747,8 +1763,9 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
&cp->channel_config[ch_cnt];
u32 s_ssid_bitmap = 0, bssid_bitmap = 0, flags = 0;
- u8 j, k, s_max = 0, b_max = 0, n_used_bssid_entries;
- bool force_passive, found = false, allow_passive = true,
+ u8 j, k, n_s_ssids = 0, n_bssids = 0;
+ u8 max_s_ssids, max_bssids;
+ bool force_passive = false, found = false, allow_passive = true,
unsolicited_probe_on_chan = false, psc_no_listen = false;
s8 psd_20 = IEEE80211_RNR_TBTT_PARAMS_PSD_RESERVED;
@@ -1771,20 +1788,15 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
cfg->v5.iter_count = 1;
cfg->v5.iter_interval = 0;
- /*
- * The optimize the scan time, i.e., reduce the scan dwell time
- * on each channel, the below logic tries to set 3 direct BSSID
- * probe requests for each broadcast probe request with a short
- * SSID.
- * TODO: improve this logic
- */
- n_used_bssid_entries = 3;
for (j = 0; j < params->n_6ghz_params; j++) {
s8 tmp_psd_20;
if (!(scan_6ghz_params[j].channel_idx == i))
continue;
+ unsolicited_probe_on_chan |=
+ scan_6ghz_params[j].unsolicited_probe;
+
/* Use the highest PSD value allowed as advertised by
* APs for this channel
*/
@@ -1796,12 +1808,69 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
psd_20 < tmp_psd_20))
psd_20 = tmp_psd_20;
- found = false;
- unsolicited_probe_on_chan |=
- scan_6ghz_params[j].unsolicited_probe;
psc_no_listen |= scan_6ghz_params[j].psc_no_listen;
+ }
- for (k = 0; k < pp->short_ssid_num; k++) {
+ /*
+ * In the following cases apply passive scan:
+ * 1. Non fragmented scan:
+ * - PSC channel with NO_LISTEN_FLAG on should be treated
+ * like non PSC channel
+ * - Non PSC channel with more than 3 short SSIDs or more
+ * than 9 BSSIDs.
+ * - Non PSC Channel with unsolicited probe response and
+ * more than 2 short SSIDs or more than 6 BSSIDs.
+ * - PSC channel with more than 2 short SSIDs or more than
+ * 6 BSSIDs.
+ * 2. Fragmented scan:
+ * - PSC channel with more than 1 SSID or 3 BSSIDs.
+ * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
+ * - Non PSC channel with unsolicited probe response and
+ * more than 1 SSID or more than 3 BSSIDs.
+ */
+ if (!iwl_mvm_is_scan_fragmented(params->type)) {
+ if (!cfg80211_channel_is_psc(params->channels[i]) ||
+ psc_no_listen) {
+ if (unsolicited_probe_on_chan) {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ } else {
+ max_s_ssids = 3;
+ max_bssids = 9;
+ }
+ } else {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ }
+ } else if (cfg80211_channel_is_psc(params->channels[i])) {
+ max_s_ssids = 1;
+ max_bssids = 3;
+ } else {
+ if (unsolicited_probe_on_chan) {
+ max_s_ssids = 1;
+ max_bssids = 3;
+ } else {
+ max_s_ssids = 2;
+ max_bssids = 6;
+ }
+ }
+
+ /*
+ * To optimize the scan time, i.e., reduce the scan dwell time
+ * on each channel, the below logic tries to set 3 direct BSSID
+ * probe requests for each broadcast probe request with a short
+ * SSID.
+ * TODO: improve this logic
+ */
+ for (j = 0; j < params->n_6ghz_params; j++) {
+ if (scan_6ghz_params[j].channel_idx != i)
+ continue;
+
+ found = false;
+
+ for (k = 0;
+ k < pp->short_ssid_num && n_s_ssids < max_s_ssids;
+ k++) {
if (!scan_6ghz_params[j].unsolicited_probe &&
le32_to_cpu(pp->short_ssid[k]) ==
scan_6ghz_params[j].short_ssid) {
@@ -1812,25 +1881,25 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
}
/*
- * Use short SSID only to create a new
- * iteration during channel dwell or in
- * case that the short SSID has a
- * matching SSID, i.e., scan for hidden
- * APs.
+ * Prefer creating BSSID entries unless
+ * the short SSID probe can be done in
+ * the same channel dwell iteration.
+ *
+ * We also need to create a short SSID
+ * entry for any hidden AP.
*/
- if (n_used_bssid_entries >= 3) {
- s_ssid_bitmap |= BIT(k);
- s_max++;
- n_used_bssid_entries -= 3;
- found = true;
+ if (3 * n_s_ssids > n_bssids &&
+ !pp->direct_scan[k].len)
break;
- } else if (pp->direct_scan[k].len) {
- s_ssid_bitmap |= BIT(k);
- s_max++;
- found = true;
+
+ /* Hidden AP, cannot do passive scan */
+ if (pp->direct_scan[k].len)
allow_passive = false;
- break;
- }
+
+ s_ssid_bitmap |= BIT(k);
+ n_s_ssids++;
+ found = true;
+ break;
}
}
@@ -1842,9 +1911,12 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
scan_6ghz_params[j].bssid,
ETH_ALEN)) {
if (!(bssid_bitmap & BIT(k))) {
- bssid_bitmap |= BIT(k);
- b_max++;
- n_used_bssid_entries++;
+ if (n_bssids < max_bssids) {
+ bssid_bitmap |= BIT(k);
+ n_bssids++;
+ } else {
+ force_passive = true;
+ }
}
break;
}
@@ -1858,39 +1930,6 @@ iwl_mvm_umac_scan_cfg_channels_v7_6g(struct iwl_mvm *mvm,
if (unsolicited_probe_on_chan)
flags |= IWL_UHB_CHAN_CFG_FLAG_UNSOLICITED_PROBE_RES;
- /*
- * In the following cases apply passive scan:
- * 1. Non fragmented scan:
- * - PSC channel with NO_LISTEN_FLAG on should be treated
- * like non PSC channel
- * - Non PSC channel with more than 3 short SSIDs or more
- * than 9 BSSIDs.
- * - Non PSC Channel with unsolicited probe response and
- * more than 2 short SSIDs or more than 6 BSSIDs.
- * - PSC channel with more than 2 short SSIDs or more than
- * 6 BSSIDs.
- * 3. Fragmented scan:
- * - PSC channel with more than 1 SSID or 3 BSSIDs.
- * - Non PSC channel with more than 2 SSIDs or 6 BSSIDs.
- * - Non PSC channel with unsolicited probe response and
- * more than 1 SSID or more than 3 BSSIDs.
- */
- if (!iwl_mvm_is_scan_fragmented(params->type)) {
- if (!cfg80211_channel_is_psc(params->channels[i]) ||
- flags & IWL_UHB_CHAN_CFG_FLAG_PSC_CHAN_NO_LISTEN) {
- force_passive = (s_max > 3 || b_max > 9);
- force_passive |= (unsolicited_probe_on_chan &&
- (s_max > 2 || b_max > 6));
- } else {
- force_passive = (s_max > 2 || b_max > 6);
- }
- } else if (cfg80211_channel_is_psc(params->channels[i])) {
- force_passive = (s_max > 1 || b_max > 3);
- } else {
- force_passive = (s_max > 2 || b_max > 6);
- force_passive |= (unsolicited_probe_on_chan &&
- (s_max > 1 || b_max > 3));
- }
if ((allow_passive && force_passive) ||
(!(bssid_bitmap | s_ssid_bitmap) &&
!cfg80211_channel_is_psc(params->channels[i])))
@@ -2098,7 +2137,8 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
struct iwl_mvm_scan_params *params,
- struct ieee80211_vif *vif, int type)
+ struct ieee80211_vif *vif, int type,
+ u16 gen_flags)
{
u8 flags = 0;
@@ -2118,6 +2158,13 @@ static u8 iwl_mvm_scan_umac_flags2(struct iwl_mvm *mvm,
IWL_UCODE_TLV_CAPA_SCAN_DONT_TOGGLE_ANT))
flags |= IWL_UMAC_SCAN_GEN_PARAMS_FLAGS2_DONT_TOGGLE_ANT;
+ /* Passive and AP interface -> ACS (automatic channel selection) */
+ if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE &&
+ ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_AP &&
+ iwl_fw_lookup_notif_ver(mvm->fw, SCAN_GROUP, CHANNEL_SURVEY_NOTIF,
+ 0) >= 1)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS2_COLLECT_CHANNEL_STATS;
+
return flags;
}
@@ -2255,8 +2302,6 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_scan_umac_dwell(mvm, cmd, params);
- mvm->scan_uid_status[uid] = type;
-
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags(mvm, params, vif);
cmd->general_flags = cpu_to_le16(gen_flags);
@@ -2297,10 +2342,8 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ret = iwl_mvm_fill_scan_sched_params(params, tail_v2->schedule,
&tail_v2->delay);
- if (ret) {
- mvm->scan_uid_status[uid] = 0;
+ if (ret)
return ret;
- }
if (iwl_mvm_is_scan_ext_chan_supported(mvm)) {
tail_v2->preq = params->preq;
@@ -2450,9 +2493,7 @@ static int iwl_mvm_scan_umac_v12(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int ret;
u16 gen_flags;
- mvm->scan_uid_status[uid] = type;
-
- cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(type));
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
@@ -2487,15 +2528,14 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
u8 gen_flags2;
u32 bitmap_ssid = 0;
- mvm->scan_uid_status[uid] = type;
-
- cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(params));
+ cmd->ooc_priority = cpu_to_le32(iwl_mvm_scan_umac_ooc_priority(type));
cmd->uid = cpu_to_le32(uid);
gen_flags = iwl_mvm_scan_umac_flags_v2(mvm, params, vif, type);
if (version >= 15)
- gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type);
+ gen_flags2 = iwl_mvm_scan_umac_flags2(mvm, params, vif, type,
+ gen_flags);
else
gen_flags2 = 0;
@@ -2532,10 +2572,8 @@ static int iwl_mvm_scan_umac_v14_and_above(struct iwl_mvm *mvm,
params->n_channels,
pb, cp, vif->type,
version);
- if (!cp->count) {
- mvm->scan_uid_status[uid] = 0;
+ if (!cp->count)
return -EINVAL;
- }
if (!params->n_ssids ||
(params->n_ssids == 1 && !params->ssids[0].ssid_len))
@@ -2840,7 +2878,7 @@ static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac,
if (vif == data->current_vif)
return;
- if (vif->type == NL80211_IFTYPE_AP && vif->p2p) {
+ if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO) {
u32 link_id;
for (link_id = 0;
@@ -2915,9 +2953,11 @@ static void iwl_mvm_fill_respect_p2p_go(struct iwl_mvm *mvm,
}
}
-int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req,
- struct ieee80211_scan_ies *ies)
+static int _iwl_mvm_single_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies,
+ int type)
{
struct iwl_host_cmd hcmd = {
.len = { iwl_mvm_scan_size(mvm), },
@@ -2935,7 +2975,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EBUSY;
}
- ret = iwl_mvm_check_running_scans(mvm, IWL_MVM_SCAN_REGULAR);
+ ret = iwl_mvm_check_running_scans(mvm, type);
if (ret)
return ret;
@@ -2984,8 +3024,7 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_scan_6ghz_passive_scan(mvm, &params, vif);
- uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params,
- IWL_MVM_SCAN_REGULAR);
+ uid = iwl_mvm_build_scan_cmd(mvm, vif, &hcmd, &params, type);
if (uid < 0)
return uid;
@@ -3000,23 +3039,35 @@ int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
*/
IWL_ERR(mvm, "Scan failed! ret %d\n", ret);
iwl_mvm_resume_tcm(mvm);
- mvm->scan_uid_status[uid] = 0;
return ret;
}
- IWL_DEBUG_SCAN(mvm, "Scan request was sent successfully\n");
- mvm->scan_status |= IWL_MVM_SCAN_REGULAR;
- mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
+ IWL_DEBUG_SCAN(mvm, "Scan request send success: type=%u, uid=%u\n",
+ type, uid);
+
+ mvm->scan_uid_status[uid] = type;
+ mvm->scan_status |= type;
+
+ if (type == IWL_MVM_SCAN_REGULAR) {
+ mvm->scan_vif = iwl_mvm_vif_from_mac80211(vif);
+ schedule_delayed_work(&mvm->scan_timeout_dwork,
+ msecs_to_jiffies(SCAN_TIMEOUT));
+ }
if (params.enable_6ghz_passive)
mvm->last_6ghz_passive_scan_jiffies = jiffies;
- schedule_delayed_work(&mvm->scan_timeout_dwork,
- msecs_to_jiffies(SCAN_TIMEOUT));
-
return 0;
}
+int iwl_mvm_reg_scan_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req,
+ struct ieee80211_scan_ies *ies)
+{
+ return _iwl_mvm_single_scan_start(mvm, vif, req, ies,
+ IWL_MVM_SCAN_REGULAR);
+}
+
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
@@ -3133,7 +3184,9 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
ret = iwl_mvm_send_cmd(mvm, &hcmd);
if (!ret) {
IWL_DEBUG_SCAN(mvm,
- "Sched scan request was sent successfully\n");
+ "Sched scan request send success: type=%u, uid=%u\n",
+ type, uid);
+ mvm->scan_uid_status[uid] = type;
mvm->scan_status |= type;
} else {
/* If the scan failed, it usually means that the FW was unable
@@ -3141,7 +3194,6 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
* should try to send the command again with different params.
*/
IWL_ERR(mvm, "Sched scan failed! ret %d\n", ret);
- mvm->scan_uid_status[uid] = 0;
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
}
@@ -3155,9 +3207,25 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_umac_scan_complete *notif = (void *)pkt->data;
u32 uid = __le32_to_cpu(notif->uid);
bool aborted = (notif->status == IWL_SCAN_OFFLOAD_ABORTED);
+ bool select_links = false;
mvm->mei_scan_filter.is_mei_limited_scan = false;
+ IWL_DEBUG_SCAN(mvm,
+ "Scan completed: uid=%u type=%u, status=%s, EBS=%s\n",
+ uid, mvm->scan_uid_status[uid],
+ notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
+ "completed" : "aborted",
+ iwl_mvm_ebs_status_str(notif->ebs_status));
+
+ IWL_DEBUG_SCAN(mvm, "Scan completed: scan_status=0x%x\n",
+ mvm->scan_status);
+
+ IWL_DEBUG_SCAN(mvm,
+ "Scan completed: line=%u, iter=%u, elapsed time=%u\n",
+ notif->last_schedule, notif->last_iter,
+ __le32_to_cpu(notif->time_from_last_iter));
+
if (WARN_ON(!(mvm->scan_uid_status[uid] & mvm->scan_status)))
return;
@@ -3171,8 +3239,13 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
struct iwl_mvm_vif_link_info *link_info =
scan_vif->link[mvm->scan_link_id];
- if (!WARN_ON(!link_info))
+ /* It is possible that by the time the scan is complete the link
+ * was already removed and is not valid.
+ */
+ if (link_info)
memcpy(info.tsf_bssid, link_info->bssid, ETH_ALEN);
+ else
+ IWL_DEBUG_SCAN(mvm, "Scan link is no longer valid\n");
ieee80211_scan_completed(mvm->hw, &info);
mvm->scan_vif = NULL;
@@ -3181,25 +3254,28 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm,
} else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_SCHED) {
ieee80211_sched_scan_stopped(mvm->hw);
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
+ } else if (mvm->scan_uid_status[uid] == IWL_MVM_SCAN_INT_MLO) {
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan completed\n");
+ /*
+ * Other scan types won't necessarily scan the MLD links' channels.
+ * Therefore, only select links after successful internal scan.
+ */
+ select_links = notif->status == IWL_SCAN_OFFLOAD_COMPLETED;
}
mvm->scan_status &= ~mvm->scan_uid_status[uid];
- IWL_DEBUG_SCAN(mvm,
- "Scan completed, uid %u type %u, status %s, EBS status %s\n",
- uid, mvm->scan_uid_status[uid],
- notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
- "completed" : "aborted",
- iwl_mvm_ebs_status_str(notif->ebs_status));
- IWL_DEBUG_SCAN(mvm,
- "Last line %d, Last iteration %d, Time from last iteration %d\n",
- notif->last_schedule, notif->last_iter,
- __le32_to_cpu(notif->time_from_last_iter));
+
+ IWL_DEBUG_SCAN(mvm, "Scan completed: after update: scan_status=0x%x\n",
+ mvm->scan_status);
if (notif->ebs_status != IWL_SCAN_EBS_SUCCESS &&
notif->ebs_status != IWL_SCAN_EBS_INACTIVE)
mvm->last_ebs_successful = false;
mvm->scan_uid_status[uid] = 0;
+
+ if (select_links)
+ wiphy_work_queue(mvm->hw->wiphy, &mvm->trig_link_selection_wk);
}
void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm,
@@ -3246,10 +3322,11 @@ static int iwl_mvm_umac_scan_abort(struct iwl_mvm *mvm, int type)
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(IWL_ALWAYS_LONG_GROUP, SCAN_ABORT_UMAC),
- 0, sizeof(cmd), &cmd);
+ CMD_SEND_IN_RFKILL, sizeof(cmd), &cmd);
if (!ret)
mvm->scan_uid_status[uid] = type << IWL_MVM_SCAN_STOPPING_SHIFT;
+ IWL_DEBUG_SCAN(mvm, "Scan abort: ret=%d\n", ret);
return ret;
}
@@ -3367,6 +3444,12 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
mvm->sched_scan_pass_all = SCHED_SCAN_PASS_ALL_DISABLED;
mvm->scan_uid_status[uid] = 0;
}
+ uid = iwl_mvm_scan_uid_by_status(mvm, IWL_MVM_SCAN_INT_MLO);
+ if (uid >= 0) {
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan aborted\n");
+ mvm->scan_uid_status[uid] = 0;
+ }
+
uid = iwl_mvm_scan_uid_by_status(mvm,
IWL_MVM_SCAN_STOPPING_REGULAR);
if (uid >= 0)
@@ -3377,6 +3460,11 @@ void iwl_mvm_report_scan_aborted(struct iwl_mvm *mvm)
if (uid >= 0)
mvm->scan_uid_status[uid] = 0;
+ uid = iwl_mvm_scan_uid_by_status(mvm,
+ IWL_MVM_SCAN_STOPPING_INT_MLO);
+ if (uid >= 0)
+ mvm->scan_uid_status[uid] = 0;
+
/* We shouldn't have any UIDs still set. Loop over all the
* UIDs to make sure there's nothing left there and warn if
* any is found.
@@ -3413,6 +3501,10 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
{
int ret;
+ IWL_DEBUG_SCAN(mvm,
+ "Request to stop scan: type=0x%x, status=0x%x\n",
+ type, mvm->scan_status);
+
if (!(mvm->scan_status & type))
return 0;
@@ -3424,6 +3516,9 @@ int iwl_mvm_scan_stop(struct iwl_mvm *mvm, int type, bool notify)
ret = iwl_mvm_scan_stop_wait(mvm, type);
if (!ret)
mvm->scan_status |= type << IWL_MVM_SCAN_STOPPING_SHIFT;
+ else
+ IWL_DEBUG_SCAN(mvm, "Failed to stop scan\n");
+
out:
/* Clear the scan status so the next scan requests will
* succeed and mark the scan as stopping, so that the Rx
@@ -3448,3 +3543,276 @@ out:
return ret;
}
+
+static int iwl_mvm_int_mlo_scan_start(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct ieee80211_channel **channels,
+ size_t n_channels)
+{
+ struct cfg80211_scan_request *req = NULL;
+ struct ieee80211_scan_ies ies = {};
+ size_t size, i;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ IWL_DEBUG_SCAN(mvm, "Starting Internal MLO scan: n_channels=%zu\n",
+ n_channels);
+
+ if (!vif->cfg.assoc || !ieee80211_vif_is_mld(vif))
+ return -EINVAL;
+
+ size = struct_size(req, channels, n_channels);
+ req = kzalloc(size, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ /* set the requested channels */
+ for (i = 0; i < n_channels; i++)
+ req->channels[i] = channels[i];
+
+ req->n_channels = n_channels;
+
+ /* set the rates */
+ for (i = 0; i < NUM_NL80211_BANDS; i++)
+ if (mvm->hw->wiphy->bands[i])
+ req->rates[i] =
+ (1 << mvm->hw->wiphy->bands[i]->n_bitrates) - 1;
+
+ req->wdev = ieee80211_vif_to_wdev(vif);
+ req->wiphy = mvm->hw->wiphy;
+ req->scan_start = jiffies;
+ req->tsf_report_link_id = -1;
+
+ ret = _iwl_mvm_single_scan_start(mvm, vif, req, &ies,
+ IWL_MVM_SCAN_INT_MLO);
+ kfree(req);
+
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan: ret=%d\n", ret);
+ return ret;
+}
+
+int iwl_mvm_int_mlo_scan(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+{
+ struct ieee80211_channel *channels[IEEE80211_MLD_MAX_NUM_LINKS];
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ size_t n_channels = 0;
+ u8 link_id;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (mvm->scan_status & IWL_MVM_SCAN_INT_MLO) {
+ IWL_DEBUG_SCAN(mvm, "Internal MLO scan is already running\n");
+ return -EBUSY;
+ }
+
+ rcu_read_lock();
+
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ rcu_dereference(vif->link_conf[link_id]);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
+
+ channels[n_channels++] = link_conf->chanreq.oper.chan;
+ }
+
+ rcu_read_unlock();
+
+ if (!n_channels)
+ return -EINVAL;
+
+ return iwl_mvm_int_mlo_scan_start(mvm, vif, channels, n_channels);
+}
+
+static int iwl_mvm_chanidx_from_phy(struct iwl_mvm *mvm,
+ enum nl80211_band band,
+ u16 phy_chan_num)
+{
+ struct ieee80211_supported_band *sband = mvm->hw->wiphy->bands[band];
+ int chan_idx;
+
+ if (WARN_ON_ONCE(!sband))
+ return -EINVAL;
+
+ for (chan_idx = 0; chan_idx < sband->n_channels; chan_idx++) {
+ struct ieee80211_channel *channel = &sband->channels[chan_idx];
+
+ if (channel->hw_value == phy_chan_num)
+ return chan_idx;
+ }
+
+ return -EINVAL;
+}
+
+static u32 iwl_mvm_div_by_db(u32 value, u8 db)
+{
+ /*
+ * 2^32 * 10**(-i / 10) for i = [1, 10], skipping 0 and simply stopping
+ * at 10 dB and looping instead of using a much larger table.
+ *
+ * Using 64 bit math is overkill, but means the helper does not require
+ * a limit on the input range.
+ */
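+ /*
+ * Worked example (illustrative): dividing 0x10000 by 3 dB multiplies
+ * by db_to_val[2] / 2^32 ~= 0.501, i.e.
+ * ((u64)0x10000 * 0x804dce7a) >> 32 = 0x804d, about half the linear
+ * power, as expected for -3 dB.
+ */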
+ static const u32 db_to_val[] = {
+ 0xcb59185e, 0xa1866ba8, 0x804dce7a, 0x65ea59fe, 0x50f44d89,
+ 0x404de61f, 0x331426af, 0x2892c18b, 0x203a7e5b, 0x1999999a,
+ };
+
+ while (value && db > 0) {
+ u8 change = min_t(u8, db, ARRAY_SIZE(db_to_val));
+
+ value = (((u64)value) * db_to_val[change - 1]) >> 32;
+
+ db -= change;
+ }
+
+ return value;
+}
+
+VISIBLE_IF_IWLWIFI_KUNIT s8
+iwl_mvm_average_dbm_values(const struct iwl_umac_scan_channel_survey_notif *notif)
+{
+ s8 average_magnitude;
+ u32 average_factor;
+ s8 sum_magnitude = -128;
+ u32 sum_factor = 0;
+ int i, count = 0;
+
+ /*
+ * To properly average the decibel values (signal values given in dBm)
+ * we need to do the math in linear space. Doing a linear average of
+ * dB (dBm) values is a bit annoying though due to the large range of
+ * at least -10 to -110 dBm that will not fit into a 32 bit integer.
+ *
+ * A 64 bit integer should be sufficient, but then we still have the
+ * problem that there are no directly usable utility functions
+ * available.
+ *
+ * So, let's not deal with that and instead do much of the calculation
+ * with a 16.16 fixed point integer along with a base in dBm. 16.16 bit
+ * gives us plenty of head-room for adding up a few values and even
+ * doing some math on it. And the tail should be accurate enough too
+ * (1/2^16 is somewhere around -48 dB, so effectively zero).
+ *
+ * i.e. the real value of sum is:
+ * sum = sum_factor / 2^16 * 10^(sum_magnitude / 10) mW
+ *
+ * However, that does mean we need to be able to bring two values to
+ * a common base, so we need a helper for that.
+ *
+ * Note that this function takes an input with unsigned negative dBm
+ * values but returns a signed dBm (i.e. a negative value).
+ */
+
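+ /*
+ * Worked example (illustrative, not from the notification data):
+ * averaging -80 dBm and -90 dBm in linear space gives
+ * (1e-8 mW + 1e-9 mW) / 2 = 5.5e-9 mW ~= -82.6 dBm, rather than the
+ * -85 dBm a naive average of the dB values would give.
+ */
+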
+ for (i = 0; i < ARRAY_SIZE(notif->noise); i++) {
+ s8 val_magnitude;
+ u32 val_factor;
+
+ if (notif->noise[i] == 0xff)
+ continue;
+
+ val_factor = 0x10000;
+ val_magnitude = -notif->noise[i];
+
+ if (val_magnitude <= sum_magnitude) {
+ u8 div_db = sum_magnitude - val_magnitude;
+
+ val_factor = iwl_mvm_div_by_db(val_factor, div_db);
+ val_magnitude = sum_magnitude;
+ } else {
+ u8 div_db = val_magnitude - sum_magnitude;
+
+ sum_factor = iwl_mvm_div_by_db(sum_factor, div_db);
+ sum_magnitude = val_magnitude;
+ }
+
+ sum_factor += val_factor;
+ count++;
+ }
+
+ /* No valid noise measurement, return a very high noise level */
+ if (count == 0)
+ return 0;
+
+ average_magnitude = sum_magnitude;
+ average_factor = sum_factor / count;
+
+ /*
+ * average_factor will be a number smaller than 1.0 (0x10000) at this
+ * point. What we need to do now is to adjust average_magnitude so that
+ * average_factor is between -0.5 dB and 0.5 dB.
+ *
+ * Just do -1 dB steps and find the point where
+ * -0.5 dB - i dB = 0x10000 * 10^(-(0.5 + i)/10)
+ * = div_by_db(0xe429, i)
+ * is smaller than average_factor.
+ */
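+ /*
+ * e.g. (illustrative): for average_factor = 0x2000 (~ -9.03 dB) the
+ * loop stops at i = 9, so 9 dB is subtracted from average_magnitude.
+ */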
+ for (i = 0; average_factor < iwl_mvm_div_by_db(0xe429, i); i++) {
+ /* nothing */
+ }
+
+ return average_magnitude - i;
+}
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mvm_average_dbm_values);
+
+void iwl_mvm_rx_channel_survey_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ const struct iwl_umac_scan_channel_survey_notif *notif =
+ (void *)pkt->data;
+ struct iwl_mvm_acs_survey_channel *info;
+ enum nl80211_band band;
+ int chan_idx;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!mvm->acs_survey) {
+ size_t n_channels = 0;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!mvm->hw->wiphy->bands[band])
+ continue;
+
+ n_channels += mvm->hw->wiphy->bands[band]->n_channels;
+ }
+
+ mvm->acs_survey = kzalloc(struct_size(mvm->acs_survey,
+ channels, n_channels),
+ GFP_KERNEL);
+
+ if (!mvm->acs_survey)
+ return;
+
+ mvm->acs_survey->n_channels = n_channels;
+ n_channels = 0;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ if (!mvm->hw->wiphy->bands[band])
+ continue;
+
+ mvm->acs_survey->bands[band] =
+ &mvm->acs_survey->channels[n_channels];
+ n_channels += mvm->hw->wiphy->bands[band]->n_channels;
+ }
+ }
+
+ band = iwl_mvm_nl80211_band_from_phy(le32_to_cpu(notif->band));
+ chan_idx = iwl_mvm_chanidx_from_phy(mvm, band,
+ le32_to_cpu(notif->channel));
+ if (WARN_ON_ONCE(chan_idx < 0))
+ return;
+
+ IWL_DEBUG_SCAN(mvm, "channel survey received for freq %d\n",
+ mvm->hw->wiphy->bands[band]->channels[chan_idx].center_freq);
+
+ info = &mvm->acs_survey->bands[band][chan_idx];
+
+ /* Times are all in ms */
+ info->time = le32_to_cpu(notif->active_time);
+ info->time_busy = le32_to_cpu(notif->busy_time);
+ info->time_rx = le32_to_cpu(notif->rx_time);
+ info->time_tx = le32_to_cpu(notif->tx_time);
+ info->noise = iwl_mvm_average_dbm_values(notif);
+}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 491c449fd431..15e64d94d6ea 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -857,12 +857,6 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
size = iwl_mvm_get_queue_size(sta);
}
- /* take the min with bc tbl entries allowed */
- size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));
-
- /* size needs to be power of 2 values for calculating read/write pointers */
- size = rounddown_pow_of_two(size);
-
if (sta) {
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
struct ieee80211_link_sta *link_sta;
@@ -887,22 +881,13 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
if (!sta_mask)
return -EINVAL;
- do {
- queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
- tid, size, timeout);
-
- if (queue < 0)
- IWL_DEBUG_TX_QUEUES(mvm,
- "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %d\n",
- size, sta_mask, tid, queue);
- size /= 2;
- } while (queue < 0 && size >= 16);
+ queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
+ tid, size, timeout);
- if (queue < 0)
- return queue;
-
- IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
- queue, sta_mask, tid);
+ if (queue >= 0)
+ IWL_DEBUG_TX_QUEUES(mvm,
+ "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
+ queue, sta_mask, tid);
return queue;
}
@@ -1847,6 +1832,18 @@ int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
+ /* MPDUs are counted only when EMLSR is possible */
+ if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ !sta->tdls && ieee80211_vif_is_mld(vif)) {
+ mvm_sta->mpdu_counters =
+ kcalloc(mvm->trans->num_rx_queues,
+ sizeof(*mvm_sta->mpdu_counters),
+ GFP_KERNEL);
+ if (mvm_sta->mpdu_counters)
+ for (int q = 0; q < mvm->trans->num_rx_queues; q++)
+ spin_lock_init(&mvm_sta->mpdu_counters[q].lock);
+ }
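+
+ /*
+ * Note: one counter set per RX queue, each under its own lock, so
+ * concurrent RX paths can update them without contention (see
+ * iwl_mvm_count_mpdu()).
+ */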
+
return 0;
}
@@ -2746,7 +2743,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
*/
WARN_ON(1);
- for (j = 0; j < reorder_buf->buf_size; j++)
+ for (j = 0; j < data->buf_size; j++)
__skb_queue_purge(&entries[j].frames);
spin_unlock_bh(&reorder_buf->lock);
@@ -2755,7 +2752,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
struct iwl_mvm_baid_data *data,
- u16 ssn, u16 buf_size)
+ u16 ssn)
{
int i;
@@ -2768,12 +2765,10 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
reorder_buf->num_stored = 0;
reorder_buf->head_sn = ssn;
- reorder_buf->buf_size = buf_size;
spin_lock_init(&reorder_buf->lock);
- reorder_buf->mvm = mvm;
reorder_buf->queue = i;
reorder_buf->valid = false;
- for (j = 0; j < reorder_buf->buf_size; j++)
+ for (j = 0; j < data->buf_size; j++)
__skb_queue_head_init(&entries[j].frames);
}
}
@@ -2836,7 +2831,12 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
};
- u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
+ struct iwl_host_cmd hcmd = {
+ .id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
+ .flags = CMD_SEND_IN_RFKILL,
+ .len[0] = sizeof(cmd),
+ .data[0] = &cmd,
+ };
int ret;
BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
@@ -2848,7 +2848,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
cmd.alloc.ssn = cpu_to_le16(ssn);
cmd.alloc.win_size = cpu_to_le16(buf_size);
baid = -EIO;
- } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
+ } else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
cmd.remove_v1.baid = cpu_to_le32(baid);
BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
} else {
@@ -2857,8 +2857,7 @@ static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
cmd.remove.tid = cpu_to_le32(tid);
}
- ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
- &cmd, &baid);
+ ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
if (ret)
return ret;
@@ -2978,13 +2977,14 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
baid_data->mvm = mvm;
baid_data->tid = tid;
baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
+ baid_data->buf_size = buf_size;
mvm_sta->tid_to_baid[tid] = baid;
if (timeout)
mod_timer(&baid_data->session_timer,
TU_TO_EXP_TIME(timeout * 2));
- iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
+ iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn);
/*
* protect the BA data with RCU to cover a case where our
* internal RX sync mechanism will timeout (not that it's
@@ -4392,3 +4392,80 @@ void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "Failed to cancel the channel switch\n");
}
+
+static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif,
+ u8 fw_sta_id)
+{
+ struct ieee80211_link_sta *link_sta =
+ rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]);
+ struct iwl_mvm_vif_link_info *link;
+
+ if (WARN_ON_ONCE(!link_sta))
+ return -EINVAL;
+
+ link = mvmvif->link[link_sta->link_id];
+
+ if (WARN_ON_ONCE(!link))
+ return -EINVAL;
+
+ return link->fw_link_id;
+}
+
+#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ)
+
+void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
+ bool tx, int queue)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct iwl_mvm_tpt_counter *queue_counter;
+ struct iwl_mvm_mpdu_counter *link_counter;
+ u32 total_mpdus = 0;
+ int fw_link_id;
+
+ /* Count only for a BSS sta, and only when EMLSR is possible */
+ if (!mvm_sta->mpdu_counters)
+ return;
+
+ /* Map sta id to link id */
+ fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id);
+ if (fw_link_id < 0)
+ return;
+
+ queue_counter = &mvm_sta->mpdu_counters[queue];
+ link_counter = &queue_counter->per_link[fw_link_id];
+
+ spin_lock_bh(&queue_counter->lock);
+
+ if (tx)
+ link_counter->tx += count;
+ else
+ link_counter->rx += count;
+
+ /*
+ * When EMLSR is not active, the counting window and the decision to
+ * enter EMLSR are handled here during counting; when EMLSR is active,
+ * they are handled in the statistics flow instead.
+ */
+ if (mvmvif->esr_active)
+ goto out;
+
+ if (time_is_before_jiffies(queue_counter->window_start +
+ IWL_MVM_TPT_COUNT_WINDOW)) {
+ memset(queue_counter->per_link, 0,
+ sizeof(queue_counter->per_link));
+ queue_counter->window_start = jiffies;
+
+ IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n");
+ }
+
+ for (int i = 0; i < IWL_MVM_FW_MAX_LINK_ID; i++)
+ total_mpdus += tx ? queue_counter->per_link[i].tx :
+ queue_counter->per_link[i].rx;
+
+ if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH)
+ wiphy_work_queue(mvmvif->mvm->hw->wiphy,
+ &mvmvif->unblock_esr_tpt_wk);
+
+out:
+ spin_unlock_bh(&queue_counter->lock);
+}
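
The function above implements a jiffies-based counting window: the per-link counters are cleared once IWL_MVM_TPT_COUNT_WINDOW has elapsed, and the unblock work is queued when the windowed total crosses IWL_MVM_ENTER_ESR_TPT_THRESH. A minimal standalone sketch of that pattern follows; the names and values (win_counter, WINDOW_JIFFIES, THRESHOLD) are invented for illustration and are not part of the driver.

/* Sketch of the windowed-counter pattern used by iwl_mvm_count_mpdu(). */
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define WINDOW_JIFFIES	(5 * HZ)	/* assumed window length */
#define THRESHOLD	400		/* assumed trigger threshold */

struct win_counter {
	spinlock_t lock;
	unsigned long window_start;
	u32 count;
};

/* Add n events; returns true when the windowed total crosses THRESHOLD. */
static bool win_add(struct win_counter *c, u32 n)
{
	bool trigger;

	spin_lock_bh(&c->lock);
	if (time_is_before_jiffies(c->window_start + WINDOW_JIFFIES)) {
		/* window expired: restart counting from now */
		c->count = 0;
		c->window_start = jiffies;
	}
	c->count += n;
	trigger = c->count > THRESHOLD;
	spin_unlock_bh(&c->lock);

	return trigger;
}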
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index b3450569864e..0dc83d6afb3c 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -347,6 +347,24 @@ struct iwl_mvm_link_sta {
u8 avg_energy;
};
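+/**
+ * struct iwl_mvm_mpdu_counter - per-link MPDU RX/TX counters
+ * @tx: number of TX MPDUs counted in the current window
+ * @rx: number of RX MPDUs counted in the current window
+ */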
+struct iwl_mvm_mpdu_counter {
+ u32 tx;
+ u32 rx;
+};
+
+/**
+ * struct iwl_mvm_tpt_counter - per-queue MPDU counter
+ *
+ * @lock: protects the counters, which can also be updated from the
+ * statistics flow.
+ * @per_link: per-link counters.
+ * @window_start: timestamp of the counting-window start
+ */
+struct iwl_mvm_tpt_counter {
+ spinlock_t lock;
+ struct iwl_mvm_mpdu_counter per_link[IWL_MVM_FW_MAX_LINK_ID];
+ unsigned long window_start;
+} ____cacheline_aligned_in_smp;
+
/**
* struct iwl_mvm_sta - representation of a station in the driver
* @vif: the interface the station belongs to
@@ -394,6 +412,7 @@ struct iwl_mvm_link_sta {
* @link: per link sta entries. For non-MLO only link[0] holds data. For MLO,
* link[0] points to deflink and link[link_id] is allocated when new link
* sta is added.
+ * @mpdu_counters: RX/TX MPDUs counters for each queue.
*
* When mac80211 creates a station it reserves some space (hw->sta_data_size)
* in the structure for use by driver. This structure is placed in that
@@ -433,6 +452,8 @@ struct iwl_mvm_sta {
struct iwl_mvm_link_sta deflink;
struct iwl_mvm_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ struct iwl_mvm_tpt_counter *mpdu_counters;
};
u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data);
@@ -457,7 +478,7 @@ struct iwl_mvm_int_sta {
};
/**
- * Send the STA info to the FW.
+ * iwl_mvm_sta_send_to_fw - Send the STA info to the FW.
*
* @mvm: the iwl_mvm* to use
* @sta: the STA
@@ -514,6 +535,9 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
+ bool tx, int queue);
+
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
@@ -638,6 +662,11 @@ int iwl_mvm_mld_update_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int iwl_mvm_mld_rm_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+void iwl_mvm_mld_free_sta_link(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta,
+ struct iwl_mvm_link_sta *mvm_sta_link,
+ unsigned int link_id,
+ bool is_in_fw);
int iwl_mvm_mld_rm_sta_id(struct iwl_mvm *mvm, u8 sta_id);
int iwl_mvm_mld_update_sta_links(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
index e7d5f4ebeb25..3d25ff5cd7e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c
@@ -2,7 +2,7 @@
/*
* Copyright (C) 2014 Intel Mobile Communications GmbH
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2022-2023 Intel Corporation
+ * Copyright (C) 2018-2020, 2022-2024 Intel Corporation
*/
#include <linux/etherdevice.h>
#include "mvm.h"
@@ -151,7 +151,7 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int;
/* Protect the session to hear the TDLS setup response on the channel */
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
iwl_mvm_schedule_session_protection(mvm, vif, duration,
@@ -159,7 +159,6 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw,
else
iwl_mvm_protect_session(mvm, vif, duration,
duration, 100, true);
- mutex_unlock(&mvm->mutex);
}
static const char *
@@ -460,21 +459,21 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
int ret;
mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work);
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
/* called after an active channel switch has finished or timed-out */
iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE);
/* station might be gone, in that case do nothing */
if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
- goto out;
+ return;
sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
lockdep_is_held(&mvm->mutex));
/* the station may not be here, but if it is, it must be a TDLS peer */
if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls))
- goto out;
+ return;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
vif = mvmsta->vif;
@@ -493,8 +492,6 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work)
/* retry after a DTIM if we failed sending now */
delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int);
schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay));
-out:
- mutex_unlock(&mvm->mutex);
}
int
@@ -509,7 +506,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
unsigned int delay;
int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n",
sta->addr, chandef->chan->center_freq, chandef->width);
@@ -519,8 +516,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
IWL_DEBUG_TDLS(mvm,
"Existing peer. Can't start switch with %pM\n",
sta->addr);
- ret = -EBUSY;
- goto out;
+ return -EBUSY;
}
ret = iwl_mvm_tdls_config_channel_switch(mvm, vif,
@@ -529,17 +525,15 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
oper_class, chandef, 0, 0, 0,
tmpl_skb, ch_sw_tm_ie);
if (ret)
- goto out;
+ return ret;
/*
* Mark the peer as "in tdls switch" for this vif. We only allow a
* single such peer per vif.
*/
mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
- if (!mvm->tdls_cs.peer.skb) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!mvm->tdls_cs.peer.skb)
+ return -ENOMEM;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
mvm->tdls_cs.peer.sta_id = mvmsta->deflink.sta_id;
@@ -556,10 +550,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw,
vif->bss_conf.beacon_int);
mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
msecs_to_jiffies(delay));
-
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return 0;
}
void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw,
@@ -626,7 +617,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw,
params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ?
"REQ" : "RESP";
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
IWL_DEBUG_TDLS(mvm,
"Received TDLS ch switch action %s from %pM status %d\n",
@@ -670,5 +661,4 @@ retry:
1024 / 1000;
mod_delayed_work(system_wq, &mvm->tdls_cs.dwork,
msecs_to_jiffies(delay));
- mutex_unlock(&mvm->mutex);
}
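
The hunks above convert mutex_lock()/mutex_unlock() pairs to guard(mvm)(mvm), which releases mvm->mutex automatically on every return path and lets the error paths return directly. A hedged sketch of how such a guard is built on <linux/cleanup.h>; the struct shown here is a stand-in, not the driver's real definition.

/* Sketch of a scope-based mutex guard in the style of guard(mvm)(mvm). */
#include <linux/cleanup.h>
#include <linux/mutex.h>

struct iwl_mvm {
	struct mutex mutex;
	bool fw_running;	/* illustrative state */
};

/* DEFINE_GUARD(name, type, lock-on-entry, unlock-at-end-of-scope) */
DEFINE_GUARD(mvm, struct iwl_mvm *,
	     mutex_lock(&_T->mutex),
	     mutex_unlock(&_T->mutex))

static int example_op(struct iwl_mvm *mvm)
{
	guard(mvm)(mvm);	/* mutex held for the rest of this scope */

	if (!mvm->fw_running)
		return -EIO;	/* mutex released automatically */

	/* ... locked work ... */
	return 0;		/* mutex released automatically */
}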
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
new file mode 100644
index 000000000000..6bd56a28cffd
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/Makefile
@@ -0,0 +1,3 @@
+iwlmvm-tests-y += module.o links.o scan.o
+
+obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlmvm-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
new file mode 100644
index 000000000000..47b8e7b64ead
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for link grading and link-pair selection helpers
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <net/mac80211.h>
+#include "../mvm.h"
+#include <kunit/test.h>
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
+static struct wiphy wiphy = {
+ .mtx = __MUTEX_INITIALIZER(wiphy.mtx),
+};
+
+static struct ieee80211_hw hw = {
+ .wiphy = &wiphy,
+};
+
+static struct ieee80211_channel chan_5ghz = {
+ .band = NL80211_BAND_5GHZ,
+};
+
+static struct ieee80211_channel chan_6ghz = {
+ .band = NL80211_BAND_6GHZ,
+};
+
+static struct ieee80211_channel chan_2ghz = {
+ .band = NL80211_BAND_2GHZ,
+};
+
+static struct cfg80211_chan_def chandef_a = {};
+
+static struct cfg80211_chan_def chandef_b = {};
+
+static struct iwl_mvm_phy_ctxt ctx = {};
+
+static struct iwl_mvm_vif_link_info mvm_link = {
+ .phy_ctxt = &ctx,
+ .active = true
+};
+
+static struct cfg80211_bss bss = {};
+
+static struct ieee80211_bss_conf link_conf = {.bss = &bss};
+
+static const struct iwl_fw_cmd_version entry = {
+ .group = LEGACY_GROUP,
+ .cmd = BT_PROFILE_NOTIFICATION,
+ .notif_ver = 4
+};
+
+static struct iwl_fw fw = {
+ .ucode_capa = {
+ .n_cmd_versions = 1,
+ .cmd_versions = &entry,
+ },
+};
+
+static struct iwl_mvm mvm = {
+ .hw = &hw,
+ .fw = &fw,
+};
+
+static const struct link_grading_case {
+ const char *desc;
+ const struct cfg80211_chan_def chandef;
+ s32 signal;
+ s16 channel_util;
+ int chan_load_by_us;
+ unsigned int grade;
+} link_grading_cases[] = {
+ {
+ .desc = "UHB, RSSI below range, no factors",
+ .chandef = {
+ .chan = &chan_6ghz,
+ .width = NL80211_CHAN_WIDTH_20,
+ },
+ .signal = -100,
+ .grade = 177,
+ },
+ {
+ .desc = "LB, RSSI in range, no factors",
+ .chandef = {
+ .chan = &chan_2ghz,
+ .width = NL80211_CHAN_WIDTH_20,
+ },
+ .signal = -84,
+ .grade = 344,
+ },
+ {
+ .desc = "HB, RSSI above range, no factors",
+ .chandef = {
+ .chan = &chan_5ghz,
+ .width = NL80211_CHAN_WIDTH_20,
+ },
+ .signal = -50,
+ .grade = 3442,
+ },
+ {
+ .desc = "HB, BSS Load IE (20 percent), inactive link, no puncturing factor",
+ .chandef = {
+ .chan = &chan_5ghz,
+ .width = NL80211_CHAN_WIDTH_20,
+ },
+ .signal = -66,
+ .channel_util = 51,
+ .grade = 1836,
+ },
+ {
+ .desc = "LB, BSS Load IE (20 percent), active link, chan_load_by_us=10 percent. No puncturing factor",
+ .chandef = {
+ .chan = &chan_2ghz,
+ .width = NL80211_CHAN_WIDTH_20,
+ },
+ .signal = -61,
+ .channel_util = 51,
+ .chan_load_by_us = 10,
+ .grade = 2061,
+ },
+ {
+ .desc = "UHB, BSS Load IE (40 percent), active link, chan_load_by_us=50 (invalid) percent. No puncturing factor",
+ .chandef = {
+ .chan = &chan_6ghz,
+ .width = NL80211_CHAN_WIDTH_20,
+ },
+ .signal = -66,
+ .channel_util = 102,
+ .chan_load_by_us = 50,
+ .grade = 1552,
+ },
+ { .desc = "HB, 80 MHz, no channel load factor, punctured percentage 0",
+ .chandef = {
+ .chan = &chan_5ghz,
+ .width = NL80211_CHAN_WIDTH_80,
+ .punctured = 0x0000
+ },
+ .signal = -72,
+ .grade = 1750,
+ },
+ { .desc = "HB, 160 MHz, no channel load factor, punctured percentage 25",
+ .chandef = {
+ .chan = &chan_5ghz,
+ .width = NL80211_CHAN_WIDTH_160,
+ .punctured = 0x3
+ },
+ .signal = -72,
+ .grade = 1312,
+ },
+ { .desc = "UHB, 320 MHz, no channel load factor, punctured percentage 12.5 (2/16)",
+ .chandef = {
+ .chan = &chan_6ghz,
+ .width = NL80211_CHAN_WIDTH_320,
+ .punctured = 0x3
+ },
+ .signal = -72,
+ .grade = 1806,
+ },
+ { .desc = "HB, 160 MHz, channel load 20, channel load by us 10, punctured percentage 25",
+ .chandef = {
+ .chan = &chan_5ghz,
+ .width = NL80211_CHAN_WIDTH_160,
+ .punctured = 0x3
+ },
+ .channel_util = 51,
+ .chan_load_by_us = 10,
+ .signal = -72,
+ .grade = 1179,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(link_grading, link_grading_cases, desc)
+
+static void setup_link_conf(struct kunit *test)
+{
+ const struct link_grading_case *params = test->param_value;
+ size_t vif_size = sizeof(struct ieee80211_vif) +
+ sizeof(struct iwl_mvm_vif);
+ struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL);
+ struct ieee80211_bss_load_elem *bss_load;
+ struct element *element;
+ size_t ies_size = sizeof(struct cfg80211_bss_ies) +
+ sizeof(*bss_load) + sizeof(struct element);
+ struct cfg80211_bss_ies *ies;
+ struct iwl_mvm_vif *mvmvif;
+
+ KUNIT_ASSERT_NOT_NULL(test, vif);
+
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ if (params->chan_load_by_us > 0) {
+ ctx.channel_load_by_us = params->chan_load_by_us;
+ mvmvif->link[0] = &mvm_link;
+ }
+
+ link_conf.vif = vif;
+ link_conf.chanreq.oper = params->chandef;
+ bss.signal = DBM_TO_MBM(params->signal);
+
+ ies = kunit_kzalloc(test, ies_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, ies);
+ ies->len = sizeof(*bss_load) + sizeof(struct element);
+
+ element = (void *)ies->data;
+ element->datalen = sizeof(*bss_load);
+ element->id = 11;
+
+ bss_load = (void *)element->data;
+ bss_load->channel_util = params->channel_util;
+
+ rcu_assign_pointer(bss.ies, ies);
+ rcu_assign_pointer(bss.beacon_ies, ies);
+}
+
+static void test_link_grading(struct kunit *test)
+{
+ const struct link_grading_case *params = test->param_value;
+ unsigned int ret;
+
+ setup_link_conf(test);
+
+ rcu_read_lock();
+ ret = iwl_mvm_get_link_grade(&link_conf);
+ rcu_read_unlock();
+
+ KUNIT_EXPECT_EQ(test, ret, params->grade);
+
+ kunit_kfree(test, link_conf.vif);
+ RCU_INIT_POINTER(bss.ies, NULL);
+}
+
+static struct kunit_case link_grading_test_cases[] = {
+ KUNIT_CASE_PARAM(test_link_grading, link_grading_gen_params),
+ {}
+};
+
+static struct kunit_suite link_grading = {
+ .name = "iwlmvm-link-grading",
+ .test_cases = link_grading_test_cases,
+};
+
+kunit_test_suite(link_grading);
+
+static const struct valid_link_pair_case {
+ const char *desc;
+ bool bt;
+ struct ieee80211_channel *chan_a;
+ struct ieee80211_channel *chan_b;
+ enum nl80211_chan_width cw_a;
+ enum nl80211_chan_width cw_b;
+ s32 sig_a;
+ s32 sig_b;
+ bool csa_a;
+ bool valid;
+} valid_link_pair_cases[] = {
+ {
+ .desc = "HB + UHB, valid.",
+ .chan_a = &chan_6ghz,
+ .chan_b = &chan_5ghz,
+ .valid = true,
+ },
+ {
+ .desc = "LB + HB, no BT.",
+ .chan_a = &chan_2ghz,
+ .chan_b = &chan_5ghz,
+ .valid = false,
+ },
+ {
+ .desc = "LB + HB, with BT.",
+ .bt = true,
+ .chan_a = &chan_2ghz,
+ .chan_b = &chan_5ghz,
+ .valid = false,
+ },
+ {
+ .desc = "Same band",
+ .chan_a = &chan_2ghz,
+ .chan_b = &chan_2ghz,
+ .valid = false,
+ },
+ {
+ .desc = "RSSI: LB, 20 MHz, low",
+ .chan_a = &chan_2ghz,
+ .cw_a = NL80211_CHAN_WIDTH_20,
+ .sig_a = -68,
+ .chan_b = &chan_5ghz,
+ .valid = false,
+ },
+ {
+ .desc = "RSSI: UHB, 20 MHz, high",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_20,
+ .sig_a = -66,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_20,
+ .valid = true,
+ },
+ {
+ .desc = "RSSI: UHB, 40 MHz, low",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_40,
+ .sig_a = -65,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_40,
+ .valid = false,
+ },
+ {
+ .desc = "RSSI: UHB, 40 MHz, high",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_40,
+ .sig_a = -63,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_40,
+ .valid = true,
+ },
+ {
+ .desc = "RSSI: UHB, 80 MHz, low",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_80,
+ .sig_a = -62,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_80,
+ .valid = false,
+ },
+ {
+ .desc = "RSSI: UHB, 80 MHz, high",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_80,
+ .sig_a = -60,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_80,
+ .valid = true,
+ },
+ {
+ .desc = "RSSI: UHB, 160 MHz, low",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_160,
+ .sig_a = -59,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_160,
+ .valid = false,
+ },
+ {
+ .desc = "RSSI: HB, 160 MHz, high",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_160,
+ .sig_a = -5,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_160,
+ .valid = true,
+ },
+ {
+ .desc = "CSA active",
+ .chan_a = &chan_6ghz,
+ .cw_a = NL80211_CHAN_WIDTH_160,
+ .sig_a = -5,
+ .chan_b = &chan_5ghz,
+ .cw_b = NL80211_CHAN_WIDTH_160,
+ .valid = false,
+ /* same as previous entry with valid=true except for CSA */
+ .csa_a = true,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(valid_link_pair, valid_link_pair_cases, desc)
+
+static void test_valid_link_pair(struct kunit *test)
+{
+ const struct valid_link_pair_case *params = test->param_value;
+ size_t vif_size = sizeof(struct ieee80211_vif) +
+ sizeof(struct iwl_mvm_vif);
+ struct ieee80211_vif *vif = kunit_kzalloc(test, vif_size, GFP_KERNEL);
+ struct iwl_trans *trans = kunit_kzalloc(test, sizeof(struct iwl_trans),
+ GFP_KERNEL);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_link_sel_data link_a = {
+ .chandef = &chandef_a,
+ .link_id = 1,
+ .signal = params->sig_a,
+ };
+ struct iwl_mvm_link_sel_data link_b = {
+ .chandef = &chandef_b,
+ .link_id = 5,
+ .signal = params->sig_b,
+ };
+ struct ieee80211_bss_conf *conf;
+ bool result;
+
+ KUNIT_ASSERT_NOT_NULL(test, vif);
+ KUNIT_ASSERT_NOT_NULL(test, trans);
+
+ chandef_a.chan = params->chan_a;
+ chandef_b.chan = params->chan_b;
+
+ chandef_a.width = params->cw_a ?: NL80211_CHAN_WIDTH_20;
+ chandef_b.width = params->cw_b ?: NL80211_CHAN_WIDTH_20;
+
+ mvm.trans = trans;
+
+ mvm.last_bt_notif.wifi_loss_low_rssi = params->bt;
+ mvmvif->mvm = &mvm;
+
+ conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, conf);
+ conf->chanreq.oper = chandef_a;
+ conf->csa_active = params->csa_a;
+ vif->link_conf[link_a.link_id] = (void __rcu *)conf;
+
+ conf = kunit_kzalloc(test, sizeof(*vif->link_conf[0]), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, conf);
+ conf->chanreq.oper = chandef_b;
+ vif->link_conf[link_b.link_id] = (void __rcu *)conf;
+
+ wiphy_lock(&wiphy);
+ result = iwl_mvm_mld_valid_link_pair(vif, &link_a, &link_b);
+ wiphy_unlock(&wiphy);
+
+ KUNIT_EXPECT_EQ(test, result, params->valid);
+
+ kunit_kfree(test, vif);
+ kunit_kfree(test, trans);
+}
+
+static struct kunit_case valid_link_pair_test_cases[] = {
+ KUNIT_CASE_PARAM(test_valid_link_pair, valid_link_pair_gen_params),
+ {},
+};
+
+static struct kunit_suite valid_link_pair = {
+ .name = "iwlmvm-valid-link-pair",
+ .test_cases = valid_link_pair_test_cases,
+};
+
+kunit_test_suite(valid_link_pair);
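
Both suites in this file follow the table-driven KUnit pattern: KUNIT_ARRAY_PARAM_DESC() generates a <name>_gen_params() iterator over the case array, using the desc member as the per-case name, and KUNIT_CASE_PARAM() binds that iterator to the test body. A minimal self-contained sketch of the same structure, with an invented example suite:

/* Sketch of the parameterized-KUnit pattern; suite and cases invented. */
#include <kunit/test.h>

static const struct add_case {
	const char *desc;
	int a, b, sum;
} add_cases[] = {
	{ .desc = "small", .a = 1, .b = 2, .sum = 3 },
	{ .desc = "negative", .a = -4, .b = 1, .sum = -3 },
};

/* Generates add_gen_params(); each case is named after its desc field */
KUNIT_ARRAY_PARAM_DESC(add, add_cases, desc)

static void test_add(struct kunit *test)
{
	const struct add_case *params = test->param_value;

	KUNIT_EXPECT_EQ(test, params->a + params->b, params->sum);
}

static struct kunit_case add_test_cases[] = {
	KUNIT_CASE_PARAM(test_add, add_gen_params),
	{}
};

static struct kunit_suite add_suite = {
	.name = "example-add",
	.test_cases = add_test_cases,
};

kunit_test_suite(add_suite);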
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c
new file mode 100644
index 000000000000..f556acbac77f
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/module.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This is just module boilerplate for the iwlmvm kunit module.
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <linux/module.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("kunit tests for iwlmvm");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c
new file mode 100644
index 000000000000..d3b6a57c3ebe
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/scan.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KUnit tests for the channel survey noise-averaging helper
+ *
+ * Copyright (C) 2024 Intel Corporation
+ */
+#include <net/mac80211.h>
+#include "../mvm.h"
+#include <kunit/test.h>
+
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+
+static const struct acs_average_db_case {
+ const char *desc;
+ u8 neg_dbm[22];
+ s8 result;
+} acs_average_db_cases[] = {
+ {
+ .desc = "Smallest possible value, all filled",
+ .neg_dbm = {
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 128, 128
+ },
+ .result = -128,
+ },
+ {
+ .desc = "Biggest possible value, all filled",
+ .neg_dbm = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0,
+ },
+ .result = 0,
+ },
+ {
+ .desc = "Smallest possible value, partial filled",
+ .neg_dbm = {
+ 128, 128, 128, 128, 128, 128, 128, 128, 128, 128,
+ 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff,
+ },
+ .result = -128,
+ },
+ {
+ .desc = "Biggest possible value, partial filled",
+ .neg_dbm = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff,
+ },
+ .result = 0,
+ },
+ {
+ .desc = "Adding -80dBm to -75dBm until it is still rounded to -79dBm",
+ .neg_dbm = {
+ 75, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 0xff, 0xff, 0xff,
+ 0xff, 0xff,
+ },
+ .result = -79,
+ },
+ {
+ .desc = "Adding -80dBm to -75dBm until it is just rounded to -80dBm",
+ .neg_dbm = {
+ 75, 80, 80, 80, 80, 80, 80, 80, 80, 80,
+ 80, 80, 80, 80, 80, 80, 80, 80, 0xff, 0xff,
+ 0xff, 0xff,
+ },
+ .result = -80,
+ },
+};
+
+KUNIT_ARRAY_PARAM_DESC(acs_average_db, acs_average_db_cases, desc)
+
+static void test_acs_average_db(struct kunit *test)
+{
+ const struct acs_average_db_case *params = test->param_value;
+ struct iwl_umac_scan_channel_survey_notif notif;
+ int i;
+
+ /* Test the values in the given order */
+ for (i = 0; i < ARRAY_SIZE(params->neg_dbm); i++)
+ notif.noise[i] = params->neg_dbm[i];
+ KUNIT_ASSERT_EQ(test,
+ iwl_mvm_average_dbm_values(&notif),
+ params->result);
+
+ /* Test in reverse order */
+ for (i = 0; i < ARRAY_SIZE(params->neg_dbm); i++)
+ notif.noise[ARRAY_SIZE(params->neg_dbm) - i - 1] =
+ params->neg_dbm[i];
+ KUNIT_ASSERT_EQ(test,
+ iwl_mvm_average_dbm_values(&notif),
+ params->result);
+}
+
+static struct kunit_case acs_average_db_case[] = {
+ KUNIT_CASE_PARAM(test_acs_average_db, acs_average_db_gen_params),
+ {}
+};
+
+static struct kunit_suite acs_average_db = {
+ .name = "iwlmvm-acs-average-db",
+ .test_cases = acs_average_db_case,
+};
+
+kunit_test_suite(acs_average_db);
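
The helper under test averages noise values that arrive as negated dBm, where 0xff marks an unfilled slot. Because dBm is logarithmic, a correct average converts each sample to linear power, averages, and converts back; the sketch below does this with floating point for clarity, whereas the in-kernel iwl_mvm_average_dbm_values() must reach the same result with integer arithmetic. This is an illustrative reimplementation, not the driver's code.

/* Userspace illustration of log-domain (dBm) averaging. */
#include <math.h>
#include <stdint.h>

/* Entries of 0xff mark unfilled slots, as in the test vectors above. */
static int8_t average_dbm(const uint8_t *neg_dbm, int n)
{
	double sum_mw = 0.0;
	int used = 0;

	for (int i = 0; i < n; i++) {
		if (neg_dbm[i] == 0xff)
			continue;
		/* value is stored as -dBm; convert to linear mW */
		sum_mw += pow(10.0, -(double)neg_dbm[i] / 10.0);
		used++;
	}
	if (!used)
		return 0;

	/* back to dBm, rounded to the nearest integer */
	return (int8_t)lround(10.0 * log10(sum_mw / used));
}

With 16 samples of -80 dBm and one of -75 dBm this yields -79, and adding one more -80 dBm sample tips it to -80, matching the two rounding cases in the table above.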
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
index ad960faceb0d..a8c42ce3b630 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c
@@ -47,8 +47,13 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
{
+ struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm);
+ struct ieee80211_vif *vif = mvm->p2p_device_vif;
+
+ lockdep_assert_held(&mvm->mutex);
+
/*
- * Clear the ROC_RUNNING status bit.
+ * Clear the ROC_P2P_RUNNING status bit.
* This will cause the TX path to drop offchannel transmissions.
* That would also be done by mac80211, but it is racy, in particular
* in the case that the time event actually completed in the firmware.
@@ -58,7 +63,7 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
* won't get stuck on the queue and be transmitted in the next
* time event.
*/
- if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) {
+ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status)) {
struct iwl_mvm_vif *mvmvif;
synchronize_net();
@@ -70,9 +75,7 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
* not really racy.
*/
- if (!WARN_ON(!mvm->p2p_device_vif)) {
- struct ieee80211_vif *vif = mvm->p2p_device_vif;
-
+ if (!WARN_ON(!vif)) {
mvmvif = iwl_mvm_vif_from_mac80211(vif);
iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
mvmvif->deflink.bcast_sta.tfd_queue_msk);
@@ -97,7 +100,14 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
}
}
- /* Do the same for AUX ROC */
+ /*
+ * P2P AUX ROC and HS2.0 ROC do not run simultaneously.
+ * Clear the ROC_AUX_RUNNING status bit.
+ * This will cause the TX path to drop offchannel transmissions.
+ * That would also be done by mac80211, but it is racy, in particular
+ * in the case that the time event actually completed in the firmware
+ * (which is handled in iwl_mvm_te_handle_notif).
+ */
if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) {
synchronize_net();
@@ -106,6 +116,7 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
if (mvm->mld_api_is_used) {
iwl_mvm_mld_rm_aux_sta(mvm);
+ mutex_unlock(&mvm->mutex);
return;
}
@@ -115,6 +126,10 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm)
if (iwl_mvm_has_new_station_api(mvm->fw))
iwl_mvm_rm_aux_sta(mvm);
}
+
+ if (!IS_ERR_OR_NULL(bss_vif))
+ iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_ROC);
+ mutex_unlock(&mvm->mutex);
}
void iwl_mvm_roc_done_wk(struct work_struct *wk)
@@ -122,8 +137,8 @@ void iwl_mvm_roc_done_wk(struct work_struct *wk)
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, roc_done_wk);
mutex_lock(&mvm->mutex);
+ /* Mutex is released inside iwl_mvm_cleanup_roc() */
iwl_mvm_cleanup_roc(mvm);
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
@@ -207,6 +222,8 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
iwl_dbg_tlv_time_point(&mvm->fwrt,
IWL_FW_INI_TIME_POINT_ASSOC_FAILED,
NULL);
+
+ mvmvif->session_prot_connection_loss = true;
}
iwl_mvm_connection_loss(mvm, vif, errmsg);
@@ -371,7 +388,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
- set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
ieee80211_ready_on_channel(mvm->hw);
} else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) {
iwl_mvm_te_handle_notify_csa(mvm, te_data, notif);
@@ -381,14 +398,51 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
}
}
+struct iwl_mvm_rx_roc_iterator_data {
+ u32 activity;
+ bool end_activity;
+ bool found;
+};
+
+static void iwl_mvm_rx_roc_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm_rx_roc_iterator_data *data = _data;
+
+ if (mvmvif->roc_activity == data->activity) {
+ data->found = true;
+ if (data->end_activity)
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+ }
+}
+
void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_roc_notif *notif = (void *)pkt->data;
+ u32 activity = le32_to_cpu(notif->activity);
+ bool started = le32_to_cpu(notif->success) &&
+ le32_to_cpu(notif->started);
+ struct iwl_mvm_rx_roc_iterator_data data = {
+ .activity = activity,
+ .end_activity = !started,
+ };
- if (le32_to_cpu(notif->success) && le32_to_cpu(notif->started) &&
- le32_to_cpu(notif->activity) == ROC_ACTIVITY_HOTSPOT) {
+ /* Clear vif roc_activity if done (set to ROC_NUM_ACTIVITIES) */
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_rx_roc_iterator,
+ &data);
+ /*
+ * It is possible that the ROC was canceled
+ * but the notification was already fired.
+ */
+ if (!data.found)
+ return;
+
+ if (started) {
set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
ieee80211_ready_on_channel(mvm->hw);
} else {
@@ -717,6 +771,21 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm,
"Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret);
}
+static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
+{
+ struct iwl_roc_req roc_cmd = {
+ .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
+ .activity = cpu_to_le32(activity),
+ };
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+ ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0,
+ sizeof(roc_cmd), &roc_cmd);
+ if (ret)
+ IWL_ERR(mvm, "Couldn't send the ROC_CMD: %d\n", ret);
+}
+
static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_time_event_data *te_data,
u32 *uid)
@@ -726,6 +795,9 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif;
enum nl80211_iftype iftype;
s8 link_id;
+ bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
+ u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
if (!vif)
return false;
@@ -750,14 +822,22 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
iwl_mvm_te_clear_data(mvm, te_data);
spin_unlock_bh(&mvm->time_event_lock);
- /* When session protection is used, the te_data->id field
- * is reused to save session protection's configuration.
- * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set
- * to HOT_SPOT_CMD.
- */
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
- id != HOT_SPOT_CMD) {
+ if ((p2p_aux && iftype == NL80211_IFTYPE_P2P_DEVICE) ||
+ (roc_ver >= 3 && mvmvif->roc_activity == ROC_ACTIVITY_HOTSPOT)) {
+ if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) {
+ iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity);
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+ iwl_mvm_roc_finished(mvm);
+ }
+ return false;
+ } else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) &&
+ id != HOT_SPOT_CMD) {
+ /* When session protection is used, the te_data->id field
+ * is reused to save session protection's configuration.
+ * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id
+ * field is set to HOT_SPOT_CMD.
+ */
if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) {
/* Session protection is still ongoing. Cancel it */
iwl_mvm_cancel_session_protection(mvm, vif, id,
@@ -958,7 +1038,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm,
if (WARN_ON(mvmvif->time_event_data.id !=
le32_to_cpu(notif->conf_id)))
goto out_unlock;
- set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
ieee80211_ready_on_channel(mvm->hw); /* Start TE */
}
@@ -977,12 +1057,21 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
u32 *duration_tu,
u32 *delay)
{
- u32 dtim_interval = vif->bss_conf.dtim_period *
- vif->bss_conf.beacon_int;
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+ u32 dtim_interval = 0;
*delay = AUX_ROC_MIN_DELAY;
*duration_tu = MSEC_TO_TU(duration_ms);
+ rcu_read_lock();
+ for_each_vif_active_link(vif, link_conf, link_id) {
+ dtim_interval =
+ max_t(u32, dtim_interval,
+ link_conf->dtim_period * link_conf->beacon_int);
+ }
+ rcu_read_unlock();
+
/*
* If we are associated we want the delay time to be at least one
* dtim interval so that the FW can wait until after the DTIM and
@@ -991,8 +1080,10 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
* Since we want to use almost a whole dtim interval we would also
* like the delay to be for 2-3 dtim intervals, in case there are
* other time events with higher priority.
+ * dtim_interval should never be 0; it can be 1 if we don't know it
+ * yet (we haven't heard any beacon).
*/
- if (vif->cfg.assoc) {
+ if (vif->cfg.assoc && !WARN_ON(!dtim_interval)) {
*delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY);
/* We cannot remain off-channel longer than the DTIM interval */
if (dtim_interval <= *duration_tu) {
@@ -1007,7 +1098,7 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif,
int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
struct ieee80211_channel *channel,
struct ieee80211_vif *vif,
- int duration, u32 activity)
+ int duration, enum iwl_roc_activity activity)
{
int res;
u32 duration_tu, delay;
@@ -1016,9 +1107,13 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
.activity = cpu_to_le32(activity),
.sta_id = cpu_to_le32(mvm->aux_sta.sta_id),
};
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
lockdep_assert_held(&mvm->mutex);
+ if (WARN_ON(mvmvif->roc_activity != ROC_NUM_ACTIVITIES))
+ return -EBUSY;
+
/* Set the channel info data */
iwl_mvm_set_chan_info(mvm, &roc_req.channel_info,
channel->hw_value,
@@ -1034,14 +1129,16 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm,
"\t(requested = %ums, max_delay = %ums)\n",
duration, delay);
IWL_DEBUG_TE(mvm,
- "Requesting to remain on channel %u for %utu\n",
- channel->hw_value, duration_tu);
+ "Requesting to remain on channel %u for %utu. activity %u\n",
+ channel->hw_value, duration_tu, activity);
/* Set the node address */
memcpy(roc_req.node_addr, vif->addr, ETH_ALEN);
res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
0, sizeof(roc_req), &roc_req);
+ if (!res)
+ mvmvif->roc_activity = activity;
return res;
}
@@ -1184,59 +1281,40 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm)
__iwl_mvm_remove_time_event(mvm, te_data, &uid);
}
-static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity)
-{
- int ret;
- struct iwl_roc_req roc_cmd = {
- .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE),
- .activity = cpu_to_le32(activity),
- };
-
- lockdep_assert_held(&mvm->mutex);
- ret = iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(MAC_CONF_GROUP, ROC_CMD),
- 0, sizeof(roc_cmd), &roc_cmd);
- WARN_ON(ret);
-}
-
-static void iwl_mvm_roc_station_remove(struct iwl_mvm *mvm,
- struct iwl_mvm_vif *mvmvif)
-{
- u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD);
- u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
- IWL_FW_CMD_VER_UNKNOWN);
-
- if (fw_ver == IWL_FW_CMD_VER_UNKNOWN)
- iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
- &mvmvif->hs_time_event_data);
- else if (fw_ver == 3)
- iwl_mvm_roc_rm_cmd(mvm, ROC_ACTIVITY_HOTSPOT);
- else
- IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver);
-}
-
void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
- struct iwl_mvm_vif *mvmvif;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data;
+ bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
+ u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
+ WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0);
+ int iftype = vif->type;
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
- mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ mutex_lock(&mvm->mutex);
+
+ if (p2p_aux || (roc_ver >= 3 && iftype != NL80211_IFTYPE_P2P_DEVICE)) {
+ if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) {
+ iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity);
+ mvmvif->roc_activity = ROC_NUM_ACTIVITIES;
+ }
+ goto cleanup_roc;
+ } else if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) {
te_data = &mvmvif->time_event_data;
- if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+ if (iftype == NL80211_IFTYPE_P2P_DEVICE) {
if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) {
IWL_DEBUG_TE(mvm,
"No remain on channel event\n");
+ mutex_unlock(&mvm->mutex);
return;
}
-
iwl_mvm_cancel_session_protection(mvm, vif,
te_data->id,
te_data->link_id);
} else {
- iwl_mvm_roc_station_remove(mvm, mvmvif);
+ iwl_mvm_remove_aux_roc_te(mvm, mvmvif,
+ &mvmvif->hs_time_event_data);
}
goto cleanup_roc;
}
@@ -1244,12 +1322,13 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
te_data = iwl_mvm_get_roc_te(mvm);
if (!te_data) {
IWL_WARN(mvm, "No remain on channel event\n");
+ mutex_unlock(&mvm->mutex);
return;
}
mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif);
-
- if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ iftype = te_data->vif->type;
+ if (iftype == NL80211_IFTYPE_P2P_DEVICE)
iwl_mvm_remove_time_event(mvm, mvmvif, te_data);
else
iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data);
@@ -1260,9 +1339,12 @@ cleanup_roc:
* (so the status bit isn't set) set it here so iwl_mvm_cleanup_roc will
* cleanup things properly
*/
- set_bit(vif->type == NL80211_IFTYPE_P2P_DEVICE ?
- IWL_MVM_STATUS_ROC_RUNNING : IWL_MVM_STATUS_ROC_AUX_RUNNING,
- &mvm->status);
+ if (p2p_aux || iftype != NL80211_IFTYPE_P2P_DEVICE)
+ set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
+ else
+ set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status);
+
+ /* Mutex is released inside iwl_mvm_cleanup_roc() */
iwl_mvm_cleanup_roc(mvm);
}
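
iwl_mvm_cleanup_roc() now takes ownership of mvm->mutex from its callers and drops it on every exit path, including the early mld return above. A sketch of this "caller locks, callee unlocks" hand-off with the sparse __releases() annotation; everything apart from the annotation itself is invented for illustration.

/* Sketch of a lock hand-off where the callee releases the mutex. */
#include <linux/mutex.h>

struct ctx {
	struct mutex lock;
	bool fast_path;		/* illustrative state */
};

static void cleanup(struct ctx *c)
	__releases(c->lock)
{
	lockdep_assert_held(&c->lock);

	if (c->fast_path) {
		/* early exit still drops the lock */
		mutex_unlock(&c->lock);
		return;
	}

	/* ... slow-path teardown ... */
	mutex_unlock(&c->lock);
}

static void caller(struct ctx *c)
{
	mutex_lock(&c->lock);
	cleanup(c);		/* mutex released inside */
}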
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 61a4638d1be2..d92470960b38 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2019-2022 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2016 Intel Deutschland GmbH
*/
@@ -299,7 +299,7 @@ static void check_exit_ctkill(struct work_struct *work)
ret = iwl_mvm_get_temp(mvm, &temp);
- __iwl_mvm_mac_stop(mvm);
+ __iwl_mvm_mac_stop(mvm, false);
if (ret)
goto reschedule;
@@ -618,48 +618,41 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device,
int ret;
int temp;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (!iwl_mvm_firmware_running(mvm) ||
mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
- ret = -ENODATA;
- goto out;
+ /*
+ * Tell the core that there is no valid temperature value to
+ * return, but it need not worry about this.
+ */
+ *temperature = THERMAL_TEMP_INVALID;
+ return 0;
}
ret = iwl_mvm_get_temp(mvm, &temp);
if (ret)
- goto out;
+ return ret;
*temperature = temp * 1000;
-
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return 0;
}
static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device,
- int trip, int temp)
+ const struct thermal_trip *trip, int temp)
{
struct iwl_mvm *mvm = thermal_zone_device_priv(device);
- int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (!iwl_mvm_firmware_running(mvm) ||
- mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
- ret = -EIO;
- goto out;
- }
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
- if ((temp / 1000) > S16_MAX) {
- ret = -EINVAL;
- goto out;
- }
+ if ((temp / 1000) > S16_MAX)
+ return -EINVAL;
- ret = iwl_mvm_send_temp_report_ths_cmd(mvm);
-out:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return iwl_mvm_send_temp_report_ths_cmd(mvm);
}
static struct thermal_zone_device_ops tzone_ops = {
@@ -733,27 +726,18 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long new_state)
{
struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata);
- int ret;
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (!iwl_mvm_firmware_running(mvm) ||
- mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) {
- ret = -EIO;
- goto unlock;
- }
-
- if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) {
- ret = -EINVAL;
- goto unlock;
- }
+ mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR)
+ return -EIO;
- ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
- new_state);
+ if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets))
+ return -EINVAL;
-unlock:
- mutex_unlock(&mvm->mutex);
- return ret;
+ return iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
+ new_state);
}
static const struct thermal_cooling_device_ops tcooling_ops = {
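
Returning 0 with *temperature = THERMAL_TEMP_INVALID tells the thermal core "no reading available right now" without raising an error, unlike the old -ENODATA path. A sketch of a .get_temp callback following that convention; my_dev and my_read_celsius are invented for the example.

/* Sketch of a .get_temp op using the THERMAL_TEMP_INVALID convention. */
#include <linux/thermal.h>

struct my_dev {
	bool powered;
};

static int my_read_celsius(struct my_dev *dev)	/* stub for the sketch */
{
	return 42;
}

static int my_get_temp(struct thermal_zone_device *tzd, int *temp)
{
	struct my_dev *dev = thermal_zone_device_priv(tzd);

	if (!dev->powered) {
		/* valid call, but no measurement available right now */
		*temp = THERMAL_TEMP_INVALID;
		return 0;
	}

	/* thermal core expects millidegrees Celsius */
	*temp = my_read_celsius(dev) * 1000;
	return 0;
}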
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 782ddc8c296b..7ff5ea5e7aca 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -12,7 +12,7 @@
#include <net/ipv6.h>
#include "iwl-trans.h"
-#include "iwl-eeprom-parse.h"
+#include "iwl-nvm-utils.h"
#include "mvm.h"
#include "sta.h"
#include "time-sync.h"
@@ -802,10 +802,30 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
if (info.control.vif) {
struct iwl_mvm_vif *mvmvif =
iwl_mvm_vif_from_mac80211(info.control.vif);
+ bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm);
- if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
- info.control.vif->type == NL80211_IFTYPE_AP ||
- info.control.vif->type == NL80211_IFTYPE_ADHOC) {
+ if ((info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE &&
+ p2p_aux) ||
+ (info.control.vif->type == NL80211_IFTYPE_STATION &&
+ offchannel)) {
+ /*
+ * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets on two
+ * different vif types, P2P Device and STATION.
+ * P2P Device normally uses the offchannel queue.
+ * STATION (HS2.0) uses the auxiliary context of the FW and
+ * hence must be sent on the aux queue.
+ * If P2P_DEV_OVER_AUX is supported (p2p_aux = true), P2P
+ * Device uses the aux queue as well.
+ */
+ sta_id = mvm->aux_sta.sta_id;
+ queue = mvm->aux_queue;
+ if (WARN_ON(queue == IWL_MVM_INVALID_QUEUE))
+ return -1;
+ } else if (info.control.vif->type ==
+ NL80211_IFTYPE_P2P_DEVICE ||
+ info.control.vif->type == NL80211_IFTYPE_AP ||
+ info.control.vif->type == NL80211_IFTYPE_ADHOC) {
u32 link_id = u32_get_bits(info.control.flags,
IEEE80211_TX_CTRL_MLO_LINK);
struct iwl_mvm_vif_link_info *link;
@@ -831,18 +851,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
queue = mvm->snif_queue;
sta_id = mvm->snif_sta.sta_id;
- } else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
- offchannel) {
- /*
- * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets
- * that can be used in 2 different types of vifs, P2P &
- * STATION.
- * P2P uses the offchannel queue.
- * STATION (HS2.0) uses the auxiliary context of the FW,
- * and hence needs to be sent on the aux queue.
- */
- sta_id = mvm->aux_sta.sta_id;
- queue = mvm->aux_queue;
}
}
@@ -1870,6 +1878,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
IWL_DEBUG_TX_REPLY(mvm,
"Next reclaimed packet:%d\n",
next_reclaimed);
+ iwl_mvm_count_mpdu(mvmsta, sta_id, 1, true, 0);
} else {
IWL_DEBUG_TX_REPLY(mvm,
"NDP - don't update next_reclaimed\n");
@@ -2247,9 +2256,13 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
le32_to_cpu(ba_res->tx_rate), false);
}
- if (mvmsta)
+ if (mvmsta) {
iwl_mvm_tx_airtime(mvm, mvmsta,
le32_to_cpu(ba_res->wireless_time));
+
+ iwl_mvm_count_mpdu(mvmsta, sta_id,
+ le16_to_cpu(ba_res->txed), true, 0);
+ }
rcu_read_unlock();
return;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index ab56ff87c6f9..0e5fa8374103 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -344,6 +344,26 @@ static bool iwl_wait_stats_complete(struct iwl_notif_wait_data *notif_wait,
return true;
}
+#define PERIODIC_STAT_RATE 5 /* notification period, in seconds */
+
+int iwl_mvm_request_periodic_system_statistics(struct iwl_mvm *mvm, bool enable)
+{
+ u32 flags = enable ? 0 : IWL_STATS_CFG_FLG_DISABLE_NTFY_MSK;
+ u32 type = enable ? (IWL_STATS_NTFY_TYPE_ID_OPER |
+ IWL_STATS_NTFY_TYPE_ID_OPER_PART1) : 0;
+ struct iwl_system_statistics_cmd system_cmd = {
+ .cfg_mask = cpu_to_le32(flags),
+ .config_time_sec = cpu_to_le32(enable ?
+ PERIODIC_STAT_RATE : 0),
+ .type_id_mask = cpu_to_le32(type),
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm,
+ WIDE_ID(SYSTEM_GROUP,
+ SYSTEM_STATISTICS_CMD),
+ 0, sizeof(system_cmd), &system_cmd);
+}
+
static int iwl_mvm_request_system_statistics(struct iwl_mvm *mvm, bool clear,
u8 cmd_ver)
{
@@ -415,6 +435,13 @@ int iwl_mvm_request_statistics(struct iwl_mvm *mvm, bool clear)
IWL_FW_CMD_VER_UNKNOWN);
int ret;
+ /*
+ * Don't request statistics during restart: right after a restart they
+ * won't carry any useful information, and clearing isn't needed either.
+ */
+ if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ return 0;
+
if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN)
return iwl_mvm_request_system_statistics(mvm, clear, cmd_ver);
@@ -865,7 +892,7 @@ static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
{
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
ieee80211_iterate_active_interfaces(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -873,8 +900,6 @@ static void iwl_mvm_tcm_results(struct iwl_mvm *mvm)
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
iwl_mvm_config_scan(mvm);
-
- mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk)
@@ -1103,10 +1128,9 @@ void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm)
spin_unlock(&mvm->tcm.lock);
if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) {
- mutex_lock(&mvm->mutex);
+ guard(mvm)(mvm);
if (iwl_mvm_request_statistics(mvm, true))
handle_uapsd = false;
- mutex_unlock(&mvm->mutex);
}
spin_lock(&mvm->tcm.lock);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index c8fc8b4fd85c..e63efbf809f0 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -1,13 +1,34 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
+#include <linux/dmi.h>
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
#include "iwl-prph.h"
+static const struct dmi_system_id dmi_force_scu_active_approved_list[] = {
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ },
+ },
+ { .ident = "DELL",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Alienware"),
+ },
+ },
+ /* keep last */
+ {}
+};
+
+static bool iwl_is_force_scu_active_approved(void)
+{
+ return !!dmi_check_system(dmi_force_scu_active_approved_list);
+}
+
static void
iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
struct iwl_prph_scratch_hwm_cfg *dbg_cfg,
@@ -128,6 +149,14 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
if (trans->trans_cfg->imr_enabled)
control_flags |= IWL_PRPH_SCRATCH_IMR_DEBUG_EN;
+ if (CSR_HW_REV_TYPE(trans->hw_rev) == IWL_CFG_MAC_TYPE_GL &&
+ iwl_is_force_scu_active_approved()) {
+ control_flags |= IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE;
+ IWL_DEBUG_FW(trans,
+ "Context Info: Set SCU_FORCE_ACTIVE (0x%x) in control_flags\n",
+ IWL_PRPH_SCRATCH_SCU_FORCE_ACTIVE);
+ }
+
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
@@ -187,7 +216,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
ctxt_info_gen3->cr_tail_idx_arr_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4);
ctxt_info_gen3->mtr_base_addr =
- cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
+ cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
ctxt_info_gen3->mcr_base_addr =
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
ctxt_info_gen3->mtr_size =
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
index 0fa92704cd14..344e4d5a1c6e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -218,7 +218,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
/* initialize TX command queue */
ctxt_info->hcmd_cfg.cmd_queue_addr =
- cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr);
+ cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 4a657036b9d6..9ad43464b702 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -33,7 +33,7 @@ extern int _invalid_type;
.driver_data = _ASSIGN_CFG(cfg)
/* Hardware specific file defines the PCI IDs table for that hardware module */
-static const struct pci_device_id iwl_hw_card_ids[] = {
+VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = {
#if IS_ENABLED(CONFIG_IWLDVM)
{IWL_PCI_DEVICE(0x4232, 0x1201, iwl5100_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x4232, 0x1301, iwl5100_agn_cfg)}, /* Half Mini Card */
@@ -492,7 +492,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x7AF0, PCI_ANY_ID, iwl_so_trans_cfg)},
{IWL_PCI_DEVICE(0x51F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_imr_trans_cfg)},
- {IWL_PCI_DEVICE(0x51F1, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x54F0, PCI_ANY_ID, iwl_so_long_latency_trans_cfg)},
{IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_so_trans_cfg)},
@@ -504,8 +503,39 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)},
- {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0094, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_trans_cfg)},
{IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)},
+ {IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_trans_cfg)},
/* Sc devices */
{IWL_PCI_DEVICE(0xE440, PCI_ANY_ID, iwl_sc_trans_cfg)},
@@ -517,6 +547,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_hw_card_ids);
#define _IWL_DEV_INFO(_device, _subdevice, _mac_type, _mac_step, _rf_type, \
_rf_id, _rf_step, _no_160, _cores, _cdb, _cfg, _name) \
@@ -946,11 +977,6 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
iwl_cfg_ma, iwl_ax211_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_ma, iwl_ax221_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_ma, iwl_ax231_name),
@@ -1001,25 +1027,6 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB,
iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name),
-/* Bz */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
- iwl_cfg_bz, iwl_bz_name),
-
-/* Ga (Gl) */
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_gl, iwl_bz_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_gl, iwl_mtp_name),
-
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
@@ -1100,23 +1107,31 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
-/* MsP */
-/* For now we use the same FW as MR, but this will change in the future. */
+/* Bz */
+/* FIXME: need to change the naming according to the actual CRF */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_fm_name),
+
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_ms_a0, iwl_ax204_name),
+ IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY,
+ iwl_cfg_bz, iwl_fm_name),
+
+/* Ga (Gl) */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_MA, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_MS, IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_ma, iwl_ax204_name),
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_gl, iwl_gl_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_gl, iwl_mtp_name),
/* Sc */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1150,6 +1165,7 @@ static void get_crf_id(struct iwl_trans *iwl_trans)
{
u32 sd_reg_ver_addr;
u32 val = 0;
+ u8 step;
if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
sd_reg_ver_addr = SD_REG_VER_GEN2;
@@ -1168,16 +1184,23 @@ static void get_crf_id(struct iwl_trans *iwl_trans)
iwl_trans->hw_cnv_id =
iwl_read_prph_no_grab(iwl_trans, CNVI_AUX_MISC_CHIP);
+ /* For BZ-W, take B step also when A step is indicated */
+ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W)
+ step = SILICON_B_STEP;
+
/* In BZ, the MAC step must be read from the CNVI aux register */
if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ) {
- u8 step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id);
+ step = CNVI_AUX_MISC_CHIP_MAC_STEP(iwl_trans->hw_cnv_id);
/* For BZ-U, take B step also when A step is indicated */
if ((CNVI_AUX_MISC_CHIP_PROD_TYPE(iwl_trans->hw_cnv_id) ==
CNVI_AUX_MISC_CHIP_PROD_TYPE_BZ_U) &&
step == SILICON_A_STEP)
step = SILICON_B_STEP;
+ }
+ if (CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ ||
+ CSR_HW_REV_TYPE(iwl_trans->hw_rev) == IWL_CFG_MAC_TYPE_BZ_W) {
iwl_trans->hw_rev_step = step;
iwl_trans->hw_rev |= step;
}
@@ -1224,12 +1247,7 @@ static int map_crf_id(struct iwl_trans *iwl_trans)
case REG_CRF_ID_TYPE_GF:
iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12);
break;
- case REG_CRF_ID_TYPE_MR:
- iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_MR << 12);
- break;
case REG_CRF_ID_TYPE_FM:
- case REG_CRF_ID_TYPE_FMI:
- case REG_CRF_ID_TYPE_FMR:
iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
break;
case REG_CRF_ID_TYPE_WHP:
@@ -1488,6 +1506,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!iwl_trans->name)
iwl_trans->name = iwl_trans->cfg->name;
+ IWL_INFO(iwl_trans, "Detected %s\n", iwl_trans->name);
+
if (iwl_trans->trans_cfg->mq_rx_supported) {
if (WARN_ON(!iwl_trans->cfg->num_rbds)) {
ret = -EINVAL;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index 7805a42948af..27a7e0b5b3d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2003-2015, 2018-2023 Intel Corporation
+ * Copyright (C) 2003-2015, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -22,7 +22,6 @@
#include "iwl-io.h"
#include "iwl-op-mode.h"
#include "iwl-drv.h"
-#include "queue/tx.h"
#include "iwl-context-info.h"
/*
@@ -273,7 +272,7 @@ enum iwl_pcie_fw_reset_state {
};
/**
- * enum wl_pcie_imr_status - imr dma transfer state
+ * enum iwl_pcie_imr_status - imr dma transfer state
* @IMR_D2S_IDLE: default value of the dma transfer
* @IMR_D2S_REQUESTED: dma transfer requested
* @IMR_D2S_COMPLETED: dma transfer completed
@@ -287,6 +286,58 @@ enum iwl_pcie_imr_status {
};
/**
+ * struct iwl_pcie_txqs - TX queues data
+ *
+ * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
+ * @page_offs: offset from skb->cb to mac header page pointer
+ * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
+ * @queue_used: bit mask of used queues
+ * @queue_stopped: bit mask of stopped queues
+ * @txq: array of TXQ data structures representing the TXQs
+ * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
+ * @queue_alloc_cmd_ver: queue allocation command version
+ * @bc_pool: bytecount DMA allocations pool
+ * @bc_tbl_size: bytecount table size
+ * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO
+ * (and similar usage)
+ * @cmd: command queue data
+ * @cmd.fifo: FIFO number
+ * @cmd.q_id: queue ID
+ * @cmd.wdg_timeout: watchdog timeout
+ * @tfd: TFD data
+ * @tfd.max_tbs: max number of buffers per TFD
+ * @tfd.size: TFD size
+ * @tfd.addr_size: TFD/TB address size
+ */
+struct iwl_pcie_txqs {
+ unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
+ struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
+ struct dma_pool *bc_pool;
+ size_t bc_tbl_size;
+ bool bc_table_dword;
+ u8 page_offs;
+ u8 dev_cmd_offs;
+ struct iwl_tso_hdr_page __percpu *tso_hdr_page;
+
+ struct {
+ u8 fifo;
+ u8 q_id;
+ unsigned int wdg_timeout;
+ } cmd;
+
+ struct {
+ u8 max_tbs;
+ u16 size;
+ u8 addr_size;
+ } tfd;
+
+ struct iwl_dma_ptr scd_bc_tbls;
+
+ u8 queue_alloc_cmd_ver;
+};
+
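The two queue bitmaps above are sized with BITS_TO_LONGS(). A minimal standalone sketch of that arithmetic (assuming IWL_MAX_TVQM_QUEUES is 512, as in current kernels, and 64-bit longs; both are assumptions here, not taken from this patch):

	#include <stdio.h>

	#define MY_BITS_PER_LONG (8 * (int)sizeof(long))
	#define MY_BITS_TO_LONGS(n) \
		(((n) + MY_BITS_PER_LONG - 1) / MY_BITS_PER_LONG)

	int main(void)
	{
		/* 512 queue bits -> 8 longs on an LP64 system */
		printf("%d\n", MY_BITS_TO_LONGS(512));
		return 0;
	}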
+/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
* @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues
@@ -367,6 +418,7 @@ enum iwl_pcie_imr_status {
* @is_down: indicates the NIC is down
* @isr_stats: interrupt statistics
* @napi_dev: (fake) netdev for NAPI registration
 * @txqs: transport TX queues data
*/
struct iwl_trans_pcie {
struct iwl_rxq *rxq;
@@ -386,7 +438,7 @@ struct iwl_trans_pcie {
dma_addr_t iml_dma_addr;
struct iwl_trans *trans;
- struct net_device napi_dev;
+ struct net_device *napi_dev;
/* INT ICT Table */
__le32 *ict_tbl;
@@ -464,6 +516,8 @@ struct iwl_trans_pcie {
enum iwl_pcie_imr_status imr_status;
wait_queue_head_t imr_waitq;
char rf_name[32];
+
+ struct iwl_pcie_txqs txqs;
};
static inline struct iwl_trans_pcie *
@@ -538,6 +592,33 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans);
/*****************************************************
* TX / HCMD
******************************************************/
+/* We need 2 entries for the TX command and header, and another one might
+ * be needed for potential data in the SKB's head. The remaining ones can
+ * be used for frags.
+ */
+#define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3)
+
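A quick check of that TB budget (a sketch; the values 25 for gen2's IWL_TFH_NUM_TBS and 20 for gen1's IWL_NUM_OF_TBS match current kernels but are quoted here as assumptions):

	#include <stdio.h>

	int main(void)
	{
		/* 2 TBs for cmd+header, 1 for skb head data, rest for frags */
		int gen2_max_tbs = 25, gen1_max_tbs = 20;

		printf("gen2 frags: %d\n", gen2_max_tbs - 3);	/* 22 */
		printf("gen1 frags: %d\n", gen1_max_tbs - 3);	/* 17 */
		return 0;
	}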
+struct iwl_tso_hdr_page {
+ struct page *page;
+ u8 *pos;
+};
+
+/*
+ * Note that we put this struct *last* in the page. By doing that, we ensure
+ * that no TB referencing this page can trigger the 32-bit boundary hardware
+ * bug.
+ */
+struct iwl_tso_page_info {
+ dma_addr_t dma_addr;
+ struct page *next;
+ refcount_t use_count;
+};
+
+#define IWL_TSO_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(struct iwl_tso_page_info))
+#define IWL_TSO_PAGE_INFO(addr) \
+ ((struct iwl_tso_page_info *)(((unsigned long)addr & PAGE_MASK) + \
+ IWL_TSO_PAGE_DATA_SIZE))
+
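A self-contained sketch of the trailing-metadata trick behind IWL_TSO_PAGE_INFO(): any address inside the page masks back to the page start, and the info struct sits at a fixed offset from there. Userspace stand-ins for PAGE_SIZE/PAGE_MASK are used, with 4 KiB pages assumed:

	#include <stdint.h>
	#include <stdio.h>

	#define PG_SIZE 4096UL
	#define PG_MASK (~(PG_SIZE - 1))

	struct tso_page_info { uint64_t dma_addr; };

	#define DATA_SIZE (PG_SIZE - sizeof(struct tso_page_info))
	#define PAGE_INFO(addr) \
		((struct tso_page_info *)(((uintptr_t)(addr) & PG_MASK) + DATA_SIZE))

	int main(void)
	{
		_Alignas(4096) static unsigned char page[4096];

		/* any address inside the page resolves to the same struct */
		PAGE_INFO(page)->dma_addr = 0x12345000;
		printf("0x%llx\n",
		       (unsigned long long)PAGE_INFO(page + 100)->dma_addr);
		return 0;
	}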
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
@@ -552,10 +633,171 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
-int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_rx_cmd_buffer *rxb);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
+int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue);
+
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+ unsigned int len);
+struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta,
+ u8 **hdr, unsigned int hdr_room);
+
+void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta);
+
+static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr)
+{
+ dma_addr_t res;
+
+ res = IWL_TSO_PAGE_INFO(addr)->dma_addr;
+ res += (unsigned long)addr & ~PAGE_MASK;
+
+ return res;
+}
+
+static inline dma_addr_t
+iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
+{
+ return txq->first_tb_dma +
+ sizeof(struct iwl_pcie_first_tb_buf) * idx;
+}
+
+static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
+{
+ return index & (q->n_window - 1);
+}
+
+static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq, int idx)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (trans->trans_cfg->gen2)
+ idx = iwl_txq_get_cmd_index(txq, idx);
+
+ return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx;
+}
+
+/*
+ * We need this inline in case dma_addr_t is only 32 bits - since the
+ * hardware is always 64-bit, the issue can still occur in that case,
+ * so use u64 for 'phys' here to force the addition to be done in 64 bits.
+ */
+static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
+{
+ return upper_32_bits(phys) != upper_32_bits(phys + len);
+}
+
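A worked example of the check above (a sketch, not driver code): a buffer starting 16 bytes below the 4 GiB line crosses the boundary with a 32-byte TB, while a low address does not.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	static bool crosses_4g(uint64_t phys, uint16_t len)
	{
		return (phys >> 32) != ((phys + len) >> 32);
	}

	int main(void)
	{
		printf("%d\n", crosses_4g(0xFFFFFFF0ULL, 32));	/* 1: crosses */
		printf("%d\n", crosses_4g(0x1000ULL, 32));	/* 0: safe */
		return 0;
	}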
+int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
+
+static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
+ iwl_op_mode_queue_full(trans->op_mode, txq->id);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
+ txq->id);
+ }
+}
+
+/**
+ * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
+ * @trans: the transport (for configuration data)
+ * @index: current index
+ */
+static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
+{
+ return ++index &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+}
+
+/**
+ * iwl_txq_dec_wrap - decrement queue index, wrap back to end
+ * @trans: the transport (for configuration data)
+ * @index: current index
+ */
+static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
+{
+ return --index &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+}
+
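Since max_tfd_queue_size is a power of two, the AND in the two helpers above implements modular wrap-around in both directions; a quick sketch with an assumed 256-entry ring:

	#include <stdio.h>

	int main(void)
	{
		int size = 256, idx = 255;

		idx = (idx + 1) & (size - 1);	/* 255 -> 0 */
		printf("inc: %d\n", idx);
		idx = (idx - 1) & (size - 1);	/* 0 -> 255 */
		printf("dec: %d\n", idx);
		return 0;
	}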
+void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
+
+static inline void
+iwl_trans_pcie_wake_queue(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (test_and_clear_bit(txq->id, trans_pcie->txqs.queue_stopped)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
+ iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
+ }
+}
+
+int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd, dma_addr_t addr,
+ u16 len);
+
+static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
+ struct iwl_tfh_tfd *tfd)
+{
+ tfd->num_tbs = 0;
+
+ iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
+ trans->invalid_tx_cmd.size);
+}
+
+void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd);
+
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
+ u32 sta_mask, u8 tid,
+ int size, unsigned int timeout);
+
+int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id);
+
+void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
+void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
+int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue);
+int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id,
+ int queue_size);
+
+static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
+ void *_tfd, u8 idx)
+{
+ struct iwl_tfd *tfd;
+ struct iwl_tfd_tb *tb;
+
+ if (trans->trans_cfg->gen2) {
+ struct iwl_tfh_tfd *tfh_tfd = _tfd;
+ struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
+
+ return le16_to_cpu(tfh_tb->tb_len);
+ }
+
+ tfd = (struct iwl_tfd *)_tfd;
+ tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
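The gen1 branch above decodes the packed hi_n_len field: the low nibble carries the top 4 bits of a 36-bit DMA address and the 12-bit length sits above it, hence the ">> 4". A sketch of the packing side (field layout inferred from the decode; values illustrative):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t addr = 0x9ABCD1234ULL;	/* 36-bit DMA address */
		uint16_t len = 0x456;		/* 12-bit length */
		uint16_t hi_n_len = (uint16_t)((addr >> 32) | (len << 4));

		printf("len=0x%x hi_addr=0x%x\n",
		       hi_n_len >> 4, hi_n_len & 0xF);
		return 0;
	}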
+void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs, bool is_flush);
+void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
+void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze);
+int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx);
+int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm);
/*****************************************************
* Error handling
@@ -822,12 +1064,51 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
+void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans);
#else
static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { }
#endif
void iwl_pcie_rx_allocator_work(struct work_struct *data);
+/* common trans ops for all generations transports */
+void iwl_trans_pcie_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg);
+int iwl_trans_pcie_start_hw(struct iwl_trans *trans);
+void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans);
+void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val);
+void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val);
+u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs);
+u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg);
+void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val);
+int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords);
+int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords);
+int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership);
+struct iwl_trans_dump_data *
+iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx);
+int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status,
+ bool test, bool reset);
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset);
+void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable);
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
+void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value);
+int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val);
+bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
+void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans);
+
+/* transport gen 1 exported functions */
+void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw, bool run_in_rfkill);
+void iwl_trans_pcie_stop_device(struct iwl_trans *trans);
+
/* common functions that are used by gen2 transport */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans);
void iwl_pcie_apm_config(struct iwl_trans *trans);
@@ -849,7 +1130,7 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill);
-void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr);
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans);
int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd);
void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans);
@@ -864,5 +1145,7 @@ void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr, u32 byte_cnt);
int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
u32 dst_addr, u64 src_addr, u32 byte_cnt);
+int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data);
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 9c2461ba13c5..afb88eab8174 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -1000,6 +1000,11 @@ void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
+static inline struct iwl_trans_pcie *iwl_netdev_to_trans_pcie(struct net_device *dev)
+{
+ return *(struct iwl_trans_pcie **)netdev_priv(dev);
+}
+
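The accessor above relies on the dummy netdev's private area holding exactly one back-pointer, as set up in iwl_trans_pcie_alloc() later in this patch. A minimal standalone sketch of the pattern, with illustrative names (struct owner stands in for iwl_trans_pcie, malloc for alloc_netdev_dummy()/netdev_priv()):

	#include <stdio.h>
	#include <stdlib.h>

	struct owner { int id; };

	int main(void)
	{
		struct owner me = { .id = 42 };
		void *priv = malloc(sizeof(struct owner *));
		struct owner **slot = priv;

		if (!priv)
			return 1;
		*slot = &me;	/* mirrors: *priv = trans_pcie; */
		printf("%d\n", (*(struct owner **)priv)->id);	/* 42 */
		free(priv);
		return 0;
	}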
static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
{
struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
@@ -1007,7 +1012,7 @@ static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
struct iwl_trans *trans;
int ret;
- trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
trans = trans_pcie->trans;
ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
@@ -1034,7 +1039,7 @@ static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
struct iwl_trans *trans;
int ret;
- trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
+ trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
trans = trans_pcie->trans;
ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
@@ -1131,7 +1136,7 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
if (trans_pcie->msix_enabled)
poll = iwl_pcie_napi_poll_msix;
- netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
+ netif_napi_add(trans_pcie->napi_dev, &rxq->napi,
poll);
napi_enable(&rxq->napi);
}
@@ -1296,7 +1301,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
int i)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
bool page_stolen = false;
int max_len = trans_pcie->rx_buf_bytes;
u32 offset = 0;
@@ -1673,6 +1678,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
*/
static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
@@ -1689,9 +1695,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
}
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- if (!trans->txqs.txq[i])
+ if (!trans_pcie->txqs.txq[i])
continue;
- del_timer(&trans->txqs.txq[i]->stuck_timer);
+ del_timer(&trans_pcie->txqs.txq[i]->stuck_timer);
}
/* The STATUS_FW_ERROR bit is set in this function. This must happen
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index a4a4772330cf..18dda89b7985 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-prph.h"
@@ -247,7 +247,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
return -ENOMEM;
/* Allocate or reset and init all Tx and Command queues */
- if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size))
+ if (iwl_txq_gen2_init(trans, trans_pcie->txqs.cmd.q_id, queue_size))
return -ENOMEM;
/* enable shadow regs in HW */
@@ -339,16 +339,17 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans)
pos += scnprintf(buf + pos, buflen - pos, "\n");
}
-void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
iwl_pcie_reset_ict(trans);
 /* make sure all queues are not stopped/used */
- memset(trans->txqs.queue_stopped, 0,
- sizeof(trans->txqs.queue_stopped));
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+ memset(trans_pcie->txqs.queue_stopped, 0,
+ sizeof(trans_pcie->txqs.queue_stopped));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
/* now that we got alive we can free the fw image & the context info.
 * paging memory cannot be freed, though, since FW will still use it
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index 6c76b2dd6878..719ddc4b72c5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2007-2015, 2018-2023 Intel Corporation
+ * Copyright (C) 2007-2015, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -127,8 +127,7 @@ out:
kfree(buf);
}
-static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans,
- bool retake_ownership)
+int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
@@ -1336,8 +1335,8 @@ void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
}
}
-static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw, bool run_in_rfkill)
+int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw, bool run_in_rfkill)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
@@ -1423,7 +1422,7 @@ out:
return ret;
}
-static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
iwl_pcie_reset_ict(trans);
iwl_pcie_tx_start(trans, scd_addr);
@@ -1458,7 +1457,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
}
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool was_in_rfkill;
@@ -1505,9 +1504,17 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
iwl_pcie_synchronize_irqs(trans);
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
+ } else {
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ }
if (reset) {
/*
@@ -1552,8 +1559,7 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
return 0;
}
-static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
- bool reset)
+int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
int ret;
@@ -1571,9 +1577,9 @@ static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
return 0;
}
-static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
- enum iwl_d3_status *status,
- bool test, bool reset)
+int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
+ enum iwl_d3_status *status,
+ bool test, bool reset)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
@@ -1586,8 +1592,12 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
goto out;
}
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
+ else
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
ret = iwl_finish_nic_init(trans);
if (ret)
@@ -1874,7 +1884,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return 0;
}
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
@@ -1886,7 +1896,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return ret;
}
-static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
+void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1906,17 +1916,17 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
iwl_pcie_synchronize_irqs(trans);
}
-static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
+void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
-static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
+void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
-static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
+u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
@@ -1929,7 +1939,7 @@ static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
return 0x000FFFFF;
}
-static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
+u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
u32 mask = iwl_trans_pcie_prph_msk(trans);
@@ -1938,8 +1948,7 @@ static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}
-static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
- u32 val)
+void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
{
u32 mask = iwl_trans_pcie_prph_msk(trans);
@@ -1948,20 +1957,20 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
-static void iwl_trans_pcie_configure(struct iwl_trans *trans,
- const struct iwl_trans_config *trans_cfg)
+void iwl_trans_pcie_configure(struct iwl_trans *trans,
+ const struct iwl_trans_config *trans_cfg)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
/* free all first - we might be reconfigured for a different size */
iwl_pcie_free_rbs_pool(trans);
- trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
- trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
- trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
- trans->txqs.page_offs = trans_cfg->cb_data_offs;
- trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
- trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
+ trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue;
+ trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo;
+ trans_pcie->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
+ trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs;
+ trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
+ trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
trans_pcie->n_no_reclaim_cmds = 0;
@@ -1980,19 +1989,12 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
- trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
+ trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword;
trans_pcie->scd_set_active = trans_cfg->scd_set_active;
trans->command_groups = trans_cfg->command_groups;
trans->command_groups_size = trans_cfg->command_groups_size;
- /* Initialize NAPI here - it should be before registering to mac80211
- * in the opmode but after the HW struct is allocated.
- * As this function may be called again in some corner cases don't
- * do anything if NAPI was already initialized.
- */
- if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
- init_dummy_netdev(&trans_pcie->napi_dev);
trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
}
@@ -2074,6 +2076,8 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_free_ict(trans);
}
+ free_netdev(trans_pcie->napi_dev);
+
iwl_pcie_free_invalid_tx_cmd(trans);
iwl_pcie_free_fw_monitor(trans);
@@ -2084,15 +2088,20 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
trans->dev);
mutex_destroy(&trans_pcie->mutex);
- iwl_trans_free(trans);
-}
-static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
-{
- if (state)
- set_bit(STATUS_TPOWER_PMI, &trans->status);
- else
- clear_bit(STATUS_TPOWER_PMI, &trans->status);
+ if (trans_pcie->txqs.tso_hdr_page) {
+ for_each_possible_cpu(i) {
+ struct iwl_tso_hdr_page *p =
+ per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i);
+
+ if (p && p->page)
+ __free_page(p->page);
+ }
+
+ free_percpu(trans_pcie->txqs.tso_hdr_page);
+ }
+
+ iwl_trans_free(trans);
}
struct iwl_trans_pcie_removal {
@@ -2258,7 +2267,7 @@ out:
return true;
}
-static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
+bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
{
bool ret;
@@ -2272,7 +2281,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
return false;
}
-static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
+void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2302,8 +2311,8 @@ out:
spin_unlock_bh(&trans_pcie->reg_lock);
}
-static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
- void *buf, int dwords)
+int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+ void *buf, int dwords)
{
#define IWL_MAX_HW_ERRS 5
unsigned int num_consec_hw_errors = 0;
@@ -2352,8 +2361,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
return 0;
}
-static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
- const void *buf, int dwords)
+int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+ const void *buf, int dwords)
{
int offs, ret = 0;
const u32 *vals = buf;
@@ -2370,8 +2379,8 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
return ret;
}
-static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
- u32 *val)
+int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
+ u32 *val)
{
return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
ofs, val);
@@ -2379,8 +2388,8 @@ static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
#define IWL_FLUSH_WAIT_MS 2000
-static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
- struct iwl_trans_rxq_dma_data *data)
+int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+ struct iwl_trans_rxq_dma_data *data)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2395,8 +2404,9 @@ static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
return 0;
}
-static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
+int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq;
unsigned long now = jiffies;
bool overflow_tx;
@@ -2406,11 +2416,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
if (test_bit(STATUS_TRANS_DEAD, &trans->status))
return -ENODEV;
- if (!test_bit(txq_idx, trans->txqs.queue_used))
+ if (!test_bit(txq_idx, trans_pcie->txqs.queue_used))
return -EINVAL;
IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
- txq = trans->txqs.txq[txq_idx];
+ txq = trans_pcie->txqs.txq[txq_idx];
spin_lock_bh(&txq->lock);
overflow_tx = txq->overflow_tx ||
@@ -2456,8 +2466,9 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
return 0;
}
-static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
+int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cnt;
int ret = 0;
@@ -2466,9 +2477,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
cnt < trans->trans_cfg->base_params->num_of_queues;
cnt++) {
- if (cnt == trans->txqs.cmd.q_id)
+ if (cnt == trans_pcie->txqs.cmd.q_id)
continue;
- if (!test_bit(cnt, trans->txqs.queue_used))
+ if (!test_bit(cnt, trans_pcie->txqs.queue_used))
continue;
if (!(BIT(cnt) & txq_bm))
continue;
@@ -2481,8 +2492,8 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
return ret;
}
-static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
- u32 mask, u32 value)
+void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
+ u32 mask, u32 value)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2641,12 +2652,13 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
struct iwl_dbgfs_tx_queue_state *state = v;
struct iwl_trans *trans = priv->trans;
- struct iwl_txq *txq = trans->txqs.txq[state->pos];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos];
seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
(unsigned int)state->pos,
- !!test_bit(state->pos, trans->txqs.queue_used),
- !!test_bit(state->pos, trans->txqs.queue_stopped));
+ !!test_bit(state->pos, trans_pcie->txqs.queue_used),
+ !!test_bit(state->pos, trans_pcie->txqs.queue_stopped));
if (txq)
seq_printf(seq,
"read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
@@ -2656,7 +2668,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
else
seq_puts(seq, "(unallocated)");
- if (state->pos == trans->txqs.cmd.q_id)
+ if (state->pos == trans_pcie->txqs.cmd.q_id)
seq_puts(seq, " (HCMD)");
seq_puts(seq, "\n");
@@ -3060,7 +3072,7 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
DEBUGFS_ADD_FILE(rf, dir, 0400);
}
-static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
+void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct cont_rec *data = &trans_pcie->fw_mon_data;
@@ -3073,10 +3085,11 @@ static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 cmdlen = 0;
int i;
- for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
+ for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++)
cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
return cmdlen;
@@ -3337,15 +3350,14 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
return 0;
}
-static struct iwl_trans_dump_data *
-iwl_trans_pcie_dump_data(struct iwl_trans *trans,
- u32 dump_mask,
+struct iwl_trans_dump_data *
+iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
const struct iwl_dump_sanitize_ops *sanitize_ops,
void *sanitize_ctx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
- struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs = 0, monitor_len = 0;
@@ -3412,7 +3424,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans,
data = (void *)dump_data->data;
if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
- u16 tfd_size = trans->txqs.tfd.size;
+ u16 tfd_size = trans_pcie->txqs.tfd.size;
data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
txcmd = (void *)data->data;
@@ -3488,7 +3500,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans,
return dump_data;
}
-static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
+void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
{
if (enable)
iwl_enable_interrupts(trans);
@@ -3496,7 +3508,7 @@ static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
iwl_disable_interrupts(trans);
}
-static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
u32 inta_addr, sw_err_bit;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -3515,95 +3527,16 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}
-#define IWL_TRANS_COMMON_OPS \
- .op_mode_leave = iwl_trans_pcie_op_mode_leave, \
- .write8 = iwl_trans_pcie_write8, \
- .write32 = iwl_trans_pcie_write32, \
- .read32 = iwl_trans_pcie_read32, \
- .read_prph = iwl_trans_pcie_read_prph, \
- .write_prph = iwl_trans_pcie_write_prph, \
- .read_mem = iwl_trans_pcie_read_mem, \
- .write_mem = iwl_trans_pcie_write_mem, \
- .read_config32 = iwl_trans_pcie_read_config32, \
- .configure = iwl_trans_pcie_configure, \
- .set_pmi = iwl_trans_pcie_set_pmi, \
- .sw_reset = iwl_trans_pcie_sw_reset, \
- .grab_nic_access = iwl_trans_pcie_grab_nic_access, \
- .release_nic_access = iwl_trans_pcie_release_nic_access, \
- .set_bits_mask = iwl_trans_pcie_set_bits_mask, \
- .dump_data = iwl_trans_pcie_dump_data, \
- .d3_suspend = iwl_trans_pcie_d3_suspend, \
- .d3_resume = iwl_trans_pcie_d3_resume, \
- .interrupts = iwl_trans_pci_interrupts, \
- .sync_nmi = iwl_trans_pcie_sync_nmi, \
- .imr_dma_data = iwl_trans_pcie_copy_imr \
-
-static const struct iwl_trans_ops trans_ops_pcie = {
- IWL_TRANS_COMMON_OPS,
- .start_hw = iwl_trans_pcie_start_hw,
- .fw_alive = iwl_trans_pcie_fw_alive,
- .start_fw = iwl_trans_pcie_start_fw,
- .stop_device = iwl_trans_pcie_stop_device,
-
- .send_cmd = iwl_pcie_enqueue_hcmd,
-
- .tx = iwl_trans_pcie_tx,
- .reclaim = iwl_txq_reclaim,
-
- .txq_disable = iwl_trans_pcie_txq_disable,
- .txq_enable = iwl_trans_pcie_txq_enable,
-
- .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
-
- .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
-
- .freeze_txq_timer = iwl_trans_txq_freeze_timer,
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
-#endif
-};
-
-static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
- IWL_TRANS_COMMON_OPS,
- .start_hw = iwl_trans_pcie_start_hw,
- .fw_alive = iwl_trans_pcie_gen2_fw_alive,
- .start_fw = iwl_trans_pcie_gen2_start_fw,
- .stop_device = iwl_trans_pcie_gen2_stop_device,
-
- .send_cmd = iwl_pcie_gen2_enqueue_hcmd,
-
- .tx = iwl_txq_gen2_tx,
- .reclaim = iwl_txq_reclaim,
-
- .set_q_ptrs = iwl_txq_set_q_ptrs,
-
- .txq_alloc = iwl_txq_dyn_alloc,
- .txq_free = iwl_txq_dyn_free,
- .wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
- .rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
- .load_pnvm = iwl_trans_pcie_ctx_info_gen3_load_pnvm,
- .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
- .load_reduce_power = iwl_trans_pcie_ctx_info_gen3_load_reduce_power,
- .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power,
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
-#endif
-};
-
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
const struct pci_device_id *ent,
const struct iwl_cfg_trans_params *cfg_trans)
{
- struct iwl_trans_pcie *trans_pcie;
+ struct iwl_trans_pcie *trans_pcie, **priv;
struct iwl_trans *trans;
int ret, addr_size;
- const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
void __iomem * const *table;
u32 bar0;
- if (!cfg_trans->gen2)
- ops = &trans_ops_pcie;
-
/* reassign our BAR 0 if invalid due to possible runtime PM races */
pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0);
if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) {
@@ -3616,13 +3549,70 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (ret)
return ERR_PTR(ret);
- trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
+ trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
cfg_trans);
if (!trans)
return ERR_PTR(-ENOMEM);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ if (trans->trans_cfg->gen2) {
+ trans_pcie->txqs.tfd.addr_size = 64;
+ trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
+ trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
+ } else {
+ trans_pcie->txqs.tfd.addr_size = 36;
+ trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
+ trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
+ }
+ trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);
+
+ trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
+ if (!trans_pcie->txqs.tso_hdr_page) {
+ ret = -ENOMEM;
+ goto out_free_trans;
+ }
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ trans_pcie->txqs.bc_tbl_size =
+ sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
+ else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ trans_pcie->txqs.bc_tbl_size =
+ sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
+ else
+ trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
+ /*
+ * For gen2 devices, we use a single allocation for each byte-count
+ * table, but they're pretty small (1k) so use a DMA pool that we
+ * allocate here.
+ */
+ if (trans->trans_cfg->gen2) {
+ trans_pcie->txqs.bc_pool =
+ dmam_pool_create("iwlwifi:bc", trans->dev,
+ trans_pcie->txqs.bc_tbl_size,
+ 256, 0);
+ if (!trans_pcie->txqs.bc_pool) {
+ ret = -ENOMEM;
+ goto out_free_tso;
+ }
+ }
+
+ /* Some things must not change even if the config does */
+ WARN_ON(trans_pcie->txqs.tfd.addr_size !=
+ (trans->trans_cfg->gen2 ? 64 : 36));
+
+ /* Initialize NAPI here - it should be before registering to mac80211
+ * in the opmode but after the HW struct is allocated.
+ */
+ trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *));
+ if (!trans_pcie->napi_dev) {
+ ret = -ENOMEM;
+ goto out_free_tso;
+ }
+ /* The private struct in netdev is a pointer to struct iwl_trans_pcie */
+ priv = netdev_priv(trans_pcie->napi_dev);
+ *priv = trans_pcie;
+
trans_pcie->trans = trans;
trans_pcie->opmode_down = true;
spin_lock_init(&trans_pcie->irq_lock);
@@ -3637,7 +3627,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
WQ_HIGHPRI | WQ_UNBOUND, 0);
if (!trans_pcie->rba.alloc_wq) {
ret = -ENOMEM;
- goto out_free_trans;
+ goto out_free_ndev;
}
INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
@@ -3656,7 +3646,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
pci_set_master(pdev);
- addr_size = trans->txqs.tfd.addr_size;
+ addr_size = trans_pcie->txqs.tfd.addr_size;
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
if (ret) {
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -3757,6 +3747,10 @@ out_free_ict:
iwl_pcie_free_ict(trans);
out_no_pci:
destroy_workqueue(trans_pcie->rba.alloc_wq);
+out_free_ndev:
+ free_netdev(trans_pcie->napi_dev);
+out_free_tso:
+ free_percpu(trans_pcie->txqs.tso_hdr_page);
out_free_trans:
iwl_trans_free(trans);
return ERR_PTR(ret);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index aabbef114bc2..b1846abb99b7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2020, 2023 Intel Corporation
+ * Copyright (C) 2018-2020, 2023-2024 Intel Corporation
*/
#include <net/tso.h>
#include <linux/tcp.h>
@@ -11,7 +11,1183 @@
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"
-#include "queue/tx.h"
+#include "fw/api/commands.h"
+#include "fw/api/datapath.h"
+#include "iwl-scd.h"
+
+static struct page *get_workaround_page(struct iwl_trans *trans,
+ struct sk_buff *skb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tso_page_info *info;
+ struct page **page_ptr;
+ struct page *ret;
+ dma_addr_t phys;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+
+ ret = alloc_page(GFP_ATOMIC);
+ if (!ret)
+ return NULL;
+
+ info = IWL_TSO_PAGE_INFO(page_address(ret));
+
+ /* Create a DMA mapping for the page */
+ phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(trans->dev, phys))) {
+ __free_page(ret);
+ return NULL;
+ }
+
+ /* Store physical address and set use count */
+ info->dma_addr = phys;
+ refcount_set(&info->use_count, 1);
+
+ /* set the chaining pointer to the previous page if there */
+ info->next = *page_ptr;
+ *page_ptr = ret;
+
+ return ret;
+}
+
+/*
+ * Add a TB and if needed apply the FH HW bug workaround;
+ * meta != NULL indicates that it's a page mapping and we
+ * need to dma_unmap_page() and set the meta->tbs bit in
+ * this case.
+ */
+static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ dma_addr_t phys, void *virt,
+ u16 len, struct iwl_cmd_meta *meta,
+ bool unmap)
+{
+ dma_addr_t oldphys = phys;
+ struct page *page;
+ int ret;
+
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+
+ if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
+ ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
+
+ if (ret < 0)
+ goto unmap;
+
+ if (meta)
+ meta->tbs |= BIT(ret);
+
+ ret = 0;
+ goto trace;
+ }
+
+ /*
+ * Work around a hardware bug. If (as expressed in the
+ * condition above) the TB crosses or ends at a 32-bit
+ * (4 GiB) boundary, then the next TB may be accessed with
+ * the wrong address.
+ * To work around it, copy the data elsewhere and make
+ * a new mapping for it so the device will not fail.
+ */
+
+ if (WARN_ON(len > IWL_TSO_PAGE_DATA_SIZE)) {
+ ret = -ENOBUFS;
+ goto unmap;
+ }
+
+ page = get_workaround_page(trans, skb);
+ if (!page) {
+ ret = -ENOMEM;
+ goto unmap;
+ }
+
+ memcpy(page_address(page), virt, len);
+
+ /*
+ * This is a bit odd, but performance does not matter here; what
+ * matters are the expectations of the calling code and the TB
+ * cleanup function.
+ *
+ * As such, if unmap is set, then create another mapping for the TB
+ * entry as it will be unmapped later. On the other hand, if it is not
+ * set, then the TB entry will not be unmapped and instead we simply
+ * reference and sync the mapping that get_workaround_page() created.
+ */
+ if (unmap) {
+ phys = dma_map_single(trans->dev, page_address(page), len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, phys)))
+ return -ENOMEM;
+ } else {
+ phys = iwl_pcie_get_tso_page_phys(page_address(page));
+ dma_sync_single_for_device(trans->dev, phys, len,
+ DMA_TO_DEVICE);
+ }
+
+ ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
+ if (ret < 0) {
+ /* unmap the new allocation as single */
+ oldphys = phys;
+ meta = NULL;
+ goto unmap;
+ }
+
+ IWL_DEBUG_TX(trans,
+ "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
+ len, (unsigned long long)oldphys,
+ (unsigned long long)phys);
+
+ ret = 0;
+unmap:
+ if (!unmap)
+ goto trace;
+
+ if (meta)
+ dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
+trace:
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
+
+ return ret;
+}
+
+static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ struct iwl_cmd_meta *out_meta,
+ int start_len,
+ u8 hdr_len,
+ struct iwl_device_tx_cmd *dev_cmd)
+{
+#ifdef CONFIG_INET
+ struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
+ unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int data_offset = 0;
+ dma_addr_t start_hdr_phys;
+ u16 length, amsdu_pad;
+ u8 *start_hdr;
+ struct sg_table *sgt;
+ struct tso_t tso;
+
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
+ &dev_cmd->hdr, start_len, 0);
+
+ ip_hdrlen = skb_network_header_len(skb);
+ snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
+ total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
+ amsdu_pad = 0;
+
+ /* total amount of header we may need for this A-MSDU */
+ hdr_room = DIV_ROUND_UP(total_len, mss) *
+ (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
+
+ /* Our device supports 9 segments at most, so it will fit in one page */
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ if (!sgt)
+ return -ENOMEM;
+
+ start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+
+ /*
+ * Pull the ieee80211 header to be able to use TSO core,
+ * we will restore it for the tx_status flow.
+ */
+ skb_pull(skb, hdr_len);
+
+ /*
+ * Remove the length of all the headers that we don't actually
+ * have in the MPDU by themselves, but that we duplicate into
+ * all the different MSDUs inside the A-MSDU.
+ */
+ le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
+
+ tso_start(skb, &tso);
+
+ while (total_len) {
+ /* this is the data left for this subframe */
+ unsigned int data_left = min_t(unsigned int, mss, total_len);
+ unsigned int tb_len;
+ dma_addr_t tb_phys;
+ u8 *pos_hdr = start_hdr;
+
+ total_len -= data_left;
+
+ memset(pos_hdr, 0, amsdu_pad);
+ pos_hdr += amsdu_pad;
+ amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
+ data_left)) & 0x3;
+ ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
+ pos_hdr += ETH_ALEN;
+ ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
+ pos_hdr += ETH_ALEN;
+
+ length = snap_ip_tcp_hdrlen + data_left;
+ *((__be16 *)pos_hdr) = cpu_to_be16(length);
+ pos_hdr += sizeof(length);
+
+ /*
+ * This will copy the SNAP as well, which will be considered
+ * part of the MAC header.
+ */
+ tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
+
+ pos_hdr += snap_ip_tcp_hdrlen;
+
+ tb_len = pos_hdr - start_hdr;
+ tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+
+ /*
+ * No need for _with_wa, this is from the TSO page and
+ * we leave some space at the end of it so can't hit
+ * the buggy scenario.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
+ trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
+ tb_phys, tb_len);
+ /* add this subframe's headers' length to the tx_cmd */
+ le16_add_cpu(&tx_cmd->len, tb_len);
+
+ /* prepare the start_hdr for the next subframe */
+ start_hdr = pos_hdr;
+
+ /* put the payload */
+ while (data_left) {
+ int ret;
+
+ tb_len = min_t(unsigned int, tso.size, data_left);
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset,
+ tb_len);
+ /* Not a real mapping error, use direct comparison */
+ if (unlikely(tb_phys == DMA_MAPPING_ERROR))
+ goto out_err;
+
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
+ tb_phys, tso.data,
+ tb_len, NULL, false);
+ if (ret)
+ goto out_err;
+
+ data_left -= tb_len;
+ data_offset += tb_len;
+ tso_build_data(skb, &tso, tb_len);
+ }
+ }
+
+ dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
+ DMA_TO_DEVICE);
+
+ /* re-add the WiFi header */
+ skb_push(skb, hdr_len);
+
+ return 0;
+
+out_err:
+#endif
+ return -EINVAL;
+}
+
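The amsdu_pad arithmetic in the loop above keeps every subframe after the first 4-byte aligned. A worked example (header sizes assumed for illustration: 14-byte ethhdr, 8-byte SNAP, 20-byte IPv4, 20-byte TCP):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ethhdr = 14, snap_ip_tcp = 8 + 20 + 20;
		unsigned int data_left = 1349;
		unsigned int sublen = ethhdr + snap_ip_tcp + data_left;
		unsigned int pad = (4 - sublen) & 0x3;

		/* 1411 + pad 1 = 1412, a multiple of 4 */
		printf("subframe %u -> pad %u\n", sublen, pad);
		return 0;
	}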
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int len;
+ void *tb1_addr;
+
+ tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ /* do not align A-MSDU to dword as the subframe header aligns it */
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
+
+ if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, out_meta,
+ len + IWL_FIRST_TB_SIZE, hdr_len, dev_cmd))
+ goto out_err;
+
+ /* building the A-MSDU might have changed this data, memcpy it now */
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+ return tfd;
+
+out_err:
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
+ struct sk_buff *skb,
+ struct iwl_tfh_tfd *tfd,
+ struct iwl_cmd_meta *out_meta)
+{
+ int i;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ dma_addr_t tb_phys;
+ unsigned int fragsz = skb_frag_size(frag);
+ int ret;
+
+ if (!fragsz)
+ continue;
+
+ tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
+ fragsz, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb_frag_address(frag),
+ fragsz, out_meta, true);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct
+iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta,
+ int hdr_len,
+ int tx_cmd_len,
+ bool pad)
+{
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ dma_addr_t tb_phys;
+ int len, tb1_len, tb2_len;
+ void *tb1_addr;
+ struct sk_buff *frag;
+
+ tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
+
+ /* The first TB points to bi-directional DMA data */
+ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
+
+ /*
+ * No need for _with_wa, the first TB allocation is aligned up
+ * to a 64-byte boundary and thus can't be at the end or cross
+ * a page boundary (much less a 2^32 boundary).
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+ /*
+ * The second TB (tb1) points to the remainder of the TX command
+ * and the 802.11 header - dword aligned size
+ * (This calculation modifies the TX command, so do it before the
+ * setup of the first TB)
+ */
+ len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+ IWL_FIRST_TB_SIZE;
+
+ if (pad)
+ tb1_len = ALIGN(len, 4);
+ else
+ tb1_len = len;
+
+ /* map the data for TB1 */
+ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+ tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ goto out_err;
+ /*
+ * No need for _with_wa(), we ensure (via alignment) that the data
+ * here can never cross or end at a page boundary.
+ */
+ iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
+ trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
+ IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
+
+ /* set up TFD's third entry to point to remainder of skb's head */
+ tb2_len = skb_headlen(skb) - hdr_len;
+
+ if (tb2_len > 0) {
+ int ret;
+
+ tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
+ tb2_len, DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ skb->data + hdr_len, tb2_len,
+ NULL, true);
+ if (ret)
+ goto out_err;
+ }
+
+ if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
+ goto out_err;
+
+ skb_walk_frags(skb, frag) {
+ int ret;
+
+ tb_phys = dma_map_single(trans->dev, frag->data,
+ skb_headlen(frag), DMA_TO_DEVICE);
+ ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
+ frag->data,
+ skb_headlen(frag), NULL,
+ true);
+ if (ret)
+ goto out_err;
+ if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
+ goto out_err;
+ }
+
+ return tfd;
+
+out_err:
+ iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
+ return NULL;
+}
+
+static
+struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_device_tx_cmd *dev_cmd,
+ struct sk_buff *skb,
+ struct iwl_cmd_meta *out_meta)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
+ int len, hdr_len;
+ bool amsdu;
+
+ /* There must be data left over for TB1 or this code must be changed */
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
+ offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
+ IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
+ BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
+ offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
+ IWL_FIRST_TB_SIZE);
+
+ memset(tfd, 0, sizeof(*tfd));
+
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ len = sizeof(struct iwl_tx_cmd_gen2);
+ else
+ len = sizeof(struct iwl_tx_cmd_gen3);
+
+ amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+ (*ieee80211_get_qos_ctl(hdr) &
+ IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+ hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ /*
+ * Only build A-MSDUs here if we are doing so via GSO; otherwise
+ * the frame may be an A-MSDU for other reasons, e.g. NAN, or an
+ * A-MSDU already built in the higher layers.
+ */
+ if (amsdu && skb_shinfo(skb)->gso_size)
+ return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+ out_meta, hdr_len, len);
+ return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+ hdr_len, len, !amsdu);
+}
+
+int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
+{
+ unsigned int max;
+ unsigned int used;
+
+ /*
+ * To avoid ambiguity between empty and completely full queues, there
+ * should always be less than max_tfd_queue_size elements in the queue.
+ * If q->n_window is smaller than max_tfd_queue_size, there is no need
+ * to reserve any queue entries for this purpose.
+ */
+ if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
+ max = q->n_window;
+ else
+ max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
+
+ /*
+ * max_tfd_queue_size is a power of 2, so the following is equivalent to
+ * modulo by max_tfd_queue_size and is well defined.
+ */
+ used = (q->write_ptr - q->read_ptr) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+
+ if (WARN_ON(used > max))
+ return 0;
+
+ return max - used;
+}
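
The occupancy computation above is the classic power-of-two ring trick: an unsigned masked subtraction stays correct even after the write pointer wraps past the read pointer. A self-contained sketch (QUEUE_SIZE is an assumed example value, not a driver constant):

#include <stdio.h>

#define QUEUE_SIZE 256 /* assumed power-of-two ring size */

static unsigned int ring_used(unsigned int write_ptr, unsigned int read_ptr)
{
	/* well defined even when write_ptr has wrapped below read_ptr */
	return (write_ptr - read_ptr) & (QUEUE_SIZE - 1);
}

int main(void)
{
	printf("%u\n", ring_used(5, 250));   /* prints 11: wrapped window */
	printf("%u\n", ring_used(250, 250)); /* prints 0: empty queue */
	return 0;
}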
+
+/*
+ * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ u8 filled_tfd_size, num_fetch_chunks;
+ u16 len = byte_cnt;
+ __le16 bc_ent;
+
+ if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
+ return;
+
+ filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
+ num_tbs * sizeof(struct iwl_tfh_tb);
+ /*
+ * filled_tfd_size contains the number of filled bytes in the TFD.
+ * Dividing it by 64 gives the number of 64-byte chunks to fetch
+ * to SRAM - 0 for one chunk, 1 for two, and so on.
+ * If, for example, the TFD contains only 3 TBs then 32 bytes
+ * of the TFD are used, and only one chunk of 64 bytes should
+ * be fetched.
+ */
+ num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
+
+ /* Starting from AX210, the HW expects bytes */
+ WARN_ON(trans_pcie->txqs.bc_table_dword);
+ WARN_ON(len > 0x3FFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
+ scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
+ } else {
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
+
+ /* Before AX210, the HW expects DW */
+ WARN_ON(!trans_pcie->txqs.bc_table_dword);
+ len = DIV_ROUND_UP(len, 4);
+ WARN_ON(len > 0xFFF);
+ bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
+ scd_bc_tbl->tfd_offset[idx] = bc_ent;
+ }
+}
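
A hedged sketch of the AX210+ entry packing used above: the length in bytes occupies the low 14 bits and the fetch-chunk count the top two, per the expression in the code. The TFD header and TB sizes are left as parameters here because the authoritative values live in the hardware headers.

static unsigned short ex_bc_entry_ax210(unsigned short len, int num_tbs,
					unsigned int tfd_hdr_size,
					unsigned int tb_size)
{
	/* bytes of the TFD actually filled by the header plus TBs */
	unsigned int filled = tfd_hdr_size + num_tbs * tb_size;
	/* DIV_ROUND_UP(filled, 64) - 1: zero means one 64-byte chunk */
	unsigned int chunks = (filled + 63) / 64 - 1;

	return len | (chunks << 14);
}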
+
+static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd)
+{
+ return le16_to_cpu(tfd->num_tbs) & 0x1f;
+}
+
+int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
+ dma_addr_t addr, u16 len)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int idx = iwl_txq_gen2_get_num_tbs(tfd);
+ struct iwl_tfh_tb *tb;
+
+ /* Only WARN here so we know about the issue; returning an error
+ * would mess up our unmap path, because not every caller currently
+ * checks the errors returned from this function - it can only
+ * return an error if there's no more space, and so when we know
+ * there is enough we don't always check ...
+ */
+ WARN(iwl_txq_crosses_4g_boundary(addr, len),
+ "possible DMA problem with iova:0x%llx, len:%d\n",
+ (unsigned long long)addr, len);
+
+ if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
+ return -EINVAL;
+ tb = &tfd->tbs[idx];
+
+ /* Each TFD can point to a maximum max_tbs Tx buffers */
+ if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Error can not send more than %d chunks\n",
+ trans_pcie->txqs.tfd.max_tbs);
+ return -EINVAL;
+ }
+
+ put_unaligned_le64(addr, &tb->addr);
+ tb->tb_len = cpu_to_le16(len);
+
+ tfd->num_tbs = cpu_to_le16(idx + 1);
+
+ return idx;
+}
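
The WARN above guards a hardware limitation: a single TB must not straddle a 2^32 DMA address boundary. A minimal sketch of that check, assuming iwl_txq_crosses_4g_boundary() simply compares the upper 32 address bits before and after the buffer:

#include <stdbool.h>
#include <stdint.h>

static bool ex_crosses_4g_boundary(uint64_t addr, uint16_t len)
{
	/* the buffer crosses 2^32 iff the high dword changes */
	return (addr >> 32) != ((addr + len) >> 32);
}
/* ex_crosses_4g_boundary(0xFFFFFF00ULL, 0x200) == true */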
+
+void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_tfh_tfd *tfd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_txq_gen2_get_num_tbs(tfd);
+
+ if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ return;
+ }
+
+ /* TB1 is mapped directly, the rest is the TSO page and SG list. */
+ if (meta->sg_offset)
+ num_tbs = 2;
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ le64_to_cpu(tfd->tbs[i].addr),
+ le16_to_cpu(tfd->tbs[i].tb_len),
+ DMA_TO_DEVICE);
+ }
+
+ iwl_txq_set_tfd_invalid_gen2(trans, tfd);
+}
+
+static void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&txq->lock);
+
+ if (!txq->entries)
+ return;
+
+ iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+ iwl_txq_get_tfd(trans, txq, idx));
+
+ skb = txq->entries[idx].skb;
+
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
+ }
+}
+
+/*
+ * iwl_txq_inc_wr_ptr - Send new write index to hardware
+ */
+static void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ lockdep_assert_held(&txq->lock);
+
+ IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
+
+ /*
+ * if not in power-save mode, uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
+}
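
The doorbell write above packs the queue id and the new write pointer into a single 32-bit register value. A stand-alone sketch of that encoding - the bit layout is read off the expression in the code, not from hardware documentation:

static unsigned int ex_wrptr_doorbell(unsigned int txq_id,
				      unsigned int write_ptr)
{
	/* queue index in the upper half-word, write pointer below */
	return write_ptr | (txq_id << 16);
}
/* queue 5, write_ptr 42 -> 0x0005002A */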
+
+int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_device_tx_cmd *dev_cmd, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_cmd_meta *out_meta;
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+ u16 cmd_len;
+ int idx;
+ void *tfd;
+
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return -EINVAL;
+
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
+ "TX on unused queue %d\n", txq_id))
+ return -EINVAL;
+
+ if (skb_is_nonlinear(skb) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
+ __skb_linearize(skb))
+ return -ENOMEM;
+
+ spin_lock(&txq->lock);
+
+ if (iwl_txq_space(trans, txq) < txq->high_mark) {
+ iwl_txq_stop(trans, txq);
+
+ /* don't put the packet on the ring if there is no room */
+ if (unlikely(iwl_txq_space(trans, txq) < 3)) {
+ struct iwl_device_tx_cmd **dev_cmd_ptr;
+
+ dev_cmd_ptr = (void *)((u8 *)skb->cb +
+ trans_pcie->txqs.dev_cmd_offs);
+
+ *dev_cmd_ptr = dev_cmd;
+ __skb_queue_tail(&txq->overflow_q, skb);
+ spin_unlock(&txq->lock);
+ return 0;
+ }
+ }
+
+ idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+
+ /* Set up driver data for this TFD */
+ txq->entries[idx].skb = skb;
+ txq->entries[idx].cmd = dev_cmd;
+
+ dev_cmd->hdr.sequence =
+ cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
+ INDEX_TO_SEQ(idx)));
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_meta = &txq->entries[idx].meta;
+ memset(out_meta, 0, sizeof(*out_meta));
+
+ tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
+ if (!tfd) {
+ spin_unlock(&txq->lock);
+ return -1;
+ }
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen3->len);
+ } else {
+ struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ }
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
+ iwl_txq_gen2_get_num_tbs(tfd));
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_txq_inc_wr_ptr(trans, txq);
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ */
+ spin_unlock(&txq->lock);
+ return 0;
+}
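
Note the two-stage back-pressure in the function above: the queue is stopped as soon as free space drops below the high-water mark, but frames are only parked on the overflow list when the ring is nearly exhausted (fewer than 3 free slots). A minimal sketch of that decision, with hypothetical names:

enum ex_tx_action { EX_TX_RING, EX_TX_OVERFLOW };

static enum ex_tx_action ex_tx_policy(unsigned int space,
				      unsigned int high_mark)
{
	/* mirrors the nested checks in iwl_txq_gen2_tx() above */
	if (space < high_mark && space < 3)
		return EX_TX_OVERFLOW; /* queue stopped; defer the frame */
	return EX_TX_RING;             /* room left: place TFD on the ring */
}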
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+
+ spin_lock_bh(&txq->reclaim_lock);
+ spin_lock(&txq->lock);
+ while (txq->write_ptr != txq->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, txq->read_ptr);
+
+ if (txq_id != trans_pcie->txqs.cmd.q_id) {
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta;
+ struct sk_buff *skb = txq->entries[idx].skb;
+
+ if (!WARN_ON_ONCE(!skb))
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
+ }
+ iwl_txq_gen2_free_tfd(trans, txq);
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
+ }
+
+ while (!skb_queue_empty(&txq->overflow_q)) {
+ struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ }
+
+ spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->reclaim_lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_trans_pcie_wake_queue(trans, txq);
+}
+
+static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct device *dev = trans->dev;
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ trans_pcie->txqs.tfd.size * txq->n_window,
+ txq->tfds, txq->dma_addr);
+ dma_free_coherent(dev,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
+ txq->first_tb_bufs, txq->first_tb_dma);
+ }
+
+ kfree(txq->entries);
+ if (txq->bc_tbl.addr)
+ dma_pool_free(trans_pcie->txqs.bc_pool,
+ txq->bc_tbl.addr, txq->bc_tbl.dma);
+ kfree(txq);
+}
+
+/*
+ * iwl_pcie_txq_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq;
+ int i;
+
+ if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", txq_id))
+ return;
+
+ txq = trans_pcie->txqs.txq[txq_id];
+
+ if (WARN_ON(!txq))
+ return;
+
+ iwl_txq_gen2_unmap(trans, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ if (txq_id == trans_pcie->txqs.cmd.q_id)
+ for (i = 0; i < txq->n_window; i++) {
+ kfree_sensitive(txq->entries[i].cmd);
+ kfree_sensitive(txq->entries[i].free_buf);
+ }
+ del_timer_sync(&txq->stuck_timer);
+
+ iwl_txq_gen2_free_memory(trans, txq);
+
+ trans_pcie->txqs.txq[txq_id] = NULL;
+
+ clear_bit(txq_id, trans_pcie->txqs.queue_used);
+}
+
+static struct iwl_txq *
+iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t bc_tbl_size, bc_tbl_entries;
+ struct iwl_txq *txq;
+ int ret;
+
+ WARN_ON(!trans_pcie->txqs.bc_tbl_size);
+
+ bc_tbl_size = trans_pcie->txqs.bc_tbl_size;
+ bc_tbl_entries = bc_tbl_size / sizeof(u16);
+
+ if (WARN_ON(size > bc_tbl_entries))
+ return ERR_PTR(-EINVAL);
+
+ txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+ if (!txq)
+ return ERR_PTR(-ENOMEM);
+
+ txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->txqs.bc_pool, GFP_KERNEL,
+ &txq->bc_tbl.dma);
+ if (!txq->bc_tbl.addr) {
+ IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
+ kfree(txq);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = iwl_pcie_txq_alloc(trans, txq, size, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue alloc failed\n");
+ goto error;
+ }
+ ret = iwl_txq_init(trans, txq, size, false);
+ if (ret) {
+ IWL_ERR(trans, "Tx queue init failed\n");
+ goto error;
+ }
+
+ txq->wd_timeout = msecs_to_jiffies(timeout);
+
+ return txq;
+
+error:
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ERR_PTR(ret);
+}
+
+static int iwl_pcie_txq_alloc_response(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ struct iwl_host_cmd *hcmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tx_queue_cfg_rsp *rsp;
+ int ret, qid;
+ u32 wr_ptr;
+
+ if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
+ sizeof(*rsp))) {
+ ret = -EINVAL;
+ goto error_free_resp;
+ }
+
+ rsp = (void *)hcmd->resp_pkt->data;
+ qid = le16_to_cpu(rsp->queue_number);
+ wr_ptr = le16_to_cpu(rsp->write_pointer);
+
+ if (qid >= ARRAY_SIZE(trans_pcie->txqs.txq)) {
+ WARN_ONCE(1, "queue index %d unsupported", qid);
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ if (test_and_set_bit(qid, trans_pcie->txqs.queue_used)) {
+ WARN_ONCE(1, "queue %d already used", qid);
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ if (WARN_ONCE(trans_pcie->txqs.txq[qid],
+ "queue %d already allocated\n", qid)) {
+ ret = -EIO;
+ goto error_free_resp;
+ }
+
+ txq->id = qid;
+ trans_pcie->txqs.txq[qid] = txq;
+ wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
+
+ /* Place first TFD at index corresponding to start sequence number */
+ txq->read_ptr = wr_ptr;
+ txq->write_ptr = wr_ptr;
+
+ IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
+
+ iwl_free_resp(hcmd);
+ return qid;
+
+error_free_resp:
+ iwl_free_resp(hcmd);
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
+
+int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
+ u8 tid, int size, unsigned int timeout)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq;
+ union {
+ struct iwl_tx_queue_cfg_cmd old;
+ struct iwl_scd_queue_cfg_cmd new;
+ } cmd;
+ struct iwl_host_cmd hcmd = {
+ .flags = CMD_WANT_SKB,
+ };
+ int ret;
+
+ /* take the min with bytecount table entries allowed */
+ size = min_t(u32, size, trans_pcie->txqs.bc_tbl_size / sizeof(u16));
+ /* but must be power of 2 values for calculating read/write pointers */
+ size = rounddown_pow_of_two(size);
+
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
+ trans->hw_rev_step == SILICON_A_STEP) {
+ size = 4096;
+ txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+ } else {
+ do {
+ txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
+ if (!IS_ERR(txq))
+ break;
+
+ IWL_DEBUG_TX_QUEUES(trans,
+ "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %ld\n",
+ size, sta_mask, tid,
+ PTR_ERR(txq));
+ size /= 2;
+ } while (size >= 16);
+ }
+
+ if (IS_ERR(txq))
+ return PTR_ERR(txq);
+
+ if (trans_pcie->txqs.queue_alloc_cmd_ver == 0) {
+ memset(&cmd.old, 0, sizeof(cmd.old));
+ cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
+ cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
+ cmd.old.tid = tid;
+
+ if (hweight32(sta_mask) != 1) {
+ ret = -EINVAL;
+ goto error;
+ }
+ cmd.old.sta_id = ffs(sta_mask) - 1;
+
+ hcmd.id = SCD_QUEUE_CFG;
+ hcmd.len[0] = sizeof(cmd.old);
+ hcmd.data[0] = &cmd.old;
+ } else if (trans_pcie->txqs.queue_alloc_cmd_ver == 3) {
+ memset(&cmd.new, 0, sizeof(cmd.new));
+ cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
+ cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
+ cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
+ cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
+ cmd.new.u.add.flags = cpu_to_le32(flags);
+ cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
+ cmd.new.u.add.tid = tid;
+
+ hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
+ hcmd.len[0] = sizeof(cmd.new);
+ hcmd.data[0] = &cmd.new;
+ } else {
+ ret = -EOPNOTSUPP;
+ goto error;
+ }
+
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret)
+ goto error;
+
+ return iwl_pcie_txq_alloc_response(trans, txq, &hcmd);
+
+error:
+ iwl_txq_gen2_free_memory(trans, txq);
+ return ret;
+}
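
For the v0 command path above, hweight32()/ffs() enforce and decode a single-station mask: the old API addresses exactly one station, so the mask must have one bit set, and its position is the station index. Equivalent stand-alone logic using compiler builtins:

static int ex_sta_id_from_mask(unsigned int sta_mask)
{
	if (__builtin_popcount(sta_mask) != 1)
		return -1;              /* -EINVAL in the driver */
	return __builtin_ctz(sta_mask); /* same result as ffs(mask) - 1 */
}
/* ex_sta_id_from_mask(0x10) == 4; ex_sta_id_from_mask(0x18) == -1 */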
+
+void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
+ "queue %d out of range", queue))
+ return;
+
+ /*
+ * Upon HW rfkill we stop the device and then stop the queues in
+ * the op_mode. For the sake of simplicity, allow the op_mode to
+ * call txq_disable after it has already called stop_device.
+ */
+ if (!test_and_clear_bit(queue, trans_pcie->txqs.queue_used)) {
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", queue);
+ return;
+ }
+
+ iwl_txq_gen2_free(trans, queue);
+
+ IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
+}
+
+void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i;
+
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
+
+ /* Free all TX queues */
+ for (i = 0; i < ARRAY_SIZE(trans_pcie->txqs.txq); i++) {
+ if (!trans_pcie->txqs.txq[i])
+ continue;
+
+ iwl_txq_gen2_free(trans, i);
+ }
+}
+
+int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *queue;
+ int ret;
+
+ /* alloc and init the tx queue */
+ if (!trans_pcie->txqs.txq[txq_id]) {
+ queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+ if (!queue) {
+ IWL_ERR(trans, "Not enough memory for tx queue\n");
+ return -ENOMEM;
+ }
+ trans_pcie->txqs.txq[txq_id] = queue;
+ ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ } else {
+ queue = trans_pcie->txqs.txq[txq_id];
+ }
+
+ ret = iwl_txq_init(trans, queue, queue_size,
+ (txq_id == trans_pcie->txqs.cmd.q_id));
+ if (ret) {
+ IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
+ goto error;
+ }
+ trans_pcie->txqs.txq[txq_id]->id = txq_id;
+ set_bit(txq_id, trans_pcie->txqs.queue_used);
+
+ return 0;
+
+error:
+ iwl_txq_gen2_tx_free(trans);
+ return ret;
+}
/*************** HOST COMMAND QUEUE FUNCTIONS *****/
@@ -28,7 +1204,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
@@ -130,7 +1306,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
- /* re-initialize to NULL */
+ /* re-initialize, this also marks the SG list as unused */
memset(out_meta, 0, sizeof(*out_meta));
if (cmd->flags & CMD_WANT_SKB)
out_meta->source = cmd;
@@ -143,7 +1319,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -191,7 +1367,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
"Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
iwl_get_cmd_string(trans, cmd->id), group_id,
out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
+ cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index fa8eba47dc4c..9fe050f0ddc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1,16 +1,22 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2003-2014, 2018-2021, 2023 Intel Corporation
+ * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
+#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/sched.h>
+#include <linux/tcp.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>
+#include "fw/api/commands.h"
+#include "fw/api/datapath.h"
+#include "fw/api/debug.h"
+#include "iwl-fh.h"
#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
@@ -72,6 +78,7 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_txq *txq)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 reg = 0;
int txq_id = txq->id;
@@ -84,7 +91,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
* 3. there is a chance that the NIC is asleep
*/
if (!trans->trans_cfg->base_params->shadow_reg_enable &&
- txq_id != trans->txqs.cmd.q_id &&
+ txq_id != trans_pcie->txqs.cmd.q_id &&
test_bit(STATUS_TPOWER_PMI, &trans->status)) {
/*
* wake up nic if it's powered down ...
@@ -115,12 +122,13 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = trans->txqs.txq[i];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[i];
- if (!test_bit(i, trans->txqs.queue_used))
+ if (!test_bit(i, trans_pcie->txqs.queue_used))
continue;
spin_lock_bh(&txq->lock);
@@ -132,23 +140,43 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
}
}
+static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_tfd *tfd,
+ u8 idx, dma_addr_t addr, u16 len)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ u16 hi_n_len = len << 4;
+
+ put_unaligned_le32(addr, &tb->lo);
+ hi_n_len |= iwl_get_dma_hi_addr(addr);
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd->num_tbs = idx + 1;
+}
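
The gen1 descriptor packs a 36-bit DMA address and a 12-bit length into 48 bits: the low 32 address bits go into 'lo', address bits 32..35 into the low nibble of 'hi_n_len', and the length into its upper 12 bits (hence 'len << 4'). A stand-alone sketch with explicit types (ex_* names are illustrative only):

#include <stdint.h>

struct ex_gen1_tb {
	uint32_t lo;       /* DMA address bits 0..31 */
	uint16_t hi_n_len; /* bits 0..3: addr 32..35, bits 4..15: length */
};

static void ex_gen1_tb_set(struct ex_gen1_tb *tb, uint64_t addr, uint16_t len)
{
	/* len must fit in 12 bits for this encoding to be lossless */
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)((len << 4) | ((addr >> 32) & 0xF));
}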
+
+static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_tfd *tfd)
+{
+ return tfd->num_tbs & 0x1f;
+}
+
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
dma_addr_t addr, u16 len, bool reset)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
void *tfd;
u32 num_tbs;
- tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr;
+ tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr;
if (reset)
- memset(tfd, 0, trans->txqs.tfd.size);
+ memset(tfd, 0, trans_pcie->txqs.tfd.size);
- num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
/* Each TFD can point to a maximum max_tbs Tx buffers */
- if (num_tbs >= trans->txqs.tfd.max_tbs) {
+ if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) {
IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans->txqs.tfd.max_tbs);
+ trans_pcie->txqs.tfd.max_tbs);
return -EINVAL;
}
@@ -156,7 +184,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
"Unaligned address = %llx\n", (unsigned long long)addr))
return -EINVAL;
- iwl_pcie_gen1_tfd_set_tb(trans, tfd, num_tbs, addr, len);
+ iwl_pcie_gen1_tfd_set_tb(tfd, num_tbs, addr, len);
return num_tbs;
}
@@ -181,36 +209,206 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
spin_unlock(&trans_pcie->reg_lock);
}
+static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
+ struct page *page)
+{
+ struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page));
+
+ /* Decrease internal use count and unmap/free page if needed */
+ if (refcount_dec_and_test(&info->use_count)) {
+ dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
+ DMA_TO_DEVICE);
+
+ __free_page(page);
+ }
+}
+
+void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct page **page_ptr;
+ struct page *next;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+ next = *page_ptr;
+ *page_ptr = NULL;
+
+ while (next) {
+ struct iwl_tso_page_info *info;
+ struct page *tmp = next;
+
+ info = IWL_TSO_PAGE_INFO(page_address(next));
+ next = info->next;
+
+ /* Unmap the scatter gather list that is on the last page */
+ if (!next && cmd_meta->sg_offset) {
+ struct sg_table *sgt;
+
+ sgt = (void *)((u8 *)page_address(tmp) +
+ cmd_meta->sg_offset);
+
+ dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
+ }
+
+ iwl_pcie_free_and_unmap_tso_page(trans, tmp);
+ }
+}
+
+static inline dma_addr_t
+iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
+{
+ struct iwl_tfd_tb *tb = &tfd->tbs[idx];
+ dma_addr_t addr;
+ dma_addr_t hi_len;
+
+ addr = get_unaligned_le32(&tb->lo);
+
+ if (sizeof(dma_addr_t) <= sizeof(u32))
+ return addr;
+
+ hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
+
+ /*
+ * shift by 16 twice to avoid warnings on 32-bit
+ * (where this code never runs anyway due to the
+ * if statement above)
+ */
+ return addr | ((hi_len << 16) << 16);
+}
+
+static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
+ struct iwl_tfd *tfd)
+{
+ tfd->num_tbs = 0;
+
+ iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma,
+ trans->invalid_tx_cmd.size);
+}
+
+static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
+ struct iwl_cmd_meta *meta,
+ struct iwl_txq *txq, int index)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int i, num_tbs;
+ struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
+
+ /* Sanity check on number of chunks */
+ num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
+
+ if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
+ IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
+ /* @todo issue fatal error, this is quite a serious situation */
+ return;
+ }
+
+ /* TB1 is mapped directly, the rest is the TSO page and SG list. */
+ if (meta->sg_offset)
+ num_tbs = 2;
+
+ /* first TB is never freed - it's the bidirectional DMA data */
+
+ for (i = 1; i < num_tbs; i++) {
+ if (meta->tbs & BIT(i))
+ dma_unmap_page(trans->dev,
+ iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
+ iwl_txq_gen1_tfd_tb_get_len(trans,
+ tfd, i),
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(trans->dev,
+ iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
+ iwl_txq_gen1_tfd_tb_get_len(trans,
+ tfd, i),
+ DMA_TO_DEVICE);
+ }
+
+ meta->tbs = 0;
+
+ iwl_txq_set_tfd_invalid_gen1(trans, tfd);
+}
+
+/**
+ * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @trans: transport private data
+ * @txq: tx queue
+ * @read_ptr: the TXQ read_ptr to free
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
+ int read_ptr)
+{
+ /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
+ * idx is bounded by n_window
+ */
+ int idx = iwl_txq_get_cmd_index(txq, read_ptr);
+ struct sk_buff *skb;
+
+ lockdep_assert_held(&txq->reclaim_lock);
+
+ if (!txq->entries)
+ return;
+
+ /* We have only q->n_window txq->entries, but we use
+ * TFD_QUEUE_SIZE_MAX tfds
+ */
+ if (trans->trans_cfg->gen2)
+ iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+ iwl_txq_get_tfd(trans, txq, read_ptr));
+ else
+ iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
+ txq, read_ptr);
+
+ /* free SKB */
+ skb = txq->entries[idx].skb;
+
+ /* Can be called from irqs-disabled context
+ * If skb is not NULL, it means that the whole queue is being
+ * freed and that the queue is not empty - free the skb
+ */
+ if (skb) {
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ txq->entries[idx].skb = NULL;
+ }
+}
+
/*
* iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
*/
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
if (!txq) {
IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
return;
}
- spin_lock_bh(&txq->lock);
+ spin_lock_bh(&txq->reclaim_lock);
+ spin_lock(&txq->lock);
while (txq->write_ptr != txq->read_ptr) {
IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
txq_id, txq->read_ptr);
- if (txq_id != trans->txqs.cmd.q_id) {
+ if (txq_id != trans_pcie->txqs.cmd.q_id) {
struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
+ struct iwl_cmd_meta *cmd_meta =
+ &txq->entries[txq->read_ptr].meta;
if (WARN_ON_ONCE(!skb))
continue;
- iwl_txq_free_tso_page(trans, skb);
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
}
- iwl_txq_free_tfd(trans, txq);
+ iwl_txq_free_tfd(trans, txq, txq->read_ptr);
txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr &&
- txq_id == trans->txqs.cmd.q_id)
+ txq_id == trans_pcie->txqs.cmd.q_id)
iwl_pcie_clear_cmd_in_flight(trans);
}
@@ -220,10 +418,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_op_mode_free_skb(trans->op_mode, skb);
}
- spin_unlock_bh(&txq->lock);
+ spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->reclaim_lock);
/* just in case - this queue may have been stopped */
- iwl_wake_queue(trans, txq);
+ iwl_trans_pcie_wake_queue(trans, txq);
}
/*
@@ -236,7 +435,8 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
*/
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
struct device *dev = trans->dev;
int i;
@@ -246,7 +446,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
iwl_pcie_txq_unmap(trans, txq_id);
/* De-alloc array of command/tx buffers */
- if (txq_id == trans->txqs.cmd.q_id)
+ if (txq_id == trans_pcie->txqs.cmd.q_id)
for (i = 0; i < txq->n_window; i++) {
kfree_sensitive(txq->entries[i].cmd);
kfree_sensitive(txq->entries[i].free_buf);
@@ -255,7 +455,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
- trans->txqs.tfd.size *
+ trans_pcie->txqs.tfd.size *
trans->trans_cfg->base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
@@ -285,9 +485,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
/* make sure all queue are not stopped/used */
- memset(trans->txqs.queue_stopped, 0,
- sizeof(trans->txqs.queue_stopped));
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+ memset(trans_pcie->txqs.queue_stopped, 0,
+ sizeof(trans_pcie->txqs.queue_stopped));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
trans_pcie->scd_base_addr =
iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
@@ -301,7 +502,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
NULL, clear_dwords);
iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
- trans->txqs.scd_bc_tbls.dma >> 10);
+ trans_pcie->txqs.scd_bc_tbls.dma >> 10);
/* The chain extension of the SCD doesn't work well. This feature is
* enabled by default by the HW, so we need to disable it manually.
@@ -309,9 +510,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
if (trans->trans_cfg->base_params->scd_chain_ext_wa)
iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
- iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
- trans->txqs.cmd.fifo,
- trans->txqs.cmd.wdg_timeout);
+ iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id,
+ trans_pcie->txqs.cmd.fifo,
+ trans_pcie->txqs.cmd.wdg_timeout);
/* Activate all Tx DMA/FIFO channels */
iwl_scd_activate_fifos(trans);
@@ -347,7 +548,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
if (trans->trans_cfg->gen2)
iwl_write_direct64(trans,
FH_MEM_CBBC_QUEUE(trans, txq_id),
@@ -422,9 +623,10 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
* queues. This happens when we have an rfkill interrupt.
* Since we stop Tx altogether - mark the queues as stopped.
*/
- memset(trans->txqs.queue_stopped, 0,
- sizeof(trans->txqs.queue_stopped));
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+ memset(trans_pcie->txqs.queue_stopped, 0,
+ sizeof(trans_pcie->txqs.queue_stopped));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
/* This can happen: start_hw, stop_device */
if (!trans_pcie->txq_memory)
@@ -448,7 +650,8 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
int txq_id;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
+ memset(trans_pcie->txqs.queue_used, 0,
+ sizeof(trans_pcie->txqs.queue_used));
/* Tx queues */
if (trans_pcie->txq_memory) {
@@ -456,7 +659,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
iwl_pcie_txq_free(trans, txq_id);
- trans->txqs.txq[txq_id] = NULL;
+ trans_pcie->txqs.txq[txq_id] = NULL;
}
}
@@ -465,7 +668,135 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
- iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
+ iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls);
+}
+
+void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
+{
+ u32 txq_id = txq->id;
+ u32 status;
+ bool active;
+ u8 fifo;
+
+ if (trans->trans_cfg->gen2) {
+ IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
+ txq->read_ptr, txq->write_ptr);
+ /* TODO: access new SCD registers and dump them */
+ return;
+ }
+
+ status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
+ fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
+ active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
+
+ IWL_ERR(trans,
+ "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
+ txq_id, active ? "" : "in", fifo,
+ jiffies_to_msecs(txq->wd_timeout),
+ txq->read_ptr, txq->write_ptr,
+ iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
+ (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
+ iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
+}
+
+static void iwl_txq_stuck_timer(struct timer_list *t)
+{
+ struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
+ struct iwl_trans *trans = txq->trans;
+
+ spin_lock(&txq->lock);
+ /* check if triggered erroneously */
+ if (txq->read_ptr == txq->write_ptr) {
+ spin_unlock(&txq->lock);
+ return;
+ }
+ spin_unlock(&txq->lock);
+
+ iwl_txq_log_scd_error(trans, txq);
+
+ iwl_force_nmi(trans);
+}
+
+int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ size_t num_entries = trans->trans_cfg->gen2 ?
+ slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
+ size_t tfd_sz;
+ size_t tb0_buf_sz;
+ int i;
+
+ if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
+ return -EINVAL;
+
+ if (WARN_ON(txq->entries || txq->tfds))
+ return -EINVAL;
+
+ tfd_sz = trans_pcie->txqs.tfd.size * num_entries;
+
+ timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
+ txq->trans = trans;
+
+ txq->n_window = slots_num;
+
+ txq->entries = kcalloc(slots_num,
+ sizeof(struct iwl_pcie_txq_entry),
+ GFP_KERNEL);
+
+ if (!txq->entries)
+ goto error;
+
+ if (cmd_queue)
+ for (i = 0; i < slots_num; i++) {
+ txq->entries[i].cmd =
+ kmalloc(sizeof(struct iwl_device_cmd),
+ GFP_KERNEL);
+ if (!txq->entries[i].cmd)
+ goto error;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device
+ */
+ txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
+ &txq->dma_addr, GFP_KERNEL);
+ if (!txq->tfds)
+ goto error;
+
+ BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
+
+ tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
+
+ txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
+ &txq->first_tb_dma,
+ GFP_KERNEL);
+ if (!txq->first_tb_bufs)
+ goto err_free_tfds;
+
+ for (i = 0; i < num_entries; i++) {
+ void *tfd = iwl_txq_get_tfd(trans, txq, i);
+
+ if (trans->trans_cfg->gen2)
+ iwl_txq_set_tfd_invalid_gen2(trans, tfd);
+ else
+ iwl_txq_set_tfd_invalid_gen1(trans, tfd);
+ }
+
+ return 0;
+err_free_tfds:
+ dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
+ txq->tfds = NULL;
+error:
+ if (txq->entries && cmd_queue)
+ for (i = 0; i < slots_num; i++)
+ kfree(txq->entries[i].cmd);
+ kfree(txq->entries);
+ txq->entries = NULL;
+
+ return -ENOMEM;
}
/*
@@ -491,7 +822,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
goto error;
}
- ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
+ ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls,
bc_tbls_size);
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
@@ -517,7 +848,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
+ bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -525,14 +856,14 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_ba_txq_size);
- trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
- ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
- cmd_queue);
+ trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
+ ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
+ slots_num, cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
goto error;
}
- trans->txqs.txq[txq_id]->id = txq_id;
+ trans_pcie->txqs.txq[txq_id]->id = txq_id;
}
return 0;
@@ -543,6 +874,69 @@ error:
return ret;
}
+/*
+ * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
+ */
+static int iwl_queue_init(struct iwl_txq *q, int slots_num)
+{
+ q->n_window = slots_num;
+
+ /* slots_num must be a power-of-two size, otherwise
+ * iwl_txq_get_cmd_index is broken.
+ */
+ if (WARN_ON(!is_power_of_2(slots_num)))
+ return -EINVAL;
+
+ q->low_mark = q->n_window / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_window / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = 0;
+ q->read_ptr = 0;
+
+ return 0;
+}
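
A quick numeric check of the watermark rules above: for a 64-slot window, low_mark is 16 and high_mark is 8; the floors of 4 and 2 only take effect for windows smaller than 16 slots.

static void ex_watermarks(int n_window, int *low, int *high)
{
	*low = n_window / 4;
	if (*low < 4)
		*low = 4;
	*high = n_window / 8;
	if (*high < 2)
		*high = 2;
}
/* ex_watermarks(64, ...) -> low 16, high 8; ex_watermarks(8, ...) -> 4, 2 */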
+
+int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
+ int slots_num, bool cmd_queue)
+{
+ u32 tfd_queue_max_size =
+ trans->trans_cfg->base_params->max_tfd_queue_size;
+ int ret;
+
+ txq->need_update = false;
+
+ /* max_tfd_queue_size must be a power-of-two size, otherwise
+ * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken.
+ */
+ if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
+ "Max tfd queue size must be a power of two, but is %d",
+ tfd_queue_max_size))
+ return -EINVAL;
+
+ /* Initialize queue's high/low-water marks, and head/tail indexes */
+ ret = iwl_queue_init(txq, slots_num);
+ if (ret)
+ return ret;
+
+ spin_lock_init(&txq->lock);
+ spin_lock_init(&txq->reclaim_lock);
+
+ if (cmd_queue) {
+ static struct lock_class_key iwl_txq_cmd_queue_lock_class;
+
+ lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
+ }
+
+ __skb_queue_head_init(&txq->overflow_q);
+
+ return 0;
+}
+
int iwl_pcie_tx_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -571,7 +965,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
txq_id++) {
- bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);
+ bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
if (cmd_queue)
slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
@@ -579,7 +973,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
else
slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_ba_txq_size);
- ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
+ ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,
cmd_queue);
if (ret) {
IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
@@ -593,7 +987,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
* Circular buffer (TFD queue in DRAM) physical base address
*/
iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
- trans->txqs.txq[txq_id]->dma_addr >> 8);
+ trans_pcie->txqs.txq[txq_id]->dma_addr >> 8);
}
iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
@@ -641,6 +1035,42 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
return 0;
}
+static void iwl_txq_progress(struct iwl_txq *txq)
+{
+ lockdep_assert_held(&txq->lock);
+
+ if (!txq->wd_timeout)
+ return;
+
+ /*
+ * station is asleep and we send data - that must
+ * be uAPSD or PS-Poll. Don't rearm the timer.
+ */
+ if (txq->frozen)
+ return;
+
+ /*
+ * if empty delete timer, otherwise move timer forward
+ * since we're making progress on this queue
+ */
+ if (txq->read_ptr == txq->write_ptr)
+ del_timer(&txq->stuck_timer);
+ else
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+}
+
+static inline bool iwl_txq_used(const struct iwl_txq *q, int i,
+ int read_ptr, int write_ptr)
+{
+ int index = iwl_txq_get_cmd_index(q, i);
+ int r = iwl_txq_get_cmd_index(q, read_ptr);
+ int w = iwl_txq_get_cmd_index(q, write_ptr);
+
+ return w >= r ?
+ (index >= r && index < w) :
+ !(index < r && index >= w);
+}
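
The helper above is a two-case wrapped-interval test: an index is "in use" when it lies in [read, write) on the ring, and the window may wrap past the end. A sketch with a worked example:

#include <stdbool.h>

static bool ex_ring_index_used(int index, int r, int w)
{
	/* non-wrapped window: plain range check; wrapped: complement */
	return w >= r ? (index >= r && index < w)
		      : !(index < r && index >= w);
}
/* r = 250, w = 5 on a 256-entry ring: index 252 is used, 100 is free */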
+
/*
* iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
*
@@ -650,7 +1080,8 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
*/
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
int nfreed = 0;
u16 r;
@@ -660,8 +1091,8 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
- (!iwl_txq_used(txq, idx))) {
- WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
+ (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {
+ WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx,
trans->trans_cfg->base_params->max_tfd_queue_size,
@@ -720,11 +1151,11 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
unsigned int wdg_timeout)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
int fifo = -1;
bool scd_bug = false;
- if (test_and_set_bit(txq_id, trans->txqs.queue_used))
+ if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used))
WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
@@ -733,7 +1164,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
fifo = cfg->fifo;
/* Disable the scheduler prior configuring the cmd queue */
- if (txq_id == trans->txqs.cmd.q_id &&
+ if (txq_id == trans_pcie->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, 0);
@@ -741,7 +1172,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
iwl_scd_txq_set_inactive(trans, txq_id);
/* Set this queue as a chain-building queue unless it is CMD */
- if (txq_id != trans->txqs.cmd.q_id)
+ if (txq_id != trans_pcie->txqs.cmd.q_id)
iwl_scd_txq_set_chain(trans, txq_id);
if (cfg->aggregate) {
@@ -811,7 +1242,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
SCD_QUEUE_STTS_REG_MSK);
/* enable the scheduler for this queue (only) */
- if (txq_id == trans->txqs.cmd.q_id &&
+ if (txq_id == trans_pcie->txqs.cmd.q_id &&
trans_pcie->scd_set_active)
iwl_scd_enable_set_active(trans, BIT(txq_id));
@@ -830,7 +1261,8 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode)
{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
txq->ampdu = !shared_mode;
}
@@ -843,8 +1275,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
- trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
- trans->txqs.txq[txq_id]->frozen = false;
+ trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
+ trans_pcie->txqs.txq[txq_id]->frozen = false;
/*
* Upon HW Rfkill - we stop the device, and then stop the queues
@@ -852,7 +1284,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
* allow the op_mode to call txq_disable after it already called
* stop_device.
*/
- if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
+ if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) {
WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
"queue %d not used", txq_id);
return;
@@ -866,7 +1298,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
}
iwl_pcie_txq_unmap(trans, txq_id);
- trans->txqs.txq[txq_id]->ampdu = false;
+ trans_pcie->txqs.txq[txq_id]->ampdu = false;
IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}
@@ -875,12 +1307,13 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int i;
for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
- struct iwl_txq *txq = trans->txqs.txq[i];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[i];
- if (i == trans->txqs.cmd.q_id)
+ if (i == trans_pcie->txqs.cmd.q_id)
continue;
/* we skip the command queue (obviously) so it's OK to nest */
@@ -912,7 +1345,8 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
void *dup_buf = NULL;
@@ -1024,7 +1458,8 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
out_cmd = txq->entries[idx].cmd;
out_meta = &txq->entries[idx].meta;
- memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+ /* re-initialize, this also marks the SG list as unused */
+ memset(out_meta, 0, sizeof(*out_meta));
if (cmd->flags & CMD_WANT_SKB)
out_meta->source = cmd;
@@ -1038,7 +1473,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
sizeof(struct iwl_cmd_header_wide));
out_cmd->hdr_wide.reserved = 0;
out_cmd->hdr_wide.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
cmd_pos = sizeof(struct iwl_cmd_header_wide);
@@ -1046,7 +1481,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
} else {
out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
out_cmd->hdr.sequence =
- cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
+ cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
INDEX_TO_SEQ(txq->write_ptr));
out_cmd->hdr.group_id = 0;
@@ -1097,7 +1532,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id),
group_id, out_cmd->hdr.cmd,
le16_to_cpu(out_cmd->hdr.sequence),
- cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);
+ cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
/* start the TFD with the minimum copy bytes */
tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
@@ -1196,14 +1631,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
struct iwl_device_cmd *cmd;
struct iwl_cmd_meta *meta;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
/* If a Tx command is being handled and it isn't in the actual
* command queue then there a command routing bug has been introduced
* in the queue management code. */
- if (WARN(txq_id != trans->txqs.cmd.q_id,
+ if (WARN(txq_id != trans_pcie->txqs.cmd.q_id,
"wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
+ txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr,
txq->write_ptr)) {
iwl_print_hex_error(trans, pkt, 32);
return;
@@ -1306,19 +1741,180 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
}
#ifdef CONFIG_INET
+static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
+ size_t len, struct sk_buff *skb)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page);
+ struct iwl_tso_page_info *info;
+ struct page **page_ptr;
+ dma_addr_t phys;
+ void *ret;
+
+ page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
+
+ if (WARN_ON(*page_ptr))
+ return NULL;
+
+ if (!p->page)
+ goto alloc;
+
+ /*
+ * Check if there's enough room on this page
+ *
+ * Note that we put a page chaining pointer *last* in the
+ * page - we need it somewhere, and if it's there then we
+ * avoid DMA mapping the last bits of the page which may
+ * trigger the 32-bit boundary hardware bug.
+ *
+ * (see also get_workaround_page() in tx-gen2.c)
+ */
+ if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) {
+ info = IWL_TSO_PAGE_INFO(page_address(p->page));
+ goto out;
+ }
+
+ /* We don't have enough room on this page, get a new one. */
+ iwl_pcie_free_and_unmap_tso_page(trans, p->page);
+
+alloc:
+ p->page = alloc_page(GFP_ATOMIC);
+ if (!p->page)
+ return NULL;
+ p->pos = page_address(p->page);
+
+ info = IWL_TSO_PAGE_INFO(page_address(p->page));
+
+ /* set the chaining pointer to NULL */
+ info->next = NULL;
+
+ /* Create a DMA mapping for the page */
+ phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,
+ DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ if (unlikely(dma_mapping_error(trans->dev, phys))) {
+ __free_page(p->page);
+ p->page = NULL;
+
+ return NULL;
+ }
+
+ /* Store physical address and set use count */
+ info->dma_addr = phys;
+ refcount_set(&info->use_count, 1);
+out:
+ *page_ptr = p->page;
+ /* Return an internal reference for the caller */
+ refcount_inc(&info->use_count);
+ ret = p->pos;
+ p->pos += len;
+
+ return ret;
+}
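
The per-CPU TSO page allocator above relies on a fixed page layout: header allocations grow from the start of the page (p->pos advances), while the bookkeeping info - chain pointer, DMA address, refcount - sits at the very end, outside the data area covered by IWL_TSO_PAGE_DATA_SIZE. A hedged model of that layout, with stand-in types for the driver's iwl_tso_page_info and IWL_TSO_PAGE_INFO():

#include <stdint.h>

struct ex_tso_page_info {
	void *next;        /* page chaining pointer */
	uint64_t dma_addr; /* mapping of the whole page */
	int use_count;     /* a refcount_t in the driver */
};

/* the data area is everything before the trailing info struct */
#define EX_PAGE_SIZE 4096 /* assumed page size */
#define EX_TSO_PAGE_DATA_SIZE \
	(EX_PAGE_SIZE - sizeof(struct ex_tso_page_info))
#define EX_TSO_PAGE_INFO(page_addr) \
	((struct ex_tso_page_info *)((char *)(page_addr) + \
				     EX_TSO_PAGE_DATA_SIZE))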
+
+/**
+ * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
+ * @sgt: scatter gather table
+ * @offset: Offset into the mapped memory (i.e. SKB payload data)
+ * @len: Length of the area
+ *
+ * Find the DMA address that corresponds to the SKB payload data at the
+ * position given by @offset.
+ *
+ * Returns: Address for TB entry
+ */
+dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
+ unsigned int len)
+{
+ struct scatterlist *sg;
+ unsigned int sg_offset = 0;
+ int i;
+
+ /*
+ * Search the mapped DMA areas in the SG for the area that contains the
+ * data at offset with the given length.
+ */
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ if (offset >= sg_offset &&
+ offset + len <= sg_offset + sg_dma_len(sg))
+ return sg_dma_address(sg) + offset - sg_offset;
+
+ sg_offset += sg_dma_len(sg);
+ }
+
+ WARN_ON_ONCE(1);
+
+ return DMA_MAPPING_ERROR;
+}
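
For reference, the lookup above is a linear scan over the mapped segments, accumulating their lengths until the [offset, offset + len) window falls entirely inside one segment. A self-contained model with stand-in types (not the kernel scatterlist API):

#include <stdint.h>

struct ex_dma_seg { uint64_t dma_addr; unsigned int dma_len; };

static uint64_t ex_sgt_tb_phys(const struct ex_dma_seg *segs, int nsegs,
			       unsigned int offset, unsigned int len)
{
	unsigned int seg_off = 0;
	int i;

	for (i = 0; i < nsegs; i++) {
		if (offset >= seg_off &&
		    offset + len <= seg_off + segs[i].dma_len)
			return segs[i].dma_addr + offset - seg_off;
		seg_off += segs[i].dma_len;
	}
	return UINT64_MAX; /* stand-in for DMA_MAPPING_ERROR */
}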
+
+/**
+ * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending
+ * @trans: transport private data
+ * @skb: the SKB to map
+ * @cmd_meta: command meta to store the scatter list information for unmapping
+ * @hdr: output argument for TSO headers
+ * @hdr_room: requested length for TSO headers
+ *
+ * Allocate space for a scatter gather list and TSO headers and map the SKB
+ * using the scatter gather list. The SKB is unmapped again when the page
+ * is freed at the end of the operation.
+ *
+ * Returns: newly allocated and mapped scatter gather table with list
+ */
+struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
+ struct iwl_cmd_meta *cmd_meta,
+ u8 **hdr, unsigned int hdr_room)
+{
+ struct sg_table *sgt;
+
+ if (WARN_ON_ONCE(skb_has_frag_list(skb)))
+ return NULL;
+
+ *hdr = iwl_pcie_get_page_hdr(trans,
+ hdr_room + __alignof__(struct sg_table) +
+ sizeof(struct sg_table) +
+ (skb_shinfo(skb)->nr_frags + 1) *
+ sizeof(struct scatterlist),
+ skb);
+ if (!*hdr)
+ return NULL;
+
+ sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
+ sgt->sgl = (void *)(sgt + 1);
+
+ sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
+
+ /* Only map the data, not the header (it is copied to the TSO page) */
+ sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
+ skb->data_len);
+ if (WARN_ON_ONCE(sgt->orig_nents <= 0))
+ return NULL;
+
+ /* And map the entire SKB */
+ if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
+ return NULL;
+
+ /* Store non-zero (i.e. valid) offset for unmapping */
+ cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK;
+
+ return sgt;
+}
+
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_txq *txq, u8 hdr_len,
struct iwl_cmd_meta *out_meta,
struct iwl_device_tx_cmd *dev_cmd,
u16 tb1_len)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
unsigned int mss = skb_shinfo(skb)->gso_size;
+ unsigned int data_offset = 0;
u16 length, iv_len, amsdu_pad;
- u8 *start_hdr;
- struct iwl_tso_hdr_page *hdr_page;
+ dma_addr_t start_hdr_phys;
+ u8 *start_hdr, *pos_hdr;
+ struct sg_table *sgt;
struct tso_t tso;
/* if the packet is protected, then it must be CCMP or GCMP */
@@ -1328,7 +1924,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx(trans->dev, skb,
iwl_txq_get_tfd(trans, txq, txq->write_ptr),
- trans->txqs.tfd.size,
+ trans_pcie->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
ip_hdrlen = skb_network_header_len(skb);
@@ -1341,13 +1937,14 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
/* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room, skb);
- if (!hdr_page)
+ sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
+ if (!sgt)
return -ENOMEM;
- start_hdr = hdr_page->pos;
- memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
- hdr_page->pos += iv_len;
+ start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
+ pos_hdr = start_hdr;
+ memcpy(pos_hdr, skb->data + hdr_len, iv_len);
+ pos_hdr += iv_len;
/*
* Pull the ieee80211 header + IV to be able to use TSO core,
@@ -1370,45 +1967,43 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
min_t(unsigned int, mss, total_len);
unsigned int hdr_tb_len;
dma_addr_t hdr_tb_phys;
- u8 *subf_hdrs_start = hdr_page->pos;
+ u8 *subf_hdrs_start = pos_hdr;
total_len -= data_left;
- memset(hdr_page->pos, 0, amsdu_pad);
- hdr_page->pos += amsdu_pad;
+ memset(pos_hdr, 0, amsdu_pad);
+ pos_hdr += amsdu_pad;
amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
data_left)) & 0x3;
- ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
- hdr_page->pos += ETH_ALEN;
- ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
- hdr_page->pos += ETH_ALEN;
+ ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
+ pos_hdr += ETH_ALEN;
+ ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
+ pos_hdr += ETH_ALEN;
length = snap_ip_tcp_hdrlen + data_left;
- *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
- hdr_page->pos += sizeof(length);
+ *((__be16 *)pos_hdr) = cpu_to_be16(length);
+ pos_hdr += sizeof(length);
/*
* This will copy the SNAP as well which will be considered
* as MAC header.
*/
- tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
+ tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
- hdr_page->pos += snap_ip_tcp_hdrlen;
+ pos_hdr += snap_ip_tcp_hdrlen;
+
+ hdr_tb_len = pos_hdr - start_hdr;
+ hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
- hdr_tb_len = hdr_page->pos - start_hdr;
- hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
- hdr_tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys)))
- return -EINVAL;
iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
hdr_tb_len, false);
trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
hdr_tb_phys, hdr_tb_len);
/* add this subframe's headers' length to the tx_cmd */
- le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
+ le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start);
/* prepare the start_hdr for the next subframe */
- start_hdr = hdr_page->pos;
+ start_hdr = pos_hdr;
/* put the payload */
while (data_left) {
@@ -1416,9 +2011,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
data_left);
dma_addr_t tb_phys;
- tb_phys = dma_map_single(trans->dev, tso.data,
- size, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+ tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size);
+ /* Not a real mapping error from the DMA API, so compare directly */
+ if (unlikely(tb_phys == DMA_MAPPING_ERROR))
return -EINVAL;
iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
@@ -1427,10 +2022,14 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
tb_phys, size);
data_left -= size;
+ data_offset += size;
tso_build_data(skb, &tso, size);
}
}
+ dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
+ DMA_TO_DEVICE);
+
+ /* re-add the WiFi header and IV */
skb_push(skb, hdr_len + iv_len);
@@ -1450,9 +2049,61 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
}
#endif /* CONFIG_INET */
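
[Editorial note: for readability, the on-the-wire layout each iteration of the subframe loop emits, derived from the copies in the loop above (a descriptive note, not added driver code):]

/*
 * Each A-MSDU subframe built above lays out as:
 *
 *   | pad (0-3) | DA (6) | SA (6) | len (2) | SNAP+IP+TCP hdrs | payload |
 *
 * amsdu_pad keeps every subframe 4-byte aligned; 'len' covers the
 * SNAP/IP/TCP headers plus this subframe's payload (snap_ip_tcp_hdrlen +
 * data_left in the code).
 */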
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
+/*
+ * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq, u16 byte_cnt,
+ int num_tbs)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+ int write_ptr = txq->write_ptr;
+ int txq_id = txq->id;
+ u8 sec_ctl = 0;
+ u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+ u8 sta_id = tx_cmd->sta_id;
+
+ scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+
+ sec_ctl = tx_cmd->sec_ctl;
+
+ switch (sec_ctl & TX_CMD_SEC_MSK) {
+ case TX_CMD_SEC_CCM:
+ len += IEEE80211_CCMP_MIC_LEN;
+ break;
+ case TX_CMD_SEC_TKIP:
+ len += IEEE80211_TKIP_ICV_LEN;
+ break;
+ case TX_CMD_SEC_WEP:
+ len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
+ break;
+ }
+ if (trans_pcie->txqs.bc_table_dword)
+ len = DIV_ROUND_UP(len, 4);
+
+ if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
+ return;
+
+ bc_ent = cpu_to_le16(len | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+ bc_ent;
+}
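
[Editorial note: a hypothetical helper mirroring the packing done above; the low 12 bits carry the length (in dwords when bc_table_dword is set), the high 4 bits the station ID — hence the WARN_ON(len > 0xFFF) bound:]

static inline __le16 iwl_bc_entry(u16 len, u8 sta_id, bool bc_table_dword)
{
	if (bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	WARN_ON(len > 0xFFF);	/* must fit in the 12-bit field */

	return cpu_to_le16(len | ((u16)sta_id << 12));
}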
+
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr;
struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
@@ -1467,14 +2118,14 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
u16 wifi_seq;
bool amsdu;
- txq = trans->txqs.txq[txq_id];
+ txq = trans_pcie->txqs.txq[txq_id];
- if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
"TX on unused queue %d\n", txq_id))
return -EINVAL;
if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
+ skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
__skb_linearize(skb))
return -ENOMEM;
@@ -1495,7 +2146,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_tx_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans->txqs.dev_cmd_offs);
+ trans_pcie->txqs.dev_cmd_offs);
*dev_cmd_ptr = dev_cmd;
__skb_queue_tail(&txq->overflow_q, skb);
@@ -1533,7 +2184,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
/* Set up first empty entry in queue's array of Tx/cmd buffers */
out_meta = &txq->entries[txq->write_ptr].meta;
- out_meta->flags = 0;
+ memset(out_meta, 0, sizeof(*out_meta));
/*
* The second TB (tb1) points to the remainder of the TX command
@@ -1578,7 +2229,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
trace_iwlwifi_dev_tx(trans->dev, skb,
iwl_txq_get_tfd(trans, txq, txq->write_ptr),
- trans->txqs.tfd.size,
+ trans_pcie->txqs.tfd.size,
&dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
hdr_len);
@@ -1613,8 +2264,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
/* Set up entry for this TFD in Tx byte-count array */
iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
- iwl_txq_gen1_tfd_get_num_tbs(trans,
- tfd));
+ iwl_txq_gen1_tfd_get_num_tbs(tfd));
wait_write_ptr = ieee80211_has_morefrags(fc);
@@ -1649,3 +2299,379 @@ out_err:
spin_unlock(&txq->lock);
return -1;
}
+
+static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
+ struct iwl_txq *txq,
+ int read_ptr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+ int txq_id = txq->id;
+ u8 sta_id = 0;
+ __le16 bc_ent;
+ struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+ struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
+
+ WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ if (txq_id != trans_pcie->txqs.cmd.q_id)
+ sta_id = tx_cmd->sta_id;
+
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
+
+ scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
+
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
+ bc_ent;
+}
+
+/* Frees buffers until index _not_ inclusive */
+void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+ struct sk_buff_head *skbs, bool is_flush)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+ int tfd_num, read_ptr, last_to_free;
+ int txq_read_ptr, txq_write_ptr;
+
+ /* This function is not meant to release the cmd queue */
+ if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id))
+ return;
+
+ if (WARN_ON(!txq))
+ return;
+
+ tfd_num = iwl_txq_get_cmd_index(txq, ssn);
+
+ spin_lock_bh(&txq->reclaim_lock);
+
+ spin_lock(&txq->lock);
+ txq_read_ptr = txq->read_ptr;
+ txq_write_ptr = txq->write_ptr;
+ spin_unlock(&txq->lock);
+
+ read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr);
+
+ if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) {
+ IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
+ txq_id, ssn);
+ goto out;
+ }
+
+ if (read_ptr == tfd_num)
+ goto out;
+
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
+ txq_id, read_ptr, txq_read_ptr, tfd_num, ssn);
+
+ /* Since we free until index _not_ inclusive, the one before index is
+ * the last we will free. This one must be used.
+ */
+ last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
+
+ if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) {
+ IWL_ERR(trans,
+ "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
+ __func__, txq_id, last_to_free,
+ trans->trans_cfg->base_params->max_tfd_queue_size,
+ txq_write_ptr, txq_read_ptr);
+
+ iwl_op_mode_time_point(trans->op_mode,
+ IWL_FW_INI_TIME_POINT_FAKE_TX,
+ NULL);
+ goto out;
+ }
+
+ if (WARN_ON(!skb_queue_empty(skbs)))
+ goto out;
+
+ for (;
+ read_ptr != tfd_num;
+ txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr),
+ read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) {
+ struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta;
+ struct sk_buff *skb = txq->entries[read_ptr].skb;
+
+ if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
+ read_ptr, txq_read_ptr, txq_id))
+ continue;
+
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
+
+ __skb_queue_tail(skbs, skb);
+
+ txq->entries[read_ptr].skb = NULL;
+
+ if (!trans->trans_cfg->gen2)
+ iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
+ txq_read_ptr);
+
+ iwl_txq_free_tfd(trans, txq, txq_read_ptr);
+ }
+
+ spin_lock(&txq->lock);
+ txq->read_ptr = txq_read_ptr;
+
+ iwl_txq_progress(txq);
+
+ if (iwl_txq_space(trans, txq) > txq->low_mark &&
+ test_bit(txq_id, trans_pcie->txqs.queue_stopped)) {
+ struct sk_buff_head overflow_skbs;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&overflow_skbs);
+ skb_queue_splice_init(&txq->overflow_q,
+ is_flush ? skbs : &overflow_skbs);
+
+ /*
+ * We are going to transmit from the overflow queue.
+ * Remember this state so that wait_for_txq_empty will know we
+ * are adding more packets to the TFD queue. It cannot rely on
+ * the state of &txq->overflow_q, as we just emptied it, but
+ * haven't TXed the content yet.
+ */
+ txq->overflow_tx = true;
+
+ /*
+ * This is tricky: we are in the reclaim path and hold
+ * reclaim_lock, so no one will try to access the txq data
+ * from that path. TX is stopped, so the TX path cannot race
+ * with us either. Bottom line: we can unlock and re-lock
+ * later.
+ */
+ spin_unlock(&txq->lock);
+
+ while ((skb = __skb_dequeue(&overflow_skbs))) {
+ struct iwl_device_tx_cmd *dev_cmd_ptr;
+
+ dev_cmd_ptr = *(void **)((u8 *)skb->cb +
+ trans_pcie->txqs.dev_cmd_offs);
+
+ /*
+ * Note that we can very well be overflowing again.
+ * In that case, iwl_txq_space will be small again
+ * and we won't wake mac80211's queue.
+ */
+ iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
+ }
+
+ if (iwl_txq_space(trans, txq) > txq->low_mark)
+ iwl_trans_pcie_wake_queue(trans, txq);
+
+ spin_lock(&txq->lock);
+ txq->overflow_tx = false;
+ }
+
+ spin_unlock(&txq->lock);
+out:
+ spin_unlock_bh(&txq->reclaim_lock);
+}
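
[Editorial note: a hypothetical caller-side sketch of the reclaim contract — frames up to, but not including, ssn are released into the caller's list. Names are illustrative; real op-mode code also reports TX status before freeing:]

static void example_reclaim_acked(struct iwl_trans *trans, int txq_id, int ssn)
{
	struct sk_buff_head reclaimed;
	struct sk_buff *skb;

	__skb_queue_head_init(&reclaimed);

	/* frees [read_ptr, ssn) and collects the skbs for us */
	iwl_pcie_reclaim(trans, txq_id, ssn, &reclaimed, false);

	while ((skb = __skb_dequeue(&reclaimed)))
		consume_skb(skb);
}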
+
+/* Set wr_ptr (and rd_ptr) of a specific txq */
+void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+
+ spin_lock_bh(&txq->lock);
+
+ txq->write_ptr = ptr;
+ txq->read_ptr = txq->write_ptr;
+
+ spin_unlock_bh(&txq->lock);
+}
+
+void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
+ unsigned long txqs, bool freeze)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ int queue;
+
+ for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
+ struct iwl_txq *txq = trans_pcie->txqs.txq[queue];
+ unsigned long now;
+
+ spin_lock_bh(&txq->lock);
+
+ now = jiffies;
+
+ if (txq->frozen == freeze)
+ goto next_queue;
+
+ IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
+ freeze ? "Freezing" : "Waking", queue);
+
+ txq->frozen = freeze;
+
+ if (txq->read_ptr == txq->write_ptr)
+ goto next_queue;
+
+ if (freeze) {
+ if (unlikely(time_after(now,
+ txq->stuck_timer.expires))) {
+ /*
+ * The timer should have fired, maybe it is
+ * spinning right now on the lock.
+ */
+ goto next_queue;
+ }
+ /* remember how long until the timer fires */
+ txq->frozen_expiry_remainder =
+ txq->stuck_timer.expires - now;
+ del_timer(&txq->stuck_timer);
+ goto next_queue;
+ }
+
+ /*
+ * Wake a non-empty queue -> arm the timer with the
+ * remainder saved when it froze
+ */
+ mod_timer(&txq->stuck_timer,
+ now + txq->frozen_expiry_remainder);
+
+next_queue:
+ spin_unlock_bh(&txq->lock);
+ }
+}
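
[Editorial note: the freeze/wake bookkeeping above reduces to two lines of arithmetic, restated here for clarity (not part of the patch):]

/* on freeze: save how much watchdog time was left */
txq->frozen_expiry_remainder = txq->stuck_timer.expires - jiffies;

/* on wake: re-arm with exactly that remainder, so the queue neither
 * loses nor gains watchdog time while frozen
 */
mod_timer(&txq->stuck_timer, jiffies + txq->frozen_expiry_remainder);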
+
+#define HOST_COMPLETE_TIMEOUT (2 * HZ)
+
+static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
+ int cmd_idx;
+ int ret;
+
+ IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
+
+ if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ "Command %s: a command is already active!\n", cmd_str))
+ return -EIO;
+
+ IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
+
+ if (trans->trans_cfg->gen2)
+ cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+ else
+ cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
+
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ cmd_str, ret);
+ return ret;
+ }
+
+ ret = wait_event_timeout(trans->wait_command_queue,
+ !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
+ cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
+ txq->read_ptr, txq->write_ptr);
+
+ clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
+ IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
+ cmd_str);
+ ret = -ETIMEDOUT;
+
+ iwl_trans_sync_nmi(trans);
+ goto cancel;
+ }
+
+ if (test_bit(STATUS_FW_ERROR, &trans->status)) {
+ if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
+ &trans->status)) {
+ IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
+ dump_stack();
+ }
+ ret = -EIO;
+ goto cancel;
+ }
+
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
+ ret = -ERFKILL;
+ goto cancel;
+ }
+
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
+ IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
+ ret = -EIO;
+ goto cancel;
+ }
+
+ return 0;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise, if the response comes
+ * in later, it could write to an invalid
+ * address (cmd->meta.source).
+ */
+ txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
+ }
+
+ if (cmd->resp_pkt) {
+ iwl_free_resp(cmd);
+ cmd->resp_pkt = NULL;
+ }
+
+ return ret;
+}
+
+int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
+ struct iwl_host_cmd *cmd)
+{
+ /* Make sure the NIC is still alive in the bus */
+ if (test_bit(STATUS_TRANS_DEAD, &trans->status))
+ return -ENODEV;
+
+ if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
+ test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
+ IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
+ cmd->id);
+ return -ERFKILL;
+ }
+
+ if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
+ !(cmd->flags & CMD_SEND_IN_D3))) {
+ IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
+ return -EHOSTDOWN;
+ }
+
+ if (cmd->flags & CMD_ASYNC) {
+ int ret;
+
+ /* An asynchronous command cannot expect an SKB to be set. */
+ if (WARN_ON(cmd->flags & CMD_WANT_SKB))
+ return -EINVAL;
+
+ if (trans->trans_cfg->gen2)
+ ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
+ else
+ ret = iwl_pcie_enqueue_hcmd(trans, cmd);
+
+ if (ret < 0) {
+ IWL_ERR(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
+ iwl_get_cmd_string(trans, cmd->id), ret);
+ return ret;
+ }
+ return 0;
+ }
+
+ return iwl_trans_pcie_send_hcmd_sync(trans, cmd);
+}
+IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd);
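
[Editorial note: a hedged usage sketch of the exported entry point. ECHO_CMD is used purely as a placeholder command ID; real callers normally go through iwl_trans_send_cmd(), which dispatches here for PCIe transports:]

static int example_echo(struct iwl_trans *trans)
{
	struct iwl_host_cmd cmd = {
		.id = ECHO_CMD,
		.flags = CMD_WANT_SKB,	/* synchronous, response wanted */
	};
	int ret;

	ret = iwl_trans_pcie_send_hcmd(trans, &cmd);
	if (ret)
		return ret;

	/* ... inspect cmd.resp_pkt here ... */

	iwl_free_resp(&cmd);
	return 0;
}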
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c
deleted file mode 100644
index 6229c785c845..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c
+++ /dev/null
@@ -1,1900 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
-/*
- * Copyright (C) 2020-2024 Intel Corporation
- */
-#include <net/tso.h>
-#include <linux/tcp.h>
-
-#include "iwl-debug.h"
-#include "iwl-io.h"
-#include "fw/api/commands.h"
-#include "fw/api/tx.h"
-#include "fw/api/datapath.h"
-#include "fw/api/debug.h"
-#include "queue/tx.h"
-#include "iwl-fh.h"
-#include "iwl-scd.h"
-#include <linux/dmapool.h>
-
-/*
- * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array
- */
-static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
-{
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- u8 filled_tfd_size, num_fetch_chunks;
- u16 len = byte_cnt;
- __le16 bc_ent;
-
- if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
- return;
-
- filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
- num_tbs * sizeof(struct iwl_tfh_tb);
- /*
- * filled_tfd_size contains the number of filled bytes in the TFD.
- * Dividing it by 64 will give the number of chunks to fetch
- * to SRAM- 0 for one chunk, 1 for 2 and so on.
- * If, for example, TFD contains only 3 TBs then 32 bytes
- * of the TFD are used, and only one chunk of 64 bytes should
- * be fetched
- */
- num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
-
- /* Starting from AX210, the HW expects bytes */
- WARN_ON(trans->txqs.bc_table_dword);
- WARN_ON(len > 0x3FFF);
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
- scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
- } else {
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
-
- /* Before AX210, the HW expects DW */
- WARN_ON(!trans->txqs.bc_table_dword);
- len = DIV_ROUND_UP(len, 4);
- WARN_ON(len > 0xFFF);
- bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- scd_bc_tbl->tfd_offset[idx] = bc_ent;
- }
-}
-
-/*
- * iwl_txq_inc_wr_ptr - Send new write index to hardware
- */
-void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- lockdep_assert_held(&txq->lock);
-
- IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);
-
- /*
- * if not in power-save mode, uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
- */
- iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
-}
-
-static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd)
-{
- return le16_to_cpu(tfd->num_tbs) & 0x1f;
-}
-
-int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
- dma_addr_t addr, u16 len)
-{
- int idx = iwl_txq_gen2_get_num_tbs(trans, tfd);
- struct iwl_tfh_tb *tb;
-
- /* Only WARN here so we know about the issue, but we mess up our
- * unmap path because not every place currently checks for errors
- * returned from this function - it can only return an error if
- * there's no more space, and so when we know there is enough we
- * don't always check ...
- */
- WARN(iwl_txq_crosses_4g_boundary(addr, len),
- "possible DMA problem with iova:0x%llx, len:%d\n",
- (unsigned long long)addr, len);
-
- if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
- return -EINVAL;
- tb = &tfd->tbs[idx];
-
- /* Each TFD can point to a maximum max_tbs Tx buffers */
- if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Error can not send more than %d chunks\n",
- trans->txqs.tfd.max_tbs);
- return -EINVAL;
- }
-
- put_unaligned_le64(addr, &tb->addr);
- tb->tb_len = cpu_to_le16(len);
-
- tfd->num_tbs = cpu_to_le16(idx + 1);
-
- return idx;
-}
-
-static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd)
-{
- tfd->num_tbs = 0;
-
- iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma,
- trans->invalid_tx_cmd.size);
-}
-
-void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
- struct iwl_tfh_tfd *tfd)
-{
- int i, num_tbs;
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd);
-
- if (num_tbs > trans->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
- return;
- }
-
- /* first TB is never freed - it's the bidirectional DMA data */
- for (i = 1; i < num_tbs; i++) {
- if (meta->tbs & BIT(i))
- dma_unmap_page(trans->dev,
- le64_to_cpu(tfd->tbs[i].addr),
- le16_to_cpu(tfd->tbs[i].tb_len),
- DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev,
- le64_to_cpu(tfd->tbs[i].addr),
- le16_to_cpu(tfd->tbs[i].tb_len),
- DMA_TO_DEVICE);
- }
-
- iwl_txq_set_tfd_invalid_gen2(trans, tfd);
-}
-
-void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
- * idx is bounded by n_window
- */
- int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
- struct sk_buff *skb;
-
- lockdep_assert_held(&txq->lock);
-
- if (!txq->entries)
- return;
-
- iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
- iwl_txq_get_tfd(trans, txq, idx));
-
- skb = txq->entries[idx].skb;
-
- /* Can be called from irqs-disabled context
- * If skb is not NULL, it means that the whole queue is being
- * freed and that the queue is not empty - free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
-}
-
-static struct page *get_workaround_page(struct iwl_trans *trans,
- struct sk_buff *skb)
-{
- struct page **page_ptr;
- struct page *ret;
-
- page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
-
- ret = alloc_page(GFP_ATOMIC);
- if (!ret)
- return NULL;
-
- /* set the chaining pointer to the previous page if there */
- *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr;
- *page_ptr = ret;
-
- return ret;
-}
-
-/*
- * Add a TB and if needed apply the FH HW bug workaround;
- * meta != NULL indicates that it's a page mapping and we
- * need to dma_unmap_page() and set the meta->tbs bit in
- * this case.
- */
-static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd,
- dma_addr_t phys, void *virt,
- u16 len, struct iwl_cmd_meta *meta)
-{
- dma_addr_t oldphys = phys;
- struct page *page;
- int ret;
-
- if (unlikely(dma_mapping_error(trans->dev, phys)))
- return -ENOMEM;
-
- if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
- ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
-
- if (ret < 0)
- goto unmap;
-
- if (meta)
- meta->tbs |= BIT(ret);
-
- ret = 0;
- goto trace;
- }
-
- /*
- * Work around a hardware bug. If (as expressed in the
- * condition above) the TB ends on a 32-bit boundary,
- * then the next TB may be accessed with the wrong
- * address.
- * To work around it, copy the data elsewhere and make
- * a new mapping for it so the device will not fail.
- */
-
- if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) {
- ret = -ENOBUFS;
- goto unmap;
- }
-
- page = get_workaround_page(trans, skb);
- if (!page) {
- ret = -ENOMEM;
- goto unmap;
- }
-
- memcpy(page_address(page), virt, len);
-
- phys = dma_map_single(trans->dev, page_address(page), len,
- DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, phys)))
- return -ENOMEM;
- ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
- if (ret < 0) {
- /* unmap the new allocation as single */
- oldphys = phys;
- meta = NULL;
- goto unmap;
- }
- IWL_DEBUG_TX(trans,
- "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
- len, (unsigned long long)oldphys,
- (unsigned long long)phys);
-
- ret = 0;
-unmap:
- if (meta)
- dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
-trace:
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);
-
- return ret;
-}
-
-#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
- struct sk_buff *skb)
-{
- struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page);
- struct page **page_ptr;
-
- page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
-
- if (WARN_ON(*page_ptr))
- return NULL;
-
- if (!p->page)
- goto alloc;
-
- /*
- * Check if there's enough room on this page
- *
- * Note that we put a page chaining pointer *last* in the
- * page - we need it somewhere, and if it's there then we
- * avoid DMA mapping the last bits of the page which may
- * trigger the 32-bit boundary hardware bug.
- *
- * (see also get_workaround_page() in tx-gen2.c)
- */
- if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
- sizeof(void *))
- goto out;
-
- /* We don't have enough room on this page, get a new one. */
- __free_page(p->page);
-
-alloc:
- p->page = alloc_page(GFP_ATOMIC);
- if (!p->page)
- return NULL;
- p->pos = page_address(p->page);
- /* set the chaining pointer to NULL */
- *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
-out:
- *page_ptr = p->page;
- get_page(p->page);
- return p;
-}
-#endif
-
-static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd, int start_len,
- u8 hdr_len,
- struct iwl_device_tx_cmd *dev_cmd)
-{
-#ifdef CONFIG_INET
- struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
- struct ieee80211_hdr *hdr = (void *)skb->data;
- unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
- unsigned int mss = skb_shinfo(skb)->gso_size;
- u16 length, amsdu_pad;
- u8 *start_hdr;
- struct iwl_tso_hdr_page *hdr_page;
- struct tso_t tso;
-
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
- &dev_cmd->hdr, start_len, 0);
-
- ip_hdrlen = skb_network_header_len(skb);
- snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
- total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
- amsdu_pad = 0;
-
- /* total amount of header we may need for this A-MSDU */
- hdr_room = DIV_ROUND_UP(total_len, mss) *
- (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));
-
- /* Our device supports 9 segments at most, it will fit in 1 page */
- hdr_page = get_page_hdr(trans, hdr_room, skb);
- if (!hdr_page)
- return -ENOMEM;
-
- start_hdr = hdr_page->pos;
-
- /*
- * Pull the ieee80211 header to be able to use TSO core,
- * we will restore it for the tx_status flow.
- */
- skb_pull(skb, hdr_len);
-
- /*
- * Remove the length of all the headers that we don't actually
- * have in the MPDU by themselves, but that we duplicate into
- * all the different MSDUs inside the A-MSDU.
- */
- le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
-
- tso_start(skb, &tso);
-
- while (total_len) {
- /* this is the data left for this subframe */
- unsigned int data_left = min_t(unsigned int, mss, total_len);
- unsigned int tb_len;
- dma_addr_t tb_phys;
- u8 *subf_hdrs_start = hdr_page->pos;
-
- total_len -= data_left;
-
- memset(hdr_page->pos, 0, amsdu_pad);
- hdr_page->pos += amsdu_pad;
- amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
- data_left)) & 0x3;
- ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
- hdr_page->pos += ETH_ALEN;
- ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
- hdr_page->pos += ETH_ALEN;
-
- length = snap_ip_tcp_hdrlen + data_left;
- *((__be16 *)hdr_page->pos) = cpu_to_be16(length);
- hdr_page->pos += sizeof(length);
-
- /*
- * This will copy the SNAP as well which will be considered
- * as MAC header.
- */
- tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
-
- hdr_page->pos += snap_ip_tcp_hdrlen;
-
- tb_len = hdr_page->pos - start_hdr;
- tb_phys = dma_map_single(trans->dev, start_hdr,
- tb_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa, this is from the TSO page and
- * we leave some space at the end of it so can't hit
- * the buggy scenario.
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
- trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
- tb_phys, tb_len);
- /* add this subframe's headers' length to the tx_cmd */
- le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
-
- /* prepare the start_hdr for the next subframe */
- start_hdr = hdr_page->pos;
-
- /* put the payload */
- while (data_left) {
- int ret;
-
- tb_len = min_t(unsigned int, tso.size, data_left);
- tb_phys = dma_map_single(trans->dev, tso.data,
- tb_len, DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
- tb_phys, tso.data,
- tb_len, NULL);
- if (ret)
- goto out_err;
-
- data_left -= tb_len;
- tso_build_data(skb, &tso, tb_len);
- }
- }
-
- /* re -add the WiFi header */
- skb_push(skb, hdr_len);
-
- return 0;
-
-out_err:
-#endif
- return -EINVAL;
-}
-
-static struct
-iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta,
- int hdr_len,
- int tx_cmd_len)
-{
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
- dma_addr_t tb_phys;
- int len;
- void *tb1_addr;
-
- tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
-
- /*
- * No need for _with_wa, the first TB allocation is aligned up
- * to a 64-byte boundary and thus can't be at the end or cross
- * a page boundary (much less a 2^32 boundary).
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
- /*
- * The second TB (tb1) points to the remainder of the TX command
- * and the 802.11 header - dword aligned size
- * (This calculation modifies the TX command, so do it before the
- * setup of the first TB)
- */
- len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
- IWL_FIRST_TB_SIZE;
-
- /* do not align A-MSDU to dword as the subframe header aligns it */
-
- /* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
- tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa(), we ensure (via alignment) that the data
- * here can never cross or end at a page boundary.
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);
-
- if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE,
- hdr_len, dev_cmd))
- goto out_err;
-
- /* building the A-MSDU might have changed this data, memcpy it now */
- memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
- return tfd;
-
-out_err:
- iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
- return NULL;
-}
-
-static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
- struct sk_buff *skb,
- struct iwl_tfh_tfd *tfd,
- struct iwl_cmd_meta *out_meta)
-{
- int i;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- dma_addr_t tb_phys;
- unsigned int fragsz = skb_frag_size(frag);
- int ret;
-
- if (!fragsz)
- continue;
-
- tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
- fragsz, DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- skb_frag_address(frag),
- fragsz, out_meta);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static struct
-iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta,
- int hdr_len,
- int tx_cmd_len,
- bool pad)
-{
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
- dma_addr_t tb_phys;
- int len, tb1_len, tb2_len;
- void *tb1_addr;
- struct sk_buff *frag;
-
- tb_phys = iwl_txq_get_first_tb_dma(txq, idx);
-
- /* The first TB points to bi-directional DMA data */
- memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
-
- /*
- * No need for _with_wa, the first TB allocation is aligned up
- * to a 64-byte boundary and thus can't be at the end or cross
- * a page boundary (much less a 2^32 boundary).
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
-
- /*
- * The second TB (tb1) points to the remainder of the TX command
- * and the 802.11 header - dword aligned size
- * (This calculation modifies the TX command, so do it before the
- * setup of the first TB)
- */
- len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
- IWL_FIRST_TB_SIZE;
-
- if (pad)
- tb1_len = ALIGN(len, 4);
- else
- tb1_len = len;
-
- /* map the data for TB1 */
- tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
- tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
- goto out_err;
- /*
- * No need for _with_wa(), we ensure (via alignment) that the data
- * here can never cross or end at a page boundary.
- */
- iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
- trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
- IWL_FIRST_TB_SIZE + tb1_len, hdr_len);
-
- /* set up TFD's third entry to point to remainder of skb's head */
- tb2_len = skb_headlen(skb) - hdr_len;
-
- if (tb2_len > 0) {
- int ret;
-
- tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
- tb2_len, DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- skb->data + hdr_len, tb2_len,
- NULL);
- if (ret)
- goto out_err;
- }
-
- if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
- goto out_err;
-
- skb_walk_frags(skb, frag) {
- int ret;
-
- tb_phys = dma_map_single(trans->dev, frag->data,
- skb_headlen(frag), DMA_TO_DEVICE);
- ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
- frag->data,
- skb_headlen(frag), NULL);
- if (ret)
- goto out_err;
- if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
- goto out_err;
- }
-
- return tfd;
-
-out_err:
- iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
- return NULL;
-}
-
-static
-struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq,
- struct iwl_device_tx_cmd *dev_cmd,
- struct sk_buff *skb,
- struct iwl_cmd_meta *out_meta)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
- struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
- int len, hdr_len;
- bool amsdu;
-
- /* There must be data left over for TB1 or this code must be changed */
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd_gen2, dram_info) >
- IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE);
- BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
- offsetofend(struct iwl_tx_cmd_gen3, dram_info) >
- IWL_FIRST_TB_SIZE);
-
- memset(tfd, 0, sizeof(*tfd));
-
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- len = sizeof(struct iwl_tx_cmd_gen2);
- else
- len = sizeof(struct iwl_tx_cmd_gen3);
-
- amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
- (*ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_A_MSDU_PRESENT);
-
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
- /*
- * Only build A-MSDUs here if doing so by GSO, otherwise it may be
- * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
- * built in the higher layers already.
- */
- if (amsdu && skb_shinfo(skb)->gso_size)
- return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
- out_meta, hdr_len, len);
- return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
- hdr_len, len, !amsdu);
-}
-
-int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
-{
- unsigned int max;
- unsigned int used;
-
- /*
- * To avoid ambiguity between empty and completely full queues, there
- * should always be less than max_tfd_queue_size elements in the queue.
- * If q->n_window is smaller than max_tfd_queue_size, there is no need
- * to reserve any queue entries for this purpose.
- */
- if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
- max = q->n_window;
- else
- max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
-
- /*
- * max_tfd_queue_size is a power of 2, so the following is equivalent to
- * modulo by max_tfd_queue_size and is well defined.
- */
- used = (q->write_ptr - q->read_ptr) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-
- if (WARN_ON(used > max))
- return 0;
-
- return max - used;
-}
-
-int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id)
-{
- struct iwl_cmd_meta *out_meta;
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
- u16 cmd_len;
- int idx;
- void *tfd;
-
- if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", txq_id))
- return -EINVAL;
-
- if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
- "TX on unused queue %d\n", txq_id))
- return -EINVAL;
-
- if (skb_is_nonlinear(skb) &&
- skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
- __skb_linearize(skb))
- return -ENOMEM;
-
- spin_lock(&txq->lock);
-
- if (iwl_txq_space(trans, txq) < txq->high_mark) {
- iwl_txq_stop(trans, txq);
-
- /* don't put the packet on the ring, if there is no room */
- if (unlikely(iwl_txq_space(trans, txq) < 3)) {
- struct iwl_device_tx_cmd **dev_cmd_ptr;
-
- dev_cmd_ptr = (void *)((u8 *)skb->cb +
- trans->txqs.dev_cmd_offs);
-
- *dev_cmd_ptr = dev_cmd;
- __skb_queue_tail(&txq->overflow_q, skb);
- spin_unlock(&txq->lock);
- return 0;
- }
- }
-
- idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
-
- /* Set up driver data for this TFD */
- txq->entries[idx].skb = skb;
- txq->entries[idx].cmd = dev_cmd;
-
- dev_cmd->hdr.sequence =
- cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(idx)));
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_meta = &txq->entries[idx].meta;
- out_meta->flags = 0;
-
- tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
- if (!tfd) {
- spin_unlock(&txq->lock);
- return -1;
- }
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd_gen3->len);
- } else {
- struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
- (void *)dev_cmd->payload;
-
- cmd_len = le16_to_cpu(tx_cmd_gen2->len);
- }
-
- /* Set up entry for this TFD in Tx byte-count array */
- iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
- iwl_txq_gen2_get_num_tbs(trans, tfd));
-
- /* start timer if queue currently empty */
- if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
-
- /* Tell device the write index *just past* this latest filled TFD */
- txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
- iwl_txq_inc_wr_ptr(trans, txq);
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually.
- */
- spin_unlock(&txq->lock);
- return 0;
-}
-
-/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-
-/*
- * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
- */
-void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
-
- spin_lock_bh(&txq->lock);
- while (txq->write_ptr != txq->read_ptr) {
- IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
- txq_id, txq->read_ptr);
-
- if (txq_id != trans->txqs.cmd.q_id) {
- int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
- struct sk_buff *skb = txq->entries[idx].skb;
-
- if (!WARN_ON_ONCE(!skb))
- iwl_txq_free_tso_page(trans, skb);
- }
- iwl_txq_gen2_free_tfd(trans, txq);
- txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
- }
-
- while (!skb_queue_empty(&txq->overflow_q)) {
- struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
-
- iwl_op_mode_free_skb(trans->op_mode, skb);
- }
-
- spin_unlock_bh(&txq->lock);
-
- /* just in case - this queue may have been stopped */
- iwl_wake_queue(trans, txq);
-}
-
-static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- struct device *dev = trans->dev;
-
- /* De-alloc circular buffer of TFDs */
- if (txq->tfds) {
- dma_free_coherent(dev,
- trans->txqs.tfd.size * txq->n_window,
- txq->tfds, txq->dma_addr);
- dma_free_coherent(dev,
- sizeof(*txq->first_tb_bufs) * txq->n_window,
- txq->first_tb_bufs, txq->first_tb_dma);
- }
-
- kfree(txq->entries);
- if (txq->bc_tbl.addr)
- dma_pool_free(trans->txqs.bc_pool,
- txq->bc_tbl.addr, txq->bc_tbl.dma);
- kfree(txq);
-}
-
-/*
- * iwl_pcie_txq_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_txq *txq;
- int i;
-
- if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", txq_id))
- return;
-
- txq = trans->txqs.txq[txq_id];
-
- if (WARN_ON(!txq))
- return;
-
- iwl_txq_gen2_unmap(trans, txq_id);
-
- /* De-alloc array of command/tx buffers */
- if (txq_id == trans->txqs.cmd.q_id)
- for (i = 0; i < txq->n_window; i++) {
- kfree_sensitive(txq->entries[i].cmd);
- kfree_sensitive(txq->entries[i].free_buf);
- }
- del_timer_sync(&txq->stuck_timer);
-
- iwl_txq_gen2_free_memory(trans, txq);
-
- trans->txqs.txq[txq_id] = NULL;
-
- clear_bit(txq_id, trans->txqs.queue_used);
-}
-
-/*
- * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-static int iwl_queue_init(struct iwl_txq *q, int slots_num)
-{
- q->n_window = slots_num;
-
- /* slots_num must be power-of-two size, otherwise
- * iwl_txq_get_cmd_index is broken. */
- if (WARN_ON(!is_power_of_2(slots_num)))
- return -EINVAL;
-
- q->low_mark = q->n_window / 4;
- if (q->low_mark < 4)
- q->low_mark = 4;
-
- q->high_mark = q->n_window / 8;
- if (q->high_mark < 2)
- q->high_mark = 2;
-
- q->write_ptr = 0;
- q->read_ptr = 0;
-
- return 0;
-}
-
-int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
- bool cmd_queue)
-{
- int ret;
- u32 tfd_queue_max_size =
- trans->trans_cfg->base_params->max_tfd_queue_size;
-
- txq->need_update = false;
-
- /* max_tfd_queue_size must be power-of-two size, otherwise
- * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. */
- if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
- "Max tfd queue size must be a power of two, but is %d",
- tfd_queue_max_size))
- return -EINVAL;
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- ret = iwl_queue_init(txq, slots_num);
- if (ret)
- return ret;
-
- spin_lock_init(&txq->lock);
-
- if (cmd_queue) {
- static struct lock_class_key iwl_txq_cmd_queue_lock_class;
-
- lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
- }
-
- __skb_queue_head_init(&txq->overflow_q);
-
- return 0;
-}
-
-void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb)
-{
- struct page **page_ptr;
- struct page *next;
-
- page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs);
- next = *page_ptr;
- *page_ptr = NULL;
-
- while (next) {
- struct page *tmp = next;
-
- next = *(void **)((u8 *)page_address(next) + PAGE_SIZE -
- sizeof(void *));
- __free_page(tmp);
- }
-}
-
-void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- u32 txq_id = txq->id;
- u32 status;
- bool active;
- u8 fifo;
-
- if (trans->trans_cfg->gen2) {
- IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
- txq->read_ptr, txq->write_ptr);
- /* TODO: access new SCD registers and dump them */
- return;
- }
-
- status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
- fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
- active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
-
- IWL_ERR(trans,
- "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
- txq_id, active ? "" : "in", fifo,
- jiffies_to_msecs(txq->wd_timeout),
- txq->read_ptr, txq->write_ptr,
- iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
- iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
- iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
-}
-
-static void iwl_txq_stuck_timer(struct timer_list *t)
-{
- struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
- struct iwl_trans *trans = txq->trans;
-
- spin_lock(&txq->lock);
- /* check if triggered erroneously */
- if (txq->read_ptr == txq->write_ptr) {
- spin_unlock(&txq->lock);
- return;
- }
- spin_unlock(&txq->lock);
-
- iwl_txq_log_scd_error(trans, txq);
-
- iwl_force_nmi(trans);
-}
-
-static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
- struct iwl_tfd *tfd)
-{
- tfd->num_tbs = 0;
-
- iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma,
- trans->invalid_tx_cmd.size);
-}
-
-int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
- bool cmd_queue)
-{
- size_t num_entries = trans->trans_cfg->gen2 ?
- slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
- size_t tfd_sz;
- size_t tb0_buf_sz;
- int i;
-
- if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
- return -EINVAL;
-
- if (WARN_ON(txq->entries || txq->tfds))
- return -EINVAL;
-
- tfd_sz = trans->txqs.tfd.size * num_entries;
-
- timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
- txq->trans = trans;
-
- txq->n_window = slots_num;
-
- txq->entries = kcalloc(slots_num,
- sizeof(struct iwl_pcie_txq_entry),
- GFP_KERNEL);
-
- if (!txq->entries)
- goto error;
-
- if (cmd_queue)
- for (i = 0; i < slots_num; i++) {
- txq->entries[i].cmd =
- kmalloc(sizeof(struct iwl_device_cmd),
- GFP_KERNEL);
- if (!txq->entries[i].cmd)
- goto error;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
- &txq->dma_addr, GFP_KERNEL);
- if (!txq->tfds)
- goto error;
-
- BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
-
- tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
-
- txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
- &txq->first_tb_dma,
- GFP_KERNEL);
- if (!txq->first_tb_bufs)
- goto err_free_tfds;
-
- for (i = 0; i < num_entries; i++) {
- void *tfd = iwl_txq_get_tfd(trans, txq, i);
-
- if (trans->trans_cfg->gen2)
- iwl_txq_set_tfd_invalid_gen2(trans, tfd);
- else
- iwl_txq_set_tfd_invalid_gen1(trans, tfd);
- }
-
- return 0;
-err_free_tfds:
- dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
- txq->tfds = NULL;
-error:
- if (txq->entries && cmd_queue)
- for (i = 0; i < slots_num; i++)
- kfree(txq->entries[i].cmd);
- kfree(txq->entries);
- txq->entries = NULL;
-
- return -ENOMEM;
-}
-
-static struct iwl_txq *
-iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
-{
- size_t bc_tbl_size, bc_tbl_entries;
- struct iwl_txq *txq;
- int ret;
-
- WARN_ON(!trans->txqs.bc_tbl_size);
-
- bc_tbl_size = trans->txqs.bc_tbl_size;
- bc_tbl_entries = bc_tbl_size / sizeof(u16);
-
- if (WARN_ON(size > bc_tbl_entries))
- return ERR_PTR(-EINVAL);
-
- txq = kzalloc(sizeof(*txq), GFP_KERNEL);
- if (!txq)
- return ERR_PTR(-ENOMEM);
-
- txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL,
- &txq->bc_tbl.dma);
- if (!txq->bc_tbl.addr) {
- IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
- kfree(txq);
- return ERR_PTR(-ENOMEM);
- }
-
- ret = iwl_txq_alloc(trans, txq, size, false);
- if (ret) {
- IWL_ERR(trans, "Tx queue alloc failed\n");
- goto error;
- }
- ret = iwl_txq_init(trans, txq, size, false);
- if (ret) {
- IWL_ERR(trans, "Tx queue init failed\n");
- goto error;
- }
-
- txq->wd_timeout = msecs_to_jiffies(timeout);
-
- return txq;
-
-error:
- iwl_txq_gen2_free_memory(trans, txq);
- return ERR_PTR(ret);
-}
-
-static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq,
- struct iwl_host_cmd *hcmd)
-{
- struct iwl_tx_queue_cfg_rsp *rsp;
- int ret, qid;
- u32 wr_ptr;
-
- if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
- sizeof(*rsp))) {
- ret = -EINVAL;
- goto error_free_resp;
- }
-
- rsp = (void *)hcmd->resp_pkt->data;
- qid = le16_to_cpu(rsp->queue_number);
- wr_ptr = le16_to_cpu(rsp->write_pointer);
-
- if (qid >= ARRAY_SIZE(trans->txqs.txq)) {
- WARN_ONCE(1, "queue index %d unsupported", qid);
- ret = -EIO;
- goto error_free_resp;
- }
-
- if (test_and_set_bit(qid, trans->txqs.queue_used)) {
- WARN_ONCE(1, "queue %d already used", qid);
- ret = -EIO;
- goto error_free_resp;
- }
-
- if (WARN_ONCE(trans->txqs.txq[qid],
- "queue %d already allocated\n", qid)) {
- ret = -EIO;
- goto error_free_resp;
- }
-
- txq->id = qid;
- trans->txqs.txq[qid] = txq;
- wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-
- /* Place first TFD at index corresponding to start sequence number */
- txq->read_ptr = wr_ptr;
- txq->write_ptr = wr_ptr;
-
- IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);
-
- iwl_free_resp(hcmd);
- return qid;
-
-error_free_resp:
- iwl_free_resp(hcmd);
- iwl_txq_gen2_free_memory(trans, txq);
- return ret;
-}
-
-int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
- u8 tid, int size, unsigned int timeout)
-{
- struct iwl_txq *txq;
- union {
- struct iwl_tx_queue_cfg_cmd old;
- struct iwl_scd_queue_cfg_cmd new;
- } cmd;
- struct iwl_host_cmd hcmd = {
- .flags = CMD_WANT_SKB,
- };
- int ret;
-
- if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
- trans->hw_rev_step == SILICON_A_STEP)
- size = 4096;
-
- txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
- if (IS_ERR(txq))
- return PTR_ERR(txq);
-
- if (trans->txqs.queue_alloc_cmd_ver == 0) {
- memset(&cmd.old, 0, sizeof(cmd.old));
- cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
- cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
- cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
- cmd.old.tid = tid;
-
- if (hweight32(sta_mask) != 1) {
- ret = -EINVAL;
- goto error;
- }
- cmd.old.sta_id = ffs(sta_mask) - 1;
-
- hcmd.id = SCD_QUEUE_CFG;
- hcmd.len[0] = sizeof(cmd.old);
- hcmd.data[0] = &cmd.old;
- } else if (trans->txqs.queue_alloc_cmd_ver == 3) {
- memset(&cmd.new, 0, sizeof(cmd.new));
- cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
- cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
- cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
- cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
- cmd.new.u.add.flags = cpu_to_le32(flags);
- cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
- cmd.new.u.add.tid = tid;
-
- hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
- hcmd.len[0] = sizeof(cmd.new);
- hcmd.data[0] = &cmd.new;
- } else {
- ret = -EOPNOTSUPP;
- goto error;
- }
-
- ret = iwl_trans_send_cmd(trans, &hcmd);
- if (ret)
- goto error;
-
- return iwl_txq_alloc_response(trans, txq, &hcmd);
-
-error:
- iwl_txq_gen2_free_memory(trans, txq);
- return ret;
-}
-
-void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
-{
- if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
- "queue %d out of range", queue))
- return;
-
- /*
- * Upon HW Rfkill - we stop the device, and then stop the queues
- * in the op_mode. Just for the sake of the simplicity of the op_mode,
- * allow the op_mode to call txq_disable after it already called
- * stop_device.
- */
- if (!test_and_clear_bit(queue, trans->txqs.queue_used)) {
- WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
- "queue %d not used", queue);
- return;
- }
-
- iwl_txq_gen2_free(trans, queue);
-
- IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
-}
-
-void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
-{
- int i;
-
- memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));
-
- /* Free all TX queues */
- for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) {
- if (!trans->txqs.txq[i])
- continue;
-
- iwl_txq_gen2_free(trans, i);
- }
-}
-
-int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
-{
- struct iwl_txq *queue;
- int ret;
-
- /* alloc and init the tx queue */
- if (!trans->txqs.txq[txq_id]) {
- queue = kzalloc(sizeof(*queue), GFP_KERNEL);
- if (!queue) {
- IWL_ERR(trans, "Not enough memory for tx queue\n");
- return -ENOMEM;
- }
- trans->txqs.txq[txq_id] = queue;
- ret = iwl_txq_alloc(trans, queue, queue_size, true);
- if (ret) {
- IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- } else {
- queue = trans->txqs.txq[txq_id];
- }
-
- ret = iwl_txq_init(trans, queue, queue_size,
- (txq_id == trans->txqs.cmd.q_id));
- if (ret) {
- IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
- goto error;
- }
- trans->txqs.txq[txq_id]->id = txq_id;
- set_bit(txq_id, trans->txqs.queue_used);
-
- return 0;
-
-error:
- iwl_txq_gen2_tx_free(trans);
- return ret;
-}
-
-static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
- struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- dma_addr_t addr;
- dma_addr_t hi_len;
-
- addr = get_unaligned_le32(&tb->lo);
-
- if (sizeof(dma_addr_t) <= sizeof(u32))
- return addr;
-
- hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
-
- /*
- * shift by 16 twice to avoid warnings on 32-bit
- * (where this code never runs anyway due to the
- * if statement above)
- */
- return addr | ((hi_len << 16) << 16);
-}
-
-void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_txq *txq, int index)
-{
- int i, num_tbs;
- struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
-
- /* Sanity check on number of chunks */
- num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
-
- if (num_tbs > trans->txqs.tfd.max_tbs) {
- IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
- /* @todo issue fatal error, it is quite serious situation */
- return;
- }
-
- /* first TB is never freed - it's the bidirectional DMA data */
-
- for (i = 1; i < num_tbs; i++) {
- if (meta->tbs & BIT(i))
- dma_unmap_page(trans->dev,
- iwl_txq_gen1_tfd_tb_get_addr(trans,
- tfd, i),
- iwl_txq_gen1_tfd_tb_get_len(trans,
- tfd, i),
- DMA_TO_DEVICE);
- else
- dma_unmap_single(trans->dev,
- iwl_txq_gen1_tfd_tb_get_addr(trans,
- tfd, i),
- iwl_txq_gen1_tfd_tb_get_len(trans,
- tfd, i),
- DMA_TO_DEVICE);
- }
-
- meta->tbs = 0;
-
- iwl_txq_set_tfd_invalid_gen1(trans, tfd);
-}
-
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
-/*
- * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs)
-{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl;
- int write_ptr = txq->write_ptr;
- int txq_id = txq->id;
- u8 sec_ctl = 0;
- u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
- struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
- u8 sta_id = tx_cmd->sta_id;
-
- scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
-
- sec_ctl = tx_cmd->sec_ctl;
-
- switch (sec_ctl & TX_CMD_SEC_MSK) {
- case TX_CMD_SEC_CCM:
- len += IEEE80211_CCMP_MIC_LEN;
- break;
- case TX_CMD_SEC_TKIP:
- len += IEEE80211_TKIP_ICV_LEN;
- break;
- case TX_CMD_SEC_WEP:
- len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
- break;
- }
- if (trans->txqs.bc_table_dword)
- len = DIV_ROUND_UP(len, 4);
-
- if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
- return;
-
- bc_ent = cpu_to_le16(len | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
- bc_ent;
-}
-
-void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr;
- int txq_id = txq->id;
- int read_ptr = txq->read_ptr;
- u8 sta_id = 0;
- __le16 bc_ent;
- struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
- struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
-
- WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
-
- if (txq_id != trans->txqs.cmd.q_id)
- sta_id = tx_cmd->sta_id;
-
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
-
- scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
-
- if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
- bc_ent;
-}
-
-/*
- * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @trans: transport private data
- * @txq: tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
- * idx is bounded by n_window
- */
- int rd_ptr = txq->read_ptr;
- int idx = iwl_txq_get_cmd_index(txq, rd_ptr);
- struct sk_buff *skb;
-
- lockdep_assert_held(&txq->lock);
-
- if (!txq->entries)
- return;
-
- /* We have only q->n_window txq->entries, but we use
- * TFD_QUEUE_SIZE_MAX tfds
- */
- if (trans->trans_cfg->gen2)
- iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
- iwl_txq_get_tfd(trans, txq, rd_ptr));
- else
- iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
- txq, rd_ptr);
-
- /* free SKB */
- skb = txq->entries[idx].skb;
-
- /* Can be called from an irqs-disabled context.
- * If skb is not NULL, it means that the whole queue is being
- * freed while it is not empty; free the skb
- */
- if (skb) {
- iwl_op_mode_free_skb(trans->op_mode, skb);
- txq->entries[idx].skb = NULL;
- }
-}
-
-void iwl_txq_progress(struct iwl_txq *txq)
-{
- lockdep_assert_held(&txq->lock);
-
- if (!txq->wd_timeout)
- return;
-
- /*
- * station is asleep and we send data - that must
- * be uAPSD or PS-Poll. Don't rearm the timer.
- */
- if (txq->frozen)
- return;
-
- /*
- * if empty, delete the timer; otherwise move the timer forward
- * since we're making progress on this queue
- */
- if (txq->read_ptr == txq->write_ptr)
- del_timer(&txq->stuck_timer);
- else
- mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
-}
-
-/* Frees buffers until index _not_ inclusive */
-void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
- struct sk_buff_head *skbs, bool is_flush)
-{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
- int tfd_num, read_ptr, last_to_free;
-
- /* This function is not meant to release the cmd queue */
- if (WARN_ON(txq_id == trans->txqs.cmd.q_id))
- return;
-
- if (WARN_ON(!txq))
- return;
-
- tfd_num = iwl_txq_get_cmd_index(txq, ssn);
-
- spin_lock_bh(&txq->lock);
- read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr);
-
- if (!test_bit(txq_id, trans->txqs.queue_used)) {
- IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
- txq_id, ssn);
- goto out;
- }
-
- if (read_ptr == tfd_num)
- goto out;
-
- IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
- txq_id, read_ptr, txq->read_ptr, tfd_num, ssn);
-
- /* Since we free until the index, _not_ inclusive, the one before it
- * is the last we will free; that one must be a used entry */
- last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
-
- if (!iwl_txq_used(txq, last_to_free)) {
- IWL_ERR(trans,
- "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
- __func__, txq_id, last_to_free,
- trans->trans_cfg->base_params->max_tfd_queue_size,
- txq->write_ptr, txq->read_ptr);
-
- iwl_op_mode_time_point(trans->op_mode,
- IWL_FW_INI_TIME_POINT_FAKE_TX,
- NULL);
- goto out;
- }
-
- if (WARN_ON(!skb_queue_empty(skbs)))
- goto out;
-
- for (;
- read_ptr != tfd_num;
- txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr),
- read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) {
- struct sk_buff *skb = txq->entries[read_ptr].skb;
-
- if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
- read_ptr, txq->read_ptr, txq_id))
- continue;
-
- iwl_txq_free_tso_page(trans, skb);
-
- __skb_queue_tail(skbs, skb);
-
- txq->entries[read_ptr].skb = NULL;
-
- if (!trans->trans_cfg->gen2)
- iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq);
-
- iwl_txq_free_tfd(trans, txq);
- }
-
- iwl_txq_progress(txq);
-
- if (iwl_txq_space(trans, txq) > txq->low_mark &&
- test_bit(txq_id, trans->txqs.queue_stopped)) {
- struct sk_buff_head overflow_skbs;
- struct sk_buff *skb;
-
- __skb_queue_head_init(&overflow_skbs);
- skb_queue_splice_init(&txq->overflow_q,
- is_flush ? skbs : &overflow_skbs);
-
- /*
- * We are going to transmit from the overflow queue.
- * Remember this state so that wait_for_txq_empty will know we
- * are adding more packets to the TFD queue. It cannot rely on
- * the state of &txq->overflow_q, as we just emptied it, but
- * haven't TXed the content yet.
- */
- txq->overflow_tx = true;
-
- /*
- * This is tricky: we are in the reclaim path, which is not
- * re-entrant, so no one else will try to access the txq data
- * from that path. We have also stopped tx, so no tx can race
- * with us. Bottom line: we can safely unlock and re-lock
- * later.
- */
- spin_unlock_bh(&txq->lock);
-
- while ((skb = __skb_dequeue(&overflow_skbs))) {
- struct iwl_device_tx_cmd *dev_cmd_ptr;
-
- dev_cmd_ptr = *(void **)((u8 *)skb->cb +
- trans->txqs.dev_cmd_offs);
-
- /*
- * Note that we can very well be overflowing again.
- * In that case, iwl_txq_space will be small again
- * and we won't wake mac80211's queue.
- */
- iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
- }
-
- if (iwl_txq_space(trans, txq) > txq->low_mark)
- iwl_wake_queue(trans, txq);
-
- spin_lock_bh(&txq->lock);
- txq->overflow_tx = false;
- }
-
-out:
- spin_unlock_bh(&txq->lock);
-}
-
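Aside: a userspace sketch of the exclusive-upper-bound reclaim walk above, using a ring size of 8 as a stand-in for max_tfd_queue_size (which, like here, must be a power of two for the mask to work).

#include <stdio.h>

#define RING_SIZE 8	/* stand-in; must be a power of two */

static int inc_wrap(int index) { return ++index & (RING_SIZE - 1); }
static int dec_wrap(int index) { return --index & (RING_SIZE - 1); }

int main(void)
{
	int read_ptr = 6, ssn = 2;		/* reclaim wraps past the end */
	int last_to_free = dec_wrap(ssn);	/* 1: last entry to be freed */

	for (; read_ptr != ssn; read_ptr = inc_wrap(read_ptr))
		printf("freeing entry %d\n", read_ptr);	/* 6, 7, 0, 1 */

	printf("last freed was %d\n", last_to_free);
	return 0;
}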
-/* Set the read and write pointers of the given device's txq */
-void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
-{
- struct iwl_txq *txq = trans->txqs.txq[txq_id];
-
- spin_lock_bh(&txq->lock);
-
- txq->write_ptr = ptr;
- txq->read_ptr = txq->write_ptr;
-
- spin_unlock_bh(&txq->lock);
-}
-
-void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
- bool freeze)
-{
- int queue;
-
- for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
- struct iwl_txq *txq = trans->txqs.txq[queue];
- unsigned long now;
-
- spin_lock_bh(&txq->lock);
-
- now = jiffies;
-
- if (txq->frozen == freeze)
- goto next_queue;
-
- IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
- freeze ? "Freezing" : "Waking", queue);
-
- txq->frozen = freeze;
-
- if (txq->read_ptr == txq->write_ptr)
- goto next_queue;
-
- if (freeze) {
- if (unlikely(time_after(now,
- txq->stuck_timer.expires))) {
- /*
- * The timer should have fired, maybe it is
- * spinning right now on the lock.
- */
- goto next_queue;
- }
- /* remember how long until the timer fires */
- txq->frozen_expiry_remainder =
- txq->stuck_timer.expires - now;
- del_timer(&txq->stuck_timer);
- goto next_queue;
- }
-
- /*
- * Wake a non-empty queue -> arm timer with the
- * remainder before it froze
- */
- mod_timer(&txq->stuck_timer,
- now + txq->frozen_expiry_remainder);
-
-next_queue:
- spin_unlock_bh(&txq->lock);
- }
-}
-
-#define HOST_COMPLETE_TIMEOUT (2 * HZ)
-
-static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
-{
- const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
- struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
- int cmd_idx;
- int ret;
-
- IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
-
- if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status),
- "Command %s: a command is already active!\n", cmd_str))
- return -EIO;
-
- IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
-
- cmd_idx = trans->ops->send_cmd(trans, cmd);
- if (cmd_idx < 0) {
- ret = cmd_idx;
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
- cmd_str, ret);
- return ret;
- }
-
- ret = wait_event_timeout(trans->wait_command_queue,
- !test_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status),
- HOST_COMPLETE_TIMEOUT);
- if (!ret) {
- IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
- cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
- IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
- txq->read_ptr, txq->write_ptr);
-
- clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
- IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
- cmd_str);
- ret = -ETIMEDOUT;
-
- iwl_trans_sync_nmi(trans);
- goto cancel;
- }
-
- if (test_bit(STATUS_FW_ERROR, &trans->status)) {
- if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
- &trans->status)) {
- IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
- dump_stack();
- }
- ret = -EIO;
- goto cancel;
- }
-
- if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
- IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
- ret = -ERFKILL;
- goto cancel;
- }
-
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
- IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
- ret = -EIO;
- goto cancel;
- }
-
- return 0;
-
-cancel:
- if (cmd->flags & CMD_WANT_SKB) {
- /*
- * Cancel the CMD_WANT_SKB flag for the cmd in the
- * TX cmd queue. Otherwise in case the cmd comes
- * in later, it will possibly set an invalid
- * address (cmd->meta.source).
- */
- txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
- }
-
- if (cmd->resp_pkt) {
- iwl_free_resp(cmd);
- cmd->resp_pkt = NULL;
- }
-
- return ret;
-}
-
-int iwl_trans_txq_send_hcmd(struct iwl_trans *trans,
- struct iwl_host_cmd *cmd)
-{
- /* Make sure the NIC is still alive in the bus */
- if (test_bit(STATUS_TRANS_DEAD, &trans->status))
- return -ENODEV;
-
- if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
- test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
- IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
- cmd->id);
- return -ERFKILL;
- }
-
- if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
- !(cmd->flags & CMD_SEND_IN_D3))) {
- IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
- return -EHOSTDOWN;
- }
-
- if (cmd->flags & CMD_ASYNC) {
- int ret;
-
- /* An asynchronous command cannot expect an SKB to be set. */
- if (WARN_ON(cmd->flags & CMD_WANT_SKB))
- return -EINVAL;
-
- ret = trans->ops->send_cmd(trans, cmd);
- if (ret < 0) {
- IWL_ERR(trans,
- "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_get_cmd_string(trans, cmd->id), ret);
- return ret;
- }
- return 0;
- }
-
- return iwl_trans_txq_send_hcmd_sync(trans, cmd);
-}
-
diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h
deleted file mode 100644
index 124b29aac4a1..000000000000
--- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h
+++ /dev/null
@@ -1,191 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/*
- * Copyright (C) 2020-2023 Intel Corporation
- */
-#ifndef __iwl_trans_queue_tx_h__
-#define __iwl_trans_queue_tx_h__
-#include "iwl-fh.h"
-#include "fw/api/tx.h"
-
-struct iwl_tso_hdr_page {
- struct page *page;
- u8 *pos;
-};
-
-static inline dma_addr_t
-iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx)
-{
- return txq->first_tb_dma +
- sizeof(struct iwl_pcie_first_tb_buf) * idx;
-}
-
-static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index)
-{
- return index & (q->n_window - 1);
-}
-
-void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id);
-
-static inline void iwl_wake_queue(struct iwl_trans *trans,
- struct iwl_txq *txq)
-{
- if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) {
- IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
- iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
- }
-}
-
-static inline void *iwl_txq_get_tfd(struct iwl_trans *trans,
- struct iwl_txq *txq, int idx)
-{
- if (trans->trans_cfg->gen2)
- idx = iwl_txq_get_cmd_index(txq, idx);
-
- return (u8 *)txq->tfds + trans->txqs.tfd.size * idx;
-}
-
-int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
- bool cmd_queue);
-/*
- * We need this inline in case dma_addr_t is only 32 bits: since the
- * hardware address is always 64-bit, the crossing can still occur in
- * that case, so use u64 for 'phys' here to force a 64-bit addition.
- */
-static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len)
-{
- return upper_32_bits(phys) != upper_32_bits(phys + len);
-}
-
-int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q);
-
-static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq)
-{
- if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) {
- iwl_op_mode_queue_full(trans->op_mode, txq->id);
- IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
- } else {
- IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
- txq->id);
- }
-}
-
-/**
- * iwl_txq_inc_wrap - increment queue index, wrap back to beginning
- * @trans: the transport (for configuration data)
- * @index: current index
- */
-static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index)
-{
- return ++index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-}
-
-/**
- * iwl_txq_dec_wrap - decrement queue index, wrap back to end
- * @trans: the transport (for configuration data)
- * @index: current index
- */
-static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index)
-{
- return --index &
- (trans->trans_cfg->base_params->max_tfd_queue_size - 1);
-}
-
-static inline bool iwl_txq_used(const struct iwl_txq *q, int i)
-{
- int index = iwl_txq_get_cmd_index(q, i);
- int r = iwl_txq_get_cmd_index(q, q->read_ptr);
- int w = iwl_txq_get_cmd_index(q, q->write_ptr);
-
- return w >= r ?
- (index >= r && index < w) :
- !(index < r && index >= w);
-}
-
-void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb);
-
-void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq);
-
-int iwl_txq_gen2_set_tb(struct iwl_trans *trans,
- struct iwl_tfh_tfd *tfd, dma_addr_t addr,
- u16 len);
-
-void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_tfh_tfd *tfd);
-
-int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags,
- u32 sta_mask, u8 tid,
- int size, unsigned int timeout);
-
-int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
- struct iwl_device_tx_cmd *dev_cmd, int txq_id);
-
-void iwl_txq_dyn_free(struct iwl_trans *trans, int queue);
-void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq);
-void iwl_txq_gen2_tx_free(struct iwl_trans *trans);
-int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num,
- bool cmd_queue);
-int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size);
-#ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
- struct sk_buff *skb);
-#endif
-static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
- struct iwl_tfd *tfd)
-{
- return tfd->num_tbs & 0x1f;
-}
-
-static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans,
- void *_tfd, u8 idx)
-{
- struct iwl_tfd *tfd;
- struct iwl_tfd_tb *tb;
-
- if (trans->trans_cfg->gen2) {
- struct iwl_tfh_tfd *tfh_tfd = _tfd;
- struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
-
- return le16_to_cpu(tfh_tb->tb_len);
- }
-
- tfd = (struct iwl_tfd *)_tfd;
- tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_trans *trans,
- struct iwl_tfd *tfd,
- u8 idx, dma_addr_t addr, u16 len)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
-
- put_unaligned_le32(addr, &tb->lo);
- hi_n_len |= iwl_get_dma_hi_addr(addr);
-
- tb->hi_n_len = cpu_to_le16(hi_n_len);
-
- tfd->num_tbs = idx + 1;
-}
-
-void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
- struct iwl_cmd_meta *meta,
- struct iwl_txq *txq, int index);
-void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq);
-void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
- struct iwl_txq *txq, u16 byte_cnt,
- int num_tbs);
-void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
- struct sk_buff_head *skbs, bool is_flush);
-void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr);
-void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs,
- bool freeze);
-void iwl_txq_progress(struct iwl_txq *txq);
-void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
-int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
-#endif /* __iwl_trans_queue_tx_h__ */
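Aside: a standalone sketch of the 4 GiB boundary check just deleted; doing the addition in 64 bits keeps the test valid even on configurations where dma_addr_t is only 32 bits wide. The test values are illustrative.

#include <stdint.h>
#include <stdio.h>

static int crosses_4g_boundary(uint64_t phys, uint16_t len)
{
	return (uint32_t)(phys >> 32) != (uint32_t)((phys + len) >> 32);
}

int main(void)
{
	printf("%d\n", crosses_4g_boundary(0xfffffff0ULL, 0x20));  /* 1: crosses */
	printf("%d\n", crosses_4g_boundary(0x100000000ULL, 0x20)); /* 0: stays   */
	return 0;
}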
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/Makefile b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
index 5658471bdf0a..84491488f589 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/tests/Makefile
@@ -2,6 +2,6 @@
iwlwifi-tests-y += module.o devinfo.o
-ccflags-y += -I$(srctree)/$(src)/../
+ccflags-y += -I$(src)/../
obj-$(CONFIG_IWLWIFI_KUNIT_TESTS) += iwlwifi-tests.o
diff --git a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
index 7aa47fce6e2d..7361b6d0cdb8 100644
--- a/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
+++ b/drivers/net/wireless/intel/iwlwifi/tests/devinfo.c
@@ -2,9 +2,10 @@
/*
* KUnit tests for the iwlwifi device info table
*
- * Copyright (C) 2023 Intel Corporation
+ * Copyright (C) 2023-2024 Intel Corporation
*/
#include <kunit/test.h>
+#include <linux/pci.h>
#include "iwl-drv.h"
#include "iwl-config.h"
@@ -41,8 +42,31 @@ static void devinfo_table_order(struct kunit *test)
}
}
+static void devinfo_pci_ids(struct kunit *test)
+{
+ struct pci_dev *dev;
+
+ dev = kunit_kmalloc(test, sizeof(*dev), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_NULL(test, dev);
+
+ for (int i = 0; iwl_hw_card_ids[i].vendor; i++) {
+ const struct pci_device_id *s, *t;
+
+ s = &iwl_hw_card_ids[i];
+ dev->vendor = s->vendor;
+ dev->device = s->device;
+ dev->subsystem_vendor = s->subvendor;
+ dev->subsystem_device = s->subdevice;
+ dev->class = s->class;
+
+ t = pci_match_id(iwl_hw_card_ids, dev);
+ KUNIT_EXPECT_PTR_EQ(test, t, s);
+ }
+}
+
static struct kunit_case devinfo_test_cases[] = {
KUNIT_CASE(devinfo_table_order),
+ KUNIT_CASE(devinfo_pci_ids),
{}
};
diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c
index c4fe70e05b9b..772084a9bd8d 100644
--- a/drivers/net/wireless/intersil/p54/fwio.c
+++ b/drivers/net/wireless/intersil/p54/fwio.c
@@ -216,7 +216,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
struct sk_buff *skb;
size_t eeprom_hdr_size;
int ret = 0;
- long timeout;
+ long time_left;
if (priv->fw_var >= 0x509)
eeprom_hdr_size = sizeof(*eeprom_hdr);
@@ -245,9 +245,9 @@ int p54_download_eeprom(struct p54_common *priv, void *buf,
p54_tx(priv, skb);
- timeout = wait_for_completion_interruptible_timeout(
+ time_left = wait_for_completion_interruptible_timeout(
&priv->eeprom_comp, HZ);
- if (timeout <= 0) {
+ if (time_left <= 0) {
wiphy_err(priv->hw->wiphy,
"device does not respond or signal received!\n");
ret = -EBUSY;
diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
index 687841b2fa2a..42111bb53f58 100644
--- a/drivers/net/wireless/intersil/p54/main.c
+++ b/drivers/net/wireless/intersil/p54/main.c
@@ -197,7 +197,7 @@ out:
return err;
}
-static void p54_stop(struct ieee80211_hw *dev)
+static void p54_stop(struct ieee80211_hw *dev, bool suspend)
{
struct p54_common *priv = dev->priv;
int i;
diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c
index e97ee547b9f3..6588f5477762 100644
--- a/drivers/net/wireless/intersil/p54/p54pci.c
+++ b/drivers/net/wireless/intersil/p54/p54pci.c
@@ -434,7 +434,7 @@ static int p54p_open(struct ieee80211_hw *dev)
{
struct p54p_priv *priv = dev->priv;
int err;
- long timeout;
+ long time_left;
init_completion(&priv->boot_comp);
err = request_irq(priv->pdev->irq, p54p_interrupt,
@@ -472,12 +472,12 @@ static int p54p_open(struct ieee80211_hw *dev)
P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
P54P_READ(dev_int);
- timeout = wait_for_completion_interruptible_timeout(
+ time_left = wait_for_completion_interruptible_timeout(
&priv->boot_comp, HZ);
- if (timeout <= 0) {
+ if (time_left <= 0) {
wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
p54p_stop(dev);
- return timeout ? -ERESTARTSYS : -ETIMEDOUT;
+ return time_left ? -ERESTARTSYS : -ETIMEDOUT;
}
P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
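Aside: the rename from "timeout" to "time_left" in these hunks tracks the return convention of wait_for_completion_interruptible_timeout(). A hedged, kernel-style sketch of that mapping (assumes -ERESTARTSYS and -ETIMEDOUT from <linux/errno.h>; the helper name is illustrative):

/* < 0: interrupted by a signal; 0: timed out; > 0: jiffies remaining */
static inline int status_from_time_left(long time_left)
{
	if (time_left < 0)
		return time_left;	/* typically -ERESTARTSYS */
	if (time_left == 0)
		return -ETIMEDOUT;
	return 0;			/* completed in time */
}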
diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c
index 0073b5e0f9c9..d33a994906a7 100644
--- a/drivers/net/wireless/intersil/p54/p54spi.c
+++ b/drivers/net/wireless/intersil/p54/p54spi.c
@@ -518,7 +518,7 @@ out:
static int p54spi_op_start(struct ieee80211_hw *dev)
{
struct p54s_priv *priv = dev->priv;
- unsigned long timeout;
+ long time_left;
int ret = 0;
if (mutex_lock_interruptible(&priv->mutex)) {
@@ -538,10 +538,10 @@ static int p54spi_op_start(struct ieee80211_hw *dev)
mutex_unlock(&priv->mutex);
- timeout = msecs_to_jiffies(2000);
- timeout = wait_for_completion_interruptible_timeout(&priv->fw_comp,
- timeout);
- if (!timeout) {
+ time_left = msecs_to_jiffies(2000);
+ time_left = wait_for_completion_interruptible_timeout(&priv->fw_comp,
+ time_left);
+ if (!time_left) {
dev_err(&priv->spi->dev, "firmware boot failed");
p54spi_power_off(priv);
ret = -1;
diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c
index 9cca69fe04d7..b47a832b9ae2 100644
--- a/drivers/net/wireless/marvell/libertas_tf/main.c
+++ b/drivers/net/wireless/marvell/libertas_tf/main.c
@@ -267,7 +267,7 @@ static int lbtf_op_start(struct ieee80211_hw *hw)
return 0;
}
-static void lbtf_op_stop(struct ieee80211_hw *hw)
+static void lbtf_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct lbtf_private *priv = hw->priv;
unsigned long flags;
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index b909a7665e9c..155eb0fab12a 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -926,6 +926,8 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
return -EOPNOTSUPP;
}
+ priv->bss_num = mwifiex_get_unused_bss_num(adapter, priv->bss_type);
+
spin_lock_irqsave(&adapter->main_proc_lock, flags);
adapter->main_locked = false;
spin_unlock_irqrestore(&adapter->main_proc_lock, flags);
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 175882485a19..c5164ae41b54 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -1287,6 +1287,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter,
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
+ if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED)
+ continue;
+
if ((adapter->priv[i]->bss_num == bss_num) &&
(adapter->priv[i]->bss_type == bss_type))
break;
diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
index 75f53c2f1e1f..bda9b2b8a1f3 100644
--- a/drivers/net/wireless/marvell/mwifiex/sdio.c
+++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
@@ -979,7 +979,6 @@ static struct sdio_driver mwifiex_sdio = {
.probe = mwifiex_sdio_probe,
.remove = mwifiex_sdio_remove,
.drv = {
- .owner = THIS_MODULE,
.coredump = mwifiex_sdio_coredump,
.pm = &mwifiex_sdio_pm_ops,
}
@@ -3183,7 +3182,7 @@ static struct mwifiex_if_ops sdio_ops = {
.up_dev = mwifiex_sdio_up_dev,
};
-module_driver(mwifiex_sdio, sdio_register_driver, sdio_unregister_driver);
+module_sdio_driver(mwifiex_sdio);
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
@@ -3192,6 +3191,7 @@ MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE(SD8786_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8801_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8897_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8887_DEFAULT_FW_NAME);
MODULE_FIRMWARE(SD8977_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index ce8fea76dbb2..b130e057370f 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -587,12 +587,14 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv, char *fw_image,
}
struct mwl8k_cmd_pkt {
- __le16 code;
- __le16 length;
- __u8 seq_num;
- __u8 macid;
- __le16 result;
- char payload[];
+ __struct_group(mwl8k_cmd_pkt_hdr, hdr, __packed,
+ __le16 code;
+ __le16 length;
+ __u8 seq_num;
+ __u8 macid;
+ __le16 result;
+ );
+ char payload[];
} __packed;
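Aside: a simplified, self-contained stand-in for the __struct_group() pattern introduced above (the real macro lives in include/uapi/linux/stddef.h). The point is that the header members are visible both directly and as a named sub-struct, so sizeof on the header type excludes the flexible payload[]. Types and names below are illustrative.

#include <stdint.h>
#include <stdio.h>

struct cmd_hdr {
	uint16_t code, length;
	uint8_t seq_num, macid;
	uint16_t result;
};

struct cmd_pkt {
	union {
		struct cmd_hdr hdr;
		struct {
			uint16_t code, length;
			uint8_t seq_num, macid;
			uint16_t result;
		};
	};
	char payload[];	/* flexible array, excluded from header sizing */
};

int main(void)
{
	printf("header %zu bytes, packet %zu bytes\n",
	       sizeof(struct cmd_hdr), sizeof(struct cmd_pkt));
	return 0;
}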
/*
@@ -2201,7 +2203,7 @@ static void mwl8k_enable_bsses(struct ieee80211_hw *hw, bool enable,
/* Timeout firmware commands after 10s */
#define MWL8K_CMD_TIMEOUT_MS 10000
-static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
+static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd)
{
DECLARE_COMPLETION_ONSTACK(cmd_wait);
struct mwl8k_priv *priv = hw->priv;
@@ -2209,7 +2211,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
dma_addr_t dma_addr;
unsigned int dma_size;
int rc;
- unsigned long timeout = 0;
+ unsigned long time_left = 0;
u8 buf[32];
u32 bitmap = 0;
@@ -2256,8 +2258,8 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
iowrite32(MWL8K_H2A_INT_DUMMY,
regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS);
- timeout = wait_for_completion_timeout(&cmd_wait,
- msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS));
+ time_left = wait_for_completion_timeout(&cmd_wait,
+ msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS));
priv->hostcmd_wait = NULL;
@@ -2265,7 +2267,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
dma_unmap_single(&priv->pdev->dev, dma_addr, dma_size,
DMA_BIDIRECTIONAL);
- if (!timeout) {
+ if (!time_left) {
wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n",
mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
MWL8K_CMD_TIMEOUT_MS);
@@ -2273,7 +2275,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
} else {
int ms;
- ms = MWL8K_CMD_TIMEOUT_MS - jiffies_to_msecs(timeout);
+ ms = MWL8K_CMD_TIMEOUT_MS - jiffies_to_msecs(time_left);
rc = cmd->result ? -EINVAL : 0;
if (rc)
@@ -2298,7 +2300,7 @@ exit:
static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- struct mwl8k_cmd_pkt *cmd)
+ struct mwl8k_cmd_pkt_hdr *cmd)
{
if (vif != NULL)
cmd->macid = MWL8K_VIF(vif)->macid;
@@ -2350,7 +2352,7 @@ static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
* CMD_GET_HW_SPEC (STA version).
*/
struct mwl8k_cmd_get_hw_spec_sta {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__u8 hw_rev;
__u8 host_interface;
__le16 num_mcaddrs;
@@ -2499,7 +2501,7 @@ static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
* CMD_GET_HW_SPEC (AP version).
*/
struct mwl8k_cmd_get_hw_spec_ap {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__u8 hw_rev;
__u8 host_interface;
__le16 num_wcb;
@@ -2593,7 +2595,7 @@ done:
* CMD_SET_HW_SPEC.
*/
struct mwl8k_cmd_set_hw_spec {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__u8 hw_rev;
__u8 host_interface;
__le16 num_mcaddrs;
@@ -2670,7 +2672,7 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
* CMD_MAC_MULTICAST_ADR.
*/
struct mwl8k_cmd_mac_multicast_adr {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 numaddr;
__u8 addr[][ETH_ALEN];
@@ -2681,7 +2683,7 @@ struct mwl8k_cmd_mac_multicast_adr {
#define MWL8K_ENABLE_RX_ALL_MULTICAST 0x0004
#define MWL8K_ENABLE_RX_BROADCAST 0x0008
-static struct mwl8k_cmd_pkt *
+static struct mwl8k_cmd_pkt_hdr *
__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
struct netdev_hw_addr_list *mc_list)
{
@@ -2718,7 +2720,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
cmd->numaddr = cpu_to_le16(mc_count);
netdev_hw_addr_list_for_each(ha, mc_list) {
- memcpy(cmd->addr[i], ha->addr, ETH_ALEN);
+ memcpy(cmd->addr[i++], ha->addr, ETH_ALEN);
}
}
@@ -2729,7 +2731,7 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
* CMD_GET_STAT.
*/
struct mwl8k_cmd_get_stat {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 stats[64];
} __packed;
@@ -2771,7 +2773,7 @@ static int mwl8k_cmd_get_stat(struct ieee80211_hw *hw,
* CMD_RADIO_CONTROL.
*/
struct mwl8k_cmd_radio_control {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 control;
__le16 radio_on;
@@ -2832,7 +2834,7 @@ mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
#define MWL8K_RF_TX_POWER_LEVEL_TOTAL 8
struct mwl8k_cmd_rf_tx_power {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 support_level;
__le16 current_level;
@@ -2866,7 +2868,7 @@ static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
#define MWL8K_TX_POWER_LEVEL_TOTAL 12
struct mwl8k_cmd_tx_power {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 band;
__le16 channel;
@@ -2925,7 +2927,7 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
* CMD_RF_ANTENNA.
*/
struct mwl8k_cmd_rf_antenna {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 antenna;
__le16 mode;
} __packed;
@@ -2958,7 +2960,7 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
* CMD_SET_BEACON.
*/
struct mwl8k_cmd_set_beacon {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 beacon_len;
__u8 beacon[];
};
@@ -2988,7 +2990,7 @@ static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
* CMD_SET_PRE_SCAN.
*/
struct mwl8k_cmd_set_pre_scan {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
} __packed;
static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
@@ -3013,7 +3015,7 @@ static int mwl8k_cmd_set_pre_scan(struct ieee80211_hw *hw)
* CMD_BBP_REG_ACCESS.
*/
struct mwl8k_cmd_bbp_reg_access {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 offset;
u8 value;
@@ -3054,7 +3056,7 @@ mwl8k_cmd_bbp_reg_access(struct ieee80211_hw *hw,
* CMD_SET_POST_SCAN.
*/
struct mwl8k_cmd_set_post_scan {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 isibss;
__u8 bssid[ETH_ALEN];
} __packed;
@@ -3142,7 +3144,7 @@ static void mwl8k_update_survey(struct mwl8k_priv *priv,
* CMD_SET_RF_CHANNEL.
*/
struct mwl8k_cmd_set_rf_channel {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__u8 current_channel;
__le32 channel_flags;
@@ -3211,7 +3213,7 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
struct mwl8k_cmd_update_set_aid {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 aid;
/* AP's MAC address (BSSID) */
@@ -3283,7 +3285,7 @@ mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
* CMD_SET_RATE.
*/
struct mwl8k_cmd_set_rate {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__u8 legacy_rates[14];
/* Bitmap for supported MCS codes. */
@@ -3319,7 +3321,7 @@ mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
#define MWL8K_FJ_BEACON_MAXLEN 128
struct mwl8k_cmd_finalize_join {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 sleep_interval; /* Number of beacon periods to sleep */
__u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
} __packed;
@@ -3358,7 +3360,7 @@ static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
* CMD_SET_RTS_THRESHOLD.
*/
struct mwl8k_cmd_set_rts_threshold {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 threshold;
} __packed;
@@ -3388,7 +3390,7 @@ mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
* CMD_SET_SLOT.
*/
struct mwl8k_cmd_set_slot {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__u8 short_slot;
} __packed;
@@ -3417,7 +3419,7 @@ static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
* CMD_SET_EDCA_PARAMS.
*/
struct mwl8k_cmd_set_edca_params {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
/* See MWL8K_SET_EDCA_XXX below */
__le16 action;
@@ -3502,7 +3504,7 @@ mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
* CMD_SET_WMM_MODE.
*/
struct mwl8k_cmd_set_wmm_mode {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
} __packed;
@@ -3533,7 +3535,7 @@ static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
* CMD_MIMO_CONFIG.
*/
struct mwl8k_cmd_mimo_config {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
__u8 rx_antenna_map;
__u8 tx_antenna_map;
@@ -3564,7 +3566,7 @@ static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
* CMD_USE_FIXED_RATE (STA version).
*/
struct mwl8k_cmd_use_fixed_rate_sta {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
__le32 allow_rate_drop;
__le32 num_rates;
@@ -3606,7 +3608,7 @@ static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw)
* CMD_USE_FIXED_RATE (AP version).
*/
struct mwl8k_cmd_use_fixed_rate_ap {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
__le32 allow_rate_drop;
__le32 num_rates;
@@ -3647,7 +3649,7 @@ mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
* CMD_ENABLE_SNIFFER.
*/
struct mwl8k_cmd_enable_sniffer {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
} __packed;
@@ -3671,7 +3673,7 @@ static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
}
struct mwl8k_cmd_update_mac_addr {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
union {
struct {
__le16 mac_type;
@@ -3756,7 +3758,7 @@ static inline int mwl8k_cmd_del_mac_addr(struct ieee80211_hw *hw,
* CMD_SET_RATEADAPT_MODE.
*/
struct mwl8k_cmd_set_rate_adapt_mode {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 action;
__le16 mode;
} __packed;
@@ -3785,7 +3787,7 @@ static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
* CMD_GET_WATCHDOG_BITMAP.
*/
struct mwl8k_cmd_get_watchdog_bitmap {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
u8 bitmap;
} __packed;
@@ -3865,7 +3867,7 @@ done:
* CMD_BSS_START.
*/
struct mwl8k_cmd_bss_start {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 enable;
} __packed;
@@ -3960,7 +3962,7 @@ struct mwl8k_destroy_ba_stream {
} __packed;
struct mwl8k_cmd_bastream {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
union {
struct mwl8k_create_ba_stream create_params;
@@ -4070,7 +4072,7 @@ static void mwl8k_destroy_ba(struct ieee80211_hw *hw,
* CMD_SET_NEW_STN.
*/
struct mwl8k_cmd_set_new_stn {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le16 aid;
__u8 mac_addr[6];
__le16 stn_id;
@@ -4206,7 +4208,7 @@ static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
#define MIC_KEY_LENGTH 8
struct mwl8k_cmd_update_encryption {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
__le32 reserved;
@@ -4216,7 +4218,7 @@ struct mwl8k_cmd_update_encryption {
} __packed;
struct mwl8k_cmd_set_key {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
__le32 action;
__le32 reserved;
@@ -4504,7 +4506,7 @@ struct peer_capability_info {
} __packed;
struct mwl8k_cmd_update_stadb {
- struct mwl8k_cmd_pkt header;
+ struct mwl8k_cmd_pkt_hdr header;
/* See STADB_ACTION_TYPE */
__le32 action;
@@ -4766,7 +4768,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
return rc;
}
-static void mwl8k_stop(struct ieee80211_hw *hw)
+static void mwl8k_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mwl8k_priv *priv = hw->priv;
int i;
@@ -5174,7 +5176,7 @@ mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
- struct mwl8k_cmd_pkt *cmd;
+ struct mwl8k_cmd_pkt_hdr *cmd;
/*
* Synthesize and return a command packet that programs the
@@ -5234,7 +5236,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_pkt *cmd = (void *)(unsigned long)multicast;
+ struct mwl8k_cmd_pkt_hdr *cmd = (void *)(unsigned long)multicast;
/*
* AP firmware doesn't allow fine-grained control over
@@ -6021,7 +6023,7 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *vif, *tmp_vif;
- mwl8k_stop(hw);
+ mwl8k_stop(hw, false);
mwl8k_rxq_deinit(hw, 0);
/*
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index ae83be572b94..b6a2746c187d 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -33,8 +33,8 @@ mt76_napi_threaded_set(void *data, u64 val)
if (!mt76_is_mmio(dev))
return -EOPNOTSUPP;
- if (dev->napi_dev.threaded != val)
- return dev_set_threaded(&dev->napi_dev, val);
+ if (dev->napi_dev->threaded != val)
+ return dev_set_threaded(dev->napi_dev, val);
return 0;
}
@@ -44,7 +44,7 @@ mt76_napi_threaded_get(void *data, u64 *val)
{
struct mt76_dev *dev = data;
- *val = dev->napi_dev.threaded;
+ *val = dev->napi_dev->threaded;
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 72a7bd5a8576..5f46d6daeaa7 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -532,7 +532,7 @@ error:
}
static int
-mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+mt76_dma_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
enum mt76_txq_id qid, struct sk_buff *skb,
struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
@@ -542,6 +542,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
struct mt76_tx_info tx_info = {
.skb = skb,
};
+ struct mt76_dev *dev = phy->dev;
struct ieee80211_hw *hw;
int len, n = 0, ret = -ENOMEM;
struct mt76_txwi_cache *t;
@@ -549,7 +550,7 @@ mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
dma_addr_t addr;
u8 *txwi;
- if (test_bit(MT76_RESET, &dev->phy.state))
+ if (test_bit(MT76_RESET, &phy->state))
goto free_skb;
t = mt76_get_txwi(dev);
@@ -915,7 +916,7 @@ int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
struct mt76_dev *dev;
int qid, done = 0, cur;
- dev = container_of(napi->dev, struct mt76_dev, napi_dev);
+ dev = mt76_priv(napi->dev);
qid = napi - dev->napi;
rcu_read_lock();
@@ -939,18 +940,35 @@ static int
mt76_dma_init(struct mt76_dev *dev,
int (*poll)(struct napi_struct *napi, int budget))
{
+ struct mt76_dev **priv;
int i;
- init_dummy_netdev(&dev->napi_dev);
- init_dummy_netdev(&dev->tx_napi_dev);
- snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
+ dev->napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
+ if (!dev->napi_dev)
+ return -ENOMEM;
+
+ /* The napi_dev private data points back to the mt76_dev parent, so
+ * that mt76_dev can be retrieved from a given napi_dev
+ */
+ priv = netdev_priv(dev->napi_dev);
+ *priv = dev;
+
+ dev->tx_napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *));
+ if (!dev->tx_napi_dev) {
+ free_netdev(dev->napi_dev);
+ return -ENOMEM;
+ }
+ priv = netdev_priv(dev->tx_napi_dev);
+ *priv = dev;
+
+ snprintf(dev->napi_dev->name, sizeof(dev->napi_dev->name), "%s",
wiphy_name(dev->hw->wiphy));
- dev->napi_dev.threaded = 1;
+ dev->napi_dev->threaded = 1;
init_completion(&dev->mmio.wed_reset);
init_completion(&dev->mmio.wed_reset_complete);
mt76_for_each_q_rx(dev, i) {
- netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
+ netif_napi_add(dev->napi_dev, &dev->napi[i], poll);
mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
napi_enable(&dev->napi[i]);
}
@@ -1018,5 +1036,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev)
mt76_free_pending_txwi(dev);
mt76_free_pending_rxwi(dev);
+ free_netdev(dev->napi_dev);
+ free_netdev(dev->tx_napi_dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h
index 1de5a2b20f74..e3ddc7a83757 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.h
+++ b/drivers/net/wireless/mediatek/mt76/dma.h
@@ -116,4 +116,13 @@ mt76_dma_should_drop_buf(bool *drop, u32 ctrl, u32 buf1, u32 info)
}
}
+static inline void *mt76_priv(struct net_device *dev)
+{
+ struct mt76_dev **priv;
+
+ priv = netdev_priv(dev);
+
+ return *priv;
+}
+
#endif
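Aside: a userspace sketch of the back-pointer scheme mt76_priv() relies on. alloc_netdev_dummy() sizes the private area for one pointer, the init code stores the owning mt76_dev there, and any NAPI callback can then recover it from napi->dev. All names below are illustrative stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

struct fake_netdev {	/* stand-in for struct net_device + priv area */
	void *priv;
};

struct fake_mt76_dev {
	int id;
	struct fake_netdev *napi_dev;
};

static void *netdev_priv_sketch(struct fake_netdev *ndev) { return &ndev->priv; }

int main(void)
{
	struct fake_mt76_dev dev = { .id = 76 };
	struct fake_mt76_dev **slot;

	dev.napi_dev = calloc(1, sizeof(*dev.napi_dev));
	if (!dev.napi_dev)
		return 1;

	slot = netdev_priv_sketch(dev.napi_dev);	/* priv points back */
	*slot = &dev;

	/* later, as if called with napi->dev: */
	printf("recovered id %d\n",
	       (*(struct fake_mt76_dev **)netdev_priv_sketch(dev.napi_dev))->id);

	free(dev.napi_dev);
	return 0;
}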
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index 068206e48aec..bb291fe314fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -465,6 +465,7 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
+ ieee80211_hw_set(hw, SPECTRUM_MGMT);
if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD) &&
hw->max_tx_fragments > 1) {
@@ -1124,6 +1125,11 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
memcpy(status->chain_signal, mstat.chain_signal,
sizeof(mstat.chain_signal));
+ if (mstat.wcid) {
+ status->link_valid = mstat.wcid->link_valid;
+ status->link_id = mstat.wcid->link_id;
+ }
+
*sta = wcid_to_sta(mstat.wcid);
*hw = mt76_phy_hw(dev, mstat.phy_idx);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index a91f6ddacbd9..4a58a78d5ed2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -256,7 +256,7 @@ struct mt76_queue_ops {
int idx, int n_desc, int bufsize,
u32 ring_base);
- int (*tx_queue_skb)(struct mt76_dev *dev, struct mt76_queue *q,
+ int (*tx_queue_skb)(struct mt76_phy *phy, struct mt76_queue *q,
enum mt76_txq_id qid, struct sk_buff *skb,
struct mt76_wcid *wcid, struct ieee80211_sta *sta);
@@ -349,6 +349,8 @@ struct mt76_wcid {
u8 sta:1;
u8 amsdu:1;
u8 phy_idx:2;
+ u8 link_id:4;
+ bool link_valid;
u8 rx_check_pn;
u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
@@ -366,6 +368,8 @@ struct mt76_wcid {
struct mt76_sta_stats stats;
struct list_head poll_list;
+
+ struct mt76_wcid *def_wcid;
};
struct mt76_txq {
@@ -831,8 +835,8 @@ struct mt76_dev {
struct mt76_mcu mcu;
- struct net_device napi_dev;
- struct net_device tx_napi_dev;
+ struct net_device *napi_dev;
+ struct net_device *tx_napi_dev;
spinlock_t rx_lock;
struct napi_struct napi[__MT_RXQ_MAX];
struct sk_buff_head rx_skb[__MT_RXQ_MAX];
@@ -1081,6 +1085,7 @@ bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
void mt76_pci_disable_aspm(struct pci_dev *pdev);
+bool mt76_pci_aspm_supported(struct pci_dev *pdev);
static inline u16 mt76_chip(struct mt76_dev *dev)
{
@@ -1127,7 +1132,7 @@ static inline int mt76_wed_dma_setup(struct mt76_dev *dev, struct mt76_queue *q,
#define mt76_init_queues(dev, ...) (dev)->mt76.queue_ops->init(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_alloc(dev, ...) (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
#define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
-#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
+#define mt76_tx_queue_skb(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mphy), __VA_ARGS__)
#define mt76_queue_rx_reset(dev, ...) (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
#define mt76_queue_rx_cleanup(dev, ...) (dev)->mt76.queue_ops->rx_cleanup(&((dev)->mt76), __VA_ARGS__)
@@ -1256,6 +1261,9 @@ wcid_to_sta(struct mt76_wcid *wcid)
if (!wcid || !wcid->sta)
return NULL;
+ if (wcid->def_wcid)
+ ptr = wcid->def_wcid;
+
return container_of(ptr, struct ieee80211_sta, drv_priv);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
index 7a2f5d38562b..ea017f22fff2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c
@@ -4,6 +4,13 @@
#include "mac.h"
#include "../dma.h"
+static const u8 wmm_queue_map[] = {
+ [IEEE80211_AC_BK] = 0,
+ [IEEE80211_AC_BE] = 1,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_VO] = 3,
+};
+
static void
mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
{
@@ -22,10 +29,10 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
struct ieee80211_sta *sta;
struct mt7603_sta *msta;
struct mt76_wcid *wcid;
+ u8 tid = 0, hwq = 0;
void *priv;
int idx;
u32 val;
- u8 tid = 0;
if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
goto free;
@@ -42,19 +49,36 @@ mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
goto free;
priv = msta = container_of(wcid, struct mt7603_sta, wcid);
- val = le32_to_cpu(txd[0]);
- val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
- val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
- txd[0] = cpu_to_le32(val);
sta = container_of(priv, struct ieee80211_sta, drv_priv);
hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
- if (ieee80211_is_data_qos(hdr->frame_control))
+
+ hwq = wmm_queue_map[IEEE80211_AC_BE];
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
tid = *ieee80211_get_qos_ctl(hdr) &
- IEEE80211_QOS_CTL_TAG1D_MASK;
- skb_set_queue_mapping(skb, tid_to_ac[tid]);
+ IEEE80211_QOS_CTL_TAG1D_MASK;
+ u8 qid = tid_to_ac[tid];
+ hwq = wmm_queue_map[qid];
+ skb_set_queue_mapping(skb, qid);
+ } else if (ieee80211_is_data(hdr->frame_control)) {
+ skb_set_queue_mapping(skb, IEEE80211_AC_BE);
+ hwq = wmm_queue_map[IEEE80211_AC_BE];
+ } else {
+ skb_pull(skb, MT_TXD_SIZE);
+ if (!ieee80211_is_bufferable_mmpdu(skb))
+ goto free;
+ skb_push(skb, MT_TXD_SIZE);
+ skb_set_queue_mapping(skb, MT_TXQ_PSD);
+ hwq = MT_TX_HW_QUEUE_MGMT;
+ }
+
ieee80211_sta_set_buffered(sta, tid, true);
+ val = le32_to_cpu(txd[0]);
+ val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
+ val |= FIELD_PREP(MT_TXD0_Q_IDX, hwq);
+ txd[0] = cpu_to_le32(val);
+
spin_lock_bh(&dev->ps_lock);
__skb_queue_tail(&msta->psq, skb);
if (skb_queue_len(&msta->psq) >= 64) {
@@ -151,12 +175,6 @@ static int mt7603_poll_tx(struct napi_struct *napi, int budget)
int mt7603_dma_init(struct mt7603_dev *dev)
{
- static const u8 wmm_queue_map[] = {
- [IEEE80211_AC_BK] = 0,
- [IEEE80211_AC_BE] = 1,
- [IEEE80211_AC_VI] = 2,
- [IEEE80211_AC_VO] = 3,
- };
int ret;
int i;
@@ -224,7 +242,7 @@ int mt7603_dma_init(struct mt7603_dev *dev)
if (ret)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7603_poll_tx);
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index cf21d06257e5..dc8a77f0a1cc 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1393,6 +1393,7 @@ void mt7603_pse_client_reset(struct mt7603_dev *dev)
MT_CLIENT_RESET_TX_R_E_2_S);
/* Start PSE client TX abort */
+ mt76_set(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_FORCE_TX_EOF);
mt76_set(dev, addr, MT_CLIENT_RESET_TX_R_E_1);
mt76_poll_msec(dev, addr, MT_CLIENT_RESET_TX_R_E_1_S,
MT_CLIENT_RESET_TX_R_E_1_S, 500);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 9b49267b1eab..f35fa643c0da 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -23,7 +23,7 @@ mt7603_start(struct ieee80211_hw *hw)
}
static void
-mt7603_stop(struct ieee80211_hw *hw)
+mt7603_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt7603_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
index e7135b2f1742..bcf7864312d7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c
@@ -67,7 +67,7 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget)
{
struct mt7615_dev *dev;
- dev = container_of(napi, struct mt7615_dev, mt76.tx_napi);
+ dev = mt76_priv(napi->dev);
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
napi_complete(napi);
queue_work(dev->mt76.wq, &dev->pm.wake_work);
@@ -89,7 +89,7 @@ static int mt7615_poll_rx(struct napi_struct *napi, int budget)
struct mt7615_dev *dev;
int done;
- dev = container_of(napi->dev, struct mt7615_dev, mt76.napi_dev);
+ dev = mt76_priv(napi->dev);
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
napi_complete(napi);
@@ -282,7 +282,7 @@ int mt7615_dma_init(struct mt7615_dev *dev)
if (ret < 0)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7615_poll_tx);
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index 0971c164b57e..50e262c1622f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -91,7 +91,7 @@ out:
return ret;
}
-static void mt7615_stop(struct ieee80211_hw *hw)
+static void mt7615_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt7615_dev *dev = mt7615_hw_dev(hw);
struct mt7615_phy *phy = mt7615_hw_phy(hw);
@@ -1326,6 +1326,10 @@ static void mt7615_set_rekey_data(struct ieee80211_hw *hw,
#endif /* CONFIG_PM */
const struct ieee80211_ops mt7615_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7615_tx,
.start = mt7615_start,
.stop = mt7615_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index c807bd8d928d..d50d967828be 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -842,6 +842,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
struct sk_buff *skb, *sskb, *wskb = NULL;
+ struct ieee80211_link_sta *link_sta;
struct mt7615_dev *dev = phy->dev;
struct wtbl_req_hdr *wtbl_hdr;
struct mt7615_sta *msta;
@@ -849,6 +850,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
int cmd, err;
msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta;
+ link_sta = sta ? &sta->deflink : NULL;
sskb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
&msta->wcid);
@@ -861,8 +863,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif,
else
mvif->sta_added = true;
}
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, sta, enable,
- new_entry);
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, link_sta,
+ enable, new_entry);
if (enable && sta)
mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0,
MT76_STA_INFO_STATE_ASSOC);
@@ -1109,8 +1111,8 @@ mt7615_mcu_uni_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif,
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- return mt76_connac_mcu_uni_add_dev(phy->mt76, vif, &mvif->sta.wcid,
- enable);
+ return mt76_connac_mcu_uni_add_dev(phy->mt76, &vif->bss_conf,
+ &mvif->sta.wcid, enable);
}
static int
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
index df737e1ff27b..9335ca0776fe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c
@@ -79,7 +79,7 @@ static void mt7663u_copy(struct mt76_dev *dev, u32 offset,
mutex_unlock(&usb->usb_ctrl_mtx);
}
-static void mt7663u_stop(struct ieee80211_hw *hw)
+static void mt7663u_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt7615_phy *phy = mt7615_hw_phy(hw);
struct mt7615_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index 98d64d3d2993..445d0f0ab779 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -177,6 +177,11 @@ static inline bool is_mt7925(struct mt76_dev *dev)
return mt76_chip(dev) == 0x7925;
}
+static inline bool is_mt7920(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7920;
+}
+
static inline bool is_mt7922(struct mt76_dev *dev)
{
return mt76_chip(dev) == 0x7922;
@@ -184,7 +189,7 @@ static inline bool is_mt7922(struct mt76_dev *dev)
static inline bool is_mt7921(struct mt76_dev *dev)
{
- return mt76_chip(dev) == 0x7961 || is_mt7922(dev);
+ return mt76_chip(dev) == 0x7961 || is_mt7922(dev) || is_mt7920(dev);
}
static inline bool is_mt7663(struct mt76_dev *dev)
@@ -259,6 +264,7 @@ static inline bool is_mt76_fw_txp(struct mt76_dev *dev)
{
switch (mt76_chip(dev)) {
case 0x7961:
+ case 0x7920:
case 0x7922:
case 0x7925:
case 0x7663:
@@ -445,4 +451,6 @@ void mt76_connac2_tx_token_put(struct mt76_dev *dev);
/* connac3 */
void mt76_connac3_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv,
u8 mode);
+void mt76_connac3_mac_decode_eht_radiotap(struct sk_buff *skb, __le32 *rxv,
+ u8 mode);
#endif /* __MT76_CONNAC_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
index 73e9f283d0ae..92ad1ecf6c9d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.c
@@ -6,8 +6,11 @@
#include "dma.h"
#define HE_BITS(f) cpu_to_le16(IEEE80211_RADIOTAP_HE_##f)
+#define EHT_BITS(f) cpu_to_le32(IEEE80211_RADIOTAP_EHT_##f)
#define HE_PREP(f, m, v) le16_encode_bits(le32_get_bits(v, MT_CRXV_HE_##m),\
IEEE80211_RADIOTAP_HE_##f)
+#define EHT_PREP(f, m, v) le32_encode_bits(le32_get_bits(v, MT_CRXV_EHT_##m),\
+ IEEE80211_RADIOTAP_EHT_##f)
static void
mt76_connac3_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
@@ -180,3 +183,85 @@ void mt76_connac3_mac_decode_he_radiotap(struct sk_buff *skb, __le32 *rxv,
}
}
EXPORT_SYMBOL_GPL(mt76_connac3_mac_decode_he_radiotap);
+
+static void *
+mt76_connac3_mac_radiotap_push_tlv(struct sk_buff *skb, u16 type, u16 len)
+{
+ struct ieee80211_radiotap_tlv *tlv;
+
+ tlv = skb_push(skb, sizeof(*tlv) + len);
+ tlv->type = cpu_to_le16(type);
+ tlv->len = cpu_to_le16(len);
+ memset(tlv->data, 0, len);
+
+ return tlv->data;
+}
+
+void mt76_connac3_mac_decode_eht_radiotap(struct sk_buff *skb, __le32 *rxv,
+ u8 mode)
+{
+ struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
+ struct ieee80211_radiotap_eht_usig *usig;
+ struct ieee80211_radiotap_eht *eht;
+ u32 ltf_size = le32_get_bits(rxv[4], MT_CRXV_HE_LTF_SIZE) + 1;
+ u8 bw = FIELD_GET(MT_PRXV_FRAME_MODE, le32_to_cpu(rxv[2]));
+
+ if (WARN_ONCE(skb_mac_header(skb) != skb->data,
+ "Should push tlv at the top of mac hdr"))
+ return;
+
+ eht = mt76_connac3_mac_radiotap_push_tlv(skb, IEEE80211_RADIOTAP_EHT,
+ sizeof(*eht) + sizeof(u32));
+ usig = mt76_connac3_mac_radiotap_push_tlv(skb, IEEE80211_RADIOTAP_EHT_USIG,
+ sizeof(*usig));
+
+ status->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+
+ eht->known |= EHT_BITS(KNOWN_SPATIAL_REUSE) |
+ EHT_BITS(KNOWN_GI) |
+ EHT_BITS(KNOWN_EHT_LTF) |
+ EHT_BITS(KNOWN_LDPC_EXTRA_SYM_OM) |
+ EHT_BITS(KNOWN_PE_DISAMBIGUITY_OM) |
+ EHT_BITS(KNOWN_NSS_S);
+
+ eht->data[0] |=
+ EHT_PREP(DATA0_SPATIAL_REUSE, SR_MASK, rxv[13]) |
+ cpu_to_le32(FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_GI, status->eht.gi) |
+ FIELD_PREP(IEEE80211_RADIOTAP_EHT_DATA0_LTF, ltf_size)) |
+ EHT_PREP(DATA0_PE_DISAMBIGUITY_OM, PE_DISAMBIG, rxv[5]) |
+ EHT_PREP(DATA0_LDPC_EXTRA_SYM_OM, LDPC_EXT_SYM, rxv[4]);
+
+ eht->data[7] |= le32_encode_bits(status->nss, IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
+
+ eht->user_info[0] |=
+ EHT_BITS(USER_INFO_MCS_KNOWN) |
+ EHT_BITS(USER_INFO_CODING_KNOWN) |
+ EHT_BITS(USER_INFO_NSS_KNOWN_O) |
+ EHT_BITS(USER_INFO_BEAMFORMING_KNOWN_O) |
+ EHT_BITS(USER_INFO_DATA_FOR_USER) |
+ le32_encode_bits(status->rate_idx, IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
+ le32_encode_bits(status->nss, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O);
+
+ if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
+ eht->user_info[0] |= EHT_BITS(USER_INFO_BEAMFORMING_O);
+
+ if (le32_to_cpu(rxv[0]) & MT_PRXV_HT_AD_CODE)
+ eht->user_info[0] |= EHT_BITS(USER_INFO_CODING);
+
+ if (mode == MT_PHY_TYPE_EHT_MU)
+ eht->user_info[0] |= EHT_BITS(USER_INFO_STA_ID_KNOWN) |
+ EHT_PREP(USER_INFO_STA_ID, MU_AID, rxv[8]);
+
+ usig->common |=
+ EHT_BITS(USIG_COMMON_PHY_VER_KNOWN) |
+ EHT_BITS(USIG_COMMON_BW_KNOWN) |
+ EHT_BITS(USIG_COMMON_UL_DL_KNOWN) |
+ EHT_BITS(USIG_COMMON_BSS_COLOR_KNOWN) |
+ EHT_BITS(USIG_COMMON_TXOP_KNOWN) |
+ le32_encode_bits(0, IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER) |
+ le32_encode_bits(bw, IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW) |
+ EHT_PREP(USIG_COMMON_UL_DL, UPLINK, rxv[5]) |
+ EHT_PREP(USIG_COMMON_BSS_COLOR, BSS_COLOR, rxv[9]) |
+ EHT_PREP(USIG_COMMON_TXOP, TXOP_DUR, rxv[9]);
+}
+EXPORT_SYMBOL_GPL(mt76_connac3_mac_decode_eht_radiotap);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
index 83dcd964bfd0..353e66069840 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac3_mac.h
@@ -142,6 +142,28 @@ enum {
#define MT_CRXV_HE_RU3_L GENMASK(31, 27)
#define MT_CRXV_HE_RU3_H GENMASK(3, 0)
+#define MT_CRXV_EHT_NUM_USER GENMASK(26, 20)
+#define MT_CRXV_EHT_LTF_SIZE GENMASK(28, 27)
+#define MT_CRXV_EHT_LDPC_EXT_SYM BIT(30)
+#define MT_CRXV_EHT_PE_DISAMBIG BIT(1)
+#define MT_CRXV_EHT_UPLINK BIT(2)
+#define MT_CRXV_EHT_MU_AID GENMASK(27, 17)
+#define MT_CRXV_EHT_BEAM_CHNG BIT(29)
+#define MT_CRXV_EHT_DOPPLER BIT(0)
+#define MT_CRXV_EHT_BSS_COLOR GENMASK(15, 10)
+#define MT_CRXV_EHT_TXOP_DUR GENMASK(23, 17)
+#define MT_CRXV_EHT_SR_MASK GENMASK(11, 8)
+#define MT_CRXV_EHT_SR1_MASK GENMASK(15, 12)
+#define MT_CRXV_EHT_SR2_MASK GENMASK(19, 16)
+#define MT_CRXV_EHT_SR3_MASK GENMASK(23, 20)
+#define MT_CRXV_EHT_RU0 GENMASK(8, 0)
+#define MT_CRXV_EHT_RU1 GENMASK(17, 9)
+#define MT_CRXV_EHT_RU2 GENMASK(26, 18)
+#define MT_CRXV_EHT_RU3_L GENMASK(31, 27)
+#define MT_CRXV_EHT_RU3_H GENMASK(3, 0)
+#define MT_CRXV_EHT_SIG_MCS GENMASK(19, 18)
+#define MT_CRXV_EHT_LTF_SYM GENMASK(22, 20)
+
enum tx_header_format {
MT_HDR_FORMAT_802_3,
MT_HDR_FORMAT_CMD,
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index af0c2b2aacb0..4dce03ddbfa4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -257,7 +257,7 @@ mt76_connac_mcu_add_nested_tlv(struct sk_buff *skb, int tag, int len,
};
u16 ntlv;
- ptlv = skb_put(skb, len);
+ ptlv = skb_put_zero(skb, len);
memcpy(ptlv, &tlv, sizeof(tlv));
ntlv = le16_to_cpu(ntlv_hdr->tlv_num);
@@ -283,7 +283,7 @@ __mt76_connac_mcu_alloc_sta_req(struct mt76_dev *dev, struct mt76_vif *mvif,
};
struct sk_buff *skb;
- if (is_mt799x(dev) && !wcid->sta)
+ if (wcid && !wcid->sta)
hdr.muar_idx = 0xe;
mt76_connac_mcu_get_wlan_idx(dev, wcid, &hdr.wlan_idx_lo,
@@ -370,7 +370,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ieee80211_link_sta *link_sta,
bool enable, bool newly)
{
struct sta_rec_basic *basic;
@@ -390,9 +390,16 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
basic->conn_state = CONN_STATE_DISCONNECT;
}
- if (!sta) {
+ if (!link_sta) {
basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC);
- eth_broadcast_addr(basic->peer_addr);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ !is_zero_ether_addr(vif->bss_conf.bssid)) {
+ memcpy(basic->peer_addr, vif->bss_conf.bssid, ETH_ALEN);
+ basic->aid = cpu_to_le16(vif->cfg.aid);
+ } else {
+ eth_broadcast_addr(basic->peer_addr);
+ }
return;
}
@@ -404,7 +411,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
else
conn_type = CONNECTION_INFRA_STA;
basic->conn_type = cpu_to_le32(conn_type);
- basic->aid = cpu_to_le16(sta->aid);
+ basic->aid = cpu_to_le16(link_sta->sta->aid);
break;
case NL80211_IFTYPE_STATION:
if (vif->p2p && !is_mt7921(dev))
@@ -416,15 +423,15 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
break;
case NL80211_IFTYPE_ADHOC:
basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC);
- basic->aid = cpu_to_le16(sta->aid);
+ basic->aid = cpu_to_le16(link_sta->sta->aid);
break;
default:
WARN_ON(1);
break;
}
- memcpy(basic->peer_addr, sta->addr, ETH_ALEN);
- basic->qos = sta->wme;
+ memcpy(basic->peer_addr, link_sta->addr, ETH_ALEN);
+ basic->qos = link_sta->sta->wme;
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_basic_tlv);
@@ -786,7 +793,8 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_he_tlv_v2);
u8
mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
- enum nl80211_band band, struct ieee80211_sta *sta)
+ enum nl80211_band band,
+ struct ieee80211_link_sta *link_sta)
{
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
@@ -794,11 +802,11 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
const struct ieee80211_sta_eht_cap *eht_cap;
u8 mode = 0;
- if (sta) {
- ht_cap = &sta->deflink.ht_cap;
- vht_cap = &sta->deflink.vht_cap;
- he_cap = &sta->deflink.he_cap;
- eht_cap = &sta->deflink.eht_cap;
+ if (link_sta) {
+ ht_cap = &link_sta->ht_cap;
+ vht_cap = &link_sta->vht_cap;
+ he_cap = &link_sta->he_cap;
+ eht_cap = &link_sta->eht_cap;
} else {
struct ieee80211_supported_band *sband;
@@ -904,7 +912,8 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
phy = (struct sta_rec_phy *)tlv;
- phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta);
+ phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band,
+ &sta->deflink);
phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
phy->rcpi = rcpi;
phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR,
@@ -1037,6 +1046,7 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
struct mt76_sta_cmd_info *info)
{
struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv;
+ struct ieee80211_link_sta *link_sta;
struct mt76_dev *dev = phy->dev;
struct wtbl_req_hdr *wtbl_hdr;
struct tlv *sta_wtbl;
@@ -1046,9 +1056,11 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy,
if (IS_ERR(skb))
return PTR_ERR(skb);
+ link_sta = info->sta ? &info->sta->deflink : NULL;
if (info->sta || !info->offload_fw)
- mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, info->sta,
- info->enable, info->newly);
+ mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
+ link_sta, info->enable,
+ info->newly);
if (info->sta && info->enable)
mt76_connac_mcu_sta_tlv(phy, skb, info->sta,
info->vif, info->rcpi,
@@ -1125,11 +1137,11 @@ void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb,
EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ba_tlv);
int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
- struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
struct mt76_wcid *wcid,
bool enable)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt76_vif *mvif = (struct mt76_vif *)bss_conf->vif->drv_priv;
struct mt76_dev *dev = phy->dev;
struct {
struct {
@@ -1141,7 +1153,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
__le16 tag;
__le16 len;
u8 active;
- u8 pad;
+ u8 link_idx; /* not link_id */
u8 omac_addr[ETH_ALEN];
} __packed tlv;
} dev_req = {
@@ -1153,6 +1165,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
.tag = cpu_to_le16(DEV_INFO_ACTIVE),
.len = cpu_to_le16(sizeof(struct req_tlv)),
.active = enable,
+ .link_idx = mvif->idx,
},
};
struct {
@@ -1175,12 +1188,13 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
.bmc_tx_wlan_idx = cpu_to_le16(wcid->idx),
.sta_idx = cpu_to_le16(wcid->idx),
.conn_state = 1,
+ .link_idx = mvif->idx,
},
};
int err, idx, cmd, len;
void *data;
- switch (vif->type) {
+ switch (bss_conf->vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP:
@@ -1200,7 +1214,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
basic_req.basic.hw_bss_idx = idx;
- memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN);
+ memcpy(dev_req.tlv.omac_addr, bss_conf->addr, ETH_ALEN);
cmd = enable ? MCU_UNI_CMD(DEV_INFO_UPDATE) : MCU_UNI_CMD(BSS_INFO_UPDATE);
data = enable ? (void *)&dev_req : (void *)&basic_req;
@@ -1298,7 +1312,8 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba);
u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
- enum nl80211_band band, struct ieee80211_sta *sta)
+ enum nl80211_band band,
+ struct ieee80211_link_sta *link_sta)
{
struct mt76_dev *dev = phy->dev;
const struct ieee80211_sta_he_cap *he_cap;
@@ -1309,10 +1324,10 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
if (is_connac_v1(dev))
return 0x38;
- if (sta) {
- ht_cap = &sta->deflink.ht_cap;
- vht_cap = &sta->deflink.vht_cap;
- he_cap = &sta->deflink.he_cap;
+ if (link_sta) {
+ ht_cap = &link_sta->ht_cap;
+ vht_cap = &link_sta->vht_cap;
+ he_cap = &link_sta->he_cap;
} else {
struct ieee80211_supported_band *sband;
@@ -1670,7 +1685,7 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
set_bit(MT76_HW_SCANNING, &phy->state);
mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
- req = (struct mt76_connac_hw_scan_req *)skb_put(skb, sizeof(*req));
+ req = (struct mt76_connac_hw_scan_req *)skb_put_zero(skb, sizeof(*req));
req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
req->bss_idx = mvif->idx;
@@ -1798,7 +1813,7 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
mvif->scan_seq_num = (mvif->scan_seq_num + 1) & 0x7f;
- req = (struct mt76_connac_sched_scan_req *)skb_put(skb, sizeof(*req));
+ req = (struct mt76_connac_sched_scan_req *)skb_put_zero(skb, sizeof(*req));
req->version = 1;
req->seq_num = mvif->scan_seq_num | mvif->band_idx << 7;
@@ -2321,7 +2336,7 @@ int mt76_connac_mcu_update_gtk_rekey(struct ieee80211_hw *hw,
return -ENOMEM;
skb_put_data(skb, &hdr, sizeof(hdr));
- gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put(skb,
+ gtk_tlv = (struct mt76_connac_gtk_rekey_tlv *)skb_put_zero(skb,
sizeof(*gtk_tlv));
gtk_tlv->tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_GTK_REKEY);
gtk_tlv->len = cpu_to_le16(sizeof(*gtk_tlv));
@@ -2446,7 +2461,7 @@ mt76_connac_mcu_set_wow_pattern(struct mt76_dev *dev,
return -ENOMEM;
skb_put_data(skb, &hdr, sizeof(hdr));
- ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put(skb, sizeof(*ptlv));
+ ptlv = (struct mt76_connac_wow_pattern_tlv *)skb_put_zero(skb, sizeof(*ptlv));
ptlv->tag = cpu_to_le16(UNI_SUSPEND_WOW_PATTERN);
ptlv->len = cpu_to_le16(sizeof(*ptlv));
ptlv->data_len = pattern->pattern_len;
@@ -2527,6 +2542,7 @@ int mt76_connac_mcu_set_hif_suspend(struct mt76_dev *dev, bool suspend)
__le16 tag;
__le16 len;
u8 suspend;
+ u8 pad[7];
} __packed hif_suspend;
} req = {
.hif_suspend = {
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 657a4d1f856b..4242d436de26 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -545,6 +545,13 @@ struct sta_rec_muru {
} mimo_ul;
} __packed;
+struct sta_rec_remove {
+ __le16 tag;
+ __le16 len;
+ u8 action;
+ u8 pad[3];
+} __packed;
+
struct sta_phy {
u8 type;
u8 flag;
@@ -610,6 +617,12 @@ struct sta_rec_ra_fixed {
u8 mmps_mode;
} __packed;
+struct sta_rec_tx_proc {
+ __le16 tag;
+ __le16 len;
+ __le32 flag;
+} __packed;
+
/* wtbl_rec */
struct wtbl_req_hdr {
@@ -777,6 +790,7 @@ struct wtbl_raw {
sizeof(struct sta_rec_ra_fixed) + \
sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct sta_rec_pn_info) + \
+ sizeof(struct sta_rec_tx_proc) + \
sizeof(struct tlv) + \
MT76_CONNAC_WTBL_UPDATE_MAX_SIZE)
@@ -806,7 +820,10 @@ enum {
STA_REC_HE_6G = 0x17,
STA_REC_HE_V2 = 0x19,
STA_REC_MLD = 0x20,
+ STA_REC_EHT_MLD = 0x21,
STA_REC_EHT = 0x22,
+ STA_REC_MLD_OFF = 0x23,
+ STA_REC_REMOVE = 0x25,
STA_REC_PN_INFO = 0x26,
STA_REC_KEY_V3 = 0x27,
STA_REC_HDRT = 0x28,
@@ -1004,6 +1021,7 @@ enum {
MCU_EVENT_CH_PRIVILEGE = 0x18,
MCU_EVENT_SCHED_SCAN_DONE = 0x23,
MCU_EVENT_DBG_MSG = 0x27,
+ MCU_EVENT_RSSI_NOTIFY = 0x96,
MCU_EVENT_TXPWR = 0xd0,
MCU_EVENT_EXT = 0xed,
MCU_EVENT_RESTART_DL = 0xef,
@@ -1182,6 +1200,7 @@ enum {
MCU_EXT_CMD_EFUSE_ACCESS = 0x01,
MCU_EXT_CMD_RF_REG_ACCESS = 0x02,
MCU_EXT_CMD_RF_TEST = 0x04,
+ MCU_EXT_CMD_ID_RADIO_ON_OFF_CTRL = 0x05,
MCU_EXT_CMD_PM_STATE_CTRL = 0x07,
MCU_EXT_CMD_CHANNEL_SWITCH = 0x08,
MCU_EXT_CMD_SET_TX_POWER_CTRL = 0x11,
@@ -1213,6 +1232,7 @@ enum {
MCU_EXT_CMD_TXDPD_CAL = 0x60,
MCU_EXT_CMD_CAL_CACHE = 0x67,
MCU_EXT_CMD_RED_ENABLE = 0x68,
+ MCU_EXT_CMD_CP_SUPPORT = 0x75,
MCU_EXT_CMD_SET_RADAR_TH = 0x7c,
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
MCU_EXT_CMD_MWDS_SUPPORT = 0x80,
@@ -1303,6 +1323,7 @@ enum {
MCU_CE_CMD_SCHED_SCAN_ENABLE = 0x61,
MCU_CE_CMD_SCHED_SCAN_REQ = 0x62,
MCU_CE_CMD_GET_NIC_CAPAB = 0x8a,
+ MCU_CE_CMD_RSSI_MONITOR = 0xa1,
MCU_CE_CMD_SET_MU_EDCA_PARMS = 0xb0,
MCU_CE_CMD_REG_WRITE = 0xc0,
MCU_CE_CMD_REG_READ = 0xc0,
@@ -1381,6 +1402,7 @@ enum {
MT_NIC_CAP_WFDMA_REALLOC,
MT_NIC_CAP_6G,
MT_NIC_CAP_CHIP_CAP = 0x20,
+ MT_NIC_CAP_EML_CAP = 0x22,
};
#define UNI_WOW_DETECT_TYPE_MAGIC BIT(0)
@@ -1432,7 +1454,7 @@ struct mt76_connac_bss_basic_tlv {
__le16 sta_idx;
__le16 nonht_basic_phy;
u8 phymode_ext; /* bit(0) AX_6G */
- u8 pad[1];
+ u8 link_idx;
} __packed;
struct mt76_connac_bss_qos_tlv {
@@ -1448,6 +1470,10 @@ struct mt76_connac_beacon_loss_event {
u8 pad[2];
} __packed;
+struct mt76_connac_rssi_notify_event {
+ __le32 rssi[4];
+} __packed;
+
struct mt76_connac_mcu_bss_event {
u8 bss_idx;
u8 is_absent;
@@ -1718,7 +1744,10 @@ enum mt76_sta_info_state {
};
struct mt76_sta_cmd_info {
- struct ieee80211_sta *sta;
+ union {
+ struct ieee80211_sta *sta;
+ struct ieee80211_link_sta *link_sta;
+ };
struct mt76_wcid *wcid;
struct ieee80211_vif *vif;
@@ -1868,8 +1897,8 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy);
int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif);
void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable,
- bool newly);
+ struct ieee80211_link_sta *link_sta,
+ bool enable, bool newly);
void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, void *sta_wtbl,
@@ -1883,7 +1912,8 @@ int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev,
struct mt76_wcid *wcid, int cmd);
void mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta);
u8 mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
- enum nl80211_band band, struct ieee80211_sta *sta);
+ enum nl80211_band band,
+ struct ieee80211_link_sta *link_sta);
int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -1902,7 +1932,7 @@ void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb,
struct ieee80211_ampdu_params *params,
bool enable, bool tx);
int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy,
- struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
struct mt76_wcid *wcid,
bool enable);
int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif,
@@ -1977,7 +2007,8 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
const struct ieee80211_sta_eht_cap *
mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif);
u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
- enum nl80211_band band, struct ieee80211_sta *sta);
+ enum nl80211_band band,
+ struct ieee80211_link_sta *sta);
u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
enum nl80211_band band);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index 79b7996ad1a8..2ecee7c5c80d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -44,7 +44,7 @@ static void mt76x0e_stop_hw(struct mt76x02_dev *dev)
mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN);
}
-static void mt76x0e_stop(struct ieee80211_hw *hw)
+static void mt76x0e_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt76x02_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
index bba44f289b4e..390f502e97f0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c
@@ -77,7 +77,7 @@ static void mt76x0u_cleanup(struct mt76x02_dev *dev)
mt76u_queues_deinit(&dev->mt76);
}
-static void mt76x0u_stop(struct ieee80211_hw *hw)
+static void mt76x0u_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt76x02_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index e5ad635d3c56..35b7ebc2c9c6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -239,7 +239,7 @@ int mt76x02_dma_init(struct mt76x02_dev *dev)
if (ret)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt76x02_poll_tx);
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
index bfc8c69f43fa..6accea551319 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c
@@ -24,7 +24,7 @@ mt76x2_start(struct ieee80211_hw *hw)
}
static void
-mt76x2_stop(struct ieee80211_hw *hw)
+mt76x2_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt76x02_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
index 9fe390fdd730..ba0241c36672 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c
@@ -22,7 +22,7 @@ static int mt76x2u_start(struct ieee80211_hw *hw)
return 0;
}
-static void mt76x2u_stop(struct ieee80211_hw *hw)
+static void mt76x2u_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt76x02_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 6c3696c8c700..578013884e43 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -523,7 +523,8 @@ mt7915_fw_debug_wm_set(void *data, u64 val)
/* WM CPU info record control */
mt76_clear(dev, MT_CPU_UTIL_CTRL, BIT(0));
- mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw.debug_wm);
+ mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) |
+ (dev->fw.debug_wm ? 0 : BIT(0)));
mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR, BIT(5));
mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR, BIT(5));
@@ -1049,6 +1050,7 @@ static ssize_t
mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
+ int i, ret, pwr, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0;
struct mt7915_phy *phy = file->private_data;
struct mt7915_dev *dev = phy->dev;
struct mt76_phy *mphy = phy->mt76;
@@ -1057,7 +1059,6 @@ mt7915_rate_txpower_set(struct file *file, const char __user *user_buf,
.band_idx = phy->mt76->band_idx,
};
char buf[100];
- int i, ret, pwr160 = 0, pwr80 = 0, pwr40 = 0, pwr20 = 0;
enum mac80211_rx_encoding mode;
u32 offs = 0, len = 0;
@@ -1130,8 +1131,8 @@ skip:
if (ret)
goto out;
- mphy->txpower_cur = max(mphy->txpower_cur,
- max(pwr160, max(pwr80, max(pwr40, pwr20))));
+ pwr = max3(pwr80, pwr40, pwr20);
+ mphy->txpower_cur = max3(mphy->txpower_cur, pwr160, pwr);
out:
mutex_unlock(&dev->mt76.mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
index 0baa82c8df5a..0c62272fe7d0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c
@@ -578,7 +578,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (ret < 0)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7915_poll_tx);
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
index 3bb2643d1b26..bfdbc15abaa9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.c
@@ -9,28 +9,34 @@ static int mt7915_eeprom_load_precal(struct mt7915_dev *dev)
{
struct mt76_dev *mdev = &dev->mt76;
u8 *eeprom = mdev->eeprom.data;
- u32 val = eeprom[MT_EE_DO_PRE_CAL];
- u32 offs;
+ u32 offs = is_mt7915(&dev->mt76) ? MT_EE_DO_PRE_CAL : MT_EE_DO_PRE_CAL_V2;
+ u32 size, val = eeprom[offs];
int ret;
- if (!dev->flash_mode)
+ if (!dev->flash_mode || !val)
return 0;
- if (val != (MT_EE_WIFI_CAL_DPD | MT_EE_WIFI_CAL_GROUP))
- return 0;
+ size = mt7915_get_cal_group_size(dev) + mt7915_get_cal_dpd_size(dev);
- val = MT_EE_CAL_GROUP_SIZE + MT_EE_CAL_DPD_SIZE;
- dev->cal = devm_kzalloc(mdev->dev, val, GFP_KERNEL);
+ dev->cal = devm_kzalloc(mdev->dev, size, GFP_KERNEL);
if (!dev->cal)
return -ENOMEM;
offs = is_mt7915(&dev->mt76) ? MT_EE_PRECAL : MT_EE_PRECAL_V2;
- ret = mt76_get_of_data_from_mtd(mdev, dev->cal, offs, val);
+ ret = mt76_get_of_data_from_mtd(mdev, dev->cal, offs, size);
if (!ret)
return ret;
- return mt76_get_of_data_from_nvmem(mdev, dev->cal, "precal", val);
+ ret = mt76_get_of_data_from_nvmem(mdev, dev->cal, "precal", size);
+ if (!ret)
+ return ret;
+
+ dev_warn(mdev->dev, "missing precal data, size=%d\n", size);
+ devm_kfree(mdev->dev, dev->cal);
+ dev->cal = NULL;
+
+ return ret;
}
static int mt7915_check_eeprom(struct mt7915_dev *dev)
@@ -256,10 +262,7 @@ int mt7915_eeprom_init(struct mt7915_dev *dev)
return ret;
}
- ret = mt7915_eeprom_load_precal(dev);
- if (ret)
- return ret;
-
+ mt7915_eeprom_load_precal(dev);
mt7915_eeprom_parse_hw_cap(dev, &dev->phy);
memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
ETH_ALEN);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
index adc26a222823..509fb43d8a68 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/eeprom.h
@@ -19,6 +19,7 @@ enum mt7915_eeprom_field {
MT_EE_DDIE_FT_VERSION = 0x050,
MT_EE_DO_PRE_CAL = 0x062,
MT_EE_WIFI_CONF = 0x190,
+ MT_EE_DO_PRE_CAL_V2 = 0x19a,
MT_EE_RATE_DELTA_2G = 0x252,
MT_EE_RATE_DELTA_5G = 0x29d,
MT_EE_TX0_POWER_2G = 0x2fc,
@@ -39,10 +40,19 @@ enum mt7915_eeprom_field {
};
#define MT_EE_WIFI_CAL_GROUP BIT(0)
-#define MT_EE_WIFI_CAL_DPD GENMASK(2, 1)
+#define MT_EE_WIFI_CAL_DPD_2G BIT(2)
+#define MT_EE_WIFI_CAL_DPD_5G BIT(1)
+#define MT_EE_WIFI_CAL_DPD_6G BIT(3)
+#define MT_EE_WIFI_CAL_DPD GENMASK(3, 1)
#define MT_EE_CAL_UNIT 1024
-#define MT_EE_CAL_GROUP_SIZE (49 * MT_EE_CAL_UNIT + 16)
-#define MT_EE_CAL_DPD_SIZE (54 * MT_EE_CAL_UNIT)
+#define MT_EE_CAL_GROUP_SIZE_7915 (49 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_GROUP_SIZE_7916 (54 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_GROUP_SIZE_7975 (54 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_GROUP_SIZE_7976 (94 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_GROUP_SIZE_7916_6G (94 * MT_EE_CAL_UNIT + 16)
+#define MT_EE_CAL_DPD_SIZE_V1 (54 * MT_EE_CAL_UNIT)
+#define MT_EE_CAL_DPD_SIZE_V2 (300 * MT_EE_CAL_UNIT)
+#define MT_EE_CAL_DPD_SIZE_V2_7981 (102 * MT_EE_CAL_UNIT) /* no 6g dpd data */
#define MT_EE_WIFI_CONF0_TX_PATH GENMASK(2, 0)
#define MT_EE_WIFI_CONF0_BAND_SEL GENMASK(7, 6)
@@ -156,6 +166,37 @@ mt7915_tssi_enabled(struct mt7915_dev *dev, enum nl80211_band band)
return val & MT_EE_WIFI_CONF7_TSSI0_5G;
}
+static inline u32
+mt7915_get_cal_group_size(struct mt7915_dev *dev)
+{
+ u8 *eep = dev->mt76.eeprom.data;
+ u32 val;
+
+ if (is_mt7915(&dev->mt76)) {
+ return MT_EE_CAL_GROUP_SIZE_7915;
+ } else if (is_mt7916(&dev->mt76)) {
+ val = eep[MT_EE_WIFI_CONF + 1];
+ val = FIELD_GET(MT_EE_WIFI_CONF0_BAND_SEL, val);
+ return (val == MT_EE_V2_BAND_SEL_6GHZ) ? MT_EE_CAL_GROUP_SIZE_7916_6G :
+ MT_EE_CAL_GROUP_SIZE_7916;
+ } else if (mt7915_check_adie(dev, false)) {
+ return MT_EE_CAL_GROUP_SIZE_7976;
+ } else {
+ return MT_EE_CAL_GROUP_SIZE_7975;
+ }
+}
+
+static inline u32
+mt7915_get_cal_dpd_size(struct mt7915_dev *dev)
+{
+ if (is_mt7915(&dev->mt76))
+ return MT_EE_CAL_DPD_SIZE_V1;
+ else if (is_mt7981(&dev->mt76))
+ return MT_EE_CAL_DPD_SIZE_V2_7981;
+ else
+ return MT_EE_CAL_DPD_SIZE_V2;
+}
+
extern const u8 mt7915_sku_group_len[MAX_SKU_RATE_GROUP_NUM];
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index cea2f6d9050a..a978f434dc5e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -823,7 +823,7 @@ mt7915_init_hardware(struct mt7915_dev *dev, struct mt7915_phy *phy2)
if (ret < 0)
return ret;
- if (dev->flash_mode) {
+ if (dev->cal) {
ret = mt7915_mcu_apply_group_cal(dev);
if (ret)
return ret;
@@ -934,11 +934,10 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
/* the maximum cap is 4 x 3, (Nr, Nc) = (3, 2) */
elem->phy_cap_info[7] |= min_t(int, sts - 1, 2) << 3;
- if (vif != NL80211_IFTYPE_AP)
+ if (vif != NL80211_IFTYPE_AP && vif != NL80211_IFTYPE_STATION)
return;
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
- elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
c = FIELD_PREP(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
sts - 1);
@@ -947,6 +946,11 @@ mt7915_set_stream_he_txbf_caps(struct mt7915_phy *phy,
sts_160 - 1);
elem->phy_cap_info[5] |= c;
+ if (vif != NL80211_IFTYPE_AP)
+ return;
+
+ elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
+
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB;
elem->phy_cap_info[6] |= c;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index e45361111f9b..8008ce3fa6c7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -140,8 +140,15 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
msta->airtime_ac[i] = mt76_rr(dev, addr);
msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
- tx_time[i] = msta->airtime_ac[i] - tx_last;
- rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+ if (msta->airtime_ac[i] <= tx_last)
+ tx_time[i] = 0;
+ else
+ tx_time[i] = msta->airtime_ac[i] - tx_last;
+
+ if (msta->airtime_ac[i + 4] <= rx_last)
+ rx_time[i] = 0;
+ else
+ rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
if ((tx_last | rx_last) & BIT(30))
clear = true;
@@ -1338,10 +1345,8 @@ mt7915_mac_restart(struct mt7915_dev *dev)
set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
- if (ext_phy) {
+ if (ext_phy)
set_bit(MT76_RESET, &ext_phy->state);
- set_bit(MT76_MCU_RESET, &ext_phy->state);
- }
/* lock/unlock all queues to ensure that no tx is pending */
mt76_txq_schedule_all(&dev->mphy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index 3709d18da0e6..049223df9beb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -108,7 +108,7 @@ static int mt7915_start(struct ieee80211_hw *hw)
return ret;
}
-static void mt7915_stop(struct ieee80211_hw *hw)
+static void mt7915_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
@@ -329,7 +329,7 @@ int mt7915_set_channel(struct mt7915_phy *phy)
mt76_set_channel(phy->mt76);
- if (dev->flash_mode) {
+ if (dev->cal) {
ret = mt7915_mcu_apply_tx_dpd(phy);
if (ret)
goto out;
@@ -744,6 +744,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
bool ext_phy = mvif->phy != &dev->phy;
int ret, idx;
+ u32 addr;
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
if (idx < 0)
@@ -767,6 +768,9 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (ret)
return ret;
+ addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 30);
+ mt76_rmw_field(dev, addr, GENMASK(7, 0), 0xa0);
+
return mt7915_mcu_add_rate_ctrl(dev, vif, sta, false);
}
@@ -1657,6 +1661,10 @@ mt7915_net_fill_forward_path(struct ieee80211_hw *hw,
#endif
const struct ieee80211_ops mt7915_ops = {
+ .add_chanctx = ieee80211_emulate_add_chanctx,
+ .remove_chanctx = ieee80211_emulate_remove_chanctx,
+ .change_chanctx = ieee80211_emulate_change_chanctx,
+ .switch_vif_chanctx = ieee80211_emulate_switch_vif_chanctx,
.tx = mt7915_tx,
.start = mt7915_start,
.stop = mt7915_stop,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index d90f98c50039..2185cd24e2e1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -331,7 +331,7 @@ mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.color_change_active || vif->type == NL80211_IFTYPE_STATION)
return;
- ieee80211_color_change_finish(vif);
+ ieee80211_color_change_finish(vif, 0);
}
static void
@@ -424,7 +424,7 @@ mt7915_mcu_add_nested_subtlv(struct sk_buff *skb, int sub_tag, int sub_len,
.len = cpu_to_le16(sub_len),
};
- ptlv = skb_put(skb, sub_len);
+ ptlv = skb_put_zero(skb, sub_len);
memcpy(ptlv, &tlv, sizeof(tlv));
le16_add_cpu(sub_ntlv, 1);
@@ -1193,7 +1193,7 @@ mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
- const u8 matrix[4][4] = {
+ static const u8 matrix[4][4] = {
{0, 0, 0, 0},
{1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */
{2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */
@@ -1503,7 +1503,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
ra->valid = true;
ra->auto_rate = true;
- ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
+ ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, &sta->deflink);
ra->channel = chandef->chan->hw_value;
ra->bw = sta->deflink.bandwidth;
ra->phy.bw = sta->deflink.bandwidth;
@@ -1656,11 +1656,13 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct ieee80211_link_sta *link_sta;
struct mt7915_sta *msta;
struct sk_buff *skb;
int ret;
msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
+ link_sta = sta ? &sta->deflink : NULL;
skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
&msta->wcid);
@@ -1668,7 +1670,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, sta, enable,
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta, enable,
!rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx]));
if (!enable)
goto out;
@@ -1919,8 +1921,7 @@ mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
bcn = (struct bss_info_bcn *)tlv;
bcn->enable = true;
- if (changed & BSS_CHANGED_FILS_DISCOVERY &&
- vif->bss_conf.fils_discovery.max_interval) {
+ if (changed & BSS_CHANGED_FILS_DISCOVERY) {
interval = vif->bss_conf.fils_discovery.max_interval;
skb = ieee80211_get_fils_discovery_tmpl(hw, vif);
} else if (changed & BSS_CHANGED_UNSOL_BCAST_PROBE_RESP &&
@@ -1957,7 +1958,7 @@ mt7915_mcu_add_inband_discov(struct mt7915_dev *dev, struct ieee80211_vif *vif,
discov->tx_type = !!(changed & BSS_CHANGED_FILS_DISCOVERY);
discov->tx_interval = interval;
discov->prob_rsp_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
- discov->enable = true;
+ discov->enable = !!interval;
buf = (u8 *)sub_tlv + sizeof(*discov);
@@ -2904,9 +2905,10 @@ static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx,
int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev)
{
u8 idx = 0, *cal = dev->cal, *eep = dev->mt76.eeprom.data;
- u32 total = MT_EE_CAL_GROUP_SIZE;
+ u32 total = mt7915_get_cal_group_size(dev);
+ u32 offs = is_mt7915(&dev->mt76) ? MT_EE_DO_PRE_CAL : MT_EE_DO_PRE_CAL_V2;
- if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_GROUP))
+ if (!(eep[offs] & MT_EE_WIFI_CAL_GROUP))
return 0;
/*
@@ -2942,9 +2944,9 @@ static int mt7915_find_freq_idx(const u16 *freqs, int n_freqs, u16 cur)
return -1;
}
-static int mt7915_dpd_freq_idx(u16 freq, u8 bw)
+static int mt7915_dpd_freq_idx(struct mt7915_dev *dev, u16 freq, u8 bw)
{
- static const u16 freq_list[] = {
+ static const u16 freq_list_v1[] = {
5180, 5200, 5220, 5240,
5260, 5280, 5300, 5320,
5500, 5520, 5540, 5560,
@@ -2952,65 +2954,135 @@ static int mt7915_dpd_freq_idx(u16 freq, u8 bw)
5660, 5680, 5700, 5745,
5765, 5785, 5805, 5825
};
- int offset_2g = ARRAY_SIZE(freq_list);
+ static const u16 freq_list_v2[] = {
+ /* 6G BW20*/
+ 5955, 5975, 5995, 6015,
+ 6035, 6055, 6075, 6095,
+ 6115, 6135, 6155, 6175,
+ 6195, 6215, 6235, 6255,
+ 6275, 6295, 6315, 6335,
+ 6355, 6375, 6395, 6415,
+ 6435, 6455, 6475, 6495,
+ 6515, 6535, 6555, 6575,
+ 6595, 6615, 6635, 6655,
+ 6675, 6695, 6715, 6735,
+ 6755, 6775, 6795, 6815,
+ 6835, 6855, 6875, 6895,
+ 6915, 6935, 6955, 6975,
+ 6995, 7015, 7035, 7055,
+ 7075, 7095, 7115,
+ /* 6G BW160 */
+ 6025, 6185, 6345, 6505,
+ 6665, 6825, 6985,
+ /* 5G BW20 */
+ 5180, 5200, 5220, 5240,
+ 5260, 5280, 5300, 5320,
+ 5500, 5520, 5540, 5560,
+ 5580, 5600, 5620, 5640,
+ 5660, 5680, 5700, 5720,
+ 5745, 5765, 5785, 5805,
+ 5825, 5845, 5865, 5885,
+ /* 5G BW160 */
+ 5250, 5570, 5815
+ };
+ static const u16 freq_list_v2_7981[] = {
+ /* 5G BW20 */
+ 5180, 5200, 5220, 5240,
+ 5260, 5280, 5300, 5320,
+ 5500, 5520, 5540, 5560,
+ 5580, 5600, 5620, 5640,
+ 5660, 5680, 5700, 5720,
+ 5745, 5765, 5785, 5805,
+ 5825, 5845, 5865, 5885,
+ /* 5G BW160 */
+ 5250, 5570, 5815
+ };
+ const u16 *freq_list = freq_list_v1;
+ int n_freqs = ARRAY_SIZE(freq_list_v1);
int idx;
+ if (!is_mt7915(&dev->mt76)) {
+ if (is_mt7981(&dev->mt76)) {
+ freq_list = freq_list_v2_7981;
+ n_freqs = ARRAY_SIZE(freq_list_v2_7981);
+ } else {
+ freq_list = freq_list_v2;
+ n_freqs = ARRAY_SIZE(freq_list_v2);
+ }
+ }
+
if (freq < 4000) {
if (freq < 2432)
- return offset_2g;
+ return n_freqs;
if (freq < 2457)
- return offset_2g + 1;
+ return n_freqs + 1;
- return offset_2g + 2;
+ return n_freqs + 2;
}
- if (bw == NL80211_CHAN_WIDTH_80P80 || bw == NL80211_CHAN_WIDTH_160)
+ if (bw == NL80211_CHAN_WIDTH_80P80)
return -1;
if (bw != NL80211_CHAN_WIDTH_20) {
- idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
- freq + 10);
+ idx = mt7915_find_freq_idx(freq_list, n_freqs, freq + 10);
if (idx >= 0)
return idx;
- idx = mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list),
- freq - 10);
+ idx = mt7915_find_freq_idx(freq_list, n_freqs, freq - 10);
if (idx >= 0)
return idx;
}
- return mt7915_find_freq_idx(freq_list, ARRAY_SIZE(freq_list), freq);
+ return mt7915_find_freq_idx(freq_list, n_freqs, freq);
}
int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
- u16 total = 2, center_freq = chandef->center_freq1;
+ enum nl80211_band band = chandef->chan->band;
+ u32 offs = is_mt7915(&dev->mt76) ? MT_EE_DO_PRE_CAL : MT_EE_DO_PRE_CAL_V2;
+ u16 center_freq = chandef->center_freq1;
u8 *cal = dev->cal, *eep = dev->mt76.eeprom.data;
+ u8 dpd_mask, cal_num = is_mt7915(&dev->mt76) ? 2 : 3;
int idx;
- if (!(eep[MT_EE_DO_PRE_CAL] & MT_EE_WIFI_CAL_DPD))
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ dpd_mask = MT_EE_WIFI_CAL_DPD_2G;
+ break;
+ case NL80211_BAND_5GHZ:
+ dpd_mask = MT_EE_WIFI_CAL_DPD_5G;
+ break;
+ case NL80211_BAND_6GHZ:
+ dpd_mask = MT_EE_WIFI_CAL_DPD_6G;
+ break;
+ default:
+ dpd_mask = 0;
+ break;
+ }
+
+ if (!(eep[offs] & dpd_mask))
return 0;
- idx = mt7915_dpd_freq_idx(center_freq, chandef->width);
+ idx = mt7915_dpd_freq_idx(dev, center_freq, chandef->width);
if (idx < 0)
return -EINVAL;
/* Items: Tx DPD, Tx Flatness */
- idx = idx * 2;
- cal += MT_EE_CAL_GROUP_SIZE;
+ idx = idx * cal_num;
+ cal += mt7915_get_cal_group_size(dev) + (idx * MT_EE_CAL_UNIT);
- while (total--) {
+ while (cal_num--) {
int ret;
- cal += (idx * MT_EE_CAL_UNIT);
ret = mt7915_mcu_set_pre_cal(dev, idx, cal, MT_EE_CAL_UNIT,
MCU_EXT_CMD(DPD_PRE_CAL_INFO));
if (ret)
return ret;
idx++;
+ cal += MT_EE_CAL_UNIT;
}
return 0;
@@ -3801,30 +3873,38 @@ int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wlan_idx)
{
struct {
__le32 cmd;
- __le32 num;
- __le32 __rsv;
- __le16 wlan_idx;
- } req = {
+ __le32 arg0;
+ __le32 arg1;
+ __le16 arg2;
+ } __packed req = {
.cmd = cpu_to_le32(0x15),
- .num = cpu_to_le32(1),
- .wlan_idx = cpu_to_le16(wlan_idx),
};
struct mt7915_mcu_wa_tx_stat {
- __le16 wlan_idx;
- u8 __rsv[2];
+ __le16 wcid;
+ u8 __rsv2[2];
/* tx_bytes is deprecated since WA byte counter uses u32,
* which easily leads to overflow.
*/
__le32 tx_bytes;
__le32 tx_packets;
- } *res;
+ } __packed *res;
struct mt76_wcid *wcid;
struct sk_buff *skb;
- int ret;
+ int ret, len;
+ u16 ret_wcid;
+
+ if (is_mt7915(&dev->mt76)) {
+ req.arg0 = cpu_to_le32(wlan_idx);
+ len = sizeof(req) - sizeof(req.arg2);
+ } else {
+ req.arg0 = cpu_to_le32(1);
+ req.arg2 = cpu_to_le16(wlan_idx);
+ len = sizeof(req);
+ }
ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_WA_PARAM_CMD(QUERY),
- &req, sizeof(req), true, &skb);
+ &req, len, true, &skb);
if (ret)
return ret;
@@ -3833,7 +3913,11 @@ int mt7915_mcu_wed_wa_tx_stats(struct mt7915_dev *dev, u16 wlan_idx)
res = (struct mt7915_mcu_wa_tx_stat *)skb->data;
- if (le16_to_cpu(res->wlan_idx) != wlan_idx) {
+ ret_wcid = le16_to_cpu(res->wcid);
+ if (is_mt7915(&dev->mt76))
+ ret_wcid &= 0xff;
+
+ if (ret_wcid != wlan_idx) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 6e79bc65f5a5..a30d08eb0656 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -302,6 +302,10 @@ struct mt7915_dev {
struct rchan *relay_fwlog;
void *cal;
+ u32 cur_prek_offset;
+ u8 dpd_chan_num_2g;
+ u8 dpd_chan_num_5g;
+ u8 dpd_chan_num_6g;
struct {
u8 debug_wm;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
index f5b99917c08e..90a6f61d1089 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/soc.c
@@ -7,7 +7,6 @@
#include <linux/pinctrl/consumer.h>
#include <linux/of.h>
#include <linux/of_reserved_mem.h>
-#include <linux/of_gpio.h>
#include <linux/iopoll.h>
#include <linux/reset.h>
#include <linux/of_net.h>
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 867e14f6b93a..047106b65d2b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -39,6 +39,7 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev)
};
struct ieee80211_sta *sta;
struct mt792x_sta *msta;
+ struct mt792x_link_sta *mlink;
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
LIST_HEAD(sta_poll_list);
struct rate_info *rate;
@@ -60,23 +61,25 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev)
spin_unlock_bh(&dev->mt76.sta_poll_lock);
break;
}
- msta = list_first_entry(&sta_poll_list,
- struct mt792x_sta, wcid.poll_list);
- list_del_init(&msta->wcid.poll_list);
+ mlink = list_first_entry(&sta_poll_list,
+ struct mt792x_link_sta,
+ wcid.poll_list);
+ msta = container_of(mlink, struct mt792x_sta, deflink);
+ list_del_init(&mlink->wcid.poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
- idx = msta->wcid.idx;
+ idx = mlink->wcid.idx;
addr = mt7921_mac_wtbl_lmac_addr(idx, MT_WTBL_AC0_CTT_OFFSET);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u32 tx_last = msta->airtime_ac[i];
- u32 rx_last = msta->airtime_ac[i + 4];
+ u32 tx_last = mlink->airtime_ac[i];
+ u32 rx_last = mlink->airtime_ac[i + 4];
- msta->airtime_ac[i] = mt76_rr(dev, addr);
- msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+ mlink->airtime_ac[i] = mt76_rr(dev, addr);
+ mlink->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
- tx_time[i] = msta->airtime_ac[i] - tx_last;
- rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+ tx_time[i] = mlink->airtime_ac[i] - tx_last;
+ rx_time[i] = mlink->airtime_ac[i + 4] - rx_last;
if ((tx_last | rx_last) & BIT(30))
clear = true;
@@ -87,10 +90,10 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev)
if (clear) {
mt7921_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac));
}
- if (!msta->wcid.sta)
+ if (!mlink->wcid.sta)
continue;
sta = container_of((void *)msta, struct ieee80211_sta,
@@ -113,7 +116,7 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev)
* we need to make sure that flags match so polling GI
* from per-sta counters directly.
*/
- rate = &msta->wcid.rate;
+ rate = &mlink->wcid.rate;
addr = mt7921_mac_wtbl_lmac_addr(idx,
MT_WTBL_TXRX_CAP_RATE_OFFSET);
val = mt76_rr(dev, addr);
@@ -154,10 +157,10 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev)
rssi[2] = to_rssi(GENMASK(23, 16), val);
rssi[3] = to_rssi(GENMASK(31, 14), val);
- msta->ack_signal =
+ mlink->ack_signal =
mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
- ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
+ ewma_avg_signal_add(&mlink->avg_ack_signal, -mlink->ack_signal);
}
}
@@ -180,6 +183,7 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
struct mt792x_sta *msta = NULL;
+ struct mt792x_link_sta *mlink;
u16 seq_ctrl = 0;
__le16 fc = 0;
u8 mode = 0;
@@ -210,10 +214,11 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
status->wcid = mt792x_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
- msta = container_of(status->wcid, struct mt792x_sta, wcid);
+ mlink = container_of(status->wcid, struct mt792x_link_sta, wcid);
+ msta = container_of(mlink, struct mt792x_sta, deflink);
spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
+ if (list_empty(&mlink->wcid.poll_list))
+ list_add_tail(&mlink->wcid.poll_list,
&dev->mt76.sta_poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
}
@@ -444,7 +449,7 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
void mt7921_mac_add_txs(struct mt792x_dev *dev, void *data)
{
- struct mt792x_sta *msta = NULL;
+ struct mt792x_link_sta *mlink;
struct mt76_wcid *wcid;
__le32 *txs_data = data;
u16 wcidx;
@@ -468,15 +473,15 @@ void mt7921_mac_add_txs(struct mt792x_dev *dev, void *data)
if (!wcid)
goto out;
- msta = container_of(wcid, struct mt792x_sta, wcid);
+ mlink = container_of(wcid, struct mt792x_link_sta, wcid);
mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data);
if (!wcid->sta)
goto out;
spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
+ if (list_empty(&mlink->wcid.poll_list))
+ list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
out:
@@ -513,7 +518,7 @@ static void mt7921_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
* 1'b0: msdu_id with the same 'wcid pair' as above.
*/
if (info & MT_TX_FREE_PAIR) {
- struct mt792x_sta *msta;
+ struct mt792x_link_sta *mlink;
u16 idx;
count++;
@@ -523,10 +528,10 @@ static void mt7921_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
if (!sta)
continue;
- msta = container_of(wcid, struct mt792x_sta, wcid);
+ mlink = container_of(wcid, struct mt792x_link_sta, wcid);
spin_lock_bh(&mdev->sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
+ if (list_empty(&mlink->wcid.poll_list))
+ list_add_tail(&mlink->wcid.poll_list,
&mdev->sta_poll_list);
spin_unlock_bh(&mdev->sta_poll_lock);
continue;
@@ -641,11 +646,12 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
if (vif->type == NL80211_IFTYPE_STATION)
ieee80211_disconnect(vif, true);
- mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
+ mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf,
+ &mvif->sta.deflink.wcid, true);
mt7921_mcu_set_tx(dev, vif);
if (vif->type == NL80211_IFTYPE_AP) {
- mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid,
+ mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.deflink.wcid,
true, NULL);
mt7921_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_NONE);
@@ -663,6 +669,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
int i, ret;
dev_dbg(dev->mt76.dev, "chip reset\n");
+ set_bit(MT76_RESET, &dev->mphy.state);
dev->hw_full_reset = true;
ieee80211_stop_queues(hw);
@@ -691,6 +698,7 @@ void mt7921_mac_reset_work(struct work_struct *work)
}
dev->hw_full_reset = false;
+ clear_bit(MT76_RESET, &dev->mphy.state);
pm->suspended = false;
ieee80211_wake_queues(hw);
ieee80211_iterate_active_interfaces(hw,
@@ -784,9 +792,9 @@ int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (sta) {
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+ if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) {
info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- msta->last_txs = jiffies;
+ msta->deflink.last_txs = jiffies;
}
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index ca36de34171b..23b228804289 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -242,6 +242,15 @@ int __mt7921_start(struct mt792x_phy *phy)
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT792x_WATCHDOG_TIME);
+ if (mt76_is_mmio(mphy->dev)) {
+ err = mt7921_mcu_radio_led_ctrl(phy->dev, EXT_CMD_RADIO_LED_CTRL_ENABLE);
+ if (err)
+ return err;
+
+ err = mt7921_mcu_radio_led_ctrl(phy->dev, EXT_CMD_RADIO_ON_LED);
+ if (err)
+ return err;
+ }
return 0;
}
@@ -259,6 +268,22 @@ static int mt7921_start(struct ieee80211_hw *hw)
return err;
}
+static void mt7921_stop(struct ieee80211_hw *hw, bool suspend)
+{
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ int err = 0;
+
+ if (mt76_is_mmio(&dev->mt76)) {
+ mt792x_mutex_acquire(dev);
+ err = mt7921_mcu_radio_led_ctrl(dev, EXT_CMD_RADIO_OFF_LED);
+ mt792x_mutex_release(dev);
+ if (err)
+ return;
+ }
+
+ mt792x_stop(hw, false);
+}
+
static int
mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
@@ -270,46 +295,49 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mt792x_mutex_acquire(dev);
- mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
- if (mvif->mt76.idx >= MT792x_MAX_INTERFACES) {
+ mvif->bss_conf.mt76.idx = __ffs64(~dev->mt76.vif_mask);
+ if (mvif->bss_conf.mt76.idx >= MT792x_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
}
- mvif->mt76.omac_idx = mvif->mt76.idx;
+ mvif->bss_conf.mt76.omac_idx = mvif->bss_conf.mt76.idx;
mvif->phy = phy;
- mvif->mt76.band_idx = 0;
- mvif->mt76.wmm_idx = mvif->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
+ mvif->bss_conf.vif = mvif;
+ mvif->bss_conf.mt76.band_idx = 0;
+ mvif->bss_conf.mt76.wmm_idx = mvif->bss_conf.mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
- ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid,
- true);
+ ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf,
+ &mvif->sta.deflink.wcid, true);
if (ret)
goto out;
- dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
- phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
+ dev->mt76.vif_mask |= BIT_ULL(mvif->bss_conf.mt76.idx);
+ phy->omac_mask |= BIT_ULL(mvif->bss_conf.mt76.omac_idx);
- idx = MT792x_WTBL_RESERVED - mvif->mt76.idx;
+ idx = MT792x_WTBL_RESERVED - mvif->bss_conf.mt76.idx;
- INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
- mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
- mvif->sta.wcid.hw_key_idx = -1;
- mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mt76_wcid_init(&mvif->sta.wcid);
+ INIT_LIST_HEAD(&mvif->sta.deflink.wcid.poll_list);
+ mvif->sta.deflink.wcid.idx = idx;
+ mvif->sta.deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx;
+ mvif->sta.deflink.wcid.hw_key_idx = -1;
+ mvif->sta.deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt76_wcid_init(&mvif->sta.deflink.wcid);
mt7921_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- ewma_rssi_init(&mvif->rssi);
+ ewma_rssi_init(&mvif->bss_conf.rssi);
- rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.deflink.wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = idx;
}
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ if (phy->chip_cap & MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN)
+ vif->driver_flags |= IEEE80211_VIF_SUPPORTS_CQM_RSSI;
out:
mt792x_mutex_release(dev);
@@ -467,7 +495,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv :
&mvif->sta;
- struct mt76_wcid *wcid = &msta->wcid;
+ struct mt76_wcid *wcid = &msta->deflink.wcid;
u8 *wcid_keyidx = &wcid->hw_key_idx;
int idx = key->keyidx, err = 0;
@@ -514,18 +542,18 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
mt76_wcid_key_setup(&dev->mt76, wcid, key);
- err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->deflink.bip,
key, MCU_UNI_CMD(STA_REC_UPDATE),
- &msta->wcid, cmd);
+ &msta->deflink.wcid, cmd);
if (err)
goto out;
if (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
key->cipher == WLAN_CIPHER_SUITE_WEP40)
err = mt76_connac_mcu_add_key(&dev->mt76, vif,
- &mvif->wep_sta->bip,
+ &mvif->wep_sta->deflink.bip,
key, MCU_UNI_CMD(STA_REC_UPDATE),
- &mvif->wep_sta->wcid, cmd);
+ &mvif->wep_sta->deflink.wcid, cmd);
out:
mt792x_mutex_release(dev);
@@ -679,6 +707,9 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_PS)
mt7921_mcu_uni_bss_ps(dev, vif);
+ if (changed & BSS_CHANGED_CQM)
+ mt7921_mcu_set_rssimonitor(dev, vif);
+
if (changed & BSS_CHANGED_ASSOC) {
mt7921_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_ASSOC);
@@ -688,7 +719,7 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ARP_FILTER) {
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76,
+ mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->bss_conf.mt76,
info);
}
@@ -769,13 +800,13 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (idx < 0)
return -ENOSPC;
- INIT_LIST_HEAD(&msta->wcid.poll_list);
+ INIT_LIST_HEAD(&msta->deflink.wcid.poll_list);
msta->vif = mvif;
- msta->wcid.sta = 1;
- msta->wcid.idx = idx;
- msta->wcid.phy_idx = mvif->mt76.band_idx;
- msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
- msta->last_txs = jiffies;
+ msta->deflink.wcid.sta = 1;
+ msta->deflink.wcid.idx = idx;
+ msta->deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx;
+ msta->deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ msta->deflink.last_txs = jiffies;
ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
if (ret)
@@ -810,14 +841,14 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt792x_mutex_acquire(dev);
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
- mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid,
- true, mvif->mt76.ctx);
+ mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.deflink.wcid,
+ true, mvif->bss_conf.mt76.ctx);
- ewma_avg_signal_init(&msta->avg_ack_signal);
+ ewma_avg_signal_init(&msta->deflink.avg_ack_signal);
- mt7921_mac_wtbl_update(dev, msta->wcid.idx,
+ mt7921_mac_wtbl_update(dev, msta->deflink.wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ memset(msta->deflink.airtime_ac, 0, sizeof(msta->deflink.airtime_ac));
mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
@@ -831,27 +862,27 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
+ mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->deflink.wcid);
mt76_connac_pm_wake(&dev->mphy, &dev->pm);
mt7921_mcu_sta_update(dev, sta, vif, false, MT76_STA_INFO_STATE_NONE);
- mt7921_mac_wtbl_update(dev, msta->wcid.idx,
+ mt7921_mac_wtbl_update(dev, msta->deflink.wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
if (vif->type == NL80211_IFTYPE_STATION) {
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
mvif->wep_sta = NULL;
- ewma_rssi_init(&mvif->rssi);
+ ewma_rssi_init(&mvif->bss_conf.rssi);
if (!sta->tdls)
mt76_connac_mcu_uni_add_bss(&dev->mphy, vif,
- &mvif->sta.wcid, false,
- mvif->mt76.ctx);
+ &mvif->sta.deflink.wcid, false,
+ mvif->bss_conf.mt76.ctx);
}
spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (!list_empty(&msta->wcid.poll_list))
- list_del_init(&msta->wcid.poll_list);
+ if (!list_empty(&msta->deflink.wcid.poll_list))
+ list_del_init(&msta->deflink.wcid.poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
mt7921_regd_set_6ghz_power_type(vif, false);
@@ -893,12 +924,12 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt792x_mutex_acquire(dev);
switch (action) {
case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+ mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn,
params->buf_size);
mt7921_mcu_uni_rx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
mt7921_mcu_uni_rx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
@@ -909,16 +940,16 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
- clear_bit(tid, &msta->wcid.ampdu_state);
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
mt7921_mcu_uni_tx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_START:
- set_bit(tid, &msta->wcid.ampdu_state);
+ set_bit(tid, &msta->deflink.wcid.ampdu_state);
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
- clear_bit(tid, &msta->wcid.ampdu_state);
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
mt7921_mcu_uni_tx_ba(dev, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -1136,11 +1167,11 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
mt792x_mutex_acquire(dev);
if (enabled)
- set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->deflink.wcid.flags);
else
- clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->deflink.wcid.flags);
- mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid,
+ mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->deflink.wcid,
MCU_UNI_CMD(STA_REC_UPDATE));
mt792x_mutex_release(dev);
@@ -1152,7 +1183,7 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
struct inet6_dev *idev)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_dev *dev = mvif->phy->dev;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct inet6_ifaddr *ifa;
struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
struct sk_buff *skb;
@@ -1166,7 +1197,7 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw,
struct mt76_connac_arpns_tlv arpns;
} req_hdr = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
},
.arpns = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
@@ -1264,8 +1295,8 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt792x_mutex_acquire(dev);
- err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid,
- true, mvif->mt76.ctx);
+ err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.deflink.wcid,
+ true, mvif->bss_conf.mt76.ctx);
if (err)
goto out;
@@ -1296,8 +1327,8 @@ mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (err)
goto out;
- mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false,
- mvif->mt76.ctx);
+ mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.deflink.wcid, false,
+ mvif->bss_conf.mt76.ctx);
out:
mt792x_mutex_release(dev);
@@ -1316,32 +1347,27 @@ mt7921_remove_chanctx(struct ieee80211_hw *hw,
{
}
-static void mt7921_ctx_iter(void *priv, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct ieee80211_chanctx_conf *ctx = priv;
-
- if (ctx != mvif->mt76.ctx)
- return;
-
- if (vif->type == NL80211_IFTYPE_MONITOR)
- mt7921_mcu_config_sniffer(mvif, ctx);
- else
- mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx);
-}
-
static void
mt7921_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
+ struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ struct ieee80211_vif *vif;
+ struct mt792x_vif *mvif;
+
+ if (!mctx->bss_conf)
+ return;
+
+ mvif = container_of(mctx->bss_conf, struct mt792x_vif, bss_conf);
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
mt792x_mutex_acquire(phy->dev);
- ieee80211_iterate_active_interfaces(phy->mt76->hw,
- IEEE80211_IFACE_ITER_ACTIVE,
- mt7921_ctx_iter, ctx);
+ if (vif->type == NL80211_IFTYPE_MONITOR)
+ mt7921_mcu_config_sniffer(mvif, ctx);
+ else
+ mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->bss_conf.mt76, ctx);
mt792x_mutex_release(phy->dev);
}
@@ -1355,7 +1381,7 @@ static void mt7921_mgd_prepare_tx(struct ieee80211_hw *hw,
jiffies_to_msecs(HZ);
mt792x_mutex_acquire(dev);
- mt7921_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration,
+ mt7921_set_roc(mvif->phy, mvif, mvif->bss_conf.mt76.ctx->def.chan, duration,
MT7921_ROC_REQ_JOIN);
mt792x_mutex_release(dev);
}
@@ -1372,7 +1398,7 @@ static void mt7921_mgd_complete_tx(struct ieee80211_hw *hw,
const struct ieee80211_ops mt7921_ops = {
.tx = mt792x_tx,
.start = mt7921_start,
- .stop = mt792x_stop,
+ .stop = mt7921_stop,
.add_interface = mt7921_add_interface,
.remove_interface = mt792x_remove_interface,
.config = mt7921_config,
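
The mt7921_change_chanctx() rework above drops the per-interface iterator: the channel context's drv_priv now carries a pointer back to its bss_conf, so the owning vif is recovered directly with container_of(). A minimal userspace sketch of that idiom, with illustrative struct names rather than the driver's real layout:

/* Walk back from an embedded member pointer to its containing struct,
 * as mt7921_change_chanctx() does for mt792x_vif/bss_conf.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct bss_conf { int idx; };

struct vif {
	int type;
	struct bss_conf bss_conf;	/* embedded member */
};

int main(void)
{
	struct vif v = { .type = 2, .bss_conf = { .idx = 7 } };
	struct bss_conf *conf = &v.bss_conf;

	/* Recover the enclosing struct from the member pointer. */
	struct vif *owner = container_of(conf, struct vif, bss_conf);

	printf("type=%d idx=%d\n", owner->type, owner->bss_conf.idx);
	return 0;
}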
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 8b4ce32a2cd1..394fcd799345 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -105,7 +105,7 @@ mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev,
struct mt76_connac_arpns_tlv arpns;
} req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
},
.arpns = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
@@ -254,6 +254,42 @@ mt7921_mcu_tx_done_event(struct mt792x_dev *dev, struct sk_buff *skb)
}
static void
+mt7921_mcu_rssi_monitor_iter(void *priv, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt76_connac_rssi_notify_event *event = priv;
+ enum nl80211_cqm_rssi_threshold_event nl_event;
+ s32 rssi = le32_to_cpu(event->rssi[mvif->bss_conf.mt76.idx]);
+
+ if (!rssi)
+ return;
+
+ if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ return;
+
+ if (rssi > vif->bss_conf.cqm_rssi_thold)
+ nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
+ else
+ nl_event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
+
+ ieee80211_cqm_rssi_notify(vif, nl_event, rssi, GFP_KERNEL);
+}
+
+static void
+mt7921_mcu_rssi_monitor_event(struct mt792x_dev *dev, struct sk_buff *skb)
+{
+ struct mt76_connac_rssi_notify_event *event;
+
+ skb_pull(skb, sizeof(struct mt76_connac2_mcu_rxd));
+ event = (struct mt76_connac_rssi_notify_event *)skb->data;
+
+ ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7921_mcu_rssi_monitor_iter, event);
+}
+
+static void
mt7921_mcu_rx_unsolicited_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
struct mt76_connac2_mcu_rxd *rxd;
@@ -281,6 +317,9 @@ mt7921_mcu_rx_unsolicited_event(struct mt792x_dev *dev, struct sk_buff *skb)
case MCU_EVENT_TX_DONE:
mt7921_mcu_tx_done_event(dev, skb);
break;
+ case MCU_EVENT_RSSI_NOTIFY:
+ mt7921_mcu_rssi_monitor_event(dev, skb);
+ break;
default:
break;
}
@@ -327,6 +366,7 @@ void mt7921_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb)
if (rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
rxd->eid == MCU_EVENT_BSS_BEACON_LOSS ||
rxd->eid == MCU_EVENT_SCHED_SCAN_DONE ||
+ rxd->eid == MCU_EVENT_RSSI_NOTIFY ||
rxd->eid == MCU_EVENT_SCAN_DONE ||
rxd->eid == MCU_EVENT_TX_DONE ||
rxd->eid == MCU_EVENT_DBG_MSG ||
@@ -346,9 +386,9 @@ int mt7921_mcu_uni_tx_ba(struct mt792x_dev *dev,
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
if (enable && !params->amsdu)
- msta->wcid.amsdu = false;
+ msta->deflink.wcid.amsdu = false;
- return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->bss_conf.mt76, params,
MCU_UNI_CMD(STA_REC_UPDATE),
enable, true);
}
@@ -359,7 +399,7 @@ int mt7921_mcu_uni_rx_ba(struct mt792x_dev *dev,
{
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
- return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params,
+ return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->bss_conf.mt76, params,
MCU_UNI_CMD(STA_REC_UPDATE),
enable, false);
}
@@ -606,6 +646,20 @@ int mt7921_run_firmware(struct mt792x_dev *dev)
}
EXPORT_SYMBOL_GPL(mt7921_run_firmware);
+int mt7921_mcu_radio_led_ctrl(struct mt792x_dev *dev, u8 value)
+{
+ struct {
+ u8 ctrlid;
+ u8 rsv[3];
+ } __packed req = {
+ .ctrlid = value,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(ID_RADIO_ON_OFF_CTRL),
+ &req, sizeof(req), false);
+}
+EXPORT_SYMBOL_GPL(mt7921_mcu_radio_led_ctrl);
+
int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
@@ -624,9 +678,9 @@ int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
u8 wmm_idx;
u8 pad;
} __packed req = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
.qos = vif->bss_conf.qos,
- .wmm_idx = mvif->mt76.wmm_idx,
+ .wmm_idx = mvif->bss_conf.mt76.wmm_idx,
};
struct mu_edca {
u8 cw_min;
@@ -647,15 +701,15 @@ int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
struct mu_edca edca[IEEE80211_NUM_ACS];
u8 pad3[32];
} __packed req_mu = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
.qos = vif->bss_conf.qos,
- .wmm_idx = mvif->mt76.wmm_idx,
+ .wmm_idx = mvif->bss_conf.mt76.wmm_idx,
};
static const int to_aci[] = { 1, 0, 2, 3 };
int ac, ret;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
+ struct ieee80211_tx_queue_params *q = &mvif->bss_conf.queue_params[ac];
struct edca *e = &req.edca[to_aci[ac]];
e->aifs = cpu_to_le16(q->aifs);
@@ -684,10 +738,10 @@ int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
struct ieee80211_he_mu_edca_param_ac_rec *q;
struct mu_edca *e;
- if (!mvif->queue_params[ac].mu_edca)
+ if (!mvif->bss_conf.queue_params[ac].mu_edca)
break;
- q = &mvif->queue_params[ac].mu_edca_param_rec;
+ q = &mvif->bss_conf.queue_params[ac].mu_edca_param_rec;
e = &(req_mu.edca[to_aci[ac]]);
e->cw_min = q->ecw_min_max & 0xf;
@@ -736,7 +790,7 @@ int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
.tokenid = token_id,
.reqtype = type,
.maxinterval = cpu_to_le32(duration),
- .bss_idx = vif->mt76.idx,
+ .bss_idx = vif->bss_conf.mt76.idx,
.control_channel = chan->hw_value,
.bw = CMD_CBW_20MHZ,
.bw_from_ap = CMD_CBW_20MHZ,
@@ -788,7 +842,7 @@ int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
.tag = cpu_to_le16(UNI_ROC_ABORT),
.len = cpu_to_le16(sizeof(struct roc_abort_tlv)),
.tokenid = token_id,
- .bss_idx = vif->mt76.idx,
+ .bss_idx = vif->bss_conf.mt76.idx,
.dbdcband = 0xff, /* auto */
},
};
@@ -893,7 +947,7 @@ int mt7921_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif)
} __packed ps;
} __packed ps_req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
},
.ps = {
.tag = cpu_to_le16(UNI_BSS_INFO_PS),
@@ -928,7 +982,7 @@ mt7921_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif,
} __packed bcnft;
} __packed bcnft_req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
},
.bcnft = {
.tag = cpu_to_le16(UNI_BSS_INFO_BCNFT),
@@ -961,7 +1015,7 @@ mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
u8 bmc_triggered_ac;
u8 pad;
} req = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
.aid = cpu_to_le16(vif->cfg.aid),
.dtim_period = vif->bss_conf.dtim_period,
.bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
@@ -970,7 +1024,7 @@ mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
u8 bss_idx;
u8 pad[3];
} req_hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
};
int err;
@@ -988,7 +1042,7 @@ int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
enum mt76_sta_info_state state)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- int rssi = -ewma_rssi_read(&mvif->rssi);
+ int rssi = -ewma_rssi_read(&mvif->bss_conf.rssi);
struct mt76_sta_cmd_info info = {
.sta = sta,
.vif = vif,
@@ -1001,7 +1055,7 @@ int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
struct mt792x_sta *msta;
msta = sta ? (struct mt792x_sta *)sta->drv_priv : NULL;
- info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
+ info.wcid = msta ? &msta->deflink.wcid : &mvif->sta.deflink.wcid;
info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true;
return mt76_connac_mcu_sta_cmd(&dev->mphy, &info);
@@ -1100,12 +1154,12 @@ int mt7921_mcu_config_sniffer(struct mt792x_vif *vif,
{
struct cfg80211_chan_def *chandef = &ctx->def;
int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
- const u8 ch_band[] = {
+ static const u8 ch_band[] = {
[NL80211_BAND_2GHZ] = 1,
[NL80211_BAND_5GHZ] = 2,
[NL80211_BAND_6GHZ] = 3,
};
- const u8 ch_width[] = {
+ static const u8 ch_width[] = {
[NL80211_CHAN_WIDTH_20_NOHT] = 0,
[NL80211_CHAN_WIDTH_20] = 0,
[NL80211_CHAN_WIDTH_40] = 0,
@@ -1136,7 +1190,7 @@ int mt7921_mcu_config_sniffer(struct mt792x_vif *vif,
} __packed tlv;
} __packed req = {
.hdr = {
- .band_idx = vif->mt76.band_idx,
+ .band_idx = vif->bss_conf.mt76.band_idx,
},
.tlv = {
.tag = cpu_to_le16(1),
@@ -1197,7 +1251,7 @@ mt7921_mcu_uni_add_beacon_offload(struct mt792x_dev *dev,
} __packed beacon_tlv;
} req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
},
.beacon_tlv = {
.tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
@@ -1391,3 +1445,24 @@ int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(SET_RX_FILTER),
&data, sizeof(data), false);
}
+
+int mt7921_mcu_set_rssimonitor(struct mt792x_dev *dev, struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct {
+ u8 enable;
+ s8 cqm_rssi_high;
+ s8 cqm_rssi_low;
+ u8 bss_idx;
+ u16 duration;
+ u8 rsv2[2];
+ } __packed data = {
+ .enable = vif->cfg.assoc,
+ .cqm_rssi_high = vif->bss_conf.cqm_rssi_thold + vif->bss_conf.cqm_rssi_hyst,
+ .cqm_rssi_low = vif->bss_conf.cqm_rssi_thold - vif->bss_conf.cqm_rssi_hyst,
+ .bss_idx = mvif->bss_conf.mt76.idx,
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(RSSI_MONITOR),
+ &data, sizeof(data), false);
+}
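
The two RSSI-monitor additions above work as a pair: mt7921_mcu_set_rssimonitor() arms the firmware with thresholds at cqm_rssi_thold +/- cqm_rssi_hyst, and mt7921_mcu_rssi_monitor_iter() classifies each reported value as a HIGH or LOW connection-quality event. A minimal sketch of that thresholding, with made-up values:

/* Hysteresis-armed CQM thresholding; classify() mirrors the iter's
 * comparison against cqm_rssi_thold.
 */
#include <stdio.h>

enum cqm_event { CQM_LOW, CQM_HIGH };

static enum cqm_event classify(int rssi, int thold)
{
	return rssi > thold ? CQM_HIGH : CQM_LOW;
}

int main(void)
{
	int thold = -70, hyst = 4;

	/* What mt7921_mcu_set_rssimonitor() programs into the firmware. */
	printf("arm: high=%d low=%d\n", thold + hyst, thold - hyst);

	/* What the iter does with a firmware-reported RSSI. */
	printf("rssi=-60 -> %s\n", classify(-60, thold) == CQM_HIGH ? "HIGH" : "LOW");
	printf("rssi=-80 -> %s\n", classify(-80, thold) == CQM_HIGH ? "HIGH" : "LOW");
	return 0;
}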
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 3016636d18c6..6c5392c5d207 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -27,6 +27,10 @@
#define MCU_UNI_EVENT_ROC 0x27
#define MCU_UNI_EVENT_CLC 0x80
+#define EXT_CMD_RADIO_LED_CTRL_ENABLE 0x1
+#define EXT_CMD_RADIO_ON_LED 0x2
+#define EXT_CMD_RADIO_OFF_LED 0x3
+
enum {
UNI_ROC_ACQUIRE,
UNI_ROC_ABORT,
@@ -196,6 +200,7 @@ int mt7921_mcu_fw_log_2_host(struct mt792x_dev *dev, u8 ctrl);
void mt7921_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb);
int mt7921_mcu_set_rxfilter(struct mt792x_dev *dev, u32 fif,
u8 bit_op, u32 bit_map);
+int mt7921_mcu_radio_led_ctrl(struct mt792x_dev *dev, u8 value);
static inline u32
mt7921_reg_map_l1(struct mt792x_dev *dev, u32 addr)
@@ -323,4 +328,5 @@ int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
u8 token_id);
void mt7921_roc_abort_sync(struct mt792x_dev *dev);
+int mt7921_mcu_set_rssimonitor(struct mt792x_dev *dev, struct ieee80211_vif *vif);
#endif
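
The new EXT_CMD_RADIO_*_LED ids feed mt7921_mcu_radio_led_ctrl(), whose request is a one-byte control id padded to a fixed four-byte wire format with reserved bytes. A self-contained sketch of that packing, with mcu_send() standing in for mt76_mcu_send_msg():

/* Fixed-size packed MCU request; the reserved bytes keep the wire
 * length stable.  mcu_send() is an illustrative stub.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct radio_led_req {
	uint8_t ctrlid;		/* e.g. EXT_CMD_RADIO_ON_LED (0x2) */
	uint8_t rsv[3];		/* reserved padding */
} __attribute__((packed));

static int mcu_send(const void *buf, unsigned int len)
{
	printf("sending %u-byte MCU request, ctrlid=%u\n", len,
	       ((const struct radio_led_req *)buf)->ctrlid);
	return 0;
}

int main(void)
{
	struct radio_led_req req = { .ctrlid = 0x2 };

	assert(sizeof(req) == 4);	/* fixed wire format */
	return mcu_send(&req, sizeof(req));
}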
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index cda853e86676..a7430216a80d 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -24,6 +24,8 @@ static const struct pci_device_id mt7921_pci_device_table[] = {
.driver_data = (kernel_ulong_t)MT7921_FIRMWARE_WM },
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x0616),
.driver_data = (kernel_ulong_t)MT7922_FIRMWARE_WM },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7920),
+ .driver_data = (kernel_ulong_t)MT7920_FIRMWARE_WM },
{ },
};
@@ -217,7 +219,7 @@ static int mt7921_dma_init(struct mt792x_dev *dev)
if (ret < 0)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt792x_poll_tx);
napi_enable(&dev->mt76.tx_napi);
@@ -269,9 +271,9 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
struct mt76_bus_ops *bus_ops;
struct mt792x_dev *dev;
struct mt76_dev *mdev;
+ u16 cmd, chipid;
u8 features;
int ret;
- u16 cmd;
ret = pcim_enable_device(pdev);
if (ret)
@@ -337,6 +339,9 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
bus_ops->rmw = mt7921_rmw;
dev->mt76.bus = bus_ops;
+ if (!mt7921_disable_aspm && mt76_pci_aspm_supported(pdev))
+ dev->aspm_supported = true;
+
ret = mt792xe_mcu_fw_pmctrl(dev);
if (ret)
goto err_free_dev;
@@ -345,7 +350,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_free_dev;
- mdev->rev = (mt7921_l1_rr(dev, MT_HW_CHIPID) << 16) |
+ chipid = mt7921_l1_rr(dev, MT_HW_CHIPID);
+ if (chipid == 0x7961 && (mt7921_l1_rr(dev, MT_HW_BOUND) & BIT(7)))
+ chipid = 0x7920;
+ mdev->rev = (chipid << 16) |
(mt7921_l1_rr(dev, MT_HW_REV) & 0xff);
dev_info(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
@@ -422,6 +430,10 @@ static int mt7921_pci_suspend(struct device *device)
wait_event_timeout(dev->wait,
!dev->regd_in_progress, 5 * HZ);
+ err = mt7921_mcu_radio_led_ctrl(dev, EXT_CMD_RADIO_OFF_LED);
+ if (err < 0)
+ goto restore_suspend;
+
err = mt76_connac_mcu_set_hif_suspend(mdev, true);
if (err)
goto restore_suspend;
@@ -520,9 +532,11 @@ static int mt7921_pci_resume(struct device *device)
mt76_connac_mcu_set_deep_sleep(&dev->mt76, false);
err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+ if (err < 0)
+ goto failed;
mt7921_regd_update(dev);
-
+ err = mt7921_mcu_radio_led_ctrl(dev, EXT_CMD_RADIO_ON_LED);
failed:
pm->suspended = false;
@@ -551,6 +565,8 @@ static struct pci_driver mt7921_pci_driver = {
module_pci_driver(mt7921_pci_driver);
MODULE_DEVICE_TABLE(pci, mt7921_pci_device_table);
+MODULE_FIRMWARE(MT7920_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7920_ROM_PATCH);
MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
MODULE_FIRMWARE(MT7921_ROM_PATCH);
MODULE_FIRMWARE(MT7922_FIRMWARE_WM);
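
The probe change above handles the MT7920 variant, which reports the MT7921 chip id (0x7961) and is told apart by bit 7 of the MT_HW_BOUND register; the resolved id is then packed into the high half of mdev->rev. A minimal sketch of that detection, with illustrative register values:

/* Chip-id remap as added to mt7921_pci_probe(); values are made up. */
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1U << (n))

static uint16_t resolve_chipid(uint16_t hw_chipid, uint32_t hw_bound)
{
	if (hw_chipid == 0x7961 && (hw_bound & BIT(7)))
		return 0x7920;	/* rebadged variant */
	return hw_chipid;
}

int main(void)
{
	uint16_t chipid = resolve_chipid(0x7961, BIT(7));
	uint8_t hw_rev = 0x11;	/* illustrative */

	/* mdev->rev: chip id in the high half, revision in the low byte. */
	printf("rev=%08x\n", (chipid << 16) | hw_rev);
	return 0;
}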
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
index c866144ff061..2452b1a2d118 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -34,9 +34,9 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (sta) {
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+ if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) {
info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- msta->last_txs = jiffies;
+ msta->deflink.last_txs = jiffies;
}
}
@@ -64,7 +64,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
mt76_wr(dev, dev->irq_map->host_irq_enable, 0);
mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
- set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
skb_queue_purge(&dev->mt76.mcu.res_q);
@@ -115,7 +114,6 @@ int mt7921e_mac_reset(struct mt792x_dev *dev)
err = __mt7921_start(&dev->phy);
out:
- clear_bit(MT76_RESET, &dev->mphy.state);
local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
index 389eb0903807..1f77cf71ca70 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
@@ -98,7 +98,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
mt76_txq_schedule_all(&dev->mphy);
mt76_worker_disable(&dev->mt76.tx_worker);
- set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
skb_queue_purge(&dev->mt76.mcu.res_q);
@@ -135,7 +134,6 @@ int mt7921s_mac_reset(struct mt792x_dev *dev)
err = __mt7921_start(&dev->phy);
out:
- clear_bit(MT76_RESET, &dev->mphy.state);
mt76_worker_enable(&dev->mt76.tx_worker);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
index c4cbc8976046..039949b344b9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c
@@ -179,6 +179,12 @@ static void mt7925_init_work(struct work_struct *work)
mt76_set_stream_caps(&dev->mphy, true);
mt7925_set_stream_he_eht_caps(&dev->phy);
+ ret = mt7925_init_mlo_caps(&dev->phy);
+ if (ret) {
+ dev_err(dev->mt76.dev, "MLO init failed\n");
+ return;
+ }
+
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret) {
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
index 1b9fbd9a140d..cf36750cf709 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
@@ -28,6 +28,7 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
};
struct ieee80211_sta *sta;
struct mt792x_sta *msta;
+ struct mt792x_link_sta *mlink;
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
LIST_HEAD(sta_poll_list);
struct rate_info *rate;
@@ -46,24 +47,25 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
if (list_empty(&sta_poll_list))
break;
- msta = list_first_entry(&sta_poll_list,
- struct mt792x_sta, wcid.poll_list);
+ mlink = list_first_entry(&sta_poll_list,
+ struct mt792x_link_sta, wcid.poll_list);
+ msta = container_of(mlink, struct mt792x_sta, deflink);
spin_lock_bh(&dev->mt76.sta_poll_lock);
- list_del_init(&msta->wcid.poll_list);
+ list_del_init(&mlink->wcid.poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
- idx = msta->wcid.idx;
+ idx = mlink->wcid.idx;
addr = mt7925_mac_wtbl_lmac_addr(dev, idx, MT_WTBL_AC0_CTT_OFFSET);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
- u32 tx_last = msta->airtime_ac[i];
- u32 rx_last = msta->airtime_ac[i + 4];
+ u32 tx_last = mlink->airtime_ac[i];
+ u32 rx_last = mlink->airtime_ac[i + 4];
- msta->airtime_ac[i] = mt76_rr(dev, addr);
- msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
+ mlink->airtime_ac[i] = mt76_rr(dev, addr);
+ mlink->airtime_ac[i + 4] = mt76_rr(dev, addr + 4);
- tx_time[i] = msta->airtime_ac[i] - tx_last;
- rx_time[i] = msta->airtime_ac[i + 4] - rx_last;
+ tx_time[i] = mlink->airtime_ac[i] - tx_last;
+ rx_time[i] = mlink->airtime_ac[i + 4] - rx_last;
if ((tx_last | rx_last) & BIT(30))
clear = true;
@@ -74,10 +76,10 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
if (clear) {
mt7925_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac));
}
- if (!msta->wcid.sta)
+ if (!mlink->wcid.sta)
continue;
sta = container_of((void *)msta, struct ieee80211_sta,
@@ -100,7 +102,7 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
* we need to make sure that flags match so polling GI
* from per-sta counters directly.
*/
- rate = &msta->wcid.rate;
+ rate = &mlink->wcid.rate;
switch (rate->bw) {
case RATE_INFO_BW_160:
@@ -144,10 +146,10 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev)
rssi[2] = to_rssi(GENMASK(23, 16), val);
rssi[3] = to_rssi(GENMASK(31, 24), val);
- msta->ack_signal =
+ mlink->ack_signal =
mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi);
- ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal);
+ ewma_avg_signal_add(&mlink->avg_ack_signal, -mlink->ack_signal);
}
}
@@ -365,7 +367,7 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
u32 rxd2 = le32_to_cpu(rxd[2]);
u32 rxd3 = le32_to_cpu(rxd[3]);
u32 rxd4 = le32_to_cpu(rxd[4]);
- struct mt792x_sta *msta = NULL;
+ struct mt792x_link_sta *mlink;
u8 mode = 0; /* , band_idx; */
u16 seq_ctrl = 0;
__le16 fc = 0;
@@ -393,10 +395,10 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
status->wcid = mt792x_rx_get_wcid(dev, idx, unicast);
if (status->wcid) {
- msta = container_of(status->wcid, struct mt792x_sta, wcid);
+ mlink = container_of(status->wcid, struct mt792x_link_sta, wcid);
spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
+ if (list_empty(&mlink->wcid.poll_list))
+ list_add_tail(&mlink->wcid.poll_list,
&dev->mt76.sta_poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
}
@@ -590,14 +592,25 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb)
seq_ctrl = le16_to_cpu(hdr->seq_ctrl);
qos_ctl = *ieee80211_get_qos_ctl(hdr);
}
+ skb_set_mac_header(skb, (unsigned char *)hdr - skb->data);
} else {
status->flag |= RX_FLAG_8023;
}
mt792x_mac_assoc_rssi(dev, skb);
- if (rxv && mode >= MT_PHY_TYPE_HE_SU && !(status->flag & RX_FLAG_8023))
- mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
+ if (rxv && !(status->flag & RX_FLAG_8023)) {
+ switch (status->encoding) {
+ case RX_ENC_EHT:
+ mt76_connac3_mac_decode_eht_radiotap(skb, rxv, mode);
+ break;
+ case RX_ENC_HE:
+ mt76_connac3_mac_decode_he_radiotap(skb, rxv, mode);
+ break;
+ default:
+ break;
+ }
+ }
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
@@ -727,8 +740,12 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
BSS_CHANGED_BEACON_ENABLED));
bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP |
BSS_CHANGED_FILS_DISCOVERY));
+ struct mt792x_bss_conf *mconf;
+
+ mconf = vif ? mt792x_vif_to_link((struct mt792x_vif *)vif->drv_priv,
+ wcid->link_id) : NULL;
+ mvif = mconf ? (struct mt76_vif *)&mconf->mt76 : NULL;
- mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL;
if (mvif) {
omac_idx = mvif->omac_idx;
wmm_idx = mvif->wmm_idx;
@@ -789,8 +806,10 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
txwi[5] = cpu_to_le32(val);
- val = MT_TXD6_DIS_MAT | MT_TXD6_DAS |
- FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
+ val = MT_TXD6_DAS | FIELD_PREP(MT_TXD6_MSDU_CNT, 1);
+ if (!ieee80211_vif_is_mld(vif) ||
+ (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0))
+ val |= MT_TXD6_DIS_MAT;
txwi[6] = cpu_to_le32(val);
txwi[7] = 0;
@@ -820,27 +839,53 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
}
EXPORT_SYMBOL_GPL(mt7925_mac_write_txwi);
-static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb,
+ struct mt76_wcid *wcid)
{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_link_sta *link_sta;
+ struct mt792x_link_sta *mlink;
struct mt792x_sta *msta;
+ bool is_8023;
u16 fc, tid;
- u32 val;
- if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he))
+ link_sta = rcu_dereference(sta->link[wcid->link_id]);
+ if (!link_sta)
return;
- tid = le32_get_bits(txwi[1], MT_TXD1_TID);
- if (tid >= 6) /* skip VO queue */
+ if (!sta || !(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he))
return;
- val = le32_to_cpu(txwi[2]);
- fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 |
- FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4;
+ tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
+ is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+
+ if (is_8023) {
+ fc = IEEE80211_FTYPE_DATA |
+ (sta->wme ? IEEE80211_STYPE_QOS_DATA :
+ IEEE80211_STYPE_DATA);
+ } else {
+ /* No need to get the precise TID for Action/Management frames,
+ * since they will not meet the Frame Control condition below
+ * anyway.
+ */
+
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ fc = le16_to_cpu(hdr->frame_control) &
+ (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE);
+ }
+
if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA)))
return;
msta = (struct mt792x_sta *)sta->drv_priv;
- if (!test_and_set_bit(tid, &msta->wcid.ampdu_state))
+
+ if (sta->mlo && msta->deflink_id != IEEE80211_LINK_UNSPECIFIED)
+ mlink = rcu_dereference(msta->link[msta->deflink_id]);
+ else
+ mlink = &msta->deflink;
+
+ if (!test_and_set_bit(tid, &mlink->wcid.ampdu_state))
ieee80211_start_tx_ba_session(sta, tid, 0);
}
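
The reworked mt7925_tx_check_aggr() above no longer reads the TXWI: for hardware-encapsulated (802.3) frames it synthesizes the type/subtype, otherwise it masks them out of the 802.11 header, and only QoS data frames are eligible to start a BA session. A sketch of that frame-control test; the constants mirror the IEEE80211_FCTL_*/FTYPE/STYPE values and the little-endian handling is simplified:

/* Eligibility check for starting a tx BA session, per the hunk above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FCTL_FTYPE	0x000c
#define FCTL_STYPE	0x00f0
#define FTYPE_DATA	0x0008
#define STYPE_QOS_DATA	0x0080
#define STYPE_DATA	0x0000

static bool is_qos_data(bool is_8023, bool wme, uint16_t hdr_fc)
{
	uint16_t fc;

	if (is_8023)
		fc = FTYPE_DATA | (wme ? STYPE_QOS_DATA : STYPE_DATA);
	else
		fc = hdr_fc & (FCTL_FTYPE | FCTL_STYPE);

	return fc == (FTYPE_DATA | STYPE_QOS_DATA);
}

int main(void)
{
	printf("8023+wme: %d\n", is_qos_data(true, true, 0));
	printf("mgmt:     %d\n", is_qos_data(false, false, 0x0040));
	return 0;
}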
@@ -980,7 +1025,7 @@ out_no_skb:
void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data)
{
- struct mt792x_sta *msta = NULL;
+ struct mt792x_link_sta *mlink = NULL;
struct mt76_wcid *wcid;
__le32 *txs_data = data;
u16 wcidx;
@@ -1004,15 +1049,15 @@ void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data)
if (!wcid)
goto out;
- msta = container_of(wcid, struct mt792x_sta, wcid);
+ mlink = container_of(wcid, struct mt792x_link_sta, wcid);
mt7925_mac_add_txs_skb(dev, wcid, pid, txs_data);
if (!wcid->sta)
goto out;
spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list);
+ if (list_empty(&mlink->wcid.poll_list))
+ list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list);
spin_unlock_bh(&dev->mt76.sta_poll_lock);
out:
@@ -1020,7 +1065,7 @@ out:
}
void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t,
- struct ieee80211_sta *sta, bool clear_status,
+ struct ieee80211_sta *sta, struct mt76_wcid *wcid,
struct list_head *free_list)
{
struct mt76_dev *mdev = &dev->mt76;
@@ -1033,10 +1078,8 @@ void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t,
txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
if (sta) {
- struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
-
if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7925_tx_check_aggr(sta, txwi);
+ mt7925_tx_check_aggr(sta, t->skb, wcid);
wcid_idx = wcid->idx;
} else {
@@ -1083,7 +1126,7 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
*/
info = le32_to_cpu(*cur_info);
if (info & MT_TXFREE_INFO_PAIR) {
- struct mt792x_sta *msta;
+ struct mt792x_link_sta *mlink;
u16 idx;
idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info);
@@ -1092,10 +1135,10 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
if (!sta)
continue;
- msta = container_of(wcid, struct mt792x_sta, wcid);
+ mlink = container_of(wcid, struct mt792x_link_sta, wcid);
spin_lock_bh(&mdev->sta_poll_lock);
- if (list_empty(&msta->wcid.poll_list))
- list_add_tail(&msta->wcid.poll_list,
+ if (list_empty(&mlink->wcid.poll_list))
+ list_add_tail(&mlink->wcid.poll_list,
&mdev->sta_poll_list);
spin_unlock_bh(&mdev->sta_poll_lock);
continue;
@@ -1121,7 +1164,7 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len)
if (!txwi)
continue;
- mt7925_txwi_free(dev, txwi, sta, 0, &free_list);
+ mt7925_txwi_free(dev, txwi, sta, wcid, &free_list);
}
}
@@ -1224,17 +1267,26 @@ mt7925_vif_connect_iter(void *priv, u8 *mac,
struct ieee80211_vif *vif)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ unsigned long valid = ieee80211_vif_is_mld(vif) ?
+ mvif->valid_links : BIT(0);
struct mt792x_dev *dev = mvif->phy->dev;
struct ieee80211_hw *hw = mt76_hw(dev);
+ struct ieee80211_bss_conf *bss_conf;
+ int i;
if (vif->type == NL80211_IFTYPE_STATION)
ieee80211_disconnect(vif, true);
- mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true);
- mt7925_mcu_set_tx(dev, vif);
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+
+ mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf,
+ &mvif->sta.deflink.wcid, true);
+ mt7925_mcu_set_tx(dev, bss_conf);
+ }
if (vif->type == NL80211_IFTYPE_AP) {
- mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid,
+ mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.deflink.wcid,
true, NULL);
mt7925_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_NONE);
@@ -1369,9 +1421,9 @@ int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (sta) {
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+ if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) {
info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- msta->last_txs = jiffies;
+ msta->deflink.last_txs = jiffies;
}
}
@@ -1406,7 +1458,7 @@ void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev,
sta = wcid_to_sta(wcid);
if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7925_tx_check_aggr(sta, txwi);
+ mt76_connac2_tx_check_aggr(sta, txwi);
skb_pull(e->skb, headroom);
mt76_tx_complete_skb(mdev, e->wcid, e->skb);
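
The sta-poll hunks earlier in this file keep per-AC cumulative airtime counters per link: each poll reads the WTBL registers, reports the delta against the cached last values, and schedules an ADM_COUNT_CLEAR once bit 30 of a previous value was set, so the counters never overflow. A simplified sketch of that accounting, with read_counter() faking the register read:

/* Delta accounting with a clear threshold, as in mt7925_mac_sta_poll(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1U << (n))

static uint32_t read_counter(void)
{
	static uint32_t fake = BIT(30) - 2;	/* near the clear threshold */
	return fake += 5;
}

int main(void)
{
	uint32_t last = 0;
	bool clear = false;

	for (int i = 0; i < 2; i++) {
		uint32_t cur = read_counter();
		uint32_t delta = cur - last;

		if (last & BIT(30))
			clear = true;	/* schedule ADM_COUNT_CLEAR */

		printf("delta=%u clear=%d\n", delta, clear);
		last = clear ? 0 : cur;	/* counters reset after a clear */
	}
	return 0;
}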
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
index 6179798a8845..8c0768bf9343 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c
@@ -236,6 +236,35 @@ mt7925_init_eht_caps(struct mt792x_phy *phy, enum nl80211_band band,
eht_nss->bw._160.rx_tx_mcs13_max_nss = val;
}
+int mt7925_init_mlo_caps(struct mt792x_phy *phy)
+{
+ struct wiphy *wiphy = phy->mt76->hw->wiphy;
+ static const u8 ext_capa_sta[] = {
+ [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
+ };
+ static struct wiphy_iftype_ext_capab ext_capab[] = {
+ {
+ .iftype = NL80211_IFTYPE_STATION,
+ .extended_capabilities = ext_capa_sta,
+ .extended_capabilities_mask = ext_capa_sta,
+ .extended_capabilities_len = sizeof(ext_capa_sta),
+ },
+ };
+
+ if (!(phy->chip_cap & MT792x_CHIP_CAP_MLO_EVT_EN))
+ return 0;
+
+ ext_capab[0].eml_capabilities = phy->eml_cap;
+ ext_capab[0].mld_capa_and_ops =
+ u16_encode_bits(1, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS);
+
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+ wiphy->iftype_ext_capab = ext_capab;
+ wiphy->num_iftype_ext_capab = ARRAY_SIZE(ext_capab);
+
+ return 0;
+}
+
static void
__mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy,
struct ieee80211_supported_band *sband,
@@ -317,62 +346,83 @@ static int mt7925_start(struct ieee80211_hw *hw)
return err;
}
-static int
-mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+static int mt7925_mac_link_bss_add(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
+ struct mt792x_link_sta *mlink)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_dev *dev = mt792x_hw_dev(hw);
- struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct mt792x_vif *mvif = mconf->vif;
struct mt76_txq *mtxq;
int idx, ret = 0;
- mt792x_mutex_acquire(dev);
-
- mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask);
- if (mvif->mt76.idx >= MT792x_MAX_INTERFACES) {
+ mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask);
+ if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) {
ret = -ENOSPC;
goto out;
}
- mvif->mt76.omac_idx = mvif->mt76.idx;
- mvif->phy = phy;
- mvif->mt76.band_idx = 0;
- mvif->mt76.wmm_idx = mvif->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
+ mconf->mt76.omac_idx = ieee80211_vif_is_mld(vif) ?
+ 0 : mconf->mt76.idx;
+ mconf->mt76.band_idx = 0xff;
+ mconf->mt76.wmm_idx = mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS;
- if (phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ)
- mvif->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4;
+ if (mvif->phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ)
+ mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4;
else
- mvif->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL;
+ mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL;
- ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid,
- true);
+ ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf,
+ &mlink->wcid, true);
if (ret)
goto out;
- dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx);
- phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx);
+ dev->mt76.vif_mask |= BIT_ULL(mconf->mt76.idx);
+ mvif->phy->omac_mask |= BIT_ULL(mconf->mt76.omac_idx);
- idx = MT792x_WTBL_RESERVED - mvif->mt76.idx;
+ idx = MT792x_WTBL_RESERVED - mconf->mt76.idx;
- INIT_LIST_HEAD(&mvif->sta.wcid.poll_list);
- mvif->sta.wcid.idx = idx;
- mvif->sta.wcid.phy_idx = mvif->mt76.band_idx;
- mvif->sta.wcid.hw_key_idx = -1;
- mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
- mvif->sta.vif = mvif;
- mt76_wcid_init(&mvif->sta.wcid);
+ INIT_LIST_HEAD(&mlink->wcid.poll_list);
+ mlink->wcid.idx = idx;
+ mlink->wcid.phy_idx = mconf->mt76.band_idx;
+ mlink->wcid.hw_key_idx = -1;
+ mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt76_wcid_init(&mlink->wcid);
mt7925_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- ewma_rssi_init(&mvif->rssi);
+ ewma_rssi_init(&mconf->rssi);
- rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
+ rcu_assign_pointer(dev->mt76.wcid[idx], &mlink->wcid);
if (vif->txq) {
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = idx;
}
+out:
+ return ret;
+}
+
+static int
+mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ int ret = 0;
+
+ mt792x_mutex_acquire(dev);
+
+ mvif->phy = phy;
+ mvif->bss_conf.vif = mvif;
+ mvif->sta.vif = mvif;
+ mvif->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+
+ ret = mt7925_mac_link_bss_add(dev, &vif->bss_conf, &mvif->sta.deflink);
+ if (ret < 0)
+ goto out;
+
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
out:
mt792x_mutex_release(dev);
@@ -386,7 +436,7 @@ static void mt7925_roc_iter(void *priv, u8 *mac,
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_phy *phy = priv;
- mt7925_mcu_abort_roc(phy, mvif, phy->roc_token_id);
+ mt7925_mcu_abort_roc(phy, &mvif->bss_conf, phy->roc_token_id);
}
void mt7925_roc_work(struct work_struct *work)
@@ -407,7 +457,8 @@ void mt7925_roc_work(struct work_struct *work)
ieee80211_remain_on_channel_expired(phy->mt76->hw);
}
-static int mt7925_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif)
+static int mt7925_abort_roc(struct mt792x_phy *phy,
+ struct mt792x_bss_conf *mconf)
{
int err = 0;
@@ -416,14 +467,14 @@ static int mt7925_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif)
mt792x_mutex_acquire(phy->dev);
if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state))
- err = mt7925_mcu_abort_roc(phy, vif, phy->roc_token_id);
+ err = mt7925_mcu_abort_roc(phy, mconf, phy->roc_token_id);
mt792x_mutex_release(phy->dev);
return err;
}
static int mt7925_set_roc(struct mt792x_phy *phy,
- struct mt792x_vif *vif,
+ struct mt792x_bss_conf *mconf,
struct ieee80211_channel *chan,
int duration,
enum mt7925_roc_req type)
@@ -435,7 +486,7 @@ static int mt7925_set_roc(struct mt792x_phy *phy,
phy->roc_grant = false;
- err = mt7925_mcu_set_roc(phy, vif, chan, duration, type,
+ err = mt7925_mcu_set_roc(phy, mconf, chan, duration, type,
++phy->roc_token_id);
if (err < 0) {
clear_bit(MT76_STATE_ROC, &phy->mt76->state);
@@ -443,7 +494,34 @@ static int mt7925_set_roc(struct mt792x_phy *phy,
}
if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, 4 * HZ)) {
- mt7925_mcu_abort_roc(phy, vif, phy->roc_token_id);
+ mt7925_mcu_abort_roc(phy, mconf, phy->roc_token_id);
+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
+ err = -ETIMEDOUT;
+ }
+
+out:
+ return err;
+}
+
+static int mt7925_set_mlo_roc(struct mt792x_phy *phy,
+ struct mt792x_bss_conf *mconf,
+ u16 sel_links)
+{
+ int err;
+
+ if (WARN_ON_ONCE(test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state)))
+ return -EBUSY;
+
+ phy->roc_grant = false;
+
+ err = mt7925_mcu_set_mlo_roc(mconf, sel_links, 5, ++phy->roc_token_id);
+ if (err < 0) {
+ clear_bit(MT76_STATE_ROC, &phy->mt76->state);
+ goto out;
+ }
+
+ if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, 4 * HZ)) {
+ mt7925_mcu_abort_roc(phy, mconf, phy->roc_token_id);
clear_bit(MT76_STATE_ROC, &phy->mt76->state);
err = -ETIMEDOUT;
}
@@ -463,7 +541,8 @@ static int mt7925_remain_on_channel(struct ieee80211_hw *hw,
int err;
mt792x_mutex_acquire(phy->dev);
- err = mt7925_set_roc(phy, mvif, chan, duration, MT7925_ROC_REQ_ROC);
+ err = mt7925_set_roc(phy, &mvif->bss_conf,
+ chan, duration, MT7925_ROC_REQ_ROC);
mt792x_mutex_release(phy->dev);
return err;
@@ -475,30 +554,31 @@ static int mt7925_cancel_remain_on_channel(struct ieee80211_hw *hw,
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_phy *phy = mt792x_hw_phy(hw);
- return mt7925_abort_roc(phy, mvif);
+ return mt7925_abort_roc(phy, &mvif->bss_conf);
}
-static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+static int mt7925_set_link_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key, int link_id)
{
struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv :
&mvif->sta;
- struct mt76_wcid *wcid = &msta->wcid;
- u8 *wcid_keyidx = &wcid->hw_key_idx;
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_link_sta *link_sta;
int idx = key->keyidx, err = 0;
-
- /* The hardware does not support per-STA RX GTK, fallback
- * to software mode for these.
- */
- if ((vif->type == NL80211_IFTYPE_ADHOC ||
- vif->type == NL80211_IFTYPE_MESH_POINT) &&
- (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
- key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EOPNOTSUPP;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_bss_conf *mconf;
+ struct mt76_wcid *wcid;
+ u8 *wcid_keyidx;
+
+ link_conf = mt792x_vif_to_bss_conf(vif, link_id);
+ link_sta = sta ? mt792x_sta_to_link_sta(vif, sta, link_id) : NULL;
+ mconf = mt792x_vif_to_link(mvif, link_id);
+ mlink = mt792x_sta_to_link(msta, link_id);
+ wcid = &mlink->wcid;
+ wcid_keyidx = &wcid->hw_key_idx;
/* fall back to sw encryption for unsupported ciphers */
switch (key->cipher) {
@@ -522,13 +602,12 @@ static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return -EOPNOTSUPP;
}
- mt792x_mutex_acquire(dev);
-
- if (cmd == SET_KEY && !mvif->mt76.cipher) {
+ if (cmd == SET_KEY && !mconf->mt76.cipher) {
struct mt792x_phy *phy = mt792x_hw_phy(hw);
- mvif->mt76.cipher = mt7925_mcu_get_cipher(key->cipher);
- mt7925_mcu_add_bss_info(phy, mvif->mt76.ctx, vif, sta, true);
+ mconf->mt76.cipher = mt7925_mcu_get_cipher(key->cipher);
+ mt7925_mcu_add_bss_info(phy, mconf->mt76.ctx, link_conf,
+ link_sta, true);
}
if (cmd == SET_KEY)
@@ -541,20 +620,59 @@ static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
mt76_wcid_key_setup(&dev->mt76, wcid,
cmd == SET_KEY ? key : NULL);
- err = mt7925_mcu_add_key(&dev->mt76, vif, &msta->bip,
+ err = mt7925_mcu_add_key(&dev->mt76, vif, &mlink->bip,
key, MCU_UNI_CMD(STA_REC_UPDATE),
- &msta->wcid, cmd);
+ &mlink->wcid, cmd, msta);
if (err)
goto out;
if (key->cipher == WLAN_CIPHER_SUITE_WEP104 ||
key->cipher == WLAN_CIPHER_SUITE_WEP40)
- err = mt7925_mcu_add_key(&dev->mt76, vif, &mvif->wep_sta->bip,
+ err = mt7925_mcu_add_key(&dev->mt76, vif, &mvif->wep_sta->deflink.bip,
key, MCU_WMWA_UNI_CMD(STA_REC_UPDATE),
- &mvif->wep_sta->wcid, cmd);
-
+ &mvif->wep_sta->deflink.wcid, cmd, msta);
out:
+ return err;
+}
+
+static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv :
+ &mvif->sta;
+ int err;
+
+ /* The hardware does not support per-STA RX GTK, fallback
+ * to software mode for these.
+ */
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
+ (key->cipher == WLAN_CIPHER_SUITE_TKIP ||
+ key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ mt792x_mutex_acquire(dev);
+
+ if (ieee80211_vif_is_mld(vif)) {
+ unsigned int link_id;
+ unsigned long add;
+
+ add = key->link_id != -1 ? BIT(key->link_id) : msta->valid_links;
+
+ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ err = mt7925_set_link_key(hw, cmd, vif, sta, key, link_id);
+ if (err < 0)
+ break;
+ }
+ } else {
+ err = mt7925_set_link_key(hw, cmd, vif, sta, key, vif->bss_conf.link_id);
+ }
+
mt792x_mutex_release(dev);
return err;
@@ -695,166 +813,388 @@ mt7925_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
return mvif->basic_rates_idx;
}
-static void mt7925_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u64 changed)
+static int mt7925_mac_link_sta_add(struct mt76_dev *mdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
- struct mt792x_phy *phy = mt792x_hw_phy(hw);
- struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct ieee80211_bss_conf *link_conf;
+ struct mt792x_bss_conf *mconf;
+ u8 link_id = link_sta->link_id;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_sta *msta;
+ int ret, idx;
- mt792x_mutex_acquire(dev);
+ msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ mlink = mt792x_sta_to_link(msta, link_id);
- if (changed & BSS_CHANGED_ERP_SLOT) {
- int slottime = info->use_short_slot ? 9 : 20;
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1);
+ if (idx < 0)
+ return -ENOSPC;
- if (slottime != phy->slottime) {
- phy->slottime = slottime;
- mt7925_mcu_set_timing(phy, vif);
- }
- }
+ mconf = mt792x_vif_to_link(mvif, link_id);
+ INIT_LIST_HEAD(&mlink->wcid.poll_list);
+ mlink->wcid.sta = 1;
+ mlink->wcid.idx = idx;
+ mlink->wcid.phy_idx = mconf->mt76.band_idx;
+ mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mlink->last_txs = jiffies;
+ mlink->wcid.link_id = link_sta->link_id;
+ mlink->wcid.link_valid = !!link_sta->sta->valid_links;
- if (changed & BSS_CHANGED_MCAST_RATE)
- mvif->mcast_rates_idx =
- mt7925_get_rates_table(hw, vif, false, true);
+ ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
+ if (ret)
+ return ret;
- if (changed & BSS_CHANGED_BASIC_RATES)
- mvif->basic_rates_idx =
- mt7925_get_rates_table(hw, vif, false, false);
+ mt7925_mac_wtbl_update(dev, idx,
+ MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- if (changed & (BSS_CHANGED_BEACON |
- BSS_CHANGED_BEACON_ENABLED)) {
- mvif->beacon_rates_idx =
- mt7925_get_rates_table(hw, vif, true, false);
+ link_conf = mt792x_vif_to_bss_conf(vif, link_id);
- mt7925_mcu_uni_add_beacon_offload(dev, hw, vif,
- info->enable_beacon);
+ /* should update bss info before STA add */
+ if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls)
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
+ link_conf, link_sta, false);
+
+ if (ieee80211_vif_is_mld(vif) &&
+ link_sta == mlink->pri_link) {
+ ret = mt7925_mcu_sta_update(dev, link_sta, vif, true,
+ MT76_STA_INFO_STATE_NONE);
+ if (ret)
+ return ret;
+ } else if (ieee80211_vif_is_mld(vif) &&
+ link_sta != mlink->pri_link) {
+ ret = mt7925_mcu_sta_update(dev, mlink->pri_link, vif,
+ true, MT76_STA_INFO_STATE_ASSOC);
+ if (ret)
+ return ret;
+
+ ret = mt7925_mcu_sta_update(dev, link_sta, vif, true,
+ MT76_STA_INFO_STATE_ASSOC);
+ if (ret)
+ return ret;
+ } else {
+ ret = mt7925_mcu_sta_update(dev, link_sta, vif, true,
+ MT76_STA_INFO_STATE_NONE);
+ if (ret)
+ return ret;
}
- /* ensure that enable txcmd_mode after bss_info */
- if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
- mt7925_mcu_set_tx(dev, vif);
+ mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
- if (changed & BSS_CHANGED_PS)
- mt7925_mcu_uni_bss_ps(dev, vif);
+ return 0;
+}
- if (changed & BSS_CHANGED_ASSOC) {
- mt7925_mcu_sta_update(dev, NULL, vif, true,
- MT76_STA_INFO_STATE_ASSOC);
- mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc);
- }
+static int
+mt7925_mac_sta_add_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, unsigned long new_links)
+{
+ struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct mt76_wcid *wcid;
+ unsigned int link_id;
+ int err = 0;
- if (changed & BSS_CHANGED_ARP_FILTER) {
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ for_each_set_bit(link_id, &new_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_link_sta *link_sta;
+ struct mt792x_link_sta *mlink;
+
+ if (msta->deflink_id == IEEE80211_LINK_UNSPECIFIED) {
+ mlink = &msta->deflink;
+ msta->deflink_id = link_id;
+ } else {
+ mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink), GFP_KERNEL);
+ if (!mlink) {
+ err = -ENOMEM;
+ break;
+ }
- mt7925_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, info);
+ wcid = &mlink->wcid;
+ ewma_signal_init(&wcid->rssi);
+ rcu_assign_pointer(dev->mt76.wcid[wcid->idx], wcid);
+ mt76_wcid_init(wcid);
+ ewma_avg_signal_init(&mlink->avg_ack_signal);
+ memset(mlink->airtime_ac, 0,
+ sizeof(msta->deflink.airtime_ac));
+ }
+
+ msta->valid_links |= BIT(link_id);
+ rcu_assign_pointer(msta->link[link_id], mlink);
+ mlink->sta = msta;
+ mlink->pri_link = &sta->deflink;
+ mlink->wcid.def_wcid = &msta->deflink.wcid;
+
+ link_sta = mt792x_sta_to_link_sta(vif, sta, link_id);
+ mt7925_mac_link_sta_add(&dev->mt76, vif, link_sta);
}
- mt792x_mutex_release(dev);
+ return err;
}
int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
- struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- int ret, idx;
-
- idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1);
- if (idx < 0)
- return -ENOSPC;
+ struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ int err;
- INIT_LIST_HEAD(&msta->wcid.poll_list);
msta->vif = mvif;
- msta->wcid.sta = 1;
- msta->wcid.idx = idx;
- msta->wcid.phy_idx = mvif->mt76.band_idx;
- msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
- msta->last_txs = jiffies;
-
- ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
- if (ret)
- return ret;
if (vif->type == NL80211_IFTYPE_STATION)
mvif->wep_sta = msta;
- mt7925_mac_wtbl_update(dev, idx,
- MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ if (ieee80211_vif_is_mld(vif)) {
+ msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
- /* should update bss info before STA add */
- if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
- mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta,
- false);
+ err = mt7925_mac_sta_add_links(dev, vif, sta, sta->valid_links);
+ } else {
+ err = mt7925_mac_link_sta_add(mdev, vif, &sta->deflink);
+ }
- ret = mt7925_mcu_sta_update(dev, sta, vif, true,
- MT76_STA_INFO_STATE_NONE);
- if (ret)
- return ret;
+ return err;
+}
+EXPORT_SYMBOL_GPL(mt7925_mac_sta_add);
- mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
+static u16
+mt7925_mac_select_links(struct mt76_dev *mdev, struct ieee80211_vif *vif)
+{
+ unsigned long usable_links = ieee80211_vif_usable_links(vif);
+ struct {
+ u8 link_id;
+ enum nl80211_band band;
+ } data[IEEE80211_MLD_MAX_NUM_LINKS];
+ u8 link_id, i, j, n_data = 0;
+ u16 sel_links = 0;
+
+ if (!ieee80211_vif_is_mld(vif))
+ return 0;
+
+ if (vif->active_links == usable_links)
+ return vif->active_links;
+
+ rcu_read_lock();
+ for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_bss_conf *link_conf =
+ rcu_dereference(vif->link_conf[link_id]);
+
+ if (WARN_ON_ONCE(!link_conf))
+ continue;
- return 0;
+ data[n_data].link_id = link_id;
+ data[n_data].band = link_conf->chanreq.oper.chan->band;
+ n_data++;
+ }
+ rcu_read_unlock();
+
+ for (i = 0; i < n_data; i++) {
+ if (!(BIT(data[i].link_id) & vif->active_links))
+ continue;
+
+ sel_links = BIT(data[i].link_id);
+
+ for (j = 0; j < n_data; j++) {
+ if (data[i].band != data[j].band) {
+ sel_links |= BIT(data[j].link_id);
+ break;
+ }
+ }
+
+ break;
+ }
+
+ return sel_links;
}
-EXPORT_SYMBOL_GPL(mt7925_mac_sta_add);
-void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static void
+mt7925_mac_set_links(struct mt76_dev *mdev, struct ieee80211_vif *vif)
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
- struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct ieee80211_bss_conf *link_conf =
+ mt792x_vif_to_bss_conf(vif, mvif->deflink_id);
+ struct cfg80211_chan_def *chandef = &link_conf->chanreq.oper;
+ enum nl80211_band band = chandef->chan->band, secondary_band;
+
+ u16 sel_links = mt7925_mac_select_links(mdev, vif);
+ u8 secondary_link_id = __ffs(~BIT(mvif->deflink_id) & sel_links);
+
+ if (!ieee80211_vif_is_mld(vif) || hweight16(sel_links) < 2)
+ return;
+
+ link_conf = mt792x_vif_to_bss_conf(vif, secondary_link_id);
+ secondary_band = link_conf->chanreq.oper.chan->band;
+
+ if (band == NL80211_BAND_2GHZ ||
+ (band == NL80211_BAND_5GHZ && secondary_band == NL80211_BAND_6GHZ)) {
+ mt7925_abort_roc(mvif->phy, &mvif->bss_conf);
+
+ mt792x_mutex_acquire(dev);
+
+ mt7925_set_mlo_roc(mvif->phy, &mvif->bss_conf, sel_links);
+
+ mt792x_mutex_release(dev);
+ }
+
+ ieee80211_set_active_links_async(vif, sel_links);
+}
+
+static void mt7925_mac_link_sta_assoc(struct mt76_dev *mdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta)
+{
+ struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+ struct ieee80211_bss_conf *link_conf;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_sta *msta;
+
+ msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ mlink = mt792x_sta_to_link(msta, link_sta->link_id);
mt792x_mutex_acquire(dev);
- if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
- mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta,
- true);
+ if (ieee80211_vif_is_mld(vif)) {
+ link_conf = mt792x_vif_to_bss_conf(vif, msta->deflink_id);
+ } else {
+ link_conf = mt792x_vif_to_bss_conf(vif, vif->bss_conf.link_id);
+ }
+
+ if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) {
+ struct mt792x_bss_conf *mconf;
- ewma_avg_signal_init(&msta->avg_ack_signal);
+ mconf = mt792x_link_conf_to_mconf(link_conf);
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx,
+ link_conf, link_sta, true);
+ }
+
+ ewma_avg_signal_init(&mlink->avg_ack_signal);
- mt7925_mac_wtbl_update(dev, msta->wcid.idx,
+ mt7925_mac_wtbl_update(dev, mlink->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac));
+ memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac));
- mt7925_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
+ mt7925_mcu_sta_update(dev, link_sta, vif, true, MT76_STA_INFO_STATE_ASSOC);
mt792x_mutex_release(dev);
}
+
+void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ if (ieee80211_vif_is_mld(vif)) {
+ struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct ieee80211_link_sta *link_sta;
+
+ link_sta = mt792x_sta_to_link_sta(vif, sta, msta->deflink_id);
+
+ mt7925_mac_set_links(mdev, vif);
+
+ mt7925_mac_link_sta_assoc(mdev, vif, link_sta);
+ } else {
+ mt7925_mac_link_sta_assoc(mdev, vif, &sta->deflink);
+ }
+}
EXPORT_SYMBOL_GPL(mt7925_mac_sta_assoc);
-void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta)
{
struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
- struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct ieee80211_bss_conf *link_conf;
+ u8 link_id = link_sta->link_id;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_sta *msta;
- mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
+ msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ mlink = mt792x_sta_to_link(msta, link_id);
+
+ mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid);
mt76_connac_pm_wake(&dev->mphy, &dev->pm);
- mt7925_mcu_sta_update(dev, sta, vif, false, MT76_STA_INFO_STATE_NONE);
- mt7925_mac_wtbl_update(dev, msta->wcid.idx,
+ mt7925_mcu_sta_update(dev, link_sta, vif, false,
+ MT76_STA_INFO_STATE_NONE);
+ mt7925_mac_wtbl_update(dev, mlink->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- if (vif->type == NL80211_IFTYPE_STATION) {
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ link_conf = mt792x_vif_to_bss_conf(vif, link_id);
- mvif->wep_sta = NULL;
- ewma_rssi_init(&mvif->rssi);
- if (!sta->tdls)
- mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta,
- false);
+ if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) {
+ struct mt792x_bss_conf *mconf;
+
+ mconf = mt792x_link_conf_to_mconf(link_conf);
+ mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf,
+ link_sta, false);
}
spin_lock_bh(&mdev->sta_poll_lock);
- if (!list_empty(&msta->wcid.poll_list))
- list_del_init(&msta->wcid.poll_list);
+ if (!list_empty(&mlink->wcid.poll_list))
+ list_del_init(&mlink->wcid.poll_list);
spin_unlock_bh(&mdev->sta_poll_lock);
mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
+
+static int
+mt7925_mac_sta_remove_links(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, unsigned long old_links)
+{
+ struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_wcid *wcid;
+ unsigned int link_id;
+
+ for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct ieee80211_link_sta *link_sta;
+ struct mt792x_link_sta *mlink;
+
+ link_sta = mt792x_sta_to_link_sta(vif, sta, link_id);
+ if (!link_sta)
+ continue;
+
+ mlink = mt792x_sta_to_link(msta, link_id);
+ if (!mlink)
+ continue;
+
+ mt7925_mac_link_sta_remove(&dev->mt76, vif, link_sta);
+
+ wcid = &mlink->wcid;
+ rcu_assign_pointer(msta->link[link_id], NULL);
+ msta->valid_links &= ~BIT(link_id);
+ mlink->sta = NULL;
+ mlink->pri_link = NULL;
+
+ if (link_sta != mlink->pri_link) {
+ mt76_wcid_cleanup(mdev, wcid);
+ mt76_wcid_mask_clear(mdev->wcid_mask, wcid->idx);
+ mt76_wcid_mask_clear(mdev->wcid_phy_mask, wcid->idx);
+ }
+
+ if (msta->deflink_id == link_id)
+ msta->deflink_id = IEEE80211_LINK_UNSPECIFIED;
+ }
+
+ return 0;
+}
+
+void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76);
+ struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ unsigned long rem;
+
+ rem = ieee80211_vif_is_mld(vif) ? msta->valid_links : BIT(0);
+
+ mt7925_mac_sta_remove_links(dev, vif, sta, rem);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+
+ mvif->wep_sta = NULL;
+ ewma_rssi_init(&mvif->bss_conf.rssi);
+ }
+}
EXPORT_SYMBOL_GPL(mt7925_mac_sta_remove);
static int mt7925_set_rts_threshold(struct ieee80211_hw *hw, u32 val)
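
The MLO rework above repeats one pattern throughout (sta add/remove, key install, decap offload, ipv6 offload): walk a valid_links bitmap with for_each_set_bit, where each set bit is one link id and non-MLD interfaces degenerate to BIT(0). A minimal plain-C sketch of that iteration:

/* Per-link bitmap walk; MLD_MAX_NUM_LINKS mirrors the mac80211 limit. */
#include <stdio.h>

#define MLD_MAX_NUM_LINKS 15

int main(void)
{
	int is_mld = 1;
	unsigned long valid_links = 0x5;	/* links 0 and 2 */
	unsigned long valid = is_mld ? valid_links : 1UL << 0;

	for (unsigned int id = 0; id < MLD_MAX_NUM_LINKS; id++) {
		if (!(valid & (1UL << id)))
			continue;
		printf("operate on link %u\n", id);	/* per-link work */
	}
	return 0;
}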
@@ -890,12 +1230,12 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt792x_mutex_acquire(dev);
switch (action) {
case IEEE80211_AMPDU_RX_START:
- mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn,
+ mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn,
params->buf_size);
mt7925_mcu_uni_rx_ba(dev, params, true);
break;
case IEEE80211_AMPDU_RX_STOP:
- mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid);
+ mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid);
mt7925_mcu_uni_rx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
@@ -906,16 +1246,16 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
- clear_bit(tid, &msta->wcid.ampdu_state);
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
mt7925_mcu_uni_tx_ba(dev, params, false);
break;
case IEEE80211_AMPDU_TX_START:
- set_bit(tid, &msta->wcid.ampdu_state);
+ set_bit(tid, &msta->deflink.wcid.ampdu_state);
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
- clear_bit(tid, &msta->wcid.ampdu_state);
+ clear_bit(tid, &msta->deflink.wcid.ampdu_state);
mt7925_mcu_uni_tx_ba(dev, params, false);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -1156,27 +1496,38 @@ static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw,
bool enabled)
{
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ unsigned long valid;
+ u8 i;
mt792x_mutex_acquire(dev);
- if (enabled)
- set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
- else
- clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags);
+ valid = ieee80211_vif_is_mld(vif) ? mvif->valid_links : BIT(0);
+
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ struct mt792x_link_sta *mlink;
- mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta);
+ mlink = mt792x_sta_to_link(msta, i);
+
+ if (enabled)
+ set_bit(MT_WCID_FLAG_HDR_TRANS, &mlink->wcid.flags);
+ else
+ clear_bit(MT_WCID_FLAG_HDR_TRANS, &mlink->wcid.flags);
+
+ mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta, i);
+ }
mt792x_mutex_release(dev);
}
#if IS_ENABLED(CONFIG_IPV6)
-static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct inet6_dev *idev)
+static void __mt7925_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_bss_conf *link_conf,
+ struct inet6_dev *idev)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_dev *dev = mvif->phy->dev;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct inet6_ifaddr *ifa;
struct sk_buff *skb;
u8 idx = 0;
@@ -1190,7 +1541,7 @@ static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw,
struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
} req_hdr = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
},
.arpns = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND),
@@ -1225,6 +1576,23 @@ static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw,
ieee80211_queue_work(dev->mt76.hw, &dev->ipv6_ns_work);
}
+
+static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct inet6_dev *idev)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ unsigned long valid = ieee80211_vif_is_mld(vif) ?
+ mvif->valid_links : BIT(0);
+ struct ieee80211_bss_conf *bss_conf;
+ int i;
+
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ __mt7925_ipv6_addr_change(hw, bss_conf, idev);
+ }
+}
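mt7925_ipv6_addr_change() is reduced to a thin fan-out wrapper: a non-MLD vif collapses to BIT(0), so the legacy single-link case falls out of the same loop. The pattern in isolation (the per-link worker here is hypothetical):

#include <linux/bitops.h>

#define DEMO_MAX_LINKS	15

/* Hypothetical per-link worker, standing in for __mt7925_ipv6_addr_change() */
static void demo_per_link_work(int link_id)
{
}

static void demo_fan_out(bool is_mld, unsigned long valid_links)
{
	unsigned long valid = is_mld ? valid_links : BIT(0);
	int i;

	for_each_set_bit(i, &valid, DEMO_MAX_LINKS)
		demo_per_link_work(i);
}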
+
#endif
int mt7925_set_tx_sar_pwr(struct ieee80211_hw *hw,
@@ -1280,6 +1648,7 @@ mt7925_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct ieee80211_tx_queue_params *params)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_vif_to_link(mvif, link_id);
static const u8 mq_to_aci[] = {
[IEEE80211_AC_VO] = 3,
[IEEE80211_AC_VI] = 2,
@@ -1288,7 +1657,7 @@ mt7925_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
};
/* firmware uses access class index */
- mvif->queue_params[mq_to_aci[queue]] = *params;
+ mconf->queue_params[mq_to_aci[queue]] = *params;
return 0;
}
@@ -1303,12 +1672,12 @@ mt7925_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt792x_mutex_acquire(dev);
- err = mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, NULL,
- true);
+ err = mt7925_mcu_add_bss_info(&dev->phy, mvif->bss_conf.mt76.ctx,
+ link_conf, NULL, true);
if (err)
goto out;
- err = mt7925_mcu_set_bss_pm(dev, vif, true);
+ err = mt7925_mcu_set_bss_pm(dev, link_conf, true);
if (err)
goto out;
@@ -1330,12 +1699,12 @@ mt7925_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
mt792x_mutex_acquire(dev);
- err = mt7925_mcu_set_bss_pm(dev, vif, false);
+ err = mt7925_mcu_set_bss_pm(dev, link_conf, false);
if (err)
goto out;
- mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, NULL,
- false);
+ mt7925_mcu_add_bss_info(&dev->phy, mvif->bss_conf.mt76.ctx, link_conf,
+ NULL, false);
out:
mt792x_mutex_release(dev);
@@ -1354,34 +1723,52 @@ mt7925_remove_chanctx(struct ieee80211_hw *hw,
{
}
-static void mt7925_ctx_iter(void *priv, u8 *mac,
- struct ieee80211_vif *vif)
+static void
+mt7925_change_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *ctx,
+ u32 changed)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct ieee80211_chanctx_conf *ctx = priv;
+ struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
+ struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ struct mt792x_bss_conf *mconf;
+ struct ieee80211_vif *vif;
+ struct mt792x_vif *mvif;
- if (ctx != mvif->mt76.ctx)
+ if (!mctx->bss_conf)
return;
+ mconf = mctx->bss_conf;
+ mvif = mconf->vif;
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ mt792x_mutex_acquire(phy->dev);
if (vif->type == NL80211_IFTYPE_MONITOR) {
mt7925_mcu_set_sniffer(mvif->phy->dev, vif, true);
mt7925_mcu_config_sniffer(mvif, ctx);
} else {
- mt7925_mcu_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx);
- }
-}
+ if (ieee80211_vif_is_mld(vif)) {
+ unsigned long valid = mvif->valid_links;
+ u8 i;
-static void
-mt7925_change_chanctx(struct ieee80211_hw *hw,
- struct ieee80211_chanctx_conf *ctx,
- u32 changed)
-{
- struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ mconf = mt792x_vif_to_link(mvif, i);
+ if (mconf && mconf->mt76.ctx == ctx)
+ break;
+ }
+
+ } else {
+ mconf = &mvif->bss_conf;
+ }
+
+ if (mconf) {
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id);
+ mt7925_mcu_set_chctx(mvif->phy->mt76, &mconf->mt76,
+ link_conf, ctx);
+ }
+ }
- mt792x_mutex_acquire(phy->dev);
- ieee80211_iterate_active_interfaces(phy->mt76->hw,
- IEEE80211_IFACE_ITER_ACTIVE,
- mt7925_ctx_iter, ctx);
mt792x_mutex_release(phy->dev);
}
@@ -1395,7 +1782,8 @@ static void mt7925_mgd_prepare_tx(struct ieee80211_hw *hw,
jiffies_to_msecs(HZ);
mt792x_mutex_acquire(dev);
- mt7925_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration,
+ mt7925_set_roc(mvif->phy, &mvif->bss_conf,
+ mvif->bss_conf.mt76.ctx->def.chan, duration,
MT7925_ROC_REQ_JOIN);
mt792x_mutex_release(dev);
}
@@ -1406,7 +1794,285 @@ static void mt7925_mgd_complete_tx(struct ieee80211_hw *hw,
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- mt7925_abort_roc(mvif->phy, mvif);
+ mt7925_abort_roc(mvif->phy, &mvif->bss_conf);
+}
+
+static void mt7925_vif_cfg_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u64 changed)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ unsigned long valid = ieee80211_vif_is_mld(vif) ?
+ mvif->valid_links : BIT(0);
+ struct ieee80211_bss_conf *bss_conf;
+ int i;
+
+ mt792x_mutex_acquire(dev);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ mt7925_mcu_sta_update(dev, NULL, vif, true,
+ MT76_STA_INFO_STATE_ASSOC);
+ mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc);
+ }
+
+ if (changed & BSS_CHANGED_ARP_FILTER) {
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ mt7925_mcu_update_arp_filter(&dev->mt76, bss_conf);
+ }
+ }
+
+ if (changed & BSS_CHANGED_PS) {
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ mt7925_mcu_uni_bss_ps(dev, bss_conf);
+ }
+ }
+
+ mt792x_mutex_release(dev);
+}
+
+static void mt7925_link_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info,
+ u64 changed)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt792x_bss_conf *mconf;
+
+ mconf = mt792x_vif_to_link(mvif, info->link_id);
+
+ mt792x_mutex_acquire(dev);
+
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ int slottime = info->use_short_slot ? 9 : 20;
+
+ if (slottime != phy->slottime) {
+ phy->slottime = slottime;
+ mt7925_mcu_set_timing(phy, info);
+ }
+ }
+
+ if (changed & BSS_CHANGED_MCAST_RATE)
+ mconf->mt76.mcast_rates_idx =
+ mt7925_get_rates_table(hw, vif, false, true);
+
+ if (changed & BSS_CHANGED_BASIC_RATES)
+ mconf->mt76.basic_rates_idx =
+ mt7925_get_rates_table(hw, vif, false, false);
+
+ if (changed & (BSS_CHANGED_BEACON |
+ BSS_CHANGED_BEACON_ENABLED)) {
+ mconf->mt76.beacon_rates_idx =
+ mt7925_get_rates_table(hw, vif, true, false);
+
+ mt7925_mcu_uni_add_beacon_offload(dev, hw, vif,
+ info->enable_beacon);
+ }
+
+ /* ensure txcmd_mode is enabled after the bss_info update */
+ if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED))
+ mt7925_mcu_set_tx(dev, info);
+
+ mt792x_mutex_release(dev);
+}
+
+static int
+mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 old_links, u16 new_links,
+ struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS])
+{
+ struct mt792x_bss_conf *mconfs[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *mconf;
+ struct mt792x_link_sta *mlinks[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *mlink;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ unsigned long add = new_links & ~old_links;
+ unsigned long rem = old_links & ~new_links;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct mt792x_phy *phy = mt792x_hw_phy(hw);
+ struct ieee80211_bss_conf *link_conf;
+ unsigned int link_id;
+ int err;
+
+ if (old_links == new_links)
+ return 0;
+
+ mt792x_mutex_acquire(dev);
+
+ for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) {
+ mconf = mt792x_vif_to_link(mvif, link_id);
+ mlink = mt792x_sta_to_link(&mvif->sta, link_id);
+
+ if (!mconf || !mlink)
+ continue;
+
+ if (mconf != &mvif->bss_conf) {
+ mt792x_mac_link_bss_remove(dev, mconf, mlink);
+ devm_kfree(dev->mt76.dev, mconf);
+ devm_kfree(dev->mt76.dev, mlink);
+ }
+
+ rcu_assign_pointer(mvif->link_conf[link_id], NULL);
+ rcu_assign_pointer(mvif->sta.link[link_id], NULL);
+ }
+
+ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ if (!old_links) {
+ mvif->deflink_id = link_id;
+ mconf = &mvif->bss_conf;
+ mlink = &mvif->sta.deflink;
+ } else {
+ mconf = devm_kzalloc(dev->mt76.dev, sizeof(*mconf),
+ GFP_KERNEL);
+ mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink),
+ GFP_KERNEL);
+ if (!mconf || !mlink) {
+ err = -ENOMEM;
+ goto free;
+ }
+ }
+
+ mconfs[link_id] = mconf;
+ mlinks[link_id] = mlink;
+ mconf->link_id = link_id;
+ mconf->vif = mvif;
+ mlink->wcid.link_id = link_id;
+ mlink->wcid.link_valid = !!vif->valid_links;
+ mlink->wcid.def_wcid = &mvif->sta.deflink.wcid;
+ }
+
+ if (hweight16(mvif->valid_links) == 0)
+ mt792x_mac_link_bss_remove(dev, &mvif->bss_conf,
+ &mvif->sta.deflink);
+
+ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ mconf = mconfs[link_id];
+ mlink = mlinks[link_id];
+ link_conf = mt792x_vif_to_bss_conf(vif, link_id);
+
+ rcu_assign_pointer(mvif->link_conf[link_id], mconf);
+ rcu_assign_pointer(mvif->sta.link[link_id], mlink);
+
+ err = mt7925_mac_link_bss_add(dev, link_conf, mlink);
+ if (err < 0)
+ goto free;
+
+ if (mconf != &mvif->bss_conf) {
+ err = mt7925_set_mlo_roc(phy, &mvif->bss_conf,
+ vif->active_links);
+ if (err < 0)
+ goto free;
+ }
+ }
+
+ mvif->valid_links = new_links;
+
+ mt792x_mutex_release(dev);
+
+ return 0;
+
+free:
+ for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+ rcu_assign_pointer(mvif->link_conf[link_id], NULL);
+ rcu_assign_pointer(mvif->sta.link[link_id], NULL);
+
+ if (mconfs[link_id] && mconfs[link_id] != &mvif->bss_conf)
+ devm_kfree(dev->mt76.dev, mconfs[link_id]);
+ if (mlinks[link_id] && mlinks[link_id] != &mvif->sta.deflink)
+ devm_kfree(dev->mt76.dev, mlinks[link_id]);
+ }
+
+ mt792x_mutex_release(dev);
+
+ return err;
+}
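The add path in mt7925_change_vif_links() is a two-phase allocate-then-publish sequence, with a rollback label that unwinds only the links added by this call. A condensed sketch of the flow (demo_* names are illustrative, and plain kzalloc/kfree stand in for the devm variants):

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/slab.h>

#define DEMO_MAX_LINKS	15

struct demo_conf {
	int link_id;
};

static int demo_add_links(struct demo_conf *confs[DEMO_MAX_LINKS],
			  unsigned long add)
{
	struct demo_conf *tmp[DEMO_MAX_LINKS] = {};
	unsigned int id;
	int err = 0;

	/* phase 1: allocate everything before publishing anything */
	for_each_set_bit(id, &add, DEMO_MAX_LINKS) {
		tmp[id] = kzalloc(sizeof(*tmp[id]), GFP_KERNEL);
		if (!tmp[id]) {
			err = -ENOMEM;
			goto free;
		}
		tmp[id]->link_id = id;
	}

	/* phase 2: publish the fully constructed set */
	for_each_set_bit(id, &add, DEMO_MAX_LINKS)
		confs[id] = tmp[id];

	return 0;

free:
	/* unwind only what this call allocated; kfree(NULL) is a no-op */
	for_each_set_bit(id, &add, DEMO_MAX_LINKS)
		kfree(tmp[id]);
	return err;
}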
+
+static int
+mt7925_change_sta_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 old_links, u16 new_links)
+{
+ unsigned long add = new_links & ~old_links;
+ unsigned long rem = old_links & ~new_links;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ int err = 0;
+
+ if (old_links == new_links)
+ return 0;
+
+ mt792x_mutex_acquire(dev);
+
+ err = mt7925_mac_sta_remove_links(dev, vif, sta, rem);
+ if (err < 0)
+ goto out;
+
+ err = mt7925_mac_sta_add_links(dev, vif, sta, add);
+ if (err < 0)
+ goto out;
+
+out:
+ mt792x_mutex_release(dev);
+
+ return err;
+}
+
+static int mt7925_assign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct ieee80211_bss_conf *pri_link_conf;
+ struct mt792x_bss_conf *mconf;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (ieee80211_vif_is_mld(vif)) {
+ mconf = mt792x_vif_to_link(mvif, link_conf->link_id);
+ pri_link_conf = mt792x_vif_to_bss_conf(vif, mvif->deflink_id);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mconf == &mvif->bss_conf)
+ mt7925_mcu_add_bss_info(&dev->phy, NULL, pri_link_conf,
+ NULL, true);
+ } else {
+ mconf = &mvif->bss_conf;
+ }
+
+ mconf->mt76.ctx = ctx;
+ mctx->bss_conf = mconf;
+ mutex_unlock(&dev->mt76.mutex);
+
+ return 0;
+}
+
+static void mt7925_unassign_vif_chanctx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_dev *dev = mt792x_hw_dev(hw);
+ struct ieee80211_bss_conf *pri_link_conf;
+ struct mt792x_bss_conf *mconf;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (ieee80211_vif_is_mld(vif)) {
+ mconf = mt792x_vif_to_link(mvif, link_conf->link_id);
+ pri_link_conf = mt792x_vif_to_bss_conf(vif, mvif->deflink_id);
+
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ mconf == &mvif->bss_conf)
+ mt7925_mcu_add_bss_info(&dev->phy, NULL, pri_link_conf,
+ NULL, false);
+ } else {
+ mconf = &mvif->bss_conf;
+ }
+
+ mctx->bss_conf = NULL;
+ mconf->mt76.ctx = NULL;
+ mutex_unlock(&dev->mt76.mutex);
}
const struct ieee80211_ops mt7925_ops = {
@@ -1418,7 +2084,6 @@ const struct ieee80211_ops mt7925_ops = {
.config = mt7925_config,
.conf_tx = mt7925_conf_tx,
.configure_filter = mt7925_configure_filter,
- .bss_info_changed = mt7925_bss_info_changed,
.start_ap = mt7925_start_ap,
.stop_ap = mt7925_stop_ap,
.sta_state = mt76_sta_state,
@@ -1462,10 +2127,14 @@ const struct ieee80211_ops mt7925_ops = {
.add_chanctx = mt7925_add_chanctx,
.remove_chanctx = mt7925_remove_chanctx,
.change_chanctx = mt7925_change_chanctx,
- .assign_vif_chanctx = mt792x_assign_vif_chanctx,
- .unassign_vif_chanctx = mt792x_unassign_vif_chanctx,
+ .assign_vif_chanctx = mt7925_assign_vif_chanctx,
+ .unassign_vif_chanctx = mt7925_unassign_vif_chanctx,
.mgd_prepare_tx = mt7925_mgd_prepare_tx,
.mgd_complete_tx = mt7925_mgd_complete_tx,
+ .vif_cfg_changed = mt7925_vif_cfg_changed,
+ .link_info_changed = mt7925_link_info_changed,
+ .change_vif_links = mt7925_change_vif_links,
+ .change_sta_links = mt7925_change_sta_links,
};
EXPORT_SYMBOL_GPL(mt7925_ops);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
index bd37cb8d734b..9dc22fbe25d3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c
@@ -121,11 +121,12 @@ int mt7925_mcu_regval(struct mt792x_dev *dev, u32 regidx, u32 *val, bool set)
EXPORT_SYMBOL_GPL(mt7925_mcu_regval);
int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
- struct mt76_vif *vif,
- struct ieee80211_bss_conf *info)
+ struct ieee80211_bss_conf *link_conf)
{
- struct ieee80211_vif *mvif = container_of(info, struct ieee80211_vif,
- bss_conf);
+ struct ieee80211_vif *mvif = link_conf->vif;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct sk_buff *skb;
int i, len = min_t(int, mvif->cfg.arp_addr_cnt,
IEEE80211_BSS_ARP_ADDR_LIST_LEN);
@@ -137,7 +138,7 @@ int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
struct mt7925_arpns_tlv arp;
} req = {
.hdr = {
- .bss_idx = vif->idx,
+ .bss_idx = mconf->mt76.idx,
},
.arp = {
.tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP),
@@ -308,22 +309,23 @@ mt7925_mcu_roc_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
struct mt7925_roc_grant_tlv *grant = priv;
+ if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION)
+ return;
+
if (mvif->idx != grant->bss_idx)
return;
mvif->band_idx = grant->dbdcband;
}
-static void
-mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb)
+static void mt7925_mcu_roc_handle_grant(struct mt792x_dev *dev,
+ struct tlv *tlv)
{
struct ieee80211_hw *hw = dev->mt76.hw;
struct mt7925_roc_grant_tlv *grant;
- struct mt7925_mcu_rxd *rxd;
int duration;
- rxd = (struct mt7925_mcu_rxd *)skb->data;
- grant = (struct mt7925_roc_grant_tlv *)(rxd->tlv + 4);
+ grant = (struct mt7925_roc_grant_tlv *)tlv;
/* should never happen */
WARN_ON_ONCE((le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT));
@@ -342,6 +344,29 @@ mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb)
}
static void
+mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb)
+{
+ struct tlv *tlv;
+ int i = 0;
+
+ skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4);
+
+ while (i < skb->len) {
+ tlv = (struct tlv *)(skb->data + i);
+
+ switch (le16_to_cpu(tlv->tag)) {
+ case UNI_EVENT_ROC_GRANT:
+ mt7925_mcu_roc_handle_grant(dev, tlv);
+ break;
+ case UNI_EVENT_ROC_GRANT_SUB_LINK:
+ break;
+ }
+
+ i += le16_to_cpu(tlv->len);
+ }
+}
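The event handler now walks a self-describing TLV stream instead of reading one tag at a fixed offset: each record carries its own length, and the cursor advances by tlv->len until the buffer is exhausted. A sketch of such a walk over a raw buffer; the sketch additionally bounds-checks and rejects zero-length records, which a robust walker needs to avoid spinning on malformed input:

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_tlv {
	__le16 tag;
	__le16 len;	/* total record length, header included */
	u8 data[];
};

static void demo_walk_tlvs(const u8 *buf, int buf_len)
{
	int i = 0;

	while (i + (int)sizeof(struct demo_tlv) <= buf_len) {
		const struct demo_tlv *tlv = (const void *)(buf + i);
		u16 len = le16_to_cpu(tlv->len);

		if (!len || i + len > buf_len)
			break;	/* malformed record: stop walking */

		/* dispatch on le16_to_cpu(tlv->tag) here */
		i += len;
	}
}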
+
+static void
mt7925_mcu_scan_event(struct mt792x_dev *dev, struct sk_buff *skb)
{
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -544,9 +569,9 @@ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev,
struct mt792x_vif *mvif = msta->vif;
if (enable && !params->amsdu)
- msta->wcid.amsdu = false;
+ msta->deflink.wcid.amsdu = false;
- return mt7925_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
enable, true);
}
@@ -557,7 +582,7 @@ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv;
struct mt792x_vif *mvif = msta->vif;
- return mt7925_mcu_sta_ba(&dev->mt76, &mvif->mt76, params,
+ return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params,
enable, false);
}
@@ -726,6 +751,20 @@ mt7925_mcu_parse_phy_cap(struct mt792x_dev *dev, char *data)
dev->has_eht = cap->eht;
}
+static void
+mt7925_mcu_parse_eml_cap(struct mt792x_dev *dev, char *data)
+{
+ struct mt7925_mcu_eml_cap {
+ u8 rsv[4];
+ __le16 eml_cap;
+ u8 rsv2[6];
+ } __packed *cap;
+
+ cap = (struct mt7925_mcu_eml_cap *)data;
+
+ dev->phy.eml_cap = le16_to_cpu(cap->eml_cap);
+}
+
static int
mt7925_mcu_get_nic_capability(struct mt792x_dev *dev)
{
@@ -780,6 +819,12 @@ mt7925_mcu_get_nic_capability(struct mt792x_dev *dev)
case MT_NIC_CAP_PHY:
mt7925_mcu_parse_phy_cap(dev, tlv->data);
break;
+ case MT_NIC_CAP_CHIP_CAP:
+ memcpy(&dev->phy.chip_cap, (void *)tlv->data, sizeof(u64));
+ break;
+ case MT_NIC_CAP_EML_CAP:
+ mt7925_mcu_parse_eml_cap(dev, tlv->data);
+ break;
default:
break;
}
@@ -848,7 +893,7 @@ EXPORT_SYMBOL_GPL(mt7925_run_firmware);
static void
mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_link_sta *link_sta)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct sta_rec_hdr_trans *hdr_trans;
@@ -864,10 +909,15 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
else
hdr_trans->from_ds = true;
- if (sta)
- wcid = (struct mt76_wcid *)sta->drv_priv;
- else
- wcid = &mvif->sta.wcid;
+ if (link_sta) {
+ struct mt792x_sta *msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ struct mt792x_link_sta *mlink;
+
+ mlink = mt792x_sta_to_link(msta, link_sta->link_id);
+ wcid = &mlink->wcid;
+ } else {
+ wcid = &mvif->sta.deflink.wcid;
+ }
if (!wcid)
return;
@@ -881,27 +931,36 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb,
int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta,
+ int link_id)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct ieee80211_link_sta *link_sta;
+ struct mt792x_link_sta *mlink;
+ struct mt792x_bss_conf *mconf;
struct mt792x_sta *msta;
struct sk_buff *skb;
msta = sta ? (struct mt792x_sta *)sta->drv_priv : &mvif->sta;
- skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
- &msta->wcid,
+ mlink = mt792x_sta_to_link(msta, link_id);
+ link_sta = mt792x_sta_to_link_sta(vif, sta, link_id);
+ mconf = mt792x_vif_to_link(mvif, link_id);
+
+ skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mconf->mt76,
+ &mlink->wcid,
MT7925_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
/* starec hdr trans */
- mt7925_mcu_sta_hdr_trans_tlv(skb, vif, sta);
+ mt7925_mcu_sta_hdr_trans_tlv(skb, vif, link_sta);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true);
}
-int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
+int mt7925_mcu_set_tx(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *bss_conf)
{
#define MCU_EDCA_AC_PARAM 0
#define WMM_AIFS_SET BIT(0)
@@ -910,12 +969,12 @@ int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
#define WMM_TXOP_SET BIT(3)
#define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \
WMM_CW_MAX_SET | WMM_TXOP_SET)
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(bss_conf);
struct {
u8 bss_idx;
u8 __rsv[3];
} __packed hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
};
struct sk_buff *skb;
int len = sizeof(hdr) + IEEE80211_NUM_ACS * sizeof(struct edca);
@@ -928,7 +987,7 @@ int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif)
skb_put_data(skb, &hdr, sizeof(hdr));
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
- struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
+ struct ieee80211_tx_queue_params *q = &mconf->queue_params[ac];
struct edca *e;
struct tlv *tlv;
@@ -960,11 +1019,12 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid,
struct mt76_connac_sta_key_conf *sta_key_conf,
struct sk_buff *skb,
struct ieee80211_key_conf *key,
- enum set_key_cmd cmd)
+ enum set_key_cmd cmd,
+ struct mt792x_sta *msta)
{
- struct mt792x_sta *msta = container_of(wcid, struct mt792x_sta, wcid);
- struct sta_rec_sec_uni *sec;
struct mt792x_vif *mvif = msta->vif;
+ struct mt792x_bss_conf *mconf = mt792x_vif_to_link(mvif, wcid->link_id);
+ struct sta_rec_sec_uni *sec;
struct ieee80211_sta *sta;
struct ieee80211_vif *vif;
struct tlv *tlv;
@@ -976,17 +1036,27 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V3, sizeof(*sec));
sec = (struct sta_rec_sec_uni *)tlv;
- sec->bss_idx = mvif->mt76.idx;
+ sec->bss_idx = mconf->mt76.idx;
sec->is_authenticator = 0;
- sec->mgmt_prot = 0;
+ sec->mgmt_prot = 1; /* only used in MLO mode */
sec->wlan_idx = (u8)wcid->idx;
if (sta) {
+ struct ieee80211_link_sta *link_sta;
+
sec->tx_key = 1;
sec->key_type = 1;
- memcpy(sec->peer_addr, sta->addr, ETH_ALEN);
+ link_sta = mt792x_sta_to_link_sta(vif, sta, wcid->link_id);
+
+ if (link_sta)
+ memcpy(sec->peer_addr, link_sta->addr, ETH_ALEN);
} else {
- memcpy(sec->peer_addr, vif->bss_conf.bssid, ETH_ALEN);
+ struct ieee80211_bss_conf *link_conf;
+
+ link_conf = mt792x_vif_to_bss_conf(vif, wcid->link_id);
+
+ if (link_conf)
+ memcpy(sec->peer_addr, link_conf->bssid, ETH_ALEN);
}
if (cmd == SET_KEY) {
@@ -1031,25 +1101,121 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid,
int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct mt76_connac_sta_key_conf *sta_key_conf,
struct ieee80211_key_conf *key, int mcu_cmd,
- struct mt76_wcid *wcid, enum set_key_cmd cmd)
+ struct mt76_wcid *wcid, enum set_key_cmd cmd,
+ struct mt792x_sta *msta)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_vif_to_link(mvif, wcid->link_id);
struct sk_buff *skb;
int ret;
- skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid,
+ skb = __mt76_connac_mcu_alloc_sta_req(dev, &mconf->mt76, wcid,
MT7925_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
- ret = mt7925_mcu_sta_key_tlv(wcid, sta_key_conf, skb, key, cmd);
+ ret = mt7925_mcu_sta_key_tlv(wcid, sta_key_conf, skb, key, cmd, msta);
if (ret)
return ret;
return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true);
}
-int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
+int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
+ int duration, u8 token_id)
+{
+ struct mt792x_vif *mvif = mconf ? mconf->vif : NULL;
+ struct ieee80211_vif *vif = container_of((void *)mvif,
+ struct ieee80211_vif, drv_priv);
+ struct ieee80211_bss_conf *link_conf;
+ struct ieee80211_channel *chan;
+ const u8 ch_band[] = {
+ [NL80211_BAND_2GHZ] = 1,
+ [NL80211_BAND_5GHZ] = 2,
+ [NL80211_BAND_6GHZ] = 3,
+ };
+ enum mt7925_roc_req type;
+ int center_ch, i = 0;
+ bool is_AG_band = false;
+ struct {
+ u8 id;
+ u8 bss_idx;
+ u16 tag;
+ struct mt792x_bss_conf *mconf;
+ struct ieee80211_channel *chan;
+ } links[2];
+
+ struct {
+ struct {
+ u8 rsv[4];
+ } __packed hdr;
+ struct roc_acquire_tlv roc[2];
+ } __packed req = {};
+
+ if (!mconf || hweight16(vif->valid_links) < 2 ||
+ hweight16(sel_links) != 2)
+ return -EPERM;
+
+ for (i = 0; i < ARRAY_SIZE(links); i++) {
+ links[i].id = i ? __ffs(~BIT(mconf->link_id) & sel_links) :
+ mconf->link_id;
+ link_conf = mt792x_vif_to_bss_conf(vif, links[i].id);
+ if (WARN_ON_ONCE(!link_conf))
+ return -EPERM;
+
+ links[i].chan = link_conf->chanreq.oper.chan;
+ if (WARN_ON_ONCE(!links[i].chan))
+ return -EPERM;
+
+ links[i].mconf = mt792x_vif_to_link(mvif, links[i].id);
+ links[i].tag = links[i].id == mconf->link_id ?
+ UNI_ROC_ACQUIRE : UNI_ROC_SUB_LINK;
+
+ is_AG_band |= links[i].chan->band == NL80211_BAND_2GHZ;
+ }
+
+ if (vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP)
+ type = is_AG_band ? MT7925_ROC_REQ_MLSR_AG :
+ MT7925_ROC_REQ_MLSR_AA;
+ else
+ type = MT7925_ROC_REQ_JOIN;
+
+ for (i = 0; i < ARRAY_SIZE(links) && i < hweight16(vif->active_links); i++) {
+ if (WARN_ON_ONCE(!links[i].mconf || !links[i].chan))
+ continue;
+
+ chan = links[i].chan;
+ center_ch = ieee80211_frequency_to_channel(chan->center_freq);
+ req.roc[i].len = cpu_to_le16(sizeof(struct roc_acquire_tlv));
+ req.roc[i].tag = cpu_to_le16(links[i].tag);
+ req.roc[i].tokenid = token_id;
+ req.roc[i].reqtype = type;
+ req.roc[i].maxinterval = cpu_to_le32(duration);
+ req.roc[i].bss_idx = links[i].mconf->mt76.idx;
+ req.roc[i].control_channel = chan->hw_value;
+ req.roc[i].bw = CMD_CBW_20MHZ;
+ req.roc[i].bw_from_ap = CMD_CBW_20MHZ;
+ req.roc[i].center_chan = center_ch;
+ req.roc[i].center_chan_from_ap = center_ch;
+
+ /* STR: 0xfe indicates BAND_ALL with DBDC enabled
+ * EMLSR: 0xff indicates BAND_AUTO without DBDC
+ */
+ req.roc[i].dbdcband = type == MT7925_ROC_REQ_JOIN ? 0xfe : 0xff;
+
+ if (chan->hw_value < center_ch)
+ req.roc[i].sco = 1; /* SCA */
+ else if (chan->hw_value > center_ch)
+ req.roc[i].sco = 3; /* SCB */
+
+ req.roc[i].band = ch_band[chan->band];
+ }
+
+ return mt76_mcu_send_msg(&mvif->phy->dev->mt76, MCU_UNI_CMD(ROC),
+ &req, sizeof(req), false);
+}
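The peer-link pick in mt7925_mcu_set_mlo_roc() rests on a small bit trick: with exactly two bits set in sel_links, clearing the caller's own bit and taking __ffs() of the remainder yields the other link's id. In isolation:

#include <linux/bitops.h>

/* Given a two-bit link mask and one member, return the other member's id. */
static unsigned int demo_peer_link(unsigned long sel_links,
				   unsigned int own_id)
{
	return __ffs(~BIT(own_id) & sel_links);
}

/* e.g. demo_peer_link(BIT(0) | BIT(2), 0) == 2 */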
+
+int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
struct ieee80211_channel *chan, int duration,
enum mt7925_roc_req type, u8 token_id)
{
@@ -1059,25 +1225,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
struct {
u8 rsv[4];
} __packed hdr;
- struct roc_acquire_tlv {
- __le16 tag;
- __le16 len;
- u8 bss_idx;
- u8 tokenid;
- u8 control_channel;
- u8 sco;
- u8 band;
- u8 bw;
- u8 center_chan;
- u8 center_chan2;
- u8 bw_from_ap;
- u8 center_chan_from_ap;
- u8 center_chan2_from_ap;
- u8 reqtype;
- __le32 maxinterval;
- u8 dbdcband;
- u8 rsv[3];
- } __packed roc;
+ struct roc_acquire_tlv roc;
} __packed req = {
.roc = {
.tag = cpu_to_le16(UNI_ROC_ACQUIRE),
@@ -1085,7 +1233,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
.tokenid = token_id,
.reqtype = type,
.maxinterval = cpu_to_le32(duration),
- .bss_idx = vif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
.control_channel = chan->hw_value,
.bw = CMD_CBW_20MHZ,
.bw_from_ap = CMD_CBW_20MHZ,
@@ -1116,7 +1264,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
&req, sizeof(req), false);
}
-int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
+int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
u8 token_id)
{
struct mt792x_dev *dev = phy->dev;
@@ -1137,7 +1285,7 @@ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
.tag = cpu_to_le16(UNI_ROC_ABORT),
.len = cpu_to_le16(sizeof(struct roc_abort_tlv)),
.tokenid = token_id,
- .bss_idx = vif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
.dbdcband = 0xff, /* auto */
},
};
@@ -1146,80 +1294,6 @@ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
&req, sizeof(req), false);
}
-int mt7925_mcu_set_chan_info(struct mt792x_phy *phy, u16 tag)
-{
- static const u8 ch_band[] = {
- [NL80211_BAND_2GHZ] = 0,
- [NL80211_BAND_5GHZ] = 1,
- [NL80211_BAND_6GHZ] = 2,
- };
- struct mt792x_dev *dev = phy->dev;
- struct cfg80211_chan_def *chandef = &phy->mt76->chandef;
- int freq1 = chandef->center_freq1;
- u8 band_idx = chandef->chan->band != NL80211_BAND_2GHZ;
- struct {
- /* fixed field */
- u8 __rsv[4];
-
- __le16 tag;
- __le16 len;
- u8 control_ch;
- u8 center_ch;
- u8 bw;
- u8 tx_path_num;
- u8 rx_path; /* mask or num */
- u8 switch_reason;
- u8 band_idx;
- u8 center_ch2; /* for 80+80 only */
- __le16 cac_case;
- u8 channel_band;
- u8 rsv0;
- __le32 outband_freq;
- u8 txpower_drop;
- u8 ap_bw;
- u8 ap_center_ch;
- u8 rsv1[53];
- } __packed req = {
- .tag = cpu_to_le16(tag),
- .len = cpu_to_le16(sizeof(req) - 4),
- .control_ch = chandef->chan->hw_value,
- .center_ch = ieee80211_frequency_to_channel(freq1),
- .bw = mt76_connac_chan_bw(chandef),
- .tx_path_num = hweight8(phy->mt76->antenna_mask),
- .rx_path = phy->mt76->antenna_mask,
- .band_idx = band_idx,
- .channel_band = ch_band[chandef->chan->band],
- };
-
- if (chandef->chan->band == NL80211_BAND_6GHZ)
- req.channel_band = 2;
- else
- req.channel_band = chandef->chan->band;
-
- if (tag == UNI_CHANNEL_RX_PATH ||
- dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR)
- req.switch_reason = CH_SWITCH_NORMAL;
- else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
- req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
- else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef,
- NL80211_IFTYPE_AP))
- req.switch_reason = CH_SWITCH_DFS;
- else
- req.switch_reason = CH_SWITCH_NORMAL;
-
- if (tag == UNI_CHANNEL_SWITCH)
- req.rx_path = hweight8(req.rx_path);
-
- if (chandef->width == NL80211_CHAN_WIDTH_80P80) {
- int freq2 = chandef->center_freq2;
-
- req.center_ch2 = ieee80211_frequency_to_channel(freq2);
- }
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(CHANNEL_SWITCH),
- &req, sizeof(req), true);
-}
-
int mt7925_mcu_set_eeprom(struct mt792x_dev *dev)
{
struct {
@@ -1242,9 +1316,10 @@ int mt7925_mcu_set_eeprom(struct mt792x_dev *dev)
}
EXPORT_SYMBOL_GPL(mt7925_mcu_set_eeprom);
-int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif)
+int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *link_conf)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct {
struct {
u8 bss_idx;
@@ -1263,16 +1338,16 @@ int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif)
} __packed ps;
} __packed ps_req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
},
.ps = {
.tag = cpu_to_le16(UNI_BSS_INFO_PS),
.len = cpu_to_le16(sizeof(struct ps_tlv)),
- .ps_state = vif->cfg.ps ? 2 : 0,
+ .ps_state = link_conf->vif->cfg.ps ? 2 : 0,
},
};
- if (vif->type != NL80211_IFTYPE_STATION)
+ if (link_conf->vif->type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
@@ -1280,10 +1355,10 @@ int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif)
}
static int
-mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif,
- bool enable)
+mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *link_conf, bool enable)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct {
struct {
u8 bss_idx;
@@ -1300,17 +1375,17 @@ mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif,
} __packed bcnft;
} __packed bcnft_req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
},
.bcnft = {
.tag = cpu_to_le16(UNI_BSS_INFO_BCNFT),
.len = cpu_to_le16(sizeof(struct bcnft_tlv)),
- .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
- .dtim_period = vif->bss_conf.dtim_period,
+ .bcn_interval = cpu_to_le16(link_conf->beacon_int),
+ .dtim_period = link_conf->dtim_period,
},
};
- if (vif->type != NL80211_IFTYPE_STATION)
+ if (link_conf->vif->type != NL80211_IFTYPE_STATION)
return 0;
return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE),
@@ -1318,10 +1393,11 @@ mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif,
}
int
-mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+mt7925_mcu_set_bss_pm(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
bool enable)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct {
struct {
u8 bss_idx;
@@ -1338,13 +1414,13 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
} __packed enable;
} req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
},
.enable = {
.tag = cpu_to_le16(UNI_BSS_INFO_BCNFT),
.len = cpu_to_le16(sizeof(struct bcnft_tlv)),
- .dtim_period = vif->bss_conf.dtim_period,
- .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int),
+ .dtim_period = link_conf->dtim_period,
+ .bcn_interval = cpu_to_le16(link_conf->beacon_int),
},
};
struct {
@@ -1358,7 +1434,7 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
} __packed disable;
} req1 = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mconf->mt76.idx,
},
.disable = {
.tag = cpu_to_le16(UNI_BSS_INFO_PM_DISABLE),
@@ -1377,42 +1453,43 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
}
static void
-mt7925_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7925_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta)
{
- if (!sta->deflink.he_cap.has_he)
+ if (!link_sta->he_cap.has_he)
return;
- mt76_connac_mcu_sta_he_tlv_v2(skb, sta);
+ mt76_connac_mcu_sta_he_tlv_v2(skb, link_sta->sta);
}
static void
-mt7925_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7925_mcu_sta_he_6g_tlv(struct sk_buff *skb,
+ struct ieee80211_link_sta *link_sta)
{
struct sta_rec_he_6g_capa *he_6g;
struct tlv *tlv;
- if (!sta->deflink.he_6ghz_capa.capa)
+ if (!link_sta->he_6ghz_capa.capa)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G, sizeof(*he_6g));
he_6g = (struct sta_rec_he_6g_capa *)tlv;
- he_6g->capa = sta->deflink.he_6ghz_capa.capa;
+ he_6g->capa = link_sta->he_6ghz_capa.capa;
}
static void
-mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta)
{
struct ieee80211_eht_mcs_nss_supp *mcs_map;
struct ieee80211_eht_cap_elem_fixed *elem;
struct sta_rec_eht *eht;
struct tlv *tlv;
- if (!sta->deflink.eht_cap.has_eht)
+ if (!link_sta->eht_cap.has_eht)
return;
- mcs_map = &sta->deflink.eht_cap.eht_mcs_nss_supp;
- elem = &sta->deflink.eht_cap.eht_cap_elem;
+ mcs_map = &link_sta->eht_cap.eht_mcs_nss_supp;
+ elem = &link_sta->eht_cap.eht_cap_elem;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT, sizeof(*eht));
@@ -1422,50 +1499,52 @@ mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info);
eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]);
- if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+ if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20));
memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80));
memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160));
}
static void
-mt7925_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7925_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta)
{
struct sta_rec_ht *ht;
struct tlv *tlv;
- if (!sta->deflink.ht_cap.ht_supported)
+ if (!link_sta->ht_cap.ht_supported)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
ht = (struct sta_rec_ht *)tlv;
- ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap);
+ ht->ht_cap = cpu_to_le16(link_sta->ht_cap.cap);
}
static void
-mt7925_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7925_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta)
{
struct sta_rec_vht *vht;
struct tlv *tlv;
/* For 6G band, this tlv is necessary to let hw work normally */
- if (!sta->deflink.he_6ghz_capa.capa && !sta->deflink.vht_cap.vht_supported)
+ if (!link_sta->he_6ghz_capa.capa && !link_sta->vht_cap.vht_supported)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
vht = (struct sta_rec_vht *)tlv;
- vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap);
- vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
- vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
+ vht->vht_cap = cpu_to_le32(link_sta->vht_cap.cap);
+ vht->vht_rx_mcs_map = link_sta->vht_cap.vht_mcs.rx_mcs_map;
+ vht->vht_tx_mcs_map = link_sta->vht_cap.vht_mcs.tx_mcs_map;
}
static void
mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta)
{
- struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ struct mt792x_sta *msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ struct mt792x_link_sta *mlink;
struct sta_rec_amsdu *amsdu;
struct tlv *tlv;
@@ -1473,16 +1552,18 @@ mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb,
vif->type != NL80211_IFTYPE_AP)
return;
- if (!sta->deflink.agg.max_amsdu_len)
+ if (!link_sta->agg.max_amsdu_len)
return;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu));
amsdu = (struct sta_rec_amsdu *)tlv;
amsdu->max_amsdu_num = 8;
amsdu->amsdu_en = true;
- msta->wcid.amsdu = true;
- switch (sta->deflink.agg.max_amsdu_len) {
+ mlink = mt792x_sta_to_link(msta, link_sta->link_id);
+ mlink->wcid.amsdu = true;
+
+ switch (link_sta->agg.max_amsdu_len) {
case IEEE80211_MAX_MPDU_LEN_VHT_11454:
amsdu->max_mpdu_size =
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
@@ -1499,34 +1580,44 @@ mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb,
static void
mt7925_mcu_sta_phy_tlv(struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->mt76.ctx->def;
+ struct ieee80211_bss_conf *link_conf;
+ struct cfg80211_chan_def *chandef;
+ struct mt792x_bss_conf *mconf;
struct sta_rec_phy *phy;
struct tlv *tlv;
u8 af = 0, mm = 0;
+ link_conf = mt792x_vif_to_bss_conf(vif, link_sta->link_id);
+ mconf = mt792x_vif_to_link(mvif, link_sta->link_id);
+ chandef = mconf->mt76.ctx ? &mconf->mt76.ctx->def :
+ &link_conf->chanreq.oper;
+
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
phy = (struct sta_rec_phy *)tlv;
- phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif, chandef->chan->band, sta);
- phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates);
- if (sta->deflink.ht_cap.ht_supported) {
- af = sta->deflink.ht_cap.ampdu_factor;
- mm = sta->deflink.ht_cap.ampdu_density;
+ phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif,
+ chandef->chan->band,
+ link_sta);
+ phy->basic_rate = cpu_to_le16((u16)link_conf->basic_rates);
+ if (link_sta->ht_cap.ht_supported) {
+ af = link_sta->ht_cap.ampdu_factor;
+ mm = link_sta->ht_cap.ampdu_density;
}
- if (sta->deflink.vht_cap.vht_supported) {
+ if (link_sta->vht_cap.vht_supported) {
u8 vht_af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
- sta->deflink.vht_cap.cap);
+ link_sta->vht_cap.cap);
af = max_t(u8, af, vht_af);
}
- if (sta->deflink.he_6ghz_capa.capa) {
- af = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ if (link_sta->he_6ghz_capa.capa) {
+ af = le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
- mm = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
+ mm = le16_get_bits(link_sta->he_6ghz_capa.capa,
IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
}
@@ -1537,7 +1628,7 @@ mt7925_mcu_sta_phy_tlv(struct sk_buff *skb,
static void
mt7925_mcu_sta_state_v2_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
- struct ieee80211_sta *sta,
+ struct ieee80211_link_sta *link_sta,
struct ieee80211_vif *vif,
u8 rcpi, u8 sta_state)
{
@@ -1557,28 +1648,37 @@ mt7925_mcu_sta_state_v2_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
state = (struct sta_rec_state_v2 *)tlv;
state->state = sta_state;
- if (sta->deflink.vht_cap.vht_supported) {
- state->vht_opmode = sta->deflink.bandwidth;
- state->vht_opmode |= sta->deflink.rx_nss <<
+ if (link_sta->vht_cap.vht_supported) {
+ state->vht_opmode = link_sta->bandwidth;
+ state->vht_opmode |= link_sta->rx_nss <<
IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
}
}
static void
mt7925_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+ struct ieee80211_vif *vif,
+ struct ieee80211_link_sta *link_sta)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct cfg80211_chan_def *chandef = &mvif->mt76.ctx->def;
- enum nl80211_band band = chandef->chan->band;
+ struct ieee80211_bss_conf *link_conf;
+ struct cfg80211_chan_def *chandef;
struct sta_rec_ra_info *ra_info;
+ struct mt792x_bss_conf *mconf;
+ enum nl80211_band band;
struct tlv *tlv;
u16 supp_rates;
+ link_conf = mt792x_vif_to_bss_conf(vif, link_sta->link_id);
+ mconf = mt792x_vif_to_link(mvif, link_sta->link_id);
+ chandef = mconf->mt76.ctx ? &mconf->mt76.ctx->def :
+ &link_conf->chanreq.oper;
+ band = chandef->chan->band;
+
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
ra_info = (struct sta_rec_ra_info *)tlv;
- supp_rates = sta->deflink.supp_rates[band];
+ supp_rates = link_sta->supp_rates[band];
if (band == NL80211_BAND_2GHZ)
supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates >> 4) |
FIELD_PREP(RA_LEGACY_CCK, supp_rates & 0xf);
@@ -1587,29 +1687,80 @@ mt7925_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb,
ra_info->legacy = cpu_to_le16(supp_rates);
- if (sta->deflink.ht_cap.ht_supported)
+ if (link_sta->ht_cap.ht_supported)
memcpy(ra_info->rx_mcs_bitmask,
- sta->deflink.ht_cap.mcs.rx_mask,
+ link_sta->ht_cap.mcs.rx_mask,
HT_MCS_MASK_NUM);
}
static void
+mt7925_mcu_sta_eht_mld_tlv(struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct wiphy *wiphy = mvif->phy->mt76->hw->wiphy;
+ const struct wiphy_iftype_ext_capab *ext_capa;
+ struct sta_rec_eht_mld *eht_mld;
+ struct tlv *tlv;
+ u16 eml_cap;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT_MLD, sizeof(*eht_mld));
+ eht_mld = (struct sta_rec_eht_mld *)tlv;
+ eht_mld->mld_type = 0xff;
+
+ if (!ieee80211_vif_is_mld(vif))
+ return;
+
+ ext_capa = cfg80211_get_iftype_ext_capa(wiphy,
+ ieee80211_vif_type_p2p(vif));
+ if (!ext_capa)
+ return;
+
+ eml_cap = (vif->cfg.eml_cap & (IEEE80211_EML_CAP_EMLSR_SUPP |
+ IEEE80211_EML_CAP_TRANSITION_TIMEOUT)) |
+ (ext_capa->eml_capabilities & (IEEE80211_EML_CAP_EMLSR_PADDING_DELAY |
+ IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY));
+
+ if (eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP) {
+ eht_mld->eml_cap[0] = u16_get_bits(eml_cap, GENMASK(7, 0));
+ eht_mld->eml_cap[1] = u16_get_bits(eml_cap, GENMASK(15, 8));
+ } else {
+ eht_mld->str_cap[0] = BIT(1);
+ }
+}
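The EML capability word sent to firmware is composed from two sources: EMLSR support and the transition timeout come from the vif configuration, while the padding and transition delays come from the iftype extended capabilities. The masking, spelled out with the same IEEE80211_EML_CAP_* masks:

#include <linux/ieee80211.h>

static u16 demo_compose_eml_cap(u16 vif_eml_cap, u16 iftype_eml_cap)
{
	u16 eml_cap;

	/* EMLSR support and transition timeout come from the vif config */
	eml_cap = vif_eml_cap & (IEEE80211_EML_CAP_EMLSR_SUPP |
				 IEEE80211_EML_CAP_TRANSITION_TIMEOUT);
	/* padding/transition delays come from the iftype ext capabilities */
	eml_cap |= iftype_eml_cap & (IEEE80211_EML_CAP_EMLSR_PADDING_DELAY |
				     IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY);

	return eml_cap;
}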
+
+static void
mt7925_mcu_sta_mld_tlv(struct sk_buff *skb,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
- struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
+ unsigned long valid = mvif->valid_links;
+ struct mt792x_bss_conf *mconf;
+ struct mt792x_link_sta *mlink;
struct sta_rec_mld *mld;
struct tlv *tlv;
+ int i, cnt = 0;
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MLD, sizeof(*mld));
mld = (struct sta_rec_mld *)tlv;
- memcpy(mld->mac_addr, vif->addr, ETH_ALEN);
- mld->primary_id = cpu_to_le16(wcid->idx);
- mld->wlan_id = cpu_to_le16(wcid->idx);
+ memcpy(mld->mac_addr, sta->addr, ETH_ALEN);
+ mld->primary_id = cpu_to_le16(msta->deflink.wcid.idx);
+ mld->wlan_id = cpu_to_le16(msta->deflink.wcid.idx);
+ mld->link_num = min_t(u8, hweight16(mvif->valid_links), 2);
+
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ if (cnt == mld->link_num)
+ break;
+
+ mconf = mt792x_vif_to_link(mvif, i);
+ mlink = mt792x_sta_to_link(msta, i);
+ mld->link[cnt].wlan_id = cpu_to_le16(mlink->wcid.idx);
+ mld->link[cnt++].bss_idx = mconf->mt76.idx;
- /* TODO: 0 means deflink only, add secondary link(1) later */
- mld->link_num = !!(hweight8(vif->active_links) > 1);
- WARN_ON_ONCE(mld->link_num);
+ if (mlink != &msta->deflink)
+ mld->secondary_id = cpu_to_le16(mlink->wcid.idx);
+ }
}
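The MLD sta record now advertises the MLD address plus up to two (wlan_id, bss_idx) pairs harvested from the valid-links bitmap; the default link doubles as primary and any other link becomes the secondary. The packing loop in isolation (the demo_* layout is illustrative):

#include <linux/bitops.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_MAX_LINKS	15

struct demo_mld_link {
	__le16 wlan_id;
	u8 bss_idx;
} __packed;

/* Fill at most @max entries, one per set bit, lowest link id first. */
static int demo_pack_mld_links(struct demo_mld_link *out, int max,
			       unsigned long valid, const u16 *wcid_idx,
			       const u8 *bss_idx)
{
	unsigned int i;
	int cnt = 0;

	for_each_set_bit(i, &valid, DEMO_MAX_LINKS) {
		if (cnt == max)
			break;
		out[cnt].wlan_id = cpu_to_le16(wcid_idx[i]);
		out[cnt++].bss_idx = bss_idx[i];
	}

	return cnt;
}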
static int
@@ -1625,39 +1776,106 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy,
if (IS_ERR(skb))
return PTR_ERR(skb);
- if (info->sta || !info->offload_fw)
- mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, info->sta,
+ if (info->link_sta)
+ mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
+ info->link_sta,
info->enable, info->newly);
- if (info->sta && info->enable) {
- mt7925_mcu_sta_phy_tlv(skb, info->vif, info->sta);
- mt7925_mcu_sta_ht_tlv(skb, info->sta);
- mt7925_mcu_sta_vht_tlv(skb, info->sta);
- mt76_connac_mcu_sta_uapsd(skb, info->vif, info->sta);
- mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->sta);
- mt7925_mcu_sta_he_tlv(skb, info->sta);
- mt7925_mcu_sta_he_6g_tlv(skb, info->sta);
- mt7925_mcu_sta_eht_tlv(skb, info->sta);
- mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif, info->sta);
- mt7925_mcu_sta_state_v2_tlv(phy, skb, info->sta,
+ if (info->link_sta && info->enable) {
+ mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
+ mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_vht_tlv(skb, info->link_sta);
+ mt76_connac_mcu_sta_uapsd(skb, info->vif, info->link_sta->sta);
+ mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->link_sta);
+ mt7925_mcu_sta_he_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_he_6g_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_eht_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif,
+ info->link_sta);
+ mt7925_mcu_sta_state_v2_tlv(phy, skb, info->link_sta,
info->vif, info->rcpi,
info->state);
- mt7925_mcu_sta_mld_tlv(skb, info->vif, info->sta);
+ mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta);
}
if (info->enable)
- mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta);
+ mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
}
-int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
+static void
+mt7925_mcu_sta_remove_tlv(struct sk_buff *skb)
+{
+ struct sta_rec_remove *rem;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, 0x25, sizeof(*rem));
+ rem = (struct sta_rec_remove *)tlv;
+ rem->action = 0;
+}
+
+static int
+mt7925_mcu_mlo_sta_cmd(struct mt76_phy *phy,
+ struct mt76_sta_cmd_info *info)
+{
+ struct mt792x_vif *mvif = (struct mt792x_vif *)info->vif->drv_priv;
+ struct mt76_dev *dev = phy->dev;
+ struct mt792x_bss_conf *mconf;
+ struct sk_buff *skb;
+
+ mconf = mt792x_vif_to_link(mvif, info->wcid->link_id);
+
+ skb = __mt76_connac_mcu_alloc_sta_req(dev, &mconf->mt76, info->wcid,
+ MT7925_STA_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ if (info->enable)
+ mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif,
+ info->link_sta,
+ info->enable, info->newly);
+
+ if (info->enable && info->link_sta) {
+ mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta);
+ mt7925_mcu_sta_ht_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_vht_tlv(skb, info->link_sta);
+ mt76_connac_mcu_sta_uapsd(skb, info->vif, info->link_sta->sta);
+ mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->link_sta);
+ mt7925_mcu_sta_he_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_he_6g_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_eht_tlv(skb, info->link_sta);
+ mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif,
+ info->link_sta);
+ mt7925_mcu_sta_state_v2_tlv(phy, skb, info->link_sta,
+ info->vif, info->rcpi,
+ info->state);
+
+ if (info->state != MT76_STA_INFO_STATE_NONE) {
+ mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta);
+ mt7925_mcu_sta_eht_mld_tlv(skb, info->vif, info->link_sta->sta);
+ }
+
+ mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta);
+ }
+
+ if (!info->enable) {
+ mt7925_mcu_sta_remove_tlv(skb);
+ mt76_connac_mcu_add_tlv(skb, STA_REC_MLD_OFF,
+ sizeof(struct tlv));
+ }
+
+ return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true);
+}
+
+int mt7925_mcu_sta_update(struct mt792x_dev *dev,
+ struct ieee80211_link_sta *link_sta,
struct ieee80211_vif *vif, bool enable,
enum mt76_sta_info_state state)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- int rssi = -ewma_rssi_read(&mvif->rssi);
+ int rssi = -ewma_rssi_read(&mvif->bss_conf.rssi);
struct mt76_sta_cmd_info info = {
- .sta = sta,
+ .link_sta = link_sta,
.vif = vif,
.enable = enable,
.cmd = MCU_UNI_CMD(STA_REC_UPDATE),
@@ -1666,12 +1884,22 @@ int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
.rcpi = to_rcpi(rssi),
};
struct mt792x_sta *msta;
+ struct mt792x_link_sta *mlink;
+ int err;
+
+ if (link_sta) {
+ msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ mlink = mt792x_sta_to_link(msta, link_sta->link_id);
+ }
+ info.wcid = link_sta ? &mlink->wcid : &mvif->sta.deflink.wcid;
+ info.newly = link_sta ? state != MT76_STA_INFO_STATE_ASSOC : true;
- msta = sta ? (struct mt792x_sta *)sta->drv_priv : NULL;
- info.wcid = msta ? &msta->wcid : &mvif->sta.wcid;
- info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true;
+ if (ieee80211_vif_is_mld(vif))
+ err = mt7925_mcu_mlo_sta_cmd(&dev->mphy, &info);
+ else
+ err = mt7925_mcu_sta_cmd(&dev->mphy, &info);
- return mt7925_mcu_sta_cmd(&dev->mphy, &info);
+ return err;
}
int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
@@ -1680,21 +1908,32 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
{
#define MT7925_FIF_BIT_CLR BIT(1)
#define MT7925_FIF_BIT_SET BIT(0)
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ unsigned long valid = ieee80211_vif_is_mld(vif) ?
+ mvif->valid_links : BIT(0);
+ struct ieee80211_bss_conf *bss_conf;
int err = 0;
+ int i;
if (enable) {
- err = mt7925_mcu_uni_bss_bcnft(dev, vif, true);
- if (err)
- return err;
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ err = mt7925_mcu_uni_bss_bcnft(dev, bss_conf, true);
+ if (err < 0)
+ return err;
+ }
return mt7925_mcu_set_rxfilter(dev, 0,
MT7925_FIF_BIT_SET,
MT_WF_RFCR_DROP_OTHER_BEACON);
}
- err = mt7925_mcu_set_bss_pm(dev, vif, false);
- if (err)
- return err;
+ for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) {
+ bss_conf = mt792x_vif_to_bss_conf(vif, i);
+ err = mt7925_mcu_set_bss_pm(dev, bss_conf, false);
+ if (err)
+ return err;
+ }
return mt7925_mcu_set_rxfilter(dev, 0,
MT7925_FIF_BIT_CLR,
@@ -1746,7 +1985,7 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif,
} __packed enable;
} __packed req = {
.hdr = {
- .band_idx = mvif->mt76.band_idx,
+ .band_idx = mvif->bss_conf.mt76.band_idx,
},
.enable = {
.tag = cpu_to_le16(UNI_SNIFFER_ENABLE),
@@ -1768,12 +2007,12 @@ int mt7925_mcu_config_sniffer(struct mt792x_vif *vif,
struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &mphy->chandef;
int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
- const u8 ch_band[] = {
+ static const u8 ch_band[] = {
[NL80211_BAND_2GHZ] = 1,
[NL80211_BAND_5GHZ] = 2,
[NL80211_BAND_6GHZ] = 3,
};
- const u8 ch_width[] = {
+ static const u8 ch_width[] = {
[NL80211_CHAN_WIDTH_20_NOHT] = 0,
[NL80211_CHAN_WIDTH_20] = 0,
[NL80211_CHAN_WIDTH_40] = 0,
@@ -1805,7 +2044,7 @@ int mt7925_mcu_config_sniffer(struct mt792x_vif *vif,
} __packed tlv;
} __packed req = {
.hdr = {
- .band_idx = vif->mt76.band_idx,
+ .band_idx = vif->bss_conf.mt76.band_idx,
},
.tlv = {
.tag = cpu_to_le16(UNI_SNIFFER_CONFIG),
@@ -1866,7 +2105,7 @@ mt7925_mcu_uni_add_beacon_offload(struct mt792x_dev *dev,
} __packed beacon_tlv;
} req = {
.hdr = {
- .bss_idx = mvif->mt76.idx,
+ .bss_idx = mvif->bss_conf.mt76.idx,
},
.beacon_tlv = {
.tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT),
@@ -1918,83 +2157,59 @@ mt7925_mcu_uni_add_beacon_offload(struct mt792x_dev *dev,
&req, sizeof(req), true);
}
-int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
- struct ieee80211_chanctx_conf *ctx)
+static
+void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
{
- struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
+ struct cfg80211_chan_def *chandef = ctx ? &ctx->def :
+ &link_conf->chanreq.oper;
int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2;
enum nl80211_band band = chandef->chan->band;
- struct mt76_dev *mdev = phy->dev;
- struct {
- struct {
- u8 bss_idx;
- u8 pad[3];
- } __packed hdr;
- struct rlm_tlv {
- __le16 tag;
- __le16 len;
- u8 control_channel;
- u8 center_chan;
- u8 center_chan2;
- u8 bw;
- u8 tx_streams;
- u8 rx_streams;
- u8 ht_op_info;
- u8 sco;
- u8 band;
- u8 pad[3];
- } __packed rlm;
- } __packed rlm_req = {
- .hdr = {
- .bss_idx = mvif->idx,
- },
- .rlm = {
- .tag = cpu_to_le16(UNI_BSS_INFO_RLM),
- .len = cpu_to_le16(sizeof(struct rlm_tlv)),
- .control_channel = chandef->chan->hw_value,
- .center_chan = ieee80211_frequency_to_channel(freq1),
- .center_chan2 = ieee80211_frequency_to_channel(freq2),
- .tx_streams = hweight8(phy->antenna_mask),
- .ht_op_info = 4, /* set HT 40M allowed */
- .rx_streams = hweight8(phy->antenna_mask),
- .band = band,
- },
- };
+ struct bss_rlm_tlv *req;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_RLM, sizeof(*req));
+ req = (struct bss_rlm_tlv *)tlv;
+ req->control_channel = chandef->chan->hw_value;
+ req->center_chan = ieee80211_frequency_to_channel(freq1);
+ req->center_chan2 = ieee80211_frequency_to_channel(freq2);
+ req->tx_streams = hweight8(phy->antenna_mask);
+ req->ht_op_info = 4; /* set HT 40M allowed */
+ req->rx_streams = hweight8(phy->antenna_mask);
+ req->band = band;
switch (chandef->width) {
case NL80211_CHAN_WIDTH_40:
- rlm_req.rlm.bw = CMD_CBW_40MHZ;
+ req->bw = CMD_CBW_40MHZ;
break;
case NL80211_CHAN_WIDTH_80:
- rlm_req.rlm.bw = CMD_CBW_80MHZ;
+ req->bw = CMD_CBW_80MHZ;
break;
case NL80211_CHAN_WIDTH_80P80:
- rlm_req.rlm.bw = CMD_CBW_8080MHZ;
+ req->bw = CMD_CBW_8080MHZ;
break;
case NL80211_CHAN_WIDTH_160:
- rlm_req.rlm.bw = CMD_CBW_160MHZ;
+ req->bw = CMD_CBW_160MHZ;
break;
case NL80211_CHAN_WIDTH_5:
- rlm_req.rlm.bw = CMD_CBW_5MHZ;
+ req->bw = CMD_CBW_5MHZ;
break;
case NL80211_CHAN_WIDTH_10:
- rlm_req.rlm.bw = CMD_CBW_10MHZ;
+ req->bw = CMD_CBW_10MHZ;
break;
case NL80211_CHAN_WIDTH_20_NOHT:
case NL80211_CHAN_WIDTH_20:
default:
- rlm_req.rlm.bw = CMD_CBW_20MHZ;
- rlm_req.rlm.ht_op_info = 0;
+ req->bw = CMD_CBW_20MHZ;
+ req->ht_op_info = 0;
break;
}
- if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan)
- rlm_req.rlm.sco = 1; /* SCA */
- else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan)
- rlm_req.rlm.sco = 3; /* SCB */
-
- return mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &rlm_req,
- sizeof(rlm_req), true);
+ if (req->control_channel < req->center_chan)
+ req->sco = 1; /* SCA */
+ else if (req->control_channel > req->center_chan)
+ req->sco = 3; /* SCB */
}
static struct sk_buff *
@@ -2014,18 +2229,36 @@ __mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len)
return skb;
}
+int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_chanctx_conf *ctx)
+{
+ struct sk_buff *skb;
+
+ skb = __mt7925_mcu_alloc_bss_req(phy->dev, mvif,
+ MT7925_BSS_UPDATE_MAX_SIZE);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ mt7925_mcu_bss_rlm_tlv(skb, phy, link_conf, ctx);
+
+ return mt76_mcu_skb_send_msg(phy->dev, skb,
+ MCU_UNI_CMD(BSS_INFO_UPDATE), true);
+}
+
static u8
mt7925_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
- enum nl80211_band band, struct ieee80211_sta *sta)
+ enum nl80211_band band,
+ struct ieee80211_link_sta *link_sta)
{
struct ieee80211_he_6ghz_capa *he_6ghz_capa;
const struct ieee80211_sta_eht_cap *eht_cap;
__le16 capa = 0;
u8 mode = 0;
- if (sta) {
- he_6ghz_capa = &sta->deflink.he_6ghz_capa;
- eht_cap = &sta->deflink.eht_cap;
+ if (link_sta) {
+ he_6ghz_capa = &link_sta->he_6ghz_capa;
+ eht_cap = &link_sta->eht_cap;
} else {
struct ieee80211_supported_band *sband;
@@ -2061,18 +2294,19 @@ mt7925_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif,
static void
mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
struct ieee80211_chanctx_conf *ctx,
struct mt76_phy *phy, u16 wlan_idx,
bool enable)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv :
- &mvif->sta;
- struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef;
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ struct cfg80211_chan_def *chandef = ctx ? &ctx->def :
+ &link_conf->chanreq.oper;
enum nl80211_band band = chandef->chan->band;
struct mt76_connac_bss_basic_tlv *basic_req;
+ struct mt792x_link_sta *mlink;
struct tlv *tlv;
int conn_type;
u8 idx;
@@ -2080,26 +2314,39 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*basic_req));
basic_req = (struct mt76_connac_bss_basic_tlv *)tlv;
- idx = mvif->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0 :
- mvif->mt76.omac_idx;
+ idx = mconf->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0 :
+ mconf->mt76.omac_idx;
basic_req->hw_bss_idx = idx;
- basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band, sta);
+ basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band,
+ link_sta);
if (band == NL80211_BAND_2GHZ)
basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_ERP_INDEX);
else
basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_OFDM_INDEX);
- memcpy(basic_req->bssid, vif->bss_conf.bssid, ETH_ALEN);
- basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, sta);
- basic_req->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
- basic_req->dtim_period = vif->bss_conf.dtim_period;
+ memcpy(basic_req->bssid, link_conf->bssid, ETH_ALEN);
+ basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, link_sta);
+ basic_req->bcn_interval = cpu_to_le16(link_conf->beacon_int);
+ basic_req->dtim_period = link_conf->dtim_period;
basic_req->bmc_tx_wlan_idx = cpu_to_le16(wlan_idx);
- basic_req->sta_idx = cpu_to_le16(msta->wcid.idx);
- basic_req->omac_idx = mvif->mt76.omac_idx;
- basic_req->band_idx = mvif->mt76.band_idx;
- basic_req->wmm_idx = mvif->mt76.wmm_idx;
+ basic_req->link_idx = mconf->mt76.idx;
+
+ if (link_sta) {
+ struct mt792x_sta *msta;
+
+ msta = (struct mt792x_sta *)link_sta->sta->drv_priv;
+ mlink = mt792x_sta_to_link(msta, link_sta->link_id);
+ } else {
+ mlink = &mconf->vif->sta.deflink;
+ }
+
+ basic_req->sta_idx = cpu_to_le16(mlink->wcid.idx);
+ basic_req->omac_idx = mconf->mt76.omac_idx;
+ basic_req->band_idx = mconf->mt76.band_idx;
+ basic_req->wmm_idx = mconf->mt76.wmm_idx;
basic_req->conn_state = !enable;
switch (vif->type) {
@@ -2131,9 +2378,11 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb,
}
static void
-mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+mt7925_mcu_bss_sec_tlv(struct sk_buff *skb,
+ struct ieee80211_bss_conf *link_conf)
{
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ struct mt76_vif *mvif = &mconf->mt76;
struct bss_sec_tlv {
__le16 tag;
__le16 len;
@@ -2178,12 +2427,13 @@ mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
static void
mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_bss_conf *link_conf)
{
- struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->mt76->chandef;
- struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = ctx ? &ctx->def :
+ &link_conf->chanreq.oper;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
enum nl80211_band band = chandef->chan->band;
+ struct mt76_vif *mvif = &mconf->mt76;
struct bss_rate_tlv *bmc;
struct tlv *tlv;
u8 idx = mvif->mcast_rates_idx ?
@@ -2205,39 +2455,44 @@ mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy,
static void
mt7925_mcu_bss_mld_tlv(struct sk_buff *skb,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_bss_conf *link_conf)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- bool is_mld = ieee80211_vif_is_mld(vif);
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
+ struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv;
struct bss_mld_tlv *mld;
struct tlv *tlv;
+ bool is_mld;
+
+ is_mld = ieee80211_vif_is_mld(link_conf->vif) ||
+ (hweight16(mvif->valid_links) > 1);
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_MLD, sizeof(*mld));
mld = (struct bss_mld_tlv *)tlv;
- mld->link_id = sta ? (is_mld ? vif->bss_conf.link_id : 0) : 0xff;
- mld->group_mld_id = is_mld ? mvif->mt76.idx : 0xff;
- mld->own_mld_id = mvif->mt76.idx + 32;
+ mld->link_id = is_mld ? link_conf->link_id : 0xff;
+ /* apply the index of the primary link */
+ mld->group_mld_id = is_mld ? mvif->bss_conf.mt76.idx : 0xff;
+ mld->own_mld_id = mconf->mt76.idx + 32;
mld->remap_idx = 0xff;
+ mld->eml_enable = !!(link_conf->vif->cfg.eml_cap &
+ IEEE80211_EML_CAP_EMLSR_SUPP);
- if (sta)
- memcpy(mld->mac_addr, sta->addr, ETH_ALEN);
+ memcpy(mld->mac_addr, link_conf->addr, ETH_ALEN);
}
static void
-mt7925_mcu_bss_qos_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+mt7925_mcu_bss_qos_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf)
{
struct mt76_connac_bss_qos_tlv *qos;
struct tlv *tlv;
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_QBSS, sizeof(*qos));
qos = (struct mt76_connac_bss_qos_tlv *)tlv;
- qos->qos = vif->bss_conf.qos;
+ qos->qos = link_conf->qos;
}
static void
-mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf,
struct mt792x_phy *phy)
{
#define DEFAULT_HE_PE_DURATION 4
@@ -2246,16 +2501,16 @@ mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_uni_he *he;
struct tlv *tlv;
- cap = mt76_connac_get_he_phy_cap(phy->mt76, vif);
+ cap = mt76_connac_get_he_phy_cap(phy->mt76, link_conf->vif);
tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_HE_BASIC, sizeof(*he));
he = (struct bss_info_uni_he *)tlv;
- he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext;
+ he->he_pe_duration = link_conf->htc_trig_based_pkt_ext;
if (!he->he_pe_duration)
he->he_pe_duration = DEFAULT_HE_PE_DURATION;
- he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th);
+ he->he_rts_thres = cpu_to_le16(link_conf->frame_time_rts_th);
if (!he->he_rts_thres)
he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES);
@@ -2265,7 +2520,7 @@ mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
static void
-mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf,
bool enable)
{
struct bss_info_uni_bss_color *color;
@@ -2275,15 +2530,16 @@ mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
color = (struct bss_info_uni_bss_color *)tlv;
color->enable = enable ?
- vif->bss_conf.he_bss_color.enabled : 0;
+ link_conf->he_bss_color.enabled : 0;
color->bss_color = enable ?
- vif->bss_conf.he_bss_color.color : 0;
+ link_conf->he_bss_color.color : 0;
}
static void
-mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
+mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb,
+ struct ieee80211_bss_conf *link_conf)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv;
struct mt792x_phy *phy = mvif->phy;
struct bss_ifs_time_tlv *ifs_time;
struct tlv *tlv;
@@ -2295,18 +2551,18 @@ mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
}
int mt7925_mcu_set_timing(struct mt792x_phy *phy,
- struct ieee80211_vif *vif)
+ struct ieee80211_bss_conf *link_conf)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct mt792x_dev *dev = phy->dev;
struct sk_buff *skb;
- skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mconf->mt76,
MT7925_BSS_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
- mt7925_mcu_bss_ifs_tlv(skb, vif);
+ mt7925_mcu_bss_ifs_tlv(skb, link_conf);
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_UNI_CMD(BSS_INFO_UPDATE), true);
@@ -2314,41 +2570,42 @@ int mt7925_mcu_set_timing(struct mt792x_phy *phy,
int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
int enable)
{
- struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv;
+ struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf);
struct mt792x_dev *dev = phy->dev;
+ struct mt792x_link_sta *mlink_bc;
struct sk_buff *skb;
- int err;
- skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76,
+ skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mconf->mt76,
MT7925_BSS_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
- /* bss_basic must be first */
- mt7925_mcu_bss_basic_tlv(skb, vif, sta, ctx, phy->mt76,
- mvif->sta.wcid.idx, enable);
- mt7925_mcu_bss_sec_tlv(skb, vif);
+ mlink_bc = mt792x_sta_to_link(&mvif->sta, mconf->link_id);
- mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, vif, sta);
- mt7925_mcu_bss_qos_tlv(skb, vif);
- mt7925_mcu_bss_mld_tlv(skb, vif, sta);
- mt7925_mcu_bss_ifs_tlv(skb, vif);
+ /* bss_basic must be first */
+ mt7925_mcu_bss_basic_tlv(skb, link_conf, link_sta, ctx, phy->mt76,
+ mlink_bc->wcid.idx, enable);
+ mt7925_mcu_bss_sec_tlv(skb, link_conf);
+ mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, link_conf);
+ mt7925_mcu_bss_qos_tlv(skb, link_conf);
+ mt7925_mcu_bss_mld_tlv(skb, link_conf);
+ mt7925_mcu_bss_ifs_tlv(skb, link_conf);
- if (vif->bss_conf.he_support) {
- mt7925_mcu_bss_he_tlv(skb, vif, phy);
- mt7925_mcu_bss_color_tlv(skb, vif, enable);
+ if (link_conf->he_support) {
+ mt7925_mcu_bss_he_tlv(skb, link_conf, phy);
+ mt7925_mcu_bss_color_tlv(skb, link_conf, enable);
}
- err = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_UNI_CMD(BSS_INFO_UPDATE), true);
- if (err < 0)
- return err;
+ if (enable)
+ mt7925_mcu_bss_rlm_tlv(skb, phy->mt76, link_conf, ctx);
- return mt7925_mcu_set_chctx(phy->mt76, &mvif->mt76, ctx);
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_UNI_CMD(BSS_INFO_UPDATE), true);
}
int mt7925_mcu_set_dbdc(struct mt76_phy *phy)
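
The RLM rework above replaces a fixed request struct with a TLV appended to the shared BSS_INFO_UPDATE skb, so mt7925_mcu_set_chctx() and mt7925_mcu_add_bss_info() can reuse one builder. A minimal userspace sketch of that append pattern, with hypothetical names (tlv_hdr, buf_add_tlv) standing in for the mt76 helpers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv_hdr {
	uint16_t tag;
	uint16_t len;
} __attribute__((packed));

/* reserve len bytes at the tail, write the tag/len header, and hand the
 * caller a pointer to the whole TLV (header included, like bss_rlm_tlv) */
static void *buf_add_tlv(uint8_t *buf, size_t *off, uint16_t tag, uint16_t len)
{
	struct tlv_hdr hdr = { .tag = tag, .len = len };
	void *tlv = buf + *off;

	memcpy(tlv, &hdr, sizeof(hdr));
	*off += len;
	return tlv;
}

int main(void)
{
	uint8_t req[256];
	size_t off = 0;

	buf_add_tlv(req, &off, 0, 64);	/* basic TLV must come first */
	buf_add_tlv(req, &off, 8, 16);	/* then optional TLVs such as RLM */
	printf("request length: %zu\n", off);
	return 0;
}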
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
index 2a0bbfe7bfa5..ac53bdc99332 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h
@@ -366,7 +366,10 @@ struct bss_mld_tlv {
u8 mac_addr[ETH_ALEN];
u8 remap_idx;
u8 link_id;
- u8 __rsv[2];
+ u8 eml_enable;
+ u8 max_link_num;
+ u8 hybrid_mode;
+ u8 __rsv[3];
} __packed;
struct sta_rec_ba_uni {
@@ -440,6 +443,17 @@ struct sta_rec_mld {
} __packed link[2];
} __packed;
+struct sta_rec_eht_mld {
+ __le16 tag;
+ __le16 len;
+ u8 nsep;
+ u8 mld_type;
+ u8 __rsv1[1];
+ u8 str_cap[3];
+ u8 eml_cap[3];
+ u8 __rsv2[3];
+} __packed;
+
struct bss_ifs_time_tlv {
__le16 tag;
__le16 len;
@@ -456,6 +470,21 @@ struct bss_ifs_time_tlv {
__le16 eifs_cck_time;
} __packed;
+struct bss_rlm_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 control_channel;
+ u8 center_chan;
+ u8 center_chan2;
+ u8 bw;
+ u8 tx_streams;
+ u8 rx_streams;
+ u8 ht_op_info;
+ u8 sco;
+ u8 band;
+ u8 pad[3];
+} __packed;
+
#define MT7925_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct sta_rec_basic) + \
sizeof(struct sta_rec_bf) + \
@@ -474,7 +503,8 @@ struct bss_ifs_time_tlv {
sizeof(struct sta_rec_eht) + \
sizeof(struct sta_rec_hdr_trans) + \
sizeof(struct sta_rec_mld) + \
- sizeof(struct tlv))
+ sizeof(struct tlv) * 2 + \
+ sizeof(struct sta_rec_remove))
#define MT7925_BSS_UPDATE_MAX_SIZE (sizeof(struct bss_req_hdr) + \
sizeof(struct mt76_connac_bss_basic_tlv) + \
@@ -484,6 +514,7 @@ struct bss_ifs_time_tlv {
sizeof(struct bss_info_uni_he) + \
sizeof(struct bss_info_uni_bss_color) + \
sizeof(struct bss_ifs_time_tlv) + \
+ sizeof(struct bss_rlm_tlv) + \
sizeof(struct tlv))
#define MT_CONNAC3_SKU_POWER_LIMIT 449
@@ -535,7 +566,27 @@ struct mt7925_wow_pattern_tlv {
u8 offset;
u8 mask[MT76_CONNAC_WOW_MASK_MAX_LEN];
u8 pattern[MT76_CONNAC_WOW_PATTEN_MAX_LEN];
- u8 rsv[4];
+ u8 rsv[7];
+} __packed;
+
+struct roc_acquire_tlv {
+ __le16 tag;
+ __le16 len;
+ u8 bss_idx;
+ u8 tokenid;
+ u8 control_channel;
+ u8 sco;
+ u8 band;
+ u8 bw;
+ u8 center_chan;
+ u8 center_chan2;
+ u8 bw_from_ap;
+ u8 center_chan_from_ap;
+ u8 center_chan2_from_ap;
+ u8 reqtype;
+ __le32 maxinterval;
+ u8 dbdcband;
+ u8 rsv[3];
} __packed;
static inline enum connac3_mcu_cipher_type
@@ -578,18 +629,18 @@ int mt7925_mcu_sched_scan_enable(struct mt76_phy *phy,
bool enable);
int mt7925_mcu_add_bss_info(struct mt792x_phy *phy,
struct ieee80211_chanctx_conf *ctx,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
+ struct ieee80211_bss_conf *link_conf,
+ struct ieee80211_link_sta *link_sta,
int enable);
int mt7925_mcu_set_timing(struct mt792x_phy *phy,
- struct ieee80211_vif *vif);
+ struct ieee80211_bss_conf *link_conf);
int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable);
int mt7925_mcu_set_channel_domain(struct mt76_phy *phy);
int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable);
int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif,
+ struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx);
int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy);
int mt7925_mcu_update_arp_filter(struct mt76_dev *dev,
- struct mt76_vif *vif,
- struct ieee80211_bss_conf *info);
+ struct ieee80211_bss_conf *link_conf);
#endif
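
The new bss_rlm_tlv adds exactly 16 bytes to MT7925_BSS_UPDATE_MAX_SIZE: 4 bytes of tag/len plus nine u8 fields and 3 bytes of padding. The layout below is copied from the header above into a standalone size check; the printout itself is only illustrative:

#include <stdint.h>
#include <stdio.h>

struct bss_rlm_tlv {
	uint16_t tag;
	uint16_t len;
	uint8_t control_channel;
	uint8_t center_chan;
	uint8_t center_chan2;
	uint8_t bw;
	uint8_t tx_streams;
	uint8_t rx_streams;
	uint8_t ht_op_info;
	uint8_t sco;
	uint8_t band;
	uint8_t pad[3];
} __attribute__((packed));

int main(void)
{
	printf("rlm tlv size: %zu\n", sizeof(struct bss_rlm_tlv)); /* 16 */
	return 0;
}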
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
index 8a4a71f6bcb6..669f3a079d04 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h
@@ -30,17 +30,22 @@
enum {
UNI_ROC_ACQUIRE,
UNI_ROC_ABORT,
+ UNI_ROC_SUB_LINK = 3,
UNI_ROC_NUM
};
enum mt7925_roc_req {
MT7925_ROC_REQ_JOIN,
MT7925_ROC_REQ_ROC,
+ MT7925_ROC_REQ_SUB_LINK,
+ MT7925_ROC_REQ_MLSR_AG = 10,
+ MT7925_ROC_REQ_MLSR_AA,
MT7925_ROC_REQ_NUM
};
enum {
UNI_EVENT_ROC_GRANT = 0,
+ UNI_EVENT_ROC_GRANT_SUB_LINK = 4,
UNI_EVENT_ROC_TAG_NUM
};
@@ -192,13 +197,15 @@ int __mt7925_start(struct mt792x_phy *phy);
int mt7925_register_device(struct mt792x_dev *dev);
void mt7925_unregister_device(struct mt792x_dev *dev);
int mt7925_run_firmware(struct mt792x_dev *dev);
-int mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif,
+int mt7925_mcu_set_bss_pm(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *link_conf,
bool enable);
-int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta,
+int mt7925_mcu_sta_update(struct mt792x_dev *dev,
+ struct ieee80211_link_sta *link_sta,
struct ieee80211_vif *vif, bool enable,
enum mt76_sta_info_state state);
int mt7925_mcu_set_chan_info(struct mt792x_phy *phy, u16 tag);
-int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif);
+int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_bss_conf *bss_conf);
int mt7925_mcu_set_eeprom(struct mt792x_dev *dev);
int mt7925_mcu_get_rx_rate(struct mt792x_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
@@ -228,6 +235,7 @@ void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb, u32 *info);
void mt7925_stats_work(struct work_struct *work);
void mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy);
+int mt7925_init_mlo_caps(struct mt792x_phy *phy);
int mt7925_init_debugfs(struct mt792x_dev *dev);
int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev,
@@ -241,7 +249,8 @@ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev,
bool enable);
void mt7925_scan_work(struct work_struct *work);
void mt7925_roc_work(struct work_struct *work);
-int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif);
+int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev,
+ struct ieee80211_bss_conf *link_conf);
void mt7925_coredump_work(struct work_struct *work);
int mt7925_get_txpwr_info(struct mt792x_dev *dev, u8 band_idx,
struct mt7925_txpwr *txpwr);
@@ -252,7 +261,7 @@ void mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi,
struct ieee80211_key_conf *key, int pid,
enum mt76_txq_id qid, u32 changed);
void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t,
- struct ieee80211_sta *sta, bool clear_status,
+ struct ieee80211_sta *sta, struct mt76_wcid *wcid,
struct list_head *free_list);
int mt7925_mcu_parse_response(struct mt76_dev *mdev, int cmd,
struct sk_buff *skb, int seq);
@@ -291,20 +300,24 @@ int mt7925_set_tx_sar_pwr(struct ieee80211_hw *hw,
int mt7925_mcu_regval(struct mt792x_dev *dev, u32 regidx, u32 *val, bool set);
int mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2,
enum environment_cap env_cap);
-int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
+int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links,
+ int duration, u8 token_id);
+int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
struct ieee80211_channel *chan, int duration,
enum mt7925_roc_req type, u8 token_id);
-int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif,
+int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf,
u8 token_id);
int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
int cmd, int *wait_seq);
int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif,
struct mt76_connac_sta_key_conf *sta_key_conf,
struct ieee80211_key_conf *key, int mcu_cmd,
- struct mt76_wcid *wcid, enum set_key_cmd cmd);
+ struct mt76_wcid *wcid, enum set_key_cmd cmd,
+ struct mt792x_sta *msta);
int mt7925_mcu_set_rts_thresh(struct mt792x_phy *phy, u32 val);
int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev,
struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+ struct ieee80211_sta *sta,
+ int link_id);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
index 07b74d492ce1..6e4f4e78c350 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c
@@ -254,7 +254,7 @@ static int mt7925_dma_init(struct mt792x_dev *dev)
if (ret < 0)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt792x_poll_tx);
napi_enable(&dev->mt76.tx_napi);
@@ -373,6 +373,9 @@ static int mt7925_pci_probe(struct pci_dev *pdev,
bus_ops->rmw = mt7925_rmw;
dev->mt76.bus = bus_ops;
+ if (!mt7925_disable_aspm && mt76_pci_aspm_supported(pdev))
+ dev->aspm_supported = true;
+
ret = __mt792x_mcu_fw_pmctrl(dev);
if (ret)
goto err_free_dev;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
index 9fca887977d2..faedbf766d1a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c
@@ -34,9 +34,9 @@ int mt7925e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (sta) {
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+ if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) {
info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
- msta->last_txs = jiffies;
+ msta->deflink.last_txs = jiffies;
}
}
@@ -60,7 +60,7 @@ void mt7925_tx_token_put(struct mt792x_dev *dev)
spin_lock_bh(&dev->mt76.token_lock);
idr_for_each_entry(&dev->mt76.token, txwi, id) {
- mt7925_txwi_free(dev, txwi, NULL, false, NULL);
+ mt7925_txwi_free(dev, txwi, NULL, NULL, NULL);
dev->mt76.token_count--;
}
spin_unlock_bh(&dev->mt76.token_lock);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h
index a8556de3d480..7fa74d59cc48 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x.h
+++ b/drivers/net/wireless/mediatek/mt76/mt792x.h
@@ -26,6 +26,8 @@
#define MT792x_FW_CAP_CNM BIT(7)
#define MT792x_CHIP_CAP_CLC_EVT_EN BIT(0)
+#define MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN BIT(1)
+#define MT792x_CHIP_CAP_MLO_EVT_EN BIT(2)
/* NOTE: used to map mt76_rates. idx may change if firmware expands table */
#define MT792x_BASIC_RATES_TBL 11
@@ -36,10 +38,12 @@
#define MT792x_MCU_INIT_RETRY_COUNT 10
#define MT792x_WFSYS_INIT_RETRY_COUNT 2
+#define MT7920_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1a.bin"
#define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin"
#define MT7922_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7922_1.bin"
#define MT7925_FIRMWARE_WM "mediatek/mt7925/WIFI_RAM_CODE_MT7925_1_1.bin"
+#define MT7920_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1a_2_hdr.bin"
#define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin"
#define MT7922_ROM_PATCH "mediatek/WIFI_MT7922_patch_mcu_1_1_hdr.bin"
#define MT7925_ROM_PATCH "mediatek/mt7925/WIFI_MT7925_PATCH_MCU_1_1_hdr.bin"
@@ -78,11 +82,9 @@ enum mt792x_reg_power_type {
DECLARE_EWMA(avg_signal, 10, 8)
-struct mt792x_sta {
+struct mt792x_link_sta {
struct mt76_wcid wcid; /* must be first */
- struct mt792x_vif *vif;
-
u32 airtime_ac[8];
int ack_signal;
@@ -91,21 +93,46 @@ struct mt792x_sta {
unsigned long last_txs;
struct mt76_connac_sta_key_conf bip;
+
+ struct mt792x_sta *sta;
+
+ struct ieee80211_link_sta *pri_link;
+};
+
+struct mt792x_sta {
+ struct mt792x_link_sta deflink; /* must be first */
+ struct mt792x_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+
+ struct mt792x_vif *vif;
+
+ u16 valid_links;
+ u8 deflink_id;
};
DECLARE_EWMA(rssi, 10, 8);
-struct mt792x_vif {
+struct mt792x_chanctx {
+ struct mt792x_bss_conf *bss_conf;
+};
+
+struct mt792x_bss_conf {
struct mt76_vif mt76; /* must be first */
+ struct mt792x_vif *vif;
+ struct ewma_rssi rssi;
+ struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ unsigned int link_id;
+};
+
+struct mt792x_vif {
+ struct mt792x_bss_conf bss_conf; /* must be first */
+ struct mt792x_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS];
struct mt792x_sta sta;
struct mt792x_sta *wep_sta;
struct mt792x_phy *phy;
-
- struct ewma_rssi rssi;
-
- struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS];
+ u16 valid_links;
+ u8 deflink_id;
};
struct mt792x_phy {
@@ -137,6 +164,7 @@ struct mt792x_phy {
#endif
void *clc[MT792x_CLC_MAX_NUM];
u64 chip_cap;
+ u16 eml_cap;
struct work_struct roc_work;
struct timer_list roc_timer;
@@ -187,6 +215,7 @@ struct mt792x_dev {
bool fw_assert:1;
bool has_eht:1;
bool regd_in_progress:1;
+ bool aspm_supported:1;
wait_queue_head_t wait;
struct work_struct init_work;
@@ -208,6 +237,66 @@ struct mt792x_dev {
u32 backup_l2;
};
+static inline struct mt792x_bss_conf *
+mt792x_vif_to_link(struct mt792x_vif *mvif, u8 link_id)
+{
+ struct ieee80211_vif *vif;
+
+ vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv);
+
+ if (!ieee80211_vif_is_mld(vif) ||
+ link_id >= IEEE80211_LINK_UNSPECIFIED)
+ return &mvif->bss_conf;
+
+ return rcu_dereference_protected(mvif->link_conf[link_id],
+ lockdep_is_held(&mvif->phy->dev->mt76.mutex));
+}
+
+static inline struct mt792x_link_sta *
+mt792x_sta_to_link(struct mt792x_sta *msta, u8 link_id)
+{
+ struct ieee80211_vif *vif;
+
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+
+ if (!ieee80211_vif_is_mld(vif) ||
+ link_id >= IEEE80211_LINK_UNSPECIFIED)
+ return &msta->deflink;
+
+ return rcu_dereference_protected(msta->link[link_id],
+ lockdep_is_held(&msta->vif->phy->dev->mt76.mutex));
+}
+
+static inline struct mt792x_bss_conf *
+mt792x_link_conf_to_mconf(struct ieee80211_bss_conf *link_conf)
+{
+ struct ieee80211_vif *vif = link_conf->vif;
+ struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
+
+ return mt792x_vif_to_link(mvif, link_conf->link_id);
+}
+
+static inline struct ieee80211_bss_conf *
+mt792x_vif_to_bss_conf(struct ieee80211_vif *vif, unsigned int link_id)
+{
+ if (!ieee80211_vif_is_mld(vif) ||
+ link_id >= IEEE80211_LINK_UNSPECIFIED)
+ return &vif->bss_conf;
+
+ return link_conf_dereference_protected(vif, link_id);
+}
+
+static inline struct ieee80211_link_sta *
+mt792x_sta_to_link_sta(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ unsigned int link_id)
+{
+ if (!ieee80211_vif_is_mld(vif) ||
+ link_id >= IEEE80211_LINK_UNSPECIFIED)
+ return &sta->deflink;
+
+ return link_sta_dereference_protected(sta, link_id);
+}
+
static inline struct mt792x_dev *
mt792x_hw_dev(struct ieee80211_hw *hw)
{
@@ -248,7 +337,7 @@ static inline bool mt792x_dma_need_reinit(struct mt792x_dev *dev)
#define mt792x_mutex_release(dev) \
mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm)
-void mt792x_stop(struct ieee80211_hw *hw);
+void mt792x_stop(struct ieee80211_hw *hw, bool suspend);
void mt792x_pm_wake_work(struct work_struct *work);
void mt792x_pm_power_save_work(struct work_struct *work);
void mt792x_reset(struct mt76_dev *mdev);
@@ -322,10 +411,15 @@ mt792x_get_mac80211_ops(struct device *dev,
int mt792x_init_wcid(struct mt792x_dev *dev);
int mt792x_mcu_drv_pmctrl(struct mt792x_dev *dev);
int mt792x_mcu_fw_pmctrl(struct mt792x_dev *dev);
+void mt792x_mac_link_bss_remove(struct mt792x_dev *dev,
+ struct mt792x_bss_conf *mconf,
+ struct mt792x_link_sta *mlink);
static inline char *mt792x_ram_name(struct mt792x_dev *dev)
{
switch (mt76_chip(&dev->mt76)) {
+ case 0x7920:
+ return MT7920_FIRMWARE_WM;
case 0x7922:
return MT7922_FIRMWARE_WM;
case 0x7925:
@@ -338,6 +432,8 @@ static inline char *mt792x_ram_name(struct mt792x_dev *dev)
static inline char *mt792x_patch_name(struct mt792x_dev *dev)
{
switch (mt76_chip(&dev->mt76)) {
+ case 0x7920:
+ return MT7920_ROM_PATCH;
case 0x7922:
return MT7922_ROM_PATCH;
case 0x7925:
@@ -361,7 +457,7 @@ void mt792xu_wr(struct mt76_dev *dev, u32 addr, u32 val);
u32 mt792xu_rmw(struct mt76_dev *dev, u32 addr, u32 mask, u32 val);
void mt792xu_copy(struct mt76_dev *dev, u32 offset, const void *data, int len);
void mt792xu_disconnect(struct usb_interface *usb_intf);
-void mt792xu_stop(struct ieee80211_hw *hw);
+void mt792xu_stop(struct ieee80211_hw *hw, bool suspend);
static inline void
mt792x_skb_add_usb_sdio_hdr(struct mt792x_dev *dev, struct sk_buff *skb,
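
The mt792x_vif_to_link()/mt792x_sta_to_link() helpers above encode one rule: non-MLD interfaces keep using the embedded default link, and only MLD interfaces dereference the per-link pointer (under the driver mutex in the real code). A simplified sketch of that rule, using stand-in types instead of the mt792x and RCU machinery:

#include <stdio.h>

#define MAX_LINKS 15	/* stands in for IEEE80211_MLD_MAX_NUM_LINKS */

struct link_sta { int wcid_idx; };

struct sta {
	struct link_sta deflink;		/* must be first */
	struct link_sta *link[MAX_LINKS];	/* RCU-protected in the driver */
	int is_mld;
};

static struct link_sta *sta_to_link(struct sta *s, unsigned int link_id)
{
	/* non-MLD stations (and out-of-range ids) use the embedded default */
	if (!s->is_mld || link_id >= MAX_LINKS)
		return &s->deflink;
	return s->link[link_id];
}

int main(void)
{
	struct sta s = { .deflink = { .wcid_idx = 1 }, .is_mld = 0 };

	/* non-MLD: any link id resolves to the default link */
	printf("wcid %d\n", sta_to_link(&s, 3)->wcid_idx);
	return 0;
}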
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
index a405af8d9052..78fe37c2e07b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c
@@ -59,20 +59,42 @@ void mt792x_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
+ u8 link_id;
int qid;
if (control->sta) {
+ struct mt792x_link_sta *mlink;
struct mt792x_sta *sta;
-
+ link_id = u32_get_bits(info->control.flags,
+ IEEE80211_TX_CTRL_MLO_LINK);
sta = (struct mt792x_sta *)control->sta->drv_priv;
- wcid = &sta->wcid;
+ mlink = mt792x_sta_to_link(sta, link_id);
+ wcid = &mlink->wcid;
}
if (vif && !control->sta) {
struct mt792x_vif *mvif;
mvif = (struct mt792x_vif *)vif->drv_priv;
- wcid = &mvif->sta.wcid;
+ wcid = &mvif->sta.deflink.wcid;
+ }
+
+ if (vif && control->sta && ieee80211_vif_is_mld(vif)) {
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_link_sta *link_sta;
+ struct ieee80211_bss_conf *conf;
+
+ link_id = wcid->link_id;
+ rcu_read_lock();
+ conf = rcu_dereference(vif->link_conf[link_id]);
+ memcpy(hdr->addr2, conf->addr, ETH_ALEN);
+
+ link_sta = rcu_dereference(control->sta->link[link_id]);
+ memcpy(hdr->addr1, link_sta->addr, ETH_ALEN);
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ memcpy(hdr->addr3, conf->bssid, ETH_ALEN);
+ rcu_read_unlock();
}
if (mt76_connac_pm_ref(mphy, &dev->pm)) {
@@ -91,7 +113,7 @@ void mt792x_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
}
EXPORT_SYMBOL_GPL(mt792x_tx);
-void mt792x_stop(struct ieee80211_hw *hw)
+void mt792x_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt792x_dev *dev = mt792x_hw_dev(hw);
struct mt792x_phy *phy = mt792x_hw_phy(hw);
@@ -113,31 +135,47 @@ void mt792x_stop(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(mt792x_stop);
+void mt792x_mac_link_bss_remove(struct mt792x_dev *dev,
+ struct mt792x_bss_conf *mconf,
+ struct mt792x_link_sta *mlink)
+{
+ struct ieee80211_vif *vif = container_of((void *)mconf->vif,
+ struct ieee80211_vif, drv_priv);
+ struct ieee80211_bss_conf *link_conf;
+ int idx = mlink->wcid.idx;
+
+ link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id);
+
+ mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid);
+ mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mlink->wcid, false);
+
+ rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+
+ dev->mt76.vif_mask &= ~BIT_ULL(mconf->mt76.idx);
+ mconf->vif->phy->omac_mask &= ~BIT_ULL(mconf->mt76.omac_idx);
+
+ spin_lock_bh(&dev->mt76.sta_poll_lock);
+ if (!list_empty(&mlink->wcid.poll_list))
+ list_del_init(&mlink->wcid.poll_list);
+ spin_unlock_bh(&dev->mt76.sta_poll_lock);
+
+ mt76_wcid_cleanup(&dev->mt76, &mlink->wcid);
+}
+EXPORT_SYMBOL_GPL(mt792x_mac_link_bss_remove);
+
void mt792x_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
- struct mt792x_sta *msta = &mvif->sta;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
- struct mt792x_phy *phy = mt792x_hw_phy(hw);
- int idx = msta->wcid.idx;
+ struct mt792x_bss_conf *mconf;
mt792x_mutex_acquire(dev);
- mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
- mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false);
- rcu_assign_pointer(dev->mt76.wcid[idx], NULL);
+ mconf = mt792x_link_conf_to_mconf(&vif->bss_conf);
+ mt792x_mac_link_bss_remove(dev, mconf, &mvif->sta.deflink);
- dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx);
- phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx);
mt792x_mutex_release(dev);
-
- spin_lock_bh(&dev->mt76.sta_poll_lock);
- if (!list_empty(&msta->wcid.poll_list))
- list_del_init(&msta->wcid.poll_list);
- spin_unlock_bh(&dev->mt76.sta_poll_lock);
-
- mt76_wcid_cleanup(&dev->mt76, &msta->wcid);
}
EXPORT_SYMBOL_GPL(mt792x_remove_interface);
@@ -149,7 +187,7 @@ int mt792x_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
/* no need to update right away, we'll get BSS_CHANGED_QOS */
queue = mt76_connac_lmac_mapping(queue);
- mvif->queue_params[queue] = *params;
+ mvif->bss_conf.queue_params[queue] = *params;
return 0;
}
@@ -178,7 +216,7 @@ u64 mt792x_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
- u8 omac_idx = mvif->mt76.omac_idx;
+ u8 omac_idx = mvif->bss_conf.mt76.omac_idx;
union {
u64 t64;
u32 t32[2];
@@ -204,7 +242,7 @@ void mt792x_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
{
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
- u8 omac_idx = mvif->mt76.omac_idx;
+ u8 omac_idx = mvif->bss_conf.mt76.omac_idx;
union {
u64 t64;
u32 t32[2];
@@ -261,11 +299,13 @@ int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
+ struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mvif->mt76.ctx = ctx;
+ mvif->bss_conf.mt76.ctx = ctx;
+ mctx->bss_conf = &mvif->bss_conf;
mutex_unlock(&dev->mt76.mutex);
return 0;
@@ -277,11 +317,13 @@ void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
+ struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv;
struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv;
struct mt792x_dev *dev = mt792x_hw_dev(hw);
mutex_lock(&dev->mt76.mutex);
- mvif->mt76.ctx = NULL;
+ mctx->bss_conf = NULL;
+ mvif->bss_conf.mt76.ctx = NULL;
mutex_unlock(&dev->mt76.mutex);
}
EXPORT_SYMBOL_GPL(mt792x_unassign_vif_chanctx);
@@ -405,10 +447,10 @@ mt792x_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
struct mt76_ethtool_worker_info *wi = wi_data;
- if (msta->vif->mt76.idx != wi->idx)
+ if (msta->vif->bss_conf.mt76.idx != wi->idx)
return;
- mt76_ethtool_worker(wi, &msta->wcid.stats, true);
+ mt76_ethtool_worker(wi, &msta->deflink.wcid.stats, true);
}
void mt792x_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -421,7 +463,7 @@ void mt792x_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct mt76_mib_stats *mib = &phy->mib;
struct mt76_ethtool_worker_info wi = {
.data = data,
- .idx = mvif->mt76.idx,
+ .idx = mvif->bss_conf.mt76.idx,
};
int i, ei = 0;
@@ -487,7 +529,7 @@ void mt792x_sta_statistics(struct ieee80211_hw *hw,
struct station_info *sinfo)
{
struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv;
- struct rate_info *txrate = &msta->wcid.rate;
+ struct rate_info *txrate = &msta->deflink.wcid.rate;
if (!txrate->legacy && !txrate->flags)
return;
@@ -502,19 +544,19 @@ void mt792x_sta_statistics(struct ieee80211_hw *hw,
sinfo->txrate.he_dcm = txrate->he_dcm;
sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc;
}
- sinfo->tx_failed = msta->wcid.stats.tx_failed;
+ sinfo->tx_failed = msta->deflink.wcid.stats.tx_failed;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
- sinfo->tx_retries = msta->wcid.stats.tx_retries;
+ sinfo->tx_retries = msta->deflink.wcid.stats.tx_retries;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
sinfo->txrate.flags = txrate->flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
- sinfo->ack_signal = (s8)msta->ack_signal;
+ sinfo->ack_signal = (s8)msta->deflink.ack_signal;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL);
- sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal);
+ sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->deflink.avg_ack_signal);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG);
}
EXPORT_SYMBOL_GPL(mt792x_sta_statistics);
@@ -556,6 +598,7 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw)
hw->sta_data_size = sizeof(struct mt792x_sta);
hw->vif_data_size = sizeof(struct mt792x_vif);
+ hw->chanctx_data_size = sizeof(struct mt792x_chanctx);
if (dev->fw_features & MT792x_FW_CAP_CNM) {
wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
@@ -766,6 +809,10 @@ int __mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev)
for (i = 0; i < MT792x_DRV_OWN_RETRY_COUNT; i++) {
mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN);
+
+ if (dev->aspm_supported)
+ usleep_range(2000, 3000);
+
if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL,
PCIE_LPCR_HOST_OWN_SYNC, 0, 50, 1))
break;
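
In the reworked mt792x_tx() above, the MLO link id is extracted from the tx control flags with u32_get_bits() and then used to pick the per-link wcid and addresses. A userspace sketch of that field extraction, assuming an illustrative mask value in place of IEEE80211_TX_CTRL_MLO_LINK:

#include <stdint.h>
#include <stdio.h>

/* assumed mask; the real IEEE80211_TX_CTRL_MLO_LINK lives in mac80211.h */
#define TX_CTRL_MLO_LINK 0xf0000000u

static uint32_t get_bits(uint32_t val, uint32_t mask)
{
	return (val & mask) / (mask & -mask); /* divide by lowest set bit == shift */
}

int main(void)
{
	uint32_t flags = 0x20000000u;	/* link 2 encoded in the masked field */

	printf("link_id %u\n", get_bits(flags, TX_CTRL_MLO_LINK));
	return 0;
}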
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
index 5cc2d59b774a..6f9db782338e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c
@@ -340,7 +340,7 @@ int mt792x_poll_rx(struct napi_struct *napi, int budget)
struct mt792x_dev *dev;
int done;
- dev = container_of(napi->dev, struct mt792x_dev, mt76.napi_dev);
+ dev = mt76_priv(napi->dev);
if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
napi_complete(napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
index eb29434abee1..106273935b26 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c
@@ -138,6 +138,7 @@ EXPORT_SYMBOL_GPL(mt792x_mac_update_mib_stats);
struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx,
bool unicast)
{
+ struct mt792x_link_sta *link;
struct mt792x_sta *sta;
struct mt76_wcid *wcid;
@@ -151,11 +152,12 @@ struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx,
if (!wcid->sta)
return NULL;
- sta = container_of(wcid, struct mt792x_sta, wcid);
+ link = container_of(wcid, struct mt792x_link_sta, wcid);
+ sta = container_of(link, struct mt792x_sta, deflink);
if (!sta->vif)
return NULL;
- return &sta->vif->sta.wcid;
+ return &sta->vif->sta.deflink.wcid;
}
EXPORT_SYMBOL_GPL(mt792x_rx_get_wcid);
@@ -173,7 +175,7 @@ mt792x_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!ether_addr_equal(vif->addr, hdr->addr1))
return;
- ewma_rssi_add(&mvif->rssi, -status->signal);
+ ewma_rssi_add(&mvif->bss_conf.rssi, -status->signal);
}
void mt792x_mac_assoc_rssi(struct mt792x_dev *dev, struct sk_buff *skb)
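
mt792x_rx_get_wcid() above now recovers the station with two container_of() steps, because the wcid is embedded in struct mt792x_link_sta, which in turn is the first member of struct mt792x_sta. A minimal sketch of that double-container_of walk with stand-in types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wcid { int idx; };
struct link_sta { struct wcid wcid; };
struct sta {
	struct link_sta deflink;	/* must be first */
	int id;
};

int main(void)
{
	struct sta s = { .deflink.wcid.idx = 7, .id = 3 };
	struct wcid *w = &s.deflink.wcid;

	/* wcid -> embedding link_sta -> embedding sta, as in the rx path */
	struct link_sta *link = container_of(w, struct link_sta, wcid);
	struct sta *sta = container_of(link, struct sta, deflink);

	printf("idx %d id %d\n", sta->deflink.wcid.idx, sta->id);
	return 0;
}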
diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
index b49668a4b784..76272a03b22e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
+++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c
@@ -285,12 +285,12 @@ int mt792xu_init_reset(struct mt792x_dev *dev)
}
EXPORT_SYMBOL_GPL(mt792xu_init_reset);
-void mt792xu_stop(struct ieee80211_hw *hw)
+void mt792xu_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt792x_dev *dev = mt792x_hw_dev(hw);
mt76u_stop_tx(&dev->mt76);
- mt792x_stop(hw);
+ mt792x_stop(hw, false);
}
EXPORT_SYMBOL_GPL(mt792xu_stop);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
index 9bd953586b04..62c03d088925 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/debugfs.c
@@ -225,6 +225,11 @@ mt7996_radar_trigger(void *data, u64 val)
if (val > MT_RX_SEL2)
return -EINVAL;
+ if (val == MT_RX_SEL2 && !dev->rdd2_phy) {
+ dev_err(dev->mt76.dev, "Background radar is not enabled\n");
+ return -EINVAL;
+ }
+
return mt7996_mcu_rdd_cmd(dev, RDD_RADAR_EMULATE,
val, 0, 0);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
index 73e633d0d700..69a7d9b2e38b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c
@@ -641,7 +641,7 @@ int mt7996_dma_init(struct mt7996_dev *dev)
if (ret < 0)
return ret;
- netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
+ netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi,
mt7996_poll_tx);
napi_enable(&dev->mt76.tx_napi);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
index 0384fb059ddf..bc7111a71f98 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
@@ -1655,14 +1655,10 @@ mt7996_mac_restart(struct mt7996_dev *dev)
set_bit(MT76_RESET, &dev->mphy.state);
set_bit(MT76_MCU_RESET, &dev->mphy.state);
wake_up(&dev->mt76.mcu.wait);
- if (phy2) {
+ if (phy2)
set_bit(MT76_RESET, &phy2->mt76->state);
- set_bit(MT76_MCU_RESET, &phy2->mt76->state);
- }
- if (phy3) {
+ if (phy3)
set_bit(MT76_RESET, &phy3->mt76->state);
- set_bit(MT76_MCU_RESET, &phy3->mt76->state);
- }
/* lock/unlock all queues to ensure that no tx is pending */
mt76_txq_schedule_all(&dev->mphy);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
index f7da8d6dd903..bce082038219 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c
@@ -35,6 +35,14 @@ int mt7996_run(struct ieee80211_hw *hw)
ret = mt7996_mcu_set_hdr_trans(dev, true);
if (ret)
goto out;
+
+ if (is_mt7992(&dev->mt76)) {
+ u8 queue = mt76_connac_lmac_mapping(IEEE80211_AC_VI);
+
+ ret = mt7996_mcu_cp_support(dev, queue);
+ if (ret)
+ goto out;
+ }
}
mt7996_mac_enable_nf(dev, phy->mt76->band_idx);
@@ -85,7 +93,7 @@ static int mt7996_start(struct ieee80211_hw *hw)
return ret;
}
-static void mt7996_stop(struct ieee80211_hw *hw)
+static void mt7996_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt7996_dev *dev = mt7996_hw_dev(hw);
struct mt7996_phy *phy = mt7996_hw_phy(hw);
@@ -238,7 +246,11 @@ static int mt7996_add_interface(struct ieee80211_hw *hw,
mt7996_init_bitrate_mask(vif);
mt7996_mcu_add_bss_info(phy, vif, true);
- mt7996_mcu_add_sta(dev, vif, NULL, true);
+ /* defer the first STA_REC of the BMC entry to BSS_CHANGED_BSSID for
+ * STA interfaces, since the firmware only records the BSSID when the
+ * entry is new
+ */
+ if (vif->type != NL80211_IFTYPE_STATION)
+ mt7996_mcu_add_sta(dev, vif, NULL, true, true);
rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid);
out:
@@ -256,7 +268,7 @@ static void mt7996_remove_interface(struct ieee80211_hw *hw,
struct mt7996_phy *phy = mt7996_hw_phy(hw);
int idx = msta->wcid.idx;
- mt7996_mcu_add_sta(dev, vif, NULL, false);
+ mt7996_mcu_add_sta(dev, vif, NULL, false, false);
mt7996_mcu_add_bss_info(phy, vif, false);
if (vif == phy->monitor_vif)
@@ -340,10 +352,6 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
/* fall back to sw encryption for unsupported ciphers */
switch (key->cipher) {
- case WLAN_CIPHER_SUITE_AES_CMAC:
- wcid_keyidx = &wcid->hw_key_idx2;
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
- break;
case WLAN_CIPHER_SUITE_TKIP:
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
@@ -351,6 +359,11 @@ static int mt7996_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
case WLAN_CIPHER_SUITE_GCMP_256:
case WLAN_CIPHER_SUITE_SMS4:
break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ wcid_keyidx = &wcid->hw_key_idx2;
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIE;
+ fallthrough;
+ case WLAN_CIPHER_SUITE_BIP_CMAC_256:
case WLAN_CIPHER_SUITE_BIP_GMAC_128:
case WLAN_CIPHER_SUITE_BIP_GMAC_256:
if (key->keyidx == 6 || key->keyidx == 7)
@@ -438,7 +451,7 @@ mt7996_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct ieee80211_tx_queue_params *params)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
- const u8 mq_to_aci[] = {
+ static const u8 mq_to_aci[] = {
[IEEE80211_AC_VO] = 3,
[IEEE80211_AC_VI] = 2,
[IEEE80211_AC_BE] = 0,
@@ -599,7 +612,8 @@ static void mt7996_bss_info_changed(struct ieee80211_hw *hw,
(changed & BSS_CHANGED_ASSOC && vif->cfg.assoc) ||
(changed & BSS_CHANGED_BEACON_ENABLED && info->enable_beacon)) {
mt7996_mcu_add_bss_info(phy, vif, true);
- mt7996_mcu_add_sta(dev, vif, NULL, true);
+ mt7996_mcu_add_sta(dev, vif, NULL, true,
+ !!(changed & BSS_CHANGED_BSSID));
}
if (changed & BSS_CHANGED_ERP_CTS_PROT)
@@ -688,7 +702,7 @@ int mt7996_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7996_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
- ret = mt7996_mcu_add_sta(dev, vif, sta, true);
+ ret = mt7996_mcu_add_sta(dev, vif, sta, true, true);
if (ret)
return ret;
@@ -702,7 +716,7 @@ void mt7996_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7996_sta *msta = (struct mt7996_sta *)sta->drv_priv;
int i;
- mt7996_mcu_add_sta(dev, vif, sta, false);
+ mt7996_mcu_add_sta(dev, vif, sta, false, false);
mt7996_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
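
The set_key() reorder in main.c above lets WLAN_CIPHER_SUITE_AES_CMAC do its cipher-specific setup (second key slot, MMIE generation) and then fall through into the shared BIP handling instead of breaking out early. A compact sketch of that fallthrough grouping; the enum values and labels are stand-ins:

#include <stdio.h>

enum cipher { TKIP, CCMP, AES_CMAC, BIP_GMAC_128 };

static const char *classify(enum cipher c)
{
	switch (c) {
	case TKIP:
	case CCMP:
		return "pairwise";
	case AES_CMAC:
		/* cipher-specific setup (key slot, MMIE flag) would go here */
		__attribute__((fallthrough));
	case BIP_GMAC_128:
		return "mgmt-protection";
	default:
		return "unsupported";
	}
}

int main(void)
{
	printf("%s %s\n", classify(AES_CMAC), classify(CCMP));
	return 0;
}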
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
index b44abe2acc81..2e4fa9f48dfb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c
@@ -355,7 +355,10 @@ mt7996_mcu_rx_radar_detected(struct mt7996_dev *dev, struct sk_buff *skb)
if (r->band_idx >= ARRAY_SIZE(dev->mt76.phys))
return;
- if (dev->rdd2_phy && r->band_idx == MT_RX_SEL2)
+ if (r->band_idx == MT_RX_SEL2 && !dev->rdd2_phy)
+ return;
+
+ if (r->band_idx == MT_RX_SEL2)
mphy = dev->rdd2_phy->mt76;
else
mphy = dev->mt76.phys[r->band_idx];
@@ -418,7 +421,7 @@ mt7996_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
if (!vif->bss_conf.color_change_active || vif->type == NL80211_IFTYPE_STATION)
return;
- ieee80211_color_change_finish(vif);
+ ieee80211_color_change_finish(vif, 0);
}
static void
@@ -819,11 +822,14 @@ mt7996_mcu_bss_mbssid_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
struct bss_info_uni_mbssid *mbssid;
struct tlv *tlv;
+ if (!vif->bss_conf.bssid_indicator)
+ return;
+
tlv = mt7996_mcu_add_uni_tlv(skb, UNI_BSS_INFO_11V_MBSSID, sizeof(*mbssid));
mbssid = (struct bss_info_uni_mbssid *)tlv;
- if (enable && vif->bss_conf.bssid_indicator) {
+ if (enable) {
mbssid->max_indicator = vif->bss_conf.bssid_indicator;
mbssid->mbss_idx = vif->bss_conf.bssid_index;
mbssid->tx_bss_omac_idx = 0;
@@ -1650,7 +1656,7 @@ mt7996_mcu_sta_bfer_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
- const u8 matrix[4][4] = {
+ static const u8 matrix[4][4] = {
{0, 0, 0, 0},
{1, 1, 0, 0}, /* 2x1, 2x2, 2x3, 2x4 */
{2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */
@@ -1749,6 +1755,18 @@ mt7996_mcu_sta_bfee_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
}
static void
+mt7996_mcu_sta_tx_proc_tlv(struct sk_buff *skb)
+{
+ struct sta_rec_tx_proc *tx_proc;
+ struct tlv *tlv;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_TX_PROC, sizeof(*tx_proc));
+
+ tx_proc = (struct sta_rec_tx_proc *)tlv;
+ tx_proc->flag = cpu_to_le32(0);
+}
+
+static void
mt7996_mcu_sta_hdrt_tlv(struct mt7996_dev *dev, struct sk_buff *skb)
{
struct sta_rec_hdrt *hdrt;
@@ -1778,10 +1796,10 @@ mt7996_mcu_sta_hdr_trans_tlv(struct mt7996_dev *dev, struct sk_buff *skb,
else
hdr_trans->from_ds = true;
- wcid = (struct mt76_wcid *)sta->drv_priv;
- if (!wcid)
+ if (!sta)
return;
+ wcid = (struct mt76_wcid *)sta->drv_priv;
hdr_trans->dis_rx_hdr_tran = !test_bit(MT_WCID_FLAG_HDR_TRANS, &wcid->flags);
if (test_bit(MT_WCID_FLAG_4ADDR, &wcid->flags)) {
hdr_trans->to_ds = true;
@@ -1968,6 +1986,7 @@ static void
mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
+#define INIT_RCPI 180
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
struct mt76_phy *mphy = mvif->phy->mt76;
struct cfg80211_chan_def *chandef = &mphy->chandef;
@@ -1983,7 +2002,7 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
ra->valid = true;
ra->auto_rate = true;
- ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta);
+ ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, &sta->deflink);
ra->channel = chandef->chan->hw_value;
ra->bw = (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) ?
CMD_CBW_320MHZ : sta->deflink.bandwidth;
@@ -2065,6 +2084,8 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev,
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
}
ra->sta_cap = cpu_to_le32(cap);
+
+ memset(ra->rx_rcpi, INIT_RCPI, sizeof(ra->rx_rcpi));
}
int mt7996_mcu_add_rate_ctrl(struct mt7996_dev *dev, struct ieee80211_vif *vif,
@@ -2133,14 +2154,16 @@ mt7996_mcu_add_group(struct mt7996_dev *dev, struct ieee80211_vif *vif,
}
int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
+ struct ieee80211_sta *sta, bool enable, bool newly)
{
struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv;
+ struct ieee80211_link_sta *link_sta;
struct mt7996_sta *msta;
struct sk_buff *skb;
int ret;
msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta;
+ link_sta = sta ? &sta->deflink : NULL;
skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76,
&msta->wcid,
@@ -2149,11 +2172,17 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
return PTR_ERR(skb);
/* starec basic */
- mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, sta, enable,
- !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx]));
+ mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta,
+ enable, newly);
+
if (!enable)
goto out;
+ /* starec hdr trans */
+ mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, sta);
+ /* starec tx proc */
+ mt7996_mcu_sta_tx_proc_tlv(skb);
+
/* tag order is in accordance with firmware dependency. */
if (sta) {
/* starec hdrt mode */
@@ -2178,8 +2207,6 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
mt7996_mcu_sta_muru_tlv(dev, skb, vif, sta);
/* starec bfee */
mt7996_mcu_sta_bfee_tlv(dev, skb, vif, sta);
- /* starec hdr trans */
- mt7996_mcu_sta_hdr_trans_tlv(dev, skb, vif, sta);
}
ret = mt7996_mcu_add_group(dev, vif, sta);
@@ -3729,6 +3756,7 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy)
} __packed * res;
struct sk_buff *skb;
int ret;
+ u32 temp;
ret = mt76_mcu_send_and_get_msg(&phy->dev->mt76, MCU_WM_UNI_CMD(THERMAL),
&req, sizeof(req), true, &skb);
@@ -3736,8 +3764,10 @@ int mt7996_mcu_get_temperature(struct mt7996_phy *phy)
return ret;
res = (void *)skb->data;
+ temp = le32_to_cpu(res->temperature);
+ dev_kfree_skb(skb);
- return le32_to_cpu(res->temperature);
+ return temp;
}
int mt7996_mcu_set_thermal_throttling(struct mt7996_phy *phy, u8 state)
@@ -4464,7 +4494,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
u8 band_idx;
} __packed req = {
.tag = cpu_to_le16(UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL),
- .len = cpu_to_le16(sizeof(req) + MT7996_SKU_RATE_NUM - 4),
+ .len = cpu_to_le16(sizeof(req) + MT7996_SKU_PATH_NUM - 4),
.power_ctrl_id = UNI_TXPOWER_POWER_LIMIT_TABLE_CTRL,
.power_limit_type = TX_POWER_LIMIT_TABLE_RATE,
.band_idx = phy->mt76->band_idx,
@@ -4479,7 +4509,7 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
mphy->txpower_cur = tx_power;
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL,
- sizeof(req) + MT7996_SKU_RATE_NUM);
+ sizeof(req) + MT7996_SKU_PATH_NUM);
if (!skb)
return -ENOMEM;
@@ -4503,6 +4533,22 @@ int mt7996_mcu_set_txpower_sku(struct mt7996_phy *phy)
/* eht */
skb_put_data(skb, &la.eht[0], sizeof(la.eht));
+ /* padding */
+ skb_put_zero(skb, MT7996_SKU_PATH_NUM - MT7996_SKU_RATE_NUM);
+
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_WM_UNI_CMD(TXPOWER), true);
}
+
+int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode)
+{
+ __le32 cp_mode;
+
+ if (mode < mt76_connac_lmac_mapping(IEEE80211_AC_BE) ||
+ mode > mt76_connac_lmac_mapping(IEEE80211_AC_VO))
+ return -EINVAL;
+
+ cp_mode = cpu_to_le32(mode);
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(CP_SUPPORT),
+ &cp_mode, sizeof(cp_mode), true);
+}
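
The mt7996_mcu_get_temperature() change above copies the value out of the response before dev_kfree_skb(); reading res->temperature after the free was a use-after-free. The same copy-then-free discipline in a standalone sketch:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct resp { uint32_t temperature; };

static int read_temperature(void)
{
	struct resp *res = malloc(sizeof(*res));
	uint32_t temp;

	if (!res)
		return -1;
	res->temperature = 42;		/* pretend this came from the MCU */

	temp = res->temperature;	/* copy the field out first... */
	free(res);			/* ...then release the buffer */
	return (int)temp;		/* never touch res after free() */
}

int main(void)
{
	printf("temp %d\n", read_temperature());
	return 0;
}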
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
index 304e5fd14803..928a9663b49e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mmio.c
@@ -519,7 +519,7 @@ static void mt7996_irq_tasklet(struct tasklet_struct *t)
struct mt7996_dev *dev = from_tasklet(dev, t, mt76.irq_tasklet);
struct mtk_wed_device *wed = &dev->mt76.mmio.wed;
struct mtk_wed_device *wed_hif2 = &dev->mt76.mmio.wed_hif2;
- u32 i, intr, mask, intr1;
+ u32 i, intr, mask, intr1 = 0;
if (dev->hif2 && mtk_wed_device_active(wed_hif2)) {
mtk_wed_device_irq_set_mask(wed_hif2, 0);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
index 36d1f247d55a..177cfff31120 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7996/mt7996.h
@@ -50,6 +50,7 @@
#define MT7996_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
#define MT7996_SKU_RATE_NUM 417
+#define MT7996_SKU_PATH_NUM 494
#define MT7996_MAX_TWT_AGRT 16
#define MT7996_MAX_STA_TWT_AGRT 8
@@ -450,7 +451,7 @@ int mt7996_mcu_add_dev_info(struct mt7996_phy *phy,
int mt7996_mcu_add_bss_info(struct mt7996_phy *phy,
struct ieee80211_vif *vif, int enable);
int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable);
+ struct ieee80211_sta *sta, bool enable, bool newly);
int mt7996_mcu_add_tx_ba(struct mt7996_dev *dev,
struct ieee80211_ampdu_params *params,
bool add);
@@ -612,6 +613,7 @@ int mt7996_mcu_bcn_prot_enable(struct mt7996_dev *dev, struct ieee80211_vif *vif
int mt7996_mcu_wtbl_update_hdr_trans(struct mt7996_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
+int mt7996_mcu_cp_support(struct mt7996_dev *dev, u8 mode);
#ifdef CONFIG_MAC80211_DEBUGFS
void mt7996_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
diff --git a/drivers/net/wireless/mediatek/mt76/pci.c b/drivers/net/wireless/mediatek/mt76/pci.c
index 4c1c159fbb62..b5031ca7f73f 100644
--- a/drivers/net/wireless/mediatek/mt76/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/pci.c
@@ -45,3 +45,26 @@ void mt76_pci_disable_aspm(struct pci_dev *pdev)
aspm_conf);
}
EXPORT_SYMBOL_GPL(mt76_pci_disable_aspm);
+
+bool mt76_pci_aspm_supported(struct pci_dev *pdev)
+{
+ struct pci_dev *parent = pdev->bus->self;
+ u16 aspm_conf, parent_aspm_conf = 0;
+ bool result = true;
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm_conf);
+ aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+ if (parent) {
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+ &parent_aspm_conf);
+ parent_aspm_conf &= PCI_EXP_LNKCTL_ASPMC;
+ }
+
+ if (!aspm_conf && (!parent || !parent_aspm_conf)) {
+ /* aspm already disabled */
+ result = false;
+ }
+
+ return result;
+}
+EXPORT_SYMBOL_GPL(mt76_pci_aspm_supported);
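
mt76_pci_aspm_supported() reports ASPM as active when either the endpoint or its upstream bridge still has ASPMC bits set in Link Control, which is what gates the extra sleep added in __mt792xe_mcu_drv_pmctrl(). A hedged standalone restatement of that check using the standard PCI_EXP_LNKCTL_ASPMC mask:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_LNKCTL_ASPMC 0x0003	/* L0s/L1 enable bits in Link Control */

static bool aspm_enabled(uint16_t dev_lnkctl, uint16_t parent_lnkctl,
			 bool has_parent)
{
	uint16_t dev_aspm = dev_lnkctl & PCI_EXP_LNKCTL_ASPMC;
	uint16_t parent_aspm = has_parent ?
			       (uint16_t)(parent_lnkctl & PCI_EXP_LNKCTL_ASPMC) : 0;

	/* mirrors the driver: "disabled" only if both sides are clear */
	return dev_aspm || parent_aspm;
}

int main(void)
{
	/* L1 enabled on the device, parent clear -> still supported */
	printf("%d\n", aspm_enabled(0x0042, 0x0000, true));
	return 0;
}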
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 3e88798df017..8e9576747052 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -499,7 +499,8 @@ static void mt76s_tx_status_data(struct mt76_worker *worker)
dev = container_of(sdio, struct mt76_dev, sdio);
while (true) {
- if (test_bit(MT76_REMOVED, &dev->phy.state))
+ if (test_bit(MT76_RESET, &dev->phy.state) ||
+ test_bit(MT76_REMOVED, &dev->phy.state))
break;
if (!dev->drv->tx_status_data(dev, &update))
@@ -514,13 +515,14 @@ static void mt76s_tx_status_data(struct mt76_worker *worker)
}
static int
-mt76s_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+mt76s_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
enum mt76_txq_id qid, struct sk_buff *skb,
struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
struct mt76_tx_info tx_info = {
.skb = skb,
};
+ struct mt76_dev *dev = phy->dev;
int err, len = skb->len;
u16 idx = q->head;
@@ -548,10 +550,7 @@ static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
struct sk_buff *skb, u32 tx_info)
{
- int ret = -ENOSPC, len = skb->len, pad;
-
- if (q->queued == q->ndesc)
- goto error;
+ int ret, len = skb->len, pad;
pad = round_up(skb->len, 4) - skb->len;
ret = mt76_skb_adjust_pad(skb, pad);
@@ -560,6 +559,12 @@ mt76s_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
spin_lock_bh(&q->lock);
+ if (q->queued == q->ndesc) {
+ ret = -ENOSPC;
+ spin_unlock_bh(&q->lock);
+ goto error;
+ }
+
q->entry[q->head].buf_sz = len;
q->entry[q->head].skb = skb;
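
The mt76s_tx_queue_skb_raw() fix above moves the queue-full test under q->lock, so the check and the subsequent head update are atomic with respect to concurrent completion. A pthread sketch of the same check-then-act-under-lock pattern:

#include <pthread.h>
#include <stdio.h>

struct queue {
	pthread_mutex_t lock;
	int queued, ndesc;
};

static int enqueue(struct queue *q)
{
	int ret = 0;

	pthread_mutex_lock(&q->lock);
	if (q->queued == q->ndesc)
		ret = -1;	/* -ENOSPC in the driver */
	else
		q->queued++;	/* safe: checked and bumped under one lock */
	pthread_mutex_unlock(&q->lock);
	return ret;
}

int main(void)
{
	struct queue q = { PTHREAD_MUTEX_INITIALIZER, 0, 1 };
	int a = enqueue(&q);
	int b = enqueue(&q);

	printf("%d %d\n", a, b);	/* 0 -1: second attempt finds it full */
	return 0;
}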
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index 4644dace9bb3..ca4feccf38ca 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -53,7 +53,7 @@ void mt76_testmode_tx_pending(struct mt76_phy *phy)
q->queued < q->ndesc / 2) {
int ret;
- ret = dev->queue_ops->tx_queue_skb(dev, q, qid, skb_get(skb),
+ ret = dev->queue_ops->tx_queue_skb(phy, q, qid, skb_get(skb),
wcid, NULL);
if (ret < 0)
break;
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index 1809b03292c3..5cf6edee4d13 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -308,7 +308,7 @@ __mt76_tx_queue_skb(struct mt76_phy *phy, int qid, struct sk_buff *skb,
int idx;
non_aql = !info->tx_time_est;
- idx = dev->queue_ops->tx_queue_skb(dev, q, qid, skb, wcid, sta);
+ idx = dev->queue_ops->tx_queue_skb(phy, q, qid, skb, wcid, sta);
if (idx < 0 || !sta)
return idx;
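Across `sdio.c`, `testmode.c`, `tx.c`, and (just below) `usb.c`, the `tx_queue_skb` queue op now takes the `mt76_phy` rather than the `mt76_dev`; implementations recover the device via `phy->dev`, so per-band state is available without a separate lookup. A stand-alone skeleton of the new shape, where all `demo_*` names are stand-ins:

```c
struct demo_dev { int queued_total; };
struct demo_phy { struct demo_dev *dev; };
struct demo_queue { int head; };

static int demo_tx_queue_skb(struct demo_phy *phy, struct demo_queue *q)
{
	/* The device is now derived from the per-band phy argument. */
	struct demo_dev *dev = phy->dev;

	dev->queued_total++;
	return q->head;
}
```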
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 342c3aea549d..58ff06823389 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -850,13 +850,14 @@ mt76u_tx_setup_buffers(struct mt76_dev *dev, struct sk_buff *skb,
}
static int
-mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
+mt76u_tx_queue_skb(struct mt76_phy *phy, struct mt76_queue *q,
enum mt76_txq_id qid, struct sk_buff *skb,
struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
struct mt76_tx_info tx_info = {
.skb = skb,
};
+ struct mt76_dev *dev = phy->dev;
u16 idx = q->head;
int err;
diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c
index a7330576486b..7570c6ceecea 100644
--- a/drivers/net/wireless/mediatek/mt7601u/main.c
+++ b/drivers/net/wireless/mediatek/mt7601u/main.c
@@ -28,7 +28,7 @@ out:
return ret;
}
-static void mt7601u_stop(struct ieee80211_hw *hw)
+static void mt7601u_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mt7601u_dev *dev = hw->priv;
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 089102ed9ae5..eb37b228d54e 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -1617,23 +1617,6 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
return 0;
}
-static int wilc_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow)
-{
- struct wilc *wl = wiphy_priv(wiphy);
-
- if (!wow && wilc_wlan_get_num_conn_ifcs(wl))
- wl->suspend_event = true;
- else
- wl->suspend_event = false;
-
- return 0;
-}
-
-static int wilc_resume(struct wiphy *wiphy)
-{
- return 0;
-}
-
static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
{
struct wilc *wl = wiphy_priv(wiphy);
@@ -1739,8 +1722,6 @@ static const struct cfg80211_ops wilc_cfg80211_ops = {
.set_power_mgmt = set_power_mgmt,
.set_cqm_rssi_config = set_cqm_rssi_config,
- .suspend = wilc_suspend,
- .resume = wilc_resume,
.set_wakeup = wilc_set_wakeup,
.set_tx_power = set_tx_power,
.get_tx_power = get_tx_power,
@@ -1780,7 +1761,6 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
const struct wilc_hif_func *ops)
{
struct wilc *wl;
- struct wilc_vif *vif;
int ret, i;
wl = wilc_create_wiphy(dev);
@@ -1809,18 +1789,9 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
ret = -ENOMEM;
goto free_cfg;
}
- vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE,
- NL80211_IFTYPE_STATION, false);
- if (IS_ERR(vif)) {
- ret = PTR_ERR(vif);
- goto free_hq;
- }
return 0;
-free_hq:
- destroy_workqueue(wl->hif_workqueue);
-
free_cfg:
wilc_wlan_cfg_deinit(wl);
diff --git a/drivers/net/wireless/microchip/wilc1000/fw.h b/drivers/net/wireless/microchip/wilc1000/fw.h
index 5c5cac4aab02..7a930e89614c 100644
--- a/drivers/net/wireless/microchip/wilc1000/fw.h
+++ b/drivers/net/wireless/microchip/wilc1000/fw.h
@@ -13,6 +13,12 @@
#define WILC_MAX_RATES_SUPPORTED 12
#define WILC_MAX_NUM_PMKIDS 16
#define WILC_MAX_NUM_SCANNED_CH 14
+#define WILC_NVMEM_MAX_NUM_BANK 6
+#define WILC_NVMEM_BANK_BASE 0x30000000
+#define WILC_NVMEM_LOW_BANK_OFFSET 0x102c
+#define WILC_NVMEM_HIGH_BANK_OFFSET 0x1380
+#define WILC_NVMEM_IS_BANK_USED BIT(31)
+#define WILC_NVMEM_IS_BANK_INVALID BIT(30)
struct wilc_assoc_resp {
__le16 capab_info;
@@ -127,4 +133,11 @@ struct wilc_external_auth_param {
__le32 key_mgmt_suites;
__le16 status;
} __packed;
+
+static inline u32 get_bank_offset_from_bank_index(unsigned int i)
+{
+ return (((i) < 2) ? WILC_NVMEM_LOW_BANK_OFFSET + ((i) * 32) :
+ WILC_NVMEM_HIGH_BANK_OFFSET + ((i) - 2) * 16);
+}
+
#endif
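The helper above packs two different strides into one expression: banks 0-1 sit 32 bytes apart starting at `WILC_NVMEM_LOW_BANK_OFFSET`, banks 2-5 sit 16 bytes apart starting at `WILC_NVMEM_HIGH_BANK_OFFSET`. A small userspace check of the resulting offsets:

```c
#include <stdio.h>

#define WILC_NVMEM_LOW_BANK_OFFSET  0x102c
#define WILC_NVMEM_HIGH_BANK_OFFSET 0x1380

static unsigned int bank_offset(unsigned int i)
{
	return i < 2 ? WILC_NVMEM_LOW_BANK_OFFSET + i * 32
		     : WILC_NVMEM_HIGH_BANK_OFFSET + (i - 2) * 16;
}

int main(void)
{
	/* Prints 0x102c 0x104c 0x1380 0x1390 0x13a0 0x13b0 */
	for (unsigned int i = 0; i < 6; i++)
		printf("bank %u -> 0x%x\n", i, bank_offset(i));
	return 0;
}
```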
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index f1085ccb7eed..3c48e1a57b24 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -382,7 +382,8 @@ wilc_parse_join_bss_param(struct cfg80211_bss *bss,
struct ieee80211_p2p_noa_attr noa_attr;
const struct cfg80211_bss_ies *ies;
struct wilc_join_bss_param *param;
- u8 rates_len = 0, ies_len;
+ u8 rates_len = 0;
+ int ies_len;
int ret;
param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -1293,7 +1294,7 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
return result;
}
-int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr)
+int wilc_set_mac_address(struct wilc_vif *vif, const u8 *mac_addr)
{
struct wid wid;
int result;
@@ -1301,7 +1302,7 @@ int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr)
wid.id = WID_MAC_ADDR;
wid.type = WID_STR;
wid.size = ETH_ALEN;
- wid.val = mac_addr;
+ wid.val = (u8 *)mac_addr;
result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
if (result)
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h
index 0d380586b1d9..96eeaf31d237 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.h
+++ b/drivers/net/wireless/microchip/wilc1000/hif.h
@@ -167,7 +167,7 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
u8 cipher_mode);
int wilc_set_pmkid_info(struct wilc_vif *vif, struct wilc_pmkid_attr *pmkid);
int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr);
-int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr);
+int wilc_set_mac_address(struct wilc_vif *vif, const u8 *mac_addr);
int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ies,
size_t ies_len);
int wilc_disconnect(struct wilc_vif *vif);
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 710e29bea560..9ecf3fb29b55 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -590,7 +590,6 @@ static int wilc_mac_open(struct net_device *ndev)
struct wilc *wl = vif->wilc;
int ret = 0;
struct mgmt_frame_regs mgmt_regs = {};
- u8 addr[ETH_ALEN] __aligned(2);
if (!wl || !wl->dev) {
netdev_err(ndev, "device not ready\n");
@@ -609,25 +608,19 @@ static int wilc_mac_open(struct net_device *ndev)
return ret;
}
- wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
- vif->idx);
-
- if (is_valid_ether_addr(ndev->dev_addr)) {
- ether_addr_copy(addr, ndev->dev_addr);
- wilc_set_mac_address(vif, addr);
- } else {
- wilc_get_mac_address(vif, addr);
- eth_hw_addr_set(ndev, addr);
- }
netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr);
-
- if (!is_valid_ether_addr(ndev->dev_addr)) {
- netdev_err(ndev, "Wrong MAC address\n");
+ ret = wilc_set_mac_address(vif, ndev->dev_addr);
+ if (ret) {
+ netdev_err(ndev, "Failed to enforce MAC address in chip");
wilc_deinit_host_int(ndev);
- wilc_wlan_deinitialize(ndev);
- return -EINVAL;
+ if (!wl->open_ifcs)
+ wilc_wlan_deinitialize(ndev);
+ return ret;
}
+ wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
+ vif->idx);
+
mgmt_regs.interface_stypes = vif->mgmt_reg_stypes;
/* so we detect a change */
vif->mgmt_reg_stypes = 0;
@@ -681,7 +674,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p)
}
srcu_read_unlock(&wilc->srcu, srcu_idx);
- result = wilc_set_mac_address(vif, (u8 *)addr->sa_data);
+ result = wilc_set_mac_address(vif, addr->sa_data);
if (result)
return result;
@@ -948,6 +941,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
int vif_type, enum nl80211_iftype type,
bool rtnl_locked)
{
+ u8 mac_address[ETH_ALEN];
struct net_device *ndev;
struct wilc_vif *vif;
int ret;
@@ -972,36 +966,50 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
vif->priv.wdev.iftype = type;
vif->priv.dev = ndev;
- if (rtnl_locked)
- ret = cfg80211_register_netdevice(ndev);
- else
- ret = register_netdev(ndev);
-
- if (ret) {
- ret = -EFAULT;
- goto error;
- }
-
ndev->needs_free_netdev = true;
vif->iftype = vif_type;
vif->idx = wilc_get_available_idx(wl);
vif->mac_opened = 0;
+
+ memcpy(mac_address, wl->nv_mac_address, ETH_ALEN);
+ /* WILC firmware uses locally administered MAC address for the
+ * second virtual interface (bit 1 of first byte set), but
+ * since it is possibly not loaded/running yet, reproduce this behavior
+ * in the driver during interface creation.
+ */
+ if (vif->idx)
+ mac_address[0] |= 0x2;
+
+ eth_hw_addr_set(vif->ndev, mac_address);
+
mutex_lock(&wl->vif_mutex);
list_add_tail_rcu(&vif->list, &wl->vif_list);
wl->vif_num += 1;
mutex_unlock(&wl->vif_mutex);
synchronize_srcu(&wl->srcu);
- return vif;
-
-error:
if (rtnl_locked)
- cfg80211_unregister_netdevice(ndev);
+ ret = cfg80211_register_netdevice(ndev);
else
- unregister_netdev(ndev);
+ ret = register_netdev(ndev);
+
+ if (ret) {
+ ret = -EFAULT;
+ goto error_remove_vif;
+ }
+
+ return vif;
+
+error_remove_vif:
+ mutex_lock(&wl->vif_mutex);
+ list_del_rcu(&vif->list);
+ wl->vif_num -= 1;
+ mutex_unlock(&wl->vif_mutex);
+ synchronize_srcu(&wl->srcu);
free_netdev(ndev);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(wilc_netdev_ifc_init);
MODULE_DESCRIPTION("Atmel WILC1000 core wireless driver");
MODULE_LICENSE("GPL");
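A tiny userspace illustration of the locally-administered bit that the comment in `wilc_netdev_ifc_init()` describes; the base address here is made up:

```c
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char base[6] = { 0xf8, 0xf0, 0x05, 0x12, 0x34, 0x56 };
	unsigned char second[6];

	memcpy(second, base, sizeof(base));
	/* Second interface: set bit 1 of the first octet, as the
	 * firmware would, making the address locally administered. */
	second[0] |= 0x2;

	/* Prints f8 then fa. */
	printf("if0 %02x, if1 %02x\n", base[0], second[0]);
	return 0;
}
```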
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index 5937d6d45695..95bc8b8fe65a 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -14,6 +14,7 @@
#include <linux/if_arp.h>
#include <linux/gpio/consumer.h>
#include <linux/rculist.h>
+#include <uapi/linux/if_ether.h>
#include "hif.h"
#include "wlan.h"
@@ -220,6 +221,13 @@ struct wilc {
/* protect vif list */
struct mutex vif_mutex;
+ /* Sleepable RCU struct to manipulate vif list. Sleepable version is
+ * needed over the classic RCU version because the driver's current
+ * design involves some sleeping code while manipulating a vif
+ * retrieved from vif list (so in a SRCU critical section), like:
+ * - sending commands to the chip, using info from retrieved vif
+ * - registering a new monitoring net device
+ */
struct srcu_struct srcu;
u8 open_ifcs;
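A sketch of the reader side that comment implies, assuming the driver's own `srcu`/`vif_list` fields and headers; the iteration body is elided:

```c
#include <linux/srcu.h>
#include <linux/rculist.h>
#include "netdev.h"

static void demo_for_each_vif(struct wilc *wl)
{
	struct wilc_vif *vif;
	int idx;

	idx = srcu_read_lock(&wl->srcu);
	list_for_each_entry_rcu(vif, &wl->vif_list, list) {
		/* Unlike classic RCU, this section may sleep, e.g. to
		 * send a synchronous command to the chip for this vif.
		 */
	}
	srcu_read_unlock(&wl->srcu, idx);
}
```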
@@ -264,7 +272,6 @@ struct wilc {
const struct firmware *firmware;
struct device *dev;
- bool suspend_event;
struct workqueue_struct *hif_workqueue;
struct wilc_cfg cfg;
@@ -279,6 +286,7 @@ struct wilc {
struct ieee80211_rate bitrates[ARRAY_SIZE(wilc_bitrates)];
struct ieee80211_supported_band band;
u32 cipher_suites[ARRAY_SIZE(wilc_cipher_suites)];
+ u8 nv_mac_address[ETH_ALEN];
};
struct wilc_wfi_mon_priv {
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index d6d394693090..0043f7a0fdf9 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -24,6 +24,9 @@ MODULE_DEVICE_TABLE(sdio, wilc_sdio_ids);
#define WILC_SDIO_BLOCK_SIZE 512
+static int wilc_sdio_init(struct wilc *wilc, bool resume);
+static int wilc_sdio_deinit(struct wilc *wilc);
+
struct wilc_sdio {
bool irq_gpio;
u32 block_size;
@@ -136,9 +139,11 @@ out:
static int wilc_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
+ struct wilc_sdio *sdio_priv;
+ struct wilc_vif *vif;
struct wilc *wilc;
int ret;
- struct wilc_sdio *sdio_priv;
+
sdio_priv = kzalloc(sizeof(*sdio_priv), GFP_KERNEL);
if (!sdio_priv)
@@ -176,9 +181,28 @@ static int wilc_sdio_probe(struct sdio_func *func,
}
clk_prepare_enable(wilc->rtc_clk);
+ wilc_sdio_init(wilc, false);
+
+ ret = wilc_load_mac_from_nv(wilc);
+ if (ret) {
+ pr_err("Can not retrieve MAC address from chip\n");
+ goto clk_disable_unprepare;
+ }
+
+ wilc_sdio_deinit(wilc);
+
+ vif = wilc_netdev_ifc_init(wilc, "wlan%d", WILC_STATION_MODE,
+ NL80211_IFTYPE_STATION, false);
+ if (IS_ERR(vif)) {
+ ret = PTR_ERR(vif);
+ goto clk_disable_unprepare;
+ }
+
dev_info(&func->dev, "Driver Initializing success\n");
return 0;
+clk_disable_unprepare:
+ clk_disable_unprepare(wilc->rtc_clk);
dispose_irq:
irq_dispose_mapping(wilc->dev_irq_num);
wilc_netdev_cleanup(wilc);
@@ -225,33 +249,6 @@ static bool wilc_sdio_is_init(struct wilc *wilc)
return sdio_priv->isinit;
}
-static int wilc_sdio_suspend(struct device *dev)
-{
- struct sdio_func *func = dev_to_sdio_func(dev);
- struct wilc *wilc = sdio_get_drvdata(func);
- int ret;
-
- dev_info(dev, "sdio suspend\n");
- chip_wakeup(wilc);
-
- if (!IS_ERR(wilc->rtc_clk))
- clk_disable_unprepare(wilc->rtc_clk);
-
- if (wilc->suspend_event) {
- host_sleep_notify(wilc);
- chip_allow_sleep(wilc);
- }
-
- ret = wilc_sdio_reset(wilc);
- if (ret) {
- dev_err(&func->dev, "Fail reset sdio\n");
- return ret;
- }
- sdio_claim_host(func);
-
- return 0;
-}
-
static int wilc_sdio_enable_interrupt(struct wilc *dev)
{
struct sdio_func *func = container_of(dev->dev, struct sdio_func, dev);
@@ -617,7 +614,52 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
static int wilc_sdio_deinit(struct wilc *wilc)
{
+ struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct wilc_sdio *sdio_priv = wilc->bus_data;
+ struct sdio_cmd52 cmd;
+ int ret;
+
+ cmd.read_write = 1;
+ cmd.function = 0;
+ cmd.raw = 1;
+
+ /* Disable all functions' interrupts */
+ cmd.address = SDIO_CCCR_IENx;
+ cmd.data = 0;
+ ret = wilc_sdio_cmd52(wilc, &cmd);
+ if (ret) {
+ dev_err(&func->dev, "Failed to disable functions interrupts\n");
+ return ret;
+ }
+
+ /* Disable all functions */
+ cmd.address = SDIO_CCCR_IOEx;
+ cmd.data = 0;
+ ret = wilc_sdio_cmd52(wilc, &cmd);
+ if (ret) {
+ dev_err(&func->dev,
+ "Failed to reset all functions\n");
+ return ret;
+ }
+
+ /* Disable CSA */
+ cmd.read_write = 0;
+ cmd.address = SDIO_FBR_BASE(1);
+ ret = wilc_sdio_cmd52(wilc, &cmd);
+ if (ret) {
+ dev_err(&func->dev,
+ "Failed to read CSA for function 1\n");
+ return ret;
+ }
+ cmd.read_write = 1;
+ cmd.address = SDIO_FBR_BASE(1);
+ cmd.data &= ~SDIO_FBR_ENABLE_CSA;
+ ret = wilc_sdio_cmd52(wilc, &cmd);
+ if (ret) {
+ dev_err(&func->dev,
+ "Failed to disable CSA for function 1\n");
+ return ret;
+ }
sdio_priv->isinit = false;
return 0;
@@ -838,27 +880,12 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint)
{
struct sdio_func *func = dev_to_sdio_func(wilc->dev);
struct wilc_sdio *sdio_priv = wilc->bus_data;
- u32 reg;
if (nint > MAX_NUM_INT) {
dev_err(&func->dev, "Too many interrupts (%d)...\n", nint);
return -EINVAL;
}
- /**
- * Disable power sequencer
- **/
- if (wilc_sdio_read_reg(wilc, WILC_MISC, &reg)) {
- dev_err(&func->dev, "Failed read misc reg...\n");
- return -EINVAL;
- }
-
- reg &= ~BIT(8);
- if (wilc_sdio_write_reg(wilc, WILC_MISC, reg)) {
- dev_err(&func->dev, "Failed write misc reg...\n");
- return -EINVAL;
- }
-
if (sdio_priv->irq_gpio) {
u32 reg;
int ret, i;
@@ -942,20 +969,40 @@ static const struct wilc_hif_func wilc_hif_sdio = {
.hif_is_init = wilc_sdio_is_init,
};
+static int wilc_sdio_suspend(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct wilc *wilc = sdio_get_drvdata(func);
+ int ret;
+
+ dev_info(dev, "sdio suspend\n");
+
+ if (!IS_ERR(wilc->rtc_clk))
+ clk_disable_unprepare(wilc->rtc_clk);
+
+ host_sleep_notify(wilc);
+
+ wilc_sdio_disable_interrupt(wilc);
+
+ ret = wilc_sdio_reset(wilc);
+ if (ret) {
+ dev_err(&func->dev, "Fail reset sdio\n");
+ return ret;
+ }
+
+ return 0;
+}
+
static int wilc_sdio_resume(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
struct wilc *wilc = sdio_get_drvdata(func);
dev_info(dev, "sdio resume\n");
- sdio_release_host(func);
- chip_wakeup(wilc);
wilc_sdio_init(wilc, true);
+ wilc_sdio_enable_interrupt(wilc);
- if (wilc->suspend_event)
- host_wakeup_notify(wilc);
-
- chip_allow_sleep(wilc);
+ host_wakeup_notify(wilc);
return 0;
}
@@ -981,8 +1028,7 @@ static struct sdio_driver wilc_sdio_driver = {
.of_match_table = wilc_of_match,
}
};
-module_driver(wilc_sdio_driver,
- sdio_register_driver,
- sdio_unregister_driver);
+module_sdio_driver(wilc_sdio_driver);
+
MODULE_DESCRIPTION("Atmel WILC1000 SDIO wireless driver");
MODULE_LICENSE("GPL");
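`module_sdio_driver()` is pure boilerplate removal; per the generic `module_driver()` helper it replaces in the hunk above, it expands to roughly:

```c
static int __init wilc_sdio_driver_init(void)
{
	return sdio_register_driver(&wilc_sdio_driver);
}
module_init(wilc_sdio_driver_init);

static void __exit wilc_sdio_driver_exit(void)
{
	sdio_unregister_driver(&wilc_sdio_driver);
}
module_exit(wilc_sdio_driver_exit);
```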
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index 61c3572ce321..5ff940c53ad9 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -206,9 +206,10 @@ static void wilc_wlan_power(struct wilc *wilc, bool on)
static int wilc_bus_probe(struct spi_device *spi)
{
- int ret;
- struct wilc *wilc;
struct wilc_spi *spi_priv;
+ struct wilc_vif *vif;
+ struct wilc *wilc;
+ int ret;
spi_priv = kzalloc(sizeof(*spi_priv), GFP_KERNEL);
if (!spi_priv)
@@ -249,7 +250,19 @@ static int wilc_bus_probe(struct spi_device *spi)
if (ret)
goto power_down;
+ ret = wilc_load_mac_from_nv(wilc);
+ if (ret) {
+ pr_err("Can not retrieve MAC address from chip\n");
+ goto power_down;
+ }
+
wilc_wlan_power(wilc, false);
+ vif = wilc_netdev_ifc_init(wilc, "wlan%d", WILC_STATION_MODE,
+ NL80211_IFTYPE_STATION, false);
+ if (IS_ERR(vif)) {
+ ret = PTR_ERR(vif);
+ goto power_down;
+ }
return 0;
power_down:
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index a9e872a7b2c3..533939e71534 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -678,17 +678,17 @@ EXPORT_SYMBOL_GPL(chip_wakeup);
void host_wakeup_notify(struct wilc *wilc)
{
- acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_INTERRUPT_2, 1);
- release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
}
EXPORT_SYMBOL_GPL(host_wakeup_notify);
void host_sleep_notify(struct wilc *wilc)
{
- acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_INTERRUPT_1, 1);
- release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
}
EXPORT_SYMBOL_GPL(host_sleep_notify);
@@ -1473,6 +1473,55 @@ u32 wilc_get_chipid(struct wilc *wilc, bool update)
return wilc->chipid;
}
+int wilc_load_mac_from_nv(struct wilc *wl)
+{
+ int ret = -EINVAL;
+ unsigned int i;
+
+ acquire_bus(wl, WILC_BUS_ACQUIRE_AND_WAKEUP);
+
+ for (i = 0; i < WILC_NVMEM_MAX_NUM_BANK; i++) {
+ int bank_offset = get_bank_offset_from_bank_index(i);
+ u32 reg1, reg2;
+ u8 invalid;
+ u8 used;
+
+ ret = wl->hif_func->hif_read_reg(wl,
+ WILC_NVMEM_BANK_BASE + bank_offset,
+ &reg1);
+ if (ret) {
+ pr_err("Can not read address %d lower part", i);
+ break;
+ }
+ ret = wl->hif_func->hif_read_reg(wl,
+ WILC_NVMEM_BANK_BASE + bank_offset + 4,
+ &reg2);
+ if (ret) {
+ pr_err("Can not read address %d upper part", i);
+ break;
+ }
+
+ used = FIELD_GET(WILC_NVMEM_IS_BANK_USED, reg1);
+ invalid = FIELD_GET(WILC_NVMEM_IS_BANK_INVALID, reg1);
+ if (!used || invalid)
+ continue;
+
+ wl->nv_mac_address[0] = FIELD_GET(GENMASK(23, 16), reg1);
+ wl->nv_mac_address[1] = FIELD_GET(GENMASK(15, 8), reg1);
+ wl->nv_mac_address[2] = FIELD_GET(GENMASK(7, 0), reg1);
+ wl->nv_mac_address[3] = FIELD_GET(GENMASK(31, 24), reg2);
+ wl->nv_mac_address[4] = FIELD_GET(GENMASK(23, 16), reg2);
+ wl->nv_mac_address[5] = FIELD_GET(GENMASK(15, 8), reg2);
+
+ ret = 0;
+ break;
+ }
+
+ release_bus(wl, WILC_BUS_RELEASE_ALLOW_SLEEP);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(wilc_load_mac_from_nv);
+
int wilc_wlan_init(struct net_device *dev)
{
int ret = 0;
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index 54643d8fef04..dd2fb3c2f06a 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -56,7 +56,6 @@
#define WILC_HOST_RX_CTRL (WILC_PERIPH_REG_BASE + 0x80)
#define WILC_HOST_RX_EXTRA_SIZE (WILC_PERIPH_REG_BASE + 0x84)
#define WILC_HOST_TX_CTRL_1 (WILC_PERIPH_REG_BASE + 0x88)
-#define WILC_MISC (WILC_PERIPH_REG_BASE + 0x428)
#define WILC_INTR_REG_BASE (WILC_PERIPH_REG_BASE + 0xa00)
#define WILC_INTR_ENABLE WILC_INTR_REG_BASE
#define WILC_INTR2_ENABLE (WILC_INTR_REG_BASE + 4)
@@ -445,4 +444,5 @@ int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
u32 count);
int wilc_wlan_init(struct net_device *dev);
u32 wilc_get_chipid(struct wilc *wilc, bool update);
+int wilc_load_mac_from_nv(struct wilc *wilc);
#endif
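The byte layout `wilc_load_mac_from_nv()` assumes puts octets 0-2 in bits 23:0 of the first bank word and octets 3-5 in bits 31:8 of the second. A userspace model of the extraction; the register values are hypothetical, with the "used" bit (31) set:

```c
#include <stdio.h>
#include <stdint.h>

static void extract_mac(uint32_t reg1, uint32_t reg2, uint8_t mac[6])
{
	mac[0] = (reg1 >> 16) & 0xff;
	mac[1] = (reg1 >> 8) & 0xff;
	mac[2] = reg1 & 0xff;
	mac[3] = (reg2 >> 24) & 0xff;
	mac[4] = (reg2 >> 16) & 0xff;
	mac[5] = (reg2 >> 8) & 0xff;
}

int main(void)
{
	uint8_t mac[6];

	extract_mac(0x80f8f005, 0x12345600, mac);
	/* Prints f8:f0:05:12:34:56 */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
```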
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c
index 641f847d47ab..eae93efa6150 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.c
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.c
@@ -111,7 +111,7 @@ int plfxlc_op_start(struct ieee80211_hw *hw)
return 0;
}
-void plfxlc_op_stop(struct ieee80211_hw *hw)
+void plfxlc_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct plfxlc_mac *mac = plfxlc_hw_mac(hw);
diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.h b/drivers/net/wireless/purelifi/plfxlc/mac.h
index 49b92413729b..9384acddcf26 100644
--- a/drivers/net/wireless/purelifi/plfxlc/mac.h
+++ b/drivers/net/wireless/purelifi/plfxlc/mac.h
@@ -178,7 +178,7 @@ int plfxlc_mac_rx(struct ieee80211_hw *hw, const u8 *buffer,
void plfxlc_mac_tx_failed(struct urb *urb);
void plfxlc_mac_tx_to_dev(struct sk_buff *skb, int error);
int plfxlc_op_start(struct ieee80211_hw *hw);
-void plfxlc_op_stop(struct ieee80211_hw *hw);
+void plfxlc_op_stop(struct ieee80211_hw *hw, bool suspend);
int plfxlc_restore_settings(struct plfxlc_mac *mac);
#endif /* PLFXLC_MAC_H */
diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c
index 311676c1ece0..15334940287d 100644
--- a/drivers/net/wireless/purelifi/plfxlc/usb.c
+++ b/drivers/net/wireless/purelifi/plfxlc/usb.c
@@ -408,7 +408,7 @@ void plfxlc_usb_init(struct plfxlc_usb *usb, struct ieee80211_hw *hw,
void plfxlc_usb_release(struct plfxlc_usb *usb)
{
- plfxlc_op_stop(plfxlc_usb_to_hw(usb));
+ plfxlc_op_stop(plfxlc_usb_to_hw(usb), false);
plfxlc_usb_disable_tx(usb);
plfxlc_usb_disable_rx(usb);
usb_set_intfdata(usb->intf, NULL);
@@ -761,7 +761,7 @@ static void plfxlc_usb_resume(struct plfxlc_usb *usb)
static void plfxlc_usb_stop(struct plfxlc_usb *usb)
{
- plfxlc_op_stop(plfxlc_usb_to_hw(usb));
+ plfxlc_op_stop(plfxlc_usb_to_hw(usb), false);
plfxlc_usb_disable_tx(usb);
plfxlc_usb_disable_rx(usb);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/bus.h b/drivers/net/wireless/quantenna/qtnfmac/bus.h
index 3334c45aac13..7f8646e77ee0 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/bus.h
+++ b/drivers/net/wireless/quantenna/qtnfmac/bus.h
@@ -59,7 +59,7 @@ struct qtnf_bus {
struct qtnf_qlink_transport trans;
struct qtnf_hw_info hw_info;
struct napi_struct mux_napi;
- struct net_device mux_dev;
+ struct net_device *mux_dev;
struct workqueue_struct *workqueue;
struct workqueue_struct *hprio_workqueue;
struct work_struct fw_work;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index 677bac835330..825b05dd3271 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -196,27 +196,12 @@ static int qtnf_netdev_port_parent_id(struct net_device *ndev,
return 0;
}
-static int qtnf_netdev_alloc_pcpu_stats(struct net_device *dev)
-{
- dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
-
- return dev->tstats ? 0 : -ENOMEM;
-}
-
-static void qtnf_netdev_free_pcpu_stats(struct net_device *dev)
-{
- free_percpu(dev->tstats);
-}
-
/* Network device ops handlers */
const struct net_device_ops qtnf_netdev_ops = {
- .ndo_init = qtnf_netdev_alloc_pcpu_stats,
- .ndo_uninit = qtnf_netdev_free_pcpu_stats,
.ndo_open = qtnf_netdev_open,
.ndo_stop = qtnf_netdev_close,
.ndo_start_xmit = qtnf_netdev_hard_start_xmit,
.ndo_tx_timeout = qtnf_netdev_tx_timeout,
- .ndo_get_stats64 = dev_get_tstats64,
.ndo_set_mac_address = qtnf_netdev_set_mac_address,
.ndo_get_port_parent_id = qtnf_netdev_port_parent_id,
};
@@ -483,6 +468,7 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
dev->watchdog_timeo = QTNF_DEF_WDOG_TIMEOUT;
dev->tx_queue_len = 100;
dev->ethtool_ops = &qtnf_ethtool_ops;
+ dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
if (qtnf_hwcap_is_set(&mac->bus->hw_info, QLINK_HW_CAPAB_HW_BRIDGE))
dev->needed_tailroom = sizeof(struct qtnf_frame_meta_info);
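Setting `dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS` asks the core to allocate and free the per-CPU `tstats` itself and to fall back to `dev_get_tstats64`, which is what makes the `ndo_init`/`ndo_uninit` pair above removable. A minimal sketch of the pattern, with `demo_setup` standing in for a driver's netdev setup callback:

```c
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void demo_setup(struct net_device *dev)
{
	ether_setup(dev);
	/* Core-managed per-CPU tstats; no manual
	 * netdev_alloc_pcpu_stats()/free_percpu() needed. */
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
}
```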
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index 9ad4c120fa28..f66eb43094d4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -372,7 +372,12 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto error;
}
- init_dummy_netdev(&bus->mux_dev);
+ bus->mux_dev = alloc_netdev_dummy(0);
+ if (!bus->mux_dev) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
qtnf_pcie_init_irq(pcie_priv, use_msi);
pcie_priv->sysctl_bar = sysctl_bar;
pcie_priv->dmareg_bar = dmareg_bar;
@@ -381,11 +386,13 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ret = pcie_priv->probe_cb(bus, tx_bd_size_param, rx_bd_size_param);
if (ret)
- goto error;
+ goto error_free;
qtnf_pcie_bringup_fw_async(bus);
return 0;
+error_free:
+ free_netdev(bus->mux_dev);
error:
destroy_workqueue(pcie_priv->workqueue);
pci_set_drvdata(pdev, NULL);
@@ -417,6 +424,7 @@ static void qtnf_pcie_remove(struct pci_dev *dev)
netif_napi_del(&bus->mux_napi);
destroy_workqueue(priv->workqueue);
tasklet_kill(&priv->reclaim_tq);
+ free_netdev(bus->mux_dev);
qtnf_pcie_free_shm_ipc(priv);
qtnf_debugfs_remove(bus);
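With `init_dummy_netdev()` the dummy device was embedded in `struct qtnf_bus`; `alloc_netdev_dummy()` heap-allocates it, so both the probe error path and remove must now call `free_netdev()`. The lifecycle in isolation, with `demo_*` names as stand-ins:

```c
#include <linux/netdevice.h>

static struct net_device *demo_mux;

static int demo_probe(void)
{
	demo_mux = alloc_netdev_dummy(0);	/* no private area */
	if (!demo_mux)
		return -ENOMEM;
	return 0;
}

static void demo_remove(void)
{
	/* Required now that the device is heap-allocated. */
	free_netdev(demo_mux);
}
```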
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
index 8c23a77d1671..c1a53e1ba3be 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c
@@ -761,12 +761,12 @@ static int qtnf_pcie_pearl_rx_poll(struct napi_struct *napi, int budget)
napi_gro_receive(napi, skb);
} else {
pr_debug("drop untagged skb\n");
- bus->mux_dev.stats.rx_dropped++;
+ bus->mux_dev->stats.rx_dropped++;
dev_kfree_skb_any(skb);
}
} else {
if (skb) {
- bus->mux_dev.stats.rx_dropped++;
+ bus->mux_dev->stats.rx_dropped++;
dev_kfree_skb_any(skb);
}
}
@@ -1146,7 +1146,7 @@ static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size,
}
tasklet_setup(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn);
- netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi,
+ netif_napi_add_weight(bus->mux_dev, &bus->mux_napi,
qtnf_pcie_pearl_rx_poll, 10);
ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int;
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
index d83362578374..ef5c069542d4 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c
@@ -667,12 +667,12 @@ static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget)
netif_receive_skb(skb);
} else {
pr_debug("drop untagged skb\n");
- bus->mux_dev.stats.rx_dropped++;
+ bus->mux_dev->stats.rx_dropped++;
dev_kfree_skb_any(skb);
}
} else {
if (skb) {
- bus->mux_dev.stats.rx_dropped++;
+ bus->mux_dev->stats.rx_dropped++;
dev_kfree_skb_any(skb);
}
}
@@ -1159,7 +1159,7 @@ static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus,
}
tasklet_setup(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn);
- netif_napi_add_weight(&bus->mux_dev, &bus->mux_napi,
+ netif_napi_add_weight(bus->mux_dev, &bus->mux_napi,
qtnf_topaz_rx_poll, 10);
ipc_int.fn = qtnf_topaz_ipc_gen_ep_int;
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 82af01448a0a..dfb4bb370f01 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -335,16 +335,6 @@ struct link {
struct delayed_work watchdog_work;
unsigned int watchdog_interval;
unsigned int watchdog;
-
- /*
- * Work structure for scheduling periodic AGC adjustments.
- */
- struct delayed_work agc_work;
-
- /*
- * Work structure for scheduling periodic VCO calibration.
- */
- struct delayed_work vco_work;
};
enum rt2x00_delayed_flags {
@@ -1460,7 +1450,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb);
int rt2x00mac_start(struct ieee80211_hw *hw);
-void rt2x00mac_stop(struct ieee80211_hw *hw);
+void rt2x00mac_stop(struct ieee80211_hw *hw, bool suspend);
void rt2x00mac_reconfig_complete(struct ieee80211_hw *hw,
enum ieee80211_reconfig_type reconfig_type);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 75fda72c14ca..451632488805 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -178,7 +178,7 @@ int rt2x00mac_start(struct ieee80211_hw *hw)
}
EXPORT_SYMBOL_GPL(rt2x00mac_start);
-void rt2x00mac_stop(struct ieee80211_hw *hw)
+void rt2x00mac_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
index 565a9a114134..1044105c2557 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/Makefile
@@ -3,4 +3,4 @@ rtl818x_pci-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o rtl8225se.o
obj-$(CONFIG_RTL8180) += rtl818x_pci.o
-ccflags-y += -I $(srctree)/$(src)/..
+ccflags-y += -I $(src)/..
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
index 77b6cb7e1f6b..ded8d4d59289 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
@@ -1249,7 +1249,7 @@ static int rtl8180_start(struct ieee80211_hw *dev)
return ret;
}
-static void rtl8180_stop(struct ieee80211_hw *dev)
+static void rtl8180_stop(struct ieee80211_hw *dev, bool suspend)
{
struct rtl8180_priv *priv = dev->priv;
u8 reg;
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
index 0bf64dfb233a..3deded2c05b6 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/Makefile
@@ -3,4 +3,4 @@ rtl8187-objs := dev.o rtl8225.o leds.o rfkill.o
obj-$(CONFIG_RTL8187) += rtl8187.o
-ccflags-y += -I $(srctree)/$(src)/..
+ccflags-y += -I $(src)/..
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 78d99afa373d..220ac5bdf279 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1019,7 +1019,7 @@ rtl8187_start_exit:
return ret;
}
-static void rtl8187_stop(struct ieee80211_hw *dev)
+static void rtl8187_stop(struct ieee80211_hw *dev, bool suspend)
{
struct rtl8187_priv *priv = dev->priv;
struct sk_buff *skb;
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c b/drivers/net/wireless/realtek/rtl8xxxu/8188e.c
index afe9cc1b49dc..3d04df0f5bf4 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8188e.c
@@ -13,24 +13,8 @@
* additional 8xxx chips like the 8192cu, 8188cus, etc.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static const struct rtl8xxxu_reg8val rtl8188e_mac_init_table[] = {
{0x026, 0x41}, {0x027, 0x35}, {0x040, 0x00}, {0x421, 0x0f},
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c
index 464216d007ce..3abf14d7044f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8188f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c
@@ -11,24 +11,8 @@
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static const struct rtl8xxxu_reg8val rtl8188f_mac_init_table[] = {
{0x024, 0xDF}, {0x025, 0x07}, {0x02B, 0x1C}, {0x283, 0x20},
@@ -713,9 +697,14 @@ static void rtl8188fu_init_statistics(struct rtl8xxxu_priv *priv)
rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32);
}
+#define TX_POWER_INDEX_MAX 0x3F
+#define TX_POWER_INDEX_DEFAULT_CCK 0x22
+#define TX_POWER_INDEX_DEFAULT_HT40 0x27
+
static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv)
{
struct rtl8188fu_efuse *efuse = &priv->efuse_wifi.efuse8188fu;
+ int i;
if (efuse->rtl_id != cpu_to_le16(0x8129))
return -EINVAL;
@@ -729,6 +718,16 @@ static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv)
efuse->tx_power_index_A.ht40_base,
sizeof(efuse->tx_power_index_A.ht40_base));
+ for (i = 0; i < ARRAY_SIZE(priv->cck_tx_power_index_A); i++) {
+ if (priv->cck_tx_power_index_A[i] > TX_POWER_INDEX_MAX)
+ priv->cck_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_CCK;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(priv->ht40_1s_tx_power_index_A); i++) {
+ if (priv->ht40_1s_tx_power_index_A[i] > TX_POWER_INDEX_MAX)
+ priv->ht40_1s_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_HT40;
+ }
+
priv->ofdm_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.a;
priv->ht20_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.b;
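The TX power index fields are 6-bit values, so anything above 0x3F (e.g. the 0xFF of an unprogrammed efuse) is invalid and is replaced with a sane default rather than trusted. A userspace model of the clamp:

```c
#include <stdio.h>

#define TX_POWER_INDEX_MAX		0x3F
#define TX_POWER_INDEX_DEFAULT_CCK	0x22

int main(void)
{
	/* 0xff models an erased/blank efuse byte. */
	unsigned char cck[3] = { 0x2a, 0xff, 0x3f };

	for (unsigned int i = 0; i < 3; i++)
		if (cck[i] > TX_POWER_INDEX_MAX)
			cck[i] = TX_POWER_INDEX_DEFAULT_CCK;

	/* Prints 2a 22 3f: only the out-of-range byte is replaced. */
	for (unsigned int i = 0; i < 3; i++)
		printf("%02x ", cck[i]);
	printf("\n");
	return 0;
}
```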
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c b/drivers/net/wireless/realtek/rtl8xxxu/8192c.c
index 3ee7d8f87da6..0abb1b092bc2 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192c.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8192c.c
@@ -13,24 +13,8 @@
* additional 8xxx chips like the 8192cu, 8188cus, etc.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
#ifdef CONFIG_RTL8XXXU_UNTESTED
static struct rtl8xxxu_power_base rtl8192c_power_base = {
@@ -77,6 +61,32 @@ static struct rtl8xxxu_power_base rtl8188r_power_base = {
.reg_0868 = 0x00020204,
};
+static const struct rtl8xxxu_reg8val rtl8192cu_mac_init_table[] = {
+ {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
+ {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+ {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
+ {0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05},
+ {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01},
+ {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f},
+ {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72},
+ {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08},
+ {0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
+ {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
+ {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
+ {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
+ {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
+ {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
+ {0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16},
+ {0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00},
+ {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02},
+ {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
+ {0x652, 0x20}, {0x652, 0x20}, {0x63c, 0x08}, {0x63d, 0x08},
+ {0x63e, 0x0c}, {0x63f, 0x0c}, {0x66e, 0x05}, {0x700, 0x21},
+ {0x701, 0x43}, {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21},
+ {0x709, 0x43}, {0x70a, 0x65}, {0x70b, 0x87},
+ {0xffff, 0xff},
+};
+
static const struct rtl8xxxu_rfregval rtl8192cu_radioa_2t_init_table[] = {
{0x00, 0x00030159}, {0x01, 0x00031284},
{0x02, 0x00098000}, {0x03, 0x00018c63},
@@ -583,6 +593,26 @@ static int rtl8192cu_power_on(struct rtl8xxxu_priv *priv)
return 0;
}
+static int rtl8192cu_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct rtl8xxxu_priv *priv = container_of(led_cdev,
+ struct rtl8xxxu_priv,
+ led_cdev);
+ u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG0);
+
+ if (brightness == LED_OFF)
+ ledcfg = LEDCFG2_SW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE;
+ else if (brightness == LED_ON)
+ ledcfg = LEDCFG2_SW_LED_CONTROL;
+ else if (brightness == RTL8XXXU_HW_LED_CONTROL)
+ ledcfg = LEDCFG2_HW_LED_CONTROL | LEDCFG2_HW_LED_ENABLE;
+
+ rtl8xxxu_write8(priv, REG_LEDCFG0, ledcfg);
+
+ return 0;
+}
+
struct rtl8xxxu_fileops rtl8192cu_fops = {
.identify_chip = rtl8192cu_identify_chip,
.parse_efuse = rtl8192cu_parse_efuse,
@@ -609,6 +639,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.report_rssi = rtl8xxxu_gen1_report_rssi,
.fill_txdesc = rtl8xxxu_fill_txdesc_v1,
.cck_rssi = rtl8723a_cck_rssi,
+ .led_classdev_brightness_set = rtl8192cu_led_brightness_set,
.writeN_block_size = 128,
.rx_agg_buf_size = 16000,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc32),
@@ -621,7 +652,7 @@ struct rtl8xxxu_fileops rtl8192cu_fops = {
.trxff_boundary = 0x27ff,
.pbp_rx = PBP_PAGE_SIZE_128,
.pbp_tx = PBP_PAGE_SIZE_128,
- .mactable = rtl8xxxu_gen1_mac_init_table,
+ .mactable = rtl8192cu_mac_init_table,
.total_page_num = TX_TOTAL_PAGE_NUM,
.page_num_hi = TX_PAGE_NUM_HI_PQ,
.page_num_lo = TX_PAGE_NUM_LO_PQ,
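A sketch of how a `brightness_set_blocking` hook like `rtl8192cu_led_brightness_set()` is typically registered; the actual rtl8xxxu registration lives in core code outside this hunk, so the names here are illustrative:

```c
#include <linux/leds.h>

static int demo_register_led(struct device *dev,
			     struct led_classdev *led,
			     int (*set)(struct led_classdev *,
					enum led_brightness))
{
	led->name = "demo::wlan";
	led->brightness_set_blocking = set;
	led->max_brightness = 1;
	return devm_led_classdev_register(dev, led);
}
```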
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c b/drivers/net/wireless/realtek/rtl8xxxu/8192e.c
index 63b73ace27ec..8e123bbfc665 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192e.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8192e.c
@@ -13,24 +13,8 @@
* additional 8xxx chips like the 8192cu, 8188cus, etc.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static const struct rtl8xxxu_reg8val rtl8192e_mac_init_table[] = {
{0x011, 0xeb}, {0x012, 0x07}, {0x014, 0x75}, {0x303, 0xa7},
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c b/drivers/net/wireless/realtek/rtl8xxxu/8192f.c
index 21e4204769d0..cd2156b7a57a 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8192f.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8192f.c
@@ -11,24 +11,8 @@
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static const struct rtl8xxxu_reg8val rtl8192f_mac_init_table[] = {
{0x420, 0x00}, {0x422, 0x78}, {0x428, 0x0a}, {0x429, 0x10},
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c b/drivers/net/wireless/realtek/rtl8xxxu/8710b.c
index 46d57510e9fc..11c63c320eae 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8710b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8710b.c
@@ -11,24 +11,8 @@
* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static const struct rtl8xxxu_reg8val rtl8710b_mac_init_table[] = {
{0x421, 0x0F}, {0x428, 0x0A}, {0x429, 0x10}, {0x430, 0x00},
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c b/drivers/net/wireless/realtek/rtl8xxxu/8723a.c
index ad1bb9377ca2..ecbc324e4609 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723a.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8723a.c
@@ -13,24 +13,8 @@
* additional 8xxx chips like the 8192cu, 8188cus, etc.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static struct rtl8xxxu_power_base rtl8723a_power_base = {
.reg_0e00 = 0x0a0c0c0c,
@@ -54,6 +38,31 @@ static struct rtl8xxxu_power_base rtl8723a_power_base = {
.reg_0868 = 0x02040608,
};
+static const struct rtl8xxxu_reg8val rtl8723au_mac_init_table[] = {
+ {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
+ {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
+ {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
+ {0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05},
+ {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01},
+ {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f},
+ {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72},
+ {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08},
+ {0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
+ {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
+ {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
+ {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
+ {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
+ {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
+ {0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16},
+ {0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00},
+ {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02},
+ {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
+ {0x652, 0x20}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e},
+ {0x63f, 0x0e}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43},
+ {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43},
+ {0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff},
+};
+
static const struct rtl8xxxu_rfregval rtl8723au_radioa_1t_init_table[] = {
{0x00, 0x00030159}, {0x01, 0x00031284},
{0x02, 0x00098000}, {0x03, 0x00039c63},
@@ -518,7 +527,7 @@ struct rtl8xxxu_fileops rtl8723au_fops = {
.trxff_boundary = 0x27ff,
.pbp_rx = PBP_PAGE_SIZE_128,
.pbp_tx = PBP_PAGE_SIZE_128,
- .mactable = rtl8xxxu_gen1_mac_init_table,
+ .mactable = rtl8723au_mac_init_table,
.total_page_num = TX_TOTAL_PAGE_NUM,
.page_num_hi = TX_PAGE_NUM_HI_PQ,
.page_num_lo = TX_PAGE_NUM_LO_PQ,
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c b/drivers/net/wireless/realtek/rtl8xxxu/8723b.c
index 9640c841d20a..cc2e60b06f64 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_8723b.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/8723b.c
@@ -13,24 +13,8 @@
* additional 8xxx chips like the 8192cu, 8188cus, etc.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
-#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
static const struct rtl8xxxu_reg8val rtl8723b_mac_init_table[] = {
{0x02f, 0x30}, {0x035, 0x00}, {0x039, 0x08}, {0x04e, 0xe0},
@@ -1701,6 +1685,28 @@ static s8 rtl8723b_cck_rssi(struct rtl8xxxu_priv *priv, struct rtl8723au_phy_sta
return rx_pwr_all;
}
+static int rtl8723bu_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct rtl8xxxu_priv *priv = container_of(led_cdev,
+ struct rtl8xxxu_priv,
+ led_cdev);
+ u8 ledcfg = rtl8xxxu_read8(priv, REG_LEDCFG2);
+
+ ledcfg &= LEDCFG2_DPDT_SELECT;
+
+ if (brightness == LED_OFF)
+ ledcfg |= LEDCFG2_SW_LED_CONTROL | LEDCFG2_SW_LED_DISABLE;
+ else if (brightness == LED_ON)
+ ledcfg |= LEDCFG2_SW_LED_CONTROL;
+ else if (brightness == RTL8XXXU_HW_LED_CONTROL)
+ ledcfg |= LEDCFG2_HW_LED_CONTROL | LEDCFG2_HW_LED_ENABLE;
+
+ rtl8xxxu_write8(priv, REG_LEDCFG2, ledcfg);
+
+ return 0;
+}
+
struct rtl8xxxu_fileops rtl8723bu_fops = {
.identify_chip = rtl8723bu_identify_chip,
.parse_efuse = rtl8723bu_parse_efuse,
@@ -1731,6 +1737,7 @@ struct rtl8xxxu_fileops rtl8723bu_fops = {
.fill_txdesc = rtl8xxxu_fill_txdesc_v2,
.set_crystal_cap = rtl8723a_set_crystal_cap,
.cck_rssi = rtl8723b_cck_rssi,
+ .led_classdev_brightness_set = rtl8723bu_led_brightness_set,
.writeN_block_size = 1024,
.tx_desc_size = sizeof(struct rtl8xxxu_txdesc40),
.rx_desc_size = sizeof(struct rtl8xxxu_rxdesc24),
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/Makefile b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
index fa466589eccb..580a2fa675ee 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/Makefile
+++ b/drivers/net/wireless/realtek/rtl8xxxu/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_RTL8XXXU) += rtl8xxxu.o
-rtl8xxxu-y := rtl8xxxu_core.o rtl8xxxu_8192e.o rtl8xxxu_8723b.o \
- rtl8xxxu_8723a.o rtl8xxxu_8192c.o rtl8xxxu_8188f.o \
- rtl8xxxu_8188e.o rtl8xxxu_8710b.o rtl8xxxu_8192f.o
+rtl8xxxu-y := core.o 8192e.o 8723b.o \
+ 8723a.o 8192c.o 8188f.o \
+ 8188e.o 8710b.o 8192f.o
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/core.c
index 4a49f8f9d80f..043fa364e701 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c
@@ -13,24 +13,9 @@
* additional 8xxx chips like the 8192cu, 8188cus, etc.
*/
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/errno.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/list.h>
-#include <linux/usb.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/ethtool.h>
-#include <linux/wireless.h>
#include <linux/firmware.h>
-#include <linux/moduleparam.h>
-#include <net/mac80211.h>
+#include "regs.h"
#include "rtl8xxxu.h"
-#include "rtl8xxxu_regs.h"
#define DRIVER_NAME "rtl8xxxu"
@@ -132,31 +117,6 @@ static struct ieee80211_supported_band rtl8xxxu_supported_band = {
.n_bitrates = ARRAY_SIZE(rtl8xxxu_rates),
};
-const struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[] = {
- {0x420, 0x80}, {0x423, 0x00}, {0x430, 0x00}, {0x431, 0x00},
- {0x432, 0x00}, {0x433, 0x01}, {0x434, 0x04}, {0x435, 0x05},
- {0x436, 0x06}, {0x437, 0x07}, {0x438, 0x00}, {0x439, 0x00},
- {0x43a, 0x00}, {0x43b, 0x01}, {0x43c, 0x04}, {0x43d, 0x05},
- {0x43e, 0x06}, {0x43f, 0x07}, {0x440, 0x5d}, {0x441, 0x01},
- {0x442, 0x00}, {0x444, 0x15}, {0x445, 0xf0}, {0x446, 0x0f},
- {0x447, 0x00}, {0x458, 0x41}, {0x459, 0xa8}, {0x45a, 0x72},
- {0x45b, 0xb9}, {0x460, 0x66}, {0x461, 0x66}, {0x462, 0x08},
- {0x463, 0x03}, {0x4c8, 0xff}, {0x4c9, 0x08}, {0x4cc, 0xff},
- {0x4cd, 0xff}, {0x4ce, 0x01}, {0x500, 0x26}, {0x501, 0xa2},
- {0x502, 0x2f}, {0x503, 0x00}, {0x504, 0x28}, {0x505, 0xa3},
- {0x506, 0x5e}, {0x507, 0x00}, {0x508, 0x2b}, {0x509, 0xa4},
- {0x50a, 0x5e}, {0x50b, 0x00}, {0x50c, 0x4f}, {0x50d, 0xa4},
- {0x50e, 0x00}, {0x50f, 0x00}, {0x512, 0x1c}, {0x514, 0x0a},
- {0x515, 0x10}, {0x516, 0x0a}, {0x517, 0x10}, {0x51a, 0x16},
- {0x524, 0x0f}, {0x525, 0x4f}, {0x546, 0x40}, {0x547, 0x00},
- {0x550, 0x10}, {0x551, 0x10}, {0x559, 0x02}, {0x55a, 0x02},
- {0x55d, 0xff}, {0x605, 0x30}, {0x608, 0x0e}, {0x609, 0x2a},
- {0x652, 0x20}, {0x63c, 0x0a}, {0x63d, 0x0a}, {0x63e, 0x0e},
- {0x63f, 0x0e}, {0x66e, 0x05}, {0x700, 0x21}, {0x701, 0x43},
- {0x702, 0x65}, {0x703, 0x87}, {0x708, 0x21}, {0x709, 0x43},
- {0x70a, 0x65}, {0x70b, 0x87}, {0xffff, 0xff},
-};
-
static const struct rtl8xxxu_reg32val rtl8723a_phy_1t_init_table[] = {
{0x800, 0x80040000}, {0x804, 0x00000003},
{0x808, 0x0000fc00}, {0x80c, 0x0000000a},
@@ -1505,13 +1465,13 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
u8 cck[RTL8723A_MAX_RF_PATHS], ofdm[RTL8723A_MAX_RF_PATHS];
u8 ofdmbase[RTL8723A_MAX_RF_PATHS], mcsbase[RTL8723A_MAX_RF_PATHS];
u32 val32, ofdm_a, ofdm_b, mcs_a, mcs_b;
- u8 val8;
+ u8 val8, base;
int group, i;
group = rtl8xxxu_gen1_channel_to_group(channel);
- cck[0] = priv->cck_tx_power_index_A[group] - 1;
- cck[1] = priv->cck_tx_power_index_B[group] - 1;
+ cck[0] = priv->cck_tx_power_index_A[group];
+ cck[1] = priv->cck_tx_power_index_B[group];
if (priv->hi_pa) {
if (cck[0] > 0x20)
@@ -1522,10 +1482,6 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
ofdm[0] = priv->ht40_1s_tx_power_index_A[group];
ofdm[1] = priv->ht40_1s_tx_power_index_B[group];
- if (ofdm[0])
- ofdm[0] -= 1;
- if (ofdm[1])
- ofdm[1] -= 1;
ofdmbase[0] = ofdm[0] + priv->ofdm_tx_power_index_diff[group].a;
ofdmbase[1] = ofdm[1] + priv->ofdm_tx_power_index_diff[group].b;
@@ -1614,20 +1570,19 @@ rtl8xxxu_gen1_set_tx_power(struct rtl8xxxu_priv *priv, int channel, bool ht40)
rtl8xxxu_write32(priv, REG_TX_AGC_A_MCS15_MCS12,
mcs_a + power_base->reg_0e1c);
+ val8 = u32_get_bits(mcs_a + power_base->reg_0e1c, 0xff000000);
for (i = 0; i < 3; i++) {
- if (i != 2)
- val8 = (mcsbase[0] > 8) ? (mcsbase[0] - 8) : 0;
- else
- val8 = (mcsbase[0] > 6) ? (mcsbase[0] - 6) : 0;
+ base = i != 2 ? 8 : 6;
+ val8 = max_t(int, val8 - base, 0);
rtl8xxxu_write8(priv, REG_OFDM0_XC_TX_IQ_IMBALANCE + i, val8);
}
+
rtl8xxxu_write32(priv, REG_TX_AGC_B_MCS15_MCS12,
mcs_b + power_base->reg_0868);
+ val8 = u32_get_bits(mcs_b + power_base->reg_0868, 0xff000000);
for (i = 0; i < 3; i++) {
- if (i != 2)
- val8 = (mcsbase[1] > 8) ? (mcsbase[1] - 8) : 0;
- else
- val8 = (mcsbase[1] > 6) ? (mcsbase[1] - 6) : 0;
+ base = i != 2 ? 8 : 6;
+ val8 = max_t(int, val8 - base, 0);
rtl8xxxu_write8(priv, REG_OFDM0_XD_TX_IQ_IMBALANCE + i, val8);
}
}
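The reworked loop derives the IQ-imbalance bytes from the top byte of the just-written MCS AGC word and clamps the running subtraction at zero; note that `val8` is reused across iterations, so the subtraction compounds. A userspace model of the values it produces for a hypothetical 0x0c top byte:

```c
#include <stdio.h>

/* Userspace stand-in for u32_get_bits(val, 0xff000000). */
static int top_byte(unsigned int val)
{
	return (val >> 24) & 0xff;
}

int main(void)
{
	int val8 = top_byte(0x0c0c0c0c);	/* hypothetical AGC word */

	for (int i = 0; i < 3; i++) {
		int base = i != 2 ? 8 : 6;

		/* Same running subtraction, clamped at zero. */
		val8 = val8 - base > 0 ? val8 - base : 0;
		/* Prints 4, 0, 0. */
		printf("iq-imbalance reg %d <- %d\n", i, val8);
	}
	return 0;
}
```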
@@ -4385,7 +4340,7 @@ static int rtl8xxxu_init_device(struct ieee80211_hw *hw)
/* Let the 8051 take control of antenna setting */
if (priv->rtl_chip != RTL8192E && priv->rtl_chip != RTL8188F &&
- priv->rtl_chip != RTL8710B) {
+ priv->rtl_chip != RTL8710B && priv->rtl_chip != RTL8192C) {
val8 = rtl8xxxu_read8(priv, REG_LEDCFG2);
val8 |= LEDCFG2_DPDT_SELECT;
rtl8xxxu_write8(priv, REG_LEDCFG2, val8);
@@ -6473,7 +6428,8 @@ int rtl8xxxu_parse_rxdesc16(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
- if (!rx_desc->swdec)
+ if (!rx_desc->swdec &&
+ rx_desc->security != RX_DESC_ENC_NONE)
rx_status->flag |= RX_FLAG_DECRYPTED;
if (rx_desc->crc32)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -6578,7 +6534,8 @@ int rtl8xxxu_parse_rxdesc24(struct rtl8xxxu_priv *priv, struct sk_buff *skb)
rx_status->mactime = rx_desc->tsfl;
rx_status->flag |= RX_FLAG_MACTIME_START;
- if (!rx_desc->swdec)
+ if (!rx_desc->swdec &&
+ rx_desc->security != RX_DESC_ENC_NONE)
rx_status->flag |= RX_FLAG_DECRYPTED;
if (rx_desc->crc32)
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
@@ -6722,7 +6679,6 @@ static void rtl8xxxu_switch_ports(struct rtl8xxxu_priv *priv)
u8 macid[ETH_ALEN], bssid[ETH_ALEN], macid_1[ETH_ALEN], bssid_1[ETH_ALEN];
u8 msr, bcn_ctrl, bcn_ctrl_1, atimwnd[2], atimwnd_1[2];
struct rtl8xxxu_vif *rtlvif;
- struct ieee80211_vif *vif;
u8 tsftr[8], tsftr_1[8];
int i;
@@ -6787,10 +6743,7 @@ static void rtl8xxxu_switch_ports(struct rtl8xxxu_priv *priv)
/* write bcn ctl */
rtl8xxxu_write8(priv, REG_BEACON_CTRL, bcn_ctrl_1);
rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, bcn_ctrl);
-
- vif = priv->vifs[0];
- priv->vifs[0] = priv->vifs[1];
- priv->vifs[1] = vif;
+ swap(priv->vifs[0], priv->vifs[1]);
/* priv->vifs[0] is NULL here, based on how this function is currently
* called from rtl8xxxu_add_interface().
@@ -7564,7 +7517,7 @@ error_out:
return ret;
}
-static void rtl8xxxu_stop(struct ieee80211_hw *hw)
+static void rtl8xxxu_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rtl8xxxu_priv *priv = hw->priv;
unsigned long flags;
@@ -7998,6 +7951,7 @@ static int rtl8xxxu_probe(struct usb_interface *interface,
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/regs.h
index 61c0c0ec07b3..61c0c0ec07b3 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/regs.h
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
index fd92d23c43d9..f42463e595cc 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu.h
@@ -5,8 +5,9 @@
* Register definitions taken from original Realtek rtl8723au driver
*/
-#include <asm/byteorder.h>
#include <linux/average.h>
+#include <linux/usb.h>
+#include <net/mac80211.h>
#define RTL8XXXU_DEBUG_REG_WRITE 0x01
#define RTL8XXXU_DEBUG_REG_READ 0x02
@@ -122,6 +123,15 @@ enum rtl8xxxu_rx_type {
RX_TYPE_ERROR = -1
};
+enum rtl8xxxu_rx_desc_enc {
+ RX_DESC_ENC_NONE = 0,
+ RX_DESC_ENC_WEP40 = 1,
+ RX_DESC_ENC_TKIP_WO_MIC = 2,
+ RX_DESC_ENC_TKIP_MIC = 3,
+ RX_DESC_ENC_AES = 4,
+ RX_DESC_ENC_WEP104 = 5,
+};
+
struct rtl8xxxu_rxdesc16 {
#ifdef __LITTLE_ENDIAN
u32 pktlen:14;
@@ -2022,7 +2032,6 @@ struct rtl8xxxu_fileops {
extern int rtl8xxxu_debug;
-extern const struct rtl8xxxu_reg8val rtl8xxxu_gen1_mac_init_table[];
extern const u32 rtl8xxxu_iqk_phy_iq_bb_reg[];
u8 rtl8xxxu_read8(struct rtl8xxxu_priv *priv, u16 addr);
u16 rtl8xxxu_read16(struct rtl8xxxu_priv *priv, u16 addr);
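The new `rtl8xxxu_rx_desc_enc` values let the RX path distinguish "hardware decrypted" from "was never encrypted", which matters once `MFP_CAPABLE` is set: unencrypted management frames must not be flagged `RX_FLAG_DECRYPTED`. A self-contained model of the new check:

```c
#include <stdbool.h>
#include <stdio.h>

enum rx_desc_enc {
	RX_DESC_ENC_NONE = 0,
	RX_DESC_ENC_WEP40 = 1,
	RX_DESC_ENC_TKIP_WO_MIC = 2,
	RX_DESC_ENC_TKIP_MIC = 3,
	RX_DESC_ENC_AES = 4,
	RX_DESC_ENC_WEP104 = 5,
};

/* Mirrors the updated rx-path test: hardware decryption only counts
 * when software decryption is off and a real cipher is reported. */
static bool hw_decrypted(bool swdec, enum rx_desc_enc security)
{
	return !swdec && security != RX_DESC_ENC_NONE;
}

int main(void)
{
	printf("%d\n", hw_decrypted(false, RX_DESC_ENC_AES));	/* 1 */
	printf("%d\n", hw_decrypted(false, RX_DESC_ENC_NONE));	/* 0 */
	return 0;
}
```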
diff --git a/drivers/net/wireless/realtek/rtlwifi/Kconfig b/drivers/net/wireless/realtek/rtlwifi/Kconfig
index 9f6a4e35543c..1e66c1bf7c8b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/Kconfig
+++ b/drivers/net/wireless/realtek/rtlwifi/Kconfig
@@ -37,6 +37,7 @@ config RTL8192SE
config RTL8192DE
tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
depends on PCI
+ select RTL8192D_COMMON
select RTLWIFI
select RTLWIFI_PCI
help
@@ -118,6 +119,18 @@ config RTL8192CU
If you choose to build it as a module, it will be called rtl8192cu
+config RTL8192DU
+ tristate "Realtek RTL8192DU USB Wireless Network Adapter"
+ depends on USB
+ select RTLWIFI
+ select RTLWIFI_USB
+ select RTL8192D_COMMON
+ help
+ This is the driver for Realtek RTL8192DU 802.11n USB
+ wireless network adapters.
+
+ If you choose to build it as a module, it will be called rtl8192du
+
config RTLWIFI
tristate
select FW_LOADER
@@ -142,6 +155,9 @@ config RTL8192C_COMMON
depends on RTL8192CE || RTL8192CU
default y
+config RTL8192D_COMMON
+ tristate
+
config RTL8723_COMMON
tristate
depends on RTL8723AE || RTL8723BE
diff --git a/drivers/net/wireless/realtek/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile
index 09c30e428375..9cf32277c7f1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/Makefile
+++ b/drivers/net/wireless/realtek/rtlwifi/Makefile
@@ -23,7 +23,9 @@ obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
obj-$(CONFIG_RTL8192CE) += rtl8192ce/
obj-$(CONFIG_RTL8192CU) += rtl8192cu/
obj-$(CONFIG_RTL8192SE) += rtl8192se/
+obj-$(CONFIG_RTL8192D_COMMON) += rtl8192d/
obj-$(CONFIG_RTL8192DE) += rtl8192de/
+obj-$(CONFIG_RTL8192DU) += rtl8192du/
obj-$(CONFIG_RTL8723AE) += rtl8723ae/
obj-$(CONFIG_RTL8723BE) += rtl8723be/
obj-$(CONFIG_RTL8188EE) += rtl8188ee/
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 1a8d715b7c07..aab4605de9c4 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -2272,7 +2272,7 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops;
+ const struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops;
const struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops;
u8 cmd_id, cmd_len;
u8 *cmd_buf = NULL;
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.c b/drivers/net/wireless/realtek/rtlwifi/cam.c
index 32970ea4b4e7..f9d0d1394442 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.c
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.c
@@ -18,7 +18,8 @@ void rtl_cam_reset_sec_info(struct ieee80211_hw *hw)
}
static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
- u8 *mac_addr, u8 *key_cont_128, u16 us_config)
+ const u8 *mac_addr, u8 *key_cont_128,
+ u16 us_config)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -94,7 +95,7 @@ static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
"after set key, usconfig:%x\n", us_config);
}
-u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, const u8 *mac_addr,
u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
u32 ul_default_key, u8 *key_content)
{
diff --git a/drivers/net/wireless/realtek/rtlwifi/cam.h b/drivers/net/wireless/realtek/rtlwifi/cam.h
index 2461fa9afda0..144807a405b7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/cam.h
+++ b/drivers/net/wireless/realtek/rtlwifi/cam.h
@@ -14,9 +14,9 @@
#define CAM_CONFIG_NO_USEDK 0
void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
-u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
- u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
- u32 ul_default_key, u8 *key_content);
+u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, const u8 *mac_addr,
+ u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
+ u32 ul_default_key, u8 *key_content);
int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
u32 ul_key_id);
void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
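
Note: constifying mac_addr documents that the CAM code only reads the address, and lets callers pass pointers that are themselves const without a cast. A small sketch of the call-site effect (the wrapper is hypothetical):

    /* Hypothetical caller: sta is const, so sta->addr is a const u8 *,
     * which now matches rtl_cam_add_one_entry()'s prototype directly.
     */
    static u8 cam_add_for_sta(struct ieee80211_hw *hw,
                              const struct ieee80211_sta *sta,
                              u32 key_id, u8 *key_content)
    {
            return rtl_cam_add_one_entry(hw, sta->addr, key_id, key_id,
                                         0 /* enc alg */, 0, key_content);
    }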
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index 2e60a6991ca1..7537f04b1930 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -144,7 +144,7 @@ static int rtl_op_start(struct ieee80211_hw *hw)
return err;
}
-static void rtl_op_stop(struct ieee80211_hw *hw)
+static void rtl_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -547,7 +547,7 @@ static int rtl_op_suspend(struct ieee80211_hw *hw,
rtlhal->enter_pnp_sleep = true;
rtl_lps_leave(hw, true);
- rtl_op_stop(hw);
+ rtl_op_stop(hw, false);
device_set_wakeup_enable(wiphy_dev(hw->wiphy), true);
return 0;
}
@@ -633,21 +633,6 @@ static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
}
}
- if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
- rtl_dbg(rtlpriv, COMP_MAC80211, DBG_LOUD,
- "IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
- hw->conf.long_frame_max_tx_count);
- /* brought up everything changes (changed == ~0) indicates first
- * open, so use our default value instead of that of wiphy.
- */
- if (changed != ~0) {
- mac->retry_long = hw->conf.long_frame_max_tx_count;
- mac->retry_short = hw->conf.long_frame_max_tx_count;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
- (u8 *)(&hw->conf.long_frame_max_tx_count));
- }
- }
-
if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
!rtlpriv->proximity.proxim_on) {
struct ieee80211_channel *channel = hw->conf.chandef.chan;
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.c b/drivers/net/wireless/realtek/rtlwifi/efuse.c
index c1fbc29d5ca1..82cf5fb5175f 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.c
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.c
@@ -1211,7 +1211,7 @@ static u8 efuse_calculate_word_cnts(u8 word_en)
}
int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
- int max_size, u8 *hwinfo, int *params)
+ int max_size, u8 *hwinfo, const int *params)
{
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_pci_priv *rtlpcipriv = rtl_pcipriv(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/efuse.h b/drivers/net/wireless/realtek/rtlwifi/efuse.h
index 4821625ad1e5..e250ffb0f4b2 100644
--- a/drivers/net/wireless/realtek/rtlwifi/efuse.h
+++ b/drivers/net/wireless/realtek/rtlwifi/efuse.h
@@ -89,7 +89,7 @@ void efuse_force_write_vendor_id(struct ieee80211_hw *hw);
void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
void efuse_power_switch(struct ieee80211_hw *hw, u8 write, u8 pwrstate);
int rtl_get_hwinfo(struct ieee80211_hw *hw, struct rtl_priv *rtlpriv,
- int max_size, u8 *hwinfo, int *params);
+ int max_size, u8 *hwinfo, const int *params);
void rtl_fill_dummy(u8 *pfwbuf, u32 *pfwlen);
void rtl_fw_page_write(struct ieee80211_hw *hw, u32 page, u8 *buffer,
u32 size);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
index 37bb59fa8bfa..35875cda30fc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
@@ -27,7 +27,7 @@ static void rtl88e_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
*/
rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index e20f2bec45c4..ce7c28d9c874 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
@@ -31,7 +31,7 @@ static void rtl92c_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
* */
rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
index 4217c9a08d01..0195c9a3e9e8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/hw.c
@@ -489,7 +489,6 @@ static int _rtl92cu_init_power_on(struct ieee80211_hw *hw)
}
static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 out_ep_num,
u8 queue_sel)
{
@@ -505,66 +504,39 @@ static void _rtl92cu_init_queue_reserved_page(struct ieee80211_hw *hw,
u8 value8;
u32 txqpagenum, txqpageunit, txqremaininpage;
- if (!wmm_enable) {
- numpubq = (ischipn) ? CHIP_B_PAGE_NUM_PUBQ :
- CHIP_A_PAGE_NUM_PUBQ;
- txqpagenum = TX_TOTAL_PAGE_NUMBER - numpubq;
-
- txqpageunit = txqpagenum / outepnum;
- txqremaininpage = txqpagenum % outepnum;
- if (queue_sel & TX_SELE_HQ)
- numhq = txqpageunit;
- if (queue_sel & TX_SELE_LQ)
- numlq = txqpageunit;
- /* HIGH priority queue always present in the configuration of
- * 2 out-ep. Remainder pages have assigned to High queue */
- if (outepnum > 1 && txqremaininpage)
- numhq += txqremaininpage;
- /* NOTE: This step done before writing REG_RQPN. */
- if (ischipn) {
- if (queue_sel & TX_SELE_NQ)
- numnq = txqpageunit;
- value8 = (u8)_NPQ(numnq);
- rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
- }
- } else {
- /* for WMM ,number of out-ep must more than or equal to 2! */
- numpubq = ischipn ? WMM_CHIP_B_PAGE_NUM_PUBQ :
- WMM_CHIP_A_PAGE_NUM_PUBQ;
- if (queue_sel & TX_SELE_HQ) {
- numhq = ischipn ? WMM_CHIP_B_PAGE_NUM_HPQ :
- WMM_CHIP_A_PAGE_NUM_HPQ;
- }
- if (queue_sel & TX_SELE_LQ) {
- numlq = ischipn ? WMM_CHIP_B_PAGE_NUM_LPQ :
- WMM_CHIP_A_PAGE_NUM_LPQ;
- }
- /* NOTE: This step done before writing REG_RQPN. */
- if (ischipn) {
- if (queue_sel & TX_SELE_NQ)
- numnq = WMM_CHIP_B_PAGE_NUM_NPQ;
- value8 = (u8)_NPQ(numnq);
- rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
- }
+ numpubq = (ischipn) ? CHIP_B_PAGE_NUM_PUBQ :
+ CHIP_A_PAGE_NUM_PUBQ;
+ txqpagenum = TX_TOTAL_PAGE_NUMBER - numpubq;
+
+ txqpageunit = txqpagenum / outepnum;
+ txqremaininpage = txqpagenum % outepnum;
+ if (queue_sel & TX_SELE_HQ)
+ numhq = txqpageunit;
+ if (queue_sel & TX_SELE_LQ)
+ numlq = txqpageunit;
+	/* The HIGH priority queue is always present in the two out-ep
+	 * configuration. Remainder pages are assigned to the high queue.
+	 */
+ if (outepnum > 1 && txqremaininpage)
+ numhq += txqremaininpage;
+	/* NOTE: This step is done before writing REG_RQPN. */
+ if (ischipn) {
+ if (queue_sel & TX_SELE_NQ)
+ numnq = txqpageunit;
+ value8 = (u8)_NPQ(numnq);
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, value8);
}
/* TX DMA */
value32 = _HPQ(numhq) | _LPQ(numlq) | _PUBQ(numpubq) | LD_RQPN;
rtl_write_dword(rtlpriv, REG_RQPN, value32);
}
-static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw, bool wmm_enable)
+static void _rtl92c_init_trx_buffer(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 txpktbuf_bndy;
+ u8 txpktbuf_bndy = TX_PAGE_BOUNDARY;
u8 value8;
- if (!wmm_enable)
- txpktbuf_bndy = TX_PAGE_BOUNDARY;
- else /* for WMM */
- txpktbuf_bndy = (IS_NORMAL_CHIP(rtlhal->version))
- ? WMM_CHIP_B_TX_PAGE_BOUNDARY
- : WMM_CHIP_A_TX_PAGE_BOUNDARY;
rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
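
Note: with the dead wmm_enable branch gone, the reserved-page budget is computed one way only: subtract the public-queue pages from the total, split the rest evenly across the bulk-out endpoints, and give any remainder to the high queue. The arithmetic in isolation (a sketch; the parameters are illustrative, not the RTL8192CU register constants):

    /* Standalone sketch of the reserved-page split performed above. */
    static void split_tx_pages(unsigned int total, unsigned int pubq,
                               unsigned int out_eps,
                               unsigned int *hq, unsigned int *lq)
    {
            unsigned int txq = total - pubq;
            unsigned int unit = txq / out_eps;
            unsigned int rem = txq % out_eps;

            *hq = unit;
            *lq = unit;
            if (out_eps > 1)
                    *hq += rem;     /* remainder always lands on HQ */
    }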
@@ -589,7 +561,6 @@ static void _rtl92c_init_chipn_reg_priority(struct ieee80211_hw *hw, u16 beq,
}
static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 queue_sel)
{
u16 value;
@@ -614,7 +585,6 @@ static void _rtl92cu_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw,
}
static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 queue_sel)
{
u16 beq, bkq, viq, voq, mgtq, hiq;
@@ -638,67 +608,47 @@ static void _rtl92cu_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
valuelow = QUEUE_NORMAL;
break;
}
- if (!wmm_enable) {
- beq = valuelow;
- bkq = valuelow;
- viq = valuehi;
- voq = valuehi;
- mgtq = valuehi;
- hiq = valuehi;
- } else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */
- beq = valuehi;
- bkq = valuelow;
- viq = valuelow;
- voq = valuehi;
- mgtq = valuehi;
- hiq = valuehi;
- }
+
+ beq = valuelow;
+ bkq = valuelow;
+ viq = valuehi;
+ voq = valuehi;
+ mgtq = valuehi;
+ hiq = valuehi;
+
_rtl92c_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq);
pr_info("Tx queue select: 0x%02x\n", queue_sel);
}
static void _rtl92cu_init_chipn_three_out_ep_priority(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 queue_sel)
{
u16 beq, bkq, viq, voq, mgtq, hiq;
- if (!wmm_enable) { /* typical setting */
- beq = QUEUE_LOW;
- bkq = QUEUE_LOW;
- viq = QUEUE_NORMAL;
- voq = QUEUE_HIGH;
- mgtq = QUEUE_HIGH;
- hiq = QUEUE_HIGH;
- } else { /* for WMM */
- beq = QUEUE_LOW;
- bkq = QUEUE_NORMAL;
- viq = QUEUE_NORMAL;
- voq = QUEUE_HIGH;
- mgtq = QUEUE_HIGH;
- hiq = QUEUE_HIGH;
- }
+ beq = QUEUE_LOW;
+ bkq = QUEUE_LOW;
+ viq = QUEUE_NORMAL;
+ voq = QUEUE_HIGH;
+ mgtq = QUEUE_HIGH;
+ hiq = QUEUE_HIGH;
+
_rtl92c_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq);
pr_info("Tx queue select :0x%02x..\n", queue_sel);
}
static void _rtl92cu_init_chipn_queue_priority(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 out_ep_num,
u8 queue_sel)
{
switch (out_ep_num) {
case 1:
- _rtl92cu_init_chipn_one_out_ep_priority(hw, wmm_enable,
- queue_sel);
+ _rtl92cu_init_chipn_one_out_ep_priority(hw, queue_sel);
break;
case 2:
- _rtl92cu_init_chipn_two_out_ep_priority(hw, wmm_enable,
- queue_sel);
+ _rtl92cu_init_chipn_two_out_ep_priority(hw, queue_sel);
break;
case 3:
- _rtl92cu_init_chipn_three_out_ep_priority(hw, wmm_enable,
- queue_sel);
+ _rtl92cu_init_chipn_three_out_ep_priority(hw, queue_sel);
break;
default:
WARN_ON(1); /* Shall not reach here! */
@@ -707,7 +657,6 @@ static void _rtl92cu_init_chipn_queue_priority(struct ieee80211_hw *hw,
}
static void _rtl92cu_init_chipt_queue_priority(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 out_ep_num,
u8 queue_sel)
{
@@ -716,12 +665,7 @@ static void _rtl92cu_init_chipt_queue_priority(struct ieee80211_hw *hw,
switch (out_ep_num) {
case 2: /* (TX_SELE_HQ|TX_SELE_LQ) */
- if (!wmm_enable) /* typical setting */
- hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ |
- HQSEL_HIQ;
- else /* for WMM */
- hq_sele = HQSEL_VOQ | HQSEL_BEQ | HQSEL_MGTQ |
- HQSEL_HIQ;
+ hq_sele = HQSEL_VOQ | HQSEL_VIQ | HQSEL_MGTQ | HQSEL_HIQ;
break;
case 1:
if (TX_SELE_LQ == queue_sel) {
@@ -742,18 +686,15 @@ static void _rtl92cu_init_chipt_queue_priority(struct ieee80211_hw *hw,
}
static void _rtl92cu_init_queue_priority(struct ieee80211_hw *hw,
- bool wmm_enable,
u8 out_ep_num,
u8 queue_sel)
{
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
if (IS_NORMAL_CHIP(rtlhal->version))
- _rtl92cu_init_chipn_queue_priority(hw, wmm_enable, out_ep_num,
- queue_sel);
+ _rtl92cu_init_chipn_queue_priority(hw, out_ep_num, queue_sel);
else
- _rtl92cu_init_chipt_queue_priority(hw, wmm_enable, out_ep_num,
- queue_sel);
+ _rtl92cu_init_chipt_queue_priority(hw, out_ep_num, queue_sel);
}
static void _rtl92cu_init_wmac_setting(struct ieee80211_hw *hw)
@@ -810,8 +751,7 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
int err = 0;
- u32 boundary = 0;
- u8 wmm_enable = false; /* TODO */
+ u32 boundary = TX_PAGE_BOUNDARY;
u8 out_ep_nums = rtlusb->out_ep_nums;
u8 queue_sel = rtlusb->out_queue_sel;
@@ -821,22 +761,13 @@ static int _rtl92cu_init_mac(struct ieee80211_hw *hw)
pr_err("Failed to init power on!\n");
return err;
}
- if (!wmm_enable) {
- boundary = TX_PAGE_BOUNDARY;
- } else { /* for WMM */
- boundary = (IS_NORMAL_CHIP(rtlhal->version))
- ? WMM_CHIP_B_TX_PAGE_BOUNDARY
- : WMM_CHIP_A_TX_PAGE_BOUNDARY;
- }
if (!rtl92c_init_llt_table(hw, boundary)) {
pr_err("Failed to init LLT Table!\n");
return -EINVAL;
}
- _rtl92cu_init_queue_reserved_page(hw, wmm_enable, out_ep_nums,
- queue_sel);
- _rtl92c_init_trx_buffer(hw, wmm_enable);
- _rtl92cu_init_queue_priority(hw, wmm_enable, out_ep_nums,
- queue_sel);
+ _rtl92cu_init_queue_reserved_page(hw, out_ep_nums, queue_sel);
+ _rtl92c_init_trx_buffer(hw);
+ _rtl92cu_init_queue_priority(hw, out_ep_nums, queue_sel);
/* Get Rx PHY status in order to report RSSI and others. */
rtl92c_init_driver_info_size(hw, RTL92C_DRIVER_INFO_SIZE);
rtl92c_init_interrupt(hw);
@@ -1553,7 +1484,6 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- enum wireless_mode wirelessmode = mac->mode;
u8 idx = 0;
switch (variable) {
@@ -1605,36 +1535,15 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
}
case HW_VAR_SLOT_TIME:{
u8 e_aci;
- u8 QOS_MODE = 1;
rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
"HW_VAR_SLOT_TIME %x\n", val[0]);
- if (QOS_MODE) {
- for (e_aci = 0; e_aci < AC_MAX; e_aci++)
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- &e_aci);
- } else {
- u8 sifstime = 0;
- u8 u1baifs;
- if (IS_WIRELESS_MODE_A(wirelessmode) ||
- IS_WIRELESS_MODE_N_24G(wirelessmode) ||
- IS_WIRELESS_MODE_N_5G(wirelessmode))
- sifstime = 16;
- else
- sifstime = 10;
- u1baifs = sifstime + (2 * val[0]);
- rtl_write_byte(rtlpriv, REG_EDCA_VO_PARAM,
- u1baifs);
- rtl_write_byte(rtlpriv, REG_EDCA_VI_PARAM,
- u1baifs);
- rtl_write_byte(rtlpriv, REG_EDCA_BE_PARAM,
- u1baifs);
- rtl_write_byte(rtlpriv, REG_EDCA_BK_PARAM,
- u1baifs);
- }
+ for (e_aci = 0; e_aci < AC_MAX; e_aci++)
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AC_PARAM,
+ &e_aci);
break;
}
case HW_VAR_ACK_PREAMBLE:{
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
index 48be7e346efc..c9b9e2bc90cc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
@@ -53,8 +53,6 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
} else {
fw_name = "rtlwifi/rtl8192cufw_TMSC.bin";
}
- /* provide name of alternative file */
- rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin";
pr_info("Loading firmware %s\n", fw_name);
rtlpriv->max_fw_size = 0x4000;
err = request_firmware_nowait(THIS_MODULE, 1,
@@ -160,6 +158,7 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = {
static struct rtl_hal_cfg rtl92cu_hal_cfg = {
.name = "rtl92c_usb",
+ .alt_fw_name = "rtlwifi/rtl8192cufw.bin",
.ops = &rtl8192cu_hal_ops,
.mod_params = &rtl92cu_mod_params,
.usb_interface_cfg = &rtl92cu_interface_cfg,
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile
new file mode 100644
index 000000000000..beebdfa3f7ff
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+rtl8192d-common-objs := \
+ dm_common.o \
+ fw_common.o \
+ hw_common.o \
+ main.o \
+ phy_common.o \
+ rf_common.o \
+ trx_common.o
+
+obj-$(CONFIG_RTL8192D_COMMON) += rtl8192d-common.o
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/def.h
index 21726d9b4aef..21726d9b4aef 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/def.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/def.h
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c
new file mode 100644
index 000000000000..20373ce998bf
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.c
@@ -0,0 +1,1061 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../core.h"
+#include "reg.h"
+#include "def.h"
+#include "phy_common.h"
+#include "dm_common.h"
+
+static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
+ 0x7f8001fe, /* 0, +6.0dB */
+ 0x788001e2, /* 1, +5.5dB */
+ 0x71c001c7, /* 2, +5.0dB */
+ 0x6b8001ae, /* 3, +4.5dB */
+ 0x65400195, /* 4, +4.0dB */
+ 0x5fc0017f, /* 5, +3.5dB */
+ 0x5a400169, /* 6, +3.0dB */
+ 0x55400155, /* 7, +2.5dB */
+ 0x50800142, /* 8, +2.0dB */
+ 0x4c000130, /* 9, +1.5dB */
+ 0x47c0011f, /* 10, +1.0dB */
+ 0x43c0010f, /* 11, +0.5dB */
+ 0x40000100, /* 12, +0dB */
+ 0x3c8000f2, /* 13, -0.5dB */
+ 0x390000e4, /* 14, -1.0dB */
+ 0x35c000d7, /* 15, -1.5dB */
+ 0x32c000cb, /* 16, -2.0dB */
+ 0x300000c0, /* 17, -2.5dB */
+ 0x2d4000b5, /* 18, -3.0dB */
+ 0x2ac000ab, /* 19, -3.5dB */
+ 0x288000a2, /* 20, -4.0dB */
+ 0x26000098, /* 21, -4.5dB */
+ 0x24000090, /* 22, -5.0dB */
+ 0x22000088, /* 23, -5.5dB */
+ 0x20000080, /* 24, -6.0dB */
+ 0x1e400079, /* 25, -6.5dB */
+ 0x1c800072, /* 26, -7.0dB */
+	0x1b00006c, /* 27, -7.5dB */
+ 0x19800066, /* 28, -8.0dB */
+ 0x18000060, /* 29, -8.5dB */
+ 0x16c0005b, /* 30, -9.0dB */
+ 0x15800056, /* 31, -9.5dB */
+ 0x14400051, /* 32, -10.0dB */
+ 0x1300004c, /* 33, -10.5dB */
+ 0x12000048, /* 34, -11.0dB */
+ 0x11000044, /* 35, -11.5dB */
+ 0x10000040, /* 36, -12.0dB */
+ 0x0f00003c, /* 37, -12.5dB */
+ 0x0e400039, /* 38, -13.0dB */
+ 0x0d800036, /* 39, -13.5dB */
+ 0x0cc00033, /* 40, -14.0dB */
+ 0x0c000030, /* 41, -14.5dB */
+ 0x0b40002d, /* 42, -15.0dB */
+};
+
+static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
+};
+
+static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
+};
+
+static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
+{
+ static const u8 index_mapping[RX_INDEX_MAPPING_NUM] = {
+ 0x0f, 0x0f, 0x0d, 0x0c, 0x0b,
+ 0x0a, 0x09, 0x08, 0x07, 0x06,
+ 0x05, 0x04, 0x04, 0x03, 0x02
+ };
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int i, idx;
+ u32 u4tmp;
+
+ idx = rtlpriv->efuse.eeprom_thermalmeter - rtlpriv->dm.thermalvalue_rxgain;
+ u4tmp = index_mapping[idx] << 12;
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "===> Rx Gain %x\n", u4tmp);
+
+ for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
+ rtl_set_rfreg(hw, i, 0x3C, RFREG_OFFSET_MASK,
+ (rtlpriv->phy.reg_rf3c[i] & ~0xF000) | u4tmp);
+}
+
+static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
+ u8 *cck_index_old)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned long flag = 0;
+ const u8 *cckswing;
+ long temp_cck;
+ int i;
+
+ /* Query CCK default setting From 0xa24 */
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2,
+ MASKDWORD) & MASKCCK;
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+
+ for (i = 0; i < CCK_TABLE_LENGTH; i++) {
+ if (rtlpriv->dm.cck_inch14)
+ cckswing = &cckswing_table_ch14[i][2];
+ else
+ cckswing = &cckswing_table_ch1ch13[i][2];
+
+ if (temp_cck == le32_to_cpu(*((__le32 *)cckswing))) {
+ *cck_index_old = (u8)i;
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
+ RCCK0_TXFILTER2, temp_cck,
+ *cck_index_old,
+ rtlpriv->dm.cck_inch14);
+ break;
+ }
+ }
+ *temp_cckg = temp_cck;
+}
+
+static void rtl92d_bandtype_5G(struct rtl_hal *rtlhal, u8 *ofdm_index,
+ bool *internal_pa, u8 thermalvalue, u8 delta,
+ u8 rf, struct rtl_efuse *rtlefuse,
+ struct rtl_priv *rtlpriv, struct rtl_phy *rtlphy,
+ const u8 index_mapping[5][INDEX_MAPPING_NUM],
+ const u8 index_mapping_pa[8][INDEX_MAPPING_NUM])
+{
+ u8 offset = 0;
+ u8 index;
+ int i;
+
+ for (i = 0; i < rf; i++) {
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 1) /* MAC 1 5G */
+ *internal_pa = rtlefuse->internal_pa_5g[1];
+ else
+ *internal_pa = rtlefuse->internal_pa_5g[i];
+
+ if (*internal_pa) {
+ if (rtlhal->interfaceindex == 1 || i == rf)
+ offset = 4;
+ else
+ offset = 0;
+ if (rtlphy->current_channel >= 100 &&
+ rtlphy->current_channel <= 165)
+ offset += 2;
+ } else {
+ if (rtlhal->interfaceindex == 1 || i == rf)
+ offset = 2;
+ else
+ offset = 0;
+ }
+
+ if (thermalvalue > rtlefuse->eeprom_thermalmeter)
+ offset++;
+
+ if (*internal_pa) {
+ if (delta > INDEX_MAPPING_NUM - 1)
+ index = index_mapping_pa[offset]
+ [INDEX_MAPPING_NUM - 1];
+ else
+ index =
+ index_mapping_pa[offset][delta];
+ } else {
+ if (delta > INDEX_MAPPING_NUM - 1)
+ index =
+ index_mapping[offset][INDEX_MAPPING_NUM - 1];
+ else
+ index = index_mapping[offset][delta];
+ }
+
+ if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
+ if (*internal_pa && thermalvalue > 0x12) {
+ ofdm_index[i] = rtlpriv->dm.ofdm_index[i] -
+ ((delta / 2) * 3 + (delta % 2));
+ } else {
+ ofdm_index[i] -= index;
+ }
+ } else {
+ ofdm_index[i] += index;
+ }
+ }
+}
+
+static void
+rtl92d_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw *hw)
+{
+ static const u8 index_mapping[5][INDEX_MAPPING_NUM] = {
+ /* 5G, path A/MAC 0, decrease power */
+ {0, 1, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
+ /* 5G, path A/MAC 0, increase power */
+ {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
+ /* 5G, path B/MAC 1, decrease power */
+ {0, 2, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
+ /* 5G, path B/MAC 1, increase power */
+ {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
+		/* 2.4G, for decrease power */
+ {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10},
+ };
+ static const u8 index_mapping_internal_pa[8][INDEX_MAPPING_NUM] = {
+ /* 5G, path A/MAC 0, ch36-64, decrease power */
+ {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
+ /* 5G, path A/MAC 0, ch36-64, increase power */
+ {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
+ /* 5G, path A/MAC 0, ch100-165, decrease power */
+ {0, 1, 2, 3, 5, 6, 8, 10, 11, 13, 14, 15, 15},
+ /* 5G, path A/MAC 0, ch100-165, increase power */
+ {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
+ /* 5G, path B/MAC 1, ch36-64, decrease power */
+ {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
+ /* 5G, path B/MAC 1, ch36-64, increase power */
+ {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
+ /* 5G, path B/MAC 1, ch100-165, decrease power */
+ {0, 1, 2, 3, 5, 6, 8, 9, 10, 12, 13, 14, 14},
+ /* 5G, path B/MAC 1, ch100-165, increase power */
+ {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
+ };
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_dm *dm = &rtlpriv->dm;
+ u8 thermalvalue, delta, delta_lck, delta_iqk, delta_rxgain;
+ u8 ofdm_min_index = 6, ofdm_min_index_internal_pa = 3, rf;
+ long ele_a = 0, ele_d, temp_cck, val_x, value32;
+ bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
+ u8 offset, thermalvalue_avg_count = 0;
+ u8 ofdm_index_old[2] = {0, 0};
+ u32 thermalvalue_avg = 0;
+ bool internal_pa = false;
+ long val_y, ele_c = 0;
+ s8 cck_index_old = 0;
+ u8 indexforchannel;
+ u8 ofdm_index[2];
+ s8 cck_index = 0;
+ u8 index, swing;
+ int i;
+
+ indexforchannel = rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel);
+
+ dm->txpower_trackinginit = true;
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n");
+
+ thermalvalue = (u8)rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800);
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
+ thermalvalue,
+ dm->thermalvalue, rtlefuse->eeprom_thermalmeter);
+
+ if (!thermalvalue)
+ goto exit;
+
+ if (is2t)
+ rf = 2;
+ else
+ rf = 1;
+
+ if (dm->thermalvalue && !rtlhal->reloadtxpowerindex)
+ goto old_index_done;
+
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) & MASKOFDM_D;
+
+ for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
+ if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
+ ofdm_index_old[0] = (u8)i;
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
+ ROFDM0_XATXIQIMBALANCE,
+ ele_d, ofdm_index_old[0]);
+ break;
+ }
+ }
+
+ if (is2t) {
+ ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD);
+ ele_d &= MASKOFDM_D;
+
+ for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
+ if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
+ ofdm_index_old[1] = (u8)i;
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
+ DBG_LOUD,
+ "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
+ ROFDM0_XBTXIQIMBALANCE, ele_d,
+ ofdm_index_old[1]);
+ break;
+ }
+ }
+ }
+
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ rtl92d_bandtype_2_4G(hw, &temp_cck, &cck_index_old);
+ } else {
+ temp_cck = 0x090e1317;
+ cck_index_old = 12;
+ }
+
+ if (!dm->thermalvalue) {
+ dm->thermalvalue = rtlefuse->eeprom_thermalmeter;
+ dm->thermalvalue_lck = thermalvalue;
+ dm->thermalvalue_iqk = thermalvalue;
+ dm->thermalvalue_rxgain = rtlefuse->eeprom_thermalmeter;
+
+ for (i = 0; i < rf; i++)
+ dm->ofdm_index[i] = ofdm_index_old[i];
+
+ dm->cck_index = cck_index_old;
+ }
+
+ if (rtlhal->reloadtxpowerindex) {
+ for (i = 0; i < rf; i++)
+ dm->ofdm_index[i] = ofdm_index_old[i];
+
+ dm->cck_index = cck_index_old;
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "reload ofdm index for band switch\n");
+ }
+
+old_index_done:
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] = dm->ofdm_index[i];
+
+ dm->thermalvalue_avg[dm->thermalvalue_avg_index] = thermalvalue;
+ dm->thermalvalue_avg_index++;
+
+ if (dm->thermalvalue_avg_index == AVG_THERMAL_NUM)
+ dm->thermalvalue_avg_index = 0;
+
+ for (i = 0; i < AVG_THERMAL_NUM; i++) {
+ if (dm->thermalvalue_avg[i]) {
+ thermalvalue_avg += dm->thermalvalue_avg[i];
+ thermalvalue_avg_count++;
+ }
+ }
+
+ if (thermalvalue_avg_count)
+ thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count);
+
+ if (rtlhal->reloadtxpowerindex) {
+ delta = abs_diff(thermalvalue, rtlefuse->eeprom_thermalmeter);
+ rtlhal->reloadtxpowerindex = false;
+ dm->done_txpower = false;
+ } else if (dm->done_txpower) {
+ delta = abs_diff(thermalvalue, dm->thermalvalue);
+ } else {
+ delta = abs_diff(thermalvalue, rtlefuse->eeprom_thermalmeter);
+ }
+
+ delta_lck = abs_diff(thermalvalue, dm->thermalvalue_lck);
+ delta_iqk = abs_diff(thermalvalue, dm->thermalvalue_iqk);
+ delta_rxgain = abs_diff(thermalvalue, dm->thermalvalue_rxgain);
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
+ thermalvalue, dm->thermalvalue, rtlefuse->eeprom_thermalmeter,
+ delta, delta_lck, delta_iqk);
+
+ if (delta_lck > rtlefuse->delta_lck && rtlefuse->delta_lck != 0) {
+ dm->thermalvalue_lck = thermalvalue;
+ rtlpriv->cfg->ops->phy_lc_calibrate(hw, is2t);
+ }
+
+ if (delta == 0 || !dm->txpower_track_control)
+ goto check_delta;
+
+ dm->done_txpower = true;
+ delta = abs_diff(thermalvalue, rtlefuse->eeprom_thermalmeter);
+
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ offset = 4;
+ if (delta > INDEX_MAPPING_NUM - 1)
+ index = index_mapping[offset][INDEX_MAPPING_NUM - 1];
+ else
+ index = index_mapping[offset][delta];
+
+ if (thermalvalue > dm->thermalvalue) {
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] -= delta;
+
+ cck_index -= delta;
+ } else {
+ for (i = 0; i < rf; i++)
+ ofdm_index[i] += index;
+
+ cck_index += index;
+ }
+ } else if (rtlhal->current_bandtype == BAND_ON_5G) {
+ rtl92d_bandtype_5G(rtlhal, ofdm_index, &internal_pa,
+ thermalvalue, delta, rf, rtlefuse, rtlpriv,
+ rtlphy, index_mapping,
+ index_mapping_internal_pa);
+ }
+
+ if (is2t) {
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
+ dm->ofdm_index[0], dm->ofdm_index[1], dm->cck_index);
+ } else {
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "temp OFDM_A_index=0x%x, cck_index = 0x%x\n",
+ dm->ofdm_index[0], dm->cck_index);
+ }
+
+ for (i = 0; i < rf; i++) {
+ if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1) {
+ ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1;
+ } else if (internal_pa ||
+ rtlhal->current_bandtype == BAND_ON_2_4G) {
+ if (ofdm_index[i] < ofdm_min_index_internal_pa)
+ ofdm_index[i] = ofdm_min_index_internal_pa;
+ } else if (ofdm_index[i] < ofdm_min_index) {
+ ofdm_index[i] = ofdm_min_index;
+ }
+ }
+
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ if (cck_index > CCK_TABLE_SIZE - 1)
+ cck_index = CCK_TABLE_SIZE - 1;
+ else if (cck_index < 0)
+ cck_index = 0;
+ }
+
+ if (is2t) {
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
+ ofdm_index[0], ofdm_index[1], cck_index);
+ } else {
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "new OFDM_A_index=0x%x, cck_index = 0x%x\n",
+ ofdm_index[0], cck_index);
+ }
+
+ ele_d = (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
+ val_x = rtlphy->iqk_matrix[indexforchannel].value[0][0];
+ val_y = rtlphy->iqk_matrix[indexforchannel].value[0][1];
+
+ if (val_x != 0) {
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+ ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
+
+ /* new element C = element D x Y */
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+ ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
+
+ /* write new elements A, C, D to regC80 and
+ * regC94, element B is always 0
+ */
+ value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a;
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD, value32);
+
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, value32);
+
+ value32 = ((val_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), value32);
+
+ } else {
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
+ ofdmswing_table[ofdm_index[0]]);
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0x00);
+ }
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
+ rtlhal->interfaceindex,
+ val_x, val_y, ele_a, ele_c, ele_d,
+ val_x, val_y);
+
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* Adjust CCK according to IQK result */
+ for (i = 0; i < 8; i++) {
+ if (dm->cck_inch14)
+ swing = cckswing_table_ch14[cck_index][i];
+ else
+ swing = cckswing_table_ch1ch13[cck_index][i];
+
+ rtl_write_byte(rtlpriv, 0xa22 + i, swing);
+ }
+ }
+
+ if (is2t) {
+ ele_d = (ofdmswing_table[ofdm_index[1]] & 0xFFC00000) >> 22;
+ val_x = rtlphy->iqk_matrix[indexforchannel].value[0][4];
+ val_y = rtlphy->iqk_matrix[indexforchannel].value[0][5];
+
+ if (val_x != 0) {
+ if ((val_x & 0x00000200) != 0)
+ /* consider minus */
+ val_x = val_x | 0xFFFFFC00;
+ ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
+
+ /* new element C = element D x Y */
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+			ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
+
+ /* write new elements A, C, D to regC88
+ * and regC9C, element B is always 0
+ */
+ value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a;
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD, value32);
+
+ value32 = (ele_c & 0x000003C0) >> 6;
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, value32);
+
+ value32 = ((val_x * ele_d) >> 7) & 0x01;
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), value32);
+ } else {
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD, ofdmswing_table[ofdm_index[1]]);
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE, MASKH4BITS, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), 0x00);
+ }
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
+ val_x, val_y, ele_a, ele_c, ele_d, val_x, val_y);
+ }
+
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
+ rtl_get_bbreg(hw, 0xc80, MASKDWORD),
+ rtl_get_bbreg(hw, 0xc94, MASKDWORD),
+ rtl_get_rfreg(hw, RF90_PATH_A, 0x24, RFREG_OFFSET_MASK));
+
+check_delta:
+ if (delta_iqk > rtlefuse->delta_iqk && rtlefuse->delta_iqk != 0) {
+ rtl92d_phy_reset_iqk_result(hw);
+ dm->thermalvalue_iqk = thermalvalue;
+ rtlpriv->cfg->ops->phy_iq_calibrate(hw);
+ }
+
+ if (delta_rxgain > 0 && rtlhal->current_bandtype == BAND_ON_5G &&
+ thermalvalue <= rtlefuse->eeprom_thermalmeter) {
+ dm->thermalvalue_rxgain = thermalvalue;
+ rtl92d_dm_rxgain_tracking_thermalmeter(hw);
+ }
+
+ if (dm->txpower_track_control)
+ dm->thermalvalue = thermalvalue;
+
+exit:
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
+}
+
+void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.txpower_tracking = true;
+ rtlpriv->dm.txpower_trackinginit = false;
+ rtlpriv->dm.txpower_track_control = true;
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "pMgntInfo->txpower_tracking = %d\n",
+ rtlpriv->dm.txpower_tracking);
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_initialize_txpower_tracking);
+
+void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (!rtlpriv->dm.txpower_tracking)
+ return;
+
+ if (!rtlpriv->dm.tm_trigger) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) |
+ BIT(16), 0x03);
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Trigger 92S Thermal Meter!!\n");
+ rtlpriv->dm.tm_trigger = 1;
+ } else {
+ rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
+ "Schedule TxPowerTracking direct call!!\n");
+ rtl92d_dm_txpower_tracking_callback_thermalmeter(hw);
+ rtlpriv->dm.tm_trigger = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_check_txpower_tracking_thermal_meter);
+
+void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt;
+ unsigned long flag = 0;
+ u32 ret_value;
+
+ /* hold ofdm counter */
+ rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */
+ rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /* hold page D counter */
+
+ ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
+ falsealm_cnt->cnt_fast_fsync_fail = ret_value & 0xffff;
+ falsealm_cnt->cnt_sb_search_fail = (ret_value & 0xffff0000) >> 16;
+
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
+ falsealm_cnt->cnt_parity_fail = (ret_value & 0xffff0000) >> 16;
+
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
+ falsealm_cnt->cnt_rate_illegal = ret_value & 0xffff;
+ falsealm_cnt->cnt_crc8_fail = (ret_value & 0xffff0000) >> 16;
+
+ ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
+ falsealm_cnt->cnt_mcs_fail = ret_value & 0xffff;
+
+ falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
+ falsealm_cnt->cnt_rate_illegal +
+ falsealm_cnt->cnt_crc8_fail +
+ falsealm_cnt->cnt_mcs_fail +
+ falsealm_cnt->cnt_fast_fsync_fail +
+ falsealm_cnt->cnt_sb_search_fail;
+
+ if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
+ falsealm_cnt->cnt_cck_fail = ret_value;
+ ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
+ falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ } else {
+ falsealm_cnt->cnt_cck_fail = 0;
+ }
+
+ falsealm_cnt->cnt_all = falsealm_cnt->cnt_ofdm_fail +
+ falsealm_cnt->cnt_cck_fail;
+
+ /* reset false alarm counter registers */
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
+
+ /* update ofdm counter */
+ rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0); /* update page C counter */
+ rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0); /* update page D counter */
+
+ if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
+ /* reset cck counter */
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
+ /* enable cck counter */
+ rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ }
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n",
+ falsealm_cnt->cnt_fast_fsync_fail,
+ falsealm_cnt->cnt_sb_search_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n",
+ falsealm_cnt->cnt_parity_fail,
+ falsealm_cnt->cnt_rate_illegal,
+ falsealm_cnt->cnt_crc8_fail,
+ falsealm_cnt->cnt_mcs_fail);
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n",
+ falsealm_cnt->cnt_ofdm_fail,
+ falsealm_cnt->cnt_cck_fail,
+ falsealm_cnt->cnt_all);
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_false_alarm_counter_statistics);
+
+void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ /* Determine the minimum RSSI */
+ if (mac->link_state < MAC80211_LINKED &&
+ rtlpriv->dm.entry_min_undec_sm_pwdb == 0) {
+ de_digtable->min_undec_pwdb_for_dm = 0;
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "Not connected to any\n");
+ }
+ if (mac->link_state >= MAC80211_LINKED) {
+ if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC) {
+ de_digtable->min_undec_pwdb_for_dm =
+ rtlpriv->dm.entry_min_undec_sm_pwdb;
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Client PWDB = 0x%lx\n",
+ rtlpriv->dm.entry_min_undec_sm_pwdb);
+ } else {
+ de_digtable->min_undec_pwdb_for_dm =
+ rtlpriv->dm.undec_sm_pwdb;
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "STA Default Port PWDB = 0x%x\n",
+ de_digtable->min_undec_pwdb_for_dm);
+ }
+ } else {
+ de_digtable->min_undec_pwdb_for_dm =
+ rtlpriv->dm.entry_min_undec_sm_pwdb;
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
+ "AP Ext Port or disconnect PWDB = 0x%x\n",
+ de_digtable->min_undec_pwdb_for_dm);
+ }
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
+ de_digtable->min_undec_pwdb_for_dm);
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_find_minimum_rssi);
+
+static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+ unsigned long flag = 0;
+
+ if (de_digtable->cursta_cstate == DIG_STA_CONNECT) {
+ if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
+ if (de_digtable->min_undec_pwdb_for_dm <= 25)
+ de_digtable->cur_cck_pd_state =
+ CCK_PD_STAGE_LOWRSSI;
+ else
+ de_digtable->cur_cck_pd_state =
+ CCK_PD_STAGE_HIGHRSSI;
+ } else {
+ if (de_digtable->min_undec_pwdb_for_dm <= 20)
+ de_digtable->cur_cck_pd_state =
+ CCK_PD_STAGE_LOWRSSI;
+ else
+ de_digtable->cur_cck_pd_state =
+ CCK_PD_STAGE_HIGHRSSI;
+ }
+ } else {
+ de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
+ }
+ if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) {
+ if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ } else {
+ rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
+ rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
+ rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
+ }
+ de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
+ }
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
+ de_digtable->cursta_cstate == DIG_STA_CONNECT ?
+ "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
+ de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
+ "Low RSSI " : "High RSSI ");
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
+ IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
+}
+
+void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
+ de_digtable->cur_igvalue, de_digtable->pre_igvalue,
+ de_digtable->back_val);
+ if (!de_digtable->dig_enable_flag) {
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
+ de_digtable->pre_igvalue = 0x17;
+ return;
+ }
+ if (de_digtable->pre_igvalue != de_digtable->cur_igvalue) {
+ rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
+ de_digtable->cur_igvalue);
+ rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
+ de_digtable->cur_igvalue);
+ de_digtable->pre_igvalue = de_digtable->cur_igvalue;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_write_dig);
+
+static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
+{
+ struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED &&
+ rtlpriv->mac80211.vendor == PEER_CISCO) {
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
+ if (de_digtable->last_min_undec_pwdb_for_dm >= 50 &&
+ de_digtable->min_undec_pwdb_for_dm < 50) {
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Early Mode Off\n");
+ } else if (de_digtable->last_min_undec_pwdb_for_dm <= 55 &&
+ de_digtable->min_undec_pwdb_for_dm > 55) {
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "Early Mode On\n");
+ }
+ } else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) {
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n");
+ }
+}
+
+void rtl92d_dm_dig(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+ u8 value_igi = de_digtable->cur_igvalue;
+ struct false_alarm_statistics *falsealm_cnt = &rtlpriv->falsealm_cnt;
+
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
+ if (rtlpriv->rtlhal.earlymode_enable) {
+ rtl92d_early_mode_enabled(rtlpriv);
+ de_digtable->last_min_undec_pwdb_for_dm =
+ de_digtable->min_undec_pwdb_for_dm;
+ }
+ if (!rtlpriv->dm.dm_initialgain_enable)
+ return;
+
+	/* Because we send data packets while scanning, returning here
+	 * would lower WEP throughput with some APs (e.g. gear-3700).
+	 * This is where the mac80211 driver differs from the old
+	 * ieee80211 driver.
+	 */
+ /* if (rtlpriv->mac80211.act_scanning)
+ * return;
+ */
+
+	/* Not in STA mode, return for now */
+ if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+ return;
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
+ /* Decide the current status and if modify initial gain or not */
+ if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
+ de_digtable->cursta_cstate = DIG_STA_CONNECT;
+ else
+ de_digtable->cursta_cstate = DIG_STA_DISCONNECT;
+
+ /* adjust initial gain according to false alarm counter */
+ if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
+ value_igi--;
+ else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH1)
+ value_igi += 0;
+ else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH2)
+ value_igi++;
+ else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2)
+ value_igi += 2;
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
+ de_digtable->large_fa_hit, de_digtable->forbidden_igi);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
+ de_digtable->recover_cnt, de_digtable->rx_gain_min);
+
+ /* deal with abnormally large false alarm */
+ if (falsealm_cnt->cnt_all > 10000) {
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG(): Abnormally false alarm case\n");
+
+ de_digtable->large_fa_hit++;
+ if (de_digtable->forbidden_igi < de_digtable->cur_igvalue) {
+ de_digtable->forbidden_igi = de_digtable->cur_igvalue;
+ de_digtable->large_fa_hit = 1;
+ }
+ if (de_digtable->large_fa_hit >= 3) {
+ if ((de_digtable->forbidden_igi + 1) > DM_DIG_MAX)
+ de_digtable->rx_gain_min = DM_DIG_MAX;
+ else
+ de_digtable->rx_gain_min =
+ (de_digtable->forbidden_igi + 1);
+ de_digtable->recover_cnt = 3600; /* 3600=2hr */
+ }
+ } else {
+ /* Recovery mechanism for IGI lower bound */
+ if (de_digtable->recover_cnt != 0) {
+ de_digtable->recover_cnt--;
+ } else {
+ if (de_digtable->large_fa_hit == 0) {
+ if ((de_digtable->forbidden_igi - 1) <
+ DM_DIG_FA_LOWER) {
+ de_digtable->forbidden_igi =
+ DM_DIG_FA_LOWER;
+ de_digtable->rx_gain_min =
+ DM_DIG_FA_LOWER;
+
+ } else {
+ de_digtable->forbidden_igi--;
+ de_digtable->rx_gain_min =
+ (de_digtable->forbidden_igi + 1);
+ }
+ } else if (de_digtable->large_fa_hit == 3) {
+ de_digtable->large_fa_hit = 0;
+ }
+ }
+ }
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
+ de_digtable->large_fa_hit, de_digtable->forbidden_igi);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
+ "dm_DIG() After: recover_cnt=%d, rx_gain_min=%x\n",
+ de_digtable->recover_cnt, de_digtable->rx_gain_min);
+
+ if (value_igi > DM_DIG_MAX)
+ value_igi = DM_DIG_MAX;
+ else if (value_igi < de_digtable->rx_gain_min)
+ value_igi = de_digtable->rx_gain_min;
+ de_digtable->cur_igvalue = value_igi;
+ rtl92d_dm_write_dig(hw);
+ if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
+ rtl92d_dm_cck_packet_detection_thresh(hw);
+ rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n");
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_dig);
+
+void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.current_turbo_edca = false;
+ rtlpriv->dm.is_any_nonbepkts = false;
+ rtlpriv->dm.is_cur_rdlstate = false;
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_init_edca_turbo);
+
+void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ const u32 edca_be_ul = 0x5ea42b;
+ const u32 edca_be_dl = 0x5ea42b;
+ static u64 last_txok_cnt;
+ static u64 last_rxok_cnt;
+ u64 cur_txok_cnt;
+ u64 cur_rxok_cnt;
+
+ if (mac->link_state != MAC80211_LINKED) {
+ rtlpriv->dm.current_turbo_edca = false;
+ goto exit;
+ }
+
+ if (!rtlpriv->dm.is_any_nonbepkts &&
+ !rtlpriv->dm.disable_framebursting) {
+ cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
+ cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
+ if (cur_rxok_cnt > 4 * cur_txok_cnt) {
+ if (!rtlpriv->dm.is_cur_rdlstate ||
+ !rtlpriv->dm.current_turbo_edca) {
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
+ edca_be_dl);
+ rtlpriv->dm.is_cur_rdlstate = true;
+ }
+ } else {
+ if (rtlpriv->dm.is_cur_rdlstate ||
+ !rtlpriv->dm.current_turbo_edca) {
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
+ edca_be_ul);
+ rtlpriv->dm.is_cur_rdlstate = false;
+ }
+ }
+ rtlpriv->dm.current_turbo_edca = true;
+ } else {
+ if (rtlpriv->dm.current_turbo_edca) {
+ u8 tmp = AC0_BE;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
+ &tmp);
+ rtlpriv->dm.current_turbo_edca = false;
+ }
+ }
+
+exit:
+ rtlpriv->dm.is_any_nonbepkts = false;
+ last_txok_cnt = rtlpriv->stats.txbytesunicast;
+ last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_check_edca_turbo);
+
+void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rate_adaptive *ra = &rtlpriv->ra;
+
+ ra->ratr_state = DM_RATR_STA_INIT;
+ ra->pre_ratr_state = DM_RATR_STA_INIT;
+ if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
+ rtlpriv->dm.useramask = true;
+ else
+ rtlpriv->dm.useramask = false;
+}
+EXPORT_SYMBOL_GPL(rtl92d_dm_init_rate_adaptive_mask);
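
Note: rtl92d_dm_txpower_tracking_callback_thermalmeter() above smooths the raw thermal reading over a small ring of past samples before taking unsigned distances with abs_diff() against stored reference values. The averaging step in isolation (a minimal sketch; AVG_SAMPLES stands in for AVG_THERMAL_NUM):

    #include <linux/math.h>         /* abs_diff() */

    #define AVG_SAMPLES 8           /* stand-in for AVG_THERMAL_NUM */

    /* Sketch: record a sample, average the non-zero slots, then return
     * the unsigned delta against a reference thermal value.
     */
    static u8 thermal_delta(u8 ring[AVG_SAMPLES], u8 *idx, u8 sample,
                            u8 reference)
    {
            unsigned int sum = 0, count = 0, i;

            ring[*idx] = sample;
            *idx = (*idx + 1) % AVG_SAMPLES;

            for (i = 0; i < AVG_SAMPLES; i++) {
                    if (ring[i]) {
                            sum += ring[i];
                            count++;
                    }
            }
            if (count)
                    sample = sum / count;

            return abs_diff(sample, reference);
    }

Separate abs_diff() distances (delta, delta_lck, delta_iqk, delta_rxgain) then gate the power-index update, LC calibration, IQ calibration and RX-gain tracking respectively.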
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h
new file mode 100644
index 000000000000..a146fc975421
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/dm_common.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#ifndef __RTL92D_DM_COMMON_H__
+#define __RTL92D_DM_COMMON_H__
+
+#define HAL_DM_DIG_DISABLE BIT(0)
+#define HAL_DM_HIPWR_DISABLE BIT(1)
+
+#define OFDM_TABLE_LENGTH 37
+#define OFDM_TABLE_SIZE_92D 43
+#define CCK_TABLE_LENGTH 33
+
+#define CCK_TABLE_SIZE 33
+
+#define BW_AUTO_SWITCH_HIGH_LOW 25
+#define BW_AUTO_SWITCH_LOW_HIGH 30
+
+#define DM_DIG_FA_UPPER 0x32
+#define DM_DIG_FA_LOWER 0x20
+#define DM_DIG_FA_TH0 0x100
+#define DM_DIG_FA_TH1 0x400
+#define DM_DIG_FA_TH2 0x600
+
+#define RXPATHSELECTION_SS_TH_LOW 30
+#define RXPATHSELECTION_DIFF_TH 18
+
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+#define CTS2SELF_THVAL 30
+#define REGC38_TH 20
+
+#define WAIOTTHVAL 25
+
+#define TXHIGHPWRLEVEL_NORMAL 0
+#define TXHIGHPWRLEVEL_LEVEL1 1
+#define TXHIGHPWRLEVEL_LEVEL2 2
+#define TXHIGHPWRLEVEL_BT1 3
+#define TXHIGHPWRLEVEL_BT2 4
+
+#define DM_TYPE_BYFW 0
+#define DM_TYPE_BYDRIVER 1
+
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+#define INDEX_MAPPING_NUM 13
+
+enum dm_1r_cca {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+};
+
+enum dm_rf {
+ RF_SAVE = 0,
+ RF_NORMAL = 1,
+ RF_MAX = 2,
+};
+
+enum dm_sw_ant_switch {
+ ANS_ANTENNA_B = 1,
+ ANS_ANTENNA_A = 2,
+ ANS_ANTENNA_MAX = 3,
+};
+
+void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw);
+void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw);
+void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw);
+void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw);
+void rtl92d_dm_write_dig(struct ieee80211_hw *hw);
+void rtl92d_dm_dig(struct ieee80211_hw *hw);
+void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw);
+void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw);
+void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c
new file mode 100644
index 000000000000..aa54dbde6ea8
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../pci.h"
+#include "../base.h"
+#include "../efuse.h"
+#include "def.h"
+#include "reg.h"
+#include "fw_common.h"
+
+bool rtl92d_is_fw_downloaded(struct rtl_priv *rtlpriv)
+{
+ return !!(rtl_read_dword(rtlpriv, REG_MCUFWDL) & MCUFWDL_RDY);
+}
+EXPORT_SYMBOL_GPL(rtl92d_is_fw_downloaded);
+
+void rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp;
+
+ if (enable) {
+ tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
+ } else {
+ tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
+ /* Reserved for fw extension.
+		 * 0x81[7] is used for mac0 status,
+		 * so don't write this reg here:
+ * rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
+ */
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_enable_fw_download);
+
+void rtl92d_write_fw(struct ieee80211_hw *hw,
+ enum version_8192d version, u8 *buffer, u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 *bufferptr = buffer;
+ u32 pagenums, remainsize;
+ u32 page, offset;
+
+	rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes\n", size);
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
+ rtl_fill_dummy(bufferptr, &size);
+
+ pagenums = size / FW_8192D_PAGE_SIZE;
+ remainsize = size % FW_8192D_PAGE_SIZE;
+
+ if (pagenums > 8)
+		pr_err("Page count must not be greater than 8\n");
+
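+	/* Download the image page by page; a tail shorter than one page is
+	 * written as a final partial page.
+	 */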
+ for (page = 0; page < pagenums; page++) {
+ offset = page * FW_8192D_PAGE_SIZE;
+ rtl_fw_page_write(hw, page, (bufferptr + offset),
+ FW_8192D_PAGE_SIZE);
+ }
+
+ if (remainsize) {
+ offset = pagenums * FW_8192D_PAGE_SIZE;
+ page = pagenums;
+ rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_write_fw);
+
+int rtl92d_fw_free_to_go(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 counter = 0;
+ u32 value32;
+
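+	/* Wait for the firmware checksum report, then set MCUFWDL_RDY to
+	 * let the MCU start executing.
+	 */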
+ do {
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) &&
+ (!(value32 & FWDL_CHKSUM_RPT)));
+
+ if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) {
+		pr_err("Checksum report failed! REG_MCUFWDL:0x%08x\n",
+ value32);
+ return -EIO;
+ }
+
+ value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
+ value32 |= MCUFWDL_RDY;
+ rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl92d_fw_free_to_go);
+
+#define RTL_USB_DELAY_FACTOR 60
+
+void rtl92d_firmware_selfreset(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u8 u1b_tmp;
+ u8 delay = 100;
+
+ if (rtlhal->interface == INTF_USB) {
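+		/* Register access over USB is slower, so scale up the poll
+		 * budget accordingly.
+		 */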
+ delay *= RTL_USB_DELAY_FACTOR;
+
+ rtl_write_byte(rtlpriv, REG_FSIMR, 0);
+
+		/* Disable the other HRCV interrupts so that they cannot
+		 * interfere with the 8051 reset.
+		 */
+ rtl_write_byte(rtlpriv, REG_FWIMR, 0x20);
+
+ /* Close mask to prevent incorrect FW write operation. */
+ rtl_write_byte(rtlpriv, REG_FTIMR, 0);
+ }
+
+	/* Setting (REG_HMETFR + 3) to 0x20 resets the 8051 */
+ rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
+
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+
+ while (u1b_tmp & (FEN_CPUEN >> 8)) {
+ delay--;
+ if (delay == 0)
+ break;
+ udelay(50);
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ }
+
+ if (rtlhal->interface == INTF_USB) {
+ if ((u1b_tmp & (FEN_CPUEN >> 8)) && delay == 0)
+ rtl_write_byte(rtlpriv, REG_FWIMR, 0);
+ }
+
+	WARN_ONCE(delay == 0, "rtl8192de: 8051 reset failed!\n");
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "=====> 8051 reset success (%d)\n", delay);
+}
+EXPORT_SYMBOL_GPL(rtl92d_firmware_selfreset);
+
+int rtl92d_fw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 counter;
+
+	rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, "FW already downloaded\n");
+ /* polling for FW ready */
+ counter = 0;
+ do {
+ if (rtlhal->interfaceindex == 0) {
+ if (rtl_read_byte(rtlpriv, FW_MAC0_READY) &
+ MAC0_READY) {
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
+ rtl_read_byte(rtlpriv,
+ FW_MAC0_READY));
+ return 0;
+ }
+ udelay(5);
+ } else {
+ if (rtl_read_byte(rtlpriv, FW_MAC1_READY) &
+ MAC1_READY) {
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+ "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
+ rtl_read_byte(rtlpriv,
+ FW_MAC1_READY));
+ return 0;
+ }
+ udelay(5);
+ }
+ } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
+
+ if (rtlhal->interfaceindex == 0) {
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+			"Polling FW ready failed! MAC0 FW init not ready: 0x%x\n",
+ rtl_read_byte(rtlpriv, FW_MAC0_READY));
+ } else {
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+			"Polling FW ready failed! MAC1 FW init not ready: 0x%x\n",
+ rtl_read_byte(rtlpriv, FW_MAC1_READY));
+ }
+ rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
+		"Polling FW ready failed! REG_MCUFWDL:0x%08x\n",
+ rtl_read_dword(rtlpriv, REG_MCUFWDL));
+ return -1;
+}
+EXPORT_SYMBOL_GPL(rtl92d_fw_init);
+
+static bool _rtl92d_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 val_hmetfr;
+ bool result = false;
+
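+	/* Each bit in REG_HMETFR tracks one H2C mailbox; a clear bit means
+	 * the firmware has read that box and it can be reused.
+	 */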
+ val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
+ if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
+ result = true;
+ return result;
+}
+
+void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw,
+ u8 element_id, u32 cmd_len, u8 *cmdbuffer)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 boxcontent[4], boxextcontent[2];
+ u16 box_reg = 0, box_extreg = 0;
+	u8 wait_writeh2c_limit = 100;
+	bool bwrite_success = false;
+	u8 wait_h2c_limit = 100;
+ u32 h2c_waitcounter = 0;
+ bool isfw_read = false;
+ unsigned long flag;
+ u8 u1b_tmp;
+ u8 boxnum;
+ u8 idx;
+
+ if (ppsc->rfpwr_state == ERFOFF || ppsc->inactive_pwrstate == ERFOFF) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Return as RF is off!!!\n");
+ return;
+ }
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
+
+ while (true) {
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ if (rtlhal->h2c_setinprogress) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "H2C set in progress! Wait to set..element_id(%d)\n",
+ element_id);
+
+ while (rtlhal->h2c_setinprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
+ flag);
+ h2c_waitcounter++;
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Wait 100 us (%d times)...\n",
+ h2c_waitcounter);
+ udelay(100);
+
+ if (h2c_waitcounter > 1000)
+ return;
+
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
+ flag);
+ }
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ } else {
+ rtlhal->h2c_setinprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ break;
+ }
+ }
+
+ while (!bwrite_success) {
+		wait_writeh2c_limit--;
+		if (wait_writeh2c_limit == 0) {
+ pr_err("Write H2C fail because no trigger for FW INT!\n");
+ break;
+ }
+
+ boxnum = rtlhal->last_hmeboxnum;
+ if (boxnum > 3) {
+ pr_err("boxnum %#x too big\n", boxnum);
+ break;
+ }
+
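+		/* The four H2C mailboxes are consumed round-robin;
+		 * last_hmeboxnum selects the next box register and its
+		 * 2-byte extension register.
+		 */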
+ box_reg = REG_HMEBOX_0 + boxnum * SIZE_OF_REG_HMEBOX;
+ box_extreg = REG_HMEBOX_EXT_0 + boxnum * SIZE_OF_REG_HMEBOX_EXT;
+
+ isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
+ while (!isfw_read) {
+			wait_h2c_limit--;
+			if (wait_h2c_limit == 0) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+					"Waited too long for FW to read and clear HMEBox(%d)!\n",
+ boxnum);
+ break;
+ }
+
+ udelay(10);
+
+ isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
+ u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+				"Waiting for FW to read and clear HMEBox(%d)! 0x1BF = %2x\n",
+ boxnum, u1b_tmp);
+ }
+
+ if (!isfw_read) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+				"Write H2C register BOX[%d] failed! FW did not read.\n",
+ boxnum);
+ break;
+ }
+
+ memset(boxcontent, 0, sizeof(boxcontent));
+ memset(boxextcontent, 0, sizeof(boxextcontent));
+ boxcontent[0] = element_id;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "Write element_id box_reg(%4x) = %2x\n",
+ box_reg, element_id);
+
+ switch (cmd_len) {
+ case 1 ... 3:
+ /* BOX: | ID | A0 | A1 | A2 |
+ * BOX_EXT: --- N/A ------
+ */
+ boxcontent[0] &= ~BIT(7);
+ memcpy(boxcontent + 1, cmdbuffer, cmd_len);
+
+ for (idx = 0; idx < 4; idx++)
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ break;
+ case 4 ... 5:
+			/* ID ext = ID | BIT(7)
+ * BOX: | ID ext | A2 | A3 | A4 |
+ * BOX_EXT: | A0 | A1 |
+ */
+ boxcontent[0] |= BIT(7);
+ memcpy(boxextcontent, cmdbuffer, 2);
+ memcpy(boxcontent + 1, cmdbuffer + 2, cmd_len - 2);
+
+ for (idx = 0; idx < 2; idx++)
+ rtl_write_byte(rtlpriv, box_extreg + idx,
+ boxextcontent[idx]);
+
+ for (idx = 0; idx < 4; idx++)
+ rtl_write_byte(rtlpriv, box_reg + idx,
+ boxcontent[idx]);
+ break;
+ default:
+ pr_err("switch case %#x not processed\n", cmd_len);
+ break;
+ }
+
+ bwrite_success = true;
+ rtlhal->last_hmeboxnum = boxnum + 1;
+ if (rtlhal->last_hmeboxnum == 4)
+ rtlhal->last_hmeboxnum = 0;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
+ "pHalData->last_hmeboxnum = %d\n",
+ rtlhal->last_hmeboxnum);
+ }
+ spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
+ rtlhal->h2c_setinprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
+}
+EXPORT_SYMBOL_GPL(rtl92d_fill_h2c_cmd);
+
+void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
+{
+ u8 u1_joinbssrpt_parm[1] = {0};
+
+ u1_joinbssrpt_parm[0] = mstatus;
+ rtl92d_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
+}
+EXPORT_SYMBOL_GPL(rtl92d_set_fw_joinbss_report_cmd);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h
new file mode 100644
index 000000000000..4b73e0bd4ac4
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/fw_common.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#ifndef __RTL92D_FW_COMMON_H__
+#define __RTL92D_FW_COMMON_H__
+
+#define FW_8192D_START_ADDRESS 0x1000
+#define FW_8192D_PAGE_SIZE 4096
+#define FW_8192D_POLLING_TIMEOUT_COUNT 1000
+
+#define IS_FW_HEADER_EXIST(_pfwhdr) \
+ ((GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x92C0 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x88C0 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D0 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D1 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D2 || \
+ (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D3)
+
+/* Firmware header (8-byte alignment required) */
+/* --- LONG WORD 0 ---- */
+#define GET_FIRMWARE_HDR_SIGNATURE(__fwhdr) \
+ le32_get_bits(*(__le32 *)__fwhdr, GENMASK(15, 0))
+#define GET_FIRMWARE_HDR_VERSION(__fwhdr) \
+ le32_get_bits(*(__le32 *)((__fwhdr) + 4), GENMASK(15, 0))
+#define GET_FIRMWARE_HDR_SUB_VER(__fwhdr) \
+ le32_get_bits(*(__le32 *)((__fwhdr) + 4), GENMASK(23, 16))
+
+#define RAID_MASK GENMASK(31, 28)
+#define RATE_MASK_MASK GENMASK(27, 0)
+#define SHORT_GI_MASK BIT(5)
+#define MACID_MASK GENMASK(4, 0)
+
+struct rtl92d_rate_mask_h2c {
+ __le32 rate_mask_and_raid;
+ u8 macid_and_short_gi;
+} __packed;
+
+bool rtl92d_is_fw_downloaded(struct rtl_priv *rtlpriv);
+void rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable);
+void rtl92d_write_fw(struct ieee80211_hw *hw,
+ enum version_8192d version, u8 *buffer, u32 size);
+int rtl92d_fw_free_to_go(struct ieee80211_hw *hw);
+void rtl92d_firmware_selfreset(struct ieee80211_hw *hw);
+int rtl92d_fw_init(struct ieee80211_hw *hw);
+void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
+			 u32 cmd_len, u8 *cmdbuffer);
+void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c
new file mode 100644
index 000000000000..97e0d9c01e0a
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c
@@ -0,0 +1,1225 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../cam.h"
+#include "../efuse.h"
+#include "../pci.h"
+#include "../regd.h"
+#include "def.h"
+#include "reg.h"
+#include "dm_common.h"
+#include "fw_common.h"
+#include "hw_common.h"
+#include "phy_common.h"
+
+void rtl92d_stop_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte &= ~(BIT(0));
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+EXPORT_SYMBOL_GPL(rtl92d_stop_tx_beacon);
+
+void rtl92d_resume_tx_beacon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 tmp1byte;
+
+ tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0x0a);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
+ tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
+ tmp1byte |= BIT(0);
+ rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
+}
+EXPORT_SYMBOL_GPL(rtl92d_resume_tx_beacon);
+
+void rtl92d_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+
+ switch (variable) {
+ case HW_VAR_RF_STATE:
+ *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
+ break;
+ case HW_VAR_FWLPS_RF_ON:{
+ enum rf_pwrstate rfstate;
+ u32 val_rcr;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
+ (u8 *)(&rfstate));
+ if (rfstate == ERFOFF) {
+ *((bool *)(val)) = true;
+ } else {
+ val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
+ val_rcr &= 0x00070000;
+ if (val_rcr)
+ *((bool *)(val)) = false;
+ else
+ *((bool *)(val)) = true;
+ }
+ break;
+ }
+ case HW_VAR_FW_PSMODE_STATUS:
+ *((bool *)(val)) = ppsc->fw_current_inpsmode;
+ break;
+ case HW_VAR_CORRECT_TSF:{
+ u64 tsf;
+ u32 *ptsf_low = (u32 *)&tsf;
+ u32 *ptsf_high = ((u32 *)&tsf) + 1;
+
+ *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
+ *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
+ *((u64 *)(val)) = tsf;
+ break;
+ }
+ case HW_VAR_INT_MIGRATION:
+ *((bool *)(val)) = rtlpriv->dm.interrupt_migration;
+ break;
+ case HW_VAR_INT_AC:
+ *((bool *)(val)) = rtlpriv->dm.disable_tx_int;
+ break;
+ case HAL_DEF_WOWLAN:
+ break;
+ default:
+ pr_err("switch case %#x not processed\n", variable);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_get_hw_reg);
+
+void rtl92d_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ u8 idx;
+
+ switch (variable) {
+ case HW_VAR_ETHER_ADDR:
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_MACID + idx),
+ val[idx]);
+ }
+ break;
+ case HW_VAR_BASIC_RATE: {
+ u16 rate_cfg = ((u16 *)val)[0];
+ u8 rate_index = 0;
+
+ rate_cfg = rate_cfg & 0x15f;
+ if (mac->vendor == PEER_CISCO &&
+ ((rate_cfg & 0x150) == 0))
+ rate_cfg |= 0x01;
+ rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
+ rtl_write_byte(rtlpriv, REG_RRSR + 1,
+ (rate_cfg >> 8) & 0xff);
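+		/* Derive the index of the highest basic rate; it seeds the
+		 * initial RTS rate register.
+		 */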
+ while (rate_cfg > 0x1) {
+ rate_cfg = (rate_cfg >> 1);
+ rate_index++;
+ }
+ if (rtlhal->fw_version > 0xe)
+ rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
+ rate_index);
+ break;
+ }
+ case HW_VAR_BSSID:
+ for (idx = 0; idx < ETH_ALEN; idx++) {
+ rtl_write_byte(rtlpriv, (REG_BSSID + idx),
+ val[idx]);
+ }
+ break;
+ case HW_VAR_SIFS:
+ rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
+ rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
+ rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
+ if (!mac->ht_enable)
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ 0x0e0e);
+ else
+ rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
+ *((u16 *)val));
+ break;
+ case HW_VAR_SLOT_TIME: {
+ u8 e_aci;
+
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "HW_VAR_SLOT_TIME %x\n", val[0]);
+ rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
+ for (e_aci = 0; e_aci < AC_MAX; e_aci++)
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AC_PARAM,
+ (&e_aci));
+ break;
+ }
+ case HW_VAR_ACK_PREAMBLE: {
+ u8 reg_tmp;
+ u8 short_preamble = (bool)(*val);
+
+ reg_tmp = (mac->cur_40_prime_sc) << 5;
+ if (short_preamble)
+ reg_tmp |= 0x80;
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
+ break;
+ }
+ case HW_VAR_AMPDU_MIN_SPACE: {
+ u8 min_spacing_to_set;
+
+ min_spacing_to_set = *val;
+ if (min_spacing_to_set <= 7) {
+ mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) |
+ min_spacing_to_set);
+ *val = min_spacing_to_set;
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
+ mac->min_space_cfg);
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ }
+ break;
+ }
+ case HW_VAR_SHORTGI_DENSITY: {
+ u8 density_to_set;
+
+ density_to_set = *val;
+ mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
+ mac->min_space_cfg |= (density_to_set << 3);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
+ mac->min_space_cfg);
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
+ mac->min_space_cfg);
+ break;
+ }
+ case HW_VAR_AMPDU_FACTOR: {
+ u8 factor_toset;
+ u32 regtoset;
+ u8 *ptmp_byte = NULL;
+ u8 index;
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY)
+ regtoset = 0xb9726641;
+ else if (rtlhal->macphymode == DUALMAC_SINGLEPHY)
+ regtoset = 0x66626641;
+ else
+ regtoset = 0xb972a841;
+ factor_toset = *val;
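+		/* Convert the 802.11 max-AMPDU factor to a length nibble and
+		 * clamp each nibble of REG_AGGLEN_LMT down to it.
+		 */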
+ if (factor_toset <= 3) {
+ factor_toset = (1 << (factor_toset + 2));
+ if (factor_toset > 0xf)
+ factor_toset = 0xf;
+ for (index = 0; index < 4; index++) {
+ ptmp_byte = (u8 *)(&regtoset) + index;
+ if ((*ptmp_byte & 0xf0) >
+ (factor_toset << 4))
+ *ptmp_byte = (*ptmp_byte & 0x0f)
+ | (factor_toset << 4);
+ if ((*ptmp_byte & 0x0f) > factor_toset)
+ *ptmp_byte = (*ptmp_byte & 0xf0)
+ | (factor_toset);
+ }
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoset);
+ rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
+ "Set HW_VAR_AMPDU_FACTOR: %#x\n",
+ factor_toset);
+ }
+ break;
+ }
+ case HW_VAR_RETRY_LIMIT: {
+ u8 retry_limit = val[0];
+
+ rtl_write_word(rtlpriv, REG_RL,
+ retry_limit << RETRY_LIMIT_SHORT_SHIFT |
+ retry_limit << RETRY_LIMIT_LONG_SHIFT);
+ break;
+ }
+ case HW_VAR_DUAL_TSF_RST:
+ rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
+ break;
+ case HW_VAR_EFUSE_BYTES:
+ rtlefuse->efuse_usedbytes = *((u16 *)val);
+ break;
+ case HW_VAR_EFUSE_USAGE:
+ rtlefuse->efuse_usedpercentage = *val;
+ break;
+ case HW_VAR_IO_CMD:
+ rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val));
+ break;
+ case HW_VAR_WPA_CONFIG:
+ rtl_write_byte(rtlpriv, REG_SECCFG, *val);
+ break;
+ case HW_VAR_SET_RPWM:
+ rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (val));
+ break;
+ case HW_VAR_H2C_FW_PWRMODE:
+ break;
+ case HW_VAR_FW_PSMODE_STATUS:
+ ppsc->fw_current_inpsmode = *((bool *)val);
+ break;
+ case HW_VAR_AID: {
+ u16 u2btmp;
+
+ u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
+ u2btmp &= 0xC000;
+ rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
+ mac->assoc_id));
+ break;
+ }
+ default:
+ pr_err("switch case %#x not processed\n", variable);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_set_hw_reg);
+
+bool rtl92d_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ bool status = true;
+ long count = 0;
+ u32 value = _LLT_INIT_ADDR(address) |
+ _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+
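+	/* Issue the LLT write and poll the op field until the hardware
+	 * reports idle (_LLT_NO_ACTIVE) or the poll threshold is hit.
+	 */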
+ rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
+ do {
+ value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
+ if (_LLT_OP_VALUE(value) == _LLT_NO_ACTIVE)
+ break;
+ if (count > POLLING_LLT_THRESHOLD) {
+			pr_err("Failed polling for LLT write done at address %d!\n",
+ address);
+ status = false;
+ break;
+ }
+ } while (++count);
+ return status;
+}
+EXPORT_SYMBOL_GPL(rtl92d_llt_write);
+
+void rtl92d_enable_hw_security_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 sec_reg_value;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
+ rtlpriv->sec.pairwise_enc_algorithm,
+ rtlpriv->sec.group_enc_algorithm);
+ if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+			"not opening hw encryption\n");
+ return;
+ }
+ sec_reg_value = SCR_TXENCENABLE | SCR_RXENCENABLE;
+ if (rtlpriv->sec.use_defaultkey) {
+ sec_reg_value |= SCR_TXUSEDK;
+ sec_reg_value |= SCR_RXUSEDK;
+ }
+ sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
+ rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+ "The SECR-value %x\n", sec_reg_value);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
+}
+EXPORT_SYMBOL_GPL(rtl92d_enable_hw_security_config);
+
+/* Don't set REG_EDCA_BE_PARAM here because mac80211
+ * will send packets during scan.
+ */
+void rtl92d_set_qos(struct ieee80211_hw *hw, int aci)
+{
+ rtl92d_dm_init_edca_turbo(hw);
+}
+EXPORT_SYMBOL_GPL(rtl92d_set_qos);
+
+static enum version_8192d _rtl92d_read_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+	enum version_8192d version;
+ u32 value32;
+
+ value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
+ if (!(value32 & 0x000f0000)) {
+ version = VERSION_TEST_CHIP_92D_SINGLEPHY;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "TEST CHIP!!!\n");
+ } else {
+ version = VERSION_NORMAL_CHIP_92D_SINGLEPHY;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Normal CHIP!!!\n");
+ }
+ return version;
+}
+
+static void _rtl92d_readpowervalue_fromprom(struct txpower_info *pwrinfo,
+ u8 *efuse, bool autoloadfail)
+{
+ u32 rfpath, eeaddr, group, offset, offset1, offset2;
+ u8 i, val8;
+
+ memset(pwrinfo, 0, sizeof(struct txpower_info));
+ if (autoloadfail) {
+ for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ if (group < CHANNEL_GROUP_MAX_2G) {
+ pwrinfo->cck_index[rfpath][group] =
+ EEPROM_DEFAULT_TXPOWERLEVEL_2G;
+ pwrinfo->ht40_1sindex[rfpath][group] =
+ EEPROM_DEFAULT_TXPOWERLEVEL_2G;
+ } else {
+ pwrinfo->ht40_1sindex[rfpath][group] =
+ EEPROM_DEFAULT_TXPOWERLEVEL_5G;
+ }
+ pwrinfo->ht40_2sindexdiff[rfpath][group] =
+ EEPROM_DEFAULT_HT40_2SDIFF;
+ pwrinfo->ht20indexdiff[rfpath][group] =
+ EEPROM_DEFAULT_HT20_DIFF;
+ pwrinfo->ofdmindexdiff[rfpath][group] =
+ EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
+ pwrinfo->ht40maxoffset[rfpath][group] =
+ EEPROM_DEFAULT_HT40_PWRMAXOFFSET;
+ pwrinfo->ht20maxoffset[rfpath][group] =
+ EEPROM_DEFAULT_HT20_PWRMAXOFFSET;
+ }
+ }
+ for (i = 0; i < 3; i++) {
+ pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI;
+ pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI;
+ }
+ return;
+ }
+
+	/* Autoload may be OK, but the TX power index values may still be
+	 * unprogrammed (0xFF). Fall back to default values in that case.
+	 */
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ for (group = 0; group < CHANNEL_GROUP_MAX_2G; group++) {
+ eeaddr = EEPROM_CCK_TX_PWR_INX_2G + (rfpath * 3) + group;
+
+ pwrinfo->cck_index[rfpath][group] =
+ efuse[eeaddr] == 0xFF ?
+ (eeaddr > 0x7B ?
+ EEPROM_DEFAULT_TXPOWERLEVEL_5G :
+ EEPROM_DEFAULT_TXPOWERLEVEL_2G) :
+ efuse[eeaddr];
+ }
+ }
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
+ offset1 = group / 3;
+ offset2 = group % 3;
+ eeaddr = EEPROM_HT40_1S_TX_PWR_INX_2G + (rfpath * 3);
+ eeaddr += offset2 + offset1 * 21;
+
+ pwrinfo->ht40_1sindex[rfpath][group] =
+ efuse[eeaddr] == 0xFF ?
+ (eeaddr > 0x7B ?
+ EEPROM_DEFAULT_TXPOWERLEVEL_5G :
+ EEPROM_DEFAULT_TXPOWERLEVEL_2G) :
+ efuse[eeaddr];
+ }
+ }
+
+	/* These offsets are specific to the 92D efuse layout. */
+ for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ offset1 = group / 3;
+ offset2 = group % 3;
+ offset = offset2 + offset1 * 21;
+
+ val8 = efuse[EEPROM_HT40_2S_TX_PWR_INX_DIFF_2G + offset];
+ if (val8 != 0xFF)
+ pwrinfo->ht40_2sindexdiff[rfpath][group] =
+ (val8 >> (rfpath * 4)) & 0xF;
+ else
+ pwrinfo->ht40_2sindexdiff[rfpath][group] =
+ EEPROM_DEFAULT_HT40_2SDIFF;
+
+ val8 = efuse[EEPROM_HT20_TX_PWR_INX_DIFF_2G + offset];
+ if (val8 != 0xFF)
+ pwrinfo->ht20indexdiff[rfpath][group] =
+ (val8 >> (rfpath * 4)) & 0xF;
+ else
+ pwrinfo->ht20indexdiff[rfpath][group] =
+ EEPROM_DEFAULT_HT20_DIFF;
+
+ val8 = efuse[EEPROM_OFDM_TX_PWR_INX_DIFF_2G + offset];
+ if (val8 != 0xFF)
+ pwrinfo->ofdmindexdiff[rfpath][group] =
+ (val8 >> (rfpath * 4)) & 0xF;
+ else
+ pwrinfo->ofdmindexdiff[rfpath][group] =
+ EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
+
+ val8 = efuse[EEPROM_HT40_MAX_PWR_OFFSET_2G + offset];
+ if (val8 != 0xFF)
+ pwrinfo->ht40maxoffset[rfpath][group] =
+ (val8 >> (rfpath * 4)) & 0xF;
+ else
+ pwrinfo->ht40maxoffset[rfpath][group] =
+ EEPROM_DEFAULT_HT40_PWRMAXOFFSET;
+
+ val8 = efuse[EEPROM_HT20_MAX_PWR_OFFSET_2G + offset];
+ if (val8 != 0xFF)
+ pwrinfo->ht20maxoffset[rfpath][group] =
+ (val8 >> (rfpath * 4)) & 0xF;
+ else
+ pwrinfo->ht20maxoffset[rfpath][group] =
+ EEPROM_DEFAULT_HT20_PWRMAXOFFSET;
+ }
+ }
+
+ if (efuse[EEPROM_TSSI_A_5G] != 0xFF) {
+ /* 5GL */
+ pwrinfo->tssi_a[0] = efuse[EEPROM_TSSI_A_5G] & 0x3F;
+ pwrinfo->tssi_b[0] = efuse[EEPROM_TSSI_B_5G] & 0x3F;
+ /* 5GM */
+ pwrinfo->tssi_a[1] = efuse[EEPROM_TSSI_AB_5G] & 0x3F;
+ pwrinfo->tssi_b[1] = (efuse[EEPROM_TSSI_AB_5G] & 0xC0) >> 6 |
+ (efuse[EEPROM_TSSI_AB_5G + 1] & 0x0F) << 2;
+ /* 5GH */
+ pwrinfo->tssi_a[2] = (efuse[EEPROM_TSSI_AB_5G + 1] & 0xF0) >> 4 |
+ (efuse[EEPROM_TSSI_AB_5G + 2] & 0x03) << 4;
+ pwrinfo->tssi_b[2] = (efuse[EEPROM_TSSI_AB_5G + 2] & 0xFC) >> 2;
+ } else {
+ for (i = 0; i < 3; i++) {
+ pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI;
+ pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI;
+ }
+ }
+}
+
+static void _rtl92d_read_txpower_info(struct ieee80211_hw *hw,
+ bool autoload_fail, u8 *hwinfo)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct txpower_info pwrinfo;
+ u8 tempval[2], i, pwr, diff;
+ u32 ch, rfpath, group;
+
+ _rtl92d_readpowervalue_fromprom(&pwrinfo, hwinfo, autoload_fail);
+ if (!autoload_fail) {
+ /* bit0~2 */
+ rtlefuse->eeprom_regulatory = (hwinfo[EEPROM_RF_OPT1] & 0x7);
+ rtlefuse->eeprom_thermalmeter =
+ hwinfo[EEPROM_THERMAL_METER] & 0x1f;
+ rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_K];
+ tempval[0] = hwinfo[EEPROM_IQK_DELTA] & 0x03;
+ tempval[1] = (hwinfo[EEPROM_LCK_DELTA] & 0x0C) >> 2;
+ rtlefuse->txpwr_fromeprom = true;
+ if (IS_92D_D_CUT(rtlpriv->rtlhal.version) ||
+ IS_92D_E_CUT(rtlpriv->rtlhal.version)) {
+ rtlefuse->internal_pa_5g[0] =
+ !((hwinfo[EEPROM_TSSI_A_5G] & BIT(6)) >> 6);
+ rtlefuse->internal_pa_5g[1] =
+ !((hwinfo[EEPROM_TSSI_B_5G] & BIT(6)) >> 6);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
+				"Is D cut, Internal PA0 %d, Internal PA1 %d\n",
+ rtlefuse->internal_pa_5g[0],
+ rtlefuse->internal_pa_5g[1]);
+ }
+ rtlefuse->eeprom_c9 = hwinfo[EEPROM_RF_OPT6];
+ rtlefuse->eeprom_cc = hwinfo[EEPROM_RF_OPT7];
+ } else {
+ rtlefuse->eeprom_regulatory = 0;
+ rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
+ rtlefuse->crystalcap = EEPROM_DEFAULT_CRYSTALCAP;
+ tempval[0] = 3;
+ tempval[1] = tempval[0];
+ }
+
+	/* Fall back to default values for any parameters the efuse
+	 * leaves unprogrammed.
+	 */
+
+ /* ThermalMeter from EEPROM */
+ if (rtlefuse->eeprom_thermalmeter < 0x06 ||
+ rtlefuse->eeprom_thermalmeter > 0x1c)
+ rtlefuse->eeprom_thermalmeter = 0x12;
+ rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
+
+ /* check XTAL_K */
+ if (rtlefuse->crystalcap == 0xFF)
+ rtlefuse->crystalcap = 0;
+ if (rtlefuse->eeprom_regulatory > 3)
+ rtlefuse->eeprom_regulatory = 0;
+
+ for (i = 0; i < 2; i++) {
+ switch (tempval[i]) {
+ case 0:
+ tempval[i] = 5;
+ break;
+ case 1:
+ tempval[i] = 4;
+ break;
+ case 2:
+ tempval[i] = 3;
+ break;
+ case 3:
+ default:
+ tempval[i] = 0;
+ break;
+ }
+ }
+
+ rtlefuse->delta_iqk = tempval[0];
+ if (tempval[1] > 0)
+ rtlefuse->delta_lck = tempval[1] - 1;
+ if (rtlefuse->eeprom_c9 == 0xFF)
+ rtlefuse->eeprom_c9 = 0x00;
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "EEPROMRegulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "ThermalMeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "CrystalCap = 0x%x\n", rtlefuse->crystalcap);
+ rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
+ "Delta_IQK = 0x%x Delta_LCK = 0x%x\n",
+ rtlefuse->delta_iqk, rtlefuse->delta_lck);
+
+ for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
+ for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
+ group = rtl92d_get_chnlgroup_fromarray((u8)ch);
+ if (ch < CHANNEL_MAX_NUMBER_2G)
+ rtlefuse->txpwrlevel_cck[rfpath][ch] =
+ pwrinfo.cck_index[rfpath][group];
+ rtlefuse->txpwrlevel_ht40_1s[rfpath][ch] =
+ pwrinfo.ht40_1sindex[rfpath][group];
+ rtlefuse->txpwr_ht20diff[rfpath][ch] =
+ pwrinfo.ht20indexdiff[rfpath][group];
+ rtlefuse->txpwr_legacyhtdiff[rfpath][ch] =
+ pwrinfo.ofdmindexdiff[rfpath][group];
+ rtlefuse->pwrgroup_ht20[rfpath][ch] =
+ pwrinfo.ht20maxoffset[rfpath][group];
+ rtlefuse->pwrgroup_ht40[rfpath][ch] =
+ pwrinfo.ht40maxoffset[rfpath][group];
+ pwr = pwrinfo.ht40_1sindex[rfpath][group];
+ diff = pwrinfo.ht40_2sindexdiff[rfpath][group];
+ rtlefuse->txpwrlevel_ht40_2s[rfpath][ch] =
+ (pwr > diff) ? (pwr - diff) : 0;
+ }
+ }
+}
+
+static void _rtl92d_read_macphymode_from_prom(struct ieee80211_hw *hw,
+ u8 *content)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ bool is_single_mac = true;
+
+ if (rtlhal->interface == INTF_PCI)
+ is_single_mac = !!(content[EEPROM_MAC_FUNCTION] & BIT(3));
+ else if (rtlhal->interface == INTF_USB)
+ is_single_mac = !(content[EEPROM_ENDPOINT_SETTING] & BIT(0));
+
+ if (is_single_mac) {
+ rtlhal->macphymode = SINGLEMAC_SINGLEPHY;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode SINGLEMAC_SINGLEPHY\n");
+ } else {
+ rtlhal->macphymode = DUALMAC_DUALPHY;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode DUALMAC_DUALPHY\n");
+ }
+}
+
+static void _rtl92d_read_macphymode_and_bandtype(struct ieee80211_hw *hw,
+ u8 *content)
+{
+ _rtl92d_read_macphymode_from_prom(hw, content);
+ rtl92d_phy_config_macphymode(hw);
+ rtl92d_phy_config_macphymode_info(hw);
+}
+
+static void _rtl92d_efuse_update_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ enum version_8192d chipver = rtlpriv->rtlhal.version;
+ u8 cutvalue[2];
+ u16 chipvalue;
+
+ read_efuse_byte(hw, EEPROME_CHIP_VERSION_H, &cutvalue[1]);
+ read_efuse_byte(hw, EEPROME_CHIP_VERSION_L, &cutvalue[0]);
+ chipvalue = (cutvalue[1] << 8) | cutvalue[0];
+ switch (chipvalue) {
+ case 0xAA55:
+ chipver |= CHIP_92D_C_CUT;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "C-CUT!!!\n");
+ break;
+ case 0x9966:
+ chipver |= CHIP_92D_D_CUT;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n");
+ break;
+ case 0xCC33:
+ case 0x33CC:
+ chipver |= CHIP_92D_E_CUT;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n");
+ break;
+ default:
+ chipver |= CHIP_92D_D_CUT;
+ pr_err("Unknown CUT!\n");
+ break;
+ }
+ rtlpriv->rtlhal.version = chipver;
+}
+
+static void _rtl92d_read_adapter_info(struct ieee80211_hw *hw)
+{
+ static const int params_pci[] = {
+ RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID,
+ EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR_MAC0_92D,
+ EEPROM_CHANNEL_PLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
+ COUNTRY_CODE_WORLD_WIDE_13
+ };
+ static const int params_usb[] = {
+ RTL8190_EEPROM_ID, EEPROM_VID_USB, EEPROM_PID_USB,
+ EEPROM_VID_USB, EEPROM_PID_USB, EEPROM_MAC_ADDR_MAC0_92DU,
+ EEPROM_CHANNEL_PLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
+ COUNTRY_CODE_WORLD_WIDE_13
+ };
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ const int *params = params_pci;
+ u8 *hwinfo;
+
+ if (rtlhal->interface == INTF_USB)
+ params = params_usb;
+
+ hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL);
+ if (!hwinfo)
+ return;
+
+ if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
+ goto exit;
+
+ _rtl92d_efuse_update_chip_version(hw);
+ _rtl92d_read_macphymode_and_bandtype(hw, hwinfo);
+
+ /* Read Permanent MAC address for 2nd interface */
+ if (rtlhal->interfaceindex != 0)
+ ether_addr_copy(rtlefuse->dev_addr,
+ &hwinfo[EEPROM_MAC_ADDR_MAC1_92D]);
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR,
+ rtlefuse->dev_addr);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
+ _rtl92d_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo);
+
+ /* Read Channel Plan */
+ switch (rtlhal->bandset) {
+ case BAND_ON_2_4G:
+ rtlefuse->channel_plan = COUNTRY_CODE_TELEC;
+ break;
+	case BAND_ON_5G:
+	case BAND_ON_BOTH:
+	default:
+		rtlefuse->channel_plan = COUNTRY_CODE_FCC;
+		break;
+ }
+ rtlefuse->txpwr_fromeprom = true;
+exit:
+ kfree(hwinfo);
+}
+
+void rtl92d_read_eeprom_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp_u1b;
+
+ rtlhal->version = _rtl92d_read_chip_version(hw);
+ tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
+ rtlefuse->autoload_status = tmp_u1b;
+ if (tmp_u1b & BIT(4)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
+ rtlefuse->epromtype = EEPROM_93C46;
+ } else {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
+ rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
+ }
+ if (tmp_u1b & BIT(5)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
+
+ rtlefuse->autoload_failflag = false;
+ _rtl92d_read_adapter_info(hw);
+ } else {
+ pr_err("Autoload ERR!!\n");
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_read_eeprom_info);
+
+static void rtl92d_update_hal_rate_table(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ enum wireless_mode wirelessmode;
+ u8 mimo_ps = IEEE80211_SMPS_OFF;
+ u8 curtxbw_40mhz = mac->bw_40;
+ u8 nmode = mac->ht_enable;
+ u8 curshortgi_40mhz;
+ u8 curshortgi_20mhz;
+ u32 tmp_ratr_value;
+ u8 ratr_index = 0;
+ u16 shortgi_rate;
+ u32 ratr_value;
+
+ curshortgi_40mhz = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+ curshortgi_20mhz = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+ wirelessmode = mac->mode;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_value = sta->deflink.supp_rates[1] << 4;
+ else
+ ratr_value = sta->deflink.supp_rates[0];
+ ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
+ switch (wirelessmode) {
+ case WIRELESS_MODE_A:
+ ratr_value &= 0x00000FF0;
+ break;
+ case WIRELESS_MODE_B:
+ if (ratr_value & 0x0000000c)
+ ratr_value &= 0x0000000d;
+ else
+ ratr_value &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_value &= 0x00000FF5;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ nmode = 1;
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
+ ratr_value &= 0x0007F005;
+ } else {
+ u32 ratr_mask;
+
+ if (get_rf_type(rtlphy) == RF_1T2R ||
+ get_rf_type(rtlphy) == RF_1T1R) {
+ ratr_mask = 0x000ff005;
+ } else {
+ ratr_mask = 0x0f0ff005;
+ }
+
+ ratr_value &= ratr_mask;
+ }
+ break;
+ default:
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_value &= 0x000ff0ff;
+ else
+ ratr_value &= 0x0f0ff0ff;
+
+ break;
+ }
+ ratr_value &= 0x0FFFFFFF;
+ if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz))) {
+ ratr_value |= 0x10000000;
+ tmp_ratr_value = (ratr_value >> 12);
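+		/* Find the highest enabled MCS and replicate its index into
+		 * all four nibbles of the short-GI rate field.
+		 */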
+ for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
+ if ((1 << shortgi_rate) & tmp_ratr_value)
+ break;
+ }
+ shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
+ (shortgi_rate << 4) | (shortgi_rate);
+ }
+ rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
+ rtl_read_dword(rtlpriv, REG_ARFR0));
+}
+
+static void rtl92d_update_hal_rate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level, bool update_bw)
+{
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl92d_rate_mask_h2c rate_mask = {};
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_sta_info *sta_entry = NULL;
+ enum wireless_mode wirelessmode;
+ bool shortgi = false;
+ u8 curshortgi_40mhz;
+ u8 curshortgi_20mhz;
+ u8 curtxbw_40mhz;
+ u32 ratr_bitmap;
+ u8 ratr_index;
+ u8 macid = 0;
+ u8 mimo_ps;
+
+ curtxbw_40mhz = sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40;
+ curshortgi_40mhz = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+ curshortgi_20mhz = !!(sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ mimo_ps = sta_entry->mimo_ps;
+ wirelessmode = sta_entry->wireless_mode;
+
+ if (mac->opmode == NL80211_IFTYPE_STATION)
+ curtxbw_40mhz = mac->bw_40;
+ else if (mac->opmode == NL80211_IFTYPE_AP ||
+ mac->opmode == NL80211_IFTYPE_ADHOC)
+ macid = sta->aid + 1;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ ratr_bitmap = sta->deflink.supp_rates[1] << 4;
+ else
+ ratr_bitmap = sta->deflink.supp_rates[0];
+ ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
+ sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
+
+ switch (wirelessmode) {
+ case WIRELESS_MODE_B:
+ ratr_index = RATR_INX_WIRELESS_B;
+ if (ratr_bitmap & 0x0000000c)
+ ratr_bitmap &= 0x0000000d;
+ else
+ ratr_bitmap &= 0x0000000f;
+ break;
+ case WIRELESS_MODE_G:
+ ratr_index = RATR_INX_WIRELESS_GB;
+
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00000f00;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x00000ff0;
+ else
+ ratr_bitmap &= 0x00000ff5;
+ break;
+ case WIRELESS_MODE_A:
+ ratr_index = RATR_INX_WIRELESS_G;
+ ratr_bitmap &= 0x00000ff0;
+ break;
+ case WIRELESS_MODE_N_24G:
+ case WIRELESS_MODE_N_5G:
+ if (wirelessmode == WIRELESS_MODE_N_24G)
+ ratr_index = RATR_INX_WIRELESS_NGB;
+ else
+ ratr_index = RATR_INX_WIRELESS_NG;
+
+ if (mimo_ps == IEEE80211_SMPS_STATIC) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x00070000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0007f000;
+ else
+ ratr_bitmap &= 0x0007f005;
+ } else {
+ if (rtlphy->rf_type == RF_1T2R ||
+ rtlphy->rf_type == RF_1T1R) {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x000f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x000ff000;
+ else
+ ratr_bitmap &= 0x000ff005;
+ }
+ } else {
+ if (curtxbw_40mhz) {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff015;
+ } else {
+ if (rssi_level == 1)
+ ratr_bitmap &= 0x0f0f0000;
+ else if (rssi_level == 2)
+ ratr_bitmap &= 0x0f0ff000;
+ else
+ ratr_bitmap &= 0x0f0ff005;
+ }
+ }
+ }
+
+ if ((curtxbw_40mhz && curshortgi_40mhz) ||
+ (!curtxbw_40mhz && curshortgi_20mhz)) {
+ if (macid == 0)
+ shortgi = true;
+ else if (macid == 1)
+ shortgi = false;
+ }
+ break;
+ default:
+ ratr_index = RATR_INX_WIRELESS_NGB;
+
+ if (rtlphy->rf_type == RF_1T2R)
+ ratr_bitmap &= 0x000ff0ff;
+ else
+ ratr_bitmap &= 0x0f0ff0ff;
+ break;
+ }
+
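+	/* Pack the rate bitmap and RAID into the leading dword, and the
+	 * macid plus short-GI flag into the trailing byte of the payload.
+	 */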
+ le32p_replace_bits(&rate_mask.rate_mask_and_raid, ratr_bitmap, RATE_MASK_MASK);
+ le32p_replace_bits(&rate_mask.rate_mask_and_raid, ratr_index, RAID_MASK);
+ u8p_replace_bits(&rate_mask.macid_and_short_gi, macid, MACID_MASK);
+ u8p_replace_bits(&rate_mask.macid_and_short_gi, shortgi, SHORT_GI_MASK);
+ u8p_replace_bits(&rate_mask.macid_and_short_gi, 1, BIT(7));
+
+ rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
+ "Rate_index:%x, ratr_val:%x, %5phC\n",
+ ratr_index, ratr_bitmap, &rate_mask);
+
+ if (rtlhal->interface == INTF_PCI) {
+ rtl92d_fill_h2c_cmd(hw, H2C_RA_MASK, sizeof(rate_mask),
+ (u8 *)&rate_mask);
+ } else {
+		/* rtl92d_fill_h2c_cmd() does USB I/O and would trigger a
+		 * "scheduling while atomic" BUG if called directly.
+		 */
+ memcpy(rtlpriv->rate_mask, &rate_mask,
+ sizeof(rtlpriv->rate_mask));
+ schedule_work(&rtlpriv->works.fill_h2c_cmd);
+ }
+
+ if (macid != 0)
+ sta_entry->ratr_index = ratr_index;
+}
+
+void rtl92d_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level, bool update_bw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->dm.useramask)
+ rtl92d_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
+ else
+ rtl92d_update_hal_rate_table(hw, sta);
+}
+EXPORT_SYMBOL_GPL(rtl92d_update_hal_rate_tbl);
+
+void rtl92d_update_channel_access_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 sifs_timer;
+
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
+ &mac->slot_time);
+ if (!mac->ht_enable)
+ sifs_timer = 0x0a0a;
+ else
+ sifs_timer = 0x1010;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
+}
+EXPORT_SYMBOL_GPL(rtl92d_update_channel_access_setting);
+
+bool rtl92d_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
+ enum rf_pwrstate e_rfpowerstate_toset;
+ u8 u1tmp;
+ bool actuallyset = false;
+ unsigned long flag;
+
+ if (rtlpriv->rtlhal.interface == INTF_PCI &&
+ rtlpci->being_init_adapter)
+ return false;
+ if (ppsc->swrf_processing)
+ return false;
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ if (ppsc->rfchange_inprogress) {
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ return false;
+ }
+
+ ppsc->rfchange_inprogress = true;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+
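+	/* Route the radio-switch pin to GPIO and sample BIT(3) of the GPIO
+	 * input to learn the current hardware switch position.
+	 */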
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG,
+ rtl_read_byte(rtlpriv, REG_MAC_PINMUX_CFG) & ~(BIT(3)));
+ u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
+ e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
+ if (ppsc->hwradiooff && e_rfpowerstate_toset == ERFON) {
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio ON, RF ON\n");
+ e_rfpowerstate_toset = ERFON;
+ ppsc->hwradiooff = false;
+ actuallyset = true;
+ } else if (!ppsc->hwradiooff && e_rfpowerstate_toset == ERFOFF) {
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "GPIOChangeRF - HW Radio OFF, RF OFF\n");
+ e_rfpowerstate_toset = ERFOFF;
+ ppsc->hwradiooff = true;
+ actuallyset = true;
+ }
+ if (actuallyset) {
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ } else {
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
+ ppsc->rfchange_inprogress = false;
+ spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
+ }
+ *valid = 1;
+ return !ppsc->hwradiooff;
+}
+EXPORT_SYMBOL_GPL(rtl92d_gpio_radio_on_off_checking);
+
+void rtl92d_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all)
+{
+ static const u8 cam_const_addr[4][6] = {
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
+ };
+ static const u8 cam_const_broad[] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ const u8 *macaddr = p_macaddr;
+ bool is_pairwise = false;
+ u32 entry_id;
+
+ if (clear_all) {
+ u8 idx;
+ u8 cam_offset = 0;
+ u8 clear_number = 5;
+
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
+ for (idx = 0; idx < clear_number; idx++) {
+ rtl_cam_mark_invalid(hw, cam_offset + idx);
+ rtl_cam_empty_entry(hw, cam_offset + idx);
+
+ if (idx < 5) {
+ memset(rtlpriv->sec.key_buf[idx], 0,
+ MAX_KEY_LEN);
+ rtlpriv->sec.key_len[idx] = 0;
+ }
+ }
+
+ return;
+ }
+
+ switch (enc_algo) {
+ case WEP40_ENCRYPTION:
+ enc_algo = CAM_WEP40;
+ break;
+ case WEP104_ENCRYPTION:
+ enc_algo = CAM_WEP104;
+ break;
+ case TKIP_ENCRYPTION:
+ enc_algo = CAM_TKIP;
+ break;
+ case AESCCMP_ENCRYPTION:
+ enc_algo = CAM_AES;
+ break;
+ default:
+ pr_err("switch case %#x not processed\n",
+ enc_algo);
+ enc_algo = CAM_TKIP;
+ break;
+ }
+ if (is_wepkey || rtlpriv->sec.use_defaultkey) {
+ macaddr = cam_const_addr[key_index];
+ entry_id = key_index;
+ } else {
+ if (is_group) {
+ macaddr = cam_const_broad;
+ entry_id = key_index;
+ } else {
+ if (mac->opmode == NL80211_IFTYPE_AP) {
+ entry_id = rtl_cam_get_free_entry(hw, p_macaddr);
+ if (entry_id >= TOTAL_CAM_ENTRY) {
+					pr_err("Cannot find a free hw security CAM entry\n");
+ return;
+ }
+ } else {
+ entry_id = CAM_PAIRWISE_KEY_POSITION;
+ }
+ key_index = PAIRWISE_KEYIDX;
+ is_pairwise = true;
+ }
+ }
+ if (rtlpriv->sec.key_len[key_index] == 0) {
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "delete one entry, entry_id is %d\n",
+ entry_id);
+ if (mac->opmode == NL80211_IFTYPE_AP)
+ rtl_cam_del_entry(hw, p_macaddr);
+ rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
+ } else {
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+			"The inserted key length is %d\n",
+			rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
+		rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
+			"The inserted key is %x %x\n",
+ rtlpriv->sec.key_buf[0][0],
+ rtlpriv->sec.key_buf[0][1]);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "add one entry\n");
+ if (is_pairwise) {
+ RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
+ "Pairwise Key content",
+ rtlpriv->sec.pairwise_key,
+ rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set Pairwise key\n");
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[key_index]);
+ } else {
+ rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
+ "set group key\n");
+ if (mac->opmode == NL80211_IFTYPE_ADHOC) {
+ rtl_cam_add_one_entry(hw,
+ rtlefuse->dev_addr,
+ PAIRWISE_KEYIDX,
+ CAM_PAIRWISE_KEY_POSITION,
+ enc_algo, CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf[entry_id]);
+ }
+ rtl_cam_add_one_entry(hw, macaddr, key_index,
+ entry_id, enc_algo,
+ CAM_CONFIG_NO_USEDK,
+ rtlpriv->sec.key_buf
+ [entry_id]);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_set_key);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h
new file mode 100644
index 000000000000..4da1bab15f36
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#ifndef __RTL92D_HW_COMMON_H__
+#define __RTL92D_HW_COMMON_H__
+
+void rtl92d_stop_tx_beacon(struct ieee80211_hw *hw);
+void rtl92d_resume_tx_beacon(struct ieee80211_hw *hw);
+void rtl92d_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92d_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+bool rtl92d_llt_write(struct ieee80211_hw *hw, u32 address, u32 data);
+void rtl92d_enable_hw_security_config(struct ieee80211_hw *hw);
+void rtl92d_set_qos(struct ieee80211_hw *hw, int aci);
+void rtl92d_read_eeprom_info(struct ieee80211_hw *hw);
+void rtl92d_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level, bool update_bw);
+void rtl92d_update_channel_access_setting(struct ieee80211_hw *hw);
+bool rtl92d_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
+void rtl92d_set_key(struct ieee80211_hw *hw, u32 key_index,
+ u8 *p_macaddr, bool is_group, u8 enc_algo,
+ bool is_wepkey, bool clear_all);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c
new file mode 100644
index 000000000000..e58dc4000c19
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/main.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include <linux/module.h>
+
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192D 802.11n common routines");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c
new file mode 100644
index 000000000000..228c84ab5b90
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.c
@@ -0,0 +1,856 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "def.h"
+#include "reg.h"
+#include "dm_common.h"
+#include "phy_common.h"
+#include "rf_common.h"
+
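+/* Every 2.4 GHz and 5 GHz channel the 92D supports. */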
+static const u8 channel_all[59] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
+ 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
+ 114, 116, 118, 120, 122, 124, 126, 128, 130,
+ 132, 134, 136, 138, 140, 149, 151, 153, 155,
+ 157, 159, 161, 163, 165
+};
+
+static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
+ enum radio_path rfpath, u32 offset)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+ u32 newoffset;
+ u32 tmplong, tmplong2;
+ u8 rfpi_enable = 0;
+ u32 retvalue;
+
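+	/* LSSI read: latch the target address with the read edge toggled,
+	 * then fetch the result through the PI or SI readback register,
+	 * depending on bit 8 of HSSI parameter 1.
+	 */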
+ newoffset = offset;
+ tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
+ if (rfpath == RF90_PATH_A)
+ tmplong2 = tmplong;
+ else
+ tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
+ tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
+ (newoffset << 23) | BLSSIREADEDGE;
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong & (~BLSSIREADEDGE));
+ udelay(10);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
+ udelay(100);
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
+ tmplong | BLSSIREADEDGE);
+ udelay(10);
+ if (rfpath == RF90_PATH_A)
+ rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+ else if (rfpath == RF90_PATH_B)
+ rfpi_enable = (u8)rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
+ BIT(8));
+ if (rfpi_enable)
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
+ BLSSIREADBACKDATA);
+ else
+ retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
+ BLSSIREADBACKDATA);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n",
+ rfpath, pphyreg->rf_rb, retvalue);
+ return retvalue;
+}
+
+static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw,
+ enum radio_path rfpath,
+ u32 offset, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+ u32 data_and_addr;
+ u32 newoffset;
+
+ newoffset = offset;
+ /* T65 RF */
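+	/* 3-wire write: address in bits 27:20, data in bits 19:0. */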
+ data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
+ rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
+ rfpath, pphyreg->rf3wire_offset, data_and_addr);
+}
+
+u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 original_value, readback_value, bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
+ regaddr, rfpath, bitmask);
+ rtl92d_pci_lock(rtlpriv);
+ original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
+ bitshift = calculate_bit_shift(bitmask);
+ readback_value = (original_value & bitmask) >> bitshift;
+ rtl92d_pci_unlock(rtlpriv);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
+ regaddr, rfpath, bitmask, original_value);
+ return readback_value;
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_query_rf_reg);
+
+void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 original_value, bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
+ if (bitmask == 0)
+ return;
+ rtl92d_pci_lock(rtlpriv);
+ if (rtlphy->rf_mode != RF_OP_BY_FW) {
+ if (bitmask != RFREG_OFFSET_MASK) {
+ original_value = _rtl92d_phy_rf_serial_read(hw,
+ rfpath,
+ regaddr);
+ bitshift = calculate_bit_shift(bitmask);
+ data = ((original_value & (~bitmask)) |
+ (data << bitshift));
+ }
+ _rtl92d_phy_rf_serial_write(hw, rfpath, regaddr, data);
+ }
+ rtl92d_pci_unlock(rtlpriv);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
+ regaddr, bitmask, data, rfpath);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_set_rf_reg);
+
+void rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+
+	/* RF Interface Software Control */
+ /* 16 LSBs if read 32-bit from 0x870 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
+ /* 16 LSBs if read 32-bit from 0x874 */
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+	/* 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) */
+	rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
+ /* RF Interface Readback Value */
+ /* 16 LSBs if read 32-bit from 0x8E0 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ /* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
+ /* 16 LSBs if read 32-bit from 0x8E4 */
+ rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+ /* 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) */
+ rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
+
+ /* RF Interface Output (and Enable) */
+ /* 16 LSBs if read 32-bit from 0x860 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
+ /* 16 LSBs if read 32-bit from 0x864 */
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
+
+ /* RF Interface (Output and) Enable */
+ /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */
+ rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
+ /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */
+ rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
+
+ /* Addr of LSSI. Write RF register by driver */
+ /* LSSI Parameter */
+ rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
+ RFPGA0_XA_LSSIPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
+ RFPGA0_XB_LSSIPARAMETER;
+
+ /* RF parameter */
+ /* BB Band Select */
+ rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = RFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = RFPGA0_XAB_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = RFPGA0_XCD_RFPARAMETER;
+ rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = RFPGA0_XCD_RFPARAMETER;
+
+	/* Tx AGC Gain Stage (same for all paths. Should we remove this?) */
+ /* Tx gain stage */
+ rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ /* Tx gain stage */
+ rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ /* Tx gain stage */
+ rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+ /* Tx gain stage */
+ rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
+
+ /* Transceiver A~D HSSI Parameter-1 */
+ /* wire control parameter1 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
+ /* wire control parameter1 */
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
+
+ /* Transceiver A~D HSSI Parameter-2 */
+ /* wire control parameter2 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
+ /* wire control parameter2 */
+ rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
+
+ /* RF switch Control */
+ /* TR/Ant switch control */
+ rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+ rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
+
+ /* AGC control 1 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
+
+ /* AGC control 2 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
+ rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
+
+ /* RX AFE control 1 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
+
+ /* RX AFE control 2 */
+ rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
+
+ /* Tx AFE control 1 */
+ rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
+
+ /* Tx AFE control 2 */
+ rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
+ rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
+ rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
+
+ /* Transceiver LSSI Readback SI mode */
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
+ rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
+
+ /* Transceiver LSSI Readback PI mode */
+ rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVERA_HSPI_READBACK;
+ rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVERB_HSPI_READBACK;
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_init_bb_rf_register_definition);
+
+void rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ int index;
+
+ if (regaddr == RTXAGC_A_RATE18_06)
+ index = 0;
+ else if (regaddr == RTXAGC_A_RATE54_24)
+ index = 1;
+ else if (regaddr == RTXAGC_A_CCK1_MCS32)
+ index = 6;
+ else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00)
+ index = 7;
+ else if (regaddr == RTXAGC_A_MCS03_MCS00)
+ index = 2;
+ else if (regaddr == RTXAGC_A_MCS07_MCS04)
+ index = 3;
+ else if (regaddr == RTXAGC_A_MCS11_MCS08)
+ index = 4;
+ else if (regaddr == RTXAGC_A_MCS15_MCS12)
+ index = 5;
+ else if (regaddr == RTXAGC_B_RATE18_06)
+ index = 8;
+ else if (regaddr == RTXAGC_B_RATE54_24)
+ index = 9;
+ else if (regaddr == RTXAGC_B_CCK1_55_MCS32)
+ index = 14;
+ else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff)
+ index = 15;
+ else if (regaddr == RTXAGC_B_MCS03_MCS00)
+ index = 10;
+ else if (regaddr == RTXAGC_B_MCS07_MCS04)
+ index = 11;
+ else if (regaddr == RTXAGC_B_MCS11_MCS08)
+ index = 12;
+ else if (regaddr == RTXAGC_B_MCS15_MCS12)
+ index = 13;
+ else
+ return;
+
+ rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n",
+ rtlphy->pwrgroup_cnt, index,
+ rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
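+ /* Index 13 (RTXAGC_B_MCS15_MCS12) is the last entry parsed for a
+ * power group, so advance to the next group.
+ */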
+ if (index == 13)
+ rtlphy->pwrgroup_cnt++;
+}
+EXPORT_SYMBOL_GPL(rtl92d_store_pwrindex_diffrate_offset);
+
+void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+
+ rtlphy->default_initialgain[0] =
+ rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[1] =
+ rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[2] =
+ rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
+ rtlphy->default_initialgain[3] =
+ rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
+ rtlphy->default_initialgain[0],
+ rtlphy->default_initialgain[1],
+ rtlphy->default_initialgain[2],
+ rtlphy->default_initialgain[3]);
+ rtlphy->framesync = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3, MASKBYTE0);
+ rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2, MASKDWORD);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Default framesync (0x%x) = 0x%x\n",
+ ROFDM0_RXDETECTOR3, rtlphy->framesync);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_get_hw_reg_originalvalue);
+
+static void _rtl92d_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
+ u8 *cckpowerlevel, u8 *ofdmpowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ u8 index = channel - 1;
+
+ /* 1. CCK */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* RF-A */
+ cckpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
+ /* RF-B */
+ cckpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
+ } else {
+ cckpowerlevel[RF90_PATH_A] = 0;
+ cckpowerlevel[RF90_PATH_B] = 0;
+ }
+ /* 2. OFDM for 1S or 2S */
+ if (rtlphy->rf_type == RF_1T2R || rtlphy->rf_type == RF_1T1R) {
+ /* Read HT 40 OFDM TX power */
+ ofdmpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
+ ofdmpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
+ } else if (rtlphy->rf_type == RF_2T2R) {
+ /* Read HT 40 OFDM TX power */
+ ofdmpowerlevel[RF90_PATH_A] =
+ rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
+ ofdmpowerlevel[RF90_PATH_B] =
+ rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
+ }
+}
+
+static void _rtl92d_ccxpower_index_check(struct ieee80211_hw *hw,
+ u8 channel, u8 *cckpowerlevel,
+ u8 *ofdmpowerlevel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+
+ rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
+ rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
+}
+
+static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl)
+{
+ u8 place = chnl;
+
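+ /* 2.4 GHz channels map 1:1; a 5 GHz channel is looked up in
+ * channel_all[] and one past the matching index is returned.
+ */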
+ if (chnl > 14) {
+ for (place = 14; place < ARRAY_SIZE(channel_all); place++) {
+ if (channel_all[place] == chnl) {
+ place++;
+ break;
+ }
+ }
+ }
+ return place;
+}
+
+void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 cckpowerlevel[2], ofdmpowerlevel[2];
+
+ if (!rtlefuse->txpwr_fromeprom)
+ return;
+ channel = _rtl92c_phy_get_rightchnlplace(channel);
+ _rtl92d_get_txpower_index(hw, channel, &cckpowerlevel[0],
+ &ofdmpowerlevel[0]);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) {
+ _rtl92d_ccxpower_index_check(hw, channel, &cckpowerlevel[0],
+ &ofdmpowerlevel[0]);
+ rtl92d_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
+ }
+ rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_set_txpower_level);
+
+void rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw, u8 rfpath,
+ u32 *pu4_regval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
+ /*----Store original RFENV control type----*/
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ *pu4_regval = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ *pu4_regval =
+ rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16);
+ break;
+ }
+ /*----Set RF_ENV enable----*/
+ rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+ udelay(1);
+ /*----Set RF_ENV output high----*/
+ rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+ udelay(1);
+ /* Set bit number of Address and Data for RF register */
+ /* Set 1 to 4 bits for 8255 */
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREADDRESSLENGTH, 0x0);
+ udelay(1);
+ /* Set 0 to 12 bits for 8255 */
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+ udelay(1);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_enable_rf_env);
+
+void rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
+ u32 *pu4_regval)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
+ /*----Restore RFENV control type----*/
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, *pu4_regval);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16,
+ *pu4_regval);
+ break;
+ }
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<=====\n");
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_restore_rf_env);
+
+u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl)
+{
+ u8 place;
+
+ if (chnl > 14) {
+ for (place = 14; place < ARRAY_SIZE(channel_all); place++) {
+ if (channel_all[place] == chnl)
+ return place - 13;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rtl92d_get_rightchnlplace_for_iqk);
+
+void rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw, const u32 *adda_reg,
+ u32 *adda_backup, u32 regnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save ADDA parameters.\n");
+ for (i = 0; i < regnum; i++)
+ adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], MASKDWORD);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_save_adda_registers);
+
+void rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw,
+ const u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save MAC parameters.\n");
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
+ macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_save_mac_registers);
+
+void rtl92d_phy_path_adda_on(struct ieee80211_hw *hw,
+ const u32 *adda_reg, bool patha_on, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 pathon;
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "ADDA ON.\n");
+ pathon = 0x0b1b25a4;
+ if (patha_on)
+ pathon = rtlpriv->rtlhal.interfaceindex == 0 ?
+ 0x04db25a4 : 0x0b1b25a4;
+ for (i = 0; i < IQK_ADDA_REG_NUM; i++)
+ rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, pathon);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_path_adda_on);
+
+void rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+ const u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "MAC settings for Calibration.\n");
+ rtl_write_byte(rtlpriv, macreg[0], 0x3F);
+
+ for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i], (u8)(macbackup[i] &
+ (~BIT(3))));
+ rtl_write_byte(rtlpriv, macreg[i], (u8)(macbackup[i] & (~BIT(5))));
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_mac_setting_calibration);
+
+static u32 _rtl92d_phy_get_abs(u32 val1, u32 val2)
+{
+ u32 ret;
+
+ if (val1 >= val2)
+ ret = val1 - val2;
+ else
+ ret = val2 - val1;
+ return ret;
+}
+
+static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(channel5g); i++)
+ if (channel == channel5g[i])
+ return true;
+ return false;
+}
+
+void rtl92d_phy_calc_curvindex(struct ieee80211_hw *hw,
+ const u32 *targetchnl, u32 *curvecount_val,
+ bool is5g, u32 *curveindex)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 smallest_abs_val = 0xffffffff, u4tmp;
+ u8 i, j;
+ u8 chnl_num = is5g ? TARGET_CHNL_NUM_5G : TARGET_CHNL_NUM_2G;
+
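+ /* For each target channel, pick the curve value with the smallest
+ * absolute difference from the channel's target value.
+ */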
+ for (i = 0; i < chnl_num; i++) {
+ if (is5g && !_rtl92d_is_legal_5g_channel(hw, i + 1))
+ continue;
+ curveindex[i] = 0;
+ for (j = 0; j < (CV_CURVE_CNT * 2); j++) {
+ u4tmp = _rtl92d_phy_get_abs(targetchnl[i],
+ curvecount_val[j]);
+
+ if (u4tmp < smallest_abs_val) {
+ curveindex[i] = j;
+ smallest_abs_val = u4tmp;
+ }
+ }
+ smallest_abs_val = 0xffffffff;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "curveindex[%d] = %x\n",
+ i, curveindex[i]);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_calc_curvindex);
+
+void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 i;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "settings regs %zu default regs %d\n",
+ ARRAY_SIZE(rtlphy->iqk_matrix),
+ IQK_MATRIX_REG_NUM);
+ /* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */
+ for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
+ rtlphy->iqk_matrix[i].value[0][0] = 0x100;
+ rtlphy->iqk_matrix[i].value[0][2] = 0x100;
+ rtlphy->iqk_matrix[i].value[0][4] = 0x100;
+ rtlphy->iqk_matrix[i].value[0][6] = 0x100;
+ rtlphy->iqk_matrix[i].value[0][1] = 0x0;
+ rtlphy->iqk_matrix[i].value[0][3] = 0x0;
+ rtlphy->iqk_matrix[i].value[0][5] = 0x0;
+ rtlphy->iqk_matrix[i].value[0][7] = 0x0;
+ rtlphy->iqk_matrix[i].iqk_done = false;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_reset_iqk_result);
+
+static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct dig_t *de_digtable = &rtlpriv->dm_digtable;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "--->Cmd(%#x), set_io_inprogress(%d)\n",
+ rtlphy->current_io_type, rtlphy->set_io_inprogress);
+
+ switch (rtlphy->current_io_type) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ de_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
+ rtl92d_dm_write_dig(hw);
+ rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ rtlphy->initgain_backup.xaagccore1 = de_digtable->cur_igvalue;
+ de_digtable->cur_igvalue = 0x37;
+ if (rtlpriv->rtlhal.interface == INTF_USB)
+ de_digtable->cur_igvalue = 0x17;
+ rtl92d_dm_write_dig(hw);
+ break;
+ default:
+ pr_err("switch case %#x not processed\n",
+ rtlphy->current_io_type);
+ break;
+ }
+
+ rtlphy->set_io_inprogress = false;
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<---(%#x)\n",
+ rtlphy->current_io_type);
+}
+
+bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ bool postprocessing = false;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
+ iotype, rtlphy->set_io_inprogress);
+
+ do {
+ switch (iotype) {
+ case IO_CMD_RESUME_DM_BY_SCAN:
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Resume DM after scan\n");
+ postprocessing = true;
+ break;
+ case IO_CMD_PAUSE_DM_BY_SCAN:
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
+ "[IO CMD] Pause DM before scan\n");
+ postprocessing = true;
+ break;
+ default:
+ pr_err("switch case %#x not processed\n",
+ iotype);
+ break;
+ }
+ } while (false);
+
+ if (postprocessing && !rtlphy->set_io_inprogress) {
+ rtlphy->set_io_inprogress = true;
+ rtlphy->current_io_type = iotype;
+ } else {
+ return false;
+ }
+
+ rtl92d_phy_set_io(hw);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype);
+ return true;
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_set_io_cmd);
+
+void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 offset = REG_MAC_PHY_CTRL_NORMAL;
+ u8 phy_ctrl = 0xf0;
+
+ if (rtlhal->interface == INTF_USB) {
+ phy_ctrl = rtl_read_byte(rtlpriv, offset);
+ phy_ctrl &= ~(BIT(0) | BIT(1) | BIT(2));
+ }
+
+ switch (rtlhal->macphymode) {
+ case DUALMAC_DUALPHY:
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode: DUALMAC_DUALPHY\n");
+ rtl_write_byte(rtlpriv, offset, phy_ctrl | BIT(0) | BIT(1));
+ break;
+ case SINGLEMAC_SINGLEPHY:
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode: SINGLEMAC_SINGLEPHY\n");
+ rtl_write_byte(rtlpriv, offset, phy_ctrl | BIT(2));
+ break;
+ case DUALMAC_SINGLEPHY:
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "MacPhyMode: DUALMAC_SINGLEPHY\n");
+ rtl_write_byte(rtlpriv, offset, phy_ctrl | BIT(0));
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_config_macphymode);
+
+void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+
+ switch (rtlhal->macphymode) {
+ case DUALMAC_SINGLEPHY:
+ rtlphy->rf_type = RF_2T2R;
+ rtlhal->version |= RF_TYPE_2T2R;
+ rtlhal->bandset = BAND_ON_BOTH;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ break;
+
+ case SINGLEMAC_SINGLEPHY:
+ rtlphy->rf_type = RF_2T2R;
+ rtlhal->version |= RF_TYPE_2T2R;
+ rtlhal->bandset = BAND_ON_BOTH;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ break;
+
+ case DUALMAC_DUALPHY:
+ rtlphy->rf_type = RF_1T1R;
+ rtlhal->version &= RF_TYPE_1T1R;
+ /* Now we let MAC0 run on 5G band. */
+ if (rtlhal->interfaceindex == 0) {
+ rtlhal->bandset = BAND_ON_5G;
+ rtlhal->current_bandtype = BAND_ON_5G;
+ } else {
+ rtlhal->bandset = BAND_ON_2_4G;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ }
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_config_macphymode_info);
+
+u8 rtl92d_get_chnlgroup_fromarray(u8 chnl)
+{
+ u8 group;
+
+ if (channel_all[chnl] <= 3)
+ group = 0;
+ else if (channel_all[chnl] <= 9)
+ group = 1;
+ else if (channel_all[chnl] <= 14)
+ group = 2;
+ else if (channel_all[chnl] <= 44)
+ group = 3;
+ else if (channel_all[chnl] <= 54)
+ group = 4;
+ else if (channel_all[chnl] <= 64)
+ group = 5;
+ else if (channel_all[chnl] <= 112)
+ group = 6;
+ else if (channel_all[chnl] <= 126)
+ group = 7;
+ else if (channel_all[chnl] <= 140)
+ group = 8;
+ else if (channel_all[chnl] <= 153)
+ group = 9;
+ else if (channel_all[chnl] <= 159)
+ group = 10;
+ else
+ group = 11;
+ return group;
+}
+EXPORT_SYMBOL_GPL(rtl92d_get_chnlgroup_fromarray);
+
+u8 rtl92d_phy_get_chnlgroup_bypg(u8 chnlindex)
+{
+ u8 group;
+
+ if (channel_all[chnlindex] <= 3) /* Channel 1-3 */
+ group = 0;
+ else if (channel_all[chnlindex] <= 9) /* Channel 4-9 */
+ group = 1;
+ else if (channel_all[chnlindex] <= 14) /* Channel 10-14 */
+ group = 2;
+ else if (channel_all[chnlindex] <= 64)
+ group = 6;
+ else if (channel_all[chnlindex] <= 140)
+ group = 7;
+ else
+ group = 8;
+ return group;
+}
+
+void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ switch (rtlpriv->rtlhal.macphymode) {
+ case DUALMAC_DUALPHY:
+ rtl_write_byte(rtlpriv, REG_DMC, 0x0);
+ rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08);
+ rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff);
+ break;
+ case DUALMAC_SINGLEPHY:
+ rtl_write_byte(rtlpriv, REG_DMC, 0xf8);
+ rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08);
+ rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff);
+ break;
+ case SINGLEMAC_SINGLEPHY:
+ rtl_write_byte(rtlpriv, REG_DMC, 0x0);
+ rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x10);
+ rtl_write_word(rtlpriv, (REG_TRXFF_BNDY + 2), 0x27FF);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_config_maccoexist_rfpage);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h
new file mode 100644
index 000000000000..0f794557af47
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/phy_common.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#ifndef __RTL92D_PHY_COMMON_H__
+#define __RTL92D_PHY_COMMON_H__
+
+#define TARGET_CHNL_NUM_5G 221
+#define TARGET_CHNL_NUM_2G 14
+#define CV_CURVE_CNT 64
+#define RT_CANNOT_IO(hw) false
+#define RX_INDEX_MAPPING_NUM 15
+#define IQK_BB_REG_NUM 10
+
+#define IQK_DELAY_TIME 1
+#define MAX_TOLERANCE 5
+#define MAX_TOLERANCE_92D 3
+
+enum baseband_config_type {
+ BASEBAND_CONFIG_PHY_REG = 0,
+ BASEBAND_CONFIG_AGC_TAB = 1,
+};
+
+enum rf_content {
+ radioa_txt = 0,
+ radiob_txt = 1,
+ radioc_txt = 2,
+ radiod_txt = 3
+};
+
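+/* Page A CCK registers are apparently shared between the two MACs, so
+ * only the second MAC (interfaceindex 1) takes the lock; USB devices
+ * skip the locking entirely.
+ */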
+static inline void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
+ unsigned long *flag)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->rtlhal.interface == INTF_USB)
+ return;
+
+ if (rtlpriv->rtlhal.interfaceindex == 1)
+ spin_lock_irqsave(&rtlpriv->locks.cck_and_rw_pagea_lock, *flag);
+}
+
+static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
+ unsigned long *flag)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->rtlhal.interface == INTF_USB)
+ return;
+
+ if (rtlpriv->rtlhal.interfaceindex == 1)
+ spin_unlock_irqrestore(&rtlpriv->locks.cck_and_rw_pagea_lock,
+ *flag);
+}
+
+u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask);
+void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data);
+void rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
+void rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
+void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
+void rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw, u8 rfpath,
+ u32 *pu4_regval);
+void rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
+ u32 *pu4_regval);
+u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl);
+void rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw, const u32 *adda_reg,
+ u32 *adda_backup, u32 regnum);
+void rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw,
+ const u32 *macreg, u32 *macbackup);
+void rtl92d_phy_path_adda_on(struct ieee80211_hw *hw,
+ const u32 *adda_reg, bool patha_on, bool is2t);
+void rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw,
+ const u32 *macreg, u32 *macbackup);
+void rtl92d_phy_calc_curvindex(struct ieee80211_hw *hw,
+ const u32 *targetchnl, u32 *curvecount_val,
+ bool is5g, u32 *curveindex);
+void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw);
+bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
+void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw);
+void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw);
+u8 rtl92d_get_chnlgroup_fromarray(u8 chnl);
+u8 rtl92d_phy_get_chnlgroup_bypg(u8 chnlindex);
+void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw);
+/* Without these declarations sparse warns about context imbalance. */
+void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
+ unsigned long *flag);
+void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
+ unsigned long *flag);
+
+/* Without these helpers and the declarations sparse warns about
+ * context imbalance.
+ */
+static inline void rtl92d_pci_lock(struct rtl_priv *rtlpriv)
+{
+ if (rtlpriv->rtlhal.interface == INTF_PCI)
+ spin_lock(&rtlpriv->locks.rf_lock);
+}
+
+static inline void rtl92d_pci_unlock(struct rtl_priv *rtlpriv)
+{
+ if (rtlpriv->rtlhal.interface == INTF_PCI)
+ spin_unlock(&rtlpriv->locks.rf_lock);
+}
+
+void rtl92d_pci_lock(struct rtl_priv *rtlpriv);
+void rtl92d_pci_unlock(struct rtl_priv *rtlpriv);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/reg.h
index 2783d7e7b227..b5b906b799cb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/reg.h
@@ -50,6 +50,9 @@
#define REG_HMEBOX_EXT_1 0x008A
#define REG_HMEBOX_EXT_2 0x008C
#define REG_HMEBOX_EXT_3 0x008E
+#define SIZE_OF_REG_HMEBOX_EXT 2
+
+#define REG_EFUSE_ACCESS 0x00CF
#define REG_BIST_SCAN 0x00D0
#define REG_BIST_RPT 0x00D4
@@ -86,6 +89,7 @@
#define REG_CPWM 0x012F
#define REG_FWIMR 0x0130
#define REG_FWISR 0x0134
+#define REG_FTIMR 0x0138
#define REG_PKTBUF_DBG_CTRL 0x0140
#define REG_PKTBUF_DBG_DATA_L 0x0144
#define REG_PKTBUF_DBG_DATA_H 0x0148
@@ -109,6 +113,7 @@
#define REG_HMEBOX_1 0x01D4
#define REG_HMEBOX_2 0x01D8
#define REG_HMEBOX_3 0x01DC
+#define SIZE_OF_REG_HMEBOX 4
#define REG_LLT_INIT 0x01E0
#define REG_BB_ACCEESS_CTRL 0x01E8
@@ -197,6 +202,8 @@
#define REG_POWER_STAGE1 0x04B4
#define REG_POWER_STAGE2 0x04B8
#define REG_PKT_LIFE_TIME 0x04C0
+#define REG_PKT_VO_VI_LIFE_TIME 0x04C0
+#define REG_PKT_BE_BK_LIFE_TIME 0x04C2
#define REG_STBC_SETTING 0x04C4
#define REG_PROT_MODE_CTRL 0x04C8
#define REG_MAX_AGGR_NUM 0x04CA
@@ -233,6 +240,7 @@
#define REG_RD_NAV_NXT 0x0544
#define REG_NAV_PROT_LEN 0x0546
#define REG_BCN_CTRL 0x0550
+#define REG_BCN_CTRL_1 0x0551
#define REG_MBID_NUM 0x0552
#define REG_DUAL_TSF_RST 0x0553
#define REG_BCN_INTERVAL 0x0554
@@ -319,6 +327,8 @@
#define REG_BT_COEX_TABLE 0x06C0
#define REG_WMAC_RESP_TXINFO 0x06D8
+#define REG_USB_Queue_Select_MAC0 0xFE44
+#define REG_USB_Queue_Select_MAC1 0xFE47
/* ----------------------------------------------------- */
/* Redifine 8192C register definition for compatibility */
@@ -355,27 +365,27 @@
#define RRSR_RSC_UPSUBCHNL 0x400000
#define RRSR_RSC_LOWSUBCHNL 0x200000
#define RRSR_SHORT 0x800000
-#define RRSR_1M BIT0
-#define RRSR_2M BIT1
-#define RRSR_5_5M BIT2
-#define RRSR_11M BIT3
-#define RRSR_6M BIT4
-#define RRSR_9M BIT5
-#define RRSR_12M BIT6
-#define RRSR_18M BIT7
-#define RRSR_24M BIT8
-#define RRSR_36M BIT9
-#define RRSR_48M BIT10
-#define RRSR_54M BIT11
-#define RRSR_MCS0 BIT12
-#define RRSR_MCS1 BIT13
-#define RRSR_MCS2 BIT14
-#define RRSR_MCS3 BIT15
-#define RRSR_MCS4 BIT16
-#define RRSR_MCS5 BIT17
-#define RRSR_MCS6 BIT18
-#define RRSR_MCS7 BIT19
-#define BRSR_ACKSHORTPMB BIT23
+#define RRSR_1M BIT(0)
+#define RRSR_2M BIT(1)
+#define RRSR_5_5M BIT(2)
+#define RRSR_11M BIT(3)
+#define RRSR_6M BIT(4)
+#define RRSR_9M BIT(5)
+#define RRSR_12M BIT(6)
+#define RRSR_18M BIT(7)
+#define RRSR_24M BIT(8)
+#define RRSR_36M BIT(9)
+#define RRSR_48M BIT(10)
+#define RRSR_54M BIT(11)
+#define RRSR_MCS0 BIT(12)
+#define RRSR_MCS1 BIT(13)
+#define RRSR_MCS2 BIT(14)
+#define RRSR_MCS3 BIT(15)
+#define RRSR_MCS4 BIT(16)
+#define RRSR_MCS5 BIT(17)
+#define RRSR_MCS6 BIT(18)
+#define RRSR_MCS7 BIT(19)
+#define BRSR_ACKSHORTPMB BIT(23)
/* ----------------------------------------------------- */
/* 8192C Rate Definition */
@@ -600,7 +610,11 @@
#define EEPROM_SVID 0x2C /* SE Vendor ID.E-F */
#define EEPROM_SMID 0x2E /* SE PCI Subsystem ID. 10-11 */
+#define EEPROM_VID_USB 0xC
+#define EEPROM_PID_USB 0xE
+#define EEPROM_ENDPOINT_SETTING 0x10
#define EEPROM_MAC_ADDR 0x16 /* SEMAC Address. 12-17 */
+#define EEPROM_MAC_ADDR_MAC0_92DU 0x19
#define EEPROM_MAC_ADDR_MAC0_92D 0x55
#define EEPROM_MAC_ADDR_MAC1_92D 0x5B
@@ -915,6 +929,42 @@
#define BD_HCI_SEL BIT(26)
#define TYPE_ID BIT(27)
+#define HCI_TXDMA_EN BIT(0)
+#define HCI_RXDMA_EN BIT(1)
+#define TXDMA_EN BIT(2)
+#define RXDMA_EN BIT(3)
+#define PROTOCOL_EN BIT(4)
+#define SCHEDULE_EN BIT(5)
+#define MACTXEN BIT(6)
+#define MACRXEN BIT(7)
+#define ENSWBCN BIT(8)
+#define ENSEC BIT(9)
+
+#define HQSEL_VOQ BIT(0)
+#define HQSEL_VIQ BIT(1)
+#define HQSEL_BEQ BIT(2)
+#define HQSEL_BKQ BIT(3)
+#define HQSEL_MGTQ BIT(4)
+#define HQSEL_HIQ BIT(5)
+
+#define TXDMA_HIQ_MAP GENMASK(15, 14)
+#define TXDMA_MGQ_MAP GENMASK(13, 12)
+#define TXDMA_BKQ_MAP GENMASK(11, 10)
+#define TXDMA_BEQ_MAP GENMASK(9, 8)
+#define TXDMA_VIQ_MAP GENMASK(7, 6)
+#define TXDMA_VOQ_MAP GENMASK(5, 4)
+
+#define QUEUE_LOW 1
+#define QUEUE_NORMAL 2
+#define QUEUE_HIGH 3
+
+#define HPQ_MASK GENMASK(7, 0)
+#define LPQ_MASK GENMASK(15, 8)
+#define PUBQ_MASK GENMASK(23, 16)
+#define LD_RQPN BIT(31)
+
+#define DROP_DATA_EN BIT(9)
+
/* LLT_INIT */
#define _LLT_NO_ACTIVE 0x0
#define _LLT_WRITE_ACCESS 0x1
@@ -929,6 +979,10 @@
/* ----------------------------------------------------- */
/* 0x0400h ~ 0x047Fh Protocol Configuration */
/* ----------------------------------------------------- */
+/* FWHW_TXQ_CTRL */
+#define EN_AMPDU_RTY_NEW BIT(7)
+#define EN_BCNQ_DL BIT(22)
+
#define RETRY_LIMIT_SHORT_SHIFT 8
#define RETRY_LIMIT_LONG_SHIFT 0
@@ -942,6 +996,13 @@
#define AC_PARAM_ECW_MIN_OFFSET 8
#define AC_PARAM_AIFS_OFFSET 0
+/* REG_RD_CTRL */
+#define DIS_EDCA_CNT_DWN BIT(11)
+
+/* REG_BCN_CTRL */
+#define EN_BCN_FUNCTION BIT(3)
+#define DIS_TSF_UDT BIT(4)
+
/* ACMHWCTRL */
#define ACMHW_HWEN BIT(0)
#define ACMHW_BEQEN BIT(1)
@@ -1073,6 +1134,11 @@
#define RCCK0_FACOUNTERLOWER 0xa5c
#define RCCK0_FACOUNTERUPPER 0xa58
+#define RPDP_ANTA 0xb00
+#define RCONFIG_ANTA 0xb68
+#define RCONFIG_ANTB 0xb6c
+#define RPDP_ANTB 0xb70
+
/* 6. PageC(0xC00) */
#define ROFDM0_LSTF 0xc00
@@ -1126,6 +1192,7 @@
#define ROFDM0_TXPSEUDONOISEWGT 0xce4
#define ROFDM0_FRAMESYNC 0xcf0
#define ROFDM0_DFSREPORT 0xcf4
+#define ROFDM0_RXIQEXTANTA 0xca0
#define ROFDM0_TXCOEFF1 0xca4
#define ROFDM0_TXCOEFF2 0xca8
#define ROFDM0_TXCOEFF3 0xcac
@@ -1184,17 +1251,70 @@
#define RTXAGC_B_MCS15_MCS12 0x868
#define RTXAGC_B_CCK11_A_CCK2_11 0x86c
+#define RFPGA0_IQK 0xe28
+#define RTX_IQK_TONE_A 0xe30
+#define RRX_IQK_TONE_A 0xe34
+#define RTX_IQK_PI_A 0xe38
+#define RRX_IQK_PI_A 0xe3c
+
+#define RTX_IQK 0xe40
+#define RRX_IQK 0xe44
+#define RIQK_AGC_PTS 0xe48
+#define RIQK_AGC_RSP 0xe4c
+#define RTX_IQK_TONE_B 0xe50
+#define RRX_IQK_TONE_B 0xe54
+#define RTX_IQK_PI_B 0xe58
+#define RRX_IQK_PI_B 0xe5c
+#define RIQK_AGC_CONT 0xe60
+
+#define RBLUE_TOOTH 0xe6c
+#define RRX_WAIT_CCA 0xe70
+#define RTX_CCK_RFON 0xe74
+#define RTX_CCK_BBON 0xe78
+#define RTX_OFDM_RFON 0xe7c
+#define RTX_OFDM_BBON 0xe80
+#define RTX_TO_RX 0xe84
+#define RTX_TO_TX 0xe88
+#define RRX_CCK 0xe8c
+
+#define RTX_POWER_BEFORE_IQK_A 0xe94
+#define RTX_POWER_AFTER_IQK_A 0xe9c
+
+#define RRX_POWER_BEFORE_IQK_A 0xea0
+#define RRX_POWER_BEFORE_IQK_A_2 0xea4
+#define RRX_POWER_AFTER_IQK_A 0xea8
+#define RRX_POWER_AFTER_IQK_A_2 0xeac
+
+#define RTX_POWER_BEFORE_IQK_B 0xeb4
+#define RTX_POWER_AFTER_IQK_B 0xebc
+
+#define RRX_POWER_BEFORE_IQK_B 0xec0
+#define RRX_POWER_BEFORE_IQK_B_2 0xec4
+#define RRX_POWER_AFTER_IQK_B 0xec8
+#define RRX_POWER_AFTER_IQK_B_2 0xecc
+
+#define MASK_IQK_RESULT 0x03ff0000
+
+#define RRX_OFDM 0xed0
+#define RRX_WAIT_RIFS 0xed4
+#define RRX_TO_RX 0xed8
+#define RSTANDBY 0xedc
+#define RSLEEP 0xee0
+#define RPMPD_ANAEN 0xeec
+
/* RL6052 Register definition */
#define RF_AC 0x00
#define RF_IQADJ_G1 0x01
#define RF_IQADJ_G2 0x02
+#define RF_BS_PA_APSET_G1_G4 0x03
#define RF_POW_TRSW 0x05
#define RF_GAIN_RX 0x06
#define RF_GAIN_TX 0x07
#define RF_TXM_IDAC 0x08
+#define RF_TXPA_AG 0x0B
#define RF_BS_IQGEN 0x0F
#define RF_MODE1 0x10
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c
new file mode 100644
index 000000000000..427d1877f431
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "def.h"
+#include "reg.h"
+#include "phy_common.h"
+#include "rf_common.h"
+
+void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 rfpath;
+
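+ /* Bits 10 and 11 of RF_CHNLBW select the bandwidth:
+ * 0x01 = 20 MHz, 0x00 = 40 MHz.
+ */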
+ switch (bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] &= 0xfffff3ff;
+ rtlphy->rfreg_chnlval[rfpath] |= 0x0400;
+
+ rtl_set_rfreg(hw, rfpath, RF_CHNLBW,
+ BIT(10) | BIT(11), 0x01);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "20M RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[rfpath]);
+ }
+
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] &= 0xfffff3ff;
+
+ rtl_set_rfreg(hw, rfpath, RF_CHNLBW,
+ BIT(10) | BIT(11), 0x00);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "40M RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[rfpath]);
+ }
+ break;
+ default:
+ pr_err("unknown bandwidth: %#X\n", bandwidth);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_rf6052_set_bandwidth);
+
+void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 tx_agc[2] = {0, 0}, tmpval;
+ bool turbo_scanoff = false;
+ u8 idx1, idx2;
+ u8 *ptr;
+
+ if (rtlefuse->eeprom_regulatory != 0)
+ turbo_scanoff = true;
+ if (mac->act_scanning) {
+ tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
+ tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
+ if (turbo_scanoff) {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ }
+ } else {
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ tx_agc[idx1] = ppowerlevel[idx1] |
+ (ppowerlevel[idx1] << 8) |
+ (ppowerlevel[idx1] << 16) |
+ (ppowerlevel[idx1] << 24);
+ }
+ if (rtlefuse->eeprom_regulatory == 0) {
+ tmpval = (rtlphy->mcs_offset[0][6]) +
+ (rtlphy->mcs_offset[0][7] << 8);
+ tx_agc[RF90_PATH_A] += tmpval;
+ tmpval = (rtlphy->mcs_offset[0][14]) +
+ (rtlphy->mcs_offset[0][15] << 24);
+ tx_agc[RF90_PATH_B] += tmpval;
+ }
+ }
+
+ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
+ ptr = (u8 *)(&tx_agc[idx1]);
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if (*ptr > RF6052_MAX_TX_PWR)
+ *ptr = RF6052_MAX_TX_PWR;
+ ptr++;
+ }
+ }
+
+ tmpval = tx_agc[RF90_PATH_A] & 0xff;
+ rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_A_CCK1_MCS32);
+ tmpval = tx_agc[RF90_PATH_A] >> 8;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK11_A_CCK2_11);
+ tmpval = tx_agc[RF90_PATH_B] >> 24;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK11_A_CCK2_11);
+ tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
+ rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n",
+ tmpval, RTXAGC_B_CCK1_55_MCS32);
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_rf6052_set_cck_txpower);
+
+static void _rtl92d_phy_get_power_base(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel,
+ u32 *ofdmbase, u32 *mcsbase)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 powerbase0, powerbase1;
+ u8 legacy_pwrdiff, ht20_pwrdiff;
+ u8 i, powerlevel[2];
+
+ for (i = 0; i < 2; i++) {
+ powerlevel[i] = ppowerlevel[i];
+ legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
+ powerbase0 = powerlevel[i] + legacy_pwrdiff;
+ powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) |
+ (powerbase0 << 8) | powerbase0;
+ *(ofdmbase + i) = powerbase0;
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ " [OFDM power base index rf(%c) = 0x%x]\n",
+ i == 0 ? 'A' : 'B', *(ofdmbase + i));
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
+ ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
+ powerlevel[i] += ht20_pwrdiff;
+ }
+ powerbase1 = powerlevel[i];
+ powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) |
+ (powerbase1 << 8) | powerbase1;
+ *(mcsbase + i) = powerbase1;
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ " [MCS power base index rf(%c) = 0x%x]\n",
+ i == 0 ? 'A' : 'B', *(mcsbase + i));
+ }
+}
+
+static void _rtl92d_get_pwr_diff_limit(struct ieee80211_hw *hw, u8 channel,
+ u8 index, u8 rf, u8 pwr_diff_limit[4])
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 mcs_offset;
+ u8 limit;
+ int i;
+
+ mcs_offset = rtlphy->mcs_offset[0][index + (rf ? 8 : 0)];
+
+ for (i = 0; i < 4; i++) {
+ pwr_diff_limit[i] = (mcs_offset >> (i * 8)) & 0x7f;
+
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40)
+ limit = rtlefuse->pwrgroup_ht40[rf][channel - 1];
+ else
+ limit = rtlefuse->pwrgroup_ht20[rf][channel - 1];
+
+ if (pwr_diff_limit[i] > limit)
+ pwr_diff_limit[i] = limit;
+ }
+}
+
+static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
+ u8 channel, u8 index,
+ u32 *powerbase0,
+ u32 *powerbase1,
+ u32 *p_outwriteval)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 writeval = 0, customer_limit, rf;
+ u8 chnlgroup = 0, pwr_diff_limit[4];
+
+ for (rf = 0; rf < 2; rf++) {
+ switch (rtlefuse->eeprom_regulatory) {
+ case 0:
+ writeval = rtlphy->mcs_offset[0][index + (rf ? 8 : 0)];
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "RTK better performance\n");
+ break;
+ case 1:
+ if (rtlphy->pwrgroup_cnt == 1)
+ chnlgroup = 0;
+
+ if (rtlphy->pwrgroup_cnt < MAX_PG_GROUP)
+ break;
+
+ chnlgroup = rtl92d_phy_get_chnlgroup_bypg(channel - 1);
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20)
+ chnlgroup++;
+ else
+ chnlgroup += 4;
+
+ writeval = rtlphy->mcs_offset
+ [chnlgroup][index + (rf ? 8 : 0)];
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "Realtek regulatory, 20MHz\n");
+ break;
+ case 2:
+ writeval = 0;
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Better regulatory\n");
+ break;
+ case 3:
+ if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "customer's limit, 40MHz rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B',
+ rtlefuse->pwrgroup_ht40[rf][channel - 1]);
+ } else {
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "customer's limit, 20MHz rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B',
+ rtlefuse->pwrgroup_ht20[rf][channel - 1]);
+ }
+
+ _rtl92d_get_pwr_diff_limit(hw, channel, index, rf,
+ pwr_diff_limit);
+
+ customer_limit = (pwr_diff_limit[3] << 24) |
+ (pwr_diff_limit[2] << 16) |
+ (pwr_diff_limit[1] << 8) |
+ (pwr_diff_limit[0]);
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "Customer's limit rf(%c) = 0x%x\n",
+ rf == 0 ? 'A' : 'B', customer_limit);
+
+ writeval = customer_limit;
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer\n");
+ break;
+ default:
+ writeval = rtlphy->mcs_offset[0][index + (rf ? 8 : 0)];
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "RTK better performance\n");
+ break;
+ }
+
+ if (index < 2)
+ writeval += powerbase0[rf];
+ else
+ writeval += powerbase1[rf];
+
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "writeval rf(%c)= 0x%x\n",
+ rf == 0 ? 'A' : 'B', writeval);
+
+ *(p_outwriteval + rf) = writeval;
+ }
+}
+
+static void _rtl92d_write_ofdm_power_reg(struct ieee80211_hw *hw,
+ u8 index, u32 *pvalue)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ static const u16 regoffset_a[6] = {
+ RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
+ RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
+ RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
+ };
+ static const u16 regoffset_b[6] = {
+ RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
+ RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
+ RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
+ };
+ u8 i, rf, pwr_val[4];
+ u32 writeval;
+ u16 regoffset;
+
+ for (rf = 0; rf < 2; rf++) {
+ writeval = pvalue[rf];
+ for (i = 0; i < 4; i++) {
+ pwr_val[i] = (u8)((writeval & (0x7f <<
+ (i * 8))) >> (i * 8));
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = RF6052_MAX_TX_PWR;
+ }
+ writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
+ (pwr_val[1] << 8) | pwr_val[0];
+ if (rf == 0)
+ regoffset = regoffset_a[index];
+ else
+ regoffset = regoffset_b[index];
+ rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
+ RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
+ "Set 0x%x = %08x\n", regoffset, writeval);
+ if (((get_rf_type(rtlphy) == RF_2T2R) &&
+ (regoffset == RTXAGC_A_MCS15_MCS12 ||
+ regoffset == RTXAGC_B_MCS15_MCS12)) ||
+ ((get_rf_type(rtlphy) != RF_2T2R) &&
+ (regoffset == RTXAGC_A_MCS07_MCS04 ||
+ regoffset == RTXAGC_B_MCS07_MCS04))) {
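+ /* Write the highest-rate byte, stepped down, into the three
+ * registers at 0xc90/0xc98; presumably these are the TX power
+ * training registers for the highest MCS rates.
+ */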
+ writeval = pwr_val[3];
+ if (regoffset == RTXAGC_A_MCS15_MCS12 ||
+ regoffset == RTXAGC_A_MCS07_MCS04)
+ regoffset = 0xc90;
+ if (regoffset == RTXAGC_B_MCS15_MCS12 ||
+ regoffset == RTXAGC_B_MCS07_MCS04)
+ regoffset = 0xc98;
+ for (i = 0; i < 3; i++) {
+ if (i != 2)
+ writeval = (writeval > 8) ?
+ (writeval - 8) : 0;
+ else
+ writeval = (writeval > 6) ?
+ (writeval - 6) : 0;
+ rtl_write_byte(rtlpriv, (u32)(regoffset + i),
+ (u8)writeval);
+ }
+ }
+ }
+}
+
+void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel)
+{
+ u32 writeval[2], powerbase0[2], powerbase1[2];
+ u8 index;
+
+ _rtl92d_phy_get_power_base(hw, ppowerlevel, channel,
+ &powerbase0[0], &powerbase1[0]);
+ for (index = 0; index < 6; index++) {
+ _rtl92d_get_txpower_writeval_by_regulatory(hw, channel, index,
+ &powerbase0[0],
+ &powerbase1[0],
+ &writeval[0]);
+ _rtl92d_write_ofdm_power_reg(hw, index, &writeval[0]);
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_phy_rf6052_set_ofdm_txpower);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h
new file mode 100644
index 000000000000..c243ec08369b
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/rf_common.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#ifndef __RTL92D_RF_COMMON_H__
+#define __RTL92D_RF_COMMON_H__
+
+void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
+void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel);
+void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
+ u8 *ppowerlevel, u8 channel);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c
new file mode 100644
index 000000000000..9f9a34492030
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c
@@ -0,0 +1,516 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../stats.h"
+#include "def.h"
+#include "trx_common.h"
+
+static long _rtl92d_translate_todbm(struct ieee80211_hw *hw,
+ u8 signal_strength_index)
+{
+ long signal_power;
+
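+ /* Map the 0..100 signal strength index onto roughly -95..-45 dBm. */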
+ signal_power = (long)((signal_strength_index + 1) >> 1);
+ signal_power -= 95;
+ return signal_power;
+}
+
+static void _rtl92d_query_rxphystatus(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats,
+ __le32 *pdesc,
+ struct rx_fwinfo_92d *p_drvinfo,
+ bool packet_match_bssid,
+ bool packet_toself,
+ bool packet_beacon)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct phy_sts_cck_8192d *cck_buf;
+ s8 rx_pwr_all, rx_pwr[4];
+ u8 rf_rx_num = 0, evm, pwdb_all;
+ u8 i, max_spatial_stream;
+ u32 rssi, total_rssi = 0;
+ bool is_cck_rate;
+ u8 rxmcs;
+
+ rxmcs = get_rx_desc_rxmcs(pdesc);
+ is_cck_rate = rxmcs <= DESC_RATE11M;
+ pstats->packet_matchbssid = packet_match_bssid;
+ pstats->packet_toself = packet_toself;
+ pstats->packet_beacon = packet_beacon;
+ pstats->is_cck = is_cck_rate;
+ pstats->rx_mimo_sig_qual[0] = -1;
+ pstats->rx_mimo_sig_qual[1] = -1;
+
+ if (is_cck_rate) {
+ u8 report, cck_highpwr;
+
+ cck_buf = (struct phy_sts_cck_8192d *)p_drvinfo;
+ if (ppsc->rfpwr_state == ERFON)
+ cck_highpwr = rtlphy->cck_high_power;
+ else
+ cck_highpwr = false;
+ if (!cck_highpwr) {
+ u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+
+ report = cck_buf->cck_agc_rpt & 0xc0;
+ report = report >> 6;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
+ break;
+ }
+ } else {
+ u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
+
+ report = p_drvinfo->cfosho[0] & 0x60;
+ report = report >> 5;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
+ break;
+ }
+ }
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+ /* CCK gain is smaller than OFDM/MCS gain, so add an empirically
+ * determined difference of 6.
+ */
+ pwdb_all += 6;
+ if (pwdb_all > 100)
+ pwdb_all = 100;
+ /* Modify the offset to align the gain index with OFDM. */
+ if (pwdb_all > 34 && pwdb_all <= 42)
+ pwdb_all -= 2;
+ else if (pwdb_all > 26 && pwdb_all <= 34)
+ pwdb_all -= 6;
+ else if (pwdb_all > 14 && pwdb_all <= 26)
+ pwdb_all -= 8;
+ else if (pwdb_all > 4 && pwdb_all <= 14)
+ pwdb_all -= 4;
+ pstats->rx_pwdb_all = pwdb_all;
+ pstats->recvsignalpower = rx_pwr_all;
+ if (packet_match_bssid) {
+ u8 sq;
+
+ if (pstats->rx_pwdb_all > 40) {
+ sq = 100;
+ } else {
+ sq = cck_buf->sq_rpt;
+ if (sq > 64)
+ sq = 0;
+ else if (sq < 20)
+ sq = 100;
+ else
+ sq = ((64 - sq) * 100) / 44;
+ }
+ pstats->signalquality = sq;
+ pstats->rx_mimo_sig_qual[0] = sq;
+ pstats->rx_mimo_sig_qual[1] = -1;
+ }
+ } else {
+ rtlpriv->dm.rfpath_rxenable[0] = true;
+ rtlpriv->dm.rfpath_rxenable[1] = true;
+ for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
+ if (rtlpriv->dm.rfpath_rxenable[i])
+ rf_rx_num++;
+ rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2)
+ - 110;
+ rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
+ total_rssi += rssi;
+ rtlpriv->stats.rx_snr_db[i] =
+ (long)(p_drvinfo->rxsnr[i] / 2);
+ if (packet_match_bssid)
+ pstats->rx_mimo_signalstrength[i] = (u8)rssi;
+ }
+ rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 106;
+ pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
+ pstats->rx_pwdb_all = pwdb_all;
+ pstats->rxpower = rx_pwr_all;
+ pstats->recvsignalpower = rx_pwr_all;
+ if (get_rx_desc_rxht(pdesc) && rxmcs >= DESC_RATEMCS8 &&
+ rxmcs <= DESC_RATEMCS15)
+ max_spatial_stream = 2;
+ else
+ max_spatial_stream = 1;
+ for (i = 0; i < max_spatial_stream; i++) {
+ evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
+ if (packet_match_bssid) {
+ if (i == 0)
+ pstats->signalquality =
+ (u8)(evm & 0xff);
+ pstats->rx_mimo_sig_qual[i] =
+ (u8)(evm & 0xff);
+ }
+ }
+ }
+ if (is_cck_rate)
+ pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+ pwdb_all));
+ else if (rf_rx_num != 0)
+ pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
+ total_rssi /= rf_rx_num));
+}
+
+static void rtl92d_loop_over_paths(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 rfpath;
+
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
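+ /* Exponential moving average over RX_SMOOTH_FACTOR samples;
+ * round up when the new sample is higher so the average can
+ * climb.
+ */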
+ if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ pstats->rx_mimo_signalstrength[rfpath];
+ }
+ if (pstats->rx_mimo_signalstrength[rfpath] >
+ rtlpriv->stats.rx_rssi_percentage[rfpath]) {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ rtlpriv->stats.rx_rssi_percentage[rfpath] + 1;
+ } else {
+ rtlpriv->stats.rx_rssi_percentage[rfpath] =
+ ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_signalstrength[rfpath])) /
+ (RX_SMOOTH_FACTOR);
+ }
+ }
+}
+
+static void _rtl92d_process_ui_rssi(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rt_smooth_data *ui_rssi;
+ u32 last_rssi, tmpval;
+
+ if (!pstats->packet_toself && !pstats->packet_beacon)
+ return;
+
+ ui_rssi = &rtlpriv->stats.ui_rssi;
+
+ rtlpriv->stats.rssi_calculate_cnt++;
+ if (ui_rssi->total_num++ >= PHY_RSSI_SLID_WIN_MAX) {
+ ui_rssi->total_num = PHY_RSSI_SLID_WIN_MAX;
+ last_rssi = ui_rssi->elements[ui_rssi->index];
+ ui_rssi->total_val -= last_rssi;
+ }
+ ui_rssi->total_val += pstats->signalstrength;
+ ui_rssi->elements[ui_rssi->index++] = pstats->signalstrength;
+ if (ui_rssi->index >= PHY_RSSI_SLID_WIN_MAX)
+ ui_rssi->index = 0;
+ tmpval = ui_rssi->total_val / ui_rssi->total_num;
+ rtlpriv->stats.signal_strength = _rtl92d_translate_todbm(hw, (u8)tmpval);
+ pstats->rssi = rtlpriv->stats.signal_strength;
+
+ if (!pstats->is_cck && pstats->packet_toself)
+ rtl92d_loop_over_paths(hw, pstats);
+}
+
+static void _rtl92d_update_rxsignalstatistics(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int weighting = 0;
+
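+ /* Weighted moving average, biased by +/-5 in the direction of
+ * change so the estimate tracks trends a little faster.
+ */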
+ if (rtlpriv->stats.recv_signal_power == 0)
+ rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
+ if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
+ weighting = 5;
+ else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
+ weighting = (-5);
+ rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power *
+ 5 + pstats->recvsignalpower + weighting) / 6;
+}
+
+static void _rtl92d_process_pwdb(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ long undec_sm_pwdb;
+
+ if (mac->opmode == NL80211_IFTYPE_ADHOC ||
+ mac->opmode == NL80211_IFTYPE_AP)
+ return;
+
+ undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
+
+ if (pstats->packet_toself || pstats->packet_beacon) {
+ if (undec_sm_pwdb < 0)
+ undec_sm_pwdb = pstats->rx_pwdb_all;
+ if (pstats->rx_pwdb_all > (u32)undec_sm_pwdb) {
+ undec_sm_pwdb = (((undec_sm_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ undec_sm_pwdb = undec_sm_pwdb + 1;
+ } else {
+ undec_sm_pwdb = (((undec_sm_pwdb) *
+ (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
+ }
+ rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
+ _rtl92d_update_rxsignalstatistics(hw, pstats);
+ }
+}
+
+static void rtl92d_loop_over_streams(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int stream;
+
+ for (stream = 0; stream < 2; stream++) {
+ if (pstats->rx_mimo_sig_qual[stream] != -1) {
+ if (rtlpriv->stats.rx_evm_percentage[stream] == 0) {
+ rtlpriv->stats.rx_evm_percentage[stream] =
+ pstats->rx_mimo_sig_qual[stream];
+ }
+ rtlpriv->stats.rx_evm_percentage[stream] =
+ ((rtlpriv->stats.rx_evm_percentage[stream]
+ * (RX_SMOOTH_FACTOR - 1)) +
+ (pstats->rx_mimo_sig_qual[stream] * 1)) /
+ (RX_SMOOTH_FACTOR);
+ }
+ }
+}
+
+static void _rtl92d_process_ui_link_quality(struct ieee80211_hw *hw,
+ struct rtl_stats *pstats)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rt_smooth_data *ui_link_quality;
+ u32 last_evm, tmpval;
+
+ if (pstats->signalquality == 0)
+ return;
+ if (!pstats->packet_toself && !pstats->packet_beacon)
+ return;
+
+ ui_link_quality = &rtlpriv->stats.ui_link_quality;
+
+ if (ui_link_quality->total_num++ >= PHY_LINKQUALITY_SLID_WIN_MAX) {
+ ui_link_quality->total_num = PHY_LINKQUALITY_SLID_WIN_MAX;
+ last_evm = ui_link_quality->elements[ui_link_quality->index];
+ ui_link_quality->total_val -= last_evm;
+ }
+ ui_link_quality->total_val += pstats->signalquality;
+ ui_link_quality->elements[ui_link_quality->index++] = pstats->signalquality;
+ if (ui_link_quality->index >= PHY_LINKQUALITY_SLID_WIN_MAX)
+ ui_link_quality->index = 0;
+ tmpval = ui_link_quality->total_val / ui_link_quality->total_num;
+ rtlpriv->stats.signal_quality = tmpval;
+ rtlpriv->stats.last_sigstrength_inpercent = tmpval;
+ rtl92d_loop_over_streams(hw, pstats);
+}
+
+static void _rtl92d_process_phyinfo(struct ieee80211_hw *hw,
+ u8 *buffer,
+ struct rtl_stats *pcurrent_stats)
+{
+ if (!pcurrent_stats->packet_matchbssid &&
+ !pcurrent_stats->packet_beacon)
+ return;
+
+ _rtl92d_process_ui_rssi(hw, pcurrent_stats);
+ _rtl92d_process_pwdb(hw, pcurrent_stats);
+ _rtl92d_process_ui_link_quality(hw, pcurrent_stats);
+}
+
+static void _rtl92d_translate_rx_signal_stuff(struct ieee80211_hw *hw,
+ struct sk_buff *skb,
+ struct rtl_stats *pstats,
+ __le32 *pdesc,
+ struct rx_fwinfo_92d *p_drvinfo)
+{
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ struct ieee80211_hdr *hdr;
+ bool packet_matchbssid;
+ bool packet_beacon;
+ bool packet_toself;
+ u16 type, cfc;
+ u8 *tmp_buf;
+ u8 *praddr;
+ __le16 fc;
+
+ tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
+ hdr = (struct ieee80211_hdr *)tmp_buf;
+ fc = hdr->frame_control;
+ cfc = le16_to_cpu(fc);
+ type = WLAN_FC_GET_TYPE(fc);
+ praddr = hdr->addr1;
+ packet_matchbssid = ((type != IEEE80211_FTYPE_CTL) &&
+ ether_addr_equal(mac->bssid,
+ (cfc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
+ (cfc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
+ hdr->addr3) &&
+ (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
+ packet_toself = packet_matchbssid &&
+ ether_addr_equal(praddr, rtlefuse->dev_addr);
+ packet_beacon = ieee80211_is_beacon(fc);
+ _rtl92d_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
+ packet_matchbssid, packet_toself,
+ packet_beacon);
+ _rtl92d_process_phyinfo(hw, tmp_buf, pstats);
+}
+
+bool rtl92d_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc8, struct sk_buff *skb)
+{
+ __le32 *pdesc = (__le32 *)pdesc8;
+ struct rx_fwinfo_92d *p_drvinfo;
+ u32 phystatus = get_rx_desc_physt(pdesc);
+
+ stats->length = (u16)get_rx_desc_pkt_len(pdesc);
+ stats->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) *
+ RX_DRV_INFO_SIZE_UNIT;
+ stats->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03);
+ stats->icv = (u16)get_rx_desc_icv(pdesc);
+ stats->crc = (u16)get_rx_desc_crc32(pdesc);
+ stats->hwerror = (stats->crc | stats->icv);
+ stats->decrypted = !get_rx_desc_swdec(pdesc) &&
+ get_rx_desc_enc_type(pdesc) != RX_DESC_ENC_NONE;
+ stats->rate = (u8)get_rx_desc_rxmcs(pdesc);
+ stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
+ stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
+ stats->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) &&
+ (get_rx_desc_faggr(pdesc) == 1));
+ stats->timestamp_low = get_rx_desc_tsfl(pdesc);
+ stats->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc);
+ stats->is_ht = (bool)get_rx_desc_rxht(pdesc);
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
+ if (get_rx_desc_crc32(pdesc))
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+ if (get_rx_desc_bw(pdesc))
+ rx_status->bw = RATE_INFO_BW_40;
+ if (get_rx_desc_rxht(pdesc))
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+ if (stats->decrypted)
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+ rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
+ false, stats->rate);
+ rx_status->mactime = get_rx_desc_tsfl(pdesc);
+ if (phystatus) {
+ p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
+ stats->rx_bufshift);
+ _rtl92d_translate_rx_signal_stuff(hw, skb, stats, pdesc,
+ p_drvinfo);
+ }
+ /*rx_status->qual = stats->signal; */
+ rx_status->signal = stats->recvsignalpower + 10;
+ return true;
+}
+EXPORT_SYMBOL_GPL(rtl92d_rx_query_desc);
+
+void rtl92d_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
+ u8 desc_name, u8 *val)
+{
+ __le32 *pdesc = (__le32 *)pdesc8;
+
+ if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ wmb();
+ set_tx_desc_own(pdesc, 1);
+ break;
+ case HW_DESC_TX_NEXTDESC_ADDR:
+ set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
+ break;
+ default:
+			WARN_ONCE(true, "rtl8192de: ERR txdesc: %d not processed\n",
+ desc_name);
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_RXOWN:
+ wmb();
+ set_rx_desc_own(pdesc, 1);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ set_rx_desc_buff_addr(pdesc, *(u32 *)val);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ set_rx_desc_pkt_len(pdesc, *(u32 *)val);
+ break;
+ case HW_DESC_RXERO:
+ set_rx_desc_eor(pdesc, 1);
+ break;
+ default:
+			WARN_ONCE(true, "rtl8192de: ERR rxdesc: %d not processed\n",
+ desc_name);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(rtl92d_set_desc);
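The wmb() ahead of set_tx_desc_own() (and set_rx_desc_own()) is the heart of the handoff: every other descriptor write must be globally visible before the OWN bit transfers the entry to the NIC. For illustration, the same contract in portable C11, with a release store standing in for barrier-plus-plain-store; a sketch, not driver code:

#include <stdatomic.h>
#include <stdint.h>

struct demo_desc {
	uint32_t words[15];   /* everything except the OWN word */
	_Atomic uint32_t own; /* bit 31 of dword 0 in the real descriptor */
};

void demo_hand_to_hw(struct demo_desc *d, const uint32_t *fill, int n)
{
	for (int i = 0; i < n; i++)
		d->words[i] = fill[i];  /* plain field writes */
	/* Publish: release ordering plays the role of wmb() + OWN set. */
	atomic_store_explicit(&d->own, 1, memory_order_release);
}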
+
+u64 rtl92d_get_desc(struct ieee80211_hw *hw,
+ u8 *p_desc8, bool istx, u8 desc_name)
+{
+ __le32 *p_desc = (__le32 *)p_desc8;
+ u32 ret = 0;
+
+ if (istx) {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = get_tx_desc_own(p_desc);
+ break;
+ case HW_DESC_TXBUFF_ADDR:
+ ret = get_tx_desc_tx_buffer_address(p_desc);
+ break;
+ default:
+			WARN_ONCE(true, "rtl8192de: ERR txdesc: %d not processed\n",
+ desc_name);
+ break;
+ }
+ } else {
+ switch (desc_name) {
+ case HW_DESC_OWN:
+ ret = get_rx_desc_own(p_desc);
+ break;
+ case HW_DESC_RXPKT_LEN:
+ ret = get_rx_desc_pkt_len(p_desc);
+ break;
+ case HW_DESC_RXBUFF_ADDR:
+ ret = get_rx_desc_buff_addr(p_desc);
+ break;
+ default:
+			WARN_ONCE(true, "rtl8192de: ERR rxdesc: %d not processed\n",
+ desc_name);
+ break;
+ }
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(rtl92d_get_desc);
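A typical caller polls HW_DESC_OWN through this getter before reclaiming a TX slot: nonzero means the NIC still owns the entry and the driver must leave it alone. The underlying test is just bit 31 of dword 0, modeled standalone here (illustration, not driver code):

#include <stdbool.h>
#include <stdint.h>

bool hw_owns_desc(const uint32_t *desc)
{
	return desc[0] & (1u << 31); /* OWN: set while the NIC holds the entry */
}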
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h
new file mode 100644
index 000000000000..528182b1eba6
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h
@@ -0,0 +1,405 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2009-2012 Realtek Corporation.*/
+
+#ifndef __RTL92D_TRX_COMMON_H__
+#define __RTL92D_TRX_COMMON_H__
+
+#define RX_DRV_INFO_SIZE_UNIT 8
+
+enum rtl92d_rx_desc_enc {
+ RX_DESC_ENC_NONE = 0,
+ RX_DESC_ENC_WEP40 = 1,
+ RX_DESC_ENC_TKIP_WO_MIC = 2,
+ RX_DESC_ENC_TKIP_MIC = 3,
+ RX_DESC_ENC_AES = 4,
+ RX_DESC_ENC_WEP104 = 5,
+};
+
+/* inline helpers to read/write various fields in RX or TX descriptors */
+
+static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
+}
+
+static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(25));
+}
+
+static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(26));
+}
+
+static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(27));
+}
+
+static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(28));
+}
+
+static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_tx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(31));
+}
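All of these accessors are masked read-modify-writes on one little-endian 32-bit word, with the shift implied by the mask's lowest set bit. A host-endian model of the le32_get_bits()/le32p_replace_bits() pair, runnable standalone (a sketch; the kernel helpers additionally handle the le32 byte-order conversion):

#include <assert.h>
#include <stdint.h>

#define GENMASK32(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

static void replace_bits(uint32_t *word, uint32_t val, uint32_t mask)
{
	unsigned int shift = __builtin_ctz(mask);

	*word = (*word & ~mask) | ((val << shift) & mask);
}

static uint32_t get_bits(uint32_t word, uint32_t mask)
{
	return (word & mask) >> __builtin_ctz(mask);
}

int main(void)
{
	uint32_t desc0 = 0;

	replace_bits(&desc0, 1500, GENMASK32(15, 0)); /* pkt_size */
	replace_bits(&desc0, 1, GENMASK32(31, 31));   /* own */
	assert(get_bits(desc0, GENMASK32(15, 0)) == 1500);
	assert(get_bits(desc0, GENMASK32(31, 31)) == 1);
	return 0;
}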
+
+static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(5));
+}
+
+static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, BIT(7));
+}
+
+static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16));
+}
+
+static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
+}
+
+static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 1), __val, GENMASK(30, 26));
+}
+
+static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, BIT(17));
+}
+
+static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20));
+}
+
+static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16));
+}
+
+static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28));
+}
+
+static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0));
+}
+
+static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(6));
+}
+
+static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(7));
+}
+
+static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(8));
+}
+
+static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(10));
+}
+
+static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(11));
+}
+
+static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(12));
+}
+
+static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(13));
+}
+
+static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20));
+}
+
+static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(25));
+}
+
+static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(26));
+}
+
+static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, BIT(27));
+}
+
+static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28));
+}
+
+static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30));
+}
+
+static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0));
+}
+
+static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, BIT(6));
+}
+
+static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8));
+}
+
+static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13));
+}
+
+static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11));
+}
+
+static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
+}
+
+static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 8) = cpu_to_le32(__val);
+}
+
+static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 8));
+}
+
+static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 10) = cpu_to_le32(__val);
+}
+
+static inline u32 get_rx_desc_pkt_len(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(13, 0));
+}
+
+static inline u32 get_rx_desc_crc32(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(14));
+}
+
+static inline u32 get_rx_desc_icv(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(15));
+}
+
+static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(19, 16));
+}
+
+static inline u32 get_rx_desc_enc_type(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(22, 20));
+}
+
+static inline u32 get_rx_desc_shift(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, GENMASK(25, 24));
+}
+
+static inline u32 get_rx_desc_physt(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(26));
+}
+
+static inline u32 get_rx_desc_swdec(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(27));
+}
+
+static inline u32 get_rx_desc_own(__le32 *__pdesc)
+{
+ return le32_get_bits(*__pdesc, BIT(31));
+}
+
+static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
+}
+
+static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(30));
+}
+
+static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val)
+{
+ le32p_replace_bits(__pdesc, __val, BIT(31));
+}
+
+static inline u32 get_rx_desc_paggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(14));
+}
+
+static inline u32 get_rx_desc_faggr(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 1), BIT(15));
+}
+
+static inline u32 get_rx_desc_rxmcs(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
+}
+
+static inline u32 get_rx_desc_rxht(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(6));
+}
+
+static inline u32 get_rx_desc_splcp(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(8));
+}
+
+static inline u32 get_rx_desc_bw(__le32 *__pdesc)
+{
+ return le32_get_bits(*(__pdesc + 3), BIT(9));
+}
+
+static inline u32 get_rx_desc_tsfl(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 5));
+}
+
+static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc)
+{
+ return le32_to_cpu(*(__pdesc + 6));
+}
+
+static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val)
+{
+ *(__pdesc + 6) = cpu_to_le32(__val);
+}
+
+/* For 92D early mode */
+static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(2, 0));
+}
+
+static inline void set_earlymode_len0(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(15, 4));
+}
+
+static inline void set_earlymode_len1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(27, 16));
+}
+
+static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits(__paddr, __value, GENMASK(31, 28));
+}
+
+static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(7, 0));
+}
+
+static inline void set_earlymode_len3(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(19, 8));
+}
+
+static inline void set_earlymode_len4(__le32 *__paddr, u32 __value)
+{
+ le32p_replace_bits((__paddr + 1), __value, GENMASK(31, 20));
+}
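Of the early-mode fields, len2 is the only one that straddles a word boundary, which is why it needs two setters: bits [3:0] land in word 0 [31:28] via set_earlymode_len2_1() and bits [11:4] in word 1 [7:0] via set_earlymode_len2_2(). A combined helper, written out only to make the split explicit (hypothetical, not part of the patch):

#include <stdint.h>

void demo_set_earlymode_len2(uint32_t *hdr, uint32_t len)
{
	hdr[0] = (hdr[0] & ~0xF0000000u) | ((len & 0xFu) << 28); /* low 4 bits */
	hdr[1] = (hdr[1] & ~0x000000FFu) | ((len >> 4) & 0xFFu); /* high 8 bits */
}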
+
+struct rx_fwinfo_92d {
+ u8 gain_trsw[4];
+ u8 pwdb_all;
+ u8 cfosho[4];
+ u8 cfotail[4];
+ s8 rxevm[2];
+ s8 rxsnr[4];
+ u8 pdsnr[2];
+ u8 csi_current[2];
+ u8 csi_target[2];
+ u8 sigevm;
+ u8 max_ex_pwr;
+#ifdef __LITTLE_ENDIAN
+ u8 ex_intf_flag:1;
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 reserve:4;
+#else
+ u8 reserve:4;
+ u8 rxsc:2;
+ u8 sgi_en:1;
+ u8 ex_intf_flag:1;
+#endif
+} __packed;
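The mirrored bitfield declarations keep ex_intf_flag/sgi_en/rxsc at the same bit positions regardless of host endianness. By this field list the packed report is 28 bytes; a compile-time guard along these lines could catch accidental layout drift (a suggestion, not something the patch adds):

_Static_assert(sizeof(struct rx_fwinfo_92d) == 28,
	       "rx_fwinfo_92d must match the hardware PHY status layout");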
+
+bool rtl92d_rx_query_desc(struct ieee80211_hw *hw,
+ struct rtl_stats *stats,
+ struct ieee80211_rx_status *rx_status,
+ u8 *pdesc, struct sk_buff *skb);
+void rtl92d_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
+ u8 desc_name, u8 *val);
+u64 rtl92d_get_desc(struct ieee80211_hw *hw,
+ u8 *p_desc, bool istx, u8 desc_name);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
index cf4aca83bd05..c6a2e8b22fa0 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.c
@@ -4,455 +4,16 @@
#include "../wifi.h"
#include "../base.h"
#include "../core.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/dm_common.h"
+#include "../rtl8192d/phy_common.h"
+#include "../rtl8192d/fw_common.h"
#include "phy.h"
#include "dm.h"
-#include "fw.h"
#define UNDEC_SM_PWDB entry_min_undec_sm_pwdb
-static const u32 ofdmswing_table[OFDM_TABLE_SIZE_92D] = {
- 0x7f8001fe, /* 0, +6.0dB */
- 0x788001e2, /* 1, +5.5dB */
- 0x71c001c7, /* 2, +5.0dB */
- 0x6b8001ae, /* 3, +4.5dB */
- 0x65400195, /* 4, +4.0dB */
- 0x5fc0017f, /* 5, +3.5dB */
- 0x5a400169, /* 6, +3.0dB */
- 0x55400155, /* 7, +2.5dB */
- 0x50800142, /* 8, +2.0dB */
- 0x4c000130, /* 9, +1.5dB */
- 0x47c0011f, /* 10, +1.0dB */
- 0x43c0010f, /* 11, +0.5dB */
- 0x40000100, /* 12, +0dB */
- 0x3c8000f2, /* 13, -0.5dB */
- 0x390000e4, /* 14, -1.0dB */
- 0x35c000d7, /* 15, -1.5dB */
- 0x32c000cb, /* 16, -2.0dB */
- 0x300000c0, /* 17, -2.5dB */
- 0x2d4000b5, /* 18, -3.0dB */
- 0x2ac000ab, /* 19, -3.5dB */
- 0x288000a2, /* 20, -4.0dB */
- 0x26000098, /* 21, -4.5dB */
- 0x24000090, /* 22, -5.0dB */
- 0x22000088, /* 23, -5.5dB */
- 0x20000080, /* 24, -6.0dB */
- 0x1e400079, /* 25, -6.5dB */
- 0x1c800072, /* 26, -7.0dB */
-	0x1b00006c, /* 27, -7.5dB */
- 0x19800066, /* 28, -8.0dB */
- 0x18000060, /* 29, -8.5dB */
- 0x16c0005b, /* 30, -9.0dB */
- 0x15800056, /* 31, -9.5dB */
- 0x14400051, /* 32, -10.0dB */
- 0x1300004c, /* 33, -10.5dB */
- 0x12000048, /* 34, -11.0dB */
- 0x11000044, /* 35, -11.5dB */
- 0x10000040, /* 36, -12.0dB */
- 0x0f00003c, /* 37, -12.5dB */
- 0x0e400039, /* 38, -13.0dB */
- 0x0d800036, /* 39, -13.5dB */
- 0x0cc00033, /* 40, -14.0dB */
- 0x0c000030, /* 41, -14.5dB */
- 0x0b40002d, /* 42, -15.0dB */
-};
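Each row of ofdmswing_table is one 0.5 dB step, with index 12 as the 0 dB reference, so an index delta converts directly to a gain change; the tracking code later extracts element D from the top ten bits (0xFFC00000) of the selected entry. The index-to-gain relation, restated as a throwaway helper (illustration only):

/* Gain at a table index, in 0.5 dB units: index 0 -> +12 (+6.0 dB),
 * index 42 -> -30 (-15.0 dB).
 */
static inline int ofdm_index_to_half_db(int index)
{
	return 12 - index;
}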
-
-static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
- {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
- {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
- {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
- {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
- {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
- {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
- {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
- {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
- {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
- {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
- {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
- {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
- {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
- {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
- {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
- {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
- {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
- {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
- {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
- {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
-};
-
-static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
- {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
- {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
- {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
- {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
- {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
- {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
- {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
- {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
- {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
- {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
- {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
- {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
- {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
- {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
- {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
- {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
- {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
- {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
- {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
- {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
- {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
- {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
- {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
- {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
- {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
- {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
- {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
- {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
- {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
- {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
- {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
- {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
- {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
-};
-
-static void rtl92d_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
-{
- u32 ret_value;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
- unsigned long flag = 0;
-
- /* hold ofdm counter */
- rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 1); /* hold page C counter */
- rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 1); /*hold page D counter */
-
- ret_value = rtl_get_bbreg(hw, ROFDM0_FRAMESYNC, MASKDWORD);
- falsealm_cnt->cnt_fast_fsync_fail = (ret_value & 0xffff);
- falsealm_cnt->cnt_sb_search_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
- falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
- falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
- falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
- falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
- falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
- falsealm_cnt->cnt_rate_illegal +
- falsealm_cnt->cnt_crc8_fail +
- falsealm_cnt->cnt_mcs_fail +
- falsealm_cnt->cnt_fast_fsync_fail +
- falsealm_cnt->cnt_sb_search_fail;
-
- if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
- /* hold cck counter */
- rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
- falsealm_cnt->cnt_cck_fail = ret_value;
- ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
- falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
- rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
- } else {
- falsealm_cnt->cnt_cck_fail = 0;
- }
-
- /* reset false alarm counter registers */
- falsealm_cnt->cnt_all = falsealm_cnt->cnt_fast_fsync_fail +
- falsealm_cnt->cnt_sb_search_fail +
- falsealm_cnt->cnt_parity_fail +
- falsealm_cnt->cnt_rate_illegal +
- falsealm_cnt->cnt_crc8_fail +
- falsealm_cnt->cnt_mcs_fail +
- falsealm_cnt->cnt_cck_fail;
-
- rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
- /* update ofdm counter */
- rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
- /* update page C counter */
- rtl_set_bbreg(hw, ROFDM0_LSTF, BIT(31), 0);
- /* update page D counter */
- rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(31), 0);
- if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G) {
- /* reset cck counter */
- rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
- /* enable cck counter */
- rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
- rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
- }
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "Cnt_Fast_Fsync_fail = %x, Cnt_SB_Search_fail = %x\n",
- falsealm_cnt->cnt_fast_fsync_fail,
- falsealm_cnt->cnt_sb_search_fail);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "Cnt_Parity_Fail = %x, Cnt_Rate_Illegal = %x, Cnt_Crc8_fail = %x, Cnt_Mcs_fail = %x\n",
- falsealm_cnt->cnt_parity_fail,
- falsealm_cnt->cnt_rate_illegal,
- falsealm_cnt->cnt_crc8_fail,
- falsealm_cnt->cnt_mcs_fail);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "Cnt_Ofdm_fail = %x, Cnt_Cck_fail = %x, Cnt_all = %x\n",
- falsealm_cnt->cnt_ofdm_fail,
- falsealm_cnt->cnt_cck_fail,
- falsealm_cnt->cnt_all);
-}
-
-static void rtl92d_dm_find_minimum_rssi(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *de_digtable = &rtlpriv->dm_digtable;
- struct rtl_mac *mac = rtl_mac(rtlpriv);
-
- /* Determine the minimum RSSI */
- if ((mac->link_state < MAC80211_LINKED) &&
- (rtlpriv->dm.UNDEC_SM_PWDB == 0)) {
- de_digtable->min_undec_pwdb_for_dm = 0;
- rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "Not connected to any\n");
- }
- if (mac->link_state >= MAC80211_LINKED) {
- if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC) {
- de_digtable->min_undec_pwdb_for_dm =
- rtlpriv->dm.UNDEC_SM_PWDB;
- rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Client PWDB = 0x%lx\n",
- rtlpriv->dm.UNDEC_SM_PWDB);
- } else {
- de_digtable->min_undec_pwdb_for_dm =
- rtlpriv->dm.undec_sm_pwdb;
- rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "STA Default Port PWDB = 0x%x\n",
- de_digtable->min_undec_pwdb_for_dm);
- }
- } else {
- de_digtable->min_undec_pwdb_for_dm = rtlpriv->dm.UNDEC_SM_PWDB;
- rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_LOUD,
- "AP Ext Port or disconnect PWDB = 0x%x\n",
- de_digtable->min_undec_pwdb_for_dm);
- }
-
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "MinUndecoratedPWDBForDM =%d\n",
- de_digtable->min_undec_pwdb_for_dm);
-}
-
-static void rtl92d_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *de_digtable = &rtlpriv->dm_digtable;
- unsigned long flag = 0;
-
- if (de_digtable->cursta_cstate == DIG_STA_CONNECT) {
- if (de_digtable->pre_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
- if (de_digtable->min_undec_pwdb_for_dm <= 25)
- de_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_LOWRSSI;
- else
- de_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_HIGHRSSI;
- } else {
- if (de_digtable->min_undec_pwdb_for_dm <= 20)
- de_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_LOWRSSI;
- else
- de_digtable->cur_cck_pd_state =
- CCK_PD_STAGE_HIGHRSSI;
- }
- } else {
- de_digtable->cur_cck_pd_state = CCK_PD_STAGE_LOWRSSI;
- }
- if (de_digtable->pre_cck_pd_state != de_digtable->cur_cck_pd_state) {
- if (de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI) {
- rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0x83);
- rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
- } else {
- rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
- rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
- }
- de_digtable->pre_cck_pd_state = de_digtable->cur_cck_pd_state;
- }
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CurSTAConnectState=%s\n",
- de_digtable->cursta_cstate == DIG_STA_CONNECT ?
- "DIG_STA_CONNECT " : "DIG_STA_DISCONNECT");
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "CCKPDStage=%s\n",
- de_digtable->cur_cck_pd_state == CCK_PD_STAGE_LOWRSSI ?
- "Low RSSI " : "High RSSI ");
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "is92d single phy =%x\n",
- IS_92D_SINGLEPHY(rtlpriv->rtlhal.version));
-
-}
-
-void rtl92d_dm_write_dig(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *de_digtable = &rtlpriv->dm_digtable;
-
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "cur_igvalue = 0x%x, pre_igvalue = 0x%x, back_val = %d\n",
- de_digtable->cur_igvalue, de_digtable->pre_igvalue,
- de_digtable->back_val);
- if (de_digtable->dig_enable_flag == false) {
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "DIG is disabled\n");
- de_digtable->pre_igvalue = 0x17;
- return;
- }
- if (de_digtable->pre_igvalue != de_digtable->cur_igvalue) {
- rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
- de_digtable->cur_igvalue);
- rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
- de_digtable->cur_igvalue);
- de_digtable->pre_igvalue = de_digtable->cur_igvalue;
- }
-}
-
-static void rtl92d_early_mode_enabled(struct rtl_priv *rtlpriv)
-{
- struct dig_t *de_digtable = &rtlpriv->dm_digtable;
-
- if ((rtlpriv->mac80211.link_state >= MAC80211_LINKED) &&
- (rtlpriv->mac80211.vendor == PEER_CISCO)) {
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "IOT_PEER = CISCO\n");
- if (de_digtable->last_min_undec_pwdb_for_dm >= 50
- && de_digtable->min_undec_pwdb_for_dm < 50) {
- rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x00);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "Early Mode Off\n");
- } else if (de_digtable->last_min_undec_pwdb_for_dm <= 55 &&
- de_digtable->min_undec_pwdb_for_dm > 55) {
- rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "Early Mode On\n");
- }
- } else if (!(rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL) & 0xf)) {
- rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0x0f);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "Early Mode On\n");
- }
-}
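The 50/55 thresholds above form a hysteresis band: early mode drops out only when PWDB falls below 50 and re-arms only once it climbs past 55, so the feature cannot flap around a single boundary. The edge logic, restated standalone with illustrative names:

/* Returns the new on/off state given the previous and current PWDB. */
static int earlymode_target(int last_pwdb, int pwdb, int currently_on)
{
	if (last_pwdb >= 50 && pwdb < 50)
		return 0;            /* falling edge: switch off */
	if (last_pwdb <= 55 && pwdb > 55)
		return 1;            /* rising edge: switch on */
	return currently_on;         /* inside the band: hold state */
}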
-
-static void rtl92d_dm_dig(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *de_digtable = &rtlpriv->dm_digtable;
- u8 value_igi = de_digtable->cur_igvalue;
- struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
-
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "==>\n");
- if (rtlpriv->rtlhal.earlymode_enable) {
- rtl92d_early_mode_enabled(rtlpriv);
- de_digtable->last_min_undec_pwdb_for_dm =
- de_digtable->min_undec_pwdb_for_dm;
- }
- if (!rtlpriv->dm.dm_initialgain_enable)
- return;
-
-	/* Because we still send data packets while scanning, returning here
-	 * would lower WEP throughput with some APs (e.g. the gear-3700);
-	 * this is a difference between the mac80211 and ieee80211 drivers.
-	 */
- /* if (rtlpriv->mac80211.act_scanning)
- * return; */
-
-	/* Nothing to do unless in STA mode */
- if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
- return;
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "progress\n");
- /* Decide the current status and if modify initial gain or not */
- if (rtlpriv->mac80211.link_state >= MAC80211_LINKED)
- de_digtable->cursta_cstate = DIG_STA_CONNECT;
- else
- de_digtable->cursta_cstate = DIG_STA_DISCONNECT;
-
- /* adjust initial gain according to false alarm counter */
- if (falsealm_cnt->cnt_all < DM_DIG_FA_TH0)
- value_igi--;
- else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH1)
- value_igi += 0;
- else if (falsealm_cnt->cnt_all < DM_DIG_FA_TH2)
- value_igi++;
- else if (falsealm_cnt->cnt_all >= DM_DIG_FA_TH2)
- value_igi += 2;
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() Before: large_fa_hit=%d, forbidden_igi=%x\n",
- de_digtable->large_fa_hit, de_digtable->forbidden_igi);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() Before: Recover_cnt=%d, rx_gain_min=%x\n",
- de_digtable->recover_cnt, de_digtable->rx_gain_min);
-
- /* deal with abnormally large false alarm */
- if (falsealm_cnt->cnt_all > 10000) {
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG(): Abnormally false alarm case\n");
-
- de_digtable->large_fa_hit++;
- if (de_digtable->forbidden_igi < de_digtable->cur_igvalue) {
- de_digtable->forbidden_igi = de_digtable->cur_igvalue;
- de_digtable->large_fa_hit = 1;
- }
- if (de_digtable->large_fa_hit >= 3) {
- if ((de_digtable->forbidden_igi + 1) > DM_DIG_MAX)
- de_digtable->rx_gain_min = DM_DIG_MAX;
- else
- de_digtable->rx_gain_min =
- (de_digtable->forbidden_igi + 1);
- de_digtable->recover_cnt = 3600; /* 3600=2hr */
- }
- } else {
- /* Recovery mechanism for IGI lower bound */
- if (de_digtable->recover_cnt != 0) {
- de_digtable->recover_cnt--;
- } else {
- if (de_digtable->large_fa_hit == 0) {
- if ((de_digtable->forbidden_igi - 1) <
- DM_DIG_FA_LOWER) {
- de_digtable->forbidden_igi =
- DM_DIG_FA_LOWER;
- de_digtable->rx_gain_min =
- DM_DIG_FA_LOWER;
-
- } else {
- de_digtable->forbidden_igi--;
- de_digtable->rx_gain_min =
- (de_digtable->forbidden_igi + 1);
- }
- } else if (de_digtable->large_fa_hit == 3) {
- de_digtable->large_fa_hit = 0;
- }
- }
- }
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() After: large_fa_hit=%d, forbidden_igi=%x\n",
- de_digtable->large_fa_hit, de_digtable->forbidden_igi);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD,
- "dm_DIG() After: recover_cnt=%d, rx_gain_min=%x\n",
- de_digtable->recover_cnt, de_digtable->rx_gain_min);
-
- if (value_igi > DM_DIG_MAX)
- value_igi = DM_DIG_MAX;
- else if (value_igi < de_digtable->rx_gain_min)
- value_igi = de_digtable->rx_gain_min;
- de_digtable->cur_igvalue = value_igi;
- rtl92d_dm_write_dig(hw);
- if (rtlpriv->rtlhal.current_bandtype != BAND_ON_5G)
- rtl92d_dm_cck_packet_detection_thresh(hw);
- rtl_dbg(rtlpriv, COMP_DIG, DBG_LOUD, "<<==\n");
-}
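The adjustment at the top of rtl92d_dm_dig() reduces to a small step policy on the summed false-alarm count, with thresholds DM_DIG_FA_TH0/TH1/TH2 (0x100/0x400/0x600). For illustration:

/* IGI delta per watchdog pass as a function of false alarms. */
static int igi_step(unsigned int fa_cnt)
{
	if (fa_cnt < 0x100)
		return -1;  /* quiet channel: open the gain */
	if (fa_cnt < 0x400)
		return 0;   /* acceptable: hold */
	if (fa_cnt < 0x600)
		return 1;   /* noisy: back off */
	return 2;           /* very noisy: back off harder */
}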
-
static void rtl92d_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -579,626 +140,7 @@ static void rtl92d_dm_pwdb_monitor(struct ieee80211_hw *hw)
}
}
-void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->dm.current_turbo_edca = false;
- rtlpriv->dm.is_any_nonbepkts = false;
- rtlpriv->dm.is_cur_rdlstate = false;
-}
-
-static void rtl92d_dm_check_edca_turbo(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- const u32 edca_be_ul = 0x5ea42b;
- const u32 edca_be_dl = 0x5ea42b;
- static u64 last_txok_cnt;
- static u64 last_rxok_cnt;
- u64 cur_txok_cnt;
- u64 cur_rxok_cnt;
-
- if (mac->link_state != MAC80211_LINKED) {
- rtlpriv->dm.current_turbo_edca = false;
- goto exit;
- }
-
- if ((!rtlpriv->dm.is_any_nonbepkts) &&
- (!rtlpriv->dm.disable_framebursting)) {
- cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
- cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
- if (cur_rxok_cnt > 4 * cur_txok_cnt) {
- if (!rtlpriv->dm.is_cur_rdlstate ||
- !rtlpriv->dm.current_turbo_edca) {
- rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
- edca_be_dl);
- rtlpriv->dm.is_cur_rdlstate = true;
- }
- } else {
- if (rtlpriv->dm.is_cur_rdlstate ||
- !rtlpriv->dm.current_turbo_edca) {
- rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
- edca_be_ul);
- rtlpriv->dm.is_cur_rdlstate = false;
- }
- }
- rtlpriv->dm.current_turbo_edca = true;
- } else {
- if (rtlpriv->dm.current_turbo_edca) {
- u8 tmp = AC0_BE;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AC_PARAM,
- &tmp);
- rtlpriv->dm.current_turbo_edca = false;
- }
- }
-
-exit:
- rtlpriv->dm.is_any_nonbepkts = false;
- last_txok_cnt = rtlpriv->stats.txbytesunicast;
- last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
-}
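The direction test is deliberately crude: compare unicast byte deltas since the previous watchdog tick, and if receive exceeds transmit by more than 4:1, program the downlink EDCA parameter set, otherwise the uplink one. As a one-line restatement (sketch):

#include <stdbool.h>
#include <stdint.h>

static bool traffic_is_downlink(uint64_t rx_delta, uint64_t tx_delta)
{
	return rx_delta > 4 * tx_delta;
}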
-
-static void rtl92d_dm_rxgain_tracking_thermalmeter(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 index_mapping[RX_INDEX_MAPPING_NUM] = {
- 0x0f, 0x0f, 0x0d, 0x0c, 0x0b,
- 0x0a, 0x09, 0x08, 0x07, 0x06,
- 0x05, 0x04, 0x04, 0x03, 0x02
- };
- int i;
- u32 u4tmp;
-
- u4tmp = (index_mapping[(rtlpriv->efuse.eeprom_thermalmeter -
- rtlpriv->dm.thermalvalue_rxgain)]) << 12;
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "===> Rx Gain %x\n", u4tmp);
- for (i = RF90_PATH_A; i < rtlpriv->phy.num_total_rfpath; i++)
- rtl_set_rfreg(hw, i, 0x3C, RFREG_OFFSET_MASK,
- (rtlpriv->phy.reg_rf3c[i] & (~(0xF000))) | u4tmp);
-}
-
-static void rtl92d_bandtype_2_4G(struct ieee80211_hw *hw, long *temp_cckg,
- u8 *cck_index_old)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int i;
- unsigned long flag = 0;
- long temp_cck;
- const u8 *cckswing;
-
- /* Query CCK default setting From 0xa24 */
- rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
- temp_cck = rtl_get_bbreg(hw, RCCK0_TXFILTER2,
- MASKDWORD) & MASKCCK;
- rtl92d_release_cckandrw_pagea_ctl(hw, &flag);
- for (i = 0; i < CCK_TABLE_LENGTH; i++) {
- if (rtlpriv->dm.cck_inch14)
- cckswing = &cckswing_table_ch14[i][2];
- else
- cckswing = &cckswing_table_ch1ch13[i][2];
-
- if (temp_cck == le32_to_cpu(*((__le32 *)cckswing))) {
- *cck_index_old = (u8)i;
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial reg0x%x = 0x%lx, cck_index = 0x%x, ch14 %d\n",
- RCCK0_TXFILTER2, temp_cck,
- *cck_index_old,
- rtlpriv->dm.cck_inch14);
- break;
- }
- }
- *temp_cckg = temp_cck;
-}
-
-static void rtl92d_bandtype_5G(struct rtl_hal *rtlhal, u8 *ofdm_index,
- bool *internal_pa, u8 thermalvalue, u8 delta,
- u8 rf, struct rtl_efuse *rtlefuse,
- struct rtl_priv *rtlpriv, struct rtl_phy *rtlphy,
- const u8 index_mapping[5][INDEX_MAPPING_NUM],
- const u8 index_mapping_pa[8][INDEX_MAPPING_NUM])
-{
- int i;
- u8 index;
- u8 offset = 0;
-
- for (i = 0; i < rf; i++) {
- if (rtlhal->macphymode == DUALMAC_DUALPHY &&
- rtlhal->interfaceindex == 1) /* MAC 1 5G */
- *internal_pa = rtlefuse->internal_pa_5g[1];
- else
- *internal_pa = rtlefuse->internal_pa_5g[i];
- if (*internal_pa) {
- if (rtlhal->interfaceindex == 1 || i == rf)
- offset = 4;
- else
- offset = 0;
- if (rtlphy->current_channel >= 100 &&
- rtlphy->current_channel <= 165)
- offset += 2;
- } else {
- if (rtlhal->interfaceindex == 1 || i == rf)
- offset = 2;
- else
- offset = 0;
- }
- if (thermalvalue > rtlefuse->eeprom_thermalmeter)
- offset++;
- if (*internal_pa) {
- if (delta > INDEX_MAPPING_NUM - 1)
- index = index_mapping_pa[offset]
- [INDEX_MAPPING_NUM - 1];
- else
- index =
- index_mapping_pa[offset][delta];
- } else {
- if (delta > INDEX_MAPPING_NUM - 1)
- index =
- index_mapping[offset][INDEX_MAPPING_NUM - 1];
- else
- index = index_mapping[offset][delta];
- }
- if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
- if (*internal_pa && thermalvalue > 0x12) {
- ofdm_index[i] = rtlpriv->dm.ofdm_index[i] -
- ((delta / 2) * 3 + (delta % 2));
- } else {
- ofdm_index[i] -= index;
- }
- } else {
- ofdm_index[i] += index;
- }
- }
-}
-
-static void rtl92d_dm_txpower_tracking_callback_thermalmeter(
- struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 thermalvalue, delta, delta_lck, delta_iqk, delta_rxgain;
- u8 offset, thermalvalue_avg_count = 0;
- u32 thermalvalue_avg = 0;
- bool internal_pa = false;
- long ele_a = 0, ele_d, temp_cck, val_x, value32;
- long val_y, ele_c = 0;
- u8 ofdm_index[2];
- s8 cck_index = 0;
- u8 ofdm_index_old[2] = {0, 0};
- s8 cck_index_old = 0;
- u8 index;
- int i;
- bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
- u8 ofdm_min_index = 6, ofdm_min_index_internal_pa = 3, rf;
- u8 indexforchannel =
- rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel);
- static const u8 index_mapping[5][INDEX_MAPPING_NUM] = {
- /* 5G, path A/MAC 0, decrease power */
- {0, 1, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
- /* 5G, path A/MAC 0, increase power */
- {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
- /* 5G, path B/MAC 1, decrease power */
- {0, 2, 3, 6, 8, 9, 11, 13, 14, 16, 17, 18, 18},
- /* 5G, path B/MAC 1, increase power */
- {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
-		/* 2.4G, decrease power */
- {0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10},
- };
- static const u8 index_mapping_internal_pa[8][INDEX_MAPPING_NUM] = {
- /* 5G, path A/MAC 0, ch36-64, decrease power */
- {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
- /* 5G, path A/MAC 0, ch36-64, increase power */
- {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
- /* 5G, path A/MAC 0, ch100-165, decrease power */
- {0, 1, 2, 3, 5, 6, 8, 10, 11, 13, 14, 15, 15},
- /* 5G, path A/MAC 0, ch100-165, increase power */
- {0, 2, 4, 5, 7, 10, 12, 14, 16, 18, 18, 18, 18},
- /* 5G, path B/MAC 1, ch36-64, decrease power */
- {0, 1, 2, 4, 6, 7, 9, 11, 12, 14, 15, 16, 16},
- /* 5G, path B/MAC 1, ch36-64, increase power */
- {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
- /* 5G, path B/MAC 1, ch100-165, decrease power */
- {0, 1, 2, 3, 5, 6, 8, 9, 10, 12, 13, 14, 14},
- /* 5G, path B/MAC 1, ch100-165, increase power */
- {0, 2, 4, 5, 7, 10, 13, 16, 16, 18, 18, 18, 18},
- };
-
- rtlpriv->dm.txpower_trackinginit = true;
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "\n");
- thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0xf800);
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x\n",
- thermalvalue,
- rtlpriv->dm.thermalvalue, rtlefuse->eeprom_thermalmeter);
- rtl92d_phy_ap_calibrate(hw, (thermalvalue -
- rtlefuse->eeprom_thermalmeter));
-
- if (!thermalvalue)
- goto exit;
-
- if (is2t)
- rf = 2;
- else
- rf = 1;
-
- if (rtlpriv->dm.thermalvalue && !rtlhal->reloadtxpowerindex)
- goto old_index_done;
-
- ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD) & MASKOFDM_D;
- for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
- if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
- ofdm_index_old[0] = (u8)i;
-
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Initial pathA ele_d reg0x%x = 0x%lx, ofdm_index=0x%x\n",
- ROFDM0_XATXIQIMBALANCE,
- ele_d, ofdm_index_old[0]);
- break;
- }
- }
- if (is2t) {
- ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
- MASKDWORD) & MASKOFDM_D;
- for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) {
- if (ele_d ==
- (ofdmswing_table[i] & MASKOFDM_D)) {
- ofdm_index_old[1] = (u8)i;
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING,
- DBG_LOUD,
- "Initial pathB ele_d reg 0x%x = 0x%lx, ofdm_index = 0x%x\n",
- ROFDM0_XBTXIQIMBALANCE, ele_d,
- ofdm_index_old[1]);
- break;
- }
- }
- }
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- rtl92d_bandtype_2_4G(hw, &temp_cck, &cck_index_old);
- } else {
- temp_cck = 0x090e1317;
- cck_index_old = 12;
- }
-
- if (!rtlpriv->dm.thermalvalue) {
- rtlpriv->dm.thermalvalue = rtlefuse->eeprom_thermalmeter;
- rtlpriv->dm.thermalvalue_lck = thermalvalue;
- rtlpriv->dm.thermalvalue_iqk = thermalvalue;
- rtlpriv->dm.thermalvalue_rxgain = rtlefuse->eeprom_thermalmeter;
- for (i = 0; i < rf; i++)
- rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
- rtlpriv->dm.cck_index = cck_index_old;
- }
- if (rtlhal->reloadtxpowerindex) {
- for (i = 0; i < rf; i++)
- rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
- rtlpriv->dm.cck_index = cck_index_old;
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "reload ofdm index for band switch\n");
- }
-old_index_done:
- for (i = 0; i < rf; i++)
- ofdm_index[i] = rtlpriv->dm.ofdm_index[i];
-
- rtlpriv->dm.thermalvalue_avg
- [rtlpriv->dm.thermalvalue_avg_index] = thermalvalue;
- rtlpriv->dm.thermalvalue_avg_index++;
- if (rtlpriv->dm.thermalvalue_avg_index == AVG_THERMAL_NUM)
- rtlpriv->dm.thermalvalue_avg_index = 0;
- for (i = 0; i < AVG_THERMAL_NUM; i++) {
- if (rtlpriv->dm.thermalvalue_avg[i]) {
- thermalvalue_avg += rtlpriv->dm.thermalvalue_avg[i];
- thermalvalue_avg_count++;
- }
- }
- if (thermalvalue_avg_count)
- thermalvalue = (u8)(thermalvalue_avg / thermalvalue_avg_count);
- if (rtlhal->reloadtxpowerindex) {
- delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
- (thermalvalue - rtlefuse->eeprom_thermalmeter) :
- (rtlefuse->eeprom_thermalmeter - thermalvalue);
- rtlhal->reloadtxpowerindex = false;
- rtlpriv->dm.done_txpower = false;
- } else if (rtlpriv->dm.done_txpower) {
- delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
- (thermalvalue - rtlpriv->dm.thermalvalue) :
- (rtlpriv->dm.thermalvalue - thermalvalue);
- } else {
- delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
- (thermalvalue - rtlefuse->eeprom_thermalmeter) :
- (rtlefuse->eeprom_thermalmeter - thermalvalue);
- }
- delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
- (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
- (rtlpriv->dm.thermalvalue_lck - thermalvalue);
- delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
- (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
- (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
- delta_rxgain =
- (thermalvalue > rtlpriv->dm.thermalvalue_rxgain) ?
- (thermalvalue - rtlpriv->dm.thermalvalue_rxgain) :
- (rtlpriv->dm.thermalvalue_rxgain - thermalvalue);
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Readback Thermal Meter = 0x%x pre thermal meter 0x%x eeprom_thermalmeter 0x%x delta 0x%x delta_lck 0x%x delta_iqk 0x%x\n",
- thermalvalue, rtlpriv->dm.thermalvalue,
- rtlefuse->eeprom_thermalmeter, delta, delta_lck,
- delta_iqk);
- if (delta_lck > rtlefuse->delta_lck && rtlefuse->delta_lck != 0) {
- rtlpriv->dm.thermalvalue_lck = thermalvalue;
- rtl92d_phy_lc_calibrate(hw);
- }
-
- if (delta == 0 || !rtlpriv->dm.txpower_track_control)
- goto check_delta;
-
- rtlpriv->dm.done_txpower = true;
- delta = (thermalvalue > rtlefuse->eeprom_thermalmeter) ?
- (thermalvalue - rtlefuse->eeprom_thermalmeter) :
- (rtlefuse->eeprom_thermalmeter - thermalvalue);
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- offset = 4;
- if (delta > INDEX_MAPPING_NUM - 1)
- index = index_mapping[offset][INDEX_MAPPING_NUM - 1];
- else
- index = index_mapping[offset][delta];
- if (thermalvalue > rtlpriv->dm.thermalvalue) {
- for (i = 0; i < rf; i++)
- ofdm_index[i] -= delta;
- cck_index -= delta;
- } else {
- for (i = 0; i < rf; i++)
- ofdm_index[i] += index;
- cck_index += index;
- }
- } else if (rtlhal->current_bandtype == BAND_ON_5G) {
- rtl92d_bandtype_5G(rtlhal, ofdm_index,
- &internal_pa, thermalvalue,
- delta, rf, rtlefuse, rtlpriv,
- rtlphy, index_mapping,
- index_mapping_internal_pa);
- }
- if (is2t) {
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x, OFDM_B_index = 0x%x,cck_index=0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.ofdm_index[1],
- rtlpriv->dm.cck_index);
- } else {
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "temp OFDM_A_index=0x%x,cck_index = 0x%x\n",
- rtlpriv->dm.ofdm_index[0],
- rtlpriv->dm.cck_index);
- }
- for (i = 0; i < rf; i++) {
- if (ofdm_index[i] > OFDM_TABLE_SIZE_92D - 1) {
- ofdm_index[i] = OFDM_TABLE_SIZE_92D - 1;
- } else if (internal_pa ||
- rtlhal->current_bandtype == BAND_ON_2_4G) {
- if (ofdm_index[i] < ofdm_min_index_internal_pa)
- ofdm_index[i] = ofdm_min_index_internal_pa;
- } else if (ofdm_index[i] < ofdm_min_index) {
- ofdm_index[i] = ofdm_min_index;
- }
- }
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- if (cck_index > CCK_TABLE_SIZE - 1) {
- cck_index = CCK_TABLE_SIZE - 1;
- } else if (cck_index < 0) {
- cck_index = 0;
- }
- }
- if (is2t) {
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x, OFDM_B_index = 0x%x, cck_index=0x%x\n",
- ofdm_index[0], ofdm_index[1],
- cck_index);
- } else {
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "new OFDM_A_index=0x%x,cck_index = 0x%x\n",
- ofdm_index[0], cck_index);
- }
- ele_d = (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
- val_x = rtlphy->iqk_matrix[indexforchannel].value[0][0];
- val_y = rtlphy->iqk_matrix[indexforchannel].value[0][1];
- if (val_x != 0) {
- if ((val_x & 0x00000200) != 0)
- val_x = val_x | 0xFFFFFC00;
- ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
-
- /* new element C = element D x Y */
- if ((val_y & 0x00000200) != 0)
- val_y = val_y | 0xFFFFFC00;
- ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
-
- /* write new elements A, C, D to regC80 and
- * regC94, element B is always 0
- */
- value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a;
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
- MASKDWORD, value32);
-
- value32 = (ele_c & 0x000003C0) >> 6;
- rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
- value32);
-
- value32 = ((val_x * ele_d) >> 7) & 0x01;
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
- value32);
-
- } else {
- rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
- MASKDWORD,
- ofdmswing_table[(u8)ofdm_index[0]]);
- rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
- 0x00);
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(24), 0x00);
- }
-
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking for interface %d path A: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xe94 = 0x%lx 0xe9c = 0x%lx\n",
- rtlhal->interfaceindex,
- val_x, val_y, ele_a, ele_c, ele_d,
- val_x, val_y);
-
- if (cck_index >= CCK_TABLE_SIZE)
- cck_index = CCK_TABLE_SIZE - 1;
- if (cck_index < 0)
- cck_index = 0;
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- /* Adjust CCK according to IQK result */
- if (!rtlpriv->dm.cck_inch14) {
- rtl_write_byte(rtlpriv, 0xa22,
- cckswing_table_ch1ch13[cck_index][0]);
- rtl_write_byte(rtlpriv, 0xa23,
- cckswing_table_ch1ch13[cck_index][1]);
- rtl_write_byte(rtlpriv, 0xa24,
- cckswing_table_ch1ch13[cck_index][2]);
- rtl_write_byte(rtlpriv, 0xa25,
- cckswing_table_ch1ch13[cck_index][3]);
- rtl_write_byte(rtlpriv, 0xa26,
- cckswing_table_ch1ch13[cck_index][4]);
- rtl_write_byte(rtlpriv, 0xa27,
- cckswing_table_ch1ch13[cck_index][5]);
- rtl_write_byte(rtlpriv, 0xa28,
- cckswing_table_ch1ch13[cck_index][6]);
- rtl_write_byte(rtlpriv, 0xa29,
- cckswing_table_ch1ch13[cck_index][7]);
- } else {
- rtl_write_byte(rtlpriv, 0xa22,
- cckswing_table_ch14[cck_index][0]);
- rtl_write_byte(rtlpriv, 0xa23,
- cckswing_table_ch14[cck_index][1]);
- rtl_write_byte(rtlpriv, 0xa24,
- cckswing_table_ch14[cck_index][2]);
- rtl_write_byte(rtlpriv, 0xa25,
- cckswing_table_ch14[cck_index][3]);
- rtl_write_byte(rtlpriv, 0xa26,
- cckswing_table_ch14[cck_index][4]);
- rtl_write_byte(rtlpriv, 0xa27,
- cckswing_table_ch14[cck_index][5]);
- rtl_write_byte(rtlpriv, 0xa28,
- cckswing_table_ch14[cck_index][6]);
- rtl_write_byte(rtlpriv, 0xa29,
- cckswing_table_ch14[cck_index][7]);
- }
- }
- if (is2t) {
- ele_d = (ofdmswing_table[ofdm_index[1]] & 0xFFC00000) >> 22;
- val_x = rtlphy->iqk_matrix[indexforchannel].value[0][4];
- val_y = rtlphy->iqk_matrix[indexforchannel].value[0][5];
- if (val_x != 0) {
- if ((val_x & 0x00000200) != 0)
- /* consider minus */
- val_x = val_x | 0xFFFFFC00;
- ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
- /* new element C = element D x Y */
- if ((val_y & 0x00000200) != 0)
- val_y = val_y | 0xFFFFFC00;
- ele_c = ((val_y * ele_d) >> 8) & 0x00003FF;
- /* write new elements A, C, D to regC88
- * and regC9C, element B is always 0
- */
- value32 = (ele_d << 22) | ((ele_c & 0x3F) << 16) | ele_a;
- rtl_set_bbreg(hw,
- ROFDM0_XBTXIQIMBALANCE,
- MASKDWORD, value32);
- value32 = (ele_c & 0x000003C0) >> 6;
- rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
- MASKH4BITS, value32);
- value32 = ((val_x * ele_d) >> 7) & 0x01;
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(28), value32);
- } else {
- rtl_set_bbreg(hw,
- ROFDM0_XBTXIQIMBALANCE,
- MASKDWORD,
- ofdmswing_table[ofdm_index[1]]);
- rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
- MASKH4BITS, 0x00);
- rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
- BIT(28), 0x00);
- }
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking path B: X = 0x%lx, Y = 0x%lx ele_A = 0x%lx ele_C = 0x%lx ele_D = 0x%lx 0xeb4 = 0x%lx 0xebc = 0x%lx\n",
- val_x, val_y, ele_a, ele_c,
- ele_d, val_x, val_y);
- }
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
- rtl_get_bbreg(hw, 0xc80, MASKDWORD),
- rtl_get_bbreg(hw, 0xc94, MASKDWORD),
- rtl_get_rfreg(hw, RF90_PATH_A, 0x24,
- RFREG_OFFSET_MASK));
-
-check_delta:
- if (delta_iqk > rtlefuse->delta_iqk && rtlefuse->delta_iqk != 0) {
- rtl92d_phy_reset_iqk_result(hw);
- rtlpriv->dm.thermalvalue_iqk = thermalvalue;
- rtl92d_phy_iq_calibrate(hw);
- }
- if (delta_rxgain > 0 && rtlhal->current_bandtype == BAND_ON_5G &&
- thermalvalue <= rtlefuse->eeprom_thermalmeter) {
- rtlpriv->dm.thermalvalue_rxgain = thermalvalue;
- rtl92d_dm_rxgain_tracking_thermalmeter(hw);
- }
- if (rtlpriv->dm.txpower_track_control)
- rtlpriv->dm.thermalvalue = thermalvalue;
-
-exit:
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, "<===\n");
-}
-
-static void rtl92d_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- rtlpriv->dm.txpower_tracking = true;
- rtlpriv->dm.txpower_trackinginit = false;
- rtlpriv->dm.txpower_track_control = true;
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "pMgntInfo->txpower_tracking = %d\n",
- rtlpriv->dm.txpower_tracking);
-}
-
-void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (!rtlpriv->dm.txpower_tracking)
- return;
-
- if (!rtlpriv->dm.tm_trigger) {
- rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, BIT(17) |
- BIT(16), 0x03);
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Trigger 92S Thermal Meter!!\n");
- rtlpriv->dm.tm_trigger = 1;
- return;
- } else {
- rtl_dbg(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
- "Schedule TxPowerTracking direct call!!\n");
- rtl92d_dm_txpower_tracking_callback_thermalmeter(hw);
- rtlpriv->dm.tm_trigger = 0;
- }
-}
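Note the two-phase cadence here: one watchdog pass arms the RF thermal meter, the next reads it back and runs the tracking callback, so every measurement spans two calls. Modeled standalone (trigger and read_and_track are stand-ins for the rtl_set_rfreg write and the callback above):

#include <stdbool.h>

struct tm_state { bool armed; };

static void thermal_tick(struct tm_state *s,
			 void (*trigger)(void), void (*read_and_track)(void))
{
	if (!s->armed) {
		trigger();          /* start conversion, come back next tick */
		s->armed = true;
	} else {
		read_and_track();   /* consume the settled reading */
		s->armed = false;
	}
}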
-
-void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rate_adaptive *ra = &(rtlpriv->ra);
-
- ra->ratr_state = DM_RATR_STA_INIT;
- ra->pre_ratr_state = DM_RATR_STA_INIT;
- if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
- rtlpriv->dm.useramask = true;
- else
- rtlpriv->dm.useramask = false;
-}
-
-void rtl92d_dm_init(struct ieee80211_hw *hw)
+void rtl92de_dm_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1212,7 +154,7 @@ void rtl92d_dm_init(struct ieee80211_hw *hw)
rtl92d_dm_initialize_txpower_tracking(hw);
}
-void rtl92d_dm_watchdog(struct ieee80211_hw *hw)
+void rtl92de_dm_watchdog(struct ieee80211_hw *hw)
{
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
bool fw_current_inpsmode = false;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
index 939cc45bfebd..beade227b442 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/dm.h
@@ -4,94 +4,7 @@
#ifndef __RTL92C_DM_H__
#define __RTL92C_DM_H__
-#define HAL_DM_DIG_DISABLE BIT(0)
-#define HAL_DM_HIPWR_DISABLE BIT(1)
-
-#define OFDM_TABLE_LENGTH 37
-#define OFDM_TABLE_SIZE_92D 43
-#define CCK_TABLE_LENGTH 33
-
-#define CCK_TABLE_SIZE 33
-
-#define BW_AUTO_SWITCH_HIGH_LOW 25
-#define BW_AUTO_SWITCH_LOW_HIGH 30
-
-#define DM_DIG_FA_UPPER 0x32
-#define DM_DIG_FA_LOWER 0x20
-#define DM_DIG_FA_TH0 0x100
-#define DM_DIG_FA_TH1 0x400
-#define DM_DIG_FA_TH2 0x600
-
-#define RXPATHSELECTION_SS_TH_LOW 30
-#define RXPATHSELECTION_DIFF_TH 18
-
-#define DM_RATR_STA_INIT 0
-#define DM_RATR_STA_HIGH 1
-#define DM_RATR_STA_MIDDLE 2
-#define DM_RATR_STA_LOW 3
-
-#define CTS2SELF_THVAL 30
-#define REGC38_TH 20
-
-#define WAIOTTHVAL 25
-
-#define TXHIGHPWRLEVEL_NORMAL 0
-#define TXHIGHPWRLEVEL_LEVEL1 1
-#define TXHIGHPWRLEVEL_LEVEL2 2
-#define TXHIGHPWRLEVEL_BT1 3
-#define TXHIGHPWRLEVEL_BT2 4
-
-#define DM_TYPE_BYFW 0
-#define DM_TYPE_BYDRIVER 1
-
-#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
-#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
-#define INDEX_MAPPING_NUM 13
-
-struct swat {
- u8 failure_cnt;
- u8 try_flag;
- u8 stop_trying;
- long pre_rssi;
- long trying_threshold;
- u8 cur_antenna;
- u8 pre_antenna;
-};
-
-enum tag_dynamic_init_gain_operation_type_definition {
- DIG_TYPE_THRESH_HIGH = 0,
- DIG_TYPE_THRESH_LOW = 1,
- DIG_TYPE_BACKOFF = 2,
- DIG_TYPE_RX_GAIN_MIN = 3,
- DIG_TYPE_RX_GAIN_MAX = 4,
- DIG_TYPE_ENABLE = 5,
- DIG_TYPE_DISABLE = 6,
- DIG_OP_TYPE_MAX
-};
-
-enum dm_1r_cca {
- CCA_1R = 0,
- CCA_2R = 1,
- CCA_MAX = 2,
-};
-
-enum dm_rf {
- RF_SAVE = 0,
- RF_NORMAL = 1,
- RF_MAX = 2,
-};
-
-enum dm_sw_ant_switch {
- ANS_ANTENNA_B = 1,
- ANS_ANTENNA_A = 2,
- ANS_ANTENNA_MAX = 3,
-};
-
-void rtl92d_dm_init(struct ieee80211_hw *hw);
-void rtl92d_dm_watchdog(struct ieee80211_hw *hw);
-void rtl92d_dm_init_edca_turbo(struct ieee80211_hw *hw);
-void rtl92d_dm_write_dig(struct ieee80211_hw *hw);
-void rtl92d_dm_check_txpower_tracking_thermal_meter(struct ieee80211_hw *hw);
-void rtl92d_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
+void rtl92de_dm_init(struct ieee80211_hw *hw);
+void rtl92de_dm_watchdog(struct ieee80211_hw *hw);
#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
index e1fb29962801..c8444a72ff69 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c
@@ -5,157 +5,12 @@
#include "../pci.h"
#include "../base.h"
#include "../efuse.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/fw_common.h"
#include "fw.h"
#include "sw.h"
-static bool _rtl92d_is_fw_downloaded(struct rtl_priv *rtlpriv)
-{
- return (rtl_read_dword(rtlpriv, REG_MCUFWDL) & MCUFWDL_RDY) ?
- true : false;
-}
-
-static void _rtl92d_enable_fw_download(struct ieee80211_hw *hw, bool enable)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmp;
-
- if (enable) {
- tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
- rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, tmp | 0x04);
- tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
- rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
- tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
- rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
- } else {
- tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
- rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
-		/* Reserved for fw extension.
-		 * 0x81[7] is used for MAC0 status,
-		 * so don't write this register here:
- * rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);*/
- }
-}
-
-static void _rtl92d_write_fw(struct ieee80211_hw *hw,
- enum version_8192d version, u8 *buffer, u32 size)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 *bufferptr = buffer;
- u32 pagenums, remainsize;
- u32 page, offset;
-
-	rtl_dbg(rtlpriv, COMP_FW, DBG_TRACE, "FW size is %d bytes\n", size);
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192DE)
- rtl_fill_dummy(bufferptr, &size);
- pagenums = size / FW_8192D_PAGE_SIZE;
- remainsize = size % FW_8192D_PAGE_SIZE;
- if (pagenums > 8)
- pr_err("Page numbers should not greater then 8\n");
- for (page = 0; page < pagenums; page++) {
- offset = page * FW_8192D_PAGE_SIZE;
- rtl_fw_page_write(hw, page, (bufferptr + offset),
- FW_8192D_PAGE_SIZE);
- }
- if (remainsize) {
- offset = pagenums * FW_8192D_PAGE_SIZE;
- page = pagenums;
- rtl_fw_page_write(hw, page, (bufferptr + offset), remainsize);
- }
-}
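The download loop splits the image into full pages plus a tail written to the next page index. A host-side model of that split (assuming the 4 KiB FW_8192D_PAGE_SIZE; page_write stands in for rtl_fw_page_write):

#include <stddef.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE 4096u

static void demo_write_fw(const uint8_t *buf, size_t size,
			  void (*page_write)(unsigned int page,
					     const uint8_t *data, size_t len))
{
	size_t pages = size / DEMO_PAGE_SIZE;
	size_t rem = size % DEMO_PAGE_SIZE;
	size_t p;

	for (p = 0; p < pages; p++)
		page_write(p, buf + p * DEMO_PAGE_SIZE, DEMO_PAGE_SIZE);
	if (rem)
		page_write(pages, buf + pages * DEMO_PAGE_SIZE, rem);
}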
-
-static int _rtl92d_fw_free_to_go(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 counter = 0;
- u32 value32;
-
- do {
- value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- } while ((counter++ < FW_8192D_POLLING_TIMEOUT_COUNT) &&
- (!(value32 & FWDL_CHKSUM_RPT)));
- if (counter >= FW_8192D_POLLING_TIMEOUT_COUNT) {
- pr_err("chksum report fail! REG_MCUFWDL:0x%08x\n",
- value32);
- return -EIO;
- }
- value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
- value32 |= MCUFWDL_RDY;
- rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
- return 0;
-}
-
-void rtl92d_firmware_selfreset(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 u1b_tmp;
- u8 delay = 100;
-
-	/* Writing 0x20 to (REG_HMETFR + 3) resets the 8051 */
- rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
- u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
- while (u1b_tmp & BIT(2)) {
- delay--;
- if (delay == 0)
- break;
- udelay(50);
- u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
- }
- WARN_ONCE((delay <= 0), "rtl8192de: 8051 reset failed!\n");
- rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
- "=====> 8051 reset success (%d)\n", delay);
-}
-
-static int _rtl92d_fw_init(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u32 counter;
-
- rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG, "FW already downloaded\n");
- /* polling for FW ready */
- counter = 0;
- do {
- if (rtlhal->interfaceindex == 0) {
- if (rtl_read_byte(rtlpriv, FW_MAC0_READY) &
- MAC0_READY) {
- rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
- rtl_read_byte(rtlpriv,
- FW_MAC0_READY));
- return 0;
- }
- udelay(5);
- } else {
- if (rtl_read_byte(rtlpriv, FW_MAC1_READY) &
- MAC1_READY) {
- rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready success!! REG_MCUFWDL: 0x%x\n",
- rtl_read_byte(rtlpriv,
- FW_MAC1_READY));
- return 0;
- }
- udelay(5);
- }
- } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
-
- if (rtlhal->interfaceindex == 0) {
- rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! MAC0 FW init not ready: 0x%x\n",
- rtl_read_byte(rtlpriv, FW_MAC0_READY));
- } else {
- rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! MAC1 FW init not ready: 0x%x\n",
- rtl_read_byte(rtlpriv, FW_MAC1_READY));
- }
- rtl_dbg(rtlpriv, COMP_FW, DBG_DMESG,
- "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n",
- rtl_read_dword(rtlpriv, REG_MCUFWDL));
- return -1;
-}
-
int rtl92d_download_fw(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
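
Note on the removed download path: _rtl92d_write_fw() above pads the image (rtl_fill_dummy() on 92DE), splits it into 4 KiB pages (FW_8192D_PAGE_SIZE) with at most 8 full pages plus a remainder, and writes each page with rtl_fw_page_write(). A minimal sketch of that paging, assuming the same helper and page size as the deleted code:

	static void write_fw_paged(struct ieee80211_hw *hw, u8 *buf, u32 size)
	{
		u32 pages = size / FW_8192D_PAGE_SIZE;	/* full 4 KiB pages */
		u32 remain = size % FW_8192D_PAGE_SIZE;	/* trailing bytes */
		u32 page;

		for (page = 0; page < pages; page++)
			rtl_fw_page_write(hw, page,
					  buf + page * FW_8192D_PAGE_SIZE,
					  FW_8192D_PAGE_SIZE);
		if (remain)	/* partial last page */
			rtl_fw_page_write(hw, pages,
					  buf + pages * FW_8192D_PAGE_SIZE,
					  remain);
	}
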
@@ -189,7 +44,7 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
}
spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
- fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv);
+ fw_downloaded = rtl92d_is_fw_downloaded(rtlpriv);
if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5))
fwdl_in_process = true;
else
@@ -202,7 +57,7 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
for (count = 0; count < 5000; count++) {
udelay(500);
spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
- fw_downloaded = _rtl92d_is_fw_downloaded(rtlpriv);
+ fw_downloaded = rtl92d_is_fw_downloaded(rtlpriv);
if ((rtl_read_byte(rtlpriv, 0x1f) & BIT(5)) == BIT(5))
fwdl_in_process = true;
else
@@ -237,11 +92,11 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
rtl92d_firmware_selfreset(hw);
rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
}
- _rtl92d_enable_fw_download(hw, true);
- _rtl92d_write_fw(hw, version, pfwdata, fwsize);
- _rtl92d_enable_fw_download(hw, false);
+ rtl92d_enable_fw_download(hw, true);
+ rtl92d_write_fw(hw, version, pfwdata, fwsize);
+ rtl92d_enable_fw_download(hw, false);
spin_lock_irqsave(&globalmutex_for_fwdownload, flags);
- err = _rtl92d_fw_free_to_go(hw);
+ err = rtl92d_fw_free_to_go(hw);
/* download fw over, clear 0x1f[5] */
value = rtl_read_byte(rtlpriv, 0x1f);
value &= (~BIT(5));
@@ -250,207 +105,10 @@ int rtl92d_download_fw(struct ieee80211_hw *hw)
if (err)
pr_err("fw is not ready to run!\n");
exit:
- err = _rtl92d_fw_init(hw);
+ err = rtl92d_fw_init(hw);
return err;
}
-static bool _rtl92d_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 val_hmetfr;
- bool result = false;
-
- val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
- if (((val_hmetfr >> boxnum) & BIT(0)) == 0)
- result = true;
- return result;
-}
-
-static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw,
- u8 element_id, u32 cmd_len, u8 *cmdbuffer)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u8 boxnum;
- u16 box_reg = 0, box_extreg = 0;
- u8 u1b_tmp;
- bool isfw_read = false;
- u8 buf_index = 0;
- bool bwrite_success = false;
- u8 wait_h2c_limmit = 100;
- u8 wait_writeh2c_limmit = 100;
- u8 boxcontent[4], boxextcontent[2];
- u32 h2c_waitcounter = 0;
- unsigned long flag;
- u8 idx;
-
- if (ppsc->rfpwr_state == ERFOFF || ppsc->inactive_pwrstate == ERFOFF) {
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "Return as RF is off!!!\n");
- return;
- }
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "come in\n");
- while (true) {
- spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
- if (rtlhal->h2c_setinprogress) {
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "H2C set in progress! Wait to set..element_id(%d)\n",
- element_id);
-
- while (rtlhal->h2c_setinprogress) {
- spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
- flag);
- h2c_waitcounter++;
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "Wait 100 us (%d times)...\n",
- h2c_waitcounter);
- udelay(100);
-
- if (h2c_waitcounter > 1000)
- return;
-
- spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
- flag);
- }
- spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- } else {
- rtlhal->h2c_setinprogress = true;
- spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- break;
- }
- }
- while (!bwrite_success) {
- wait_writeh2c_limmit--;
- if (wait_writeh2c_limmit == 0) {
- pr_err("Write H2C fail because no trigger for FW INT!\n");
- break;
- }
- boxnum = rtlhal->last_hmeboxnum;
- switch (boxnum) {
- case 0:
- box_reg = REG_HMEBOX_0;
- box_extreg = REG_HMEBOX_EXT_0;
- break;
- case 1:
- box_reg = REG_HMEBOX_1;
- box_extreg = REG_HMEBOX_EXT_1;
- break;
- case 2:
- box_reg = REG_HMEBOX_2;
- box_extreg = REG_HMEBOX_EXT_2;
- break;
- case 3:
- box_reg = REG_HMEBOX_3;
- box_extreg = REG_HMEBOX_EXT_3;
- break;
- default:
- pr_err("switch case %#x not processed\n",
- boxnum);
- break;
- }
- isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
- while (!isfw_read) {
- wait_h2c_limmit--;
- if (wait_h2c_limmit == 0) {
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting too long for FW read clear HMEBox(%d)!\n",
- boxnum);
- break;
- }
- udelay(10);
- isfw_read = _rtl92d_check_fw_read_last_h2c(hw, boxnum);
- u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "Waiting for FW read clear HMEBox(%d)!!! 0x1BF = %2x\n",
- boxnum, u1b_tmp);
- }
- if (!isfw_read) {
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write H2C register BOX[%d] fail!!!!! Fw do not read.\n",
- boxnum);
- break;
- }
- memset(boxcontent, 0, sizeof(boxcontent));
- memset(boxextcontent, 0, sizeof(boxextcontent));
- boxcontent[0] = element_id;
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "Write element_id box_reg(%4x) = %2x\n",
- box_reg, element_id);
- switch (cmd_len) {
- case 1:
- boxcontent[0] &= ~(BIT(7));
- memcpy(boxcontent + 1, cmdbuffer + buf_index, 1);
- for (idx = 0; idx < 4; idx++)
- rtl_write_byte(rtlpriv, box_reg + idx,
- boxcontent[idx]);
- break;
- case 2:
- boxcontent[0] &= ~(BIT(7));
- memcpy(boxcontent + 1, cmdbuffer + buf_index, 2);
- for (idx = 0; idx < 4; idx++)
- rtl_write_byte(rtlpriv, box_reg + idx,
- boxcontent[idx]);
- break;
- case 3:
- boxcontent[0] &= ~(BIT(7));
- memcpy(boxcontent + 1, cmdbuffer + buf_index, 3);
- for (idx = 0; idx < 4; idx++)
- rtl_write_byte(rtlpriv, box_reg + idx,
- boxcontent[idx]);
- break;
- case 4:
- boxcontent[0] |= (BIT(7));
- memcpy(boxextcontent, cmdbuffer + buf_index, 2);
- memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 2);
- for (idx = 0; idx < 2; idx++)
- rtl_write_byte(rtlpriv, box_extreg + idx,
- boxextcontent[idx]);
- for (idx = 0; idx < 4; idx++)
- rtl_write_byte(rtlpriv, box_reg + idx,
- boxcontent[idx]);
- break;
- case 5:
- boxcontent[0] |= (BIT(7));
- memcpy(boxextcontent, cmdbuffer + buf_index, 2);
- memcpy(boxcontent + 1, cmdbuffer + buf_index + 2, 3);
- for (idx = 0; idx < 2; idx++)
- rtl_write_byte(rtlpriv, box_extreg + idx,
- boxextcontent[idx]);
- for (idx = 0; idx < 4; idx++)
- rtl_write_byte(rtlpriv, box_reg + idx,
- boxcontent[idx]);
- break;
- default:
- pr_err("switch case %#x not processed\n",
- cmd_len);
- break;
- }
- bwrite_success = true;
- rtlhal->last_hmeboxnum = boxnum + 1;
- if (rtlhal->last_hmeboxnum == 4)
- rtlhal->last_hmeboxnum = 0;
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD,
- "pHalData->last_hmeboxnum = %d\n",
- rtlhal->last_hmeboxnum);
- }
- spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
- rtlhal->h2c_setinprogress = false;
- spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
- rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "go out\n");
-}
-
-void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw,
- u8 element_id, u32 cmd_len, u8 *cmdbuffer)
-{
- u32 tmp_cmdbuf[2];
-
- memset(tmp_cmdbuf, 0, 8);
- memcpy(tmp_cmdbuf, cmdbuffer, cmd_len);
- _rtl92d_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf);
- return;
-}
-
static bool _rtl92d_cmd_send_packet(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
@@ -599,7 +257,7 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
struct sk_buff *skb = NULL;
u32 totalpacketlen;
bool rtstatus;
- u8 u1rsvdpageloc[3] = { 0 };
+ u8 u1rsvdpageloc[3] = { PROBERSP_PG, PSPOLL_PG, NULL_PG };
bool dlok = false;
u8 *beacon;
u8 *p_pspoll;
@@ -618,7 +276,6 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
- SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1rsvdpageloc, PSPOLL_PG);
/*--------------------------------------------------------
(3) null data
---------------------------------------------------------*/
@@ -626,7 +283,6 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
- SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1rsvdpageloc, NULL_PG);
/*---------------------------------------------------------
(4) probe response
----------------------------------------------------------*/
@@ -634,7 +290,6 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
- SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1rsvdpageloc, PROBERSP_PG);
totalpacketlen = TOTAL_RESERVED_PKT_LEN;
RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
"rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL",
@@ -663,11 +318,3 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished)
rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
"Set RSVD page location to Fw FAIL!!!!!!\n");
}
-
-void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
-{
- u8 u1_joinbssrpt_parm[1] = {0};
-
- SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
- rtl92d_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
-}
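
Note on the H2C path dropped from this file: the deleted _rtl92d_fill_h2c_command() implements the host-to-card mailbox protocol — four rotating HMEBOX registers, a per-box "firmware consumed it" bit in REG_HMETFR, and an extension register pair for commands longer than three bytes, signalled by BIT(7) in the first byte. A condensed sketch of the core handshake (locking and box-register selection omitted; the full version now lives in the shared rtl8192d code):

	/* REG_HMETFR bit N clears once the firmware has read box N */
	static bool fw_read_last_h2c(struct rtl_priv *rtlpriv, u8 boxnum)
	{
		return !((rtl_read_byte(rtlpriv, REG_HMETFR) >> boxnum) & BIT(0));
	}

	static void write_h2c_box(struct rtl_priv *rtlpriv, u16 box_reg,
				  u16 box_extreg, u8 element_id,
				  const u8 *cmd, u32 len)
	{
		u8 box[4] = { element_id };
		u8 idx;

		if (len <= 3) {			/* short cmd: box only */
			box[0] &= ~BIT(7);
			memcpy(box + 1, cmd, len);
		} else {			/* long cmd: 2 bytes via ext box */
			box[0] |= BIT(7);
			for (idx = 0; idx < 2; idx++)
				rtl_write_byte(rtlpriv, box_extreg + idx,
					       cmd[idx]);
			memcpy(box + 1, cmd + 2, len - 2);
		}
		for (idx = 0; idx < 4; idx++)
			rtl_write_byte(rtlpriv, box_reg + idx, box[idx]);
	}
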
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
index 7f0a17c1a9ea..9e1385ac17b1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.h
@@ -4,44 +4,7 @@
#ifndef __RTL92D__FW__H__
#define __RTL92D__FW__H__
-#define FW_8192D_START_ADDRESS 0x1000
-#define FW_8192D_PAGE_SIZE 4096
-#define FW_8192D_POLLING_TIMEOUT_COUNT 1000
-
-#define IS_FW_HEADER_EXIST(_pfwhdr) \
- ((GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x92C0 || \
- (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFF0) == 0x88C0 || \
- (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D0 || \
- (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D1 || \
- (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D2 || \
- (GET_FIRMWARE_HDR_SIGNATURE(_pfwhdr) & 0xFFFF) == 0x92D3)
-
-/* Firmware header (8-byte alignment required) */
-/* --- LONG WORD 0 ---- */
-#define GET_FIRMWARE_HDR_SIGNATURE(__fwhdr) \
- le32_get_bits(*(__le32 *)__fwhdr, GENMASK(15, 0))
-#define GET_FIRMWARE_HDR_VERSION(__fwhdr) \
- le32_get_bits(*(__le32 *)(__fwhdr + 4), GENMASK(15, 0))
-#define GET_FIRMWARE_HDR_SUB_VER(__fwhdr) \
- le32_get_bits(*(__le32 *)(__fwhdr + 4), GENMASK(23, 16))
-
-#define pagenum_128(_len) \
- (u32)(((_len) >> 7) + ((_len) & 0x7F ? 1 : 0))
-
-#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
- *(u8 *)__ph2ccmd = __val;
-#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
- *(u8 *)__ph2ccmd = __val;
-#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
- *(u8 *)(__ph2ccmd + 1) = __val;
-#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
- *(u8 *)(__ph2ccmd + 2) = __val;
-
int rtl92d_download_fw(struct ieee80211_hw *hw);
-void rtl92d_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
- u32 cmd_len, u8 *p_cmdbuffer);
-void rtl92d_firmware_selfreset(struct ieee80211_hw *hw);
void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
-void rtl92d_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
#endif
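
Note: the fw.c hunk above replaces three SET_H2CCMD_RSVDPAGE_LOC_* calls with a direct initializer; the deleted macros each stored one reserved-page number per byte, so the two forms are byte-for-byte equivalent:

	u8 loc[3] = { 0 };
	loc[0] = PROBERSP_PG;	/* was SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP() */
	loc[1] = PSPOLL_PG;	/* was SET_H2CCMD_RSVDPAGE_LOC_PSPOLL() */
	loc[2] = NULL_PG;	/* was SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA() */

	/* equivalent single initializer, as used in the new code: */
	u8 u1rsvdpageloc[3] = { PROBERSP_PG, PSPOLL_PG, NULL_PG };
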
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
index 4ba42f6be3f2..03f4314bdb2e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c
@@ -8,8 +8,12 @@
#include "../cam.h"
#include "../ps.h"
#include "../pci.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/dm_common.h"
+#include "../rtl8192d/fw_common.h"
+#include "../rtl8192d/hw_common.h"
+#include "../rtl8192d/phy_common.h"
#include "phy.h"
#include "dm.h"
#include "fw.h"
@@ -50,34 +54,6 @@ static void _rtl92de_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
}
-static void _rtl92de_stop_tx_beacon(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmp1byte;
-
- tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
- rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
- rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff);
- rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
- tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
- tmp1byte &= ~(BIT(0));
- rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
-}
-
-static void _rtl92de_resume_tx_beacon(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 tmp1byte;
-
- tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
- rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
- rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0x0a);
- rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
- tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
- tmp1byte |= BIT(0);
- rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
-}
-
static void _rtl92de_enable_bcn_sub_func(struct ieee80211_hw *hw)
{
_rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(1));
@@ -90,58 +66,14 @@ static void _rtl92de_disable_bcn_sub_func(struct ieee80211_hw *hw)
void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
switch (variable) {
case HW_VAR_RCR:
*((u32 *) (val)) = rtlpci->receive_config;
break;
- case HW_VAR_RF_STATE:
- *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
- break;
- case HW_VAR_FWLPS_RF_ON:{
- enum rf_pwrstate rfstate;
- u32 val_rcr;
-
- rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RF_STATE,
- (u8 *)(&rfstate));
- if (rfstate == ERFOFF) {
- *((bool *) (val)) = true;
- } else {
- val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
- val_rcr &= 0x00070000;
- if (val_rcr)
- *((bool *) (val)) = false;
- else
- *((bool *) (val)) = true;
- }
- break;
- }
- case HW_VAR_FW_PSMODE_STATUS:
- *((bool *) (val)) = ppsc->fw_current_inpsmode;
- break;
- case HW_VAR_CORRECT_TSF:{
- u64 tsf;
- u32 *ptsf_low = (u32 *)&tsf;
- u32 *ptsf_high = ((u32 *)&tsf) + 1;
-
- *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
- *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
- *((u64 *) (val)) = tsf;
- break;
- }
- case HW_VAR_INT_MIGRATION:
- *((bool *)(val)) = rtlpriv->dm.interrupt_migration;
- break;
- case HW_VAR_INT_AC:
- *((bool *)(val)) = rtlpriv->dm.disable_tx_int;
- break;
- case HAL_DEF_WOWLAN:
- break;
default:
- pr_err("switch case %#x not processed\n", variable);
+ rtl92d_get_hw_reg(hw, variable, val);
break;
}
}
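
Note: one case moved out of this switch is HW_VAR_CORRECT_TSF, which assembled the 64-bit TSF from two 32-bit reads of REG_TSFTR through u32 pointers into a u64. A hedged sketch of the same read using shifts instead of pointer aliasing (not necessarily the common helper's actual code):

	static u64 read_tsf(struct rtl_priv *rtlpriv)
	{
		u64 hi = rtl_read_dword(rtlpriv, REG_TSFTR + 4);	/* high dword */
		u64 lo = rtl_read_dword(rtlpriv, REG_TSFTR);		/* low dword */

		return (hi << 32) | lo;
	}
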
@@ -151,141 +83,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- u8 idx;
switch (variable) {
- case HW_VAR_ETHER_ADDR:
- for (idx = 0; idx < ETH_ALEN; idx++) {
- rtl_write_byte(rtlpriv, (REG_MACID + idx),
- val[idx]);
- }
- break;
- case HW_VAR_BASIC_RATE: {
- u16 rate_cfg = ((u16 *) val)[0];
- u8 rate_index = 0;
-
- rate_cfg = rate_cfg & 0x15f;
- if (mac->vendor == PEER_CISCO &&
- ((rate_cfg & 0x150) == 0))
- rate_cfg |= 0x01;
- rtl_write_byte(rtlpriv, REG_RRSR, rate_cfg & 0xff);
- rtl_write_byte(rtlpriv, REG_RRSR + 1,
- (rate_cfg >> 8) & 0xff);
- while (rate_cfg > 0x1) {
- rate_cfg = (rate_cfg >> 1);
- rate_index++;
- }
- if (rtlhal->fw_version > 0xe)
- rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
- rate_index);
- break;
- }
- case HW_VAR_BSSID:
- for (idx = 0; idx < ETH_ALEN; idx++) {
- rtl_write_byte(rtlpriv, (REG_BSSID + idx),
- val[idx]);
- }
- break;
- case HW_VAR_SIFS:
- rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
- rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
- rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
- rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
- if (!mac->ht_enable)
- rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
- 0x0e0e);
- else
- rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
- *((u16 *) val));
- break;
- case HW_VAR_SLOT_TIME: {
- u8 e_aci;
-
- rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
- "HW_VAR_SLOT_TIME %x\n", val[0]);
- rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
- for (e_aci = 0; e_aci < AC_MAX; e_aci++)
- rtlpriv->cfg->ops->set_hw_reg(hw,
- HW_VAR_AC_PARAM,
- (&e_aci));
- break;
- }
- case HW_VAR_ACK_PREAMBLE: {
- u8 reg_tmp;
- u8 short_preamble = (bool) (*val);
-
- reg_tmp = (mac->cur_40_prime_sc) << 5;
- if (short_preamble)
- reg_tmp |= 0x80;
- rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
- break;
- }
- case HW_VAR_AMPDU_MIN_SPACE: {
- u8 min_spacing_to_set;
-
- min_spacing_to_set = *val;
- if (min_spacing_to_set <= 7) {
- mac->min_space_cfg = ((mac->min_space_cfg & 0xf8) |
- min_spacing_to_set);
- *val = min_spacing_to_set;
- rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
- mac->min_space_cfg);
- rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
- mac->min_space_cfg);
- }
- break;
- }
- case HW_VAR_SHORTGI_DENSITY: {
- u8 density_to_set;
-
- density_to_set = *val;
- mac->min_space_cfg = rtlpriv->rtlhal.minspace_cfg;
- mac->min_space_cfg |= (density_to_set << 3);
- rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_SHORTGI_DENSITY: %#x\n",
- mac->min_space_cfg);
- rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
- mac->min_space_cfg);
- break;
- }
- case HW_VAR_AMPDU_FACTOR: {
- u8 factor_toset;
- u32 regtoset;
- u8 *ptmp_byte = NULL;
- u8 index;
-
- if (rtlhal->macphymode == DUALMAC_DUALPHY)
- regtoset = 0xb9726641;
- else if (rtlhal->macphymode == DUALMAC_SINGLEPHY)
- regtoset = 0x66626641;
- else
- regtoset = 0xb972a841;
- factor_toset = *val;
- if (factor_toset <= 3) {
- factor_toset = (1 << (factor_toset + 2));
- if (factor_toset > 0xf)
- factor_toset = 0xf;
- for (index = 0; index < 4; index++) {
- ptmp_byte = (u8 *)(&regtoset) + index;
- if ((*ptmp_byte & 0xf0) >
- (factor_toset << 4))
- *ptmp_byte = (*ptmp_byte & 0x0f)
- | (factor_toset << 4);
- if ((*ptmp_byte & 0x0f) > factor_toset)
- *ptmp_byte = (*ptmp_byte & 0xf0)
- | (factor_toset);
- }
- rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, regtoset);
- rtl_dbg(rtlpriv, COMP_MLME, DBG_LOUD,
- "Set HW_VAR_AMPDU_FACTOR: %#x\n",
- factor_toset);
- }
- break;
- }
case HW_VAR_AC_PARAM: {
u8 e_aci = *val;
rtl92d_dm_init_edca_turbo(hw);
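
Note: the deleted HW_VAR_AMPDU_FACTOR case picks a per-macphy-mode default for REG_AGGLEN_LMT, converts the factor into a max-aggregation nibble (2^(factor+2), capped at 0xf), and clamps both nibbles of each byte against it. A sketch of just the clamping, assuming factor <= 3 as the original checked:

	static u32 clamp_agglen(u32 regtoset, u8 factor)
	{
		u8 nib = min_t(u8, 1 << (factor + 2), 0xf);
		int i;

		for (i = 0; i < 4; i++) {
			u8 *b = (u8 *)&regtoset + i;

			if ((*b & 0xf0) > (nib << 4))
				*b = (*b & 0x0f) | (nib << 4);
			if ((*b & 0x0f) > nib)
				*b = (*b & 0xf0) | nib;
		}
		return regtoset;
	}
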
@@ -346,37 +145,6 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
rtlpci->receive_config = ((u32 *) (val))[0];
break;
- case HW_VAR_RETRY_LIMIT: {
- u8 retry_limit = val[0];
-
- rtl_write_word(rtlpriv, REG_RL,
- retry_limit << RETRY_LIMIT_SHORT_SHIFT |
- retry_limit << RETRY_LIMIT_LONG_SHIFT);
- break;
- }
- case HW_VAR_DUAL_TSF_RST:
- rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
- break;
- case HW_VAR_EFUSE_BYTES:
- rtlefuse->efuse_usedbytes = *((u16 *) val);
- break;
- case HW_VAR_EFUSE_USAGE:
- rtlefuse->efuse_usedpercentage = *val;
- break;
- case HW_VAR_IO_CMD:
- rtl92d_phy_set_io_cmd(hw, (*(enum io_type *)val));
- break;
- case HW_VAR_WPA_CONFIG:
- rtl_write_byte(rtlpriv, REG_SECCFG, *val);
- break;
- case HW_VAR_SET_RPWM:
- rtl92d_fill_h2c_cmd(hw, H2C_PWRM, 1, (val));
- break;
- case HW_VAR_H2C_FW_PWRMODE:
- break;
- case HW_VAR_FW_PSMODE_STATUS:
- ppsc->fw_current_inpsmode = *((bool *) val);
- break;
case HW_VAR_H2C_FW_JOINBSSRPT: {
u8 mstatus = (*val);
u8 tmp_regcr, tmp_reg422;
@@ -409,19 +177,11 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
rtl92d_set_fw_joinbss_report_cmd(hw, (*val));
break;
}
- case HW_VAR_AID: {
- u16 u2btmp;
- u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
- u2btmp &= 0xC000;
- rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
- mac->assoc_id));
- break;
- }
case HW_VAR_CORRECT_TSF: {
u8 btype_ibss = val[0];
if (btype_ibss)
- _rtl92de_stop_tx_beacon(hw);
+ rtl92d_stop_tx_beacon(hw);
_rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(3));
rtl_write_dword(rtlpriv, REG_TSFTR,
(u32) (mac->tsf & 0xffffffff));
@@ -429,7 +189,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
(u32) ((mac->tsf >> 32) & 0xffffffff));
_rtl92de_set_bcn_ctrl_reg(hw, BIT(3), 0);
if (btype_ibss)
- _rtl92de_resume_tx_beacon(hw);
+ rtl92d_resume_tx_beacon(hw);
break;
}
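
Note: the HW_VAR_CORRECT_TSF setter above is the write-side counterpart of the read moved to common code: pause beaconing, clear BIT(3) of REG_BCN_CTRL so the TSF registers accept writes, write the 64-bit value as two dwords, relock, resume. A sketch of the sequence using the helpers named in this hunk:

	static void correct_tsf(struct ieee80211_hw *hw,
				struct rtl_priv *rtlpriv, u64 tsf, bool ibss)
	{
		if (ibss)
			rtl92d_stop_tx_beacon(hw);
		_rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(3));	/* unlock TSF */
		rtl_write_dword(rtlpriv, REG_TSFTR, (u32)(tsf & 0xffffffff));
		rtl_write_dword(rtlpriv, REG_TSFTR + 4, (u32)(tsf >> 32));
		_rtl92de_set_bcn_ctrl_reg(hw, BIT(3), 0);	/* relock */
		if (ibss)
			rtl92d_resume_tx_beacon(hw);
	}
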
@@ -472,34 +232,11 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
break;
}
default:
- pr_err("switch case %#x not processed\n", variable);
+ rtl92d_set_hw_reg(hw, variable, val);
break;
}
}
-static bool _rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- bool status = true;
- long count = 0;
- u32 value = _LLT_INIT_ADDR(address) |
- _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
-
- rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
- do {
- value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
- if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
- break;
- if (count > POLLING_LLT_THRESHOLD) {
- pr_err("Failed to polling write LLT done at address %d!\n",
- address);
- status = false;
- break;
- }
- } while (++count);
- return status;
-}
-
static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -558,13 +295,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
/* 18. LLT_table_init(Adapter); */
for (i = 0; i < (txpktbuf_bndy - 1); i++) {
- status = _rtl92de_llt_write(hw, i, i + 1);
+ status = rtl92d_llt_write(hw, i, i + 1);
if (!status)
return status;
}
/* end of list */
- status = _rtl92de_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
+ status = rtl92d_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
if (!status)
return status;
@@ -573,13 +310,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw)
/* Configure this MAC for two-MAC transfer. */
/* Otherwise it is used as a local loopback buffer. */
for (i = txpktbuf_bndy; i < maxpage; i++) {
- status = _rtl92de_llt_write(hw, i, (i + 1));
+ status = rtl92d_llt_write(hw, i, (i + 1));
if (!status)
return status;
}
/* Let last entry point to the start entry of ring buffer */
- status = _rtl92de_llt_write(hw, maxpage, txpktbuf_bndy);
+ status = rtl92d_llt_write(hw, maxpage, txpktbuf_bndy);
if (!status)
return status;
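
Note: rtl92d_llt_write() (the deleted _rtl92de_llt_write()) issues a single LLT write access and polls REG_LLT_INIT until the op field reads back _LLT_NO_ACTIVE; the loops above then chain page i -> i+1 inside the TX packet buffer, terminate that list with 0xFF, and link the remaining pages into a ring. A sketch of the polled write, assuming the same register macros as the deleted code:

	static bool llt_write(struct rtl_priv *rtlpriv, u32 address, u32 data)
	{
		u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) |
			    _LLT_OP(_LLT_WRITE_ACCESS);
		long count = 0;

		rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
		do {
			value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
			if (_LLT_OP_VALUE(value) == _LLT_NO_ACTIVE)
				return true;	/* access completed */
		} while (++count <= POLLING_LLT_THRESHOLD);

		return false;			/* polling timed out */
	}
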
@@ -842,32 +579,6 @@ static void _rtl92de_enable_aspm_back_door(struct ieee80211_hw *hw)
rtl_write_byte(rtlpriv, 0x352, 0x1);
}
-void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 sec_reg_value;
-
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
- rtlpriv->sec.pairwise_enc_algorithm,
- rtlpriv->sec.group_enc_algorithm);
- if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
- rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
- "not open hw encryption\n");
- return;
- }
- sec_reg_value = SCR_TXENCENABLE | SCR_RXENCENABLE;
- if (rtlpriv->sec.use_defaultkey) {
- sec_reg_value |= SCR_TXUSEDK;
- sec_reg_value |= SCR_RXUSEDK;
- }
- sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
- rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
- rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
- "The SECR-value %x\n", sec_reg_value);
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
-}
-
int rtl92de_hw_init(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -977,7 +688,7 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
/* reset hw sec */
rtl_cam_reset_all_entry(hw);
- rtl92de_enable_hw_security_config(hw);
+ rtl92d_enable_hw_security_config(hw);
/* Read EEPROM TX power index and PHY_REG_PG.txt to capture correct */
/* TX power index for different rate set. */
@@ -991,11 +702,11 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
_rtl92de_enable_aspm_back_door(hw);
/* rtlpriv->intf_ops->enable_aspm(hw); */
- rtl92d_dm_init(hw);
+ rtl92de_dm_init(hw);
rtlpci->being_init_adapter = false;
if (ppsc->rfpwr_state == ERFON) {
- rtl92d_phy_lc_calibrate(hw);
+ rtl92d_phy_lc_calibrate(hw, IS_92D_SINGLEPHY(rtlhal->version));
/* 5G and 2.4G must wait sometime to let RF LO ready */
if (rtlhal->macphymode == DUALMAC_DUALPHY) {
u32 tmp_rega;
@@ -1020,23 +731,6 @@ int rtl92de_hw_init(struct ieee80211_hw *hw)
return err;
}
-static enum version_8192d _rtl92de_read_chip_version(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- enum version_8192d version = VERSION_NORMAL_CHIP_92D_SINGLEPHY;
- u32 value32;
-
- value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
- if (!(value32 & 0x000f0000)) {
- version = VERSION_TEST_CHIP_92D_SINGLEPHY;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "TEST CHIP!!!\n");
- } else {
- version = VERSION_NORMAL_CHIP_92D_SINGLEPHY;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Normal CHIP!!!\n");
- }
- return version;
-}
-
static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
enum nl80211_iftype type)
{
@@ -1048,11 +742,11 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw,
if (type == NL80211_IFTYPE_UNSPECIFIED ||
type == NL80211_IFTYPE_STATION) {
- _rtl92de_stop_tx_beacon(hw);
+ rtl92d_stop_tx_beacon(hw);
_rtl92de_enable_bcn_sub_func(hw);
} else if (type == NL80211_IFTYPE_ADHOC ||
type == NL80211_IFTYPE_AP) {
- _rtl92de_resume_tx_beacon(hw);
+ rtl92d_resume_tx_beacon(hw);
_rtl92de_disable_bcn_sub_func(hw);
} else {
rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
@@ -1152,13 +846,6 @@ void rtl92d_linked_set_reg(struct ieee80211_hw *hw)
}
}
-/* Don't set REG_EDCA_BE_PARAM here because
- * mac80211 will send packets while scanning. */
-void rtl92de_set_qos(struct ieee80211_hw *hw, int aci)
-{
- rtl92d_dm_init_edca_turbo(hw);
-}
-
void rtl92de_enable_interrupt(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1383,825 +1070,6 @@ void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
rtl92de_enable_interrupt(hw);
}
-static void _rtl92de_readpowervalue_fromprom(struct txpower_info *pwrinfo,
- u8 *rom_content, bool autoloadfail)
-{
- u32 rfpath, eeaddr, group, offset1, offset2;
- u8 i;
-
- memset(pwrinfo, 0, sizeof(struct txpower_info));
- if (autoloadfail) {
- for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
- for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
- if (group < CHANNEL_GROUP_MAX_2G) {
- pwrinfo->cck_index[rfpath][group] =
- EEPROM_DEFAULT_TXPOWERLEVEL_2G;
- pwrinfo->ht40_1sindex[rfpath][group] =
- EEPROM_DEFAULT_TXPOWERLEVEL_2G;
- } else {
- pwrinfo->ht40_1sindex[rfpath][group] =
- EEPROM_DEFAULT_TXPOWERLEVEL_5G;
- }
- pwrinfo->ht40_2sindexdiff[rfpath][group] =
- EEPROM_DEFAULT_HT40_2SDIFF;
- pwrinfo->ht20indexdiff[rfpath][group] =
- EEPROM_DEFAULT_HT20_DIFF;
- pwrinfo->ofdmindexdiff[rfpath][group] =
- EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
- pwrinfo->ht40maxoffset[rfpath][group] =
- EEPROM_DEFAULT_HT40_PWRMAXOFFSET;
- pwrinfo->ht20maxoffset[rfpath][group] =
- EEPROM_DEFAULT_HT20_PWRMAXOFFSET;
- }
- }
- for (i = 0; i < 3; i++) {
- pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI;
- pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI;
- }
- return;
- }
-
- /* Autoload may be OK, but the tx power index values are not filled.
-  * If so, set them to default values. */
- for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
- for (group = 0; group < CHANNEL_GROUP_MAX_2G; group++) {
- eeaddr = EEPROM_CCK_TX_PWR_INX_2G + (rfpath * 3)
- + group;
- pwrinfo->cck_index[rfpath][group] =
- (rom_content[eeaddr] == 0xFF) ?
- (eeaddr > 0x7B ?
- EEPROM_DEFAULT_TXPOWERLEVEL_5G :
- EEPROM_DEFAULT_TXPOWERLEVEL_2G) :
- rom_content[eeaddr];
- }
- }
- for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
- for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
- offset1 = group / 3;
- offset2 = group % 3;
- eeaddr = EEPROM_HT40_1S_TX_PWR_INX_2G + (rfpath * 3) +
- offset2 + offset1 * 21;
- pwrinfo->ht40_1sindex[rfpath][group] =
- (rom_content[eeaddr] == 0xFF) ? (eeaddr > 0x7B ?
- EEPROM_DEFAULT_TXPOWERLEVEL_5G :
- EEPROM_DEFAULT_TXPOWERLEVEL_2G) :
- rom_content[eeaddr];
- }
- }
- /* These are just for the 92D efuse offsets. */
- for (group = 0; group < CHANNEL_GROUP_MAX; group++) {
- for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
- int base1 = EEPROM_HT40_2S_TX_PWR_INX_DIFF_2G;
-
- offset1 = group / 3;
- offset2 = group % 3;
-
- if (rom_content[base1 + offset2 + offset1 * 21] != 0xFF)
- pwrinfo->ht40_2sindexdiff[rfpath][group] =
- (rom_content[base1 +
- offset2 + offset1 * 21] >> (rfpath * 4))
- & 0xF;
- else
- pwrinfo->ht40_2sindexdiff[rfpath][group] =
- EEPROM_DEFAULT_HT40_2SDIFF;
- if (rom_content[EEPROM_HT20_TX_PWR_INX_DIFF_2G + offset2
- + offset1 * 21] != 0xFF)
- pwrinfo->ht20indexdiff[rfpath][group] =
- (rom_content[EEPROM_HT20_TX_PWR_INX_DIFF_2G
- + offset2 + offset1 * 21] >> (rfpath * 4))
- & 0xF;
- else
- pwrinfo->ht20indexdiff[rfpath][group] =
- EEPROM_DEFAULT_HT20_DIFF;
- if (rom_content[EEPROM_OFDM_TX_PWR_INX_DIFF_2G + offset2
- + offset1 * 21] != 0xFF)
- pwrinfo->ofdmindexdiff[rfpath][group] =
- (rom_content[EEPROM_OFDM_TX_PWR_INX_DIFF_2G
- + offset2 + offset1 * 21] >> (rfpath * 4))
- & 0xF;
- else
- pwrinfo->ofdmindexdiff[rfpath][group] =
- EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
- if (rom_content[EEPROM_HT40_MAX_PWR_OFFSET_2G + offset2
- + offset1 * 21] != 0xFF)
- pwrinfo->ht40maxoffset[rfpath][group] =
- (rom_content[EEPROM_HT40_MAX_PWR_OFFSET_2G
- + offset2 + offset1 * 21] >> (rfpath * 4))
- & 0xF;
- else
- pwrinfo->ht40maxoffset[rfpath][group] =
- EEPROM_DEFAULT_HT40_PWRMAXOFFSET;
- if (rom_content[EEPROM_HT20_MAX_PWR_OFFSET_2G + offset2
- + offset1 * 21] != 0xFF)
- pwrinfo->ht20maxoffset[rfpath][group] =
- (rom_content[EEPROM_HT20_MAX_PWR_OFFSET_2G +
- offset2 + offset1 * 21] >> (rfpath * 4)) &
- 0xF;
- else
- pwrinfo->ht20maxoffset[rfpath][group] =
- EEPROM_DEFAULT_HT20_PWRMAXOFFSET;
- }
- }
- if (rom_content[EEPROM_TSSI_A_5G] != 0xFF) {
- /* 5GL */
- pwrinfo->tssi_a[0] = rom_content[EEPROM_TSSI_A_5G] & 0x3F;
- pwrinfo->tssi_b[0] = rom_content[EEPROM_TSSI_B_5G] & 0x3F;
- /* 5GM */
- pwrinfo->tssi_a[1] = rom_content[EEPROM_TSSI_AB_5G] & 0x3F;
- pwrinfo->tssi_b[1] =
- (rom_content[EEPROM_TSSI_AB_5G] & 0xC0) >> 6 |
- (rom_content[EEPROM_TSSI_AB_5G + 1] & 0x0F) << 2;
- /* 5GH */
- pwrinfo->tssi_a[2] = (rom_content[EEPROM_TSSI_AB_5G + 1] &
- 0xF0) >> 4 |
- (rom_content[EEPROM_TSSI_AB_5G + 2] & 0x03) << 4;
- pwrinfo->tssi_b[2] = (rom_content[EEPROM_TSSI_AB_5G + 2] &
- 0xFC) >> 2;
- } else {
- for (i = 0; i < 3; i++) {
- pwrinfo->tssi_a[i] = EEPROM_DEFAULT_TSSI;
- pwrinfo->tssi_b[i] = EEPROM_DEFAULT_TSSI;
- }
- }
-}
-
-static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw,
- bool autoload_fail, u8 *hwinfo)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct txpower_info pwrinfo;
- u8 tempval[2], i, pwr, diff;
- u32 ch, rfpath, group;
-
- _rtl92de_readpowervalue_fromprom(&pwrinfo, hwinfo, autoload_fail);
- if (!autoload_fail) {
- /* bit0~2 */
- rtlefuse->eeprom_regulatory = (hwinfo[EEPROM_RF_OPT1] & 0x7);
- rtlefuse->eeprom_thermalmeter =
- hwinfo[EEPROM_THERMAL_METER] & 0x1f;
- rtlefuse->crystalcap = hwinfo[EEPROM_XTAL_K];
- tempval[0] = hwinfo[EEPROM_IQK_DELTA] & 0x03;
- tempval[1] = (hwinfo[EEPROM_LCK_DELTA] & 0x0C) >> 2;
- rtlefuse->txpwr_fromeprom = true;
- if (IS_92D_D_CUT(rtlpriv->rtlhal.version) ||
- IS_92D_E_CUT(rtlpriv->rtlhal.version)) {
- rtlefuse->internal_pa_5g[0] =
- !((hwinfo[EEPROM_TSSI_A_5G] & BIT(6)) >> 6);
- rtlefuse->internal_pa_5g[1] =
- !((hwinfo[EEPROM_TSSI_B_5G] & BIT(6)) >> 6);
- rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG,
- "Is D cut,Internal PA0 %d Internal PA1 %d\n",
- rtlefuse->internal_pa_5g[0],
- rtlefuse->internal_pa_5g[1]);
- }
- rtlefuse->eeprom_c9 = hwinfo[EEPROM_RF_OPT6];
- rtlefuse->eeprom_cc = hwinfo[EEPROM_RF_OPT7];
- } else {
- rtlefuse->eeprom_regulatory = 0;
- rtlefuse->eeprom_thermalmeter = EEPROM_DEFAULT_THERMALMETER;
- rtlefuse->crystalcap = EEPROM_DEFAULT_CRYSTALCAP;
- tempval[0] = tempval[1] = 3;
- }
-
- /* Use default values to fill parameters where
-  * the efuse is not filled in. */
-
- /* ThermalMeter from EEPROM */
- if (rtlefuse->eeprom_thermalmeter < 0x06 ||
- rtlefuse->eeprom_thermalmeter > 0x1c)
- rtlefuse->eeprom_thermalmeter = 0x12;
- rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
-
- /* check XTAL_K */
- if (rtlefuse->crystalcap == 0xFF)
- rtlefuse->crystalcap = 0;
- if (rtlefuse->eeprom_regulatory > 3)
- rtlefuse->eeprom_regulatory = 0;
-
- for (i = 0; i < 2; i++) {
- switch (tempval[i]) {
- case 0:
- tempval[i] = 5;
- break;
- case 1:
- tempval[i] = 4;
- break;
- case 2:
- tempval[i] = 3;
- break;
- case 3:
- default:
- tempval[i] = 0;
- break;
- }
- }
-
- rtlefuse->delta_iqk = tempval[0];
- if (tempval[1] > 0)
- rtlefuse->delta_lck = tempval[1] - 1;
- if (rtlefuse->eeprom_c9 == 0xFF)
- rtlefuse->eeprom_c9 = 0x00;
- rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
- "EEPROMRegulatory = 0x%x\n", rtlefuse->eeprom_regulatory);
- rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
- "ThermalMeter = 0x%x\n", rtlefuse->eeprom_thermalmeter);
- rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
- "CrystalCap = 0x%x\n", rtlefuse->crystalcap);
- rtl_dbg(rtlpriv, COMP_INTR, DBG_LOUD,
- "Delta_IQK = 0x%x Delta_LCK = 0x%x\n",
- rtlefuse->delta_iqk, rtlefuse->delta_lck);
-
- for (rfpath = 0; rfpath < RF6052_MAX_PATH; rfpath++) {
- for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
- group = rtl92d_get_chnlgroup_fromarray((u8) ch);
- if (ch < CHANNEL_MAX_NUMBER_2G)
- rtlefuse->txpwrlevel_cck[rfpath][ch] =
- pwrinfo.cck_index[rfpath][group];
- rtlefuse->txpwrlevel_ht40_1s[rfpath][ch] =
- pwrinfo.ht40_1sindex[rfpath][group];
- rtlefuse->txpwr_ht20diff[rfpath][ch] =
- pwrinfo.ht20indexdiff[rfpath][group];
- rtlefuse->txpwr_legacyhtdiff[rfpath][ch] =
- pwrinfo.ofdmindexdiff[rfpath][group];
- rtlefuse->pwrgroup_ht20[rfpath][ch] =
- pwrinfo.ht20maxoffset[rfpath][group];
- rtlefuse->pwrgroup_ht40[rfpath][ch] =
- pwrinfo.ht40maxoffset[rfpath][group];
- pwr = pwrinfo.ht40_1sindex[rfpath][group];
- diff = pwrinfo.ht40_2sindexdiff[rfpath][group];
- rtlefuse->txpwrlevel_ht40_2s[rfpath][ch] =
- (pwr > diff) ? (pwr - diff) : 0;
- }
- }
-}
-
-static void _rtl92de_read_macphymode_from_prom(struct ieee80211_hw *hw,
- u8 *content)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 macphy_crvalue = content[EEPROM_MAC_FUNCTION];
-
- if (macphy_crvalue & BIT(3)) {
- rtlhal->macphymode = SINGLEMAC_SINGLEPHY;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode SINGLEMAC_SINGLEPHY\n");
- } else {
- rtlhal->macphymode = DUALMAC_DUALPHY;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode DUALMAC_DUALPHY\n");
- }
-}
-
-static void _rtl92de_read_macphymode_and_bandtype(struct ieee80211_hw *hw,
- u8 *content)
-{
- _rtl92de_read_macphymode_from_prom(hw, content);
- rtl92d_phy_config_macphymode(hw);
- rtl92d_phy_config_macphymode_info(hw);
-}
-
-static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- enum version_8192d chipver = rtlpriv->rtlhal.version;
- u8 cutvalue[2];
- u16 chipvalue;
-
- read_efuse_byte(hw, EEPROME_CHIP_VERSION_H, &cutvalue[1]);
- read_efuse_byte(hw, EEPROME_CHIP_VERSION_L, &cutvalue[0]);
- chipvalue = (cutvalue[1] << 8) | cutvalue[0];
- switch (chipvalue) {
- case 0xAA55:
- chipver |= CHIP_92D_C_CUT;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "C-CUT!!!\n");
- break;
- case 0x9966:
- chipver |= CHIP_92D_D_CUT;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "D-CUT!!!\n");
- break;
- case 0xCC33:
- chipver |= CHIP_92D_E_CUT;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "E-CUT!!!\n");
- break;
- default:
- chipver |= CHIP_92D_D_CUT;
- pr_err("Unknown CUT!\n");
- break;
- }
- rtlpriv->rtlhal.version = chipver;
-}
-
-static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- int params[] = {RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID,
- EEPROM_SVID, EEPROM_SMID, EEPROM_MAC_ADDR_MAC0_92D,
- EEPROM_CHANNEL_PLAN, EEPROM_VERSION, EEPROM_CUSTOMER_ID,
- COUNTRY_CODE_WORLD_WIDE_13};
- int i;
- u16 usvalue;
- u8 *hwinfo;
-
- hwinfo = kzalloc(HWSET_MAX_SIZE, GFP_KERNEL);
- if (!hwinfo)
- return;
-
- if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params))
- goto exit;
-
- _rtl92de_efuse_update_chip_version(hw);
- _rtl92de_read_macphymode_and_bandtype(hw, hwinfo);
-
- /* Read Permanent MAC address for 2nd interface */
- if (rtlhal->interfaceindex != 0) {
- for (i = 0; i < 6; i += 2) {
- usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR_MAC1_92D + i];
- *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
- }
- }
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR,
- rtlefuse->dev_addr);
- rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr);
- _rtl92de_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo);
-
- /* Read Channel Plan */
- switch (rtlhal->bandset) {
- case BAND_ON_2_4G:
- rtlefuse->channel_plan = COUNTRY_CODE_TELEC;
- break;
- case BAND_ON_5G:
- rtlefuse->channel_plan = COUNTRY_CODE_FCC;
- break;
- case BAND_ON_BOTH:
- rtlefuse->channel_plan = COUNTRY_CODE_FCC;
- break;
- default:
- rtlefuse->channel_plan = COUNTRY_CODE_FCC;
- break;
- }
- rtlefuse->txpwr_fromeprom = true;
-exit:
- kfree(hwinfo);
-}
-
-void rtl92de_read_eeprom_info(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 tmp_u1b;
-
- rtlhal->version = _rtl92de_read_chip_version(hw);
- tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
- rtlefuse->autoload_status = tmp_u1b;
- if (tmp_u1b & BIT(4)) {
- rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EEPROM\n");
- rtlefuse->epromtype = EEPROM_93C46;
- } else {
- rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "Boot from EFUSE\n");
- rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
- }
- if (tmp_u1b & BIT(5)) {
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n");
-
- rtlefuse->autoload_failflag = false;
- _rtl92de_read_adapter_info(hw);
- } else {
- pr_err("Autoload ERR!!\n");
- }
- return;
-}
-
-static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u32 ratr_value;
- u8 ratr_index = 0;
- u8 nmode = mac->ht_enable;
- u8 mimo_ps = IEEE80211_SMPS_OFF;
- u16 shortgi_rate;
- u32 tmp_ratr_value;
- u8 curtxbw_40mhz = mac->bw_40;
- u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
- 1 : 0;
- u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
- 1 : 0;
- enum wireless_mode wirelessmode = mac->mode;
-
- if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_value = sta->deflink.supp_rates[1] << 4;
- else
- ratr_value = sta->deflink.supp_rates[0];
- ratr_value |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
- sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
- switch (wirelessmode) {
- case WIRELESS_MODE_A:
- ratr_value &= 0x00000FF0;
- break;
- case WIRELESS_MODE_B:
- if (ratr_value & 0x0000000c)
- ratr_value &= 0x0000000d;
- else
- ratr_value &= 0x0000000f;
- break;
- case WIRELESS_MODE_G:
- ratr_value &= 0x00000FF5;
- break;
- case WIRELESS_MODE_N_24G:
- case WIRELESS_MODE_N_5G:
- nmode = 1;
- if (mimo_ps == IEEE80211_SMPS_STATIC) {
- ratr_value &= 0x0007F005;
- } else {
- u32 ratr_mask;
-
- if (get_rf_type(rtlphy) == RF_1T2R ||
- get_rf_type(rtlphy) == RF_1T1R) {
- ratr_mask = 0x000ff005;
- } else {
- ratr_mask = 0x0f0ff005;
- }
-
- ratr_value &= ratr_mask;
- }
- break;
- default:
- if (rtlphy->rf_type == RF_1T2R)
- ratr_value &= 0x000ff0ff;
- else
- ratr_value &= 0x0f0ff0ff;
-
- break;
- }
- ratr_value &= 0x0FFFFFFF;
- if (nmode && ((curtxbw_40mhz && curshortgi_40mhz) ||
- (!curtxbw_40mhz && curshortgi_20mhz))) {
- ratr_value |= 0x10000000;
- tmp_ratr_value = (ratr_value >> 12);
- for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
- if ((1 << shortgi_rate) & tmp_ratr_value)
- break;
- }
- shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
- (shortgi_rate << 4) | (shortgi_rate);
- }
- rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
- rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG, "%x\n",
- rtl_read_dword(rtlpriv, REG_ARFR0));
-}
-
-static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level, bool update_bw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_sta_info *sta_entry = NULL;
- u32 ratr_bitmap;
- u8 ratr_index;
- u8 curtxbw_40mhz = (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) ? 1 : 0;
- u8 curshortgi_40mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_40) ?
- 1 : 0;
- u8 curshortgi_20mhz = (sta->deflink.ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ?
- 1 : 0;
- enum wireless_mode wirelessmode = 0;
- bool shortgi = false;
- u32 value[2];
- u8 macid = 0;
- u8 mimo_ps = IEEE80211_SMPS_OFF;
-
- sta_entry = (struct rtl_sta_info *) sta->drv_priv;
- mimo_ps = sta_entry->mimo_ps;
- wirelessmode = sta_entry->wireless_mode;
- if (mac->opmode == NL80211_IFTYPE_STATION)
- curtxbw_40mhz = mac->bw_40;
- else if (mac->opmode == NL80211_IFTYPE_AP ||
- mac->opmode == NL80211_IFTYPE_ADHOC)
- macid = sta->aid + 1;
-
- if (rtlhal->current_bandtype == BAND_ON_5G)
- ratr_bitmap = sta->deflink.supp_rates[1] << 4;
- else
- ratr_bitmap = sta->deflink.supp_rates[0];
- ratr_bitmap |= (sta->deflink.ht_cap.mcs.rx_mask[1] << 20 |
- sta->deflink.ht_cap.mcs.rx_mask[0] << 12);
- switch (wirelessmode) {
- case WIRELESS_MODE_B:
- ratr_index = RATR_INX_WIRELESS_B;
- if (ratr_bitmap & 0x0000000c)
- ratr_bitmap &= 0x0000000d;
- else
- ratr_bitmap &= 0x0000000f;
- break;
- case WIRELESS_MODE_G:
- ratr_index = RATR_INX_WIRELESS_GB;
-
- if (rssi_level == 1)
- ratr_bitmap &= 0x00000f00;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x00000ff0;
- else
- ratr_bitmap &= 0x00000ff5;
- break;
- case WIRELESS_MODE_A:
- ratr_index = RATR_INX_WIRELESS_G;
- ratr_bitmap &= 0x00000ff0;
- break;
- case WIRELESS_MODE_N_24G:
- case WIRELESS_MODE_N_5G:
- if (wirelessmode == WIRELESS_MODE_N_24G)
- ratr_index = RATR_INX_WIRELESS_NGB;
- else
- ratr_index = RATR_INX_WIRELESS_NG;
- if (mimo_ps == IEEE80211_SMPS_STATIC) {
- if (rssi_level == 1)
- ratr_bitmap &= 0x00070000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x0007f000;
- else
- ratr_bitmap &= 0x0007f005;
- } else {
- if (rtlphy->rf_type == RF_1T2R ||
- rtlphy->rf_type == RF_1T1R) {
- if (curtxbw_40mhz) {
- if (rssi_level == 1)
- ratr_bitmap &= 0x000f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x000ff000;
- else
- ratr_bitmap &= 0x000ff015;
- } else {
- if (rssi_level == 1)
- ratr_bitmap &= 0x000f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x000ff000;
- else
- ratr_bitmap &= 0x000ff005;
- }
- } else {
- if (curtxbw_40mhz) {
- if (rssi_level == 1)
- ratr_bitmap &= 0x0f0f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x0f0ff000;
- else
- ratr_bitmap &= 0x0f0ff015;
- } else {
- if (rssi_level == 1)
- ratr_bitmap &= 0x0f0f0000;
- else if (rssi_level == 2)
- ratr_bitmap &= 0x0f0ff000;
- else
- ratr_bitmap &= 0x0f0ff005;
- }
- }
- }
- if ((curtxbw_40mhz && curshortgi_40mhz) ||
- (!curtxbw_40mhz && curshortgi_20mhz)) {
-
- if (macid == 0)
- shortgi = true;
- else if (macid == 1)
- shortgi = false;
- }
- break;
- default:
- ratr_index = RATR_INX_WIRELESS_NGB;
-
- if (rtlphy->rf_type == RF_1T2R)
- ratr_bitmap &= 0x000ff0ff;
- else
- ratr_bitmap &= 0x0f0ff0ff;
- break;
- }
-
- value[0] = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28);
- value[1] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
- rtl_dbg(rtlpriv, COMP_RATR, DBG_DMESG,
- "ratr_bitmap :%x value0:%x value1:%x\n",
- ratr_bitmap, value[0], value[1]);
- rtl92d_fill_h2c_cmd(hw, H2C_RA_MASK, 5, (u8 *) value);
- if (macid != 0)
- sta_entry->ratr_index = ratr_index;
-}
-
-void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level, bool update_bw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->dm.useramask)
- rtl92de_update_hal_rate_mask(hw, sta, rssi_level, update_bw);
- else
- rtl92de_update_hal_rate_table(hw, sta);
-}
-
-void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- u16 sifs_timer;
-
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
- &mac->slot_time);
- if (!mac->ht_enable)
- sifs_timer = 0x0a0a;
- else
- sifs_timer = 0x1010;
- rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
-}
-
-bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
- struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
- enum rf_pwrstate e_rfpowerstate_toset;
- u8 u1tmp;
- bool actuallyset = false;
- unsigned long flag;
-
- if (rtlpci->being_init_adapter)
- return false;
- if (ppsc->swrf_processing)
- return false;
- spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
- if (ppsc->rfchange_inprogress) {
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
- return false;
- } else {
- ppsc->rfchange_inprogress = true;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
- }
- rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, rtl_read_byte(rtlpriv,
- REG_MAC_PINMUX_CFG) & ~(BIT(3)));
- u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
- e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
- if (ppsc->hwradiooff && (e_rfpowerstate_toset == ERFON)) {
- rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio ON, RF ON\n");
- e_rfpowerstate_toset = ERFON;
- ppsc->hwradiooff = false;
- actuallyset = true;
- } else if (!ppsc->hwradiooff && (e_rfpowerstate_toset == ERFOFF)) {
- rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
- "GPIOChangeRF - HW Radio OFF, RF OFF\n");
- e_rfpowerstate_toset = ERFOFF;
- ppsc->hwradiooff = true;
- actuallyset = true;
- }
- if (actuallyset) {
- spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
- ppsc->rfchange_inprogress = false;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
- } else {
- if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
- RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
- spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
- ppsc->rfchange_inprogress = false;
- spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
- }
- *valid = 1;
- return !ppsc->hwradiooff;
-}
-
-void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
- u8 *p_macaddr, bool is_group, u8 enc_algo,
- bool is_wepkey, bool clear_all)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 *macaddr = p_macaddr;
- u32 entry_id;
- bool is_pairwise = false;
- static u8 cam_const_addr[4][6] = {
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
- {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
- };
- static u8 cam_const_broad[] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
- };
-
- if (clear_all) {
- u8 idx;
- u8 cam_offset = 0;
- u8 clear_number = 5;
- rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG, "clear_all\n");
- for (idx = 0; idx < clear_number; idx++) {
- rtl_cam_mark_invalid(hw, cam_offset + idx);
- rtl_cam_empty_entry(hw, cam_offset + idx);
-
- if (idx < 5) {
- memset(rtlpriv->sec.key_buf[idx], 0,
- MAX_KEY_LEN);
- rtlpriv->sec.key_len[idx] = 0;
- }
- }
- } else {
- switch (enc_algo) {
- case WEP40_ENCRYPTION:
- enc_algo = CAM_WEP40;
- break;
- case WEP104_ENCRYPTION:
- enc_algo = CAM_WEP104;
- break;
- case TKIP_ENCRYPTION:
- enc_algo = CAM_TKIP;
- break;
- case AESCCMP_ENCRYPTION:
- enc_algo = CAM_AES;
- break;
- default:
- pr_err("switch case %#x not processed\n",
- enc_algo);
- enc_algo = CAM_TKIP;
- break;
- }
- if (is_wepkey || rtlpriv->sec.use_defaultkey) {
- macaddr = cam_const_addr[key_index];
- entry_id = key_index;
- } else {
- if (is_group) {
- macaddr = cam_const_broad;
- entry_id = key_index;
- } else {
- if (mac->opmode == NL80211_IFTYPE_AP) {
- entry_id = rtl_cam_get_free_entry(hw,
- p_macaddr);
- if (entry_id >= TOTAL_CAM_ENTRY) {
- pr_err("Can not find free hw security cam entry\n");
- return;
- }
- } else {
- entry_id = CAM_PAIRWISE_KEY_POSITION;
- }
- key_index = PAIRWISE_KEYIDX;
- is_pairwise = true;
- }
- }
- if (rtlpriv->sec.key_len[key_index] == 0) {
- rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
- "delete one entry, entry_id is %d\n",
- entry_id);
- if (mac->opmode == NL80211_IFTYPE_AP)
- rtl_cam_del_entry(hw, p_macaddr);
- rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
- } else {
- rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY length is %d\n",
- rtlpriv->sec.key_len[PAIRWISE_KEYIDX]);
- rtl_dbg(rtlpriv, COMP_SEC, DBG_LOUD,
- "The insert KEY is %x %x\n",
- rtlpriv->sec.key_buf[0][0],
- rtlpriv->sec.key_buf[0][1]);
- rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
- "add one entry\n");
- if (is_pairwise) {
- RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
- "Pairwise Key content",
- rtlpriv->sec.pairwise_key,
- rtlpriv->
- sec.key_len[PAIRWISE_KEYIDX]);
- rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
- "set Pairwise key\n");
- rtl_cam_add_one_entry(hw, macaddr, key_index,
- entry_id, enc_algo,
- CAM_CONFIG_NO_USEDK,
- rtlpriv->
- sec.key_buf[key_index]);
- } else {
- rtl_dbg(rtlpriv, COMP_SEC, DBG_DMESG,
- "set group key\n");
- if (mac->opmode == NL80211_IFTYPE_ADHOC) {
- rtl_cam_add_one_entry(hw,
- rtlefuse->dev_addr,
- PAIRWISE_KEYIDX,
- CAM_PAIRWISE_KEY_POSITION,
- enc_algo, CAM_CONFIG_NO_USEDK,
- rtlpriv->sec.key_buf[entry_id]);
- }
- rtl_cam_add_one_entry(hw, macaddr, key_index,
- entry_id, enc_algo,
- CAM_CONFIG_NO_USEDK,
- rtlpriv->sec.key_buf
- [entry_id]);
- }
- }
- }
-}
-
void rtl92de_suspend(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
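
Note: the deleted rtl92de_update_hal_rate_mask() ends by packing the rate bitmap, RA index, macid and short-GI flag into a five-byte H2C_RA_MASK command. A sketch of just that packing (the RSSI/bandwidth masking policy above is elided; rtl92d_fill_h2c_cmd() now comes from the shared fw code):

	static void send_ra_mask(struct ieee80211_hw *hw, u32 ratr_bitmap,
				 u8 ratr_index, u8 macid, bool shortgi)
	{
		u32 value[2];

		/* value[0]: bits 0-27 rate bitmap, bits 28-31 RA index */
		value[0] = (ratr_bitmap & 0x0fffffff) | (ratr_index << 28);
		/* value[1]: macid, BIT(5) short GI, BIT(7) valid */
		value[1] = macid | (shortgi ? 0x20 : 0x00) | 0x80;
		rtl92d_fill_h2c_cmd(hw, H2C_RA_MASK, 5, (u8 *)value);
	}
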
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
index ea495216d394..bda4a1a7c91d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.h
@@ -5,7 +5,6 @@
#define __RTL92DE_HW_H__
void rtl92de_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
-void rtl92de_read_eeprom_info(struct ieee80211_hw *hw);
void rtl92de_interrupt_recognized(struct ieee80211_hw *hw,
struct rtl_int *int_vec);
int rtl92de_hw_init(struct ieee80211_hw *hw);
@@ -14,21 +13,11 @@ void rtl92de_enable_interrupt(struct ieee80211_hw *hw);
void rtl92de_disable_interrupt(struct ieee80211_hw *hw);
int rtl92de_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
void rtl92de_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
-void rtl92de_set_qos(struct ieee80211_hw *hw, int aci);
void rtl92de_set_beacon_related_registers(struct ieee80211_hw *hw);
void rtl92de_set_beacon_interval(struct ieee80211_hw *hw);
void rtl92de_update_interrupt_mask(struct ieee80211_hw *hw,
u32 add_msr, u32 rm_msr);
void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
-void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u8 rssi_level,
- bool update_bw);
-void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw);
-bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
-void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw);
-void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index,
- u8 *p_macaddr, bool is_group, u8 enc_algo,
- bool is_wepkey, bool clear_all);
void rtl92de_write_dword_dbi(struct ieee80211_hw *hw, u16 offset, u32 value,
u8 direct);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
index 4bd708570992..33aede56c81b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/led.c
@@ -3,7 +3,7 @@
#include "../wifi.h"
#include "../pci.h"
-#include "reg.h"
+#include "../rtl8192d/reg.h"
#include "led.h"
void rtl92de_sw_led_on(struct ieee80211_hw *hw, enum rtl_led_pin pin)
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
index d835a27429f0..d429560009bb 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.c
@@ -5,8 +5,11 @@
#include "../pci.h"
#include "../ps.h"
#include "../core.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/dm_common.h"
+#include "../rtl8192d/phy_common.h"
+#include "../rtl8192d/rf_common.h"
#include "phy.h"
#include "rf.h"
#include "dm.h"
@@ -21,9 +24,6 @@
#define RF_REG_NUM_FOR_C_CUT_2G 5
#define RF_CHNL_NUM_5G 19
#define RF_CHNL_NUM_5G_40M 17
-#define TARGET_CHNL_NUM_5G 221
-#define TARGET_CHNL_NUM_2G 14
-#define CV_CURVE_CNT 64
static u32 rf_reg_for_5g_swchnl_normal[MAX_RF_IMR_INDEX_NORMAL] = {
0, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x0
@@ -160,15 +160,6 @@ static u32 targetchnl_2g[TARGET_CHNL_NUM_2G] = {
25711, 25658, 25606, 25554, 25502, 25451, 25328
};
-static const u8 channel_all[59] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
- 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
- 114, 116, 118, 120, 122, 124, 126, 128, 130,
- 132, 134, 136, 138, 140, 149, 151, 153, 155,
- 157, 159, 161, 163, 165
-};
-
u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -235,119 +226,6 @@ void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
regaddr, bitmask, data);
}
-static u32 _rtl92d_phy_rf_serial_read(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 offset)
-{
-
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
- u32 newoffset;
- u32 tmplong, tmplong2;
- u8 rfpi_enable = 0;
- u32 retvalue;
-
- newoffset = offset;
- tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
- if (rfpath == RF90_PATH_A)
- tmplong2 = tmplong;
- else
- tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
- tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
- (newoffset << 23) | BLSSIREADEDGE;
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
- tmplong & (~BLSSIREADEDGE));
- udelay(10);
- rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
- udelay(50);
- udelay(50);
- rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
- tmplong | BLSSIREADEDGE);
- udelay(10);
- if (rfpath == RF90_PATH_A)
- rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
- BIT(8));
- else if (rfpath == RF90_PATH_B)
- rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
- BIT(8));
- if (rfpi_enable)
- retvalue = rtl_get_bbreg(hw, pphyreg->rf_rbpi,
- BLSSIREADBACKDATA);
- else
- retvalue = rtl_get_bbreg(hw, pphyreg->rf_rb,
- BLSSIREADBACKDATA);
- rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFR-%d Addr[0x%x] = 0x%x\n",
- rfpath, pphyreg->rf_rb, retvalue);
- return retvalue;
-}
-
-static void _rtl92d_phy_rf_serial_write(struct ieee80211_hw *hw,
- enum radio_path rfpath,
- u32 offset, u32 data)
-{
- u32 data_and_addr;
- u32 newoffset;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-
- newoffset = offset;
- /* T65 RF */
- data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
- rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
- rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "RFW-%d Addr[0x%x]=0x%x\n",
- rfpath, pphyreg->rf3wire_offset, data_and_addr);
-}
-
-u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr, u32 bitmask)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 original_value, readback_value, bitshift;
-
- rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x)\n",
- regaddr, rfpath, bitmask);
- spin_lock(&rtlpriv->locks.rf_lock);
- original_value = _rtl92d_phy_rf_serial_read(hw, rfpath, regaddr);
- bitshift = calculate_bit_shift(bitmask);
- readback_value = (original_value & bitmask) >> bitshift;
- spin_unlock(&rtlpriv->locks.rf_lock);
- rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), rfpath(%#x), bitmask(%#x), original_value(%#x)\n",
- regaddr, rfpath, bitmask, original_value);
- return readback_value;
-}
-
-void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw, enum radio_path rfpath,
- u32 regaddr, u32 bitmask, u32 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u32 original_value, bitshift;
-
- rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
- if (bitmask == 0)
- return;
- spin_lock(&rtlpriv->locks.rf_lock);
- if (rtlphy->rf_mode != RF_OP_BY_FW) {
- if (bitmask != RFREG_OFFSET_MASK) {
- original_value = _rtl92d_phy_rf_serial_read(hw,
- rfpath, regaddr);
- bitshift = calculate_bit_shift(bitmask);
- data = ((original_value & (~bitmask)) |
- (data << bitshift));
- }
- _rtl92d_phy_rf_serial_write(hw, rfpath, regaddr, data);
- }
- spin_unlock(&rtlpriv->locks.rf_lock);
- rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
- "regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
- regaddr, bitmask, data, rfpath);
-}
-
bool rtl92d_phy_mac_config(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -374,133 +252,6 @@ bool rtl92d_phy_mac_config(struct ieee80211_hw *hw)
return true;
}
-static void _rtl92d_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- /* RF Interface Software Control */
- /* 16 LSBs if read 32-bit from 0x870 */
- rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
- /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
- rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
- /* 16 LSBs if read 32-bit from 0x874 */
- rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
- /* 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) */
-
- rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
- /* RF Interface Readback Value */
- /* 16 LSBs if read 32-bit from 0x8E0 */
- rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
- /* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */
- rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
- /* 16 LSBs if read 32-bit from 0x8E4 */
- rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
- /* 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) */
- rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
-
- /* RF Interface Output (and Enable) */
- /* 16 LSBs if read 32-bit from 0x860 */
- rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
- /* 16 LSBs if read 32-bit from 0x864 */
- rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
-
- /* RF Interface (Output and) Enable */
- /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */
- rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
- /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */
- rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
-
- /* Addr of LSSI. Write RF register by driver */
- /* LSSI Parameter */
- rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
- RFPGA0_XA_LSSIPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
- RFPGA0_XB_LSSIPARAMETER;
-
- /* RF parameter */
- /* BB Band Select */
- rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = RFPGA0_XAB_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = RFPGA0_XAB_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = RFPGA0_XCD_RFPARAMETER;
- rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = RFPGA0_XCD_RFPARAMETER;
-
- /* Tx AGC Gain Stage (same for all path. Should we remove this?) */
- /* Tx gain stage */
- rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- /* Tx gain stage */
- rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- /* Tx gain stage */
- rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
- /* Tx gain stage */
- rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
-
- /* Transceiver A~D HSSI Parameter-1 */
- /* wire control parameter1 */
- rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
- /* wire control parameter1 */
- rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
-
- /* Transceiver A~D HSSI Parameter-2 */
- /* wire control parameter2 */
- rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
- /* wire control parameter2 */
- rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
-
- /* RF switch Control */
- /* TR/Ant switch control */
- rtlphy->phyreg_def[RF90_PATH_A].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_B].rfsw_ctrl = RFPGA0_XAB_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_C].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
- rtlphy->phyreg_def[RF90_PATH_D].rfsw_ctrl = RFPGA0_XCD_SWITCHCONTROL;
-
- /* AGC control 1 */
- rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
- rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
-
- /* AGC control 2 */
- rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
- rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
-
- /* RX AFE control 1 */
- rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbal = ROFDM0_XARXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbal = ROFDM0_XBRXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbal = ROFDM0_XCRXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbal = ROFDM0_XDRXIQIMBALANCE;
-
- /* RX AFE control 1 */
- rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
- rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
- rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
- rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
-
- /* Tx AFE control 1 */
- rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbal = ROFDM0_XATXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbal = ROFDM0_XBTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbal = ROFDM0_XCTXIQIMBALANCE;
- rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbal = ROFDM0_XDTXIQIMBALANCE;
-
- /* Tx AFE control 2 */
- rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
- rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
- rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
- rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
-
- /* Transceiver LSSI Readback SI mode */
- rtlphy->phyreg_def[RF90_PATH_A].rf_rb = RFPGA0_XA_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rf_rb = RFPGA0_XB_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_C].rf_rb = RFPGA0_XC_LSSIREADBACK;
- rtlphy->phyreg_def[RF90_PATH_D].rf_rb = RFPGA0_XD_LSSIREADBACK;
-
- /* Transceiver LSSI Readback PI mode */
- rtlphy->phyreg_def[RF90_PATH_A].rf_rbpi = TRANSCEIVERA_HSPI_READBACK;
- rtlphy->phyreg_def[RF90_PATH_B].rf_rbpi = TRANSCEIVERB_HSPI_READBACK;
-}
-
static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
u8 configtype)
{
@@ -601,58 +352,6 @@ static bool _rtl92d_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
return true;
}
-static void _rtl92d_store_pwrindex_diffrate_offset(struct ieee80211_hw *hw,
- u32 regaddr, u32 bitmask,
- u32 data)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- int index;
-
- if (regaddr == RTXAGC_A_RATE18_06)
- index = 0;
- else if (regaddr == RTXAGC_A_RATE54_24)
- index = 1;
- else if (regaddr == RTXAGC_A_CCK1_MCS32)
- index = 6;
- else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00)
- index = 7;
- else if (regaddr == RTXAGC_A_MCS03_MCS00)
- index = 2;
- else if (regaddr == RTXAGC_A_MCS07_MCS04)
- index = 3;
- else if (regaddr == RTXAGC_A_MCS11_MCS08)
- index = 4;
- else if (regaddr == RTXAGC_A_MCS15_MCS12)
- index = 5;
- else if (regaddr == RTXAGC_B_RATE18_06)
- index = 8;
- else if (regaddr == RTXAGC_B_RATE54_24)
- index = 9;
- else if (regaddr == RTXAGC_B_CCK1_55_MCS32)
- index = 14;
- else if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff)
- index = 15;
- else if (regaddr == RTXAGC_B_MCS03_MCS00)
- index = 10;
- else if (regaddr == RTXAGC_B_MCS07_MCS04)
- index = 11;
- else if (regaddr == RTXAGC_B_MCS11_MCS08)
- index = 12;
- else if (regaddr == RTXAGC_B_MCS15_MCS12)
- index = 13;
- else
- return;
-
- rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index] = data;
- rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
- "MCSTxPowerLevelOriginalOffset[%d][%d] = 0x%x\n",
- rtlphy->pwrgroup_cnt, index,
- rtlphy->mcs_offset[rtlphy->pwrgroup_cnt][index]);
- if (index == 13)
- rtlphy->pwrgroup_cnt++;
-}
-
static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
u8 configtype)
{
@@ -666,7 +365,7 @@ static bool _rtl92d_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
if (configtype == BASEBAND_CONFIG_PHY_REG) {
for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
rtl_addr_delay(phy_regarray_table_pg[i]);
- _rtl92d_store_pwrindex_diffrate_offset(hw,
+ rtl92d_store_pwrindex_diffrate_offset(hw,
phy_regarray_table_pg[i],
phy_regarray_table_pg[i + 1],
phy_regarray_table_pg[i + 2]);
@@ -726,7 +425,7 @@ bool rtl92d_phy_bb_config(struct ieee80211_hw *hw)
u32 regvaldw;
u8 value;
- _rtl92d_phy_init_bb_rf_register_definition(hw);
+ rtl92d_phy_init_bb_rf_register_definition(hw);
regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
regval | BIT(13) | BIT(0) | BIT(1));
@@ -812,115 +511,6 @@ bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
return true;
}
-void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- rtlphy->default_initialgain[0] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
- rtlphy->default_initialgain[1] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
- rtlphy->default_initialgain[2] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
- rtlphy->default_initialgain[3] =
- (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
- rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x\n",
- rtlphy->default_initialgain[0],
- rtlphy->default_initialgain[1],
- rtlphy->default_initialgain[2],
- rtlphy->default_initialgain[3]);
- rtlphy->framesync = (u8)rtl_get_bbreg(hw, ROFDM0_RXDETECTOR3,
- MASKBYTE0);
- rtlphy->framesync_c34 = rtl_get_bbreg(hw, ROFDM0_RXDETECTOR2,
- MASKDWORD);
- rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
- "Default framesync (0x%x) = 0x%x\n",
- ROFDM0_RXDETECTOR3, rtlphy->framesync);
-}
-
-static void _rtl92d_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
- u8 *cckpowerlevel, u8 *ofdmpowerlevel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_hal *rtlhal = &(rtlpriv->rtlhal);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 index = (channel - 1);
-
- /* 1. CCK */
- if (rtlhal->current_bandtype == BAND_ON_2_4G) {
- /* RF-A */
- cckpowerlevel[RF90_PATH_A] =
- rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
- /* RF-B */
- cckpowerlevel[RF90_PATH_B] =
- rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
- } else {
- cckpowerlevel[RF90_PATH_A] = 0;
- cckpowerlevel[RF90_PATH_B] = 0;
- }
- /* 2. OFDM for 1S or 2S */
- if (rtlphy->rf_type == RF_1T2R || rtlphy->rf_type == RF_1T1R) {
- /* Read HT 40 OFDM TX power */
- ofdmpowerlevel[RF90_PATH_A] =
- rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
- ofdmpowerlevel[RF90_PATH_B] =
- rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
- } else if (rtlphy->rf_type == RF_2T2R) {
- /* Read HT 40 OFDM TX power */
- ofdmpowerlevel[RF90_PATH_A] =
- rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
- ofdmpowerlevel[RF90_PATH_B] =
- rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
- }
-}
-
-static void _rtl92d_ccxpower_index_check(struct ieee80211_hw *hw,
- u8 channel, u8 *cckpowerlevel, u8 *ofdmpowerlevel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
- rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
-}
-
-static u8 _rtl92c_phy_get_rightchnlplace(u8 chnl)
-{
- u8 place = chnl;
-
- if (chnl > 14) {
- for (place = 14; place < ARRAY_SIZE(channel5g); place++) {
- if (channel5g[place] == chnl) {
- place++;
- break;
- }
- }
- }
- return place;
-}
-
-void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
-{
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u8 cckpowerlevel[2], ofdmpowerlevel[2];
-
- if (!rtlefuse->txpwr_fromeprom)
- return;
- channel = _rtl92c_phy_get_rightchnlplace(channel);
- _rtl92d_get_txpower_index(hw, channel, &cckpowerlevel[0],
- &ofdmpowerlevel[0]);
- if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
- _rtl92d_ccxpower_index_check(hw, channel, &cckpowerlevel[0],
- &ofdmpowerlevel[0]);
- if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G)
- rtl92d_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
- rtl92d_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
-}
-
void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type)
{
@@ -1122,65 +712,6 @@ static void _rtl92d_phy_reload_imr_setting(struct ieee80211_hw *hw,
rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
-static void _rtl92d_phy_enable_rf_env(struct ieee80211_hw *hw,
- u8 rfpath, u32 *pu4_regval)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-
- rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
- /*----Store original RFENV control type----*/
- switch (rfpath) {
- case RF90_PATH_A:
- case RF90_PATH_C:
- *pu4_regval = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV);
- break;
- case RF90_PATH_B:
- case RF90_PATH_D:
- *pu4_regval =
- rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16);
- break;
- }
- /*----Set RF_ENV enable----*/
- rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
- udelay(1);
- /*----Set RF_ENV output high----*/
- rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
- udelay(1);
- /* Set bit number of Address and Data for RF register */
- /* Set 1 to 4 bits for 8255 */
- rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREADDRESSLENGTH, 0x0);
- udelay(1);
- /*Set 0 to 12 bits for 8255 */
- rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
- udelay(1);
- rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
-}
-
-static void _rtl92d_phy_restore_rf_env(struct ieee80211_hw *hw, u8 rfpath,
- u32 *pu4_regval)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
-
- rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "=====>\n");
- /*----Restore RFENV control type----*/
- switch (rfpath) {
- case RF90_PATH_A:
- case RF90_PATH_C:
- rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, *pu4_regval);
- break;
- case RF90_PATH_B:
- case RF90_PATH_D:
- rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16,
- *pu4_regval);
- break;
- }
- rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<=====\n");
-}
-
static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1221,8 +752,8 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
rtlhal->during_mac1init_radioa = true;
/* assume this case does not occur */
if (need_pwr_down)
- _rtl92d_phy_enable_rf_env(hw, path,
- &u4regvalue);
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
}
for (i = 0; i < RF_REG_NUM_FOR_C_CUT_5G; i++) {
if (i == 0 && (rtlhal->macphymode == DUALMAC_DUALPHY)) {
@@ -1253,7 +784,7 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
RFREG_OFFSET_MASK));
}
if (need_pwr_down)
- _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
if (rtlhal->during_mac1init_radioa)
rtl92d_phy_powerdown_anotherphy(hw, false);
if (channel < 149)
@@ -1313,8 +844,8 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
rtlhal->during_mac0init_radiob = true;
if (need_pwr_down)
- _rtl92d_phy_enable_rf_env(hw, path,
- &u4regvalue);
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
}
}
for (i = 0; i < RF_REG_NUM_FOR_C_CUT_2G; i++) {
@@ -1347,31 +878,13 @@ static void _rtl92d_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
RFREG_OFFSET_MASK,
rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
if (need_pwr_down)
- _rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
if (rtlhal->during_mac0init_radiob)
rtl92d_phy_powerdown_anotherphy(hw, true);
}
rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
-u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl)
-{
- u8 place;
-
- if (chnl > 14) {
- for (place = 14; place < ARRAY_SIZE(channel_all); place++) {
- if (channel_all[place] == chnl)
- return place - 13;
- }
- }
-
- return 0;
-}
-
-#define MAX_TOLERANCE 5
-#define IQK_DELAY_TIME 1 /* ms */
-#define MAX_TOLERANCE_92D 3
-
/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
static u8 _rtl92d_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb)
{
@@ -1636,30 +1149,6 @@ static u8 _rtl92d_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
return result;
}
-static void _rtl92d_phy_save_adda_registers(struct ieee80211_hw *hw,
- u32 *adda_reg, u32 *adda_backup,
- u32 regnum)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i;
-
- RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save ADDA parameters.\n");
- for (i = 0; i < regnum; i++)
- adda_backup[i] = rtl_get_bbreg(hw, adda_reg[i], MASKDWORD);
-}
-
-static void _rtl92d_phy_save_mac_registers(struct ieee80211_hw *hw,
- u32 *macreg, u32 *macbackup)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i;
-
- RTPRINT(rtlpriv, FINIT, INIT_IQK, "Save MAC parameters.\n");
- for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
- macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
- macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
-}
-
static void _rtl92d_phy_reload_adda_registers(struct ieee80211_hw *hw,
u32 *adda_reg, u32 *adda_backup,
u32 regnum)
@@ -1685,37 +1174,6 @@ static void _rtl92d_phy_reload_mac_registers(struct ieee80211_hw *hw,
rtl_write_byte(rtlpriv, macreg[i], macbackup[i]);
}
-static void _rtl92d_phy_path_adda_on(struct ieee80211_hw *hw,
- u32 *adda_reg, bool patha_on, bool is2t)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 pathon;
- u32 i;
-
- RTPRINT(rtlpriv, FINIT, INIT_IQK, "ADDA ON.\n");
- pathon = patha_on ? 0x04db25a4 : 0x0b1b25a4;
- if (patha_on)
- pathon = rtlpriv->rtlhal.interfaceindex == 0 ?
- 0x04db25a4 : 0x0b1b25a4;
- for (i = 0; i < IQK_ADDA_REG_NUM; i++)
- rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, pathon);
-}
-
-static void _rtl92d_phy_mac_setting_calibration(struct ieee80211_hw *hw,
- u32 *macreg, u32 *macbackup)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 i;
-
- RTPRINT(rtlpriv, FINIT, INIT_IQK, "MAC settings for Calibration.\n");
- rtl_write_byte(rtlpriv, macreg[0], 0x3F);
-
- for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
- rtl_write_byte(rtlpriv, macreg[i], (u8)(macbackup[i] &
- (~BIT(3))));
- rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
-}
-
static void _rtl92d_phy_patha_standby(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1772,14 +1230,16 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
is2t ? "2T2R" : "1T1R");
/* Save ADDA parameters, turn Path A ADDA on */
- _rtl92d_phy_save_adda_registers(hw, adda_reg,
- rtlphy->adda_backup, IQK_ADDA_REG_NUM);
- _rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
- rtlphy->iqk_mac_backup);
- _rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
- rtlphy->iqk_bb_backup, IQK_BB_REG_NUM);
- }
- _rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t);
+ rtl92d_phy_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+ rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ }
+ rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t);
if (t == 0)
rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
RFPGA0_XA_HSSIPARAMETER1, BIT(8));
@@ -1800,8 +1260,8 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
0x00010000);
}
/* MAC settings */
- _rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
- rtlphy->iqk_mac_backup);
+ rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
/* Page B init */
rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x0f600000);
if (is2t)
@@ -1841,7 +1301,7 @@ static void _rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
if (is2t) {
_rtl92d_phy_patha_standby(hw);
/* Turn Path B ADDA on */
- _rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
+ rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
for (i = 0; i < retrycount; i++) {
pathb_ok = _rtl92d_phy_pathb_iqk(hw);
if (pathb_ok == 0x03) {
@@ -1938,24 +1398,24 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
is2t ? "2T2R" : "1T1R");
/* Save ADDA parameters, turn Path A ADDA on */
- _rtl92d_phy_save_adda_registers(hw, adda_reg,
- rtlphy->adda_backup,
- IQK_ADDA_REG_NUM);
- _rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
- rtlphy->iqk_mac_backup);
+ rtl92d_phy_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+ rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
if (is2t)
- _rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
- rtlphy->iqk_bb_backup,
- IQK_BB_REG_NUM);
+ rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
else
- _rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
- rtlphy->iqk_bb_backup,
- IQK_BB_REG_NUM - 1);
+ rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM - 1);
}
- _rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t);
+ rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t);
/* MAC settings */
- _rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
- rtlphy->iqk_mac_backup);
+ rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
if (t == 0)
rtlphy->rfpi_enable = (u8) rtl_get_bbreg(hw,
RFPGA0_XA_HSSIPARAMETER1, BIT(8));
@@ -2002,7 +1462,7 @@ static void _rtl92d_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
if (is2t) {
/* _rtl92d_phy_patha_standby(hw); */
/* Turn Path B ADDA on */
- _rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
+ rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
pathb_ok = _rtl92d_phy_pathb_iqk_5g_normal(hw);
if (pathb_ok == 0x03) {
RTPRINT(rtlpriv, FINIT, INIT_IQK,
@@ -2401,56 +1861,6 @@ void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
}
-static u32 _rtl92d_phy_get_abs(u32 val1, u32 val2)
-{
- u32 ret;
-
- if (val1 >= val2)
- ret = val1 - val2;
- else
- ret = val2 - val1;
- return ret;
-}
-
-static bool _rtl92d_is_legal_5g_channel(struct ieee80211_hw *hw, u8 channel)
-{
-
- int i;
-
- for (i = 0; i < ARRAY_SIZE(channel5g); i++)
- if (channel == channel5g[i])
- return true;
- return false;
-}
-
-static void _rtl92d_phy_calc_curvindex(struct ieee80211_hw *hw,
- u32 *targetchnl, u32 * curvecount_val,
- bool is5g, u32 *curveindex)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 smallest_abs_val = 0xffffffff, u4tmp;
- u8 i, j;
- u8 chnl_num = is5g ? TARGET_CHNL_NUM_5G : TARGET_CHNL_NUM_2G;
-
- for (i = 0; i < chnl_num; i++) {
- if (is5g && !_rtl92d_is_legal_5g_channel(hw, i + 1))
- continue;
- curveindex[i] = 0;
- for (j = 0; j < (CV_CURVE_CNT * 2); j++) {
- u4tmp = _rtl92d_phy_get_abs(targetchnl[i],
- curvecount_val[j]);
-
- if (u4tmp < smallest_abs_val) {
- curveindex[i] = j;
- smallest_abs_val = u4tmp;
- }
- }
- smallest_abs_val = 0xffffffff;
- RTPRINT(rtlpriv, FINIT, INIT_IQK, "curveindex[%d] = %x\n",
- i, curveindex[i]);
- }
-}
-
static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
u8 channel)
{
@@ -2477,12 +1887,12 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
rtlpriv->rtlhal.during_mac1init_radioa = true;
/* assume this case does not occur */
if (bneed_powerdown_radio)
- _rtl92d_phy_enable_rf_env(hw, erfpath,
- &u4regvalue);
+ rtl92d_phy_enable_rf_env(hw, erfpath,
+ &u4regvalue);
}
rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp);
if (bneed_powerdown_radio)
- _rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
+ rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
if (rtlpriv->rtlhal.during_mac1init_radioa)
rtl92d_phy_powerdown_anotherphy(hw, false);
} else if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) {
@@ -2495,15 +1905,15 @@ static void _rtl92d_phy_reload_lck_setting(struct ieee80211_hw *hw,
rtl92d_phy_enable_anotherphy(hw, true);
rtlpriv->rtlhal.during_mac0init_radiob = true;
if (bneed_powerdown_radio)
- _rtl92d_phy_enable_rf_env(hw, erfpath,
- &u4regvalue);
+ rtl92d_phy_enable_rf_env(hw, erfpath,
+ &u4regvalue);
}
rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp);
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
rtl_get_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800));
if (bneed_powerdown_radio)
- _rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
+ rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
if (rtlpriv->rtlhal.during_mac0init_radiob)
rtl92d_phy_powerdown_anotherphy(hw, true);
}
@@ -2588,13 +1998,13 @@ static void _rtl92d_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
readval2);
}
if (index == 0 && rtlhal->interfaceindex == 0)
- _rtl92d_phy_calc_curvindex(hw, targetchnl_5g,
- curvecount_val,
- true, curveindex_5g);
+ rtl92d_phy_calc_curvindex(hw, targetchnl_5g,
+ curvecount_val,
+ true, curveindex_5g);
else
- _rtl92d_phy_calc_curvindex(hw, targetchnl_2g,
- curvecount_val,
- false, curveindex_2g);
+ rtl92d_phy_calc_curvindex(hw, targetchnl_2g,
+ curvecount_val,
+ false, curveindex_2g);
/* switch CV-curve control mode */
rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
BIT(17), 0x1);
@@ -2622,7 +2032,7 @@ static void _rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
_rtl92d_phy_lc_calibrate_sw(hw, is2t);
}
-void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw)
+void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &(rtlpriv->phy);
@@ -2638,12 +2048,9 @@ void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw)
RTPRINT(rtlpriv, FINIT, INIT_IQK,
"LCK:Start!!! currentband %x delay %d ms\n",
rtlhal->current_bandtype, timecount);
- if (IS_92D_SINGLEPHY(rtlhal->version)) {
- _rtl92d_phy_lc_calibrate(hw, true);
- } else {
- /* For 1T1R */
- _rtl92d_phy_lc_calibrate(hw, false);
- }
+
+ _rtl92d_phy_lc_calibrate(hw, is2t);
+
rtlphy->lck_inprogress = false;
RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n");
}
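
The hunk above pushes the 1T/2T decision out of rtl92d_phy_lc_calibrate() and into its callers via the new is2t argument. A minimal caller sketch (the wrapper name is illustrative; the IS_92D_SINGLEPHY() test is the one the removed branch used):

static void example_request_lck(struct ieee80211_hw *hw)
{
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));

	/* single-PHY variants drive both RF paths, so calibrate as 2T2R */
	rtl92d_phy_lc_calibrate(hw, IS_92D_SINGLEPHY(rtlhal->version));
}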
@@ -2674,30 +2081,6 @@ static bool _rtl92d_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
return true;
}
-void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u8 i;
-
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "settings regs %zu default regs %d\n",
- ARRAY_SIZE(rtlphy->iqk_matrix),
- IQK_MATRIX_REG_NUM);
- /* 0xe94, 0xe9c, 0xea4, 0xeac, 0xeb4, 0xebc, 0xec4, 0xecc */
- for (i = 0; i < IQK_MATRIX_SETTINGS_NUM; i++) {
- rtlphy->iqk_matrix[i].value[0][0] = 0x100;
- rtlphy->iqk_matrix[i].value[0][2] = 0x100;
- rtlphy->iqk_matrix[i].value[0][4] = 0x100;
- rtlphy->iqk_matrix[i].value[0][6] = 0x100;
- rtlphy->iqk_matrix[i].value[0][1] = 0x0;
- rtlphy->iqk_matrix[i].value[0][3] = 0x0;
- rtlphy->iqk_matrix[i].value[0][5] = 0x0;
- rtlphy->iqk_matrix[i].value[0][7] = 0x0;
- rtlphy->iqk_matrix[i].iqk_done = false;
- }
-}
-
static bool _rtl92d_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
u8 channel, u8 *stage, u8 *step,
u32 *delay)
@@ -2891,74 +2274,6 @@ u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw)
return 1;
}
-static void rtl92d_phy_set_io(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct dig_t *de_digtable = &rtlpriv->dm_digtable;
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
- "--->Cmd(%#x), set_io_inprogress(%d)\n",
- rtlphy->current_io_type, rtlphy->set_io_inprogress);
- switch (rtlphy->current_io_type) {
- case IO_CMD_RESUME_DM_BY_SCAN:
- de_digtable->cur_igvalue = rtlphy->initgain_backup.xaagccore1;
- rtl92d_dm_write_dig(hw);
- rtl92d_phy_set_txpower_level(hw, rtlphy->current_channel);
- break;
- case IO_CMD_PAUSE_DM_BY_SCAN:
- rtlphy->initgain_backup.xaagccore1 = de_digtable->cur_igvalue;
- de_digtable->cur_igvalue = 0x37;
- rtl92d_dm_write_dig(hw);
- break;
- default:
- pr_err("switch case %#x not processed\n",
- rtlphy->current_io_type);
- break;
- }
- rtlphy->set_io_inprogress = false;
- rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<---(%#x)\n",
- rtlphy->current_io_type);
-}
-
-bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- bool postprocessing = false;
-
- rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
- "-->IO Cmd(%#x), set_io_inprogress(%d)\n",
- iotype, rtlphy->set_io_inprogress);
- do {
- switch (iotype) {
- case IO_CMD_RESUME_DM_BY_SCAN:
- rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Resume DM after scan\n");
- postprocessing = true;
- break;
- case IO_CMD_PAUSE_DM_BY_SCAN:
- rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE,
- "[IO CMD] Pause DM before scan\n");
- postprocessing = true;
- break;
- default:
- pr_err("switch case %#x not processed\n",
- iotype);
- break;
- }
- } while (false);
- if (postprocessing && !rtlphy->set_io_inprogress) {
- rtlphy->set_io_inprogress = true;
- rtlphy->current_io_type = iotype;
- } else {
- return false;
- }
- rtl92d_phy_set_io(hw);
- rtl_dbg(rtlpriv, COMP_CMD, DBG_TRACE, "<--IO Type(%#x)\n", iotype);
- return true;
-}
-
static void _rtl92d_phy_set_rfon(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -3141,100 +2456,6 @@ bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
return bresult;
}
-void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- u8 offset = REG_MAC_PHY_CTRL_NORMAL;
-
- switch (rtlhal->macphymode) {
- case DUALMAC_DUALPHY:
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode: DUALMAC_DUALPHY\n");
- rtl_write_byte(rtlpriv, offset, 0xF3);
- break;
- case SINGLEMAC_SINGLEPHY:
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode: SINGLEMAC_SINGLEPHY\n");
- rtl_write_byte(rtlpriv, offset, 0xF4);
- break;
- case DUALMAC_SINGLEPHY:
- rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
- "MacPhyMode: DUALMAC_SINGLEPHY\n");
- rtl_write_byte(rtlpriv, offset, 0xF1);
- break;
- }
-}
-
-void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
-
- switch (rtlhal->macphymode) {
- case DUALMAC_SINGLEPHY:
- rtlphy->rf_type = RF_2T2R;
- rtlhal->version |= RF_TYPE_2T2R;
- rtlhal->bandset = BAND_ON_BOTH;
- rtlhal->current_bandtype = BAND_ON_2_4G;
- break;
-
- case SINGLEMAC_SINGLEPHY:
- rtlphy->rf_type = RF_2T2R;
- rtlhal->version |= RF_TYPE_2T2R;
- rtlhal->bandset = BAND_ON_BOTH;
- rtlhal->current_bandtype = BAND_ON_2_4G;
- break;
-
- case DUALMAC_DUALPHY:
- rtlphy->rf_type = RF_1T1R;
- rtlhal->version &= RF_TYPE_1T1R;
- /* Now we let MAC0 run on 5G band. */
- if (rtlhal->interfaceindex == 0) {
- rtlhal->bandset = BAND_ON_5G;
- rtlhal->current_bandtype = BAND_ON_5G;
- } else {
- rtlhal->bandset = BAND_ON_2_4G;
- rtlhal->current_bandtype = BAND_ON_2_4G;
- }
- break;
- default:
- break;
- }
-}
-
-u8 rtl92d_get_chnlgroup_fromarray(u8 chnl)
-{
- u8 group;
-
- if (channel_all[chnl] <= 3)
- group = 0;
- else if (channel_all[chnl] <= 9)
- group = 1;
- else if (channel_all[chnl] <= 14)
- group = 2;
- else if (channel_all[chnl] <= 44)
- group = 3;
- else if (channel_all[chnl] <= 54)
- group = 4;
- else if (channel_all[chnl] <= 64)
- group = 5;
- else if (channel_all[chnl] <= 112)
- group = 6;
- else if (channel_all[chnl] <= 126)
- group = 7;
- else if (channel_all[chnl] <= 140)
- group = 8;
- else if (channel_all[chnl] <= 153)
- group = 9;
- else if (channel_all[chnl] <= 159)
- group = 10;
- else
- group = 11;
- return group;
-}
-
void rtl92d_phy_set_poweron(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -3286,31 +2507,6 @@ void rtl92d_phy_set_poweron(struct ieee80211_hw *hw)
}
}
-void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- switch (rtlpriv->rtlhal.macphymode) {
- case DUALMAC_DUALPHY:
- rtl_write_byte(rtlpriv, REG_DMC, 0x0);
- rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08);
- rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff);
- break;
- case DUALMAC_SINGLEPHY:
- rtl_write_byte(rtlpriv, REG_DMC, 0xf8);
- rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x08);
- rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x13ff);
- break;
- case SINGLEMAC_SINGLEPHY:
- rtl_write_byte(rtlpriv, REG_DMC, 0x0);
- rtl_write_byte(rtlpriv, REG_RX_PKT_LIMIT, 0x10);
- rtl_write_word(rtlpriv, (REG_TRXFF_BNDY + 2), 0x27FF);
- break;
- default:
- break;
- }
-}
-
void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
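
For reference, the RF register helpers deleted from this file follow rtlwifi's usual read-modify-write pattern around calculate_bit_shift(); a condensed sketch of that core, assuming the full LSSI 3-wire sequence is now supplied via ../rtl8192d/phy_common.h:

static void sketch_set_rf_field(struct ieee80211_hw *hw, enum radio_path path,
				u32 regaddr, u32 bitmask, u32 data)
{
	u32 orig, shift;

	if (bitmask != RFREG_OFFSET_MASK) {
		/* partial-field write: fetch, mask, shift the new value in */
		orig = _rtl92d_phy_rf_serial_read(hw, path, regaddr);
		shift = calculate_bit_shift(bitmask);
		data = (orig & ~bitmask) | (data << shift);
	}
	_rtl92d_phy_rf_serial_write(hw, path, regaddr, data);
}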
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
index 8d07c783a023..bbe9ef77225e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/phy.h
@@ -10,11 +10,8 @@
#define MAX_DOZE_WAITING_TIMES_9x 64
-#define RT_CANNOT_IO(hw) false
#define HIGHPOWER_RADIOA_ARRAYLEN 22
-#define MAX_TOLERANCE 5
-
#define APK_BB_REG_NUM 5
#define APK_AFE_REG_NUM 16
#define APK_CURVE_REG_NUM 4
@@ -27,12 +24,8 @@
#define RESET_CNT_LIMIT 3
#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM 10
#define IQK_BB_REG_NUM_test 6
#define IQK_MAC_REG_NUM 4
-#define RX_INDEX_MAPPING_NUM 15
-
-#define IQK_DELAY_TIME 1
#define CT_OFFSET_MAC_ADDR 0X16
@@ -68,80 +61,30 @@ struct swchnlcmd {
u32 msdelay;
};
-enum baseband_config_type {
- BASEBAND_CONFIG_PHY_REG = 0,
- BASEBAND_CONFIG_AGC_TAB = 1,
-};
-
-enum rf_content {
- radioa_txt = 0,
- radiob_txt = 1,
- radioc_txt = 2,
- radiod_txt = 3
-};
-
-static inline void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
- unsigned long *flag)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->rtlhal.interfaceindex == 1)
- spin_lock_irqsave(&rtlpriv->locks.cck_and_rw_pagea_lock, *flag);
-}
-
-static inline void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
- unsigned long *flag)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->rtlhal.interfaceindex == 1)
- spin_unlock_irqrestore(&rtlpriv->locks.cck_and_rw_pagea_lock,
- *flag);
-}
-
u32 rtl92d_phy_query_bb_reg(struct ieee80211_hw *hw,
u32 regaddr, u32 bitmask);
void rtl92d_phy_set_bb_reg(struct ieee80211_hw *hw,
u32 regaddr, u32 bitmask, u32 data);
-u32 rtl92d_phy_query_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask);
-void rtl92d_phy_set_rf_reg(struct ieee80211_hw *hw,
- enum radio_path rfpath, u32 regaddr,
- u32 bitmask, u32 data);
bool rtl92d_phy_mac_config(struct ieee80211_hw *hw);
bool rtl92d_phy_bb_config(struct ieee80211_hw *hw);
bool rtl92d_phy_rf_config(struct ieee80211_hw *hw);
bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
enum radio_path rfpath);
-void rtl92d_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
-void rtl92d_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
void rtl92d_phy_set_bw_mode(struct ieee80211_hw *hw,
enum nl80211_channel_type ch_type);
u8 rtl92d_phy_sw_chnl(struct ieee80211_hw *hw);
bool rtl92d_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
enum rf_content content,
enum radio_path rfpath);
-bool rtl92d_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
bool rtl92d_phy_set_rf_power_state(struct ieee80211_hw *hw,
enum rf_pwrstate rfpwr_state);
-void rtl92d_phy_config_macphymode(struct ieee80211_hw *hw);
-void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw);
-u8 rtl92d_get_chnlgroup_fromarray(u8 chnl);
void rtl92d_phy_set_poweron(struct ieee80211_hw *hw);
-void rtl92d_phy_config_maccoexist_rfpage(struct ieee80211_hw *hw);
bool rtl92d_phy_check_poweroff(struct ieee80211_hw *hw);
-void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw);
+void rtl92d_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
void rtl92d_update_bbrf_configuration(struct ieee80211_hw *hw);
void rtl92d_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
void rtl92d_phy_iq_calibrate(struct ieee80211_hw *hw);
-void rtl92d_phy_reset_iqk_result(struct ieee80211_hw *hw);
-void rtl92d_release_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
- unsigned long *flag);
-void rtl92d_acquire_cckandrw_pagea_ctl(struct ieee80211_hw *hw,
- unsigned long *flag);
-u8 rtl92d_get_rightchnlplace_for_iqk(u8 chnl);
void rtl92d_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
#endif
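
The cck_and_rw_pagea_ctl inlines dropped above only take the lock on interface index 1 (the 2.4 GHz MAC of a dual-MAC part); a usage sketch, assuming the shared rtl8192d header keeps the same pair, with a placeholder page-A write:

unsigned long flag = 0;

rtl92d_acquire_cckandrw_pagea_ctl(hw, &flag);
rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE0, 0x83);	/* placeholder register/value */
rtl92d_release_cckandrw_pagea_ctl(hw, &flag);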
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
index 83787fd293de..eb7d8b070cc7 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.c
@@ -2,383 +2,14 @@
/* Copyright(c) 2009-2012 Realtek Corporation.*/
#include "../wifi.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/phy_common.h"
#include "phy.h"
#include "rf.h"
#include "dm.h"
#include "hw.h"
-void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u8 rfpath;
-
- switch (bandwidth) {
- case HT_CHANNEL_WIDTH_20:
- for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
- rtlphy->rfreg_chnlval[rfpath] = ((rtlphy->rfreg_chnlval
- [rfpath] & 0xfffff3ff) | 0x0400);
- rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) |
- BIT(11), 0x01);
-
- rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
- "20M RF 0x18 = 0x%x\n",
- rtlphy->rfreg_chnlval[rfpath]);
- }
-
- break;
- case HT_CHANNEL_WIDTH_20_40:
- for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
- rtlphy->rfreg_chnlval[rfpath] =
- ((rtlphy->rfreg_chnlval[rfpath] & 0xfffff3ff));
- rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(10) | BIT(11),
- 0x00);
- rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
- "40M RF 0x18 = 0x%x\n",
- rtlphy->rfreg_chnlval[rfpath]);
- }
- break;
- default:
- pr_err("unknown bandwidth: %#X\n", bandwidth);
- break;
- }
-}
-
-void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u32 tx_agc[2] = {0, 0}, tmpval;
- bool turbo_scanoff = false;
- u8 idx1, idx2;
- u8 *ptr;
-
- if (rtlefuse->eeprom_regulatory != 0)
- turbo_scanoff = true;
- if (mac->act_scanning) {
- tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
- tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
- if (turbo_scanoff) {
- for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
- tx_agc[idx1] = ppowerlevel[idx1] |
- (ppowerlevel[idx1] << 8) |
- (ppowerlevel[idx1] << 16) |
- (ppowerlevel[idx1] << 24);
- }
- }
- } else {
- for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
- tx_agc[idx1] = ppowerlevel[idx1] |
- (ppowerlevel[idx1] << 8) |
- (ppowerlevel[idx1] << 16) |
- (ppowerlevel[idx1] << 24);
- }
- if (rtlefuse->eeprom_regulatory == 0) {
- tmpval = (rtlphy->mcs_offset[0][6]) +
- (rtlphy->mcs_offset[0][7] << 8);
- tx_agc[RF90_PATH_A] += tmpval;
- tmpval = (rtlphy->mcs_offset[0][14]) +
- (rtlphy->mcs_offset[0][15] << 24);
- tx_agc[RF90_PATH_B] += tmpval;
- }
- }
-
- for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
- ptr = (u8 *) (&(tx_agc[idx1]));
- for (idx2 = 0; idx2 < 4; idx2++) {
- if (*ptr > RF6052_MAX_TX_PWR)
- *ptr = RF6052_MAX_TX_PWR;
- ptr++;
- }
- }
-
- tmpval = tx_agc[RF90_PATH_A] & 0xff;
- rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n",
- tmpval, RTXAGC_A_CCK1_MCS32);
- tmpval = tx_agc[RF90_PATH_A] >> 8;
- rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n",
- tmpval, RTXAGC_B_CCK11_A_CCK2_11);
- tmpval = tx_agc[RF90_PATH_B] >> 24;
- rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n",
- tmpval, RTXAGC_B_CCK11_A_CCK2_11);
- tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
- rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n",
- tmpval, RTXAGC_B_CCK1_55_MCS32);
-}
-
-static void _rtl92d_phy_get_power_base(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel,
- u32 *ofdmbase, u32 *mcsbase)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u32 powerbase0, powerbase1;
- u8 legacy_pwrdiff, ht20_pwrdiff;
- u8 i, powerlevel[2];
-
- for (i = 0; i < 2; i++) {
- powerlevel[i] = ppowerlevel[i];
- legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
- powerbase0 = powerlevel[i] + legacy_pwrdiff;
- powerbase0 = (powerbase0 << 24) | (powerbase0 << 16) |
- (powerbase0 << 8) | powerbase0;
- *(ofdmbase + i) = powerbase0;
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- " [OFDM power base index rf(%c) = 0x%x]\n",
- i == 0 ? 'A' : 'B', *(ofdmbase + i));
- }
-
- for (i = 0; i < 2; i++) {
- if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
- ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
- powerlevel[i] += ht20_pwrdiff;
- }
- powerbase1 = powerlevel[i];
- powerbase1 = (powerbase1 << 24) | (powerbase1 << 16) |
- (powerbase1 << 8) | powerbase1;
- *(mcsbase + i) = powerbase1;
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- " [MCS power base index rf(%c) = 0x%x]\n",
- i == 0 ? 'A' : 'B', *(mcsbase + i));
- }
-}
-
-static u8 _rtl92d_phy_get_chnlgroup_bypg(u8 chnlindex)
-{
- u8 group;
- u8 channel_info[59] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
- 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58,
- 60, 62, 64, 100, 102, 104, 106, 108, 110, 112,
- 114, 116, 118, 120, 122, 124, 126, 128, 130, 132,
- 134, 136, 138, 140, 149, 151, 153, 155, 157, 159,
- 161, 163, 165
- };
-
- if (channel_info[chnlindex] <= 3) /* Channel 1-3 */
- group = 0;
- else if (channel_info[chnlindex] <= 9) /* Channel 4-9 */
- group = 1;
- else if (channel_info[chnlindex] <= 14) /* Channel 10-14 */
- group = 2;
- else if (channel_info[chnlindex] <= 64)
- group = 6;
- else if (channel_info[chnlindex] <= 140)
- group = 7;
- else
- group = 8;
- return group;
-}
-
-static void _rtl92d_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
- u8 channel, u8 index,
- u32 *powerbase0,
- u32 *powerbase1,
- u32 *p_outwriteval)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 i, chnlgroup = 0, pwr_diff_limit[4];
- u32 writeval = 0, customer_limit, rf;
-
- for (rf = 0; rf < 2; rf++) {
- switch (rtlefuse->eeprom_regulatory) {
- case 0:
- chnlgroup = 0;
- writeval = rtlphy->mcs_offset
- [chnlgroup][index +
- (rf ? 8 : 0)] + ((index < 2) ?
- powerbase0[rf] :
- powerbase1[rf]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "RTK better performance, writeval(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeval);
- break;
- case 1:
- if (rtlphy->pwrgroup_cnt == 1)
- chnlgroup = 0;
- if (rtlphy->pwrgroup_cnt >= MAX_PG_GROUP) {
- chnlgroup = _rtl92d_phy_get_chnlgroup_bypg(
- channel - 1);
- if (rtlphy->current_chan_bw ==
- HT_CHANNEL_WIDTH_20)
- chnlgroup++;
- else
- chnlgroup += 4;
- writeval = rtlphy->mcs_offset
- [chnlgroup][index +
- (rf ? 8 : 0)] + ((index < 2) ?
- powerbase0[rf] :
- powerbase1[rf]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Realtek regulatory, 20MHz, writeval(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeval);
- }
- break;
- case 2:
- writeval = ((index < 2) ? powerbase0[rf] :
- powerbase1[rf]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Better regulatory, writeval(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeval);
- break;
- case 3:
- chnlgroup = 0;
- if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "customer's limit, 40MHz rf(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B',
- rtlefuse->pwrgroup_ht40[rf]
- [channel - 1]);
- } else {
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "customer's limit, 20MHz rf(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B',
- rtlefuse->pwrgroup_ht20[rf]
- [channel - 1]);
- }
- for (i = 0; i < 4; i++) {
- pwr_diff_limit[i] = (u8)((rtlphy->mcs_offset
- [chnlgroup][index + (rf ? 8 : 0)] &
- (0x7f << (i * 8))) >> (i * 8));
- if (rtlphy->current_chan_bw ==
- HT_CHANNEL_WIDTH_20_40) {
- if (pwr_diff_limit[i] >
- rtlefuse->pwrgroup_ht40[rf]
- [channel - 1])
- pwr_diff_limit[i] =
- rtlefuse->pwrgroup_ht40
- [rf][channel - 1];
- } else {
- if (pwr_diff_limit[i] >
- rtlefuse->pwrgroup_ht20[rf][
- channel - 1])
- pwr_diff_limit[i] =
- rtlefuse->pwrgroup_ht20[rf]
- [channel - 1];
- }
- }
- customer_limit = (pwr_diff_limit[3] << 24) |
- (pwr_diff_limit[2] << 16) |
- (pwr_diff_limit[1] << 8) |
- (pwr_diff_limit[0]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Customer's limit rf(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', customer_limit);
- writeval = customer_limit + ((index < 2) ?
- powerbase0[rf] : powerbase1[rf]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Customer, writeval rf(%c)= 0x%x\n",
- rf == 0 ? 'A' : 'B', writeval);
- break;
- default:
- chnlgroup = 0;
- writeval = rtlphy->mcs_offset[chnlgroup][index +
- (rf ? 8 : 0)] + ((index < 2) ?
- powerbase0[rf] : powerbase1[rf]);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "RTK better performance, writeval rf(%c) = 0x%x\n",
- rf == 0 ? 'A' : 'B', writeval);
- break;
- }
- *(p_outwriteval + rf) = writeval;
- }
-}
-
-static void _rtl92d_write_ofdm_power_reg(struct ieee80211_hw *hw,
- u8 index, u32 *pvalue)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- static u16 regoffset_a[6] = {
- RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
- RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
- RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
- };
- static u16 regoffset_b[6] = {
- RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
- RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
- RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
- };
- u8 i, rf, pwr_val[4];
- u32 writeval;
- u16 regoffset;
-
- for (rf = 0; rf < 2; rf++) {
- writeval = pvalue[rf];
- for (i = 0; i < 4; i++) {
- pwr_val[i] = (u8) ((writeval & (0x7f <<
- (i * 8))) >> (i * 8));
- if (pwr_val[i] > RF6052_MAX_TX_PWR)
- pwr_val[i] = RF6052_MAX_TX_PWR;
- }
- writeval = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
- (pwr_val[1] << 8) | pwr_val[0];
- if (rf == 0)
- regoffset = regoffset_a[index];
- else
- regoffset = regoffset_b[index];
- rtl_set_bbreg(hw, regoffset, MASKDWORD, writeval);
- RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
- "Set 0x%x = %08x\n", regoffset, writeval);
- if (((get_rf_type(rtlphy) == RF_2T2R) &&
- (regoffset == RTXAGC_A_MCS15_MCS12 ||
- regoffset == RTXAGC_B_MCS15_MCS12)) ||
- ((get_rf_type(rtlphy) != RF_2T2R) &&
- (regoffset == RTXAGC_A_MCS07_MCS04 ||
- regoffset == RTXAGC_B_MCS07_MCS04))) {
- writeval = pwr_val[3];
- if (regoffset == RTXAGC_A_MCS15_MCS12 ||
- regoffset == RTXAGC_A_MCS07_MCS04)
- regoffset = 0xc90;
- if (regoffset == RTXAGC_B_MCS15_MCS12 ||
- regoffset == RTXAGC_B_MCS07_MCS04)
- regoffset = 0xc98;
- for (i = 0; i < 3; i++) {
- if (i != 2)
- writeval = (writeval > 8) ?
- (writeval - 8) : 0;
- else
- writeval = (writeval > 6) ?
- (writeval - 6) : 0;
- rtl_write_byte(rtlpriv, (u32) (regoffset + i),
- (u8) writeval);
- }
- }
- }
-}
-
-void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel)
-{
- u32 writeval[2], powerbase0[2], powerbase1[2];
- u8 index;
-
- _rtl92d_phy_get_power_base(hw, ppowerlevel, channel,
- &powerbase0[0], &powerbase1[0]);
- for (index = 0; index < 6; index++) {
- _rtl92d_get_txpower_writeval_by_regulatory(hw,
- channel, index, &powerbase0[0],
- &powerbase1[0], &writeval[0]);
- _rtl92d_write_ofdm_power_reg(hw, index, &writeval[0]);
- }
-}
-
bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
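
All of the TXAGC writers deleted from rf.c clamp every per-rate power byte to RF6052_MAX_TX_PWR before the value reaches hardware; the clamp itself, lifted into a standalone sketch:

static u32 sketch_clamp_tx_agc(u32 tx_agc)
{
	u8 *ptr = (u8 *)&tx_agc;
	int i;

	/* one power byte per rate packed into the dword */
	for (i = 0; i < 4; i++) {
		if (ptr[i] > RF6052_MAX_TX_PWR)
			ptr[i] = RF6052_MAX_TX_PWR;
	}
	return tx_agc;
}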
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
index 4e646cc9ebc0..c097d90cc99c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/rf.h
@@ -4,11 +4,6 @@
#ifndef __RTL92D_RF_H__
#define __RTL92D_RF_H__
-void rtl92d_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth);
-void rtl92d_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel);
-void rtl92d_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
- u8 *ppowerlevel, u8 channel);
bool rtl92d_phy_rf6052_config(struct ieee80211_hw *hw);
bool rtl92d_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
void rtl92d_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
index afd685ed460a..e36e4aeb9a95 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
@@ -5,8 +5,12 @@
#include "../core.h"
#include "../pci.h"
#include "../base.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/dm_common.h"
+#include "../rtl8192d/hw_common.h"
+#include "../rtl8192d/phy_common.h"
+#include "../rtl8192d/trx_common.h"
#include "phy.h"
#include "dm.h"
#include "hw.h"
@@ -28,7 +32,7 @@ static void rtl92d_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Always Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
* */
rtlpci->const_pci_aspm = 3;
@@ -183,7 +187,7 @@ static void rtl92d_deinit_sw_vars(struct ieee80211_hw *hw)
static struct rtl_hal_ops rtl8192de_hal_ops = {
.init_sw_vars = rtl92d_init_sw_vars,
.deinit_sw_vars = rtl92d_deinit_sw_vars,
- .read_eeprom_info = rtl92de_read_eeprom_info,
+ .read_eeprom_info = rtl92d_read_eeprom_info,
.interrupt_recognized = rtl92de_interrupt_recognized,
.hw_init = rtl92de_hw_init,
.hw_disable = rtl92de_card_disable,
@@ -193,36 +197,38 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
.disable_interrupt = rtl92de_disable_interrupt,
.set_network_type = rtl92de_set_network_type,
.set_chk_bssid = rtl92de_set_check_bssid,
- .set_qos = rtl92de_set_qos,
+ .set_qos = rtl92d_set_qos,
.set_bcn_reg = rtl92de_set_beacon_related_registers,
.set_bcn_intv = rtl92de_set_beacon_interval,
.update_interrupt_mask = rtl92de_update_interrupt_mask,
.get_hw_reg = rtl92de_get_hw_reg,
.set_hw_reg = rtl92de_set_hw_reg,
- .update_rate_tbl = rtl92de_update_hal_rate_tbl,
+ .update_rate_tbl = rtl92d_update_hal_rate_tbl,
.fill_tx_desc = rtl92de_tx_fill_desc,
.fill_tx_cmddesc = rtl92de_tx_fill_cmddesc,
- .query_rx_desc = rtl92de_rx_query_desc,
- .set_channel_access = rtl92de_update_channel_access_setting,
- .radio_onoff_checking = rtl92de_gpio_radio_on_off_checking,
+ .query_rx_desc = rtl92d_rx_query_desc,
+ .set_channel_access = rtl92d_update_channel_access_setting,
+ .radio_onoff_checking = rtl92d_gpio_radio_on_off_checking,
.set_bw_mode = rtl92d_phy_set_bw_mode,
.switch_channel = rtl92d_phy_sw_chnl,
- .dm_watchdog = rtl92d_dm_watchdog,
+ .dm_watchdog = rtl92de_dm_watchdog,
.scan_operation_backup = rtl_phy_scan_operation_backup,
.set_rf_power_state = rtl92d_phy_set_rf_power_state,
.led_control = rtl92de_led_control,
- .set_desc = rtl92de_set_desc,
- .get_desc = rtl92de_get_desc,
+ .set_desc = rtl92d_set_desc,
+ .get_desc = rtl92d_get_desc,
.is_tx_desc_closed = rtl92de_is_tx_desc_closed,
.tx_polling = rtl92de_tx_polling,
- .enable_hw_sec = rtl92de_enable_hw_security_config,
- .set_key = rtl92de_set_key,
+ .enable_hw_sec = rtl92d_enable_hw_security_config,
+ .set_key = rtl92d_set_key,
.get_bbreg = rtl92d_phy_query_bb_reg,
.set_bbreg = rtl92d_phy_set_bb_reg,
.get_rfreg = rtl92d_phy_query_rf_reg,
.set_rfreg = rtl92d_phy_set_rf_reg,
.linked_set_reg = rtl92d_linked_set_reg,
.get_btc_status = rtl_btc_status_false,
+ .phy_iq_calibrate = rtl92d_phy_iq_calibrate,
+ .phy_lc_calibrate = rtl92d_phy_lc_calibrate,
};
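
With the ops table above now exporting phy_iq_calibrate and phy_lc_calibrate, core code can reach the calibration routines without driver-specific calls; a hedged sketch of such a dispatch, assuming the ops member takes the same (hw, is2t) pair as the driver function:

static void example_dispatch_lck(struct rtl_priv *rtlpriv,
				 struct ieee80211_hw *hw)
{
	if (rtlpriv->cfg->ops->phy_lc_calibrate)
		rtlpriv->cfg->ops->phy_lc_calibrate(hw, true);	/* 2T2R case */
}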
static struct rtl_mod_params rtl92de_mod_params = {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
index 192982ec8152..91bf399c9ef1 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c
@@ -5,8 +5,10 @@
#include "../pci.h"
#include "../base.h"
#include "../stats.h"
-#include "reg.h"
-#include "def.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/phy_common.h"
+#include "../rtl8192d/trx_common.h"
#include "phy.h"
#include "trx.h"
#include "led.h"
@@ -23,434 +25,6 @@ static u8 _rtl92de_map_hwqueue_to_fwqueue(struct sk_buff *skb, u8 hw_queue)
return skb->priority;
}
-static long _rtl92de_translate_todbm(struct ieee80211_hw *hw,
- u8 signal_strength_index)
-{
- long signal_power;
-
- signal_power = (long)((signal_strength_index + 1) >> 1);
- signal_power -= 95;
- return signal_power;
-}
-
-static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw,
- struct rtl_stats *pstats,
- struct rx_desc_92d *pdesc,
- struct rx_fwinfo_92d *p_drvinfo,
- bool packet_match_bssid,
- bool packet_toself,
- bool packet_beacon)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
- struct phy_sts_cck_8192d *cck_buf;
- s8 rx_pwr_all, rx_pwr[4];
- u8 rf_rx_num = 0, evm, pwdb_all;
- u8 i, max_spatial_stream;
- u32 rssi, total_rssi = 0;
- bool is_cck_rate;
-
- is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc->rxmcs);
- pstats->packet_matchbssid = packet_match_bssid;
- pstats->packet_toself = packet_toself;
- pstats->packet_beacon = packet_beacon;
- pstats->is_cck = is_cck_rate;
- pstats->rx_mimo_sig_qual[0] = -1;
- pstats->rx_mimo_sig_qual[1] = -1;
-
- if (is_cck_rate) {
- u8 report, cck_highpwr;
- cck_buf = (struct phy_sts_cck_8192d *)p_drvinfo;
- if (ppsc->rfpwr_state == ERFON)
- cck_highpwr = rtlphy->cck_high_power;
- else
- cck_highpwr = false;
- if (!cck_highpwr) {
- u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
- report = cck_buf->cck_agc_rpt & 0xc0;
- report = report >> 6;
- switch (report) {
- case 0x3:
- rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
- break;
- case 0x2:
- rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
- break;
- case 0x1:
- rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
- break;
- case 0x0:
- rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
- break;
- }
- } else {
- u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
- report = p_drvinfo->cfosho[0] & 0x60;
- report = report >> 5;
- switch (report) {
- case 0x3:
- rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
- break;
- case 0x2:
- rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
- break;
- case 0x1:
- rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
- break;
- case 0x0:
- rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
- break;
- }
- }
- pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
- /* CCK gain is smaller than OFDM/MCS gain, */
- /* so we add gain diff by experiences, the val is 6 */
- pwdb_all += 6;
- if (pwdb_all > 100)
- pwdb_all = 100;
- /* modify the offset to make the same gain index with OFDM. */
- if (pwdb_all > 34 && pwdb_all <= 42)
- pwdb_all -= 2;
- else if (pwdb_all > 26 && pwdb_all <= 34)
- pwdb_all -= 6;
- else if (pwdb_all > 14 && pwdb_all <= 26)
- pwdb_all -= 8;
- else if (pwdb_all > 4 && pwdb_all <= 14)
- pwdb_all -= 4;
- pstats->rx_pwdb_all = pwdb_all;
- pstats->recvsignalpower = rx_pwr_all;
- if (packet_match_bssid) {
- u8 sq;
- if (pstats->rx_pwdb_all > 40) {
- sq = 100;
- } else {
- sq = cck_buf->sq_rpt;
- if (sq > 64)
- sq = 0;
- else if (sq < 20)
- sq = 100;
- else
- sq = ((64 - sq) * 100) / 44;
- }
- pstats->signalquality = sq;
- pstats->rx_mimo_sig_qual[0] = sq;
- pstats->rx_mimo_sig_qual[1] = -1;
- }
- } else {
- rtlpriv->dm.rfpath_rxenable[0] = true;
- rtlpriv->dm.rfpath_rxenable[1] = true;
- for (i = RF90_PATH_A; i < RF6052_MAX_PATH; i++) {
- if (rtlpriv->dm.rfpath_rxenable[i])
- rf_rx_num++;
- rx_pwr[i] = ((p_drvinfo->gain_trsw[i] & 0x3f) * 2)
- - 110;
- rssi = rtl_query_rxpwrpercentage(rx_pwr[i]);
- total_rssi += rssi;
- rtlpriv->stats.rx_snr_db[i] =
- (long)(p_drvinfo->rxsnr[i] / 2);
- if (packet_match_bssid)
- pstats->rx_mimo_signalstrength[i] = (u8) rssi;
- }
- rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 106;
- pwdb_all = rtl_query_rxpwrpercentage(rx_pwr_all);
- pstats->rx_pwdb_all = pwdb_all;
- pstats->rxpower = rx_pwr_all;
- pstats->recvsignalpower = rx_pwr_all;
- if (pdesc->rxht && pdesc->rxmcs >= DESC_RATEMCS8 &&
- pdesc->rxmcs <= DESC_RATEMCS15)
- max_spatial_stream = 2;
- else
- max_spatial_stream = 1;
- for (i = 0; i < max_spatial_stream; i++) {
- evm = rtl_evm_db_to_percentage(p_drvinfo->rxevm[i]);
- if (packet_match_bssid) {
- if (i == 0)
- pstats->signalquality =
- (u8)(evm & 0xff);
- pstats->rx_mimo_sig_qual[i] =
- (u8)(evm & 0xff);
- }
- }
- }
- if (is_cck_rate)
- pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
- pwdb_all));
- else if (rf_rx_num != 0)
- pstats->signalstrength = (u8)(rtl_signal_scale_mapping(hw,
- total_rssi /= rf_rx_num));
-}
-
-static void rtl92d_loop_over_paths(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_phy *rtlphy = &(rtlpriv->phy);
- u8 rfpath;
-
- for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
- rfpath++) {
- if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- pstats->rx_mimo_signalstrength[rfpath];
-
- }
- if (pstats->rx_mimo_signalstrength[rfpath] >
- rtlpriv->stats.rx_rssi_percentage[rfpath]) {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- rtlpriv->stats.rx_rssi_percentage[rfpath] + 1;
- } else {
- rtlpriv->stats.rx_rssi_percentage[rfpath] =
- ((rtlpriv->stats.rx_rssi_percentage[rfpath] *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_signalstrength[rfpath])) /
- (RX_SMOOTH_FACTOR);
- }
- }
-}
-
-static void _rtl92de_process_ui_rssi(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 last_rssi, tmpval;
-
- if (pstats->packet_toself || pstats->packet_beacon) {
- rtlpriv->stats.rssi_calculate_cnt++;
- if (rtlpriv->stats.ui_rssi.total_num++ >=
- PHY_RSSI_SLID_WIN_MAX) {
- rtlpriv->stats.ui_rssi.total_num =
- PHY_RSSI_SLID_WIN_MAX;
- last_rssi = rtlpriv->stats.ui_rssi.elements[
- rtlpriv->stats.ui_rssi.index];
- rtlpriv->stats.ui_rssi.total_val -= last_rssi;
- }
- rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
- rtlpriv->stats.ui_rssi.elements
- [rtlpriv->stats.ui_rssi.index++] =
- pstats->signalstrength;
- if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
- rtlpriv->stats.ui_rssi.index = 0;
- tmpval = rtlpriv->stats.ui_rssi.total_val /
- rtlpriv->stats.ui_rssi.total_num;
- rtlpriv->stats.signal_strength = _rtl92de_translate_todbm(hw,
- (u8) tmpval);
- pstats->rssi = rtlpriv->stats.signal_strength;
- }
- if (!pstats->is_cck && pstats->packet_toself)
- rtl92d_loop_over_paths(hw, pstats);
-}
-
-static void _rtl92de_update_rxsignalstatistics(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int weighting = 0;
-
- if (rtlpriv->stats.recv_signal_power == 0)
- rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
- if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
- weighting = 5;
- else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
- weighting = (-5);
- rtlpriv->stats.recv_signal_power = (rtlpriv->stats.recv_signal_power *
- 5 + pstats->recvsignalpower + weighting) / 6;
-}
-
-static void _rtl92de_process_pwdb(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- long undec_sm_pwdb;
-
- if (mac->opmode == NL80211_IFTYPE_ADHOC ||
- mac->opmode == NL80211_IFTYPE_AP)
- return;
- else
- undec_sm_pwdb = rtlpriv->dm.undec_sm_pwdb;
-
- if (pstats->packet_toself || pstats->packet_beacon) {
- if (undec_sm_pwdb < 0)
- undec_sm_pwdb = pstats->rx_pwdb_all;
- if (pstats->rx_pwdb_all > (u32) undec_sm_pwdb) {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- undec_sm_pwdb = undec_sm_pwdb + 1;
- } else {
- undec_sm_pwdb = (((undec_sm_pwdb) *
- (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
- }
- rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb;
- _rtl92de_update_rxsignalstatistics(hw, pstats);
- }
-}
-
-static void rtl92d_loop_over_streams(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- int stream;
-
- for (stream = 0; stream < 2; stream++) {
- if (pstats->rx_mimo_sig_qual[stream] != -1) {
- if (rtlpriv->stats.rx_evm_percentage[stream] == 0) {
- rtlpriv->stats.rx_evm_percentage[stream] =
- pstats->rx_mimo_sig_qual[stream];
- }
- rtlpriv->stats.rx_evm_percentage[stream] =
- ((rtlpriv->stats.rx_evm_percentage[stream]
- * (RX_SMOOTH_FACTOR - 1)) +
- (pstats->rx_mimo_sig_qual[stream] * 1)) /
- (RX_SMOOTH_FACTOR);
- }
- }
-}
-
-static void _rtl92de_process_ui_link_quality(struct ieee80211_hw *hw,
- struct rtl_stats *pstats)
-{
- struct rtl_priv *rtlpriv = rtl_priv(hw);
- u32 last_evm, tmpval;
-
- if (pstats->signalquality == 0)
- return;
- if (pstats->packet_toself || pstats->packet_beacon) {
- if (rtlpriv->stats.ui_link_quality.total_num++ >=
- PHY_LINKQUALITY_SLID_WIN_MAX) {
- rtlpriv->stats.ui_link_quality.total_num =
- PHY_LINKQUALITY_SLID_WIN_MAX;
- last_evm = rtlpriv->stats.ui_link_quality.elements[
- rtlpriv->stats.ui_link_quality.index];
- rtlpriv->stats.ui_link_quality.total_val -= last_evm;
- }
- rtlpriv->stats.ui_link_quality.total_val +=
- pstats->signalquality;
- rtlpriv->stats.ui_link_quality.elements[
- rtlpriv->stats.ui_link_quality.index++] =
- pstats->signalquality;
- if (rtlpriv->stats.ui_link_quality.index >=
- PHY_LINKQUALITY_SLID_WIN_MAX)
- rtlpriv->stats.ui_link_quality.index = 0;
- tmpval = rtlpriv->stats.ui_link_quality.total_val /
- rtlpriv->stats.ui_link_quality.total_num;
- rtlpriv->stats.signal_quality = tmpval;
- rtlpriv->stats.last_sigstrength_inpercent = tmpval;
- rtl92d_loop_over_streams(hw, pstats);
- }
-}
-
-static void _rtl92de_process_phyinfo(struct ieee80211_hw *hw,
- u8 *buffer,
- struct rtl_stats *pcurrent_stats)
-{
-
- if (!pcurrent_stats->packet_matchbssid &&
- !pcurrent_stats->packet_beacon)
- return;
-
- _rtl92de_process_ui_rssi(hw, pcurrent_stats);
- _rtl92de_process_pwdb(hw, pcurrent_stats);
- _rtl92de_process_ui_link_quality(hw, pcurrent_stats);
-}
-
-static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw,
- struct sk_buff *skb,
- struct rtl_stats *pstats,
- struct rx_desc_92d *pdesc,
- struct rx_fwinfo_92d *p_drvinfo)
-{
- struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
- struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- struct ieee80211_hdr *hdr;
- u8 *tmp_buf;
- u8 *praddr;
- u16 type, cfc;
- __le16 fc;
- bool packet_matchbssid, packet_toself, packet_beacon = false;
-
- tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
- hdr = (struct ieee80211_hdr *)tmp_buf;
- fc = hdr->frame_control;
- cfc = le16_to_cpu(fc);
- type = WLAN_FC_GET_TYPE(fc);
- praddr = hdr->addr1;
- packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
- ether_addr_equal(mac->bssid,
- (cfc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
- (cfc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
- hdr->addr3) &&
- (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
- packet_toself = packet_matchbssid &&
- ether_addr_equal(praddr, rtlefuse->dev_addr);
- if (ieee80211_is_beacon(fc))
- packet_beacon = true;
- _rtl92de_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
- packet_matchbssid, packet_toself,
- packet_beacon);
- _rtl92de_process_phyinfo(hw, tmp_buf, pstats);
-}
-
-bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
- struct ieee80211_rx_status *rx_status,
- u8 *pdesc8, struct sk_buff *skb)
-{
- __le32 *pdesc = (__le32 *)pdesc8;
- struct rx_fwinfo_92d *p_drvinfo;
- u32 phystatus = get_rx_desc_physt(pdesc);
-
- stats->length = (u16)get_rx_desc_pkt_len(pdesc);
- stats->rx_drvinfo_size = (u8)get_rx_desc_drv_info_size(pdesc) *
- RX_DRV_INFO_SIZE_UNIT;
- stats->rx_bufshift = (u8)(get_rx_desc_shift(pdesc) & 0x03);
- stats->icv = (u16)get_rx_desc_icv(pdesc);
- stats->crc = (u16)get_rx_desc_crc32(pdesc);
- stats->hwerror = (stats->crc | stats->icv);
- stats->decrypted = !get_rx_desc_swdec(pdesc);
- stats->rate = (u8)get_rx_desc_rxmcs(pdesc);
- stats->shortpreamble = (u16)get_rx_desc_splcp(pdesc);
- stats->isampdu = (bool)(get_rx_desc_paggr(pdesc) == 1);
- stats->isfirst_ampdu = (bool)((get_rx_desc_paggr(pdesc) == 1) &&
- (get_rx_desc_faggr(pdesc) == 1));
- stats->timestamp_low = get_rx_desc_tsfl(pdesc);
- stats->rx_is40mhzpacket = (bool)get_rx_desc_bw(pdesc);
- stats->is_ht = (bool)get_rx_desc_rxht(pdesc);
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->band = hw->conf.chandef.chan->band;
- if (get_rx_desc_crc32(pdesc))
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (!get_rx_desc_swdec(pdesc))
- rx_status->flag |= RX_FLAG_DECRYPTED;
- if (get_rx_desc_bw(pdesc))
- rx_status->bw = RATE_INFO_BW_40;
- if (get_rx_desc_rxht(pdesc))
- rx_status->encoding = RX_ENC_HT;
- rx_status->flag |= RX_FLAG_MACTIME_START;
- if (stats->decrypted)
- rx_status->flag |= RX_FLAG_DECRYPTED;
- rx_status->rate_idx = rtlwifi_rate_mapping(hw, stats->is_ht,
- false, stats->rate);
- rx_status->mactime = get_rx_desc_tsfl(pdesc);
- if (phystatus) {
- p_drvinfo = (struct rx_fwinfo_92d *)(skb->data +
- stats->rx_bufshift);
- _rtl92de_translate_rx_signal_stuff(hw,
- skb, stats,
- (struct rx_desc_92d *)pdesc,
- p_drvinfo);
- }
- /*rx_status->qual = stats->signal; */
- rx_status->signal = stats->recvsignalpower + 10;
- return true;
-}
-
static void _rtl92de_insert_emcontent(struct rtl_tcb_desc *ptcb_desc,
u8 *virtualaddress8)
{
@@ -712,94 +286,13 @@ void rtl92de_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc8,
set_tx_desc_own(pdesc, 1);
}
-void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx,
- u8 desc_name, u8 *val)
-{
- __le32 *pdesc = (__le32 *)pdesc8;
-
- if (istx) {
- switch (desc_name) {
- case HW_DESC_OWN:
- wmb();
- set_tx_desc_own(pdesc, 1);
- break;
- case HW_DESC_TX_NEXTDESC_ADDR:
- set_tx_desc_next_desc_address(pdesc, *(u32 *)val);
- break;
- default:
- WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
- desc_name);
- break;
- }
- } else {
- switch (desc_name) {
- case HW_DESC_RXOWN:
- wmb();
- set_rx_desc_own(pdesc, 1);
- break;
- case HW_DESC_RXBUFF_ADDR:
- set_rx_desc_buff_addr(pdesc, *(u32 *)val);
- break;
- case HW_DESC_RXPKT_LEN:
- set_rx_desc_pkt_len(pdesc, *(u32 *)val);
- break;
- case HW_DESC_RXERO:
- set_rx_desc_eor(pdesc, 1);
- break;
- default:
- WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
- desc_name);
- break;
- }
- }
-}
-
-u64 rtl92de_get_desc(struct ieee80211_hw *hw,
- u8 *p_desc8, bool istx, u8 desc_name)
-{
- __le32 *p_desc = (__le32 *)p_desc8;
- u32 ret = 0;
-
- if (istx) {
- switch (desc_name) {
- case HW_DESC_OWN:
- ret = get_tx_desc_own(p_desc);
- break;
- case HW_DESC_TXBUFF_ADDR:
- ret = get_tx_desc_tx_buffer_address(p_desc);
- break;
- default:
- WARN_ONCE(true, "rtl8192de: ERR txdesc :%d not processed\n",
- desc_name);
- break;
- }
- } else {
- switch (desc_name) {
- case HW_DESC_OWN:
- ret = get_rx_desc_own(p_desc);
- break;
- case HW_DESC_RXPKT_LEN:
- ret = get_rx_desc_pkt_len(p_desc);
- break;
- case HW_DESC_RXBUFF_ADDR:
- ret = get_rx_desc_buff_addr(p_desc);
- break;
- default:
- WARN_ONCE(true, "rtl8192de: ERR rxdesc :%d not processed\n",
- desc_name);
- break;
- }
- }
- return ret;
-}
-
bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw,
u8 hw_queue, u16 index)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
u8 *entry = (u8 *)(&ring->desc[ring->idx]);
- u8 own = (u8)rtl92de_get_desc(hw, entry, true, HW_DESC_OWN);
+ u8 own = (u8)rtl92d_get_desc(hw, entry, true, HW_DESC_OWN);
/* a beacon packet will only use the first
* descriptor by default, and the own bit may not
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
index 2992668c156c..d3c480c75678 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.h
@@ -8,384 +8,17 @@
#define TX_DESC_AGGR_SUBFRAME_SIZE 32
#define RX_DESC_SIZE 32
-#define RX_DRV_INFO_SIZE_UNIT 8
#define TX_DESC_NEXT_DESC_OFFSET 40
#define USB_HWDESC_HEADER_LEN 32
#define CRCLENGTH 4
-/* macros to read/write various fields in RX or TX descriptors */
-
-static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
-}
-
-static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
-}
-
-static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(25));
-}
-
-static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(26));
-}
-
-static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(27));
-}
-
-static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(28));
-}
-
-static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(31));
-}
-
-static inline u32 get_tx_desc_own(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, BIT(31));
-}
-
-static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0));
-}
-
-static inline void set_tx_desc_agg_enable(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, BIT(5));
-}
-
-static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, BIT(7));
-}
-
-static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
-}
-
-static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16));
-}
-
-static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
-}
-
-static inline void set_tx_desc_pkt_offset(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 1), __val, GENMASK(30, 26));
-}
-
-static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 2), __val, BIT(17));
-}
-
-static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20));
-}
-
-static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16));
-}
-
-static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28));
-}
-
-static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0));
-}
-
-static inline void set_tx_desc_qos(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(6));
-}
-
-static inline void set_tx_desc_hwseq_en(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(7));
-}
-
-static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(8));
-}
-
-static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(10));
-}
-
-static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(11));
-}
-
-static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(12));
-}
-
-static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(13));
-}
-
-static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20));
-}
-
-static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(25));
-}
-
-static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(26));
-}
-
-static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, BIT(27));
-}
-
-static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28));
-}
-
-static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30));
-}
-
-static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0));
-}
-
-static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 5), __val, BIT(6));
-}
-
-static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8));
-}
-
-static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13));
-}
-
-static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11));
-}
-
-static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
-}
-
-static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
-{
- *(__pdesc + 8) = cpu_to_le32(__val);
-}
-
-static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc)
-{
- return le32_to_cpu(*(__pdesc + 8));
-}
-
-static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
-{
- *(__pdesc + 10) = cpu_to_le32(__val);
-}
-
-static inline u32 get_rx_desc_pkt_len(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, GENMASK(13, 0));
-}
-
-static inline u32 get_rx_desc_crc32(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, BIT(14));
-}
-
-static inline u32 get_rx_desc_icv(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, BIT(15));
-}
-
-static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, GENMASK(19, 16));
-}
-
-static inline u32 get_rx_desc_shift(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, GENMASK(25, 24));
-}
-
-static inline u32 get_rx_desc_physt(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, BIT(26));
-}
-
-static inline u32 get_rx_desc_swdec(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, BIT(27));
-}
-
-static inline u32 get_rx_desc_own(__le32 *__pdesc)
-{
- return le32_get_bits(*__pdesc, BIT(31));
-}
-
-static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
-}
-
-static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(30));
-}
-
-static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val)
-{
- le32p_replace_bits(__pdesc, __val, BIT(31));
-}
-
-static inline u32 get_rx_desc_paggr(__le32 *__pdesc)
-{
- return le32_get_bits(*(__pdesc + 1), BIT(14));
-}
-
-static inline u32 get_rx_desc_faggr(__le32 *__pdesc)
-{
- return le32_get_bits(*(__pdesc + 1), BIT(15));
-}
-
-static inline u32 get_rx_desc_rxmcs(__le32 *__pdesc)
-{
- return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
-}
-
-static inline u32 get_rx_desc_rxht(__le32 *__pdesc)
-{
- return le32_get_bits(*(__pdesc + 3), BIT(6));
-}
-
-static inline u32 get_rx_desc_splcp(__le32 *__pdesc)
-{
- return le32_get_bits(*(__pdesc + 3), BIT(8));
-}
-
-static inline u32 get_rx_desc_bw(__le32 *__pdesc)
-{
- return le32_get_bits(*(__pdesc + 3), BIT(9));
-}
-
-static inline u32 get_rx_desc_tsfl(__le32 *__pdesc)
-{
- return le32_to_cpu(*(__pdesc + 5));
-}
-
-static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc)
-{
- return le32_to_cpu(*(__pdesc + 6));
-}
-
-static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val)
-{
- *(__pdesc + 6) = cpu_to_le32(__val);
-}
-
static inline void clear_pci_tx_desc_content(__le32 *__pdesc, u32 _size)
{
memset((void *)__pdesc, 0,
min_t(size_t, _size, TX_DESC_NEXT_DESC_OFFSET));
}
-/* For 92D early mode */
-static inline void set_earlymode_pktnum(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits(__paddr, __value, GENMASK(2, 0));
-}
-
-static inline void set_earlymode_len0(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits(__paddr, __value, GENMASK(15, 4));
-}
-
-static inline void set_earlymode_len1(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits(__paddr, __value, GENMASK(27, 16));
-}
-
-static inline void set_earlymode_len2_1(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits(__paddr, __value, GENMASK(31, 28));
-}
-
-static inline void set_earlymode_len2_2(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits((__paddr + 1), __value, GENMASK(7, 0));
-}
-
-static inline void set_earlymode_len3(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits((__paddr + 1), __value, GENMASK(19, 8));
-}
-
-static inline void set_earlymode_len4(__le32 *__paddr, u32 __value)
-{
- le32p_replace_bits((__paddr + 1), __value, GENMASK(31, 20));
-}
-
-struct rx_fwinfo_92d {
- u8 gain_trsw[4];
- u8 pwdb_all;
- u8 cfosho[4];
- u8 cfotail[4];
- s8 rxevm[2];
- s8 rxsnr[4];
- u8 pdsnr[2];
- u8 csi_current[2];
- u8 csi_target[2];
- u8 sigevm;
- u8 max_ex_pwr;
- u8 ex_intf_flag:1;
- u8 sgi_en:1;
- u8 rxsc:2;
- u8 reserve:4;
-} __packed;
-
struct tx_desc_92d {
u32 pktsize:16;
u32 offset:8;
@@ -488,78 +121,12 @@ struct tx_desc_92d {
u32 reserve_pass_pcie_mm_limit[4];
} __packed;
-struct rx_desc_92d {
- u32 length:14;
- u32 crc32:1;
- u32 icverror:1;
- u32 drv_infosize:4;
- u32 security:3;
- u32 qos:1;
- u32 shift:2;
- u32 phystatus:1;
- u32 swdec:1;
- u32 lastseg:1;
- u32 firstseg:1;
- u32 eor:1;
- u32 own:1;
-
- u32 macid:5;
- u32 tid:4;
- u32 hwrsvd:5;
- u32 paggr:1;
- u32 faggr:1;
- u32 a1_fit:4;
- u32 a2_fit:4;
- u32 pam:1;
- u32 pwr:1;
- u32 moredata:1;
- u32 morefrag:1;
- u32 type:2;
- u32 mc:1;
- u32 bc:1;
-
- u32 seq:12;
- u32 frag:4;
- u32 nextpktlen:14;
- u32 nextind:1;
- u32 rsvd:1;
-
- u32 rxmcs:6;
- u32 rxht:1;
- u32 amsdu:1;
- u32 splcp:1;
- u32 bandwidth:1;
- u32 htc:1;
- u32 tcpchk_rpt:1;
- u32 ipcchk_rpt:1;
- u32 tcpchk_valid:1;
- u32 hwpcerr:1;
- u32 hwpcind:1;
- u32 iv0:16;
-
- u32 iv1;
-
- u32 tsfl;
-
- u32 bufferaddress;
- u32 bufferaddress64;
-
-} __packed;
-
void rtl92de_tx_fill_desc(struct ieee80211_hw *hw,
struct ieee80211_hdr *hdr, u8 *pdesc,
u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
struct ieee80211_sta *sta,
struct sk_buff *skb, u8 hw_queue,
struct rtl_tcb_desc *ptcb_desc);
-bool rtl92de_rx_query_desc(struct ieee80211_hw *hw,
- struct rtl_stats *stats,
- struct ieee80211_rx_status *rx_status,
- u8 *pdesc, struct sk_buff *skb);
-void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
- u8 desc_name, u8 *val);
-u64 rtl92de_get_desc(struct ieee80211_hw *hw,
- u8 *p_desc, bool istx, u8 desc_name);
bool rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw,
u8 hw_queue, u16 index);
void rtl92de_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile
new file mode 100644
index 000000000000..569bfd3d5030
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+rtl8192du-objs := \
+ dm.o \
+ fw.o \
+ hw.o \
+ led.o \
+ phy.o \
+ rf.o \
+ sw.o \
+ table.o \
+ trx.o
+
+obj-$(CONFIG_RTL8192DU) += rtl8192du.o
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c
new file mode 100644
index 000000000000..dd57707a9184
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/dm_common.h"
+#include "../rtl8192d/fw_common.h"
+#include "dm.h"
+
+static void rtl92du_dm_init_1r_cca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
+
+ dm_pstable->pre_ccastate = CCA_MAX;
+ dm_pstable->cur_ccasate = CCA_MAX;
+}
+
+static void rtl92du_dm_1r_cca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct ps_t *dm_pstable = &rtlpriv->dm_pstable;
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ int pwdb = rtlpriv->dm_digtable.min_undec_pwdb_for_dm;
+
+ if (rtlhal->macphymode != SINGLEMAC_SINGLEPHY ||
+ rtlhal->current_bandtype != BAND_ON_5G)
+ return;
+
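+	/* Hysteresis: switch to 1R CCA only above PWDB 35 and back to
+	 * 2R only below 30, so the state does not flap around a single
+	 * threshold.
+	 */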
+ if (pwdb != 0) {
+ if (dm_pstable->pre_ccastate == CCA_2R ||
+ dm_pstable->pre_ccastate == CCA_MAX)
+ dm_pstable->cur_ccasate = (pwdb >= 35) ? CCA_1R : CCA_2R;
+ else
+ dm_pstable->cur_ccasate = (pwdb <= 30) ? CCA_2R : CCA_1R;
+ } else {
+ dm_pstable->cur_ccasate = CCA_MAX;
+ }
+
+ if (dm_pstable->pre_ccastate == dm_pstable->cur_ccasate)
+ return;
+
+ rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_TRACE,
+ "Old CCA state: %d new CCA state: %d\n",
+ dm_pstable->pre_ccastate, dm_pstable->cur_ccasate);
+
+ if (dm_pstable->cur_ccasate == CCA_1R) {
+ if (rtlpriv->phy.rf_type == RF_2T2R)
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x13);
+ else /* Is this branch reachable? */
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
+ } else { /* CCA_2R or CCA_MAX */
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x33);
+ }
+}
+
+static void rtl92du_dm_pwdb_monitor(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ const u32 max_macid = 32;
+ u32 temp;
+
+	/* AP & ADHOC & MESH modes return early - nothing to report */
+ if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION)
+ return;
+
+ /* Indicate Rx signal strength to FW. */
+ if (rtlpriv->dm.useramask) {
+ temp = rtlpriv->dm.undec_sm_pwdb << 16;
+ temp |= max_macid << 8;
+
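+		/* The 3-byte H2C payload is taken from the low bytes of
+		 * temp: byte 0 stays zero, byte 1 carries the macid limit
+		 * and byte 2 the smoothed PWDB (assuming the usual
+		 * little-endian layout).
+		 */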
+ rtl92d_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, (u8 *)(&temp));
+ } else {
+ rtl_write_byte(rtlpriv, 0x4fe, (u8)rtlpriv->dm.undec_sm_pwdb);
+ }
+}
+
+void rtl92du_dm_init(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
+ rtl_dm_diginit(hw, 0x20);
+ rtlpriv->dm_digtable.rx_gain_max = DM_DIG_FA_UPPER;
+ rtlpriv->dm_digtable.rx_gain_min = DM_DIG_FA_LOWER;
+ rtl92d_dm_init_edca_turbo(hw);
+ rtl92du_dm_init_1r_cca(hw);
+ rtl92d_dm_init_rate_adaptive_mask(hw);
+ rtl92d_dm_initialize_txpower_tracking(hw);
+}
+
+void rtl92du_dm_watchdog(struct ieee80211_hw *hw)
+{
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ bool fw_current_inpsmode = false;
+ bool fwps_awake = true;
+
+	/* Skip dynamic mechanisms when:
+	 * 1. RF is OFF. (No need to do DM.)
+	 * 2. FW is in power-saving mode for FWLPS.
+	 *    (Prevents SW/FW I/O races.)
+	 * 3. The IPS workitem is scheduled. (Prevents the IPS sequence
+	 *    from being interleaved with DM.)
+	 * 4. RFChangeInProgress is true.
+	 *    (Prevents breakage by IPS/HW/SW RF off.)
+	 */
+
+ if (ppsc->rfpwr_state != ERFON || fw_current_inpsmode ||
+ !fwps_awake || ppsc->rfchange_inprogress)
+ return;
+
+ rtl92du_dm_pwdb_monitor(hw);
+ rtl92d_dm_false_alarm_counter_statistics(hw);
+ rtl92d_dm_find_minimum_rssi(hw);
+ rtl92d_dm_dig(hw);
+ rtl92d_dm_check_txpower_tracking_thermal_meter(hw);
+ rtl92d_dm_check_edca_turbo(hw);
+ rtl92du_dm_1r_cca(hw);
+}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h
new file mode 100644
index 000000000000..2f283bf1e4d8
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_DM_H__
+#define __RTL92DU_DM_H__
+
+void rtl92du_dm_init(struct ieee80211_hw *hw);
+void rtl92du_dm_watchdog(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c
new file mode 100644
index 000000000000..f74e4e84fe39
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/fw_common.h"
+#include "fw.h"
+
+int rtl92du_download_fw(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ enum version_8192d version = rtlhal->version;
+ u8 *pfwheader;
+ u8 *pfwdata;
+ u32 fwsize;
+ int err;
+
+ if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware)
+ return 1;
+
+ fwsize = rtlhal->fwsize;
+ pfwheader = rtlhal->pfirmware;
+ pfwdata = rtlhal->pfirmware;
+ rtlhal->fw_version = (u16)GET_FIRMWARE_HDR_VERSION(pfwheader);
+ rtlhal->fw_subversion = (u16)GET_FIRMWARE_HDR_SUB_VER(pfwheader);
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "FirmwareVersion(%d), FirmwareSubVersion(%d), Signature(%#x)\n",
+ rtlhal->fw_version, rtlhal->fw_subversion,
+ GET_FIRMWARE_HDR_SIGNATURE(pfwheader));
+
+ if (IS_FW_HEADER_EXIST(pfwheader)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Shift 32 bytes for FW header!!\n");
+ pfwdata = pfwdata + 32;
+ fwsize = fwsize - 32;
+ }
+
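+	/* The firmware may already be running, e.g. loaded by the
+	 * other MAC in dual-MAC mode; if so, skip straight to the FW
+	 * init handshake.
+	 */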
+ if (rtl92d_is_fw_downloaded(rtlpriv))
+ goto exit;
+
+	/* If the 8051 is running from RAM code, the driver must tell
+	 * the FW to reset itself first, otherwise the FW download
+	 * fails.
+	 */
+ if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) {
+ rtl92d_firmware_selfreset(hw);
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+ }
+
+ rtl92d_enable_fw_download(hw, true);
+ rtl92d_write_fw(hw, version, pfwdata, fwsize);
+ rtl92d_enable_fw_download(hw, false);
+
+ err = rtl92d_fw_free_to_go(hw);
+ if (err)
+ pr_err("fw is not ready to run!\n");
+exit:
+ err = rtl92d_fw_init(hw);
+ return err;
+}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h
new file mode 100644
index 000000000000..7904bfbda4ba
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_FW_H__
+#define __RTL92DU_FW_H__
+
+int rtl92du_download_fw(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c
new file mode 100644
index 000000000000..ff458fb8514d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c
@@ -0,0 +1,1212 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../cam.h"
+#include "../usb.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/dm_common.h"
+#include "../rtl8192d/fw_common.h"
+#include "../rtl8192d/hw_common.h"
+#include "../rtl8192d/phy_common.h"
+#include "phy.h"
+#include "dm.h"
+#include "fw.h"
+#include "hw.h"
+#include "trx.h"
+
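+/* REG_BCN_CTRL is updated read-modify-write against a cached copy
+ * (reg_bcn_ctrl_val), presumably to avoid a USB register read on
+ * every change.
+ */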
+static void _rtl92du_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
+ u8 set_bits, u8 clear_bits)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtlusb->reg_bcn_ctrl_val |= set_bits;
+ rtlusb->reg_bcn_ctrl_val &= ~clear_bits;
+ rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlusb->reg_bcn_ctrl_val);
+}
+
+static void _rtl92du_enable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl92du_set_bcn_ctrl_reg(hw, 0, BIT(1));
+}
+
+static void _rtl92du_disable_bcn_sub_func(struct ieee80211_hw *hw)
+{
+ _rtl92du_set_bcn_ctrl_reg(hw, BIT(1), 0);
+}
+
+void rtl92du_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+
+ switch (variable) {
+ case HW_VAR_RCR:
+ *((u32 *)val) = mac->rx_conf;
+ break;
+ default:
+ rtl92d_get_hw_reg(hw, variable, val);
+ break;
+ }
+}
+
+void rtl92du_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ switch (variable) {
+ case HW_VAR_AC_PARAM:
+ rtl92d_dm_init_edca_turbo(hw);
+ break;
+ case HW_VAR_ACM_CTRL: {
+ u8 e_aci = *val;
+ union aci_aifsn *p_aci_aifsn =
+ (union aci_aifsn *)(&mac->ac[0].aifs);
+ u8 acm = p_aci_aifsn->f.acm;
+ u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
+
+ if (acm) {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl |= ACMHW_BEQEN;
+ break;
+ case AC2_VI:
+ acm_ctrl |= ACMHW_VIQEN;
+ break;
+ case AC3_VO:
+ acm_ctrl |= ACMHW_VOQEN;
+ break;
+ default:
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n",
+ acm);
+ break;
+ }
+ } else {
+ switch (e_aci) {
+ case AC0_BE:
+ acm_ctrl &= (~ACMHW_BEQEN);
+ break;
+ case AC2_VI:
+ acm_ctrl &= (~ACMHW_VIQEN);
+ break;
+ case AC3_VO:
+ acm_ctrl &= (~ACMHW_VOQEN);
+ break;
+ default:
+ pr_err("%s:%d switch case %#x not processed\n",
+ __func__, __LINE__, e_aci);
+ break;
+ }
+ }
+ rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE,
+ "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n",
+ acm_ctrl);
+ rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
+ break;
+ }
+ case HW_VAR_RCR:
+ mac->rx_conf = ((u32 *)val)[0];
+ rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+ break;
+ case HW_VAR_H2C_FW_JOINBSSRPT: {
+ u8 tmp_regcr, tmp_reg422;
+ bool recover = false;
+ u8 mstatus = *val;
+
+ if (mstatus == RT_MEDIA_CONNECT) {
+ rtlpriv->cfg->ops->set_hw_reg(hw,
+ HW_VAR_AID, NULL);
+ tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ tmp_regcr | ENSWBCN);
+ _rtl92du_set_bcn_ctrl_reg(hw, 0, EN_BCN_FUNCTION);
+ _rtl92du_set_bcn_ctrl_reg(hw, DIS_TSF_UDT, 0);
+ tmp_reg422 = rtl_read_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2);
+ if (tmp_reg422 & (EN_BCNQ_DL >> 16))
+ recover = true;
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422 & ~(EN_BCNQ_DL >> 16));
+
+ /* We don't implement FW LPS so this is not needed. */
+ /* rtl92d_set_fw_rsvdpagepkt(hw, 0); */
+
+ _rtl92du_set_bcn_ctrl_reg(hw, EN_BCN_FUNCTION, 0);
+ _rtl92du_set_bcn_ctrl_reg(hw, 0, DIS_TSF_UDT);
+ if (recover)
+ rtl_write_byte(rtlpriv,
+ REG_FWHW_TXQ_CTRL + 2,
+ tmp_reg422);
+ rtl_write_byte(rtlpriv, REG_CR + 1,
+ tmp_regcr & ~ENSWBCN);
+ }
+ rtl92d_set_fw_joinbss_report_cmd(hw, (*val));
+ break;
+ }
+ case HW_VAR_CORRECT_TSF: {
+ u8 btype_ibss = val[0];
+
+ if (btype_ibss)
+ rtl92d_stop_tx_beacon(hw);
+ _rtl92du_set_bcn_ctrl_reg(hw, 0, EN_BCN_FUNCTION);
+ rtl_write_dword(rtlpriv, REG_TSFTR,
+ (u32)(mac->tsf & 0xffffffff));
+ rtl_write_dword(rtlpriv, REG_TSFTR + 4,
+ (u32)((mac->tsf >> 32) & 0xffffffff));
+ _rtl92du_set_bcn_ctrl_reg(hw, EN_BCN_FUNCTION, 0);
+ if (btype_ibss)
+ rtl92d_resume_tx_beacon(hw);
+
+ break;
+ }
+ case HW_VAR_KEEP_ALIVE:
+ /* Avoid "switch case not processed" error. RTL8192DU doesn't
+ * need to do anything here, maybe.
+ */
+ break;
+ default:
+ rtl92d_set_hw_reg(hw, variable, val);
+ break;
+ }
+}
+
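+/* Partition the TX packet buffer pages between the shared public pool
+ * and the per-priority queues, then program the split through REG_RQPN
+ * (LD_RQPN presumably tells the MAC to latch the new values).
+ */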
+static void _rtl92du_init_queue_reserved_page(struct ieee80211_hw *hw,
+ u8 out_ep_num,
+ u8 queue_sel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u32 txqpagenum, txqpageunit;
+ u32 txqremainingpage;
+ u32 value32 = 0;
+ u32 numhq = 0;
+ u32 numlq = 0;
+ u32 numnq = 0;
+ u32 numpubq;
+
+ if (rtlhal->macphymode != SINGLEMAC_SINGLEPHY) {
+ numpubq = NORMAL_PAGE_NUM_PUBQ_92D_DUAL_MAC;
+ txqpagenum = TX_TOTAL_PAGE_NUMBER_92D_DUAL_MAC - numpubq;
+ } else {
+ numpubq = TEST_PAGE_NUM_PUBQ_92DU;
+ txqpagenum = TX_TOTAL_PAGE_NUMBER_92DU - numpubq;
+ }
+
+ if (rtlhal->macphymode != SINGLEMAC_SINGLEPHY && out_ep_num == 3) {
+ numhq = NORMAL_PAGE_NUM_HPQ_92D_DUAL_MAC;
+ numlq = NORMAL_PAGE_NUM_LPQ_92D_DUAL_MAC;
+ numnq = NORMAL_PAGE_NUM_NORMALQ_92D_DUAL_MAC;
+ } else {
+ txqpageunit = txqpagenum / out_ep_num;
+ txqremainingpage = txqpagenum % out_ep_num;
+
+ if (queue_sel & TX_SELE_HQ)
+ numhq = txqpageunit;
+ if (queue_sel & TX_SELE_LQ)
+ numlq = txqpageunit;
+ if (queue_sel & TX_SELE_NQ)
+ numnq = txqpageunit;
+
+		/* The HIGH priority queue is always present in the 2 and
+		 * 3 out-ep configurations, so the remainder pages are
+		 * assigned to it.
+		 */
+ if (out_ep_num > 1 && txqremainingpage)
+ numhq += txqremainingpage;
+ }
+
+	/* NOTE: This step must be done before writing REG_RQPN. */
+ rtl_write_byte(rtlpriv, REG_RQPN_NPQ, (u8)numnq);
+
+ /* TX DMA */
+ u32p_replace_bits(&value32, numhq, HPQ_MASK);
+ u32p_replace_bits(&value32, numlq, LPQ_MASK);
+ u32p_replace_bits(&value32, numpubq, PUBQ_MASK);
+ value32 |= LD_RQPN;
+ rtl_write_dword(rtlpriv, REG_RQPN, value32);
+}
+
+static void _rtl92du_init_tx_buffer_boundary(struct ieee80211_hw *hw,
+ u8 txpktbuf_bndy)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+
+ rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
+
+	/* TXPKTBUF_PG_BNDY */
+ rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy);
+
+ /* Beacon Head for TXDMA */
+ rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
+}
+
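+/* The LLT links the TX buffer pages into lists: pages below the
+ * boundary form the normal TX chain (0xFF marks end-of-list), the
+ * remaining pages are linked into a ring.
+ */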
+static bool _rtl92du_llt_table_init(struct ieee80211_hw *hw, u8 txpktbuf_bndy)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned short i;
+ bool status;
+ u8 maxpage;
+
+ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY)
+ maxpage = 255;
+ else
+ maxpage = 127;
+
+ for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+ status = rtl92d_llt_write(hw, i, i + 1);
+ if (!status)
+ return status;
+ }
+
+ /* end of list */
+ status = rtl92d_llt_write(hw, txpktbuf_bndy - 1, 0xFF);
+ if (!status)
+ return status;
+
+	/* Link the remaining pages into a ring buffer. It serves as
+	 * the beacon buffer when this MAC is configured for two-MAC
+	 * transfer, otherwise as a local loopback buffer.
+	 */
+ for (i = txpktbuf_bndy; i < maxpage; i++) {
+ status = rtl92d_llt_write(hw, i, i + 1);
+ if (!status)
+ return status;
+ }
+
+ /* Let last entry point to the start entry of ring buffer */
+ status = rtl92d_llt_write(hw, maxpage, txpktbuf_bndy);
+ if (!status)
+ return status;
+
+ return true;
+}
+
+static void _rtl92du_init_chipn_reg_priority(struct ieee80211_hw *hw, u16 beq,
+ u16 bkq, u16 viq, u16 voq,
+ u16 mgtq, u16 hiq)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 value16;
+
+ value16 = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0x7;
+ u16p_replace_bits(&value16, beq, TXDMA_BEQ_MAP);
+ u16p_replace_bits(&value16, bkq, TXDMA_BKQ_MAP);
+ u16p_replace_bits(&value16, viq, TXDMA_VIQ_MAP);
+ u16p_replace_bits(&value16, voq, TXDMA_VOQ_MAP);
+ u16p_replace_bits(&value16, mgtq, TXDMA_MGQ_MAP);
+ u16p_replace_bits(&value16, hiq, TXDMA_HIQ_MAP);
+ rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, value16);
+}
+
+static void _rtl92du_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw,
+ u8 queue_sel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 value;
+
+ switch (queue_sel) {
+ case TX_SELE_HQ:
+ value = QUEUE_HIGH;
+ break;
+ case TX_SELE_LQ:
+ value = QUEUE_LOW;
+ break;
+ case TX_SELE_NQ:
+ value = QUEUE_NORMAL;
+ break;
+ default:
+ WARN_ON(1); /* Shall not reach here! */
+ return;
+ }
+ _rtl92du_init_chipn_reg_priority(hw, value, value, value, value,
+ value, value);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92du_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw,
+ u8 queue_sel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 beq, bkq, viq, voq, mgtq, hiq;
+ u16 valuehi, valuelow;
+
+ switch (queue_sel) {
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case (TX_SELE_HQ | TX_SELE_LQ):
+ valuehi = QUEUE_HIGH;
+ valuelow = QUEUE_LOW;
+ break;
+ case (TX_SELE_NQ | TX_SELE_LQ):
+ valuehi = QUEUE_NORMAL;
+ valuelow = QUEUE_LOW;
+ break;
+ case (TX_SELE_HQ | TX_SELE_NQ):
+ valuehi = QUEUE_HIGH;
+ valuelow = QUEUE_NORMAL;
+ break;
+ }
+
+ beq = valuelow;
+ bkq = valuelow;
+ viq = valuehi;
+ voq = valuehi;
+ mgtq = valuehi;
+ hiq = valuehi;
+
+ _rtl92du_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92du_init_chipn_three_out_ep_priority(struct ieee80211_hw *hw,
+ u8 queue_sel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 beq, bkq, viq, voq, mgtq, hiq;
+
+ beq = QUEUE_LOW;
+ bkq = QUEUE_LOW;
+ viq = QUEUE_NORMAL;
+ voq = QUEUE_HIGH;
+ mgtq = QUEUE_HIGH;
+ hiq = QUEUE_HIGH;
+
+ _rtl92du_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Tx queue select: 0x%02x\n", queue_sel);
+}
+
+static void _rtl92du_init_queue_priority(struct ieee80211_hw *hw,
+ u8 out_ep_num,
+ u8 queue_sel)
+{
+ switch (out_ep_num) {
+ case 1:
+ _rtl92du_init_chipn_one_out_ep_priority(hw, queue_sel);
+ break;
+ case 2:
+ _rtl92du_init_chipn_two_out_ep_priority(hw, queue_sel);
+ break;
+ case 3:
+ _rtl92du_init_chipn_three_out_ep_priority(hw, queue_sel);
+ break;
+ default:
+ WARN_ON(1); /* Shall not reach here! */
+ break;
+ }
+}
+
+static void _rtl92du_init_wmac_setting(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+
+ mac->rx_conf = RCR_APM | RCR_AM | RCR_AB | RCR_ADF | RCR_APP_ICV |
+ RCR_AMF | RCR_HTC_LOC_CTRL | RCR_APP_MIC |
+ RCR_APP_PHYST_RXFF | RCR_APPFCS;
+
+ rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf);
+
+ /* Set Multicast Address. */
+ rtl_write_dword(rtlpriv, REG_MAR, 0xffffffff);
+ rtl_write_dword(rtlpriv, REG_MAR + 4, 0xffffffff);
+}
+
+static void _rtl92du_init_adaptive_ctrl(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 val32;
+
+ val32 = rtl_read_dword(rtlpriv, REG_RRSR);
+ val32 &= ~0xfffff;
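+	/* RRSR[19:0] is the basic rate bitmap; bit 0 appears to be
+	 * CCK 1M, which is left out on 5 GHz where CCK is not allowed.
+	 */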
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
+ val32 |= 0xffff0; /* No CCK */
+ else
+ val32 |= 0xffff1;
+ rtl_write_dword(rtlpriv, REG_RRSR, val32);
+
+ /* Set Spec SIFS (used in NAV) */
+ rtl_write_word(rtlpriv, REG_SPEC_SIFS, 0x1010);
+
+ /* Retry limit 0x30 */
+ rtl_write_word(rtlpriv, REG_RL, 0x3030);
+}
+
+static void _rtl92du_init_edca(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 val16;
+
+	/* Disable EDCCA countdown to reduce collisions and retries */
+ val16 = rtl_read_word(rtlpriv, REG_RD_CTRL);
+ val16 |= DIS_EDCA_CNT_DWN;
+ rtl_write_word(rtlpriv, REG_RD_CTRL, val16);
+
+ /* CCK SIFS shall always be 10us. */
+ rtl_write_word(rtlpriv, REG_SIFS_CTX, 0x0a0a);
+ /* Set SIFS for OFDM */
+ rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x1010);
+
+ rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0204);
+
+ rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x014004);
+
+ /* TXOP */
+ rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, 0x005EA42B);
+ rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0x0000A44F);
+ rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x005EA324);
+ rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x002FA226);
+
+ rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
+
+ rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+
+ rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040);
+
+ rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x2);
+ rtl_write_byte(rtlpriv, REG_ATIMWND, 0x2);
+}
+
+static void _rtl92du_init_retry_function(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 val8;
+
+ val8 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL);
+ val8 |= EN_AMPDU_RTY_NEW;
+ rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL, val8);
+
+ rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
+}
+
+static void _rtl92du_init_operation_mode(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, BW_OPMODE_20MHZ);
+
+ switch (rtlpriv->phy.rf_type) {
+ case RF_1T2R:
+ case RF_1T1R:
+ rtlhal->minspace_cfg = (MAX_MSS_DENSITY_1T << 3);
+ break;
+ case RF_2T2R:
+ case RF_2T2R_GREEN:
+ rtlhal->minspace_cfg = (MAX_MSS_DENSITY_2T << 3);
+ break;
+ }
+ rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, rtlhal->minspace_cfg);
+}
+
+static void _rtl92du_init_beacon_parameters(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1010);
+
+ rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x3c02);
+ rtl_write_byte(rtlpriv, REG_DRVERLYINT, 0x05);
+ rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x03);
+
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
+}
+
+static void _rtl92du_init_ampdu_aggregation(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+
+ /* Aggregation threshold */
+ if (rtlhal->macphymode == DUALMAC_DUALPHY)
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x66525541);
+ else if (rtlhal->macphymode == DUALMAC_SINGLEPHY)
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x44444441);
+ else
+ rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x88728841);
+
+ rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
+}
+
+static bool _rtl92du_init_power_on(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ unsigned short wordtmp;
+ unsigned char bytetmp;
+ u16 retry = 0;
+
+ do {
+ if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN)
+ break;
+
+ if (retry++ > 1000)
+ return false;
+ } while (true);
+
+ /* Unlock ISO/CLK/Power control register */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
+
+ /* SPS0_CTRL 0x11[7:0] = 0x2b enable SPS into PWM mode */
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+
+ msleep(1);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL);
+ if ((bytetmp & LDV12_EN) == 0) {
+ bytetmp |= LDV12_EN;
+ rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, bytetmp);
+
+ msleep(1);
+
+ bytetmp = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL);
+ bytetmp &= ~ISO_MD2PP;
+ rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, bytetmp);
+ }
+
+ /* Auto enable WLAN */
+ wordtmp = rtl_read_word(rtlpriv, REG_APS_FSMCO);
+ wordtmp |= APFM_ONMAC;
+ rtl_write_word(rtlpriv, REG_APS_FSMCO, wordtmp);
+
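+	/* The MAC clears APFM_ONMAC once the power-on sequence has
+	 * finished; poll with a bounded retry count so a dead device
+	 * cannot hang the init path.
+	 */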
+ wordtmp = rtl_read_word(rtlpriv, REG_APS_FSMCO);
+ retry = 0;
+ while ((wordtmp & APFM_ONMAC) && retry < 1000) {
+ retry++;
+ wordtmp = rtl_read_word(rtlpriv, REG_APS_FSMCO);
+ }
+
+ /* Release RF digital isolation */
+ wordtmp = rtl_read_word(rtlpriv, REG_SYS_ISO_CTRL);
+ wordtmp &= ~ISO_DIOR;
+ rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, wordtmp);
+
+ /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
+ wordtmp = rtl_read_word(rtlpriv, REG_CR);
+ wordtmp |= HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN |
+ PROTOCOL_EN | SCHEDULE_EN | MACTXEN | MACRXEN | ENSEC;
+ rtl_write_word(rtlpriv, REG_CR, wordtmp);
+
+ return true;
+}
+
+static bool _rtl92du_init_mac(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 val8;
+
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
+
+ val8 = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
+ val8 &= ~(FEN_MREGEN >> 8);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, val8);
+
+	/* S3/S4 may reset the MAC and clear Reg0xf8, so reset the
+	 * macphy control register here.
+	 */
+ rtl92d_phy_config_macphymode(hw);
+
+ rtl92du_phy_set_poweron(hw);
+
+ if (!_rtl92du_init_power_on(hw)) {
+ pr_err("Failed to init power on!\n");
+ return false;
+ }
+
+ rtl92d_phy_config_maccoexist_rfpage(hw);
+
+ return true;
+}
+
+int rtl92du_hw_init(struct ieee80211_hw *hw)
+{
+ struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw);
+ struct rtl_usb *rtlusb = rtl_usbdev(usb_priv);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 val8, txpktbuf_bndy;
+ int err, i;
+ u32 val32;
+ u16 val16;
+
+ mutex_lock(rtlpriv->mutex_for_hw_init);
+
+ /* we should do iqk after disable/enable */
+ rtl92d_phy_reset_iqk_result(hw);
+
+ if (!_rtl92du_init_mac(hw)) {
+ pr_err("Init MAC failed\n");
+ mutex_unlock(rtlpriv->mutex_for_hw_init);
+ return 1;
+ }
+
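+	/* Single MAC/PHY mode owns the whole TX packet buffer; in the
+	 * dual-MAC modes each MAC gets roughly half, hence the smaller
+	 * boundary.
+	 */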
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY)
+ txpktbuf_bndy = 249;
+ else
+ txpktbuf_bndy = 123;
+
+ if (!_rtl92du_llt_table_init(hw, txpktbuf_bndy)) {
+ pr_err("Init LLT failed\n");
+ mutex_unlock(rtlpriv->mutex_for_hw_init);
+ return 1;
+ }
+
+ err = rtl92du_download_fw(hw);
+
+	/* Return failure only when the part number check fails. */
+ if (err && rtl_read_byte(rtlpriv, 0x1c5) == 0xe0) {
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Failed to download FW. Init HW without FW..\n");
+ mutex_unlock(rtlpriv->mutex_for_hw_init);
+ return 1;
+ }
+ rtlhal->last_hmeboxnum = 0;
+ rtlpriv->psc.fw_current_inpsmode = false;
+
+ rtl92du_phy_mac_config(hw);
+
+ /* Set reserved page for each queue */
+ _rtl92du_init_queue_reserved_page(hw, rtlusb->out_ep_nums,
+ rtlusb->out_queue_sel);
+
+ _rtl92du_init_tx_buffer_boundary(hw, txpktbuf_bndy);
+
+ _rtl92du_init_queue_priority(hw, rtlusb->out_ep_nums,
+ rtlusb->out_queue_sel);
+
+ /* Set Tx/Rx page size (Tx must be 128 Bytes,
+ * Rx can be 64, 128, 256, 512, 1024 bytes)
+ */
+ rtl_write_byte(rtlpriv, REG_PBP, 0x11);
+
+ /* Get Rx PHY status in order to report RSSI and others. */
+ rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
+
+ rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
+ rtl_write_dword(rtlpriv, REG_HIMR, 0xffffffff);
+
+ val8 = rtl_read_byte(rtlpriv, MSR);
+ val8 &= ~MSR_MASK;
+ val8 |= MSR_INFRA;
+ rtl_write_byte(rtlpriv, MSR, val8);
+
+ _rtl92du_init_wmac_setting(hw);
+ _rtl92du_init_adaptive_ctrl(hw);
+ _rtl92du_init_edca(hw);
+
+ rtl_write_dword(rtlpriv, REG_DARFRC, 0x00000000);
+ rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x10080404);
+ rtl_write_dword(rtlpriv, REG_RARFRC, 0x04030201);
+ rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x08070605);
+
+ _rtl92du_init_retry_function(hw);
+ /* _InitUsbAggregationSetting(padapter); no aggregation for now */
+ _rtl92du_init_operation_mode(hw);
+ _rtl92du_init_beacon_parameters(hw);
+ _rtl92du_init_ampdu_aggregation(hw);
+
+ rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff);
+
+	/* unit: 256us, so 0x0400 is ~256ms */
+ rtl_write_word(rtlpriv, REG_PKT_VO_VI_LIFE_TIME, 0x0400);
+ rtl_write_word(rtlpriv, REG_PKT_BE_BK_LIFE_TIME, 0x0400);
+
+ /* Hardware-controlled blinking. */
+ rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8282);
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, 0x82);
+
+ val32 = rtl_read_dword(rtlpriv, REG_TXDMA_OFFSET_CHK);
+ val32 |= DROP_DATA_EN;
+ rtl_write_dword(rtlpriv, REG_TXDMA_OFFSET_CHK, val32);
+
+ if (mac->rdg_en) {
+ rtl_write_byte(rtlpriv, REG_RD_CTRL, 0xff);
+ rtl_write_word(rtlpriv, REG_RD_NAV_NXT, 0x200);
+ rtl_write_byte(rtlpriv, REG_RD_RESP_PKT_TH, 0x05);
+ }
+
+ for (i = 0; i < 4; i++)
+ rtl_write_dword(rtlpriv, REG_ARFR0 + i * 4, 0x1f8ffff0);
+
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY) {
+ if (rtlusb->out_ep_nums == 2)
+ rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03066666);
+ else
+ rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0x8888);
+ } else {
+ rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0x5555);
+ }
+
+ val8 = rtl_read_byte(rtlpriv, 0x605);
+ val8 |= 0xf0;
+ rtl_write_byte(rtlpriv, 0x605, val8);
+
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x30);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x30);
+ rtl_write_byte(rtlpriv, 0x606, 0x30);
+
+	/* Temporary fix for the high queue and mgmt queue corrupting
+	 * each other in time: it may hang when the SW beacon uses
+	 * high_Q while other frames use mgnt_Q, or vice versa.
+	 */
+ rtl_write_byte(rtlpriv, REG_DIS_TXREQ_CLR, 0x10);
+ val16 = rtl_read_word(rtlpriv, REG_RD_CTRL);
+ val16 |= BIT(12);
+ rtl_write_word(rtlpriv, REG_RD_CTRL, val16);
+
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0);
+
+ /* usb suspend idle time count for bitfile0927 */
+ val8 = rtl_read_byte(rtlpriv, 0xfe56);
+ val8 |= BIT(0) | BIT(1);
+ rtl_write_byte(rtlpriv, 0xfe56, val8);
+
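+	/* Early mode packs several frames into one TX aggregate (see
+	 * the earlymode fields in trx.c); 0x1f presumably enables it
+	 * for the five TX queues.
+	 */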
+ if (rtlhal->earlymode_enable) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "EarlyMode Enabled!!!\n");
+
+ val8 = rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL);
+ val8 |= 0x1f;
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, val8);
+
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL + 3, 0x80);
+
+ val8 = rtl_read_byte(rtlpriv, 0x605);
+ val8 |= 0x40;
+ rtl_write_byte(rtlpriv, 0x605, val8);
+ } else {
+ rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0);
+ }
+
+ rtl92du_phy_bb_config(hw);
+
+ rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
+ /* set before initialize RF */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
+
+ /* config RF */
+ rtl92du_phy_rf_config(hw);
+
+ /* set default value after initialize RF */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0);
+
+ /* After load BB, RF params, we need to do more for 92D. */
+ rtl92du_update_bbrf_configuration(hw);
+
+ rtlphy->rfreg_chnlval[0] =
+ rtl_get_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK);
+ rtlphy->rfreg_chnlval[1] =
+ rtl_get_rfreg(hw, RF90_PATH_B, RF_CHNLBW, RFREG_OFFSET_MASK);
+
+ /*---- Set CCK and OFDM Block "ON"----*/
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
+ rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
+
+ /* reset hw sec */
+ rtl_cam_reset_all_entry(hw);
+ rtl92d_enable_hw_security_config(hw);
+
+ rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
+
+ /* schmitt trigger, improve tx evm for 92du */
+ val8 = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL);
+ val8 |= BIT(1);
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, val8);
+
+ /* Disable bar */
+ rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0xffff);
+
+ /* Nav limit */
+ rtl_write_byte(rtlpriv, REG_NAV_CTRL + 2, 0);
+ rtl_write_byte(rtlpriv, ROFDM0_XATXAFE + 3, 0x50);
+
+ /* Read EEPROM TX power index and PHY_REG_PG.txt to capture correct
+ * TX power index for different rate set.
+ */
+ rtl92d_phy_get_hw_reg_originalvalue(hw);
+
+ ppsc->rfpwr_state = ERFON;
+
+ /* do IQK for 2.4G for better scan result */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ rtl92du_phy_iq_calibrate(hw);
+
+ rtl92du_phy_lc_calibrate(hw, IS_92D_SINGLEPHY(rtlhal->version));
+
+ rtl92du_phy_init_pa_bias(hw);
+
+ mutex_unlock(rtlpriv->mutex_for_hw_init);
+
+ rtl92du_dm_init(hw);
+
+ /* For 2 PORT TSF SYNC */
+ rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1818);
+ rtlusb->reg_bcn_ctrl_val = 0x18;
+
+ udelay(500);
+
+ if (rtlhal->macphymode != DUALMAC_DUALPHY) {
+ rtl_write_dword(rtlpriv, RFPGA1_TXINFO,
+ rtl_read_dword(rtlpriv, RFPGA1_TXINFO) & ~BIT(30));
+
+ rtl_write_dword(rtlpriv, RFPGA0_TXGAINSTAGE,
+ rtl_read_dword(rtlpriv, RFPGA0_TXGAINSTAGE) & ~BIT(31));
+
+ rtl_write_dword(rtlpriv, ROFDM0_XBTXAFE, 0xa0e40000);
+ }
+
+ val32 = rtl_read_dword(rtlpriv, REG_FWHW_TXQ_CTRL);
+ val32 |= BIT(12);
+ rtl_write_dword(rtlpriv, REG_FWHW_TXQ_CTRL, val32);
+
+ return err;
+}
+
+static int _rtl92du_set_media_status(struct ieee80211_hw *hw,
+ enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
+ u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
+
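+	/* The low two bits of MSR select the network type; clear them
+	 * here and OR in the new type below.
+	 */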
+ bt_msr &= 0xfc;
+
+ if (type == NL80211_IFTYPE_UNSPECIFIED ||
+ type == NL80211_IFTYPE_STATION) {
+ rtl92d_stop_tx_beacon(hw);
+ _rtl92du_enable_bcn_sub_func(hw);
+ } else if (type == NL80211_IFTYPE_ADHOC ||
+ type == NL80211_IFTYPE_AP) {
+ rtl92d_resume_tx_beacon(hw);
+ _rtl92du_disable_bcn_sub_func(hw);
+ } else {
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+ "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n",
+ type);
+ }
+
+ switch (type) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ bt_msr |= MSR_NOLINK;
+ ledaction = LED_CTL_LINK;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to NO LINK!\n");
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ bt_msr |= MSR_ADHOC;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to Ad Hoc!\n");
+ break;
+ case NL80211_IFTYPE_STATION:
+ bt_msr |= MSR_INFRA;
+ ledaction = LED_CTL_LINK;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to STA!\n");
+ break;
+ case NL80211_IFTYPE_AP:
+ bt_msr |= MSR_AP;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Set Network type to AP!\n");
+ break;
+ default:
+ pr_err("Network type %d not supported!\n", type);
+ return 1;
+ }
+ rtl_write_byte(rtlpriv, MSR, bt_msr);
+
+ rtlpriv->cfg->ops->led_control(hw, ledaction);
+
+ if ((bt_msr & MSR_MASK) == MSR_AP)
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
+ else
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+
+ return 0;
+}
+
+void rtl92du_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 reg_rcr;
+
+ if (rtlpriv->psc.rfpwr_state != ERFON)
+ return;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
+ if (check_bssid) {
+ reg_rcr |= RCR_CBSSID_DATA | RCR_CBSSID_BCN;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)&reg_rcr);
+ _rtl92du_set_bcn_ctrl_reg(hw, 0, DIS_TSF_UDT);
+	} else {
+ reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ _rtl92du_set_bcn_ctrl_reg(hw, DIS_TSF_UDT, 0);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)&reg_rcr);
+ }
+}
+
+int rtl92du_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (_rtl92du_set_media_status(hw, type))
+ return -EOPNOTSUPP;
+
+ /* check bssid */
+ if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
+ if (type != NL80211_IFTYPE_AP)
+ rtl92du_set_check_bssid(hw, true);
+ } else {
+ rtl92du_set_check_bssid(hw, false);
+ }
+
+ return 0;
+}
+
+/* Do IQK or reload the IQK settings.
+ * The Windows driver only calls rtl92d_phy_reload_iqk_setting when setting
+ * the channel, but the timing requirements there are very strict, so we
+ * also do the IQK here.
+ */
+void rtl92du_linked_set_reg(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 channel = rtlphy->current_channel;
+ u8 indexforchannel;
+
+ indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
+ if (!rtlphy->iqk_matrix[indexforchannel].iqk_done) {
+ rtl_dbg(rtlpriv, COMP_SCAN | COMP_INIT, DBG_DMESG,
+ "Do IQK for channel:%d\n", channel);
+ rtl92du_phy_iq_calibrate(hw);
+ }
+}
+
+void rtl92du_enable_interrupt(struct ieee80211_hw *hw)
+{
+ /* Nothing to do. */
+}
+
+void rtl92du_disable_interrupt(struct ieee80211_hw *hw)
+{
+ /* Nothing to do. */
+}
+
+static void _rtl92du_poweroff_adapter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 retry = 100;
+ u8 u1b_tmp;
+ u16 val16;
+ u32 val32;
+
+ rtl_write_byte(rtlpriv, REG_LDOA15_CTRL, 0x04);
+
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
+
+	/* If the FW is running from RAM code, reset the 8051. */
+ if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & MCUFWDL_RDY) {
+ rtl_write_byte(rtlpriv, REG_FSIMR, 0);
+
+		/* Disable the other HRCV interrupts so they do not interfere with the 8051 reset. */
+ rtl_write_byte(rtlpriv, REG_FWIMR, 0x20);
+
+ /* Close mask to prevent incorrect FW write operation. */
+ rtl_write_byte(rtlpriv, REG_FTIMR, 0);
+
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+
+		/* Writing 0x20 to (REG_HMETFR + 3) resets the 8051. */
+ rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
+ val16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ while (val16 & FEN_CPUEN) {
+ retry--;
+ if (retry == 0)
+ break;
+ udelay(50);
+ val16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ }
+
+ if (retry == 0) {
+ rtl_write_byte(rtlpriv, REG_FWIMR, 0);
+
+			/* If the 8051 reset fails, reset the MAC directly. */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x50);
+
+ mdelay(10);
+ }
+ }
+
+ /* reset MCU, MAC register, DCORE */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x54);
+
+ /* reset MCU ready status */
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
+
+ /* Pull GPIO PIN to balance level and LED control */
+
+ /* Disable GPIO[7:0] */
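+	/* Assumed REG_GPIO_PIN_CTRL byte layout (as on other rtlwifi
+	 * chips): [0] input level, [1] output value, [2] output enable,
+	 * [3] pinmux. Latch the current input level as the output value
+	 * and switch GPIO[7:0] to output so the pins hold their level.
+	 */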
+ rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL + 2, 0x0000);
+ val32 = rtl_read_dword(rtlpriv, REG_GPIO_PIN_CTRL);
+ u32p_replace_bits(&val32, val32 & 0xff, 0x0000ff00);
+ u32p_replace_bits(&val32, 0xff, 0x00ff0000);
+ rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, val32);
+
+ /* Disable GPIO[10:8] */
+ rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, 0);
+ val16 = rtl_read_word(rtlpriv, REG_GPIO_IO_SEL);
+ u16p_replace_bits(&val16, val16 & 0xf, 0x00f0);
+ u16p_replace_bits(&val16, 0xf, 0x0780);
+ rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, val16);
+
+ /* Disable LED 0, 1, and 2 */
+ rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8888);
+ rtl_write_byte(rtlpriv, REG_LEDCFG2, 0x88);
+
+ /* Disable analog sequence */
+
+ /* enter PFM mode */
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
+
+ rtl_write_word(rtlpriv, REG_APS_FSMCO,
+ APDM_HOST | AFSM_HSUS | PFM_ALDN);
+
+ /* lock ISO/CLK/Power control register */
+ rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "In PowerOff,reg0x%x=%X\n",
+ REG_SPS0_CTRL, rtl_read_byte(rtlpriv, REG_SPS0_CTRL));
+
+	/* 0x17[7]: 1'b1 = power off in progress, 1'b0 = power off done */
+ if (rtlpriv->rtlhal.macphymode != SINGLEMAC_SINGLEPHY) {
+ mutex_lock(rtlpriv->mutex_for_power_on_off);
+ u1b_tmp = rtl_read_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS);
+ u1b_tmp &= ~BIT(7);
+ rtl_write_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS, u1b_tmp);
+ mutex_unlock(rtlpriv->mutex_for_power_on_off);
+ }
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<=======\n");
+}
+
+void rtl92du_card_disable(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ enum nl80211_iftype opmode;
+ u32 val32;
+ u16 val16;
+ u8 val8;
+
+ mac->link_state = MAC80211_NOLINK;
+ opmode = NL80211_IFTYPE_UNSPECIFIED;
+ _rtl92du_set_media_status(hw, opmode);
+
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ /* Power sequence for each MAC. */
+ /* a. stop tx DMA */
+ /* b. close RF */
+ /* c. clear rx buf */
+ /* d. stop rx DMA */
+ /* e. reset MAC */
+
+ val16 = rtl_read_word(rtlpriv, REG_GPIO_MUXCFG);
+ val16 &= ~BIT(12);
+ rtl_write_word(rtlpriv, REG_GPIO_MUXCFG, val16);
+
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xff);
+ udelay(500);
+ rtl_write_byte(rtlpriv, REG_CR, 0);
+
+ /* RF OFF sequence */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x00);
+
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+
+ val8 = FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTN;
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, val8);
+
+	/* MAC0 cannot perform a global reset; MAC1 can. */
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY ||
+ rtlhal->interfaceindex == 1) {
+		/* Gate the clock before the BB reset. */
+ val32 = rtl_read_dword(rtlpriv, RFPGA0_XCD_RFPARAMETER);
+ val32 |= BIT(31);
+ rtl_write_dword(rtlpriv, RFPGA0_XCD_RFPARAMETER, val32);
+
+ val8 &= ~FEN_BB_GLB_RSTN;
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, val8);
+ }
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==> Do power off.......\n");
+ if (!rtl92du_phy_check_poweroff(hw))
+ return;
+
+ _rtl92du_poweroff_adapter(hw);
+}
+
+void rtl92du_set_beacon_related_registers(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ u16 bcn_interval, atim_window;
+
+ bcn_interval = mac->beacon_interval;
+ atim_window = 2;
+ rtl92du_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x20);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x30);
+ else
+ rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x20);
+ rtl_write_byte(rtlpriv, 0x606, 0x30);
+}
+
+void rtl92du_set_beacon_interval(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
+ u16 bcn_interval = mac->beacon_interval;
+
+ rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG,
+ "beacon_interval:%d\n", bcn_interval);
+ rtl92du_disable_interrupt(hw);
+ rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
+ rtl92du_enable_interrupt(hw);
+}
+
+void rtl92du_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr)
+{
+ /* Nothing to do here. */
+}
+
+void rtl92du_read_chip_version(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* Chip version reading is done in rtl92d_read_eeprom_info. */
+
+ rtlpriv->rtlhal.hw_type = HARDWARE_TYPE_RTL8192DU;
+}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h
new file mode 100644
index 000000000000..80ed00c90c16
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_HW_H__
+#define __RTL92DU_HW_H__
+
+void rtl92du_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92du_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
+void rtl92du_read_chip_version(struct ieee80211_hw *hw);
+int rtl92du_hw_init(struct ieee80211_hw *hw);
+void rtl92du_card_disable(struct ieee80211_hw *hw);
+void rtl92du_enable_interrupt(struct ieee80211_hw *hw);
+void rtl92du_disable_interrupt(struct ieee80211_hw *hw);
+int rtl92du_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
+void rtl92du_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
+void rtl92du_set_beacon_related_registers(struct ieee80211_hw *hw);
+void rtl92du_set_beacon_interval(struct ieee80211_hw *hw);
+void rtl92du_update_interrupt_mask(struct ieee80211_hw *hw,
+ u32 add_msr, u32 rm_msr);
+void rtl92du_linked_set_reg(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c
new file mode 100644
index 000000000000..6c12dfbd6367
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "led.h"
+
+void rtl92du_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction)
+{
+ /* The hardware has control. */
+}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h
new file mode 100644
index 000000000000..d7ebc8afcc7b
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_LED_H__
+#define __RTL92DU_LED_H__
+
+void rtl92du_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c
new file mode 100644
index 000000000000..289ec71ce3e5
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c
@@ -0,0 +1,3123 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../ps.h"
+#include "../core.h"
+#include "../efuse.h"
+#include "../usb.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/phy_common.h"
+#include "../rtl8192d/rf_common.h"
+#include "phy.h"
+#include "rf.h"
+#include "table.h"
+
+#define MAX_RF_IMR_INDEX 12
+#define MAX_RF_IMR_INDEX_NORMAL 13
+#define RF_REG_NUM_FOR_C_CUT_5G 6
+#define RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA 7
+#define RF_REG_NUM_FOR_C_CUT_2G 5
+#define RF_CHNL_NUM_5G 19
+#define RF_CHNL_NUM_5G_40M 17
+#define CV_CURVE_CNT 64
+
+static const u32 rf_reg_for_5g_swchnl_normal[MAX_RF_IMR_INDEX_NORMAL] = {
+ 0, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x0
+};
+
+static const u8 rf_reg_for_c_cut_5g[RF_REG_NUM_FOR_C_CUT_5G] = {
+ RF_SYN_G1, RF_SYN_G2, RF_SYN_G3, RF_SYN_G4, RF_SYN_G5, RF_SYN_G6
+};
+
+static const u8 rf_reg_for_c_cut_2g[RF_REG_NUM_FOR_C_CUT_2G] = {
+ RF_SYN_G1, RF_SYN_G2, RF_SYN_G3, RF_SYN_G7, RF_SYN_G8
+};
+
+static const u8 rf_for_c_cut_5g_internal_pa[RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA] = {
+ 0x0B, 0x48, 0x49, 0x4B, 0x03, 0x04, 0x0E
+};
+
+static const u32 rf_reg_mask_for_c_cut_2g[RF_REG_NUM_FOR_C_CUT_2G] = {
+ BIT(19) | BIT(18) | BIT(17) | BIT(14) | BIT(1),
+ BIT(10) | BIT(9),
+ BIT(18) | BIT(17) | BIT(16) | BIT(1),
+ BIT(2) | BIT(1),
+ BIT(15) | BIT(14) | BIT(13) | BIT(12) | BIT(11)
+};
+
+static const u8 rf_chnl_5g[RF_CHNL_NUM_5G] = {
+ 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108,
+ 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 rf_chnl_5g_40m[RF_CHNL_NUM_5G_40M] = {
+ 38, 42, 46, 50, 54, 58, 62, 102, 106, 110, 114,
+ 118, 122, 126, 130, 134, 138
+};
+
+static const u32 rf_reg_pram_c_5g[5][RF_REG_NUM_FOR_C_CUT_5G] = {
+ {0xE43BE, 0xFC638, 0x77C0A, 0xDE471, 0xd7110, 0x8EB04},
+ {0xE43BE, 0xFC078, 0xF7C1A, 0xE0C71, 0xD7550, 0xAEB04},
+ {0xE43BF, 0xFF038, 0xF7C0A, 0xDE471, 0xE5550, 0xAEB04},
+ {0xE43BF, 0xFF079, 0xF7C1A, 0xDE471, 0xE5550, 0xAEB04},
+ {0xE43BF, 0xFF038, 0xF7C1A, 0xDE471, 0xd7550, 0xAEB04}
+};
+
+static const u32 rf_reg_param_for_c_cut_2g[3][RF_REG_NUM_FOR_C_CUT_2G] = {
+ {0x643BC, 0xFC038, 0x77C1A, 0x41289, 0x01840},
+ {0x643BC, 0xFC038, 0x07C1A, 0x41289, 0x01840},
+ {0x243BC, 0xFC438, 0x07C1A, 0x4128B, 0x0FC41}
+};
+
+static const u32 rf_syn_g4_for_c_cut_2g = 0xD1C31 & 0x7FF;
+
+static const u32 rf_pram_c_5g_int_pa[3][RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA] = {
+ {0x01a00, 0x40443, 0x00eb5, 0x89bec, 0x94a12, 0x94a12, 0x94a12},
+ {0x01800, 0xc0443, 0x00730, 0x896ee, 0x94a52, 0x94a52, 0x94a52},
+ {0x01800, 0xc0443, 0x00730, 0x896ee, 0x94a12, 0x94a12, 0x94a12}
+};
+
+/* Indexed by [channel group][RF register]; see the per-group comments. */
+static const u32 rf_imr_param_normal[3][MAX_RF_IMR_INDEX_NORMAL] = {
+ /* channels 1-14. */
+ {
+ 0x70000, 0x00ff0, 0x4400f, 0x00ff0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x64888, 0xe266c, 0x00090, 0x22fff
+ },
+ /* channels 36-64 */
+ {
+ 0x70000, 0x22880, 0x4470f, 0x55880, 0x00070, 0x88000,
+ 0x0, 0x88080, 0x70000, 0x64a82, 0xe466c, 0x00090,
+ 0x32c9a
+ },
+ /* channels 100-165 */
+ {
+ 0x70000, 0x44880, 0x4477f, 0x77880, 0x00070, 0x88000,
+ 0x0, 0x880b0, 0x0, 0x64b82, 0xe466c, 0x00090, 0x32c9a
+ }
+};
+
+static const u32 targetchnl_5g[TARGET_CHNL_NUM_5G] = {
+ 25141, 25116, 25091, 25066, 25041,
+ 25016, 24991, 24966, 24941, 24917,
+ 24892, 24867, 24843, 24818, 24794,
+ 24770, 24765, 24721, 24697, 24672,
+ 24648, 24624, 24600, 24576, 24552,
+ 24528, 24504, 24480, 24457, 24433,
+ 24409, 24385, 24362, 24338, 24315,
+ 24291, 24268, 24245, 24221, 24198,
+ 24175, 24151, 24128, 24105, 24082,
+ 24059, 24036, 24013, 23990, 23967,
+ 23945, 23922, 23899, 23876, 23854,
+ 23831, 23809, 23786, 23764, 23741,
+ 23719, 23697, 23674, 23652, 23630,
+ 23608, 23586, 23564, 23541, 23519,
+ 23498, 23476, 23454, 23432, 23410,
+ 23388, 23367, 23345, 23323, 23302,
+ 23280, 23259, 23237, 23216, 23194,
+ 23173, 23152, 23130, 23109, 23088,
+ 23067, 23046, 23025, 23003, 22982,
+ 22962, 22941, 22920, 22899, 22878,
+ 22857, 22837, 22816, 22795, 22775,
+ 22754, 22733, 22713, 22692, 22672,
+ 22652, 22631, 22611, 22591, 22570,
+ 22550, 22530, 22510, 22490, 22469,
+ 22449, 22429, 22409, 22390, 22370,
+ 22350, 22336, 22310, 22290, 22271,
+ 22251, 22231, 22212, 22192, 22173,
+ 22153, 22134, 22114, 22095, 22075,
+ 22056, 22037, 22017, 21998, 21979,
+ 21960, 21941, 21921, 21902, 21883,
+ 21864, 21845, 21826, 21807, 21789,
+ 21770, 21751, 21732, 21713, 21695,
+ 21676, 21657, 21639, 21620, 21602,
+ 21583, 21565, 21546, 21528, 21509,
+ 21491, 21473, 21454, 21436, 21418,
+ 21400, 21381, 21363, 21345, 21327,
+ 21309, 21291, 21273, 21255, 21237,
+ 21219, 21201, 21183, 21166, 21148,
+ 21130, 21112, 21095, 21077, 21059,
+ 21042, 21024, 21007, 20989, 20972,
+ 25679, 25653, 25627, 25601, 25575,
+ 25549, 25523, 25497, 25471, 25446,
+ 25420, 25394, 25369, 25343, 25318,
+ 25292, 25267, 25242, 25216, 25191,
+ 25166
+};
+
+/* channel 1~14 */
+static const u32 targetchnl_2g[TARGET_CHNL_NUM_2G] = {
+ 26084, 26030, 25976, 25923, 25869, 25816, 25764,
+ 25711, 25658, 25606, 25554, 25502, 25451, 25328
+};
+
+u32 rtl92du_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u32 returnvalue, originalvalue, bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n",
+ regaddr, bitmask);
+
+ if (rtlhal->during_mac1init_radioa)
+ regaddr |= MAC1_ACCESS_PHY0;
+ else if (rtlhal->during_mac0init_radiob)
+ regaddr |= MAC0_ACCESS_PHY1;
+
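+	/* Example: bitmask 0x00f00000 yields bitshift 20, so bits
+	 * [23:20] of the register are returned right-aligned.
+	 */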
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = calculate_bit_shift(bitmask);
+ returnvalue = (originalvalue & bitmask) >> bitshift;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "BBR MASK=0x%x Addr[0x%x]=0x%x\n",
+ bitmask, regaddr, originalvalue);
+ return returnvalue;
+}
+
+void rtl92du_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u32 originalvalue, bitshift;
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
+
+ if (rtlhal->during_mac1init_radioa)
+ regaddr |= MAC1_ACCESS_PHY0;
+ else if (rtlhal->during_mac0init_radiob)
+ regaddr |= MAC0_ACCESS_PHY1;
+
+ if (bitmask != MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = calculate_bit_shift(bitmask);
+ data = (originalvalue & (~bitmask)) |
+ ((data << bitshift) & bitmask);
+ }
+
+ rtl_write_dword(rtlpriv, regaddr, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
+}
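+
+/* Usage sketch (illustrative values): write 0xf into bits [23:20] of a
+ * BB register:
+ *	rtl92du_phy_set_bb_reg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
+ */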
+
+/* Write one byte at a time to avoid corrupting Reg0x800 on the 92D. */
+static void rtl92du_phy_set_bb_reg_1byte(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 originalvalue, bitshift, offset;
+ u8 value;
+
+	/* The bitmask must lie entirely within a single byte: bit0~bit7,
+	 * bit8~bit15, bit16~bit23, or bit24~bit31.
+	 */
+ bitshift = calculate_bit_shift(bitmask);
+ offset = bitshift / 8;
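+	/* e.g. bitmask 0x00ff0000: bitshift = 16, offset = 2, so only
+	 * byte 2 of the register gets written below.
+	 */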
+
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ data = (originalvalue & (~bitmask)) | ((data << bitshift) & bitmask);
+
+ value = data >> (8 * offset);
+
+ rtl_write_byte(rtlpriv, regaddr + offset, value);
+}
+
+bool rtl92du_phy_mac_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 arraylength;
+ const u32 *ptrarray;
+ u32 i;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
+
+ arraylength = MAC_2T_ARRAYLENGTH;
+ ptrarray = rtl8192du_mac_2tarray;
+
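+	/* The MAC init table is a flat array of {address, value} pairs. */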
+ for (i = 0; i < arraylength; i = i + 2)
+ rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]);
+
+ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY) {
+ /* improve 2-stream TX EVM */
+ /* rtl_write_byte(rtlpriv, 0x14,0x71); */
+ /* AMPDU aggregation number 9 */
+ /* rtl_write_word(rtlpriv, REG_MAX_AGGR_NUM, MAX_AGGR_NUM); */
+ rtl_write_byte(rtlpriv, REG_MAX_AGGR_NUM, 0x0B);
+ } else {
+		/* The 92D needs testing to decide the right number. */
+ rtl_write_byte(rtlpriv, REG_MAX_AGGR_NUM, 0x07);
+ }
+
+ return true;
+}
+
+static bool _rtl92du_phy_config_bb(struct ieee80211_hw *hw, u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u16 phy_reg_arraylen, agctab_arraylen = 0;
+ const u32 *agctab_array_table = NULL;
+ const u32 *phy_regarray_table;
+ int i;
+
+ /* Normal chip, Mac0 use AGC_TAB.txt for 2G and 5G band. */
+ if (rtlhal->interfaceindex == 0) {
+ agctab_arraylen = AGCTAB_ARRAYLENGTH;
+ agctab_array_table = rtl8192du_agctab_array;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:MAC0, Rtl819XAGCTAB_Array\n");
+ } else {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ agctab_arraylen = AGCTAB_2G_ARRAYLENGTH;
+ agctab_array_table = rtl8192du_agctab_2garray;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:MAC1, Rtl819XAGCTAB_2GArray\n");
+ } else {
+ agctab_arraylen = AGCTAB_5G_ARRAYLENGTH;
+ agctab_array_table = rtl8192du_agctab_5garray;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> phy:MAC1, Rtl819XAGCTAB_5GArray\n");
+ }
+ }
+ phy_reg_arraylen = PHY_REG_2T_ARRAYLENGTH;
+ phy_regarray_table = rtl8192du_phy_reg_2tarray;
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+		" ===> phy:Rtl819XPHY_REG_2TArray\n");
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_reg_arraylen; i = i + 2) {
+ rtl_addr_delay(phy_regarray_table[i]);
+ rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
+ phy_regarray_table[i + 1]);
+ udelay(1);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+				"phy_regarray_table[i] is %x, phy_regarray_table[i + 1] is %x\n",
+ phy_regarray_table[i],
+ phy_regarray_table[i + 1]);
+ }
+ } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
+ for (i = 0; i < agctab_arraylen; i = i + 2) {
+ rtl_set_bbreg(hw, agctab_array_table[i],
+ MASKDWORD, agctab_array_table[i + 1]);
+
+ /* Add 1us delay between BB/RF register setting. */
+ udelay(1);
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "AGC table %u %u\n",
+ agctab_array_table[i],
+ agctab_array_table[i + 1]);
+ }
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "Normal Chip, loaded AGC table\n");
+ }
+ return true;
+}
+
+static bool _rtl92du_phy_config_bb_pg(struct ieee80211_hw *hw, u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ const u32 *phy_regarray_table_pg;
+ u16 phy_regarray_pg_len;
+ int i;
+
+ phy_regarray_pg_len = PHY_REG_ARRAY_PG_LENGTH;
+ phy_regarray_table_pg = rtl8192du_phy_reg_array_pg;
+
+ if (configtype == BASEBAND_CONFIG_PHY_REG) {
+ for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
+ rtl_addr_delay(phy_regarray_table_pg[i]);
+ rtl92d_store_pwrindex_diffrate_offset(hw,
+ phy_regarray_table_pg[i],
+ phy_regarray_table_pg[i + 1],
+ phy_regarray_table_pg[i + 2]);
+ }
+ } else {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "configtype != BaseBand_Config_PHY_REG\n");
+ }
+ return true;
+}
+
+static bool _rtl92du_phy_bb_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ bool ret;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n");
+ ret = _rtl92du_phy_config_bb(hw, BASEBAND_CONFIG_PHY_REG);
+ if (!ret) {
+ pr_err("Write BB Reg Fail!!\n");
+ return false;
+ }
+
+	if (!rtlefuse->autoload_failflag) {
+		rtlphy->pwrgroup_cnt = 0;
+		ret = _rtl92du_phy_config_bb_pg(hw, BASEBAND_CONFIG_PHY_REG);
+		if (!ret) {
+			pr_err("BB_PG Reg Fail!!\n");
+			return false;
+		}
+	}
+
+ ret = _rtl92du_phy_config_bb(hw, BASEBAND_CONFIG_AGC_TAB);
+ if (!ret) {
+ pr_err("AGC Table Fail\n");
+ return false;
+ }
+
+ rtlphy->cck_high_power = (bool)rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER2,
+ 0x200);
+
+ return true;
+}
+
+bool rtl92du_phy_bb_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ bool rtstatus;
+ u32 regvaldw;
+ u16 regval;
+ u8 value;
+
+ rtl92d_phy_init_bb_rf_register_definition(hw);
+
+ regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
+ regval | BIT(13) | BIT(0) | BIT(1));
+
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
+
+	/* 0x1f bits 7/6 indicate that the MAC0/MAC1 driver is ready. */
+ value = rtl_read_byte(rtlpriv, REG_RF_CTRL);
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, value | RF_EN | RF_RSTB |
+ RF_SDMRSTB);
+
+ value = FEN_BB_GLB_RSTN | FEN_BBRSTB;
+ if (rtlhal->interface == INTF_PCI)
+ value |= FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE;
+ else if (rtlhal->interface == INTF_USB)
+ value |= FEN_USBA | FEN_USBD;
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value);
+
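+	/* RFPGA0_XCD_RFPARAMETER bit 31 gates the BB clock; clear it here,
+	 * it is set again on the way down in rtl92du_card_disable().
+	 */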
+ regvaldw = rtl_read_dword(rtlpriv, RFPGA0_XCD_RFPARAMETER);
+ regvaldw &= ~BIT(31);
+ rtl_write_dword(rtlpriv, RFPGA0_XCD_RFPARAMETER, regvaldw);
+
+	/* Fix MAC loopback mode failure. */
+ rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
+ rtl_write_byte(rtlpriv, 0x15, 0xe9);
+
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
+ if (!(IS_92D_SINGLEPHY(rtlpriv->rtlhal.version)) &&
+ rtlhal->interface == INTF_PCI) {
+ regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
+ rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
+ }
+
+ rtstatus = _rtl92du_phy_bb_config(hw);
+
+ /* Crystal calibration */
+ rtl_set_bbreg(hw, REG_AFE_XTAL_CTRL, 0xf0,
+ rtlpriv->efuse.crystalcap & 0x0f);
+ rtl_set_bbreg(hw, REG_AFE_PLL_CTRL, 0xf0000000,
+ (rtlpriv->efuse.crystalcap & 0xf0) >> 4);
+
+ return rtstatus;
+}
+
+bool rtl92du_phy_rf_config(struct ieee80211_hw *hw)
+{
+ return rtl92du_phy_rf6052_config(hw);
+}
+
+bool rtl92du_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum rf_content content,
+ enum radio_path rfpath)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 radioa_arraylen, radiob_arraylen;
+ const u32 *radioa_array_table;
+ const u32 *radiob_array_table;
+ int i;
+
+ radioa_arraylen = RADIOA_2T_ARRAYLENGTH;
+ radioa_array_table = rtl8192du_radioa_2tarray;
+ radiob_arraylen = RADIOB_2T_ARRAYLENGTH;
+ radiob_array_table = rtl8192du_radiob_2tarray;
+ if (rtlpriv->efuse.internal_pa_5g[0]) {
+ radioa_arraylen = RADIOA_2T_INT_PA_ARRAYLENGTH;
+ radioa_array_table = rtl8192du_radioa_2t_int_paarray;
+ }
+ if (rtlpriv->efuse.internal_pa_5g[1]) {
+ radiob_arraylen = RADIOB_2T_INT_PA_ARRAYLENGTH;
+ radiob_array_table = rtl8192du_radiob_2t_int_paarray;
+ }
+	rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+		"PHY_ConfigRFWithHeaderFile() Radio_A:Rtl819XRadioA_2TArray\n");
+	rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+		"PHY_ConfigRFWithHeaderFile() Radio_B:Rtl819XRadioB_2TArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
+
+	/* This only happens in DMDP: MAC0 starts on 2.4G and MAC1 starts
+	 * on 5G, so MAC0 (or MAC1) has to configure path A of both PHY0
+	 * and PHY1.
+	 */
+ if (content == radiob_txt && rfpath == RF90_PATH_A) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+			" ===> although Path A, we load radiob.txt\n");
+ radioa_arraylen = radiob_arraylen;
+ radioa_array_table = radiob_array_table;
+ }
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ for (i = 0; i < radioa_arraylen; i = i + 2) {
+ rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
+ }
+ break;
+ case RF90_PATH_B:
+ for (i = 0; i < radiob_arraylen; i = i + 2) {
+ rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
+ RFREG_OFFSET_MASK,
+ radiob_array_table[i + 1]);
+ }
+ break;
+ case RF90_PATH_C:
+ case RF90_PATH_D:
+ pr_err("switch case %#x not processed\n", rfpath);
+ break;
+ }
+
+ return true;
+}
+
+void rtl92du_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ u8 reg_bw_opmode;
+ u8 reg_prsr_rsc;
+
+ if (rtlphy->set_bwmode_inprogress)
+ return;
+
+ if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
+ rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING,
+			"Driver is sleeping or unloading; abort\n");
+ return;
+ }
+
+ rtlphy->set_bwmode_inprogress = true;
+
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n",
+ rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
+ "20MHz" : "40MHz");
+
+ reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
+ reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
+
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ reg_bw_opmode |= BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ reg_bw_opmode &= ~BW_OPMODE_20MHZ;
+ rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
+
+ reg_prsr_rsc = (reg_prsr_rsc & 0x90) |
+ (mac->cur_40_prime_sc << 5);
+ rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
+ break;
+ default:
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
+ break;
+ }
+
+ switch (rtlphy->current_chan_bw) {
+ case HT_CHANNEL_WIDTH_20:
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
+ /* SET BIT10 BIT11 for receive cck */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10) | BIT(11), 3);
+ break;
+ case HT_CHANNEL_WIDTH_20_40:
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
+ rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
+ /* Set Control channel to upper or lower.
+ * These settings are required only for 40MHz
+ */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCKSIDEBAND,
+ mac->cur_40_prime_sc >> 1);
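+		/* ROFDM1_LSTF bits [11:10] (mask 0xC00) select the primary
+		 * 20 MHz sub-channel within the 40 MHz channel.
+		 */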
+ rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
+ /* SET BIT10 BIT11 for receive cck */
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2,
+ BIT(10) | BIT(11), 0);
+ rtl_set_bbreg(hw, 0x818, BIT(26) | BIT(27),
+ mac->cur_40_prime_sc ==
+ HAL_PRIME_CHNL_OFFSET_LOWER ? 2 : 1);
+ break;
+ default:
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
+ break;
+ }
+
+ rtl92d_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+
+ rtlphy->set_bwmode_inprogress = false;
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+}
+
+static void _rtl92du_phy_stop_trx_before_changeband(struct ieee80211_hw *hw)
+{
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BCCKEN | BOFDMEN, 0);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x00);
+ rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x0);
+}
+
+static void rtl92du_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u16 basic_rates;
+ u32 reg_mac;
+ u8 value8;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
+ rtlhal->bandset = band;
+ rtlhal->current_bandtype = band;
+ if (IS_92D_SINGLEPHY(rtlhal->version))
+ rtlhal->bandset = BAND_ON_BOTH;
+
+ /* stop RX/Tx */
+ _rtl92du_phy_stop_trx_before_changeband(hw);
+
+ /* reconfig BB/RF according to wireless mode */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ /* BB & RF Config */
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "====>2.4G\n");
+ else
+ /* 5G band */
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "====>5G\n");
+
+ if (rtlhal->interfaceindex == 1)
+ _rtl92du_phy_config_bb(hw, BASEBAND_CONFIG_AGC_TAB);
+
+ rtl92du_update_bbrf_configuration(hw);
+
+ basic_rates = RRSR_6M | RRSR_12M | RRSR_24M;
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ basic_rates |= RRSR_1M | RRSR_2M | RRSR_5_5M | RRSR_11M;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *)&basic_rates);
+
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BCCKEN | BOFDMEN, 0x3);
+
+ /* 20M BW. */
+ /* rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1); */
+ rtlhal->reloadtxpowerindex = true;
+
+ reg_mac = rtlhal->interfaceindex == 0 ? REG_MAC0 : REG_MAC1;
+
+	/* Let the FW know the band status: 0x81[1]/0x53[1] = 0: 5G, 1: 2G. */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ value8 = rtl_read_byte(rtlpriv, reg_mac);
+ value8 |= BIT(1);
+ rtl_write_byte(rtlpriv, reg_mac, value8);
+ } else {
+ value8 = rtl_read_byte(rtlpriv, reg_mac);
+ value8 &= ~BIT(1);
+ rtl_write_byte(rtlpriv, reg_mac, value8);
+ }
+ mdelay(1);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<==Switch Band OK\n");
+}
+
+static void _rtl92du_phy_reload_imr_setting(struct ieee80211_hw *hw,
+ u8 channel, u8 rfpath)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 group, i;
+
+ if (rtlusb->udev->speed != USB_SPEED_HIGH)
+ return;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>path %d\n", rfpath);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD,
+ BOFDMEN | BCCKEN, 0);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
+
+ /* fc area 0xd2c */
+ if (channel >= 149)
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(13) |
+ BIT(14), 2);
+ else
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(13) |
+ BIT(14), 1);
+
+		/* Group 0 is reserved for channels 1-14. */
+ group = channel <= 64 ? 1 : 2;
+ for (i = 0; i < MAX_RF_IMR_INDEX_NORMAL; i++)
+ rtl_set_rfreg(hw, (enum radio_path)rfpath,
+ rf_reg_for_5g_swchnl_normal[i],
+ RFREG_OFFSET_MASK,
+ rf_imr_param_normal[group][i]);
+
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0);
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD,
+ BOFDMEN | BCCKEN, 3);
+ } else {
+ /* G band. */
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+			"Load RF IMR parameters for G band. IMR already set: %d\n",
+ rtlpriv->rtlhal.load_imrandiqk_setting_for2g);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
+
+ if (!rtlpriv->rtlhal.load_imrandiqk_setting_for2g) {
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Load RF IMR parameters for G band. %d\n",
+ rfpath);
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD,
+ BOFDMEN | BCCKEN, 0);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
+ 0x00f00000, 0xf);
+
+ for (i = 0; i < MAX_RF_IMR_INDEX_NORMAL; i++) {
+ rtl_set_rfreg(hw, (enum radio_path)rfpath,
+ rf_reg_for_5g_swchnl_normal[i],
+ RFREG_OFFSET_MASK,
+ rf_imr_param_normal[0][i]);
+ }
+
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
+ 0x00f00000, 0);
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD,
+ BOFDMEN | BCCKEN, 3);
+ }
+ }
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+}
+
+static void _rtl92du_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 path = rtlhal->current_bandtype == BAND_ON_5G ? RF90_PATH_A
+ : RF90_PATH_B;
+ u32 u4regvalue, mask = 0x1C000, value = 0, u4tmp, u4tmp2;
+ bool need_pwr_down = false, internal_pa = false;
+ u32 regb30 = rtl_get_bbreg(hw, 0xb30, BIT(27));
+ u8 index = 0, i, rfpath;
+
+ if (rtlusb->udev->speed != USB_SPEED_HIGH)
+ return;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>\n");
+ /* config path A for 5G */
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
+ u4tmp = rtlpriv->curveindex_5g[channel - 1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 1 set RF-A, 5G, 0x28 = 0x%x !!\n", u4tmp);
+
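+		/* u4tmp is the LC calibration curve index for this channel;
+		 * it is packed into RF_SYN_G4 above bit 11 in the loop below.
+		 */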
+ for (i = 0; i < RF_CHNL_NUM_5G; i++) {
+ if (channel == rf_chnl_5g[i] && channel <= 140)
+ index = 0;
+ }
+ for (i = 0; i < RF_CHNL_NUM_5G_40M; i++) {
+ if (channel == rf_chnl_5g_40m[i] && channel <= 140)
+ index = 1;
+ }
+ if (channel == 149 || channel == 155 || channel == 161)
+ index = 2;
+ else if (channel == 151 || channel == 153 || channel == 163 ||
+ channel == 165)
+ index = 3;
+ else if (channel == 157 || channel == 159)
+ index = 4;
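+		/* index selects the rf_reg_pram_c_5g row: 0 = 20 MHz
+		 * channels up to 140, 1 = 40 MHz channels up to 140,
+		 * 2..4 = the channel groups above 140.
+		 */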
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 1) {
+ need_pwr_down = rtl92du_phy_enable_anotherphy(hw, false);
+ rtlhal->during_mac1init_radioa = true;
+			/* Assume this case does not happen. */
+ if (need_pwr_down)
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+
+		/* In DMDP with band = 5G, MAC0 must set PHY1 when regB30[27] = 1. */
+ if (regb30 && rtlhal->interfaceindex == 0) {
+ need_pwr_down = rtl92du_phy_enable_anotherphy(hw, true);
+ rtlhal->during_mac0init_radiob = true;
+ if (need_pwr_down)
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+
+ for (i = 0; i < RF_REG_NUM_FOR_C_CUT_5G; i++) {
+ if (i == 0 && rtlhal->macphymode == DUALMAC_DUALPHY) {
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK, 0xE439D);
+ } else if (rf_reg_for_c_cut_5g[i] == RF_SYN_G4) {
+ u4tmp2 = (rf_reg_pram_c_5g[index][i] &
+ 0x7FF) | (u4tmp << 11);
+ if (channel == 36)
+ u4tmp2 &= ~(BIT(7) | BIT(6));
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK, u4tmp2);
+ } else {
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK,
+ rf_reg_pram_c_5g[index][i]);
+ }
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "offset 0x%x value 0x%x path %d index %d readback 0x%x\n",
+ rf_reg_for_c_cut_5g[i],
+ rf_reg_pram_c_5g[index][i],
+ path, index,
+ rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK));
+ }
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 1) {
+ if (need_pwr_down)
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+
+ rtl92du_phy_powerdown_anotherphy(hw, false);
+ }
+
+ if (regb30 && rtlhal->interfaceindex == 0) {
+ if (need_pwr_down)
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+
+ rtl92du_phy_powerdown_anotherphy(hw, true);
+ }
+
+		if (channel < 149)
+			value = 0x07;
+		else
+			value = 0x02;
+ if (channel >= 36 && channel <= 64)
+ index = 0;
+ else if (channel >= 100 && channel <= 140)
+ index = 1;
+ else
+ index = 2;
+
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 1) /* MAC 1 5G */
+ internal_pa = rtlpriv->efuse.internal_pa_5g[1];
+ else
+ internal_pa =
+ rtlpriv->efuse.internal_pa_5g[rfpath];
+
+ if (internal_pa) {
+ for (i = 0;
+ i < RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA;
+ i++) {
+ if (rf_for_c_cut_5g_internal_pa[i] == 0x03 &&
+ channel >= 36 && channel <= 64)
+ rtl_set_rfreg(hw, rfpath,
+ rf_for_c_cut_5g_internal_pa[i],
+ RFREG_OFFSET_MASK,
+ 0x7bdef);
+ else
+ rtl_set_rfreg(hw, rfpath,
+ rf_for_c_cut_5g_internal_pa[i],
+ RFREG_OFFSET_MASK,
+ rf_pram_c_5g_int_pa[index][i]);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "offset 0x%x value 0x%x path %d index %d\n",
+ rf_for_c_cut_5g_internal_pa[i],
+ rf_pram_c_5g_int_pa[index][i],
+ rfpath, index);
+ }
+ } else {
+ rtl_set_rfreg(hw, (enum radio_path)rfpath, RF_TXPA_AG,
+ mask, value);
+ }
+ }
+ } else if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
+ u4tmp = rtlpriv->curveindex_2g[channel - 1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n", u4tmp);
+
+ if (channel == 1 || channel == 2 || channel == 4 ||
+ channel == 9 || channel == 10 || channel == 11 ||
+ channel == 12)
+ index = 0;
+ else if (channel == 3 || channel == 13 || channel == 14)
+ index = 1;
+ else if (channel >= 5 && channel <= 8)
+ index = 2;
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ path = RF90_PATH_A;
+ if (rtlhal->interfaceindex == 0) {
+ need_pwr_down =
+ rtl92du_phy_enable_anotherphy(hw, true);
+ rtlhal->during_mac0init_radiob = true;
+
+ if (need_pwr_down)
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+
+			/* In DMDP with band = 2G, MAC1 must set PHY0 when regB30[27] = 1. */
+ if (regb30 && rtlhal->interfaceindex == 1) {
+ need_pwr_down =
+ rtl92du_phy_enable_anotherphy(hw, false);
+ rtlhal->during_mac1init_radioa = true;
+
+ if (need_pwr_down)
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+ }
+
+ for (i = 0; i < RF_REG_NUM_FOR_C_CUT_2G; i++) {
+ if (rf_reg_for_c_cut_2g[i] == RF_SYN_G7)
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ RFREG_OFFSET_MASK,
+ rf_reg_param_for_c_cut_2g[index][i] |
+ BIT(17));
+ else
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ RFREG_OFFSET_MASK,
+ rf_reg_param_for_c_cut_2g
+ [index][i]);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+				"offset 0x%x value 0x%x mask 0x%x path %d index %d readback 0x%x\n",
+ rf_reg_for_c_cut_2g[i],
+ rf_reg_param_for_c_cut_2g[index][i],
+ rf_reg_mask_for_c_cut_2g[i], path, index,
+ rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ RFREG_OFFSET_MASK));
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "cosa ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
+ rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
+
+ rtl_set_rfreg(hw, (enum radio_path)path, RF_SYN_G4,
+ RFREG_OFFSET_MASK,
+ rf_syn_g4_for_c_cut_2g | (u4tmp << 11));
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 0) {
+ if (need_pwr_down)
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+
+ rtl92du_phy_powerdown_anotherphy(hw, true);
+ }
+
+ if (regb30 && rtlhal->interfaceindex == 1) {
+ if (need_pwr_down)
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+
+ rtl92du_phy_powerdown_anotherphy(hw, false);
+ }
+ }
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92du_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 regeac, rege94, rege9c, regea4;
+ u8 result = 0;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
+
+ if (rtlhal->interfaceindex == 0) {
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x10008c1f);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x10008c1f);
+ } else {
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x10008c22);
+ }
+ rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD,
+ configpathb ? 0x28160202 : 0x28160502);
+ /* path-B IQK setting */
+ if (configpathb) {
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x10008c22);
+ rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82140102);
+ rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x28160206);
+ }
+
+ /* LO calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x00462911);
+
+ /* One shot, path A LOK & IQK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path A LOK & IQK!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Delay %d ms for One shot, path A LOK & IQK\n",
+ IQK_DELAY_TIME);
+ mdelay(IQK_DELAY_TIME);
+
+ /* Check failed */
+ regeac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
+ rege94 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94);
+ rege9c = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c);
+ regea4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4);
+
+ if (!(regeac & BIT(28)) &&
+ (((rege94 & 0x03FF0000) >> 16) != 0x142) &&
+ (((rege9c & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else /* if Tx not OK, ignore Rx */
+ return result;
+
+ /* if Tx is OK, check whether Rx is OK */
+ if (!(regeac & BIT(27)) &&
+ (((regea4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((regeac & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ else
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A Rx IQK fail!!\n");
+
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92du_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw,
+ bool configpathb)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 TXOKBIT = BIT(28), RXOKBIT = BIT(27);
+ u32 regeac, rege94, rege9c, regea4;
+ u8 timeout = 20, timecount = 0;
+ u8 retrycount = 2;
+ u8 result = 0;
+ u8 i;
+
+ if (rtlhal->interfaceindex == 1) { /* PHY1 */
+ TXOKBIT = BIT(31);
+ RXOKBIT = BIT(30);
+ }
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n");
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82140307);
+ rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x68160960);
+ /* path-B IQK setting */
+ if (configpathb) {
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82110000);
+ rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x68110000);
+ }
+
+ /* LO calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x00462911);
+
+ /* path-A PA on */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x07000f60);
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD, 0x66e60e30);
+
+ for (i = 0; i < retrycount; i++) {
+ /* One shot, path A LOK & IQK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "One shot, path A LOK & IQK!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000);
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Delay %d ms for One shot, path A LOK & IQK.\n",
+			IQK_DELAY_TIME * 10);
+ mdelay(IQK_DELAY_TIME * 10);
+
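+		/* Poll the IQK-done flag and then the Rx power result;
+		 * each poll iteration waits 2 * IQK_DELAY_TIME ms.
+		 */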
+ while (timecount < timeout &&
+ rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, BIT(26)) == 0) {
+ udelay(IQK_DELAY_TIME * 1000 * 2);
+ timecount++;
+ }
+
+ timecount = 0;
+ while (timecount < timeout &&
+ rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, MASK_IQK_RESULT) == 0) {
+ udelay(IQK_DELAY_TIME * 1000 * 2);
+ timecount++;
+ }
+
+ /* Check failed */
+ regeac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
+ rege94 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94);
+ rege9c = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c);
+ regea4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4);
+
+ if (!(regeac & TXOKBIT) &&
+ (((rege94 & 0x03FF0000) >> 16) != 0x142)) {
+ result |= 0x01;
+ } else { /* if Tx not OK, ignore Rx */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A Tx IQK fail!!\n");
+ continue;
+ }
+
+ /* if Tx is OK, check whether Rx is OK */
+ if (!(regeac & RXOKBIT) &&
+ (((regea4 & 0x03FF0000) >> 16) != 0x132)) {
+ result |= 0x02;
+ break;
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A Rx IQK fail!!\n");
+ }
+
+ /* path A PA off */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD,
+ rtlphy->iqk_bb_backup[0]);
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD,
+ rtlphy->iqk_bb_backup[1]);
+
+ if (!(result & 0x01)) /* Tx IQK fail */
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x19008c00);
+
+ if (!(result & 0x02)) { /* Rx IQK fail */
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x19008c00);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A Rx IQK fail!! 0xe34 = %#x\n",
+ rtl_get_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD));
+ }
+
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92du_phy_pathb_iqk(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 regeac, regeb4, regebc, regec4, regecc;
+ u8 result = 0;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path B LOK & IQK!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_CONT, MASKDWORD, 0x00000002);
+ rtl_set_bbreg(hw, RIQK_AGC_CONT, MASKDWORD, 0x00000000);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Delay %d ms for One shot, path B LOK & IQK\n", IQK_DELAY_TIME);
+ mdelay(IQK_DELAY_TIME);
+
+ /* Check failed */
+ regeac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
+ regeb4 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4);
+ regebc = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc);
+ regec4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4);
+ regecc = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_B_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc);
+
+ if (!(regeac & BIT(31)) &&
+ (((regeb4 & 0x03FF0000) >> 16) != 0x142) &&
+ (((regebc & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+
+ if (!(regeac & BIT(30)) &&
+ (((regec4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((regecc & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ else
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B Rx IQK fail!!\n");
+
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92du_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 regeac, regeb4, regebc, regec4, regecc;
+ u8 timeout = 20, timecount = 0;
+ u8 retrycount = 2;
+ u8 result = 0;
+ u8 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-B IQK setting!\n");
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82110000);
+ rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x68110000);
+
+ /* path-B IQK setting */
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82140307);
+ rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x68160960);
+
+ /* LO calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x00462911);
+
+ /* path-B PA on */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x0f600700);
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD, 0x061f0d30);
+
+ for (i = 0; i < retrycount; i++) {
+ /* One shot, path B LOK & IQK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+			"One shot, path B LOK & IQK!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xfa000000);
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+			"Delay %d ms for One shot, path B LOK & IQK.\n", IQK_DELAY_TIME * 10);
+ mdelay(IQK_DELAY_TIME * 10);
+
+ while (timecount < timeout &&
+ rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, BIT(29)) == 0) {
+ udelay(IQK_DELAY_TIME * 1000 * 2);
+ timecount++;
+ }
+
+ timecount = 0;
+ while (timecount < timeout &&
+ rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2, MASK_IQK_RESULT) == 0) {
+ udelay(IQK_DELAY_TIME * 1000 * 2);
+ timecount++;
+ }
+
+ /* Check failed */
+ regeac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
+ regeb4 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4);
+ regebc = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc);
+ regec4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4);
+ regecc = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_B_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc);
+
+ if (!(regeac & BIT(31)) &&
+ (((regeb4 & 0x03FF0000) >> 16) != 0x142))
+ result |= 0x01;
+ else
+ continue;
+
+ if (!(regeac & BIT(30)) &&
+ (((regec4 & 0x03FF0000) >> 16) != 0x132)) {
+ result |= 0x02;
+ break;
+ }
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B Rx IQK fail!!\n");
+ }
+
+ /* path B PA off */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD,
+ rtlphy->iqk_bb_backup[0]);
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD,
+ rtlphy->iqk_bb_backup[2]);
+
+ if (!(result & 0x01))
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x19008c00);
+
+ if (!(result & 0x02)) {
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x19008c00);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B Rx IQK fail!! 0xe54 = %#x\n",
+ rtl_get_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD));
+ }
+
+ return result;
+}
+
+static void _rtl92du_phy_reload_adda_registers(struct ieee80211_hw *hw,
+ const u32 *adda_reg,
+ u32 *adda_backup, u32 regnum)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Reload ADDA power saving parameters !\n");
+ for (i = 0; i < regnum; i++) {
+ /* path-A/B BB to initial gain */
+ if (adda_reg[i] == ROFDM0_XAAGCCORE1 ||
+ adda_reg[i] == ROFDM0_XBAGCCORE1)
+ rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, 0x50);
+
+ rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, adda_backup[i]);
+ }
+}
+
+static void _rtl92du_phy_reload_mac_registers(struct ieee80211_hw *hw,
+ const u32 *macreg, u32 *macbackup)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Reload MAC parameters !\n");
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
+ rtl_write_byte(rtlpriv, macreg[i], (u8)macbackup[i]);
+ rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
+}
+
+static void _rtl92du_phy_patha_standby(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A standby mode!\n");
+
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x0);
+ rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD, 0x00010000);
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x808000);
+}
+
+static void _rtl92du_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 mode;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "BB Switch to %s mode!\n", pi_mode ? "PI" : "SI");
+ mode = pi_mode ? 0x01000100 : 0x01000000;
+ rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, MASKDWORD, mode);
+ rtl_set_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, MASKDWORD, mode);
+}
+
+static void _rtl92du_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8],
+ u8 t, bool is2t)
+{
+ static const u32 adda_reg[IQK_ADDA_REG_NUM] = {
+ RFPGA0_XCD_SWITCHCONTROL, RBLUE_TOOTH, RRX_WAIT_CCA,
+ RTX_CCK_RFON, RTX_CCK_BBON, RTX_OFDM_RFON, RTX_OFDM_BBON,
+ RTX_TO_RX, RTX_TO_TX, RRX_CCK, RRX_OFDM, RRX_WAIT_RIFS,
+ RRX_TO_RX, RSTANDBY, RSLEEP, RPMPD_ANAEN
+ };
+ static const u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+ REG_TXPAUSE, REG_BCN_CTRL, REG_BCN_CTRL_1, REG_GPIO_MUXCFG
+ };
+ static const u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
+ RFPGA0_XAB_RFINTERFACESW, RFPGA0_XA_RFINTERFACEOE,
+ RFPGA0_XB_RFINTERFACEOE, ROFDM0_TRMUXPAR,
+ RFPGA0_XCD_RFINTERFACESW, ROFDM0_TRXPATHENABLE,
+ RFPGA0_RFMOD, RFPGA0_ANALOGPARAMETER4,
+ ROFDM0_XAAGCCORE1, ROFDM0_XBAGCCORE1
+ };
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ const u32 retrycount = 2;
+ u8 patha_ok, pathb_ok;
+ u32 bbvalue;
+ u32 i;
+
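+	/* result[t][0..3] receive the path A Tx/Rx IQK readbacks
+	 * (0xe94/0xe9c/0xea4/0xeac), result[t][4..7] the path B ones
+	 * (0xeb4/0xebc/0xec4/0xecc).
+	 */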
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 2.4G :Start!!!\n");
+ if (t == 0) {
+ bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
+ is2t ? "2T2R" : "1T1R");
+
+ /* Save ADDA parameters, turn Path A ADDA on */
+ rtl92d_phy_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+ rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ }
+ rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t);
+
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017038);
+
+ if (t == 0)
+ rtlphy->rfpi_enable = (u8)rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER1, BIT(8));
+
+ /* Switch BB to PI mode to do IQ Calibration. */
+ if (!rtlphy->rfpi_enable)
+ _rtl92du_phy_pimode_switch(hw, true);
+
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BCCKEN, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22204000);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
+ if (is2t) {
+ rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD,
+ 0x00010000);
+ rtl_set_bbreg(hw, RFPGA0_XB_LSSIPARAMETER, MASKDWORD,
+ 0x00010000);
+ }
+
+ /* MAC settings */
+ rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+
+ /* Page B init */
+ rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x0f600000);
+ if (is2t)
+ rtl_set_bbreg(hw, RCONFIG_ANTB, MASKDWORD, 0x0f600000);
+
+ /* IQ calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n");
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x808000);
+ rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x01007c00);
+ rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800);
+
+ for (i = 0; i < retrycount; i++) {
+ patha_ok = _rtl92du_phy_patha_iqk(hw, is2t);
+ if (patha_ok == 0x03) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A IQK Success!!\n");
+ result[t][0] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A,
+ MASK_IQK_RESULT);
+ result[t][1] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A,
+ MASK_IQK_RESULT);
+ result[t][2] = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2,
+ MASK_IQK_RESULT);
+ result[t][3] = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2,
+ MASK_IQK_RESULT);
+ break;
+ } else if (i == (retrycount - 1) && patha_ok == 0x01) {
+ /* Tx IQK OK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A IQK Only Tx Success!!\n");
+
+ result[t][0] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A,
+ MASK_IQK_RESULT);
+ result[t][1] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A,
+ MASK_IQK_RESULT);
+ }
+ }
+ if (patha_ok == 0x00)
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK failed!!\n");
+
+ if (is2t) {
+ _rtl92du_phy_patha_standby(hw);
+ /* Turn Path B ADDA on */
+ rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
+
+ for (i = 0; i < retrycount; i++) {
+ pathb_ok = _rtl92du_phy_pathb_iqk(hw);
+ if (pathb_ok == 0x03) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B IQK Success!!\n");
+ result[t][4] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B,
+ MASK_IQK_RESULT);
+ result[t][5] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B,
+ MASK_IQK_RESULT);
+ result[t][6] = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2,
+ MASK_IQK_RESULT);
+ result[t][7] = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_B_2,
+ MASK_IQK_RESULT);
+ break;
+ } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
+ /* Tx IQK OK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B Only Tx IQK Success!!\n");
+ result[t][4] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B,
+ MASK_IQK_RESULT);
+ result[t][5] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B,
+ MASK_IQK_RESULT);
+ }
+ }
+ if (pathb_ok == 0x00)
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B IQK failed!!\n");
+ }
+
+ /* Back to BB mode, load original value */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK:Back to BB mode, load original value!\n");
+
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x000000);
+
+ if (t != 0) {
+ /* Switch back BB to SI mode after finish IQ Calibration. */
+ if (!rtlphy->rfpi_enable)
+ _rtl92du_phy_pimode_switch(hw, false);
+
+ /* Reload ADDA power saving parameters */
+ _rtl92du_phy_reload_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+
+ /* Reload MAC parameters */
+ _rtl92du_phy_reload_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+
+ if (is2t)
+ _rtl92du_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ else
+ _rtl92du_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM - 1);
+
+ /* load 0xe30 IQC default value */
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x01008c00);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x01008c00);
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "<==\n");
+}
+
+static void _rtl92du_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw,
+ long result[][8], u8 t)
+{
+ static const u32 adda_reg[IQK_ADDA_REG_NUM] = {
+ RFPGA0_XCD_SWITCHCONTROL, RBLUE_TOOTH, RRX_WAIT_CCA,
+ RTX_CCK_RFON, RTX_CCK_BBON, RTX_OFDM_RFON, RTX_OFDM_BBON,
+ RTX_TO_RX, RTX_TO_TX, RRX_CCK, RRX_OFDM, RRX_WAIT_RIFS,
+ RRX_TO_RX, RSTANDBY, RSLEEP, RPMPD_ANAEN
+ };
+ static const u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
+ REG_TXPAUSE, REG_BCN_CTRL, REG_BCN_CTRL_1, REG_GPIO_MUXCFG
+ };
+ static const u32 iqk_bb_reg[IQK_BB_REG_NUM] = {
+ RFPGA0_XAB_RFINTERFACESW, RFPGA0_XA_RFINTERFACEOE,
+ RFPGA0_XB_RFINTERFACEOE, ROFDM0_TRMUXPAR,
+ RFPGA0_XCD_RFINTERFACESW, ROFDM0_TRXPATHENABLE,
+ RFPGA0_RFMOD, RFPGA0_ANALOGPARAMETER4,
+ ROFDM0_XAAGCCORE1, ROFDM0_XBAGCCORE1
+ };
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
+ u8 patha_ok, pathb_ok;
+ bool rf_path_div;
+ u32 bbvalue;
+
+ /* Note: IQ calibration must be performed after loading
+	 * PHY_REG.txt, radio_a.txt, and radio_b.txt.
+ */
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 5G NORMAL:Start!!!\n");
+
+ mdelay(IQK_DELAY_TIME * 20);
+
+ if (t == 0) {
+ bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n",
+ is2t ? "2T2R" : "1T1R");
+
+ /* Save ADDA parameters, turn Path A ADDA on */
+ rtl92d_phy_save_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+ rtl92d_phy_save_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+ if (is2t)
+ rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ else
+ rtl92d_phy_save_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM - 1);
+ }
+
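+	/* If RF path diversity is enabled (BB reg 0xb30[27]), start with
+	 * path B's ADDA instead of path A's.
+	 */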
+ rf_path_div = rtl_get_bbreg(hw, 0xb30, BIT(27));
+ rtl92d_phy_path_adda_on(hw, adda_reg, !rf_path_div, is2t);
+
+ if (t == 0)
+ rtlphy->rfpi_enable = rtl_get_bbreg(hw,
+ RFPGA0_XA_HSSIPARAMETER1,
+ BIT(8));
+
+ /* Switch BB to PI mode to do IQ Calibration. */
+ if (!rtlphy->rfpi_enable)
+ _rtl92du_phy_pimode_switch(hw, true);
+
+ /* MAC settings */
+ rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BCCKEN, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600);
+ rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4);
+ rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22208000);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f);
+
+ /* Page A AP setting for IQK */
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0);
+ rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x20000000);
+ if (is2t) {
+ /* Page B AP setting for IQK */
+ rtl_set_bbreg(hw, RPDP_ANTB, MASKDWORD, 0);
+ rtl_set_bbreg(hw, RCONFIG_ANTB, MASKDWORD, 0x20000000);
+ }
+
+ /* IQ calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n");
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x808000);
+ rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x10007c00);
+ rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800);
+
+ patha_ok = _rtl92du_phy_patha_iqk_5g_normal(hw, is2t);
+ if (patha_ok == 0x03) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Success!!\n");
+ result[t][0] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A,
+ MASK_IQK_RESULT);
+ result[t][1] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A,
+ MASK_IQK_RESULT);
+ result[t][2] = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2,
+ MASK_IQK_RESULT);
+ result[t][3] = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2,
+ MASK_IQK_RESULT);
+ } else if (patha_ok == 0x01) { /* Tx IQK OK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A IQK Only Tx Success!!\n");
+
+ result[t][0] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A,
+ MASK_IQK_RESULT);
+ result[t][1] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A,
+ MASK_IQK_RESULT);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x000000);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe70 = %#x\n",
+ rtl_get_bbreg(hw, RRX_WAIT_CCA, MASKDWORD));
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "RF path A 0x0 = %#x\n",
+ rtl_get_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK));
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x808000);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Fail!!\n");
+ }
+
+ if (is2t) {
+ /* _rtl92d_phy_patha_standby(hw); */
+ /* Turn Path B ADDA on */
+ rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t);
+
+ pathb_ok = _rtl92du_phy_pathb_iqk_5g_normal(hw);
+ if (pathb_ok == 0x03) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B IQK Success!!\n");
+ result[t][4] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B,
+ MASK_IQK_RESULT);
+ result[t][5] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B,
+ MASK_IQK_RESULT);
+ result[t][6] = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2,
+ MASK_IQK_RESULT);
+ result[t][7] = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_B_2,
+ MASK_IQK_RESULT);
+ } else if (pathb_ok == 0x01) { /* Tx IQK OK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B Only Tx IQK Success!!\n");
+ result[t][4] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B,
+ MASK_IQK_RESULT);
+ result[t][5] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B,
+ MASK_IQK_RESULT);
+ } else {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B IQK failed!!\n");
+ }
+ }
+
+ /* Back to BB mode, load original value */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK:Back to BB mode, load original value!\n");
+ rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0);
+
+ if (is2t)
+ _rtl92du_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM);
+ else
+ _rtl92du_phy_reload_adda_registers(hw, iqk_bb_reg,
+ rtlphy->iqk_bb_backup,
+ IQK_BB_REG_NUM - 1);
+
+ /* path A IQ path to DP block */
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x010170b8);
+ if (is2t) /* path B IQ path to DP block */
+ rtl_set_bbreg(hw, RPDP_ANTB, MASKDWORD, 0x010170b8);
+
+ /* Reload MAC parameters */
+ _rtl92du_phy_reload_mac_registers(hw, iqk_mac_reg,
+ rtlphy->iqk_mac_backup);
+
+ /* Switch back BB to SI mode after finish IQ Calibration. */
+ if (!rtlphy->rfpi_enable)
+ _rtl92du_phy_pimode_switch(hw, false);
+
+ /* Reload ADDA power saving parameters */
+ _rtl92du_phy_reload_adda_registers(hw, adda_reg,
+ rtlphy->adda_backup,
+ IQK_ADDA_REG_NUM);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "<==\n");
+}
+
+static bool _rtl92du_phy_simularity_compare(struct ieee80211_hw *hw,
+ long result[][8], u8 c1, u8 c2)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u32 i, j, diff, sim_bitmap, bound, u4temp = 0;
+ u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */
+ bool is2t = IS_92D_SINGLEPHY(rtlhal->version);
+ bool bresult = true;
+
+ if (is2t)
+ bound = 8;
+ else
+ bound = 4;
+
+ sim_bitmap = 0;
+
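+	/* result[][0..3] are path A's Tx/Rx values (before/after IQK) and
+	 * result[][4..7] are path B's. A set bit in sim_bitmap marks an
+	 * index where runs c1 and c2 differ by more than MAX_TOLERANCE_92D.
+	 */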
+ for (i = 0; i < bound; i++) {
+ diff = abs_diff(result[c1][i], result[c2][i]);
+
+ if (diff > MAX_TOLERANCE_92D) {
+ if ((i == 2 || i == 6) && !sim_bitmap) {
+ if (result[c1][i] + result[c1][i + 1] == 0)
+ final_candidate[(i / 4)] = c2;
+ else if (result[c2][i] + result[c2][i + 1] == 0)
+ final_candidate[(i / 4)] = c1;
+ else
+ sim_bitmap = sim_bitmap | (1 << i);
+ } else {
+ sim_bitmap = sim_bitmap | (1 << i);
+ }
+ }
+ }
+
+ if (sim_bitmap == 0) {
+ for (i = 0; i < (bound / 4); i++) {
+ if (final_candidate[i] != 0xFF) {
+ for (j = i * 4; j < (i + 1) * 4 - 2; j++)
+ result[3][j] =
+ result[final_candidate[i]][j];
+ bresult = false;
+ }
+ }
+
+ for (i = 0; i < bound; i++)
+ u4temp += result[c1][i] + result[c2][i];
+
+ if (u4temp == 0) /* IQK fail for c1 & c2 */
+ bresult = false;
+
+ return bresult;
+ }
+
+ if (!(sim_bitmap & 0x0F)) { /* path A OK */
+ for (i = 0; i < 4; i++)
+ result[3][i] = result[c1][i];
+ } else if (!(sim_bitmap & 0x03)) { /* path A, Tx OK */
+ for (i = 0; i < 2; i++)
+ result[3][i] = result[c1][i];
+ }
+
+ if (!(sim_bitmap & 0xF0) && is2t) { /* path B OK */
+ for (i = 4; i < 8; i++)
+ result[3][i] = result[c1][i];
+ } else if (!(sim_bitmap & 0x30)) { /* path B, Tx OK */
+ for (i = 4; i < 6; i++)
+ result[3][i] = result[c1][i];
+ }
+
+ return false;
+}
+
+static void _rtl92du_phy_patha_fill_iqk_matrix_5g_normal(struct ieee80211_hw *hw,
+ bool iqk_ok,
+ long result[][8],
+ u8 final_candidate,
+ bool txonly)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u32 val_x, reg;
+ int val_y;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A IQ Calibration %s !\n", iqk_ok ? "Success" : "Failed");
+ if (iqk_ok && final_candidate != 0xFF) {
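+		/* The IQK results are 10-bit signed values; sign-extend
+		 * them to the full register width when bit 9 is set.
+		 */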
+ val_x = result[final_candidate][0];
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "X = 0x%x\n", val_x);
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, 0x3FF0000, val_x);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0);
+
+ val_y = result[final_candidate][1];
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+
+ /* path B IQK result + 3 */
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ val_y += 3;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Y = 0x%x\n", val_y);
+
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, 0x3FF, val_y);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(26), 0);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe30 = 0x%x\n",
+ rtl_get_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD));
+
+ if (txonly) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n");
+ return;
+ }
+
+ reg = result[final_candidate][2];
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][3] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][3] >> 6) & 0xF;
+ rtl_set_bbreg(hw, ROFDM0_RXIQEXTANTA, 0xF0000000, reg);
+ } else {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "%s: Tx/Rx fail restore default value\n", __func__);
+
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x19008c00);
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x19008c00);
+ }
+}
+
+static void _rtl92du_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok, long result[][8],
+ u8 final_candidate, bool txonly)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u32 oldval_0, val_x, tx0_a, reg;
+ long val_y, tx0_c;
+ bool is2t = IS_92D_SINGLEPHY(rtlhal->version) ||
+ rtlhal->macphymode == DUALMAC_DUALPHY;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ _rtl92du_phy_patha_fill_iqk_matrix_5g_normal(hw, iqk_ok, result,
+ final_candidate,
+ txonly);
+ return;
+ }
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path A IQ Calibration %s !\n", iqk_ok ? "Success" : "Failed");
+ if (final_candidate == 0xFF || !iqk_ok)
+ return;
+
+ /* OFDM0_D */
+ oldval_0 = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0xffc00000);
+
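+	/* Sign-extend the 10-bit IQK result and scale it against the
+	 * previous TX IQ imbalance coefficient (fixed point, hence the
+	 * shift right by 8).
+	 */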
+ val_x = result[final_candidate][0];
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+
+ tx0_a = (val_x * oldval_0) >> 8;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "X = 0x%x, tx0_a = 0x%x, oldval_0 0x%x\n",
+ val_x, tx0_a, oldval_0);
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24),
+ ((val_x * oldval_0 >> 7) & 0x1));
+
+ val_y = result[final_candidate][1];
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+
+ /* path B IQK result + 3 */
+ if (rtlhal->interfaceindex == 1 &&
+ rtlhal->current_bandtype == BAND_ON_5G)
+ val_y += 3;
+
+ tx0_c = (val_y * oldval_0) >> 8;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Y = 0x%lx, tx0_c = 0x%lx\n",
+ val_y, tx0_c);
+
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, (tx0_c & 0x3C0) >> 6);
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000, tx0_c & 0x3F);
+ if (is2t)
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(26),
+ (val_y * oldval_0 >> 7) & 0x1);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xC80 = 0x%x\n",
+ rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
+ MASKDWORD));
+
+ if (txonly) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n");
+ return;
+ }
+
+ reg = result[final_candidate][2];
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][3] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][3] >> 6) & 0xF;
+ rtl_set_bbreg(hw, ROFDM0_RXIQEXTANTA, 0xF0000000, reg);
+}
+
+static void _rtl92du_phy_pathb_fill_iqk_matrix_5g_normal(struct ieee80211_hw *hw,
+ bool iqk_ok,
+ long result[][8],
+ u8 final_candidate,
+ bool txonly)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u32 val_x, reg;
+ int val_y;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Path B IQ Calibration %s !\n", iqk_ok ? "Success" : "Failed");
+ if (iqk_ok && final_candidate != 0xFF) {
+ val_x = result[final_candidate][4];
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "X = 0x%x\n", val_x);
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, 0x3FF0000, val_x);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), 0);
+
+ val_y = result[final_candidate][5];
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+
+ /* path B IQK result + 3 */
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ val_y += 3;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Y = 0x%x\n", val_y);
+
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, 0x3FF, val_y);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30), 0);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe50 = 0x%x\n",
+ rtl_get_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD));
+
+ if (txonly) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n");
+ return;
+ }
+
+ reg = result[final_candidate][6];
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][7] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][7] >> 6) & 0xF;
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
+ } else {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "%s: Tx/Rx fail restore default value\n", __func__);
+
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x19008c00);
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x19008c00);
+ }
+}
+
+static void _rtl92du_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw,
+ bool iqk_ok, long result[][8],
+ u8 final_candidate, bool txonly)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u32 oldval_1, val_x, tx1_a, reg;
+ long val_y, tx1_c;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ _rtl92du_phy_pathb_fill_iqk_matrix_5g_normal(hw, iqk_ok, result,
+ final_candidate,
+ txonly);
+ return;
+ }
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQ Calibration %s !\n",
+ iqk_ok ? "Success" : "Failed");
+
+ if (final_candidate == 0xFF || !iqk_ok)
+ return;
+
+ oldval_1 = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0xffc00000);
+
+ val_x = result[final_candidate][4];
+ if ((val_x & 0x00000200) != 0)
+ val_x = val_x | 0xFFFFFC00;
+
+ tx1_a = (val_x * oldval_1) >> 8;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "X = 0x%x, tx1_a = 0x%x\n",
+ val_x, tx1_a);
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28),
+ (val_x * oldval_1 >> 7) & 0x1);
+
+ val_y = result[final_candidate][5];
+ if ((val_y & 0x00000200) != 0)
+ val_y = val_y | 0xFFFFFC00;
+
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ val_y += 3;
+
+ tx1_c = (val_y * oldval_1) >> 8;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Y = 0x%lx, tx1_c = 0x%lx\n",
+ val_y, tx1_c);
+
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000, (tx1_c & 0x3C0) >> 6);
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000, tx1_c & 0x3F);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30),
+ (val_y * oldval_1 >> 7) & 0x1);
+
+ if (txonly)
+ return;
+
+ reg = result[final_candidate][6];
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
+ reg = result[final_candidate][7] & 0x3F;
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
+ reg = (result[final_candidate][7] >> 6) & 0xF;
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
+}
+
+void rtl92du_phy_iq_calibrate(struct ieee80211_hw *hw)
+{
+ long rege94, rege9c, regea4, regeac, regeb4;
+ bool is12simular, is13simular, is23simular;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ long regebc, regec4, regecc, regtmp = 0;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 i, final_candidate, indexforchannel;
+ bool patha_ok, pathb_ok;
+ long result[4][8] = {};
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK:Start!!!channel %d\n", rtlphy->current_channel);
+
+ final_candidate = 0xff;
+ patha_ok = false;
+ pathb_ok = false;
+ is12simular = false;
+ is23simular = false;
+ is13simular = false;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK !!!currentband %d\n", rtlhal->current_bandtype);
+
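+	/* Run the IQK up to three times. If two runs match within
+	 * tolerance, use the first run of the matching pair; otherwise
+	 * fall back to the partial results collected in result[3], if
+	 * any, or give up (0xFF).
+	 */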
+ for (i = 0; i < 3; i++) {
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ _rtl92du_phy_iq_calibrate_5g_normal(hw, result, i);
+ } else if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ if (IS_92D_SINGLEPHY(rtlhal->version))
+ _rtl92du_phy_iq_calibrate(hw, result, i, true);
+ else
+ _rtl92du_phy_iq_calibrate(hw, result, i, false);
+ }
+
+ if (i == 1) {
+ is12simular = _rtl92du_phy_simularity_compare(hw, result,
+ 0, 1);
+ if (is12simular) {
+ final_candidate = 0;
+ break;
+ }
+ }
+
+ if (i == 2) {
+ is13simular = _rtl92du_phy_simularity_compare(hw, result,
+ 0, 2);
+ if (is13simular) {
+ final_candidate = 0;
+ break;
+ }
+
+ is23simular = _rtl92du_phy_simularity_compare(hw, result,
+ 1, 2);
+ if (is23simular) {
+ final_candidate = 1;
+ } else {
+ for (i = 0; i < 8; i++)
+ regtmp += result[3][i];
+
+ if (regtmp != 0)
+ final_candidate = 3;
+ else
+ final_candidate = 0xFF;
+ }
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ rege94 = result[i][0];
+ rege9c = result[i][1];
+ regea4 = result[i][2];
+ regeac = result[i][3];
+ regeb4 = result[i][4];
+ regebc = result[i][5];
+ regec4 = result[i][6];
+ regecc = result[i][7];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK: rege94=%lx rege9c=%lx regea4=%lx regeac=%lx regeb4=%lx regebc=%lx regec4=%lx regecc=%lx\n",
+ rege94, rege9c, regea4, regeac, regeb4, regebc, regec4,
+ regecc);
+ }
+
+ if (final_candidate != 0xff) {
+ rege94 = result[final_candidate][0];
+ rtlphy->reg_e94 = rege94;
+ rege9c = result[final_candidate][1];
+ rtlphy->reg_e9c = rege9c;
+ regea4 = result[final_candidate][2];
+ regeac = result[final_candidate][3];
+ regeb4 = result[final_candidate][4];
+ rtlphy->reg_eb4 = regeb4;
+ regebc = result[final_candidate][5];
+ rtlphy->reg_ebc = regebc;
+ regec4 = result[final_candidate][6];
+ regecc = result[final_candidate][7];
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK: final_candidate is %x\n", final_candidate);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "IQK: rege94=%lx rege9c=%lx regea4=%lx regeac=%lx regeb4=%lx regebc=%lx regec4=%lx regecc=%lx\n",
+ rege94, rege9c, regea4, regeac, regeb4, regebc, regec4,
+ regecc);
+
+ patha_ok = true;
+ pathb_ok = true;
+ } else {
+ rtlphy->reg_e94 = 0x100;
+ rtlphy->reg_eb4 = 0x100; /* X default value */
+ rtlphy->reg_e9c = 0x0;
+ rtlphy->reg_ebc = 0x0; /* Y default value */
+ }
+ if (rege94 != 0 /*&& regea4 != 0*/)
+ _rtl92du_phy_patha_fill_iqk_matrix(hw, patha_ok, result,
+ final_candidate,
+ regea4 == 0);
+ if (IS_92D_SINGLEPHY(rtlhal->version) &&
+ regeb4 != 0 /*&& regec4 != 0*/)
+ _rtl92du_phy_pathb_fill_iqk_matrix(hw, pathb_ok, result,
+ final_candidate,
+ regec4 == 0);
+
+ if (final_candidate != 0xFF) {
+ indexforchannel =
+ rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel);
+
+ for (i = 0; i < IQK_MATRIX_REG_NUM; i++)
+ rtlphy->iqk_matrix[indexforchannel].value[0][i] =
+ result[final_candidate][i];
+
+ rtlphy->iqk_matrix[indexforchannel].iqk_done = true;
+
+ rtl_dbg(rtlpriv, COMP_SCAN | COMP_MLME, DBG_LOUD,
+ "IQK OK indexforchannel %d\n", indexforchannel);
+ }
+}
+
+void rtl92du_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ u8 indexforchannel;
+ bool need_iqk;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "channel %d\n", channel);
+ /*------Do IQK for normal chip and test chip 5G band------- */
+
+ indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "indexforchannel %d done %d\n",
+ indexforchannel,
+ rtlphy->iqk_matrix[indexforchannel].iqk_done);
+
+ /* We need to do IQK if we're about to connect to a network on 5 GHz.
+ * On 5 GHz a channel switch outside of scanning happens only before
+ * connecting.
+ */
+ need_iqk = !mac->act_scanning;
+
+ if (!rtlphy->iqk_matrix[indexforchannel].iqk_done && need_iqk) {
+ rtl_dbg(rtlpriv, COMP_SCAN | COMP_INIT, DBG_LOUD,
+ "Do IQK Matrix reg for channel:%d....\n", channel);
+ rtl92du_phy_iq_calibrate(hw);
+ return;
+ }
+
+ /* Just load the value. */
+ /* 2G band just load once. */
+ if ((!rtlhal->load_imrandiqk_setting_for2g && indexforchannel == 0) ||
+ indexforchannel > 0) {
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Just Read IQK Matrix reg for channel:%d....\n",
+ channel);
+
+ if (rtlphy->iqk_matrix[indexforchannel].value[0][0] != 0)
+ _rtl92du_phy_patha_fill_iqk_matrix(hw, true,
+ rtlphy->iqk_matrix[indexforchannel].value, 0,
+ rtlphy->iqk_matrix[indexforchannel].value[0][2] == 0);
+
+ if (IS_92D_SINGLEPHY(rtlhal->version) &&
+ rtlphy->iqk_matrix[indexforchannel].value[0][4] != 0)
+ _rtl92du_phy_pathb_fill_iqk_matrix(hw, true,
+ rtlphy->iqk_matrix[indexforchannel].value, 0,
+ rtlphy->iqk_matrix[indexforchannel].value[0][6] == 0);
+ }
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+}
+
+static void _rtl92du_phy_reload_lck_setting(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u8 erfpath = rtlhal->current_bandtype == BAND_ON_5G ? RF90_PATH_A :
+ IS_92D_SINGLEPHY(rtlhal->version) ? RF90_PATH_B : RF90_PATH_A;
+ bool bneed_powerdown_radio = false;
+ u32 u4tmp, u4regvalue;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "path %d\n", erfpath);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "band type = %d\n",
+ rtlpriv->rtlhal.current_bandtype);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "channel = %d\n", channel);
+
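+	/* Write this channel's precomputed CV-curve index into RF_SYN_G4
+	 * bits [17:11]. In DUALMAC_DUALPHY mode the other MAC's radio may
+	 * have to be powered up temporarily for the write.
+	 */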
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {/* Path-A for 5G */
+ u4tmp = rtlpriv->curveindex_5g[channel - 1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 1 set RF-A, 5G, 0x28 = 0x%x !!\n", u4tmp);
+
+ if (rtlpriv->rtlhal.macphymode == DUALMAC_DUALPHY &&
+ rtlpriv->rtlhal.interfaceindex == 1) {
+ bneed_powerdown_radio =
+ rtl92du_phy_enable_anotherphy(hw, false);
+ rtlpriv->rtlhal.during_mac1init_radioa = true;
+			/* assume this case does not happen */
+ if (bneed_powerdown_radio)
+ rtl92d_phy_enable_rf_env(hw, erfpath,
+ &u4regvalue);
+ }
+
+ rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp);
+
+ if (bneed_powerdown_radio) {
+ rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
+ rtl92du_phy_powerdown_anotherphy(hw, false);
+ }
+ } else if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) {
+ u4tmp = rtlpriv->curveindex_2g[channel - 1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n", u4tmp);
+
+ if (rtlpriv->rtlhal.macphymode == DUALMAC_DUALPHY &&
+ rtlpriv->rtlhal.interfaceindex == 0) {
+ bneed_powerdown_radio =
+ rtl92du_phy_enable_anotherphy(hw, true);
+ rtlpriv->rtlhal.during_mac0init_radiob = true;
+ if (bneed_powerdown_radio)
+ rtl92d_phy_enable_rf_env(hw, erfpath,
+ &u4regvalue);
+ }
+
+ rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
+ rtl_get_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800));
+
+ if (bneed_powerdown_radio) {
+ rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
+ rtl92du_phy_powerdown_anotherphy(hw, true);
+ }
+ }
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+}
+
+static void _rtl92du_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u32 curvecount_val[CV_CURVE_CNT * 2];
+ u16 timeout = 800, timecount = 0;
+ u32 u4tmp, offset, rf_syn_g4[2];
+ u8 tmpreg, index, rf_mode[2];
+ u8 path = is2t ? 2 : 1;
+ u8 i;
+
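+	/* Software LC calibration: pause TX, put each RF path into
+	 * standby, kick off LCK via RF_CHNLBW bit 15, then read back the
+	 * CV-curve counts and compute the per-channel curve indexes.
+	 */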
+ /* Check continuous TX and Packet TX */
+ tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+ if ((tmpreg & 0x70) != 0)
+		/* Dealing with the continuous TX case: disable all continuous TX */
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+ else
+		/* Dealing with the packet TX case: block all TX queues */
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xF00000, 0x0F);
+
+ for (index = 0; index < path; index++) {
+ /* 1. Read original RF mode */
+ offset = index == 0 ? ROFDM0_XAAGCCORE1 : ROFDM0_XBAGCCORE1;
+ rf_mode[index] = rtl_read_byte(rtlpriv, offset);
+
+ /* 2. Set RF mode = standby mode */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_AC,
+ RFREG_OFFSET_MASK, 0x010000);
+
+ rf_syn_g4[index] = rtl_get_rfreg(hw, index, RF_SYN_G4,
+ RFREG_OFFSET_MASK);
+ rtl_set_rfreg(hw, index, RF_SYN_G4, 0x700, 0x7);
+
+		/* 3. Switch CV-curve control to LC calibration */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
+ BIT(17), 0x0);
+
+ /* 4. Set LC calibration begin */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_CHNLBW,
+ 0x08000, 0x01);
+ }
+
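+	/* Poll RF_SYN_G6 bit 11 for up to 800 ms; it is set once the LC
+	 * calibration has finished.
+	 */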
+ for (index = 0; index < path; index++) {
+ u4tmp = rtl_get_rfreg(hw, (enum radio_path)index, RF_SYN_G6,
+ RFREG_OFFSET_MASK);
+
+ while ((!(u4tmp & BIT(11))) && timecount <= timeout) {
+ mdelay(50);
+ timecount += 50;
+ u4tmp = rtl_get_rfreg(hw, (enum radio_path)index,
+ RF_SYN_G6, RFREG_OFFSET_MASK);
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "PHY_LCK finish delay for %d ms=2\n", timecount);
+ }
+
+ if ((tmpreg & 0x70) != 0)
+ rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+ else /* Deal with Packet TX case */
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xF00000, 0x00);
+
+ for (index = 0; index < path; index++) {
+ rtl_get_rfreg(hw, index, RF_SYN_G4, RFREG_OFFSET_MASK);
+
+ if (index == 0 && rtlhal->interfaceindex == 0) {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "path-A / 5G LCK\n");
+ } else {
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "path-B / 2.4G LCK\n");
+ }
+
+ memset(curvecount_val, 0, sizeof(curvecount_val));
+
+ /* Set LC calibration off */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_CHNLBW,
+ 0x08000, 0x0);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "set RF 0x18[15] = 0\n");
+
+ /* save Curve-counting number */
+ for (i = 0; i < CV_CURVE_CNT; i++) {
+ u32 readval = 0, readval2 = 0;
+
+ rtl_set_rfreg(hw, (enum radio_path)index, 0x3F,
+ 0x7f, i);
+
+ rtl_set_rfreg(hw, (enum radio_path)index, 0x4D,
+ RFREG_OFFSET_MASK, 0x0);
+
+ readval = rtl_get_rfreg(hw, (enum radio_path)index,
+ 0x4F, RFREG_OFFSET_MASK);
+ curvecount_val[2 * i + 1] = (readval & 0xfffe0) >> 5;
+
+ /* reg 0x4f [4:0] */
+ /* reg 0x50 [19:10] */
+ readval2 = rtl_get_rfreg(hw, (enum radio_path)index,
+ 0x50, 0xffc00);
+ curvecount_val[2 * i] = (((readval & 0x1F) << 10) |
+ readval2);
+ }
+
+ if (index == 0 && rtlhal->interfaceindex == 0)
+ rtl92d_phy_calc_curvindex(hw, targetchnl_5g,
+ curvecount_val,
+ true, rtlpriv->curveindex_5g);
+ else
+ rtl92d_phy_calc_curvindex(hw, targetchnl_2g,
+ curvecount_val,
+ false, rtlpriv->curveindex_2g);
+
+ /* switch CV-curve control mode */
+ rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
+ BIT(17), 0x1);
+ }
+
+ /* Restore original situation */
+ for (index = 0; index < path; index++) {
+ rtl_set_rfreg(hw, index, RF_SYN_G4, RFREG_OFFSET_MASK,
+ rf_syn_g4[index]);
+
+ offset = index == 0 ? ROFDM0_XAAGCCORE1 : ROFDM0_XBAGCCORE1;
+ rtl_write_byte(rtlpriv, offset, 0x50);
+ rtl_write_byte(rtlpriv, offset, rf_mode[index]);
+ }
+
+ _rtl92du_phy_reload_lck_setting(hw, rtlpriv->phy.current_channel);
+}
+
+void rtl92du_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 timeout = 2000, timecount = 0;
+
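+	/* Wait for any in-progress scan to finish before starting LCK. */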
+ while (rtlpriv->mac80211.act_scanning && timecount < timeout) {
+ udelay(50);
+ timecount += 50;
+ }
+
+ rtlphy->lck_inprogress = true;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "LCK:Start!!! currentband %x delay %d ms\n",
+ rtlhal->current_bandtype, timecount);
+
+ _rtl92du_phy_lc_calibrate_sw(hw, is2t);
+
+ rtlphy->lck_inprogress = false;
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n");
+}
+
+void rtl92du_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta)
+{
+ /* Nothing to do. */
+}
+
+u8 rtl92du_phy_sw_chnl(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 num_total_rfpath = rtlphy->num_total_rfpath;
+ u8 channel = rtlphy->current_channel;
+ u32 timeout = 1000, timecount = 0;
+ u32 ret_value;
+ u8 rfpath;
+
+ if (rtlphy->sw_chnl_inprogress)
+ return 0;
+ if (rtlphy->set_bwmode_inprogress)
+ return 0;
+
+ if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) {
+ rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD,
+ "sw_chnl_inprogress false driver sleep or unload\n");
+ return 0;
+ }
+
+ while (rtlphy->lck_inprogress && timecount < timeout) {
+ mdelay(50);
+ timecount += 50;
+ }
+
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY &&
+ rtlhal->bandset == BAND_ON_BOTH) {
+ ret_value = rtl_get_bbreg(hw, RFPGA0_XAB_RFPARAMETER,
+ MASKDWORD);
+ if (rtlphy->current_channel > 14 && !(ret_value & BIT(0)))
+ rtl92du_phy_switch_wirelessband(hw, BAND_ON_5G);
+ else if (rtlphy->current_channel <= 14 && (ret_value & BIT(0)))
+ rtl92du_phy_switch_wirelessband(hw, BAND_ON_2_4G);
+ }
+
+ switch (rtlhal->current_bandtype) {
+ case BAND_ON_5G:
+		/* Catch a wrong first channel when switching between
+		 * the 5G and 2.4G bands.
+		 */
+ if (WARN_ONCE(channel <= 14, "rtl8192du: 5G but channel<=14\n"))
+ return 0;
+ break;
+ case BAND_ON_2_4G:
+		/* Catch a wrong first channel when switching between
+		 * the 5G and 2.4G bands.
+		 */
+ if (WARN_ONCE(channel > 14, "rtl8192du: 2G but channel>14\n"))
+ return 0;
+ break;
+ default:
+ WARN_ONCE(true, "rtl8192du: Invalid WirelessMode(%#x)!!\n",
+ rtlpriv->mac80211.mode);
+ break;
+ }
+
+ rtlphy->sw_chnl_inprogress = true;
+
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE,
+ "switch to channel%d\n", rtlphy->current_channel);
+
+ rtl92d_phy_set_txpower_level(hw, channel);
+
+ for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
+ u32p_replace_bits(&rtlphy->rfreg_chnlval[rfpath],
+ channel, 0xff);
+
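+		/* For 5 GHz set BIT(16) and BIT(8) in RF_CHNLBW, plus
+		 * BIT(18) for channels above 99; clear all three bits
+		 * for 2.4 GHz.
+		 */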
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {
+ if (channel > 99)
+ rtlphy->rfreg_chnlval[rfpath] |= (BIT(18));
+ else
+ rtlphy->rfreg_chnlval[rfpath] &= ~BIT(18);
+ rtlphy->rfreg_chnlval[rfpath] |= (BIT(16) | BIT(8));
+ } else {
+ rtlphy->rfreg_chnlval[rfpath] &=
+ ~(BIT(8) | BIT(16) | BIT(18));
+ }
+ rtl_set_rfreg(hw, rfpath, RF_CHNLBW, RFREG_OFFSET_MASK,
+ rtlphy->rfreg_chnlval[rfpath]);
+
+ _rtl92du_phy_reload_imr_setting(hw, channel, rfpath);
+ }
+
+ _rtl92du_phy_switch_rf_setting(hw, channel);
+
+ /* do IQK when all parameters are ready */
+ rtl92du_phy_reload_iqk_setting(hw, channel);
+
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+ rtlphy->sw_chnl_inprogress = false;
+ return 1;
+}
+
+static void _rtl92du_phy_set_rfon(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ /* a. SYS_CLKR 0x08[11] = 1 restore MAC clock */
+ /* b. SPS_CTRL 0x11[7:0] = 0x2b */
+ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY)
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
+
+ /* c. For PCIE: SYS_FUNC_EN 0x02[7:0] = 0xE3 enable BB TRX function */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+
+ /* RF_ON_EXCEP(d~g): */
+ /* d. APSD_CTRL 0x600[7:0] = 0x00 */
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+
+ /* e. SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB TRX function again */
+ /* f. SYS_FUNC_EN 0x02[7:0] = 0xE3 enable BB TRX function*/
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+
+ /* g. txpause 0x522[7:0] = 0x00 enable mac tx queue */
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+}
+
+static void _rtl92du_phy_set_rfsleep(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 u4btmp;
+ u8 retry = 5;
+
+ /* a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue */
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+ /* b. RF path 0 offset 0x00 = 0x00 disable RF */
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+
+ /* c. APSD_CTRL 0x600[7:0] = 0x40 */
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+
+	/* d. Retry while RF reg 0 still reads back non-zero:
+	 *    APSD_CTRL 0x600[7:0] = 0x00
+	 *    RF path 0 offset 0x00 = 0x00
+	 *    APSD_CTRL 0x600[7:0] = 0x40
+	 */
+ u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+ while (u4btmp != 0 && retry > 0) {
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
+ rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
+ u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
+ retry--;
+ }
+ if (retry == 0) {
+ /* Jump out the LPS turn off sequence to RF_ON_EXCEP */
+ rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
+
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
+ rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD,
+ "Fail !!! Switch RF timeout\n");
+ return;
+ }
+
+ /* e. For PCIE: SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB TRX function */
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
+
+ /* f. SPS_CTRL 0x11[7:0] = 0x22 */
+ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY)
+ rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
+}
+
+bool rtl92du_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ bool bresult = true;
+
+ if (rfpwr_state == ppsc->rfpwr_state)
+ return false;
+
+ switch (rfpwr_state) {
+ case ERFON:
+ if (ppsc->rfpwr_state == ERFOFF &&
+ RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
+ u32 initializecount = 0;
+ bool rtstatus;
+
+ do {
+ initializecount++;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic enable\n");
+ rtstatus = rtl_ps_enable_nic(hw);
+ } while (!rtstatus && (initializecount < 10));
+
+ RT_CLEAR_PS_LEVEL(ppsc,
+ RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "awake, slept:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_sleep_jiffies),
+ rtlpriv->psc.state_inap);
+ ppsc->last_awake_jiffies = jiffies;
+ _rtl92du_phy_set_rfon(hw);
+ }
+
+ if (mac->link_state == MAC80211_LINKED)
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_LINK);
+ else
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK);
+ break;
+ case ERFOFF:
+ if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
+ rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG,
+ "IPS Set eRf nic disable\n");
+ rtl_ps_disable_nic(hw);
+ RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
+ } else {
+ if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK);
+ else
+ rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
+ }
+ break;
+ case ERFSLEEP:
+ if (ppsc->rfpwr_state == ERFOFF)
+ return false;
+
+ rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG,
+ "sleep awakened:%d ms state_inap:%x\n",
+ jiffies_to_msecs(jiffies -
+ ppsc->last_awake_jiffies),
+ rtlpriv->psc.state_inap);
+ ppsc->last_sleep_jiffies = jiffies;
+ _rtl92du_phy_set_rfsleep(hw);
+ break;
+ default:
+ pr_err("switch case %#x not processed\n",
+ rfpwr_state);
+ return false;
+ }
+
+ if (bresult)
+ ppsc->rfpwr_state = rfpwr_state;
+
+ return bresult;
+}
+
+void rtl92du_phy_set_poweron(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u32 mac_reg = (rtlhal->interfaceindex == 0 ? REG_MAC0 : REG_MAC1);
+ u8 value8;
+ u16 i;
+
+	/* Let the FW know the band status: 0x81[1]/0x53[1] = 0: 5G, 1: 2G */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ value8 = rtl_read_byte(rtlpriv, mac_reg);
+ value8 |= BIT(1);
+ rtl_write_byte(rtlpriv, mac_reg, value8);
+ } else {
+ value8 = rtl_read_byte(rtlpriv, mac_reg);
+ value8 &= ~BIT(1);
+ rtl_write_byte(rtlpriv, mac_reg, value8);
+ }
+
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY) {
+ value8 = rtl_read_byte(rtlpriv, REG_MAC0);
+ rtl_write_byte(rtlpriv, REG_MAC0, value8 | MAC0_ON);
+ } else {
+ mutex_lock(rtlpriv->mutex_for_power_on_off);
+ if (rtlhal->interfaceindex == 0) {
+ value8 = rtl_read_byte(rtlpriv, REG_MAC0);
+ rtl_write_byte(rtlpriv, REG_MAC0, value8 | MAC0_ON);
+ } else {
+ value8 = rtl_read_byte(rtlpriv, REG_MAC1);
+ rtl_write_byte(rtlpriv, REG_MAC1, value8 | MAC1_ON);
+ }
+ value8 = rtl_read_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS);
+ mutex_unlock(rtlpriv->mutex_for_power_on_off);
+
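+		/* Poll up to 200 times (~100 ms) for the other MAC to
+		 * finish powering off, indicated by BIT(7) of
+		 * REG_POWER_OFF_IN_PROCESS being cleared.
+		 */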
+ for (i = 0; i < 200; i++) {
+ if ((value8 & BIT(7)) == 0)
+ break;
+
+ udelay(500);
+ mutex_lock(rtlpriv->mutex_for_power_on_off);
+ value8 = rtl_read_byte(rtlpriv,
+ REG_POWER_OFF_IN_PROCESS);
+ mutex_unlock(rtlpriv->mutex_for_power_on_off);
+ }
+ if (i == 200)
+			WARN_ONCE(true, "rtl8192du: Timed out waiting for the other MAC to power off\n");
+ }
+}
+
+void rtl92du_update_bbrf_configuration(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 rfpath, i;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
+ /* r_select_5G for path_A/B 0 for 2.4G, 1 for 5G */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* r_select_5G for path_A/B, 0x878 */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(0), 0x0);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15), 0x0);
+ if (rtlhal->macphymode != DUALMAC_DUALPHY) {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(16), 0x0);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(31), 0x0);
+ }
+
+ /* rssi_table_select: index 0 for 2.4G. 1~3 for 5G, 0xc78 */
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, BIT(6) | BIT(7), 0x0);
+
+ /* fc_area 0xd2c */
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(14) | BIT(13), 0x0);
+
+ /* 5G LAN ON */
+ rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0xa);
+
+		/* TX BB gain shift * 1, just for test chip, 0xc80, 0xc88 */
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD, 0x40000100);
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
+ BIT(10) | BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(3)) >> 3) |
+ (rtlefuse->eeprom_c9 & BIT(1)) |
+ ((rtlefuse->eeprom_cc & BIT(1)) << 4));
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
+ BIT(10) | BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(2)) >> 2) |
+ ((rtlefuse->eeprom_c9 & BIT(0)) << 1) |
+ ((rtlefuse->eeprom_cc & BIT(0)) << 5));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15), 0);
+
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017038);
+ rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x0f600000);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
+ BIT(26) | BIT(22) | BIT(21) | BIT(10) |
+ BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(3)) >> 3) |
+ (rtlefuse->eeprom_c9 & BIT(1)) |
+ ((rtlefuse->eeprom_cc & BIT(1)) << 4) |
+ ((rtlefuse->eeprom_c9 & BIT(7)) << 9) |
+ ((rtlefuse->eeprom_c9 & BIT(5)) << 12) |
+ ((rtlefuse->eeprom_cc & BIT(3)) << 18));
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE,
+ BIT(10) | BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(2)) >> 2) |
+ ((rtlefuse->eeprom_c9 & BIT(0)) << 1) |
+ ((rtlefuse->eeprom_cc & BIT(0)) << 5));
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
+ BIT(10) | BIT(6) | BIT(5),
+ ((rtlefuse->eeprom_c9 & BIT(6)) >> 6) |
+ ((rtlefuse->eeprom_c9 & BIT(4)) >> 3) |
+ ((rtlefuse->eeprom_cc & BIT(2)) << 3));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER,
+ BIT(31) | BIT(15), 0);
+
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017038);
+ rtl_set_bbreg(hw, RPDP_ANTB, MASKDWORD, 0x01017038);
+ rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x0f600000);
+ rtl_set_bbreg(hw, RCONFIG_ANTB, MASKDWORD, 0x0f600000);
+ }
+ /* 1.5V_LDO */
+ } else {
+ /* r_select_5G for path_A/B */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(0), 0x1);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15), 0x1);
+ if (rtlhal->macphymode != DUALMAC_DUALPHY) {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(16), 0x1);
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(31), 0x1);
+ }
+
+ /* rssi_table_select: index 0 for 2.4G. 1~3 for 5G */
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, BIT(6) | BIT(7), 0x1);
+
+ /* fc_area */
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(14) | BIT(13), 0x1);
+
+ /* 5G LAN ON */
+ rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0x0);
+
+		/* TX BB gain shift, just for test chip, 0xc80, 0xc88 */
+ if (rtlefuse->internal_pa_5g[rtlhal->interfaceindex])
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
+ 0x2d4000b5);
+ else
+ rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD,
+ 0x20000080);
+
+ if (rtlhal->macphymode != DUALMAC_DUALPHY) {
+ if (rtlefuse->internal_pa_5g[1])
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD, 0x2d4000b5);
+ else
+ rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
+ MASKDWORD, 0x20000080);
+ }
+
+ rtl_set_bbreg(hw, 0xB30, BIT(27), 0);
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
+ BIT(10) | BIT(6) | BIT(5),
+ (rtlefuse->eeprom_cc & BIT(5)));
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BIT(10),
+ ((rtlefuse->eeprom_cc & BIT(4)) >> 4));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15),
+ (rtlefuse->eeprom_cc & BIT(4)) >> 4);
+
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017098);
+ rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x20000000);
+ } else {
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW,
+ BIT(26) | BIT(22) | BIT(21) | BIT(10) |
+ BIT(6) | BIT(5),
+ (rtlefuse->eeprom_cc & BIT(5)) |
+ ((rtlefuse->eeprom_cc & BIT(7)) << 14));
+ rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BIT(10),
+ ((rtlefuse->eeprom_cc & BIT(4)) >> 4));
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BIT(10),
+ ((rtlefuse->eeprom_cc & BIT(6)) >> 6));
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER,
+ BIT(31) | BIT(15),
+ ((rtlefuse->eeprom_cc & BIT(4)) >> 4) |
+ ((rtlefuse->eeprom_cc & BIT(6)) << 10));
+
+ rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017098);
+ rtl_set_bbreg(hw, RPDP_ANTB, MASKDWORD, 0x01017098);
+ rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x20000000);
+ rtl_set_bbreg(hw, RCONFIG_ANTB, MASKDWORD, 0x20000000);
+ }
+ }
+
+ /* update IQK related settings */
+ rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100);
+ rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30) | BIT(28) |
+ BIT(26) | BIT(24), 0x00);
+ rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_RXIQEXTANTA, 0xF0000000, 0x00);
+ rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, 0x00);
+
+ /* Update RF */
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
+ rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) |
+ BIT(18) | 0xff, 1);
+
+			/* RF 0x0b[16:14] = 3'b111 */
+ rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B,
+ 0x1c000, 0x07);
+ } else {
+ /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
+ 0x97524);
+ }
+
+		/* Set the right channel on RF reg 0x18 for the other MAC. */
+ if (rtlhal->interfaceindex == 0 && rtlhal->bandset == BAND_ON_2_4G) {
+			/* Set MAC1's default channel if MAC1 is not up. */
+ if (!(rtl_read_byte(rtlpriv, REG_MAC1) & MAC1_ON)) {
+ rtl92du_phy_enable_anotherphy(hw, true);
+ rtlhal->during_mac0init_radiob = true;
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW,
+ RFREG_OFFSET_MASK, 0x97524);
+ rtl92du_phy_powerdown_anotherphy(hw, true);
+ }
+ } else if (rtlhal->interfaceindex == 1 && rtlhal->bandset == BAND_ON_5G) {
+ /* Set MAC0 default channel */
+ if (!(rtl_read_byte(rtlpriv, REG_MAC0) & MAC0_ON)) {
+ rtl92du_phy_enable_anotherphy(hw, false);
+ rtlhal->during_mac1init_radioa = true;
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW,
+ RFREG_OFFSET_MASK, 0x87401);
+ rtl92du_phy_powerdown_anotherphy(hw, false);
+ }
+ }
+ }
+
+ /* Update for all band. */
+ /* DMDP */
+ if (rtlphy->rf_type == RF_1T1R) {
+ /* Use antenna 0, 0xc04, 0xd04 */
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x11);
+ rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x1);
+
+ /* enable ad/da clock1 for dual-phy reg0x888 */
+ if (rtlhal->interfaceindex == 0) {
+ rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, BIT(12) |
+ BIT(13), 0x3);
+ } else if (rtl92du_phy_enable_anotherphy(hw, false)) {
+ rtlhal->during_mac1init_radioa = true;
+ rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN,
+ BIT(12) | BIT(13), 0x3);
+ rtl92du_phy_powerdown_anotherphy(hw, false);
+ }
+
+ rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(19) | BIT(20), 0x0);
+ } else {
+ /* Single PHY */
+ /* Use antenna 0 & 1, 0xc04, 0xd04 */
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x33);
+ rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x3);
+ /* disable ad/da clock1,0x888 */
+ rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, BIT(12) | BIT(13), 0);
+
+ rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(19) | BIT(20), 0x1);
+ }
+
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ rtlphy->rfreg_chnlval[rfpath] = rtl_get_rfreg(hw, rfpath,
+ RF_CHNLBW,
+ RFREG_OFFSET_MASK);
+ rtlphy->reg_rf3c[rfpath] = rtl_get_rfreg(hw, rfpath, 0x3C,
+ RFREG_OFFSET_MASK);
+ }
+
+ for (i = 0; i < 2; i++)
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "RF 0x18 = 0x%x\n",
+ rtlphy->rfreg_chnlval[i]);
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<==\n");
+}
+
+bool rtl92du_phy_check_poweroff(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 u1btmp;
+
+ if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY) {
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC0);
+ rtl_write_byte(rtlpriv, REG_MAC0, u1btmp & ~MAC0_ON);
+ return true;
+ }
+
+ mutex_lock(rtlpriv->mutex_for_power_on_off);
+ if (rtlhal->interfaceindex == 0) {
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC0);
+ rtl_write_byte(rtlpriv, REG_MAC0, u1btmp & ~MAC0_ON);
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC1);
+ u1btmp &= MAC1_ON;
+ } else {
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC1);
+ rtl_write_byte(rtlpriv, REG_MAC1, u1btmp & ~MAC1_ON);
+ u1btmp = rtl_read_byte(rtlpriv, REG_MAC0);
+ u1btmp &= MAC0_ON;
+ }
+ if (u1btmp) {
+ mutex_unlock(rtlpriv->mutex_for_power_on_off);
+ return false;
+ }
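+	/* The other MAC is already off. Flag the power-off in progress so
+	 * a concurrent power-on of the other MAC waits for it (see
+	 * rtl92du_phy_set_poweron()).
+	 */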
+ u1btmp = rtl_read_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS);
+ u1btmp |= BIT(7);
+ rtl_write_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS, u1btmp);
+ mutex_unlock(rtlpriv->mutex_for_power_on_off);
+
+ return true;
+}
+
+void rtl92du_phy_init_pa_bias(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ bool is_single_mac = rtlhal->macphymode == SINGLEMAC_SINGLEPHY;
+ enum radio_path rf_path;
+ u8 val8;
+
+ read_efuse_byte(hw, 0x3FA, &val8);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "%s: 0x3FA %#x\n",
+ __func__, val8);
+
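+	/* Each cleared bit in efuse byte 0x3FA selects one PA bias init:
+	 * bit 0 = 2G path A, bit 1 = 2G path B, bit 2 = 5G path A,
+	 * bit 3 = 5G path B.
+	 */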
+ if (!(val8 & BIT(0)) && (is_single_mac || rtlhal->interfaceindex == 0)) {
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, 0x07401);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x0F425);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x4F425);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x8F425);
+
+ /* Back to RX Mode */
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x30000);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "2G PA BIAS path A\n");
+ }
+
+ if (!(val8 & BIT(1)) && (is_single_mac || rtlhal->interfaceindex == 1)) {
+ rf_path = rtlhal->interfaceindex == 1 ? RF90_PATH_A : RF90_PATH_B;
+
+ rtl_set_rfreg(hw, rf_path, RF_CHNLBW, RFREG_OFFSET_MASK, 0x07401);
+ rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x0F425);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x4F425);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x8F425);
+
+ /* Back to RX Mode */
+ rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x30000);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "2G PA BIAS path B\n");
+ }
+
+ if (!(val8 & BIT(2)) && (is_single_mac || rtlhal->interfaceindex == 0)) {
+ /* 5GL_channel */
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, 0x17524);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x0F496);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x4F496);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x8F496);
+
+ /* 5GM_channel */
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, 0x37564);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x0F496);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x4F496);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x8F496);
+
+ /* 5GH_channel */
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, 0x57595);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x0F496);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x4F496);
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x8F496);
+
+ /* Back to RX Mode */
+ rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x30000);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "5G PA BIAS path A\n");
+ }
+
+ if (!(val8 & BIT(3)) && (is_single_mac || rtlhal->interfaceindex == 1)) {
+ rf_path = rtlhal->interfaceindex == 1 ? RF90_PATH_A : RF90_PATH_B;
+
+ /* 5GL_channel */
+ rtl_set_rfreg(hw, rf_path, RF_CHNLBW, RFREG_OFFSET_MASK, 0x17524);
+ rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x0F496);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x4F496);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x8F496);
+
+ /* 5GM_channel */
+ rtl_set_rfreg(hw, rf_path, RF_CHNLBW, RFREG_OFFSET_MASK, 0x37564);
+ rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x0F496);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x4F496);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x8F496);
+
+ /* 5GH_channel */
+ rtl_set_rfreg(hw, rf_path, RF_CHNLBW, RFREG_OFFSET_MASK, 0x57595);
+ rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x70000);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x0F496);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x4F496);
+ rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x8F496);
+
+ /* Back to RX Mode */
+ rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x30000);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "5G PA BIAS path B\n");
+ }
+}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h
new file mode 100644
index 000000000000..090a6203db7e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_PHY_H__
+#define __RTL92DU_PHY_H__
+
+u32 rtl92du_phy_query_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask);
+void rtl92du_phy_set_bb_reg(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data);
+bool rtl92du_phy_mac_config(struct ieee80211_hw *hw);
+bool rtl92du_phy_bb_config(struct ieee80211_hw *hw);
+bool rtl92du_phy_rf_config(struct ieee80211_hw *hw);
+void rtl92du_phy_set_bw_mode(struct ieee80211_hw *hw,
+ enum nl80211_channel_type ch_type);
+u8 rtl92du_phy_sw_chnl(struct ieee80211_hw *hw);
+bool rtl92du_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum rf_content content,
+ enum radio_path rfpath);
+bool rtl92du_phy_set_rf_power_state(struct ieee80211_hw *hw,
+ enum rf_pwrstate rfpwr_state);
+
+void rtl92du_phy_set_poweron(struct ieee80211_hw *hw);
+bool rtl92du_phy_check_poweroff(struct ieee80211_hw *hw);
+void rtl92du_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t);
+void rtl92du_update_bbrf_configuration(struct ieee80211_hw *hw);
+void rtl92du_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta);
+void rtl92du_phy_iq_calibrate(struct ieee80211_hw *hw);
+void rtl92du_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel);
+void rtl92du_phy_init_pa_bias(struct ieee80211_hw *hw);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c
new file mode 100644
index 000000000000..044dd65eafd0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/phy_common.h"
+#include "phy.h"
+#include "rf.h"
+
+bool rtl92du_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u8 mac_on_bit = bmac0 ? MAC1_ON : MAC0_ON;
+ u8 mac_reg = bmac0 ? REG_MAC1 : REG_MAC0;
+ bool bresult = true; /* true: need to enable BB/RF power */
+ u32 maskforphyset = 0;
+ u16 val16;
+ u8 u1btmp;
+
+ rtlhal->during_mac0init_radiob = false;
+ rtlhal->during_mac1init_radioa = false;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "===>\n");
+
+	/* MAC0 needs PHY1 to load radio_b.txt. The driver uses DBI to write it. */
+ u1btmp = rtl_read_byte(rtlpriv, mac_reg);
+ if (!(u1btmp & mac_on_bit)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "enable BB & RF\n");
+ /* Enable BB and RF power */
+
+ maskforphyset = bmac0 ? MAC0_ACCESS_PHY1 : MAC1_ACCESS_PHY0;
+
+ val16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN | maskforphyset);
+ val16 &= 0xfffc;
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN | maskforphyset, val16);
+
+ val16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN | maskforphyset);
+ val16 |= BIT(13) | BIT(0) | BIT(1);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN | maskforphyset, val16);
+ } else {
+		/* We assume that if MAC1 is on, then radio_a.txt
+		 * and radio_b.txt have already been loaded.
+		 */
+ bresult = false;
+ }
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<===\n");
+ return bresult;
+}
+
+void rtl92du_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ u8 mac_on_bit = bmac0 ? MAC1_ON : MAC0_ON;
+ u8 mac_reg = bmac0 ? REG_MAC1 : REG_MAC0;
+ u32 maskforphyset = 0;
+ u8 u1btmp;
+
+ rtlhal->during_mac0init_radiob = false;
+ rtlhal->during_mac1init_radioa = false;
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
+
+	/* Check again whether the other MAC is enabled; if it is,
+	 * do not power down its radio.
+	 */
+ u1btmp = rtl_read_byte(rtlpriv, mac_reg);
+ if (!(u1btmp & mac_on_bit)) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "power down\n");
+		/* Power down the RF radio according to YuNan's advice. */
+ maskforphyset = bmac0 ? MAC0_ACCESS_PHY1 : MAC1_ACCESS_PHY0;
+ rtl_write_dword(rtlpriv, RFPGA0_XA_LSSIPARAMETER | maskforphyset,
+ 0x00000000);
+ }
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
+}
+
+bool rtl92du_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+ bool mac1_initradioa_first = false, mac0_initradiob_first = false;
+ bool need_pwrdown_radioa = false, need_pwrdown_radiob = false;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ struct bb_reg_def *pphyreg;
+ bool true_bpath = false;
+ bool rtstatus = true;
+ u32 u4_regvalue = 0;
+ u8 rfpath;
+
+ if (rtlphy->rf_type == RF_1T1R)
+ rtlphy->num_total_rfpath = 1;
+ else
+ rtlphy->num_total_rfpath = 2;
+
+	/* Single PHY mode: MAC0 uses radio_a and radio_b to configure
+	 * path A and path B separately; MAC1 doesn't need to configure
+	 * the RF.
+	 * Dual PHY mode: MAC0 uses radio_a to configure path A of the
+	 * 1st PHY, and MAC1 uses radio_b to configure path A of the
+	 * 2nd PHY.
+	 * In DMDP, MAC0 is on the 2.4 GHz (G) band and MAC1 is on the
+	 * 5 GHz (A) band.
+	 */
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ if (rtlhal->current_bandtype == BAND_ON_2_4G &&
+ rtlhal->interfaceindex == 0) {
+			/* MAC0 needs PHY1 to load radio_b.txt. */
+ if (rtl92du_phy_enable_anotherphy(hw, true)) {
+ rtlphy->num_total_rfpath = 2;
+ mac0_initradiob_first = true;
+ } else {
+				/* If MAC1 is already on, radio_a.txt and
+				 * radio_b.txt have already been loaded.
+				 */
+ return rtstatus;
+ }
+ } else if (rtlhal->current_bandtype == BAND_ON_5G &&
+ rtlhal->interfaceindex == 1) {
+			/* MAC1 needs PHY0 to load radio_a.txt. */
+ if (rtl92du_phy_enable_anotherphy(hw, false)) {
+ rtlphy->num_total_rfpath = 2;
+ mac1_initradioa_first = true;
+ } else {
+				/* If MAC0 is already on, radio_a.txt and
+				 * radio_b.txt have already been loaded.
+				 */
+ return rtstatus;
+ }
+ } else if (rtlhal->interfaceindex == 1) {
+			/* MAC0 is enabled, so only initialize radio B. */
+ true_bpath = true;
+ }
+ }
+
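+	/* Note on the loop below: when the other PHY was just powered on
+	 * above, the RF90_PATH_A body runs twice. rfpath is rewound to
+	 * RF90_PATH_A on the second pass, and the during_mac*init_* flags
+	 * steer the register writes to the correct PHY on each pass.
+	 */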
+ for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+		/* MAC1 uses PHY0 to write. */
+ if (mac1_initradioa_first) {
+ if (rfpath == RF90_PATH_A) {
+ rtlhal->during_mac1init_radioa = true;
+ need_pwrdown_radioa = true;
+ } else if (rfpath == RF90_PATH_B) {
+ rtlhal->during_mac1init_radioa = false;
+ mac1_initradioa_first = false;
+ rfpath = RF90_PATH_A;
+ true_bpath = true;
+ rtlphy->num_total_rfpath = 1;
+ }
+ } else if (mac0_initradiob_first) {
+			/* MAC0 uses PHY1 to write. */
+ if (rfpath == RF90_PATH_A)
+ rtlhal->during_mac0init_radiob = false;
+ if (rfpath == RF90_PATH_B) {
+ rtlhal->during_mac0init_radiob = true;
+ mac0_initradiob_first = false;
+ need_pwrdown_radiob = true;
+ rfpath = RF90_PATH_A;
+ true_bpath = true;
+ rtlphy->num_total_rfpath = 1;
+ }
+ }
+
+ pphyreg = &rtlphy->phyreg_def[rfpath];
+
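+		/* Save the original RF_ENV control value; it is restored
+		 * after the radio table has been loaded.
+		 */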
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+ BRFSI_RFENV << 16);
+ break;
+ }
+
+ rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+ udelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+ udelay(1);
+
+		/* Set the address and data field widths for 3-wire RF register access */
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
+ B3WIREADDRESSLENGTH, 0x0);
+ udelay(1);
+ rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+ udelay(1);
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ if (true_bpath)
+ rtstatus = rtl92du_phy_config_rf_with_headerfile(
+ hw, radiob_txt,
+ (enum radio_path)rfpath);
+ else
+ rtstatus = rtl92du_phy_config_rf_with_headerfile(
+ hw, radioa_txt,
+ (enum radio_path)rfpath);
+ break;
+ case RF90_PATH_B:
+ rtstatus =
+ rtl92du_phy_config_rf_with_headerfile(hw, radiob_txt,
+ (enum radio_path)rfpath);
+ break;
+ case RF90_PATH_C:
+ break;
+ case RF90_PATH_D:
+ break;
+ }
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ case RF90_PATH_C:
+ rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV,
+ u4_regvalue);
+ break;
+ case RF90_PATH_B:
+ case RF90_PATH_D:
+ rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16,
+ u4_regvalue);
+ break;
+ }
+
+ if (!rtstatus) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+ "Radio[%d] Fail!!\n", rfpath);
+ return rtstatus;
+ }
+ }
+
+	/* Check again whether MAC0 is enabled; if it is, do not
+	 * power down radio A. Check again whether MAC1 is enabled;
+	 * if it is, do not power down radio B.
+	 */
+ if (need_pwrdown_radioa)
+ rtl92du_phy_powerdown_anotherphy(hw, false);
+ else if (need_pwrdown_radiob)
+ rtl92du_phy_powerdown_anotherphy(hw, true);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
+
+ return rtstatus;
+}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h
new file mode 100644
index 000000000000..4a92cbdd00c0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_RF_H__
+#define __RTL92DU_RF_H__
+
+bool rtl92du_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92du_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+void rtl92du_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c
new file mode 100644
index 000000000000..d069a81ac617
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../usb.h"
+#include "../base.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/fw_common.h"
+#include "../rtl8192d/hw_common.h"
+#include "../rtl8192d/phy_common.h"
+#include "../rtl8192d/trx_common.h"
+#include "phy.h"
+#include "dm.h"
+#include "hw.h"
+#include "trx.h"
+#include "led.h"
+
+#include <linux/module.h>
+
+static struct usb_interface *rtl92du_get_other_intf(struct ieee80211_hw *hw)
+{
+ struct usb_interface *intf;
+ struct usb_device *udev;
+ u8 other_interfaceindex;
+
+ /* See SET_IEEE80211_DEV(hw, &intf->dev); in usb.c */
+ intf = container_of_const(wiphy_dev(hw->wiphy), struct usb_interface, dev);
+
+ if (intf->altsetting[0].desc.bInterfaceNumber == 0)
+ other_interfaceindex = 1;
+ else
+ other_interfaceindex = 0;
+
+ udev = interface_to_usbdev(intf);
+
+ return usb_ifnum_to_if(udev, other_interfaceindex);
+}
+
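+/* The two MACs of the dual-MAC chip are presented as two USB interfaces.
+ * Whichever interface is probed first allocates the calibration curve
+ * buffers and the mutexes; the interface probed second then shares the
+ * same allocations.
+ */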
+static int rtl92du_init_shared_data(struct ieee80211_hw *hw)
+{
+ struct usb_interface *other_intf = rtl92du_get_other_intf(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_priv *other_rtlpriv = NULL;
+ struct ieee80211_hw *other_hw = NULL;
+
+ if (other_intf)
+ other_hw = usb_get_intfdata(other_intf);
+
+ if (other_hw) {
+ /* The other interface was already probed. */
+ other_rtlpriv = rtl_priv(other_hw);
+ rtlpriv->curveindex_2g = other_rtlpriv->curveindex_2g;
+ rtlpriv->curveindex_5g = other_rtlpriv->curveindex_5g;
+ rtlpriv->mutex_for_power_on_off = other_rtlpriv->mutex_for_power_on_off;
+ rtlpriv->mutex_for_hw_init = other_rtlpriv->mutex_for_hw_init;
+
+ if (!rtlpriv->curveindex_2g || !rtlpriv->curveindex_5g ||
+ !rtlpriv->mutex_for_power_on_off || !rtlpriv->mutex_for_hw_init)
+ return -ENOMEM;
+
+ return 0;
+ }
+
+ /* The other interface doesn't exist or was not probed yet. */
+ rtlpriv->curveindex_2g = kcalloc(TARGET_CHNL_NUM_2G,
+ sizeof(*rtlpriv->curveindex_2g),
+ GFP_KERNEL);
+ rtlpriv->curveindex_5g = kcalloc(TARGET_CHNL_NUM_5G,
+ sizeof(*rtlpriv->curveindex_5g),
+ GFP_KERNEL);
+ rtlpriv->mutex_for_power_on_off =
+ kzalloc(sizeof(*rtlpriv->mutex_for_power_on_off), GFP_KERNEL);
+ rtlpriv->mutex_for_hw_init =
+ kzalloc(sizeof(*rtlpriv->mutex_for_hw_init), GFP_KERNEL);
+
+ if (!rtlpriv->curveindex_2g || !rtlpriv->curveindex_5g ||
+ !rtlpriv->mutex_for_power_on_off || !rtlpriv->mutex_for_hw_init) {
+ kfree(rtlpriv->curveindex_2g);
+ kfree(rtlpriv->curveindex_5g);
+ kfree(rtlpriv->mutex_for_power_on_off);
+ kfree(rtlpriv->mutex_for_hw_init);
+ rtlpriv->curveindex_2g = NULL;
+ rtlpriv->curveindex_5g = NULL;
+ rtlpriv->mutex_for_power_on_off = NULL;
+ rtlpriv->mutex_for_hw_init = NULL;
+ return -ENOMEM;
+ }
+
+ mutex_init(rtlpriv->mutex_for_power_on_off);
+ mutex_init(rtlpriv->mutex_for_hw_init);
+
+ return 0;
+}
+
+static void rtl92du_deinit_shared_data(struct ieee80211_hw *hw)
+{
+ struct usb_interface *other_intf = rtl92du_get_other_intf(hw);
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (!other_intf || !usb_get_intfdata(other_intf)) {
+ /* The other interface doesn't exist or was already disconnected. */
+ kfree(rtlpriv->curveindex_2g);
+ kfree(rtlpriv->curveindex_5g);
+ if (rtlpriv->mutex_for_power_on_off)
+ mutex_destroy(rtlpriv->mutex_for_power_on_off);
+ if (rtlpriv->mutex_for_hw_init)
+ mutex_destroy(rtlpriv->mutex_for_hw_init);
+ kfree(rtlpriv->mutex_for_power_on_off);
+ kfree(rtlpriv->mutex_for_hw_init);
+ }
+}
+
+static int rtl92du_init_sw_vars(struct ieee80211_hw *hw)
+{
+ const char *fw_name = "rtlwifi/rtl8192dufw.bin";
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ int err;
+
+ err = rtl92du_init_shared_data(hw);
+ if (err)
+ return err;
+
+ rtlpriv->dm.dm_initialgain_enable = true;
+ rtlpriv->dm.dm_flag = 0;
+ rtlpriv->dm.disable_framebursting = false;
+ rtlpriv->dm.thermalvalue = 0;
+ rtlpriv->dm.useramask = true;
+
+	/* dual MAC */
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
+ rtlpriv->phy.current_channel = 36;
+ else
+ rtlpriv->phy.current_channel = 1;
+
+ if (rtlpriv->rtlhal.macphymode != SINGLEMAC_SINGLEPHY)
+ rtlpriv->rtlhal.disable_amsdu_8k = true;
+
+ /* for LPS & IPS */
+ rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
+ rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
+ rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
+
+ /* for early mode */
+ rtlpriv->rtlhal.earlymode_enable = false;
+
+ /* for firmware buf */
+ rtlpriv->rtlhal.pfirmware = kmalloc(0x8000, GFP_KERNEL);
+ if (!rtlpriv->rtlhal.pfirmware)
+ return -ENOMEM;
+
+ rtlpriv->max_fw_size = 0x8000;
+ pr_info("Driver for Realtek RTL8192DU WLAN interface\n");
+ pr_info("Loading firmware file %s\n", fw_name);
+
+ /* request fw */
+ err = request_firmware_nowait(THIS_MODULE, 1, fw_name,
+ rtlpriv->io.dev, GFP_KERNEL, hw,
+ rtl_fw_cb);
+ if (err) {
+ pr_err("Failed to request firmware!\n");
+ kfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+ return err;
+ }
+
+ return 0;
+}
+
+static void rtl92du_deinit_sw_vars(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ kfree(rtlpriv->rtlhal.pfirmware);
+ rtlpriv->rtlhal.pfirmware = NULL;
+
+ rtl92du_deinit_shared_data(hw);
+}
+
+static const struct rtl_hal_ops rtl8192du_hal_ops = {
+ .init_sw_vars = rtl92du_init_sw_vars,
+ .deinit_sw_vars = rtl92du_deinit_sw_vars,
+ .read_chip_version = rtl92du_read_chip_version,
+ .read_eeprom_info = rtl92d_read_eeprom_info,
+ .hw_init = rtl92du_hw_init,
+ .hw_disable = rtl92du_card_disable,
+ .enable_interrupt = rtl92du_enable_interrupt,
+ .disable_interrupt = rtl92du_disable_interrupt,
+ .set_network_type = rtl92du_set_network_type,
+ .set_chk_bssid = rtl92du_set_check_bssid,
+ .set_qos = rtl92d_set_qos,
+ .set_bcn_reg = rtl92du_set_beacon_related_registers,
+ .set_bcn_intv = rtl92du_set_beacon_interval,
+ .update_interrupt_mask = rtl92du_update_interrupt_mask,
+ .get_hw_reg = rtl92du_get_hw_reg,
+ .set_hw_reg = rtl92du_set_hw_reg,
+ .update_rate_tbl = rtl92d_update_hal_rate_tbl,
+ .fill_tx_desc = rtl92du_tx_fill_desc,
+ .query_rx_desc = rtl92d_rx_query_desc,
+ .set_channel_access = rtl92d_update_channel_access_setting,
+ .radio_onoff_checking = rtl92d_gpio_radio_on_off_checking,
+ .set_bw_mode = rtl92du_phy_set_bw_mode,
+ .switch_channel = rtl92du_phy_sw_chnl,
+ .dm_watchdog = rtl92du_dm_watchdog,
+ .scan_operation_backup = rtl_phy_scan_operation_backup,
+ .set_rf_power_state = rtl92du_phy_set_rf_power_state,
+ .led_control = rtl92du_led_control,
+ .set_desc = rtl92d_set_desc,
+ .get_desc = rtl92d_get_desc,
+ .enable_hw_sec = rtl92d_enable_hw_security_config,
+ .set_key = rtl92d_set_key,
+ .get_bbreg = rtl92du_phy_query_bb_reg,
+ .set_bbreg = rtl92du_phy_set_bb_reg,
+ .get_rfreg = rtl92d_phy_query_rf_reg,
+ .set_rfreg = rtl92d_phy_set_rf_reg,
+ .linked_set_reg = rtl92du_linked_set_reg,
+ .fill_h2c_cmd = rtl92d_fill_h2c_cmd,
+ .get_btc_status = rtl_btc_status_false,
+ .phy_iq_calibrate = rtl92du_phy_iq_calibrate,
+ .phy_lc_calibrate = rtl92du_phy_lc_calibrate,
+};
+
+static struct rtl_mod_params rtl92du_mod_params = {
+ .sw_crypto = false,
+ .inactiveps = false,
+ .swctrl_lps = false,
+ .debug_level = 0,
+ .debug_mask = 0,
+};
+
+static const struct rtl_hal_usbint_cfg rtl92du_interface_cfg = {
+ /* rx */
+ .rx_urb_num = 8,
+ .rx_max_size = 15360,
+ .usb_rx_hdl = NULL,
+ .usb_rx_segregate_hdl = NULL,
+ /* tx */
+ .usb_tx_cleanup = rtl92du_tx_cleanup,
+ .usb_tx_post_hdl = rtl92du_tx_post_hdl,
+ .usb_tx_aggregate_hdl = rtl92du_tx_aggregate_hdl,
+ .usb_endpoint_mapping = rtl92du_endpoint_mapping,
+ .usb_mq_to_hwq = rtl92du_mq_to_hwq,
+};
+
+static const struct rtl_hal_cfg rtl92du_hal_cfg = {
+ .name = "rtl8192du",
+ .ops = &rtl8192du_hal_ops,
+ .mod_params = &rtl92du_mod_params,
+ .usb_interface_cfg = &rtl92du_interface_cfg,
+
+ .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
+ .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
+ .maps[SYS_CLK] = REG_SYS_CLKR,
+ .maps[MAC_RCR_AM] = RCR_AM,
+ .maps[MAC_RCR_AB] = RCR_AB,
+ .maps[MAC_RCR_ACRC32] = RCR_ACRC32,
+ .maps[MAC_RCR_ACF] = RCR_ACF,
+ .maps[MAC_RCR_AAP] = RCR_AAP,
+
+ .maps[EFUSE_TEST] = REG_EFUSE_TEST,
+ .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS,
+ .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_CLK] = 0, /* just for 92se */
+ .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
+ .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
+ .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
+ .maps[EFUSE_LOADER_CLK_EN] = 0,
+ .maps[EFUSE_ANA8M] = 0, /* just for 92se */
+ .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
+ .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION,
+ .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN,
+
+ .maps[RWCAM] = REG_CAMCMD,
+ .maps[WCAMI] = REG_CAMWRITE,
+ .maps[RCAMO] = REG_CAMREAD,
+ .maps[CAMDBG] = REG_CAMDBG,
+ .maps[SECR] = REG_SECCFG,
+ .maps[SEC_CAM_NONE] = CAM_NONE,
+ .maps[SEC_CAM_WEP40] = CAM_WEP40,
+ .maps[SEC_CAM_TKIP] = CAM_TKIP,
+ .maps[SEC_CAM_AES] = CAM_AES,
+ .maps[SEC_CAM_WEP104] = CAM_WEP104,
+
+ .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
+ .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
+ .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
+ .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
+ .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
+ .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
+ .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
+ .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
+ .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
+ .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
+ .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
+ .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
+ .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
+ .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
+ .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
+ .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
+
+ .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
+ .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
+ .maps[RTL_IMR_BCNINT] = IMR_BCNINT,
+ .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
+ .maps[RTL_IMR_RDU] = IMR_RDU,
+ .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
+ .maps[RTL_IMR_BDOK] = IMR_BDOK,
+ .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
+ .maps[RTL_IMR_TBDER] = IMR_TBDER,
+ .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
+ .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
+ .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
+ .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
+ .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
+ .maps[RTL_IMR_VODOK] = IMR_VODOK,
+ .maps[RTL_IMR_ROK] = IMR_ROK,
+ .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
+
+ .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M,
+ .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M,
+ .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M,
+ .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M,
+ .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M,
+ .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M,
+ .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M,
+ .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M,
+ .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M,
+ .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M,
+ .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M,
+ .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M,
+
+ .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7,
+ .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15,
+};
+
+module_param_named(swenc, rtl92du_mod_params.sw_crypto, bool, 0444);
+module_param_named(debug_level, rtl92du_mod_params.debug_level, int, 0644);
+module_param_named(ips, rtl92du_mod_params.inactiveps, bool, 0444);
+module_param_named(swlps, rtl92du_mod_params.swctrl_lps, bool, 0444);
+module_param_named(debug_mask, rtl92du_mod_params.debug_mask, ullong, 0644);
+MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)");
+MODULE_PARM_DESC(ips, "Set to 1 to use link power save (default 0)");
+MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)");
+MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
+MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
+
+#define USB_VENDOR_ID_REALTEK 0x0bda
+
+static const struct usb_device_id rtl8192d_usb_ids[] = {
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8193, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8194, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8111, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x0193, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8171, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0xe194, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x2019, 0xab2c, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x2019, 0xab2d, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x2019, 0x4903, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x2019, 0x4904, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x07b8, 0x8193, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x20f4, 0x664b, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x04dd, 0x954f, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x04dd, 0x96a6, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x050d, 0x110a, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x050d, 0x1105, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x050d, 0x120a, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x1668, 0x8102, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x0930, 0x0a0a, rtl92du_hal_cfg)},
+ {RTL_USB_DEVICE(0x2001, 0x330c, rtl92du_hal_cfg)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(usb, rtl8192d_usb_ids);
+
+static int rtl8192du_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return rtl_usb_probe(intf, id, &rtl92du_hal_cfg);
+}
+
+static struct usb_driver rtl8192du_driver = {
+ .name = "rtl8192du",
+ .probe = rtl8192du_probe,
+ .disconnect = rtl_usb_disconnect,
+ .id_table = rtl8192d_usb_ids,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(rtl8192du_driver);
+
+MODULE_AUTHOR("Bitterblue Smith <rtl8821cerfe2@gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek 8192DU 802.11n Dual Mac USB wireless");
+MODULE_FIRMWARE("rtlwifi/rtl8192dufw.bin");
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c
new file mode 100644
index 000000000000..036701433d85
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c
@@ -0,0 +1,1675 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include <linux/types.h>
+
+#include "table.h"
+
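+/* Baseband register initialization: pairs of { register offset, value }. */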
+const u32 rtl8192du_phy_reg_2tarray[PHY_REG_2T_ARRAYLENGTH] = {
+ 0x800, 0x80040002,
+ 0x804, 0x00000003,
+ 0x808, 0x0000fc00,
+ 0x80c, 0x0000000a,
+ 0x810, 0x10001331,
+ 0x814, 0x020c3d10,
+ 0x818, 0x02200385,
+ 0x81c, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390004,
+ 0x828, 0x01000100,
+ 0x82c, 0x00390004,
+ 0x830, 0x27272727,
+ 0x834, 0x27272727,
+ 0x838, 0x27272727,
+ 0x83c, 0x27272727,
+ 0x840, 0x00010000,
+ 0x844, 0x00010000,
+ 0x848, 0x27272727,
+ 0x84c, 0x27272727,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569a569a,
+ 0x85c, 0x0c1b25a4,
+ 0x860, 0x66e60250,
+ 0x864, 0x061f0150,
+ 0x868, 0x27272727,
+ 0x86c, 0x272b2b2b,
+ 0x870, 0x07000700,
+ 0x874, 0x22188000,
+ 0x878, 0x08080808,
+ 0x87c, 0x0001fff8,
+ 0x880, 0xc0083070,
+ 0x884, 0x00000cd5,
+ 0x888, 0x00000000,
+ 0x88c, 0xcc0000c0,
+ 0x890, 0x00000800,
+ 0x894, 0xfffffffe,
+ 0x898, 0x40302010,
+ 0x89c, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90c, 0x81121313,
+ 0xa00, 0x00d047c8,
+ 0xa04, 0x80ff000c,
+ 0xa08, 0x8c8a8300,
+ 0xa0c, 0x2e68120f,
+ 0xa10, 0x9500bb78,
+ 0xa14, 0x11144028,
+ 0xa18, 0x00881117,
+ 0xa1c, 0x89140f00,
+ 0xa20, 0x1a1b0000,
+ 0xa24, 0x090e1317,
+ 0xa28, 0x00000204,
+ 0xa2c, 0x00d30000,
+ 0xa70, 0x101fff00,
+ 0xa74, 0x00000007,
+ 0xc00, 0x40071d40,
+ 0xc04, 0x03a05633,
+ 0xc08, 0x001000e4,
+ 0xc0c, 0x6c6c6c6c,
+ 0xc10, 0x08800000,
+ 0xc14, 0x40000100,
+ 0xc18, 0x08800000,
+ 0xc1c, 0x40000100,
+ 0xc20, 0x00000000,
+ 0xc24, 0x00000000,
+ 0xc28, 0x00000000,
+ 0xc2c, 0x00000000,
+ 0xc30, 0x69e9ac44,
+ 0xc34, 0x469652af,
+ 0xc38, 0x49795994,
+ 0xc3c, 0x0a979718,
+ 0xc40, 0x1f7c403f,
+ 0xc44, 0x000100b7,
+ 0xc48, 0xec020107,
+ 0xc4c, 0x007f037f,
+ 0xc50, 0x69543420,
+ 0xc54, 0x43bc009e,
+ 0xc58, 0x69543420,
+ 0xc5c, 0x433c00a8,
+ 0xc60, 0x00000000,
+ 0xc64, 0x7112848b,
+ 0xc68, 0x47c00bff,
+ 0xc6c, 0x00000036,
+ 0xc70, 0x2c7f000d,
+ 0xc74, 0x258610db,
+ 0xc78, 0x0000001f,
+ 0xc7c, 0x40b95612,
+ 0xc80, 0x40000100,
+ 0xc84, 0x20f60000,
+ 0xc88, 0x40000100,
+ 0xc8c, 0xa0e40000,
+ 0xc90, 0x00121820,
+ 0xc94, 0x00000007,
+ 0xc98, 0x00121820,
+ 0xc9c, 0x00007f7f,
+ 0xca0, 0x00000000,
+ 0xca4, 0x00000080,
+ 0xca8, 0x00000000,
+ 0xcac, 0x00000000,
+ 0xcb0, 0x00000000,
+ 0xcb4, 0x00000000,
+ 0xcb8, 0x00000000,
+ 0xcbc, 0x28000000,
+ 0xcc0, 0x00000000,
+ 0xcc4, 0x00000000,
+ 0xcc8, 0x00000000,
+ 0xccc, 0x00000000,
+ 0xcd0, 0x00000000,
+ 0xcd4, 0x00000000,
+ 0xcd8, 0x64b11e20,
+ 0xcdc, 0xe0767533,
+ 0xce0, 0x00222222,
+ 0xce4, 0x00000000,
+ 0xce8, 0x37644302,
+ 0xcec, 0x2f97d40c,
+ 0xd00, 0x00080740,
+ 0xd04, 0x00020403,
+ 0xd08, 0x0000907f,
+ 0xd0c, 0x20010201,
+ 0xd10, 0xa0633333,
+ 0xd14, 0x3333bc43,
+ 0xd18, 0x7a8f5b6b,
+ 0xd2c, 0xcc979975,
+ 0xd30, 0x00000000,
+ 0xd34, 0x80608404,
+ 0xd38, 0x00000000,
+ 0xd3c, 0x00027353,
+ 0xd40, 0x00000000,
+ 0xd44, 0x00000000,
+ 0xd48, 0x00000000,
+ 0xd4c, 0x00000000,
+ 0xd50, 0x6437140a,
+ 0xd54, 0x00000000,
+ 0xd58, 0x00000000,
+ 0xd5c, 0x30032064,
+ 0xd60, 0x4653de68,
+ 0xd64, 0x04518a3c,
+ 0xd68, 0x00002101,
+ 0xd6c, 0x2a201c16,
+ 0xd70, 0x1812362e,
+ 0xd74, 0x322c2220,
+ 0xd78, 0x000e3c24,
+ 0xe00, 0x2a2a2a2a,
+ 0xe04, 0x2a2a2a2a,
+ 0xe08, 0x03902a2a,
+ 0xe10, 0x2a2a2a2a,
+ 0xe14, 0x2a2a2a2a,
+ 0xe18, 0x2a2a2a2a,
+ 0xe1c, 0x2a2a2a2a,
+ 0xe28, 0x00000000,
+ 0xe30, 0x1000dc1f,
+ 0xe34, 0x10008c1f,
+ 0xe38, 0x02140102,
+ 0xe3c, 0x681604c2,
+ 0xe40, 0x01007c00,
+ 0xe44, 0x01004800,
+ 0xe48, 0xfb000000,
+ 0xe4c, 0x000028d1,
+ 0xe50, 0x1000dc1f,
+ 0xe54, 0x10008c1f,
+ 0xe58, 0x02140102,
+ 0xe5c, 0x28160d05,
+ 0xe60, 0x00000010,
+ 0xe68, 0x001b25a4,
+ 0xe6c, 0x63db25a4,
+ 0xe70, 0x63db25a4,
+ 0xe74, 0x0c126da4,
+ 0xe78, 0x0c126da4,
+ 0xe7c, 0x0c126da4,
+ 0xe80, 0x0c126da4,
+ 0xe84, 0x63db25a4,
+ 0xe88, 0x0c126da4,
+ 0xe8c, 0x63db25a4,
+ 0xed0, 0x63db25a4,
+ 0xed4, 0x63db25a4,
+ 0xed8, 0x63db25a4,
+ 0xedc, 0x001b25a4,
+ 0xee0, 0x001b25a4,
+ 0xeec, 0x6fdb25a4,
+ 0xf14, 0x00000003,
+ 0xf1c, 0x00000064,
+ 0xf4c, 0x00000004,
+ 0xf00, 0x00000300,
+};
+
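+/* TX power by rate ("PG") initialization: triplets of
+ * { register offset, bitmask, value }.
+ */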
+const u32 rtl8192du_phy_reg_array_pg[PHY_REG_ARRAY_PG_LENGTH] = {
+ 0xe00, 0xffffffff, 0x07090c0c,
+ 0xe04, 0xffffffff, 0x01020405,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x0b0c0c0e,
+ 0xe14, 0xffffffff, 0x01030506,
+ 0xe18, 0xffffffff, 0x0b0c0d0e,
+ 0xe1c, 0xffffffff, 0x01030509,
+ 0x830, 0xffffffff, 0x07090c0c,
+ 0x834, 0xffffffff, 0x01020405,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x0b0c0c0e,
+ 0x848, 0xffffffff, 0x01030506,
+ 0x84c, 0xffffffff, 0x0b0c0d0e,
+ 0x868, 0xffffffff, 0x01030509,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x06060606,
+ 0xe14, 0xffffffff, 0x00020406,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x06060606,
+ 0x848, 0xffffffff, 0x00020406,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x00000000,
+ 0xe04, 0xffffffff, 0x00000000,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x00000000,
+ 0xe14, 0xffffffff, 0x00000000,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x00000000,
+ 0x834, 0xffffffff, 0x00000000,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x00000000,
+ 0x848, 0xffffffff, 0x00000000,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+ 0xe00, 0xffffffff, 0x04040404,
+ 0xe04, 0xffffffff, 0x00020204,
+ 0xe08, 0x0000ff00, 0x00000000,
+ 0x86c, 0xffffff00, 0x00000000,
+ 0xe10, 0xffffffff, 0x08080808,
+ 0xe14, 0xffffffff, 0x00040408,
+ 0xe18, 0xffffffff, 0x00000000,
+ 0xe1c, 0xffffffff, 0x00000000,
+ 0x830, 0xffffffff, 0x04040404,
+ 0x834, 0xffffffff, 0x00020204,
+ 0x838, 0xffffff00, 0x00000000,
+ 0x86c, 0x000000ff, 0x00000000,
+ 0x83c, 0xffffffff, 0x08080808,
+ 0x848, 0xffffffff, 0x00040408,
+ 0x84c, 0xffffffff, 0x00000000,
+ 0x868, 0xffffffff, 0x00000000,
+};
+
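+/* RF (radio A) initialization: pairs of { RF register, value }. Entries
+ * with register 0x0fe appear to be delay markers for the config parser
+ * rather than real register writes. The radio B and internal-PA variants
+ * below use the same layout.
+ */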
+const u32 rtl8192du_radioa_2tarray[RADIOA_2T_ARRAYLENGTH] = {
+ 0x000, 0x00030000,
+ 0x001, 0x00030000,
+ 0x002, 0x00000000,
+ 0x003, 0x00018c63,
+ 0x004, 0x00018c63,
+ 0x008, 0x00084000,
+ 0x00b, 0x0001c000,
+ 0x00e, 0x00018c67,
+ 0x00f, 0x00000851,
+ 0x014, 0x00021440,
+ 0x018, 0x00017524,
+ 0x019, 0x00000000,
+ 0x01d, 0x000a1290,
+ 0x023, 0x00001558,
+ 0x01a, 0x00030a99,
+ 0x01b, 0x00040b00,
+ 0x01c, 0x000fc339,
+ 0x03a, 0x000a57eb,
+ 0x03b, 0x00020000,
+ 0x03c, 0x000ff454,
+ 0x020, 0x0000aa52,
+ 0x021, 0x00054000,
+ 0x040, 0x0000aa52,
+ 0x041, 0x00014000,
+ 0x025, 0x000803be,
+ 0x026, 0x000fc638,
+ 0x027, 0x00077c18,
+ 0x028, 0x000de471,
+ 0x029, 0x000d7110,
+ 0x02a, 0x0008cb04,
+ 0x02b, 0x0004128b,
+ 0x02c, 0x00001840,
+ 0x043, 0x0002444f,
+ 0x044, 0x0001adb0,
+ 0x045, 0x00056467,
+ 0x046, 0x0008992c,
+ 0x047, 0x0000452c,
+ 0x048, 0x000f9c43,
+ 0x049, 0x00002e0c,
+ 0x04a, 0x000546eb,
+ 0x04b, 0x0008966c,
+ 0x04c, 0x0000dde9,
+ 0x018, 0x00007401,
+ 0x000, 0x00070000,
+ 0x012, 0x000dc000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x000287b7,
+ 0x013, 0x000247ab,
+ 0x013, 0x0002079f,
+ 0x013, 0x0001c793,
+ 0x013, 0x0001839b,
+ 0x013, 0x00014392,
+ 0x013, 0x0001019a,
+ 0x013, 0x0000c191,
+ 0x013, 0x00008194,
+ 0x013, 0x000040a0,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x016, 0x000e1330,
+ 0x016, 0x000a1330,
+ 0x016, 0x00061330,
+ 0x016, 0x00021330,
+ 0x018, 0x00017524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b4,
+ 0x013, 0x0000c0a8,
+ 0x013, 0x0000b030,
+ 0x013, 0x00004024,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x018, 0x00037524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b4,
+ 0x013, 0x0000c0a8,
+ 0x013, 0x0000b030,
+ 0x013, 0x00004024,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x018, 0x00057568,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b4,
+ 0x013, 0x0000c0a8,
+ 0x013, 0x0000b030,
+ 0x013, 0x00004024,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x030, 0x0004470f,
+ 0x031, 0x00044ff0,
+ 0x032, 0x00000070,
+ 0x033, 0x000dd480,
+ 0x034, 0x000ffac0,
+ 0x035, 0x000b80c0,
+ 0x036, 0x00077000,
+ 0x037, 0x00064ff2,
+ 0x038, 0x000e7661,
+ 0x039, 0x00000e90,
+ 0x000, 0x00030000,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088009,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088001,
+ 0x01f, 0x00080000,
+ 0x0fe, 0x00000000,
+ 0x018, 0x00097524,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x02b, 0x00041289,
+ 0x0fe, 0x00000000,
+ 0x02d, 0x0006aaaa,
+ 0x02e, 0x000b4d01,
+ 0x02d, 0x00080000,
+ 0x02e, 0x00004d02,
+ 0x02d, 0x00095555,
+ 0x02e, 0x00054d03,
+ 0x02d, 0x000aaaaa,
+ 0x02e, 0x000b4d04,
+ 0x02d, 0x000c0000,
+ 0x02e, 0x00004d05,
+ 0x02d, 0x000d5555,
+ 0x02e, 0x00054d06,
+ 0x02d, 0x000eaaaa,
+ 0x02e, 0x000b4d07,
+ 0x02d, 0x00000000,
+ 0x02e, 0x00005108,
+ 0x02d, 0x00015555,
+ 0x02e, 0x00055109,
+ 0x02d, 0x0002aaaa,
+ 0x02e, 0x000b510a,
+ 0x02d, 0x00040000,
+ 0x02e, 0x0000510b,
+ 0x02d, 0x00055555,
+ 0x02e, 0x0005510c,
+};
+
+const u32 rtl8192du_radiob_2tarray[RADIOB_2T_ARRAYLENGTH] = {
+ 0x000, 0x00030000,
+ 0x001, 0x00030000,
+ 0x002, 0x00000000,
+ 0x003, 0x00018c63,
+ 0x004, 0x00018c63,
+ 0x008, 0x00084000,
+ 0x00b, 0x0001c000,
+ 0x00e, 0x00018c67,
+ 0x00f, 0x00000851,
+ 0x014, 0x00021440,
+ 0x018, 0x00007401,
+ 0x019, 0x00000060,
+ 0x01d, 0x000a1290,
+ 0x023, 0x00001558,
+ 0x01a, 0x00030a99,
+ 0x01b, 0x00040b00,
+ 0x01c, 0x000fc339,
+ 0x03a, 0x000a57eb,
+ 0x03b, 0x00020000,
+ 0x03c, 0x000ff454,
+ 0x020, 0x0000aa52,
+ 0x021, 0x00054000,
+ 0x040, 0x0000aa52,
+ 0x041, 0x00014000,
+ 0x025, 0x000803be,
+ 0x026, 0x000fc638,
+ 0x027, 0x00077c18,
+ 0x028, 0x000d1c31,
+ 0x029, 0x000d7110,
+ 0x02a, 0x000aeb04,
+ 0x02b, 0x0004128b,
+ 0x02c, 0x00001840,
+ 0x043, 0x0002444f,
+ 0x044, 0x0001adb0,
+ 0x045, 0x00056467,
+ 0x046, 0x0008992c,
+ 0x047, 0x0000452c,
+ 0x048, 0x000f9c43,
+ 0x049, 0x00002e0c,
+ 0x04a, 0x000546eb,
+ 0x04b, 0x0008966c,
+ 0x04c, 0x0000dde9,
+ 0x018, 0x00007401,
+ 0x000, 0x00070000,
+ 0x012, 0x000dc000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x000287b7,
+ 0x013, 0x000247ab,
+ 0x013, 0x0002079f,
+ 0x013, 0x0001c793,
+ 0x013, 0x0001839b,
+ 0x013, 0x00014392,
+ 0x013, 0x0001019a,
+ 0x013, 0x0000c191,
+ 0x013, 0x00008194,
+ 0x013, 0x000040a0,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x016, 0x000e1330,
+ 0x016, 0x000a1330,
+ 0x016, 0x00061330,
+ 0x016, 0x00021330,
+ 0x018, 0x00017524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b4,
+ 0x013, 0x0000c0a8,
+ 0x013, 0x0000b030,
+ 0x013, 0x00004024,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x018, 0x00037524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b4,
+ 0x013, 0x0000c0a8,
+ 0x013, 0x0000b030,
+ 0x013, 0x00004024,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x018, 0x00057524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bc,
+ 0x013, 0x000247b0,
+ 0x013, 0x000203b4,
+ 0x013, 0x0001c3a8,
+ 0x013, 0x000181b4,
+ 0x013, 0x000141a8,
+ 0x013, 0x000100b4,
+ 0x013, 0x0000c0a8,
+ 0x013, 0x0000b030,
+ 0x013, 0x00004024,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f4c3,
+ 0x015, 0x0004f4c3,
+ 0x015, 0x0008f4c3,
+ 0x016, 0x000e085f,
+ 0x016, 0x000a085f,
+ 0x016, 0x0006085f,
+ 0x016, 0x0002085f,
+ 0x030, 0x0004470f,
+ 0x031, 0x00044ff0,
+ 0x032, 0x00000070,
+ 0x033, 0x000dd480,
+ 0x034, 0x000ffac0,
+ 0x035, 0x000b80c0,
+ 0x036, 0x00077000,
+ 0x037, 0x00064ff2,
+ 0x038, 0x000e7661,
+ 0x039, 0x00000e90,
+ 0x000, 0x00030000,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088009,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088001,
+ 0x01f, 0x00080000,
+ 0x0fe, 0x00000000,
+ 0x018, 0x00087401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x02b, 0x00041289,
+ 0x0fe, 0x00000000,
+ 0x02d, 0x00066666,
+ 0x02e, 0x00064001,
+ 0x02d, 0x00091111,
+ 0x02e, 0x00014002,
+ 0x02d, 0x000bbbbb,
+ 0x02e, 0x000b4003,
+ 0x02d, 0x000e6666,
+ 0x02e, 0x00064004,
+ 0x02d, 0x00088888,
+ 0x02e, 0x00084005,
+ 0x02d, 0x0009dddd,
+ 0x02e, 0x000d4006,
+ 0x02d, 0x000b3333,
+ 0x02e, 0x00034007,
+ 0x02d, 0x00048888,
+ 0x02e, 0x00084408,
+ 0x02d, 0x000bbbbb,
+ 0x02e, 0x000b4409,
+ 0x02d, 0x000e6666,
+ 0x02e, 0x0006440a,
+ 0x02d, 0x00011111,
+ 0x02e, 0x0001480b,
+ 0x02d, 0x0003bbbb,
+ 0x02e, 0x000b480c,
+ 0x02d, 0x00066666,
+ 0x02e, 0x0006480d,
+ 0x02d, 0x000ccccc,
+ 0x02e, 0x000c480e,
+};
+
+const u32 rtl8192du_radioa_2t_int_paarray[RADIOA_2T_INT_PA_ARRAYLENGTH] = {
+ 0x000, 0x00030000,
+ 0x001, 0x00030000,
+ 0x002, 0x00000000,
+ 0x003, 0x00018c63,
+ 0x004, 0x00018c63,
+ 0x008, 0x00084000,
+ 0x00b, 0x0001c000,
+ 0x00e, 0x00018c67,
+ 0x00f, 0x00000851,
+ 0x014, 0x00021440,
+ 0x018, 0x00017524,
+ 0x019, 0x00000000,
+ 0x01d, 0x000a1290,
+ 0x023, 0x00001558,
+ 0x01a, 0x00030a99,
+ 0x01b, 0x00040b00,
+ 0x01c, 0x000fc339,
+ 0x03a, 0x000a57eb,
+ 0x03b, 0x00020000,
+ 0x03c, 0x000ff455,
+ 0x020, 0x0000aa52,
+ 0x021, 0x00054000,
+ 0x040, 0x0000aa52,
+ 0x041, 0x00014000,
+ 0x025, 0x000803be,
+ 0x026, 0x000fc638,
+ 0x027, 0x00077c18,
+ 0x028, 0x000de471,
+ 0x029, 0x000d7110,
+ 0x02a, 0x0008eb04,
+ 0x02b, 0x0004128b,
+ 0x02c, 0x00001840,
+ 0x043, 0x0002444f,
+ 0x044, 0x0001adb0,
+ 0x045, 0x00056467,
+ 0x046, 0x0008992c,
+ 0x047, 0x0000452c,
+ 0x048, 0x000c0443,
+ 0x049, 0x00000730,
+ 0x04a, 0x00050f0f,
+ 0x04b, 0x000896ef,
+ 0x04c, 0x0000ddee,
+ 0x018, 0x00007401,
+ 0x000, 0x00070000,
+ 0x012, 0x000dc000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x000287b7,
+ 0x013, 0x000247ab,
+ 0x013, 0x0002079f,
+ 0x013, 0x0001c793,
+ 0x013, 0x0001839b,
+ 0x013, 0x00014392,
+ 0x013, 0x0001019a,
+ 0x013, 0x0000c191,
+ 0x013, 0x00008194,
+ 0x013, 0x000040a0,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x016, 0x000e1330,
+ 0x016, 0x000a1330,
+ 0x016, 0x00061330,
+ 0x016, 0x00021330,
+ 0x018, 0x00017524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x018, 0x00037564,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x018, 0x00057595,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x030, 0x0004470f,
+ 0x031, 0x00044ff0,
+ 0x032, 0x00000070,
+ 0x033, 0x000dd480,
+ 0x034, 0x000ffac0,
+ 0x035, 0x000b80c0,
+ 0x036, 0x00077000,
+ 0x037, 0x00064ff2,
+ 0x038, 0x000e7661,
+ 0x039, 0x00000e90,
+ 0x000, 0x00030000,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088009,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088001,
+ 0x01f, 0x00080000,
+ 0x0fe, 0x00000000,
+ 0x018, 0x00097524,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x02b, 0x00041289,
+ 0x0fe, 0x00000000,
+ 0x02d, 0x0006aaaa,
+ 0x02e, 0x000b4d01,
+ 0x02d, 0x00080000,
+ 0x02e, 0x00004d02,
+ 0x02d, 0x00095555,
+ 0x02e, 0x00054d03,
+ 0x02d, 0x000aaaaa,
+ 0x02e, 0x000b4d04,
+ 0x02d, 0x000c0000,
+ 0x02e, 0x00004d05,
+ 0x02d, 0x000d5555,
+ 0x02e, 0x00054d06,
+ 0x02d, 0x000eaaaa,
+ 0x02e, 0x000b4d07,
+ 0x02d, 0x00000000,
+ 0x02e, 0x00005108,
+ 0x02d, 0x00015555,
+ 0x02e, 0x00055109,
+ 0x02d, 0x0002aaaa,
+ 0x02e, 0x000b510a,
+ 0x02d, 0x00040000,
+ 0x02e, 0x0000510b,
+ 0x02d, 0x00055555,
+ 0x02e, 0x0005510c,
+};
+
+const u32 rtl8192du_radiob_2t_int_paarray[RADIOB_2T_INT_PA_ARRAYLENGTH] = {
+ 0x000, 0x00030000,
+ 0x001, 0x00030000,
+ 0x002, 0x00000000,
+ 0x003, 0x00018c63,
+ 0x004, 0x00018c63,
+ 0x008, 0x00084000,
+ 0x00b, 0x0001c000,
+ 0x00e, 0x00018c67,
+ 0x00f, 0x00000851,
+ 0x014, 0x00021440,
+ 0x018, 0x00007401,
+ 0x019, 0x00000060,
+ 0x01d, 0x000a1290,
+ 0x023, 0x00001558,
+ 0x01a, 0x00030a99,
+ 0x01b, 0x00040b00,
+ 0x01c, 0x000fc339,
+ 0x03a, 0x000a57eb,
+ 0x03b, 0x00020000,
+ 0x03c, 0x000ff455,
+ 0x020, 0x0000aa52,
+ 0x021, 0x00054000,
+ 0x040, 0x0000aa52,
+ 0x041, 0x00014000,
+ 0x025, 0x000803be,
+ 0x026, 0x000fc638,
+ 0x027, 0x00077c18,
+ 0x028, 0x000d1c31,
+ 0x029, 0x000d7110,
+ 0x02a, 0x000aeb04,
+ 0x02b, 0x0004128b,
+ 0x02c, 0x00001840,
+ 0x043, 0x0002444f,
+ 0x044, 0x0001adb0,
+ 0x045, 0x00056467,
+ 0x046, 0x0008992c,
+ 0x047, 0x0000452c,
+ 0x048, 0x000c0443,
+ 0x049, 0x00000730,
+ 0x04a, 0x00050f0f,
+ 0x04b, 0x000896ef,
+ 0x04c, 0x0000ddee,
+ 0x018, 0x00007401,
+ 0x000, 0x00070000,
+ 0x012, 0x000dc000,
+ 0x012, 0x00090000,
+ 0x012, 0x00051000,
+ 0x012, 0x00012000,
+ 0x013, 0x000287b7,
+ 0x013, 0x000247ab,
+ 0x013, 0x0002079f,
+ 0x013, 0x0001c793,
+ 0x013, 0x0001839b,
+ 0x013, 0x00014392,
+ 0x013, 0x0001019a,
+ 0x013, 0x0000c191,
+ 0x013, 0x00008194,
+ 0x013, 0x000040a0,
+ 0x013, 0x00000018,
+ 0x015, 0x0000f424,
+ 0x015, 0x0004f424,
+ 0x015, 0x0008f424,
+ 0x016, 0x000e1330,
+ 0x016, 0x000a1330,
+ 0x016, 0x00061330,
+ 0x016, 0x00021330,
+ 0x018, 0x00017524,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x018, 0x00037564,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x018, 0x00057595,
+ 0x000, 0x00070000,
+ 0x012, 0x000cf000,
+ 0x012, 0x000bc000,
+ 0x012, 0x00078000,
+ 0x012, 0x00000000,
+ 0x013, 0x000287bf,
+ 0x013, 0x000247b3,
+ 0x013, 0x000207a7,
+ 0x013, 0x0001c79b,
+ 0x013, 0x0001839f,
+ 0x013, 0x00014393,
+ 0x013, 0x00010399,
+ 0x013, 0x0000c38d,
+ 0x013, 0x00008199,
+ 0x013, 0x0000418d,
+ 0x013, 0x00000099,
+ 0x015, 0x0000f495,
+ 0x015, 0x0004f495,
+ 0x015, 0x0008f495,
+ 0x016, 0x000e1874,
+ 0x016, 0x000a1874,
+ 0x016, 0x00061874,
+ 0x016, 0x00021874,
+ 0x030, 0x0004470f,
+ 0x031, 0x00044ff0,
+ 0x032, 0x00000070,
+ 0x033, 0x000dd480,
+ 0x034, 0x000ffac0,
+ 0x035, 0x000b80c0,
+ 0x036, 0x00077000,
+ 0x037, 0x00064ff2,
+ 0x038, 0x000e7661,
+ 0x039, 0x00000e90,
+ 0x000, 0x00030000,
+ 0x018, 0x0000f401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088009,
+ 0x01f, 0x00080003,
+ 0x0fe, 0x00000000,
+ 0x01e, 0x00088001,
+ 0x01f, 0x00080000,
+ 0x0fe, 0x00000000,
+ 0x018, 0x00087401,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x0fe, 0x00000000,
+ 0x02b, 0x00041289,
+ 0x0fe, 0x00000000,
+ 0x02d, 0x00066666,
+ 0x02e, 0x00064001,
+ 0x02d, 0x00091111,
+ 0x02e, 0x00014002,
+ 0x02d, 0x000bbbbb,
+ 0x02e, 0x000b4003,
+ 0x02d, 0x000e6666,
+ 0x02e, 0x00064004,
+ 0x02d, 0x00088888,
+ 0x02e, 0x00084005,
+ 0x02d, 0x0009dddd,
+ 0x02e, 0x000d4006,
+ 0x02d, 0x000b3333,
+ 0x02e, 0x00034007,
+ 0x02d, 0x00048888,
+ 0x02e, 0x00084408,
+ 0x02d, 0x000bbbbb,
+ 0x02e, 0x000b4409,
+ 0x02d, 0x000e6666,
+ 0x02e, 0x0006440a,
+ 0x02d, 0x00011111,
+ 0x02e, 0x0001480b,
+ 0x02d, 0x0003bbbb,
+ 0x02e, 0x000b480c,
+ 0x02d, 0x00066666,
+ 0x02e, 0x0006480d,
+ 0x02d, 0x000ccccc,
+ 0x02e, 0x000c480e,
+};
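+/* MAC register initialization: pairs of { register offset, 8-bit value }. */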
+
+const u32 rtl8192du_mac_2tarray[MAC_2T_ARRAYLENGTH] = {
+ 0x420, 0x00000080,
+ 0x423, 0x00000000,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000004,
+ 0x435, 0x00000005,
+ 0x436, 0x00000006,
+ 0x437, 0x00000007,
+ 0x438, 0x00000000,
+ 0x439, 0x00000000,
+ 0x43a, 0x00000000,
+ 0x43b, 0x00000001,
+ 0x43c, 0x00000004,
+ 0x43d, 0x00000005,
+ 0x43e, 0x00000006,
+ 0x43f, 0x00000007,
+ 0x440, 0x00000050,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000015,
+ 0x445, 0x000000f0,
+ 0x446, 0x0000000f,
+ 0x447, 0x00000000,
+ 0x462, 0x00000008,
+ 0x463, 0x00000003,
+ 0x4c8, 0x000000ff,
+ 0x4c9, 0x00000008,
+ 0x4cc, 0x000000ff,
+ 0x4cd, 0x000000ff,
+ 0x4ce, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000a2,
+ 0x502, 0x0000002f,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000a3,
+ 0x506, 0x0000005e,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002b,
+ 0x509, 0x000000a4,
+ 0x50a, 0x0000005e,
+ 0x50b, 0x00000000,
+ 0x50c, 0x0000004f,
+ 0x50d, 0x000000a4,
+ 0x50e, 0x00000000,
+ 0x50f, 0x00000000,
+ 0x512, 0x0000001c,
+ 0x514, 0x0000000a,
+ 0x515, 0x00000010,
+ 0x516, 0x0000000a,
+ 0x517, 0x00000010,
+ 0x51a, 0x00000016,
+ 0x524, 0x0000000f,
+ 0x525, 0x0000004f,
+ 0x546, 0x00000040,
+ 0x547, 0x00000000,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55a, 0x00000002,
+ 0x55d, 0x000000ff,
+ 0x605, 0x00000080,
+ 0x608, 0x0000000e,
+ 0x609, 0x0000002a,
+ 0x652, 0x00000020,
+ 0x63c, 0x0000000a,
+ 0x63d, 0x0000000a,
+ 0x63e, 0x0000000e,
+ 0x63f, 0x0000000e,
+ 0x66e, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70a, 0x00000065,
+ 0x70b, 0x00000087,
+ 0x024, 0x0000000d,
+ 0x025, 0x00000080,
+ 0x026, 0x00000011,
+ 0x027, 0x00000000,
+ 0x028, 0x00000083,
+ 0x029, 0x000000db,
+ 0x02a, 0x000000ff,
+ 0x02b, 0x00000000,
+ 0x014, 0x00000055,
+ 0x015, 0x000000a9,
+ 0x016, 0x0000008b,
+ 0x017, 0x00000008,
+ 0x010, 0x00000003,
+ 0x011, 0x0000002b,
+ 0x012, 0x00000002,
+ 0x013, 0x00000049,
+};
+
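+/* AGC table: every entry is written to baseband register 0xc78. The 5G-only
+ * and 2G-only variants below follow the same format.
+ */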
+const u32 rtl8192du_agctab_array[AGCTAB_ARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7b060001,
+ 0xc78, 0x7a070001,
+ 0xc78, 0x79080001,
+ 0xc78, 0x78090001,
+ 0xc78, 0x770a0001,
+ 0xc78, 0x760b0001,
+ 0xc78, 0x750c0001,
+ 0xc78, 0x740d0001,
+ 0xc78, 0x730e0001,
+ 0xc78, 0x720f0001,
+ 0xc78, 0x71100001,
+ 0xc78, 0x70110001,
+ 0xc78, 0x6f120001,
+ 0xc78, 0x6e130001,
+ 0xc78, 0x6d140001,
+ 0xc78, 0x6c150001,
+ 0xc78, 0x6b160001,
+ 0xc78, 0x6a170001,
+ 0xc78, 0x69180001,
+ 0xc78, 0x68190001,
+ 0xc78, 0x671a0001,
+ 0xc78, 0x661b0001,
+ 0xc78, 0x651c0001,
+ 0xc78, 0x641d0001,
+ 0xc78, 0x631e0001,
+ 0xc78, 0x621f0001,
+ 0xc78, 0x61200001,
+ 0xc78, 0x60210001,
+ 0xc78, 0x49220001,
+ 0xc78, 0x48230001,
+ 0xc78, 0x47240001,
+ 0xc78, 0x46250001,
+ 0xc78, 0x45260001,
+ 0xc78, 0x44270001,
+ 0xc78, 0x43280001,
+ 0xc78, 0x42290001,
+ 0xc78, 0x412a0001,
+ 0xc78, 0x402b0001,
+ 0xc78, 0x262c0001,
+ 0xc78, 0x252d0001,
+ 0xc78, 0x242e0001,
+ 0xc78, 0x232f0001,
+ 0xc78, 0x22300001,
+ 0xc78, 0x21310001,
+ 0xc78, 0x20320001,
+ 0xc78, 0x06330001,
+ 0xc78, 0x05340001,
+ 0xc78, 0x04350001,
+ 0xc78, 0x03360001,
+ 0xc78, 0x02370001,
+ 0xc78, 0x01380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x7b400001,
+ 0xc78, 0x7b410001,
+ 0xc78, 0x7a420001,
+ 0xc78, 0x79430001,
+ 0xc78, 0x78440001,
+ 0xc78, 0x77450001,
+ 0xc78, 0x76460001,
+ 0xc78, 0x75470001,
+ 0xc78, 0x74480001,
+ 0xc78, 0x73490001,
+ 0xc78, 0x724a0001,
+ 0xc78, 0x714b0001,
+ 0xc78, 0x704c0001,
+ 0xc78, 0x6f4d0001,
+ 0xc78, 0x6e4e0001,
+ 0xc78, 0x6d4f0001,
+ 0xc78, 0x6c500001,
+ 0xc78, 0x6b510001,
+ 0xc78, 0x6a520001,
+ 0xc78, 0x69530001,
+ 0xc78, 0x68540001,
+ 0xc78, 0x67550001,
+ 0xc78, 0x66560001,
+ 0xc78, 0x65570001,
+ 0xc78, 0x64580001,
+ 0xc78, 0x63590001,
+ 0xc78, 0x625a0001,
+ 0xc78, 0x615b0001,
+ 0xc78, 0x605c0001,
+ 0xc78, 0x485d0001,
+ 0xc78, 0x475e0001,
+ 0xc78, 0x465f0001,
+ 0xc78, 0x45600001,
+ 0xc78, 0x44610001,
+ 0xc78, 0x43620001,
+ 0xc78, 0x42630001,
+ 0xc78, 0x41640001,
+ 0xc78, 0x40650001,
+ 0xc78, 0x27660001,
+ 0xc78, 0x26670001,
+ 0xc78, 0x25680001,
+ 0xc78, 0x24690001,
+ 0xc78, 0x236a0001,
+ 0xc78, 0x226b0001,
+ 0xc78, 0x216c0001,
+ 0xc78, 0x206d0001,
+ 0xc78, 0x206e0001,
+ 0xc78, 0x206f0001,
+ 0xc78, 0x20700001,
+ 0xc78, 0x20710001,
+ 0xc78, 0x20720001,
+ 0xc78, 0x20730001,
+ 0xc78, 0x20740001,
+ 0xc78, 0x20750001,
+ 0xc78, 0x20760001,
+ 0xc78, 0x20770001,
+ 0xc78, 0x20780001,
+ 0xc78, 0x20790001,
+ 0xc78, 0x207a0001,
+ 0xc78, 0x207b0001,
+ 0xc78, 0x207c0001,
+ 0xc78, 0x207d0001,
+ 0xc78, 0x207e0001,
+ 0xc78, 0x207f0001,
+ 0xc78, 0x38000002,
+ 0xc78, 0x38010002,
+ 0xc78, 0x38020002,
+ 0xc78, 0x38030002,
+ 0xc78, 0x38040002,
+ 0xc78, 0x38050002,
+ 0xc78, 0x38060002,
+ 0xc78, 0x38070002,
+ 0xc78, 0x38080002,
+ 0xc78, 0x3c090002,
+ 0xc78, 0x3e0a0002,
+ 0xc78, 0x400b0002,
+ 0xc78, 0x440c0002,
+ 0xc78, 0x480d0002,
+ 0xc78, 0x4c0e0002,
+ 0xc78, 0x500f0002,
+ 0xc78, 0x52100002,
+ 0xc78, 0x56110002,
+ 0xc78, 0x5a120002,
+ 0xc78, 0x5e130002,
+ 0xc78, 0x60140002,
+ 0xc78, 0x60150002,
+ 0xc78, 0x60160002,
+ 0xc78, 0x62170002,
+ 0xc78, 0x62180002,
+ 0xc78, 0x62190002,
+ 0xc78, 0x621a0002,
+ 0xc78, 0x621b0002,
+ 0xc78, 0x621c0002,
+ 0xc78, 0x621d0002,
+ 0xc78, 0x621e0002,
+ 0xc78, 0x621f0002,
+ 0xc78, 0x32000044,
+ 0xc78, 0x32010044,
+ 0xc78, 0x32020044,
+ 0xc78, 0x32030044,
+ 0xc78, 0x32040044,
+ 0xc78, 0x32050044,
+ 0xc78, 0x32060044,
+ 0xc78, 0x34070044,
+ 0xc78, 0x35080044,
+ 0xc78, 0x36090044,
+ 0xc78, 0x370a0044,
+ 0xc78, 0x380b0044,
+ 0xc78, 0x390c0044,
+ 0xc78, 0x3a0d0044,
+ 0xc78, 0x3e0e0044,
+ 0xc78, 0x420f0044,
+ 0xc78, 0x44100044,
+ 0xc78, 0x46110044,
+ 0xc78, 0x4a120044,
+ 0xc78, 0x4e130044,
+ 0xc78, 0x50140044,
+ 0xc78, 0x55150044,
+ 0xc78, 0x5a160044,
+ 0xc78, 0x5e170044,
+ 0xc78, 0x64180044,
+ 0xc78, 0x6e190044,
+ 0xc78, 0x6e1a0044,
+ 0xc78, 0x6e1b0044,
+ 0xc78, 0x6e1c0044,
+ 0xc78, 0x6e1d0044,
+ 0xc78, 0x6e1e0044,
+ 0xc78, 0x6e1f0044,
+ 0xc78, 0x6e1f0000,
+};
+
+const u32 rtl8192du_agctab_5garray[AGCTAB_5G_ARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7a020001,
+ 0xc78, 0x79030001,
+ 0xc78, 0x78040001,
+ 0xc78, 0x77050001,
+ 0xc78, 0x76060001,
+ 0xc78, 0x75070001,
+ 0xc78, 0x74080001,
+ 0xc78, 0x73090001,
+ 0xc78, 0x720a0001,
+ 0xc78, 0x710b0001,
+ 0xc78, 0x700c0001,
+ 0xc78, 0x6f0d0001,
+ 0xc78, 0x6e0e0001,
+ 0xc78, 0x6d0f0001,
+ 0xc78, 0x6c100001,
+ 0xc78, 0x6b110001,
+ 0xc78, 0x6a120001,
+ 0xc78, 0x69130001,
+ 0xc78, 0x68140001,
+ 0xc78, 0x67150001,
+ 0xc78, 0x66160001,
+ 0xc78, 0x65170001,
+ 0xc78, 0x64180001,
+ 0xc78, 0x63190001,
+ 0xc78, 0x621a0001,
+ 0xc78, 0x611b0001,
+ 0xc78, 0x601c0001,
+ 0xc78, 0x481d0001,
+ 0xc78, 0x471e0001,
+ 0xc78, 0x461f0001,
+ 0xc78, 0x45200001,
+ 0xc78, 0x44210001,
+ 0xc78, 0x43220001,
+ 0xc78, 0x42230001,
+ 0xc78, 0x41240001,
+ 0xc78, 0x40250001,
+ 0xc78, 0x27260001,
+ 0xc78, 0x26270001,
+ 0xc78, 0x25280001,
+ 0xc78, 0x24290001,
+ 0xc78, 0x232a0001,
+ 0xc78, 0x222b0001,
+ 0xc78, 0x212c0001,
+ 0xc78, 0x202d0001,
+ 0xc78, 0x202e0001,
+ 0xc78, 0x202f0001,
+ 0xc78, 0x20300001,
+ 0xc78, 0x20310001,
+ 0xc78, 0x20320001,
+ 0xc78, 0x20330001,
+ 0xc78, 0x20340001,
+ 0xc78, 0x20350001,
+ 0xc78, 0x20360001,
+ 0xc78, 0x20370001,
+ 0xc78, 0x20380001,
+ 0xc78, 0x20390001,
+ 0xc78, 0x203a0001,
+ 0xc78, 0x203b0001,
+ 0xc78, 0x203c0001,
+ 0xc78, 0x203d0001,
+ 0xc78, 0x203e0001,
+ 0xc78, 0x203f0001,
+ 0xc78, 0x32000044,
+ 0xc78, 0x32010044,
+ 0xc78, 0x32020044,
+ 0xc78, 0x32030044,
+ 0xc78, 0x32040044,
+ 0xc78, 0x32050044,
+ 0xc78, 0x32060044,
+ 0xc78, 0x34070044,
+ 0xc78, 0x35080044,
+ 0xc78, 0x36090044,
+ 0xc78, 0x370a0044,
+ 0xc78, 0x380b0044,
+ 0xc78, 0x390c0044,
+ 0xc78, 0x3a0d0044,
+ 0xc78, 0x3e0e0044,
+ 0xc78, 0x420f0044,
+ 0xc78, 0x44100044,
+ 0xc78, 0x46110044,
+ 0xc78, 0x4a120044,
+ 0xc78, 0x4e130044,
+ 0xc78, 0x50140044,
+ 0xc78, 0x55150044,
+ 0xc78, 0x5a160044,
+ 0xc78, 0x5e170044,
+ 0xc78, 0x64180044,
+ 0xc78, 0x6e190044,
+ 0xc78, 0x6e1a0044,
+ 0xc78, 0x6e1b0044,
+ 0xc78, 0x6e1c0044,
+ 0xc78, 0x6e1d0044,
+ 0xc78, 0x6e1e0044,
+ 0xc78, 0x6e1f0044,
+ 0xc78, 0x6e1f0000,
+};
+
+const u32 rtl8192du_agctab_2garray[AGCTAB_2G_ARRAYLENGTH] = {
+ 0xc78, 0x7b000001,
+ 0xc78, 0x7b010001,
+ 0xc78, 0x7b020001,
+ 0xc78, 0x7b030001,
+ 0xc78, 0x7b040001,
+ 0xc78, 0x7b050001,
+ 0xc78, 0x7b060001,
+ 0xc78, 0x7a070001,
+ 0xc78, 0x79080001,
+ 0xc78, 0x78090001,
+ 0xc78, 0x770a0001,
+ 0xc78, 0x760b0001,
+ 0xc78, 0x750c0001,
+ 0xc78, 0x740d0001,
+ 0xc78, 0x730e0001,
+ 0xc78, 0x720f0001,
+ 0xc78, 0x71100001,
+ 0xc78, 0x70110001,
+ 0xc78, 0x6f120001,
+ 0xc78, 0x6e130001,
+ 0xc78, 0x6d140001,
+ 0xc78, 0x6c150001,
+ 0xc78, 0x6b160001,
+ 0xc78, 0x6a170001,
+ 0xc78, 0x69180001,
+ 0xc78, 0x68190001,
+ 0xc78, 0x671a0001,
+ 0xc78, 0x661b0001,
+ 0xc78, 0x651c0001,
+ 0xc78, 0x641d0001,
+ 0xc78, 0x631e0001,
+ 0xc78, 0x621f0001,
+ 0xc78, 0x61200001,
+ 0xc78, 0x60210001,
+ 0xc78, 0x49220001,
+ 0xc78, 0x48230001,
+ 0xc78, 0x47240001,
+ 0xc78, 0x46250001,
+ 0xc78, 0x45260001,
+ 0xc78, 0x44270001,
+ 0xc78, 0x43280001,
+ 0xc78, 0x42290001,
+ 0xc78, 0x412a0001,
+ 0xc78, 0x402b0001,
+ 0xc78, 0x262c0001,
+ 0xc78, 0x252d0001,
+ 0xc78, 0x242e0001,
+ 0xc78, 0x232f0001,
+ 0xc78, 0x22300001,
+ 0xc78, 0x21310001,
+ 0xc78, 0x20320001,
+ 0xc78, 0x06330001,
+ 0xc78, 0x05340001,
+ 0xc78, 0x04350001,
+ 0xc78, 0x03360001,
+ 0xc78, 0x02370001,
+ 0xc78, 0x01380001,
+ 0xc78, 0x00390001,
+ 0xc78, 0x003a0001,
+ 0xc78, 0x003b0001,
+ 0xc78, 0x003c0001,
+ 0xc78, 0x003d0001,
+ 0xc78, 0x003e0001,
+ 0xc78, 0x003f0001,
+ 0xc78, 0x38000002,
+ 0xc78, 0x38010002,
+ 0xc78, 0x38020002,
+ 0xc78, 0x38030002,
+ 0xc78, 0x38040002,
+ 0xc78, 0x38050002,
+ 0xc78, 0x38060002,
+ 0xc78, 0x38070002,
+ 0xc78, 0x38080002,
+ 0xc78, 0x3c090002,
+ 0xc78, 0x3e0a0002,
+ 0xc78, 0x400b0002,
+ 0xc78, 0x440c0002,
+ 0xc78, 0x480d0002,
+ 0xc78, 0x4c0e0002,
+ 0xc78, 0x500f0002,
+ 0xc78, 0x52100002,
+ 0xc78, 0x56110002,
+ 0xc78, 0x5a120002,
+ 0xc78, 0x5e130002,
+ 0xc78, 0x60140002,
+ 0xc78, 0x60150002,
+ 0xc78, 0x60160002,
+ 0xc78, 0x62170002,
+ 0xc78, 0x62180002,
+ 0xc78, 0x62190002,
+ 0xc78, 0x621a0002,
+ 0xc78, 0x621b0002,
+ 0xc78, 0x621c0002,
+ 0xc78, 0x621d0002,
+ 0xc78, 0x621e0002,
+ 0xc78, 0x621f0002,
+ 0xc78, 0x6e1f0000,
+};
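
Each AGC table above is a flat array of (address, value) pairs, and every
write targets the same register, 0xc78. Judging purely from the data (not
from any Realtek datasheet), a value reads as 0xGGII00TT: GG the gain code,
II the row index (0x00..0x3f), and TT a table/stage selector (0x01, 0x44 or
0x02 here, with a final 0x00 entry closing each table). A hypothetical
decoding sketch for inspecting the dumps; the struct and function names are
invented for illustration:

#include <stdint.h>

struct agc_entry {
	uint8_t gain;	/* 0xGG: gain code */
	uint8_t row;	/* 0xII: row index, 0x00..0x3f */
	uint8_t sel;	/* 0xTT: table/stage selector */
};

static struct agc_entry decode_agc_value(uint32_t v)
{
	struct agc_entry e = {
		.gain = (v >> 24) & 0xff,
		.row  = (v >> 16) & 0xff,
		.sel  = v & 0xff,
	};
	return e;
}

For example, decode_agc_value(0x7b000001) yields gain 0x7b, row 0x00,
selector 0x01.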
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h
new file mode 100644
index 000000000000..b809ba511320
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_TABLE_H__
+#define __RTL92DU_TABLE_H__
+
+#define PHY_REG_2T_ARRAYLENGTH 372
+#define PHY_REG_ARRAY_PG_LENGTH 624
+#define RADIOA_2T_ARRAYLENGTH 378
+#define RADIOB_2T_ARRAYLENGTH 384
+#define RADIOA_2T_INT_PA_ARRAYLENGTH 378
+#define RADIOB_2T_INT_PA_ARRAYLENGTH 384
+#define MAC_2T_ARRAYLENGTH 192
+#define AGCTAB_ARRAYLENGTH 386
+#define AGCTAB_5G_ARRAYLENGTH 194
+#define AGCTAB_2G_ARRAYLENGTH 194
+
+extern const u32 rtl8192du_phy_reg_2tarray[PHY_REG_2T_ARRAYLENGTH];
+extern const u32 rtl8192du_phy_reg_array_pg[PHY_REG_ARRAY_PG_LENGTH];
+extern const u32 rtl8192du_radioa_2tarray[RADIOA_2T_ARRAYLENGTH];
+extern const u32 rtl8192du_radiob_2tarray[RADIOB_2T_ARRAYLENGTH];
+extern const u32 rtl8192du_radioa_2t_int_paarray[RADIOA_2T_INT_PA_ARRAYLENGTH];
+extern const u32 rtl8192du_radiob_2t_int_paarray[RADIOB_2T_INT_PA_ARRAYLENGTH];
+extern const u32 rtl8192du_mac_2tarray[MAC_2T_ARRAYLENGTH];
+extern const u32 rtl8192du_agctab_array[AGCTAB_ARRAYLENGTH];
+extern const u32 rtl8192du_agctab_5garray[AGCTAB_5G_ARRAYLENGTH];
+extern const u32 rtl8192du_agctab_2garray[AGCTAB_2G_ARRAYLENGTH];
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c
new file mode 100644
index 000000000000..743ce0cfffe6
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../base.h"
+#include "../usb.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/trx_common.h"
+#include "trx.h"
+
+void rtl92du_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+}
+
+int rtl92du_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
+ struct sk_buff *skb)
+{
+ return 0;
+}
+
+struct sk_buff *rtl92du_tx_aggregate_hdl(struct ieee80211_hw *hw,
+ struct sk_buff_head *list)
+{
+ return skb_dequeue(list);
+}
+
+static enum rtl_desc_qsel _rtl92du_hwq_to_descq(u16 queue_index)
+{
+ switch (queue_index) {
+ case RTL_TXQ_BCN:
+ return QSLT_BEACON;
+ case RTL_TXQ_MGT:
+ return QSLT_MGNT;
+ case RTL_TXQ_VO:
+ return QSLT_VO;
+ case RTL_TXQ_VI:
+ return QSLT_VI;
+ case RTL_TXQ_BK:
+ return QSLT_BK;
+ default:
+ case RTL_TXQ_BE:
+ return QSLT_BE;
+ }
+}
+
+/* For HW recovery information */
+static void _rtl92du_tx_desc_checksum(__le32 *txdesc)
+{
+ __le16 *ptr = (__le16 *)txdesc;
+ u16 checksum = 0;
+ u32 index;
+
+ /* Clear first */
+ set_tx_desc_tx_desc_checksum(txdesc, 0);
+ for (index = 0; index < 16; index++)
+ checksum = checksum ^ le16_to_cpu(*(ptr + index));
+ set_tx_desc_tx_desc_checksum(txdesc, checksum);
+}
+
+void rtl92du_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc_tx,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb,
+ u8 queue_index,
+ struct rtl_tcb_desc *tcb_desc)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ struct rtl_mac *mac = rtl_mac(rtlpriv);
+ struct rtl_sta_info *sta_entry;
+ __le16 fc = hdr->frame_control;
+ u8 agg_state = RTL_AGG_STOP;
+ u16 pktlen = skb->len;
+ u32 rts_en, hw_rts_en;
+ u8 ampdu_density = 0;
+ u16 seq_number;
+ __le32 *txdesc;
+ u8 rate_flag;
+ u8 tid;
+
+ rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc);
+
+ txdesc = (__le32 *)skb_push(skb, RTL_TX_HEADER_SIZE);
+ memset(txdesc, 0, RTL_TX_HEADER_SIZE);
+
+ set_tx_desc_pkt_size(txdesc, pktlen);
+ set_tx_desc_linip(txdesc, 0);
+ set_tx_desc_pkt_offset(txdesc, RTL_DUMMY_OFFSET);
+ set_tx_desc_offset(txdesc, RTL_TX_HEADER_SIZE);
+	/* 5G has no CCK rates */
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ if (tcb_desc->hw_rate < DESC_RATE6M)
+ tcb_desc->hw_rate = DESC_RATE6M;
+
+ set_tx_desc_tx_rate(txdesc, tcb_desc->hw_rate);
+ if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble)
+ set_tx_desc_data_shortgi(txdesc, 1);
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ tcb_desc->hw_rate == DESC_RATEMCS7)
+ set_tx_desc_data_shortgi(txdesc, 1);
+
+ if (sta) {
+ sta_entry = (struct rtl_sta_info *)sta->drv_priv;
+ tid = ieee80211_get_tid(hdr);
+ agg_state = sta_entry->tids[tid].agg.agg_state;
+ ampdu_density = sta->deflink.ht_cap.ampdu_density;
+ }
+
+ if (agg_state == RTL_AGG_OPERATIONAL &&
+ info->flags & IEEE80211_TX_CTL_AMPDU) {
+ set_tx_desc_agg_enable(txdesc, 1);
+ set_tx_desc_max_agg_num(txdesc, 0x14);
+ set_tx_desc_ampdu_density(txdesc, ampdu_density);
+ tcb_desc->rts_enable = 1;
+ tcb_desc->rts_rate = DESC_RATE24M;
+ } else {
+ set_tx_desc_agg_break(txdesc, 1);
+ }
+ seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+ set_tx_desc_seq(txdesc, seq_number);
+
+ rts_en = tcb_desc->rts_enable && !tcb_desc->cts_enable;
+ hw_rts_en = tcb_desc->rts_enable || tcb_desc->cts_enable;
+ set_tx_desc_rts_enable(txdesc, rts_en);
+ set_tx_desc_hw_rts_enable(txdesc, hw_rts_en);
+ set_tx_desc_cts2self(txdesc, tcb_desc->cts_enable);
+ set_tx_desc_rts_stbc(txdesc, tcb_desc->rts_stbc);
+	/* 5G has no CCK rates */
+ if (rtlhal->current_bandtype == BAND_ON_5G)
+ if (tcb_desc->rts_rate < DESC_RATE6M)
+ tcb_desc->rts_rate = DESC_RATE6M;
+ set_tx_desc_rts_rate(txdesc, tcb_desc->rts_rate);
+ set_tx_desc_rts_bw(txdesc, 0);
+ set_tx_desc_rts_sc(txdesc, tcb_desc->rts_sc);
+ set_tx_desc_rts_short(txdesc, tcb_desc->rts_use_shortpreamble);
+
+ rate_flag = info->control.rates[0].flags;
+ if (mac->bw_40) {
+ if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
+ set_tx_desc_data_bw(txdesc, 1);
+ set_tx_desc_tx_sub_carrier(txdesc, 3);
+ } else if (rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH) {
+ set_tx_desc_data_bw(txdesc, 1);
+ set_tx_desc_tx_sub_carrier(txdesc, mac->cur_40_prime_sc);
+ } else {
+ set_tx_desc_data_bw(txdesc, 0);
+ set_tx_desc_tx_sub_carrier(txdesc, 0);
+ }
+ } else {
+ set_tx_desc_data_bw(txdesc, 0);
+ set_tx_desc_tx_sub_carrier(txdesc, 0);
+ }
+
+ if (info->control.hw_key) {
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ set_tx_desc_sec_type(txdesc, 0x1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ set_tx_desc_sec_type(txdesc, 0x3);
+ break;
+ default:
+ set_tx_desc_sec_type(txdesc, 0x0);
+ break;
+ }
+ }
+
+ set_tx_desc_pkt_id(txdesc, 0);
+ set_tx_desc_queue_sel(txdesc, _rtl92du_hwq_to_descq(queue_index));
+ set_tx_desc_data_rate_fb_limit(txdesc, 0x1F);
+ set_tx_desc_rts_rate_fb_limit(txdesc, 0xF);
+ set_tx_desc_disable_fb(txdesc, 0);
+ set_tx_desc_use_rate(txdesc, tcb_desc->use_driver_rate);
+
+ if (ieee80211_is_data_qos(fc)) {
+ if (mac->rdg_en) {
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE,
+ "Enable RDG function\n");
+ set_tx_desc_rdg_enable(txdesc, 1);
+ set_tx_desc_htc(txdesc, 1);
+ }
+ set_tx_desc_qos(txdesc, 1);
+ }
+
+ if (rtlpriv->dm.useramask) {
+ set_tx_desc_rate_id(txdesc, tcb_desc->ratr_index);
+ set_tx_desc_macid(txdesc, tcb_desc->mac_id);
+ } else {
+ set_tx_desc_rate_id(txdesc, 0xC + tcb_desc->ratr_index);
+ set_tx_desc_macid(txdesc, tcb_desc->ratr_index);
+ }
+
+ if (!ieee80211_is_data_qos(fc) && ppsc->leisure_ps &&
+ ppsc->fwctrl_lps) {
+ set_tx_desc_hwseq_en(txdesc, 1);
+ set_tx_desc_pkt_id(txdesc, 8);
+ }
+
+ if (ieee80211_has_morefrags(fc))
+ set_tx_desc_more_frag(txdesc, 1);
+ if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
+ is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
+ set_tx_desc_bmc(txdesc, 1);
+
+ set_tx_desc_own(txdesc, 1);
+ set_tx_desc_last_seg(txdesc, 1);
+ set_tx_desc_first_seg(txdesc, 1);
+ _rtl92du_tx_desc_checksum(txdesc);
+
+ rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n");
+}
+
+static void _rtl92du_config_out_ep(struct ieee80211_hw *hw, u8 num_out_pipe)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u16 ep_cfg;
+
+ rtlusb->out_queue_sel = 0;
+ rtlusb->out_ep_nums = 0;
+
+ if (rtlhal->interfaceindex == 0)
+ ep_cfg = rtl_read_word(rtlpriv, REG_USB_Queue_Select_MAC0);
+ else
+ ep_cfg = rtl_read_word(rtlpriv, REG_USB_Queue_Select_MAC1);
+
+ if (ep_cfg & 0x00f) {
+ rtlusb->out_queue_sel |= TX_SELE_HQ;
+ rtlusb->out_ep_nums++;
+ }
+ if (ep_cfg & 0x0f0) {
+ rtlusb->out_queue_sel |= TX_SELE_NQ;
+ rtlusb->out_ep_nums++;
+ }
+ if (ep_cfg & 0xf00) {
+ rtlusb->out_queue_sel |= TX_SELE_LQ;
+ rtlusb->out_ep_nums++;
+ }
+
+ switch (num_out_pipe) {
+ case 3:
+ rtlusb->out_queue_sel = TX_SELE_HQ | TX_SELE_NQ | TX_SELE_LQ;
+ rtlusb->out_ep_nums = 3;
+ break;
+ case 2:
+ rtlusb->out_queue_sel = TX_SELE_HQ | TX_SELE_NQ;
+ rtlusb->out_ep_nums = 2;
+ break;
+ case 1:
+ rtlusb->out_queue_sel = TX_SELE_HQ;
+ rtlusb->out_ep_nums = 1;
+ break;
+ default:
+ break;
+ }
+}
+
+static void _rtl92du_one_out_ep_mapping(struct rtl_usb *rtlusb,
+ struct rtl_ep_map *ep_map)
+{
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
+}
+
+static void _rtl92du_two_out_ep_mapping(struct rtl_usb *rtlusb,
+ struct rtl_ep_map *ep_map)
+{
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
+}
+
+static void _rtl92du_three_out_ep_mapping(struct rtl_usb *rtlusb,
+ struct rtl_ep_map *ep_map)
+{
+ ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[2];
+ ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1];
+ ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0];
+ ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0];
+}
+
+static int _rtl92du_out_ep_mapping(struct ieee80211_hw *hw)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_ep_map *ep_map = &rtlusb->ep_map;
+
+ switch (rtlusb->out_ep_nums) {
+ case 1:
+ _rtl92du_one_out_ep_mapping(rtlusb, ep_map);
+ break;
+ case 2:
+ _rtl92du_two_out_ep_mapping(rtlusb, ep_map);
+ break;
+ case 3:
+ _rtl92du_three_out_ep_mapping(rtlusb, ep_map);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int rtl92du_endpoint_mapping(struct ieee80211_hw *hw)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+
+ _rtl92du_config_out_ep(hw, rtlusb->out_ep_nums);
+
+	/* A normal chip with one IN and one OUT EP has no interrupt IN EP. */
+ if (rtlusb->out_ep_nums == 1 && rtlusb->in_ep_nums != 1)
+ return -EINVAL;
+
+ return _rtl92du_out_ep_mapping(hw);
+}
+
+u16 rtl92du_mq_to_hwq(__le16 fc, u16 mac80211_queue_index)
+{
+ u16 hw_queue_index;
+
+ if (unlikely(ieee80211_is_beacon(fc))) {
+ hw_queue_index = RTL_TXQ_BCN;
+ goto out;
+ }
+ if (ieee80211_is_mgmt(fc)) {
+ hw_queue_index = RTL_TXQ_MGT;
+ goto out;
+ }
+
+ switch (mac80211_queue_index) {
+ case 0:
+ hw_queue_index = RTL_TXQ_VO;
+ break;
+ case 1:
+ hw_queue_index = RTL_TXQ_VI;
+ break;
+ case 2:
+ hw_queue_index = RTL_TXQ_BE;
+ break;
+ case 3:
+ hw_queue_index = RTL_TXQ_BK;
+ break;
+ default:
+ hw_queue_index = RTL_TXQ_BE;
+ WARN_ONCE(true, "rtl8192du: QSLT_BE queue, skb_queue:%d\n",
+ mac80211_queue_index);
+ break;
+ }
+out:
+ return hw_queue_index;
+}
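
_rtl92du_tx_desc_checksum() above folds the first 32 bytes of the TX
descriptor (16 little-endian 16-bit words, with the checksum field cleared
first so the stored value doesn't feed back into the sum) into a single XOR
word. A standalone sketch of the same reduction, plain C and host-endian for
brevity, illustration only:

#include <stdint.h>
#include <string.h>

static uint16_t txdesc_checksum(const void *desc)
{
	uint16_t words[16], sum = 0;
	int i;

	/* XOR-fold the first 16 16-bit words of the descriptor;
	 * bits 15:0 of dword 7 (the checksum field) are assumed to
	 * already be zero, as in the driver.
	 */
	memcpy(words, desc, sizeof(words));
	for (i = 0; i < 16; i++)
		sum ^= words[i];
	return sum;
}

XOR has the convenient property that re-folding a descriptor whose checksum
field already holds the stored value yields zero, so a consumer can verify
with a single XOR-to-zero pass instead of clearing and re-summing.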
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h
new file mode 100644
index 000000000000..8c3d24622fa7
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_TRX_H__
+#define __RTL92DU_TRX_H__
+
+#define TX_SELE_HQ BIT(0) /* High Queue */
+#define TX_SELE_LQ BIT(1) /* Low Queue */
+#define TX_SELE_NQ BIT(2) /* Normal Queue */
+
+#define TX_TOTAL_PAGE_NUMBER_92DU 0xF8
+#define TEST_PAGE_NUM_PUBQ_92DU 0x89
+#define TX_TOTAL_PAGE_NUMBER_92D_DUAL_MAC 0x7A
+#define NORMAL_PAGE_NUM_PUBQ_92D_DUAL_MAC 0x5A
+#define NORMAL_PAGE_NUM_HPQ_92D_DUAL_MAC 0x10
+#define NORMAL_PAGE_NUM_LPQ_92D_DUAL_MAC 0x10
+#define NORMAL_PAGE_NUM_NORMALQ_92D_DUAL_MAC 0
+
+#define WMM_NORMAL_TX_TOTAL_PAGE_NUMBER 0xF5
+
+#define WMM_NORMAL_PAGE_NUM_PUBQ_92D 0x65
+#define WMM_NORMAL_PAGE_NUM_HPQ_92D 0x30
+#define WMM_NORMAL_PAGE_NUM_LPQ_92D 0x30
+#define WMM_NORMAL_PAGE_NUM_NPQ_92D 0x30
+
+#define WMM_NORMAL_PAGE_NUM_PUBQ_92D_DUAL_MAC 0x32
+#define WMM_NORMAL_PAGE_NUM_HPQ_92D_DUAL_MAC 0x18
+#define WMM_NORMAL_PAGE_NUM_LPQ_92D_DUAL_MAC 0x18
+#define WMM_NORMAL_PAGE_NUM_NPQ_92D_DUAL_MAC 0x18
+
+static inline void set_tx_desc_bmc(__le32 *__txdesc, u32 __value)
+{
+ le32p_replace_bits(__txdesc, __value, BIT(24));
+}
+
+static inline void set_tx_desc_agg_break(__le32 *__txdesc, u32 __value)
+{
+ le32p_replace_bits((__txdesc + 1), __value, BIT(6));
+}
+
+static inline void set_tx_desc_tx_desc_checksum(__le32 *__txdesc, u32 __value)
+{
+ le32p_replace_bits((__txdesc + 7), __value, GENMASK(15, 0));
+}
+
+void rtl92du_tx_fill_desc(struct ieee80211_hw *hw,
+ struct ieee80211_hdr *hdr, u8 *pdesc,
+ u8 *pbd_desc_tx, struct ieee80211_tx_info *info,
+ struct ieee80211_sta *sta,
+ struct sk_buff *skb, u8 hw_queue,
+ struct rtl_tcb_desc *ptcb_desc);
+int rtl92du_endpoint_mapping(struct ieee80211_hw *hw);
+u16 rtl92du_mq_to_hwq(__le16 fc, u16 mac80211_queue_index);
+struct sk_buff *rtl92du_tx_aggregate_hdl(struct ieee80211_hw *hw,
+ struct sk_buff_head *list);
+void rtl92du_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb);
+int rtl92du_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb,
+ struct sk_buff *skb);
+
+#endif
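
The inline setters above wrap le32p_replace_bits() so callers can poke
individual fields at fixed dword offsets inside the little-endian TX
descriptor. A minimal usage sketch (kernel context; the txdesc array is
illustrative, the offsets are exactly as defined above):

	__le32 txdesc[8] = {};	/* 32-byte descriptor, 8 dwords */

	set_tx_desc_bmc(txdesc, 1);			/* dword 0, bit 24 */
	set_tx_desc_agg_break(txdesc, 1);		/* dword 1, bit 6 */
	set_tx_desc_tx_desc_checksum(txdesc, 0xabcd);	/* dword 7, bits 15:0 */

Since le32p_replace_bits() read-modify-writes only the masked field, the
helpers can be called in any order without clobbering neighbouring fields.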
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
index 7bde20fdbeab..162e734d5b08 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c
@@ -31,7 +31,7 @@ static void rtl92ee_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
*/
rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
index 675bdd32feb1..bbf8ff63dced 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
@@ -27,7 +27,7 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
* */
rtlpci->const_pci_aspm = 2;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
index dd7505e2f22c..1b144fbd4d26 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
@@ -33,7 +33,7 @@ static void rtl8723e_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
*/
rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
index 094cb36153f5..13e689037acc 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/phy.c
@@ -1110,16 +1110,22 @@ static void _rtl8723be_phy_set_txpower_index(struct ieee80211_hw *hw,
void rtl8723be_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
{
struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
- u8 cck_rates[] = {DESC92C_RATE1M, DESC92C_RATE2M,
- DESC92C_RATE5_5M, DESC92C_RATE11M};
- u8 ofdm_rates[] = {DESC92C_RATE6M, DESC92C_RATE9M,
- DESC92C_RATE12M, DESC92C_RATE18M,
- DESC92C_RATE24M, DESC92C_RATE36M,
- DESC92C_RATE48M, DESC92C_RATE54M};
- u8 ht_rates_1t[] = {DESC92C_RATEMCS0, DESC92C_RATEMCS1,
- DESC92C_RATEMCS2, DESC92C_RATEMCS3,
- DESC92C_RATEMCS4, DESC92C_RATEMCS5,
- DESC92C_RATEMCS6, DESC92C_RATEMCS7};
+ static const u8 cck_rates[] = {
+ DESC92C_RATE1M, DESC92C_RATE2M,
+ DESC92C_RATE5_5M, DESC92C_RATE11M
+ };
+ static const u8 ofdm_rates[] = {
+ DESC92C_RATE6M, DESC92C_RATE9M,
+ DESC92C_RATE12M, DESC92C_RATE18M,
+ DESC92C_RATE24M, DESC92C_RATE36M,
+ DESC92C_RATE48M, DESC92C_RATE54M
+ };
+ static const u8 ht_rates_1t[] = {
+ DESC92C_RATEMCS0, DESC92C_RATEMCS1,
+ DESC92C_RATEMCS2, DESC92C_RATEMCS3,
+ DESC92C_RATEMCS4, DESC92C_RATEMCS5,
+ DESC92C_RATEMCS6, DESC92C_RATEMCS7
+ };
u8 i;
u8 power_index;
@@ -2155,15 +2161,16 @@ static void _rtl8723be_phy_iq_calibrate(struct ieee80211_hw *hw,
static u8 _get_right_chnl_place_for_iqk(u8 chnl)
{
- u8 channel_all[TARGET_CHNL_NUM_2G_5G] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
- 13, 14, 36, 38, 40, 42, 44, 46,
- 48, 50, 52, 54, 56, 58, 60, 62, 64,
- 100, 102, 104, 106, 108, 110,
- 112, 114, 116, 118, 120, 122,
- 124, 126, 128, 130, 132, 134, 136,
- 138, 140, 149, 151, 153, 155, 157,
- 159, 161, 163, 165};
+ static const u8 channel_all[TARGET_CHNL_NUM_2G_5G] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
+ 13, 14, 36, 38, 40, 42, 44, 46,
+ 48, 50, 52, 54, 56, 58, 60, 62, 64,
+ 100, 102, 104, 106, 108, 110,
+ 112, 114, 116, 118, 120, 122,
+ 124, 126, 128, 130, 132, 134, 136,
+ 138, 140, 149, 151, 153, 155, 157,
+ 159, 161, 163, 165
+ };
u8 place = chnl;
if (chnl > 14) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
index 162c34f0e9b7..0a92d0325098 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
@@ -32,7 +32,7 @@ static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
*/
rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
index 7b911695db33..a65503c5ae5a 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c
@@ -30,7 +30,7 @@ static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw)
* 2 - Enable ASPM with Clock Req,
* 3 - Alwyas Enable ASPM with Clock Req,
* 4 - Always Enable ASPM without Clock Req.
- * set defult to RTL8192CE:3 RTL8192E:2
+ * set default to RTL8192CE:3 RTL8192E:2
*/
rtlpci->const_pci_aspm = 3;
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 6e8c87a2fae4..d37a017b2b81 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -23,6 +23,8 @@ MODULE_DESCRIPTION("USB basic driver for rtlwifi");
#define MAX_USBCTRL_VENDORREQ_TIMES 10
+static void _rtl_usb_cleanup_tx(struct ieee80211_hw *hw);
+
static void _usbctrl_vendorreq_sync(struct usb_device *udev, u8 reqtype,
u16 value, void *pdata, u16 len)
{
@@ -285,9 +287,23 @@ static int _rtl_usb_init(struct ieee80211_hw *hw)
}
/* usb endpoint mapping */
err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw);
- rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq;
- _rtl_usb_init_tx(hw);
- _rtl_usb_init_rx(hw);
+ if (err)
+ return err;
+
+ rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq;
+
+ err = _rtl_usb_init_tx(hw);
+ if (err)
+ return err;
+
+ err = _rtl_usb_init_rx(hw);
+ if (err)
+ goto err_out;
+
+ return 0;
+
+err_out:
+ _rtl_usb_cleanup_tx(hw);
return err;
}
@@ -691,17 +707,13 @@ static int rtl_usb_start(struct ieee80211_hw *hw)
}
/*======================= tx =========================================*/
-static void rtl_usb_cleanup(struct ieee80211_hw *hw)
+static void _rtl_usb_cleanup_tx(struct ieee80211_hw *hw)
{
u32 i;
struct sk_buff *_skb;
struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
struct ieee80211_tx_info *txinfo;
- /* clean up rx stuff. */
- _rtl_usb_cleanup_rx(hw);
-
- /* clean up tx stuff */
for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) {
while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) {
rtlusb->usb_tx_cleanup(hw, _skb);
@@ -715,6 +727,12 @@ static void rtl_usb_cleanup(struct ieee80211_hw *hw)
usb_kill_anchored_urbs(&rtlusb->tx_submitted);
}
+static void rtl_usb_cleanup(struct ieee80211_hw *hw)
+{
+ _rtl_usb_cleanup_rx(hw);
+ _rtl_usb_cleanup_tx(hw);
+}
+
/* We may add some struct into struct rtl_usb later. Do deinit here. */
static void rtl_usb_deinit(struct ieee80211_hw *hw)
{
@@ -937,7 +955,7 @@ static const struct rtl_intf_ops rtl_usb_ops = {
int rtl_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id,
- struct rtl_hal_cfg *rtl_hal_cfg)
+ const struct rtl_hal_cfg *rtl_hal_cfg)
{
int err;
struct ieee80211_hw *hw = NULL;
@@ -979,6 +997,9 @@ int rtl_usb_probe(struct usb_interface *intf,
usb_priv->dev.intf = intf;
usb_priv->dev.udev = udev;
usb_set_intfdata(intf, hw);
+	/* For the dual MAC RTL8192DU, which has two USB interfaces. */
+ rtlpriv->rtlhal.interfaceindex =
+ intf->altsetting[0].desc.bInterfaceNumber;
/* init cfg & intf_ops */
rtlpriv->rtlhal.interface = INTF_USB;
rtlpriv->cfg = rtl_hal_cfg;
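
The interface number recorded here is what the RTL8192DU code keys per-MAC
decisions on; for example, _rtl92du_config_out_ep() in trx.c above selects
the per-MAC queue-select register with it (quoted from that file):

	if (rtlhal->interfaceindex == 0)
		ep_cfg = rtl_read_word(rtlpriv, REG_USB_Queue_Select_MAC0);
	else
		ep_cfg = rtl_read_word(rtlpriv, REG_USB_Queue_Select_MAC1);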
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h
index 12529afc0510..b66d6f9ae564 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.h
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.h
@@ -136,7 +136,7 @@ struct rtl_usb_priv {
int rtl_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id,
- struct rtl_hal_cfg *rtl92cu_hal_cfg);
+ const struct rtl_hal_cfg *rtl92cu_hal_cfg);
void rtl_usb_disconnect(struct usb_interface *intf);
int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message);
int rtl_usb_resume(struct usb_interface *pusb_intf);
diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h
index 9fabf597cfd6..ae6e351bc83c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/wifi.h
+++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h
@@ -20,6 +20,7 @@
#define MASKBYTE1 0xff00
#define MASKBYTE2 0xff0000
#define MASKBYTE3 0xff000000
+#define MASKH3BYTES 0xffffff00
#define MASKHWORD 0xffff0000
#define MASKLWORD 0x0000ffff
#define MASKDWORD 0xffffffff
@@ -48,6 +49,10 @@
#define MASK20BITS 0xfffff
#define RFREG_OFFSET_MASK 0xfffff
+/* For dual MAC RTL8192DU */
+#define MAC0_ACCESS_PHY1 0x4000
+#define MAC1_ACCESS_PHY0 0x2000
+
#define RF_CHANGE_BY_INIT 0
#define RF_CHANGE_BY_IPS BIT(28)
#define RF_CHANGE_BY_PS BIT(29)
@@ -1043,33 +1048,6 @@ struct octet_string {
u16 length;
};
-struct rtl_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 payload[];
-} __packed;
-
-struct rtl_info_element {
- u8 id;
- u8 len;
- u8 data[];
-} __packed;
-
-struct rtl_probe_rsp {
- struct rtl_hdr_3addr header;
- u32 time_stamp[2];
- __le16 beacon_interval;
- __le16 capability;
- /*SSID, supported rates, FH params, DS params,
- * CF params, IBSS params, TIM (if beacon), RSN
- */
- struct rtl_info_element info_element[];
-} __packed;
-
struct rtl_led_ctl {
bool led_opendrain;
enum rtl_led_pin sw_led0;
@@ -2268,6 +2246,7 @@ struct rtl_hal_ops {
bool (*config_bb_with_pgheaderfile)(struct ieee80211_hw *hw,
u8 configtype);
void (*phy_lc_calibrate)(struct ieee80211_hw *hw, bool is2t);
+ void (*phy_iq_calibrate)(struct ieee80211_hw *hw);
void (*phy_set_bw_mode_callback)(struct ieee80211_hw *hw);
void (*dm_dynamic_txpower)(struct ieee80211_hw *hw);
void (*c2h_command_handle)(struct ieee80211_hw *hw);
@@ -2377,9 +2356,9 @@ struct rtl_hal_cfg {
bool write_readback;
char *name;
char *alt_fw_name;
- struct rtl_hal_ops *ops;
+ const struct rtl_hal_ops *ops;
struct rtl_mod_params *mod_params;
- struct rtl_hal_usbint_cfg *usb_interface_cfg;
+ const struct rtl_hal_usbint_cfg *usb_interface_cfg;
enum rtl_spec_ver spec_ver;
/*this map used for some registers or vars
@@ -2728,7 +2707,7 @@ struct rtl_priv {
/* hal_cfg : for diff cards
* intf_ops : for diff interrface usb/pcie
*/
- struct rtl_hal_cfg *cfg;
+ const struct rtl_hal_cfg *cfg;
const struct rtl_intf_ops *intf_ops;
/* this var will be set by set_bit,
@@ -2767,6 +2746,12 @@ struct rtl_priv {
*/
bool use_new_trx_flow;
+ /* For dual MAC RTL8192DU, things shared by the 2 USB interfaces */
+ u32 *curveindex_2g;
+ u32 *curveindex_5g;
+ struct mutex *mutex_for_power_on_off; /* for power on/off */
+ struct mutex *mutex_for_hw_init; /* for hardware init */
+
#ifdef CONFIG_PM
struct wiphy_wowlan_support wowlan;
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/Kconfig b/drivers/net/wireless/realtek/rtw88/Kconfig
index cffad1c01249..22838ede03cd 100644
--- a/drivers/net/wireless/realtek/rtw88/Kconfig
+++ b/drivers/net/wireless/realtek/rtw88/Kconfig
@@ -28,8 +28,16 @@ config RTW88_8822B
config RTW88_8822C
tristate
+config RTW88_8723X
+ tristate
+
+config RTW88_8703B
+ tristate
+ select RTW88_8723X
+
config RTW88_8723D
tristate
+ select RTW88_8723X
config RTW88_8821C
tristate
@@ -122,6 +130,20 @@ config RTW88_8723DS
802.11n SDIO wireless network adapter
+config RTW88_8723CS
+ tristate "Realtek 8723CS SDIO wireless network adapter"
+ depends on MMC
+ select RTW88_CORE
+ select RTW88_SDIO
+ select RTW88_8703B
+ help
+	  Select this option to enable support for the 8723CS chipset (EXPERIMENTAL).
+
+ This module adds support for the 8723CS 802.11n SDIO
+ wireless network adapter.
+
+	  If you choose to build it as a module, it will be called rtw88_8723cs.
+
config RTW88_8723DU
tristate "Realtek 8723DU USB wireless network adapter"
depends on USB
diff --git a/drivers/net/wireless/realtek/rtw88/Makefile b/drivers/net/wireless/realtek/rtw88/Makefile
index fd212c09d88a..8f47359b4380 100644
--- a/drivers/net/wireless/realtek/rtw88/Makefile
+++ b/drivers/net/wireless/realtek/rtw88/Makefile
@@ -44,6 +44,15 @@ rtw88_8822cs-objs := rtw8822cs.o
obj-$(CONFIG_RTW88_8822CU) += rtw88_8822cu.o
rtw88_8822cu-objs := rtw8822cu.o
+obj-$(CONFIG_RTW88_8723X) += rtw88_8723x.o
+rtw88_8723x-objs := rtw8723x.o
+
+obj-$(CONFIG_RTW88_8703B) += rtw88_8703b.o
+rtw88_8703b-objs := rtw8703b.o rtw8703b_tables.o
+
+obj-$(CONFIG_RTW88_8723CS) += rtw88_8723cs.o
+rtw88_8723cs-objs := rtw8723cs.o
+
obj-$(CONFIG_RTW88_8723D) += rtw88_8723d.o
rtw88_8723d-objs := rtw8723d.o rtw8723d_table.o
diff --git a/drivers/net/wireless/realtek/rtw88/coex.c b/drivers/net/wireless/realtek/rtw88/coex.c
index 86467d2f8888..de3332eb7a22 100644
--- a/drivers/net/wireless/realtek/rtw88/coex.c
+++ b/drivers/net/wireless/realtek/rtw88/coex.c
@@ -3937,7 +3937,9 @@ void rtw_coex_display_coex_info(struct rtw_dev *rtwdev, struct seq_file *m)
lte_coex = rtw_coex_read_indirect_reg(rtwdev, 0x38);
bt_coex = rtw_coex_read_indirect_reg(rtwdev, 0x54);
- if (!coex_stat->bt_disabled && !coex_stat->bt_mailbox_reply) {
+ if (!coex_stat->wl_under_ips &&
+ (!coex_stat->wl_under_lps || coex_stat->wl_force_lps_ctrl) &&
+ !coex_stat->bt_disabled && !coex_stat->bt_mailbox_reply) {
rtw_coex_get_bt_supported_version(rtwdev,
&coex_stat->bt_supported_version);
rtw_coex_get_bt_patch_version(rtwdev, &coex_stat->patch_ver);
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index f20c0471c82a..eb69006c463e 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -26,6 +26,7 @@ enum rtw_debug_mask {
RTW_DBG_STATE = 0x00020000,
RTW_DBG_SDIO = 0x00040000,
+ RTW_DBG_UNEXP = 0x80000000,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index 3f037ddcecf1..ab7d414d0ba6 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -783,12 +783,18 @@ void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
static const u8 rssi_min = 0, rssi_max = 100, rssi_offset = 100;
struct rtw_sta_info *si =
sta ? (struct rtw_sta_info *)sta->drv_priv : NULL;
- s32 threshold = bss_conf->cqm_rssi_thold + rssi_offset;
+ s32 thold = RTW_DEFAULT_CQM_THOLD;
+ u32 hyst = RTW_DEFAULT_CQM_HYST;
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
if (!rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_BCN_FILTER))
return;
+ if (bss_conf->cqm_rssi_thold)
+ thold = bss_conf->cqm_rssi_thold;
+ if (bss_conf->cqm_rssi_hyst)
+ hyst = bss_conf->cqm_rssi_hyst;
+
if (!connect) {
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
@@ -805,15 +811,15 @@ void rtw_fw_beacon_filter_config(struct rtw_dev *rtwdev, bool connect,
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
memset(h2c_pkt, 0, sizeof(h2c_pkt));
- threshold = clamp_t(s32, threshold, rssi_min, rssi_max);
+ thold = clamp_t(s32, thold + rssi_offset, rssi_min, rssi_max);
SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_BCN_FILTER_OFFLOAD_P1);
SET_BCN_FILTER_OFFLOAD_P1_ENABLE(h2c_pkt, connect);
SET_BCN_FILTER_OFFLOAD_P1_OFFLOAD_MODE(h2c_pkt,
BCN_FILTER_OFFLOAD_MODE_DEFAULT);
- SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, (u8)threshold);
+ SET_BCN_FILTER_OFFLOAD_P1_THRESHOLD(h2c_pkt, thold);
SET_BCN_FILTER_OFFLOAD_P1_BCN_LOSS_CNT(h2c_pkt, BCN_LOSS_CNT);
SET_BCN_FILTER_OFFLOAD_P1_MACID(h2c_pkt, si->mac_id);
- SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, bss_conf->cqm_rssi_hyst);
+ SET_BCN_FILTER_OFFLOAD_P1_HYST(h2c_pkt, hyst);
SET_BCN_FILTER_OFFLOAD_P1_BCN_INTERVAL(h2c_pkt, bss_conf->beacon_int);
rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
}
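
The firmware apparently takes the threshold as a percentage with
dBm = % - 100 (hence rssi_offset = 100 and the clamp to [0, 100] above), so
with the new default from the fw.h hunk below the conversion works out as:

	s32 thold = RTW_DEFAULT_CQM_THOLD;		/* -70 dBm */
	u8 val = clamp_t(s32, thold + 100, 0, 100);	/* -70 + 100 = 30 */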
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 84e47c71ea12..e999c24e4634 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -29,6 +29,8 @@
#define BCN_FILTER_CONNECTION_LOSS 1
#define BCN_FILTER_CONNECTED 2
#define BCN_FILTER_NOTIFY_BEACON_LOSS 3
+#define RTW_DEFAULT_CQM_THOLD -70
+#define RTW_DEFAULT_CQM_HYST 4
#define SCAN_NOTIFY_TIMEOUT msecs_to_jiffies(10)
diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c
index 0c1c1ff31085..564f5988ee82 100644
--- a/drivers/net/wireless/realtek/rtw88/mac.c
+++ b/drivers/net/wireless/realtek/rtw88/mac.c
@@ -943,6 +943,12 @@ static int __rtw_download_firmware_legacy(struct rtw_dev *rtwdev,
{
int ret = 0;
+ /* reset firmware if still present */
+ if (rtwdev->chip->id == RTW_CHIP_TYPE_8703B &&
+ rtw_read8_mask(rtwdev, REG_MCUFW_CTRL, BIT_RAM_DL_SEL)) {
+ rtw_write8(rtwdev, REG_MCUFW_CTRL, 0x00);
+ }
+
en_download_firmware_legacy(rtwdev, true);
ret = download_firmware_legacy(rtwdev, fw->firmware->data, fw->firmware->size);
en_download_firmware_legacy(rtwdev, false);
@@ -1033,14 +1039,15 @@ static void __rtw_mac_flush_prio_queue(struct rtw_dev *rtwdev,
msleep(20);
}
- /* priority queue is still not empty, throw a warning,
+	/* priority queue is still not empty, print a debug message
*
* Note that if we want to flush the tx queue when having a lot of
* traffic (ex, 100Mbps up), some of the packets could be dropped.
* And it requires like ~2secs to flush the full priority queue.
*/
if (!drop)
- rtw_warn(rtwdev, "timed out to flush queue %d\n", prio_queue);
+ rtw_dbg(rtwdev, RTW_DBG_UNEXP,
+ "timed out to flush queue %d\n", prio_queue);
}
static void rtw_mac_flush_prio_queues(struct rtw_dev *rtwdev,
@@ -1194,6 +1201,15 @@ static int __priority_queue_cfg(struct rtw_dev *rtwdev,
rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary);
rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary);
rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1);
+
+ if (rtwdev->hci.type == RTW_HCI_TYPE_USB) {
+ rtw_write8_mask(rtwdev, REG_AUTO_LLT_V1, BIT_MASK_BLK_DESC_NUM,
+ chip->usb_tx_agg_desc_num);
+
+ rtw_write8(rtwdev, REG_AUTO_LLT_V1 + 3, chip->usb_tx_agg_desc_num);
+ rtw_write8_set(rtwdev, REG_TXDMA_OFFSET_CHK + 1, BIT(1));
+ }
+
rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1);
if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0))
diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c
index 7af5bf7fe5b6..63326b352738 100644
--- a/drivers/net/wireless/realtek/rtw88/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw88/mac80211.c
@@ -62,7 +62,7 @@ static int rtw_ops_start(struct ieee80211_hw *hw)
return ret;
}
-static void rtw_ops_stop(struct ieee80211_hw *hw)
+static void rtw_ops_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rtw_dev *rtwdev = hw->priv;
@@ -386,6 +386,8 @@ static void rtw_ops_bss_info_changed(struct ieee80211_hw *hw,
rtw_coex_media_status_notify(rtwdev, vif->cfg.assoc);
if (rtw_bf_support)
rtw_bf_assoc(rtwdev, vif, conf);
+
+ rtw_fw_beacon_filter_config(rtwdev, true, vif);
} else {
rtw_leave_lps(rtwdev);
rtw_bf_disassoc(rtwdev, vif, conf);
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index ffba6b88f392..7ab7a988b123 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -227,9 +227,6 @@ static void rtw_watch_dog_work(struct work_struct *work)
else
clear_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags);
- rtw_coex_wl_status_check(rtwdev);
- rtw_coex_query_bt_hid_list(rtwdev);
-
if (busy_traffic != test_bit(RTW_FLAG_BUSY_TRAFFIC, rtwdev->flags))
rtw_coex_wl_status_change_notify(rtwdev, 0);
@@ -257,6 +254,8 @@ static void rtw_watch_dog_work(struct work_struct *work)
/* make sure BB/RF is working for dynamic mech */
rtw_leave_lps(rtwdev);
+ rtw_coex_wl_status_check(rtwdev);
+ rtw_coex_query_bt_hid_list(rtwdev);
rtw_phy_dynamic_mechanism(rtwdev);
@@ -2204,6 +2203,7 @@ EXPORT_SYMBOL(rtw_core_deinit);
int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
{
+ bool sta_mode_only = rtwdev->hci.type == RTW_HCI_TYPE_SDIO;
struct rtw_hal *hal = &rtwdev->hal;
int max_tx_headroom = 0;
int ret;
@@ -2232,10 +2232,12 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, TX_AMSDU);
ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
- hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT);
+ if (sta_mode_only)
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ else
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_ADHOC);
hw->wiphy->available_antennas_tx = hal->antenna_tx;
hw->wiphy->available_antennas_rx = hal->antenna_rx;
@@ -2246,7 +2248,7 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
hw->wiphy->max_scan_ssids = RTW_SCAN_MAX_SSIDS;
hw->wiphy->max_scan_ie_len = rtw_get_max_scan_ie_len(rtwdev);
- if (rtwdev->chip->id == RTW_CHIP_TYPE_8822C) {
+ if (!sta_mode_only && rtwdev->chip->id == RTW_CHIP_TYPE_8822C) {
hw->wiphy->iface_combinations = rtw_iface_combs;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(rtw_iface_combs);
}
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index e14d1da43940..49a3fd4fb7dc 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -187,6 +187,7 @@ enum rtw_chip_type {
RTW_CHIP_TYPE_8822C,
RTW_CHIP_TYPE_8723D,
RTW_CHIP_TYPE_8821C,
+ RTW_CHIP_TYPE_8703B,
};
enum rtw_tx_queue_type {
@@ -1196,6 +1197,8 @@ struct rtw_chip_info {
u16 fw_fifo_addr[RTW_FW_FIFO_MAX];
const struct rtw_fwcd_segs *fwcd_segs;
+ u8 usb_tx_agg_desc_num;
+
u8 default_1ss_tx_path;
bool path_div_supported;
@@ -1700,11 +1703,13 @@ struct rtw_dm_info {
s8 delta_power_index[RTW_RF_PATH_MAX];
s8 delta_power_index_last[RTW_RF_PATH_MAX];
u8 default_ofdm_index;
+ u8 default_cck_index;
bool pwr_trk_triggered;
bool pwr_trk_init_trigger;
struct ewma_thermal avg_thermal[RTW_RF_PATH_MAX];
s8 txagc_remnant_cck;
s8 txagc_remnant_ofdm;
+ u8 rx_cck_agc_report_type;
/* backup dack results for each path and I/Q */
u32 dack_adck[RTW_RF_PATH_MAX];
diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c
index 9986a4cb37eb..a5b9d6c7be37 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.c
+++ b/drivers/net/wireless/realtek/rtw88/pci.c
@@ -729,7 +729,8 @@ static void __pci_flush_queue(struct rtw_dev *rtwdev, u8 pci_q, bool drop)
}
if (!drop)
- rtw_warn(rtwdev, "timed out to flush pci tx ring[%d]\n", pci_q);
+ rtw_dbg(rtwdev, RTW_DBG_UNEXP,
+ "timed out to flush pci tx ring[%d]\n", pci_q);
}
static void __rtw_pci_flush_queues(struct rtw_dev *rtwdev, u32 pci_queues,
@@ -1612,7 +1613,7 @@ static struct rtw_hci_ops rtw_pci_ops = {
static int rtw_pci_request_irq(struct rtw_dev *rtwdev, struct pci_dev *pdev)
{
- unsigned int flags = PCI_IRQ_LEGACY;
+ unsigned int flags = PCI_IRQ_INTX;
int ret;
if (!rtw_disable_msi)
@@ -1681,12 +1682,16 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void rtw_pci_napi_init(struct rtw_dev *rtwdev)
+static int rtw_pci_napi_init(struct rtw_dev *rtwdev)
{
struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv;
- init_dummy_netdev(&rtwpci->netdev);
- netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
+ rtwpci->netdev = alloc_netdev_dummy(0);
+ if (!rtwpci->netdev)
+ return -ENOMEM;
+
+ netif_napi_add(rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll);
+ return 0;
}
static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
@@ -1695,6 +1700,7 @@ static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev)
rtw_pci_napi_stop(rtwdev);
netif_napi_del(&rtwpci->napi);
+ free_netdev(rtwpci->netdev);
}
int rtw_pci_probe(struct pci_dev *pdev,
@@ -1744,7 +1750,11 @@ int rtw_pci_probe(struct pci_dev *pdev,
goto err_pci_declaim;
}
- rtw_pci_napi_init(rtwdev);
+ ret = rtw_pci_napi_init(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to setup NAPI\n");
+ goto err_pci_declaim;
+ }
ret = rtw_chip_info_setup(rtwdev);
if (ret) {
diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h
index 0c37efd8c66f..13988db1cb4c 100644
--- a/drivers/net/wireless/realtek/rtw88/pci.h
+++ b/drivers/net/wireless/realtek/rtw88/pci.h
@@ -215,7 +215,7 @@ struct rtw_pci {
bool running;
/* napi structure */
- struct net_device netdev;
+ struct net_device *netdev;
struct napi_struct napi;
u16 rx_tag;
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index b122f226924b..02ef9a77316b 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -270,6 +270,7 @@
#define BIT_MASK_BCN_HEAD_1_V1 0xfff
#define REG_AUTO_LLT_V1 0x0208
#define BIT_AUTO_INIT_LLT_V1 BIT(0)
+#define BIT_MASK_BLK_DESC_NUM GENMASK(7, 4)
#define REG_DWBCN0_CTRL 0x0208
#define BIT_BCN_VALID BIT(16)
#define REG_TXDMA_OFFSET_CHK 0x020C
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
new file mode 100644
index 000000000000..222608de33cd
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c
@@ -0,0 +1,2110 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright Fiona Klute <fiona.klute@gmx.de> */
+
+#include <linux/of_net.h>
+#include "main.h"
+#include "coex.h"
+#include "debug.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rx.h"
+#include "rtw8703b.h"
+#include "rtw8703b_tables.h"
+#include "rtw8723x.h"
+
+#define BIT_MASK_TXQ_INIT (BIT(7))
+#define WLAN_RL_VAL 0x3030
+/* disable BAR */
+#define WLAN_BAR_VAL 0x0201ffff
+#define WLAN_PIFS_VAL 0
+#define WLAN_RX_PKT_LIMIT 0x18
+#define WLAN_SLOT_TIME 0x09
+#define WLAN_SPEC_SIFS 0x100a
+#define WLAN_MAX_AGG_NR 0x1f
+#define WLAN_AMPDU_MAX_TIME 0x70
+
+/* unit is 32us */
+#define TBTT_PROHIBIT_SETUP_TIME 0x04
+#define TBTT_PROHIBIT_HOLD_TIME 0x80
+#define TBTT_PROHIBIT_HOLD_TIME_STOP_BCN 0x64
+
+/* raw pkt_stat->drv_info_sz is in units of 8 bytes */
+#define RX_DRV_INFO_SZ_UNIT_8703B 8
+
+#define TRANS_SEQ_END \
+ 0xFFFF, \
+ RTW_PWR_CUT_ALL_MSK, \
+ RTW_PWR_INTF_ALL_MSK, \
+ 0, \
+ RTW_PWR_CMD_END, 0, 0
+
+/* rssi in percentage % (dbm = % - 100) */
+/* These are used to select simple signal quality levels and might
+ * need tweaking. The same applies to the rf_para tables below.
+ */
+static const u8 wl_rssi_step_8703b[] = {60, 50, 44, 30};
+static const u8 bt_rssi_step_8703b[] = {30, 30, 30, 30};
+static const struct coex_5g_afh_map afh_5g_8703b[] = { {0, 0, 0} };
+
+/* Actually decreasing wifi TX power/RX gain isn't implemented in
+ * rtw8703b, but hopefully adjusting the BT side helps.
+ */
+static const struct coex_rf_para rf_para_tx_8703b[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 10, false, 7}, /* for WL-CPT */
+ {1, 0, true, 4},
+ {1, 2, true, 4},
+ {1, 10, true, 4},
+ {1, 15, true, 4}
+};
+
+static const struct coex_rf_para rf_para_rx_8703b[] = {
+ {0, 0, false, 7}, /* for normal */
+ {0, 10, false, 7}, /* for WL-CPT */
+ {1, 0, true, 5},
+ {1, 2, true, 5},
+ {1, 10, true, 5},
+ {1, 15, true, 5}
+};
+
+static const u32 rtw8703b_ofdm_swing_table[] = {
+ 0x0b40002d, /* 0, -15.0dB */
+ 0x0c000030, /* 1, -14.5dB */
+ 0x0cc00033, /* 2, -14.0dB */
+ 0x0d800036, /* 3, -13.5dB */
+ 0x0e400039, /* 4, -13.0dB */
+ 0x0f00003c, /* 5, -12.5dB */
+ 0x10000040, /* 6, -12.0dB */
+ 0x11000044, /* 7, -11.5dB */
+ 0x12000048, /* 8, -11.0dB */
+ 0x1300004c, /* 9, -10.5dB */
+ 0x14400051, /* 10, -10.0dB */
+ 0x15800056, /* 11, -9.5dB */
+ 0x16c0005b, /* 12, -9.0dB */
+ 0x18000060, /* 13, -8.5dB */
+ 0x19800066, /* 14, -8.0dB */
+ 0x1b00006c, /* 15, -7.5dB */
+ 0x1c800072, /* 16, -7.0dB */
+ 0x1e400079, /* 17, -6.5dB */
+ 0x20000080, /* 18, -6.0dB */
+ 0x22000088, /* 19, -5.5dB */
+ 0x24000090, /* 20, -5.0dB */
+ 0x26000098, /* 21, -4.5dB */
+ 0x288000a2, /* 22, -4.0dB */
+ 0x2ac000ab, /* 23, -3.5dB */
+ 0x2d4000b5, /* 24, -3.0dB */
+ 0x300000c0, /* 25, -2.5dB */
+ 0x32c000cb, /* 26, -2.0dB */
+ 0x35c000d7, /* 27, -1.5dB */
+ 0x390000e4, /* 28, -1.0dB */
+ 0x3c8000f2, /* 29, -0.5dB */
+ 0x40000100, /* 30, +0dB */
+ 0x43c0010f, /* 31, +0.5dB */
+ 0x47c0011f, /* 32, +1.0dB */
+ 0x4c000130, /* 33, +1.5dB */
+ 0x50800142, /* 34, +2.0dB */
+ 0x55400155, /* 35, +2.5dB */
+ 0x5a400169, /* 36, +3.0dB */
+ 0x5fc0017f, /* 37, +3.5dB */
+ 0x65400195, /* 38, +4.0dB */
+ 0x6b8001ae, /* 39, +4.5dB */
+ 0x71c001c7, /* 40, +5.0dB */
+ 0x788001e2, /* 41, +5.5dB */
+ 0x7f8001fe /* 42, +6.0dB */
+};
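+
+/* The swing values above appear to scale linearly with amplitude,
+ * i.e. with 10^(dB/20): entry 30 (0x40000100, the 0 dB reference)
+ * has low word 0x100 = 256, entry 18 (-6.0 dB, 0x20000080) halves
+ * that to 0x80 = 128, and entry 42 (+6.0 dB, 0x7f8001fe) roughly
+ * doubles it to 0x1fe = 510.
+ */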
+
+static const u32 rtw8703b_cck_pwr_regs[] = {
+ 0x0a22, 0x0a23, 0x0a24, 0x0a25, 0x0a26, 0x0a27, 0x0a28, 0x0a29,
+ 0x0a9a, 0x0a9b, 0x0a9c, 0x0a9d, 0x0aa0, 0x0aa1, 0x0aa2, 0x0aa3,
+};
+
+static const u8 rtw8703b_cck_swing_table[][16] = {
+ {0x44, 0x42, 0x3C, 0x33, 0x28, 0x1C, 0x13, 0x0B, 0x05, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-16dB*/
+ {0x48, 0x46, 0x3F, 0x36, 0x2A, 0x1E, 0x14, 0x0B, 0x05, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-15.5dB*/
+ {0x4D, 0x4A, 0x43, 0x39, 0x2C, 0x20, 0x15, 0x0C, 0x06, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-15dB*/
+ {0x51, 0x4F, 0x47, 0x3C, 0x2F, 0x22, 0x16, 0x0D, 0x06, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-14.5dB*/
+ {0x56, 0x53, 0x4B, 0x40, 0x32, 0x24, 0x17, 0x0E, 0x06, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-14dB*/
+ {0x5B, 0x58, 0x50, 0x43, 0x35, 0x26, 0x19, 0x0E, 0x07, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-13.5dB*/
+ {0x60, 0x5D, 0x54, 0x47, 0x38, 0x28, 0x1A, 0x0F, 0x07, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-13dB*/
+ {0x66, 0x63, 0x59, 0x4C, 0x3B, 0x2B, 0x1C, 0x10, 0x08, 0x02,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-12.5dB*/
+ {0x6C, 0x69, 0x5F, 0x50, 0x3F, 0x2D, 0x1E, 0x11, 0x08, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-12dB*/
+ {0x73, 0x6F, 0x64, 0x55, 0x42, 0x30, 0x1F, 0x12, 0x08, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-11.5dB*/
+ {0x79, 0x76, 0x6A, 0x5A, 0x46, 0x33, 0x21, 0x13, 0x09, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-11dB*/
+ {0x81, 0x7C, 0x71, 0x5F, 0x4A, 0x36, 0x23, 0x14, 0x0A, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-10.5dB*/
+ {0x88, 0x84, 0x77, 0x65, 0x4F, 0x39, 0x25, 0x15, 0x0A, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-10dB*/
+ {0x90, 0x8C, 0x7E, 0x6B, 0x54, 0x3C, 0x27, 0x17, 0x0B, 0x03,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-9.5dB*/
+ {0x99, 0x94, 0x86, 0x71, 0x58, 0x40, 0x2A, 0x18, 0x0B, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-9dB*/
+ {0xA2, 0x9D, 0x8E, 0x78, 0x5E, 0x43, 0x2C, 0x19, 0x0C, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-8.5dB*/
+ {0xAC, 0xA6, 0x96, 0x7F, 0x63, 0x47, 0x2F, 0x1B, 0x0D, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-8dB*/
+ {0xB6, 0xB0, 0x9F, 0x87, 0x69, 0x4C, 0x32, 0x1D, 0x0D, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-7.5dB*/
+ {0xC1, 0xBA, 0xA8, 0x8F, 0x6F, 0x50, 0x35, 0x1E, 0x0E, 0x04,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-7dB*/
+ {0xCC, 0xC5, 0xB2, 0x97, 0x76, 0x55, 0x38, 0x20, 0x0F, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*-6.5dB*/
+ {0xD8, 0xD1, 0xBD, 0xA0, 0x7D, 0x5A, 0x3B, 0x22, 0x10, 0x05,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} /*-6dB*/
+};
+
+#define RTW_OFDM_SWING_TABLE_SIZE ARRAY_SIZE(rtw8703b_ofdm_swing_table)
+#define RTW_CCK_SWING_TABLE_SIZE ARRAY_SIZE(rtw8703b_cck_swing_table)
+
+static const struct rtw_pwr_seq_cmd trans_pre_enable_8703b[] = {
+ /* set up external crystal (XTAL) */
+ {REG_PAD_CTRL1 + 2,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
+ /* set CLK_REQ to high active */
+ {0x0069,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ /* unlock ISO/CLK/power control register */
+ {REG_RSV_CTRL,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8703b[] = {
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_carddis_8703b[] = {
+ {0x0023,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), BIT(4)},
+ {0x0007,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK | RTW_PWR_INTF_USB_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xFF, 0x20},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), BIT(7)},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd trans_cardemu_to_act_8703b[] = {
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0067,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(4), 0},
+ {0x0001,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_DELAY, 1, RTW_PWR_DELAY_MS},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3) | BIT(2)), 0},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0004,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
+ {0x0004,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), 0},
+ /* wait for power ready */
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), BIT(1)},
+ {0x0075,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_PCI_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(7), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, (BIT(4) | BIT(3)), 0},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(0), 0},
+ {0x0010,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x0049,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0063,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0062,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0058,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x005A,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0068,
+ RTW_PWR_CUT_TEST_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(3), BIT(3)},
+ {0x0069,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_cardemu_8703b[] = {
+ {0x001f,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0},
+ {0x0049,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0006,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0005,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, BIT(1), 0},
+ {0x0010,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(6), 0},
+ {0x0000,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0x0020,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_USB_MSK | RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_reset_mcu_8703b[] = {
+ {REG_SYS_FUNC_EN + 1,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT_FEN_CPUEN, 0},
+	/* clear the MCU firmware-ready status */
+ {REG_MCUFW_CTRL,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0},
+ /* reset MCU IO wrapper */
+ {REG_RSV_CTRL + 1,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {REG_RSV_CTRL + 1,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 1},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd trans_act_to_lps_8703b[] = {
+ {0x0301,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0xff},
+ {0x0522,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0xff},
+ {0x05f8,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xff, 0},
+ {0x05f9,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xff, 0},
+ {0x05fa,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xff, 0},
+ {0x05fb,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_POLLING, 0xff, 0},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(0), 0},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_DELAY, 0, RTW_PWR_DELAY_US},
+ {0x0002,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0100,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0x03},
+ {0x0101,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(1), 0},
+ {0x0093,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_SDIO_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, 0xff, 0},
+ {0x0553,
+ RTW_PWR_CUT_ALL_MSK,
+ RTW_PWR_INTF_ALL_MSK,
+ RTW_PWR_ADDR_MAC,
+ RTW_PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {TRANS_SEQ_END},
+};
+
+static const struct rtw_pwr_seq_cmd *card_enable_flow_8703b[] = {
+ trans_pre_enable_8703b,
+ trans_carddis_to_cardemu_8703b,
+ trans_cardemu_to_act_8703b,
+ NULL
+};
+
+static const struct rtw_pwr_seq_cmd *card_disable_flow_8703b[] = {
+ trans_act_to_lps_8703b,
+ trans_act_to_reset_mcu_8703b,
+ trans_act_to_cardemu_8703b,
+ trans_cardemu_to_carddis_8703b,
+ NULL
+};
+
+static const struct rtw_rfe_def rtw8703b_rfe_defs[] = {
+ [0] = { .phy_pg_tbl = &rtw8703b_bb_pg_tbl,
+ .txpwr_lmt_tbl = &rtw8703b_txpwr_lmt_tbl,},
+};
+
+static const struct rtw_page_table page_table_8703b[] = {
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+ {12, 2, 2, 0, 1},
+};
+
+static const struct rtw_rqpn rqpn_table_8703b[] = {
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_HIGH,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_HIGH, RTW_DMA_MAPPING_HIGH},
+ {RTW_DMA_MAPPING_NORMAL, RTW_DMA_MAPPING_NORMAL,
+ RTW_DMA_MAPPING_LOW, RTW_DMA_MAPPING_LOW,
+ RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
+};
+
+/* Default power index table for RTL8703B, used if EFUSE does not
+ * contain valid data. Replaces EFUSE data from offset 0x10 (start of
+ * txpwr_idx_table).
+ */
+static const u8 rtw8703b_txpwr_idx_table[] = {
+ 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x2D,
+ 0x2D, 0x2D, 0x2D, 0x2D, 0x2D, 0x02
+};
+
+static void try_mac_from_devicetree(struct rtw_dev *rtwdev)
+{
+ struct device_node *node = rtwdev->dev->of_node;
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ int ret;
+
+ if (node) {
+ ret = of_get_mac_address(node, efuse->addr);
+ if (ret == 0) {
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "got wifi mac address from DT: %pM\n",
+ efuse->addr);
+ }
+ }
+}
+
+#define DBG_EFUSE_FIX(rtwdev, name) \
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "Fixed invalid EFUSE value: " \
+ # name "=0x%x\n", rtwdev->efuse.name)
+
+static int rtw8703b_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ u8 *pwr = (u8 *)efuse->txpwr_idx_table;
+ bool valid = false;
+ int ret;
+
+ ret = rtw8723x_read_efuse(rtwdev, log_map);
+ if (ret != 0)
+ return ret;
+
+ if (!is_valid_ether_addr(efuse->addr))
+ try_mac_from_devicetree(rtwdev);
+
+ /* If TX power index table in EFUSE is invalid, fall back to
+ * built-in table.
+ */
+ for (int i = 0; i < ARRAY_SIZE(rtw8703b_txpwr_idx_table); i++)
+ if (pwr[i] != 0xff) {
+ valid = true;
+ break;
+ }
+ if (!valid) {
+ for (int i = 0; i < ARRAY_SIZE(rtw8703b_txpwr_idx_table); i++)
+ pwr[i] = rtw8703b_txpwr_idx_table[i];
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "Replaced invalid EFUSE TX power index table.");
+ rtw8723x_debug_txpwr_limit(rtwdev,
+ efuse->txpwr_idx_table, 2);
+ }
+
+ /* Override invalid antenna settings. */
+ if (efuse->bt_setting == 0xff) {
+ /* shared antenna */
+ efuse->bt_setting |= BIT(0);
+ /* RF path A */
+ efuse->bt_setting &= ~BIT(6);
+ DBG_EFUSE_FIX(rtwdev, bt_setting);
+ }
+
+ /* Override invalid board options: The coex code incorrectly
+ * assumes that if bits 6 & 7 are set the board doesn't
+ * support coex. Regd is also derived from rf_board_option and
+ * should be 0 if there's no valid data.
+ */
+ if (efuse->rf_board_option == 0xff) {
+ efuse->regd = 0;
+ efuse->rf_board_option &= GENMASK(5, 0);
+ DBG_EFUSE_FIX(rtwdev, rf_board_option);
+ }
+
+ /* Override invalid crystal cap setting, default comes from
+ * vendor driver. Chip specific.
+ */
+ if (efuse->crystal_cap == 0xff) {
+ efuse->crystal_cap = 0x20;
+ DBG_EFUSE_FIX(rtwdev, crystal_cap);
+ }
+
+ return 0;
+}
+
+static void rtw8703b_pwrtrack_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 path;
+
+ /* TODO: The vendor driver selects these using tables in
+ * halrf_powertracking_ce.c, functions are called
+ * get_swing_index and get_cck_swing_index. There the current
+ * fixed values are only the defaults in case no match is
+ * found.
+ */
+ dm_info->default_ofdm_index = 30;
+ dm_info->default_cck_index = 20;
+
+ for (path = RF_PATH_A; path < rtwdev->hal.rf_path_num; path++) {
+ ewma_thermal_init(&dm_info->avg_thermal[path]);
+ dm_info->delta_power_index[path] = 0;
+ }
+ dm_info->pwr_trk_triggered = false;
+ dm_info->pwr_trk_init_trigger = true;
+ dm_info->thermal_meter_k = rtwdev->efuse.thermal_meter_k;
+ dm_info->txagc_remnant_cck = 0;
+ dm_info->txagc_remnant_ofdm = 0;
+}
+
+static void rtw8703b_phy_set_param(struct rtw_dev *rtwdev)
+{
+ u8 xtal_cap = rtwdev->efuse.crystal_cap & 0x3F;
+
+ /* power on BB/RF domain */
+ rtw_write16_set(rtwdev, REG_SYS_FUNC_EN,
+ BIT_FEN_EN_25_1 | BIT_FEN_BB_GLB_RST | BIT_FEN_BB_RSTB);
+ rtw_write8_set(rtwdev, REG_RF_CTRL,
+ BIT_RF_EN | BIT_RF_RSTB | BIT_RF_SDM_RSTB);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_WLINT, RFREG_MASK, 0x0780);
+ rtw_write8(rtwdev, REG_AFE_CTRL1 + 1, 0x80);
+
+ rtw_phy_load_tables(rtwdev);
+
+ rtw_write32_clr(rtwdev, REG_RCR, BIT_RCR_ADF);
+ /* 0xff is from the vendor driver, rtw8723d uses
+ * BIT_HIQ_NO_LMT_EN_ROOT. Comment in vendor driver: "Packet
+ * in Hi Queue Tx immediately". I wonder if setting all bits
+ * is really necessary.
+ */
+ rtw_write8_set(rtwdev, REG_HIQ_NO_LMT_EN, 0xff);
+ rtw_write16_set(rtwdev, REG_AFE_CTRL_4, BIT_CK320M_AFE_EN | BIT_EN_SYN);
+
+ rtw_write32_mask(rtwdev, REG_AFE_CTRL3, BIT_MASK_XTAL,
+ xtal_cap | (xtal_cap << 6));
+ rtw_write32_set(rtwdev, REG_FPGA0_RFMOD, BIT_CCKEN | BIT_OFDMEN);
+
+ /* Init EDCA */
+ rtw_write16(rtwdev, REG_SPEC_SIFS, WLAN_SPEC_SIFS);
+ rtw_write16(rtwdev, REG_MAC_SPEC_SIFS, WLAN_SPEC_SIFS);
+ rtw_write16(rtwdev, REG_SIFS, WLAN_SPEC_SIFS); /* CCK */
+ rtw_write16(rtwdev, REG_SIFS + 2, WLAN_SPEC_SIFS); /* OFDM */
+ /* TXOP */
+ rtw_write32(rtwdev, REG_EDCA_VO_PARAM, 0x002FA226);
+ rtw_write32(rtwdev, REG_EDCA_VI_PARAM, 0x005EA324);
+ rtw_write32(rtwdev, REG_EDCA_BE_PARAM, 0x005EA42B);
+ rtw_write32(rtwdev, REG_EDCA_BK_PARAM, 0x0000A44F);
+
+ /* Init retry */
+ rtw_write8(rtwdev, REG_ACKTO, 0x40);
+
+ /* Set up RX aggregation. sdio.c also sets DMA mode, but not
+ * the burst parameters.
+ */
+ rtw_write8(rtwdev, REG_RXDMA_MODE,
+ BIT_DMA_MODE |
+ FIELD_PREP_CONST(BIT_MASK_AGG_BURST_NUM, AGG_BURST_NUM) |
+ FIELD_PREP_CONST(BIT_MASK_AGG_BURST_SIZE, AGG_BURST_SIZE));
+
+ /* Init beacon parameters */
+ rtw_write8(rtwdev, REG_BCN_CTRL,
+ BIT_DIS_TSF_UDT | BIT_EN_BCN_FUNCTION | BIT_EN_TXBCN_RPT);
+ rtw_write8(rtwdev, REG_TBTT_PROHIBIT, TBTT_PROHIBIT_SETUP_TIME);
+ rtw_write8(rtwdev, REG_TBTT_PROHIBIT + 1,
+ TBTT_PROHIBIT_HOLD_TIME_STOP_BCN & 0xFF);
+ rtw_write8(rtwdev, REG_TBTT_PROHIBIT + 2,
+ (rtw_read8(rtwdev, REG_TBTT_PROHIBIT + 2) & 0xF0)
+ | (TBTT_PROHIBIT_HOLD_TIME_STOP_BCN >> 8));
+
+ /* configure packet burst */
+ rtw_write8_set(rtwdev, REG_SINGLE_AMPDU_CTRL, BIT_EN_SINGLE_APMDU);
+ rtw_write8(rtwdev, REG_RX_PKT_LIMIT, WLAN_RX_PKT_LIMIT);
+ rtw_write8(rtwdev, REG_MAX_AGGR_NUM, WLAN_MAX_AGG_NR);
+ rtw_write8(rtwdev, REG_PIFS, WLAN_PIFS_VAL);
+ rtw_write8_clr(rtwdev, REG_FWHW_TXQ_CTRL, BIT_MASK_TXQ_INIT);
+ rtw_write8(rtwdev, REG_AMPDU_MAX_TIME, WLAN_AMPDU_MAX_TIME);
+
+ rtw_write8(rtwdev, REG_SLOT, WLAN_SLOT_TIME);
+ rtw_write16(rtwdev, REG_RETRY_LIMIT, WLAN_RL_VAL);
+ rtw_write32(rtwdev, REG_BAR_MODE_CTRL, WLAN_BAR_VAL);
+ rtw_write16(rtwdev, REG_ATIMWND, 0x2);
+
+ rtw_phy_init(rtwdev);
+
+ if (rtw_read32_mask(rtwdev, REG_BB_AMP, BIT_MASK_RX_LNA) != 0) {
+ rtwdev->dm_info.rx_cck_agc_report_type = 1;
+ } else {
+ rtwdev->dm_info.rx_cck_agc_report_type = 0;
+ rtw_warn(rtwdev, "unexpected cck agc report type");
+ }
+
+ rtw8723x_lck(rtwdev);
+
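+ /* Toggle the initial gain; presumably this restarts the AGC
+ * (rtw8723d does the same in its init).
+ */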
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50);
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x20);
+
+ rtw8703b_pwrtrack_init(rtwdev);
+}
+
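+/* Measure the power spectral density at the given frequency setting
+ * and check whether it reaches the threshold.
+ */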
+static bool rtw8703b_check_spur_ov_thres(struct rtw_dev *rtwdev,
+ u32 freq, u32 thres)
+{
+ bool ret = false;
+
+ rtw_write32(rtwdev, REG_ANALOG_P4, DIS_3WIRE);
+ rtw_write32(rtwdev, REG_PSDFN, freq);
+ rtw_write32(rtwdev, REG_PSDFN, START_PSD | freq);
+
+ msleep(30);
+ if (rtw_read32(rtwdev, REG_PSDRPT) >= thres)
+ ret = true;
+
+ rtw_write32(rtwdev, REG_PSDFN, freq);
+ rtw_write32(rtwdev, REG_ANALOG_P4, EN_3WIRE);
+
+ return ret;
+}
+
+static void rtw8703b_cfg_notch(struct rtw_dev *rtwdev, u8 channel, bool notch)
+{
+ if (!notch) {
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x1f);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x0);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x0);
+ return;
+ }
+
+ switch (channel) {
+ case 5:
+ fallthrough;
+ case 13:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0xb);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x06000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ case 6:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x4);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000600);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ case 7:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x3);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x06000000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ case 8:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0xa);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00000380);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ case 14:
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_MASK_RXDSP, 0x5);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x1);
+ rtw_write32(rtwdev, REG_OFDM1_CSI1, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI2, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI3, 0x00000000);
+ rtw_write32(rtwdev, REG_OFDM1_CSI4, 0x00180000);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x1);
+ break;
+ default:
+ rtw_warn(rtwdev,
+ "Bug: Notch filter enable called for channel %u!",
+ channel);
+ rtw_write32_mask(rtwdev, REG_OFDM0_RXDSP, BIT_EN_RXDSP, 0x0);
+ rtw_write32_mask(rtwdev, REG_OFDM1_CFOTRK, BIT_EN_CFOTRK, 0x0);
+ break;
+ }
+}
+
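+/* Enable the notch filter if a spur is detected at the channel's known
+ * interference frequency, disable it for channels without a known spur
+ * frequency.
+ */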
+static void rtw8703b_spur_cal(struct rtw_dev *rtwdev, u8 channel)
+{
+ bool notch;
+ u32 freq;
+
+ if (channel == 5) {
+ freq = FREQ_CH5;
+ } else if (channel == 6) {
+ freq = FREQ_CH6;
+ } else if (channel == 7) {
+ freq = FREQ_CH7;
+ } else if (channel == 8) {
+ freq = FREQ_CH8;
+ } else if (channel == 13) {
+ freq = FREQ_CH13;
+ } else if (channel == 14) {
+ freq = FREQ_CH14;
+ } else {
+ rtw8703b_cfg_notch(rtwdev, channel, false);
+ return;
+ }
+
+ notch = rtw8703b_check_spur_ov_thres(rtwdev, freq, SPUR_THRES);
+ rtw8703b_cfg_notch(rtwdev, channel, notch);
+}
+
+static void rtw8703b_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
+{
+ u32 rf_cfgch_a;
+ u32 rf_cfgch_b;
+ /* default value for 20M */
+ u32 rf_rck = 0x00000C08;
+
+ rf_cfgch_a = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK);
+ rf_cfgch_b = rtw_read_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK);
+
+ rf_cfgch_a &= ~RFCFGCH_CHANNEL_MASK;
+ rf_cfgch_b &= ~RFCFGCH_CHANNEL_MASK;
+ rf_cfgch_a |= (channel & RFCFGCH_CHANNEL_MASK);
+ rf_cfgch_b |= (channel & RFCFGCH_CHANNEL_MASK);
+
+ rf_cfgch_a &= ~RFCFGCH_BW_MASK;
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ rf_cfgch_a |= RFCFGCH_BW_20M;
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rf_cfgch_a |= RFCFGCH_BW_40M;
+ rf_rck = 0x00000C4C;
+ break;
+ default:
+ break;
+ }
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, rf_cfgch_a);
+ rtw_write_rf(rtwdev, RF_PATH_B, RF_CFGCH, RFREG_MASK, rf_cfgch_b);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_RCK1, RFREG_MASK, rf_rck);
+ rtw8703b_spur_cal(rtwdev, channel);
+}
+
+#define CCK_DFIR_NR_8703B 2
+static const struct rtw_backup_info cck_dfir_cfg[][CCK_DFIR_NR_8703B] = {
+ [0] = {
+ { .len = 4, .reg = REG_CCK_TXSF2, .val = 0x5A7DA0BD },
+ { .len = 4, .reg = REG_CCK_DBG, .val = 0x0000223B },
+ },
+ [1] = {
+ { .len = 4, .reg = REG_CCK_TXSF2, .val = 0x00000000 },
+ { .len = 4, .reg = REG_CCK_DBG, .val = 0x00000000 },
+ },
+};
+
+static void rtw8703b_set_channel_bb(struct rtw_dev *rtwdev, u8 channel, u8 bw,
+ u8 primary_ch_idx)
+{
+ const struct rtw_backup_info *cck_dfir;
+ int i;
+
+ cck_dfir = channel <= 13 ? cck_dfir_cfg[0] : cck_dfir_cfg[1];
+
+ for (i = 0; i < CCK_DFIR_NR_8703B; i++, cck_dfir++)
+ rtw_write32(rtwdev, cck_dfir->reg, cck_dfir->val);
+
+ switch (bw) {
+ case RTW_CHANNEL_WIDTH_20:
+ rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x0);
+ rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x0);
+ rtw_write32_mask(rtwdev, REG_OFDM0_TX_PSD_NOISE,
+ GENMASK(31, 20), 0x0);
+ rtw_write32(rtwdev, REG_BBRX_DFIR, 0x4A880000);
+ rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x19F60000);
+ break;
+ case RTW_CHANNEL_WIDTH_40:
+ rtw_write32_mask(rtwdev, REG_FPGA0_RFMOD, BIT_MASK_RFMOD, 0x1);
+ rtw_write32_mask(rtwdev, REG_FPGA1_RFMOD, BIT_MASK_RFMOD, 0x1);
+ rtw_write32(rtwdev, REG_BBRX_DFIR, 0x40100000);
+ rtw_write32(rtwdev, REG_OFDM0_A_TX_AFE, 0x51F60000);
+ rtw_write32_mask(rtwdev, REG_CCK0_SYS, BIT_CCK_SIDE_BAND,
+ primary_ch_idx == RTW_SC_20_UPPER ? 1 : 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, 0xC00,
+ primary_ch_idx == RTW_SC_20_UPPER ? 2 : 1);
+
+ rtw_write32_mask(rtwdev, REG_BB_PWR_SAV5_11N, GENMASK(27, 26),
+ primary_ch_idx == RTW_SC_20_UPPER ? 1 : 2);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtw8703b_set_channel(struct rtw_dev *rtwdev, u8 channel,
+ u8 bw, u8 primary_chan_idx)
+{
+ rtw8703b_set_channel_rf(rtwdev, channel, bw);
+ rtw_set_channel_mac(rtwdev, channel, bw, primary_chan_idx);
+ rtw8703b_set_channel_bb(rtwdev, channel, bw, primary_chan_idx);
+}
+
+/* Not all indices are valid, based on available data. None of the
+ * known valid values are positive, so use 0x7f as "invalid".
+ */
+#define LNA_IDX_INVALID 0x7f
+static const s8 lna_gain_table[16] = {
+ -2, LNA_IDX_INVALID, LNA_IDX_INVALID, LNA_IDX_INVALID,
+ -6, LNA_IDX_INVALID, LNA_IDX_INVALID, -19,
+ -32, LNA_IDX_INVALID, -36, -42,
+ LNA_IDX_INVALID, LNA_IDX_INVALID, LNA_IDX_INVALID, -48,
+};
+
+static s8 get_cck_rx_pwr(struct rtw_dev *rtwdev, u8 lna_idx, u8 vga_idx)
+{
+ s8 lna_gain = 0;
+
+ if (lna_idx < ARRAY_SIZE(lna_gain_table))
+ lna_gain = lna_gain_table[lna_idx];
+
+ if (lna_gain >= 0) {
+ rtw_warn(rtwdev, "incorrect lna index (%d)\n", lna_idx);
+ return -120;
+ }
+
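+ /* the gain decreases by 2 dB per VGA index step */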
+ return lna_gain - 2 * vga_idx;
+}
+
+static void query_phy_status_cck(struct rtw_dev *rtwdev, u8 *phy_raw,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct phy_status_8703b *phy_status = (struct phy_status_8703b *)phy_raw;
+ u8 vga_idx = phy_status->cck_agc_rpt_ofdm_cfosho_a & VGA_BITS;
+ u8 lna_idx = phy_status->cck_agc_rpt_ofdm_cfosho_a & LNA_L_BITS;
+ s8 rx_power;
+
+ if (rtwdev->dm_info.rx_cck_agc_report_type == 1)
+ lna_idx = FIELD_PREP(BIT_LNA_H_MASK,
+ phy_status->cck_rpt_b_ofdm_cfosho_b & LNA_H_BIT)
+ | FIELD_PREP(BIT_LNA_L_MASK, lna_idx);
+ else
+ lna_idx = FIELD_PREP(BIT_LNA_L_MASK, lna_idx);
+ rx_power = get_cck_rx_pwr(rtwdev, lna_idx, vga_idx);
+
+ pkt_stat->rx_power[RF_PATH_A] = rx_power;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ rtwdev->dm_info.rssi[RF_PATH_A] = pkt_stat->rssi;
+ pkt_stat->signal_power = rx_power;
+}
+
+static void query_phy_status_ofdm(struct rtw_dev *rtwdev, u8 *phy_raw,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ struct phy_status_8703b *phy_status = (struct phy_status_8703b *)phy_raw;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s8 val_s8;
+
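+ /* RX power in dBm: 2 dB per AGC gain step, -110 dBm base */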
+ val_s8 = phy_status->path_agc[RF_PATH_A].gain & 0x3F;
+ pkt_stat->rx_power[RF_PATH_A] = (val_s8 * 2) - 110;
+ pkt_stat->rssi = rtw_phy_rf_power_2_rssi(pkt_stat->rx_power, 1);
+ pkt_stat->rx_snr[RF_PATH_A] = (s8)(phy_status->path_rxsnr[RF_PATH_A] / 2);
+
+ /* signal power reported by HW */
+ val_s8 = phy_status->cck_sig_qual_ofdm_pwdb_all >> 1;
+ pkt_stat->signal_power = (val_s8 & 0x7f) - 110;
+
+ pkt_stat->rx_evm[RF_PATH_A] = phy_status->stream_rxevm[RF_PATH_A];
+ pkt_stat->cfo_tail[RF_PATH_A] = phy_status->path_cfotail[RF_PATH_A];
+
+ dm_info->curr_rx_rate = pkt_stat->rate;
+ dm_info->rssi[RF_PATH_A] = pkt_stat->rssi;
+ dm_info->rx_snr[RF_PATH_A] = pkt_stat->rx_snr[RF_PATH_A] >> 1;
+ /* convert to KHz (used only for debugfs) */
+ dm_info->cfo_tail[RF_PATH_A] = (pkt_stat->cfo_tail[RF_PATH_A] * 5) >> 1;
+
+ /* The EVM value as s8, divided by 2, is in dBm and should
+ * usually be in the -33 to 0 range. rx_evm_dbm needs the
+ * absolute (positive) value.
+ */
+ val_s8 = (s8)pkt_stat->rx_evm[RF_PATH_A];
+ val_s8 = clamp_t(s8, -val_s8 >> 1, 0, 64);
+ val_s8 &= 0x3F; /* 64->0: second path of 1SS rate is 64 */
+ dm_info->rx_evm_dbm[RF_PATH_A] = val_s8;
+}
+
+static void query_phy_status(struct rtw_dev *rtwdev, u8 *phy_status,
+ struct rtw_rx_pkt_stat *pkt_stat)
+{
+ if (pkt_stat->rate <= DESC_RATE11M)
+ query_phy_status_cck(rtwdev, phy_status, pkt_stat);
+ else
+ query_phy_status_ofdm(rtwdev, phy_status, pkt_stat);
+}
+
+static void rtw8703b_query_rx_desc(struct rtw_dev *rtwdev, u8 *rx_desc,
+ struct rtw_rx_pkt_stat *pkt_stat,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hdr *hdr;
+ u32 desc_sz = rtwdev->chip->rx_pkt_desc_sz;
+ u8 *phy_status = NULL;
+
+ memset(pkt_stat, 0, sizeof(*pkt_stat));
+
+ pkt_stat->phy_status = GET_RX_DESC_PHYST(rx_desc);
+ pkt_stat->icv_err = GET_RX_DESC_ICV_ERR(rx_desc);
+ pkt_stat->crc_err = GET_RX_DESC_CRC32(rx_desc);
+ pkt_stat->decrypted = !GET_RX_DESC_SWDEC(rx_desc) &&
+ GET_RX_DESC_ENC_TYPE(rx_desc) != RX_DESC_ENC_NONE;
+ pkt_stat->is_c2h = GET_RX_DESC_C2H(rx_desc);
+ pkt_stat->pkt_len = GET_RX_DESC_PKT_LEN(rx_desc);
+ pkt_stat->drv_info_sz = GET_RX_DESC_DRV_INFO_SIZE(rx_desc);
+ pkt_stat->shift = GET_RX_DESC_SHIFT(rx_desc);
+ pkt_stat->rate = GET_RX_DESC_RX_RATE(rx_desc);
+ pkt_stat->cam_id = GET_RX_DESC_MACID(rx_desc);
+ pkt_stat->ppdu_cnt = 0;
+ pkt_stat->tsf_low = GET_RX_DESC_TSFL(rx_desc);
+
+ pkt_stat->drv_info_sz *= RX_DRV_INFO_SZ_UNIT_8703B;
+
+ if (pkt_stat->is_c2h)
+ return;
+
+ hdr = (struct ieee80211_hdr *)(rx_desc + desc_sz + pkt_stat->shift +
+ pkt_stat->drv_info_sz);
+
+ pkt_stat->bw = GET_RX_DESC_BW(rx_desc);
+
+ if (pkt_stat->phy_status) {
+ phy_status = rx_desc + desc_sz + pkt_stat->shift;
+ query_phy_status(rtwdev, phy_status, pkt_stat);
+ }
+
+ rtw_rx_fill_rx_status(rtwdev, pkt_stat, hdr, rx_status, phy_status);
+
+ /* The rtl8723cs driver checks for size < 14 or size > 8192 and
+ * simply drops the packet. Maybe this should go into
+ * rtw_rx_fill_rx_status()?
+ */
+ if (pkt_stat->pkt_len == 0) {
+ rx_status->flag |= RX_FLAG_NO_PSDU;
+ rtw_dbg(rtwdev, RTW_DBG_RX, "zero length packet");
+ }
+}
+
+#define ADDA_ON_VAL_8703B 0x03c00014
+
+static
+void rtw8703b_iqk_config_mac(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ rtw_write8(rtwdev, rtw8723x_common.iqk_mac8_regs[0], 0x3F);
+ for (int i = 1; i < RTW8723X_IQK_MAC8_REG_NUM; i++)
+ rtw_write8(rtwdev, rtw8723x_common.iqk_mac8_regs[i],
+ backup->mac8[i] & (~BIT(3)));
+}
+
+#define IQK_LTE_WRITE_VAL_8703B 0x00007700
+#define IQK_DELAY_TIME_8703B 4
+
+static void rtw8703b_iqk_one_shot(struct rtw_dev *rtwdev, bool tx)
+{
+ u32 regval;
+ ktime_t t;
+ s64 dur;
+ int ret;
+
+ /* enter IQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK);
+ rtw8723x_iqk_config_lte_path_gnt(rtwdev, IQK_LTE_WRITE_VAL_8703B);
+
+ /* One shot, LOK & IQK */
+ rtw_write32(rtwdev, REG_IQK_AGC_PTS_11N, 0xf9000000);
+ rtw_write32(rtwdev, REG_IQK_AGC_PTS_11N, 0xf8000000);
+
+ t = ktime_get();
+ msleep(IQK_DELAY_TIME_8703B);
+ ret = read_poll_timeout(rtw_read32, regval, regval != 0, 1000,
+ 100000, false, rtwdev,
+ REG_IQK_RDY);
+ dur = ktime_us_delta(ktime_get(), t);
+
+ if (ret)
+ rtw_warn(rtwdev, "[IQK] %s timed out after %lldus!\n",
+ tx ? "TX" : "RX", dur);
+ else
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] %s done after %lldus\n",
+ tx ? "TX" : "RX", dur);
+}
+
+static void rtw8703b_iqk_txrx_path_post(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ rtw8723x_iqk_restore_lte_path_gnt(rtwdev, backup);
+ rtw_write32(rtwdev, REG_BB_SEL_BTG, backup->bb_sel_btg);
+
+ /* leave IQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK);
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, 0x800, 0x0);
+}
+
+static u8 rtw8703b_iqk_check_tx_failed(struct rtw_dev *rtwdev)
+{
+ s32 tx_x, tx_y;
+ u32 tx_fail;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xeac = 0x%x\n",
+ rtw_read32(rtwdev, REG_IQK_RES_RY));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xe94 = 0x%x, 0xe9c = 0x%x\n",
+ rtw_read32(rtwdev, REG_IQK_RES_TX),
+ rtw_read32(rtwdev, REG_IQK_RES_TY));
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] 0xe90(before IQK) = 0x%x, 0xe98(after IQK) = 0x%x\n",
+ rtw_read32(rtwdev, REG_IQK_RDY),
+ rtw_read32(rtwdev, 0xe98));
+
+ tx_fail = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_IQK_TX_FAIL);
+ tx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX);
+ tx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY);
+
+ if (!tx_fail && tx_x != IQK_TX_X_ERR && tx_y != IQK_TX_Y_ERR)
+ return IQK_TX_OK;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] A TX IQK failed\n");
+
+ return 0;
+}
+
+static u8 rtw8703b_iqk_check_rx_failed(struct rtw_dev *rtwdev)
+{
+ s32 rx_x, rx_y;
+ u32 rx_fail;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xea4 = 0x%x, 0xeac = 0x%x\n",
+ rtw_read32(rtwdev, REG_IQK_RES_RX),
+ rtw_read32(rtwdev, REG_IQK_RES_RY));
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] 0xea0(before IQK) = 0x%x, 0xea8(after IQK) = 0x%x\n",
+ rtw_read32(rtwdev, 0xea0),
+ rtw_read32(rtwdev, 0xea8));
+
+ rx_fail = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_IQK_RX_FAIL);
+ rx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_RX, BIT_MASK_RES_RX);
+ rx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_RY, BIT_MASK_RES_RY);
+ rx_y = abs(iqkxy_to_s32(rx_y));
+
+ if (!rx_fail && rx_x != IQK_RX_X_ERR && rx_y != IQK_RX_Y_ERR &&
+ rx_x < IQK_RX_X_UPPER && rx_x > IQK_RX_X_LOWER &&
+ rx_y < IQK_RX_Y_LMT)
+ return IQK_RX_OK;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] A RX IQK failed\n");
+
+ return 0;
+}
+
+static u8 rtw8703b_iqk_tx_path(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ u8 status;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A TX IQK!\n");
+
+ /* IQK setting */
+ rtw_write32(rtwdev, REG_TXIQK_11N, 0x01007c00);
+ rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800);
+ rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x18008c1c);
+ rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8214030f);
+ rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28110000);
+ rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x82110000);
+ rtw_write32(rtwdev, REG_RXIQK_PI_B, 0x28110000);
+
+ /* LO calibration setting */
+ rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x00462911);
+
+ /* leave IQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, 0xffffff00, 0x000000);
+
+ /* PA, PAD setting */
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, 0x800, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x55, 0x7f, 0x7);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x7f, RFREG_MASK, 0xd400);
+
+ rtw8703b_iqk_one_shot(rtwdev, true);
+ status = rtw8703b_iqk_check_tx_failed(rtwdev);
+
+ rtw8703b_iqk_txrx_path_post(rtwdev, backup);
+
+ return status;
+}
+
+static u8 rtw8703b_iqk_rx_path(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ u8 status;
+ u32 tx_x, tx_y;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A RX IQK step 1!\n");
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x67 @A RX IQK1 = 0x%x\n",
+ rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+ rtw_write32(rtwdev, REG_BB_SEL_BTG, 0x99000000);
+
+ /* disable IQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK);
+
+ /* IQK setting */
+ rtw_write32(rtwdev, REG_TXIQK_11N, 0x01007c00);
+ rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800);
+
+ /* path IQK setting */
+ rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x18008c1c);
+ rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x8216000f);
+ rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28110000);
+ rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x28110000);
+ rtw_write32(rtwdev, REG_RXIQK_PI_B, 0x28110000);
+
+ /* LOK setting */
+ rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x0046a911);
+
+ /* RX IQK mode */
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, 0x80000, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x30, RFREG_MASK, 0x30000);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x31, RFREG_MASK, 0x00007);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x32, RFREG_MASK, 0x57db7);
+
+ rtw8703b_iqk_one_shot(rtwdev, true);
+ /* leave IQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, 0xffffff00, 0x000000);
+ status = rtw8703b_iqk_check_tx_failed(rtwdev);
+
+ if (!status)
+ goto restore;
+
+ /* second round */
+ tx_x = rtw_read32_mask(rtwdev, REG_IQK_RES_TX, BIT_MASK_RES_TX);
+ tx_y = rtw_read32_mask(rtwdev, REG_IQK_RES_TY, BIT_MASK_RES_TY);
+
+ rtw_write32(rtwdev, REG_TXIQK_11N, BIT_SET_TXIQK_11N(tx_x, tx_y));
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0xe40 = 0x%x u4tmp = 0x%x\n",
+ rtw_read32(rtwdev, REG_TXIQK_11N),
+ BIT_SET_TXIQK_11N(tx_x, tx_y));
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A RX IQK step 2!\n");
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] 0x67 @A RX IQK 2 = 0x%x\n",
+ rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+
+ /* IQK setting */
+ rtw_write32(rtwdev, REG_RXIQK_11N, 0x01004800);
+ rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x38008c1c);
+ rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x18008c1c);
+ rtw_write32(rtwdev, REG_TX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_RX_IQK_TONE_B, 0x38008c1c);
+ rtw_write32(rtwdev, REG_TXIQK_PI_A_11N, 0x82110000);
+ rtw_write32(rtwdev, REG_RXIQK_PI_A_11N, 0x28160c1f);
+ rtw_write32(rtwdev, REG_TXIQK_PI_B, 0x82110000);
+ rtw_write32(rtwdev, REG_RXIQK_PI_B, 0x28110000);
+
+ /* LO calibration setting */
+ rtw_write32(rtwdev, REG_IQK_AGC_RSP_11N, 0x0046a8d1);
+
+ /* leave IQK mode */
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, 0xffffff00, 0x000000);
+ /* modify RX IQK mode table */
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTWE, 0x80000, 0x1);
+ /* RF_RCK_OS, RF_TXPA_G1, RF_TXPA_G2 */
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x30, RFREG_MASK, 0x30000);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x31, RFREG_MASK, 0x00007);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x32, RFREG_MASK, 0xf7d77);
+
+ /* PA, PAD setting */
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, 0x800, 0x1);
+ rtw_write_rf(rtwdev, RF_PATH_A, 0x55, 0x7f, 0x5);
+
+ rtw8703b_iqk_one_shot(rtwdev, false);
+ status |= rtw8703b_iqk_check_rx_failed(rtwdev);
+
+restore:
+ rtw8703b_iqk_txrx_path_post(rtwdev, backup);
+
+ return status;
+}
+
+static
+void rtw8703b_iqk_one_round(struct rtw_dev *rtwdev, s32 result[][IQK_NR], u8 t,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ u32 i;
+ u8 a_ok;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] IQ Calibration for 1T1R_S0/S1 for %d times\n", t);
+
+ rtw8723x_iqk_path_adda_on(rtwdev, ADDA_ON_VAL_8703B);
+ rtw8703b_iqk_config_mac(rtwdev, backup);
+ rtw_write32_mask(rtwdev, REG_CCK_ANT_SEL_11N, 0x0f000000, 0xf);
+ rtw_write32(rtwdev, REG_BB_RX_PATH_11N, 0x03a05600);
+ rtw_write32(rtwdev, REG_TRMUX_11N, 0x000800e4);
+ rtw_write32(rtwdev, REG_BB_PWR_SAV1_11N, 0x25204000);
+
+ for (i = 0; i < PATH_IQK_RETRY; i++) {
+ a_ok = rtw8703b_iqk_tx_path(rtwdev, backup);
+ if (a_ok == IQK_TX_OK) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] path A TX IQK success!\n");
+ result[t][IQK_S1_TX_X] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_TX,
+ BIT_MASK_RES_TX);
+ result[t][IQK_S1_TX_Y] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_TY,
+ BIT_MASK_RES_TY);
+ break;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A TX IQK fail!\n");
+ result[t][IQK_S1_TX_X] = 0x100;
+ result[t][IQK_S1_TX_Y] = 0x0;
+ }
+
+ for (i = 0; i < PATH_IQK_RETRY; i++) {
+ a_ok = rtw8703b_iqk_rx_path(rtwdev, backup);
+ if (a_ok == (IQK_TX_OK | IQK_RX_OK)) {
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] path A RX IQK success!\n");
+ result[t][IQK_S1_RX_X] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_RX,
+ BIT_MASK_RES_RX);
+ result[t][IQK_S1_RX_Y] =
+ rtw_read32_mask(rtwdev, REG_IQK_RES_RY,
+ BIT_MASK_RES_RY);
+ break;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A RX IQK fail!\n");
+ result[t][IQK_S1_RX_X] = 0x100;
+ result[t][IQK_S1_RX_Y] = 0x0;
+ }
+
+ if (a_ok == 0x0)
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] path A IQK fail!\n");
+
+ rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, RST_IQK);
+ mdelay(1);
+}
+
+static
+void rtw8703b_iqk_fill_a_matrix(struct rtw_dev *rtwdev, const s32 result[])
+{
+ u32 tmp_rx_iqi = 0x40000100 & GENMASK(31, 16);
+ s32 tx1_a, tx1_a_ext;
+ s32 tx1_c, tx1_c_ext;
+ s32 oldval_1;
+ s32 x, y;
+
+ if (result[IQK_S1_TX_X] == 0)
+ return;
+
+ oldval_1 = rtw_read32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE,
+ BIT_MASK_TXIQ_ELM_D);
+
+ x = iqkxy_to_s32(result[IQK_S1_TX_X]);
+ tx1_a = iqk_mult(x, oldval_1, &tx1_a_ext);
+ rtw_write32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE,
+ BIT_MASK_TXIQ_ELM_A, tx1_a);
+ rtw_write32_mask(rtwdev, REG_OFDM_0_ECCA_THRESHOLD,
+ BIT_MASK_OFDM0_EXT_A, tx1_a_ext);
+
+ y = iqkxy_to_s32(result[IQK_S1_TX_Y]);
+ tx1_c = iqk_mult(y, oldval_1, &tx1_c_ext);
+ rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS,
+ BIT_SET_TXIQ_ELM_C1(tx1_c));
+ rtw_write32_mask(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE,
+ BIT_MASK_TXIQ_ELM_C, BIT_SET_TXIQ_ELM_C2(tx1_c));
+ rtw_write32_mask(rtwdev, REG_OFDM_0_ECCA_THRESHOLD,
+ BIT_MASK_OFDM0_EXT_C, tx1_c_ext);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] X = 0x%x, TX1_A = 0x%x, oldval_1 0x%x\n",
+ x, tx1_a, oldval_1);
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] Y = 0x%x, TX1_C = 0x%x\n", y, tx1_c);
+
+ if (result[IQK_S1_RX_X] == 0)
+ return;
+
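+ /* the default upper half of the register is kept, the S1 X and
+ * Y1 fields in the lower half come from the IQK results
+ */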
+ tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_X, result[IQK_S1_RX_X]);
+ tmp_rx_iqi |= FIELD_PREP(BIT_MASK_RXIQ_S1_Y1, result[IQK_S1_RX_Y]);
+ rtw_write32(rtwdev, REG_A_RXIQI, tmp_rx_iqi);
+ rtw_write32_mask(rtwdev, REG_RXIQK_MATRIX_LSB_11N, BIT_MASK_RXIQ_S1_Y2,
+ BIT_SET_RXIQ_S1_Y2(result[IQK_S1_RX_Y]));
+}
+
+static void rtw8703b_phy_calibration(struct rtw_dev *rtwdev)
+{
+ /* For some reason path A is called S1 and path B is called S0
+ * in the shared rtw88 calibration data.
+ */
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw8723x_iqk_backup_regs backup;
+ u8 final_candidate = IQK_ROUND_INVALID;
+ s32 result[IQK_ROUND_SIZE][IQK_NR];
+ bool good;
+ u8 i, j;
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] Start!\n");
+
+ memset(result, 0, sizeof(result));
+
+ rtw8723x_iqk_backup_path_ctrl(rtwdev, &backup);
+ rtw8723x_iqk_backup_lte_path_gnt(rtwdev, &backup);
+ rtw8723x_iqk_backup_regs(rtwdev, &backup);
+
+ for (i = IQK_ROUND_0; i <= IQK_ROUND_2; i++) {
+ rtw8723x_iqk_config_path_ctrl(rtwdev);
+ rtw8723x_iqk_config_lte_path_gnt(rtwdev, IQK_LTE_WRITE_VAL_8703B);
+
+ rtw8703b_iqk_one_round(rtwdev, result, i, &backup);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] back to BB mode, load original values!\n");
+ if (i > IQK_ROUND_0)
+ rtw8723x_iqk_restore_regs(rtwdev, &backup);
+ rtw8723x_iqk_restore_lte_path_gnt(rtwdev, &backup);
+ rtw8723x_iqk_restore_path_ctrl(rtwdev, &backup);
+
+ for (j = IQK_ROUND_0; j < i; j++) {
+ good = rtw8723x_iqk_similarity_cmp(rtwdev, result, j, i);
+
+ if (good) {
+ final_candidate = j;
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] cmp %d:%d final_candidate is %x\n",
+ j, i, final_candidate);
+ goto iqk_done;
+ }
+ }
+ }
+
+ if (final_candidate == IQK_ROUND_INVALID) {
+ s32 reg_tmp = 0;
+
+ for (i = 0; i < IQK_NR; i++)
+ reg_tmp += result[IQK_ROUND_HYBRID][i];
+
+ if (reg_tmp != 0) {
+ final_candidate = IQK_ROUND_HYBRID;
+ } else {
+ WARN(1, "IQK failed\n");
+ goto out;
+ }
+ }
+
+iqk_done:
+ /* only path A is calibrated in rtl8703b */
+ rtw8703b_iqk_fill_a_matrix(rtwdev, result[final_candidate]);
+
+ dm_info->iqk.result.s1_x = result[final_candidate][IQK_S1_TX_X];
+ dm_info->iqk.result.s1_y = result[final_candidate][IQK_S1_TX_Y];
+ dm_info->iqk.result.s0_x = result[final_candidate][IQK_S0_TX_X];
+ dm_info->iqk.result.s0_y = result[final_candidate][IQK_S0_TX_Y];
+ dm_info->iqk.done = true;
+
+out:
+ rtw_write32(rtwdev, REG_BB_SEL_BTG, backup.bb_sel_btg);
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] final_candidate is %x\n",
+ final_candidate);
+
+ for (i = IQK_ROUND_0; i < IQK_ROUND_SIZE; i++)
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] Result %u: rege94_s1=%x rege9c_s1=%x regea4_s1=%x regeac_s1=%x rege94_s0=%x rege9c_s0=%x regea4_s0=%x regeac_s0=%x %s\n",
+ i,
+ result[i][0], result[i][1], result[i][2], result[i][3],
+ result[i][4], result[i][5], result[i][6], result[i][7],
+ final_candidate == i ? "(final candidate)" : "");
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] 0xc80 = 0x%x 0xc94 = 0x%x 0xc14 = 0x%x 0xca0 = 0x%x\n",
+ rtw_read32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE),
+ rtw_read32(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N),
+ rtw_read32(rtwdev, REG_A_RXIQI),
+ rtw_read32(rtwdev, REG_RXIQK_MATRIX_LSB_11N));
+ rtw_dbg(rtwdev, RTW_DBG_RFK,
+ "[IQK] 0xcd0 = 0x%x 0xcd4 = 0x%x 0xcd8 = 0x%x\n",
+ rtw_read32(rtwdev, REG_TXIQ_AB_S0),
+ rtw_read32(rtwdev, REG_TXIQ_CD_S0),
+ rtw_read32(rtwdev, REG_RXIQ_AB_S0));
+
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] Finished.\n");
+}
+
+static void rtw8703b_set_iqk_matrix_by_result(struct rtw_dev *rtwdev,
+ u32 ofdm_swing, u8 rf_path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s32 ele_A, ele_D, ele_C;
+ s32 ele_A_ext, ele_C_ext, ele_D_ext;
+ s32 iqk_result_x;
+ s32 iqk_result_y;
+ s32 value32;
+
+ switch (rf_path) {
+ default:
+ case RF_PATH_A:
+ iqk_result_x = dm_info->iqk.result.s1_x;
+ iqk_result_y = dm_info->iqk.result.s1_y;
+ break;
+ case RF_PATH_B:
+ iqk_result_x = dm_info->iqk.result.s0_x;
+ iqk_result_y = dm_info->iqk.result.s0_y;
+ break;
+ }
+
+ /* new element D */
+ ele_D = OFDM_SWING_D(ofdm_swing);
+ iqk_mult(iqk_result_x, ele_D, &ele_D_ext);
+ /* new element A */
+ iqk_result_x = iqkxy_to_s32(iqk_result_x);
+ ele_A = iqk_mult(iqk_result_x, ele_D, &ele_A_ext);
+ /* new element C */
+ iqk_result_y = iqkxy_to_s32(iqk_result_y);
+ ele_C = iqk_mult(iqk_result_y, ele_D, &ele_C_ext);
+
+ switch (rf_path) {
+ case RF_PATH_A:
+ default:
+ /* write new elements A, C, D, and element B is always 0 */
+ value32 = BIT_SET_TXIQ_ELM_ACD(ele_A, ele_C, ele_D);
+ rtw_write32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, value32);
+ value32 = BIT_SET_TXIQ_ELM_C1(ele_C);
+ rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS,
+ value32);
+ value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD);
+ value32 &= ~BIT_MASK_OFDM0_EXTS;
+ value32 |= BIT_SET_OFDM0_EXTS(ele_A_ext, ele_C_ext, ele_D_ext);
+ rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32);
+ break;
+
+ case RF_PATH_B:
+ /* write new elements A, C, D, and element B is always 0 */
+ value32 = BIT_SET_TXIQ_ELM_ACD(ele_A, ele_C, ele_D);
+ rtw_write32(rtwdev, REG_OFDM_0_XB_TX_IQ_IMBALANCE, value32);
+ value32 = BIT_SET_TXIQ_ELM_C1(ele_C);
+ rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXB_LSB2_11N, MASKH4BITS,
+ value32);
+ value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD);
+ value32 &= ~BIT_MASK_OFDM0_EXTS_B;
+ value32 |= BIT_SET_OFDM0_EXTS_B(ele_A_ext, ele_C_ext, ele_D_ext);
+ rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32);
+ break;
+ }
+}
+
+static void rtw8703b_set_iqk_matrix(struct rtw_dev *rtwdev, s8 ofdm_index,
+ u8 rf_path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s32 value32;
+ u32 ofdm_swing;
+
+ ofdm_index = clamp_t(s8, ofdm_index, 0, RTW_OFDM_SWING_TABLE_SIZE - 1);
+
+ ofdm_swing = rtw8703b_ofdm_swing_table[ofdm_index];
+
+ if (dm_info->iqk.done) {
+ rtw8703b_set_iqk_matrix_by_result(rtwdev, ofdm_swing, rf_path);
+ return;
+ }
+
+ switch (rf_path) {
+ case RF_PATH_A:
+ default:
+ rtw_write32(rtwdev, REG_OFDM_0_XA_TX_IQ_IMBALANCE, ofdm_swing);
+ rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXA_LSB2_11N, MASKH4BITS,
+ 0x00);
+
+ value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD);
+ value32 &= ~BIT_MASK_OFDM0_EXTS;
+ rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32);
+ break;
+
+ case RF_PATH_B:
+ rtw_write32(rtwdev, REG_OFDM_0_XB_TX_IQ_IMBALANCE, ofdm_swing);
+ rtw_write32_mask(rtwdev, REG_TXIQK_MATRIXB_LSB2_11N, MASKH4BITS,
+ 0x00);
+
+ value32 = rtw_read32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD);
+ value32 &= ~BIT_MASK_OFDM0_EXTS_B;
+ rtw_write32(rtwdev, REG_OFDM_0_ECCA_THRESHOLD, value32);
+ break;
+ }
+}
+
+static void rtw8703b_pwrtrack_set_ofdm_pwr(struct rtw_dev *rtwdev, s8 swing_idx,
+ s8 txagc_idx)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ dm_info->txagc_remnant_ofdm = txagc_idx;
+
+ /* Only path A is calibrated for rtl8703b */
+ rtw8703b_set_iqk_matrix(rtwdev, swing_idx, RF_PATH_A);
+}
+
+static void rtw8703b_pwrtrack_set_cck_pwr(struct rtw_dev *rtwdev, s8 swing_idx,
+ s8 txagc_idx)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ dm_info->txagc_remnant_cck = txagc_idx;
+
+ swing_idx = clamp_t(s8, swing_idx, 0, RTW_CCK_SWING_TABLE_SIZE - 1);
+
+ BUILD_BUG_ON(ARRAY_SIZE(rtw8703b_cck_pwr_regs)
+ != ARRAY_SIZE(rtw8703b_cck_swing_table[0]));
+
+ for (int i = 0; i < ARRAY_SIZE(rtw8703b_cck_pwr_regs); i++)
+ rtw_write8(rtwdev, rtw8703b_cck_pwr_regs[i],
+ rtw8703b_cck_swing_table[swing_idx][i]);
+}
+
+static void rtw8703b_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 limit_ofdm;
+ u8 limit_cck = 21;
+ s8 final_ofdm_swing_index;
+ s8 final_cck_swing_index;
+
+ limit_ofdm = rtw8723x_pwrtrack_get_limit_ofdm(rtwdev);
+
+ final_ofdm_swing_index = dm_info->default_ofdm_index +
+ dm_info->delta_power_index[path];
+ final_cck_swing_index = dm_info->default_cck_index +
+ dm_info->delta_power_index[path];
+
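+ /* If the swing index exceeds the table limit, cap it and carry
+ * the remainder into the TX AGC remnant; negative indexes are
+ * clamped to 0 the same way.
+ */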
+ if (final_ofdm_swing_index > limit_ofdm)
+ rtw8703b_pwrtrack_set_ofdm_pwr(rtwdev, limit_ofdm,
+ final_ofdm_swing_index - limit_ofdm);
+ else if (final_ofdm_swing_index < 0)
+ rtw8703b_pwrtrack_set_ofdm_pwr(rtwdev, 0,
+ final_ofdm_swing_index);
+ else
+ rtw8703b_pwrtrack_set_ofdm_pwr(rtwdev, final_ofdm_swing_index, 0);
+
+ if (final_cck_swing_index > limit_cck)
+ rtw8703b_pwrtrack_set_cck_pwr(rtwdev, limit_cck,
+ final_cck_swing_index - limit_cck);
+ else if (final_cck_swing_index < 0)
+ rtw8703b_pwrtrack_set_cck_pwr(rtwdev, 0,
+ final_cck_swing_index);
+ else
+ rtw8703b_pwrtrack_set_cck_pwr(rtwdev, final_cck_swing_index, 0);
+
+ rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
+}
+
+static void rtw8703b_phy_pwrtrack(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ struct rtw_swing_table swing_table;
+ u8 thermal_value, delta, path;
+ bool do_iqk = false;
+
+ rtw_phy_config_swing_table(rtwdev, &swing_table);
+
+ if (rtwdev->efuse.thermal_meter[0] == 0xff)
+ return;
+
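+ /* the thermal value is read from bits 15:10 of RF_T_METER */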
+ thermal_value = rtw_read_rf(rtwdev, RF_PATH_A, RF_T_METER, 0xfc00);
+
+ rtw_phy_pwrtrack_avg(rtwdev, thermal_value, RF_PATH_A);
+
+ do_iqk = rtw_phy_pwrtrack_need_iqk(rtwdev);
+
+ if (do_iqk)
+ rtw8723x_lck(rtwdev);
+
+ if (dm_info->pwr_trk_init_trigger)
+ dm_info->pwr_trk_init_trigger = false;
+ else if (!rtw_phy_pwrtrack_thermal_changed(rtwdev, thermal_value,
+ RF_PATH_A))
+ goto iqk;
+
+ delta = rtw_phy_pwrtrack_get_delta(rtwdev, RF_PATH_A);
+
+ delta = min_t(u8, delta, RTW_PWR_TRK_TBL_SZ - 1);
+
+ for (path = 0; path < rtwdev->hal.rf_path_num; path++) {
+ s8 delta_cur, delta_last;
+
+ delta_last = dm_info->delta_power_index[path];
+ delta_cur = rtw_phy_pwrtrack_get_pwridx(rtwdev, &swing_table,
+ path, RF_PATH_A, delta);
+ if (delta_last == delta_cur)
+ continue;
+
+ dm_info->delta_power_index[path] = delta_cur;
+ rtw8703b_pwrtrack_set(rtwdev, path);
+ }
+
+ rtw8723x_pwrtrack_set_xtal(rtwdev, RF_PATH_A, delta);
+
+iqk:
+ if (do_iqk)
+ rtw8703b_phy_calibration(rtwdev);
+}
+
+static void rtw8703b_pwr_track(struct rtw_dev *rtwdev)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ if (efuse->power_track_type != 0) {
+ rtw_warn(rtwdev, "unsupported power track type");
+ return;
+ }
+
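+ /* The first call only starts the thermal meter, the
+ * measurement is processed on the next call.
+ */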
+ if (!dm_info->pwr_trk_triggered) {
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_T_METER,
+ GENMASK(17, 16), 0x03);
+ dm_info->pwr_trk_triggered = true;
+ return;
+ }
+
+ rtw8703b_phy_pwrtrack(rtwdev);
+ dm_info->pwr_trk_triggered = false;
+}
+
+static void rtw8703b_coex_set_gnt_fix(struct rtw_dev *rtwdev)
+{
+}
+
+static void rtw8703b_coex_set_gnt_debug(struct rtw_dev *rtwdev)
+{
+}
+
+static void rtw8703b_coex_set_rfe_type(struct rtw_dev *rtwdev)
+{
+ struct rtw_coex *coex = &rtwdev->coex;
+ struct rtw_coex_rfe *coex_rfe = &coex->rfe;
+
+ coex_rfe->rfe_module_type = rtwdev->efuse.rfe_option;
+ coex_rfe->ant_switch_polarity = 0;
+ coex_rfe->ant_switch_exist = false;
+ coex_rfe->ant_switch_with_bt = false;
+ coex_rfe->ant_switch_diversity = false;
+ coex_rfe->wlg_at_btg = true;
+
+ /* disable LTE coex on wifi side */
+ rtw_coex_write_indirect_reg(rtwdev, LTE_COEX_CTRL, BIT_LTE_COEX_EN, 0x0);
+ rtw_coex_write_indirect_reg(rtwdev, LTE_WL_TRX_CTRL, MASKLWORD, 0xffff);
+ rtw_coex_write_indirect_reg(rtwdev, LTE_BT_TRX_CTRL, MASKLWORD, 0xffff);
+}
+
+static void rtw8703b_coex_set_wl_tx_power(struct rtw_dev *rtwdev, u8 wl_pwr)
+{
+}
+
+static void rtw8703b_coex_set_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
+{
+}
+
+static const u8 rtw8703b_pwrtrk_2gb_n[] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6,
+ 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11
+};
+
+static const u8 rtw8703b_pwrtrk_2gb_p[] = {
+ 0, 1, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 7,
+ 8, 8, 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15
+};
+
+static const u8 rtw8703b_pwrtrk_2ga_n[] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6,
+ 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11
+};
+
+static const u8 rtw8703b_pwrtrk_2ga_p[] = {
+ 0, 1, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 7,
+ 8, 8, 9, 9, 10, 10, 11, 11, 12, 13, 13, 14, 14, 15, 15
+};
+
+static const u8 rtw8703b_pwrtrk_2g_cck_b_n[] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6,
+ 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11
+};
+
+static const u8 rtw8703b_pwrtrk_2g_cck_b_p[] = {
+ 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6, 6, 6,
+ 7, 7, 8, 8, 8, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13
+};
+
+static const u8 rtw8703b_pwrtrk_2g_cck_a_n[] = {
+ 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6,
+ 7, 7, 7, 7, 8, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11
+};
+
+static const u8 rtw8703b_pwrtrk_2g_cck_a_p[] = {
+ 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 6, 6, 6,
+ 7, 7, 8, 8, 8, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13
+};
+
+static const s8 rtw8703b_pwrtrk_xtal_n[] = {
+ 0, 0, 0, -1, -1, -1, -1, -2, -2, -2, -3, -3, -3, -3, -3,
+ -4, -2, -2, -1, -1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1
+};
+
+static const s8 rtw8703b_pwrtrk_xtal_p[] = {
+ 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 1, 0, -1, -1, -1,
+ -2, -3, -7, -9, -10, -11, -14, -16, -18, -20, -22, -24, -26, -28, -30
+};
+
+static const struct rtw_pwr_track_tbl rtw8703b_rtw_pwr_track_tbl = {
+ .pwrtrk_2gb_n = rtw8703b_pwrtrk_2gb_n,
+ .pwrtrk_2gb_p = rtw8703b_pwrtrk_2gb_p,
+ .pwrtrk_2ga_n = rtw8703b_pwrtrk_2ga_n,
+ .pwrtrk_2ga_p = rtw8703b_pwrtrk_2ga_p,
+ .pwrtrk_2g_cckb_n = rtw8703b_pwrtrk_2g_cck_b_n,
+ .pwrtrk_2g_cckb_p = rtw8703b_pwrtrk_2g_cck_b_p,
+ .pwrtrk_2g_ccka_n = rtw8703b_pwrtrk_2g_cck_a_n,
+ .pwrtrk_2g_ccka_p = rtw8703b_pwrtrk_2g_cck_a_p,
+ .pwrtrk_xtal_n = rtw8703b_pwrtrk_xtal_n,
+ .pwrtrk_xtal_p = rtw8703b_pwrtrk_xtal_p,
+};
+
+/* Shared-Antenna Coex Table */
+static const struct coex_table_para table_sant_8703b[] = {
+ {0xffffffff, 0xffffffff}, /* case-0 */
+ {0x55555555, 0x55555555},
+ {0x66555555, 0x66555555},
+ {0xaaaaaaaa, 0xaaaaaaaa},
+ {0x5a5a5a5a, 0x5a5a5a5a},
+ {0xfafafafa, 0xfafafafa}, /* case-5 */
+ {0x6a5a5555, 0xaaaaaaaa},
+ {0x6a5a56aa, 0x6a5a56aa},
+ {0x6a5a5a5a, 0x6a5a5a5a},
+ {0x66555555, 0x5a5a5a5a},
+ {0x66555555, 0x6a5a5a5a}, /* case-10 */
+ {0x66555555, 0x6a5a5aaa},
+ {0x66555555, 0x5a5a5aaa},
+ {0x66555555, 0x6aaa5aaa},
+ {0x66555555, 0xaaaa5aaa},
+ {0x66555555, 0xaaaaaaaa}, /* case-15 */
+ {0xffff55ff, 0xfafafafa},
+ {0xffff55ff, 0x6afa5afa},
+ {0xaaffffaa, 0xfafafafa},
+ {0xaa5555aa, 0x5a5a5a5a},
+ {0xaa5555aa, 0x6a5a5a5a}, /* case-20 */
+ {0xaa5555aa, 0xaaaaaaaa},
+ {0xffffffff, 0x5a5a5a5a},
+ {0xffffffff, 0x5a5a5a5a},
+ {0xffffffff, 0x55555555},
+ {0xffffffff, 0x5a5a5aaa}, /* case-25 */
+ {0x55555555, 0x5a5a5a5a},
+ {0x55555555, 0xaaaaaaaa},
+ {0x55555555, 0x6a5a6a5a},
+ {0x66556655, 0x66556655},
+ {0x66556aaa, 0x6a5a6aaa}, /* case-30 */
+ {0xffffffff, 0x5aaa5aaa},
+ {0x56555555, 0x5a5a5aaa},
+};
+
+/* Shared-Antenna TDMA */
+static const struct coex_tdma_para tdma_sant_8703b[] = {
+ { {0x00, 0x00, 0x00, 0x00, 0x00} }, /* case-0 */
+ { {0x61, 0x45, 0x03, 0x11, 0x11} }, /* case-1 */
+ { {0x61, 0x3a, 0x03, 0x11, 0x11} },
+ { {0x61, 0x30, 0x03, 0x11, 0x11} },
+ { {0x61, 0x20, 0x03, 0x11, 0x11} },
+ { {0x61, 0x10, 0x03, 0x11, 0x11} }, /* case-5 */
+ { {0x61, 0x45, 0x03, 0x11, 0x10} },
+ { {0x61, 0x3a, 0x03, 0x11, 0x10} },
+ { {0x61, 0x30, 0x03, 0x11, 0x10} },
+ { {0x61, 0x20, 0x03, 0x11, 0x10} },
+ { {0x61, 0x10, 0x03, 0x11, 0x10} }, /* case-10 */
+ { {0x61, 0x08, 0x03, 0x11, 0x14} },
+ { {0x61, 0x08, 0x03, 0x10, 0x14} },
+ { {0x51, 0x08, 0x03, 0x10, 0x54} },
+ { {0x51, 0x08, 0x03, 0x10, 0x55} },
+ { {0x51, 0x08, 0x07, 0x10, 0x54} }, /* case-15 */
+ { {0x51, 0x45, 0x03, 0x10, 0x50} },
+ { {0x51, 0x3a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x30, 0x03, 0x10, 0x50} },
+ { {0x51, 0x20, 0x03, 0x10, 0x50} },
+ { {0x51, 0x10, 0x03, 0x10, 0x50} }, /* case-20 */
+ { {0x51, 0x4a, 0x03, 0x10, 0x50} },
+ { {0x51, 0x0c, 0x03, 0x10, 0x54} },
+ { {0x55, 0x08, 0x03, 0x10, 0x54} },
+ { {0x65, 0x10, 0x03, 0x11, 0x10} },
+ { {0x51, 0x10, 0x03, 0x10, 0x51} }, /* case-25 */
+ { {0x51, 0x08, 0x03, 0x10, 0x50} },
+ { {0x61, 0x08, 0x03, 0x11, 0x11} },
+};
+
+static struct rtw_chip_ops rtw8703b_ops = {
+ .mac_init = rtw8723x_mac_init,
+ .dump_fw_crash = NULL,
+ .shutdown = NULL,
+ .read_efuse = rtw8703b_read_efuse,
+ .phy_set_param = rtw8703b_phy_set_param,
+ .set_channel = rtw8703b_set_channel,
+ .query_rx_desc = rtw8703b_query_rx_desc,
+ .read_rf = rtw_phy_read_rf_sipi,
+ .write_rf = rtw_phy_write_rf_reg_sipi,
+ .set_tx_power_index = rtw8723x_set_tx_power_index,
+ .set_antenna = NULL,
+ .cfg_ldo25 = rtw8723x_cfg_ldo25,
+ .efuse_grant = rtw8723x_efuse_grant,
+ .false_alarm_statistics = rtw8723x_false_alarm_statistics,
+ .phy_calibration = rtw8703b_phy_calibration,
+ .dpk_track = NULL,
+ /* 8723d uses REG_CSRATIO to set dm_info.cck_pd_default, which
+ * is used in its cck_pd_set function. According to comments in
+ * the vendor driver code that register doesn't exist in this
+ * chip generation; there is only 0xa0a ("ODM_CCK_PD_THRESH"),
+ * which is only *written* to.
+ */
+ .cck_pd_set = NULL,
+ .pwr_track = rtw8703b_pwr_track,
+ .config_bfee = NULL,
+ .set_gid_table = NULL,
+ .cfg_csi_rate = NULL,
+ .adaptivity_init = NULL,
+ .adaptivity = NULL,
+ .cfo_init = NULL,
+ .cfo_track = NULL,
+ .config_tx_path = NULL,
+ .config_txrx_mode = NULL,
+ .fill_txdesc_checksum = rtw8723x_fill_txdesc_checksum,
+
+ /* for coex */
+ .coex_set_init = rtw8723x_coex_cfg_init,
+ .coex_set_ant_switch = NULL,
+ .coex_set_gnt_fix = rtw8703b_coex_set_gnt_fix,
+ .coex_set_gnt_debug = rtw8703b_coex_set_gnt_debug,
+ .coex_set_rfe_type = rtw8703b_coex_set_rfe_type,
+ .coex_set_wl_tx_power = rtw8703b_coex_set_wl_tx_power,
+ .coex_set_wl_rx_gain = rtw8703b_coex_set_wl_rx_gain,
+};
+
+const struct rtw_chip_info rtw8703b_hw_spec = {
+ .ops = &rtw8703b_ops,
+ .id = RTW_CHIP_TYPE_8703B,
+
+ .fw_name = "rtw88/rtw8703b_fw.bin",
+ .wlan_cpu = RTW_WCPU_11N,
+ .tx_pkt_desc_sz = 40,
+ .tx_buf_desc_sz = 16,
+ .rx_pkt_desc_sz = 24,
+ .rx_buf_desc_sz = 8,
+ .phy_efuse_size = 256,
+ .log_efuse_size = 512,
+ .ptct_efuse_size = 15,
+ .txff_size = 32768,
+ .rxff_size = 16384,
+ .rsvd_drv_pg_num = 8,
+ .band = RTW_BAND_2G,
+ .page_size = TX_PAGE_SIZE,
+ .csi_buf_pg_num = 0,
+ .dig_min = 0x20,
+ .txgi_factor = 1,
+ .is_pwr_by_rate_dec = true,
+ .rx_ldpc = false,
+ .tx_stbc = false,
+ .max_power_index = 0x3f,
+ .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
+ .usb_tx_agg_desc_num = 1, /* Not sure if this chip has a USB interface */
+
+ .path_div_supported = false,
+ .ht_supported = true,
+ .vht_supported = false,
+ .lps_deep_mode_supported = 0,
+
+ .sys_func_en = 0xFD,
+ .pwr_on_seq = card_enable_flow_8703b,
+ .pwr_off_seq = card_disable_flow_8703b,
+ .rqpn_table = rqpn_table_8703b,
+ .prioq_addrs = &rtw8723x_common.prioq_addrs,
+ .page_table = page_table_8703b,
+ /* used only in pci.c, not needed for SDIO devices */
+ .intf_table = NULL,
+
+ .dig = rtw8723x_common.dig,
+ .dig_cck = rtw8723x_common.dig_cck,
+
+ .rf_sipi_addr = {0x840, 0x844},
+ .rf_sipi_read_addr = rtw8723x_common.rf_sipi_addr,
+ .fix_rf_phy_num = 2,
+ .ltecoex_addr = &rtw8723x_common.ltecoex_addr,
+
+ .mac_tbl = &rtw8703b_mac_tbl,
+ .agc_tbl = &rtw8703b_agc_tbl,
+ .bb_tbl = &rtw8703b_bb_tbl,
+ .rf_tbl = {&rtw8703b_rf_a_tbl},
+
+ .rfe_defs = rtw8703b_rfe_defs,
+ .rfe_defs_size = ARRAY_SIZE(rtw8703b_rfe_defs),
+
+ .iqk_threshold = 8,
+ .pwr_track_tbl = &rtw8703b_rtw_pwr_track_tbl,
+
+ /* WOWLAN firmware exists, but not implemented yet */
+ .wow_fw_name = "rtw88/rtw8703b_wow_fw.bin",
+ .wowlan_stub = NULL,
+ .max_scan_ie_len = IEEE80211_MAX_DATA_LEN,
+
+ /* The vendor driver uses a date-based version format; this was
+ * converted from 20180330.
+ */
+ .coex_para_ver = 0x0133ed6a,
+ .bt_desired_ver = 0x1c,
+ .scbd_support = true,
+ .new_scbd10_def = true,
+ .ble_hid_profile_support = false,
+ .wl_mimo_ps_support = false,
+ .pstdma_type = COEX_PSTDMA_FORCE_LPSOFF,
+ .bt_rssi_type = COEX_BTRSSI_RATIO,
+ .ant_isolation = 15,
+ .rssi_tolerance = 2,
+ .bt_rssi_step = bt_rssi_step_8703b,
+ .wl_rssi_step = wl_rssi_step_8703b,
+ /* sant -> shared antenna, nsant -> non-shared antenna.
+ * Not sure if 8703b versions with non-shared antenna even exist.
+ */
+ .table_sant_num = ARRAY_SIZE(table_sant_8703b),
+ .table_sant = table_sant_8703b,
+ .table_nsant_num = 0,
+ .table_nsant = NULL,
+ .tdma_sant_num = ARRAY_SIZE(tdma_sant_8703b),
+ .tdma_sant = tdma_sant_8703b,
+ .tdma_nsant_num = 0,
+ .tdma_nsant = NULL,
+ .wl_rf_para_num = ARRAY_SIZE(rf_para_tx_8703b),
+ .wl_rf_para_tx = rf_para_tx_8703b,
+ .wl_rf_para_rx = rf_para_rx_8703b,
+ .bt_afh_span_bw20 = 0x20,
+ .bt_afh_span_bw40 = 0x30,
+ .afh_5g_num = ARRAY_SIZE(afh_5g_8703b),
+ .afh_5g = afh_5g_8703b,
+ /* REG_BTG_SEL doesn't seem to have a counterpart in the
+ * vendor driver. Mathematically it's REG_PAD_CTRL1 + 3.
+ *
+ * It is used in the cardemu_to_act power sequence though (by
+ * address, 0x0067), with the comment: "0x67[0] = 0 to disable
+ * BT_GPS_SEL pins". That seems to fit.
+ */
+ .btg_reg = NULL,
+ /* These registers are read (and their values printed) if
+ * CONFIG_RTW88_DEBUGFS is enabled.
+ */
+ .coex_info_hw_regs_num = 0,
+ .coex_info_hw_regs = NULL,
+};
+EXPORT_SYMBOL(rtw8703b_hw_spec);
+
+MODULE_FIRMWARE("rtw88/rtw8703b_fw.bin");
+MODULE_FIRMWARE("rtw88/rtw8703b_wow_fw.bin");
+
+MODULE_AUTHOR("Fiona Klute <fiona.klute@gmx.de>");
+MODULE_DESCRIPTION("Realtek 802.11n wireless 8703b driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.h b/drivers/net/wireless/realtek/rtw88/rtw8703b.h
new file mode 100644
index 000000000000..3e2da2e6739d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright Fiona Klute <fiona.klute@gmx.de> */
+
+#ifndef __RTW8703B_H__
+#define __RTW8703B_H__
+
+#include "rtw8723x.h"
+
+extern const struct rtw_chip_info rtw8703b_hw_spec;
+
+/* phy status parsing */
+#define VGA_BITS GENMASK(4, 0)
+#define LNA_L_BITS GENMASK(7, 5)
+#define LNA_H_BIT BIT(7)
+/* masks for assembling LNA index from high and low bits */
+#define BIT_LNA_H_MASK BIT(3)
+#define BIT_LNA_L_MASK GENMASK(2, 0)
+
+struct phy_rx_agc_info {
+#ifdef __LITTLE_ENDIAN
+ u8 gain: 7;
+ u8 trsw: 1;
+#else
+ u8 trsw: 1;
+ u8 gain: 7;
+#endif
+} __packed;
+
+/* This struct is called phy_status_rpt_8192cd in the vendor driver,
+ * there might be potential to share it with drivers for other chips
+ * of the same generation.
+ */
+struct phy_status_8703b {
+ struct phy_rx_agc_info path_agc[2];
+ u8 ch_corr[2];
+ u8 cck_sig_qual_ofdm_pwdb_all;
+ /* for CCK: bits 0:4: VGA index, bits 5:7: LNA index (low) */
+ u8 cck_agc_rpt_ofdm_cfosho_a;
+ /* for CCK: bit 7 is high bit of LNA index if long report type */
+ u8 cck_rpt_b_ofdm_cfosho_b;
+ u8 reserved_1;
+ u8 noise_power_db_msb;
+ s8 path_cfotail[2];
+ u8 pcts_mask[2];
+ s8 stream_rxevm[2];
+ u8 path_rxsnr[2];
+ u8 noise_power_db_lsb;
+ u8 reserved_2[3];
+ u8 stream_csi[2];
+ u8 stream_target_csi[2];
+ s8 sig_evm;
+ u8 reserved_3;
+
+#ifdef __LITTLE_ENDIAN
+ u8 antsel_rx_keep_2: 1;
+ u8 sgi_en: 1;
+ u8 rxsc: 2;
+ u8 idle_long: 1;
+ u8 r_ant_train_en: 1;
+ u8 ant_sel_b: 1;
+ u8 ant_sel: 1;
+#else /* __BIG_ENDIAN */
+ u8 ant_sel: 1;
+ u8 ant_sel_b: 1;
+ u8 r_ant_train_en: 1;
+ u8 idle_long: 1;
+ u8 rxsc: 2;
+ u8 sgi_en: 1;
+ u8 antsel_rx_keep_2: 1;
+#endif
+} __packed;
+
+/* Baseband registers */
+#define REG_BB_PWR_SAV5_11N 0x0818
+/* BIT(11) should be 1 for 8703B *and* 8723D, which means the LNA uses
+ * 4 bits for CCK rates in the report, not 3. The vendor driver logs a
+ * warning if it's 0, but handles the case.
+ *
+ * The purpose of other parts of this register is unknown; the 8723cs
+ * driver code indicates some other chips use certain bits for antenna
+ * diversity.
+ */
+#define REG_BB_AMP 0x0950
+#define BIT_MASK_RX_LNA (BIT(11))
+
+/* 0xaXX: 40MHz channel settings */
+#define REG_CCK_TXSF2 0x0a24 /* CCK TX filter 2 */
+#define REG_CCK_DBG 0x0a28 /* debug port */
+#define REG_OFDM0_A_TX_AFE 0x0c84
+#define REG_TXIQK_MATRIXB_LSB2_11N 0x0c9c
+#define REG_OFDM0_TX_PSD_NOISE 0x0ce4 /* TX pseudo noise weighting */
+#define REG_IQK_RDY 0x0e90 /* is != 0 when IQK is done */
+
+/* RF registers */
+#define RF_RCK1 0x1E
+
+#define AGG_BURST_NUM 3
+#define AGG_BURST_SIZE 0 /* 1K */
+#define BIT_MASK_AGG_BURST_NUM (GENMASK(3, 2))
+#define BIT_MASK_AGG_BURST_SIZE (GENMASK(5, 4))
+
+#endif /* __RTW8703B_H__ */
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c b/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c
new file mode 100644
index 000000000000..81020fd907aa
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.c
@@ -0,0 +1,902 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright Fiona Klute <fiona.klute@gmx.de> */
+
+#include "main.h"
+#include "phy.h"
+#include "rtw8703b_tables.h"
+
+static const struct rtw_phy_pg_cfg_pair rtw8703b_bb_pg[] = {
+ { 0, 0, 0, 0x00000e08, 0x0000ff00, 0x00003200, },
+ { 0, 0, 0, 0x0000086c, 0xffffff00, 0x32323200, },
+ { 0, 0, 0, 0x00000e00, 0xffffffff, 0x34363636, },
+ { 0, 0, 0, 0x00000e04, 0xffffffff, 0x28303234, },
+ { 0, 0, 0, 0x00000e10, 0xffffffff, 0x30343434, },
+ { 0, 0, 0, 0x00000e14, 0xffffffff, 0x26262830, },
+};
+
+RTW_DECL_TABLE_BB_PG(rtw8703b_bb_pg);
+
+/* Regd: FCC -> 0, ETSI -> 2, MKK -> 1
+ * Band: 2.4G -> 0, 5G -> 1
+ * Bandwidth (bw): 20M -> 0, 40M -> 1, 80M -> 2, 160M -> 3
+ * Rate Section (rs): CCK -> 0, OFDM -> 1, HT -> 2, VHT -> 3
+ */
+static const struct rtw_txpwr_lmt_cfg_pair rtw8703b_txpwr_lmt[] = {
+ {0, 0, 0, 0, 1, 30},
+ {2, 0, 0, 0, 1, 26},
+ {1, 0, 0, 0, 1, 32},
+ {0, 0, 0, 0, 2, 30},
+ {2, 0, 0, 0, 2, 26},
+ {1, 0, 0, 0, 2, 32},
+ {0, 0, 0, 0, 3, 30},
+ {2, 0, 0, 0, 3, 26},
+ {1, 0, 0, 0, 3, 32},
+ {0, 0, 0, 0, 4, 30},
+ {2, 0, 0, 0, 4, 26},
+ {1, 0, 0, 0, 4, 32},
+ {0, 0, 0, 0, 5, 30},
+ {2, 0, 0, 0, 5, 26},
+ {1, 0, 0, 0, 5, 32},
+ {0, 0, 0, 0, 6, 30},
+ {2, 0, 0, 0, 6, 26},
+ {1, 0, 0, 0, 6, 32},
+ {0, 0, 0, 0, 7, 30},
+ {2, 0, 0, 0, 7, 26},
+ {1, 0, 0, 0, 7, 32},
+ {0, 0, 0, 0, 8, 30},
+ {2, 0, 0, 0, 8, 26},
+ {1, 0, 0, 0, 8, 32},
+ {0, 0, 0, 0, 9, 30},
+ {2, 0, 0, 0, 9, 26},
+ {1, 0, 0, 0, 9, 32},
+ {0, 0, 0, 0, 10, 30},
+ {2, 0, 0, 0, 10, 26},
+ {1, 0, 0, 0, 10, 32},
+ {0, 0, 0, 0, 11, 30},
+ {2, 0, 0, 0, 11, 26},
+ {1, 0, 0, 0, 11, 32},
+ {0, 0, 0, 0, 12, 63},
+ {2, 0, 0, 0, 12, 26},
+ {1, 0, 0, 0, 12, 32},
+ {0, 0, 0, 0, 13, 63},
+ {2, 0, 0, 0, 13, 26},
+ {1, 0, 0, 0, 13, 32},
+ {0, 0, 0, 0, 14, 63},
+ {2, 0, 0, 0, 14, 63},
+ {1, 0, 0, 0, 14, 32},
+ {0, 0, 0, 1, 1, 28},
+ {2, 0, 0, 1, 1, 28},
+ {1, 0, 0, 1, 1, 28},
+ {0, 0, 0, 1, 2, 28},
+ {2, 0, 0, 1, 2, 32},
+ {1, 0, 0, 1, 2, 32},
+ {0, 0, 0, 1, 3, 32},
+ {2, 0, 0, 1, 3, 32},
+ {1, 0, 0, 1, 3, 32},
+ {0, 0, 0, 1, 4, 32},
+ {2, 0, 0, 1, 4, 32},
+ {1, 0, 0, 1, 4, 32},
+ {0, 0, 0, 1, 5, 32},
+ {2, 0, 0, 1, 5, 32},
+ {1, 0, 0, 1, 5, 32},
+ {0, 0, 0, 1, 6, 32},
+ {2, 0, 0, 1, 6, 32},
+ {1, 0, 0, 1, 6, 32},
+ {0, 0, 0, 1, 7, 32},
+ {2, 0, 0, 1, 7, 32},
+ {1, 0, 0, 1, 7, 32},
+ {0, 0, 0, 1, 8, 32},
+ {2, 0, 0, 1, 8, 32},
+ {1, 0, 0, 1, 8, 32},
+ {0, 0, 0, 1, 9, 32},
+ {2, 0, 0, 1, 9, 32},
+ {1, 0, 0, 1, 9, 32},
+ {0, 0, 0, 1, 10, 28},
+ {2, 0, 0, 1, 10, 32},
+ {1, 0, 0, 1, 10, 32},
+ {0, 0, 0, 1, 11, 28},
+ {2, 0, 0, 1, 11, 32},
+ {1, 0, 0, 1, 11, 32},
+ {0, 0, 0, 1, 12, 63},
+ {2, 0, 0, 1, 12, 32},
+ {1, 0, 0, 1, 12, 32},
+ {0, 0, 0, 1, 13, 63},
+ {2, 0, 0, 1, 13, 28},
+ {1, 0, 0, 1, 13, 28},
+ {0, 0, 0, 1, 14, 63},
+ {2, 0, 0, 1, 14, 63},
+ {1, 0, 0, 1, 14, 63},
+ {0, 0, 0, 2, 1, 26},
+ {2, 0, 0, 2, 1, 26},
+ {1, 0, 0, 2, 1, 28},
+ {0, 0, 0, 2, 2, 26},
+ {2, 0, 0, 2, 2, 32},
+ {1, 0, 0, 2, 2, 32},
+ {0, 0, 0, 2, 3, 32},
+ {2, 0, 0, 2, 3, 32},
+ {1, 0, 0, 2, 3, 32},
+ {0, 0, 0, 2, 4, 32},
+ {2, 0, 0, 2, 4, 32},
+ {1, 0, 0, 2, 4, 32},
+ {0, 0, 0, 2, 5, 32},
+ {2, 0, 0, 2, 5, 32},
+ {1, 0, 0, 2, 5, 32},
+ {0, 0, 0, 2, 6, 32},
+ {2, 0, 0, 2, 6, 32},
+ {1, 0, 0, 2, 6, 32},
+ {0, 0, 0, 2, 7, 32},
+ {2, 0, 0, 2, 7, 32},
+ {1, 0, 0, 2, 7, 32},
+ {0, 0, 0, 2, 8, 32},
+ {2, 0, 0, 2, 8, 32},
+ {1, 0, 0, 2, 8, 32},
+ {0, 0, 0, 2, 9, 32},
+ {2, 0, 0, 2, 9, 32},
+ {1, 0, 0, 2, 9, 32},
+ {0, 0, 0, 2, 10, 26},
+ {2, 0, 0, 2, 10, 32},
+ {1, 0, 0, 2, 10, 32},
+ {0, 0, 0, 2, 11, 26},
+ {2, 0, 0, 2, 11, 32},
+ {1, 0, 0, 2, 11, 32},
+ {0, 0, 0, 2, 12, 63},
+ {2, 0, 0, 2, 12, 32},
+ {1, 0, 0, 2, 12, 32},
+ {0, 0, 0, 2, 13, 63},
+ {2, 0, 0, 2, 13, 26},
+ {1, 0, 0, 2, 13, 28},
+ {0, 0, 0, 2, 14, 63},
+ {2, 0, 0, 2, 14, 63},
+ {1, 0, 0, 2, 14, 63},
+ {0, 0, 1, 2, 1, 63},
+ {2, 0, 1, 2, 1, 63},
+ {1, 0, 1, 2, 1, 63},
+ {0, 0, 1, 2, 2, 63},
+ {2, 0, 1, 2, 2, 63},
+ {1, 0, 1, 2, 2, 63},
+ {0, 0, 1, 2, 3, 26},
+ {2, 0, 1, 2, 3, 26},
+ {1, 0, 1, 2, 3, 26},
+ {0, 0, 1, 2, 4, 26},
+ {2, 0, 1, 2, 4, 28},
+ {1, 0, 1, 2, 4, 26},
+ {0, 0, 1, 2, 5, 28},
+ {2, 0, 1, 2, 5, 28},
+ {1, 0, 1, 2, 5, 26},
+ {0, 0, 1, 2, 6, 28},
+ {2, 0, 1, 2, 6, 28},
+ {1, 0, 1, 2, 6, 26},
+ {0, 0, 1, 2, 7, 28},
+ {2, 0, 1, 2, 7, 28},
+ {1, 0, 1, 2, 7, 26},
+ {0, 0, 1, 2, 8, 26},
+ {2, 0, 1, 2, 8, 28},
+ {1, 0, 1, 2, 8, 26},
+ {0, 0, 1, 2, 9, 26},
+ {2, 0, 1, 2, 9, 28},
+ {1, 0, 1, 2, 9, 26},
+ {0, 0, 1, 2, 10, 26},
+ {2, 0, 1, 2, 10, 28},
+ {1, 0, 1, 2, 10, 26},
+ {0, 0, 1, 2, 11, 26},
+ {2, 0, 1, 2, 11, 26},
+ {1, 0, 1, 2, 11, 26},
+ {0, 0, 1, 2, 12, 63},
+ {2, 0, 1, 2, 12, 26},
+ {1, 0, 1, 2, 12, 26},
+ {0, 0, 1, 2, 13, 63},
+ {2, 0, 1, 2, 13, 26},
+ {1, 0, 1, 2, 13, 26},
+ {0, 0, 1, 2, 14, 63},
+ {2, 0, 1, 2, 14, 63},
+ {1, 0, 1, 2, 14, 63},
+};
+
+RTW_DECL_TABLE_TXPWR_LMT(rtw8703b_txpwr_lmt);
+
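+/* MAC register init values as (address, value) pairs */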
+static const u32 rtw8703b_mac[] = {
+ 0x02F, 0x00000030,
+ 0x035, 0x00000000,
+ 0x067, 0x00000002,
+ 0x092, 0x00000080,
+ 0x421, 0x0000000F,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000000,
+ 0x432, 0x00000000,
+ 0x433, 0x00000001,
+ 0x434, 0x00000002,
+ 0x435, 0x00000003,
+ 0x436, 0x00000005,
+ 0x437, 0x00000007,
+ 0x438, 0x00000000,
+ 0x439, 0x00000000,
+ 0x43A, 0x00000000,
+ 0x43B, 0x00000001,
+ 0x43C, 0x00000002,
+ 0x43D, 0x00000003,
+ 0x43E, 0x00000005,
+ 0x43F, 0x00000007,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000010,
+ 0x445, 0x00000000,
+ 0x446, 0x00000000,
+ 0x447, 0x00000000,
+ 0x448, 0x00000000,
+ 0x449, 0x000000F0,
+ 0x44A, 0x0000000F,
+ 0x44B, 0x0000003E,
+ 0x44C, 0x00000010,
+ 0x44D, 0x00000000,
+ 0x44E, 0x00000000,
+ 0x44F, 0x00000000,
+ 0x450, 0x00000000,
+ 0x451, 0x000000F0,
+ 0x452, 0x0000000F,
+ 0x453, 0x00000000,
+ 0x456, 0x0000005E,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x525, 0x0000004F,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55C, 0x00000028,
+ 0x55D, 0x000000FF,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x638, 0x00000028,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000C,
+ 0x63F, 0x0000000C,
+ 0x640, 0x00000040,
+ 0x642, 0x00000040,
+ 0x643, 0x00000000,
+ 0x652, 0x000000C8,
+ 0x66A, 0x000000B0,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+ 0x765, 0x00000018,
+ 0x76E, 0x00000004,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8703b_mac, rtw_phy_cfg_mac);
+
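+/* AGC gain table: (register, value) pairs, written mostly to 0xC78
+ * with a final toggle of 0xC50.
+ */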
+static const u32 rtw8703b_agc[] = {
+ 0xC78, 0xFC000101,
+ 0xC78, 0xFB010101,
+ 0xC78, 0xFA020101,
+ 0xC78, 0xF9030101,
+ 0xC78, 0xF8040101,
+ 0xC78, 0xF7050101,
+ 0xC78, 0xF6060101,
+ 0xC78, 0xF5070101,
+ 0xC78, 0xF4080101,
+ 0xC78, 0xF3090101,
+ 0xC78, 0xF20A0101,
+ 0xC78, 0xF10B0101,
+ 0xC78, 0xF00C0101,
+ 0xC78, 0xEF0D0101,
+ 0xC78, 0xEE0E0101,
+ 0xC78, 0xED0F0101,
+ 0xC78, 0xEC100101,
+ 0xC78, 0xEB110101,
+ 0xC78, 0xEA120101,
+ 0xC78, 0xE9130101,
+ 0xC78, 0xE8140101,
+ 0xC78, 0xE7150101,
+ 0xC78, 0xE6160101,
+ 0xC78, 0xE5170101,
+ 0xC78, 0xE4180101,
+ 0xC78, 0xE3190101,
+ 0xC78, 0x661A0101,
+ 0xC78, 0x651B0101,
+ 0xC78, 0x641C0101,
+ 0xC78, 0x631D0101,
+ 0xC78, 0x071E0101,
+ 0xC78, 0x061F0101,
+ 0xC78, 0x05200101,
+ 0xC78, 0x04210101,
+ 0xC78, 0x03220101,
+ 0xC78, 0xE8230001,
+ 0xC78, 0xE7240001,
+ 0xC78, 0xE6250001,
+ 0xC78, 0xE5260001,
+ 0xC78, 0xE4270001,
+ 0xC78, 0x89280001,
+ 0xC78, 0x88290001,
+ 0xC78, 0x872A0001,
+ 0xC78, 0x862B0001,
+ 0xC78, 0x852C0001,
+ 0xC78, 0x482D0001,
+ 0xC78, 0x472E0001,
+ 0xC78, 0x462F0001,
+ 0xC78, 0x45300001,
+ 0xC78, 0x44310001,
+ 0xC78, 0x07320001,
+ 0xC78, 0x06330001,
+ 0xC78, 0x05340001,
+ 0xC78, 0x04350001,
+ 0xC78, 0x03360001,
+ 0xC78, 0x02370001,
+ 0xC78, 0x01380001,
+ 0xC78, 0x00390001,
+ 0xC78, 0x003A0001,
+ 0xC78, 0x003B0001,
+ 0xC78, 0x003C0001,
+ 0xC78, 0x003D0001,
+ 0xC78, 0x003E0001,
+ 0xC78, 0x003F0001,
+ 0xC78, 0x7F002001,
+ 0xC78, 0x7F012001,
+ 0xC78, 0x7F022001,
+ 0xC78, 0x7F032001,
+ 0xC78, 0x7F042001,
+ 0xC78, 0x7F052001,
+ 0xC78, 0x7F062001,
+ 0xC78, 0x7F072001,
+ 0xC78, 0x7F082001,
+ 0xC78, 0x7F092001,
+ 0xC78, 0x7F0A2001,
+ 0xC78, 0x7F0B2001,
+ 0xC78, 0x7F0C2001,
+ 0xC78, 0x7F0D2001,
+ 0xC78, 0x7F0E2001,
+ 0xC78, 0x7F0F2001,
+ 0xC78, 0x7F102001,
+ 0xC78, 0x7F112001,
+ 0xC78, 0x7E122001,
+ 0xC78, 0x7D132001,
+ 0xC78, 0x7C142001,
+ 0xC78, 0x7B152001,
+ 0xC78, 0x7A162001,
+ 0xC78, 0x79172001,
+ 0xC78, 0x78182001,
+ 0xC78, 0x77192001,
+ 0xC78, 0x761A2001,
+ 0xC78, 0x751B2001,
+ 0xC78, 0x741C2001,
+ 0xC78, 0x731D2001,
+ 0xC78, 0x721E2001,
+ 0xC78, 0x711F2001,
+ 0xC78, 0x70202001,
+ 0xC78, 0x6F212001,
+ 0xC78, 0x6E222001,
+ 0xC78, 0x6D232001,
+ 0xC78, 0x6C242001,
+ 0xC78, 0x6B252001,
+ 0xC78, 0x6A262001,
+ 0xC78, 0x69272001,
+ 0xC78, 0x68282001,
+ 0xC78, 0x67292001,
+ 0xC78, 0x662A2001,
+ 0xC78, 0x652B2001,
+ 0xC78, 0x642C2001,
+ 0xC78, 0x632D2001,
+ 0xC78, 0x622E2001,
+ 0xC78, 0x612F2001,
+ 0xC78, 0x60302001,
+ 0xC78, 0x42312001,
+ 0xC78, 0x41322001,
+ 0xC78, 0x40332001,
+ 0xC78, 0x23342001,
+ 0xC78, 0x22352001,
+ 0xC78, 0x21362001,
+ 0xC78, 0x20372001,
+ 0xC78, 0x00382001,
+ 0xC78, 0x02392001,
+ 0xC78, 0x013A2001,
+ 0xC78, 0x003B2001,
+ 0xC78, 0x003C2001,
+ 0xC78, 0x003D2001,
+ 0xC78, 0x003E2001,
+ 0xC78, 0x003F2001,
+ 0xC78, 0x7F003101,
+ 0xC78, 0x7F013101,
+ 0xC78, 0x7F023101,
+ 0xC78, 0x7F033101,
+ 0xC78, 0x7F043101,
+ 0xC78, 0x7F053101,
+ 0xC78, 0x7F063101,
+ 0xC78, 0x7E073101,
+ 0xC78, 0x7D083101,
+ 0xC78, 0x7C093101,
+ 0xC78, 0x7B0A3101,
+ 0xC78, 0x7A0B3101,
+ 0xC78, 0x790C3101,
+ 0xC78, 0x780D3101,
+ 0xC78, 0x770E3101,
+ 0xC78, 0x760F3101,
+ 0xC78, 0x75103101,
+ 0xC78, 0x74113101,
+ 0xC78, 0x73123101,
+ 0xC78, 0x72133101,
+ 0xC78, 0x71143101,
+ 0xC78, 0x70153101,
+ 0xC78, 0x6F163101,
+ 0xC78, 0x69173101,
+ 0xC78, 0x68183101,
+ 0xC78, 0x67193101,
+ 0xC78, 0x661A3101,
+ 0xC78, 0x651B3101,
+ 0xC78, 0x641C3101,
+ 0xC78, 0x631D3101,
+ 0xC78, 0x621E3101,
+ 0xC78, 0x611F3101,
+ 0xC78, 0x60203101,
+ 0xC78, 0x42213101,
+ 0xC78, 0x41223101,
+ 0xC78, 0x40233101,
+ 0xC78, 0x22243101,
+ 0xC78, 0x21253101,
+ 0xC78, 0x20263101,
+ 0xC78, 0x00273101,
+ 0xC78, 0x00283101,
+ 0xC78, 0x00293101,
+ 0xC78, 0x002A3101,
+ 0xC78, 0x002B3101,
+ 0xC78, 0x002C3101,
+ 0xC78, 0x002D3101,
+ 0xC78, 0x002E3101,
+ 0xC78, 0x002F3101,
+ 0xC78, 0x00303101,
+ 0xC78, 0x00313101,
+ 0xC78, 0x00323101,
+ 0xC78, 0x00333101,
+ 0xC78, 0x00343101,
+ 0xC78, 0x00353101,
+ 0xC78, 0x00363101,
+ 0xC78, 0x00373101,
+ 0xC78, 0x00383101,
+ 0xC78, 0x00393101,
+ 0xC78, 0x003A3101,
+ 0xC78, 0x003B3101,
+ 0xC78, 0x003C3101,
+ 0xC78, 0x003D3101,
+ 0xC78, 0x003E3101,
+ 0xC78, 0x003F3101,
+ 0xC78, 0xFA403101,
+ 0xC78, 0xF9413101,
+ 0xC78, 0xF8423101,
+ 0xC78, 0xF7433101,
+ 0xC78, 0xF6443101,
+ 0xC78, 0xF5453101,
+ 0xC78, 0xF4463101,
+ 0xC78, 0xF3473101,
+ 0xC78, 0xF2483101,
+ 0xC78, 0xE1493101,
+ 0xC78, 0xE04A3101,
+ 0xC78, 0xEF4B3101,
+ 0xC78, 0xEE4C3101,
+ 0xC78, 0xED4D3101,
+ 0xC78, 0xEC4E3101,
+ 0xC78, 0xEB4F3101,
+ 0xC78, 0xEA503101,
+ 0xC78, 0xE9513101,
+ 0xC78, 0xE8523101,
+ 0xC78, 0xE7533101,
+ 0xC78, 0xE6543101,
+ 0xC78, 0xE5553101,
+ 0xC78, 0xE4563101,
+ 0xC78, 0xE3573101,
+ 0xC78, 0xE2583101,
+ 0xC78, 0xE1593101,
+ 0xC78, 0xE05A3101,
+ 0xC78, 0xC25B3101,
+ 0xC78, 0xC15C3101,
+ 0xC78, 0xC05D3101,
+ 0xC78, 0x825E3101,
+ 0xC78, 0x815F3101,
+ 0xC78, 0x80603101,
+ 0xC78, 0x80613101,
+ 0xC78, 0x80623101,
+ 0xC78, 0x80633101,
+ 0xC78, 0x80643101,
+ 0xC78, 0x80653101,
+ 0xC78, 0x80663101,
+ 0xC78, 0x80673101,
+ 0xC78, 0x80683101,
+ 0xC78, 0x80693101,
+ 0xC78, 0x806A3101,
+ 0xC78, 0x806B3101,
+ 0xC78, 0x806C3101,
+ 0xC78, 0x806D3101,
+ 0xC78, 0x806E3101,
+ 0xC78, 0x806F3101,
+ 0xC78, 0x80703101,
+ 0xC78, 0x80713101,
+ 0xC78, 0x80723101,
+ 0xC78, 0x80733101,
+ 0xC78, 0x80743101,
+ 0xC78, 0x80753101,
+ 0xC78, 0x80763101,
+ 0xC78, 0x80773101,
+ 0xC78, 0x80783101,
+ 0xC78, 0x80793101,
+ 0xC78, 0x807A3101,
+ 0xC78, 0x807B3101,
+ 0xC78, 0x807C3101,
+ 0xC78, 0x807D3101,
+ 0xC78, 0x807E3101,
+ 0xC78, 0x807F3101,
+ 0xC78, 0xFF402001,
+ 0xC78, 0xFF412001,
+ 0xC78, 0xFF422001,
+ 0xC78, 0xFF432001,
+ 0xC78, 0xFF442001,
+ 0xC78, 0xFF452001,
+ 0xC78, 0xFF462001,
+ 0xC78, 0xFF472001,
+ 0xC78, 0xFF482001,
+ 0xC78, 0xFF492001,
+ 0xC78, 0xFF4A2001,
+ 0xC78, 0xFF4B2001,
+ 0xC78, 0xFF4C2001,
+ 0xC78, 0xFE4D2001,
+ 0xC78, 0xFD4E2001,
+ 0xC78, 0xFC4F2001,
+ 0xC78, 0xFB502001,
+ 0xC78, 0xFA512001,
+ 0xC78, 0xF9522001,
+ 0xC78, 0xF8532001,
+ 0xC78, 0xF7542001,
+ 0xC78, 0xF6552001,
+ 0xC78, 0xF5562001,
+ 0xC78, 0xF4572001,
+ 0xC78, 0xF3582001,
+ 0xC78, 0xF2592001,
+ 0xC78, 0xF15A2001,
+ 0xC78, 0xF05B2001,
+ 0xC78, 0xEF5C2001,
+ 0xC78, 0xEE5D2001,
+ 0xC78, 0xED5E2001,
+ 0xC78, 0xEC5F2001,
+ 0xC78, 0xEB602001,
+ 0xC78, 0xEA612001,
+ 0xC78, 0xE9622001,
+ 0xC78, 0xE8632001,
+ 0xC78, 0xE7642001,
+ 0xC78, 0xE6652001,
+ 0xC78, 0xE5662001,
+ 0xC78, 0xE4672001,
+ 0xC78, 0xE3682001,
+ 0xC78, 0xC5692001,
+ 0xC78, 0xC46A2001,
+ 0xC78, 0xC36B2001,
+ 0xC78, 0xA46C2001,
+ 0xC78, 0x846D2001,
+ 0xC78, 0x836E2001,
+ 0xC78, 0x826F2001,
+ 0xC78, 0x81702001,
+ 0xC78, 0x80712001,
+ 0xC78, 0x80722001,
+ 0xC78, 0x80732001,
+ 0xC78, 0x80742001,
+ 0xC78, 0x80752001,
+ 0xC78, 0x80762001,
+ 0xC78, 0x80772001,
+ 0xC78, 0x80782001,
+ 0xC78, 0x80792001,
+ 0xC78, 0x807A2001,
+ 0xC78, 0x807B2001,
+ 0xC78, 0x807C2001,
+ 0xC78, 0x807D2001,
+ 0xC78, 0x807E2001,
+ 0xC78, 0x807F2001,
+ 0xC50, 0x69553422,
+ 0xC50, 0x69553420,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8703b_agc, rtw_phy_cfg_agc);
+
+/* init values for BB registers */
+static const u32 rtw8703b_bb[] = {
+ 0x800, 0x83045700,
+ 0x804, 0x00000003,
+ 0x808, 0x0000FC00,
+ 0x80C, 0x0000000A,
+ 0x810, 0x10001331,
+ 0x814, 0x020C3D10,
+ 0x818, 0x02200385,
+ 0x81C, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390204,
+ 0x828, 0x00000000,
+ 0x82C, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83C, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84C, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569A11A9,
+ 0x85C, 0x01000014,
+ 0x860, 0x66F60110,
+ 0x864, 0x061F0649,
+ 0x868, 0x00000000,
+ 0x86C, 0x27272700,
+ 0x870, 0x07000760,
+ 0x874, 0x25004000,
+ 0x878, 0x00000808,
+ 0x87C, 0x004F0201,
+ 0x880, 0xB0000B1E,
+ 0x884, 0x00000001,
+ 0x888, 0x00000000,
+ 0x88C, 0xCCC000C0,
+ 0x890, 0x00000800,
+ 0x894, 0xFFFFFFFE,
+ 0x898, 0x40302010,
+ 0x89C, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90C, 0x81121111,
+ 0x910, 0x00000002,
+ 0x914, 0x00000201,
+ 0x948, 0x99000000,
+ 0x94C, 0x00000010,
+ 0x950, 0x20003800,
+ 0x954, 0x4A880000,
+ 0x958, 0x4BC5D87A,
+ 0x95C, 0x04EB9B79,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x80FF800C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E7F120F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x1114D028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0xD1D80000,
+ 0xA24, 0x5A7DA0BD,
+ 0xA28, 0x0000223B,
+ 0xA2C, 0x00D30000,
+ 0xA70, 0x101FBF00,
+ 0xA74, 0x00000007,
+ 0xA78, 0x00008900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x2180FA74,
+ 0xA84, 0x00120000,
+ 0xA88, 0x040C0000,
+ 0xA8C, 0x12345678,
+ 0xA90, 0xABCDEF00,
+ 0xA94, 0x001B1B89,
+ 0xA98, 0x05100000,
+ 0xA9C, 0x3F000000,
+ 0xAA0, 0x00000000,
+ 0xB2C, 0x00000000,
+ 0xC00, 0x48071D40,
+ 0xC04, 0x03A05611,
+ 0xC08, 0x000000E4,
+ 0xC0C, 0x6C6C6C6C,
+ 0xC10, 0x18800000,
+ 0xC14, 0x40000100,
+ 0xC18, 0x08800000,
+ 0xC1C, 0x40000100,
+ 0xC20, 0x00000000,
+ 0xC24, 0x00000000,
+ 0xC28, 0x00000000,
+ 0xC2C, 0x00000000,
+ 0xC30, 0x69E9AC4B,
+ 0xC34, 0x31000040,
+ 0xC38, 0x21688080,
+ 0xC3C, 0x000016CC,
+ 0xC40, 0x1F78403F,
+ 0xC44, 0x00010036,
+ 0xC48, 0xEC020107,
+ 0xC4C, 0x007F037F,
+ 0xC50, 0x69553420,
+ 0xC54, 0x43BC0094,
+ 0xC58, 0x00015967,
+ 0xC5C, 0x18250492,
+ 0xC60, 0x00000000,
+ 0xC64, 0x7112848B,
+ 0xC68, 0x47C07BFF,
+ 0xC6C, 0x00000036,
+ 0xC70, 0x2C7F000D,
+ 0xC74, 0x020600DB,
+ 0xC78, 0x0000001F,
+ 0xC7C, 0x00B91612,
+ 0xC80, 0x390000E4,
+ 0xC84, 0x19F60000,
+ 0xC88, 0x40000100,
+ 0xC8C, 0x20200000,
+ 0xC90, 0x00091521,
+ 0xC94, 0x00000000,
+ 0xC98, 0x00121820,
+ 0xC9C, 0x00007F7F,
+ 0xCA0, 0x00000000,
+ 0xCA4, 0x000300A0,
+ 0xCA8, 0x00000000,
+ 0xCAC, 0x00000000,
+ 0xCB0, 0x00000000,
+ 0xCB4, 0x00000000,
+ 0xCB8, 0x00000000,
+ 0xCBC, 0x28000000,
+ 0xCC0, 0x00000000,
+ 0xCC4, 0x00000000,
+ 0xCC8, 0x00000000,
+ 0xCCC, 0x00000000,
+ 0xCD0, 0x00000000,
+ 0xCD4, 0x00000000,
+ 0xCD8, 0x64B22427,
+ 0xCDC, 0x00766932,
+ 0xCE0, 0x00222222,
+ 0xCE4, 0x10000000,
+ 0xCE8, 0x37644302,
+ 0xCEC, 0x2F97D40C,
+ 0xD00, 0x00030740,
+ 0xD04, 0x40020401,
+ 0xD08, 0x0000907F,
+ 0xD0C, 0x20010201,
+ 0xD10, 0xA0633333,
+ 0xD14, 0x3333BC53,
+ 0xD18, 0x7A8F5B6F,
+ 0xD2C, 0xCB979975,
+ 0xD30, 0x00000000,
+ 0xD34, 0x80608000,
+ 0xD38, 0x98000000,
+ 0xD3C, 0x40127353,
+ 0xD40, 0x00000000,
+ 0xD44, 0x00000000,
+ 0xD48, 0x00000000,
+ 0xD4C, 0x00000000,
+ 0xD50, 0x6437140A,
+ 0xD54, 0x00000000,
+ 0xD58, 0x00000282,
+ 0xD5C, 0x30032064,
+ 0xD60, 0x4653DE68,
+ 0xD64, 0x04518A3C,
+ 0xD68, 0x00002101,
+ 0xE00, 0x2D2D2D2D,
+ 0xE04, 0x2D2D2D2D,
+ 0xE08, 0x0390272D,
+ 0xE10, 0x2D2D2D2D,
+ 0xE14, 0x2D2D2D2D,
+ 0xE18, 0x2D2D2D2D,
+ 0xE1C, 0x2D2D2D2D,
+ 0xE28, 0x00000000,
+ 0xE30, 0x1000DC1F,
+ 0xE34, 0x10008C1F,
+ 0xE38, 0x02140102,
+ 0xE3C, 0x681604C2,
+ 0xE40, 0x01007C00,
+ 0xE44, 0x01004800,
+ 0xE48, 0xFB000000,
+ 0xE4C, 0x000028D1,
+ 0xE50, 0x1000DC1F,
+ 0xE54, 0x10008C1F,
+ 0xE58, 0x02140102,
+ 0xE5C, 0x28160D05,
+ 0xE60, 0x00000048,
+ 0xE68, 0x001B25A4,
+ 0xE6C, 0x01C00014,
+ 0xE70, 0x01C00014,
+ 0xE74, 0x02000014,
+ 0xE78, 0x02000014,
+ 0xE7C, 0x02000014,
+ 0xE80, 0x02000014,
+ 0xE84, 0x01C00014,
+ 0xE88, 0x02000014,
+ 0xE8C, 0x01C00014,
+ 0xED0, 0x01C00014,
+ 0xED4, 0x01C00014,
+ 0xED8, 0x01C00014,
+ 0xEDC, 0x00000014,
+ 0xEE0, 0x00000014,
+ 0xEE8, 0x21555448,
+ 0xEEC, 0x03C00014,
+ 0xF14, 0x00000003,
+ 0xF4C, 0x00000000,
+ 0xF00, 0x00000300,
+};
+
+RTW_DECL_TABLE_PHY_COND(rtw8703b_bb, rtw_phy_cfg_bb);
+
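+/* RF path A init values as (register, value) pairs */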
+static const u32 rtw8703b_rf_a[] = {
+ 0x018, 0x00008C01,
+ 0x0B5, 0x0008C050,
+ 0x0B1, 0x00054258,
+ 0x0B2, 0x00054C00,
+ 0x030, 0x00018000,
+ 0x031, 0x00000027,
+ 0x032, 0x000A7F07,
+ 0x030, 0x00020000,
+ 0x031, 0x00000027,
+ 0x032, 0x000E7D87,
+ 0x01C, 0x000F8635,
+ 0x0EF, 0x00080000,
+ 0x030, 0x00008000,
+ 0x031, 0x00000004,
+ 0x032, 0x00006105,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00000400,
+ 0x041, 0x0000BD54,
+ 0x041, 0x00003DD4,
+ 0x041, 0x0000FDD4,
+ 0x0EF, 0x00000000,
+ 0x0DF, 0x00000600,
+ 0x050, 0x0000C6DB,
+ 0x051, 0x00004505,
+ 0x052, 0x0000E31D,
+ 0x053, 0x00040579,
+ 0x054, 0x00000000,
+ 0x055, 0x0008206E,
+ 0x056, 0x00040000,
+ 0x0EF, 0x00000100,
+ 0x034, 0x0000ADD7,
+ 0x034, 0x00009DD4,
+ 0x034, 0x00008DD1,
+ 0x034, 0x00007DCE,
+ 0x034, 0x00006DCB,
+ 0x034, 0x00005CCE,
+ 0x034, 0x000048CD,
+ 0x034, 0x000034CC,
+ 0x034, 0x0000244F,
+ 0x034, 0x0000144C,
+ 0x034, 0x0000004E,
+ 0x0EF, 0x00000000,
+ 0x0EF, 0x00002000,
+ 0x03B, 0x0003801F,
+ 0x03B, 0x00030002,
+ 0x03B, 0x00028001,
+ 0x03B, 0x00020000,
+ 0x03B, 0x00018003,
+ 0x03B, 0x00010002,
+ 0x03B, 0x00008001,
+ 0x03B, 0x00000000,
+ 0x0EF, 0x00000000,
+ 0x082, 0x000C0000,
+ 0x083, 0x000AF025,
+ 0x01E, 0x00000C08,
+};
+
+RTW_DECL_TABLE_RF_RADIO(rtw8703b_rf_a, A);
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h b/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h
new file mode 100644
index 000000000000..98bd399bddbf
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8703b_tables.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright Fiona Klute <fiona.klute@gmx.de> */
+
+#ifndef __RTW8703B_TABLES_H__
+#define __RTW8703B_TABLES_H__
+
+extern const struct rtw_table rtw8703b_bb_pg_tbl;
+extern const struct rtw_table rtw8703b_txpwr_lmt_tbl;
+extern const struct rtw_table rtw8703b_mac_tbl;
+extern const struct rtw_table rtw8703b_agc_tbl;
+extern const struct rtw_table rtw8703b_bb_tbl;
+extern const struct rtw_table rtw8703b_rf_a_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723cs.c b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c
new file mode 100644
index 000000000000..8d38d36be8c0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723cs.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright Fiona Klute <fiona.klute@gmx.de> */
+
+#include <linux/mmc/sdio_func.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/module.h>
+#include "main.h"
+#include "rtw8703b.h"
+#include "sdio.h"
+
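+/* The RTL8723CS is driven by the RTL8703B chip code: the SDIO ID
+ * below binds the device to the shared rtw8703b_hw_spec.
+ */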
+static const struct sdio_device_id rtw_8723cs_id_table[] = {
+ {
+ SDIO_DEVICE(SDIO_VENDOR_ID_REALTEK,
+ SDIO_DEVICE_ID_REALTEK_RTW8723CS),
+ .driver_data = (kernel_ulong_t)&rtw8703b_hw_spec,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(sdio, rtw_8723cs_id_table);
+
+static struct sdio_driver rtw_8723cs_driver = {
+ .name = "rtw8723cs",
+ .id_table = rtw_8723cs_id_table,
+ .probe = rtw_sdio_probe,
+ .remove = rtw_sdio_remove,
+ .drv = {
+ .pm = &rtw_sdio_pm_ops,
+ .shutdown = rtw_sdio_shutdown,
+ }
+};
+module_sdio_driver(rtw_8723cs_driver);
+
+MODULE_AUTHOR("Fiona Klute <fiona.klute@gmx.de>");
+MODULE_DESCRIPTION("Realtek 802.11n wireless 8723cs driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
index c575476a0020..3fba4054d45f 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c
@@ -9,36 +9,13 @@
#include "tx.h"
#include "rx.h"
#include "phy.h"
+#include "rtw8723x.h"
#include "rtw8723d.h"
#include "rtw8723d_table.h"
#include "mac.h"
#include "reg.h"
#include "debug.h"
-static const struct rtw_hw_reg rtw8723d_txagc[] = {
- [DESC_RATE1M] = { .addr = 0xe08, .mask = 0x0000ff00 },
- [DESC_RATE2M] = { .addr = 0x86c, .mask = 0x0000ff00 },
- [DESC_RATE5_5M] = { .addr = 0x86c, .mask = 0x00ff0000 },
- [DESC_RATE11M] = { .addr = 0x86c, .mask = 0xff000000 },
- [DESC_RATE6M] = { .addr = 0xe00, .mask = 0x000000ff },
- [DESC_RATE9M] = { .addr = 0xe00, .mask = 0x0000ff00 },
- [DESC_RATE12M] = { .addr = 0xe00, .mask = 0x00ff0000 },
- [DESC_RATE18M] = { .addr = 0xe00, .mask = 0xff000000 },
- [DESC_RATE24M] = { .addr = 0xe04, .mask = 0x000000ff },
- [DESC_RATE36M] = { .addr = 0xe04, .mask = 0x0000ff00 },
- [DESC_RATE48M] = { .addr = 0xe04, .mask = 0x00ff0000 },
- [DESC_RATE54M] = { .addr = 0xe04, .mask = 0xff000000 },
- [DESC_RATEMCS0] = { .addr = 0xe10, .mask = 0x000000ff },
- [DESC_RATEMCS1] = { .addr = 0xe10, .mask = 0x0000ff00 },
- [DESC_RATEMCS2] = { .addr = 0xe10, .mask = 0x00ff0000 },
- [DESC_RATEMCS3] = { .addr = 0xe10, .mask = 0xff000000 },
- [DESC_RATEMCS4] = { .addr = 0xe14, .mask = 0x000000ff },
- [DESC_RATEMCS5] = { .addr = 0xe14, .mask = 0x0000ff00 },
- [DESC_RATEMCS6] = { .addr = 0xe14, .mask = 0x00ff0000 },
- [DESC_RATEMCS7] = { .addr = 0xe14, .mask = 0xff000000 },
-};
-
-#define WLAN_TXQ_RPT_EN 0x1F
#define WLAN_SLOT_TIME 0x09
#define WLAN_RL_VAL 0x3030
#define WLAN_BAR_VAL 0x0201ffff
@@ -65,34 +42,6 @@ static const struct rtw_hw_reg rtw8723d_txagc[] = {
#define WLAN_LTR_CTRL1 0xCB004010
#define WLAN_LTR_CTRL2 0x01233425
-static void rtw8723d_lck(struct rtw_dev *rtwdev)
-{
- u32 lc_cal;
- u8 val_ctx, rf_val;
- int ret;
-
- val_ctx = rtw_read8(rtwdev, REG_CTX);
- if ((val_ctx & BIT_MASK_CTX_TYPE) != 0)
- rtw_write8(rtwdev, REG_CTX, val_ctx & ~BIT_MASK_CTX_TYPE);
- else
- rtw_write8(rtwdev, REG_TXPAUSE, 0xFF);
- lc_cal = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK);
-
- rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal | BIT_LCK);
-
- ret = read_poll_timeout(rtw_read_rf, rf_val, rf_val != 0x1,
- 10000, 1000000, false,
- rtwdev, RF_PATH_A, RF_CFGCH, BIT_LCK);
- if (ret)
- rtw_warn(rtwdev, "failed to poll LCK status bit\n");
-
- rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal);
- if ((val_ctx & BIT_MASK_CTX_TYPE) != 0)
- rtw_write8(rtwdev, REG_CTX, val_ctx);
- else
- rtw_write8(rtwdev, REG_TXPAUSE, 0x00);
-}
-
static const u32 rtw8723d_ofdm_swing_table[] = {
0x0b40002d, 0x0c000030, 0x0cc00033, 0x0d800036, 0x0e400039, 0x0f00003c,
0x10000040, 0x11000044, 0x12000048, 0x1300004c, 0x14400051, 0x15800056,
@@ -196,7 +145,7 @@ static void rtw8723d_phy_set_param(struct rtw_dev *rtwdev)
rtw_write16_set(rtwdev, REG_TXDMA_OFFSET_CHK, BIT_DROP_DATA_EN);
- rtw8723d_lck(rtwdev);
+ rtw8723x_lck(rtwdev);
rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50);
rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x20);
@@ -204,67 +153,6 @@ static void rtw8723d_phy_set_param(struct rtw_dev *rtwdev)
rtw8723d_pwrtrack_init(rtwdev);
}
-static void rtw8723de_efuse_parsing(struct rtw_efuse *efuse,
- struct rtw8723d_efuse *map)
-{
- ether_addr_copy(efuse->addr, map->e.mac_addr);
-}
-
-static void rtw8723du_efuse_parsing(struct rtw_efuse *efuse,
- struct rtw8723d_efuse *map)
-{
- ether_addr_copy(efuse->addr, map->u.mac_addr);
-}
-
-static void rtw8723ds_efuse_parsing(struct rtw_efuse *efuse,
- struct rtw8723d_efuse *map)
-{
- ether_addr_copy(efuse->addr, map->s.mac_addr);
-}
-
-static int rtw8723d_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
-{
- struct rtw_efuse *efuse = &rtwdev->efuse;
- struct rtw8723d_efuse *map;
- int i;
-
- map = (struct rtw8723d_efuse *)log_map;
-
- efuse->rfe_option = 0;
- efuse->rf_board_option = map->rf_board_option;
- efuse->crystal_cap = map->xtal_k;
- efuse->pa_type_2g = map->pa_type;
- efuse->lna_type_2g = map->lna_type_2g[0];
- efuse->channel_plan = map->channel_plan;
- efuse->country_code[0] = map->country_code[0];
- efuse->country_code[1] = map->country_code[1];
- efuse->bt_setting = map->rf_bt_setting;
- efuse->regd = map->rf_board_option & 0x7;
- efuse->thermal_meter[0] = map->thermal_meter;
- efuse->thermal_meter_k = map->thermal_meter;
- efuse->afe = map->afe;
-
- for (i = 0; i < 4; i++)
- efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
-
- switch (rtw_hci_type(rtwdev)) {
- case RTW_HCI_TYPE_PCIE:
- rtw8723de_efuse_parsing(efuse, map);
- break;
- case RTW_HCI_TYPE_USB:
- rtw8723du_efuse_parsing(efuse, map);
- break;
- case RTW_HCI_TYPE_SDIO:
- rtw8723ds_efuse_parsing(efuse, map);
- break;
- default:
- /* unsupported now */
- return -ENOTSUPP;
- }
-
- return 0;
-}
-
static void query_phy_status_page0(struct rtw_dev *rtwdev, u8 *phy_status,
struct rtw_rx_pkt_stat *pkt_stat)
{
@@ -540,297 +428,11 @@ static void rtw8723d_set_channel(struct rtw_dev *rtwdev, u8 channel, u8 bw,
rtw8723d_set_channel_bb(rtwdev, channel, bw, primary_chan_idx);
}
-#define BIT_CFENDFORM BIT(9)
-#define BIT_WMAC_TCR_ERR0 BIT(12)
-#define BIT_WMAC_TCR_ERR1 BIT(13)
-#define BIT_TCR_CFG (BIT_CFENDFORM | BIT_WMAC_TCR_ERR0 | \
- BIT_WMAC_TCR_ERR1)
-#define WLAN_RX_FILTER0 0xFFFF
-#define WLAN_RX_FILTER1 0x400
-#define WLAN_RX_FILTER2 0xFFFF
-#define WLAN_RCR_CFG 0x700060CE
-
-static int rtw8723d_mac_init(struct rtw_dev *rtwdev)
-{
- rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN);
- rtw_write32(rtwdev, REG_TCR, BIT_TCR_CFG);
-
- rtw_write16(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
- rtw_write16(rtwdev, REG_RXFLTMAP1, WLAN_RX_FILTER1);
- rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2);
- rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG);
-
- rtw_write32(rtwdev, REG_INT_MIG, 0);
- rtw_write32(rtwdev, REG_MCUTST_1, 0x0);
-
- rtw_write8(rtwdev, REG_MISC_CTRL, BIT_DIS_SECOND_CCA);
- rtw_write8(rtwdev, REG_2ND_CCA_CTRL, 0);
-
- return 0;
-}
-
static void rtw8723d_shutdown(struct rtw_dev *rtwdev)
{
rtw_write16_set(rtwdev, REG_HCI_OPT_CTRL, BIT_USB_SUS_DIS);
}
-static void rtw8723d_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
-{
- u8 ldo_pwr;
-
- ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3);
- if (enable) {
- ldo_pwr &= ~BIT_MASK_LDO25_VOLTAGE;
- ldo_pwr |= (BIT_LDO25_VOLTAGE_V25 << 4) | BIT_LDO25_EN;
- } else {
- ldo_pwr &= ~BIT_LDO25_EN;
- }
- rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr);
-}
-
-static void
-rtw8723d_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
-{
- struct rtw_hal *hal = &rtwdev->hal;
- const struct rtw_hw_reg *txagc;
- u8 rate, pwr_index;
- int j;
-
- for (j = 0; j < rtw_rate_size[rs]; j++) {
- rate = rtw_rate_section[rs][j];
- pwr_index = hal->tx_pwr_tbl[path][rate];
-
- if (rate >= ARRAY_SIZE(rtw8723d_txagc)) {
- rtw_warn(rtwdev, "rate 0x%x isn't supported\n", rate);
- continue;
- }
- txagc = &rtw8723d_txagc[rate];
- if (!txagc->addr) {
- rtw_warn(rtwdev, "rate 0x%x isn't defined\n", rate);
- continue;
- }
-
- rtw_write32_mask(rtwdev, txagc->addr, txagc->mask, pwr_index);
- }
-}
-
-static void rtw8723d_set_tx_power_index(struct rtw_dev *rtwdev)
-{
- struct rtw_hal *hal = &rtwdev->hal;
- int rs, path;
-
- for (path = 0; path < hal->rf_path_num; path++) {
- for (rs = 0; rs <= RTW_RATE_SECTION_HT_1S; rs++)
- rtw8723d_set_tx_power_index_by_rate(rtwdev, path, rs);
- }
-}
-
-static void rtw8723d_efuse_grant(struct rtw_dev *rtwdev, bool on)
-{
- if (on) {
- rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
-
- rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_ELDR);
- rtw_write16_set(rtwdev, REG_SYS_CLKR, BIT_LOADER_CLK_EN | BIT_ANA8M);
- } else {
- rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
- }
-}
-
-static void rtw8723d_false_alarm_statistics(struct rtw_dev *rtwdev)
-{
- struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- u32 cck_fa_cnt;
- u32 ofdm_fa_cnt;
- u32 crc32_cnt;
- u32 val32;
-
- /* hold counter */
- rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 1);
- rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 1);
- rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KEEP, 1);
- rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KEEP, 1);
-
- cck_fa_cnt = rtw_read32_mask(rtwdev, REG_CCK_FA_LSB_11N, MASKBYTE0);
- cck_fa_cnt += rtw_read32_mask(rtwdev, REG_CCK_FA_MSB_11N, MASKBYTE3) << 8;
-
- val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE1_11N);
- ofdm_fa_cnt = u32_get_bits(val32, BIT_MASK_OFDM_FF_CNT);
- ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_SF_CNT);
- val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE2_11N);
- dm_info->ofdm_cca_cnt = u32_get_bits(val32, BIT_MASK_OFDM_CCA_CNT);
- ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_PF_CNT);
- val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE3_11N);
- ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_RI_CNT);
- ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_CRC_CNT);
- val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE4_11N);
- ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_MNS_CNT);
-
- dm_info->cck_fa_cnt = cck_fa_cnt;
- dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
- dm_info->total_fa_cnt = cck_fa_cnt + ofdm_fa_cnt;
-
- dm_info->cck_err_cnt = rtw_read32(rtwdev, REG_IGI_C_11N);
- dm_info->cck_ok_cnt = rtw_read32(rtwdev, REG_IGI_D_11N);
- crc32_cnt = rtw_read32(rtwdev, REG_OFDM_CRC32_CNT_11N);
- dm_info->ofdm_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_ERR);
- dm_info->ofdm_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_OK);
- crc32_cnt = rtw_read32(rtwdev, REG_HT_CRC32_CNT_11N);
- dm_info->ht_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_ERR);
- dm_info->ht_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_OK);
- dm_info->vht_err_cnt = 0;
- dm_info->vht_ok_cnt = 0;
-
- val32 = rtw_read32(rtwdev, REG_CCK_CCA_CNT_11N);
- dm_info->cck_cca_cnt = (u32_get_bits(val32, BIT_MASK_CCK_FA_MSB) << 8) |
- u32_get_bits(val32, BIT_MASK_CCK_FA_LSB);
- dm_info->total_cca_cnt = dm_info->cck_cca_cnt + dm_info->ofdm_cca_cnt;
-
- /* reset counter */
- rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 1);
- rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 0);
- rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 1);
- rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 0);
- rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 0);
- rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 0);
- rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 0);
- rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 2);
- rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 0);
- rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 2);
- rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 1);
- rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 0);
-}
-
-static const u32 iqk_adda_regs[] = {
- 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78, 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
- 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, 0xeec
-};
-
-static const u32 iqk_mac8_regs[] = {0x522, 0x550, 0x551};
-static const u32 iqk_mac32_regs[] = {0x40};
-
-static const u32 iqk_bb_regs[] = {
- 0xc04, 0xc08, 0x874, 0xb68, 0xb6c, 0x870, 0x860, 0x864, 0xa04
-};
-
-#define IQK_ADDA_REG_NUM ARRAY_SIZE(iqk_adda_regs)
-#define IQK_MAC8_REG_NUM ARRAY_SIZE(iqk_mac8_regs)
-#define IQK_MAC32_REG_NUM ARRAY_SIZE(iqk_mac32_regs)
-#define IQK_BB_REG_NUM ARRAY_SIZE(iqk_bb_regs)
-
-struct iqk_backup_regs {
- u32 adda[IQK_ADDA_REG_NUM];
- u8 mac8[IQK_MAC8_REG_NUM];
- u32 mac32[IQK_MAC32_REG_NUM];
- u32 bb[IQK_BB_REG_NUM];
-
- u32 lte_path;
- u32 lte_gnt;
-
- u32 bb_sel_btg;
- u8 btg_sel;
-
- u8 igia;
- u8 igib;
-};
-
-static void rtw8723d_iqk_backup_regs(struct rtw_dev *rtwdev,
- struct iqk_backup_regs *backup)
-{
- int i;
-
- for (i = 0; i < IQK_ADDA_REG_NUM; i++)
- backup->adda[i] = rtw_read32(rtwdev, iqk_adda_regs[i]);
-
- for (i = 0; i < IQK_MAC8_REG_NUM; i++)
- backup->mac8[i] = rtw_read8(rtwdev, iqk_mac8_regs[i]);
- for (i = 0; i < IQK_MAC32_REG_NUM; i++)
- backup->mac32[i] = rtw_read32(rtwdev, iqk_mac32_regs[i]);
-
- for (i = 0; i < IQK_BB_REG_NUM; i++)
- backup->bb[i] = rtw_read32(rtwdev, iqk_bb_regs[i]);
-
- backup->igia = rtw_read32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0);
- backup->igib = rtw_read32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0);
-
- backup->bb_sel_btg = rtw_read32(rtwdev, REG_BB_SEL_BTG);
-}
-
-static void rtw8723d_iqk_restore_regs(struct rtw_dev *rtwdev,
- const struct iqk_backup_regs *backup)
-{
- int i;
-
- for (i = 0; i < IQK_ADDA_REG_NUM; i++)
- rtw_write32(rtwdev, iqk_adda_regs[i], backup->adda[i]);
-
- for (i = 0; i < IQK_MAC8_REG_NUM; i++)
- rtw_write8(rtwdev, iqk_mac8_regs[i], backup->mac8[i]);
- for (i = 0; i < IQK_MAC32_REG_NUM; i++)
- rtw_write32(rtwdev, iqk_mac32_regs[i], backup->mac32[i]);
-
- for (i = 0; i < IQK_BB_REG_NUM; i++)
- rtw_write32(rtwdev, iqk_bb_regs[i], backup->bb[i]);
-
- rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50);
- rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, backup->igia);
-
- rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, 0x50);
- rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, backup->igib);
-
- rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x01008c00);
- rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x01008c00);
-}
-
-static void rtw8723d_iqk_backup_path_ctrl(struct rtw_dev *rtwdev,
- struct iqk_backup_regs *backup)
-{
- backup->btg_sel = rtw_read8(rtwdev, REG_BTG_SEL);
- rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] original 0x67 = 0x%x\n",
- backup->btg_sel);
-}
-
-static void rtw8723d_iqk_config_path_ctrl(struct rtw_dev *rtwdev)
-{
- rtw_write32_mask(rtwdev, REG_PAD_CTRL1, BIT_BT_BTG_SEL, 0x1);
- rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] set 0x67 = 0x%x\n",
- rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
-}
-
-static void rtw8723d_iqk_restore_path_ctrl(struct rtw_dev *rtwdev,
- const struct iqk_backup_regs *backup)
-{
- rtw_write8(rtwdev, REG_BTG_SEL, backup->btg_sel);
- rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] restore 0x67 = 0x%x\n",
- rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
-}
-
-static void rtw8723d_iqk_backup_lte_path_gnt(struct rtw_dev *rtwdev,
- struct iqk_backup_regs *backup)
-{
- backup->lte_path = rtw_read32(rtwdev, REG_LTECOEX_PATH_CONTROL);
- rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0x800f0038);
- mdelay(1);
- backup->lte_gnt = rtw_read32(rtwdev, REG_LTECOEX_READ_DATA);
- rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] OriginalGNT = 0x%x\n",
- backup->lte_gnt);
-}
-
-static void rtw8723d_iqk_config_lte_path_gnt(struct rtw_dev *rtwdev)
-{
- rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, 0x0000ff00);
- rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc0020038);
- rtw_write32_mask(rtwdev, REG_LTECOEX_PATH_CONTROL, BIT_LTE_MUX_CTRL_PATH, 0x1);
-}
-
-static void rtw8723d_iqk_restore_lte_path_gnt(struct rtw_dev *rtwdev,
- const struct iqk_backup_regs *bak)
-{
- rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, bak->lte_gnt);
- rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc00f0038);
- rtw_write32(rtwdev, REG_LTECOEX_PATH_CONTROL, bak->lte_path);
-}
-
struct rtw_8723d_iqk_cfg {
const char *name;
u32 val_bb_sel_btg;
@@ -930,6 +532,8 @@ static u8 rtw8723d_iqk_check_rx_failed(struct rtw_dev *rtwdev,
return 0;
}
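+/* value for REG_LTECOEX_WRITE_DATA while configuring the GNT path for IQK */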
+#define IQK_LTE_WRITE_VAL_8723D 0x0000ff00
+
static void rtw8723d_iqk_one_shot(struct rtw_dev *rtwdev, bool tx,
const struct rtw_8723d_iqk_cfg *iqk_cfg)
{
@@ -937,7 +541,7 @@ static void rtw8723d_iqk_one_shot(struct rtw_dev *rtwdev, bool tx,
/* enter IQK mode */
rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK);
- rtw8723d_iqk_config_lte_path_gnt(rtwdev);
+ rtw8723x_iqk_config_lte_path_gnt(rtwdev, IQK_LTE_WRITE_VAL_8723D);
rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0x800f0054);
mdelay(1);
@@ -959,9 +563,9 @@ static void rtw8723d_iqk_one_shot(struct rtw_dev *rtwdev, bool tx,
static void rtw8723d_iqk_txrx_path_post(struct rtw_dev *rtwdev,
const struct rtw_8723d_iqk_cfg *iqk_cfg,
- const struct iqk_backup_regs *backup)
+ const struct rtw8723x_iqk_backup_regs *backup)
{
- rtw8723d_iqk_restore_lte_path_gnt(rtwdev, backup);
+ rtw8723x_iqk_restore_lte_path_gnt(rtwdev, backup);
rtw_write32(rtwdev, REG_BB_SEL_BTG, backup->bb_sel_btg);
/* leave IQK mode */
@@ -974,7 +578,7 @@ static void rtw8723d_iqk_txrx_path_post(struct rtw_dev *rtwdev,
static u8 rtw8723d_iqk_tx_path(struct rtw_dev *rtwdev,
const struct rtw_8723d_iqk_cfg *iqk_cfg,
- const struct iqk_backup_regs *backup)
+ const struct rtw8723x_iqk_backup_regs *backup)
{
u8 status;
@@ -1033,7 +637,7 @@ static u8 rtw8723d_iqk_tx_path(struct rtw_dev *rtwdev,
static u8 rtw8723d_iqk_rx_path(struct rtw_dev *rtwdev,
const struct rtw_8723d_iqk_cfg *iqk_cfg,
- const struct iqk_backup_regs *backup)
+ const struct rtw8723x_iqk_backup_regs *backup)
{
u32 tx_x, tx_y;
u8 status;
@@ -1220,14 +824,6 @@ void rtw8723d_iqk_fill_s0_matrix(struct rtw_dev *rtwdev, const s32 result[])
result[IQK_S0_RX_Y]);
}
-static void rtw8723d_iqk_path_adda_on(struct rtw_dev *rtwdev)
-{
- int i;
-
- for (i = 0; i < IQK_ADDA_REG_NUM; i++)
- rtw_write32(rtwdev, iqk_adda_regs[i], 0x03c00016);
-}
-
static void rtw8723d_iqk_config_mac(struct rtw_dev *rtwdev)
{
rtw_write8(rtwdev, REG_TXPAUSE, 0xff);
@@ -1245,70 +841,14 @@ void rtw8723d_iqk_rf_standby(struct rtw_dev *rtwdev, enum rtw_rf_path path)
rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK);
}
-static
-bool rtw8723d_iqk_similarity_cmp(struct rtw_dev *rtwdev, s32 result[][IQK_NR],
- u8 c1, u8 c2)
-{
- u32 i, j, diff;
- u32 bitmap = 0;
- u8 candidate[PATH_NR] = {IQK_ROUND_INVALID, IQK_ROUND_INVALID};
- bool ret = true;
-
- s32 tmp1, tmp2;
-
- for (i = 0; i < IQK_NR; i++) {
- tmp1 = iqkxy_to_s32(result[c1][i]);
- tmp2 = iqkxy_to_s32(result[c2][i]);
-
- diff = abs(tmp1 - tmp2);
-
- if (diff <= MAX_TOLERANCE)
- continue;
-
- if ((i == IQK_S1_RX_X || i == IQK_S0_RX_X) && !bitmap) {
- if (result[c1][i] + result[c1][i + 1] == 0)
- candidate[i / IQK_SX_NR] = c2;
- else if (result[c2][i] + result[c2][i + 1] == 0)
- candidate[i / IQK_SX_NR] = c1;
- else
- bitmap |= BIT(i);
- } else {
- bitmap |= BIT(i);
- }
- }
-
- if (bitmap != 0)
- goto check_sim;
-
- for (i = 0; i < PATH_NR; i++) {
- if (candidate[i] == IQK_ROUND_INVALID)
- continue;
-
- for (j = i * IQK_SX_NR; j < i * IQK_SX_NR + 2; j++)
- result[IQK_ROUND_HYBRID][j] = result[candidate[i]][j];
- ret = false;
- }
-
- return ret;
-
-check_sim:
- for (i = 0; i < IQK_NR; i++) {
- j = i & ~1; /* 2 bits are a pair for IQ[X, Y] */
- if (bitmap & GENMASK(j + 1, j))
- continue;
-
- result[IQK_ROUND_HYBRID][i] = result[c1][i];
- }
-
- return false;
-}
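+/* value written to each ADDA register to switch the path on for IQK */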
+#define ADDA_ON_VAL_8723D 0x03c00016
static
-void rtw8723d_iqk_precfg_path(struct rtw_dev *rtwdev, enum rtw8723d_path path)
+void rtw8723d_iqk_precfg_path(struct rtw_dev *rtwdev, enum rtw8723x_path path)
{
if (path == PATH_S0) {
rtw8723d_iqk_rf_standby(rtwdev, RF_PATH_A);
- rtw8723d_iqk_path_adda_on(rtwdev);
+ rtw8723x_iqk_path_adda_on(rtwdev, ADDA_ON_VAL_8723D);
}
rtw_write32_mask(rtwdev, REG_FPGA0_IQK_11N, BIT_MASK_IQK_MOD, EN_IQK);
@@ -1317,13 +857,13 @@ void rtw8723d_iqk_precfg_path(struct rtw_dev *rtwdev, enum rtw8723d_path path)
if (path == PATH_S1) {
rtw8723d_iqk_rf_standby(rtwdev, RF_PATH_B);
- rtw8723d_iqk_path_adda_on(rtwdev);
+ rtw8723x_iqk_path_adda_on(rtwdev, ADDA_ON_VAL_8723D);
}
}
static
void rtw8723d_iqk_one_round(struct rtw_dev *rtwdev, s32 result[][IQK_NR], u8 t,
- const struct iqk_backup_regs *backup)
+ const struct rtw8723x_iqk_backup_regs *backup)
{
u32 i;
u8 s1_ok, s0_ok;
@@ -1331,7 +871,7 @@ void rtw8723d_iqk_one_round(struct rtw_dev *rtwdev, s32 result[][IQK_NR], u8 t,
rtw_dbg(rtwdev, RTW_DBG_RFK,
"[IQK] IQ Calibration for 1T1R_S0/S1 for %d times\n", t);
- rtw8723d_iqk_path_adda_on(rtwdev);
+ rtw8723x_iqk_path_adda_on(rtwdev, ADDA_ON_VAL_8723D);
rtw8723d_iqk_config_mac(rtwdev);
rtw_write32_mask(rtwdev, REG_CCK_ANT_SEL_11N, 0x0f000000, 0xf);
rtw_write32(rtwdev, REG_BB_RX_PATH_11N, 0x03a05611);
@@ -1427,7 +967,7 @@ static void rtw8723d_phy_calibration(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
s32 result[IQK_ROUND_SIZE][IQK_NR];
- struct iqk_backup_regs backup;
+ struct rtw8723x_iqk_backup_regs backup;
u8 i, j;
u8 final_candidate = IQK_ROUND_INVALID;
bool good;
@@ -1436,23 +976,23 @@ static void rtw8723d_phy_calibration(struct rtw_dev *rtwdev)
memset(result, 0, sizeof(result));
- rtw8723d_iqk_backup_path_ctrl(rtwdev, &backup);
- rtw8723d_iqk_backup_lte_path_gnt(rtwdev, &backup);
- rtw8723d_iqk_backup_regs(rtwdev, &backup);
+ rtw8723x_iqk_backup_path_ctrl(rtwdev, &backup);
+ rtw8723x_iqk_backup_lte_path_gnt(rtwdev, &backup);
+ rtw8723x_iqk_backup_regs(rtwdev, &backup);
for (i = IQK_ROUND_0; i <= IQK_ROUND_2; i++) {
- rtw8723d_iqk_config_path_ctrl(rtwdev);
- rtw8723d_iqk_config_lte_path_gnt(rtwdev);
+ rtw8723x_iqk_config_path_ctrl(rtwdev);
+ rtw8723x_iqk_config_lte_path_gnt(rtwdev, IQK_LTE_WRITE_VAL_8723D);
rtw8723d_iqk_one_round(rtwdev, result, i, &backup);
if (i > IQK_ROUND_0)
- rtw8723d_iqk_restore_regs(rtwdev, &backup);
- rtw8723d_iqk_restore_lte_path_gnt(rtwdev, &backup);
- rtw8723d_iqk_restore_path_ctrl(rtwdev, &backup);
+ rtw8723x_iqk_restore_regs(rtwdev, &backup);
+ rtw8723x_iqk_restore_lte_path_gnt(rtwdev, &backup);
+ rtw8723x_iqk_restore_path_ctrl(rtwdev, &backup);
for (j = IQK_ROUND_0; j < i; j++) {
- good = rtw8723d_iqk_similarity_cmp(rtwdev, result, j, i);
+ good = rtw8723x_iqk_similarity_cmp(rtwdev, result, j, i);
if (good) {
final_candidate = j;
@@ -1546,26 +1086,6 @@ static void rtw8723d_phy_cck_pd_set(struct rtw_dev *rtwdev, u8 new_lvl)
}
/* for coex */
-static void rtw8723d_coex_cfg_init(struct rtw_dev *rtwdev)
-{
- /* enable TBTT nterrupt */
- rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
-
- /* BT report packet sample rate */
- /* 0x790[5:0]=0x5 */
- rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, BIT_MASK_SAMPLE_RATE, 0x5);
-
- /* enable BT counter statistics */
- rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1);
-
- /* enable PTA (3-wire function form BT side) */
- rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN);
- rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_PO_BT_PTA_PINS);
-
- /* enable PTA (tx/rx signal form WiFi side) */
- rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN);
-}
-
static void rtw8723d_coex_cfg_gnt_fix(struct rtw_dev *rtwdev)
{
}
@@ -1671,39 +1191,6 @@ static void rtw8723d_coex_cfg_wl_rx_gain(struct rtw_dev *rtwdev, bool low_gain)
}
}
-static u8 rtw8723d_pwrtrack_get_limit_ofdm(struct rtw_dev *rtwdev)
-{
- struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- u8 tx_rate = dm_info->tx_rate;
- u8 limit_ofdm = 30;
-
- switch (tx_rate) {
- case DESC_RATE1M...DESC_RATE5_5M:
- case DESC_RATE11M:
- break;
- case DESC_RATE6M...DESC_RATE48M:
- limit_ofdm = 36;
- break;
- case DESC_RATE54M:
- limit_ofdm = 34;
- break;
- case DESC_RATEMCS0...DESC_RATEMCS2:
- limit_ofdm = 38;
- break;
- case DESC_RATEMCS3...DESC_RATEMCS4:
- limit_ofdm = 36;
- break;
- case DESC_RATEMCS5...DESC_RATEMCS7:
- limit_ofdm = 34;
- break;
- default:
- rtw_warn(rtwdev, "pwrtrack unhandled tx_rate 0x%x\n", tx_rate);
- break;
- }
-
- return limit_ofdm;
-}
-
static void rtw8723d_set_iqk_matrix_by_result(struct rtw_dev *rtwdev,
u32 ofdm_swing, u8 rf_path)
{
@@ -1845,7 +1332,7 @@ static void rtw8723d_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
s8 final_ofdm_swing_index;
s8 final_cck_swing_index;
- limit_ofdm = rtw8723d_pwrtrack_get_limit_ofdm(rtwdev);
+ limit_ofdm = rtw8723x_pwrtrack_get_limit_ofdm(rtwdev);
final_ofdm_swing_index = RTW_DEF_OFDM_SWING_INDEX +
dm_info->delta_power_index[path];
@@ -1873,26 +1360,6 @@ static void rtw8723d_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
}
-static void rtw8723d_pwrtrack_set_xtal(struct rtw_dev *rtwdev, u8 therm_path,
- u8 delta)
-{
- struct rtw_dm_info *dm_info = &rtwdev->dm_info;
- const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl;
- const s8 *pwrtrk_xtal;
- s8 xtal_cap;
-
- if (dm_info->thermal_avg[therm_path] >
- rtwdev->efuse.thermal_meter[therm_path])
- pwrtrk_xtal = tbl->pwrtrk_xtal_p;
- else
- pwrtrk_xtal = tbl->pwrtrk_xtal_n;
-
- xtal_cap = rtwdev->efuse.crystal_cap & 0x3F;
- xtal_cap = clamp_t(s8, xtal_cap + pwrtrk_xtal[delta], 0, 0x3F);
- rtw_write32_mask(rtwdev, REG_AFE_CTRL3, BIT_MASK_XTAL,
- xtal_cap | (xtal_cap << 6));
-}
-
static void rtw8723d_phy_pwrtrack(struct rtw_dev *rtwdev)
{
struct rtw_dm_info *dm_info = &rtwdev->dm_info;
@@ -1912,7 +1379,7 @@ static void rtw8723d_phy_pwrtrack(struct rtw_dev *rtwdev)
do_iqk = rtw_phy_pwrtrack_need_iqk(rtwdev);
if (do_iqk)
- rtw8723d_lck(rtwdev);
+ rtw8723x_lck(rtwdev);
if (dm_info->pwr_trk_init_trigger)
dm_info->pwr_trk_init_trigger = false;
@@ -1937,7 +1404,7 @@ static void rtw8723d_phy_pwrtrack(struct rtw_dev *rtwdev)
rtw8723d_pwrtrack_set(rtwdev, path);
}
- rtw8723d_pwrtrack_set_xtal(rtwdev, RF_PATH_A, delta);
+ rtw8723x_pwrtrack_set_xtal(rtwdev, RF_PATH_A, delta);
iqk:
if (do_iqk)
@@ -1963,49 +1430,29 @@ static void rtw8723d_pwr_track(struct rtw_dev *rtwdev)
dm_info->pwr_trk_triggered = false;
}
-static void rtw8723d_fill_txdesc_checksum(struct rtw_dev *rtwdev,
- struct rtw_tx_pkt_info *pkt_info,
- u8 *txdesc)
-{
- size_t words = 32 / 2; /* calculate the first 32 bytes (16 words) */
- __le16 chksum = 0;
- __le16 *data = (__le16 *)(txdesc);
- struct rtw_tx_desc *tx_desc = (struct rtw_tx_desc *)txdesc;
-
- le32p_replace_bits(&tx_desc->w7, 0, RTW_TX_DESC_W7_TXDESC_CHECKSUM);
-
- while (words--)
- chksum ^= *data++;
-
- chksum = ~chksum;
-
- le32p_replace_bits(&tx_desc->w7, __le16_to_cpu(chksum),
- RTW_TX_DESC_W7_TXDESC_CHECKSUM);
-}
-
static struct rtw_chip_ops rtw8723d_ops = {
.phy_set_param = rtw8723d_phy_set_param,
- .read_efuse = rtw8723d_read_efuse,
+ .read_efuse = rtw8723x_read_efuse,
.query_rx_desc = rtw8723d_query_rx_desc,
.set_channel = rtw8723d_set_channel,
- .mac_init = rtw8723d_mac_init,
+ .mac_init = rtw8723x_mac_init,
.shutdown = rtw8723d_shutdown,
.read_rf = rtw_phy_read_rf_sipi,
.write_rf = rtw_phy_write_rf_reg_sipi,
- .set_tx_power_index = rtw8723d_set_tx_power_index,
+ .set_tx_power_index = rtw8723x_set_tx_power_index,
.set_antenna = NULL,
- .cfg_ldo25 = rtw8723d_cfg_ldo25,
- .efuse_grant = rtw8723d_efuse_grant,
- .false_alarm_statistics = rtw8723d_false_alarm_statistics,
+ .cfg_ldo25 = rtw8723x_cfg_ldo25,
+ .efuse_grant = rtw8723x_efuse_grant,
+ .false_alarm_statistics = rtw8723x_false_alarm_statistics,
.phy_calibration = rtw8723d_phy_calibration,
.cck_pd_set = rtw8723d_phy_cck_pd_set,
.pwr_track = rtw8723d_pwr_track,
.config_bfee = NULL,
.set_gid_table = NULL,
.cfg_csi_rate = NULL,
- .fill_txdesc_checksum = rtw8723d_fill_txdesc_checksum,
- .coex_set_init = rtw8723d_coex_cfg_init,
+ .fill_txdesc_checksum = rtw8723x_fill_txdesc_checksum,
+ .coex_set_init = rtw8723x_coex_cfg_init,
.coex_set_ant_switch = NULL,
.coex_set_gnt_fix = rtw8723d_coex_cfg_gnt_fix,
.coex_set_gnt_debug = rtw8723d_coex_cfg_gnt_debug,
@@ -2592,22 +2039,6 @@ static const struct rtw_rqpn rqpn_table_8723d[] = {
RTW_DMA_MAPPING_EXTRA, RTW_DMA_MAPPING_HIGH},
};
-static const struct rtw_prioq_addrs prioq_addrs_8723d = {
- .prio[RTW_DMA_MAPPING_EXTRA] = {
- .rsvd = REG_RQPN_NPQ + 2, .avail = REG_RQPN_NPQ + 3,
- },
- .prio[RTW_DMA_MAPPING_LOW] = {
- .rsvd = REG_RQPN + 1, .avail = REG_FIFOPAGE_CTRL_2 + 1,
- },
- .prio[RTW_DMA_MAPPING_NORMAL] = {
- .rsvd = REG_RQPN_NPQ, .avail = REG_RQPN_NPQ + 1,
- },
- .prio[RTW_DMA_MAPPING_HIGH] = {
- .rsvd = REG_RQPN, .avail = REG_FIFOPAGE_CTRL_2,
- },
- .wsize = false,
-};
-
static const struct rtw_intf_phy_para pcie_gen1_param_8723d[] = {
{0x0008, 0x4a22,
RTW_IP_SEL_PHY,
@@ -2628,28 +2059,6 @@ static const struct rtw_intf_phy_para_table phy_para_table_8723d = {
.n_gen1_para = ARRAY_SIZE(pcie_gen1_param_8723d),
};
-static const struct rtw_hw_reg rtw8723d_dig[] = {
- [0] = { .addr = 0xc50, .mask = 0x7f },
- [1] = { .addr = 0xc50, .mask = 0x7f },
-};
-
-static const struct rtw_hw_reg rtw8723d_dig_cck[] = {
- [0] = { .addr = 0xa0c, .mask = 0x3f00 },
-};
-
-static const struct rtw_rf_sipi_addr rtw8723d_rf_sipi_addr[] = {
- [RF_PATH_A] = { .hssi_1 = 0x820, .lssi_read = 0x8a0,
- .hssi_2 = 0x824, .lssi_read_pi = 0x8b8},
- [RF_PATH_B] = { .hssi_1 = 0x828, .lssi_read = 0x8a4,
- .hssi_2 = 0x82c, .lssi_read_pi = 0x8bc},
-};
-
-static const struct rtw_ltecoex_addr rtw8723d_ltecoex_addr = {
- .ctrl = REG_LTECOEX_CTRL,
- .wdata = REG_LTECOEX_WRITE_DATA,
- .rdata = REG_LTECOEX_READ_DATA,
-};
-
static const struct rtw_rfe_def rtw8723d_rfe_defs[] = {
[0] = { .phy_pg_tbl = &rtw8723d_bb_pg_tbl,
.txpwr_lmt_tbl = &rtw8723d_txpwr_lmt_tbl,},
@@ -2762,6 +2171,7 @@ const struct rtw_chip_info rtw8723d_hw_spec = {
.band = RTW_BAND_2G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x20,
+ .usb_tx_agg_desc_num = 1,
.ht_supported = true,
.vht_supported = false,
.lps_deep_mode_supported = 0,
@@ -2770,14 +2180,14 @@ const struct rtw_chip_info rtw8723d_hw_spec = {
.pwr_off_seq = card_disable_flow_8723d,
.page_table = page_table_8723d,
.rqpn_table = rqpn_table_8723d,
- .prioq_addrs = &prioq_addrs_8723d,
+ .prioq_addrs = &rtw8723x_common.prioq_addrs,
.intf_table = &phy_para_table_8723d,
- .dig = rtw8723d_dig,
- .dig_cck = rtw8723d_dig_cck,
+ .dig = rtw8723x_common.dig,
+ .dig_cck = rtw8723x_common.dig_cck,
.rf_sipi_addr = {0x840, 0x844},
- .rf_sipi_read_addr = rtw8723d_rf_sipi_addr,
+ .rf_sipi_read_addr = rtw8723x_common.rf_sipi_addr,
.fix_rf_phy_num = 2,
- .ltecoex_addr = &rtw8723d_ltecoex_addr,
+ .ltecoex_addr = &rtw8723x_common.ltecoex_addr,
.mac_tbl = &rtw8723d_mac_tbl,
.agc_tbl = &rtw8723d_agc_tbl,
.bb_tbl = &rtw8723d_bb_tbl,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
index 2434e2480cbe..fba06c9f480e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
@@ -5,90 +5,7 @@
#ifndef __RTW8723D_H__
#define __RTW8723D_H__
-enum rtw8723d_path {
- PATH_S1,
- PATH_S0,
- PATH_NR,
-};
-
-enum rtw8723d_iqk_round {
- IQK_ROUND_0,
- IQK_ROUND_1,
- IQK_ROUND_2,
- IQK_ROUND_HYBRID,
- IQK_ROUND_SIZE,
- IQK_ROUND_INVALID = 0xff,
-};
-
-enum rtw8723d_iqk_result {
- IQK_S1_TX_X,
- IQK_S1_TX_Y,
- IQK_S1_RX_X,
- IQK_S1_RX_Y,
- IQK_S0_TX_X,
- IQK_S0_TX_Y,
- IQK_S0_RX_X,
- IQK_S0_RX_Y,
- IQK_NR,
- IQK_SX_NR = IQK_NR / PATH_NR,
-};
-
-struct rtw8723de_efuse {
- u8 mac_addr[ETH_ALEN]; /* 0xd0 */
- u8 vender_id[2];
- u8 device_id[2];
- u8 sub_vender_id[2];
- u8 sub_device_id[2];
-};
-
-struct rtw8723du_efuse {
- u8 res4[48]; /* 0xd0 */
- u8 vender_id[2]; /* 0x100 */
- u8 product_id[2]; /* 0x102 */
- u8 usb_option; /* 0x104 */
- u8 res5[2]; /* 0x105 */
- u8 mac_addr[ETH_ALEN]; /* 0x107 */
-};
-
-struct rtw8723ds_efuse {
- u8 res4[0x4a]; /* 0xd0 */
- u8 mac_addr[ETH_ALEN]; /* 0x11a */
-};
-
-struct rtw8723d_efuse {
- __le16 rtl_id;
- u8 rsvd[2];
- u8 afe;
- u8 rsvd1[11];
-
- /* power index for four RF paths */
- struct rtw_txpwr_idx txpwr_idx_table[4];
-
- u8 channel_plan; /* 0xb8 */
- u8 xtal_k;
- u8 thermal_meter;
- u8 iqk_lck;
- u8 pa_type; /* 0xbc */
- u8 lna_type_2g[2]; /* 0xbd */
- u8 lna_type_5g[2];
- u8 rf_board_option;
- u8 rf_feature_option;
- u8 rf_bt_setting;
- u8 eeprom_version;
- u8 eeprom_customer_id;
- u8 tx_bb_swing_setting_2g;
- u8 res_c7;
- u8 tx_pwr_calibrate_rate;
- u8 rf_antenna_option; /* 0xc9 */
- u8 rfe_option;
- u8 country_code[2];
- u8 res[3];
- union {
- struct rtw8723de_efuse e;
- struct rtw8723du_efuse u;
- struct rtw8723ds_efuse s;
- };
-};
+#include "rtw8723x.h"
extern const struct rtw_chip_info rtw8723d_hw_spec;
@@ -114,193 +31,9 @@ extern const struct rtw_chip_info rtw8723d_hw_spec;
#define GET_PHY_STAT_P1_RXSNR_A(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(7, 0))
-static inline s32 iqkxy_to_s32(s32 val)
-{
- /* val is Q10.8 */
- return sign_extend32(val, 9);
-}
-
-static inline s32 iqk_mult(s32 x, s32 y, s32 *ext)
-{
- /* x, y and return value are Q10.8 */
- s32 t;
-
- t = x * y;
- if (ext)
- *ext = (t >> 7) & 0x1; /* Q.16 --> Q.9; get LSB of Q.9 */
-
- return (t >> 8); /* Q.16 --> Q.8 */
-}
-
-#define OFDM_SWING_A(swing) FIELD_GET(GENMASK(9, 0), swing)
-#define OFDM_SWING_B(swing) FIELD_GET(GENMASK(15, 10), swing)
-#define OFDM_SWING_C(swing) FIELD_GET(GENMASK(21, 16), swing)
-#define OFDM_SWING_D(swing) FIELD_GET(GENMASK(31, 22), swing)
#define RTW_DEF_OFDM_SWING_INDEX 28
#define RTW_DEF_CCK_SWING_INDEX 28
-#define MAX_TOLERANCE 5
-#define IQK_TX_X_ERR 0x142
-#define IQK_TX_Y_ERR 0x42
-#define IQK_RX_X_UPPER 0x11a
-#define IQK_RX_X_LOWER 0xe6
-#define IQK_RX_Y_LMT 0x1a
-#define IQK_TX_OK BIT(0)
-#define IQK_RX_OK BIT(1)
-#define PATH_IQK_RETRY 2
-
-#define SPUR_THRES 0x16
#define CCK_DFIR_NR 3
-#define DIS_3WIRE 0xccf000c0
-#define EN_3WIRE 0xccc000c0
-#define START_PSD 0x400000
-#define FREQ_CH13 0xfccd
-#define FREQ_CH14 0xff9a
-#define RFCFGCH_CHANNEL_MASK GENMASK(7, 0)
-#define RFCFGCH_BW_MASK (BIT(11) | BIT(10))
-#define RFCFGCH_BW_20M (BIT(11) | BIT(10))
-#define RFCFGCH_BW_40M BIT(10)
-#define BIT_MASK_RFMOD BIT(0)
-#define BIT_LCK BIT(15)
-
-#define REG_GPIO_INTM 0x0048
-#define REG_BTG_SEL 0x0067
-#define BIT_MASK_BTG_WL BIT(7)
-#define REG_LTECOEX_PATH_CONTROL 0x0070
-#define REG_LTECOEX_CTRL 0x07c0
-#define REG_LTECOEX_WRITE_DATA 0x07c4
-#define REG_LTECOEX_READ_DATA 0x07c8
-#define REG_PSDFN 0x0808
-#define REG_BB_PWR_SAV1_11N 0x0874
-#define REG_ANA_PARAM1 0x0880
-#define REG_ANALOG_P4 0x088c
-#define REG_PSDRPT 0x08b4
-#define REG_FPGA1_RFMOD 0x0900
-#define REG_BB_SEL_BTG 0x0948
-#define REG_BBRX_DFIR 0x0954
-#define BIT_MASK_RXBB_DFIR GENMASK(27, 24)
-#define BIT_RXBB_DFIR_EN BIT(19)
-#define REG_CCK0_SYS 0x0a00
-#define BIT_CCK_SIDE_BAND BIT(4)
-#define REG_CCK_ANT_SEL_11N 0x0a04
-#define REG_PWRTH 0x0a08
-#define REG_CCK_FA_RST_11N 0x0a2c
-#define BIT_MASK_CCK_CNT_KEEP BIT(12)
-#define BIT_MASK_CCK_CNT_EN BIT(13)
-#define BIT_MASK_CCK_CNT_KPEN (BIT_MASK_CCK_CNT_KEEP | BIT_MASK_CCK_CNT_EN)
-#define BIT_MASK_CCK_FA_KEEP BIT(14)
-#define BIT_MASK_CCK_FA_EN BIT(15)
-#define BIT_MASK_CCK_FA_KPEN (BIT_MASK_CCK_FA_KEEP | BIT_MASK_CCK_FA_EN)
-#define REG_CCK_FA_LSB_11N 0x0a5c
-#define REG_CCK_FA_MSB_11N 0x0a58
-#define REG_CCK_CCA_CNT_11N 0x0a60
-#define BIT_MASK_CCK_FA_MSB GENMASK(7, 0)
-#define BIT_MASK_CCK_FA_LSB GENMASK(15, 8)
-#define REG_PWRTH2 0x0aa8
-#define REG_CSRATIO 0x0aaa
-#define REG_OFDM_FA_HOLDC_11N 0x0c00
-#define BIT_MASK_OFDM_FA_KEEP BIT(31)
-#define REG_BB_RX_PATH_11N 0x0c04
-#define REG_TRMUX_11N 0x0c08
-#define REG_OFDM_FA_RSTC_11N 0x0c0c
-#define BIT_MASK_OFDM_FA_RST BIT(31)
-#define REG_A_RXIQI 0x0c14
-#define BIT_MASK_RXIQ_S1_X 0x000003FF
-#define BIT_MASK_RXIQ_S1_Y1 0x0000FC00
-#define BIT_SET_RXIQ_S1_Y1(y) ((y) & 0x3F)
-#define REG_OFDM0_RXDSP 0x0c40
-#define BIT_MASK_RXDSP GENMASK(28, 24)
-#define BIT_EN_RXDSP BIT(9)
-#define REG_OFDM_0_ECCA_THRESHOLD 0x0c4c
-#define BIT_MASK_OFDM0_EXT_A BIT(31)
-#define BIT_MASK_OFDM0_EXT_C BIT(29)
-#define BIT_MASK_OFDM0_EXTS (BIT(31) | BIT(29) | BIT(28))
-#define BIT_SET_OFDM0_EXTS(a, c, d) (((a) << 31) | ((c) << 29) | ((d) << 28))
-#define REG_OFDM0_XAAGC1 0x0c50
-#define REG_OFDM0_XBAGC1 0x0c58
-#define REG_AGCRSSI 0x0c78
-#define REG_OFDM_0_XA_TX_IQ_IMBALANCE 0x0c80
-#define BIT_MASK_TXIQ_ELM_A 0x03ff
-#define BIT_SET_TXIQ_ELM_ACD(a, c, d) (((d) << 22) | (((c) & 0x3F) << 16) | \
- ((a) & 0x03ff))
-#define BIT_MASK_TXIQ_ELM_C GENMASK(21, 16)
-#define BIT_SET_TXIQ_ELM_C2(c) ((c) & 0x3F)
-#define BIT_MASK_TXIQ_ELM_D GENMASK(31, 22)
-#define REG_TXIQK_MATRIXA_LSB2_11N 0x0c94
-#define BIT_SET_TXIQ_ELM_C1(c) (((c) & 0x000003C0) >> 6)
-#define REG_RXIQK_MATRIX_LSB_11N 0x0ca0
-#define BIT_MASK_RXIQ_S1_Y2 0xF0000000
-#define BIT_SET_RXIQ_S1_Y2(y) (((y) >> 6) & 0xF)
-#define REG_TXIQ_AB_S0 0x0cd0
-#define BIT_MASK_TXIQ_A_S0 0x000007FE
-#define BIT_MASK_TXIQ_A_EXT_S0 BIT(0)
-#define BIT_MASK_TXIQ_B_S0 0x0007E000
-#define REG_TXIQ_CD_S0 0x0cd4
-#define BIT_MASK_TXIQ_C_S0 0x000007FE
-#define BIT_MASK_TXIQ_C_EXT_S0 BIT(0)
-#define BIT_MASK_TXIQ_D_S0 GENMASK(22, 13)
-#define BIT_MASK_TXIQ_D_EXT_S0 BIT(12)
-#define REG_RXIQ_AB_S0 0x0cd8
-#define BIT_MASK_RXIQ_X_S0 0x000003FF
-#define BIT_MASK_RXIQ_Y_S0 0x003FF000
-#define REG_OFDM_FA_TYPE1_11N 0x0cf0
-#define BIT_MASK_OFDM_FF_CNT GENMASK(15, 0)
-#define BIT_MASK_OFDM_SF_CNT GENMASK(31, 16)
-#define REG_OFDM_FA_RSTD_11N 0x0d00
-#define BIT_MASK_OFDM_FA_RST1 BIT(27)
-#define BIT_MASK_OFDM_FA_KEEP1 BIT(31)
-#define REG_CTX 0x0d03
-#define BIT_MASK_CTX_TYPE GENMASK(6, 4)
-#define REG_OFDM1_CFOTRK 0x0d2c
-#define BIT_EN_CFOTRK BIT(28)
-#define REG_OFDM1_CSI1 0x0d40
-#define REG_OFDM1_CSI2 0x0d44
-#define REG_OFDM1_CSI3 0x0d48
-#define REG_OFDM1_CSI4 0x0d4c
-#define REG_OFDM_FA_TYPE2_11N 0x0da0
-#define BIT_MASK_OFDM_CCA_CNT GENMASK(15, 0)
-#define BIT_MASK_OFDM_PF_CNT GENMASK(31, 16)
-#define REG_OFDM_FA_TYPE3_11N 0x0da4
-#define BIT_MASK_OFDM_RI_CNT GENMASK(15, 0)
-#define BIT_MASK_OFDM_CRC_CNT GENMASK(31, 16)
-#define REG_OFDM_FA_TYPE4_11N 0x0da8
-#define BIT_MASK_OFDM_MNS_CNT GENMASK(15, 0)
-#define REG_FPGA0_IQK_11N 0x0e28
-#define BIT_MASK_IQK_MOD 0xffffff00
-#define EN_IQK 0x808000
-#define RST_IQK 0x000000
-#define REG_TXIQK_TONE_A_11N 0x0e30
-#define REG_RXIQK_TONE_A_11N 0x0e34
-#define REG_TXIQK_PI_A_11N 0x0e38
-#define REG_RXIQK_PI_A_11N 0x0e3c
-#define REG_TXIQK_11N 0x0e40
-#define BIT_SET_TXIQK_11N(x, y) (0x80007C00 | ((x) << 16) | (y))
-#define REG_RXIQK_11N 0x0e44
-#define REG_IQK_AGC_PTS_11N 0x0e48
-#define REG_IQK_AGC_RSP_11N 0x0e4c
-#define REG_TX_IQK_TONE_B 0x0e50
-#define REG_RX_IQK_TONE_B 0x0e54
-#define REG_IQK_RES_TX 0x0e94
-#define BIT_MASK_RES_TX GENMASK(25, 16)
-#define REG_IQK_RES_TY 0x0e9c
-#define BIT_MASK_RES_TY GENMASK(25, 16)
-#define REG_IQK_RES_RX 0x0ea4
-#define BIT_MASK_RES_RX GENMASK(25, 16)
-#define REG_IQK_RES_RY 0x0eac
-#define BIT_IQK_TX_FAIL BIT(28)
-#define BIT_IQK_RX_FAIL BIT(27)
-#define BIT_IQK_DONE BIT(26)
-#define BIT_MASK_RES_RY GENMASK(25, 16)
-#define REG_PAGE_F_RST_11N 0x0f14
-#define BIT_MASK_F_RST_ALL BIT(16)
-#define REG_IGI_C_11N 0x0f84
-#define REG_IGI_D_11N 0x0f88
-#define REG_HT_CRC32_CNT_11N 0x0f90
-#define BIT_MASK_HT_CRC_OK GENMASK(15, 0)
-#define BIT_MASK_HT_CRC_ERR GENMASK(31, 16)
-#define REG_OFDM_CRC32_CNT_11N 0x0f94
-#define BIT_MASK_OFDM_LCRC_OK GENMASK(15, 0)
-#define BIT_MASK_OFDM_LCRC_ERR GENMASK(31, 16)
-#define REG_HT_CRC32_CNT_11N_AGG 0x0fb8
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.c b/drivers/net/wireless/realtek/rtw88/rtw8723x.c
new file mode 100644
index 000000000000..0d0b6c2cb9aa
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.c
@@ -0,0 +1,721 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright 2024 Fiona Klute
+ *
+ * Based on code originally in rtw8723d.[ch],
+ * Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#include "main.h"
+#include "debug.h"
+#include "phy.h"
+#include "reg.h"
+#include "tx.h"
+#include "rtw8723x.h"
+
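+/* Per-rate TX power index registers shared across the 8723x family:
+ * each entry gives the register and byte mask holding the power
+ * index for that DESC_RATE.
+ */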
+static const struct rtw_hw_reg rtw8723x_txagc[] = {
+ [DESC_RATE1M] = { .addr = 0xe08, .mask = 0x0000ff00 },
+ [DESC_RATE2M] = { .addr = 0x86c, .mask = 0x0000ff00 },
+ [DESC_RATE5_5M] = { .addr = 0x86c, .mask = 0x00ff0000 },
+ [DESC_RATE11M] = { .addr = 0x86c, .mask = 0xff000000 },
+ [DESC_RATE6M] = { .addr = 0xe00, .mask = 0x000000ff },
+ [DESC_RATE9M] = { .addr = 0xe00, .mask = 0x0000ff00 },
+ [DESC_RATE12M] = { .addr = 0xe00, .mask = 0x00ff0000 },
+ [DESC_RATE18M] = { .addr = 0xe00, .mask = 0xff000000 },
+ [DESC_RATE24M] = { .addr = 0xe04, .mask = 0x000000ff },
+ [DESC_RATE36M] = { .addr = 0xe04, .mask = 0x0000ff00 },
+ [DESC_RATE48M] = { .addr = 0xe04, .mask = 0x00ff0000 },
+ [DESC_RATE54M] = { .addr = 0xe04, .mask = 0xff000000 },
+ [DESC_RATEMCS0] = { .addr = 0xe10, .mask = 0x000000ff },
+ [DESC_RATEMCS1] = { .addr = 0xe10, .mask = 0x0000ff00 },
+ [DESC_RATEMCS2] = { .addr = 0xe10, .mask = 0x00ff0000 },
+ [DESC_RATEMCS3] = { .addr = 0xe10, .mask = 0xff000000 },
+ [DESC_RATEMCS4] = { .addr = 0xe14, .mask = 0x000000ff },
+ [DESC_RATEMCS5] = { .addr = 0xe14, .mask = 0x0000ff00 },
+ [DESC_RATEMCS6] = { .addr = 0xe14, .mask = 0x00ff0000 },
+ [DESC_RATEMCS7] = { .addr = 0xe14, .mask = 0xff000000 },
+};
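+
+/* Each entry in the table above maps one DESC_RATE* index to the byte
+ * of a TX AGC register that holds that rate's power index;
+ * rtw_write32_mask() then does a read-modify-write of just the bits
+ * selected by .mask (e.g. 0x0000ff00 touches bits 15:8 of .addr).
+ */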
+
+static void __rtw8723x_lck(struct rtw_dev *rtwdev)
+{
+ u32 lc_cal;
+ u8 val_ctx, rf_val;
+ int ret;
+
+ val_ctx = rtw_read8(rtwdev, REG_CTX);
+ if ((val_ctx & BIT_MASK_CTX_TYPE) != 0)
+ rtw_write8(rtwdev, REG_CTX, val_ctx & ~BIT_MASK_CTX_TYPE);
+ else
+ rtw_write8(rtwdev, REG_TXPAUSE, 0xFF);
+ lc_cal = rtw_read_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK);
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal | BIT_LCK);
+
+ ret = read_poll_timeout(rtw_read_rf, rf_val, rf_val != 0x1,
+ 10000, 1000000, false,
+ rtwdev, RF_PATH_A, RF_CFGCH, BIT_LCK);
+ if (ret)
+ rtw_warn(rtwdev, "failed to poll LCK status bit\n");
+
+ rtw_write_rf(rtwdev, RF_PATH_A, RF_CFGCH, RFREG_MASK, lc_cal);
+ if ((val_ctx & BIT_MASK_CTX_TYPE) != 0)
+ rtw_write8(rtwdev, REG_CTX, val_ctx);
+ else
+ rtw_write8(rtwdev, REG_TXPAUSE, 0x00);
+}
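+
+/* Note on the poll above: read_poll_timeout(op, val, cond, sleep_us,
+ * timeout_us, sleep_before_read, args...) re-reads op(args...) into
+ * val every sleep_us (10 ms here) until cond holds or timeout_us
+ * (1 s) expires, returning -ETIMEDOUT on timeout. Setting BIT_LCK in
+ * RF_CFGCH starts the LC calibration; the chip clears the bit again
+ * once it is done, which is the condition polled for.
+ */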
+
+#define DBG_EFUSE_VAL(rtwdev, map, name) \
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, # name "=0x%02x\n", \
+ (map)->name)
+#define DBG_EFUSE_2BYTE(rtwdev, map, name) \
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, # name "=0x%02x%02x\n", \
+ (map)->name[0], (map)->name[1])
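+
+/* For illustration only: thanks to the # stringify operator, a call
+ * such as DBG_EFUSE_VAL(rtwdev, map, afe) expands to
+ *   rtw_dbg(rtwdev, RTW_DBG_EFUSE, "afe=0x%02x\n", (map)->afe);
+ * so each field name is written exactly once per debug line.
+ */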
+
+static void rtw8723xe_efuse_debug(struct rtw_dev *rtwdev,
+ struct rtw8723x_efuse *map)
+{
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "mac_addr=%pM\n", map->e.mac_addr);
+ DBG_EFUSE_2BYTE(rtwdev, map, e.vendor_id);
+ DBG_EFUSE_2BYTE(rtwdev, map, e.device_id);
+ DBG_EFUSE_2BYTE(rtwdev, map, e.sub_vendor_id);
+ DBG_EFUSE_2BYTE(rtwdev, map, e.sub_device_id);
+}
+
+static void rtw8723xu_efuse_debug(struct rtw_dev *rtwdev,
+ struct rtw8723x_efuse *map)
+{
+ DBG_EFUSE_2BYTE(rtwdev, map, u.vendor_id);
+ DBG_EFUSE_2BYTE(rtwdev, map, u.product_id);
+ DBG_EFUSE_VAL(rtwdev, map, u.usb_option);
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "mac_addr=%pM\n", map->u.mac_addr);
+}
+
+static void rtw8723xs_efuse_debug(struct rtw_dev *rtwdev,
+ struct rtw8723x_efuse *map)
+{
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "mac_addr=%pM\n", map->s.mac_addr);
+}
+
+static void __rtw8723x_debug_txpwr_limit(struct rtw_dev *rtwdev,
+ struct rtw_txpwr_idx *table,
+ int tx_path_count)
+{
+ if (!rtw_dbg_is_enabled(rtwdev, RTW_DBG_EFUSE))
+ return;
+
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "Power index table (2.4G):\n");
+ /* CCK base */
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "CCK base\n");
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF G0 G1 G2 G3 G4 G5\n");
+ for (int i = 0; i < tx_path_count; i++)
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "[%c]: %3u %3u %3u %3u %3u %3u\n",
+ 'A' + i,
+ table[i].pwr_idx_2g.cck_base[0],
+ table[i].pwr_idx_2g.cck_base[1],
+ table[i].pwr_idx_2g.cck_base[2],
+ table[i].pwr_idx_2g.cck_base[3],
+ table[i].pwr_idx_2g.cck_base[4],
+ table[i].pwr_idx_2g.cck_base[5]);
+ /* CCK diff */
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "CCK diff\n");
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF 1S 2S 3S 4S\n");
+ for (int i = 0; i < tx_path_count; i++)
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "[%c]: %2d %2d %2d %2d\n",
+ 'A' + i, 0 /* no diff for 1S */,
+ table[i].pwr_idx_2g.ht_2s_diff.cck,
+ table[i].pwr_idx_2g.ht_3s_diff.cck,
+ table[i].pwr_idx_2g.ht_4s_diff.cck);
+ /* BW40-1S base */
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "BW40-1S base\n");
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF G0 G1 G2 G3 G4\n");
+ for (int i = 0; i < tx_path_count; i++)
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "[%c]: %3u %3u %3u %3u %3u\n",
+ 'A' + i,
+ table[i].pwr_idx_2g.bw40_base[0],
+ table[i].pwr_idx_2g.bw40_base[1],
+ table[i].pwr_idx_2g.bw40_base[2],
+ table[i].pwr_idx_2g.bw40_base[3],
+ table[i].pwr_idx_2g.bw40_base[4]);
+ /* OFDM diff */
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "OFDM diff\n");
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF 1S 2S 3S 4S\n");
+ for (int i = 0; i < tx_path_count; i++)
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "[%c]: %2d %2d %2d %2d\n",
+ 'A' + i,
+ table[i].pwr_idx_2g.ht_1s_diff.ofdm,
+ table[i].pwr_idx_2g.ht_2s_diff.ofdm,
+ table[i].pwr_idx_2g.ht_3s_diff.ofdm,
+ table[i].pwr_idx_2g.ht_4s_diff.ofdm);
+ /* BW20 diff */
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "BW20 diff\n");
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF 1S 2S 3S 4S\n");
+ for (int i = 0; i < tx_path_count; i++)
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "[%c]: %2d %2d %2d %2d\n",
+ 'A' + i,
+ table[i].pwr_idx_2g.ht_1s_diff.bw20,
+ table[i].pwr_idx_2g.ht_2s_diff.bw20,
+ table[i].pwr_idx_2g.ht_3s_diff.bw20,
+ table[i].pwr_idx_2g.ht_4s_diff.bw20);
+ /* BW40 diff */
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "BW40 diff\n");
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "RF 1S 2S 3S 4S\n");
+ for (int i = 0; i < tx_path_count; i++)
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE,
+ "[%c]: %2d %2d %2d %2d\n",
+ 'A' + i, 0 /* no diff for 1S */,
+ table[i].pwr_idx_2g.ht_2s_diff.bw40,
+ table[i].pwr_idx_2g.ht_3s_diff.bw40,
+ table[i].pwr_idx_2g.ht_4s_diff.bw40);
+}
+
+static void efuse_debug_dump(struct rtw_dev *rtwdev,
+ struct rtw8723x_efuse *map)
+{
+ if (!rtw_dbg_is_enabled(rtwdev, RTW_DBG_EFUSE))
+ return;
+
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "EFUSE raw logical map:\n");
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 16, 1,
+ (u8 *)map, sizeof(struct rtw8723x_efuse), false);
+ rtw_dbg(rtwdev, RTW_DBG_EFUSE, "Parsed rtw8723x EFUSE data:\n");
+ DBG_EFUSE_VAL(rtwdev, map, rtl_id);
+ DBG_EFUSE_VAL(rtwdev, map, afe);
+ rtw8723x_debug_txpwr_limit(rtwdev, map->txpwr_idx_table, 4);
+ DBG_EFUSE_VAL(rtwdev, map, channel_plan);
+ DBG_EFUSE_VAL(rtwdev, map, xtal_k);
+ DBG_EFUSE_VAL(rtwdev, map, thermal_meter);
+ DBG_EFUSE_VAL(rtwdev, map, iqk_lck);
+ DBG_EFUSE_VAL(rtwdev, map, pa_type);
+ DBG_EFUSE_2BYTE(rtwdev, map, lna_type_2g);
+ DBG_EFUSE_2BYTE(rtwdev, map, lna_type_5g);
+ DBG_EFUSE_VAL(rtwdev, map, rf_board_option);
+ DBG_EFUSE_VAL(rtwdev, map, rf_feature_option);
+ DBG_EFUSE_VAL(rtwdev, map, rf_bt_setting);
+ DBG_EFUSE_VAL(rtwdev, map, eeprom_version);
+ DBG_EFUSE_VAL(rtwdev, map, eeprom_customer_id);
+ DBG_EFUSE_VAL(rtwdev, map, tx_bb_swing_setting_2g);
+ DBG_EFUSE_VAL(rtwdev, map, tx_pwr_calibrate_rate);
+ DBG_EFUSE_VAL(rtwdev, map, rf_antenna_option);
+ DBG_EFUSE_VAL(rtwdev, map, rfe_option);
+ DBG_EFUSE_2BYTE(rtwdev, map, country_code);
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtw8723xe_efuse_debug(rtwdev, map);
+ break;
+ case RTW_HCI_TYPE_USB:
+ rtw8723xu_efuse_debug(rtwdev, map);
+ break;
+ case RTW_HCI_TYPE_SDIO:
+ rtw8723xs_efuse_debug(rtwdev, map);
+ break;
+ default:
+ /* unsupported now */
+ break;
+ }
+}
+
+static void rtw8723xe_efuse_parsing(struct rtw_efuse *efuse,
+ struct rtw8723x_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+}
+
+static void rtw8723xu_efuse_parsing(struct rtw_efuse *efuse,
+ struct rtw8723x_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->u.mac_addr);
+}
+
+static void rtw8723xs_efuse_parsing(struct rtw_efuse *efuse,
+ struct rtw8723x_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->s.mac_addr);
+}
+
+static int __rtw8723x_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ struct rtw_efuse *efuse = &rtwdev->efuse;
+ struct rtw8723x_efuse *map;
+ int i;
+
+ map = (struct rtw8723x_efuse *)log_map;
+ efuse_debug_dump(rtwdev, map);
+
+ efuse->rfe_option = 0;
+ efuse->rf_board_option = map->rf_board_option;
+ efuse->crystal_cap = map->xtal_k;
+ efuse->pa_type_2g = map->pa_type;
+ efuse->lna_type_2g = map->lna_type_2g[0];
+ efuse->channel_plan = map->channel_plan;
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ efuse->bt_setting = map->rf_bt_setting;
+ efuse->regd = map->rf_board_option & 0x7;
+ efuse->thermal_meter[0] = map->thermal_meter;
+ efuse->thermal_meter_k = map->thermal_meter;
+ efuse->afe = map->afe;
+
+ for (i = 0; i < 4; i++)
+ efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+
+ switch (rtw_hci_type(rtwdev)) {
+ case RTW_HCI_TYPE_PCIE:
+ rtw8723xe_efuse_parsing(efuse, map);
+ break;
+ case RTW_HCI_TYPE_USB:
+ rtw8723xu_efuse_parsing(efuse, map);
+ break;
+ case RTW_HCI_TYPE_SDIO:
+ rtw8723xs_efuse_parsing(efuse, map);
+ break;
+ default:
+ /* unsupported now */
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+#define BIT_CFENDFORM BIT(9)
+#define BIT_WMAC_TCR_ERR0 BIT(12)
+#define BIT_WMAC_TCR_ERR1 BIT(13)
+#define BIT_TCR_CFG (BIT_CFENDFORM | BIT_WMAC_TCR_ERR0 | \
+ BIT_WMAC_TCR_ERR1)
+#define WLAN_RX_FILTER0 0xFFFF
+#define WLAN_RX_FILTER1 0x400
+#define WLAN_RX_FILTER2 0xFFFF
+#define WLAN_RCR_CFG 0x700060CE
+
+static int __rtw8723x_mac_init(struct rtw_dev *rtwdev)
+{
+ rtw_write8(rtwdev, REG_FWHW_TXQ_CTRL + 1, WLAN_TXQ_RPT_EN);
+ rtw_write32(rtwdev, REG_TCR, BIT_TCR_CFG);
+
+ rtw_write16(rtwdev, REG_RXFLTMAP0, WLAN_RX_FILTER0);
+ rtw_write16(rtwdev, REG_RXFLTMAP1, WLAN_RX_FILTER1);
+ rtw_write16(rtwdev, REG_RXFLTMAP2, WLAN_RX_FILTER2);
+ rtw_write32(rtwdev, REG_RCR, WLAN_RCR_CFG);
+
+ rtw_write32(rtwdev, REG_INT_MIG, 0);
+ rtw_write32(rtwdev, REG_MCUTST_1, 0x0);
+
+ rtw_write8(rtwdev, REG_MISC_CTRL, BIT_DIS_SECOND_CCA);
+ rtw_write8(rtwdev, REG_2ND_CCA_CTRL, 0);
+
+ return 0;
+}
+
+static void __rtw8723x_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
+{
+ u8 ldo_pwr;
+
+ ldo_pwr = rtw_read8(rtwdev, REG_LDO_EFUSE_CTRL + 3);
+ if (enable) {
+ ldo_pwr &= ~BIT_MASK_LDO25_VOLTAGE;
+ ldo_pwr |= (BIT_LDO25_VOLTAGE_V25 << 4) | BIT_LDO25_EN;
+ } else {
+ ldo_pwr &= ~BIT_LDO25_EN;
+ }
+ rtw_write8(rtwdev, REG_LDO_EFUSE_CTRL + 3, ldo_pwr);
+}
+
+static void
+rtw8723x_set_tx_power_index_by_rate(struct rtw_dev *rtwdev, u8 path, u8 rs)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ const struct rtw_hw_reg *txagc;
+ u8 rate, pwr_index;
+ int j;
+
+ for (j = 0; j < rtw_rate_size[rs]; j++) {
+ rate = rtw_rate_section[rs][j];
+ pwr_index = hal->tx_pwr_tbl[path][rate];
+
+ if (rate >= ARRAY_SIZE(rtw8723x_txagc)) {
+ rtw_warn(rtwdev, "rate 0x%x isn't supported\n", rate);
+ continue;
+ }
+ txagc = &rtw8723x_txagc[rate];
+ if (!txagc->addr) {
+ rtw_warn(rtwdev, "rate 0x%x isn't defined\n", rate);
+ continue;
+ }
+
+ rtw_write32_mask(rtwdev, txagc->addr, txagc->mask, pwr_index);
+ }
+}
+
+static void __rtw8723x_set_tx_power_index(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ int rs, path;
+
+ for (path = 0; path < hal->rf_path_num; path++) {
+ for (rs = 0; rs <= RTW_RATE_SECTION_HT_1S; rs++)
+ rtw8723x_set_tx_power_index_by_rate(rtwdev, path, rs);
+ }
+}
+
+static void __rtw8723x_efuse_grant(struct rtw_dev *rtwdev, bool on)
+{
+ if (on) {
+ rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
+
+ rtw_write16_set(rtwdev, REG_SYS_FUNC_EN, BIT_FEN_ELDR);
+ rtw_write16_set(rtwdev, REG_SYS_CLKR, BIT_LOADER_CLK_EN | BIT_ANA8M);
+ } else {
+ rtw_write8(rtwdev, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
+ }
+}
+
+static void __rtw8723x_false_alarm_statistics(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u32 cck_fa_cnt;
+ u32 ofdm_fa_cnt;
+ u32 crc32_cnt;
+ u32 val32;
+
+ /* hold counter */
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 1);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 1);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KEEP, 1);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KEEP, 1);
+
+ cck_fa_cnt = rtw_read32_mask(rtwdev, REG_CCK_FA_LSB_11N, MASKBYTE0);
+ cck_fa_cnt += rtw_read32_mask(rtwdev, REG_CCK_FA_MSB_11N, MASKBYTE3) << 8;
+
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE1_11N);
+ ofdm_fa_cnt = u32_get_bits(val32, BIT_MASK_OFDM_FF_CNT);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_SF_CNT);
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE2_11N);
+ dm_info->ofdm_cca_cnt = u32_get_bits(val32, BIT_MASK_OFDM_CCA_CNT);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_PF_CNT);
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE3_11N);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_RI_CNT);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_CRC_CNT);
+ val32 = rtw_read32(rtwdev, REG_OFDM_FA_TYPE4_11N);
+ ofdm_fa_cnt += u32_get_bits(val32, BIT_MASK_OFDM_MNS_CNT);
+
+ dm_info->cck_fa_cnt = cck_fa_cnt;
+ dm_info->ofdm_fa_cnt = ofdm_fa_cnt;
+ dm_info->total_fa_cnt = cck_fa_cnt + ofdm_fa_cnt;
+
+ dm_info->cck_err_cnt = rtw_read32(rtwdev, REG_IGI_C_11N);
+ dm_info->cck_ok_cnt = rtw_read32(rtwdev, REG_IGI_D_11N);
+ crc32_cnt = rtw_read32(rtwdev, REG_OFDM_CRC32_CNT_11N);
+ dm_info->ofdm_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_ERR);
+ dm_info->ofdm_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_OFDM_LCRC_OK);
+ crc32_cnt = rtw_read32(rtwdev, REG_HT_CRC32_CNT_11N);
+ dm_info->ht_err_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_ERR);
+ dm_info->ht_ok_cnt = u32_get_bits(crc32_cnt, BIT_MASK_HT_CRC_OK);
+ dm_info->vht_err_cnt = 0;
+ dm_info->vht_ok_cnt = 0;
+
+ val32 = rtw_read32(rtwdev, REG_CCK_CCA_CNT_11N);
+ dm_info->cck_cca_cnt = (u32_get_bits(val32, BIT_MASK_CCK_FA_MSB) << 8) |
+ u32_get_bits(val32, BIT_MASK_CCK_FA_LSB);
+ dm_info->total_cca_cnt = dm_info->cck_cca_cnt + dm_info->ofdm_cca_cnt;
+
+ /* reset counter */
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 1);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTC_11N, BIT_MASK_OFDM_FA_RST, 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 1);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_RST1, 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_HOLDC_11N, BIT_MASK_OFDM_FA_KEEP, 0);
+ rtw_write32_mask(rtwdev, REG_OFDM_FA_RSTD_11N, BIT_MASK_OFDM_FA_KEEP1, 0);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 0);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_CNT_KPEN, 2);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 0);
+ rtw_write32_mask(rtwdev, REG_CCK_FA_RST_11N, BIT_MASK_CCK_FA_KPEN, 2);
+ rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 1);
+ rtw_write32_mask(rtwdev, REG_PAGE_F_RST_11N, BIT_MASK_F_RST_ALL, 0);
+}
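+
+/* The hold/release sequence above freezes the hardware counters while
+ * the split registers are read (the CCK FA count, for instance, spans
+ * an LSB and an MSB register), keeping the totals consistent; the
+ * reset writes afterwards restart the accumulation window.
+ */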
+
+/* IQK (IQ calibration) */
+
+static
+void __rtw8723x_iqk_backup_regs(struct rtw_dev *rtwdev,
+ struct rtw8723x_iqk_backup_regs *backup)
+{
+ int i;
+
+ for (i = 0; i < RTW8723X_IQK_ADDA_REG_NUM; i++)
+ backup->adda[i] = rtw_read32(rtwdev,
+ rtw8723x_common.iqk_adda_regs[i]);
+
+ for (i = 0; i < RTW8723X_IQK_MAC8_REG_NUM; i++)
+ backup->mac8[i] = rtw_read8(rtwdev,
+ rtw8723x_common.iqk_mac8_regs[i]);
+ for (i = 0; i < RTW8723X_IQK_MAC32_REG_NUM; i++)
+ backup->mac32[i] = rtw_read32(rtwdev,
+ rtw8723x_common.iqk_mac32_regs[i]);
+
+ for (i = 0; i < RTW8723X_IQK_BB_REG_NUM; i++)
+ backup->bb[i] = rtw_read32(rtwdev,
+ rtw8723x_common.iqk_bb_regs[i]);
+
+ backup->igia = rtw_read32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0);
+ backup->igib = rtw_read32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0);
+
+ backup->bb_sel_btg = rtw_read32(rtwdev, REG_BB_SEL_BTG);
+}
+
+static
+void __rtw8723x_iqk_restore_regs(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ int i;
+
+ for (i = 0; i < RTW8723X_IQK_ADDA_REG_NUM; i++)
+ rtw_write32(rtwdev, rtw8723x_common.iqk_adda_regs[i],
+ backup->adda[i]);
+
+ for (i = 0; i < RTW8723X_IQK_MAC8_REG_NUM; i++)
+ rtw_write8(rtwdev, rtw8723x_common.iqk_mac8_regs[i],
+ backup->mac8[i]);
+ for (i = 0; i < RTW8723X_IQK_MAC32_REG_NUM; i++)
+ rtw_write32(rtwdev, rtw8723x_common.iqk_mac32_regs[i],
+ backup->mac32[i]);
+
+ for (i = 0; i < RTW8723X_IQK_BB_REG_NUM; i++)
+ rtw_write32(rtwdev, rtw8723x_common.iqk_bb_regs[i],
+ backup->bb[i]);
+
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, 0x50);
+ rtw_write32_mask(rtwdev, REG_OFDM0_XAAGC1, MASKBYTE0, backup->igia);
+
+ rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, 0x50);
+ rtw_write32_mask(rtwdev, REG_OFDM0_XBAGC1, MASKBYTE0, backup->igib);
+
+ rtw_write32(rtwdev, REG_TXIQK_TONE_A_11N, 0x01008c00);
+ rtw_write32(rtwdev, REG_RXIQK_TONE_A_11N, 0x01008c00);
+}
+
+static
+bool __rtw8723x_iqk_similarity_cmp(struct rtw_dev *rtwdev,
+ s32 result[][IQK_NR],
+ u8 c1, u8 c2)
+{
+ u32 i, j, diff;
+ u32 bitmap = 0;
+ u8 candidate[PATH_NR] = {IQK_ROUND_INVALID, IQK_ROUND_INVALID};
+ bool ret = true;
+
+ s32 tmp1, tmp2;
+
+ for (i = 0; i < IQK_NR; i++) {
+ tmp1 = iqkxy_to_s32(result[c1][i]);
+ tmp2 = iqkxy_to_s32(result[c2][i]);
+
+ diff = abs(tmp1 - tmp2);
+
+ if (diff <= MAX_TOLERANCE)
+ continue;
+
+ if ((i == IQK_S1_RX_X || i == IQK_S0_RX_X) && !bitmap) {
+ if (result[c1][i] + result[c1][i + 1] == 0)
+ candidate[i / IQK_SX_NR] = c2;
+ else if (result[c2][i] + result[c2][i + 1] == 0)
+ candidate[i / IQK_SX_NR] = c1;
+ else
+ bitmap |= BIT(i);
+ } else {
+ bitmap |= BIT(i);
+ }
+ }
+
+ if (bitmap != 0)
+ goto check_sim;
+
+ for (i = 0; i < PATH_NR; i++) {
+ if (candidate[i] == IQK_ROUND_INVALID)
+ continue;
+
+ for (j = i * IQK_SX_NR; j < i * IQK_SX_NR + 2; j++)
+ result[IQK_ROUND_HYBRID][j] = result[candidate[i]][j];
+ ret = false;
+ }
+
+ return ret;
+
+check_sim:
+ for (i = 0; i < IQK_NR; i++) {
+ j = i & ~1; /* 2 bits are a pair for IQ[X, Y] */
+ if (bitmap & GENMASK(j + 1, j))
+ continue;
+
+ result[IQK_ROUND_HYBRID][i] = result[c1][i];
+ }
+
+ return false;
+}
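+
+/* Summary of the comparison above: IQK rounds c1 and c2 agree on an
+ * IQ pair when each component differs by at most MAX_TOLERANCE (raw
+ * register steps after sign extension). Agreeing pairs are copied
+ * into IQK_ROUND_HYBRID; an all-zero RX pair marks a failed
+ * measurement, in which case the other round becomes the candidate
+ * for that path. The function returns true only if every pair agrees.
+ */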
+
+static u8 __rtw8723x_pwrtrack_get_limit_ofdm(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 tx_rate = dm_info->tx_rate;
+ u8 limit_ofdm = 30;
+
+ switch (tx_rate) {
+ case DESC_RATE1M...DESC_RATE5_5M:
+ case DESC_RATE11M:
+ break;
+ case DESC_RATE6M...DESC_RATE48M:
+ limit_ofdm = 36;
+ break;
+ case DESC_RATE54M:
+ limit_ofdm = 34;
+ break;
+ case DESC_RATEMCS0...DESC_RATEMCS2:
+ limit_ofdm = 38;
+ break;
+ case DESC_RATEMCS3...DESC_RATEMCS4:
+ limit_ofdm = 36;
+ break;
+ case DESC_RATEMCS5...DESC_RATEMCS7:
+ limit_ofdm = 34;
+ break;
+ default:
+ rtw_warn(rtwdev, "pwrtrack unhandled tx_rate 0x%x\n", tx_rate);
+ break;
+ }
+
+ return limit_ofdm;
+}
+
+static
+void __rtw8723x_pwrtrack_set_xtal(struct rtw_dev *rtwdev, u8 therm_path,
+ u8 delta)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ const struct rtw_pwr_track_tbl *tbl = rtwdev->chip->pwr_track_tbl;
+ const s8 *pwrtrk_xtal;
+ s8 xtal_cap;
+
+ if (dm_info->thermal_avg[therm_path] >
+ rtwdev->efuse.thermal_meter[therm_path])
+ pwrtrk_xtal = tbl->pwrtrk_xtal_p;
+ else
+ pwrtrk_xtal = tbl->pwrtrk_xtal_n;
+
+ xtal_cap = rtwdev->efuse.crystal_cap & 0x3F;
+ xtal_cap = clamp_t(s8, xtal_cap + pwrtrk_xtal[delta], 0, 0x3F);
+ rtw_write32_mask(rtwdev, REG_AFE_CTRL3, BIT_MASK_XTAL,
+ xtal_cap | (xtal_cap << 6));
+}
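+
+/* The BIT_MASK_XTAL field holds two adjacent copies of the 6-bit
+ * crystal cap value, which is why the clamped result is written as
+ * xtal_cap | (xtal_cap << 6) above.
+ */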
+
+static
+void __rtw8723x_fill_txdesc_checksum(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *txdesc)
+{
+	size_t words = 32 / 2; /* checksum the first 32 bytes (16 words) */
+ __le16 chksum = 0;
+ __le16 *data = (__le16 *)(txdesc);
+ struct rtw_tx_desc *tx_desc = (struct rtw_tx_desc *)txdesc;
+
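+	/* clear the stored checksum so it does not feed into the XOR below */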
+ le32p_replace_bits(&tx_desc->w7, 0, RTW_TX_DESC_W7_TXDESC_CHECKSUM);
+
+ while (words--)
+ chksum ^= *data++;
+
+ chksum = ~chksum;
+
+ le32p_replace_bits(&tx_desc->w7, __le16_to_cpu(chksum),
+ RTW_TX_DESC_W7_TXDESC_CHECKSUM);
+}
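+
+/* Minimal standalone sketch of the scheme above, assuming a
+ * hypothetical __le16 words[16] view of the first 32 descriptor
+ * bytes with the checksum field already zeroed:
+ *
+ *   __le16 csum = 0;
+ *   for (int i = 0; i < 16; i++)
+ *           csum ^= words[i];
+ *   csum = ~csum;
+ *
+ * The hardware can refold the descriptor the same way and compare
+ * against the stored value to detect corruption.
+ */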
+
+static void __rtw8723x_coex_cfg_init(struct rtw_dev *rtwdev)
+{
+	/* enable TBTT interrupt */
+ rtw_write8_set(rtwdev, REG_BCN_CTRL, BIT_EN_BCN_FUNCTION);
+
+ /* BT report packet sample rate */
+ /* 0x790[5:0]=0x5 */
+ rtw_write8_mask(rtwdev, REG_BT_TDMA_TIME, BIT_MASK_SAMPLE_RATE, 0x5);
+
+ /* enable BT counter statistics */
+ rtw_write8(rtwdev, REG_BT_STAT_CTRL, 0x1);
+
+	/* enable PTA (3-wire function from BT side) */
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_BT_PTA_EN);
+ rtw_write32_set(rtwdev, REG_GPIO_MUXCFG, BIT_PO_BT_PTA_PINS);
+
+	/* enable PTA (tx/rx signal from WiFi side) */
+ rtw_write8_set(rtwdev, REG_QUEUE_CTRL, BIT_PTA_WL_TX_EN);
+}
+
+const struct rtw8723x_common rtw8723x_common = {
+ .iqk_adda_regs = {
+ 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78, 0xe7c, 0xe80, 0xe84,
+ 0xe88, 0xe8c, 0xed0, 0xed4, 0xed8, 0xedc, 0xee0, 0xeec
+ },
+ .iqk_mac8_regs = {0x522, 0x550, 0x551},
+ .iqk_mac32_regs = {0x40},
+ .iqk_bb_regs = {
+ 0xc04, 0xc08, 0x874, 0xb68, 0xb6c, 0x870, 0x860, 0x864, 0xa04
+ },
+
+ .ltecoex_addr = {
+ .ctrl = REG_LTECOEX_CTRL,
+ .wdata = REG_LTECOEX_WRITE_DATA,
+ .rdata = REG_LTECOEX_READ_DATA,
+ },
+ .rf_sipi_addr = {
+ [RF_PATH_A] = { .hssi_1 = 0x820, .lssi_read = 0x8a0,
+ .hssi_2 = 0x824, .lssi_read_pi = 0x8b8},
+ [RF_PATH_B] = { .hssi_1 = 0x828, .lssi_read = 0x8a4,
+ .hssi_2 = 0x82c, .lssi_read_pi = 0x8bc},
+ },
+ .dig = {
+ [0] = { .addr = 0xc50, .mask = 0x7f },
+ [1] = { .addr = 0xc50, .mask = 0x7f },
+ },
+ .dig_cck = {
+ [0] = { .addr = 0xa0c, .mask = 0x3f00 },
+ },
+ .prioq_addrs = {
+ .prio[RTW_DMA_MAPPING_EXTRA] = {
+ .rsvd = REG_RQPN_NPQ + 2, .avail = REG_RQPN_NPQ + 3,
+ },
+ .prio[RTW_DMA_MAPPING_LOW] = {
+ .rsvd = REG_RQPN + 1, .avail = REG_FIFOPAGE_CTRL_2 + 1,
+ },
+ .prio[RTW_DMA_MAPPING_NORMAL] = {
+ .rsvd = REG_RQPN_NPQ, .avail = REG_RQPN_NPQ + 1,
+ },
+ .prio[RTW_DMA_MAPPING_HIGH] = {
+ .rsvd = REG_RQPN, .avail = REG_FIFOPAGE_CTRL_2,
+ },
+ .wsize = false,
+ },
+
+ .lck = __rtw8723x_lck,
+ .read_efuse = __rtw8723x_read_efuse,
+ .mac_init = __rtw8723x_mac_init,
+ .cfg_ldo25 = __rtw8723x_cfg_ldo25,
+ .set_tx_power_index = __rtw8723x_set_tx_power_index,
+ .efuse_grant = __rtw8723x_efuse_grant,
+ .false_alarm_statistics = __rtw8723x_false_alarm_statistics,
+ .iqk_backup_regs = __rtw8723x_iqk_backup_regs,
+ .iqk_restore_regs = __rtw8723x_iqk_restore_regs,
+ .iqk_similarity_cmp = __rtw8723x_iqk_similarity_cmp,
+ .pwrtrack_get_limit_ofdm = __rtw8723x_pwrtrack_get_limit_ofdm,
+ .pwrtrack_set_xtal = __rtw8723x_pwrtrack_set_xtal,
+ .coex_cfg_init = __rtw8723x_coex_cfg_init,
+ .fill_txdesc_checksum = __rtw8723x_fill_txdesc_checksum,
+ .debug_txpwr_limit = __rtw8723x_debug_txpwr_limit,
+};
+EXPORT_SYMBOL(rtw8723x_common);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_AUTHOR("Fiona Klute <fiona.klute@gmx.de>");
+MODULE_DESCRIPTION("Common functions for Realtek 802.11n wireless 8723x drivers");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723x.h b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
new file mode 100644
index 000000000000..e93bfce994bf
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723x.h
@@ -0,0 +1,518 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright 2024 Fiona Klute
+ *
+ * Based on code originally in rtw8723d.[ch],
+ * Copyright(c) 2018-2019 Realtek Corporation
+ */
+
+#ifndef __RTW8723X_H__
+#define __RTW8723X_H__
+
+#include "main.h"
+#include "debug.h"
+#include "phy.h"
+#include "reg.h"
+
+enum rtw8723x_path {
+ PATH_S1,
+ PATH_S0,
+ PATH_NR,
+};
+
+enum rtw8723x_iqk_round {
+ IQK_ROUND_0,
+ IQK_ROUND_1,
+ IQK_ROUND_2,
+ IQK_ROUND_HYBRID,
+ IQK_ROUND_SIZE,
+ IQK_ROUND_INVALID = 0xff,
+};
+
+enum rtw8723x_iqk_result {
+ IQK_S1_TX_X,
+ IQK_S1_TX_Y,
+ IQK_S1_RX_X,
+ IQK_S1_RX_Y,
+ IQK_S0_TX_X,
+ IQK_S0_TX_Y,
+ IQK_S0_RX_X,
+ IQK_S0_RX_Y,
+ IQK_NR,
+ IQK_SX_NR = IQK_NR / PATH_NR,
+};
+
+struct rtw8723xe_efuse {
+ u8 mac_addr[ETH_ALEN]; /* 0xd0 */
+ u8 vendor_id[2];
+ u8 device_id[2];
+ u8 sub_vendor_id[2];
+ u8 sub_device_id[2];
+};
+
+struct rtw8723xu_efuse {
+ u8 res4[48]; /* 0xd0 */
+ u8 vendor_id[2]; /* 0x100 */
+ u8 product_id[2]; /* 0x102 */
+ u8 usb_option; /* 0x104 */
+ u8 res5[2]; /* 0x105 */
+ u8 mac_addr[ETH_ALEN]; /* 0x107 */
+};
+
+struct rtw8723xs_efuse {
+ u8 res4[0x4a]; /* 0xd0 */
+ u8 mac_addr[ETH_ALEN]; /* 0x11a */
+};
+
+struct rtw8723x_efuse {
+ __le16 rtl_id;
+ u8 rsvd[2];
+ u8 afe;
+ u8 rsvd1[11];
+
+ /* power index for four RF paths */
+ struct rtw_txpwr_idx txpwr_idx_table[4];
+
+ u8 channel_plan; /* 0xb8 */
+ u8 xtal_k;
+ u8 thermal_meter;
+ u8 iqk_lck;
+ u8 pa_type; /* 0xbc */
+ u8 lna_type_2g[2]; /* 0xbd */
+ u8 lna_type_5g[2];
+ u8 rf_board_option;
+ u8 rf_feature_option;
+ u8 rf_bt_setting;
+ u8 eeprom_version;
+ u8 eeprom_customer_id;
+ u8 tx_bb_swing_setting_2g;
+ u8 res_c7;
+ u8 tx_pwr_calibrate_rate;
+ u8 rf_antenna_option; /* 0xc9 */
+ u8 rfe_option;
+ u8 country_code[2];
+ u8 res[3];
+ union {
+ struct rtw8723xe_efuse e;
+ struct rtw8723xu_efuse u;
+ struct rtw8723xs_efuse s;
+ };
+};
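+
+/* The union above overlays the bus-specific tail of the logical efuse
+ * map: the same storage holds PCIe IDs (e), USB IDs (u) or only a MAC
+ * address (s), and the parser selects the right view via
+ * rtw_hci_type().
+ */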
+
+#define RTW8723X_IQK_ADDA_REG_NUM 16
+#define RTW8723X_IQK_MAC8_REG_NUM 3
+#define RTW8723X_IQK_MAC32_REG_NUM 1
+#define RTW8723X_IQK_BB_REG_NUM 9
+
+struct rtw8723x_iqk_backup_regs {
+ u32 adda[RTW8723X_IQK_ADDA_REG_NUM];
+ u8 mac8[RTW8723X_IQK_MAC8_REG_NUM];
+ u32 mac32[RTW8723X_IQK_MAC32_REG_NUM];
+ u32 bb[RTW8723X_IQK_BB_REG_NUM];
+
+ u32 lte_path;
+ u32 lte_gnt;
+
+ u32 bb_sel_btg;
+ u8 btg_sel;
+
+ u8 igia;
+ u8 igib;
+};
+
+struct rtw8723x_common {
+ /* registers that must be backed up before IQK and restored after */
+ u32 iqk_adda_regs[RTW8723X_IQK_ADDA_REG_NUM];
+ u32 iqk_mac8_regs[RTW8723X_IQK_MAC8_REG_NUM];
+ u32 iqk_mac32_regs[RTW8723X_IQK_MAC32_REG_NUM];
+ u32 iqk_bb_regs[RTW8723X_IQK_BB_REG_NUM];
+
+ /* chip register definitions */
+ struct rtw_ltecoex_addr ltecoex_addr;
+ struct rtw_rf_sipi_addr rf_sipi_addr[2];
+ struct rtw_hw_reg dig[2];
+ struct rtw_hw_reg dig_cck[1];
+ struct rtw_prioq_addrs prioq_addrs;
+
+ /* common functions */
+ void (*lck)(struct rtw_dev *rtwdev);
+ int (*read_efuse)(struct rtw_dev *rtwdev, u8 *log_map);
+ int (*mac_init)(struct rtw_dev *rtwdev);
+ void (*cfg_ldo25)(struct rtw_dev *rtwdev, bool enable);
+ void (*set_tx_power_index)(struct rtw_dev *rtwdev);
+ void (*efuse_grant)(struct rtw_dev *rtwdev, bool on);
+ void (*false_alarm_statistics)(struct rtw_dev *rtwdev);
+ void (*iqk_backup_regs)(struct rtw_dev *rtwdev,
+ struct rtw8723x_iqk_backup_regs *backup);
+ void (*iqk_restore_regs)(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup);
+ bool (*iqk_similarity_cmp)(struct rtw_dev *rtwdev, s32 result[][IQK_NR],
+ u8 c1, u8 c2);
+ u8 (*pwrtrack_get_limit_ofdm)(struct rtw_dev *rtwdev);
+ void (*pwrtrack_set_xtal)(struct rtw_dev *rtwdev, u8 therm_path,
+ u8 delta);
+ void (*coex_cfg_init)(struct rtw_dev *rtwdev);
+ void (*fill_txdesc_checksum)(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *txdesc);
+ void (*debug_txpwr_limit)(struct rtw_dev *rtwdev,
+ struct rtw_txpwr_idx *table,
+ int tx_path_count);
+};
+
+extern const struct rtw8723x_common rtw8723x_common;
+
+#define PATH_IQK_RETRY 2
+#define MAX_TOLERANCE 5
+#define IQK_TX_X_ERR 0x142
+#define IQK_TX_Y_ERR 0x42
+#define IQK_RX_X_ERR 0x132
+#define IQK_RX_Y_ERR 0x36
+#define IQK_RX_X_UPPER 0x11a
+#define IQK_RX_X_LOWER 0xe6
+#define IQK_RX_Y_LMT 0x1a
+#define IQK_TX_OK BIT(0)
+#define IQK_RX_OK BIT(1)
+
+#define WLAN_TXQ_RPT_EN 0x1F
+
+#define SPUR_THRES 0x16
+#define DIS_3WIRE 0xccf000c0
+#define EN_3WIRE 0xccc000c0
+#define START_PSD 0x400000
+#define FREQ_CH5 0xfccd
+#define FREQ_CH6 0xfc4d
+#define FREQ_CH7 0xffcd
+#define FREQ_CH8 0xff4d
+#define FREQ_CH13 0xfccd
+#define FREQ_CH14 0xff9a
+#define RFCFGCH_CHANNEL_MASK GENMASK(7, 0)
+#define RFCFGCH_BW_MASK (BIT(11) | BIT(10))
+#define RFCFGCH_BW_20M (BIT(11) | BIT(10))
+#define RFCFGCH_BW_40M BIT(10)
+#define BIT_MASK_RFMOD BIT(0)
+#define BIT_LCK BIT(15)
+
+#define REG_GPIO_INTM 0x0048
+#define REG_BTG_SEL 0x0067
+#define BIT_MASK_BTG_WL BIT(7)
+#define REG_LTECOEX_PATH_CONTROL 0x0070
+#define REG_LTECOEX_CTRL 0x07c0
+#define REG_LTECOEX_WRITE_DATA 0x07c4
+#define REG_LTECOEX_READ_DATA 0x07c8
+#define REG_PSDFN 0x0808
+#define REG_BB_PWR_SAV1_11N 0x0874
+#define REG_ANA_PARAM1 0x0880
+#define REG_ANALOG_P4 0x088c
+#define REG_PSDRPT 0x08b4
+#define REG_FPGA1_RFMOD 0x0900
+#define REG_BB_SEL_BTG 0x0948
+#define REG_BBRX_DFIR 0x0954
+#define BIT_MASK_RXBB_DFIR GENMASK(27, 24)
+#define BIT_RXBB_DFIR_EN BIT(19)
+#define REG_CCK0_SYS 0x0a00
+#define BIT_CCK_SIDE_BAND BIT(4)
+#define REG_CCK_ANT_SEL_11N 0x0a04
+#define REG_PWRTH 0x0a08
+#define REG_CCK_FA_RST_11N 0x0a2c
+#define BIT_MASK_CCK_CNT_KEEP BIT(12)
+#define BIT_MASK_CCK_CNT_EN BIT(13)
+#define BIT_MASK_CCK_CNT_KPEN (BIT_MASK_CCK_CNT_KEEP | BIT_MASK_CCK_CNT_EN)
+#define BIT_MASK_CCK_FA_KEEP BIT(14)
+#define BIT_MASK_CCK_FA_EN BIT(15)
+#define BIT_MASK_CCK_FA_KPEN (BIT_MASK_CCK_FA_KEEP | BIT_MASK_CCK_FA_EN)
+#define REG_CCK_FA_LSB_11N 0x0a5c
+#define REG_CCK_FA_MSB_11N 0x0a58
+#define REG_CCK_CCA_CNT_11N 0x0a60
+#define BIT_MASK_CCK_FA_MSB GENMASK(7, 0)
+#define BIT_MASK_CCK_FA_LSB GENMASK(15, 8)
+#define REG_PWRTH2 0x0aa8
+#define REG_CSRATIO 0x0aaa
+#define REG_OFDM_FA_HOLDC_11N 0x0c00
+#define BIT_MASK_OFDM_FA_KEEP BIT(31)
+#define REG_BB_RX_PATH_11N 0x0c04
+#define REG_TRMUX_11N 0x0c08
+#define REG_OFDM_FA_RSTC_11N 0x0c0c
+#define BIT_MASK_OFDM_FA_RST BIT(31)
+#define REG_A_RXIQI 0x0c14
+#define BIT_MASK_RXIQ_S1_X 0x000003FF
+#define BIT_MASK_RXIQ_S1_Y1 0x0000FC00
+#define BIT_SET_RXIQ_S1_Y1(y) ((y) & 0x3F)
+#define REG_OFDM0_RXDSP 0x0c40
+#define BIT_MASK_RXDSP GENMASK(28, 24)
+#define BIT_EN_RXDSP BIT(9)
+#define REG_OFDM_0_ECCA_THRESHOLD 0x0c4c
+#define BIT_MASK_OFDM0_EXT_A BIT(31)
+#define BIT_MASK_OFDM0_EXT_C BIT(29)
+#define BIT_MASK_OFDM0_EXTS (BIT(31) | BIT(29) | BIT(28))
+#define BIT_SET_OFDM0_EXTS(a, c, d) (((a) << 31) | ((c) << 29) | ((d) << 28))
+#define BIT_MASK_OFDM0_EXTS_B (BIT(27) | BIT(25) | BIT(24))
+#define BIT_SET_OFDM0_EXTS_B(a, c, d) (((a) << 27) | ((c) << 25) | ((d) << 24))
+#define REG_OFDM0_XAAGC1 0x0c50
+#define REG_OFDM0_XBAGC1 0x0c58
+#define REG_AGCRSSI 0x0c78
+#define REG_OFDM_0_XA_TX_IQ_IMBALANCE 0x0c80
+#define REG_OFDM_0_XB_TX_IQ_IMBALANCE 0x0c88
+#define BIT_MASK_TXIQ_ELM_A 0x03ff
+#define BIT_SET_TXIQ_ELM_ACD(a, c, d) (((d) << 22) | (((c) & 0x3F) << 16) | \
+ ((a) & 0x03ff))
+#define BIT_MASK_TXIQ_ELM_C GENMASK(21, 16)
+#define BIT_SET_TXIQ_ELM_C2(c) ((c) & 0x3F)
+#define BIT_MASK_TXIQ_ELM_D GENMASK(31, 22)
+#define REG_TXIQK_MATRIXA_LSB2_11N 0x0c94
+#define BIT_SET_TXIQ_ELM_C1(c) (((c) & 0x000003C0) >> 6)
+#define REG_RXIQK_MATRIX_LSB_11N 0x0ca0
+#define BIT_MASK_RXIQ_S1_Y2 0xF0000000
+#define BIT_SET_RXIQ_S1_Y2(y) (((y) >> 6) & 0xF)
+#define REG_TXIQ_AB_S0 0x0cd0
+#define BIT_MASK_TXIQ_A_S0 0x000007FE
+#define BIT_MASK_TXIQ_A_EXT_S0 BIT(0)
+#define BIT_MASK_TXIQ_B_S0 0x0007E000
+#define REG_TXIQ_CD_S0 0x0cd4
+#define BIT_MASK_TXIQ_C_S0 0x000007FE
+#define BIT_MASK_TXIQ_C_EXT_S0 BIT(0)
+#define BIT_MASK_TXIQ_D_S0 GENMASK(22, 13)
+#define BIT_MASK_TXIQ_D_EXT_S0 BIT(12)
+#define REG_RXIQ_AB_S0 0x0cd8
+#define BIT_MASK_RXIQ_X_S0 0x000003FF
+#define BIT_MASK_RXIQ_Y_S0 0x003FF000
+#define REG_OFDM_FA_TYPE1_11N 0x0cf0
+#define BIT_MASK_OFDM_FF_CNT GENMASK(15, 0)
+#define BIT_MASK_OFDM_SF_CNT GENMASK(31, 16)
+#define REG_OFDM_FA_RSTD_11N 0x0d00
+#define BIT_MASK_OFDM_FA_RST1 BIT(27)
+#define BIT_MASK_OFDM_FA_KEEP1 BIT(31)
+#define REG_CTX 0x0d03
+#define BIT_MASK_CTX_TYPE GENMASK(6, 4)
+#define REG_OFDM1_CFOTRK 0x0d2c
+#define BIT_EN_CFOTRK BIT(28)
+#define REG_OFDM1_CSI1 0x0d40
+#define REG_OFDM1_CSI2 0x0d44
+#define REG_OFDM1_CSI3 0x0d48
+#define REG_OFDM1_CSI4 0x0d4c
+#define REG_OFDM_FA_TYPE2_11N 0x0da0
+#define BIT_MASK_OFDM_CCA_CNT GENMASK(15, 0)
+#define BIT_MASK_OFDM_PF_CNT GENMASK(31, 16)
+#define REG_OFDM_FA_TYPE3_11N 0x0da4
+#define BIT_MASK_OFDM_RI_CNT GENMASK(15, 0)
+#define BIT_MASK_OFDM_CRC_CNT GENMASK(31, 16)
+#define REG_OFDM_FA_TYPE4_11N 0x0da8
+#define BIT_MASK_OFDM_MNS_CNT GENMASK(15, 0)
+#define REG_FPGA0_IQK_11N 0x0e28
+#define BIT_MASK_IQK_MOD 0xffffff00
+#define EN_IQK 0x808000
+#define RST_IQK 0x000000
+#define REG_TXIQK_TONE_A_11N 0x0e30
+#define REG_RXIQK_TONE_A_11N 0x0e34
+#define REG_TXIQK_PI_A_11N 0x0e38
+#define REG_RXIQK_PI_A_11N 0x0e3c
+#define REG_TXIQK_11N 0x0e40
+#define BIT_SET_TXIQK_11N(x, y) (0x80007C00 | ((x) << 16) | (y))
+#define REG_RXIQK_11N 0x0e44
+#define REG_IQK_AGC_PTS_11N 0x0e48
+#define REG_IQK_AGC_RSP_11N 0x0e4c
+#define REG_TX_IQK_TONE_B 0x0e50
+#define REG_RX_IQK_TONE_B 0x0e54
+#define REG_TXIQK_PI_B 0x0e58
+#define REG_RXIQK_PI_B 0x0e5c
+#define REG_IQK_RES_TX 0x0e94
+#define BIT_MASK_RES_TX GENMASK(25, 16)
+#define REG_IQK_RES_TY 0x0e9c
+#define BIT_MASK_RES_TY GENMASK(25, 16)
+#define REG_IQK_RES_RX 0x0ea4
+#define BIT_MASK_RES_RX GENMASK(25, 16)
+#define REG_IQK_RES_RY 0x0eac
+#define BIT_IQK_TX_FAIL BIT(28)
+#define BIT_IQK_RX_FAIL BIT(27)
+#define BIT_IQK_DONE BIT(26)
+#define BIT_MASK_RES_RY GENMASK(25, 16)
+#define REG_PAGE_F_RST_11N 0x0f14
+#define BIT_MASK_F_RST_ALL BIT(16)
+#define REG_IGI_C_11N 0x0f84
+#define REG_IGI_D_11N 0x0f88
+#define REG_HT_CRC32_CNT_11N 0x0f90
+#define BIT_MASK_HT_CRC_OK GENMASK(15, 0)
+#define BIT_MASK_HT_CRC_ERR GENMASK(31, 16)
+#define REG_OFDM_CRC32_CNT_11N 0x0f94
+#define BIT_MASK_OFDM_LCRC_OK GENMASK(15, 0)
+#define BIT_MASK_OFDM_LCRC_ERR GENMASK(31, 16)
+#define REG_HT_CRC32_CNT_11N_AGG 0x0fb8
+
+#define OFDM_SWING_A(swing) FIELD_GET(GENMASK(9, 0), swing)
+#define OFDM_SWING_B(swing) FIELD_GET(GENMASK(15, 10), swing)
+#define OFDM_SWING_C(swing) FIELD_GET(GENMASK(21, 16), swing)
+#define OFDM_SWING_D(swing) FIELD_GET(GENMASK(31, 22), swing)
+
+static inline s32 iqkxy_to_s32(s32 val)
+{
+ /* val is Q10.8 */
+ return sign_extend32(val, 9);
+}
+
+static inline s32 iqk_mult(s32 x, s32 y, s32 *ext)
+{
+ /* x, y and return value are Q10.8 */
+ s32 t;
+
+ t = x * y;
+ if (ext)
+ *ext = (t >> 7) & 0x1; /* Q.16 --> Q.9; get LSB of Q.9 */
+
+ return (t >> 8); /* Q.16 --> Q.8 */
+}
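+
+/* Worked example for the fixed-point helpers above (hypothetical
+ * values): iqkxy_to_s32() sign-extends the raw 10-bit result, so
+ * 0x3ff becomes -1. iqk_mult() multiplies two Q10.8 numbers: 0x180 *
+ * 0x080 is 1.5 * 0.5; the Q.16 product is 0xc000 and the >> 8 yields
+ * 0xc0, i.e. 0.75 in Q.8. *ext, when requested, receives bit 7 of
+ * the product, the low bit of the value truncated to Q.9.
+ */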
+
+static inline
+void rtw8723x_debug_txpwr_limit(struct rtw_dev *rtwdev,
+ struct rtw_txpwr_idx *table,
+ int tx_path_count)
+{
+ rtw8723x_common.debug_txpwr_limit(rtwdev, table, tx_path_count);
+}
+
+static inline void rtw8723x_lck(struct rtw_dev *rtwdev)
+{
+ rtw8723x_common.lck(rtwdev);
+}
+
+static inline int rtw8723x_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
+{
+ return rtw8723x_common.read_efuse(rtwdev, log_map);
+}
+
+static inline int rtw8723x_mac_init(struct rtw_dev *rtwdev)
+{
+ return rtw8723x_common.mac_init(rtwdev);
+}
+
+static inline void rtw8723x_cfg_ldo25(struct rtw_dev *rtwdev, bool enable)
+{
+ rtw8723x_common.cfg_ldo25(rtwdev, enable);
+}
+
+static inline void rtw8723x_set_tx_power_index(struct rtw_dev *rtwdev)
+{
+ rtw8723x_common.set_tx_power_index(rtwdev);
+}
+
+static inline void rtw8723x_efuse_grant(struct rtw_dev *rtwdev, bool on)
+{
+ rtw8723x_common.efuse_grant(rtwdev, on);
+}
+
+static inline void rtw8723x_false_alarm_statistics(struct rtw_dev *rtwdev)
+{
+ rtw8723x_common.false_alarm_statistics(rtwdev);
+}
+
+static inline
+void rtw8723x_iqk_backup_regs(struct rtw_dev *rtwdev,
+ struct rtw8723x_iqk_backup_regs *backup)
+{
+ rtw8723x_common.iqk_backup_regs(rtwdev, backup);
+}
+
+static inline
+void rtw8723x_iqk_restore_regs(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ rtw8723x_common.iqk_restore_regs(rtwdev, backup);
+}
+
+static inline
+bool rtw8723x_iqk_similarity_cmp(struct rtw_dev *rtwdev, s32 result[][IQK_NR],
+ u8 c1, u8 c2)
+{
+ return rtw8723x_common.iqk_similarity_cmp(rtwdev, result, c1, c2);
+}
+
+static inline u8 rtw8723x_pwrtrack_get_limit_ofdm(struct rtw_dev *rtwdev)
+{
+ return rtw8723x_common.pwrtrack_get_limit_ofdm(rtwdev);
+}
+
+static inline
+void rtw8723x_pwrtrack_set_xtal(struct rtw_dev *rtwdev, u8 therm_path,
+ u8 delta)
+{
+ rtw8723x_common.pwrtrack_set_xtal(rtwdev, therm_path, delta);
+}
+
+static inline void rtw8723x_coex_cfg_init(struct rtw_dev *rtwdev)
+{
+ rtw8723x_common.coex_cfg_init(rtwdev);
+}
+
+static inline
+void rtw8723x_fill_txdesc_checksum(struct rtw_dev *rtwdev,
+ struct rtw_tx_pkt_info *pkt_info,
+ u8 *txdesc)
+{
+ rtw8723x_common.fill_txdesc_checksum(rtwdev, pkt_info, txdesc);
+}
+
+/* IQK helper functions, defined as inline so they can be shared
+ * without each one needing an EXPORT_SYMBOL.
+ */
+static inline void
+rtw8723x_iqk_backup_path_ctrl(struct rtw_dev *rtwdev,
+ struct rtw8723x_iqk_backup_regs *backup)
+{
+ backup->btg_sel = rtw_read8(rtwdev, REG_BTG_SEL);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] original 0x67 = 0x%x\n",
+ backup->btg_sel);
+}
+
+static inline void rtw8723x_iqk_config_path_ctrl(struct rtw_dev *rtwdev)
+{
+ rtw_write32_mask(rtwdev, REG_PAD_CTRL1, BIT_BT_BTG_SEL, 0x1);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] set 0x67 = 0x%x\n",
+ rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+}
+
+static inline void
+rtw8723x_iqk_restore_path_ctrl(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *backup)
+{
+ rtw_write8(rtwdev, REG_BTG_SEL, backup->btg_sel);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] restore 0x67 = 0x%x\n",
+ rtw_read32_mask(rtwdev, REG_PAD_CTRL1, MASKBYTE3));
+}
+
+static inline void
+rtw8723x_iqk_backup_lte_path_gnt(struct rtw_dev *rtwdev,
+ struct rtw8723x_iqk_backup_regs *backup)
+{
+ backup->lte_path = rtw_read32(rtwdev, REG_LTECOEX_PATH_CONTROL);
+ rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0x800f0038);
+ mdelay(1);
+ backup->lte_gnt = rtw_read32(rtwdev, REG_LTECOEX_READ_DATA);
+ rtw_dbg(rtwdev, RTW_DBG_RFK, "[IQK] OriginalGNT = 0x%x\n",
+ backup->lte_gnt);
+}
+
+static inline void
+rtw8723x_iqk_config_lte_path_gnt(struct rtw_dev *rtwdev,
+ u32 write_data)
+{
+ rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, write_data);
+ rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc0020038);
+ rtw_write32_mask(rtwdev, REG_LTECOEX_PATH_CONTROL,
+ BIT_LTE_MUX_CTRL_PATH, 0x1);
+}
+
+static inline void
+rtw8723x_iqk_restore_lte_path_gnt(struct rtw_dev *rtwdev,
+ const struct rtw8723x_iqk_backup_regs *bak)
+{
+ rtw_write32(rtwdev, REG_LTECOEX_WRITE_DATA, bak->lte_gnt);
+ rtw_write32(rtwdev, REG_LTECOEX_CTRL, 0xc00f0038);
+ rtw_write32(rtwdev, REG_LTECOEX_PATH_CONTROL, bak->lte_path);
+}
+
+/* set all ADDA registers to the given value */
+static inline void rtw8723x_iqk_path_adda_on(struct rtw_dev *rtwdev, u32 value)
+{
+ for (int i = 0; i < RTW8723X_IQK_ADDA_REG_NUM; i++)
+ rtw_write32(rtwdev, rtw8723x_common.iqk_adda_regs[i], value);
+}
+
+#endif /* __RTW8723X_H__ */
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index fe5d8e188350..526e8de77b3e 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -2008,6 +2008,7 @@ const struct rtw_chip_info rtw8821c_hw_spec = {
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
+ .usb_tx_agg_desc_num = 3,
.ht_supported = true,
.vht_supported = true,
.lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK),
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index 3017a9760da8..2456ff242818 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -2548,6 +2548,7 @@ const struct rtw_chip_info rtw8822b_hw_spec = {
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x1c,
+ .usb_tx_agg_desc_num = 3,
.ht_supported = true,
.vht_supported = true,
.lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK),
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index cd965edc29ce..62376d1cca22 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -5366,6 +5366,7 @@ const struct rtw_chip_info rtw8822c_hw_spec = {
.band = RTW_BAND_2G | RTW_BAND_5G,
.page_size = TX_PAGE_SIZE,
.dig_min = 0x20,
+ .usb_tx_agg_desc_num = 3,
.default_1ss_tx_path = BB_PATH_A,
.path_div_supported = true,
.ht_supported = true,
diff --git a/drivers/net/wireless/realtek/rtw88/rx.h b/drivers/net/wireless/realtek/rtw88/rx.h
index 3342e3761281..d3668c4efc24 100644
--- a/drivers/net/wireless/realtek/rtw88/rx.h
+++ b/drivers/net/wireless/realtek/rtw88/rx.h
@@ -40,6 +40,8 @@ enum rtw_rx_desc_enc {
le32_get_bits(*((__le32 *)(rxdesc) + 0x02), GENMASK(30, 29))
#define GET_RX_DESC_TSFL(rxdesc) \
le32_get_bits(*((__le32 *)(rxdesc) + 0x05), GENMASK(31, 0))
+#define GET_RX_DESC_BW(rxdesc) \
+ (le32_get_bits(*((__le32 *)(rxdesc) + 0x04), GENMASK(31, 24)))
void rtw_rx_stats(struct rtw_dev *rtwdev, struct ieee80211_vif *vif,
struct sk_buff *skb);
diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c
index a0188511099a..a55ca5a24227 100644
--- a/drivers/net/wireless/realtek/rtw88/usb.c
+++ b/drivers/net/wireless/realtek/rtw88/usb.c
@@ -273,6 +273,8 @@ static void rtw_usb_write_port_tx_complete(struct urb *urb)
info = IEEE80211_SKB_CB(skb);
tx_data = rtw_usb_get_tx_data(skb);
+ skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz);
+
/* enqueue to wait for tx report */
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn);
@@ -377,7 +379,9 @@ static bool rtw_usb_tx_agg_skb(struct rtw_usb *rtwusb, struct sk_buff_head *list
skb_iter = skb_peek(list);
- if (skb_iter && skb_iter->len + skb_head->len <= RTW_USB_MAX_XMITBUF_SZ)
+ if (skb_iter &&
+ skb_iter->len + skb_head->len <= RTW_USB_MAX_XMITBUF_SZ &&
+ agg_num < rtwdev->chip->usb_tx_agg_desc_num)
__skb_unlink(skb_iter, list);
else
skb_iter = NULL;
@@ -433,23 +437,21 @@ static int rtw_usb_write_data(struct rtw_dev *rtwdev,
{
const struct rtw_chip_info *chip = rtwdev->chip;
struct sk_buff *skb;
- unsigned int desclen, headsize, size;
+ unsigned int size;
u8 qsel;
int ret = 0;
size = pkt_info->tx_pkt_size;
qsel = pkt_info->qsel;
- desclen = chip->tx_pkt_desc_sz;
- headsize = pkt_info->offset ? pkt_info->offset : desclen;
- skb = dev_alloc_skb(headsize + size);
+ skb = dev_alloc_skb(chip->tx_pkt_desc_sz + size);
if (unlikely(!skb))
return -ENOMEM;
- skb_reserve(skb, headsize);
+ skb_reserve(skb, chip->tx_pkt_desc_sz);
skb_put_data(skb, buf, size);
- skb_push(skb, headsize);
- memset(skb->data, 0, headsize);
+ skb_push(skb, chip->tx_pkt_desc_sz);
+ memset(skb->data, 0, chip->tx_pkt_desc_sz);
rtw_tx_fill_tx_desc(pkt_info, skb);
rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, skb->data);
@@ -740,7 +742,6 @@ static struct rtw_hci_ops rtw_usb_ops = {
static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
{
struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
- int i;
rtwusb->rxwq = create_singlethread_workqueue("rtw88_usb: rx wq");
if (!rtwusb->rxwq) {
@@ -752,13 +753,19 @@ static int rtw_usb_init_rx(struct rtw_dev *rtwdev)
INIT_WORK(&rtwusb->rx_work, rtw_usb_rx_handler);
+ return 0;
+}
+
+static void rtw_usb_setup_rx(struct rtw_dev *rtwdev)
+{
+ struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev);
+ int i;
+
for (i = 0; i < RTW_USB_RXCB_NUM; i++) {
struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i];
rtw_usb_rx_resubmit(rtwusb, rxcb);
}
-
- return 0;
}
static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev)
@@ -895,6 +902,8 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
goto err_destroy_rxwq;
}
+ rtw_usb_setup_rx(rtwdev);
+
return 0;
err_destroy_rxwq:
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
index 90ffbab7cc4c..3c9f864805b1 100644
--- a/drivers/net/wireless/realtek/rtw89/Kconfig
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -22,12 +22,18 @@ config RTW89_8851B
config RTW89_8852A
tristate
+config RTW89_8852B_COMMON
+ tristate
+
config RTW89_8852B
tristate
config RTW89_8852C
tristate
+config RTW89_8922A
+ tristate
+
config RTW89_8851BE
tristate "Realtek 8851BE PCI wireless network (Wi-Fi 6) adapter"
depends on PCI
@@ -56,6 +62,7 @@ config RTW89_8852BE
select RTW89_CORE
select RTW89_PCI
select RTW89_8852B
+ select RTW89_8852B_COMMON
help
Select this option will enable support for 8852BE chipset
@@ -72,6 +79,18 @@ config RTW89_8852CE
802.11ax PCIe wireless network (Wi-Fi 6E) adapter
+config RTW89_8922AE
+ tristate "Realtek 8922AE PCI wireless network (Wi-Fi 7) adapter"
+ depends on PCI
+ select RTW89_CORE
+ select RTW89_PCI
+ select RTW89_8922A
+ help
+	  Select this option to enable support for the 8922AE chipset
+
+ 802.11be PCIe wireless network (Wi-Fi 7) adapter
+ supporting 2x2 2GHz/5GHz/6GHz 4096-QAM 160MHz channels.
+
config RTW89_DEBUG
bool
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
index 41940099af1b..1f1050a7a89d 100644
--- a/drivers/net/wireless/realtek/rtw89/Makefile
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -4,17 +4,21 @@ obj-$(CONFIG_RTW89_CORE) += rtw89_core.o
rtw89_core-y += core.o \
mac80211.o \
mac.o \
+ mac_be.o \
phy.o \
+ phy_be.o \
fw.o \
cam.o \
efuse.o \
+ efuse_be.o \
regd.o \
sar.o \
coex.o \
ps.o \
chan.o \
ser.o \
- acpi.o
+ acpi.o \
+ util.o
rtw89_core-$(CONFIG_PM) += wow.o
@@ -36,6 +40,9 @@ rtw89_8852a-objs := rtw8852a.o \
obj-$(CONFIG_RTW89_8852AE) += rtw89_8852ae.o
rtw89_8852ae-objs := rtw8852ae.o
+obj-$(CONFIG_RTW89_8852B_COMMON) += rtw89_8852b_common.o
+rtw89_8852b_common-objs := rtw8852b_common.o
+
obj-$(CONFIG_RTW89_8852B) += rtw89_8852b.o
rtw89_8852b-objs := rtw8852b.o \
rtw8852b_table.o \
@@ -54,8 +61,15 @@ rtw89_8852c-objs := rtw8852c.o \
obj-$(CONFIG_RTW89_8852CE) += rtw89_8852ce.o
rtw89_8852ce-objs := rtw8852ce.o
+obj-$(CONFIG_RTW89_8922A) += rtw89_8922a.o
+rtw89_8922a-objs := rtw8922a.o \
+ rtw8922a_rfk.o
+
+obj-$(CONFIG_RTW89_8922AE) += rtw89_8922ae.o
+rtw89_8922ae-objs := rtw8922ae.o
+
rtw89_core-$(CONFIG_RTW89_DEBUG) += debug.o
obj-$(CONFIG_RTW89_PCI) += rtw89_pci.o
-rtw89_pci-y := pci.o
+rtw89_pci-y := pci.o pci_be.o
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.c b/drivers/net/wireless/realtek/rtw89/acpi.c
index 2e7326a8e3e4..908e980a4b72 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.c
+++ b/drivers/net/wireless/realtek/rtw89/acpi.c
@@ -77,6 +77,50 @@ int rtw89_acpi_dsm_get_policy_6ghz(struct rtw89_dev *rtwdev,
return 0;
}
+static bool chk_acpi_policy_6ghz_sp_sig(const struct rtw89_acpi_policy_6ghz_sp *p)
+{
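+	/* signature bytes are ASCII "RTK" plus 0x07, the value of
+	 * RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP
+	 */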
+ return p->signature[0] == 0x52 &&
+ p->signature[1] == 0x54 &&
+ p->signature[2] == 0x4B &&
+ p->signature[3] == 0x07;
+}
+
+static
+int rtw89_acpi_dsm_get_policy_6ghz_sp(struct rtw89_dev *rtwdev,
+ union acpi_object *obj,
+ struct rtw89_acpi_policy_6ghz_sp **policy)
+{
+ const struct rtw89_acpi_policy_6ghz_sp *ptr;
+ u32 buf_len;
+
+ if (obj->type != ACPI_TYPE_BUFFER) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI,
+			    "acpi: expected buffer but got type: %d\n", obj->type);
+ return -EINVAL;
+ }
+
+ buf_len = obj->buffer.length;
+ if (buf_len < sizeof(*ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: invalid buffer length: %u\n",
+ __func__, buf_len);
+ return -EINVAL;
+ }
+
+ ptr = (typeof(ptr))obj->buffer.pointer;
+ if (!chk_acpi_policy_6ghz_sp_sig(ptr)) {
+ rtw89_debug(rtwdev, RTW89_DBG_ACPI, "%s: bad signature\n", __func__);
+ return -EINVAL;
+ }
+
+ *policy = kmemdup(ptr, sizeof(*ptr), GFP_KERNEL);
+ if (!*policy)
+ return -ENOMEM;
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_ACPI, "policy_6ghz_sp: ", *policy,
+ sizeof(*ptr));
+ return 0;
+}
+
int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
enum rtw89_acpi_dsm_func func,
struct rtw89_acpi_dsm_result *res)
@@ -95,6 +139,9 @@ int rtw89_acpi_evaluate_dsm(struct rtw89_dev *rtwdev,
if (func == RTW89_ACPI_DSM_FUNC_6G_BP)
ret = rtw89_acpi_dsm_get_policy_6ghz(rtwdev, obj,
&res->u.policy_6ghz);
+ else if (func == RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP)
+ ret = rtw89_acpi_dsm_get_policy_6ghz_sp(rtwdev, obj,
+ &res->u.policy_6ghz_sp);
else
ret = rtw89_acpi_dsm_get_value(rtwdev, obj, &res->u.value);
diff --git a/drivers/net/wireless/realtek/rtw89/acpi.h b/drivers/net/wireless/realtek/rtw89/acpi.h
index fe85b40cf076..d274be1775bf 100644
--- a/drivers/net/wireless/realtek/rtw89/acpi.h
+++ b/drivers/net/wireless/realtek/rtw89/acpi.h
@@ -12,7 +12,13 @@ enum rtw89_acpi_dsm_func {
RTW89_ACPI_DSM_FUNC_6G_DIS = 3,
RTW89_ACPI_DSM_FUNC_6G_BP = 4,
RTW89_ACPI_DSM_FUNC_TAS_EN = 5,
- RTW89_ACPI_DSM_FUNC_59G_EN = 6,
+ RTW89_ACPI_DSM_FUNC_UNII4_SUP = 6,
+ RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP = 7,
+};
+
+enum rtw89_acpi_conf_unii4 {
+ RTW89_ACPI_CONF_UNII4_FCC = BIT(0),
+ RTW89_ACPI_CONF_UNII4_IC = BIT(1),
};
enum rtw89_acpi_policy_mode {
@@ -36,11 +42,24 @@ struct rtw89_acpi_policy_6ghz {
struct rtw89_acpi_country_code country_list[] __counted_by(country_count);
} __packed;
+enum rtw89_acpi_conf_6ghz_sp {
+ RTW89_ACPI_CONF_6GHZ_SP_US = BIT(0),
+};
+
+struct rtw89_acpi_policy_6ghz_sp {
+ u8 signature[4];
+ u8 revision;
+ u8 override;
+ u8 conf;
+ u8 rsvd;
+} __packed;
+
struct rtw89_acpi_dsm_result {
union {
u8 value;
/* caller needs to free it after using */
struct rtw89_acpi_policy_6ghz *policy_6ghz;
+ struct rtw89_acpi_policy_6ghz_sp *policy_6ghz_sp;
} u;
};
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
index 11fbdd142162..4557c6e035a9 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.c
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -150,8 +150,6 @@ static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam,
case RTW89_ADDR_CAM_SEC_NONE:
return -EINVAL;
case RTW89_ADDR_CAM_SEC_ALL_UNI:
- if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EINVAL;
idx = find_first_zero_bit(addr_cam->sec_cam_map,
RTW89_SEC_CAM_IN_ADDR_CAM);
if (idx >= RTW89_SEC_CAM_IN_ADDR_CAM)
@@ -213,6 +211,46 @@ static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam,
return 0;
}
+static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ const struct rtw89_sec_cam_entry *sec_cam,
+ bool inform_fw)
+{
+ struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
+ struct rtw89_vif *rtwvif;
+ struct rtw89_addr_cam_entry *addr_cam;
+ unsigned int i;
+ int ret = 0;
+
+ if (!vif) {
+ rtw89_err(rtwdev, "No iface for deleting sec cam\n");
+ return -EINVAL;
+ }
+
+ rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+
+ for_each_set_bit(i, addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM) {
+ if (addr_cam->sec_ent[i] != sec_cam->sec_cam_idx)
+ continue;
+
+ clear_bit(i, addr_cam->sec_cam_map);
+ }
+
+ if (inform_fw) {
+ ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
+ if (ret)
+ rtw89_err(rtwdev,
+ "failed to update dctl cam del key: %d\n", ret);
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
+ if (ret)
+ rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret);
+ }
+
+ return ret;
+}
+
static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -232,6 +270,11 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
rtwvif = (struct rtw89_vif *)vif->drv_priv;
addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ addr_cam->sec_ent_mode = RTW89_ADDR_CAM_SEC_ALL_UNI;
+
ret = rtw89_cam_get_addr_cam_key_idx(addr_cam, sec_cam, key, &key_idx);
if (ret) {
rtw89_err(rtwdev, "failed to get addr cam key idx %d, %d\n",
@@ -239,10 +282,8 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
return ret;
}
- key->hw_key_idx = key_idx;
addr_cam->sec_ent_keyid[key_idx] = key->keyidx;
addr_cam->sec_ent[key_idx] = sec_cam->sec_cam_idx;
- addr_cam->sec_entries[key_idx] = sec_cam;
set_bit(key_idx, addr_cam->sec_cam_map);
ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
if (ret) {
@@ -255,7 +296,6 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
rtw89_err(rtwdev, "failed to update addr cam sec entry: %d\n",
ret);
clear_bit(key_idx, addr_cam->sec_cam_map);
- addr_cam->sec_entries[key_idx] = NULL;
return ret;
}
@@ -292,6 +332,9 @@ static int rtw89_cam_sec_key_install(struct rtw89_dev *rtwdev,
goto err_release_cam;
}
+ key->hw_key_idx = sec_cam_idx;
+ cam_info->sec_entries[sec_cam_idx] = sec_cam;
+
sec_cam->sec_cam_idx = sec_cam_idx;
sec_cam->type = hw_key_type;
sec_cam->len = RTW89_SEC_CAM_LEN;
@@ -313,6 +356,7 @@ static int rtw89_cam_sec_key_install(struct rtw89_dev *rtwdev,
return 0;
err_release_cam:
+ cam_info->sec_entries[sec_cam_idx] = NULL;
kfree(sec_cam);
clear_bit(sec_cam_idx, cam_info->sec_cam_map);
if (ext_key)
@@ -356,6 +400,9 @@ int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ext_key = true;
break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ hw_key_type = RTW89_SEC_KEY_TYPE_BIP_CCMP128;
+ break;
default:
return -EOPNOTSUPP;
}
@@ -380,42 +427,22 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
struct ieee80211_key_conf *key,
bool inform_fw)
{
- struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
- struct rtw89_vif *rtwvif;
- struct rtw89_addr_cam_entry *addr_cam;
- struct rtw89_sec_cam_entry *sec_cam;
- u8 key_idx = key->hw_key_idx;
+ const struct rtw89_sec_cam_entry *sec_cam;
u8 sec_cam_idx;
- int ret = 0;
-
- if (!vif) {
- rtw89_err(rtwdev, "No iface for deleting sec cam\n");
- return -EINVAL;
- }
+ int ret;
- rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
- sec_cam = addr_cam->sec_entries[key_idx];
+ sec_cam_idx = key->hw_key_idx;
+ sec_cam = cam_info->sec_entries[sec_cam_idx];
if (!sec_cam)
return -EINVAL;
- /* detach sec cam from addr cam */
- clear_bit(key_idx, addr_cam->sec_cam_map);
- addr_cam->sec_entries[key_idx] = NULL;
- if (inform_fw) {
- ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta);
- if (ret)
- rtw89_err(rtwdev, "failed to update dctl cam del key: %d\n", ret);
- ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL);
- if (ret)
- rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret);
- }
+ ret = rtw89_cam_detach_sec_cam(rtwdev, vif, sta, sec_cam, inform_fw);
/* clear valid bit in addr cam will disable sec cam,
* so we don't need to send H2C command again
*/
- sec_cam_idx = sec_cam->sec_cam_idx;
+ cam_info->sec_entries[sec_cam_idx] = NULL;
clear_bit(sec_cam_idx, cam_info->sec_cam_map);
if (sec_cam->ext_key)
clear_bit(sec_cam_idx + 1, cam_info->sec_cam_map);
@@ -496,6 +523,7 @@ static u8 rtw89_get_addr_cam_entry_size(struct rtw89_dev *rtwdev)
case RTL8852A:
case RTL8852B:
case RTL8851B:
+ case RTL8852BT:
return ADDR_CAM_ENT_SIZE;
default:
return ADDR_CAM_ENT_SHORT_SIZE;
@@ -753,29 +781,80 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta,
- u8 *cmd)
+ struct rtw89_h2c_dctlinfo_ud_v1 *h2c)
{
struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ u8 *ptk_tx_iv = rtw_wow->key_info.ptk_tx_iv;
+
+ h2c->c0 = le32_encode_bits(rtwsta ? rtwsta->mac_id : rtwvif->mac_id,
+ DCTLINFO_V1_C0_MACID) |
+ le32_encode_bits(1, DCTLINFO_V1_C0_OP);
- SET_DCTL_MACID_V1(cmd, rtwsta ? rtwsta->mac_id : rtwvif->mac_id);
- SET_DCTL_OPERATION_V1(cmd, 1);
-
- SET_DCTL_SEC_ENT0_KEYID_V1(cmd, addr_cam->sec_ent_keyid[0]);
- SET_DCTL_SEC_ENT1_KEYID_V1(cmd, addr_cam->sec_ent_keyid[1]);
- SET_DCTL_SEC_ENT2_KEYID_V1(cmd, addr_cam->sec_ent_keyid[2]);
- SET_DCTL_SEC_ENT3_KEYID_V1(cmd, addr_cam->sec_ent_keyid[3]);
- SET_DCTL_SEC_ENT4_KEYID_V1(cmd, addr_cam->sec_ent_keyid[4]);
- SET_DCTL_SEC_ENT5_KEYID_V1(cmd, addr_cam->sec_ent_keyid[5]);
- SET_DCTL_SEC_ENT6_KEYID_V1(cmd, addr_cam->sec_ent_keyid[6]);
-
- SET_DCTL_SEC_ENT_VALID_V1(cmd, addr_cam->sec_cam_map[0] & 0xff);
- SET_DCTL_SEC_ENT0_V1(cmd, addr_cam->sec_ent[0]);
- SET_DCTL_SEC_ENT1_V1(cmd, addr_cam->sec_ent[1]);
- SET_DCTL_SEC_ENT2_V1(cmd, addr_cam->sec_ent[2]);
- SET_DCTL_SEC_ENT3_V1(cmd, addr_cam->sec_ent[3]);
- SET_DCTL_SEC_ENT4_V1(cmd, addr_cam->sec_ent[4]);
- SET_DCTL_SEC_ENT5_V1(cmd, addr_cam->sec_ent[5]);
- SET_DCTL_SEC_ENT6_V1(cmd, addr_cam->sec_ent[6]);
+ h2c->w4 = le32_encode_bits(addr_cam->sec_ent_keyid[0],
+ DCTLINFO_V1_W4_SEC_ENT0_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[1],
+ DCTLINFO_V1_W4_SEC_ENT1_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[2],
+ DCTLINFO_V1_W4_SEC_ENT2_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[3],
+ DCTLINFO_V1_W4_SEC_ENT3_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[4],
+ DCTLINFO_V1_W4_SEC_ENT4_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[5],
+ DCTLINFO_V1_W4_SEC_ENT5_KEYID) |
+ le32_encode_bits(addr_cam->sec_ent_keyid[6],
+ DCTLINFO_V1_W4_SEC_ENT6_KEYID);
+ h2c->m4 = cpu_to_le32(DCTLINFO_V1_W4_SEC_ENT0_KEYID |
+ DCTLINFO_V1_W4_SEC_ENT1_KEYID |
+ DCTLINFO_V1_W4_SEC_ENT2_KEYID |
+ DCTLINFO_V1_W4_SEC_ENT3_KEYID |
+ DCTLINFO_V1_W4_SEC_ENT4_KEYID |
+ DCTLINFO_V1_W4_SEC_ENT5_KEYID |
+ DCTLINFO_V1_W4_SEC_ENT6_KEYID);
+
+ h2c->w5 = le32_encode_bits(addr_cam->sec_cam_map[0] & 0xff,
+ DCTLINFO_V1_W5_SEC_ENT_VALID) |
+ le32_encode_bits(addr_cam->sec_ent[0],
+ DCTLINFO_V1_W5_SEC_ENT0) |
+ le32_encode_bits(addr_cam->sec_ent[1],
+ DCTLINFO_V1_W5_SEC_ENT1) |
+ le32_encode_bits(addr_cam->sec_ent[2],
+ DCTLINFO_V1_W5_SEC_ENT2);
+ h2c->m5 = cpu_to_le32(DCTLINFO_V1_W5_SEC_ENT_VALID |
+ DCTLINFO_V1_W5_SEC_ENT0 |
+ DCTLINFO_V1_W5_SEC_ENT1 |
+ DCTLINFO_V1_W5_SEC_ENT2);
+
+ h2c->w6 = le32_encode_bits(addr_cam->sec_ent[3],
+ DCTLINFO_V1_W6_SEC_ENT3) |
+ le32_encode_bits(addr_cam->sec_ent[4],
+ DCTLINFO_V1_W6_SEC_ENT4) |
+ le32_encode_bits(addr_cam->sec_ent[5],
+ DCTLINFO_V1_W6_SEC_ENT5) |
+ le32_encode_bits(addr_cam->sec_ent[6],
+ DCTLINFO_V1_W6_SEC_ENT6);
+ h2c->m6 = cpu_to_le32(DCTLINFO_V1_W6_SEC_ENT3 |
+ DCTLINFO_V1_W6_SEC_ENT4 |
+ DCTLINFO_V1_W6_SEC_ENT5 |
+ DCTLINFO_V1_W6_SEC_ENT6);
+
+ if (rtw_wow->ptk_alg) {
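+		/* CCMP PN layout: PN0/PN1 live in IV bytes 0-1 and PN2-PN5
+		 * in bytes 4-7; bytes 2-3 (rsvd/keyid) carry no PN and are
+		 * skipped here
+		 */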
+ h2c->w0 = le32_encode_bits(ptk_tx_iv[0] | ptk_tx_iv[1] << 8,
+ DCTLINFO_V1_W0_AES_IV_L);
+ h2c->m0 = cpu_to_le32(DCTLINFO_V1_W0_AES_IV_L);
+
+ h2c->w1 = le32_encode_bits(ptk_tx_iv[4] |
+ ptk_tx_iv[5] << 8 |
+ ptk_tx_iv[6] << 16 |
+ ptk_tx_iv[7] << 24,
+ DCTLINFO_V1_W1_AES_IV_H);
+ h2c->m1 = cpu_to_le32(DCTLINFO_V1_W1_AES_IV_H);
+
+ h2c->w4 |= le32_encode_bits(rtw_wow->ptk_keyidx,
+ DCTLINFO_V1_W4_SEC_KEY_ID);
+ h2c->m4 |= cpu_to_le32(DCTLINFO_V1_W4_SEC_KEY_ID);
+ }
}
void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
@@ -784,6 +863,8 @@ void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
struct rtw89_h2c_dctlinfo_ud_v2 *h2c)
{
struct rtw89_addr_cam_entry *addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ u8 *ptk_tx_iv = rtw_wow->key_info.ptk_tx_iv;
h2c->c0 = le32_encode_bits(rtwsta ? rtwsta->mac_id : rtwvif->mac_id,
DCTLINFO_V2_C0_MACID) |
@@ -837,4 +918,21 @@ void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
DCTLINFO_V2_W7_SEC_ENT6_V1);
h2c->m7 = cpu_to_le32(DCTLINFO_V2_W7_SEC_ENT5_V1 |
DCTLINFO_V2_W7_SEC_ENT6_V1);
+
+ if (rtw_wow->ptk_alg) {
+ h2c->w0 = le32_encode_bits(ptk_tx_iv[0] | ptk_tx_iv[1] << 8,
+ DCTLINFO_V2_W0_AES_IV_L);
+ h2c->m0 = cpu_to_le32(DCTLINFO_V2_W0_AES_IV_L);
+
+ h2c->w1 = le32_encode_bits(ptk_tx_iv[4] |
+ ptk_tx_iv[5] << 8 |
+ ptk_tx_iv[6] << 16 |
+ ptk_tx_iv[7] << 24,
+ DCTLINFO_V2_W1_AES_IV_H);
+ h2c->m1 = cpu_to_le32(DCTLINFO_V2_W1_AES_IV_H);
+
+ h2c->w4 |= le32_encode_bits(rtw_wow->ptk_keyidx,
+ DCTLINFO_V2_W4_SEC_KEY_ID);
+ h2c->m4 |= cpu_to_le32(DCTLINFO_V2_W4_SEC_KEY_ID);
+ }
}
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
index fa09d11c345c..5d7b624c2dd4 100644
--- a/drivers/net/wireless/realtek/rtw89/cam.h
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -352,6 +352,75 @@ static inline void FWCMD_SET_ADDR_BSSID_BSSID5(void *cmd, u32 value)
le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(31, 24));
}
+struct rtw89_h2c_dctlinfo_ud_v1 {
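+	/* wN carries field values; the paired mN is a write mask telling
+	 * firmware which bits of wN to apply
+	 */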
+ __le32 c0;
+ __le32 w0;
+ __le32 w1;
+ __le32 w2;
+ __le32 w3;
+ __le32 w4;
+ __le32 w5;
+ __le32 w6;
+ __le32 w7;
+ __le32 m0;
+ __le32 m1;
+ __le32 m2;
+ __le32 m3;
+ __le32 m4;
+ __le32 m5;
+ __le32 m6;
+ __le32 m7;
+} __packed;
+
+#define DCTLINFO_V1_C0_MACID GENMASK(6, 0)
+#define DCTLINFO_V1_C0_OP BIT(7)
+
+#define DCTLINFO_V1_W0_QOS_FIELD_H GENMASK(7, 0)
+#define DCTLINFO_V1_W0_HW_EXSEQ_MACID GENMASK(14, 8)
+#define DCTLINFO_V1_W0_QOS_DATA BIT(15)
+#define DCTLINFO_V1_W0_AES_IV_L GENMASK(31, 16)
+#define DCTLINFO_V1_W0_ALL GENMASK(31, 0)
+#define DCTLINFO_V1_W1_AES_IV_H GENMASK(31, 0)
+#define DCTLINFO_V1_W1_ALL GENMASK(31, 0)
+#define DCTLINFO_V1_W2_SEQ0 GENMASK(11, 0)
+#define DCTLINFO_V1_W2_SEQ1 GENMASK(23, 12)
+#define DCTLINFO_V1_W2_AMSDU_MAX_LEN GENMASK(26, 24)
+#define DCTLINFO_V1_W2_STA_AMSDU_EN BIT(27)
+#define DCTLINFO_V1_W2_CHKSUM_OFLD_EN BIT(28)
+#define DCTLINFO_V1_W2_WITH_LLC BIT(29)
+#define DCTLINFO_V1_W2_ALL GENMASK(29, 0)
+#define DCTLINFO_V1_W3_SEQ2 GENMASK(11, 0)
+#define DCTLINFO_V1_W3_SEQ3 GENMASK(23, 12)
+#define DCTLINFO_V1_W3_TGT_IND GENMASK(27, 24)
+#define DCTLINFO_V1_W3_TGT_IND_EN BIT(28)
+#define DCTLINFO_V1_W3_HTC_LB GENMASK(31, 29)
+#define DCTLINFO_V1_W3_ALL GENMASK(31, 0)
+#define DCTLINFO_V1_W4_MHDR_LEN GENMASK(4, 0)
+#define DCTLINFO_V1_W4_VLAN_TAG_VALID BIT(5)
+#define DCTLINFO_V1_W4_VLAN_TAG_SEL GENMASK(7, 6)
+#define DCTLINFO_V1_W4_HTC_ORDER BIT(8)
+#define DCTLINFO_V1_W4_SEC_KEY_ID GENMASK(10, 9)
+#define DCTLINFO_V1_W4_WAPI BIT(15)
+#define DCTLINFO_V1_W4_SEC_ENT_MODE GENMASK(17, 16)
+#define DCTLINFO_V1_W4_SEC_ENT0_KEYID GENMASK(19, 18)
+#define DCTLINFO_V1_W4_SEC_ENT1_KEYID GENMASK(21, 20)
+#define DCTLINFO_V1_W4_SEC_ENT2_KEYID GENMASK(23, 22)
+#define DCTLINFO_V1_W4_SEC_ENT3_KEYID GENMASK(25, 24)
+#define DCTLINFO_V1_W4_SEC_ENT4_KEYID GENMASK(27, 26)
+#define DCTLINFO_V1_W4_SEC_ENT5_KEYID GENMASK(29, 28)
+#define DCTLINFO_V1_W4_SEC_ENT6_KEYID GENMASK(31, 30)
+#define DCTLINFO_V1_W4_ALL (GENMASK(31, 15) | GENMASK(10, 0))
+#define DCTLINFO_V1_W5_SEC_ENT_VALID GENMASK(7, 0)
+#define DCTLINFO_V1_W5_SEC_ENT0 GENMASK(15, 8)
+#define DCTLINFO_V1_W5_SEC_ENT1 GENMASK(23, 16)
+#define DCTLINFO_V1_W5_SEC_ENT2 GENMASK(31, 24)
+#define DCTLINFO_V1_W5_ALL GENMASK(31, 0)
+#define DCTLINFO_V1_W6_SEC_ENT3 GENMASK(7, 0)
+#define DCTLINFO_V1_W6_SEC_ENT4 GENMASK(15, 8)
+#define DCTLINFO_V1_W6_SEC_ENT5 GENMASK(23, 16)
+#define DCTLINFO_V1_W6_SEC_ENT6 GENMASK(31, 24)
+#define DCTLINFO_V1_W6_ALL GENMASK(31, 0)
+
struct rtw89_h2c_dctlinfo_ud_v2 {
__le32 c0;
__le32 w0;
@@ -477,7 +546,7 @@ void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
void rtw89_cam_fill_dctl_sec_cam_info_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta,
- u8 *cmd);
+ struct rtw89_h2c_dctlinfo_ud_v1 *h2c);
void rtw89_cam_fill_dctl_sec_cam_info_v2(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta,
diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c
index 051a3cad6101..7f90d93dcdc0 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.c
+++ b/drivers/net/wireless/realtek/rtw89/chan.c
@@ -141,6 +141,28 @@ bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
return band_changed;
}
+int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
+ int (*iterator)(const struct rtw89_chan *chan,
+ void *data),
+ void *data)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chan *chan;
+ int ret;
+ u8 idx;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) {
+ chan = rtw89_chan_get(rtwdev, idx);
+ ret = iterator(chan, data);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static void __rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
enum rtw89_sub_entity_idx idx,
const struct cfg80211_chan_def *chandef,
@@ -2322,7 +2344,6 @@ static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev,
enum rtw89_sub_entity_idx idx2)
{
struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_sub_entity tmp;
struct rtw89_vif *rtwvif;
u8 cur;
@@ -2332,9 +2353,7 @@ static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev,
hal->sub[idx1].cfg->idx = idx2;
hal->sub[idx2].cfg->idx = idx1;
- tmp = hal->sub[idx1];
- hal->sub[idx1] = hal->sub[idx2];
- hal->sub[idx2] = tmp;
+ swap(hal->sub[idx1], hal->sub[idx2]);
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
if (!rtwvif->chanctx_assigned)
diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h
index ffa412f281f3..5278ff8c513b 100644
--- a/drivers/net/wireless/realtek/rtw89/chan.h
+++ b/drivers/net/wireless/realtek/rtw89/chan.h
@@ -78,6 +78,10 @@ void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan,
bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev,
enum rtw89_sub_entity_idx idx,
const struct rtw89_chan *new);
+int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev,
+ int (*iterator)(const struct rtw89_chan *chan,
+ void *data),
+ void *data);
void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev,
enum rtw89_sub_entity_idx idx,
const struct cfg80211_chan_def *chandef);
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
index d9b66d43f32e..24929ef534e0 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.c
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -12,6 +12,7 @@
#define RTW89_COEX_VERSION 0x07000113
#define FCXDEF_STEP 50 /* MUST be <= FCXMAX_STEP and match the WL fw */
+#define BTC_E2G_LIMIT_DEF 80
enum btc_fbtc_tdma_template {
CXTD_OFF = 0x0,
@@ -54,7 +55,6 @@ enum btc_mlme_state {
MLME_LINKED,
};
-#define FCXONESLOT_VER 1
struct btc_fbtc_1slot {
u8 fver;
u8 sid; /* slot id */
@@ -91,7 +91,7 @@ static const struct rtw89_btc_fbtc_slot s_def[] = {
[CXST_BLK] = __DEF_FBTC_SLOT(500, 0x55555555, SLOT_MIX),
[CXST_E2G] = __DEF_FBTC_SLOT(0, 0xea5a5a5a, SLOT_MIX),
[CXST_E5G] = __DEF_FBTC_SLOT(0, 0xffffffff, SLOT_ISO),
- [CXST_EBT] = __DEF_FBTC_SLOT(0, 0xe5555555, SLOT_MIX),
+ [CXST_EBT] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX),
[CXST_ENULL] = __DEF_FBTC_SLOT(0, 0xaaaaaaaa, SLOT_ISO),
[CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX),
[CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO),
@@ -133,71 +133,71 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxbtcrpt = 8, .fcxtdma = 7, .fcxslots = 7, .fcxcysta = 7,
.fcxstep = 7, .fcxnullsta = 7, .fcxmreg = 7, .fcxgpiodbg = 7,
.fcxbtver = 7, .fcxbtscan = 7, .fcxbtafh = 7, .fcxbtdevinfo = 7,
- .fwlrole = 2, .frptmap = 7, .fcxctrl = 7, .fcxinit = 7,
- .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6,
+ .fwlrole = 8, .frptmap = 3, .fcxctrl = 7, .fcxinit = 7,
+ .fwevntrptl = 1, .drvinfo_type = 1, .info_buf = 1800, .max_role_num = 6,
},
{RTL8851B, RTW89_FW_VER_CODE(0, 29, 29, 0),
.fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 57, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 42, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852C, RTW89_FW_VER_CODE(0, 27, 0, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 2, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 29, 29, 0),
.fcxbtcrpt = 105, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 5,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 2, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 2, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 2, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 29, 14, 0),
.fcxbtcrpt = 5, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 4,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1800, .max_role_num = 6,
},
{RTL8852B, RTW89_FW_VER_CODE(0, 27, 0, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 1, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852A, RTW89_FW_VER_CODE(0, 13, 37, 0),
.fcxbtcrpt = 4, .fcxtdma = 3, .fcxslots = 1, .fcxcysta = 3,
.fcxstep = 3, .fcxnullsta = 2, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 2, .fcxbtdevinfo = 1,
.fwlrole = 1, .frptmap = 3, .fcxctrl = 1, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1280, .max_role_num = 5,
},
{RTL8852A, RTW89_FW_VER_CODE(0, 13, 0, 0),
.fcxbtcrpt = 1, .fcxtdma = 1, .fcxslots = 1, .fcxcysta = 2,
.fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
.fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
},
/* keep it to be the last as default entry */
@@ -206,18 +206,52 @@ static const struct rtw89_btc_ver rtw89_btc_ver_defs[] = {
.fcxstep = 2, .fcxnullsta = 1, .fcxmreg = 1, .fcxgpiodbg = 1,
.fcxbtver = 1, .fcxbtscan = 1, .fcxbtafh = 1, .fcxbtdevinfo = 1,
.fwlrole = 0, .frptmap = 0, .fcxctrl = 0, .fcxinit = 0,
- .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
+ .fwevntrptl = 0, .drvinfo_type = 0, .info_buf = 1024, .max_role_num = 5,
},
};
#define RTW89_DEFAULT_BTC_VER_IDX (ARRAY_SIZE(rtw89_btc_ver_defs) - 1)
+static const union rtw89_btc_wl_state_map btc_scanning_map = {
+ .map = {
+ .scan = 1,
+ .connecting = 1,
+ .roaming = 1,
+ .transacting = 1,
+ ._4way = 1,
+ },
+};
+
+static u32 chip_id_to_bt_rom_code_id(u32 id)
+{
+ switch (id) {
+ case RTL8852A:
+ case RTL8852B:
+ case RTL8852C:
+ case RTL8852BT:
+ return 0x8852;
+ case RTL8851B:
+ return 0x8851;
+ case RTL8922A:
+ return 0x8922;
+ default:
+ return 0;
+ }
+}
+
struct rtw89_btc_btf_tlv {
u8 type;
u8 len;
u8 val[];
} __packed;
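+/* v7 TLVs add a per-entry version byte between type and len */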
+struct rtw89_btc_btf_tlv_v7 {
+ u8 type;
+ u8 ver;
+ u8 len;
+ u8 val[];
+} __packed;
+
enum btc_btf_set_report_en {
RPT_EN_TDMA,
RPT_EN_CYCLE,
@@ -235,13 +269,24 @@ enum btc_btf_set_report_en {
RPT_EN_MONITER,
};
-#define BTF_SET_REPORT_VER 1
-struct rtw89_btc_btf_set_report {
+struct rtw89_btc_btf_set_report_v1 {
u8 fver;
__le32 enable;
__le32 para;
} __packed;
+struct rtw89_btc_btf_set_report_v8 {
+ u8 type;
+ u8 fver;
+ u8 len;
+ __le32 map;
+} __packed;
+
+union rtw89_fbtc_rtp_ctrl {
+ struct rtw89_btc_btf_set_report_v1 v1;
+ struct rtw89_btc_btf_set_report_v8 v8;
+};
+
#define BTF_SET_SLOT_TABLE_VER 1
struct rtw89_btc_btf_set_slot_table {
u8 fver;
@@ -249,12 +294,31 @@ struct rtw89_btc_btf_set_slot_table {
struct rtw89_btc_fbtc_slot tbls[] __counted_by(tbl_num);
} __packed;
-struct rtw89_btc_btf_set_mon_reg {
+struct rtw89_btc_btf_set_slot_table_v7 {
+ u8 type;
+ u8 ver;
+ u8 len;
+ struct rtw89_btc_fbtc_slot_v7 v7[CXST_MAX];
+} __packed;
+
+struct rtw89_btc_btf_set_mon_reg_v1 {
u8 fver;
u8 reg_num;
struct rtw89_btc_fbtc_mreg regs[] __counted_by(reg_num);
} __packed;
+struct rtw89_btc_btf_set_mon_reg_v7 {
+ u8 type;
+ u8 fver;
+ u8 len;
+ struct rtw89_btc_fbtc_mreg regs[] __counted_by(len);
+} __packed;
+
+union rtw89_fbtc_set_mon_reg {
+ struct rtw89_btc_btf_set_mon_reg_v1 v1;
+ struct rtw89_btc_btf_set_mon_reg_v7 v7;
+} __packed;
+
struct _wl_rinfo_now {
u8 link_mode;
u32 dbcc_2g_phy: 2;
@@ -310,6 +374,7 @@ enum btc_ant_phase {
BTC_ANT_W25G,
BTC_ANT_FREERUN,
BTC_ANT_WRFK,
+ BTC_ANT_WRFK2,
BTC_ANT_BRFK,
BTC_ANT_MAX
};
@@ -614,6 +679,13 @@ enum btc_ctr_path {
BTC_CTRL_BY_WL
};
+enum btc_wlact_state {
+ BTC_WLACT_HW = 0,
+ BTC_WLACT_SW_LO,
+ BTC_WLACT_SW_HI,
+ BTC_WLACT_MAX,
+};
+
enum btc_wl_max_tx_time {
BTC_MAX_TX_TIME_L1 = 500,
BTC_MAX_TX_TIME_L2 = 1000,
@@ -739,7 +811,7 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
- struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info;
+ struct rtw89_btc_wl_link_info *wl_linfo;
u8 i;
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s\n", __func__);
@@ -761,16 +833,31 @@ static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
if (type & BTC_RESET_DM) {
memset(&btc->dm, 0, sizeof(btc->dm));
memset(bt_linfo->rssi_state, 0, sizeof(bt_linfo->rssi_state));
-
- for (i = 0; i < RTW89_PORT_NUM; i++)
- memset(wl_linfo[i].rssi_state, 0,
- sizeof(wl_linfo[i].rssi_state));
+ for (i = 0; i < RTW89_PORT_NUM; i++) {
+ if (btc->ver->fwlrole == 8)
+ wl_linfo = &wl->rlink_info[i][0];
+ else
+ wl_linfo = &wl->link_info[i];
+ memset(wl_linfo->rssi_state, 0, sizeof(wl_linfo->rssi_state));
+ }
/* set the slot_now table to original */
btc->dm.tdma_now = t_def[CXTD_OFF];
btc->dm.tdma = t_def[CXTD_OFF];
- memcpy(&btc->dm.slot_now, s_def, sizeof(btc->dm.slot_now));
- memcpy(&btc->dm.slot, s_def, sizeof(btc->dm.slot));
+ if (ver->fcxslots >= 7) {
+ for (i = 0; i < ARRAY_SIZE(s_def); i++) {
+ btc->dm.slot.v7[i].dur = s_def[i].dur;
+ btc->dm.slot.v7[i].cxtype = s_def[i].cxtype;
+ btc->dm.slot.v7[i].cxtbl = s_def[i].cxtbl;
+ }
+ memcpy(&btc->dm.slot_now.v7, &btc->dm.slot.v7,
+ sizeof(btc->dm.slot_now.v7));
+ } else {
+ memcpy(&btc->dm.slot_now.v1, s_def,
+ sizeof(btc->dm.slot_now.v1));
+ memcpy(&btc->dm.slot.v1, s_def,
+ sizeof(btc->dm.slot.v1));
+ }
btc->policy_len = 0;
btc->bt_req_len = 0;
@@ -901,14 +988,25 @@ static void _chk_btc_err(struct rtw89_dev *rtwdev, u8 type, u32 cnt)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_cx *cx = &btc->cx;
- struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_bt_info *bt = &cx->bt;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_dm *dm = &btc->dm;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): type:%d cnt:%d\n",
__func__, type, cnt);
switch (type) {
+ case BTC_DCNT_WL_FW_VER_MATCH:
+ if ((wl->ver_info.fw_coex & 0xffff0000) !=
+ rtwdev->chip->wlcx_desired) {
+ wl->fw_ver_mismatch = true;
+ dm->error.map.wl_ver_mismatch = true;
+ } else {
+ wl->fw_ver_mismatch = false;
+ dm->error.map.wl_ver_mismatch = false;
+ }
+ break;
case BTC_DCNT_RPT_HANG:
if (dm->cnt_dm[BTC_DCNT_RPT] == cnt && btc->fwinfo.rpt_en_map)
dm->cnt_dm[BTC_DCNT_RPT_HANG]++;
@@ -1001,6 +1099,19 @@ static void _chk_btc_err(struct rtw89_dev *rtwdev, u8 type, u32 cnt)
else
dm->error.map.slot_no_sync = false;
break;
+ case BTC_DCNT_BTTX_HANG:
+ cnt = cx->cnt_bt[BTC_BCNT_LOPRI_TX];
+
+ if (cnt == 0 && bt->link_info.slave_role)
+ dm->cnt_dm[BTC_DCNT_BTTX_HANG]++;
+ else
+ dm->cnt_dm[BTC_DCNT_BTTX_HANG] = 0;
+
+ if (dm->cnt_dm[BTC_DCNT_BTTX_HANG] >= BTC_CHK_HANG_MAX)
+ dm->error.map.bt_tx_hang = true;
+ else
+ dm->error.map.bt_tx_hang = false;
+ break;
case BTC_DCNT_BTCNT_HANG:
cnt = cx->cnt_bt[BTC_BCNT_HIPRI_RX] +
cx->cnt_bt[BTC_BCNT_HIPRI_TX] +
@@ -1050,27 +1161,36 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_a2dp_desc *a2dp = &bt_linfo->a2dp_desc;
- struct rtw89_btc_fbtc_btver *pver = NULL;
- struct rtw89_btc_fbtc_btscan_v1 *pscan_v1;
- struct rtw89_btc_fbtc_btscan_v2 *pscan_v2;
- struct rtw89_btc_fbtc_btafh *pafh_v1 = NULL;
+ union rtw89_btc_fbtc_btver *pver = &btc->fwinfo.rpt_fbtc_btver.finfo;
struct rtw89_btc_fbtc_btafh_v2 *pafh_v2 = NULL;
+ struct rtw89_btc_fbtc_btafh_v7 *pafh_v7 = NULL;
struct rtw89_btc_fbtc_btdevinfo *pdev = NULL;
+ struct rtw89_btc_fbtc_btafh *pafh_v1 = NULL;
+ struct rtw89_btc_fbtc_btscan_v1 *pscan_v1;
+ struct rtw89_btc_fbtc_btscan_v2 *pscan_v2;
+ struct rtw89_btc_fbtc_btscan_v7 *pscan_v7;
bool scan_update = true;
int i;
- pver = (struct rtw89_btc_fbtc_btver *)pfinfo;
- pdev = (struct rtw89_btc_fbtc_btdevinfo *)pfinfo;
-
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): rpt_type:%d\n",
__func__, rpt_type);
switch (rpt_type) {
case BTC_RPT_TYPE_BT_VER:
- bt->ver_info.fw = le32_to_cpu(pver->fw_ver);
- bt->ver_info.fw_coex = le32_get_bits(pver->coex_ver, GENMASK(7, 0));
- bt->feature = le32_to_cpu(pver->feature);
+ if (ver->fcxbtver == 7) {
+ pver->v7 = *(struct rtw89_btc_fbtc_btver_v7 *)pfinfo;
+ bt->ver_info.fw = le32_to_cpu(pver->v7.fw_ver);
+ bt->ver_info.fw_coex = le32_get_bits(pver->v7.coex_ver,
+ GENMASK(7, 0));
+ bt->feature = le32_to_cpu(pver->v7.feature);
+ } else {
+ pver->v1 = *(struct rtw89_btc_fbtc_btver_v1 *)pfinfo;
+ bt->ver_info.fw = le32_to_cpu(pver->v1.fw_ver);
+ bt->ver_info.fw_coex = le32_get_bits(pver->v1.coex_ver,
+ GENMASK(7, 0));
+ bt->feature = le32_to_cpu(pver->v1.feature);
+ }
break;
case BTC_RPT_TYPE_BT_SCAN:
if (ver->fcxbtscan == 1) {
@@ -1090,6 +1210,15 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
pscan_v2->para[i].intvl == 0)
scan_update = false;
}
+ } else if (ver->fcxbtscan == 7) {
+ pscan_v7 = (struct rtw89_btc_fbtc_btscan_v7 *)pfinfo;
+ for (i = 0; i < CXSCAN_MAX; i++) {
+ bt->scan_info_v2[i] = pscan_v7->para[i];
+ if ((pscan_v7->type & BIT(i)) &&
+ pscan_v7->para[i].win == 0 &&
+ pscan_v7->para[i].intvl == 0)
+ scan_update = false;
+ }
}
if (scan_update)
bt->scan_info_update = 1;
@@ -1106,6 +1235,17 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
memcpy(&bt_linfo->afh_map_le[0], pafh_v2->afh_le_a, 4);
memcpy(&bt_linfo->afh_map_le[4], pafh_v2->afh_le_b, 1);
}
+ } else if (ver->fcxbtafh == 7) {
+ pafh_v7 = (struct rtw89_btc_fbtc_btafh_v7 *)pfinfo;
+ if (pafh_v7->map_type & RPT_BT_AFH_SEQ_LEGACY) {
+ memcpy(&bt_linfo->afh_map[0], pafh_v7->afh_l, 4);
+ memcpy(&bt_linfo->afh_map[4], pafh_v7->afh_m, 4);
+ memcpy(&bt_linfo->afh_map[8], pafh_v7->afh_h, 2);
+ }
+ if (pafh_v7->map_type & RPT_BT_AFH_SEQ_LE) {
+ memcpy(&bt_linfo->afh_map_le[0], pafh_v7->afh_le_a, 4);
+ memcpy(&bt_linfo->afh_map_le[4], pafh_v7->afh_le_b, 1);
+ }
} else if (ver->fcxbtafh == 1) {
pafh_v1 = (struct rtw89_btc_fbtc_btafh *)pfinfo;
memcpy(&bt_linfo->afh_map[0], pafh_v1->afh_l, 4);
@@ -1114,6 +1254,7 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
}
break;
case BTC_RPT_TYPE_BT_DEVICE:
+ pdev = (struct rtw89_btc_fbtc_btdevinfo *)pfinfo;
a2dp->device_name = le32_to_cpu(pdev->dev_name);
a2dp->vendor_id = le16_to_cpu(pdev->vendor_id);
a2dp->flush_time = le32_to_cpu(pdev->flush_time);
@@ -1123,6 +1264,22 @@ static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
}
}
+static void rtw89_btc_fw_rpt_evnt_ver(struct rtw89_dev *rtwdev, u8 *index)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
+
+ if (ver->fwevntrptl == 1)
+ return;
+
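+	/* legacy (v0) firmware reports one fewer event type; shift indices
+	 * past the shared prefix up by one to match the current enum
+	 */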
+ if (*index <= __BTC_RPT_TYPE_V0_SAME)
+ return;
+ else if (*index <= __BTC_RPT_TYPE_V0_MAX)
+ (*index)++;
+ else
+ *index = BTC_RPT_TYPE_MAX;
+}
+
#define BTC_LEAK_AP_TH 10
#define BTC_CYSTA_CHK_PERIOD 100
@@ -1147,10 +1304,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
struct rtw89_btc_prpt *btc_prpt = NULL;
void *rpt_content = NULL, *pfinfo = NULL;
u8 rpt_type = 0;
- u16 wl_slot_set = 0, wl_slot_real = 0;
+ u16 wl_slot_set = 0, wl_slot_real = 0, val16;
u32 trace_step = 0, rpt_len = 0, diff_t = 0;
u32 cnt_leak_slot, bt_slot_real, bt_slot_set, cnt_rx_imr;
- u8 i, val = 0;
+ u8 i, val = 0, val1, val2;
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): index:%d\n",
@@ -1170,6 +1327,8 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
"[BTC], %s(): rpt_type:%d\n",
__func__, rpt_type);
+ rtw89_btc_fw_rpt_evnt_ver(rtwdev, &rpt_type);
+
switch (rpt_type) {
case BTC_RPT_TYPE_CTRL:
pcinfo = &pfwinfo->rpt_ctrl.cinfo;
@@ -1188,6 +1347,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v105);
pcinfo->req_fver = 5;
break;
+ } else if (ver->fcxbtcrpt == 8) {
+ pfinfo = &pfwinfo->rpt_ctrl.finfo.v8;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo.v8);
+ break;
} else {
goto err;
}
@@ -1198,7 +1361,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
if (ver->fcxtdma == 1) {
pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo.v1;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo.v1);
- } else if (ver->fcxtdma == 3) {
+ } else if (ver->fcxtdma == 3 || ver->fcxtdma == 7) {
pfinfo = &pfwinfo->rpt_fbtc_tdma.finfo.v3;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo.v3);
} else {
@@ -1208,8 +1371,15 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
break;
case BTC_RPT_TYPE_SLOT:
pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo;
- pfinfo = &pfwinfo->rpt_fbtc_slots.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo);
+ if (ver->fcxslots == 1) {
+ pfinfo = &pfwinfo->rpt_fbtc_slots.finfo.v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo.v1);
+ } else if (ver->fcxslots == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_slots.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo.v7);
+ } else {
+ goto err;
+ }
pcinfo->req_fver = ver->fcxslots;
break;
case BTC_RPT_TYPE_CYSTA:
@@ -1231,6 +1401,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v5;
pcysta->v5 = pfwinfo->rpt_fbtc_cysta.finfo.v5;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v5);
+ } else if (ver->fcxcysta == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_cysta.finfo.v7;
+ pcysta->v7 = pfwinfo->rpt_fbtc_cysta.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo.v7);
} else {
goto err;
}
@@ -1264,6 +1438,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
} else if (ver->fcxnullsta == 2) {
pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo.v2);
+ } else if (ver->fcxnullsta == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_nullsta.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo.v7);
} else {
goto err;
}
@@ -1277,6 +1454,9 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
} else if (ver->fcxmreg == 2) {
pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo.v2);
+ } else if (ver->fcxmreg == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_mregval.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo.v7);
} else {
goto err;
}
@@ -1284,14 +1464,24 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
break;
case BTC_RPT_TYPE_GPIO_DBG:
pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
- pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo);
+ if (ver->fcxgpiodbg == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo.v7);
+ } else {
+ pfinfo = &pfwinfo->rpt_fbtc_gpio_dbg.finfo.v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo.v1);
+ }
pcinfo->req_fver = ver->fcxgpiodbg;
break;
case BTC_RPT_TYPE_BT_VER:
pcinfo = &pfwinfo->rpt_fbtc_btver.cinfo;
- pfinfo = &pfwinfo->rpt_fbtc_btver.finfo;
- pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo);
+ if (ver->fcxbtver == 1) {
+ pfinfo = &pfwinfo->rpt_fbtc_btver.finfo.v1;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo.v1);
+ } else if (ver->fcxbtver == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_btver.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo.v7);
+ }
pcinfo->req_fver = ver->fcxbtver;
break;
case BTC_RPT_TYPE_BT_SCAN:
@@ -1302,6 +1492,11 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
} else if (ver->fcxbtscan == 2) {
pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo.v2;
pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo.v2);
+ } else if (ver->fcxbtscan == 7) {
+ pfinfo = &pfwinfo->rpt_fbtc_btscan.finfo.v7;
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo.v7);
+ } else {
+ goto err;
}
pcinfo->req_fver = ver->fcxbtscan;
break;
@@ -1460,6 +1655,44 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
pfwinfo->event[BTF_EVNT_RPT]);
dm->error.map.bt_rfk_timeout = bt->rfk_info.map.timeout;
+ } else if (ver->fcxbtcrpt == 8) {
+ prpt->v8 = pfwinfo->rpt_ctrl.finfo.v8;
+ pfwinfo->rpt_en_map = le32_to_cpu(prpt->v8.rpt_info.en);
+ wl->ver_info.fw_coex = le32_to_cpu(prpt->v8.rpt_info.cx_ver);
+ wl->ver_info.fw = le32_to_cpu(prpt->v8.rpt_info.fw_ver);
+
+ for (i = RTW89_PHY_0; i < RTW89_PHY_MAX; i++)
+ memcpy(&dm->gnt.band[i], &prpt->v8.gnt_val[i][0],
+ sizeof(dm->gnt.band[i]));
+
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_TX] =
+ le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_HI_TX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_HIPRI_RX] =
+ le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_HI_RX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_TX] =
+ le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_LO_TX_V105]);
+ btc->cx.cnt_bt[BTC_BCNT_LOPRI_RX] =
+ le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_LO_RX_V105]);
+
+ val1 = le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_POLLUTED_V105]);
+ if (val1 > btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW])
+ val1 -= btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW]; /* diff */
+
+ btc->cx.cnt_bt[BTC_BCNT_POLUT_DIFF] = val1;
+ btc->cx.cnt_bt[BTC_BCNT_POLUT_NOW] =
+ le16_to_cpu(prpt->v8.bt_cnt[BTC_BCNT_POLLUTED_V105]);
+
+ val1 = pfwinfo->event[BTF_EVNT_RPT];
+ if (((prpt->v8.rpt_len_max_h << 8) +
+ prpt->v8.rpt_len_max_l) != ver->info_buf)
+ dm->error.map.h2c_c2h_buffer_mismatch = true;
+ else
+ dm->error.map.h2c_c2h_buffer_mismatch = false;
+
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_HANG, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_HANG, val1);
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_FW_VER_MATCH, 0);
+ _chk_btc_err(rtwdev, BTC_DCNT_BTTX_HANG, 0);
} else {
goto err;
}
@@ -1474,7 +1707,7 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
memcmp(&dm->tdma_now,
&pfwinfo->rpt_fbtc_tdma.finfo.v1,
sizeof(dm->tdma_now)));
- else if (ver->fcxtdma == 3)
+ else if (ver->fcxtdma == 3 || ver->fcxtdma == 7)
_chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
memcmp(&dm->tdma_now,
&pfwinfo->rpt_fbtc_tdma.finfo.v3.tdma,
@@ -1483,14 +1716,25 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
goto err;
break;
case BTC_RPT_TYPE_SLOT:
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): check %d %zu\n",
- __func__, BTC_DCNT_SLOT_NONSYNC,
- sizeof(dm->slot_now));
- _chk_btc_err(rtwdev, BTC_DCNT_SLOT_NONSYNC,
- memcmp(dm->slot_now,
- pfwinfo->rpt_fbtc_slots.finfo.slot,
- sizeof(dm->slot_now)));
+ if (ver->fcxslots == 7) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): check %d %zu\n",
+ __func__, BTC_DCNT_SLOT_NONSYNC,
+ sizeof(dm->slot_now.v7));
+ _chk_btc_err(rtwdev, BTC_DCNT_SLOT_NONSYNC,
+ memcmp(dm->slot_now.v7,
+ pfwinfo->rpt_fbtc_slots.finfo.v7.slot,
+ sizeof(dm->slot_now.v7)));
+ } else if (ver->fcxslots == 1) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): check %d %zu\n",
+ __func__, BTC_DCNT_SLOT_NONSYNC,
+ sizeof(dm->slot_now.v1));
+ _chk_btc_err(rtwdev, BTC_DCNT_SLOT_NONSYNC,
+ memcmp(dm->slot_now.v1,
+ pfwinfo->rpt_fbtc_slots.finfo.v1.slot,
+ sizeof(dm->slot_now.v1)));
+ }
break;
case BTC_RPT_TYPE_CYSTA:
if (ver->fcxcysta == 2) {
@@ -1506,10 +1750,17 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
/* Check diff time between WL slot and W1/E2G slot */
if (dm->tdma_now.type == CXTDMA_OFF &&
- dm->tdma_now.ext_ctrl == CXECTL_EXT)
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_E2G].dur);
- else
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ dm->tdma_now.ext_ctrl == CXECTL_EXT) {
+ if (ver->fcxslots == 1)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_E2G].dur);
+ else if (ver->fcxslots == 7)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_E2G].dur);
+ } else {
+ if (ver->fcxslots == 1)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_W1].dur);
+ else if (ver->fcxslots == 7)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_W1].dur);
+ }
if (le16_to_cpu(pcysta->v2.tavg_cycle[CXT_WL]) > wl_slot_set) {
diff_t = le16_to_cpu(pcysta->v2.tavg_cycle[CXT_WL]) - wl_slot_set;
@@ -1539,7 +1790,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
/* Check diff time between real WL slot and W1 slot */
if (dm->tdma_now.type == CXTDMA_OFF) {
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ if (ver->fcxslots == 1)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_W1].dur);
+ else if (ver->fcxslots == 7)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_W1].dur);
wl_slot_real = le16_to_cpu(pcysta->v3.cycle_time.tavg[CXT_WL]);
if (wl_slot_real > wl_slot_set) {
diff_t = wl_slot_real - wl_slot_set;
@@ -1580,7 +1834,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
/* Check diff time between real WL slot and W1 slot */
if (dm->tdma_now.type == CXTDMA_OFF) {
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ if (ver->fcxslots == 1)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_W1].dur);
+ else if (ver->fcxslots == 7)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_W1].dur);
wl_slot_real = le16_to_cpu(pcysta->v4.cycle_time.tavg[CXT_WL]);
if (wl_slot_real > wl_slot_set) {
diff_t = wl_slot_real - wl_slot_set;
@@ -1622,7 +1879,10 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
/* Check diff time between real WL slot and W1 slot */
if (dm->tdma_now.type == CXTDMA_OFF) {
- wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+ if (ver->fcxslots == 1)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v1[CXST_W1].dur);
+ else if (ver->fcxslots == 7)
+ wl_slot_set = le16_to_cpu(dm->slot_now.v7[CXST_W1].dur);
wl_slot_real = le16_to_cpu(pcysta->v5.cycle_time.tavg[CXT_WL]);
if (wl_slot_real > wl_slot_set)
@@ -1654,11 +1914,62 @@ static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
le16_to_cpu(pcysta->v5.slot_cnt[CXST_B1]));
_chk_btc_err(rtwdev, BTC_DCNT_CYCLE_HANG,
le16_to_cpu(pcysta->v5.cycles));
+ } else if (ver->fcxcysta == 7) {
+ if (dm->fddt_train == BTC_FDDT_ENABLE)
+ break;
+
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
+
+ if (dm->tdma_now.type != CXTDMA_OFF) {
+ /* Check diff time between real WL slot and W1 slot */
+ val16 = le16_to_cpu(pcysta->v7.cycle_time.tavg[CXT_WL]);
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, val16);
+
+ /* Check Leak-AP */
+ val1 = le32_to_cpu(pcysta->v7.leak_slot.cnt_rximr) *
+ BTC_LEAK_AP_TH;
+ val2 = le16_to_cpu(pcysta->v7.slot_cnt[CXST_LK]);
+
+ val16 = le16_to_cpu(pcysta->v7.cycles);
+ if (dm->tdma_now.rxflctrl &&
+ val16 >= BTC_CYSTA_CHK_PERIOD && val1 > val2)
+ dm->leak_ap = 1;
+ } else if (dm->tdma_now.ext_ctrl == CXECTL_EXT) {
+ val16 = le16_to_cpu(pcysta->v7.cycle_time.tavg[CXT_BT]);
+ /* Check diff between real BT slot and EBT/E5G slot */
+ _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_DRIFT, val16);
+
+			/* Check bt slot length for P2P mode */
+ val1 = le16_to_cpu(pcysta->v7.a2dp_ept.cnt_timeout) *
+ BTC_SLOT_REQ_TH;
+ val2 = le16_to_cpu(pcysta->v7.a2dp_ept.cnt);
+
+ val16 = le16_to_cpu(pcysta->v7.cycles);
+ if (val16 >= BTC_CYSTA_CHK_PERIOD && val1 > val2)
+ dm->slot_req_more = 1;
+ else if (bt->link_info.status.map.connect == 0)
+ dm->slot_req_more = 0;
+ }
+
+ _chk_btc_err(rtwdev, BTC_DCNT_E2G_HANG,
+ le16_to_cpu(pcysta->v7.slot_cnt[CXST_E2G]));
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_HANG,
+ le16_to_cpu(pcysta->v7.slot_cnt[CXST_W1]));
+ _chk_btc_err(rtwdev, BTC_DCNT_B1_HANG,
+ le16_to_cpu(pcysta->v7.slot_cnt[CXST_B1]));
+
+ /* "BT_SLOT_FLOOD" error-check MUST before "CYCLE_HANG" */
+ _chk_btc_err(rtwdev, BTC_DCNT_BT_SLOT_FLOOD,
+ le16_to_cpu(pcysta->v7.cycles));
+ _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_HANG,
+ le16_to_cpu(pcysta->v7.cycles));
} else {
goto err;
}
break;
case BTC_RPT_TYPE_MREG:
+ if (ver->fcxmreg == 7)
+ break;
_get_reg_status(rtwdev, BTC_CSTATUS_BB_GNT_MUX_MON, &val);
if (dm->wl_btg_rx == BTC_BTGCTRL_BB_GNT_FWCTRL)
dm->wl_btg_rx_rb = BTC_BTGCTRL_BB_GNT_FWCTRL;
@@ -1715,6 +2026,7 @@ static void _parse_btc_report(struct rtw89_dev *rtwdev,
}
#define BTC_TLV_HDR_LEN 2
+#define BTC_TLV_HDR_LEN_V7 3
static void _append_tdma(struct rtw89_dev *rtwdev)
{
@@ -1722,6 +2034,7 @@ static void _append_tdma(struct rtw89_dev *rtwdev)
const struct rtw89_btc_ver *ver = btc->ver;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_btf_tlv *tlv;
+ struct rtw89_btc_btf_tlv_v7 *tlv_v7;
struct rtw89_btc_fbtc_tdma *v;
struct rtw89_btc_fbtc_tdma_v3 *v3;
u16 len = btc->policy_len;
@@ -1741,6 +2054,13 @@ static void _append_tdma(struct rtw89_dev *rtwdev)
tlv->len = sizeof(*v);
*v = dm->tdma;
btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
+ } else if (ver->fcxtdma == 7) {
+ tlv_v7 = (struct rtw89_btc_btf_tlv_v7 *)&btc->policy[len];
+ tlv_v7->len = sizeof(dm->tdma);
+ tlv_v7->ver = ver->fcxtdma;
+ tlv_v7->type = CXPOLICY_TDMA;
+ memcpy(tlv_v7->val, &dm->tdma, tlv_v7->len);
+ btc->policy_len += BTC_TLV_HDR_LEN_V7 + tlv_v7->len;
} else {
tlv->len = sizeof(*v3);
v3 = (struct rtw89_btc_fbtc_tdma_v3 *)&tlv->val[0];
@@ -1756,7 +2076,7 @@ static void _append_tdma(struct rtw89_dev *rtwdev)
dm->tdma.ext_ctrl);
}
-static void _append_slot(struct rtw89_dev *rtwdev)
+static void _append_slot_v1(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
@@ -1771,8 +2091,8 @@ static void _append_slot(struct rtw89_dev *rtwdev)
for (i = 0; i < CXST_MAX; i++) {
if (!btc->update_policy_force &&
- !memcmp(&dm->slot[i], &dm->slot_now[i],
- sizeof(dm->slot[i])))
+ !memcmp(&dm->slot.v1[i], &dm->slot_now.v1[i],
+ sizeof(dm->slot.v1[i])))
continue;
len = btc->policy_len;
@@ -1782,14 +2102,14 @@ static void _append_slot(struct rtw89_dev *rtwdev)
tlv->type = CXPOLICY_SLOT;
tlv->len = sizeof(*v);
- v->fver = FCXONESLOT_VER;
+ v->fver = btc->ver->fcxslots;
v->sid = i;
- v->slot = dm->slot[i];
+ v->slot = dm->slot.v1[i];
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): slot-%d: dur=%d, table=0x%08x, type=%d\n",
- __func__, i, dm->slot[i].dur, dm->slot[i].cxtbl,
- dm->slot[i].cxtype);
+ __func__, i, dm->slot.v1[i].dur, dm->slot.v1[i].cxtbl,
+ dm->slot.v1[i].cxtype);
cnt++;
btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
@@ -1801,6 +2121,71 @@ static void _append_slot(struct rtw89_dev *rtwdev)
__func__, cnt);
}
+static void _append_slot_v7(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc_btf_tlv_v7 *tlv = NULL;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u8 i, cnt = 0;
+ u16 len;
+
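+	/* all changed slots share one TLV header; each value entry is a
+	 * one-byte slot-id followed by the slot body
+	 */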
+ for (i = 0; i < CXST_MAX; i++) {
+ if (!btc->update_policy_force &&
+ !memcmp(&dm->slot.v7[i], &dm->slot_now.v7[i],
+ sizeof(dm->slot.v7[i])))
+ continue;
+
+ len = btc->policy_len;
+
+ if (!tlv) {
+ if ((len + BTC_TLV_HDR_LEN_V7) > RTW89_BTC_POLICY_MAXLEN) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): buff overflow!\n", __func__);
+ break;
+ }
+
+ tlv = (struct rtw89_btc_btf_tlv_v7 *)&btc->policy[len];
+ tlv->type = CXPOLICY_SLOT;
+ tlv->ver = btc->ver->fcxslots;
+ tlv->len = sizeof(dm->slot.v7[0]) + BTC_TLV_SLOT_ID_LEN_V7;
+ len += BTC_TLV_HDR_LEN_V7;
+ }
+
+ if ((len + (u16)tlv->len) > RTW89_BTC_POLICY_MAXLEN) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): buff overflow!\n", __func__);
+ break;
+ }
+
+ btc->policy[len] = i; /* slot-id */
+ memcpy(&btc->policy[len + 1], &dm->slot.v7[i],
+ sizeof(dm->slot.v7[0]));
+ len += tlv->len;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s: policy_len=%d, slot-%d: dur=%d, type=%d, table=0x%08x\n",
+ __func__, btc->policy_len, i, dm->slot.v7[i].dur,
+ dm->slot.v7[i].cxtype, dm->slot.v7[i].cxtbl);
+ cnt++;
+ btc->policy_len = len; /* update total length */
+ }
+
+ if (cnt > 0)
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s: slot update (cnt=%d, len=%d)!!\n",
+ __func__, cnt, btc->policy_len);
+}
+
+static void _append_slot(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ if (btc->ver->fcxslots == 7)
+ _append_slot_v7(rtwdev);
+ else
+ _append_slot_v1(rtwdev);
+}
+
static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -1946,13 +2331,52 @@ static u32 rtw89_btc_fw_rpt_ver(struct rtw89_dev *rtwdev, u32 rpt_map)
return bit_map;
}
+static void rtw89_btc_fw_set_slots(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_btc_ver *ver = btc->ver;
+ struct rtw89_btc_btf_tlv_v7 *tlv_v7 = NULL;
+ struct rtw89_btc_btf_set_slot_table *tbl;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u16 n, len;
+
+ if (ver->fcxslots == 7) {
+ len = sizeof(*tlv_v7) + sizeof(dm->slot.v7);
+ tlv_v7 = kmalloc(len, GFP_KERNEL);
+ if (!tlv_v7)
+ return;
+
+ tlv_v7->type = SET_SLOT_TABLE;
+ tlv_v7->ver = ver->fcxslots;
+ tlv_v7->len = ARRAY_SIZE(dm->slot.v7);
+ memcpy(tlv_v7->val, dm->slot.v7, sizeof(dm->slot.v7));
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_SLOT_TABLE, (u8 *)tlv_v7, len);
+
+ kfree(tlv_v7);
+ } else {
+ n = struct_size(tbl, tbls, CXST_MAX);
+ tbl = kmalloc(n, GFP_KERNEL);
+ if (!tbl)
+ return;
+
+ tbl->fver = BTF_SET_SLOT_TABLE_VER;
+ tbl->tbl_num = CXST_MAX;
+ memcpy(tbl->tbls, dm->slot.v1, flex_array_size(tbl, tbls, CXST_MAX));
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_SLOT_TABLE, tbl, n);
+
+ kfree(tbl);
+ }
+}
+
static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
u32 rpt_map, bool rpt_state)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_smap *wl_smap = &btc->cx.wl.status.map;
struct rtw89_btc_btf_fwinfo *fwinfo = &btc->fwinfo;
- struct rtw89_btc_btf_set_report r = {0};
+ union rtw89_fbtc_rtp_ctrl r;
u32 val, bit_map;
int ret;
@@ -1973,41 +2397,35 @@ static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
if (val == fwinfo->rpt_en_map)
return;
- r.fver = BTF_SET_REPORT_VER;
- r.enable = cpu_to_le32(val);
- r.para = cpu_to_le32(rpt_state);
+ if (btc->ver->fcxbtcrpt == 8) {
+ r.v8.type = SET_REPORT_EN;
+ r.v8.fver = btc->ver->fcxbtcrpt;
+ r.v8.len = sizeof(r.v8.map);
+ r.v8.map = cpu_to_le32(val);
+ ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r.v8,
+ sizeof(r.v8));
+ } else {
+ if (btc->ver->fcxbtcrpt == 105)
+ r.v1.fver = 5;
+ else
+ r.v1.fver = btc->ver->fcxbtcrpt;
+ r.v1.enable = cpu_to_le32(val);
+ r.v1.para = cpu_to_le32(rpt_state);
+ ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r.v1,
+ sizeof(r.v1));
+ }
- ret = _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r, sizeof(r));
if (!ret)
fwinfo->rpt_en_map = val;
}
-static void rtw89_btc_fw_set_slots(struct rtw89_dev *rtwdev, u8 num,
- struct rtw89_btc_fbtc_slot *s)
-{
- struct rtw89_btc_btf_set_slot_table *tbl;
- u16 n;
-
- n = struct_size(tbl, tbls, num);
- tbl = kmalloc(n, GFP_KERNEL);
- if (!tbl)
- return;
-
- tbl->fver = BTF_SET_SLOT_TABLE_VER;
- tbl->tbl_num = num;
- memcpy(tbl->tbls, s, flex_array_size(tbl, tbls, num));
-
- _send_fw_cmd(rtwdev, BTFC_SET, SET_SLOT_TABLE, tbl, n);
-
- kfree(tbl);
-}
-
static void btc_fw_set_monreg(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
- struct rtw89_btc_btf_set_mon_reg *monreg = NULL;
- u8 n, ulen, cxmreg_max;
+ struct rtw89_btc_btf_set_mon_reg_v1 *v1 = NULL;
+ struct rtw89_btc_btf_set_mon_reg_v7 *v7 = NULL;
+ u8 i, n, ulen, cxmreg_max;
u16 sz = 0;
n = chip->mon_reg_num;
@@ -2016,10 +2434,8 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev)
if (ver->fcxmreg == 1)
cxmreg_max = CXMREG_MAX;
- else if (ver->fcxmreg == 2)
- cxmreg_max = CXMREG_MAX_V2;
else
- return;
+ cxmreg_max = CXMREG_MAX_V2;
if (n > cxmreg_max) {
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -2028,21 +2444,37 @@ static void btc_fw_set_monreg(struct rtw89_dev *rtwdev)
return;
}
- ulen = sizeof(monreg->regs[0]);
- sz = struct_size(monreg, regs, n);
- monreg = kmalloc(sz, GFP_KERNEL);
- if (!monreg)
- return;
+ ulen = sizeof(struct rtw89_btc_fbtc_mreg);
+
+ if (ver->fcxmreg == 7) {
+ sz = struct_size(v7, regs, n);
+		v7 = kmalloc(sz, GFP_KERNEL);
+		if (!v7)
+			return;
+		v7->type = RPT_EN_MREG;
+ v7->fver = ver->fcxmreg;
+ v7->len = n;
+ for (i = 0; i < n; i++) {
+ v7->regs[i].type = chip->mon_reg[i].type;
+ v7->regs[i].bytes = chip->mon_reg[i].bytes;
+ v7->regs[i].offset = chip->mon_reg[i].offset;
+ }
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_MREG_TABLE, v7, sz);
+ kfree(v7);
+ } else {
+ sz = struct_size(v1, regs, n);
+		v1 = kmalloc(sz, GFP_KERNEL);
+		if (!v1)
+			return;
+		v1->fver = ver->fcxmreg;
+ v1->reg_num = n;
+ memcpy(v1->regs, chip->mon_reg, flex_array_size(v1, regs, n));
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_MREG_TABLE, v1, sz);
+ kfree(v1);
+ }
- monreg->fver = ver->fcxmreg;
- monreg->reg_num = n;
- memcpy(monreg->regs, chip->mon_reg, flex_array_size(monreg, regs, n));
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): sz=%d ulen=%d n=%d\n",
__func__, sz, ulen, n);
- _send_fw_cmd(rtwdev, BTFC_SET, SET_MREG_TABLE, (u8 *)monreg, sz);
- kfree(monreg);
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_MREG, 1);
}
@@ -2100,7 +2532,10 @@ static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
btc->policy, btc->policy_len);
if (!ret) {
memcpy(&dm->tdma_now, &dm->tdma, sizeof(dm->tdma_now));
- memcpy(&dm->slot_now, &dm->slot, sizeof(dm->slot_now));
+ if (btc->ver->fcxslots == 7)
+ memcpy(&dm->slot_now.v7, &dm->slot.v7, sizeof(dm->slot_now.v7));
+ else
+ memcpy(&dm->slot_now.v1, &dm->slot.v1, sizeof(dm->slot_now.v1));
}
if (btc->update_policy_force)
@@ -2243,6 +2678,76 @@ static void _set_gnt(struct rtw89_dev *rtwdev, u8 phy_map, u8 wl_state, u8 bt_st
rtw89_chip_mac_cfg_gnt(rtwdev, &dm->gnt);
}
+static void _set_gnt_v1(struct rtw89_dev *rtwdev, u8 phy_map,
+ u8 wl_state, u8 bt_state, u8 wlact_state)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_mac_ax_gnt *g = dm->gnt.band;
+ u8 i, bt_idx = dm->bt_select + 1;
+
+ if (phy_map > BTC_PHY_ALL)
+ return;
+
+ for (i = 0; i < RTW89_PHY_MAX; i++) {
+ if (!(phy_map & BIT(i)))
+ continue;
+
+ switch (wl_state) {
+ case BTC_GNT_HW:
+ g[i].gnt_wl_sw_en = 0;
+ g[i].gnt_wl = 0;
+ break;
+ case BTC_GNT_SW_LO:
+ g[i].gnt_wl_sw_en = 1;
+ g[i].gnt_wl = 0;
+ break;
+ case BTC_GNT_SW_HI:
+ g[i].gnt_wl_sw_en = 1;
+ g[i].gnt_wl = 1;
+ break;
+ }
+
+ switch (bt_state) {
+ case BTC_GNT_HW:
+ g[i].gnt_bt_sw_en = 0;
+ g[i].gnt_bt = 0;
+ break;
+ case BTC_GNT_SW_LO:
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 0;
+ break;
+ case BTC_GNT_SW_HI:
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 1;
+ break;
+ }
+ }
+
+ if (rtwdev->chip->para_ver & BTC_FEAT_WLAN_ACT_MUX) {
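+		/* bt_select + 1 forms a 2-bit BT-port mask: BIT(0) = port 0,
+		 * BIT(1) = port 1
+		 */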
+ for (i = 0; i < 2; i++) {
+ if (!(bt_idx & BIT(i)))
+ continue;
+
+ switch (wlact_state) {
+ case BTC_WLACT_HW:
+ dm->gnt.bt[i].wlan_act_en = 0;
+ dm->gnt.bt[i].wlan_act = 0;
+ break;
+ case BTC_WLACT_SW_LO:
+ dm->gnt.bt[i].wlan_act_en = 1;
+ dm->gnt.bt[i].wlan_act = 0;
+ break;
+ case BTC_WLACT_SW_HI:
+ dm->gnt.bt[i].wlan_act_en = 1;
+ dm->gnt.bt[i].wlan_act = 1;
+ break;
+ }
+ }
+ }
+ rtw89_mac_cfg_gnt_v2(rtwdev, &dm->gnt);
+}
+
#define BTC_TDMA_WLROLE_MAX 2
static void _set_bt_ignore_wlan_act(struct rtw89_dev *rtwdev, u8 enable)
@@ -2493,9 +2998,11 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_wl_active_role *r;
struct rtw89_btc_wl_active_role_v1 *r1;
struct rtw89_btc_wl_active_role_v2 *r2;
+ struct rtw89_btc_wl_rlink *rlink;
u8 en = 0, i, ch = 0, bw = 0;
u8 mode, connect_cnt;
@@ -2511,6 +3018,9 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
} else if (ver->fwlrole == 2) {
mode = wl_rinfo_v2->link_mode;
connect_cnt = wl_rinfo_v2->connect_cnt;
+ } else if (ver->fwlrole == 8) {
+ mode = wl_rinfo_v8->link_mode;
+ connect_cnt = wl_rinfo_v8->connect_cnt;
} else {
return;
}
@@ -2526,6 +3036,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
r2 = &wl_rinfo_v2->active_role_v2[i];
+ rlink = &wl_rinfo_v8->rlink[i][0];
if (ver->fwlrole == 0 &&
(r->role == RTW89_WIFI_ROLE_P2P_GO ||
@@ -2545,6 +3056,12 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
ch = r2->ch;
bw = r2->bw;
break;
+ } else if (ver->fwlrole == 8 &&
+ (rlink->role == RTW89_WIFI_ROLE_P2P_GO ||
+ rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT)) {
+ ch = rlink->ch;
+ bw = rlink->bw;
+ break;
}
}
} else {
@@ -2554,6 +3071,7 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
r = &wl_rinfo->active_role[i];
r1 = &wl_rinfo_v1->active_role_v1[i];
r2 = &wl_rinfo_v2->active_role_v2[i];
+ rlink = &wl_rinfo_v8->rlink[i][0];
if (ver->fwlrole == 0 &&
r->connected && r->band == RTW89_BAND_2G) {
@@ -2570,6 +3088,11 @@ static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
ch = r2->ch;
bw = r2->bw;
break;
+ } else if (ver->fwlrole == 8 &&
+ rlink->connected && rlink->rf_band == RTW89_BAND_2G) {
+ ch = rlink->ch;
+ bw = rlink->bw;
+ break;
}
}
}
@@ -2622,25 +3145,35 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
+ struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc;
union rtw89_btc_module_info *md = &btc->mdinfo;
const struct rtw89_btc_ver *ver = btc->ver;
- u8 isolation;
+ u8 isolation, connect_cnt = 0;
if (ver->fcxinit == 7)
isolation = md->md_v7.ant.isolation;
else
isolation = md->md.ant.isolation;
+ if (ver->fwlrole == 0)
+ connect_cnt = wl_rinfo->connect_cnt;
+ else if (ver->fwlrole == 1)
+ connect_cnt = wl_rinfo_v1->connect_cnt;
+ else if (ver->fwlrole == 2)
+ connect_cnt = wl_rinfo_v2->connect_cnt;
+ else if (ver->fwlrole == 8)
+ connect_cnt = wl_rinfo_v8->connect_cnt;
+
if (btc->ant_type == BTC_ANT_SHARED) {
btc->dm.trx_para_level = 0;
return false;
}
/* The below is dedicated antenna case */
- if (wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX ||
- wl_rinfo_v1->connect_cnt > BTC_TDMA_WLROLE_MAX) {
+ if (connect_cnt > BTC_TDMA_WLROLE_MAX) {
btc->dm.trx_para_level = 5;
return true;
}
@@ -2693,19 +3226,6 @@ static bool _check_freerun(struct rtw89_dev *rtwdev)
#define _tdma_set_tog(btc, wtg) ({(btc)->dm.tdma.wtgle_n = wtg; })
#define _tdma_set_lek(btc, lek) ({(btc)->dm.tdma.leak_n = lek; })
-#define _slot_set(btc, sid, dura, tbl, type) \
- do { \
- typeof(sid) _sid = (sid); \
- typeof(btc) _btc = (btc); \
- _btc->dm.slot[_sid].dur = cpu_to_le16(dura);\
- _btc->dm.slot[_sid].cxtbl = cpu_to_le32(tbl); \
- _btc->dm.slot[_sid].cxtype = cpu_to_le16(type); \
- } while (0)
-
-#define _slot_set_dur(btc, sid, dura) (btc)->dm.slot[sid].dur = cpu_to_le16(dura)
-#define _slot_set_tbl(btc, sid, tbl) (btc)->dm.slot[sid].cxtbl = cpu_to_le32(tbl)
-#define _slot_set_type(btc, sid, type) (btc)->dm.slot[sid].cxtype = cpu_to_le16(type)
-
struct btc_btinfo_lb2 {
u8 connect: 1;
u8 sco_busy: 1;
@@ -2780,7 +3300,7 @@ void rtw89_btc_set_policy(struct rtw89_dev *rtwdev, u16 policy_type)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_fbtc_tdma *t = &dm->tdma;
- struct rtw89_btc_fbtc_slot *s = dm->slot;
+ struct rtw89_btc_fbtc_slot *s = dm->slot.v1;
u8 type;
u32 tbl_w1, tbl_b1, tbl_b4;
@@ -3091,13 +3611,13 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
struct rtw89_btc_fbtc_tdma *t = &dm->tdma;
- struct rtw89_btc_fbtc_slot *s = dm->slot;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo = &btc->cx.wl.role_info_v1;
struct rtw89_btc_bt_hid_desc *hid = &btc->cx.bt.link_info.hid_desc;
struct rtw89_btc_bt_hfp_desc *hfp = &btc->cx.bt.link_info.hfp_desc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
u8 type, null_role;
u32 tbl_w1, tbl_b1, tbl_b4;
+ u16 dur_2;
type = FIELD_GET(BTC_CXP_MASK, policy_type);
@@ -3139,13 +3659,15 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
case BTC_CXP_USERDEF0:
btc->update_policy_force = true;
*t = t_def[CXTD_OFF];
- s[CXST_OFF] = s_def[CXST_OFF];
+ _slot_set_le(btc, CXST_OFF, s_def[CXST_OFF].dur,
+ s_def[CXST_OFF].cxtbl, s_def[CXST_OFF].cxtype);
_slot_set_tbl(btc, CXST_OFF, cxtbl[2]);
break;
case BTC_CXP_OFF: /* TDMA off */
_write_scbd(rtwdev, BTC_WSCB_TDMA, false);
*t = t_def[CXTD_OFF];
- s[CXST_OFF] = s_def[CXST_OFF];
+ _slot_set_le(btc, CXST_OFF, s_def[CXST_OFF].dur,
+ s_def[CXST_OFF].cxtbl, s_def[CXST_OFF].cxtype);
switch (policy_type) {
case BTC_CXP_OFF_BT:
@@ -3186,7 +3708,8 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
case BTC_CXP_OFFB: /* TDMA off + beacon protect */
_write_scbd(rtwdev, BTC_WSCB_TDMA, false);
*t = t_def[CXTD_OFF_B2];
- s[CXST_OFF] = s_def[CXST_OFF];
+ _slot_set_le(btc, CXST_OFF, s_def[CXST_OFF].dur,
+ s_def[CXST_OFF].cxtbl, s_def[CXST_OFF].cxtype);
switch (policy_type) {
case BTC_CXP_OFFB_BWB0:
@@ -3205,23 +3728,54 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type)
if (hid->exist || hfp->exist)
tbl_w1 = cxtbl[16];
+ dur_2 = dm->e2g_slot_limit;
+
switch (policy_type) {
+ case BTC_CXP_OFFE_2GBWISOB: /* for normal-case */
+ _slot_set(btc, CXST_E2G, 0, tbl_w1, SLOT_ISO);
+ _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
+ s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
+ _slot_set_dur(btc, CXST_EBT, dur_2);
+ break;
+ case BTC_CXP_OFFE_2GISOB: /* for bt no-link */
+ _slot_set(btc, CXST_E2G, 0, cxtbl[1], SLOT_ISO);
+ _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
+ s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
+ _slot_set_dur(btc, CXST_EBT, dur_2);
+ break;
case BTC_CXP_OFFE_DEF:
- s[CXST_E2G] = s_def[CXST_E2G];
- s[CXST_E5G] = s_def[CXST_E5G];
- s[CXST_EBT] = s_def[CXST_EBT];
- s[CXST_ENULL] = s_def[CXST_ENULL];
+ _slot_set_le(btc, CXST_E2G, s_def[CXST_E2G].dur,
+ s_def[CXST_E2G].cxtbl, s_def[CXST_E2G].cxtype);
+ _slot_set_le(btc, CXST_E5G, s_def[CXST_E5G].dur,
+ s_def[CXST_E5G].cxtbl, s_def[CXST_E5G].cxtype);
+ _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
+ s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
+ _slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur,
+ s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype);
break;
case BTC_CXP_OFFE_DEF2:
_slot_set(btc, CXST_E2G, 20, cxtbl[1], SLOT_ISO);
- s[CXST_E5G] = s_def[CXST_E5G];
- s[CXST_EBT] = s_def[CXST_EBT];
- s[CXST_ENULL] = s_def[CXST_ENULL];
+ _slot_set_le(btc, CXST_E5G, s_def[CXST_E5G].dur,
+ s_def[CXST_E5G].cxtbl, s_def[CXST_E5G].cxtype);
+ _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
+ s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype);
+ _slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur,
+ s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype);
+ break;
+ case BTC_CXP_OFFE_2GBWMIXB:
+ _slot_set(btc, CXST_E2G, 0, 0x55555555, SLOT_MIX);
+ _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur,
+ cpu_to_le32(0x55555555), s_def[CXST_EBT].cxtype);
+ break;
+ case BTC_CXP_OFFE_WL: /* for 4-way */
+ _slot_set(btc, CXST_E2G, 0, cxtbl[1], SLOT_MIX);
+ _slot_set(btc, CXST_EBT, 0, cxtbl[1], SLOT_MIX);
break;
default:
break;
}
- s[CXST_OFF] = s_def[CXST_OFF];
+ _slot_set_le(btc, CXST_OFF, s_def[CXST_OFF].dur,
+ s_def[CXST_OFF].cxtbl, s_def[CXST_OFF].cxtype);
break;
case BTC_CXP_FIX: /* TDMA Fix-Slot */
_write_scbd(rtwdev, BTC_WSCB_TDMA, true);
@@ -3481,25 +4035,36 @@ EXPORT_SYMBOL(rtw89_btc_set_policy_v1);
static void _set_bt_plut(struct rtw89_dev *rtwdev, u8 phy_map,
u8 tx_val, u8 rx_val)
{
+ struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;
struct rtw89_mac_ax_plt plt;
- plt.band = RTW89_MAC_0;
plt.tx = tx_val;
plt.rx = rx_val;
- if (phy_map & BTC_PHY_0)
+ if (rtwdev->btc.ver->fwlrole == 8) {
+ plt.band = wl->pta_req_mac;
+ if (wl->bt_polut_type[plt.band] == tx_val)
+ return;
+
+ wl->bt_polut_type[plt.band] = tx_val;
rtw89_mac_cfg_plt(rtwdev, &plt);
+ } else {
+ plt.band = RTW89_MAC_0;
- if (!rtwdev->dbcc_en)
- return;
+ if (phy_map & BTC_PHY_0)
+ rtw89_mac_cfg_plt(rtwdev, &plt);
- plt.band = RTW89_MAC_1;
- if (phy_map & BTC_PHY_1)
- rtw89_mac_cfg_plt(rtwdev, &plt);
+ if (!rtwdev->dbcc_en)
+ return;
+
+ plt.band = RTW89_MAC_1;
+ if (phy_map & BTC_PHY_1)
+ rtw89_mac_cfg_plt(rtwdev, &plt);
+ }
}
-static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
- u8 phy_map, u8 type)
+static void _set_ant_v0(struct rtw89_dev *rtwdev, bool force_exec,
+ u8 phy_map, u8 type)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
@@ -3508,13 +4073,21 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
struct rtw89_btc_bt_info *bt = &cx->bt;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
u8 gnt_wl_ctrl, gnt_bt_ctrl, plt_ctrl, i, b2g = 0;
+ bool dbcc_chg = false;
u32 ant_path_type;
ant_path_type = ((phy_map << 8) + type);
+ if (btc->ver->fwlrole == 1)
+ dbcc_chg = wl->role_info_v1.dbcc_chg;
+ else if (btc->ver->fwlrole == 2)
+ dbcc_chg = wl->role_info_v2.dbcc_chg;
+ else if (btc->ver->fwlrole == 8)
+ dbcc_chg = wl->role_info_v8.dbcc_chg;
+
if (btc->dm.run_reason == BTC_RSN_NTFY_POWEROFF ||
btc->dm.run_reason == BTC_RSN_NTFY_RADIO_STATE ||
- btc->dm.run_reason == BTC_RSN_CMD_SET_COEX)
+ btc->dm.run_reason == BTC_RSN_CMD_SET_COEX || dbcc_chg)
force_exec = FC_EXEC;
if (!force_exec && ant_path_type == dm->set_ant_path) {
@@ -3617,6 +4190,117 @@ static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
}
}
+static void _set_ant_v1(struct rtw89_dev *rtwdev, bool force_exec,
+ u8 phy_map, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ u32 ant_path_type = rtw89_get_antpath_type(phy_map, type);
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u8 gwl = BTC_GNT_HW;
+
+ if (btc->dm.run_reason == BTC_RSN_NTFY_POWEROFF ||
+ btc->dm.run_reason == BTC_RSN_NTFY_RADIO_STATE ||
+ btc->dm.run_reason == BTC_RSN_CMD_SET_COEX || wl_rinfo->dbcc_chg)
+ force_exec = FC_EXEC;
+
+ if (wl_rinfo->link_mode != BTC_WLINK_25G_MCC &&
+ btc->dm.wl_btg_rx == 2)
+ force_exec = FC_EXEC;
+
+ if (!force_exec && ant_path_type == dm->set_ant_path) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return by no change!!\n",
+ __func__);
+ return;
+ } else if (bt->rfk_info.map.run) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return by bt rfk!!\n", __func__);
+ return;
+ } else if (btc->dm.run_reason != BTC_RSN_NTFY_WL_RFK &&
+ wl->rfk_info.state != BTC_WRFK_STOP) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return by wl rfk!!\n", __func__);
+ return;
+ }
+
+ dm->set_ant_path = ant_path_type;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): path=0x%x, set_type=0x%x\n",
+ __func__, phy_map, dm->set_ant_path & 0xff);
+
+ switch (type) {
+ case BTC_ANT_WINIT:
+ /* To avoid BT MP driver case (bt_enable but no mailbox) */
+ if (bt->enable.now && bt->run_patch_code)
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_LO, BTC_GNT_SW_HI,
+ BTC_WLACT_SW_LO);
+ else
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO,
+ BTC_WLACT_SW_HI);
+ break;
+ case BTC_ANT_WONLY:
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO,
+ BTC_WLACT_SW_HI);
+ break;
+ case BTC_ANT_WOFF:
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_LO, BTC_GNT_SW_HI,
+ BTC_WLACT_SW_LO);
+ break;
+ case BTC_ANT_W2G:
+ case BTC_ANT_W25G:
+ if (wl_rinfo->dbcc_en) {
+ if (wl_dinfo->real_band[RTW89_PHY_0] == RTW89_BAND_2G)
+ gwl = BTC_GNT_HW;
+ else
+ gwl = BTC_GNT_SW_HI;
+ _set_gnt_v1(rtwdev, BTC_PHY_0, gwl, BTC_GNT_HW, BTC_WLACT_HW);
+
+ if (wl_dinfo->real_band[RTW89_PHY_1] == RTW89_BAND_2G)
+ gwl = BTC_GNT_HW;
+ else
+ gwl = BTC_GNT_SW_HI;
+ _set_gnt_v1(rtwdev, BTC_PHY_1, gwl, BTC_GNT_HW, BTC_WLACT_HW);
+ } else {
+ gwl = BTC_GNT_HW;
+ _set_gnt_v1(rtwdev, phy_map, gwl, BTC_GNT_HW, BTC_WLACT_HW);
+ }
+ break;
+ case BTC_ANT_W5G:
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_HW, BTC_WLACT_HW);
+ break;
+ case BTC_ANT_FREERUN:
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_HI,
+ BTC_WLACT_SW_LO);
+ break;
+ case BTC_ANT_WRFK:
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO,
+ BTC_WLACT_HW);
+ break;
+ case BTC_ANT_WRFK2:
+ _set_gnt_v1(rtwdev, phy_map, BTC_GNT_SW_HI, BTC_GNT_SW_LO,
+ BTC_WLACT_SW_HI); /* no BT-Tx */
+ break;
+ default:
+ return;
+ }
+
+ _set_bt_plut(rtwdev, phy_map, BTC_PLT_GNT_WL, BTC_PLT_GNT_WL);
+}
+
+static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
+ u8 phy_map, u8 type)
+{
+ if (rtwdev->chip->chip_id == RTL8922A)
+ _set_ant_v1(rtwdev, force_exec, phy_map, type);
+ else
+ _set_ant_v0(rtwdev, force_exec, phy_map, type);
+}
+
static void _action_wl_only(struct rtw89_dev *rtwdev)
{
_set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_WONLY);
@@ -3641,7 +4325,7 @@ static void _action_wl_off(struct rtw89_dev *rtwdev, u8 mode)
if (wl->status.map.rf_off || btc->dm.bt_only) {
_set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_WOFF);
} else if (wl->status.map.lps == BTC_LPS_RF_ON) {
- if (wl->role_info.link_mode == BTC_WLINK_5G)
+ if (mode == BTC_WLINK_5G)
_set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_W5G);
else
_set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
@@ -4070,6 +4754,7 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_wl_role_info *wl_rinfo_v0 = &wl->role_info;
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -4090,6 +4775,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
wl_rinfo.link_mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
wl_rinfo.link_mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 8)
+ wl_rinfo.link_mode = wl_rinfo_v8->link_mode;
else
return;
@@ -4103,6 +4790,8 @@ static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
wl_rinfo.dbcc_2g_phy = wl_rinfo_v1->dbcc_2g_phy;
} else if (ver->fwlrole == 2) {
wl_rinfo.dbcc_2g_phy = wl_rinfo_v2->dbcc_2g_phy;
+ } else if (ver->fwlrole == 8) {
+ wl_rinfo.dbcc_2g_phy = wl_rinfo_v8->dbcc_2g_phy;
} else {
return;
}
@@ -4223,7 +4912,10 @@ static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta)
u16 enable = iter_data->enable;
bool reenable = iter_data->reenable;
- plink = &wl->link_info[port];
+ if (btc->ver->fwlrole == 8)
+ plink = &wl->rlink_info[port][0];
+ else
+ plink = &wl->link_info[port];
rtw89_debug(rtwdev, RTW89_DBG_BTC,
"[BTC], %s(): port = %d\n", __func__, port);
@@ -4276,6 +4968,7 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_txtime_data data = {.rtwdev = rtwdev};
u8 mode, igno_bt, tx_retry;
u32 tx_time;
@@ -4291,6 +4984,8 @@ static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 8)
+ mode = wl_rinfo_v8->link_mode;
else
return;
@@ -4348,6 +5043,7 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
bool bt_hi_lna_rx = false;
u8 mode;
@@ -4358,6 +5054,8 @@ static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 8)
+ mode = wl_rinfo_v8->link_mode;
else
return;
@@ -4378,11 +5076,14 @@ static void _set_bt_rx_scan_pri(struct rtw89_dev *rtwdev)
_write_scbd(rtwdev, BTC_WSCB_RXSCAN_PRI, (bool)(!!bt->scan_rx_low_pri));
}
-/* TODO add these functions */
static void _action_common(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_smap *wl_smap = &wl->status.map;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u32 bt_rom_code_id, bt_fw_ver;
_set_btg_ctrl(rtwdev);
_set_wl_preagc_ctrl(rtwdev);
@@ -4392,6 +5093,26 @@ static void _action_common(struct rtw89_dev *rtwdev)
_set_rf_trx_para(rtwdev);
_set_bt_rx_scan_pri(rtwdev);
+ bt_rom_code_id = chip_id_to_bt_rom_code_id(rtwdev->btc.ver->chip_id);
+ bt_fw_ver = bt->ver_info.fw & 0xffff;
+ if (bt->enable.now &&
+ (bt_fw_ver == 0 ||
+ (bt_fw_ver == bt_rom_code_id && bt->run_patch_code && rtwdev->chip->scbd)))
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, 1);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, 0);
+
+ if (dm->run_reason == BTC_RSN_NTFY_INIT ||
+ dm->run_reason == BTC_RSN_NTFY_RADIO_STATE ||
+ dm->run_reason == BTC_RSN_NTFY_POWEROFF) {
+ _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
+
+ if (wl_smap->rf_off == 1 || wl_smap->lps != BTC_LPS_OFF)
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_ALL, 0);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_MREG, 1);
+ }
+
if (wl->scbd_change) {
rtw89_mac_cfg_sb(rtwdev, wl->scbd);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], write scbd: 0x%08x\n",
@@ -4488,6 +5209,30 @@ static void _action_wl_2g_sta(struct rtw89_dev *rtwdev)
_action_by_bt(rtwdev);
}
+static void _action_wl_25g_mcc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ u16 policy_type = BTC_CXP_OFF_BT;
+
+ if (btc->ant_type == BTC_ANT_SHARED) {
+ if (btc->cx.wl.status.map._4way)
+ policy_type = BTC_CXP_OFFE_WL;
+ else if (btc->cx.wl.status.val & btc_scanning_map.val)
+ policy_type = BTC_CXP_OFFE_2GBWMIXB;
+ else if (btc->cx.bt.link_info.profile_cnt.now == 0)
+ policy_type = BTC_CXP_OFFE_2GISOB;
+ else
+ policy_type = BTC_CXP_OFFE_2GBWISOB;
+ } else { /* dedicated-antenna */
+ policy_type = BTC_CXP_OFF_EQ0;
+ }
+
+ btc->dm.e2g_slot_limit = BTC_E2G_LIMIT_DEF;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
+ _set_policy(rtwdev, policy_type, BTC_ACT_WL_25G_MCC);
+}
+
static void _action_wl_scan(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -4495,14 +5240,7 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev)
struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
if (RTW89_CHK_FW_FEATURE(SCAN_OFFLOAD, &rtwdev->fw)) {
- _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
- if (btc->ant_type == BTC_ANT_SHARED)
- _set_policy(rtwdev, BTC_CXP_OFFE_DEF,
- BTC_RSN_NTFY_SCAN_START);
- else
- _set_policy(rtwdev, BTC_CXP_OFF_EQ0,
- BTC_RSN_NTFY_SCAN_START);
-
+ _action_wl_25g_mcc(rtwdev);
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], Scan offload!\n");
} else if (rtwdev->dbcc_en) {
if (wl_dinfo->real_band[RTW89_PHY_0] != RTW89_BAND_2G &&
@@ -4518,24 +5256,6 @@ static void _action_wl_scan(struct rtw89_dev *rtwdev)
}
}
-static void _action_wl_25g_mcc(struct rtw89_dev *rtwdev)
-{
- struct rtw89_btc *btc = &rtwdev->btc;
-
- _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
-
- if (btc->ant_type == BTC_ANT_SHARED) {
- if (btc->cx.bt.link_info.profile_cnt.now == 0)
- _set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
- BTC_ACT_WL_25G_MCC);
- else
- _set_policy(rtwdev, BTC_CXP_OFFE_DEF,
- BTC_ACT_WL_25G_MCC);
- } else { /* dedicated-antenna */
- _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_25G_MCC);
- }
-}
-
static void _action_wl_2g_mcc(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -4695,6 +5415,31 @@ static void _action_wl_2g_scc_v2(struct rtw89_dev *rtwdev)
_set_policy(rtwdev, policy_type, BTC_ACT_WL_2G_SCC);
}
+static void _action_wl_2g_scc_v8(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u16 policy_type = BTC_CXP_OFF_BT;
+
+ if (btc->ant_type == BTC_ANT_SHARED) {
+ if (wl->status.map._4way)
+ policy_type = BTC_CXP_OFFE_WL;
+ else if (bt->link_info.status.map.connect == 0)
+ policy_type = BTC_CXP_OFFE_2GISOB;
+ else
+ policy_type = BTC_CXP_OFFE_2GBWISOB;
+ } else {
+ policy_type = BTC_CXP_OFF_EQ0;
+ }
+
+ dm->e2g_slot_limit = BTC_E2G_LIMIT_DEF;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ _set_policy(rtwdev, policy_type, BTC_ACT_WL_2G_SCC);
+}
+
static void _action_wl_2g_ap(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -5287,6 +6032,312 @@ static void _update_wl_info_v2(struct rtw89_dev *rtwdev)
#define BTC_CHK_HANG_MAX 3
#define BTC_SCB_INV_VALUE GENMASK(31, 0)
+static u8 _get_role_link_mode(u8 role)
+{
+ switch (role) {
+ case RTW89_WIFI_ROLE_STATION:
+ return BTC_WLINK_2G_STA;
+ case RTW89_WIFI_ROLE_P2P_GO:
+ return BTC_WLINK_2G_GO;
+ case RTW89_WIFI_ROLE_P2P_CLIENT:
+ return BTC_WLINK_2G_GC;
+ case RTW89_WIFI_ROLE_AP:
+ return BTC_WLINK_2G_AP;
+ default:
+ return BTC_WLINK_OTHER;
+ }
+}
+
+static bool _chk_role_ch_group(const struct rtw89_btc_chdef *r1,
+ const struct rtw89_btc_chdef *r2)
+{
+ if (r1->chan != r2->chan) { /* primary ch is different */
+ return false;
+ } else if (r1->bw == RTW89_CHANNEL_WIDTH_40 &&
+ r2->bw == RTW89_CHANNEL_WIDTH_40) {
+ if (r1->offset != r2->offset)
+ return false;
+ }
+ return true;
+}
+
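
_chk_role_ch_group() above decides when two roles can share one slot group (SCC) rather than time-slice channels (MCC): the primary channel must match, and if both links are 40 MHz the secondary offset must match too. A self-contained sketch of the same rule with hypothetical channel values; BW_40 stands in for RTW89_CHANNEL_WIDTH_40:

#include <stdbool.h>
#include <stdio.h>

#define BW_40 2	/* stand-in for RTW89_CHANNEL_WIDTH_40 */

struct chdef {
	unsigned int chan;	/* primary channel */
	unsigned int bw;
	unsigned int offset;	/* secondary-channel offset */
};

static bool same_group(const struct chdef *a, const struct chdef *b)
{
	if (a->chan != b->chan)
		return false;
	if (a->bw == BW_40 && b->bw == BW_40 && a->offset != b->offset)
		return false;
	return true;
}

int main(void)
{
	struct chdef sta = { .chan = 6, .bw = BW_40, .offset = 1 };
	struct chdef go  = { .chan = 6, .bw = BW_40, .offset = 1 };

	/* same primary channel and offset -> SCC-capable group */
	printf("%s\n", same_group(&sta, &go) ? "2G_SCC" : "2G_MCC");
	return 0;
}
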
+static u8 _chk_dbcc(struct rtw89_dev *rtwdev, struct rtw89_btc_chdef *ch,
+ u8 *phy, u8 *role, u8 *dbcc_2g_phy)
+{
+ struct rtw89_btc_wl_info *wl = &rtwdev->btc.cx.wl;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ bool is_2g_ch_exist = false, is_multi_role_in_2g_phy = false;
+ u8 j, k, dbcc_2g_cid, dbcc_2g_cid2;
+
+ /* find the 2G PHY via each connect-id's channel */
+ for (j = 0; j < wl_rinfo->connect_cnt; j++) {
+ if (ch[j].center_ch <= 14) {
+ is_2g_ch_exist = true;
+ break;
+ }
+ }
+
+ /* should not happen: the pure-5G case was already excluded by the caller */
+ if (!is_2g_ch_exist)
+ return BTC_WLINK_OTHER;
+
+ dbcc_2g_cid = j;
+ *dbcc_2g_phy = phy[dbcc_2g_cid];
+
+ /* connect_cnt <= 2 */
+ if (wl_rinfo->connect_cnt < BTC_TDMA_WLROLE_MAX)
+ return (_get_role_link_mode((role[dbcc_2g_cid])));
+
+ /* find the other port on the 2G PHY, e.g. PHY-0: 6G, PHY-1: MCC/SCC */
+ for (k = 0; k < wl_rinfo->connect_cnt; k++) {
+ if (k == dbcc_2g_cid)
+ continue;
+
+ if (phy[k] == *dbcc_2g_phy) {
+ is_multi_role_in_2g_phy = true;
+ dbcc_2g_cid2 = k;
+ break;
+ }
+ }
+
+ /* Single-role in 2G-PHY */
+ if (!is_multi_role_in_2g_phy)
+ return (_get_role_link_mode(role[dbcc_2g_cid]));
+
+ /* 2-role in 2G-PHY */
+ if (ch[dbcc_2g_cid2].center_ch > 14)
+ return BTC_WLINK_25G_MCC;
+ else if (_chk_role_ch_group(&ch[dbcc_2g_cid], &ch[dbcc_2g_cid2]))
+ return BTC_WLINK_2G_SCC;
+ else
+ return BTC_WLINK_2G_MCC;
+}
+
+static void _update_role_link_mode(struct rtw89_dev *rtwdev,
+ bool client_joined, u32 noa)
+{
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &rtwdev->btc.cx.wl.role_info_v8;
+ u32 type = BTC_WLMROLE_NONE, dur = 0;
+ u32 wl_role = wl_rinfo->role_map;
+
+ /* if no client joined, ignore the P2P-GO/AP role */
+ if (((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
+ (wl_role & BIT(RTW89_WIFI_ROLE_AP))) && !client_joined) {
+ if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC) {
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ wl_rinfo->connect_cnt--;
+ } else if (wl_rinfo->link_mode == BTC_WLINK_2G_GO ||
+ wl_rinfo->link_mode == BTC_WLINK_2G_AP) {
+ wl_rinfo->link_mode = BTC_WLINK_NOLINK;
+ wl_rinfo->connect_cnt--;
+ }
+ }
+
+ /* Identify 2-Role type */
+ if (wl_rinfo->connect_cnt >= 2 &&
+ (wl_rinfo->link_mode == BTC_WLINK_2G_SCC ||
+ wl_rinfo->link_mode == BTC_WLINK_2G_MCC ||
+ wl_rinfo->link_mode == BTC_WLINK_25G_MCC ||
+ wl_rinfo->link_mode == BTC_WLINK_5G)) {
+ if ((wl_role & BIT(RTW89_WIFI_ROLE_P2P_GO)) ||
+ (wl_role & BIT(RTW89_WIFI_ROLE_AP)))
+ type = noa ? BTC_WLMROLE_STA_GO_NOA : BTC_WLMROLE_STA_GO;
+ else if (wl_role & BIT(RTW89_WIFI_ROLE_P2P_CLIENT))
+ type = noa ? BTC_WLMROLE_STA_GC_NOA : BTC_WLMROLE_STA_GC;
+ else
+ type = BTC_WLMROLE_STA_STA;
+
+ dur = noa;
+ }
+
+ wl_rinfo->mrole_type = type;
+ wl_rinfo->mrole_noa_duration = dur;
+}
+
+static void _update_wl_info_v8(struct rtw89_dev *rtwdev, u8 role_id, u8 rlink_id,
+ enum btc_role_state state)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_chdef cid_ch[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER];
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo = &wl->role_info_v8;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ bool client_joined = false, b2g = false, b5g = false;
+ u8 cid_role[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 cid_phy[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER] = {};
+ u8 dbcc_en = 0, pta_req_band = RTW89_MAC_0;
+ u8 i, j, cnt = 0, cnt_2g = 0, cnt_5g = 0;
+ struct rtw89_btc_wl_link_info *wl_linfo;
+ struct rtw89_btc_wl_rlink *rlink = NULL;
+ u8 dbcc_2g_phy = RTW89_PHY_0;
+ u8 mode = BTC_WLINK_NOLINK;
+ u32 noa_dur = 0;
+
+ if (role_id >= RTW89_BE_BTC_WL_MAX_ROLE_NUMBER || rlink_id > RTW89_MAC_1)
+ return;
+
+ /* Extract wl->link_info[role_id][rlink_id] into wl->role_info
+ * role_id: role index
+ * rlink_id: rlink index (= HW-band index)
+ * pid: port_index
+ */
+
+ wl_linfo = &wl->rlink_info[role_id][rlink_id];
+ if (wl_linfo->connected == MLME_LINKING)
+ return;
+
+ rlink = &wl_rinfo->rlink[role_id][rlink_id];
+ rlink->role = wl_linfo->role;
+ rlink->active = wl_linfo->active; /* Doze or not */
+ rlink->pid = wl_linfo->pid;
+ rlink->phy = wl_linfo->phy;
+ rlink->rf_band = wl_linfo->band;
+ rlink->ch = wl_linfo->ch;
+ rlink->bw = wl_linfo->bw;
+ rlink->noa = wl_linfo->noa;
+ rlink->noa_dur = wl_linfo->noa_duration / 1000;
+ rlink->client_cnt = wl_linfo->client_cnt;
+ rlink->mode = wl_linfo->mode;
+
+ switch (wl_linfo->connected) {
+ case MLME_NO_LINK:
+ rlink->connected = 0;
+ if (rlink->role == RTW89_WIFI_ROLE_STATION)
+ btc->dm.leak_ap = 0;
+ break;
+ case MLME_LINKED:
+ rlink->connected = 1;
+ break;
+ default:
+ return;
+ }
+
+ wl->is_5g_hi_channel = false;
+ wl->bg_mode = false;
+ wl_rinfo->role_map = 0;
+ wl_rinfo->p2p_2g = 0;
+ memset(cid_ch, 0, sizeof(cid_ch));
+
+ for (i = 0; i < RTW89_BE_BTC_WL_MAX_ROLE_NUMBER; i++) {
+ for (j = RTW89_MAC_0; j <= RTW89_MAC_1; j++) {
+ rlink = &wl_rinfo->rlink[i][j];
+
+ if (!rlink->active || !rlink->connected)
+ continue;
+
+ cnt++;
+ wl_rinfo->role_map |= BIT(rlink->role);
+
+ /* count as joined only if a client is connected to the P2P-GO/AP */
+ if ((rlink->role == RTW89_WIFI_ROLE_P2P_GO ||
+ rlink->role == RTW89_WIFI_ROLE_AP) &&
+ rlink->client_cnt > 1)
+ client_joined = true;
+
+ /* Identify if a P2P role (GO/GC/AP) exists on the 2G band */
+ if (rlink->rf_band == RTW89_BAND_2G &&
+ (client_joined || rlink->role == RTW89_WIFI_ROLE_P2P_CLIENT))
+ wl_rinfo->p2p_2g = 1;
+
+ /* only one NoA role exists */
+ if (rlink->noa && rlink->noa_dur > 0)
+ noa_dur = rlink->noa_dur;
+
+ /* for the WL 5G-Rx interfered-by-BT issue */
+ if (rlink->rf_band == RTW89_BAND_5G && rlink->ch >= 100)
+ wl->is_5g_hi_channel = 1;
+
+ if ((rlink->mode & BIT(BTC_WL_MODE_11B)) ||
+ (rlink->mode & BIT(BTC_WL_MODE_11G)))
+ wl->bg_mode = 1;
+
+ if (rtwdev->chip->para_ver & BTC_FEAT_MLO_SUPPORT)
+ continue;
+
+ cid_ch[cnt - 1] = wl_linfo->chdef;
+ cid_phy[cnt - 1] = rlink->phy;
+ cid_role[cnt - 1] = rlink->role;
+
+ if (rlink->rf_band != RTW89_BAND_2G) {
+ cnt_5g++;
+ b5g = true;
+ } else {
+ cnt_2g++;
+ b2g = true;
+ }
+ }
+ }
+
+ if (rtwdev->chip->para_ver & BTC_FEAT_MLO_SUPPORT) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC] rlink cnt_2g=%d cnt_5g=%d\n", cnt_2g, cnt_5g);
+ rtw89_warn(rtwdev, "MLO feature not supported yet");
+ } else {
+ dbcc_en = rtwdev->dbcc_en;
+
+ /* Be careful when changing the following sequence!! */
+ if (cnt == 0) {
+ mode = BTC_WLINK_NOLINK;
+ } else if (!b2g && b5g) {
+ mode = BTC_WLINK_5G;
+ } else if (wl_rinfo->role_map & BIT(RTW89_WIFI_ROLE_NAN)) {
+ mode = BTC_WLINK_2G_NAN;
+ } else if (cnt > BTC_TDMA_WLROLE_MAX) {
+ mode = BTC_WLINK_OTHER;
+ } else if (dbcc_en) {
+ mode = _chk_dbcc(rtwdev, cid_ch, cid_phy, cid_role,
+ &dbcc_2g_phy);
+ } else if (b2g && b5g && cnt == 2) {
+ mode = BTC_WLINK_25G_MCC;
+ } else if (!b5g && cnt == 2) { /* cnt_connect = 2 */
+ if (_chk_role_ch_group(&cid_ch[0], &cid_ch[cnt - 1]))
+ mode = BTC_WLINK_2G_SCC;
+ else
+ mode = BTC_WLINK_2G_MCC;
+ } else if (!b5g && cnt == 1) { /* cnt_connect = 1 */
+ mode = _get_role_link_mode(cid_role[0]);
+ }
+ }
+
+ wl_rinfo->link_mode = mode;
+ wl_rinfo->connect_cnt = cnt;
+ if (wl_rinfo->connect_cnt == 0)
+ wl_rinfo->role_map = BIT(RTW89_WIFI_ROLE_NONE);
+ _update_role_link_mode(rtwdev, client_joined, noa_dur);
+
+ wl_rinfo->dbcc_2g_phy = dbcc_2g_phy;
+ if (wl_rinfo->dbcc_en != dbcc_en) {
+ wl_rinfo->dbcc_en = dbcc_en;
+ wl_rinfo->dbcc_chg = 1;
+ btc->cx.cnt_wl[BTC_WCNT_DBCC_CHG]++;
+ } else {
+ wl_rinfo->dbcc_chg = 0;
+ }
+
+ if (wl_rinfo->dbcc_en) {
+ memset(wl_dinfo, 0, sizeof(struct rtw89_btc_wl_dbcc_info));
+
+ if (mode == BTC_WLINK_5G) {
+ pta_req_band = RTW89_PHY_0;
+ wl_dinfo->op_band[RTW89_PHY_0] = RTW89_BAND_5G;
+ wl_dinfo->op_band[RTW89_PHY_1] = RTW89_BAND_2G;
+ } else if (wl_rinfo->dbcc_2g_phy == RTW89_PHY_1) {
+ pta_req_band = RTW89_PHY_1;
+ wl_dinfo->op_band[RTW89_PHY_0] = RTW89_BAND_5G;
+ wl_dinfo->op_band[RTW89_PHY_1] = RTW89_BAND_2G;
+ } else {
+ pta_req_band = RTW89_PHY_0;
+ wl_dinfo->op_band[RTW89_PHY_0] = RTW89_BAND_2G;
+ wl_dinfo->op_band[RTW89_PHY_1] = RTW89_BAND_5G;
+ }
+ _update_dbcc_band(rtwdev, RTW89_PHY_0);
+ _update_dbcc_band(rtwdev, RTW89_PHY_1);
+ }
+
+ wl_rinfo->pta_req_band = pta_req_band;
+ _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
+}
+
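
The mode selection in _update_wl_info_v8() is order-sensitive, as its in-line warning says. A minimal sketch of the same decision ladder with stand-in names; the real code refines the DBCC case via _chk_dbcc() and resolves the two-role 2G case with the channel-grouping rule above:

#include <stdbool.h>
#include <stdio.h>

enum mode { NOLINK, M_5G, NAN_2G, OTHER, DBCC, MCC_25G, SCC_2G, MCC_2G, SINGLE_2G };

static enum mode classify(unsigned int cnt, bool b2g, bool b5g, bool nan,
			  bool dbcc_en, bool grouped, unsigned int role_max)
{
	if (cnt == 0)
		return NOLINK;
	if (!b2g && b5g)
		return M_5G;
	if (nan)
		return NAN_2G;
	if (cnt > role_max)
		return OTHER;
	if (dbcc_en)
		return DBCC;		/* refined by _chk_dbcc() */
	if (b2g && b5g && cnt == 2)
		return MCC_25G;
	if (!b5g && cnt == 2)
		return grouped ? SCC_2G : MCC_2G;
	if (!b5g && cnt == 1)
		return SINGLE_2G;	/* _get_role_link_mode() picks STA/GO/GC/AP */
	return NOLINK;
}

int main(void)
{
	/* one 2G + one 5G link, no DBCC -> 2.5G MCC */
	printf("%d\n", classify(2, true, true, false, false, false, 2));
	return 0;
}
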
void rtw89_coex_act1_work(struct work_struct *work)
{
struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
@@ -5445,6 +6496,7 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode, igno_bt, always_freerun;
lockdep_assert_held(&rtwdev->mutex);
@@ -5459,6 +6511,8 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 8)
+ mode = wl_rinfo_v8->link_mode;
else
return;
@@ -5605,6 +6659,8 @@ void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
_action_wl_2g_scc_v1(rtwdev);
else if (ver->fwlrole == 2)
_action_wl_2g_scc_v2(rtwdev);
+ else if (ver->fwlrole == 8)
+ _action_wl_2g_scc_v8(rtwdev);
break;
case BTC_WLINK_2G_MCC:
bt->scan_rx_low_pri = true;
@@ -5736,8 +6792,8 @@ void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
_set_init_info(rtwdev);
_set_wl_tx_power(rtwdev, RTW89_BTC_WL_DEF_TX_PWR);
- rtw89_btc_fw_set_slots(rtwdev, CXST_MAX, dm->slot);
btc_fw_set_monreg(rtwdev);
+ rtw89_btc_fw_set_slots(rtwdev);
_fw_set_drv_info(rtwdev, CXDRVINFO_INIT);
_fw_set_drv_info(rtwdev, CXDRVINFO_CTRL);
@@ -5956,6 +7012,22 @@ static u8 _update_bt_rssi_level(struct rtw89_dev *rtwdev, u8 rssi)
return rssi_level;
}
+static void _update_zb_coex_tbl(struct rtw89_dev *rtwdev)
+{
+ u8 mode = rtwdev->btc.cx.wl.role_info.link_mode;
+ u32 zb_tbl0 = 0xda5a5a5a, zb_tbl1 = 0xda5a5a5a;
+
+ if (mode == BTC_WLINK_5G || rtwdev->btc.dm.freerun) {
+ zb_tbl0 = 0xffffffff;
+ zb_tbl1 = 0xffffffff;
+ } else if (mode == BTC_WLINK_25G_MCC) {
+ zb_tbl0 = 0xffffffff; /* for E5G slot */
+ zb_tbl1 = 0xda5a5a5a; /* for E2G slot */
+ }
+ rtw89_write32(rtwdev, R_BTC_ZB_COEX_TBL_0, zb_tbl0);
+ rtw89_write32(rtwdev, R_BTC_ZB_COEX_TBL_1, zb_tbl1);
+}
+
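
_update_zb_coex_tbl() above selects Zigbee coex tables by WL link mode: grant-all (0xffffffff) when WL is 5G-only or free-running, a split pair for 2.5G MCC, otherwise the arbitrated default. A sketch with the register writes stubbed out; the enum values are stand-ins, the table constants come from the patch itself:

#include <stdint.h>
#include <stdio.h>

enum { WLINK_5G = 1, WLINK_25G_MCC = 2 };	/* stand-in mode ids */

static void pick_zb_tbl(int mode, int freerun, uint32_t *t0, uint32_t *t1)
{
	*t0 = 0xda5a5a5a;	/* arbitrated default */
	*t1 = 0xda5a5a5a;

	if (mode == WLINK_5G || freerun) {
		*t0 = 0xffffffff;	/* grant everything to WL */
		*t1 = 0xffffffff;
	} else if (mode == WLINK_25G_MCC) {
		*t0 = 0xffffffff;	/* E5G slot */
		*t1 = 0xda5a5a5a;	/* E2G slot */
	}
}

int main(void)
{
	uint32_t t0, t1;

	pick_zb_tbl(WLINK_25G_MCC, 0, &t0, &t1);
	printf("tbl0=0x%08x tbl1=0x%08x\n", t0, t1);	/* stubbed rtw89_write32 */
	return 0;
}
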
#define BT_PROFILE_PROTOCOL_MASK GENMASK(7, 4)
static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
@@ -6093,13 +7165,6 @@ static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
_run_coex(rtwdev, BTC_RSN_UPDATE_BT_INFO);
}
-enum btc_wl_mode {
- BTC_WL_MODE_HT = 0,
- BTC_WL_MODE_VHT = 1,
- BTC_WL_MODE_HE = 2,
- BTC_WL_MODE_NUM,
-};
-
void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta, enum btc_role_state state)
{
@@ -6112,7 +7177,7 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_wl_link_info r = {0};
struct rtw89_btc_wl_link_info *wlinfo = NULL;
- u8 mode = 0;
+ u8 mode = 0, rlink_id, link_mode_ori, pta_req_mac_ori, wa_type;
rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], state=%d\n", state);
rtw89_debug(rtwdev, RTW89_DBG_BTC,
@@ -6162,6 +7227,10 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
r.band = chan->band_type;
r.ch = chan->channel;
r.bw = chan->band_width;
+ r.chdef.band = chan->band_type;
+ r.chdef.center_ch = chan->channel;
+ r.chdef.bw = chan->band_width;
+ r.chdef.chan = chan->primary_channel;
ether_addr_copy(r.mac_addr, rtwvif->mac_addr);
if (rtwsta && vif->type == NL80211_IFTYPE_STATION)
@@ -6171,13 +7240,37 @@ void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif
wlinfo = &wl->link_info[r.pid];
- memcpy(wlinfo, &r, sizeof(*wlinfo));
- if (ver->fwlrole == 0)
+ rlink_id = 0; /* to do */
+ if (ver->fwlrole == 0) {
+ *wlinfo = r;
_update_wl_info(rtwdev);
- else if (ver->fwlrole == 1)
+ } else if (ver->fwlrole == 1) {
+ *wlinfo = r;
_update_wl_info_v1(rtwdev);
- else if (ver->fwlrole == 2)
+ } else if (ver->fwlrole == 2) {
+ *wlinfo = r;
_update_wl_info_v2(rtwdev);
+ } else if (ver->fwlrole == 8) {
+ wlinfo = &wl->rlink_info[r.pid][rlink_id];
+ *wlinfo = r;
+ link_mode_ori = wl->role_info_v8.link_mode;
+ pta_req_mac_ori = wl->pta_req_mac;
+ _update_wl_info_v8(rtwdev, r.pid, rlink_id, state);
+
+ if (wl->role_info_v8.link_mode != link_mode_ori) {
+ wl->role_info_v8.link_mode_chg = 1;
+ if (ver->fcxinit == 7)
+ wa_type = btc->mdinfo.md_v7.wa_type;
+ else
+ wa_type = btc->mdinfo.md.wa_type;
+
+ if (wa_type & BTC_WA_HFP_ZB)
+ _update_zb_coex_tbl(rtwdev);
+ }
+
+ if (wl->pta_req_mac != pta_req_mac_ori)
+ wl->pta_reg_mac_chg = 1;
+ }
if (wlinfo->role == RTW89_WIFI_ROLE_STATION &&
wlinfo->connected == MLME_NO_LINK)
@@ -6715,7 +7808,10 @@ static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
}
for (i = 0; i < RTW89_PORT_NUM; i++) {
- plink = &btc->cx.wl.link_info[i];
+ if (btc->ver->fwlrole == 8)
+ plink = &btc->cx.wl.rlink_info[i][0];
+ else
+ plink = &btc->cx.wl.link_info[i];
if (!plink->active)
continue;
@@ -6760,6 +7856,7 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
struct rtw89_btc_wl_role_info_v1 *wl_rinfo_v1 = &wl->role_info_v1;
struct rtw89_btc_wl_role_info_v2 *wl_rinfo_v2 = &wl->role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 *wl_rinfo_v8 = &wl->role_info_v8;
u8 mode;
if (!(btc->dm.coex_info_map & BTC_COEX_INFO_WL))
@@ -6773,6 +7870,8 @@ static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
mode = wl_rinfo_v1->link_mode;
else if (ver->fwlrole == 2)
mode = wl_rinfo_v2->link_mode;
+ else if (ver->fwlrole == 8)
+ mode = wl_rinfo_v8->link_mode;
else
return;
@@ -6985,11 +8084,6 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
seq_puts(m, "\n");
}
- if (bt->enable.now && bt->ver_info.fw == 0)
- rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true);
- else
- rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, false);
-
if (bt_linfo->profile_cnt.now || bt_linfo->status.map.ble_connect)
rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, true);
else
@@ -7017,6 +8111,79 @@ static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
#define CASE_BTC_EVT_STR(e) case CXEVNT_## e: return #e
#define CASE_BTC_INIT(e) case BTC_MODE_## e: return #e
#define CASE_BTC_ANTPATH_STR(e) case BTC_ANT_##e: return #e
+#define CASE_BTC_POLUT_STR(e) case BTC_PLT_## e: return #e
+#define CASE_BTC_REGTYPE_STR(e) case REG_## e: return #e
+#define CASE_BTC_GDBG_STR(e) case BTC_DBG_## e: return #e
+
+static const char *id_to_polut(u32 id)
+{
+ switch (id) {
+ CASE_BTC_POLUT_STR(NONE);
+ CASE_BTC_POLUT_STR(GNT_BT_TX);
+ CASE_BTC_POLUT_STR(GNT_BT_RX);
+ CASE_BTC_POLUT_STR(GNT_WL);
+ CASE_BTC_POLUT_STR(BT);
+ CASE_BTC_POLUT_STR(ALL);
+ default:
+ return "unknown";
+ }
+}
+
+static const char *id_to_regtype(u32 id)
+{
+ switch (id) {
+ CASE_BTC_REGTYPE_STR(MAC);
+ CASE_BTC_REGTYPE_STR(BB);
+ CASE_BTC_REGTYPE_STR(RF);
+ CASE_BTC_REGTYPE_STR(BT_RF);
+ CASE_BTC_REGTYPE_STR(BT_MODEM);
+ CASE_BTC_REGTYPE_STR(BT_BLUEWIZE);
+ CASE_BTC_REGTYPE_STR(BT_VENDOR);
+ CASE_BTC_REGTYPE_STR(BT_LE);
+ default:
+ return "unknown";
+ }
+}
+
+static const char *id_to_gdbg(u32 id)
+{
+ switch (id) {
+ CASE_BTC_GDBG_STR(GNT_BT);
+ CASE_BTC_GDBG_STR(GNT_WL);
+ CASE_BTC_GDBG_STR(BCN_EARLY);
+ CASE_BTC_GDBG_STR(WL_NULL0);
+ CASE_BTC_GDBG_STR(WL_NULL1);
+ CASE_BTC_GDBG_STR(WL_RXISR);
+ CASE_BTC_GDBG_STR(TDMA_ENTRY);
+ CASE_BTC_GDBG_STR(A2DP_EMPTY);
+ CASE_BTC_GDBG_STR(BT_RETRY);
+ CASE_BTC_GDBG_STR(BT_RELINK);
+ CASE_BTC_GDBG_STR(SLOT_WL);
+ CASE_BTC_GDBG_STR(SLOT_BT);
+ CASE_BTC_GDBG_STR(WL_ERR);
+ CASE_BTC_GDBG_STR(WL_OK);
+ CASE_BTC_GDBG_STR(SLOT_B2W);
+ CASE_BTC_GDBG_STR(SLOT_W1);
+ CASE_BTC_GDBG_STR(SLOT_W2);
+ CASE_BTC_GDBG_STR(SLOT_W2B);
+ CASE_BTC_GDBG_STR(SLOT_B1);
+ CASE_BTC_GDBG_STR(SLOT_B2);
+ CASE_BTC_GDBG_STR(SLOT_B3);
+ CASE_BTC_GDBG_STR(SLOT_B4);
+ CASE_BTC_GDBG_STR(SLOT_LK);
+ CASE_BTC_GDBG_STR(SLOT_E2G);
+ CASE_BTC_GDBG_STR(SLOT_E5G);
+ CASE_BTC_GDBG_STR(SLOT_EBT);
+ CASE_BTC_GDBG_STR(SLOT_WLK);
+ CASE_BTC_GDBG_STR(SLOT_B1FDD);
+ CASE_BTC_GDBG_STR(BT_CHANGE);
+ CASE_BTC_GDBG_STR(WL_CCA);
+ CASE_BTC_GDBG_STR(BT_LEAUDIO);
+ CASE_BTC_GDBG_STR(USER_DEF);
+ default:
+ return "unknown";
+ }
+}
static const char *steps_to_str(u16 step)
{
@@ -7354,6 +8521,10 @@ static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
pcysta->v5 = pfwinfo->rpt_fbtc_cysta.finfo.v5;
except_cnt = pcysta->v5.except_cnt;
exception_map = le32_to_cpu(pcysta->v5.except_map);
+ } else if (ver->fcxcysta == 7) {
+ pcysta->v7 = pfwinfo->rpt_fbtc_cysta.finfo.v7;
+ except_cnt = pcysta->v7.except_cnt;
+ exception_map = le32_to_cpu(pcysta->v7.except_map);
} else {
return;
}
@@ -7430,22 +8601,35 @@ static void _show_fbtc_slots(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
struct rtw89_btc_dm *dm = &btc->dm;
- struct rtw89_btc_fbtc_slot *s;
+ u16 dur, cxtype;
+ u32 tbl;
u8 i = 0;
for (i = 0; i < CXST_MAX; i++) {
- s = &dm->slot_now[i];
+ if (btc->ver->fcxslots == 1) {
+ dur = le16_to_cpu(dm->slot_now.v1[i].dur);
+ tbl = le32_to_cpu(dm->slot_now.v1[i].cxtbl);
+ cxtype = le16_to_cpu(dm->slot_now.v1[i].cxtype);
+ } else if (btc->ver->fcxslots == 7) {
+ dur = le16_to_cpu(dm->slot_now.v7[i].dur);
+ tbl = le32_to_cpu(dm->slot_now.v7[i].cxtbl);
+ cxtype = le16_to_cpu(dm->slot_now.v7[i].cxtype);
+ } else {
+ return;
+ }
+
if (i % 5 == 0)
seq_printf(m,
" %-15s : %5s[%03d/0x%x/%d]",
"[slot_list]",
id_to_slot((u32)i),
- s->dur, s->cxtbl, s->cxtype);
+ dur, tbl, cxtype);
else
seq_printf(m,
", %5s[%03d/0x%x/%d]",
id_to_slot((u32)i),
- s->dur, s->cxtbl, s->cxtype);
+ dur, tbl, cxtype);
+
if (i % 5 == 4)
seq_puts(m, "\n");
}
@@ -7973,6 +9157,136 @@ static void _show_fbtc_cysta_v5(struct rtw89_dev *rtwdev, struct seq_file *m)
}
}
+static void _show_fbtc_cysta_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc_bt_info *bt = &rtwdev->btc.cx.bt;
+ struct rtw89_btc_bt_a2dp_desc *a2dp = &bt->link_info.a2dp_desc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
+ struct rtw89_btc_fbtc_cysta_v7 *pcysta = NULL;
+ struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
+ struct rtw89_btc_rpt_cmn_info *pcinfo;
+ u16 cycle, c_begin, c_end, s_id;
+ u8 i, cnt = 0, divide_cnt;
+ u8 slot_pair;
+
+ pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo.v7;
+ seq_printf(m, "\n\r %-15s : cycle:%d", "[slot_stat]",
+ le16_to_cpu(pcysta->cycles));
+
+ for (i = 0; i < CXST_MAX; i++) {
+ if (!le16_to_cpu(pcysta->slot_cnt[i]))
+ continue;
+ seq_printf(m, ", %s:%d",
+ id_to_slot(i), le16_to_cpu(pcysta->slot_cnt[i]));
+ }
+
+ if (dm->tdma_now.rxflctrl)
+ seq_printf(m, ", leak_rx:%d",
+ le32_to_cpu(pcysta->leak_slot.cnt_rximr));
+
+ if (pcysta->collision_cnt)
+ seq_printf(m, ", collision:%d", pcysta->collision_cnt);
+
+ if (pcysta->skip_cnt)
+ seq_printf(m, ", skip:%d", le16_to_cpu(pcysta->skip_cnt));
+
+ seq_printf(m, "\n\r %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_stat]",
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tavg[CXT_BT]),
+ le16_to_cpu(pcysta->leak_slot.tavg) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tavg) % 1000);
+ seq_printf(m, ", max_t[wl:%d/bt:%d(>%dms:%d)/lk:%d.%03d]",
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_WL]),
+ le16_to_cpu(pcysta->cycle_time.tmax[CXT_BT]),
+ dm->bt_slot_flood, dm->cnt_dm[BTC_DCNT_BT_SLOT_FLOOD],
+ le16_to_cpu(pcysta->leak_slot.tamx) / 1000,
+ le16_to_cpu(pcysta->leak_slot.tamx) % 1000);
+ seq_printf(m, ", bcn[all:%d/ok:%d/in_bt:%d/in_bt_ok:%d]",
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_ALL_OK]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_SLOT]),
+ le16_to_cpu(pcysta->bcn_cnt[CXBCN_BT_OK]));
+
+ if (a2dp->exist) {
+ seq_printf(m,
+ "\n\r %-15s : a2dp_ept:%d, a2dp_late:%d(streak 2S:%d/max:%d)",
+ "[a2dp_stat]",
+ le16_to_cpu(pcysta->a2dp_ept.cnt),
+ le16_to_cpu(pcysta->a2dp_ept.cnt_timeout),
+ a2dp->no_empty_streak_2s, a2dp->no_empty_streak_max);
+
+ seq_printf(m, ", avg_t:%d, max_t:%d",
+ le16_to_cpu(pcysta->a2dp_ept.tavg),
+ le16_to_cpu(pcysta->a2dp_ept.tmax));
+ }
+
+ if (le16_to_cpu(pcysta->cycles) <= 1)
+ return;
+
+ /* 1 cycle = 1 wl-slot + 1 bt-slot */
+ slot_pair = BTC_CYCLE_SLOT_MAX / 2;
+
+ if (le16_to_cpu(pcysta->cycles) <= slot_pair)
+ c_begin = 1;
+ else
+ c_begin = le16_to_cpu(pcysta->cycles) - slot_pair + 1;
+
+ c_end = le16_to_cpu(pcysta->cycles);
+
+ if (a2dp->exist)
+ divide_cnt = 2;
+ else
+ divide_cnt = 6;
+
+ if (c_begin > c_end)
+ return;
+
+ for (cycle = c_begin; cycle <= c_end; cycle++) {
+ cnt++;
+ s_id = ((cycle - 1) % slot_pair) * 2;
+
+ if (cnt % divide_cnt == 1) {
+ if (a2dp->exist)
+ seq_printf(m, "\n\r %-15s : ", "[slotT_wermtan]");
+ else
+ seq_printf(m, "\n\r %-15s : ", "[slotT_rxerr]");
+ }
+
+ seq_printf(m, "->b%d", le16_to_cpu(pcysta->slot_step_time[s_id]));
+
+ if (a2dp->exist)
+ seq_printf(m, "(%d/%d/%d/%dM/%d/%d/%d)",
+ pcysta->wl_rx_err_ratio[s_id],
+ pcysta->a2dp_trx[s_id].empty_cnt,
+ pcysta->a2dp_trx[s_id].retry_cnt,
+ (pcysta->a2dp_trx[s_id].tx_rate ? 3 : 2),
+ pcysta->a2dp_trx[s_id].tx_cnt,
+ pcysta->a2dp_trx[s_id].ack_cnt,
+ pcysta->a2dp_trx[s_id].nack_cnt);
+ else
+ seq_printf(m, "(%d)", pcysta->wl_rx_err_ratio[s_id]);
+
+ seq_printf(m, "->w%d", le16_to_cpu(pcysta->slot_step_time[s_id + 1]));
+
+ if (a2dp->exist)
+ seq_printf(m, "(%d/%d/%d/%dM/%d/%d/%d)",
+ pcysta->wl_rx_err_ratio[s_id + 1],
+ pcysta->a2dp_trx[s_id + 1].empty_cnt,
+ pcysta->a2dp_trx[s_id + 1].retry_cnt,
+ (pcysta->a2dp_trx[s_id + 1].tx_rate ? 3 : 2),
+ pcysta->a2dp_trx[s_id + 1].tx_cnt,
+ pcysta->a2dp_trx[s_id + 1].ack_cnt,
+ pcysta->a2dp_trx[s_id + 1].nack_cnt);
+ else
+ seq_printf(m, "(%d)", pcysta->wl_rx_err_ratio[s_id + 1]);
+ }
+}
+
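
The per-cycle loop above reads a ring buffer that retains only the most recent slot_pair cycles, two u16 steps (bt, wl) per cycle. A small sketch of the index math, assuming BTC_CYCLE_SLOT_MAX is 48 (an assumption for illustration, not taken from the patch):

#include <stdio.h>

#define CYCLE_SLOT_MAX 48	/* assumed value for the example */

int main(void)
{
	unsigned int slot_pair = CYCLE_SLOT_MAX / 2;	/* 24 cycles retained */
	unsigned int cycles = 100, c_begin, cycle;

	c_begin = cycles <= slot_pair ? 1 : cycles - slot_pair + 1;
	for (cycle = c_begin; cycle <= cycles; cycle++) {
		unsigned int s_id = ((cycle - 1) % slot_pair) * 2;

		printf("cycle %u -> bt step[%u], wl step[%u]\n",
		       cycle, s_id, s_id + 1);
	}
	return 0;
}
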
static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -8009,6 +9323,27 @@ static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
le32_to_cpu(ns->v1.max_t[i]) / 1000,
le32_to_cpu(ns->v1.max_t[i]) % 1000);
}
+ } else if (ver->fcxnullsta == 7) {
+ for (i = 0; i < 2; i++) {
+ seq_printf(m, " %-15s : ", "[NULL-STA]");
+ seq_printf(m, "null-%d", i);
+ seq_printf(m, "[Tx:%d/",
+ le32_to_cpu(ns->v7.result[i][4]));
+ seq_printf(m, "[ok:%d/",
+ le32_to_cpu(ns->v7.result[i][1]));
+ seq_printf(m, "fail:%d/",
+ le32_to_cpu(ns->v7.result[i][0]));
+ seq_printf(m, "on_time:%d/",
+ le32_to_cpu(ns->v7.result[i][2]));
+ seq_printf(m, "retry:%d/",
+ le32_to_cpu(ns->v7.result[i][3]));
+ seq_printf(m, "avg_t:%d.%03d/",
+ le32_to_cpu(ns->v7.tavg[i]) / 1000,
+ le32_to_cpu(ns->v7.tavg[i]) % 1000);
+ seq_printf(m, "max_t:%d.%03d]\n",
+ le32_to_cpu(ns->v7.tmax[i]) / 1000,
+ le32_to_cpu(ns->v7.tmax[i]) % 1000);
+ }
} else {
for (i = 0; i < 2; i++) {
seq_printf(m, " %-15s : ", "[NULL-STA]");
@@ -8185,6 +9520,8 @@ static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_fbtc_cysta_v4(rtwdev, m);
else if (ver->fcxcysta == 5)
_show_fbtc_cysta_v5(rtwdev, m);
+ else if (ver->fcxcysta == 7)
+ _show_fbtc_cysta_v7(rtwdev, m);
_show_fbtc_nullsta(rtwdev, m);
@@ -8202,7 +9539,7 @@ static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt
u32 val, status;
if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
- chip->chip_id == RTL8851B) {
+ chip->chip_id == RTL8851B || chip->chip_id == RTL8852BT) {
rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
rtw89_mac_read_lte(rtwdev, R_AX_GNT_VAL, &status);
@@ -8237,6 +9574,47 @@ static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt
}
}
+static void _show_gpio_dbg(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
+ const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ union rtw89_btc_fbtc_gpio_dbg *gdbg = NULL;
+ u8 *gpio_map, i;
+ u32 en_map;
+
+ pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
+ gdbg = &rtwdev->btc.fwinfo.rpt_fbtc_gpio_dbg.finfo;
+ if (!pcinfo->valid) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n",
+ __func__);
+ seq_puts(m, "\n");
+ return;
+ }
+
+ if (ver->fcxgpiodbg == 7) {
+ en_map = le32_to_cpu(gdbg->v7.en_map);
+ gpio_map = gdbg->v7.gpio_map;
+ } else {
+ en_map = le32_to_cpu(gdbg->v1.en_map);
+ gpio_map = gdbg->v1.gpio_map;
+ }
+
+ if (!en_map)
+ return;
+
+ seq_printf(m, " %-15s : enable_map:0x%08x",
+ "[gpio_dbg]", en_map);
+
+ for (i = 0; i < BTC_DBG_MAX1; i++) {
+ if (!(en_map & BIT(i)))
+ continue;
+ seq_printf(m, ", %s->GPIO%d", id_to_gdbg(i), gpio_map[i]);
+ }
+ seq_puts(m, "\n");
+}
+
static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -8244,7 +9622,6 @@ static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_mreg_val_v1 *pmreg = NULL;
- struct rtw89_btc_fbtc_gpio_dbg *gdbg = NULL;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
@@ -8314,29 +9691,6 @@ static void _show_mreg_v1(struct rtw89_dev *rtwdev, struct seq_file *m)
if (i >= pmreg->reg_num)
seq_puts(m, "\n");
}
-
- pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
- if (!pcinfo->valid) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n",
- __func__);
- seq_puts(m, "\n");
- return;
- }
-
- gdbg = &pfwinfo->rpt_fbtc_gpio_dbg.finfo;
- if (!gdbg->en_map)
- return;
-
- seq_printf(m, " %-15s : enable_map:0x%08x",
- "[gpio_dbg]", gdbg->en_map);
-
- for (i = 0; i < BTC_DBG_MAX1; i++) {
- if (!(gdbg->en_map & BIT(i)))
- continue;
- seq_printf(m, ", %d->GPIO%d", (u32)i, gdbg->gpio_map[i]);
- }
- seq_puts(m, "\n");
}
static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
@@ -8346,7 +9700,6 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
struct rtw89_btc_fbtc_mreg_val_v2 *pmreg = NULL;
- struct rtw89_btc_fbtc_gpio_dbg *gdbg = NULL;
struct rtw89_btc_cx *cx = &btc->cx;
struct rtw89_btc_wl_info *wl = &btc->cx.wl;
struct rtw89_btc_bt_info *bt = &btc->cx.bt;
@@ -8371,12 +9724,13 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
gnt = gnt_cfg.band[0];
seq_printf(m,
- " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
+ " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], polut_type:%s",
"[gnt_status]",
chip->chip_id == RTL8852C ? "HW" :
btc->dm.pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
gnt.gnt_wl_sw_en ? "SW" : "HW", gnt.gnt_wl,
- gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt);
+ gnt.gnt_bt_sw_en ? "SW" : "HW", gnt.gnt_bt,
+ id_to_polut(wl->bt_polut_type[wl->pta_req_mac]));
gnt = gnt_cfg.band[1];
seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
@@ -8416,27 +9770,74 @@ static void _show_mreg_v2(struct rtw89_dev *rtwdev, struct seq_file *m)
if (i >= pmreg->reg_num)
seq_puts(m, "\n");
}
+}
- pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
- if (!pcinfo->valid) {
- rtw89_debug(rtwdev, RTW89_DBG_BTC,
- "[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n",
- __func__);
- seq_puts(m, "\n");
+static void _show_mreg_v7(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_fbtc_mreg_val_v7 *pmreg = NULL;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+ struct rtw89_mac_ax_gnt *gnt = NULL;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u8 i, type, cnt = 0;
+ u32 val, offset;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_MREG))
return;
+
+ seq_puts(m, "\n\r========== [HW Status] ==========");
+
+ seq_printf(m,
+ "\n\r %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)",
+ "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
+ bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
+ cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
+
+ /* Avoid register I/O if WL is in LPS or powered off */
+ dm->pta_owner = rtw89_mac_get_ctrl_path(rtwdev);
+
+ seq_printf(m,
+ "\n\r %-15s : pta_owner:%s, pta_req_mac:MAC%d, rf_gnt_source: polut_type:%s",
+ "[gnt_status]",
+ rtwdev->chip->para_ver & BTC_FEAT_PTA_ONOFF_CTRL ? "HW" :
+ dm->pta_owner == BTC_CTRL_BY_WL ? "WL" : "BT",
+ wl->pta_req_mac, id_to_polut(wl->bt_polut_type[wl->pta_req_mac]));
+
+ gnt = &dm->gnt.band[RTW89_PHY_0];
+
+ seq_printf(m, ", phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d]",
+ gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl,
+ gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt);
+
+ if (rtwdev->dbcc_en) {
+ gnt = &dm->gnt.band[RTW89_PHY_1];
+ seq_printf(m, ", phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]",
+ gnt->gnt_wl_sw_en ? "SW" : "HW", gnt->gnt_wl,
+ gnt->gnt_bt_sw_en ? "SW" : "HW", gnt->gnt_bt);
}
- gdbg = &pfwinfo->rpt_fbtc_gpio_dbg.finfo;
- if (!gdbg->en_map)
+ pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
+ if (!pcinfo->valid)
return;
- seq_printf(m, " %-15s : enable_map:0x%08x",
- "[gpio_dbg]", gdbg->en_map);
+ pmreg = &pfwinfo->rpt_fbtc_mregval.finfo.v7;
- for (i = 0; i < BTC_DBG_MAX1; i++) {
- if (!(gdbg->en_map & BIT(i)))
- continue;
- seq_printf(m, ", %d->GPIO%d", (u32)i, gdbg->gpio_map[i]);
+ for (i = 0; i < pmreg->reg_num; i++) {
+ type = (u8)le16_to_cpu(rtwdev->chip->mon_reg[i].type);
+ offset = le32_to_cpu(rtwdev->chip->mon_reg[i].offset);
+ val = le32_to_cpu(pmreg->mreg_val[i]);
+
+ if (cnt % 6 == 0)
+ seq_printf(m, "\n\r %-15s : %s_0x%x=0x%x", "[reg]",
+ id_to_regtype(type), offset, val);
+ else
+ seq_printf(m, ", %s_0x%x=0x%x",
+ id_to_regtype(type), offset, val);
+ cnt++;
}
seq_puts(m, "\n");
}
@@ -8887,6 +10288,108 @@ static void _show_summary_v105(struct rtw89_dev *rtwdev, struct seq_file *m)
cnt[BTC_NCNT_CUSTOMERIZE]);
}
+static void _show_summary_v8(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &rtwdev->btc.fwinfo;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_fbtc_rpt_ctrl_v8 *prptctrl = NULL;
+ struct rtw89_btc_cx *cx = &rtwdev->btc.cx;
+ struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ u32 *cnt = rtwdev->btc.dm.cnt_notify;
+ u32 cnt_sum = 0;
+ u8 i;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
+ return;
+
+ seq_printf(m, "%s", "\n\r========== [Statistics] ==========");
+
+ pcinfo = &pfwinfo->rpt_ctrl.cinfo;
+ if (pcinfo->valid && wl->status.map.lps != BTC_LPS_RF_OFF &&
+ !wl->status.map.rf_off) {
+ prptctrl = &pfwinfo->rpt_ctrl.finfo.v8;
+
+ seq_printf(m,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d, len:%d, max:fw-%d/drv-%d), ",
+ "[summary]", pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ le16_to_cpu(prptctrl->rpt_info.cnt_h2c), pfwinfo->cnt_c2h,
+ le16_to_cpu(prptctrl->rpt_info.cnt_c2h),
+ le16_to_cpu(prptctrl->rpt_info.len_c2h),
+ (prptctrl->rpt_len_max_h << 8) + prptctrl->rpt_len_max_l,
+ rtwdev->btc.ver->info_buf);
+
+ seq_printf(m, "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x",
+ pfwinfo->event[BTF_EVNT_RPT],
+ le16_to_cpu(prptctrl->rpt_info.cnt),
+ le32_to_cpu(prptctrl->rpt_info.en));
+
+ if (dm->error.map.wl_fw_hang)
+ seq_puts(m, " (WL FW Hang!!)");
+
+ seq_printf(m, "\n\r %-15s : send_ok:%d, send_fail:%d, recv:%d, ",
+ "[mailbox]", le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_ok),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_send_fail),
+ le32_to_cpu(prptctrl->bt_mbx_info.cnt_recv));
+
+ seq_printf(m, "A2DP_empty:%d(stop:%d/tx:%d/ack:%d/nack:%d)",
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_empty),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_flowctrl),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_tx),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_ack),
+ le32_to_cpu(prptctrl->bt_mbx_info.a2dp.cnt_nack));
+
+ seq_printf(m,
+ "\n\r %-15s : wl_rfk[req:%d/go:%d/reject:%d/tout:%d/time:%dms]",
+ "[RFK/LPS]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT],
+ wl->rfk_info.proc_time);
+
+ seq_printf(m, ", bt_rfk[req:%d]",
+ le16_to_cpu(prptctrl->bt_cnt[BTC_BCNT_RFK_REQ]));
+
+ seq_printf(m, ", AOAC[RF_on:%d/RF_off:%d]",
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_on),
+ le16_to_cpu(prptctrl->rpt_info.cnt_aoac_rf_off));
+ } else {
+ seq_printf(m,
+ "\n\r %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d (lps=%d/rf_off=%d)",
+ "[summary]",
+ pfwinfo->cnt_h2c, pfwinfo->cnt_h2c_fail,
+ pfwinfo->cnt_c2h,
+ wl->status.map.lps, wl->status.map.rf_off);
+ }
+
+ for (i = 0; i < BTC_NCNT_NUM; i++)
+ cnt_sum += dm->cnt_notify[i];
+
+ seq_printf(m,
+ "\n\r %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]",
+ cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ seq_printf(m,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
+
+ seq_printf(m,
+ "\n\r %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, switch_chbw=%d, special_pkt=%d, ",
+ "[notify_cnt]",
+ cnt[BTC_NCNT_SCAN_START], cnt[BTC_NCNT_SCAN_FINISH],
+ cnt[BTC_NCNT_SWITCH_BAND], cnt[BTC_NCNT_SWITCH_CHBW],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
+
+ seq_printf(m, "timer=%d, customerize=%d, hub_msg=%d, chg_fw=%d, send_cc=%d",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CUSTOMERIZE],
+ rtwdev->btc.hubmsg_cnt, cnt[BTC_NCNT_RESUME_DL_FW],
+ cnt[BTC_NCNT_COUNTRYCODE]);
+}
+
void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
{
struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal;
@@ -8924,6 +10427,10 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_mreg_v1(rtwdev, m);
else if (ver->fcxmreg == 2)
_show_mreg_v2(rtwdev, m);
+ else if (ver->fcxmreg == 7)
+ _show_mreg_v7(rtwdev, m);
+
+ _show_gpio_dbg(rtwdev, m);
if (ver->fcxbtcrpt == 1)
_show_summary_v1(rtwdev, m);
@@ -8933,6 +10440,8 @@ void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
_show_summary_v5(rtwdev, m);
else if (ver->fcxbtcrpt == 105)
_show_summary_v105(rtwdev, m);
+ else if (ver->fcxbtcrpt == 8)
+ _show_summary_v8(rtwdev, m);
}
void rtw89_coex_recognize_ver(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
index 13303830684e..0e5f268616f7 100644
--- a/drivers/net/wireless/realtek/rtw89/coex.h
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -8,6 +8,8 @@
#include "core.h"
#define BTC_H2C_MAXLEN 2020
+#define BTC_TLV_SLOT_ID_LEN_V7 1
+#define BTC_SLOT_REQ_TH 2
enum btc_mode {
BTC_MODE_NORMAL,
@@ -201,6 +203,60 @@ enum btc_3cx_type {
BTC_3CX_MAX,
};
+enum btc_chip_feature {
+ BTC_FEAT_PTA_ONOFF_CTRL = BIT(0), /* on/off ctrl by HW (not 0x73[2]) */
+ BTC_FEAT_NONBTG_GWL_THRU = BIT(1), /* non-BTG GNT_WL!=0 if GNT_BT = 1 */
+ BTC_FEAT_WLAN_ACT_MUX = BIT(2), /* separate wlan_act/gnt mux */
+ BTC_FEAT_NEW_BBAPI_FLOW = BIT(3), /* new btg_ctrl/pre_agc_ctrl */
+ BTC_FEAT_MLO_SUPPORT = BIT(4),
+ BTC_FEAT_H2C_MACRO = BIT(5),
+};
+
+enum btc_wl_mode {
+ BTC_WL_MODE_11B = 0,
+ BTC_WL_MODE_11A = 1,
+ BTC_WL_MODE_11G = 2,
+ BTC_WL_MODE_HT = 3,
+ BTC_WL_MODE_VHT = 4,
+ BTC_WL_MODE_HE = 5,
+ BTC_WL_MODE_NUM,
+};
+
+enum btc_wl_gpio_debug {
+ BTC_DBG_GNT_BT = 0,
+ BTC_DBG_GNT_WL = 1,
+ BTC_DBG_BCN_EARLY = 2,
+ BTC_DBG_WL_NULL0 = 3,
+ BTC_DBG_WL_NULL1 = 4,
+ BTC_DBG_WL_RXISR = 5,
+ BTC_DBG_TDMA_ENTRY = 6,
+ BTC_DBG_A2DP_EMPTY = 7,
+ BTC_DBG_BT_RETRY = 8,
+ BTC_DBG_BT_RELINK = 9,
+ BTC_DBG_SLOT_WL = 10,
+ BTC_DBG_SLOT_BT = 11,
+ BTC_DBG_WL_ERR = 12,
+ BTC_DBG_WL_OK = 13,
+ BTC_DBG_SLOT_B2W = 14,
+ BTC_DBG_SLOT_W1 = 15,
+ BTC_DBG_SLOT_W2 = 16,
+ BTC_DBG_SLOT_W2B = 17,
+ BTC_DBG_SLOT_B1 = 18,
+ BTC_DBG_SLOT_B2 = 19,
+ BTC_DBG_SLOT_B3 = 20,
+ BTC_DBG_SLOT_B4 = 21,
+ BTC_DBG_SLOT_LK = 22,
+ BTC_DBG_SLOT_E2G = 23,
+ BTC_DBG_SLOT_E5G = 24,
+ BTC_DBG_SLOT_EBT = 25,
+ BTC_DBG_SLOT_WLK = 26,
+ BTC_DBG_SLOT_B1FDD = 27,
+ BTC_DBG_BT_CHANGE = 28,
+ BTC_DBG_WL_CCA = 29,
+ BTC_DBG_BT_LEAUDIO = 30,
+ BTC_DBG_USER_DEF = 31,
+};
+
void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev);
void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode);
@@ -261,4 +317,56 @@ static inline u16 rtw89_coex_query_bt_req_len(struct rtw89_dev *rtwdev,
return btc->bt_req_len;
}
+static inline u32 rtw89_get_antpath_type(u8 phy_map, u8 type)
+{
+ return ((phy_map << 8) + type);
+}
+
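
rtw89_get_antpath_type() above packs the PHY map into bits 15:8 and the antenna type into bits 7:0, which is why the debug print in _set_ant_v1() masks the stored value with 0xff. A tiny encode/decode sketch with hypothetical values:

#include <stdint.h>
#include <stdio.h>

static uint32_t antpath_encode(uint8_t phy_map, uint8_t type)
{
	return ((uint32_t)phy_map << 8) + type;
}

int main(void)
{
	uint32_t v = antpath_encode(0x3 /* both PHYs */, 0x4 /* e.g. W2G */);

	printf("path=0x%x type=0x%x\n", v >> 8, v & 0xff);
	return 0;
}
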
+static inline
+void _slot_set_le(struct rtw89_btc *btc, u8 sid, __le16 dura, __le32 tbl, __le16 type)
+{
+ if (btc->ver->fcxslots == 1) {
+ btc->dm.slot.v1[sid].dur = dura;
+ btc->dm.slot.v1[sid].cxtbl = tbl;
+ btc->dm.slot.v1[sid].cxtype = type;
+ } else if (btc->ver->fcxslots == 7) {
+ btc->dm.slot.v7[sid].dur = dura;
+ btc->dm.slot.v7[sid].cxtype = type;
+ btc->dm.slot.v7[sid].cxtbl = tbl;
+ }
+}
+
+static inline
+void _slot_set(struct rtw89_btc *btc, u8 sid, u16 dura, u32 tbl, u16 type)
+{
+ _slot_set_le(btc, sid, cpu_to_le16(dura), cpu_to_le32(tbl), cpu_to_le16(type));
+}
+
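
Two setters exist because firmware slot defaults (the s_def[] copies earlier in this patch) are already little-endian, while literals in C code are CPU-order: _slot_set() converts exactly once and _slot_set_le() copies as-is. A standalone sketch of that split; the types are stand-ins and a little-endian host is assumed:

#include <stdint.h>

typedef uint16_t le16;	/* stand-in for __le16 */
typedef uint32_t le32;	/* stand-in for __le32 */

struct slot {
	le16 dur;
	le32 cxtbl;
	le16 cxtype;
};

/* identity on a little-endian host (assumption for this sketch) */
static le16 cpu_to_le16_x(uint16_t v) { return v; }
static le32 cpu_to_le32_x(uint32_t v) { return v; }

static void slot_set_le(struct slot *s, le16 dur, le32 tbl, le16 type)
{
	s->dur = dur;		/* already little-endian: copy as-is */
	s->cxtbl = tbl;
	s->cxtype = type;
}

static void slot_set(struct slot *s, uint16_t dur, uint32_t tbl, uint16_t type)
{
	/* CPU-order inputs: convert exactly once */
	slot_set_le(s, cpu_to_le16_x(dur), cpu_to_le32_x(tbl), cpu_to_le16_x(type));
}

int main(void)
{
	struct slot s;

	slot_set(&s, 100, 0x55555555, 3);		/* literals from C code */
	slot_set_le(&s, s.dur, s.cxtbl, s.cxtype);	/* pre-encoded defaults */
	return 0;
}
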
+static inline
+void _slot_set_dur(struct rtw89_btc *btc, u8 sid, u16 dura)
+{
+ if (btc->ver->fcxslots == 1)
+ btc->dm.slot.v1[sid].dur = cpu_to_le16(dura);
+ else if (btc->ver->fcxslots == 7)
+ btc->dm.slot.v7[sid].dur = cpu_to_le16(dura);
+}
+
+static inline
+void _slot_set_type(struct rtw89_btc *btc, u8 sid, u16 type)
+{
+ if (btc->ver->fcxslots == 1)
+ btc->dm.slot.v1[sid].cxtype = cpu_to_le16(type);
+ else if (btc->ver->fcxslots == 7)
+ btc->dm.slot.v7[sid].cxtype = cpu_to_le16(type);
+}
+
+static inline
+void _slot_set_tbl(struct rtw89_btc *btc, u8 sid, u32 tbl)
+{
+ if (btc->ver->fcxslots == 1)
+ btc->dm.slot.v1[sid].cxtbl = cpu_to_le32(tbl);
+ else if (btc->ver->fcxslots == 7)
+ btc->dm.slot.v7[sid].cxtbl = cpu_to_le32(tbl);
+}
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
index d474b8d5df3d..7019f7d482a8 100644
--- a/drivers/net/wireless/realtek/rtw89/core.c
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -18,6 +18,7 @@
#include "ser.h"
#include "txrx.h"
#include "util.h"
+#include "wow.h"
static bool rtw89_disable_ps_mode;
module_param_named(disable_ps_mode, rtw89_disable_ps_mode, bool, 0644);
@@ -82,6 +83,9 @@ static struct ieee80211_channel rtw89_channels_5ghz[] = {
RTW89_DEF_CHAN_5G(5885, 177),
};
+static_assert(RTW89_5GHZ_UNII4_START_INDEX + RTW89_5GHZ_UNII4_CHANNEL_NUM ==
+ ARRAY_SIZE(rtw89_channels_5ghz));
+
static struct ieee80211_channel rtw89_channels_6ghz[] = {
RTW89_DEF_CHAN_6G(5955, 1),
RTW89_DEF_CHAN_6G(5975, 5),
@@ -252,6 +256,9 @@ static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ if (tx && ieee80211_is_assoc_req(hdr->frame_control))
+ rtw89_wow_parse_akm(rtwdev, skb);
+
if (!ieee80211_is_data(hdr->frame_control))
return;
@@ -492,31 +499,21 @@ static void
rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
const struct rtw89_chip_info *chip = rtwdev->chip;
- struct ieee80211_vif *vif = tx_req->vif;
- struct ieee80211_sta *sta = tx_req->sta;
+ const struct rtw89_sec_cam_entry *sec_cam;
struct ieee80211_tx_info *info;
struct ieee80211_key_conf *key;
- struct rtw89_vif *rtwvif;
- struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta);
- struct rtw89_addr_cam_entry *addr_cam;
- struct rtw89_sec_cam_entry *sec_cam;
struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
struct sk_buff *skb = tx_req->skb;
u8 sec_type = RTW89_SEC_KEY_TYPE_NONE;
+ u8 sec_cam_idx;
u64 pn64;
- if (!vif) {
- rtw89_warn(rtwdev, "cannot set sec key without vif\n");
- return;
- }
-
- rtwvif = (struct rtw89_vif *)vif->drv_priv;
- addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta);
-
info = IEEE80211_SKB_CB(skb);
key = info->control.hw_key;
- sec_cam = addr_cam->sec_entries[key->hw_key_idx];
+ sec_cam_idx = key->hw_key_idx;
+ sec_cam = cam_info->sec_entries[sec_cam_idx];
if (!sec_cam) {
rtw89_warn(rtwdev, "sec cam entry is empty\n");
return;
@@ -816,6 +813,8 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req);
desc_info->port = desc_info->hiq ? rtwvif->port : 0;
desc_info->er_cap = rtwsta ? rtwsta->er_cap : false;
+ desc_info->stbc = rtwsta ? rtwsta->ra.stbc_cap : false;
+ desc_info->ldpc = rtwsta ? rtwsta->ra.ldpc_cap : false;
/* enable wd_info for AMPDU */
desc_info->en_wd_info = true;
@@ -1130,6 +1129,8 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
{
u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) |
FIELD_PREP(RTW89_TXWD_INFO0_DATA_RATE, desc_info->data_rate) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DATA_STBC, desc_info->stbc) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DATA_LDPC, desc_info->ldpc) |
FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port);
@@ -1138,7 +1139,9 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DATA_STBC, desc_info->stbc) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DATA_LDPC, desc_info->ldpc) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port) |
FIELD_PREP(RTW89_TXWD_INFO0_DATA_ER, desc_info->er_cap) |
FIELD_PREP(RTW89_TXWD_INFO0_DATA_BW_ER, 0);
@@ -1304,7 +1307,9 @@ static __le32 rtw89_build_txwd_body7_v2(struct rtw89_tx_desc_info *desc_info)
static __le32 rtw89_build_txwd_info0_v2(struct rtw89_tx_desc_info *desc_info)
{
- u32 dword = FIELD_PREP(BE_TXD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
+ u32 dword = FIELD_PREP(BE_TXD_INFO0_DATA_STBC, desc_info->stbc) |
+ FIELD_PREP(BE_TXD_INFO0_DATA_LDPC, desc_info->ldpc) |
+ FIELD_PREP(BE_TXD_INFO0_DISDATAFB, desc_info->dis_data_fb) |
FIELD_PREP(BE_TXD_INFO0_MULTIPORT_ID, desc_info->port);
return cpu_to_le32(dword);
@@ -1552,6 +1557,12 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev,
u32 t;
phy_ppdu->chan_idx = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_CH_IDX);
+
+ if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR) {
+ phy_ppdu->ldpc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_LDPC);
+ phy_ppdu->stbc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_STBC);
+ }
+
if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6)
return;
@@ -1910,7 +1921,8 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
return;
if (ieee80211_is_beacon(hdr->frame_control)) {
- if (vif->type == NL80211_IFTYPE_STATION) {
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) {
rtw89_vif_sync_bcn_tsf(rtwvif, hdr, skb->len);
rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu);
}
@@ -1977,6 +1989,23 @@ static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status)
rx_status->rate_idx -= 4;
}
+static
+void rtw89_core_update_rx_status_by_ppdu(struct rtw89_dev *rtwdev,
+ struct ieee80211_rx_status *rx_status,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ if (!(rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR))
+ return;
+
+ if (!phy_ppdu)
+ return;
+
+ if (phy_ppdu->ldpc)
+ rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
+ if (phy_ppdu->stbc)
+ rx_status->enc_flags |= u8_encode_bits(1, RX_ENC_FLAG_STBC_MASK);
+}
+
static const u8 rx_status_bw_to_radiotap_eht_usig[] = {
[RATE_INFO_BW_20] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_20MHZ,
[RATE_INFO_BW_5] = U8_MAX,
@@ -2020,10 +2049,14 @@ static void rtw89_core_update_radiotap_eht(struct rtw89_dev *rtwdev,
eht->user_info[0] =
cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
- IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O);
+ IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
+ IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN);
eht->user_info[0] |=
le32_encode_bits(rx_status->rate_idx, IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
le32_encode_bits(rx_status->nss, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O);
+ if (rx_status->enc_flags & RX_ENC_FLAG_LDPC)
+ eht->user_info[0] |=
+ cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING);
/* U-SIG */
tlv = (void *)tlv + sizeof(*tlv) + ALIGN(eht_len, 4);
@@ -2049,6 +2082,8 @@ static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev,
{
static const struct ieee80211_radiotap_he known_he = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN |
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
};
@@ -2080,6 +2115,7 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev,
rtw89_core_hw_to_sband_rate(rx_status);
rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
+ rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu);
rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status);
/* In low power mode, it does RX in thread context. */
local_bh_disable();
@@ -2485,11 +2521,15 @@ void rtw89_core_napi_stop(struct rtw89_dev *rtwdev)
}
EXPORT_SYMBOL(rtw89_core_napi_stop);
-void rtw89_core_napi_init(struct rtw89_dev *rtwdev)
+int rtw89_core_napi_init(struct rtw89_dev *rtwdev)
{
- init_dummy_netdev(&rtwdev->netdev);
- netif_napi_add(&rtwdev->netdev, &rtwdev->napi,
+ rtwdev->netdev = alloc_netdev_dummy(0);
+ if (!rtwdev->netdev)
+ return -ENOMEM;
+
+ netif_napi_add(rtwdev->netdev, &rtwdev->napi,
rtwdev->hci.ops->napi_poll);
+ return 0;
}
EXPORT_SYMBOL(rtw89_core_napi_init);
@@ -2497,6 +2537,7 @@ void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev)
{
rtw89_core_napi_stop(rtwdev);
netif_napi_del(&rtwdev->napi);
+ free_netdev(rtwdev->netdev);
}
EXPORT_SYMBOL(rtw89_core_napi_deinit);
@@ -3335,20 +3376,23 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
/* for station mode, assign the mac_id from itself */
rtwsta->mac_id = rtwvif->mac_id;
- /* must do rtw89_reg_6ghz_power_recalc() before rfk channel */
- rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, true);
+
+ /* must do rtw89_reg_6ghz_recalc() before rfk channel */
+ ret = rtw89_reg_6ghz_recalc(rtwdev, rtwvif, true);
+ if (ret)
+ return ret;
+
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_CONN_START);
rtw89_chip_rfk_channel(rtwdev);
} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
- rtwsta->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map,
- RTW89_MAX_MAC_ID_NUM);
+ rtwsta->mac_id = rtw89_acquire_mac_id(rtwdev);
if (rtwsta->mac_id == RTW89_MAX_MAC_ID_NUM)
return -ENOSPC;
ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false);
if (ret) {
- rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
+ rtw89_release_mac_id(rtwdev, rtwsta->mac_id);
rtw89_warn(rtwdev, "failed to send h2c macid pause\n");
return ret;
}
@@ -3356,7 +3400,7 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
RTW89_ROLE_CREATE);
if (ret) {
- rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
+ rtw89_release_mac_id(rtwdev, rtwsta->mac_id);
rtw89_warn(rtwdev, "failed to send h2c role info\n");
return ret;
}
@@ -3525,11 +3569,11 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
int ret;
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
- rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, false);
+ rtw89_reg_6ghz_recalc(rtwdev, rtwvif, false);
rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
BTC_ROLE_MSTS_STA_DIS_CONN);
} else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) {
- rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id);
+ rtw89_release_mac_id(rtwdev, rtwsta->mac_id);
ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
RTW89_ROLE_REMOVE);
@@ -4015,15 +4059,15 @@ void rtw89_core_update_beacon_work(struct work_struct *work)
int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond)
{
struct completion *cmpl = &wait->completion;
- unsigned long timeout;
+ unsigned long time_left;
unsigned int cur;
cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond);
if (cur != RTW89_WAIT_COND_IDLE)
return -EBUSY;
- timeout = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT);
- if (timeout == 0) {
+ time_left = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT);
+ if (time_left == 0) {
atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE);
return -ETIMEDOUT;
}
@@ -4069,6 +4113,24 @@ void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg eve
}
}
+void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks)
+{
+ const struct dmi_system_id *match;
+ enum rtw89_quirks quirk;
+
+ if (!quirks)
+ return;
+
+ for (match = dmi_first_match(quirks); match; match = dmi_first_match(match + 1)) {
+ quirk = (uintptr_t)match->driver_data;
+ if (quirk >= NUM_OF_RTW89_QUIRKS)
+ continue;
+
+ set_bit(quirk, rtwdev->quirks);
+ }
+}
+EXPORT_SYMBOL(rtw89_check_quirks);
+
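For context, rtw89_check_quirks() above walks a standard DMI quirk table whose driver_data carries an enum rtw89_quirks value. A hypothetical table, with invented match strings, might look like this:

/* Hypothetical quirk table; vendor/product strings are illustrative only. */
static const struct dmi_system_id example_rtw89_quirks[] = {
	{
		.ident = "Example laptop needing the PCI BER workaround",
		.driver_data = (void *)RTW89_QUIRK_PCI_BER,
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ExampleVendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ExampleModel"),
		},
	},
	{}
};

/* A bus driver would pass such a table via rtw89_driver_info::quirks and
 * call rtw89_check_quirks(rtwdev, info->quirks) at probe time (sketch).
 */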
int rtw89_core_start(struct rtw89_dev *rtwdev)
{
int ret;
@@ -4161,6 +4223,25 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev)
rtw89_hci_reset(rtwdev);
}
+u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 mac_id_num = chip->support_macid_num;
+ u8 mac_id;
+
+ mac_id = find_first_zero_bit(rtwdev->mac_id_map, mac_id_num);
+ if (mac_id == mac_id_num)
+ return RTW89_MAX_MAC_ID_NUM;
+
+ set_bit(mac_id, rtwdev->mac_id_map);
+ return mac_id;
+}
+
+void rtw89_release_mac_id(struct rtw89_dev *rtwdev, u8 mac_id)
+{
+ clear_bit(mac_id, rtwdev->mac_id_map);
+}
+
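A minimal usage sketch of the new allocator pair, mirroring the AP/TDLS paths in rtw89_core_sta_add() and rtw89_core_sta_remove() above:

/* Sketch: RTW89_MAX_MAC_ID_NUM doubles as the "allocation failed" marker,
 * since find_first_zero_bit() is bounded by chip->support_macid_num.
 */
u8 mac_id = rtw89_acquire_mac_id(rtwdev);

if (mac_id == RTW89_MAX_MAC_ID_NUM)
	return -ENOSPC;
/* ... program the station ... */
rtw89_release_mac_id(rtwdev, mac_id);	/* on teardown or any error path */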
int rtw89_core_init(struct rtw89_dev *rtwdev)
{
struct rtw89_btc *btc = &rtwdev->btc;
@@ -4306,7 +4387,7 @@ static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
rtwdev->hal.cv = cv;
- if (chip->chip_id == RTL8852B || chip->chip_id == RTL8851B) {
+ if (rtw89_is_rtl885xb(rtwdev)) {
ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_CV, &val);
if (ret)
return;
@@ -4450,6 +4531,10 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->max_tx_aggregation_subframes = RTW89_MAX_TX_AGG_NUM;
hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL;
+ hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
+ IEEE80211_RADIOTAP_MCS_HAVE_STBC;
+ hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC;
+
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, MFP_CAPABLE);
@@ -4486,7 +4571,15 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_TDLS_EXTERNAL_SETUP |
- WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_SPLIT_SCAN_6GHZ;
+ WIPHY_FLAG_AP_UAPSD |
+ WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK;
+
+ if (!chip->support_rnr)
+ hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ;
+
+ if (chip->chip_gen == RTW89_CHIP_BE)
+ hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
+
hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
hw->wiphy->max_scan_ssids = RTW89_SCANOFLD_MAX_SSID;
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
index 2e854c9af709..11fa003a9788 100644
--- a/drivers/net/wireless/realtek/rtw89/core.h
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -7,6 +7,7 @@
#include <linux/average.h>
#include <linux/bitfield.h>
+#include <linux/dmi.h>
#include <linux/firmware.h>
#include <linux/iopoll.h>
#include <linux/workqueue.h>
@@ -131,6 +132,7 @@ enum rtw89_hci_type {
enum rtw89_core_chip_id {
RTL8852A,
RTL8852B,
+ RTL8852BT,
RTL8852C,
RTL8851B,
RTL8922A,
@@ -744,6 +746,14 @@ enum rtw89_reg_6ghz_power {
RTW89_REG_6GHZ_POWER_DFLT = RTW89_REG_6GHZ_POWER_VLP,
};
+#define RTW89_MIN_VALID_POWER_CONSTRAINT (-10) /* unit: dBm */
+
+/* calculate based on ieee80211 Transmit Power Envelope */
+struct rtw89_reg_6ghz_tpe {
+ bool valid;
+ s8 constraint; /* unit: dBm */
+};
+
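As a hedged sketch of how this struct is presumably filled: each received Transmit Power Envelope element contributes a power limit, the driver keeps the minimum, and the result is floored at RTW89_MIN_VALID_POWER_CONSTRAINT (helper name invented; not the driver's actual routine):

/* Hypothetical helper: fold one TPE-derived limit into the running minimum. */
static void example_tpe_fold(struct rtw89_reg_6ghz_tpe *tpe, s8 constraint)
{
	if (tpe->valid)
		constraint = min(constraint, tpe->constraint);

	tpe->constraint = max_t(s8, constraint, RTW89_MIN_VALID_POWER_CONSTRAINT);
	tpe->valid = true;
}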
enum rtw89_fw_pkt_ofld_type {
RTW89_PKT_OFLD_TYPE_PROBE_RSP = 0,
RTW89_PKT_OFLD_TYPE_PS_POLL = 1,
@@ -792,6 +802,8 @@ struct rtw89_rx_phy_ppdu {
u8 evm_max;
u8 evm_min;
} ofdm;
+ bool ldpc;
+ bool stbc;
bool to_self;
bool valid;
};
@@ -799,6 +811,7 @@ struct rtw89_rx_phy_ppdu {
enum rtw89_mac_idx {
RTW89_MAC_0 = 0,
RTW89_MAC_1 = 1,
+ RTW89_MAC_NUM,
};
enum rtw89_phy_idx {
@@ -882,6 +895,13 @@ enum rtw89_ps_mode {
#define RTW89_BYR_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1)
+enum rtw89_pe_duration {
+ RTW89_PE_DURATION_0 = 0,
+ RTW89_PE_DURATION_8 = 1,
+ RTW89_PE_DURATION_16 = 2,
+ RTW89_PE_DURATION_16_20 = 3,
+};
+
enum rtw89_ru_bandwidth {
RTW89_RU26 = 0,
RTW89_RU52 = 1,
@@ -1127,6 +1147,8 @@ struct rtw89_tx_desc_info {
bool hiq;
u8 port;
bool er_cap;
+ bool stbc;
+ bool ldpc;
};
struct rtw89_core_tx_request {
@@ -1178,9 +1200,13 @@ enum rtw89_btc_ncnt {
BTC_NCNT_CUSTOMERIZE,
BTC_NCNT_WL_RFK,
BTC_NCNT_WL_STA,
+ BTC_NCNT_WL_STA_LAST,
BTC_NCNT_FWINFO,
BTC_NCNT_TIMER,
- BTC_NCNT_NUM
+ BTC_NCNT_SWITCH_CHBW,
+ BTC_NCNT_RESUME_DL_FW,
+ BTC_NCNT_COUNTRYCODE,
+ BTC_NCNT_NUM,
};
enum rtw89_btc_btinfo {
@@ -1209,6 +1235,7 @@ enum rtw89_btc_dcnt {
BTC_DCNT_TDMA_NONSYNC,
BTC_DCNT_SLOT_NONSYNC,
BTC_DCNT_BTCNT_HANG,
+ BTC_DCNT_BTTX_HANG,
BTC_DCNT_WL_SLOT_DRIFT,
BTC_DCNT_WL_STA_LAST,
BTC_DCNT_BT_SLOT_DRIFT,
@@ -1216,7 +1243,10 @@ enum rtw89_btc_dcnt {
BTC_DCNT_FDDT_TRIG,
BTC_DCNT_E2G,
BTC_DCNT_E2G_HANG,
- BTC_DCNT_NUM
+ BTC_DCNT_WL_FW_VER_MATCH,
+ BTC_DCNT_NULL_TX_FAIL,
+ BTC_DCNT_WL_STA_NTFY,
+ BTC_DCNT_NUM,
};
enum rtw89_btc_wl_state_cnt {
@@ -1230,6 +1260,13 @@ enum rtw89_btc_wl_state_cnt {
BTC_WCNT_RFK_REJECT,
BTC_WCNT_RFK_TIMEOUT,
BTC_WCNT_CH_UPDATE,
+ BTC_WCNT_DBCC_ALL_2G,
+ BTC_WCNT_DBCC_CHG,
+ BTC_WCNT_RX_OK_LAST,
+ BTC_WCNT_RX_OK_LAST2S,
+ BTC_WCNT_RX_ERR_LAST,
+ BTC_WCNT_RX_ERR_LAST2S,
+ BTC_WCNT_RX_LAST,
BTC_WCNT_NUM
};
@@ -1253,8 +1290,10 @@ enum rtw89_btc_bt_state_cnt {
BTC_BCNT_LOPRI_TX,
BTC_BCNT_LOPRI_RX,
BTC_BCNT_POLUT,
+ BTC_BCNT_POLUT_NOW,
+ BTC_BCNT_POLUT_DIFF,
BTC_BCNT_RATECHG,
- BTC_BCNT_NUM
+ BTC_BCNT_NUM,
};
enum rtw89_btc_bt_profile {
@@ -1299,6 +1338,8 @@ struct rtw89_btc_wl_smap {
u32 scan: 1;
u32 connecting: 1;
u32 roaming: 1;
+ u32 dbccing: 1;
+ u32 transacting: 1;
u32 _4way: 1;
u32 rf_off: 1;
u32 lps: 2;
@@ -1307,6 +1348,8 @@ struct rtw89_btc_wl_smap {
u32 traffic_dir : 2;
u32 rf_off_pre: 1;
u32 lps_pre: 2;
+ u32 lps_exiting: 1;
+ u32 emlsr: 1;
};
enum rtw89_tfc_lv {
@@ -1349,6 +1392,14 @@ struct rtw89_traffic_stats {
u16 rx_rate;
};
+struct rtw89_btc_chdef {
+ u8 center_ch;
+ u8 band;
+ u8 chan;
+ enum rtw89_sc_offset offset;
+ enum rtw89_bandwidth bw;
+};
+
struct rtw89_btc_statistic {
u8 rssi; /* 0%~110% (dBm = rssi -110) */
struct rtw89_traffic_stats traffic;
@@ -1357,6 +1408,7 @@ struct rtw89_btc_statistic {
#define BTC_WL_RSSI_THMAX 4
struct rtw89_btc_wl_link_info {
+ struct rtw89_btc_chdef chdef;
struct rtw89_btc_statistic stat;
enum rtw89_tfc_dir dir;
u8 rssi_state[BTC_WL_RSSI_THMAX];
@@ -1370,6 +1422,7 @@ struct rtw89_btc_wl_link_info {
u8 phy;
u8 dtim_period;
u8 mode;
+ u8 tx_1ss_limit;
u8 mac_id;
u8 tx_retry;
@@ -1379,6 +1432,7 @@ struct rtw89_btc_wl_link_info {
u32 tx_time;
u32 client_cnt;
u32 rx_rate_drop_cnt;
+ u32 noa_duration;
u32 active: 1;
u32 noa: 1;
@@ -1412,6 +1466,11 @@ struct rtw89_btc_bt_a2dp_desc {
u8 type: 3;
u8 active: 1;
u8 sink: 1;
+ u32 handle_update: 1;
+ u32 devinfo_query: 1;
+ u32 no_empty_streak_2s: 8;
+ u32 no_empty_streak_max: 8;
+ u32 rsvd: 6;
u8 bitpool;
u16 vendor_id;
@@ -1589,6 +1648,42 @@ struct rtw89_btc_wl_role_info_v2 { /* struct size must be n*4 bytes */
u32 rsvd: 27;
};
+struct rtw89_btc_wl_rlink { /* H2C info, struct size must be n*4 bytes */
+ u8 connected;
+ u8 pid;
+ u8 phy;
+ u8 noa;
+
+ u8 rf_band; /* enum band_type RF band: 2.4G/5G/6G */
+ u8 active; /* 0:rlink is under doze */
+ u8 bw; /* enum channel_width */
+ u8 role; /*enum role_type */
+
+ u8 ch;
+ u8 noa_dur; /* ms */
+ u8 client_cnt; /* for Role = P2P-Go/AP */
+ u8 mode; /* wifi protocol */
+} __packed;
+
+#define RTW89_BE_BTC_WL_MAX_ROLE_NUMBER 6
+struct rtw89_btc_wl_role_info_v8 { /* H2C info, struct size must be n*4 bytes */
+ u8 connect_cnt;
+ u8 link_mode;
+ u8 link_mode_chg;
+ u8 p2p_2g;
+
+ u8 pta_req_band;
+ u8 dbcc_en; /* 1+1 and 2.4G-included */
+ u8 dbcc_chg;
+ u8 dbcc_2g_phy; /* which phy operates in 2G, HW_PHY_0 or HW_PHY_1 */
+
+ struct rtw89_btc_wl_rlink rlink[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER][RTW89_MAC_NUM];
+
+ u32 role_map;
+ u32 mrole_type; /* btc_wl_mrole_type */
+ u32 mrole_noa_duration; /* ms */
+} __packed;
+
struct rtw89_btc_wl_ver_info {
u32 fw_coex; /* match with which coex_ver */
u32 fw;
@@ -1611,6 +1706,9 @@ struct rtw89_btc_wl_rfk_info {
u32 band: 2;
u32 type: 8;
u32 rsvd: 14;
+
+ u32 start_time;
+ u32 proc_time;
};
struct rtw89_btc_bt_smap {
@@ -1724,12 +1822,14 @@ struct rtw89_btc_wl_nhm {
struct rtw89_btc_wl_info {
struct rtw89_btc_wl_link_info link_info[RTW89_PORT_NUM];
+ struct rtw89_btc_wl_link_info rlink_info[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER][RTW89_MAC_NUM];
struct rtw89_btc_wl_rfk_info rfk_info;
struct rtw89_btc_wl_ver_info ver_info;
struct rtw89_btc_wl_afh_info afh_info;
struct rtw89_btc_wl_role_info role_info;
struct rtw89_btc_wl_role_info_v1 role_info_v1;
struct rtw89_btc_wl_role_info_v2 role_info_v2;
+ struct rtw89_btc_wl_role_info_v8 role_info_v8;
struct rtw89_btc_wl_scan_info scan_info;
struct rtw89_btc_wl_dbcc_info dbcc_info;
struct rtw89_btc_rf_para rf_para;
@@ -1740,9 +1840,14 @@ struct rtw89_btc_wl_info {
u8 rssi_level;
u8 cn_report;
u8 coex_mode;
+ u8 pta_req_mac;
+ u8 bt_polut_type[RTW89_PHY_MAX]; /* BT polluted WL-Tx type for phy0/1 */
+ bool is_5g_hi_channel;
+ bool pta_reg_mac_chg;
bool bg_mode;
bool scbd_change;
+ bool fw_ver_mismatch;
u32 scbd;
};
@@ -1870,9 +1975,18 @@ struct rtw89_btc_fbtc_btscan_v2 {
struct rtw89_btc_bt_scan_info_v2 para[CXSCAN_MAX];
} __packed;
+struct rtw89_btc_fbtc_btscan_v7 {
+ u8 fver; /* btc_ver::fcxbtscan */
+ u8 type;
+ u8 rsvd0;
+ u8 rsvd1;
+ struct rtw89_btc_bt_scan_info_v2 para[CXSCAN_MAX];
+} __packed;
+
union rtw89_btc_fbtc_btscan {
struct rtw89_btc_fbtc_btscan_v1 v1;
struct rtw89_btc_fbtc_btscan_v2 v2;
+ struct rtw89_btc_fbtc_btscan_v7 v7;
};
struct rtw89_btc_bt_info {
@@ -2014,6 +2128,20 @@ struct rtw89_btc_fbtc_rpt_ctrl_info_v5 {
__le16 cnt_aoac_rf_off; /* rf-off counter for aoac switch notify */
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_info_v8 {
+ __le16 cnt; /* fw report counter */
+ __le16 cnt_c2h; /* fw send c2h counter */
+ __le16 cnt_h2c; /* fw recv h2c counter */
+ __le16 len_c2h; /* The total length of the last C2H */
+
+ __le16 cnt_aoac_rf_on; /* rf-on counter for aoac switch notify */
+ __le16 cnt_aoac_rf_off; /* rf-off counter for aoac switch notify */
+
+ __le32 cx_ver; /* match which driver's coex version */
+ __le32 fw_ver;
+ __le32 en; /* report map */
+} __packed;
+
struct rtw89_btc_fbtc_rpt_ctrl_wl_fw_info {
__le32 cx_ver; /* match which driver's coex version */
__le32 cx_offload;
@@ -2070,11 +2198,25 @@ struct rtw89_btc_fbtc_rpt_ctrl_v105 {
struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
} __packed;
+struct rtw89_btc_fbtc_rpt_ctrl_v8 {
+ u8 fver;
+ u8 rsvd0;
+ u8 rpt_len_max_l; /* BTC_RPT_MAX bit0~7 */
+ u8 rpt_len_max_h; /* BTC_RPT_MAX bit8~15 */
+
+ u8 gnt_val[RTW89_PHY_MAX][4];
+ __le16 bt_cnt[BTC_BCNT_STA_MAX_V105];
+
+ struct rtw89_btc_fbtc_rpt_ctrl_info_v8 rpt_info;
+ struct rtw89_btc_fbtc_rpt_ctrl_bt_mailbox bt_mbx_info;
+} __packed;
+
union rtw89_btc_fbtc_rpt_ctrl_ver_info {
struct rtw89_btc_fbtc_rpt_ctrl_v1 v1;
struct rtw89_btc_fbtc_rpt_ctrl_v4 v4;
struct rtw89_btc_fbtc_rpt_ctrl_v5 v5;
struct rtw89_btc_fbtc_rpt_ctrl_v105 v105;
+ struct rtw89_btc_fbtc_rpt_ctrl_v8 v8;
};
enum rtw89_fbtc_ext_ctrl_type {
@@ -2181,15 +2323,32 @@ enum rtw89_btc_afh_map_type { /*AFH MAP TYPE */
};
#define BTC_DBG_MAX1 32
-struct rtw89_btc_fbtc_gpio_dbg {
+struct rtw89_btc_fbtc_gpio_dbg_v1 {
u8 fver; /* btc_ver::fcxgpiodbg */
u8 rsvd;
- u16 rsvd2;
- u32 en_map; /* which debug signal (see btc_wl_gpio_debug) is enable */
- u32 pre_state; /* the debug signal is 1 or 0 */
+ __le16 rsvd2;
+ __le32 en_map; /* which debug signal (see btc_wl_gpio_debug) is enabled */
+ __le32 pre_state; /* the debug signal is 1 or 0 */
u8 gpio_map[BTC_DBG_MAX1]; /*the debug signals to GPIO-Position */
} __packed;
+struct rtw89_btc_fbtc_gpio_dbg_v7 {
+ u8 fver;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+
+ u8 gpio_map[BTC_DBG_MAX1];
+
+ __le32 en_map;
+ __le32 pre_state;
+} __packed;
+
+union rtw89_btc_fbtc_gpio_dbg {
+ struct rtw89_btc_fbtc_gpio_dbg_v1 v1;
+ struct rtw89_btc_fbtc_gpio_dbg_v7 v7;
+};
+
struct rtw89_btc_fbtc_mreg_val_v1 {
u8 fver; /* btc_ver::fcxmreg */
u8 reg_num;
@@ -2204,9 +2363,18 @@ struct rtw89_btc_fbtc_mreg_val_v2 {
__le32 mreg_val[CXMREG_MAX_V2];
} __packed;
+struct rtw89_btc_fbtc_mreg_val_v7 {
+ u8 fver;
+ u8 reg_num;
+ u8 rsvd0;
+ u8 rsvd1;
+ __le32 mreg_val[CXMREG_MAX_V2];
+} __packed;
+
union rtw89_btc_fbtc_mreg_val {
struct rtw89_btc_fbtc_mreg_val_v1 v1;
struct rtw89_btc_fbtc_mreg_val_v2 v2;
+ struct rtw89_btc_fbtc_mreg_val_v7 v7;
};
#define RTW89_DEF_FBTC_MREG(__type, __bytes, __offset) \
@@ -2233,6 +2401,40 @@ struct rtw89_btc_fbtc_slots {
struct rtw89_btc_fbtc_slot slot[CXST_MAX];
} __packed;
+struct rtw89_btc_fbtc_slot_v7 {
+ __le16 dur; /* slot duration */
+ __le16 cxtype;
+ __le32 cxtbl;
+} __packed;
+
+struct rtw89_btc_fbtc_slot_u16 {
+ __le16 dur; /* slot duration */
+ __le16 cxtype;
+ __le16 cxtbl_l16; /* coex table [15:0] */
+ __le16 cxtbl_h16; /* coex table [31:16] */
+} __packed;
+
+struct rtw89_btc_fbtc_1slot_v7 {
+ u8 fver;
+ u8 sid; /* slot id */
+ __le16 rsvd;
+ struct rtw89_btc_fbtc_slot_v7 slot;
+} __packed;
+
+struct rtw89_btc_fbtc_slots_v7 {
+ u8 fver;
+ u8 slot_cnt;
+ u8 rsvd0;
+ u8 rsvd1;
+ struct rtw89_btc_fbtc_slot_u16 slot[CXST_MAX];
+ __le32 update_map;
+} __packed;
+
+union rtw89_btc_fbtc_slots_info {
+ struct rtw89_btc_fbtc_slots v1;
+ struct rtw89_btc_fbtc_slots_v7 v7;
+} __packed;
+
struct rtw89_btc_fbtc_step {
u8 type;
u8 val;
@@ -2339,6 +2541,12 @@ struct rtw89_btc_fbtc_cycle_leak_info {
__le16 tmax; /* max leak-slot time */
} __packed;
+struct rtw89_btc_fbtc_cycle_leak_info_v7 {
+ __le16 tavg;
+ __le16 tamx;
+ __le32 cnt_rximr;
+} __packed;
+
#define RTW89_BTC_FDDT_PHASE_CYCLE GENMASK(9, 0)
#define RTW89_BTC_FDDT_TRAIN_STEP GENMASK(15, 10)
@@ -2451,11 +2659,36 @@ struct rtw89_btc_fbtc_cysta_v5 { /* statistics for cycles */
__le32 except_map;
} __packed;
+struct rtw89_btc_fbtc_cysta_v7 { /* statistics for cycles */
+ u8 fver;
+ u8 rsvd;
+ u8 collision_cnt; /* counter for event/timer occurring at the same time */
+ u8 except_cnt;
+
+ u8 wl_rx_err_ratio[BTC_CYCLE_SLOT_MAX];
+
+ struct rtw89_btc_fbtc_a2dp_trx_stat_v4 a2dp_trx[BTC_CYCLE_SLOT_MAX];
+
+ __le16 skip_cnt;
+ __le16 cycles; /* total cycle number */
+
+ __le16 slot_step_time[BTC_CYCLE_SLOT_MAX]; /* record the wl/bt slot time */
+ __le16 slot_cnt[CXST_MAX]; /* slot count */
+ __le16 bcn_cnt[CXBCN_MAX];
+
+ struct rtw89_btc_fbtc_cycle_time_info_v5 cycle_time;
+ struct rtw89_btc_fbtc_cycle_a2dp_empty_info a2dp_ept;
+ struct rtw89_btc_fbtc_cycle_leak_info_v7 leak_slot;
+
+ __le32 except_map;
+} __packed;
+
union rtw89_btc_fbtc_cysta_info {
struct rtw89_btc_fbtc_cysta_v2 v2;
struct rtw89_btc_fbtc_cysta_v3 v3;
struct rtw89_btc_fbtc_cysta_v4 v4;
struct rtw89_btc_fbtc_cysta_v5 v5;
+ struct rtw89_btc_fbtc_cysta_v7 v7;
};
struct rtw89_btc_fbtc_cynullsta_v1 { /* cycle null statistics */
@@ -2476,12 +2709,24 @@ struct rtw89_btc_fbtc_cynullsta_v2 { /* cycle null statistics */
__le32 result[2][5]; /* 0:fail, 1:ok, 2:on_time, 3:retry, 4:tx */
} __packed;
+struct rtw89_btc_fbtc_cynullsta_v7 { /* cycle null statistics */
+ u8 fver;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+
+ __le32 tmax[2];
+ __le32 tavg[2];
+ __le32 result[2][5];
+} __packed;
+
union rtw89_btc_fbtc_cynullsta_info {
struct rtw89_btc_fbtc_cynullsta_v1 v1; /* info from fw */
struct rtw89_btc_fbtc_cynullsta_v2 v2;
+ struct rtw89_btc_fbtc_cynullsta_v7 v7;
};
-struct rtw89_btc_fbtc_btver {
+struct rtw89_btc_fbtc_btver_v1 {
u8 fver; /* btc_ver::fcxbtver */
u8 rsvd;
__le16 rsvd2;
@@ -2490,6 +2735,22 @@ struct rtw89_btc_fbtc_btver {
__le32 feature;
} __packed;
+struct rtw89_btc_fbtc_btver_v7 {
+ u8 fver;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 rsvd2;
+
+ __le32 coex_ver; /*bit[15:8]->shared, bit[7:0]->non-shared */
+ __le32 fw_ver;
+ __le32 feature;
+} __packed;
+
+union rtw89_btc_fbtc_btver {
+ struct rtw89_btc_fbtc_btver_v1 v1;
+ struct rtw89_btc_fbtc_btver_v7 v7;
+} __packed;
+
struct rtw89_btc_fbtc_btafh {
u8 fver; /* btc_ver::fcxbtafh */
u8 rsvd;
@@ -2511,6 +2772,18 @@ struct rtw89_btc_fbtc_btafh_v2 {
u8 afh_le_b[4];
} __packed;
+struct rtw89_btc_fbtc_btafh_v7 {
+ u8 fver;
+ u8 map_type;
+ u8 rsvd0;
+ u8 rsvd1;
+ u8 afh_l[4]; /*bit0:2402, bit1:2403.... bit31:2433 */
+ u8 afh_m[4]; /*bit0:2434, bit1:2435.... bit31:2465 */
+ u8 afh_h[4]; /*bit0:2466, bit1:2467.....bit14:2480 */
+ u8 afh_le_a[4];
+ u8 afh_le_b[4];
+} __packed;
+
struct rtw89_btc_fbtc_btdevinfo {
u8 fver; /* btc_ver::fcxbtdevinfo */
u8 rsvd;
@@ -2551,9 +2824,14 @@ struct rtw89_btc_trx_info {
u32 rx_err_ratio;
};
+union rtw89_btc_fbtc_slot_u {
+ struct rtw89_btc_fbtc_slot v1[CXST_MAX];
+ struct rtw89_btc_fbtc_slot_v7 v7[CXST_MAX];
+};
+
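The same versioned-union pattern recurs throughout this header (btver, btscan, gpio_dbg, mreg_val, cysta, cynullsta): one union per report, with the layout chosen from the negotiated firmware coex version. Roughly, with field names hedged against struct rtw89_btc_ver:

/* Sketch only: select the slot layout by the firmware coex version. */
static u16 example_slot_dur(struct rtw89_btc *btc, int sid)
{
	struct rtw89_btc_dm *dm = &btc->dm;

	if (btc->ver->fcxslots == 7)
		return le16_to_cpu(dm->slot.v7[sid].dur);

	return le16_to_cpu(dm->slot.v1[sid].dur);
}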
struct rtw89_btc_dm {
- struct rtw89_btc_fbtc_slot slot[CXST_MAX];
- struct rtw89_btc_fbtc_slot slot_now[CXST_MAX];
+ union rtw89_btc_fbtc_slot_u slot;
+ union rtw89_btc_fbtc_slot_u slot_now;
struct rtw89_btc_fbtc_tdma tdma;
struct rtw89_btc_fbtc_tdma tdma_now;
struct rtw89_mac_ax_coex_gnt gnt;
@@ -2569,6 +2847,8 @@ struct rtw89_btc_dm {
u32 update_slot_map;
u32 set_ant_path;
+ u32 e2g_slot_limit;
+ u32 e2g_slot_nulltx_time;
u32 wl_only: 1;
u32 wl_fw_cx_offload: 1;
@@ -2589,6 +2869,7 @@ struct rtw89_btc_dm {
u32 wl_btg_rx_rb: 2;
u16 slot_dur[CXST_MAX];
+ u16 bt_slot_flood;
u8 run_reason;
u8 run_action;
@@ -2596,6 +2877,8 @@ struct rtw89_btc_dm {
u8 wl_pre_agc: 2;
u8 wl_lna2: 1;
u8 wl_pre_agc_rb: 2;
+ u8 bt_select: 2; /* 0:s0, 1:s1, 2:s0 & s1, refer to enum btc_bt_index */
+ u8 slot_req_more: 1;
};
struct rtw89_btc_ctrl {
@@ -2643,6 +2926,7 @@ enum btf_fw_event_report {
BTC_RPT_TYPE_CYSTA,
BTC_RPT_TYPE_STEP,
BTC_RPT_TYPE_NULLSTA,
+ BTC_RPT_TYPE_FDDT, /* added when ver->fwevntrptl == 1 */
BTC_RPT_TYPE_MREG,
BTC_RPT_TYPE_GPIO_DBG,
BTC_RPT_TYPE_BT_VER,
@@ -2650,7 +2934,10 @@ enum btf_fw_event_report {
BTC_RPT_TYPE_BT_AFH,
BTC_RPT_TYPE_BT_DEVICE,
BTC_RPT_TYPE_TEST,
- BTC_RPT_TYPE_MAX = 31
+ BTC_RPT_TYPE_MAX = 31,
+
+ __BTC_RPT_TYPE_V0_SAME = BTC_RPT_TYPE_NULLSTA,
+ __BTC_RPT_TYPE_V0_MAX = 12,
};
enum rtw_btc_btf_reg_type {
@@ -2691,7 +2978,7 @@ struct rtw89_btc_rpt_fbtc_tdma {
struct rtw89_btc_rpt_fbtc_slots {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_slots finfo; /* info from fw */
+ union rtw89_btc_fbtc_slots_info finfo; /* info from fw */
};
struct rtw89_btc_rpt_fbtc_cysta {
@@ -2716,12 +3003,12 @@ struct rtw89_btc_rpt_fbtc_mreg {
struct rtw89_btc_rpt_fbtc_gpio_dbg {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_gpio_dbg finfo; /* info from fw */
+ union rtw89_btc_fbtc_gpio_dbg finfo; /* info from fw */
};
struct rtw89_btc_rpt_fbtc_btver {
struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
- struct rtw89_btc_fbtc_btver finfo; /* info from fw */
+ union rtw89_btc_fbtc_btver finfo; /* info from fw */
};
struct rtw89_btc_rpt_fbtc_btscan {
@@ -2792,6 +3079,7 @@ struct rtw89_btc_ver {
u8 fcxctrl;
u8 fcxinit;
+ u8 fwevntrptl;
u8 drvinfo_type;
u16 info_buf;
u8 max_role_num;
@@ -2821,6 +3109,7 @@ struct rtw89_btc {
u8 btg_pos;
u16 policy_len;
u16 policy_type;
+ u32 hubmsg_cnt;
bool bt_req_en;
bool update_policy_force;
bool lps;
@@ -2981,7 +3270,6 @@ struct rtw89_addr_cam_entry {
DECLARE_BITMAP(sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM);
u8 sec_ent_keyid[RTW89_SEC_CAM_IN_ADDR_CAM];
u8 sec_ent[RTW89_SEC_CAM_IN_ADDR_CAM];
- struct rtw89_sec_cam_entry *sec_entries[RTW89_SEC_CAM_IN_ADDR_CAM];
};
struct rtw89_bssid_cam_entry {
@@ -3117,11 +3405,13 @@ struct rtw89_vif {
bool chanctx_assigned; /* only valid when running with chanctx_ops */
enum rtw89_sub_entity_idx sub_entity_idx;
enum rtw89_reg_6ghz_power reg_6ghz_power;
+ struct rtw89_reg_6ghz_tpe reg_6ghz_tpe;
u8 mac_id;
u8 port;
u8 mac_addr[ETH_ALEN];
u8 bssid[ETH_ALEN];
+ __be32 ip_addr;
u8 phy_idx;
u8 mac_idx;
u8 net_type;
@@ -3875,10 +4165,12 @@ struct rtw89_chip_info {
u8 wde_qempty_acq_grpnum;
u8 wde_qempty_mgq_grpsel;
u32 rf_base_addr[2];
+ u8 support_macid_num;
u8 support_chanctx_num;
u8 support_bands;
u16 support_bandwidths;
bool support_unii4;
+ bool support_rnr;
bool ul_tb_waveform_ctrl;
bool ul_tb_pwr_diff;
bool hw_sec_hdr;
@@ -3954,7 +4246,7 @@ struct rtw89_chip_info {
const u32 *c2h_regs;
struct rtw89_reg_def c2h_counter_reg;
const struct rtw89_page_regs *page_regs;
- u32 wow_reason_reg;
+ const u32 *wow_reason_reg;
bool cfo_src_fd;
bool cfo_hw_comp;
const struct rtw89_reg_def *dcfo_comp;
@@ -3977,6 +4269,7 @@ union rtw89_bus_info {
struct rtw89_driver_info {
const struct rtw89_chip_info *chip;
+ const struct dmi_system_id *quirks;
union rtw89_bus_info bus;
};
@@ -4063,6 +4356,7 @@ enum rtw89_fw_feature {
RTW89_FW_FEATURE_NO_LPS_PG,
RTW89_FW_FEATURE_BEACON_FILTER,
RTW89_FW_FEATURE_MACID_PAUSE_SLEEP,
+ RTW89_FW_FEATURE_WOW_REASON_V1,
};
struct rtw89_fw_suit {
@@ -4165,6 +4459,7 @@ struct rtw89_cam_info {
DECLARE_BITMAP(sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
DECLARE_BITMAP(ba_cam_map, RTW89_MAX_BA_CAM_NUM);
struct rtw89_ba_cam_entry ba_cam_entry[RTW89_MAX_BA_CAM_NUM];
+ const struct rtw89_sec_cam_entry *sec_entries[RTW89_MAX_SEC_CAM_NUM];
};
enum rtw89_sar_sources {
@@ -4324,6 +4619,12 @@ enum rtw89_flags {
NUM_OF_RTW89_FLAGS,
};
+enum rtw89_quirks {
+ RTW89_QUIRK_PCI_BER,
+
+ NUM_OF_RTW89_QUIRKS,
+};
+
enum rtw89_pkt_drop_sel {
RTW89_PKT_DROP_SEL_MACID_BE_ONCE,
RTW89_PKT_DROP_SEL_MACID_BK_ONCE,
@@ -4394,7 +4695,12 @@ struct rtw89_dack_info {
bool msbk_timeout[RTW89_DACK_PATH_NR];
};
-#define RTW89_RFK_CHS_NR 3
+enum rtw89_rfk_chs_nrs {
+ __RTW89_RFK_CHS_NR_V0 = 2,
+ __RTW89_RFK_CHS_NR_V1 = 3,
+
+ RTW89_RFK_CHS_NR = __RTW89_RFK_CHS_NR_V1,
+};
struct rtw89_rfk_mcc_info {
u8 ch[RTW89_RFK_CHS_NR];
@@ -4473,6 +4779,8 @@ struct rtw89_dpk_info {
u8 cur_idx[RTW89_DPK_RF_PATH];
u8 cur_k_set;
struct rtw89_dpk_bkup_para bp[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
+ u8 max_dpk_txagc[RTW89_DPK_RF_PATH];
+ u32 dpk_order[RTW89_DPK_RF_PATH];
};
struct rtw89_fem_info {
@@ -4641,11 +4949,16 @@ struct rtw89_regd {
};
#define RTW89_REGD_MAX_COUNTRY_NUM U8_MAX
+#define RTW89_5GHZ_UNII4_CHANNEL_NUM 3
+#define RTW89_5GHZ_UNII4_START_INDEX 25
struct rtw89_regulatory_info {
const struct rtw89_regd *regd;
enum rtw89_reg_6ghz_power reg_6ghz_power;
+ struct rtw89_reg_6ghz_tpe reg_6ghz_tpe;
+ DECLARE_BITMAP(block_unii4, RTW89_REGD_MAX_COUNTRY_NUM);
DECLARE_BITMAP(block_6ghz, RTW89_REGD_MAX_COUNTRY_NUM);
+ DECLARE_BITMAP(block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM);
};
enum rtw89_ifs_clm_application {
@@ -4925,11 +5238,61 @@ struct rtw89_wow_cam_info {
bool valid;
};
+struct rtw89_wow_key_info {
+ u8 ptk_tx_iv[8];
+ u8 valid_check;
+ u8 symbol_check_en;
+ u8 gtk_keyidx;
+ u8 rsvd[5];
+ u8 ptk_rx_iv[8];
+ u8 gtk_rx_iv[4][8];
+} __packed;
+
+struct rtw89_wow_gtk_info {
+ u8 kck[32];
+ u8 kek[32];
+ u8 tk1[16];
+ u8 txmickey[8];
+ u8 rxmickey[8];
+ __le32 igtk_keyid;
+ __le64 ipn;
+ u8 igtk[2][32];
+ u8 psk[32];
+} __packed;
+
+struct rtw89_wow_aoac_report {
+ u8 rpt_ver;
+ u8 sec_type;
+ u8 key_idx;
+ u8 pattern_idx;
+ u8 rekey_ok;
+ u8 ptk_tx_iv[8];
+ u8 eapol_key_replay_count[8];
+ u8 gtk[32];
+ u8 ptk_rx_iv[8];
+ u8 gtk_rx_iv[4][8];
+ u64 igtk_key_id;
+ u64 igtk_ipn;
+ u8 igtk[32];
+ u8 csa_pri_ch;
+ u8 csa_bw;
+ u8 csa_ch_offset;
+ u8 csa_chsw_failed;
+ u8 csa_ch_band;
+};
+
struct rtw89_wow_param {
struct ieee80211_vif *wow_vif;
DECLARE_BITMAP(flags, RTW89_WOW_FLAG_NUM);
struct rtw89_wow_cam_info patterns[RTW89_MAX_PATTERN_NUM];
+ struct rtw89_wow_key_info key_info;
+ struct rtw89_wow_gtk_info gtk_info;
+ struct rtw89_wow_aoac_report aoac_rpt;
u8 pattern_cnt;
+ u8 ptk_alg;
+ u8 gtk_alg;
+ u8 ptk_keyidx;
+ u8 akm;
};
struct rtw89_mcc_limit {
@@ -5084,6 +5447,7 @@ struct rtw89_dev {
DECLARE_BITMAP(mac_id_map, RTW89_MAX_MAC_ID_NUM);
DECLARE_BITMAP(flags, NUM_OF_RTW89_FLAGS);
DECLARE_BITMAP(pkt_offload, RTW89_MAX_PKT_OFLD_NUM);
+ DECLARE_BITMAP(quirks, NUM_OF_RTW89_QUIRKS);
struct rtw89_phy_stat phystat;
struct rtw89_rfk_wait_info rfk_wait;
@@ -5137,7 +5501,7 @@ struct rtw89_dev {
struct rtw89_wow_param wow;
/* napi structure */
- struct net_device netdev;
+ struct net_device *netdev;
struct napi_struct napi;
int napi_budget_countdown;
@@ -6076,6 +6440,16 @@ static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev)
}
}
+static inline bool rtw89_is_rtl885xb(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+
+ if (chip_id == RTL8852B || chip_id == RTL8851B || chip_id == RTL8852BT)
+ return true;
+
+ return false;
+}
+
int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel);
int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
@@ -6109,7 +6483,7 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev,
u8 *data, u32 data_offset);
void rtw89_core_napi_start(struct rtw89_dev *rtwdev);
void rtw89_core_napi_stop(struct rtw89_dev *rtwdev);
-void rtw89_core_napi_init(struct rtw89_dev *rtwdev);
+int rtw89_core_napi_init(struct rtw89_dev *rtwdev);
void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev);
int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif,
@@ -6129,6 +6503,7 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
void rtw89_core_set_tid_config(struct rtw89_dev *rtwdev,
struct ieee80211_sta *sta,
struct cfg80211_tid_config *tid_config);
+void rtw89_check_quirks(struct rtw89_dev *rtwdev, const struct dmi_system_id *quirks);
int rtw89_core_init(struct rtw89_dev *rtwdev);
void rtw89_core_deinit(struct rtw89_dev *rtwdev);
int rtw89_core_register(struct rtw89_dev *rtwdev);
@@ -6137,6 +6512,8 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device,
u32 bus_data_size,
const struct rtw89_chip_info *chip);
void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev);
+u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev);
+void rtw89_release_mac_id(struct rtw89_dev *rtwdev, u8 mac_id);
void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev);
void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef);
void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef,
@@ -6173,8 +6550,8 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
const u8 *mac_addr, bool hw_scan);
void rtw89_core_scan_complete(struct rtw89_dev *rtwdev,
struct ieee80211_vif *vif, bool hw_scan);
-void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, bool active);
+int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool active);
void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event);
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
index affffc4092ba..9e1353cce9cc 100644
--- a/drivers/net/wireless/realtek/rtw89/debug.c
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -818,6 +818,28 @@ static const struct dbgfs_txpwr_table *dbgfs_txpwr_tables[RTW89_CHIP_GEN_NUM] =
[RTW89_CHIP_BE] = &dbgfs_txpwr_table_be,
};
+static
+void rtw89_debug_priv_txpwr_table_get_regd(struct seq_file *m,
+ struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ const struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_reg_6ghz_tpe *tpe6 = &regulatory->reg_6ghz_tpe;
+
+ seq_printf(m, "[Chanctx] band %u, ch %u, bw %u\n",
+ chan->band_type, chan->channel, chan->band_width);
+
+ seq_puts(m, "[Regulatory] ");
+ __print_regd(m, rtwdev, chan);
+
+ if (chan->band_type == RTW89_BAND_6G) {
+ seq_printf(m, "[reg6_pwr_type] %u\n", regulatory->reg_6ghz_power);
+
+ if (tpe6->valid)
+ seq_printf(m, "[TPE] %d dBm\n", tpe6->constraint);
+ }
+}
+
static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
{
struct rtw89_debugfs_priv *debugfs_priv = m->private;
@@ -831,8 +853,7 @@ static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
rtw89_leave_ps_mode(rtwdev);
chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
- seq_puts(m, "[Regulatory] ");
- __print_regd(m, rtwdev, chan);
+ rtw89_debug_priv_txpwr_table_get_regd(m, rtwdev, chan);
seq_puts(m, "[SAR]\n");
rtw89_print_sar(m, rtwdev, chan->freq);
@@ -2996,7 +3017,7 @@ static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel)
sel >= RTW89_DBG_PORT_SEL_PCIE_TXDMA &&
sel <= RTW89_DBG_PORT_SEL_PCIE_MISC2)
return false;
- if (rtwdev->chip->chip_id == RTL8852B &&
+ if (rtw89_is_rtl885xb(rtwdev) &&
sel >= RTW89_DBG_PORT_SEL_PTCL_C1 &&
sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C1)
return false;
@@ -3531,7 +3552,7 @@ static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
case RX_ENC_HE:
seq_printf(m, "HE %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
- he_gi_str[rate->he_gi] : "N/A");
+ he_gi_str[status->he_gi] : "N/A");
break;
case RX_ENC_EHT:
seq_printf(m, "EHT %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
@@ -3645,17 +3666,21 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
}
static void rtw89_dump_addr_cam(struct seq_file *m,
+ struct rtw89_dev *rtwdev,
struct rtw89_addr_cam_entry *addr_cam)
{
- struct rtw89_sec_cam_entry *sec_entry;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ const struct rtw89_sec_cam_entry *sec_entry;
+ u8 sec_cam_idx;
int i;
seq_printf(m, "\taddr_cam_idx=%u\n", addr_cam->addr_cam_idx);
seq_printf(m, "\t-> bssid_cam_idx=%u\n", addr_cam->bssid_cam_idx);
seq_printf(m, "\tsec_cam_bitmap=%*ph\n", (int)sizeof(addr_cam->sec_cam_map),
addr_cam->sec_cam_map);
- for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) {
- sec_entry = addr_cam->sec_entries[i];
+ for_each_set_bit(i, addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM) {
+ sec_cam_idx = addr_cam->sec_ent[i];
+ sec_entry = cam_info->sec_entries[sec_cam_idx];
if (!sec_entry)
continue;
seq_printf(m, "\tsec[%d]: sec_cam_idx %u", i, sec_entry->sec_cam_idx);
@@ -3694,12 +3719,13 @@ static
void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
struct seq_file *m = (struct seq_file *)data;
struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
seq_printf(m, "VIF [%d] %pM\n", rtwvif->mac_id, rtwvif->mac_addr);
seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx);
- rtw89_dump_addr_cam(m, &rtwvif->addr_cam);
+ rtw89_dump_addr_cam(m, rtwdev, &rtwvif->addr_cam);
rtw89_dump_pkt_offload(m, &rtwvif->general_pkt_list, "\tpkt_ofld[GENERAL]: ");
}
@@ -3726,11 +3752,12 @@ static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_sta *rtwsta)
static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta)
{
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_dev *rtwdev = rtwsta->rtwdev;
struct seq_file *m = (struct seq_file *)data;
seq_printf(m, "STA [%d] %pM %s\n", rtwsta->mac_id, sta->addr,
sta->tdls ? "(TDLS)" : "");
- rtw89_dump_addr_cam(m, &rtwsta->addr_cam);
+ rtw89_dump_addr_cam(m, rtwdev, &rtwsta->addr_cam);
rtw89_dump_ba_cam(m, rtwsta);
}
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
index 185cd339c085..fbe08c162b93 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.c
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include <linux/if_arp.h>
#include "cam.h"
#include "chan.h"
#include "coex.h"
@@ -12,6 +13,28 @@
#include "ps.h"
#include "reg.h"
#include "util.h"
+#include "wow.h"
+
+struct rtw89_eapol_2_of_2 {
+ u8 gtkbody[14];
+ u8 key_des_ver;
+ u8 rsvd[92];
+} __packed;
+
+struct rtw89_sa_query {
+ u8 category;
+ u8 action;
+} __packed;
+
+struct rtw89_arp_rsp {
+ u8 llc_hdr[sizeof(rfc1042_header)];
+ __be16 llc_type;
+ struct arphdr arp_hdr;
+ u8 sender_hw[ETH_ALEN];
+ __be32 sender_ip;
+ u8 target_hw[ETH_ALEN];
+ __be32 target_ip;
+} __packed;
static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C};
@@ -437,7 +460,7 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
const u8 *mfw = firmware->data;
u32 mfw_len = firmware->size;
const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
- const struct rtw89_mfw_info *mfw_info;
+ const struct rtw89_mfw_info *mfw_info = NULL, *tmp;
int i;
if (mfw_hdr->sig != RTW89_MFW_SIG) {
@@ -451,15 +474,27 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
}
for (i = 0; i < mfw_hdr->fw_nr; i++) {
- mfw_info = &mfw_hdr->info[i];
- if (mfw_info->type == type) {
- if (mfw_info->cv == rtwdev->hal.cv && !mfw_info->mp)
- goto found;
- if (type == RTW89_FW_LOGFMT)
- goto found;
+ tmp = &mfw_hdr->info[i];
+ if (tmp->type != type)
+ continue;
+
+ if (type == RTW89_FW_LOGFMT) {
+ mfw_info = tmp;
+ goto found;
+ }
+
+ /* WiFi firmware entries in the firmware file are not sorted by version,
+ * so scan them all and take the closest version that is equal to or
+ * lower than the chip version.
+ */
+ if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) {
+ if (!mfw_info || mfw_info->cv < tmp->cv)
+ mfw_info = tmp;
}
}
+ if (mfw_info)
+ goto found;
+
if (!nowarn)
rtw89_err(rtwdev, "no suitable firmware found\n");
return -ENOENT;
@@ -581,10 +616,16 @@ int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev,
struct rtw89_hal *hal = &rtwdev->hal;
struct rtw89_fw_suit *fw_suit;
- if (hal->cv != elm->u.bbmcu.cv)
+ /* BB MCU versions appear in decreasing order in the firmware file, so
+ * the first version that is equal to or lower than the chip version is
+ * also the closest match.
+ */
+ if (hal->cv < elm->u.bbmcu.cv)
return 1; /* ignore this element */
fw_suit = rtw89_fw_suit_get(rtwdev, type);
+ if (fw_suit->data)
+ return 1; /* ignore this element (a firmware is taken already) */
+
fw_suit->data = elm->u.bbmcu.contents;
fw_suit->size = le32_to_cpu(elm->size);
@@ -634,9 +675,12 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = {
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER),
+ __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP),
__CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER),
+ __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1),
};
static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw,
@@ -1349,13 +1393,12 @@ dump:
static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
{
u32 val32;
- u16 val16;
val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
- val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
- rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);
+ val32 = rtw89_read32(rtwdev, R_AX_BOOT_DBG);
+ rtw89_err(rtwdev, "[ERR]fwdl 0x83F0 = 0x%x\n", val32);
rtw89_fw_prog_cnt_dump(rtwdev);
}
@@ -1394,8 +1437,9 @@ static int rtw89_fw_download_suit(struct rtw89_dev *rtwdev,
return 0;
}
-int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
- bool include_bb)
+static
+int __rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+ bool include_bb)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
struct rtw89_fw_info *fw_info = &rtwdev->fw;
@@ -1433,7 +1477,7 @@ int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
ret = rtw89_fw_check_rdy(rtwdev, RTW89_FWDL_CHECK_FREERTOS_DONE);
if (ret) {
rtw89_warn(rtwdev, "download firmware fail\n");
- return ret;
+ goto fwdl_err;
}
return ret;
@@ -1443,6 +1487,21 @@ fwdl_err:
return ret;
}
+int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+ bool include_bb)
+{
+ int retry;
+ int ret;
+
+ for (retry = 0; retry < 5; retry++) {
+ ret = __rtw89_fw_download(rtwdev, type, include_bb);
+ if (!ret)
+ return 0;
+ }
+
+ return ret;
+}
+
int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
{
struct rtw89_fw_info *fw = &rtwdev->fw;
@@ -1710,28 +1769,30 @@ fail:
return ret;
}
-#define H2C_DCTL_SEC_CAM_LEN 68
int rtw89_fw_h2c_dctl_sec_cam_v1(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
struct rtw89_sta *rtwsta)
{
+ struct rtw89_h2c_dctlinfo_ud_v1 *h2c;
+ u32 len = sizeof(*h2c);
struct sk_buff *skb;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_DCTL_SEC_CAM_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
rtw89_err(rtwdev, "failed to alloc skb for dctl sec cam\n");
return -ENOMEM;
}
- skb_put(skb, H2C_DCTL_SEC_CAM_LEN);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_dctlinfo_ud_v1 *)skb->data;
- rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, skb->data);
+ rtw89_cam_fill_dctl_sec_cam_info_v1(rtwdev, rtwvif, rtwsta, h2c);
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
H2C_CL_MAC_FR_EXCHG,
H2C_FUNC_MAC_DCTLINFO_UD_V1, 0, 0,
- H2C_DCTL_SEC_CAM_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -2129,6 +2190,128 @@ fail:
return ret;
}
+static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ static const u8 gtkbody[] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00, 0x88,
+ 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03};
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_eapol_2_of_2 *eapol_pkt;
+ struct ieee80211_hdr_3addr *hdr;
+ struct sk_buff *skb;
+ u8 key_des_ver;
+
+ if (rtw_wow->ptk_alg == 3)
+ key_des_ver = 1;
+ else if (rtw_wow->akm == 1 || rtw_wow->akm == 2)
+ key_des_ver = 2;
+ else if (rtw_wow->akm > 2 && rtw_wow->akm < 7)
+ key_des_ver = 3;
+ else
+ key_des_ver = 0;
+
+ skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt));
+ if (!skb)
+ return NULL;
+
+ hdr = skb_put_zero(skb, sizeof(*hdr));
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_PROTECTED);
+ ether_addr_copy(hdr->addr1, bss_conf->bssid);
+ ether_addr_copy(hdr->addr2, vif->addr);
+ ether_addr_copy(hdr->addr3, bss_conf->bssid);
+
+ skb_put_zero(skb, sec_hdr_len);
+
+ eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt));
+ memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody));
+ eapol_pkt->key_des_ver = key_des_ver;
+
+ return skb;
+}
+
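For reference, the key_des_ver selection above appears to follow the IEEE 802.11 EAPOL key descriptor versions; the driver-specific meanings of ptk_alg and akm are inferred here and should be treated as hedged:

/*
 * ptk_alg == 3                  -> descriptor version 1 (TKIP, HMAC-MD5/ARC4)
 * akm 1 or 2 (802.1X / PSK)     -> descriptor version 2 (HMAC-SHA1, AES keywrap)
 * akm 3..6 (SHA-256 AKM suites) -> descriptor version 3 (AES-128-CMAC, AES keywrap)
 * anything else                 -> 0 (descriptor version field unused, AKM-defined)
 */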
+static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
+ struct ieee80211_hdr_3addr *hdr;
+ struct rtw89_sa_query *sa_query;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query));
+ if (!skb)
+ return NULL;
+
+ hdr = skb_put_zero(skb, sizeof(*hdr));
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION |
+ IEEE80211_FCTL_PROTECTED);
+ ether_addr_copy(hdr->addr1, bss_conf->bssid);
+ ether_addr_copy(hdr->addr2, vif->addr);
+ ether_addr_copy(hdr->addr3, bss_conf->bssid);
+
+ skb_put_zero(skb, sec_hdr_len);
+
+ sa_query = skb_put_zero(skb, sizeof(*sa_query));
+ sa_query->category = WLAN_CATEGORY_SA_QUERY;
+ sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE;
+
+ return skb;
+}
+
+static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev);
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_hdr_3addr *hdr;
+ struct rtw89_arp_rsp *arp_skb;
+ struct arphdr *arp_hdr;
+ struct sk_buff *skb;
+ __le16 fc;
+
+ skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb));
+ if (!skb)
+ return NULL;
+
+ hdr = skb_put_zero(skb, sizeof(*hdr));
+
+ if (rtw_wow->ptk_alg)
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS |
+ IEEE80211_FCTL_PROTECTED);
+ else
+ fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS);
+
+ hdr->frame_control = fc;
+ ether_addr_copy(hdr->addr1, rtwvif->bssid);
+ ether_addr_copy(hdr->addr2, rtwvif->mac_addr);
+ ether_addr_copy(hdr->addr3, rtwvif->bssid);
+
+ skb_put_zero(skb, sec_hdr_len);
+
+ arp_skb = skb_put_zero(skb, sizeof(*arp_skb));
+ memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
+ arp_skb->llc_type = htons(ETH_P_ARP);
+
+ arp_hdr = &arp_skb->arp_hdr;
+ arp_hdr->ar_hrd = htons(ARPHRD_ETHER);
+ arp_hdr->ar_pro = htons(ETH_P_IP);
+ arp_hdr->ar_hln = ETH_ALEN;
+ arp_hdr->ar_pln = 4;
+ arp_hdr->ar_op = htons(ARPOP_REPLY);
+
+ ether_addr_copy(arp_skb->sender_hw, rtwvif->mac_addr);
+ arp_skb->sender_ip = rtwvif->ip_addr;
+
+ return skb;
+}
+
static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif,
enum rtw89_fw_pkt_ofld_type type,
@@ -2156,6 +2339,15 @@ static int rtw89_fw_h2c_add_general_pkt(struct rtw89_dev *rtwdev,
case RTW89_PKT_OFLD_TYPE_QOS_NULL:
skb = ieee80211_nullfunc_get(rtwdev->hw, vif, -1, true);
break;
+ case RTW89_PKT_OFLD_TYPE_EAPOL_KEY:
+ skb = rtw89_eapol_get(rtwdev, rtwvif);
+ break;
+ case RTW89_PKT_OFLD_TYPE_SA_QUERY:
+ skb = rtw89_sa_query_get(rtwdev, rtwvif);
+ break;
+ case RTW89_PKT_OFLD_TYPE_ARP_RSP:
+ skb = rtw89_arp_response_get(rtwdev, rtwvif);
+ break;
default:
goto err;
}
@@ -2304,6 +2496,7 @@ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
struct rtw89_h2c_lps_ch_info *h2c;
u32 len = sizeof(*h2c);
struct sk_buff *skb;
+ u32 done;
int ret;
if (chip->chip_gen != RTW89_CHIP_BE)
@@ -2327,12 +2520,18 @@ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM,
H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len);
+ rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
rtw89_err(rtwdev, "failed to send h2c\n");
goto fail;
}
+ ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000,
+ true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT);
+ if (ret)
+ rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n");
+
return 0;
fail:
dev_kfree_skb_any(skb);
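For readers unfamiliar with read_poll_timeout() from linux/iopoll.h: the call added above re-reads R_CHK_LPS_STAT every 50 us for up to 5 ms, sleeping before the first read, until the masked value turns non-zero. A rough open-coded equivalent (the real macro uses ktime; this sketch uses jiffies for brevity):

/* Rough open-coded equivalent of the read_poll_timeout() call above. */
static int example_poll_lps_stat(struct rtw89_dev *rtwdev)
{
	unsigned long expire = jiffies + usecs_to_jiffies(5000);
	u32 done;

	do {
		usleep_range(50, 100);
		done = rtw89_phy_read32_mask(rtwdev, R_CHK_LPS_STAT,
					     B_CHK_LPS_STAT);
		if (done)
			return 0;
	} while (time_before(jiffies, expire));

	return -ETIMEDOUT;
}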
@@ -2595,11 +2794,11 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
if (ppe16 != 7 && ppe8 == 7)
- pads[i] = 2;
+ pads[i] = RTW89_PE_DURATION_16;
else if (ppe8 != 7)
- pads[i] = 1;
+ pads[i] = RTW89_PE_DURATION_8;
else
- pads[i] = 0;
+ pads[i] = RTW89_PE_DURATION_0;
}
}
@@ -2732,11 +2931,11 @@ static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev,
ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
if (ppe16 != 7 && ppe8 == 7)
- pads[i] = 2;
+ pads[i] = RTW89_PE_DURATION_16_20;
else if (ppe8 != 7)
- pads[i] = 1;
+ pads[i] = RTW89_PE_DURATION_8;
else
- pads[i] = 0;
+ pads[i] = RTW89_PE_DURATION_0;
}
}
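These remapped constants reflect the HE/EHT PPE-threshold rule: a threshold value of 7 means no packet extension is needed at that constellation, and the enum values name nominal packet padding durations. The implied durations, hedged as an illustration:

/* Sketch: nominal packet-extension durations behind enum rtw89_pe_duration
 * (RTW89_PE_DURATION_16_20 reads as 16 us on HE and 20 us on EHT).
 */
static const u8 example_pe_us[] = {
	[RTW89_PE_DURATION_0]     = 0,
	[RTW89_PE_DURATION_8]     = 8,
	[RTW89_PE_DURATION_16]    = 16,
	[RTW89_PE_DURATION_16_20] = 16,	/* 20 us for EHT */
};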
@@ -4120,6 +4319,48 @@ fail:
return ret;
}
+int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_role_info_v8 *role = &btc->cx.wl.role_info_v8;
+ struct rtw89_h2c_cxrole_v8 *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_cxrole_v8 *)skb->data;
+
+ h2c->hdr.type = type;
+ h2c->hdr.len = len - H2C_LEN_CXDRVHDR_V7;
+ memcpy(&h2c->_u8, role, sizeof(h2c->_u8));
+ h2c->_u32.role_map = cpu_to_le32(role->role_map);
+ h2c->_u32.mrole_type = cpu_to_le32(role->mrole_type);
+ h2c->_u32.mrole_noa_duration = cpu_to_le32(role->mrole_noa_duration);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type)
{
@@ -4631,6 +4872,10 @@ static void rtw89_scan_get_6g_disabled_chan(struct rtw89_dev *rtwdev,
u8 i, idx;
sband = rtwdev->hw->wiphy->bands[NL80211_BAND_6GHZ];
+ if (!sband) {
+ option->prohib_chan = U64_MAX;
+ return;
+ }
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
@@ -4647,9 +4892,11 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
{
struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info;
struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct cfg80211_scan_request *req = rtwvif->scan_req;
struct rtw89_h2c_scanofld_be_macc_role *macc_role;
struct rtw89_chan *op = &scan_info->op_chan;
struct rtw89_h2c_scanofld_be_opch *opch;
+ struct rtw89_pktofld_info *pkt_info;
struct rtw89_h2c_scanofld_be *h2c;
struct sk_buff *skb;
u8 macc_role_size = sizeof(*macc_role) * option->num_macc_role;
@@ -4674,6 +4921,16 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
h2c = (struct rtw89_h2c_scanofld_be *)skb->data;
ptr = skb->data;
+ memset(probe_id, RTW89_SCANOFLD_PKT_NONE, sizeof(probe_id));
+
+ list_for_each_entry(pkt_info, &scan_info->pkt_list[NL80211_BAND_6GHZ], list) {
+ if (pkt_info->wildcard_6ghz) {
+ /* Provide wildcard as template */
+ probe_id[NL80211_BAND_6GHZ] = pkt_info->id;
+ break;
+ }
+ }
+
h2c->w0 = le32_encode_bits(option->operation, RTW89_H2C_SCANOFLD_BE_W0_OP) |
le32_encode_bits(option->scan_mode,
RTW89_H2C_SCANOFLD_BE_W0_SCAN_MODE) |
@@ -4709,6 +4966,15 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev,
RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW);
h2c->w7 = le32_encode_bits(option->prohib_chan >> 32,
RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH);
+ if (req->no_cck) {
+ h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE);
+ h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6,
+ RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) |
+ le32_encode_bits(RTW89_HW_RATE_OFDM6,
+ RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) |
+ le32_encode_bits(RTW89_HW_RATE_OFDM6,
+ RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ);
+ }
ptr += sizeof(*h2c);
for (i = 0; i < option->num_macc_role; i++) {
@@ -5511,6 +5777,7 @@ static bool rtw89_is_6ghz_wildcard_probe_req(struct rtw89_dev *rtwdev,
info->ssid_len = req->ssids[ssid_idx].ssid_len;
return false;
} else {
+ info->wildcard_6ghz = true;
return true;
}
}
@@ -5545,12 +5812,8 @@ static int rtw89_append_probe_req_ie(struct rtw89_dev *rtwdev,
goto out;
}
- if (rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band,
- ssid_idx)) {
- kfree_skb(new);
- kfree(info);
- goto out;
- }
+ rtw89_is_6ghz_wildcard_probe_req(rtwdev, rtwvif, info, band,
+ ssid_idx);
ret = rtw89_fw_h2c_add_pkt_offload(rtwdev, &info->id, new);
if (ret) {
@@ -5708,6 +5971,10 @@ static void rtw89_hw_scan_add_chan(struct rtw89_dev *rtwdev, int chan_type,
continue;
else if (info->channel_6ghz && probe_count != 0)
ch_info->period += RTW89_CHANNEL_TIME_6G;
+
+ if (info->wildcard_6ghz)
+ continue;
+
ch_info->pkt_id[probe_count++] = info->id;
if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
break;
@@ -5762,6 +6029,10 @@ static void rtw89_hw_scan_add_chan_be(struct rtw89_dev *rtwdev, int chan_type,
if (info->channel_6ghz &&
ch_info->pri_ch != info->channel_6ghz)
continue;
+
+ if (info->wildcard_6ghz)
+ continue;
+
ch_info->pkt_id[probe_count++] = info->id;
if (probe_count >= RTW89_SCANOFLD_MAX_SSID)
break;
@@ -6026,7 +6297,14 @@ void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
ret = rtw89_hw_scan_offload(rtwdev, vif, false);
if (ret)
- rtw89_hw_scan_complete(rtwdev, vif, true);
+ rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret);
+
+ /* Indicate ieee80211_scan_completed() before returning, which is safe
+ * because the scan abort command always waits for completion of
+ * RTW89_SCAN_END_SCAN_NOTIFY, so ieee80211_stop() can flush the scan
+ * work properly.
+ */
+ rtw89_hw_scan_complete(rtwdev, vif, true);
}
static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev)
@@ -6228,6 +6506,57 @@ fail:
return ret;
}
+int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool enable)
+{
+ struct rtw89_h2c_arp_offload *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ u8 pkt_id = 0;
+ int ret;
+
+ if (enable) {
+ ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_ARP_RSP,
+ &pkt_id);
+ if (ret)
+ return ret;
+ }
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for arp offload\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_arp_offload *)skb->data;
+
+ h2c->w0 = le32_encode_bits(enable, RTW89_H2C_ARP_OFFLOAD_W0_ENABLE) |
+ le32_encode_bits(0, RTW89_H2C_ARP_OFFLOAD_W0_ACTION) |
+ le32_encode_bits(rtwvif->mac_id, RTW89_H2C_ARP_OFFLOAD_W0_MACID) |
+ le32_encode_bits(pkt_id, RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_ARP_OFLD, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
#define H2C_DISCONNECT_DETECT_LEN 8
int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool enable)
@@ -6273,30 +6602,38 @@ fail:
return ret;
}
-#define H2C_WOW_GLOBAL_LEN 8
int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool enable)
{
- struct sk_buff *skb;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_h2c_wow_global *h2c;
u8 macid = rtwvif->mac_id;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
int ret;
- skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WOW_GLOBAL_LEN);
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
if (!skb) {
- rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
+ rtw89_err(rtwdev, "failed to alloc skb for wow global\n");
return -ENOMEM;
}
- skb_put(skb, H2C_WOW_GLOBAL_LEN);
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_wow_global *)skb->data;
- RTW89_SET_WOW_GLOBAL_ENABLE(skb->data, enable);
- RTW89_SET_WOW_GLOBAL_MAC_ID(skb->data, macid);
+ h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GLOBAL_W0_ENABLE) |
+ le32_encode_bits(macid, RTW89_H2C_WOW_GLOBAL_W0_MAC_ID) |
+ le32_encode_bits(rtw_wow->ptk_alg,
+ RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO) |
+ le32_encode_bits(rtw_wow->gtk_alg,
+ RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO);
+ h2c->key_info = rtw_wow->key_info;
rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
H2C_CAT_MAC,
H2C_CL_MAC_WOW,
H2C_FUNC_WOW_GLOBAL, 0, 1,
- H2C_WOW_GLOBAL_LEN);
+ len);
ret = rtw89_h2c_tx(rtwdev, skb, false);
if (ret) {
@@ -6324,7 +6661,7 @@ int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, H2C_WAKEUP_CTRL_LEN);
if (!skb) {
- rtw89_err(rtwdev, "failed to alloc skb for keep alive\n");
+ rtw89_err(rtwdev, "failed to alloc skb for wakeup ctrl\n");
return -ENOMEM;
}
@@ -6411,6 +6748,110 @@ fail:
return ret;
}
+int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ bool enable)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
+ struct rtw89_h2c_wow_gtk_ofld *h2c;
+ u8 macid = rtwvif->mac_id;
+ u32 len = sizeof(*h2c);
+ u8 pkt_id_sa_query = 0;
+ struct sk_buff *skb;
+ u8 pkt_id_eapol = 0;
+ int ret;
+
+ if (!rtw_wow->gtk_alg)
+ return 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for gtk ofld\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+ h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data;
+
+ if (!enable)
+ goto hdr;
+
+ ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_EAPOL_KEY,
+ &pkt_id_eapol);
+ if (ret)
+ goto fail;
+
+ if (gtk_info->igtk_keyid) {
+ ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif,
+ RTW89_PKT_OFLD_TYPE_SA_QUERY,
+ &pkt_id_sa_query);
+ if (ret)
+ goto fail;
+ }
+
+ /* not support TKIP yet */
+ h2c->w0 = le32_encode_bits(enable, RTW89_H2C_WOW_GTK_OFLD_W0_EN) |
+ le32_encode_bits(0, RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN) |
+ le32_encode_bits(gtk_info->igtk_keyid ? 1 : 0,
+ RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN) |
+ le32_encode_bits(macid, RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID) |
+ le32_encode_bits(pkt_id_eapol, RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID);
+ h2c->w1 = le32_encode_bits(gtk_info->igtk_keyid ? pkt_id_sa_query : 0,
+ RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID) |
+ le32_encode_bits(rtw_wow->akm, RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT);
+ h2c->gtk_info = rtw_wow->gtk_info;
+
+hdr:
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_GTK_OFLD, 0, 1,
+ len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+ return 0;
+
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ struct rtw89_h2c_wow_aoac *h2c;
+ u32 len = sizeof(*h2c);
+ struct sk_buff *skb;
+ unsigned int cond;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(rtwdev, len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for aoac\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, len);
+
+ /* This H2C only notifies the firmware to generate the AOAC report C2H;
+ * it carries no parameters.
+ */
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_WOW,
+ H2C_FUNC_AOAC_REPORT_REQ, 1, 0,
+ len);
+
+ cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ);
+ return rtw89_h2c_tx_and_wait(rtwdev, skb, wait, cond);
+}
+
/* Return < 0 if failures happen while waiting for the condition.
* Return 0 when waiting for the condition succeeds.
* Return > 0 if the wait is considered unreachable due to driver/FW design,
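
The two halves of the AOAC flow in this series (the H2C request above and the C2H handler added later in mac.c) meet at the same wait condition. The following is a minimal sketch of a hypothetical caller, not part of this patch; it assumes the existing RTW89_DBG_WOW debug flag and the usual resume-path locking, and only rtw89_fw_h2c_wow_request_aoac() and the aoac_rpt fields come from this series:

/* Hypothetical caller -- a sketch, not part of this patch. */
static int rtw89_wow_get_aoac_sketch(struct rtw89_dev *rtwdev)
{
	struct rtw89_wow_aoac_report *aoac_rpt = &rtwdev->wow.aoac_rpt;
	int ret;

	/* Sends H2C_FUNC_AOAC_REPORT_REQ and blocks on the condition
	 * RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ) until the AOAC
	 * C2H handler (rtw89_mac_c2h_wow_aoac_rpt, below) completes it.
	 */
	ret = rtw89_fw_h2c_wow_request_aoac(rtwdev);
	if (ret)
		return ret;

	/* The report is now populated; e.g. rekey_ok tells whether the
	 * firmware completed a GTK rekey while the host slept.
	 */
	rtw89_debug(rtwdev, RTW89_DBG_WOW, "AOAC rekey_ok=%u\n",
		    aoac_rpt->rekey_ok);
	return 0;
}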
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
index 44311f65b4fa..c3b4324c621c 100644
--- a/drivers/net/wireless/realtek/rtw89/fw.h
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -48,6 +48,32 @@ struct rtw89_c2hreg_phycap {
#define RTW89_C2HREG_PHYCAP_W3_ANT_TX_NUM GENMASK(15, 8)
#define RTW89_C2HREG_PHYCAP_W3_ANT_RX_NUM GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_1_W0_KEY_IDX GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_0 GENMASK(7, 0)
+#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_1 GENMASK(15, 8)
+#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_2 GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_1_W1_IV_3 GENMASK(31, 24)
+#define RTW89_C2HREG_AOAC_RPT_1_W2_IV_4 GENMASK(7, 0)
+#define RTW89_C2HREG_AOAC_RPT_1_W2_IV_5 GENMASK(15, 8)
+#define RTW89_C2HREG_AOAC_RPT_1_W2_IV_6 GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_1_W2_IV_7 GENMASK(31, 24)
+#define RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_0 GENMASK(7, 0)
+#define RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_1 GENMASK(15, 8)
+#define RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_2 GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_3 GENMASK(31, 24)
+#define RTW89_C2HREG_AOAC_RPT_2_W0_PTK_IV_4 GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_2_W0_PTK_IV_5 GENMASK(31, 24)
+#define RTW89_C2HREG_AOAC_RPT_2_W1_PTK_IV_6 GENMASK(7, 0)
+#define RTW89_C2HREG_AOAC_RPT_2_W1_PTK_IV_7 GENMASK(15, 8)
+#define RTW89_C2HREG_AOAC_RPT_2_W1_IGTK_IPN_IV_0 GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_2_W1_IGTK_IPN_IV_1 GENMASK(31, 24)
+#define RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_2 GENMASK(7, 0)
+#define RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_3 GENMASK(15, 8)
+#define RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_4 GENMASK(23, 16)
+#define RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_5 GENMASK(31, 24)
+#define RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_6 GENMASK(7, 0)
+#define RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_7 GENMASK(15, 8)
+
struct rtw89_h2creg_hdr {
u32 w0;
};
@@ -98,8 +124,11 @@ enum rtw89_mac_h2c_type {
RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE,
RTW89_FWCMD_H2CREG_FUNC_GETPKT_INFORM,
RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN,
- RTW89_FWCMD_H2CREG_FUNC_WOW_TRX_STOP = 0x6,
- RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL = 0xA,
+ RTW89_FWCMD_H2CREG_FUNC_WOW_TRX_STOP,
+ RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_1,
+ RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_2,
+ RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_3_REQ,
+ RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL,
};
enum rtw89_mac_c2h_type {
@@ -340,8 +369,9 @@ struct rtw89_mac_chinfo_be {
struct rtw89_pktofld_info {
struct list_head list;
u8 id;
+ bool wildcard_6ghz;
- /* Below fields are for 6 GHz RNR use only */
+ /* The fields below are used only for 6 GHz RNR on WiFi 6 chips */
u8 ssid[IEEE80211_MAX_SSID_LEN];
u8 ssid_len;
u8 bssid[ETH_ALEN];
@@ -1432,308 +1462,6 @@ struct rtw89_h2c_cctlinfo_ud_g7 {
#define CCTLINFO_G7_W15_MGNT_CURR_RATE GENMASK(27, 16)
#define CCTLINFO_G7_W15_ALL GENMASK(27, 0)
-static inline void SET_DCTL_MACID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0));
-}
-
-static inline void SET_DCTL_OPERATION_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 0, val, BIT(7));
-}
-
-#define SET_DCTL_MASK_QOS_FIELD_V1 GENMASK(7, 0)
-static inline void SET_DCTL_QOS_FIELD_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(7, 0));
- le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_QOS_FIELD_V1,
- GENMASK(7, 0));
-}
-
-#define SET_DCTL_MASK_SET_DCTL_HW_EXSEQ_MACID GENMASK(6, 0)
-static inline void SET_DCTL_HW_EXSEQ_MACID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(14, 8));
- le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_SET_DCTL_HW_EXSEQ_MACID,
- GENMASK(14, 8));
-}
-
-#define SET_DCTL_MASK_QOS_DATA BIT(0)
-static inline void SET_DCTL_QOS_DATA_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 1, val, BIT(15));
- le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_QOS_DATA,
- BIT(15));
-}
-
-#define SET_DCTL_MASK_AES_IV_L GENMASK(15, 0)
-static inline void SET_DCTL_AES_IV_L_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(31, 16));
- le32p_replace_bits((__le32 *)(table) + 9, SET_DCTL_MASK_AES_IV_L,
- GENMASK(31, 16));
-}
-
-#define SET_DCTL_MASK_AES_IV_H GENMASK(31, 0)
-static inline void SET_DCTL_AES_IV_H_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(31, 0));
- le32p_replace_bits((__le32 *)(table) + 10, SET_DCTL_MASK_AES_IV_H,
- GENMASK(31, 0));
-}
-
-#define SET_DCTL_MASK_SEQ0 GENMASK(11, 0)
-static inline void SET_DCTL_SEQ0_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(11, 0));
- le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_SEQ0,
- GENMASK(11, 0));
-}
-
-#define SET_DCTL_MASK_SEQ1 GENMASK(11, 0)
-static inline void SET_DCTL_SEQ1_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(23, 12));
- le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_SEQ1,
- GENMASK(23, 12));
-}
-
-#define SET_DCTL_MASK_AMSDU_MAX_LEN GENMASK(2, 0)
-static inline void SET_DCTL_AMSDU_MAX_LEN_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(26, 24));
- le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_AMSDU_MAX_LEN,
- GENMASK(26, 24));
-}
-
-#define SET_DCTL_MASK_STA_AMSDU_EN BIT(0)
-static inline void SET_DCTL_STA_AMSDU_EN_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 3, val, BIT(27));
- le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_STA_AMSDU_EN,
- BIT(27));
-}
-
-#define SET_DCTL_MASK_CHKSUM_OFLD_EN BIT(0)
-static inline void SET_DCTL_CHKSUM_OFLD_EN_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 3, val, BIT(28));
- le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_CHKSUM_OFLD_EN,
- BIT(28));
-}
-
-#define SET_DCTL_MASK_WITH_LLC BIT(0)
-static inline void SET_DCTL_WITH_LLC_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 3, val, BIT(29));
- le32p_replace_bits((__le32 *)(table) + 11, SET_DCTL_MASK_WITH_LLC,
- BIT(29));
-}
-
-#define SET_DCTL_MASK_SEQ2 GENMASK(11, 0)
-static inline void SET_DCTL_SEQ2_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(11, 0));
- le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_SEQ2,
- GENMASK(11, 0));
-}
-
-#define SET_DCTL_MASK_SEQ3 GENMASK(11, 0)
-static inline void SET_DCTL_SEQ3_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(23, 12));
- le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_SEQ3,
- GENMASK(23, 12));
-}
-
-#define SET_DCTL_MASK_TGT_IND GENMASK(3, 0)
-static inline void SET_DCTL_TGT_IND_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(27, 24));
- le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_TGT_IND,
- GENMASK(27, 24));
-}
-
-#define SET_DCTL_MASK_TGT_IND_EN BIT(0)
-static inline void SET_DCTL_TGT_IND_EN_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 4, val, BIT(28));
- le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_TGT_IND_EN,
- BIT(28));
-}
-
-#define SET_DCTL_MASK_HTC_LB GENMASK(2, 0)
-static inline void SET_DCTL_HTC_LB_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(31, 29));
- le32p_replace_bits((__le32 *)(table) + 12, SET_DCTL_MASK_HTC_LB,
- GENMASK(31, 29));
-}
-
-#define SET_DCTL_MASK_MHDR_LEN GENMASK(4, 0)
-static inline void SET_DCTL_MHDR_LEN_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(4, 0));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_MHDR_LEN,
- GENMASK(4, 0));
-}
-
-#define SET_DCTL_MASK_VLAN_TAG_VALID BIT(0)
-static inline void SET_DCTL_VLAN_TAG_VALID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(5));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_VLAN_TAG_VALID,
- BIT(5));
-}
-
-#define SET_DCTL_MASK_VLAN_TAG_SEL GENMASK(1, 0)
-static inline void SET_DCTL_VLAN_TAG_SEL_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(7, 6));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_VLAN_TAG_SEL,
- GENMASK(7, 6));
-}
-
-#define SET_DCTL_MASK_HTC_ORDER BIT(0)
-static inline void SET_DCTL_HTC_ORDER_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(8));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_HTC_ORDER,
- BIT(8));
-}
-
-#define SET_DCTL_MASK_SEC_KEY_ID GENMASK(1, 0)
-static inline void SET_DCTL_SEC_KEY_ID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(10, 9));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_KEY_ID,
- GENMASK(10, 9));
-}
-
-#define SET_DCTL_MASK_WAPI BIT(0)
-static inline void SET_DCTL_WAPI_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, BIT(15));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_WAPI,
- BIT(15));
-}
-
-#define SET_DCTL_MASK_SEC_ENT_MODE GENMASK(1, 0)
-static inline void SET_DCTL_SEC_ENT_MODE_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(17, 16));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENT_MODE,
- GENMASK(17, 16));
-}
-
-#define SET_DCTL_MASK_SEC_ENTX_KEYID GENMASK(1, 0)
-static inline void SET_DCTL_SEC_ENT0_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(19, 18));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(19, 18));
-}
-
-static inline void SET_DCTL_SEC_ENT1_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(21, 20));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(21, 20));
-}
-
-static inline void SET_DCTL_SEC_ENT2_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(23, 22));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(23, 22));
-}
-
-static inline void SET_DCTL_SEC_ENT3_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(25, 24));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(25, 24));
-}
-
-static inline void SET_DCTL_SEC_ENT4_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(27, 26));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(27, 26));
-}
-
-static inline void SET_DCTL_SEC_ENT5_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(29, 28));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(29, 28));
-}
-
-static inline void SET_DCTL_SEC_ENT6_KEYID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(31, 30));
- le32p_replace_bits((__le32 *)(table) + 13, SET_DCTL_MASK_SEC_ENTX_KEYID,
- GENMASK(31, 30));
-}
-
-#define SET_DCTL_MASK_SEC_ENT_VALID GENMASK(7, 0)
-static inline void SET_DCTL_SEC_ENT_VALID_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(7, 0));
- le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENT_VALID,
- GENMASK(7, 0));
-}
-
-#define SET_DCTL_MASK_SEC_ENTX GENMASK(7, 0)
-static inline void SET_DCTL_SEC_ENT0_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(15, 8));
- le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(15, 8));
-}
-
-static inline void SET_DCTL_SEC_ENT1_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(23, 16));
- le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(23, 16));
-}
-
-static inline void SET_DCTL_SEC_ENT2_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(31, 24));
- le32p_replace_bits((__le32 *)(table) + 14, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(31, 24));
-}
-
-static inline void SET_DCTL_SEC_ENT3_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 0));
- le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(7, 0));
-}
-
-static inline void SET_DCTL_SEC_ENT4_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(15, 8));
- le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(15, 8));
-}
-
-static inline void SET_DCTL_SEC_ENT5_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(23, 16));
- le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(23, 16));
-}
-
-static inline void SET_DCTL_SEC_ENT6_V1(void *table, u32 val)
-{
- le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(31, 24));
- le32p_replace_bits((__le32 *)(table) + 15, SET_DCTL_MASK_SEC_ENTX,
- GENMASK(31, 24));
-}
-
struct rtw89_h2c_bcn_upd {
__le32 w0;
__le32 w1;
@@ -2157,45 +1885,18 @@ static inline void RTW89_SET_DISCONNECT_DETECT_TRYOK_BCNFAIL_COUNT_LIMIT(void *h
le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0));
}
-static inline void RTW89_SET_WOW_GLOBAL_ENABLE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(0));
-}
-
-static inline void RTW89_SET_WOW_GLOBAL_DROP_ALL_PKT(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(1));
-}
-
-static inline void RTW89_SET_WOW_GLOBAL_RX_PARSE_AFTER_WAKE(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(2));
-}
-
-static inline void RTW89_SET_WOW_GLOBAL_WAKE_BAR_PULLED(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, BIT(3));
-}
-
-static inline void RTW89_SET_WOW_GLOBAL_MAC_ID(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8));
-}
-
-static inline void RTW89_SET_WOW_GLOBAL_PAIRWISE_SEC_ALGO(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16));
-}
-
-static inline void RTW89_SET_WOW_GLOBAL_GROUP_SEC_ALGO(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24));
-}
+struct rtw89_h2c_wow_global {
+ __le32 w0;
+ struct rtw89_wow_key_info key_info;
+} __packed;
-static inline void RTW89_SET_WOW_GLOBAL_REMOTECTRL_INFO_CONTENT(void *h2c, u32 val)
-{
- le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(31, 0));
-}
+#define RTW89_H2C_WOW_GLOBAL_W0_ENABLE BIT(0)
+#define RTW89_H2C_WOW_GLOBAL_W0_DROP_ALL_PKT BIT(1)
+#define RTW89_H2C_WOW_GLOBAL_W0_RX_PARSE_AFTER_WAKE BIT(2)
+#define RTW89_H2C_WOW_GLOBAL_W0_WAKE_BAR_PULLED BIT(3)
+#define RTW89_H2C_WOW_GLOBAL_W0_MAC_ID GENMASK(15, 8)
+#define RTW89_H2C_WOW_GLOBAL_W0_PAIRWISE_SEC_ALGO GENMASK(23, 16)
+#define RTW89_H2C_WOW_GLOBAL_W0_GROUP_SEC_ALGO GENMASK(31, 24)
static inline void RTW89_SET_WOW_WAKEUP_CTRL_PATTERN_MATCH_ENABLE(void *h2c, u32 val)
{
@@ -2307,6 +2008,34 @@ static inline void RTW89_SET_WOW_CAM_UPD_VALID(void *h2c, u32 val)
le32p_replace_bits((__le32 *)h2c + 5, val, BIT(31));
}
+struct rtw89_h2c_wow_gtk_ofld {
+ __le32 w0;
+ __le32 w1;
+ struct rtw89_wow_gtk_info gtk_info;
+} __packed;
+
+#define RTW89_H2C_WOW_GTK_OFLD_W0_EN BIT(0)
+#define RTW89_H2C_WOW_GTK_OFLD_W0_TKIP_EN BIT(1)
+#define RTW89_H2C_WOW_GTK_OFLD_W0_IEEE80211W_EN BIT(2)
+#define RTW89_H2C_WOW_GTK_OFLD_W0_PAIRWISE_WAKEUP BIT(3)
+#define RTW89_H2C_WOW_GTK_OFLD_W0_NOREKEY_WAKEUP BIT(4)
+#define RTW89_H2C_WOW_GTK_OFLD_W0_MAC_ID GENMASK(23, 16)
+#define RTW89_H2C_WOW_GTK_OFLD_W0_GTK_RSP_ID GENMASK(31, 24)
+#define RTW89_H2C_WOW_GTK_OFLD_W1_PMF_SA_QUERY_ID GENMASK(7, 0)
+#define RTW89_H2C_WOW_GTK_OFLD_W1_PMF_BIP_SEC_ALGO GENMASK(9, 8)
+#define RTW89_H2C_WOW_GTK_OFLD_W1_ALGO_AKM_SUIT GENMASK(17, 10)
+
+struct rtw89_h2c_arp_offload {
+ __le32 w0;
+ __le32 w1;
+} __packed;
+
+#define RTW89_H2C_ARP_OFFLOAD_W0_ENABLE BIT(0)
+#define RTW89_H2C_ARP_OFFLOAD_W0_ACTION BIT(1)
+#define RTW89_H2C_ARP_OFFLOAD_W0_MACID GENMASK(23, 16)
+#define RTW89_H2C_ARP_OFFLOAD_W0_PKT_ID GENMASK(31, 24)
+#define RTW89_H2C_ARP_OFFLOAD_W1_CONTENT GENMASK(31, 0)
+
enum rtw89_btc_btf_h2c_class {
BTFC_SET = 0x10,
BTFC_GET = 0x11,
@@ -2395,6 +2124,32 @@ struct rtw89_h2c_cxctrl_v7 {
#define H2C_LEN_CXDRVHDR sizeof(struct rtw89_h2c_cxhdr)
#define H2C_LEN_CXDRVHDR_V7 sizeof(struct rtw89_h2c_cxhdr_v7)
+struct rtw89_btc_wl_role_info_v8_u8 {
+ u8 connect_cnt;
+ u8 link_mode;
+ u8 link_mode_chg;
+ u8 p2p_2g;
+
+ u8 pta_req_band;
+ u8 dbcc_en;
+ u8 dbcc_chg;
+ u8 dbcc_2g_phy;
+
+ struct rtw89_btc_wl_rlink rlink[RTW89_BE_BTC_WL_MAX_ROLE_NUMBER][RTW89_MAC_NUM];
+} __packed;
+
+struct rtw89_btc_wl_role_info_v8_u32 {
+ __le32 role_map;
+ __le32 mrole_type;
+ __le32 mrole_noa_duration;
+} __packed;
+
+struct rtw89_h2c_cxrole_v8 {
+ struct rtw89_h2c_cxhdr hdr;
+ struct rtw89_btc_wl_role_info_v8_u8 _u8;
+ struct rtw89_btc_wl_role_info_v8_u32 _u32;
+} __packed;
+
struct rtw89_h2c_cxinit {
struct rtw89_h2c_cxhdr hdr;
u8 ant_type;
@@ -2955,6 +2710,7 @@ struct rtw89_h2c_scanofld_be {
__le32 w5;
__le32 w6;
__le32 w7;
+ __le32 w8;
struct rtw89_h2c_scanofld_be_macc_role role[];
} __packed;
@@ -2966,6 +2722,7 @@ struct rtw89_h2c_scanofld_be {
#define RTW89_H2C_SCANOFLD_BE_W0_MACID GENMASK(23, 8)
#define RTW89_H2C_SCANOFLD_BE_W0_PORT GENMASK(26, 24)
#define RTW89_H2C_SCANOFLD_BE_W0_BAND GENMASK(28, 27)
+#define RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE BIT(29)
#define RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE GENMASK(7, 0)
#define RTW89_H2C_SCANOFLD_BE_W1_NUM_OP GENMASK(15, 8)
#define RTW89_H2C_SCANOFLD_BE_W1_NORM_PD GENMASK(31, 16)
@@ -2982,6 +2739,9 @@ struct rtw89_h2c_scanofld_be {
#define RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE GENMASK(31, 0)
#define RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW GENMASK(31, 0)
#define RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH GENMASK(31, 0)
+#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ GENMASK(7, 0)
+#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ GENMASK(15, 8)
+#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ GENMASK(23, 16)
static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val)
{
@@ -3636,6 +3396,10 @@ struct rtw89_h2c_mrc_upd_duration {
#define RTW89_H2C_MRC_UPD_DURATION_SLOT_SLOT_IDX GENMASK(7, 0)
#define RTW89_H2C_MRC_UPD_DURATION_SLOT_DURATION GENMASK(31, 16)
+struct rtw89_h2c_wow_aoac {
+ __le32 w0;
+} __packed;
+
#define RTW89_C2H_HEADER_LEN 8
struct rtw89_c2h_hdr {
@@ -3864,6 +3628,30 @@ struct rtw89_c2h_pkt_ofld_rsp {
#define RTW89_C2H_PKT_OFLD_RSP_W2_PTK_OP GENMASK(10, 8)
#define RTW89_C2H_PKT_OFLD_RSP_W2_PTK_LEN GENMASK(31, 16)
+struct rtw89_c2h_wow_aoac_report {
+ struct rtw89_c2h_hdr c2h_hdr;
+ u8 rpt_ver;
+ u8 sec_type;
+ u8 key_idx;
+ u8 pattern_idx;
+ u8 rekey_ok;
+ u8 rsvd1[3];
+ u8 ptk_tx_iv[8];
+ u8 eapol_key_replay_count[8];
+ u8 gtk[32];
+ u8 ptk_rx_iv[8];
+ u8 gtk_rx_iv[4][8];
+ __le64 igtk_key_id;
+ __le64 igtk_ipn;
+ u8 igtk[32];
+ u8 csa_pri_ch;
+ u8 csa_bw_ch_offset;
+ u8 csa_ch_band_chsw_failed;
+ u8 csa_rsvd1;
+} __packed;
+
+#define RTW89_C2H_WOW_AOAC_RPT_REKEY_IDX BIT(0)
+
struct rtw89_h2c_bcnfltr {
__le32 w0;
} __packed;
@@ -4141,11 +3929,21 @@ struct rtw89_fw_h2c_rf_reg_info {
/* CLASS 1 - WOW */
#define H2C_CL_MAC_WOW 0x1
-#define H2C_FUNC_KEEP_ALIVE 0x0
-#define H2C_FUNC_DISCONNECT_DETECT 0x1
-#define H2C_FUNC_WOW_GLOBAL 0x2
-#define H2C_FUNC_WAKEUP_CTRL 0x8
-#define H2C_FUNC_WOW_CAM_UPD 0xC
+enum rtw89_wow_h2c_func {
+ H2C_FUNC_KEEP_ALIVE = 0x0,
+ H2C_FUNC_DISCONNECT_DETECT = 0x1,
+ H2C_FUNC_WOW_GLOBAL = 0x2,
+ H2C_FUNC_GTK_OFLD = 0x3,
+ H2C_FUNC_ARP_OFLD = 0x4,
+ H2C_FUNC_WAKEUP_CTRL = 0x8,
+ H2C_FUNC_WOW_CAM_UPD = 0xC,
+ H2C_FUNC_AOAC_REPORT_REQ = 0xD,
+
+ NUM_OF_RTW89_WOW_H2C_FUNC,
+};
+
+#define RTW89_WOW_WAIT_COND(func) \
+ (NUM_OF_RTW89_WOW_H2C_FUNC + (func))
/* CLASS 2 - PS */
#define H2C_CL_MAC_PS 0x2
@@ -4568,6 +4366,7 @@ int rtw89_fw_h2c_cxdrv_init_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v1(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_role_v2(struct rtw89_dev *rtwdev, u8 type);
+int rtw89_fw_h2c_cxdrv_role_v8(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_ctrl_v7(struct rtw89_dev *rtwdev, u8 type);
int rtw89_fw_h2c_cxdrv_trx(struct rtw89_dev *rtwdev, u8 type);
@@ -4653,6 +4452,8 @@ int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool enable);
int rtw89_fw_h2c_keep_alive(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
bool enable);
+int rtw89_fw_h2c_arp_offload(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool enable);
int rtw89_fw_h2c_disconnect_detect(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool enable);
int rtw89_fw_h2c_wow_global(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
@@ -4661,6 +4462,10 @@ int rtw89_fw_h2c_wow_wakeup_ctrl(struct rtw89_dev *rtwdev,
struct rtw89_vif *rtwvif, bool enable);
int rtw89_fw_wow_cam_update(struct rtw89_dev *rtwdev,
struct rtw89_wow_cam_info *cam_info);
+int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ bool enable);
+int rtw89_fw_h2c_wow_request_aoac(struct rtw89_dev *rtwdev);
int rtw89_fw_h2c_add_mcc(struct rtw89_dev *rtwdev,
const struct rtw89_fw_mcc_add_req *p);
int rtw89_fw_h2c_start_mcc(struct rtw89_dev *rtwdev,
@@ -4854,4 +4659,10 @@ const struct rtw89_rfe_parms *
rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev,
const struct rtw89_rfe_parms *init);
+enum rtw89_wow_wakeup_ver {
+ RTW89_WOW_REASON_V0,
+ RTW89_WOW_REASON_V1,
+ RTW89_WOW_REASON_NUM,
+};
+
#endif
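
One detail worth spelling out in the WOW enum above: NUM_OF_RTW89_WOW_H2C_FUNC follows H2C_FUNC_AOAC_REPORT_REQ (0xD), so it evaluates to 0xE, and RTW89_WOW_WAIT_COND() therefore always yields values strictly above every function ID. A standalone sketch of that arithmetic (names shortened; this mirrors, not defines, the driver's macro):

#include <stdio.h>

/* Mirrors the enum/macro above so the arithmetic can be checked in
 * isolation; not driver code.
 */
enum wow_h2c_func {
	FUNC_KEEP_ALIVE = 0x0,
	FUNC_DISCONNECT_DETECT = 0x1,
	FUNC_WOW_GLOBAL = 0x2,
	FUNC_GTK_OFLD = 0x3,
	FUNC_ARP_OFLD = 0x4,
	FUNC_WAKEUP_CTRL = 0x8,
	FUNC_WOW_CAM_UPD = 0xC,
	FUNC_AOAC_REPORT_REQ = 0xD,
	NUM_OF_WOW_H2C_FUNC,		/* 0xE: one past the last ID */
};

#define WOW_WAIT_COND(func) (NUM_OF_WOW_H2C_FUNC + (func))

int main(void)
{
	/* Prints 0x1b; every wait condition lands above 0xD, so a
	 * condition value can never collide with a raw function ID.
	 */
	printf("0x%x\n", WOW_WAIT_COND(FUNC_AOAC_REPORT_REQ));
	return 0;
}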
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index aa5b396b5d2b..e2399796aeb1 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -1568,6 +1568,8 @@ static int dmac_func_en_ax(struct rtw89_dev *rtwdev)
B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN |
B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN |
B_AX_WD_RLS_CLK_EN | B_AX_BBRPT_CLK_EN);
+ if (chip_id == RTL8852BT)
+ val32 |= B_AX_AXIDMA_CLK_EN;
rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32);
return 0;
@@ -1577,7 +1579,7 @@ static int chip_func_en_ax(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- if (chip_id == RTL8852A || chip_id == RTL8852B)
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
rtw89_write32_set(rtwdev, R_AX_SPS_DIG_ON_CTRL0,
B_AX_OCP_L1_MASK);
@@ -2146,8 +2148,8 @@ int rtw89_mac_preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx,
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
- chip->chip_id == RTL8851B || !is_qta_poh(rtwdev))
+ if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev) ||
+ !is_qta_poh(rtwdev))
return 0;
return preload_init_set(rtwdev, mac_idx, mode);
@@ -2183,8 +2185,7 @@ static void _patch_ss2f_path(struct rtw89_dev *rtwdev)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
- if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
- chip->chip_id == RTL8851B)
+ if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
return;
rtw89_write32_mask(rtwdev, R_AX_SS2FINFO_PATH, B_AX_SS_DEST_QUEUE_MASK,
@@ -2360,7 +2361,7 @@ static int scheduler_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK,
SIFS_MACTXEN_T1);
- if (rtwdev->chip->chip_id == RTL8852B || rtwdev->chip->chip_id == RTL8851B) {
+ if (rtw89_is_rtl885xb(rtwdev)) {
reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SCH_EXT_CTRL, mac_idx);
rtw89_write32_set(rtwdev, reg, B_AX_PORT_RST_TSF_ADV);
}
@@ -2588,7 +2589,9 @@ static int trxptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
case RTL8852A:
sifs = WMAC_SPEC_SIFS_OFDM_52A;
break;
+ case RTL8851B:
case RTL8852B:
+ case RTL8852BT:
sifs = WMAC_SPEC_SIFS_OFDM_52B;
break;
default:
@@ -2632,6 +2635,7 @@ static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
#define RX_MAX_LEN_UNIT 512
#define PLD_RLS_MAX_PG 127
#define RX_SPEC_MAX_LEN (11454 + RX_MAX_LEN_UNIT)
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
u32 reg, rx_max_len, rx_qta;
u16 val;
@@ -2652,6 +2656,8 @@ static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
B_AX_RX_DLK_DATA_TIME_MASK);
val = u16_replace_bits(val, TRXCFG_RMAC_CCA_TO,
B_AX_RX_DLK_CCA_TIME_MASK);
+ if (chip_id == RTL8852BT)
+ val |= B_AX_RX_DLK_RST_EN;
rtw89_write16(rtwdev, reg, val);
reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RCR, mac_idx);
@@ -2668,8 +2674,7 @@ static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
rx_max_len /= RX_MAX_LEN_UNIT;
rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len);
- if (rtwdev->chip->chip_id == RTL8852A &&
- rtwdev->hal.cv == CHIP_CBV) {
+ if (chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) {
rtw89_write16_mask(rtwdev,
rtw89_mac_reg_by_idx(rtwdev, R_AX_DLK_PROTECT_CTL, mac_idx),
B_AX_RX_DLK_CCA_TIME_MASK, 0);
@@ -2700,7 +2705,7 @@ static int cmac_com_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK);
rtw89_write32(rtwdev, reg, val);
- if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_RRSR1, mac_idx);
rtw89_write32_mask(rtwdev, reg, B_AX_RRSR_RATE_EN_MASK, RRSR_OFDM_CCK_EN);
}
@@ -2766,11 +2771,10 @@ static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
static int cmac_dma_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 reg;
int ret;
- if (chip_id != RTL8852B)
+ if (!rtw89_is_rtl885xb(rtwdev))
return 0;
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
@@ -3587,13 +3591,11 @@ static int enable_imr_ax(struct rtw89_dev *rtwdev, u8 mac_idx,
static void err_imr_ctrl_ax(struct rtw89_dev *rtwdev, bool en)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
-
rtw89_write32(rtwdev, R_AX_DMAC_ERR_IMR,
en ? DMAC_ERR_IMR_EN : DMAC_ERR_IMR_DIS);
rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR,
en ? CMAC0_ERR_IMR_EN : CMAC0_ERR_IMR_DIS);
- if (chip_id != RTL8852B && rtwdev->mac.dle_info.c1_rx_qta)
+ if (!rtw89_is_rtl885xb(rtwdev) && rtwdev->mac.dle_info.c1_rx_qta)
rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR_C1,
en ? CMAC1_ERR_IMR_EN : CMAC1_ERR_IMR_DIS);
}
@@ -3644,6 +3646,7 @@ static int set_host_rpr_ax(struct rtw89_dev *rtwdev)
static int trx_init_ax(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode;
int ret;
@@ -3687,6 +3690,10 @@ static int trx_init_ax(struct rtw89_dev *rtwdev)
return ret;
}
+ if (chip_id == RTL8852C)
+ rtw89_write32_clr(rtwdev, R_AX_RSP_CHK_SIG,
+ B_AX_RSP_STATIC_RTS_CHK_SERV_BW_EN);
+
return 0;
}
@@ -3714,10 +3721,9 @@ static int rtw89_mac_feat_init(struct rtw89_dev *rtwdev)
static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val32;
- if (chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (rtw89_is_rtl885xb(rtwdev)) {
rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_APB_WRAP_EN);
rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_APB_WRAP_EN);
return;
@@ -3813,7 +3819,7 @@ static void rtw89_mac_dmac_func_pre_en_ax(struct rtw89_dev *rtwdev)
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u32 val;
- if (chip_id == RTL8851B)
+ if (chip_id == RTL8851B || chip_id == RTL8852BT)
val = B_AX_DISPATCHER_CLK_EN | B_AX_AXIDMA_CLK_EN;
else
val = B_AX_DISPATCHER_CLK_EN;
@@ -4659,8 +4665,7 @@ int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
int ret;
- rtwvif->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map,
- RTW89_MAX_MAC_ID_NUM);
+ rtwvif->mac_id = rtw89_acquire_mac_id(rtwdev);
if (rtwvif->mac_id == RTW89_MAX_MAC_ID_NUM)
return -ENOSPC;
@@ -4671,7 +4676,7 @@ int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
return 0;
release_mac_id:
- rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id);
+ rtw89_release_mac_id(rtwdev, rtwvif->mac_id);
return ret;
}
@@ -4681,7 +4686,7 @@ int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
int ret;
ret = rtw89_mac_vif_deinit(rtwdev, rtwvif);
- rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id);
+ rtw89_release_mac_id(rtwdev, rtwvif->mac_id);
return ret;
}
@@ -4752,6 +4757,9 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
}
return;
case RTW89_SCAN_END_SCAN_NOTIFY:
+ if (rtwdev->scan_info.abort)
+ return;
+
if (rtwvif && rtwvif->scan_req &&
last_chan < rtwvif->scan_req->n_channels) {
ret = rtw89_hw_scan_offload(rtwdev, vif, true);
@@ -4760,7 +4768,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb,
rtw89_warn(rtwdev, "HW scan failed: %d\n", ret);
}
} else {
- rtw89_hw_scan_complete(rtwdev, vif, rtwdev->scan_info.abort);
+ rtw89_hw_scan_complete(rtwdev, vif, false);
}
break;
case RTW89_SCAN_ENTER_OP_NOTIFY:
@@ -5132,6 +5140,37 @@ rtw89_mac_c2h_mrc_tsf_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len
}
static void
+rtw89_mac_c2h_wow_aoac_rpt(struct rtw89_dev *rtwdev, struct sk_buff *skb, u32 len)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+ struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait;
+ const struct rtw89_c2h_wow_aoac_report *c2h =
+ (const struct rtw89_c2h_wow_aoac_report *)skb->data;
+ struct rtw89_completion_data data = {};
+ unsigned int cond;
+
+ aoac_rpt->rpt_ver = c2h->rpt_ver;
+ aoac_rpt->sec_type = c2h->sec_type;
+ aoac_rpt->key_idx = c2h->key_idx;
+ aoac_rpt->pattern_idx = c2h->pattern_idx;
+ aoac_rpt->rekey_ok = u8_get_bits(c2h->rekey_ok,
+ RTW89_C2H_WOW_AOAC_RPT_REKEY_IDX);
+ memcpy(aoac_rpt->ptk_tx_iv, c2h->ptk_tx_iv, sizeof(aoac_rpt->ptk_tx_iv));
+ memcpy(aoac_rpt->eapol_key_replay_count, c2h->eapol_key_replay_count,
+ sizeof(aoac_rpt->eapol_key_replay_count));
+ memcpy(aoac_rpt->gtk, c2h->gtk, sizeof(aoac_rpt->gtk));
+ memcpy(aoac_rpt->ptk_rx_iv, c2h->ptk_rx_iv, sizeof(aoac_rpt->ptk_rx_iv));
+ memcpy(aoac_rpt->gtk_rx_iv, c2h->gtk_rx_iv, sizeof(aoac_rpt->gtk_rx_iv));
+ aoac_rpt->igtk_key_id = le64_to_cpu(c2h->igtk_key_id);
+ aoac_rpt->igtk_ipn = le64_to_cpu(c2h->igtk_ipn);
+ memcpy(aoac_rpt->igtk, c2h->igtk, sizeof(aoac_rpt->igtk));
+
+ cond = RTW89_WOW_WAIT_COND(H2C_FUNC_AOAC_REPORT_REQ);
+ rtw89_complete_cond(wait, cond, &data);
+}
+
+static void
rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
{
struct rtw89_wait_info *wait = &rtwdev->mcc.wait;
@@ -5163,6 +5202,46 @@ rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32
case RTW89_MAC_MRC_DEL_SCH_OK:
func = H2C_FUNC_DEL_MRC;
break;
+ case RTW89_MAC_MRC_EMPTY_SCH_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: empty sch fail\n");
+ return;
+ case RTW89_MAC_MRC_ROLE_NOT_EXIST_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: role not exist fail\n");
+ return;
+ case RTW89_MAC_MRC_DATA_NOT_FOUND_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: data not found fail\n");
+ return;
+ case RTW89_MAC_MRC_GET_NEXT_SLOT_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: get next slot fail\n");
+ return;
+ case RTW89_MAC_MRC_ALT_ROLE_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: alt role fail\n");
+ return;
+ case RTW89_MAC_MRC_ADD_PSTIMER_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: add ps timer fail\n");
+ return;
+ case RTW89_MAC_MRC_MALLOC_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: malloc fail\n");
+ return;
+ case RTW89_MAC_MRC_SWITCH_CH_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: switch ch fail\n");
+ return;
+ case RTW89_MAC_MRC_TXNULL0_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: tx null-0 fail\n");
+ return;
+ case RTW89_MAC_MRC_PORT_FUNC_EN_FAIL:
+ rtw89_debug(rtwdev, RTW89_DBG_CHAN,
+ "MRC C2H STS RPT: port func en fail\n");
+ return;
default:
rtw89_debug(rtwdev, RTW89_DBG_CHAN,
"invalid MRC C2H STS RPT: status %d\n", status);
@@ -5218,6 +5297,12 @@ void (* const rtw89_mac_c2h_mrc_handler[])(struct rtw89_dev *rtwdev,
[RTW89_MAC_C2H_FUNC_MRC_STATUS_RPT] = rtw89_mac_c2h_mrc_status_rpt,
};
+static
+void (* const rtw89_mac_c2h_wow_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_AOAC_REPORT] = rtw89_mac_c2h_wow_aoac_rpt,
+};
+
static void rtw89_mac_c2h_scanofld_rsp_atomic(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
@@ -5270,6 +5355,8 @@ bool rtw89_mac_c2h_chk_atomic(struct rtw89_dev *rtwdev, struct sk_buff *c2h,
return true;
case RTW89_MAC_C2H_CLASS_MRC:
return true;
+ case RTW89_MAC_C2H_CLASS_WOW:
+ return true;
}
}
@@ -5296,6 +5383,10 @@ void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
if (func < NUM_OF_RTW89_MAC_C2H_FUNC_MRC)
handler = rtw89_mac_c2h_mrc_handler[func];
break;
+ case RTW89_MAC_C2H_CLASS_WOW:
+ if (func < NUM_OF_RTW89_MAC_C2H_FUNC_WOW)
+ handler = rtw89_mac_c2h_wow_handler[func];
+ break;
case RTW89_MAC_C2H_CLASS_FWDBG:
return;
default:
@@ -5413,18 +5504,19 @@ void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop)
int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
u8 val;
u16 val16;
u32 val32;
int ret;
rtw89_write8_set(rtwdev, R_AX_GPIO_MUXCFG, B_AX_ENBT);
- if (rtwdev->chip->chip_id != RTL8851B)
+ if (chip_id != RTL8851B && chip_id != RTL8852BT)
rtw89_write8_set(rtwdev, R_AX_BTC_FUNC_EN, B_AX_PTA_WL_TX_EN);
rtw89_write8_set(rtwdev, R_AX_BT_COEX_CFG_2 + 1, B_AX_GNT_BT_POLARITY >> 8);
rtw89_write8_set(rtwdev, R_AX_CSR_MODE, B_AX_STATIS_BT_EN | B_AX_WL_ACT_MSK);
rtw89_write8_set(rtwdev, R_AX_CSR_MODE + 2, B_AX_BT_CNT_RST >> 16);
- if (rtwdev->chip->chip_id != RTL8851B)
+ if (chip_id != RTL8851B && chip_id != RTL8852BT)
rtw89_write8_clr(rtwdev, R_AX_TRXPTCL_RESP_0 + 3, B_AX_RSP_CHK_BTCCA >> 24);
val16 = rtw89_read16(rtwdev, R_AX_CCA_CFG_0);
@@ -5705,10 +5797,9 @@ bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
const struct rtw89_chip_info *chip = rtwdev->chip;
u8 val = 0;
- if (chip->chip_id == RTL8852C)
+ if (chip->chip_id == RTL8852C || chip->chip_id == RTL8922A)
return false;
- else if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B ||
- chip->chip_id == RTL8851B)
+ else if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
val = rtw89_read8_mask(rtwdev, R_AX_SYS_SDIO_CTRL + 3,
B_AX_LTE_MUX_CTRL_PATH >> 24);
@@ -6269,9 +6360,30 @@ int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev,
return ret;
}
+int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable)
+{
+ struct rtw89_mac_h2c_info h2c_info = {};
+ struct rtw89_mac_c2h_info c2h_info = {};
+ u32 ret;
+
+ h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL;
+ h2c_info.content_len = sizeof(h2c_info.u.hdr);
+ h2c_info.u.hdr.w0 = u32_encode_bits(wow_enable, RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN);
+
+ ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
+ if (ret)
+ return ret;
+
+ if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK)
+ ret = -EINVAL;
+
+ return ret;
+}
+
static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow)
{
const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
int ret;
if (enable_wow) {
@@ -6282,12 +6394,19 @@ static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow)
}
rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
+ rtw89_mac_cpu_io_rx(rtwdev, enable_wow);
rtw89_write32_clr(rtwdev, mac->rx_fltr, B_AX_SNIFFER_MODE);
rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0);
rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0);
rtw89_write32(rtwdev, R_AX_TF_FWD, 0);
rtw89_write32(rtwdev, R_AX_HW_RPT_FWD, 0);
+
+ if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
+ rtw89_write8(rtwdev, R_BE_DBG_WOW_READY, WOWLAN_NOT_READY);
+ else
+ rtw89_write32_set(rtwdev, R_AX_DBG_WOW,
+ B_AX_DBG_WOW_CPU_IO_RX_EN);
} else {
ret = rtw89_mac_resize_ple_rx_quota(rtwdev, false);
if (ret) {
@@ -6295,6 +6414,7 @@ static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow)
return ret;
}
+ rtw89_mac_cpu_io_rx(rtwdev, enable_wow);
rtw89_write32_clr(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP);
rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD);
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
index 6fb457153a11..d5895516b3ed 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.h
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -419,6 +419,13 @@ enum rtw89_mac_c2h_mrc_func {
NUM_OF_RTW89_MAC_C2H_FUNC_MRC,
};
+enum rtw89_mac_c2h_wow_func {
+ RTW89_MAC_C2H_FUNC_AOAC_REPORT,
+ RTW89_MAC_C2H_FUNC_READ_WOW_CAM,
+
+ NUM_OF_RTW89_MAC_C2H_FUNC_WOW,
+};
+
enum rtw89_mac_c2h_class {
RTW89_MAC_C2H_CLASS_INFO = 0x0,
RTW89_MAC_C2H_CLASS_OFLD = 0x1,
@@ -459,6 +466,16 @@ enum rtw89_mac_mrc_status {
RTW89_MAC_MRC_START_SCH_OK = 0,
RTW89_MAC_MRC_STOP_SCH_OK = 1,
RTW89_MAC_MRC_DEL_SCH_OK = 2,
+ RTW89_MAC_MRC_EMPTY_SCH_FAIL = 16,
+ RTW89_MAC_MRC_ROLE_NOT_EXIST_FAIL = 17,
+ RTW89_MAC_MRC_DATA_NOT_FOUND_FAIL = 18,
+ RTW89_MAC_MRC_GET_NEXT_SLOT_FAIL = 19,
+ RTW89_MAC_MRC_ALT_ROLE_FAIL = 20,
+ RTW89_MAC_MRC_ADD_PSTIMER_FAIL = 21,
+ RTW89_MAC_MRC_MALLOC_FAIL = 22,
+ RTW89_MAC_MRC_SWITCH_CH_FAIL = 23,
+ RTW89_MAC_MRC_TXNULL0_FAIL = 24,
+ RTW89_MAC_MRC_PORT_FUNC_EN_FAIL = 25,
};
struct rtw89_mac_ax_coex {
@@ -1439,5 +1456,6 @@ int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mod
int rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev,
enum rtw89_mac_dle_rsvd_qt_type type,
struct rtw89_mac_dle_rsvd_qt_cfg *cfg);
+int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
index 31d1ffb16e83..1508693032cb 100644
--- a/drivers/net/wireless/realtek/rtw89/mac80211.c
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -66,7 +66,7 @@ static int rtw89_ops_start(struct ieee80211_hw *hw)
return ret;
}
-static void rtw89_ops_stop(struct ieee80211_hw *hw)
+static void rtw89_ops_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rtw89_dev *rtwdev = hw->priv;
@@ -318,7 +318,7 @@ static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev,
u8 sifs;
slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
- sifs = chan->band_type == RTW89_BAND_5G ? 16 : 10;
+ sifs = chan->band_type == RTW89_BAND_2G ? 10 : 16;
return aifsn * slot_time + sifs;
}
@@ -397,15 +397,14 @@ static void rtw89_conf_tx(struct rtw89_dev *rtwdev,
}
static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ieee80211_sta *sta;
if (vif->type != NL80211_IFTYPE_STATION)
return;
- sta = ieee80211_find_sta(vif, conf->bssid);
+ sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
if (!sta) {
rtw89_err(rtwdev, "can't find sta to set sta_assoc state\n");
return;
@@ -416,10 +415,8 @@ static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev,
rtw89_core_sta_assoc(rtwdev, vif, sta);
}
-static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *conf,
- u64 changed)
+static void rtw89_ops_vif_cfg_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u64 changed)
{
struct rtw89_dev *rtwdev = hw->priv;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
@@ -429,7 +426,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc) {
- rtw89_station_mode_sta_assoc(rtwdev, vif, conf);
+ rtw89_station_mode_sta_assoc(rtwdev, vif);
rtw89_phy_set_bss_color(rtwdev, vif);
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif);
rtw89_mac_port_update(rtwdev, rtwvif);
@@ -445,6 +442,26 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
}
}
+ if (changed & BSS_CHANGED_PS)
+ rtw89_recalc_lps(rtwdev);
+
+ if (changed & BSS_CHANGED_ARP_FILTER)
+ rtwvif->ip_addr = vif->cfg.arp_addr_list[0];
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf,
+ u64 changed)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+
if (changed & BSS_CHANGED_BSSID) {
ether_addr_copy(rtwvif->bssid, conf->bssid);
rtw89_cam_bssid_changed(rtwdev, rtwvif);
@@ -470,8 +487,8 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_CQM)
rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
- if (changed & BSS_CHANGED_PS)
- rtw89_recalc_lps(rtwdev);
+ if (changed & BSS_CHANGED_TPE)
+ rtw89_reg_6ghz_recalc(rtwdev, rtwvif, true);
mutex_unlock(&rtwdev->mutex);
}
@@ -1106,6 +1123,28 @@ static void rtw89_ops_set_wakeup(struct ieee80211_hw *hw, bool enabled)
device_set_wakeup_enable(rtwdev->dev, enabled);
}
+
+static void rtw89_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
+
+ if (data->kek_len > sizeof(gtk_info->kek) ||
+ data->kck_len > sizeof(gtk_info->kck)) {
+ rtw89_warn(rtwdev, "kek or kck length exceeds fw limit\n");
+ return;
+ }
+
+ mutex_lock(&rtwdev->mutex);
+
+ memcpy(gtk_info->kek, data->kek, data->kek_len);
+ memcpy(gtk_info->kck, data->kck, data->kck_len);
+
+ mutex_unlock(&rtwdev->mutex);
+}
#endif
const struct ieee80211_ops rtw89_ops = {
@@ -1118,7 +1157,8 @@ const struct ieee80211_ops rtw89_ops = {
.change_interface = rtw89_ops_change_interface,
.remove_interface = rtw89_ops_remove_interface,
.configure_filter = rtw89_ops_configure_filter,
- .bss_info_changed = rtw89_ops_bss_info_changed,
+ .vif_cfg_changed = rtw89_ops_vif_cfg_changed,
+ .link_info_changed = rtw89_ops_link_info_changed,
.start_ap = rtw89_ops_start_ap,
.stop_ap = rtw89_ops_stop_ap,
.set_tim = rtw89_ops_set_tim,
@@ -1151,6 +1191,7 @@ const struct ieee80211_ops rtw89_ops = {
.suspend = rtw89_ops_suspend,
.resume = rtw89_ops_resume,
.set_wakeup = rtw89_ops_set_wakeup,
+ .set_rekey_data = rtw89_set_rekey_data,
#endif
};
EXPORT_SYMBOL(rtw89_ops);
diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c
index f16467377eab..f212b67771d5 100644
--- a/drivers/net/wireless/realtek/rtw89/mac_be.c
+++ b/drivers/net/wireless/realtek/rtw89/mac_be.c
@@ -1751,6 +1751,7 @@ static int set_host_rpr_be(struct rtw89_dev *rtwdev)
static int trx_init_be(struct rtw89_dev *rtwdev)
{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode;
int ret;
@@ -1794,6 +1795,10 @@ static int trx_init_be(struct rtw89_dev *rtwdev)
return ret;
}
+ if (chip_id == RTL8922A)
+ rtw89_write32_clr(rtwdev, R_BE_RSP_CHK_SIG,
+ B_BE_RSP_STATIC_RTS_CHK_SERV_BW_EN);
+
return 0;
}
@@ -2307,26 +2312,6 @@ static void rtw89_mac_dump_qta_lost_be(struct rtw89_dev *rtwdev)
dump_err_status_dispatcher_be(rtwdev);
}
-static int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable)
-{
- struct rtw89_mac_h2c_info h2c_info = {};
- struct rtw89_mac_c2h_info c2h_info = {};
- u32 ret;
-
- h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL;
- h2c_info.content_len = sizeof(h2c_info.u.hdr);
- h2c_info.u.hdr.w0 = u32_encode_bits(wow_enable, RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN);
-
- ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
- if (ret)
- return ret;
-
- if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK)
- ret = -EINVAL;
-
- return ret;
-}
-
static int rtw89_wow_config_mac_be(struct rtw89_dev *rtwdev, bool enable_wow)
{
if (enable_wow) {
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
index 19001130ad94..02afeb3acce4 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.c
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -19,6 +19,31 @@ MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
+static int rtw89_pci_get_phy_offset_by_link_speed(struct rtw89_dev *rtwdev,
+ u32 *phy_offset)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ u32 val;
+ int ret;
+
+ ret = pci_read_config_dword(pdev, RTW89_PCIE_L1_STS_V1, &val);
+ if (ret)
+ return ret;
+
+ val = u32_get_bits(val, RTW89_BCFG_LINK_SPEED_MASK);
+ if (val == RTW89_PCIE_GEN1_SPEED) {
+ *phy_offset = R_RAC_DIRECT_OFFSET_G1;
+ } else if (val == RTW89_PCIE_GEN2_SPEED) {
+ *phy_offset = R_RAC_DIRECT_OFFSET_G2;
+ } else {
+ rtw89_warn(rtwdev, "Unknown PCI link speed %d\n", val);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
static int rtw89_pci_rst_bdram_ax(struct rtw89_dev *rtwdev)
{
u32 val;
@@ -158,14 +183,17 @@ static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
struct sk_buff *skb)
{
- struct rtw89_pci_rxbd_info *rxbd_info;
struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ struct rtw89_pci_rxbd_info *rxbd_info;
+ __le32 info;
rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
- rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
- rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
- rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
- rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
+ info = rxbd_info->dword;
+
+ rx_info->fs = le32_get_bits(info, RTW89_PCI_RXBD_FS);
+ rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS);
+ rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE);
+ rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG);
}
static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev,
@@ -1089,7 +1117,8 @@ u32 __rtw89_pci_check_and_reclaim_tx_resource_noio(struct rtw89_dev *rtwdev,
spin_lock_bh(&rtwpci->trx_lock);
cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
- cnt = min(cnt, wd_ring->curr_num);
+ if (txch != RTW89_TXCH_CH12)
+ cnt = min(cnt, wd_ring->curr_num);
spin_unlock_bh(&rtwpci->trx_lock);
return cnt;
@@ -1272,10 +1301,12 @@ u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev,
dma_addr_t dma, u8 *add_info_nr)
{
struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr;
+ __le16 option;
txaddr_info->length = cpu_to_le16(total_len);
- txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
- RTW89_PCI_ADDR_NUM(1));
+ option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1));
+ option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK);
+ txaddr_info->option = option;
txaddr_info->dma = cpu_to_le32(dma);
*add_info_nr = 1;
@@ -1302,6 +1333,8 @@ u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev,
length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) |
FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) |
FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0);
+ length_option |= u16_encode_bits(upper_32_bits(dma),
+ B_PCIADDR_HIGH_SEL_V1_MASK);
txaddr_info->length_opt = cpu_to_le16(length_option);
txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma));
txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma));
@@ -1392,6 +1425,7 @@ static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
struct sk_buff *skb = tx_req->skb;
struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
dma_addr_t dma;
+ __le16 opt;
txdesc = skb_push(skb, txdesc_size);
memset(txdesc, 0, txdesc_size);
@@ -1404,7 +1438,9 @@ static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
}
tx_data->dma = dma;
- txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
+ opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
+ opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI);
+ txbd->opt = opt;
txbd->length = cpu_to_le16(skb->len);
txbd->dma = cpu_to_le32(tx_data->dma);
skb_queue_tail(&rtwpci->h2c_queue, skb);
@@ -1420,6 +1456,7 @@ static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
struct rtw89_core_tx_request *tx_req)
{
struct rtw89_pci_tx_wd *txwd;
+ __le16 opt;
int ret;
/* FWCMD queue doesn't have wd pages. Instead, it submits the CMD
@@ -1444,7 +1481,9 @@ static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
list_add_tail(&txwd->list, &tx_ring->busy_pages);
- txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
+ opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS);
+ opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI);
+ txbd->opt = opt;
txbd->length = cpu_to_le16(txwd->len);
txbd->dma = cpu_to_le32(txwd->paddr);
@@ -1543,6 +1582,25 @@ const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = {
};
EXPORT_SYMBOL(rtw89_bd_ram_table_single);
+static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_pci_info *info = rtwdev->pci_info;
+ u32 addr = info->wp_sel_addr;
+ u32 val;
+ int i;
+
+ if (!info->wp_sel_addr)
+ return;
+
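+ /* Each pass packs four consecutive selector indexes into one 32-bit
+ * write: i == 0 stores 0x03020100 at wp_sel_addr + 0, i == 4 stores
+ * 0x07060504 at wp_sel_addr + 4, and so on up to offset 12.
+ */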
+ for (i = 0; i < 16; i += 4) {
+ val = u32_encode_bits(i + 0, MASKBYTE0) |
+ u32_encode_bits(i + 1, MASKBYTE1) |
+ u32_encode_bits(i + 2, MASKBYTE2) |
+ u32_encode_bits(i + 3, MASKBYTE3);
+ rtw89_write32(rtwdev, addr + i, val);
+ }
+}
+
static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
{
struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
@@ -1581,6 +1639,7 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
rtw89_write32(rtwdev, addr_bdram, val32);
}
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
+ rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
}
for (i = 0; i < RTW89_RXCH_NUM; i++) {
@@ -1600,10 +1659,13 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
rtw89_write16(rtwdev, addr_num, bd_ring->len);
rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
+ rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma));
if (info->rx_ring_eq_is_full)
rtw89_write16(rtwdev, addr_idx, bd_ring->wp);
}
+
+ rtw89_pci_init_wp_16sel(rtwdev);
}
static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
@@ -2013,7 +2075,7 @@ static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr,
if (!ret)
return 0;
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
ret = rtw89_dbi_write8(rtwdev, addr, data);
return ret;
@@ -2031,7 +2093,7 @@ static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr,
if (!ret)
return 0;
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B)
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))
ret = rtw89_dbi_read8(rtwdev, addr, value);
return ret;
@@ -2111,10 +2173,9 @@ __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate
static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
- if (chip_id != RTL8852B && chip_id != RTL8851B)
+ if (!rtw89_is_rtl885xb(rtwdev))
return 0;
ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK,
@@ -2124,14 +2185,13 @@ static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev)
static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
enum rtw89_pcie_phy phy_rate;
u16 val16, mgn_set, div_set, tar;
u8 val8, bdr_ori;
bool l1_flag = false;
int ret = 0;
- if (chip_id != RTL8852B && chip_id != RTL8851B)
+ if (!rtw89_is_rtl885xb(rtwdev))
return 0;
ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
@@ -2298,6 +2358,68 @@ static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
return 0;
}
+static void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev)
+{
+ u16 g1_oobs, g2_oobs;
+ u32 backup_aspm;
+ u32 phy_offset;
+ u16 oobs_val;
+ int ret;
+
+ if (rtwdev->chip->chip_id != RTL8852C)
+ return;
+
+ g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 +
+ RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
+ g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 +
+ RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL);
+ if (g1_oobs && g2_oobs)
+ return;
+
+ backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK);
+
+ ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset);
+ if (ret)
+ goto out;
+
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA0D * RAC_MULT, BAC_RX_TEST_EN);
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL);
+ rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL);
+
+ oobs_val = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT,
+ OOBS_LEVEL_MASK);
+
+ rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT,
+ OOBS_SEN_MASK, oobs_val);
+ rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT,
+ BAC_OOBS_SEL);
+
+ rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT,
+ OOBS_SEN_MASK, oobs_val);
+ rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT,
+ BAC_OOBS_SEL);
+
+out:
+ rtw89_write32(rtwdev, R_AX_PCIE_MIX_CFG_V1, backup_aspm);
+}
+
+static void rtw89_pci_ber(struct rtw89_dev *rtwdev)
+{
+ u32 phy_offset;
+
+ if (!test_bit(RTW89_QUIRK_PCI_BER, rtwdev->quirks))
+ return;
+
+ phy_offset = R_RAC_DIRECT_OFFSET_G1;
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G1_VAL);
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
+
+ phy_offset = R_RAC_DIRECT_OFFSET_G2;
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA1E * RAC_MULT, RAC_ANA1E_G2_VAL);
+ rtw89_write16(rtwdev, phy_offset + RAC_ANA2E * RAC_MULT, RAC_ANA2E_VAL);
+}
+
static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
{
if (rtwdev->chip->chip_id != RTL8852A)
@@ -2310,7 +2432,7 @@ static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B)
+ if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev))
return;
rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
@@ -2340,7 +2462,7 @@ static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B)
+ if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev))
return;
rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
@@ -2350,7 +2472,7 @@ static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
{
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
@@ -2363,9 +2485,7 @@ static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev)
{
- enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
-
- if (chip_id != RTL8852B && chip_id != RTL8851B)
+ if (!rtw89_is_rtl885xb(rtwdev))
return 0;
return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK,
@@ -2627,7 +2747,7 @@ static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
B_AX_PCIE_RX_APPLEN_MASK, 0);
}
- if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst);
rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst);
} else if (chip_id == RTL8852C) {
@@ -2635,7 +2755,7 @@ static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst);
}
- if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
if (tag_mode == MAC_AX_TAG_SGL) {
val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) &
~B_AX_LATENCY_CONTROL;
@@ -2650,7 +2770,7 @@ static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev)
rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask,
info->multi_tag_num);
- if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
wd_dma_idle_intvl);
rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
@@ -2695,6 +2815,7 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev)
const struct rtw89_pci_info *info = rtwdev->pci_info;
int ret;
+ rtw89_pci_ber(rtwdev);
rtw89_pci_rxdma_prefth(rtwdev);
rtw89_pci_l1off_pwroff(rtwdev);
rtw89_pci_deglitch_setting(rtwdev);
@@ -2862,7 +2983,7 @@ static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev)
/* ltr sw trigger */
rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
}
- if (chip_id == RTL8852A || chip_id == RTL8852B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
/* ADDR info 8-byte mode */
rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
B_AX_HOST_ADDR_INFO_8B_SEL);
@@ -2905,6 +3026,27 @@ static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
pci_disable_device(pdev);
}
+static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (!rtwpci->enable_dac)
+ return;
+
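+ /* Only these AX chips implement the extended DMA address enable bit;
+ * all other chips keep the default 32-bit addressing.
+ */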
+ switch (chip->chip_id) {
+ case RTL8852A:
+ case RTL8852B:
+ case RTL8851B:
+ case RTL8852BT:
+ break;
+ default:
+ return;
+ }
+
+ rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS);
+}
+
static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
struct pci_dev *pdev)
{
@@ -2919,16 +3061,17 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
goto err;
}
- ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret) {
- rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n");
- goto err_release_regions;
- }
-
- ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (ret) {
- rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
- goto err_release_regions;
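+ /* Prefer a 36-bit DMA mask so buffers may sit above 4 GiB; fall back
+ * to the previous 32-bit mask if the platform rejects it.
+ */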
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
+ if (!ret) {
+ rtwpci->enable_dac = true;
+ rtw89_pci_cfg_dac(rtwdev);
+ } else {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ rtw89_err(rtwdev,
+ "failed to set dma and consistent mask to 32/36-bit\n");
+ goto err_release_regions;
+ }
}
resource_len = pci_resource_len(pdev, bar_id);
@@ -3079,6 +3222,7 @@ static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
memset(rx_bd, 0, sizeof(*rx_bd));
rx_bd->buf_size = cpu_to_le16(buf_sz);
rx_bd->dma = cpu_to_le32(dma);
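+ /* The low 32 address bits stay in rx_bd->dma; with the 36-bit DMA
+ * mask, bits 35:32 are carried in the new opt field.
+ */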
+ rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI);
rx_info->dma = dma;
return 0;
@@ -3547,7 +3691,7 @@ static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
unsigned long flags = 0;
int ret;
- flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI;
+ flags |= PCI_IRQ_INTX | PCI_IRQ_MSI;
ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
if (ret < 0) {
rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
@@ -3671,7 +3815,7 @@ static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable)
if (ret)
rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
if (enable)
ret = rtw89_pci_config_byte_set(rtwdev,
RTW89_PCIE_L1_CTRL,
@@ -3723,7 +3867,7 @@ static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable)
if (ret)
rtw89_warn(rtwdev, "failed to read ASPM Delay\n");
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
if (enable)
ret = rtw89_pci_config_byte_set(rtwdev,
RTW89_PCIE_L1_CTRL,
@@ -3822,7 +3966,7 @@ static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable)
enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
int ret;
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
if (enable)
ret = rtw89_pci_config_byte_set(rtwdev,
RTW89_PCIE_TIMER_CTRL,
@@ -4019,7 +4163,7 @@ static int __maybe_unused rtw89_pci_suspend(struct device *dev)
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
@@ -4053,7 +4197,7 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev)
rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
- if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) {
+ if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) {
rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
@@ -4065,6 +4209,8 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev)
B_AX_SEL_REQ_ENTR_L1);
}
rtw89_pci_l2_hci_ldo(rtwdev);
+ rtw89_pci_disable_eq(rtwdev);
+ rtw89_pci_cfg_dac(rtwdev);
rtw89_pci_filter_out(rtwdev);
rtw89_pci_link_cfg(rtwdev);
rtw89_pci_l1ss_cfg(rtwdev);
@@ -4171,6 +4317,8 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
rtwdev->hci.rpwm_addr = pci_info->rpwm_addr;
rtwdev->hci.cpwm_addr = pci_info->cpwm_addr;
+ rtw89_check_quirks(rtwdev, info->quirks);
+
SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
ret = rtw89_core_init(rtwdev);
@@ -4197,11 +4345,16 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_clear_resource;
}
+ rtw89_pci_disable_eq(rtwdev);
rtw89_pci_filter_out(rtwdev);
rtw89_pci_link_cfg(rtwdev);
rtw89_pci_l1ss_cfg(rtwdev);
- rtw89_core_napi_init(rtwdev);
+ ret = rtw89_core_napi_init(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to init napi\n");
+ goto err_clear_resource;
+ }
ret = rtw89_pci_request_irq(rtwdev, pdev);
if (ret) {
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
index a63b6b7c9bfa..48c3ab735db2 100644
--- a/drivers/net/wireless/realtek/rtw89/pci.h
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -12,11 +12,18 @@
#define MDIO_PG0_G2 2
#define MDIO_PG1_G2 3
#define RAC_CTRL_PPR 0x00
+#define RAC_ANA03 0x03
+#define OOBS_SEN_MASK GENMASK(5, 1)
+#define RAC_ANA09 0x09
+#define BAC_OOBS_SEL BIT(4)
#define RAC_ANA0A 0x0A
#define B_BAC_EQ_SEL BIT(5)
#define RAC_ANA0C 0x0C
#define B_PCIE_BIT_PSAVE BIT(15)
+#define RAC_ANA0D 0x0D
+#define BAC_RX_TEST_EN BIT(6)
#define RAC_ANA10 0x10
+#define ADDR_SEL_PINOUT_DIS_VAL 0x3C4
#define B_PCIE_BIT_PINOUT_DIS BIT(3)
#define RAC_REG_REV2 0x1B
#define BAC_CMU_EN_DLY_MASK GENMASK(15, 12)
@@ -26,11 +33,17 @@
#define RAC_REG_FLD_0 0x1D
#define BAC_AUTOK_N_MASK GENMASK(3, 2)
#define PCIE_AUTOK_4 0x3
+#define RAC_ANA1E 0x1E
+#define RAC_ANA1E_G1_VAL 0x66EA
+#define RAC_ANA1E_G2_VAL 0x6EEA
#define RAC_ANA1F 0x1F
+#define OOBS_LEVEL_MASK GENMASK(12, 8)
#define RAC_ANA24 0x24
#define B_AX_DEGLITCH GENMASK(11, 8)
#define RAC_ANA26 0x26
#define B_AX_RXEN GENMASK(15, 14)
+#define RAC_ANA2E 0x2E
+#define RAC_ANA2E_VAL 0xFFFE
#define RAC_CTRL_PPR_V1 0x30
#define B_AX_CLK_CALIB_EN BIT(12)
#define B_AX_CALIB_EN BIT(13)
@@ -711,6 +724,11 @@
#define B_AX_CH11_BUSY BIT(1)
#define B_AX_CH10_BUSY BIT(0)
+#define R_AX_WP_ADDR_H_SEL0_3 0x1334
+#define R_AX_WP_ADDR_H_SEL4_7 0x1338
+#define R_AX_WP_ADDR_H_SEL8_11 0x133C
+#define R_AX_WP_ADDR_H_SEL12_15 0x1340
+
#define R_BE_HAXI_DMA_STOP1 0xB010
#define B_BE_STOP_WPDMA BIT(31)
#define B_BE_STOP_CH14 BIT(14)
@@ -810,6 +828,11 @@
#define R_BE_RPQ0_RXBD_DESA_L_V1 0xB308
#define R_BE_RPQ0_RXBD_DESA_H_V1 0xB30C
+#define R_BE_WP_ADDR_H_SEL0_3_V1 0xB420
+#define R_BE_WP_ADDR_H_SEL4_7_V1 0xB424
+#define R_BE_WP_ADDR_H_SEL8_11_V1 0xB428
+#define R_BE_WP_ADDR_H_SEL12_15_V1 0xB42C
+
/* Configure */
#define R_AX_PCIE_INIT_CFG2 0x1004
#define B_AX_WD_ITVL_IDLE GENMASK(27, 24)
@@ -1042,6 +1065,7 @@
#define RTW89_PCIE_TIMER_CTRL 0x0718
#define RTW89_PCIE_BIT_L1SUB BIT(5)
#define RTW89_PCIE_L1_CTRL 0x0719
+#define RTW89_PCIE_BIT_EN_64BITS BIT(5)
#define RTW89_PCIE_BIT_CLK BIT(4)
#define RTW89_PCIE_BIT_L1 BIT(3)
#define RTW89_PCIE_CLK_CTRL 0x0725
@@ -1291,6 +1315,7 @@ struct rtw89_pci_info {
u32 rpwm_addr;
u32 cpwm_addr;
u32 mit_addr;
+ u32 wp_sel_addr;
u32 tx_dma_ch_mask;
const struct rtw89_pci_bd_idx_addr *bd_idx_addr_low_power;
const struct rtw89_pci_ch_dma_addr_set *dma_addr_set;
@@ -1317,11 +1342,11 @@ struct rtw89_pci_rx_info {
u32 fs:1, ls:1, tag:13, len:14;
};
-#define RTW89_PCI_TXBD_OPTION_LS BIT(14)
-
struct rtw89_pci_tx_bd_32 {
__le16 length;
- __le16 option;
+ __le16 opt;
+#define RTW89_PCI_TXBD_OPT_LS BIT(14)
+#define RTW89_PCI_TXBD_OPT_DMA_HI GENMASK(13, 6)
__le32 dma;
} __packed;
@@ -1336,7 +1361,7 @@ struct rtw89_pci_tx_wp_info {
#define RTW89_PCI_ADDR_MSDU_LS BIT(15)
#define RTW89_PCI_ADDR_LS BIT(14)
-#define RTW89_PCI_ADDR_HIGH(a) (((a) << 6) & GENMASK(13, 6))
+#define RTW89_PCI_ADDR_HIGH_MASK GENMASK(13, 6)
#define RTW89_PCI_ADDR_NUM(x) ((x) & GENMASK(5, 0))
struct rtw89_pci_tx_addr_info_32 {
@@ -1373,7 +1398,8 @@ struct rtw89_pci_rpp_fmt {
struct rtw89_pci_rx_bd_32 {
__le16 buf_size;
- __le16 rsvd;
+ __le16 opt;
+#define RTW89_PCI_RXBD_OPT_DMA_HI GENMASK(13, 6)
__le32 dma;
} __packed;
@@ -1462,6 +1488,7 @@ struct rtw89_pci {
bool running;
bool low_power;
bool under_recovery;
+ bool enable_dac;
struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM];
struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM];
struct sk_buff_head h2c_queue;
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
index 12da63d64307..ad11d1414874 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.c
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -2,6 +2,7 @@
/* Copyright(c) 2019-2020 Realtek Corporation
*/
+#include "chan.h"
#include "coex.h"
#include "debug.h"
#include "fw.h"
@@ -122,6 +123,7 @@ static u64 get_eht_ra_mask(struct ieee80211_sta *sta)
struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap;
struct ieee80211_eht_mcs_nss_supp_20mhz_only *mcs_nss_20mhz;
struct ieee80211_eht_mcs_nss_supp_bw *mcs_nss;
+ u8 *he_phy_cap = sta->deflink.he_cap.he_cap_elem.phy_cap_info;
switch (sta->deflink.bandwidth) {
case IEEE80211_STA_RX_BW_320:
@@ -132,15 +134,19 @@ static u64 get_eht_ra_mask(struct ieee80211_sta *sta)
mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._160;
/* MCS 9, 11, 13 */
return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
+ case IEEE80211_STA_RX_BW_20:
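+ /* No HE channel-width bit set means a 20 MHz-only client, which
+ * advertises its own EHT MCS/NSS set.
+ */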
+ if (!(he_phy_cap[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK_ALL)) {
+ mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz;
+ /* MCS 7, 9, 11, 13 */
+ return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4);
+ }
+ fallthrough;
case IEEE80211_STA_RX_BW_80:
default:
mcs_nss = &eht_cap->eht_mcs_nss_supp.bw._80;
/* MCS 9, 11, 13 */
return get_eht_mcs_ra_mask(mcs_nss->rx_tx_max_nss, 9, 3);
- case IEEE80211_STA_RX_BW_20:
- mcs_nss_20mhz = &eht_cap->eht_mcs_nss_supp.only_20mhz;
- /* MCS 7, 9, 11, 13 */
- return get_eht_mcs_ra_mask(mcs_nss_20mhz->rx_tx_max_nss, 7, 4);
}
}
@@ -1671,7 +1677,7 @@ static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev)
rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000);
if (chip->chip_id != RTL8851B)
rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000);
- if (chip->chip_id == RTL8852B)
+ if (chip->chip_id == RTL8852B || chip->chip_id == RTL8852BT)
rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2);
/* check 0x8080 */
@@ -1842,6 +1848,36 @@ static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf)
return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac);
}
+static s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
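+ /* Shift dBm into the MAC power unit and clamp the result to a
+ * signed 7-bit range.
+ */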
+ return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63);
+}
+
+static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm)
+{
+ const u8 tssi_deviation_point = 0;
+ const u8 tssi_max_deviation = 2;
+
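+ /* At or below the deviation point, deduct the maximum TSSI deviation
+ * so the derived constraint errs on the conservative side.
+ */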
+ if (dbm <= tssi_deviation_point)
+ dbm -= tssi_max_deviation;
+
+ return dbm;
+}
+
+static s8 rtw89_phy_get_tpe_constraint(struct rtw89_dev *rtwdev, u8 band)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_reg_6ghz_tpe *tpe = &regulatory->reg_6ghz_tpe;
+ s8 cstr = S8_MAX;
+
+ if (band == RTW89_BAND_6G && tpe->valid)
+ cstr = rtw89_phy_txpwr_dbm_without_tolerance(tpe->constraint);
+
+ return rtw89_phy_txpwr_dbm_to_mac(rtwdev, cstr);
+}
+
s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw,
const struct rtw89_rate_desc *rate_desc)
{
@@ -1916,6 +1952,7 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
s8 lmt = 0, sar;
+ s8 cstr;
switch (band) {
case RTW89_BAND_2G:
@@ -1948,8 +1985,9 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band,
lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt);
sar = rtw89_query_sar(rtwdev, freq);
+ cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
- return min(lmt, sar);
+ return min3(lmt, sar, cstr);
}
EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit);
@@ -2173,6 +2211,7 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
u8 regd = rtw89_regd_get(rtwdev, band);
u8 reg6 = regulatory->reg_6ghz_power;
s8 lmt_ru = 0, sar;
+ s8 cstr;
switch (band) {
case RTW89_BAND_2G:
@@ -2205,8 +2244,9 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band,
lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru);
sar = rtw89_query_sar(rtwdev, freq);
+ cstr = rtw89_phy_get_tpe_constraint(rtwdev, band);
- return min(lmt_ru, sar);
+ return min3(lmt_ru, sar, cstr);
}
static void
@@ -5964,6 +6004,74 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif
vif->cfg.aid, phy_idx);
}
+static bool rfk_chan_validate_desc(const struct rtw89_rfk_chan_desc *desc)
+{
+ return desc->ch != 0;
+}
+
+static bool rfk_chan_is_equivalent(const struct rtw89_rfk_chan_desc *desc,
+ const struct rtw89_chan *chan)
+{
+ if (!rfk_chan_validate_desc(desc))
+ return false;
+
+ if (desc->ch != chan->channel)
+ return false;
+
+ if (desc->has_band && desc->band != chan->band_type)
+ return false;
+
+ if (desc->has_bw && desc->bw != chan->band_width)
+ return false;
+
+ return true;
+}
+
+struct rfk_chan_iter_data {
+ const struct rtw89_rfk_chan_desc desc;
+ unsigned int found;
+};
+
+static int rfk_chan_iter_search(const struct rtw89_chan *chan, void *data)
+{
+ struct rfk_chan_iter_data *iter_data = data;
+
+ if (rfk_chan_is_equivalent(&iter_data->desc, chan))
+ iter_data->found++;
+
+ return 0;
+}
+
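+/* Select which RFK table entry to use for @target_chan: an exact match
+ * wins; otherwise prefer the first entry whose channel is not active on
+ * any chanctx; if every entry is busy, entry 0 is forcibly replaced.
+ */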
+u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
+ const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
+ const struct rtw89_chan *target_chan)
+{
+ int sel = -1;
+ u8 i;
+
+ for (i = 0; i < desc_nr; i++) {
+ struct rfk_chan_iter_data iter_data = {
+ .desc = desc[i],
+ };
+
+ if (rfk_chan_is_equivalent(&desc[i], target_chan))
+ return i;
+
+ rtw89_iterate_entity_chan(rtwdev, rfk_chan_iter_search, &iter_data);
+ if (!iter_data.found && sel == -1)
+ sel = i;
+ }
+
+ if (sel == -1) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "no idle rfk entry; force replace the first\n");
+ sel = 0;
+ }
+
+ return sel;
+}
+EXPORT_SYMBOL(rtw89_rfk_chan_lookup);
+
static void
_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
{
@@ -6398,10 +6506,8 @@ enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
return RF_D;
case MLO_0_PLUS_2_1RF:
case MLO_2_PLUS_0_1RF:
- if (phy_idx == RTW89_PHY_0)
- return RF_AB;
- else
- return RF_AB;
+ /* for both PHY 0/1 */
+ return RF_AB;
case MLO_0_PLUS_2_2RF:
case MLO_2_PLUS_0_2RF:
case MLO_2_PLUS_2_2RF:
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
index 082231ebbee5..d8df553b9cb0 100644
--- a/drivers/net/wireless/realtek/rtw89/phy.h
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -129,6 +129,7 @@
#define EDCCA_HL_DIFF_NORMAL 8
#define RSSI_UNIT_CONVER 110
#define EDCCA_UNIT_CONVER 128
+#define EDCCA_PWROFST_DEFAULT 18
enum rtw89_phy_c2h_ra_func {
RTW89_PHY_C2H_FUNC_STS_RPT,
@@ -714,6 +715,19 @@ enum rtw89_phy_gain_band_be rtw89_subband_to_gain_band_be(enum rtw89_subband sub
}
}
+struct rtw89_rfk_chan_desc {
+ /* desc is valid iff ch is non-zero */
+ u8 ch;
+
+ /* To avoid having to extend old chip code every time, each new
+ * field must be defined along with a bool flag stated in a
+ * positive way.
+ */
+ bool has_band;
+ u8 band;
+ bool has_bw;
+ u8 bw;
+};
+
enum rtw89_rfk_flag {
RTW89_RFK_F_WRF = 0,
RTW89_RFK_F_WM = 1,
@@ -949,5 +963,8 @@ enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev,
enum rtw89_phy_idx phy_idx);
+u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev,
+ const struct rtw89_rfk_chan_desc *desc, u8 desc_nr,
+ const struct rtw89_chan *target_chan);
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/phy_be.c b/drivers/net/wireless/realtek/rtw89/phy_be.c
index be0148f2b96f..72eda9bbd3ae 100644
--- a/drivers/net/wireless/realtek/rtw89/phy_be.c
+++ b/drivers/net/wireless/realtek/rtw89/phy_be.c
@@ -381,6 +381,23 @@ static void rtw89_phy_bb_wrap_ftm_init(struct rtw89_dev *rtwdev,
rtw89_write32_mask(rtwdev, addr, 0x7, 0);
}
+static void rtw89_phy_bb_wrap_ul_pwr(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u8 mac_idx;
+ u32 addr;
+
+ if (chip_id != RTL8922A)
+ return;
+
+ for (mac_idx = 0; mac_idx < RTW89_MAC_NUM; mac_idx++) {
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_RSSI_TARGET_LMT, mac_idx);
+ rtw89_write32(rtwdev, addr, 0x0201FE00);
+ addr = rtw89_mac_reg_by_idx(rtwdev, R_BE_PWR_TH, mac_idx);
+ rtw89_write32(rtwdev, addr, 0x00FFEC7E);
+ }
+}
+
static void rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev)
{
enum rtw89_mac_idx mac_idx = RTW89_MAC_0;
@@ -391,6 +408,7 @@ static void rtw89_phy_bb_wrap_init_be(struct rtw89_dev *rtwdev)
rtw89_phy_bb_wrap_force_cr_init(rtwdev, mac_idx);
rtw89_phy_bb_wrap_ftm_init(rtwdev, mac_idx);
rtw89_phy_bb_wrap_tpu_set_all(rtwdev, mac_idx);
+ rtw89_phy_bb_wrap_ul_pwr(rtwdev);
}
static void rtw89_phy_ch_info_init_be(struct rtw89_dev *rtwdev)
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
index 31290d8cb7f7..92074b73ebeb 100644
--- a/drivers/net/wireless/realtek/rtw89/ps.c
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -55,7 +55,8 @@ static void rtw89_ps_power_mode_change_with_hci(struct rtw89_dev *rtwdev,
static void rtw89_ps_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
{
- if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode))
+ if (rtwdev->chip->low_power_hci_modes & BIT(rtwdev->ps_mode) &&
+ !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags))
rtw89_ps_power_mode_change_with_hci(rtwdev, enter);
else
rtw89_mac_power_mode_change(rtwdev, enter);
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
index 72e448e91b6f..7df36f3bff0b 100644
--- a/drivers/net/wireless/realtek/rtw89/reg.h
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -235,6 +235,9 @@
#define R_AX_SPSANA_ON_CTRL1 0x0224
+#define R_AX_SPS_ANA_ON_CTRL2 0x0228
+#define RTL8852B_RFE_05_SPS_ANA 0x4A82
+
#define R_AX_WLAN_XTAL_SI_CTRL 0x0270
#define B_AX_WL_XTAL_SI_CMD_POLL BIT(31)
#define B_AX_BT_XTAL_SI_ERR_FLAG BIT(30)
@@ -308,6 +311,9 @@
#define B_AX_S1_LDO2PWRCUT_F BIT(23)
#define B_AX_S0_LDO_VSEL_F_MASK GENMASK(22, 21)
+#define R_AX_DBG_WOW 0x0504
+#define B_AX_DBG_WOW_CPU_IO_RX_EN BIT(8)
+
#define R_AX_SEC_CTRL 0x0C00
#define B_AX_SEC_IDMEM_SIZE_CONFIG_MASK GENMASK(17, 16)
@@ -1891,7 +1897,6 @@
B_AX_B0_IMR_ERR_USRCTL_NOINIT | \
B_AX_B0_IMR_ERR_CMDPSR_1STCMDERR | \
B_AX_B0_IMR_ERR_CMDPSR_CMDTYPE | \
- B_AX_B0_IMR_ERR_CMDPSR_FRZTO | \
B_AX_B0_IMR_ERR_CMDPSR_TBLSZ | \
B_AX_B0_IMR_ERR_MPDUINFO_RECFG | \
B_AX_B0_IMR_ERR_MPDUIF_DATAERR | \
@@ -3167,6 +3172,8 @@
#define R_AX_DLK_PROTECT_CTL_C1 0xEE02
#define B_AX_RX_DLK_CCA_TIME_MASK GENMASK(15, 8)
#define B_AX_RX_DLK_DATA_TIME_MASK GENMASK(7, 4)
+#define B_AX_RX_DLK_RST_EN BIT(1)
+#define B_AX_RX_DLK_INT_EN BIT(0)
#define R_AX_PLCP_HDR_FLTR 0xCE04
#define R_AX_PLCP_HDR_FLTR_C1 0xEE04
@@ -4311,6 +4318,8 @@
#define R_BE_WLCPU_PORT_PC 0x03FC
+#define R_BE_DBG_WOW 0x0504
+
#define R_BE_DCPU_PLATFORM_ENABLE 0x0888
#define B_BE_DCPU_SYM_DPLT_MEM_MUX_EN BIT(10)
#define B_BE_DCPU_WARM_EN BIT(9)
@@ -7497,6 +7506,9 @@
#define B_BE_PWR_BT_VAL GENMASK(8, 0)
#define B_BE_PWR_FORCE_COEX_ON GENMASK(29, 27)
+#define R_BE_PWR_TH 0x11A78
+#define R_BE_PWR_RSSI_TARGET_LMT 0x11A84
+
#define R_BE_PWR_OFST_SW 0x11AE8
#define B_BE_PWR_OFST_SW_DB GENMASK(27, 24)
@@ -7806,6 +7818,8 @@
#define B_UPD_P0_EN BIT(31)
#define R_EMLSR 0x0044
#define B_EMLSR_PARM GENMASK(27, 12)
+#define R_CHK_LPS_STAT 0x0058
+#define B_CHK_LPS_STAT BIT(0)
#define R_SPOOF_CG 0x00B4
#define B_SPOOF_CG_EN BIT(17)
#define R_CHINFO_SEG 0x00B4
@@ -7822,6 +7836,7 @@
#define B_ANAPAR_PW15_H2 GENMASK(27, 26)
#define R_ANAPAR 0x032C
#define B_ANAPAR_15 GENMASK(31, 16)
+#define B_ANAPAR_EN1 BIT(31)
#define B_ANAPAR_ADCCLK BIT(30)
#define B_ANAPAR_FLTRST BIT(22)
#define B_ANAPAR_CRXBB GENMASK(18, 16)
@@ -7863,10 +7878,12 @@
#define R_RXCCA_BE1 0x0520
#define B_RXCCA_BE1_DIS BIT(0)
#define R_UPD_CLK_ADC 0x0700
+#define B_UPD_GEN_ON BIT(27)
#define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
#define B_UPD_CLK_ADC_ON BIT(24)
#define B_ENABLE_CCK BIT(5)
#define R_RSTB_ASYNC 0x0704
+#define B_RSTB_ASYNC_BW80 GENMASK(9, 8)
#define B_RSTB_ASYNC_ALL BIT(1)
#define R_P0_ANT_SW 0x0728
#define B_P0_HW_ANTSW_DIS_BY_GNT_BT BIT(12)
@@ -7930,6 +7947,8 @@
#define B_MEASUREMENT_TRIG_MSK BIT(2)
#define B_CCX_TRIG_OPT_MSK BIT(1)
#define B_CCX_EN_MSK BIT(0)
+#define R_FAHM 0x0C1C
+#define B_RXTD_CKEN BIT(2)
#define R_IFS_COUNTER 0x0C28
#define B_IFS_CLM_PERIOD_MSK GENMASK(31, 16)
#define B_IFS_CLM_COUNTER_UNIT_MSK GENMASK(15, 14)
@@ -7963,6 +7982,7 @@
#define B_IQK_DPK_RST BIT(0)
#define R_TX_COLLISION_T2R_ST 0x0C70
#define B_TX_COLLISION_T2R_ST_M GENMASK(25, 20)
+#define B_TXRX_FORCE_VAL GENMASK(9, 0)
#define R_TXGATING 0x0C74
#define B_TXGATING_EN BIT(4)
#define R_TXRFC 0x0C7C
@@ -8023,6 +8043,7 @@
#define B_P0_RFMODE_FTM_RX GENMASK(11, 0)
#define R_P0_NRBW 0x12B8
#define B_P0_NRBW_DBG BIT(30)
+#define B_P0_NRBW_RSTB BIT(28)
#define R_S0_RXDC 0x12D4
#define B_S0_RXDC_I GENMASK(25, 16)
#define B_S0_RXDC_Q GENMASK(31, 26)
@@ -8104,6 +8125,8 @@
#define R_S0_ADDCK 0x1E00
#define B_S0_ADDCK_I GENMASK(9, 0)
#define B_S0_ADDCK_Q GENMASK(19, 10)
+#define R_TXCKEN_FORCE 0x2008
+#define B_TXCKEN_FORCE_ALL GENMASK(24, 0)
#define R_EDCCA_RPT_SEL 0x20CC
#define B_EDCCA_RPT_SEL_MSK GENMASK(2, 0)
#define R_ADC_FIFO 0x20fc
@@ -8259,6 +8282,7 @@
#define R_DCFO_COMP_S0 0x448C
#define B_DCFO_COMP_S0_MSK GENMASK(11, 0)
#define R_DCFO_WEIGHT 0x4490
+#define B_DAC_CLK_IDX BIT(31)
#define B_DCFO_WEIGHT_MSK GENMASK(27, 24)
#define R_DCFO_OPT 0x4494
#define B_DCFO_OPT_EN BIT(29)
@@ -8374,6 +8398,7 @@
#define B_CDD_EVM_CHK_EN BIT(0)
#define R_PATH0_BAND_SEL_V1 0x4738
#define B_PATH0_BAND_SEL_MSK_V1 BIT(17)
+#define B_PATH0_BAND_NRBW_EN_V1 BIT(16)
#define R_PATH0_BT_SHARE_V1 0x4738
#define B_PATH0_BT_SHARE_V1 BIT(19)
#define R_PATH0_BTG_PATH_V1 0x4738
@@ -8417,6 +8442,7 @@
#define B_PATH1_G_TIA1_LNA6_OP1DB_V1 GENMASK(15, 8)
#define R_PATH1_BAND_SEL_V1 0x4AA4
#define B_PATH1_BAND_SEL_MSK_V1 BIT(17)
+#define B_PATH1_BAND_NRBW_EN_V1 BIT(16)
#define R_PATH1_BT_SHARE_V1 0x4AA4
#define B_PATH1_BT_SHARE_V1 BIT(19)
#define R_PATH1_BTG_PATH_V1 0x4AA4
@@ -8437,6 +8463,8 @@
#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1 BIT(30)
#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK BIT(29)
#define B_SEG0R_PD_LOWER_BOUND_MSK GENMASK(10, 6)
+#define R_PWOFST 0x488C
+#define B_PWOFST GENMASK(21, 17)
#define R_2P4G_BAND 0x4970
#define B_2P4G_BAND_SEL BIT(1)
#define R_FC0_BW 0x4974
@@ -8617,6 +8645,8 @@
#define B_P0_TMETER GENMASK(15, 10)
#define B_P0_TMETER_DIS BIT(16)
#define B_P0_TMETER_TRK BIT(24)
+#define R_P0_ADCFF_EN 0x58C8
+#define B_P0_ADCFF_EN BIT(24)
#define R_P1_TSSIC 0x7814
#define B_P1_TSSIC_BYPASS BIT(11)
#define R_P0_TSSI_TRK 0x5818
@@ -8628,7 +8658,9 @@
#define B_P0_TSSI_EN BIT(31)
#define B_P0_TSSI_AVG GENMASK(15, 12)
#define R_P0_RFCTM 0x5864
+#define B_P0_CLKG_FORCE GENMASK(31, 30)
#define B_P0_RFCTM_EN BIT(29)
+#define B_P0_GOT_TXRX GENMASK(28, 27)
#define B_P0_RFCTM_VAL GENMASK(25, 20)
#define R_P0_RFCTM_RDY BIT(26)
#define R_P0_TRSW 0x5868
@@ -8661,12 +8693,14 @@
#define B_P0_RFM_BT_EN BIT(5)
#define B_P0_RFM_OUT GENMASK(4, 0)
#define R_P0_PATH_RST 0x58AC
+#define B_P0_PATH_RST BIT(27)
#define R_P0_TXDPD 0x58D4
#define B_P0_TXDPD GENMASK(31, 28)
#define R_P0_TXPW_RSTB 0x58DC
#define B_P0_TXPW_RSTB_MANON BIT(30)
#define B_P0_TXPW_RSTB_TSSI BIT(31)
#define R_P0_TSSI_MV_AVG 0x58E4
+#define B_P0_TXPW_RSTB GENMASK(28, 27)
#define B_P0_TSSI_MV_MIX GENMASK(19, 11)
#define B_P0_TSSI_MV_AVG GENMASK(13, 11)
#define B_P0_TSSI_MV_CLR BIT(14)
@@ -8791,6 +8825,10 @@
#define B_P1_TSSI_ALIM2 GENMASK(29, 0)
#define R_P1_TSSI_ADC_CLK 0x766c
#define B_P1_TSSI_ADC_CLK GENMASK(17, 16)
+#define R_P1_TXAGC_TH 0x7800
+#define B_P1_TXAGC_MAXMIN GENMASK(15, 0)
+#define R_P1_TXPW_FORCE 0x780C
+#define B_P1_TXPW_RDY BIT(15)
#define R_P1_TSSIC 0x7814
#define B_P1_TSSIC_BYPASS BIT(11)
#define R_P1_TMETER 0x7810
@@ -8806,14 +8844,20 @@
#define B_P1_TSSI_EN BIT(31)
#define B_P1_TSSI_AVG GENMASK(15, 12)
#define R_P1_RFCTM 0x7864
+#define B_P1_CLKG_FORCE GENMASK(31, 30)
+#define B_P1_GOT_TXRX GENMASK(28, 27)
#define R_P1_RFCTM_RDY BIT(26)
#define B_P1_RFCTM_VAL GENMASK(25, 20)
#define B_P1_RFCTM_DEL GENMASK(19, 11)
#define R_P1_PATH_RST 0x78AC
+#define B_P1_PATH_RST BIT(27)
+#define R_P1_ADCFF_EN 0x78C8
+#define B_P1_ADCFF_EN BIT(24)
#define R_P1_TXPW_RSTB 0x78DC
#define B_P1_TXPW_RSTB_MANON BIT(30)
#define B_P1_TXPW_RSTB_TSSI BIT(31)
#define R_P1_TSSI_MV_AVG 0x78E4
+#define B_P1_TXPW_RSTB GENMASK(28, 27)
#define B_P1_TSSI_MV_MIX GENMASK(19, 11)
#define B_P1_TSSI_MV_AVG GENMASK(13, 11)
#define B_P1_TSSI_MV_CLR BIT(14)
@@ -8998,6 +9042,7 @@
#define R_IQRSN 0x8220
#define B_IQRSN_K1 BIT(28)
#define B_IQRSN_K2 BIT(16)
+#define R_DPD_CH0B 0x82BC
#define R_RXCFIR_P0C0 0x8D40
#define R_RXCFIR_P0C1 0x8D84
#define R_RXCFIR_P0C2 0x8DC8
@@ -9031,15 +9076,18 @@
#define B_IQKINF2_FCNT GENMASK(23, 16)
#define B_IQKINF2_KCNT GENMASK(15, 8)
#define B_IQKINF2_NCTLV GENMASK(7, 0)
+#define R_RFK_ST 0xBFF8
#define R_DCOF0 0xC000
#define B_DCOF0_RST BIT(17)
#define B_DCOF0_V GENMASK(4, 1)
#define R_DCOF1 0xC004
+#define B_DCOF1_VAL GENMASK(31, 20)
#define B_DCOF1_RST BIT(17)
#define B_DCOF1_S BIT(0)
#define R_DCOF8 0xC020
#define B_DCOF8_V GENMASK(4, 1)
#define R_DCOF9 0xC024
+#define B_DCOF9_VAL GENMASK(31, 20)
#define B_DCOF9_RST BIT(17)
#define R_DACK_S0P0 0xC040
#define B_DACK_S0P0_OK BIT(31)
@@ -9090,6 +9138,7 @@
#define R_ADCMOD 0xC0E8
#define B_ADCMOD_LP GENMASK(31, 16)
#define R_DCIM 0xC0EC
+#define B_DCIM_RC GENMASK(23, 16)
#define B_DCIM_FR GENMASK(14, 13)
#define R_ADDCK0D 0xC0F0
#define B_ADDCK0D_VAL2 GENMASK(31, 26)
@@ -9112,11 +9161,18 @@
#define B_ADDCKR0_DC GENMASK(15, 4)
#define B_ADDCKR0_A1 GENMASK(9, 0)
#define R_DACK10 0xC100
+#define B_DACK10_RST BIT(17)
#define B_DACK10 GENMASK(4, 1)
#define R_DACK1_K 0xc104
+#define B_DACK1_VAL GENMASK(31, 20)
+#define B_DACK1_RST BIT(17)
#define B_DACK1_EN BIT(0)
#define R_DACK11 0xC120
#define B_DACK11 GENMASK(4, 1)
+#define R_DACK2_K 0xC124
+#define B_DACK2_VAL GENMASK(31, 20)
+#define B_DACK2_RST BIT(17)
+#define B_DACK2_EN BIT(0)
#define R_DACK_S1P0 0xC140
#define B_DACK_S1P0_OK BIT(31)
#define R_DACK_BIAS10 0xC148
@@ -9165,6 +9221,11 @@
#define B_DACKN0_V GENMASK(21, 14)
#define R_DACKN1_CTL 0xC224
#define B_DACKN1_V GENMASK(21, 14)
+#define B_DACKN1_ON BIT(0)
+#define R_DACKN2_CTL 0xC238
+#define B_DACKN2_ON BIT(0)
+#define R_DACKN3_CTL 0xC24C
+#define B_DACKN3_ON BIT(0)
#define R_GAIN_MAP0 0xE44C
#define B_GAIN_MAP0_EN BIT(0)
#define R_GAIN_MAP1 0xE54C
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
index d0857ef60ea6..a251b0e3b16e 100644
--- a/drivers/net/wireless/realtek/rtw89/regd.c
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -341,51 +341,60 @@ do { \
static void rtw89_regd_setup_unii4(struct rtw89_dev *rtwdev,
struct wiphy *wiphy)
{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
const struct rtw89_chip_info *chip = rtwdev->chip;
- bool regd_allow_unii_4 = chip->support_unii4;
struct ieee80211_supported_band *sband;
struct rtw89_acpi_dsm_result res = {};
+ bool enable_by_fcc;
+ bool enable_by_ic;
int ret;
u8 val;
+ int i;
- if (!chip->support_unii4)
- goto bottom;
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
+ if (!sband)
+ return;
+
+ if (!chip->support_unii4) {
+ sband->n_channels -= RTW89_5GHZ_UNII4_CHANNEL_NUM;
+ return;
+ }
+
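+ /* Block UNII-4 for every regulatory domain first; the ACPI result
+ * below selectively unblocks FCC and/or IC domains.
+ */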
+ bitmap_fill(regulatory->block_unii4, RTW89_REGD_MAX_COUNTRY_NUM);
- ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_59G_EN, &res);
+ ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_UNII4_SUP, &res);
if (ret) {
rtw89_debug(rtwdev, RTW89_DBG_REGD,
"acpi: cannot eval unii 4: %d\n", ret);
+ enable_by_fcc = true;
+ enable_by_ic = false;
goto bottom;
}
val = res.u.value;
+ enable_by_fcc = u8_get_bits(val, RTW89_ACPI_CONF_UNII4_FCC);
+ enable_by_ic = u8_get_bits(val, RTW89_ACPI_CONF_UNII4_IC);
rtw89_debug(rtwdev, RTW89_DBG_REGD,
- "acpi: eval if allow unii 4: %d\n", val);
-
- switch (val) {
- case 0:
- regd_allow_unii_4 = false;
- break;
- case 1:
- regd_allow_unii_4 = true;
- break;
- default:
- break;
- }
+ "acpi: eval if allow unii-4: 0x%x\n", val);
bottom:
- rtw89_debug(rtwdev, RTW89_DBG_REGD, "regd: allow unii 4: %d\n",
- regd_allow_unii_4);
-
- if (regd_allow_unii_4)
- return;
-
- sband = wiphy->bands[NL80211_BAND_5GHZ];
- if (!sband)
- return;
+ for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) {
+ const struct rtw89_regd *regd = &rtw89_regd_map[i];
- sband->n_channels -= 3;
+ switch (regd->txpwr_regd[RTW89_BAND_5G]) {
+ case RTW89_FCC:
+ if (enable_by_fcc)
+ clear_bit(i, regulatory->block_unii4);
+ break;
+ case RTW89_IC:
+ if (enable_by_ic)
+ clear_bit(i, regulatory->block_unii4);
+ break;
+ default:
+ break;
+ }
+ }
}
static void __rtw89_regd_setup_policy_6ghz(struct rtw89_dev *rtwdev, bool block,
@@ -459,6 +468,51 @@ out:
kfree(ptr);
}
+static void rtw89_regd_setup_policy_6ghz_sp(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_acpi_policy_6ghz_sp *ptr;
+ struct rtw89_acpi_dsm_result res = {};
+ bool enable_by_us;
+ int ret;
+ int i;
+
+ ret = rtw89_acpi_evaluate_dsm(rtwdev, RTW89_ACPI_DSM_FUNC_6GHZ_SP_SUP, &res);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "acpi: cannot eval policy 6ghz-sp: %d\n", ret);
+ return;
+ }
+
+ ptr = res.u.policy_6ghz_sp;
+
+ switch (ptr->override) {
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "%s: unknown override case: %d\n", __func__,
+ ptr->override);
+ fallthrough;
+ case 0:
+ goto out;
+ case 1:
+ break;
+ }
+
+ bitmap_fill(regulatory->block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM);
+
+ enable_by_us = u8_get_bits(ptr->conf, RTW89_ACPI_CONF_6GHZ_SP_US);
+
+ for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) {
+ const struct rtw89_regd *tmp = &rtw89_regd_map[i];
+
+ if (enable_by_us && memcmp(tmp->alpha2, "US", 2) == 0)
+ clear_bit(i, regulatory->block_6ghz_sp);
+ }
+
+out:
+ kfree(ptr);
+}
+
static void rtw89_regd_setup_6ghz(struct rtw89_dev *rtwdev, struct wiphy *wiphy)
{
const struct rtw89_chip_info *chip = rtwdev->chip;
@@ -501,6 +555,7 @@ bottom:
if (regd_allow_6ghz) {
rtw89_regd_setup_policy_6ghz(rtwdev);
+ rtw89_regd_setup_policy_6ghz_sp(rtwdev);
return;
}
@@ -562,20 +617,47 @@ int rtw89_regd_init(struct rtw89_dev *rtwdev,
return 0;
}
-static void rtw89_regd_apply_policy_6ghz(struct rtw89_dev *rtwdev,
- struct wiphy *wiphy)
+static void rtw89_regd_apply_policy_unii4(struct rtw89_dev *rtwdev,
+ struct wiphy *wiphy)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
const struct rtw89_regd *regd = regulatory->regd;
struct ieee80211_supported_band *sband;
u8 index;
int i;
+ sband = wiphy->bands[NL80211_BAND_5GHZ];
+ if (!sband)
+ return;
+
+ if (!chip->support_unii4)
+ return;
+
index = rtw89_regd_get_index(regd);
- if (index == RTW89_REGD_MAX_COUNTRY_NUM)
+ if (index != RTW89_REGD_MAX_COUNTRY_NUM &&
+ !test_bit(index, regulatory->block_unii4))
return;
- if (!test_bit(index, regulatory->block_6ghz))
+ rtw89_debug(rtwdev, RTW89_DBG_REGD, "%c%c unii-4 is blocked by policy\n",
+ regd->alpha2[0], regd->alpha2[1]);
+
+ for (i = RTW89_5GHZ_UNII4_START_INDEX; i < sband->n_channels; i++)
+ sband->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+}
+
+static void rtw89_regd_apply_policy_6ghz(struct rtw89_dev *rtwdev,
+ struct wiphy *wiphy)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd *regd = regulatory->regd;
+ struct ieee80211_supported_band *sband;
+ u8 index;
+ int i;
+
+ index = rtw89_regd_get_index(regd);
+ if (index != RTW89_REGD_MAX_COUNTRY_NUM &&
+ !test_bit(index, regulatory->block_6ghz))
return;
rtw89_debug(rtwdev, RTW89_DBG_REGD, "%c%c 6 GHz is blocked by policy\n",
@@ -604,6 +686,7 @@ static void rtw89_regd_notifier_apply(struct rtw89_dev *rtwdev,
else
wiphy->regulatory_flags &= ~REGULATORY_COUNTRY_IE_IGNORE;
+ rtw89_regd_apply_policy_unii4(rtwdev, wiphy);
rtw89_regd_apply_policy_6ghz(rtwdev, wiphy);
}
@@ -631,13 +714,162 @@ exit:
mutex_unlock(&rtwdev->mutex);
}
-static void __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
+/* Maximum Transmit Power field (@raw) can be EIRP or PSD.
+ * Both units are 0.5 dB-based. Return a constraint in dB.
+ */
+static s8 tpe_get_constraint(s8 raw)
+{
+ const u8 hw_deviation = 3; /* unit: 0.5 dB */
+ const u8 antenna_gain = 10; /* unit: 0.5 dB */
+ const u8 array_gain = 6; /* unit: 0.5 dB */
+ const u8 offset = hw_deviation + antenna_gain + array_gain;
+
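+ /* e.g. a raw limit of 36 (18 dBm in 0.5 dB steps) yields
+ * (36 - 19) / 2 = 8 dBm once the margins are deducted.
+ */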
+ return (raw - offset) / 2;
+}
+
+static void tpe_intersect_constraint(struct rtw89_reg_6ghz_tpe *tpe, s8 cstr)
+{
+ if (tpe->valid) {
+ tpe->constraint = min(tpe->constraint, cstr);
+ return;
+ }
+
+ tpe->constraint = cstr;
+ tpe->valid = true;
+}
+
+static void tpe_deal_with_eirp(struct rtw89_reg_6ghz_tpe *tpe,
+ const struct ieee80211_parsed_tpe_eirp *eirp)
+{
+ unsigned int i;
+ s8 cstr;
+
+ if (!eirp->valid)
+ return;
+
+ for (i = 0; i < eirp->count; i++) {
+ cstr = tpe_get_constraint(eirp->power[i]);
+ tpe_intersect_constraint(tpe, cstr);
+ }
+}
+
+static s8 tpe_convert_psd_to_eirp(s8 psd)
+{
+ static const unsigned int mlog20 = 1301;
+
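+ /* 1301 ~= 1000 * log10(20), so this adds 10 * log10(20 MHz) = 13 dB,
+ * converting a per-MHz PSD into EIRP over a 20 MHz reference bandwidth.
+ */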
+ return psd + 10 * mlog20 / 1000;
+}
+
+static void tpe_deal_with_psd(struct rtw89_reg_6ghz_tpe *tpe,
+ const struct ieee80211_parsed_tpe_psd *psd)
+{
+ unsigned int i;
+ s8 cstr_psd;
+ s8 cstr;
+
+ if (!psd->valid)
+ return;
+
+ for (i = 0; i < psd->count; i++) {
+ cstr_psd = tpe_get_constraint(psd->power[i]);
+ cstr = tpe_convert_psd_to_eirp(cstr_psd);
+ tpe_intersect_constraint(tpe, cstr);
+ }
+}
+
+static void rtw89_calculate_tpe(struct rtw89_dev *rtwdev,
+ struct rtw89_reg_6ghz_tpe *result_tpe,
+ const struct ieee80211_parsed_tpe *parsed_tpe)
+{
+ static const u8 category = IEEE80211_TPE_CAT_6GHZ_DEFAULT;
+
+ tpe_deal_with_eirp(result_tpe, &parsed_tpe->max_local[category]);
+ tpe_deal_with_eirp(result_tpe, &parsed_tpe->max_reg_client[category]);
+ tpe_deal_with_psd(result_tpe, &parsed_tpe->psd_local[category]);
+ tpe_deal_with_psd(result_tpe, &parsed_tpe->psd_reg_client[category]);
+}
+
+static bool __rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ struct rtw89_reg_6ghz_tpe new = {};
+ struct rtw89_vif *rtwvif;
+ bool changed = false;
+
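+ /* Intersect the TPE constraints of every vif currently operating on
+ * 6 GHz; 'new' stays invalid if none applies.
+ */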
+ rtw89_for_each_rtwvif(rtwdev, rtwvif) {
+ const struct rtw89_reg_6ghz_tpe *tmp;
+ const struct rtw89_chan *chan;
+
+ chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
+ if (chan->band_type != RTW89_BAND_6G)
+ continue;
+
+ tmp = &rtwvif->reg_6ghz_tpe;
+ if (!tmp->valid)
+ continue;
+
+ tpe_intersect_constraint(&new, tmp->constraint);
+ }
+
+ if (memcmp(&regulatory->reg_6ghz_tpe, &new,
+ sizeof(regulatory->reg_6ghz_tpe)) != 0)
+ changed = true;
+
+ if (changed) {
+ if (new.valid)
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "recalc 6 GHz reg TPE to %d dBm\n",
+ new.constraint);
+ else
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "recalc 6 GHz reg TPE to none\n");
+
+ regulatory->reg_6ghz_tpe = new;
+ }
+
+ return changed;
+}
+
+static int rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool active,
+ unsigned int *changed)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct rtw89_reg_6ghz_tpe *tpe = &rtwvif->reg_6ghz_tpe;
+
+ memset(tpe, 0, sizeof(*tpe));
+
+ if (!active || rtwvif->reg_6ghz_power != RTW89_REG_6GHZ_POWER_STD)
+ goto bottom;
+
+ rtw89_calculate_tpe(rtwdev, tpe, &bss_conf->tpe);
+ if (!tpe->valid)
+ goto bottom;
+
+ if (tpe->constraint < RTW89_MIN_VALID_POWER_CONSTRAINT) {
+ rtw89_err(rtwdev,
+ "%s: constraint %d dBm is less than min valid val\n",
+ __func__, tpe->constraint);
+
+ tpe->valid = false;
+ return -EINVAL;
+ }
+
+bottom:
+ *changed += __rtw89_reg_6ghz_tpe_recalc(rtwdev);
+ return 0;
+}
+
+static bool __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
{
struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory;
+ const struct rtw89_regd *regd = regulatory->regd;
enum rtw89_reg_6ghz_power sel;
const struct rtw89_chan *chan;
struct rtw89_vif *rtwvif;
int count = 0;
+ u8 index;
rtw89_for_each_rtwvif(rtwdev, rtwvif) {
chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx);
@@ -654,24 +886,33 @@ static void __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev)
if (count != 1)
sel = RTW89_REG_6GHZ_POWER_DFLT;
+ if (sel == RTW89_REG_6GHZ_POWER_STD) {
+ index = rtw89_regd_get_index(regd);
+ if (index == RTW89_REGD_MAX_COUNTRY_NUM ||
+ test_bit(index, regulatory->block_6ghz_sp)) {
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "%c%c 6 GHz SP is blocked by policy\n",
+ regd->alpha2[0], regd->alpha2[1]);
+ sel = RTW89_REG_6GHZ_POWER_DFLT;
+ }
+ }
+
if (regulatory->reg_6ghz_power == sel)
- return;
+ return false;
rtw89_debug(rtwdev, RTW89_DBG_REGD,
"recalc 6 GHz reg power type to %d\n", sel);
regulatory->reg_6ghz_power = sel;
-
- rtw89_core_set_chip_txpwr(rtwdev);
+ return true;
}
-void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
- struct rtw89_vif *rtwvif, bool active)
+static int rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool active,
+ unsigned int *changed)
{
struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
- lockdep_assert_held(&rtwdev->mutex);
-
if (active) {
switch (vif->bss_conf.power_type) {
case IEEE80211_REG_VLP_AP:
@@ -691,5 +932,32 @@ void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev,
rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT;
}
- __rtw89_reg_6ghz_power_recalc(rtwdev);
+ *changed += __rtw89_reg_6ghz_power_recalc(rtwdev);
+ return 0;
+}
+
+int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ bool active)
+{
+ unsigned int changed = 0;
+ int ret;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ /* The result of reg_6ghz_tpe may depend on the reg_6ghz_power type,
+ * so reg_6ghz_tpe_recalc() must run after reg_6ghz_power_recalc().
+ */
+
+ ret = rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, active, &changed);
+ if (ret)
+ return ret;
+
+ ret = rtw89_reg_6ghz_tpe_recalc(rtwdev, rtwvif, active, &changed);
+ if (ret)
+ return ret;
+
+ if (changed)
+ rtw89_core_set_chip_txpwr(rtwdev);
+
+ return 0;
}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
index 51d3e61eaa1d..40cf84a79c46 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c
@@ -105,6 +105,10 @@ static const u32 rtw8851b_c2h_regs[RTW89_C2HREG_MAX] = {
R_AX_C2HREG_DATA3
};
+static const u32 rtw8851b_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = {
+ R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3,
+};
+
static const struct rtw89_page_regs rtw8851b_page_regs = {
.hci_fc_ctrl = R_AX_HCI_FC_CTRL,
.ch_page_ctrl = R_AX_CH_PAGE_CTRL,
@@ -2320,6 +2324,7 @@ static int rtw8851b_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
u8 wl_rfc_s1;
int ret;
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
@@ -2446,7 +2451,9 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.dig_table = NULL,
.dig_regs = &rtw8851b_dig_regs,
.tssi_dbw_table = NULL,
+ .support_macid_num = RTW89_MAX_MAC_ID_NUM,
.support_chanctx_num = 0,
+ .support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
.support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
@@ -2506,7 +2513,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8851b_c2h_regs,
.page_regs = &rtw8851b_page_regs,
- .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
+ .wow_reason_reg = rtw8851b_wow_wakeup_regs,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = &rtw8851b_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
index ca1374a71727..d334924faec8 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c
@@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
.rpwm_addr = R_AX_PCIE_HRPWM,
.cpwm_addr = R_AX_CPWM,
.mit_addr = R_AX_INT_MIT_RX,
+ .wp_sel_addr = 0,
.tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) |
BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) |
BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
@@ -63,6 +64,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = {
static const struct rtw89_driver_info rtw89_8851be_info = {
.chip = &rtw8851b_chip_info,
+ .quirks = NULL,
.bus = {
.pci = &rtw8851b_pci_info,
},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
index 2deadec715cf..08e148328c62 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -398,6 +398,10 @@ static const u32 rtw8852a_c2h_regs[RTW89_C2HREG_MAX] = {
R_AX_C2HREG_DATA3
};
+static const u32 rtw8852a_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = {
+ R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3,
+};
+
static const struct rtw89_page_regs rtw8852a_page_regs = {
.hci_fc_ctrl = R_AX_HCI_FC_CTRL,
.ch_page_ctrl = R_AX_CH_PAGE_CTRL,
@@ -2162,7 +2166,9 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.dig_table = &rtw89_8852a_phy_dig_table,
.dig_regs = &rtw8852a_dig_regs,
.tssi_dbw_table = NULL,
+ .support_macid_num = RTW89_MAX_MAC_ID_NUM,
.support_chanctx_num = 1,
+ .support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
.support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
@@ -2223,7 +2229,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = {
.c2h_regs = rtw8852a_c2h_regs,
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.page_regs = &rtw8852a_page_regs,
- .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
+ .wow_reason_reg = rtw8852a_wow_wakeup_regs,
.cfo_src_fd = false,
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852a_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
index 7c6ffedb77e2..9a675e2193bc 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c
@@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
.rpwm_addr = R_AX_PCIE_HRPWM,
.cpwm_addr = R_AX_CPWM,
.mit_addr = R_AX_INT_MIT_RX,
+ .wp_sel_addr = 0,
.tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = NULL,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set,
@@ -61,6 +62,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = {
static const struct rtw89_driver_info rtw89_8852ae_info = {
.chip = &rtw8852a_chip_info,
+ .quirks = NULL,
.bus = {
.pci = &rtw8852a_pci_info,
},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
index d025c4135e1c..a22847a311ad 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c
@@ -8,6 +8,7 @@
#include "phy.h"
#include "reg.h"
#include "rtw8852b.h"
+#include "rtw8852b_common.h"
#include "rtw8852b_rfk.h"
#include "rtw8852b_table.h"
#include "txrx.h"
@@ -65,167 +66,6 @@ static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = {
NULL},
};
-static const struct rtw89_reg3_def rtw8852b_pmac_ht20_mcs7_tbl[] = {
- {0x4580, 0x0000ffff, 0x0},
- {0x4580, 0xffff0000, 0x0},
- {0x4584, 0x0000ffff, 0x0},
- {0x4584, 0xffff0000, 0x0},
- {0x4580, 0x0000ffff, 0x1},
- {0x4578, 0x00ffffff, 0x2018b},
- {0x4570, 0x03ffffff, 0x7},
- {0x4574, 0x03ffffff, 0x32407},
- {0x45b8, 0x00000010, 0x0},
- {0x45b8, 0x00000100, 0x0},
- {0x45b8, 0x00000080, 0x0},
- {0x45b8, 0x00000008, 0x0},
- {0x45a0, 0x0000ff00, 0x0},
- {0x45a0, 0xff000000, 0x1},
- {0x45a4, 0x0000ff00, 0x2},
- {0x45a4, 0xff000000, 0x3},
- {0x45b8, 0x00000020, 0x0},
- {0x4568, 0xe0000000, 0x0},
- {0x45b8, 0x00000002, 0x1},
- {0x456c, 0xe0000000, 0x0},
- {0x45b4, 0x00006000, 0x0},
- {0x45b4, 0x00001800, 0x1},
- {0x45b8, 0x00000040, 0x0},
- {0x45b8, 0x00000004, 0x0},
- {0x45b8, 0x00000200, 0x0},
- {0x4598, 0xf8000000, 0x0},
- {0x45b8, 0x00100000, 0x0},
- {0x45a8, 0x00000fc0, 0x0},
- {0x45b8, 0x00200000, 0x0},
- {0x45b0, 0x00000038, 0x0},
- {0x45b0, 0x000001c0, 0x0},
- {0x45a0, 0x000000ff, 0x0},
- {0x45b8, 0x00400000, 0x0},
- {0x4590, 0x000007ff, 0x0},
- {0x45b0, 0x00000e00, 0x0},
- {0x45ac, 0x0000001f, 0x0},
- {0x45b8, 0x00800000, 0x0},
- {0x45a8, 0x0003f000, 0x0},
- {0x45b8, 0x01000000, 0x0},
- {0x45b0, 0x00007000, 0x0},
- {0x45b0, 0x00038000, 0x0},
- {0x45a0, 0x00ff0000, 0x0},
- {0x45b8, 0x02000000, 0x0},
- {0x4590, 0x003ff800, 0x0},
- {0x45b0, 0x001c0000, 0x0},
- {0x45ac, 0x000003e0, 0x0},
- {0x45b8, 0x04000000, 0x0},
- {0x45a8, 0x00fc0000, 0x0},
- {0x45b8, 0x08000000, 0x0},
- {0x45b0, 0x00e00000, 0x0},
- {0x45b0, 0x07000000, 0x0},
- {0x45a4, 0x000000ff, 0x0},
- {0x45b8, 0x10000000, 0x0},
- {0x4594, 0x000007ff, 0x0},
- {0x45b0, 0x38000000, 0x0},
- {0x45ac, 0x00007c00, 0x0},
- {0x45b8, 0x20000000, 0x0},
- {0x45a8, 0x3f000000, 0x0},
- {0x45b8, 0x40000000, 0x0},
- {0x45b4, 0x00000007, 0x0},
- {0x45b4, 0x00000038, 0x0},
- {0x45a4, 0x00ff0000, 0x0},
- {0x45b8, 0x80000000, 0x0},
- {0x4594, 0x003ff800, 0x0},
- {0x45b4, 0x000001c0, 0x0},
- {0x4598, 0xf8000000, 0x0},
- {0x45b8, 0x00100000, 0x0},
- {0x45a8, 0x00000fc0, 0x7},
- {0x45b8, 0x00200000, 0x0},
- {0x45b0, 0x00000038, 0x0},
- {0x45b0, 0x000001c0, 0x0},
- {0x45a0, 0x000000ff, 0x0},
- {0x45b4, 0x06000000, 0x0},
- {0x45b0, 0x00000007, 0x0},
- {0x45b8, 0x00080000, 0x0},
- {0x45a8, 0x0000003f, 0x0},
- {0x457c, 0xffe00000, 0x1},
- {0x4530, 0xffffffff, 0x0},
- {0x4588, 0x00003fff, 0x0},
- {0x4598, 0x000001ff, 0x0},
- {0x4534, 0xffffffff, 0x0},
- {0x4538, 0xffffffff, 0x0},
- {0x453c, 0xffffffff, 0x0},
- {0x4588, 0x0fffc000, 0x0},
- {0x4598, 0x0003fe00, 0x0},
- {0x4540, 0xffffffff, 0x0},
- {0x4544, 0xffffffff, 0x0},
- {0x4548, 0xffffffff, 0x0},
- {0x458c, 0x00003fff, 0x0},
- {0x4598, 0x07fc0000, 0x0},
- {0x454c, 0xffffffff, 0x0},
- {0x4550, 0xffffffff, 0x0},
- {0x4554, 0xffffffff, 0x0},
- {0x458c, 0x0fffc000, 0x0},
- {0x459c, 0x000001ff, 0x0},
- {0x4558, 0xffffffff, 0x0},
- {0x455c, 0xffffffff, 0x0},
- {0x4530, 0xffffffff, 0x4e790001},
- {0x4588, 0x00003fff, 0x0},
- {0x4598, 0x000001ff, 0x1},
- {0x4534, 0xffffffff, 0x0},
- {0x4538, 0xffffffff, 0x4b},
- {0x45ac, 0x38000000, 0x7},
- {0x4588, 0xf0000000, 0x0},
- {0x459c, 0x7e000000, 0x0},
- {0x45b8, 0x00040000, 0x0},
- {0x45b8, 0x00020000, 0x0},
- {0x4590, 0xffc00000, 0x0},
- {0x45b8, 0x00004000, 0x0},
- {0x4578, 0xff000000, 0x0},
- {0x45b8, 0x00000400, 0x0},
- {0x45b8, 0x00000800, 0x0},
- {0x45b8, 0x00001000, 0x0},
- {0x45b8, 0x00002000, 0x0},
- {0x45b4, 0x00018000, 0x0},
- {0x45ac, 0x07800000, 0x0},
- {0x45b4, 0x00000600, 0x2},
- {0x459c, 0x0001fe00, 0x80},
- {0x45ac, 0x00078000, 0x3},
- {0x459c, 0x01fe0000, 0x1},
-};
-
-static const struct rtw89_reg3_def rtw8852b_btc_preagc_en_defs[] = {
- {0x46D0, GENMASK(1, 0), 0x3},
- {0x4790, GENMASK(1, 0), 0x3},
- {0x4AD4, GENMASK(31, 0), 0xf},
- {0x4AE0, GENMASK(31, 0), 0xf},
- {0x4688, GENMASK(31, 24), 0x80},
- {0x476C, GENMASK(31, 24), 0x80},
- {0x4694, GENMASK(7, 0), 0x80},
- {0x4694, GENMASK(15, 8), 0x80},
- {0x4778, GENMASK(7, 0), 0x80},
- {0x4778, GENMASK(15, 8), 0x80},
- {0x4AE4, GENMASK(23, 0), 0x780D1E},
- {0x4AEC, GENMASK(23, 0), 0x780D1E},
- {0x469C, GENMASK(31, 26), 0x34},
- {0x49F0, GENMASK(31, 26), 0x34},
-};
-
-static DECLARE_PHY_REG3_TBL(rtw8852b_btc_preagc_en_defs);
-
-static const struct rtw89_reg3_def rtw8852b_btc_preagc_dis_defs[] = {
- {0x46D0, GENMASK(1, 0), 0x0},
- {0x4790, GENMASK(1, 0), 0x0},
- {0x4AD4, GENMASK(31, 0), 0x60},
- {0x4AE0, GENMASK(31, 0), 0x60},
- {0x4688, GENMASK(31, 24), 0x1a},
- {0x476C, GENMASK(31, 24), 0x1a},
- {0x4694, GENMASK(7, 0), 0x2a},
- {0x4694, GENMASK(15, 8), 0x2a},
- {0x4778, GENMASK(7, 0), 0x2a},
- {0x4778, GENMASK(15, 8), 0x2a},
- {0x4AE4, GENMASK(23, 0), 0x79E99E},
- {0x4AEC, GENMASK(23, 0), 0x79E99E},
- {0x469C, GENMASK(31, 26), 0x26},
- {0x49F0, GENMASK(31, 26), 0x26},
-};
-
-static DECLARE_PHY_REG3_TBL(rtw8852b_btc_preagc_dis_defs);
-
static const u32 rtw8852b_h2c_regs[RTW89_H2CREG_MAX] = {
R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1, R_AX_H2CREG_DATA2,
R_AX_H2CREG_DATA3
@@ -236,6 +76,10 @@ static const u32 rtw8852b_c2h_regs[RTW89_C2HREG_MAX] = {
R_AX_C2HREG_DATA3
};
+static const u32 rtw8852b_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = {
+ R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3,
+};
+
static const struct rtw89_page_regs rtw8852b_page_regs = {
.hci_fc_ctrl = R_AX_HCI_FC_CTRL,
.ch_page_ctrl = R_AX_CH_PAGE_CTRL,
@@ -390,11 +234,21 @@ static const struct rtw89_btc_fbtc_mreg rtw89_btc_8852b_mon_reg[] = {
static const u8 rtw89_btc_8852b_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {70, 60, 50, 40};
static const u8 rtw89_btc_8852b_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {50, 40, 30, 20};
+static void rtw8852b_pwr_sps_ana(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+
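+ /* Boards with RFE type 5 need a dedicated SPS analog setting, applied
+ * around both power-on and power-off.
+ */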
+ if (efuse->rfe_type == 0x5)
+ rtw89_write16(rtwdev, R_AX_SPS_ANA_ON_CTRL2, RTL8852B_RFE_05_SPS_ANA);
+}
+
static int rtw8852b_pwr_on_func(struct rtw89_dev *rtwdev)
{
u32 val32;
u32 ret;
+ rtw8852b_pwr_sps_ana(rtwdev);
+
rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN |
B_AX_AFSM_PCIE_SUS_EN);
rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_DIS_WLBT_PDNSUSEN_SOPC);
@@ -522,6 +376,8 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev)
u32 val32;
u32 ret;
+ rtw8852b_pwr_sps_ana(rtwdev);
+
ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF,
XTAL_SI_RFC2RF);
if (ret)
@@ -550,6 +406,7 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev)
return ret;
rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB);
rtw89_write32_clr(rtwdev, R_AX_SYS_ADIE_PAD_PWR_CTRL, B_AX_SYM_PADPDN_WL_RFC_1P3);
@@ -578,806 +435,6 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev)
return 0;
}
-static void rtw8852be_efuse_parsing(struct rtw89_efuse *efuse,
- struct rtw8852b_efuse *map)
-{
- ether_addr_copy(efuse->addr, map->e.mac_addr);
- efuse->rfe_type = map->rfe_type;
- efuse->xtal_cap = map->xtal_k;
-}
-
-static void rtw8852b_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
- struct rtw8852b_efuse *map)
-{
- struct rtw89_tssi_info *tssi = &rtwdev->tssi;
- struct rtw8852b_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
- u8 i, j;
-
- tssi->thermal[RF_PATH_A] = map->path_a_therm;
- tssi->thermal[RF_PATH_B] = map->path_b_therm;
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++) {
- memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi,
- sizeof(ofst[i]->cck_tssi));
-
- for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++)
- rtw89_debug(rtwdev, RTW89_DBG_TSSI,
- "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n",
- i, j, tssi->tssi_cck[i][j]);
-
- memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi,
- sizeof(ofst[i]->bw40_tssi));
- memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM,
- ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g));
-
- for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++)
- rtw89_debug(rtwdev, RTW89_DBG_TSSI,
- "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n",
- i, j, tssi->tssi_mcs[i][j]);
- }
-}
-
-static bool _decode_efuse_gain(u8 data, s8 *high, s8 *low)
-{
- if (high)
- *high = sign_extend32(FIELD_GET(GENMASK(7, 4), data), 3);
- if (low)
- *low = sign_extend32(FIELD_GET(GENMASK(3, 0), data), 3);
-
- return data != 0xff;
-}
-
-static void rtw8852b_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev,
- struct rtw8852b_efuse *map)
-{
- struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
- bool valid = false;
-
- valid |= _decode_efuse_gain(map->rx_gain_2g_cck,
- &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK],
- &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK]);
- valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm,
- &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM],
- &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM]);
- valid |= _decode_efuse_gain(map->rx_gain_5g_low,
- &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW],
- &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW]);
- valid |= _decode_efuse_gain(map->rx_gain_5g_mid,
- &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID],
- &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID]);
- valid |= _decode_efuse_gain(map->rx_gain_5g_high,
- &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH],
- &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH]);
-
- gain->offset_valid = valid;
-}
-
-static int rtw8852b_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
- enum rtw89_efuse_block block)
-{
- struct rtw89_efuse *efuse = &rtwdev->efuse;
- struct rtw8852b_efuse *map;
-
- map = (struct rtw8852b_efuse *)log_map;
-
- efuse->country_code[0] = map->country_code[0];
- efuse->country_code[1] = map->country_code[1];
- rtw8852b_efuse_parsing_tssi(rtwdev, map);
- rtw8852b_efuse_parsing_gain_offset(rtwdev, map);
-
- switch (rtwdev->hci.type) {
- case RTW89_HCI_TYPE_PCIE:
- rtw8852be_efuse_parsing(efuse, map);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
-
- return 0;
-}
-
-static void rtw8852b_phycap_parsing_power_cal(struct rtw89_dev *rtwdev, u8 *phycap_map)
-{
-#define PWR_K_CHK_OFFSET 0x5E9
-#define PWR_K_CHK_VALUE 0xAA
- u32 offset = PWR_K_CHK_OFFSET - rtwdev->chip->phycap_addr;
-
- if (phycap_map[offset] == PWR_K_CHK_VALUE)
- rtwdev->efuse.power_k_valid = true;
-}
-
-static void rtw8852b_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map)
-{
- struct rtw89_tssi_info *tssi = &rtwdev->tssi;
- static const u32 tssi_trim_addr[RF_PATH_NUM_8852B] = {0x5D6, 0x5AB};
- u32 addr = rtwdev->chip->phycap_addr;
- bool pg = false;
- u32 ofst;
- u8 i, j;
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++) {
- for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) {
- /* addrs are in decreasing order */
- ofst = tssi_trim_addr[i] - addr - j;
- tssi->tssi_trim[i][j] = phycap_map[ofst];
-
- if (phycap_map[ofst] != 0xff)
- pg = true;
- }
- }
-
- if (!pg) {
- memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim));
- rtw89_debug(rtwdev, RTW89_DBG_TSSI,
- "[TSSI][TRIM] no PG, set all trim info to 0\n");
- }
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++)
- for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++)
- rtw89_debug(rtwdev, RTW89_DBG_TSSI,
- "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n",
- i, j, tssi->tssi_trim[i][j],
- tssi_trim_addr[i] - j);
-}
-
-static void rtw8852b_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev,
- u8 *phycap_map)
-{
- struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
- static const u32 thm_trim_addr[RF_PATH_NUM_8852B] = {0x5DF, 0x5DC};
- u32 addr = rtwdev->chip->phycap_addr;
- u8 i;
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++) {
- info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr];
-
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n",
- i, info->thermal_trim[i]);
-
- if (info->thermal_trim[i] != 0xff)
- info->pg_thermal_trim = true;
- }
-}
-
-static void rtw8852b_thermal_trim(struct rtw89_dev *rtwdev)
-{
-#define __thm_setting(raw) \
-({ \
- u8 __v = (raw); \
- ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \
-})
- struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
- u8 i, val;
-
- if (!info->pg_thermal_trim) {
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[THERMAL][TRIM] no PG, do nothing\n");
-
- return;
- }
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++) {
- val = __thm_setting(info->thermal_trim[i]);
- rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val);
-
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n",
- i, val);
- }
-#undef __thm_setting
-}
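-
-/* Worked example for the __thm_setting() shuffle above: bit 0 of the PG
- * byte moves up to bit 3 and bits 4..1 shift down by one, so a programmed
- * value of 0x0b (0b01011) becomes (1 << 3) | (0x0b >> 1) = 0xd. What the
- * resulting RR_TM2_OFF code means physically is taken on trust from the
- * vendor tables, not derived here.
- */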
-
-static void rtw8852b_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
- u8 *phycap_map)
-{
- struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
- static const u32 pabias_trim_addr[RF_PATH_NUM_8852B] = {0x5DE, 0x5DB};
- u32 addr = rtwdev->chip->phycap_addr;
- u8 i;
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++) {
- info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr];
-
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n",
- i, info->pa_bias_trim[i]);
-
- if (info->pa_bias_trim[i] != 0xff)
- info->pg_pa_bias_trim = true;
- }
-}
-
-static void rtw8852b_pa_bias_trim(struct rtw89_dev *rtwdev)
-{
- struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
- u8 pabias_2g, pabias_5g;
- u8 i;
-
- if (!info->pg_pa_bias_trim) {
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[PA_BIAS][TRIM] no PG, do nothing\n");
-
- return;
- }
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++) {
- pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
- pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
-
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
- i, pabias_2g, pabias_5g);
-
- rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g);
- rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g);
- }
-}
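-
-/* Each PG byte above packs two 4-bit trims: bits [3:0] bias the 2 GHz PA
- * (written to RR_BIASA_TXG) and bits [7:4] the 5 GHz PA (RR_BIASA_TXA).
- */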
-
-static void rtw8852b_phycap_parsing_gain_comp(struct rtw89_dev *rtwdev, u8 *phycap_map)
-{
- static const u32 comp_addrs[][RTW89_SUBBAND_2GHZ_5GHZ_NR] = {
- {0x5BB, 0x5BA, 0, 0x5B9, 0x5B8},
- {0x590, 0x58F, 0, 0x58E, 0x58D},
- };
- struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
- u32 phycap_addr = rtwdev->chip->phycap_addr;
- bool valid = false;
- int path, i;
- u8 data;
-
- for (path = 0; path < 2; path++)
- for (i = 0; i < RTW89_SUBBAND_2GHZ_5GHZ_NR; i++) {
- if (comp_addrs[path][i] == 0)
- continue;
-
- data = phycap_map[comp_addrs[path][i] - phycap_addr];
- valid |= _decode_efuse_gain(data, NULL,
- &gain->comp[path][i]);
- }
-
- gain->comp_valid = valid;
-}
-
-static int rtw8852b_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
-{
- rtw8852b_phycap_parsing_power_cal(rtwdev, phycap_map);
- rtw8852b_phycap_parsing_tssi(rtwdev, phycap_map);
- rtw8852b_phycap_parsing_thermal_trim(rtwdev, phycap_map);
- rtw8852b_phycap_parsing_pa_bias_trim(rtwdev, phycap_map);
- rtw8852b_phycap_parsing_gain_comp(rtwdev, phycap_map);
-
- return 0;
-}
-
-static void rtw8852b_power_trim(struct rtw89_dev *rtwdev)
-{
- rtw8852b_thermal_trim(rtwdev);
- rtw8852b_pa_bias_trim(rtwdev);
-}
-
-static void rtw8852b_set_channel_mac(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- u8 mac_idx)
-{
- u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx);
- u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx);
- u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx);
- u8 txsc20 = 0, txsc40 = 0;
-
- switch (chan->band_width) {
- case RTW89_CHANNEL_WIDTH_80:
- txsc40 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_40);
- fallthrough;
- case RTW89_CHANNEL_WIDTH_40:
- txsc20 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_20);
- break;
- default:
- break;
- }
-
- switch (chan->band_width) {
- case RTW89_CHANNEL_WIDTH_80:
- rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1));
- rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4));
- break;
- case RTW89_CHANNEL_WIDTH_40:
- rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(0));
- rtw89_write32(rtwdev, sub_carr, txsc20);
- break;
- case RTW89_CHANNEL_WIDTH_20:
- rtw89_write8_clr(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK);
- rtw89_write32(rtwdev, sub_carr, 0);
- break;
- default:
- break;
- }
-
- if (chan->channel > 14) {
- rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE);
- rtw89_write8_set(rtwdev, chk_rate,
- B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
- } else {
- rtw89_write8_set(rtwdev, chk_rate, B_AX_BAND_MODE);
- rtw89_write8_clr(rtwdev, chk_rate,
- B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
- }
-}
-
-static const u32 rtw8852b_sco_barker_threshold[14] = {
- 0x1cfea, 0x1d0e1, 0x1d1d7, 0x1d2cd, 0x1d3c3, 0x1d4b9, 0x1d5b0, 0x1d6a6,
- 0x1d79c, 0x1d892, 0x1d988, 0x1da7f, 0x1db75, 0x1ddc4
-};
-
-static const u32 rtw8852b_sco_cck_threshold[14] = {
- 0x27de3, 0x27f35, 0x28088, 0x281da, 0x2832d, 0x2847f, 0x285d2, 0x28724,
- 0x28877, 0x289c9, 0x28b1c, 0x28c6e, 0x28dc1, 0x290ed
-};
-
-static void rtw8852b_ctrl_sco_cck(struct rtw89_dev *rtwdev, u8 primary_ch)
-{
- u8 ch_element = primary_ch - 1;
-
- rtw89_phy_write32_mask(rtwdev, R_RXSCOBC, B_RXSCOBC_TH,
- rtw8852b_sco_barker_threshold[ch_element]);
- rtw89_phy_write32_mask(rtwdev, R_RXSCOCCK, B_RXSCOCCK_TH,
- rtw8852b_sco_cck_threshold[ch_element]);
-}
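-
-/* Both threshold tables are indexed by primary_ch - 1, so callers must
- * pass only 2 GHz primary channels 1..14; anything else would read past
- * the arrays.
- */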
-
-static u8 rtw8852b_sco_mapping(u8 central_ch)
-{
- if (central_ch == 1)
- return 109;
- else if (central_ch >= 2 && central_ch <= 6)
- return 108;
- else if (central_ch >= 7 && central_ch <= 10)
- return 107;
- else if (central_ch >= 11 && central_ch <= 14)
- return 106;
- else if (central_ch == 36 || central_ch == 38)
- return 51;
- else if (central_ch >= 40 && central_ch <= 58)
- return 50;
- else if (central_ch >= 60 && central_ch <= 64)
- return 49;
- else if (central_ch == 100 || central_ch == 102)
- return 48;
- else if (central_ch >= 104 && central_ch <= 126)
- return 47;
- else if (central_ch >= 128 && central_ch <= 151)
- return 46;
- else if (central_ch >= 153 && central_ch <= 177)
- return 45;
- else
- return 0;
-}
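-
-/* The compensation code shrinks as the carrier frequency rises (109 on
- * channel 1 down to 45 at the top of the 5 GHz band), consistent with an
- * inverse-frequency SCO correction; unlisted channels fall through to 0.
- */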
-
-struct rtw8852b_bb_gain {
- u32 gain_g[BB_PATH_NUM_8852B];
- u32 gain_a[BB_PATH_NUM_8852B];
- u32 gain_mask;
-};
-
-static const struct rtw8852b_bb_gain bb_gain_lna[LNA_GAIN_NUM] = {
- { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
- .gain_mask = 0x00ff0000 },
- { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
- .gain_mask = 0xff000000 },
- { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
- .gain_mask = 0x000000ff },
- { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
- .gain_mask = 0x0000ff00 },
- { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
- .gain_mask = 0x00ff0000 },
- { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
- .gain_mask = 0xff000000 },
- { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
- .gain_mask = 0x000000ff },
-};
-
-static const struct rtw8852b_bb_gain bb_gain_tia[TIA_GAIN_NUM] = {
- { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
- .gain_mask = 0x00ff0000 },
- { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
- .gain_mask = 0xff000000 },
-};
-
-static void rtw8852b_set_gain_error(struct rtw89_dev *rtwdev,
- enum rtw89_subband subband,
- enum rtw89_rf_path path)
-{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
- u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
- s32 val;
- u32 reg;
- u32 mask;
- int i;
-
- for (i = 0; i < LNA_GAIN_NUM; i++) {
- if (subband == RTW89_CH_2G)
- reg = bb_gain_lna[i].gain_g[path];
- else
- reg = bb_gain_lna[i].gain_a[path];
-
- mask = bb_gain_lna[i].gain_mask;
- val = gain->lna_gain[gain_band][path][i];
- rtw89_phy_write32_mask(rtwdev, reg, mask, val);
- }
-
- for (i = 0; i < TIA_GAIN_NUM; i++) {
- if (subband == RTW89_CH_2G)
- reg = bb_gain_tia[i].gain_g[path];
- else
- reg = bb_gain_tia[i].gain_a[path];
-
- mask = bb_gain_tia[i].gain_mask;
- val = gain->tia_gain[gain_band][path][i];
- rtw89_phy_write32_mask(rtwdev, reg, mask, val);
- }
-}
-
-static void rtw8852b_set_gain_offset(struct rtw89_dev *rtwdev,
- enum rtw89_subband subband,
- enum rtw89_phy_idx phy_idx)
-{
- static const u32 gain_err_addr[2] = {R_P0_AGC_RSVD, R_P1_AGC_RSVD};
- static const u32 rssi_ofst_addr[2] = {R_PATH0_G_TIA1_LNA6_OP1DB_V1,
- R_PATH1_G_TIA1_LNA6_OP1DB_V1};
- struct rtw89_hal *hal = &rtwdev->hal;
- struct rtw89_phy_efuse_gain *efuse_gain = &rtwdev->efuse_gain;
- enum rtw89_gain_offset gain_ofdm_band;
- s32 offset_a, offset_b;
- s32 offset_ofdm, offset_cck;
- s32 tmp;
- u8 path;
-
- if (!efuse_gain->comp_valid)
- goto next;
-
- for (path = RF_PATH_A; path < BB_PATH_NUM_8852B; path++) {
- tmp = efuse_gain->comp[path][subband];
- tmp = clamp_t(s32, tmp << 2, S8_MIN, S8_MAX);
- rtw89_phy_write32_mask(rtwdev, gain_err_addr[path], MASKBYTE0, tmp);
- }
-
-next:
- if (!efuse_gain->offset_valid)
- return;
-
- gain_ofdm_band = rtw89_subband_to_gain_offset_band_of_ofdm(subband);
-
- offset_a = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band];
- offset_b = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band];
-
- tmp = -((offset_a << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2));
- tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
- rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_A], B_PATH0_R_G_OFST_MASK, tmp);
-
- tmp = -((offset_b << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2));
- tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
- rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_B], B_PATH0_R_G_OFST_MASK, tmp);
-
- if (hal->antenna_rx == RF_B) {
- offset_ofdm = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band];
- offset_cck = -efuse_gain->offset[RF_PATH_B][0];
- } else {
- offset_ofdm = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band];
- offset_cck = -efuse_gain->offset[RF_PATH_A][0];
- }
-
- tmp = (offset_ofdm << 4) + efuse_gain->offset_base[RTW89_PHY_0];
- tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
- rtw89_phy_write32_idx(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx);
-
- tmp = (offset_ofdm << 4) + efuse_gain->rssi_base[RTW89_PHY_0];
- tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
- rtw89_phy_write32_idx(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx);
-
- if (subband == RTW89_CH_2G) {
- tmp = (offset_cck << 3) + (efuse_gain->offset_base[RTW89_PHY_0] >> 1);
- tmp = clamp_t(s32, tmp, S8_MIN >> 1, S8_MAX >> 1);
- rtw89_phy_write32_mask(rtwdev, R_RX_RPL_OFST,
- B_RX_RPL_OFST_CCK_MASK, tmp);
- }
-}
-
-static
-void rtw8852b_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband)
-{
- const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
- u8 band = rtw89_subband_to_bb_gain_band(subband);
- u32 val;
-
- val = FIELD_PREP(B_P0_RPL1_20_MASK, (gain->rpl_ofst_20[band][RF_PATH_A] +
- gain->rpl_ofst_20[band][RF_PATH_B]) / 2) |
- FIELD_PREP(B_P0_RPL1_40_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][0] +
- gain->rpl_ofst_40[band][RF_PATH_B][0]) / 2) |
- FIELD_PREP(B_P0_RPL1_41_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][1] +
- gain->rpl_ofst_40[band][RF_PATH_B][1]) / 2);
- val >>= B_P0_RPL1_SHIFT;
- rtw89_phy_write32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_MASK, val);
- rtw89_phy_write32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_MASK, val);
-
- val = FIELD_PREP(B_P0_RTL2_42_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][2] +
- gain->rpl_ofst_40[band][RF_PATH_B][2]) / 2) |
- FIELD_PREP(B_P0_RTL2_80_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][0] +
- gain->rpl_ofst_80[band][RF_PATH_B][0]) / 2) |
- FIELD_PREP(B_P0_RTL2_81_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][1] +
- gain->rpl_ofst_80[band][RF_PATH_B][1]) / 2) |
- FIELD_PREP(B_P0_RTL2_8A_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][10] +
- gain->rpl_ofst_80[band][RF_PATH_B][10]) / 2);
- rtw89_phy_write32(rtwdev, R_P0_RPL2, val);
- rtw89_phy_write32(rtwdev, R_P1_RPL2, val);
-
- val = FIELD_PREP(B_P0_RTL3_82_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][2] +
- gain->rpl_ofst_80[band][RF_PATH_B][2]) / 2) |
- FIELD_PREP(B_P0_RTL3_83_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][3] +
- gain->rpl_ofst_80[band][RF_PATH_B][3]) / 2) |
- FIELD_PREP(B_P0_RTL3_84_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][4] +
- gain->rpl_ofst_80[band][RF_PATH_B][4]) / 2) |
- FIELD_PREP(B_P0_RTL3_89_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][9] +
- gain->rpl_ofst_80[band][RF_PATH_B][9]) / 2);
- rtw89_phy_write32(rtwdev, R_P0_RPL3, val);
- rtw89_phy_write32(rtwdev, R_P1_RPL3, val);
-}
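-
-/* Every field packed above is the average of the path A and path B
- * rpl_ofst values for the current band, and the same word is mirrored
- * into both the P0 and P1 register copies.
- */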
-
-static void rtw8852b_ctrl_ch(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- u8 central_ch = chan->channel;
- u8 subband = chan->subband_type;
- u8 sco_comp;
- bool is_2g = central_ch <= 14;
-
- /* Path A */
- if (is_2g)
- rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
- B_PATH0_BAND_SEL_MSK_V1, 1, phy_idx);
- else
- rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
- B_PATH0_BAND_SEL_MSK_V1, 0, phy_idx);
-
- /* Path B */
- if (is_2g)
- rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
- B_PATH1_BAND_SEL_MSK_V1, 1, phy_idx);
- else
- rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
- B_PATH1_BAND_SEL_MSK_V1, 0, phy_idx);
-
- /* SCO compensate FC setting */
- sco_comp = rtw8852b_sco_mapping(central_ch);
- rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_INV, sco_comp, phy_idx);
-
- if (chan->band_type == RTW89_BAND_6G)
- return;
-
- /* CCK parameters */
- if (central_ch == 14) {
- rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3b13ff);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x1c42de);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfdb0ad);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xf60f6e);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xfd8f92);
- rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0x2d011);
- rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0x1c02c);
- rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xfff00a);
- } else {
- rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3d23ff);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x29b354);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfc1c8);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xfdb053);
- rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xf86f9a);
- rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0xfaef92);
- rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0xfe5fcc);
- rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xffdff5);
- }
-
- rtw8852b_set_gain_error(rtwdev, subband, RF_PATH_A);
- rtw8852b_set_gain_error(rtwdev, subband, RF_PATH_B);
- rtw8852b_set_gain_offset(rtwdev, subband, phy_idx);
- rtw8852b_set_rxsc_rpl_comp(rtwdev, subband);
-}
-
-static void rtw8852b_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path)
-{
- static const u32 adc_sel[2] = {0xC0EC, 0xC1EC};
- static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4};
-
- switch (bw) {
- case RTW89_CHANNEL_WIDTH_5:
- rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1);
- rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0);
- break;
- case RTW89_CHANNEL_WIDTH_10:
- rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2);
- rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1);
- break;
- case RTW89_CHANNEL_WIDTH_20:
- case RTW89_CHANNEL_WIDTH_40:
- case RTW89_CHANNEL_WIDTH_80:
- rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
- rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
- break;
- default:
- rtw89_warn(rtwdev, "Fail to set ADC\n");
- }
-}
-
-static void rtw8852b_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw,
- enum rtw89_phy_idx phy_idx)
-{
- u32 rx_path_0;
-
- switch (bw) {
- case RTW89_CHANNEL_WIDTH_5:
- rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x1, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
-
- /* Set RF mode at 3 */
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
- B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
- B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- break;
- case RTW89_CHANNEL_WIDTH_10:
- rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x2, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
-
- /* Set RF mode at 3 */
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
- B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
- B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- break;
- case RTW89_CHANNEL_WIDTH_20:
- rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
-
- /* Set RF mode at 3 */
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
- B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
- B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- break;
- case RTW89_CHANNEL_WIDTH_40:
- rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x1, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH,
- pri_ch, phy_idx);
-
- /* Set RF mode at 3 */
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
- B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
- B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
- /* CCK primary channel */
- if (pri_ch == RTW89_SC_20_UPPER)
- rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 1);
- else
- rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 0);
-
- break;
- case RTW89_CHANNEL_WIDTH_80:
- rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x2, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH,
- pri_ch, phy_idx);
-
- /* Set RF mode at A */
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
- B_P0_RFMODE_ORI_RX_ALL, 0xaaa, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
- B_P1_RFMODE_ORI_RX_ALL, 0xaaa, phy_idx);
- break;
- default:
- rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw,
- pri_ch);
- }
-
- rtw8852b_bw_setting(rtwdev, bw, RF_PATH_A);
- rtw8852b_bw_setting(rtwdev, bw, RF_PATH_B);
-
- rx_path_0 = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0,
- phy_idx);
- if (rx_path_0 == 0x1)
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
- B_P1_RFMODE_ORI_RX_ALL, 0x111, phy_idx);
- else if (rx_path_0 == 0x2)
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
- B_P0_RFMODE_ORI_RX_ALL, 0x111, phy_idx);
-}
-
-static void rtw8852b_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en)
-{
- if (cck_en) {
- rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0);
- } else {
- rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1);
- }
-}
-
-static void rtw8852b_5m_mask(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- u8 pri_ch = chan->pri_ch_idx;
- bool mask_5m_low;
- bool mask_5m_en;
-
- switch (chan->band_width) {
- case RTW89_CHANNEL_WIDTH_40:
- /* Prich=1: Mask 5M High, Prich=2: Mask 5M Low */
- mask_5m_en = true;
- mask_5m_low = pri_ch == RTW89_SC_20_LOWER;
- break;
- case RTW89_CHANNEL_WIDTH_80:
- /* Prich=3: Mask 5M High, Prich=4: Mask 5M Low, Else: Disable */
- mask_5m_en = pri_ch == RTW89_SC_20_UPMOST ||
- pri_ch == RTW89_SC_20_LOWEST;
- mask_5m_low = pri_ch == RTW89_SC_20_LOWEST;
- break;
- default:
- mask_5m_en = false;
- break;
- }
-
- if (!mask_5m_en) {
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x0);
- rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1,
- B_ASSIGN_SBD_OPT_EN_V1, 0x0, phy_idx);
- return;
- }
-
- if (mask_5m_low) {
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x1);
- } else {
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x0);
- }
- rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1,
- B_ASSIGN_SBD_OPT_EN_V1, 0x1, phy_idx);
-}
-
-static void rtw8852b_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
-{
- rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
- fsleep(1);
- rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
- rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
-}
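-
-/* Reset sequence: park the RF serial interface on both paths, pulse the
- * asynchronous BB reset (1 -> 0), release the serial interface, then
- * leave the reset line deasserted (1).
- */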
-
static void rtw8852b_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band,
enum rtw89_phy_idx phy_idx, bool en)
{
@@ -1409,87 +466,20 @@ static void rtw8852b_bb_reset(struct rtw89_dev *rtwdev,
rtw89_phy_write32_set(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
rtw89_phy_write32_set(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON);
rtw89_phy_write32_set(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
- rtw8852b_bb_reset_all(rtwdev, phy_idx);
+ rtw8852bx_bb_reset_all(rtwdev, phy_idx);
rtw89_phy_write32_clr(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON);
rtw89_phy_write32_clr(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
rtw89_phy_write32_clr(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON);
rtw89_phy_write32_clr(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
}
-static void rtw8852b_bb_macid_ctrl_init(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
-{
- u32 addr;
-
- for (addr = R_AX_PWR_MACID_LMT_TABLE0;
- addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4)
- rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0);
-}
-
-static void rtw8852b_bb_sethw(struct rtw89_dev *rtwdev)
-{
- struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
-
- rtw89_phy_write32_clr(rtwdev, R_P0_EN_SOUND_WO_NDP, B_P0_EN_SOUND_WO_NDP);
- rtw89_phy_write32_clr(rtwdev, R_P1_EN_SOUND_WO_NDP, B_P1_EN_SOUND_WO_NDP);
-
- rtw8852b_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0);
-
- /* read these registers after loading BB parameters */
- gain->offset_base[RTW89_PHY_0] =
- rtw89_phy_read32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK);
- gain->rssi_base[RTW89_PHY_0] =
- rtw89_phy_read32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK);
-}
-
-static void rtw8852b_bb_set_pop(struct rtw89_dev *rtwdev)
-{
- if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR)
- rtw89_phy_write32_clr(rtwdev, R_PKT_CTRL, B_PKT_POP_EN);
-}
-
-static void rtw8852b_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- bool cck_en = chan->channel <= 14;
- u8 pri_ch_idx = chan->pri_ch_idx;
- u8 band = chan->band_type, chan_idx;
-
- if (cck_en)
- rtw8852b_ctrl_sco_cck(rtwdev, chan->primary_channel);
-
- rtw8852b_ctrl_ch(rtwdev, chan, phy_idx);
- rtw8852b_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx);
- rtw8852b_ctrl_cck_en(rtwdev, cck_en);
- if (chan->band_type == RTW89_BAND_5G) {
- rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
- B_PATH0_BT_SHARE_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
- B_PATH0_BTG_PATH_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
- B_PATH1_BT_SHARE_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
- B_PATH1_BTG_PATH_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
- B_BT_DYN_DC_EST_EN_MSK, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0);
- }
- chan_idx = rtw89_encode_chan_idx(rtwdev, chan->primary_channel, band);
- rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx);
- rtw8852b_5m_mask(rtwdev, chan, phy_idx);
- rtw8852b_bb_set_pop(rtwdev);
- rtw8852b_bb_reset_all(rtwdev, phy_idx);
-}
-
static void rtw8852b_set_channel(struct rtw89_dev *rtwdev,
const struct rtw89_chan *chan,
enum rtw89_mac_idx mac_idx,
enum rtw89_phy_idx phy_idx)
{
- rtw8852b_set_channel_mac(rtwdev, chan, mac_idx);
- rtw8852b_set_channel_bb(rtwdev, chan, phy_idx);
+ rtw8852bx_set_channel_mac(rtwdev, chan, mac_idx);
+ rtw8852bx_set_channel_bb(rtwdev, chan, phy_idx);
rtw8852b_set_channel_rf(rtwdev, chan, phy_idx);
}
@@ -1589,540 +579,6 @@ static void rtw8852b_rfk_track(struct rtw89_dev *rtwdev)
rtw8852b_dpk_track(rtwdev);
}
-static u32 rtw8852b_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx, s16 ref)
-{
- const u16 tssi_16dbm_cw = 0x12c;
- const u8 base_cw_0db = 0x27;
- const s8 ofst_int = 0;
- s16 pwr_s10_3;
- s16 rf_pwr_cw;
- u16 bb_pwr_cw;
- u32 pwr_cw;
- u32 tssi_ofst_cw;
-
- pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
- bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3);
- rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3);
- rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
- pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
-
- tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
- "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
- tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
-
- return FIELD_PREP(B_DPD_TSSI_CW, tssi_ofst_cw) |
- FIELD_PREP(B_DPD_PWR_CW, pwr_cw) |
- FIELD_PREP(B_DPD_REF, ref);
-}
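-
-/* Worked example of the s(10,3) arithmetic above with ref = 0 dB:
- * pwr_s10_3 = 0x27 << 3 = 0x138, so bb_pwr_cw = 0 and rf_pwr_cw = 0x27
- * (inside the 15..63 clamp), giving pwr_cw = 0x138; tssi_ofst_cw =
- * 0x12c - (16 << 3) = 0xac, i.e. the 16 dBm TSSI codeword rebased to
- * the 0 dB reference.
- */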
-
-static void rtw8852b_set_txpwr_ref(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
-{
- static const u32 addr[RF_PATH_NUM_8852B] = {0x5800, 0x7800};
- const u32 mask = B_DPD_TSSI_CW | B_DPD_PWR_CW | B_DPD_REF;
- const u8 ofst_ofdm = 0x4;
- const u8 ofst_cck = 0x8;
- const s16 ref_ofdm = 0;
- const s16 ref_cck = 0;
- u32 val;
- u8 i;
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n");
-
- rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
- B_AX_PWR_REF, 0x0);
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
- val = rtw8852b_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
- phy_idx);
-
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
- val = rtw8852b_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
-
- for (i = 0; i < RF_PATH_NUM_8852B; i++)
- rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
- phy_idx);
-}
-
-static void rtw8852b_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- u8 tx_shape_idx,
- enum rtw89_phy_idx phy_idx)
-{
-#define __DFIR_CFG_ADDR(i) (R_TXFIR0 + ((i) << 2))
-#define __DFIR_CFG_MASK 0xffffffff
-#define __DFIR_CFG_NR 8
-#define __DECL_DFIR_PARAM(_name, _val...) \
- static const u32 param_ ## _name[] = {_val}; \
- static_assert(ARRAY_SIZE(param_ ## _name) == __DFIR_CFG_NR)
-
- __DECL_DFIR_PARAM(flat,
- 0x023D23FF, 0x0029B354, 0x000FC1C8, 0x00FDB053,
- 0x00F86F9A, 0x06FAEF92, 0x00FE5FCC, 0x00FFDFF5);
- __DECL_DFIR_PARAM(sharp,
- 0x023D83FF, 0x002C636A, 0x0013F204, 0x00008090,
- 0x00F87FB0, 0x06F99F83, 0x00FDBFBA, 0x00003FF5);
- __DECL_DFIR_PARAM(sharp_14,
- 0x023B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E,
- 0x00FD8F92, 0x0602D011, 0x0001C02C, 0x00FFF00A);
- u8 ch = chan->channel;
- const u32 *param;
- u32 addr;
- int i;
-
- if (ch > 14) {
- rtw89_warn(rtwdev,
- "set tx shape dfir by unknown ch: %d on 2G\n", ch);
- return;
- }
-
- if (ch == 14)
- param = param_sharp_14;
- else
- param = tx_shape_idx == 0 ? param_flat : param_sharp;
-
- for (i = 0; i < __DFIR_CFG_NR; i++) {
- addr = __DFIR_CFG_ADDR(i);
- rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
- "set tx shape dfir: 0x%x: 0x%x\n", addr, param[i]);
- rtw89_phy_write32_idx(rtwdev, addr, __DFIR_CFG_MASK, param[i],
- phy_idx);
- }
-
-#undef __DECL_DFIR_PARAM
-#undef __DFIR_CFG_NR
-#undef __DFIR_CFG_MASK
-#undef __DFIR_CFG_ADDR
-}
-
-static void rtw8852b_set_tx_shape(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
- u8 band = chan->band_type;
- u8 regd = rtw89_regd_get(rtwdev, band);
- u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd];
- u8 tx_shape_ofdm = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd];
-
- if (band == RTW89_BAND_2G)
- rtw8852b_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx);
-
- rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG,
- tx_shape_ofdm);
-}
-
-static void rtw8852b_set_txpwr(struct rtw89_dev *rtwdev,
- const struct rtw89_chan *chan,
- enum rtw89_phy_idx phy_idx)
-{
- rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx);
- rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx);
- rtw8852b_set_tx_shape(rtwdev, chan, phy_idx);
- rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
- rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
-}
-
-static void rtw8852b_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx phy_idx)
-{
- rtw8852b_set_txpwr_ref(rtwdev, phy_idx);
-}
-
-static
-void rtw8852b_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
- s8 pw_ofst, enum rtw89_mac_idx mac_idx)
-{
- u32 reg;
-
- if (pw_ofst < -16 || pw_ofst > 15) {
- rtw89_warn(rtwdev, "[ULTB] Err pwr_offset=%d\n", pw_ofst);
- return;
- }
-
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx);
- rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN);
-
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst);
-
- pw_ofst = max_t(s8, pw_ofst - 3, -16);
- reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx);
- rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst);
-}
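-
-/* The 2T entry is derived from the 1T offset minus 3 (roughly the 3 dB
- * split across two Tx paths), floored at -16 to stay inside the signed
- * 5-bit field: e.g. pw_ofst = -15 programs 1T = -15 and 2T = -16.
- */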
-
-static int
-rtw8852b_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
-{
- int ret;
-
- ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333);
- if (ret)
- return ret;
-
- ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf000);
- if (ret)
- return ret;
-
- ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff);
- if (ret)
- return ret;
-
- rtw8852b_set_txpwr_ul_tb_offset(rtwdev, 0, phy_idx == RTW89_PHY_1 ?
- RTW89_MAC_1 : RTW89_MAC_0);
-
- return 0;
-}
-
-void rtw8852b_bb_set_plcp_tx(struct rtw89_dev *rtwdev)
-{
- const struct rtw89_reg3_def *def = rtw8852b_pmac_ht20_mcs7_tbl;
- u8 i;
-
- for (i = 0; i < ARRAY_SIZE(rtw8852b_pmac_ht20_mcs7_tbl); i++, def++)
- rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
-}
-
-static void rtw8852b_stop_pmac_tx(struct rtw89_dev *rtwdev,
- struct rtw8852b_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
-{
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Stop Tx");
- if (tx_info->mode == CONT_TX)
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 0, idx);
- else if (tx_info->mode == PKTS_TX)
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 0, idx);
-}
-
-static void rtw8852b_start_pmac_tx(struct rtw89_dev *rtwdev,
- struct rtw8852b_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
-{
- enum rtw8852b_pmac_mode mode = tx_info->mode;
- u32 pkt_cnt = tx_info->tx_cnt;
- u16 period = tx_info->period;
-
- if (mode == CONT_TX && !tx_info->is_cck) {
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 1, idx);
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CTx Start");
- } else if (mode == PKTS_TX) {
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 1, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD,
- B_PMAC_TX_PRD_MSK, period, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CNT, B_PMAC_TX_CNT_MSK,
- pkt_cnt, idx);
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC PTx Start");
- }
-
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 1, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 0, idx);
-}
-
-void rtw8852b_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
- struct rtw8852b_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx)
-{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
-
- if (!tx_info->en_pmac_tx) {
- rtw8852b_stop_pmac_tx(rtwdev, tx_info, idx);
- rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
- if (chan->band_type == RTW89_BAND_2G)
- rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS);
- return;
- }
-
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Tx Enable");
-
- rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 1, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 1, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0x3f, idx);
- rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 1, idx);
- rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
- rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, idx);
-
- rtw8852b_start_pmac_tx(rtwdev, tx_info, idx);
-}
-
-void rtw8852b_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
- u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx)
-{
- struct rtw8852b_bb_pmac_info tx_info = {0};
-
- tx_info.en_pmac_tx = enable;
- tx_info.is_cck = 0;
- tx_info.mode = PKTS_TX;
- tx_info.tx_cnt = tx_cnt;
- tx_info.period = period;
- tx_info.tx_time = tx_time;
-
- rtw8852b_bb_set_pmac_tx(rtwdev, &tx_info, idx);
-}
-
-void rtw8852b_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
- enum rtw89_phy_idx idx)
-{
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx PWR = %d", pwr_dbm);
-
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx);
- rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, pwr_dbm, idx);
-}
-
-void rtw8852b_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path)
-{
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 7, RTW89_PHY_0);
-
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx Path = %d", tx_path);
-
- if (tx_path == RF_PATH_A) {
- rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 1);
- rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0);
- } else if (tx_path == RF_PATH_B) {
- rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 2);
- rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0);
- } else if (tx_path == RF_PATH_AB) {
- rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 3);
- rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 4);
- } else {
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Error Tx Path");
- }
-}
-
-void rtw8852b_bb_tx_mode_switch(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx idx, u8 mode)
-{
- if (mode != 0)
- return;
-
- rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Tx mode switch");
-
- rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_PMAC_RXMOD, B_PMAC_RXMOD_MSK, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_DPD_EN, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, idx);
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 0, idx);
-}
-
-void rtw8852b_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
- struct rtw8852b_bb_tssi_bak *bak)
-{
- s32 tmp;
-
- bak->tx_path = rtw89_phy_read32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, idx);
- bak->rx_path = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, idx);
- bak->p0_rfmode = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, idx);
- bak->p0_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, idx);
- bak->p1_rfmode = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, idx);
- bak->p1_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, idx);
- tmp = rtw89_phy_read32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, idx);
- bak->tx_pwr = sign_extend32(tmp, 8);
-}
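-
-/* tx_pwr is read back as a 9-bit two's-complement (S9) value, hence the
- * sign_extend32(tmp, 8); e.g. a raw readback of 0x1ff restores as -1.
- */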
-
-void rtw8852b_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
- const struct rtw8852b_bb_tssi_bak *bak)
-{
- rtw89_phy_write32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, bak->tx_path, idx);
- if (bak->tx_path == RF_AB)
- rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x4);
- else
- rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x0);
- rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, bak->rx_path, idx);
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx);
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, bak->p0_rfmode, idx);
- rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, bak->p0_rfmode_ftm, idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, bak->p1_rfmode, idx);
- rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, bak->p1_rfmode_ftm, idx);
- rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, bak->tx_pwr, idx);
-}
-
-static void rtw8852b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
- enum rtw89_phy_idx phy_idx)
-{
- rtw89_phy_write_reg3_tbl(rtwdev, en ? &rtw8852b_btc_preagc_en_defs_tbl :
- &rtw8852b_btc_preagc_dis_defs_tbl);
-}
-
-static void rtw8852b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
- enum rtw89_phy_idx phy_idx)
-{
- if (en) {
- rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
- B_PATH0_BT_SHARE_V1, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
- B_PATH0_BTG_PATH_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
- B_PATH1_G_LNA6_OP1DB_V1, 0x20);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
- B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x30);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
- B_PATH1_BT_SHARE_V1, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
- B_PATH1_BTG_PATH_V1, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x2);
- rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
- B_BT_DYN_DC_EST_EN_MSK, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x1);
- } else {
- rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
- B_PATH0_BT_SHARE_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
- B_PATH0_BTG_PATH_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
- B_PATH1_G_LNA6_OP1DB_V1, 0x1a);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
- B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
- B_PATH1_BT_SHARE_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
- B_PATH1_BTG_PATH_V1, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xc);
- rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0);
- rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
- B_BT_DYN_DC_EST_EN_MSK, 0x1);
- rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0);
- }
-}
-
-void rtw8852b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path)
-{
- const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
- u32 rst_mask0;
- u32 rst_mask1;
-
- if (rx_path == RF_A) {
- rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 1);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 1);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
- } else if (rx_path == RF_B) {
- rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 2);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 2);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 2);
- rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
- } else if (rx_path == RF_AB) {
- rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 3);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 3);
- rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 3);
- rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
- }
-
- rtw8852b_set_gain_offset(rtwdev, chan->subband_type, RTW89_PHY_0);
-
- if (chan->band_type == RTW89_BAND_2G &&
- (rx_path == RF_B || rx_path == RF_AB))
- rtw8852b_ctrl_btg_bt_rx(rtwdev, true, RTW89_PHY_0);
- else
- rtw8852b_ctrl_btg_bt_rx(rtwdev, false, RTW89_PHY_0);
-
- rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI;
- rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI;
- if (rx_path == RF_A) {
- rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1);
- rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3);
- } else {
- rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1);
- rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3);
- }
-}
-
-static void rtw8852b_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path)
-{
- if (rx_path == RF_A) {
- rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
- B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
- rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
- B_P0_RFMODE_FTM_RX, 0x333);
- rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
- B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1111111);
- rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
- B_P1_RFMODE_FTM_RX, 0x111);
- } else if (rx_path == RF_B) {
- rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
- B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1111111);
- rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
- B_P0_RFMODE_FTM_RX, 0x111);
- rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
- B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
- rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
- B_P1_RFMODE_FTM_RX, 0x333);
- } else if (rx_path == RF_AB) {
- rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
- B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
- rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
- B_P0_RFMODE_FTM_RX, 0x333);
- rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
- B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
- rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
- B_P1_RFMODE_FTM_RX, 0x333);
- }
-}
-
-static void rtw8852b_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
-{
- struct rtw89_hal *hal = &rtwdev->hal;
- enum rtw89_rf_path_bit rx_path = hal->antenna_rx ? hal->antenna_rx : RF_AB;
-
- rtw8852b_bb_ctrl_rx_path(rtwdev, rx_path);
- rtw8852b_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path);
-
- if (rtwdev->hal.rx_nss == 1) {
- rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
- } else {
- rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
- rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
- }
-
- rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0x0, RTW89_PHY_0);
-}
-
-static u8 rtw8852b_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
-{
- if (rtwdev->is_tssi_mode[rf_path]) {
- u32 addr = 0x1c10 + (rf_path << 13);
-
- return rtw89_phy_read32_mask(rtwdev, addr, 0x3F000000);
- }
-
- rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
- rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0);
- rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
-
- fsleep(200);
-
- return rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL);
-}
-
static void rtw8852b_btc_set_rfe(struct rtw89_dev *rtwdev)
{
const struct rtw89_btc_ver *ver = rtwdev->btc.ver;
@@ -2177,86 +633,6 @@ static void rtw8852b_btc_set_rfe(struct rtw89_dev *rtwdev)
}
}
-static
-void rtw8852b_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
-{
- rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x20000);
- rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group);
- rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val);
- rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
-}
-
-static void rtw8852b_btc_init_cfg(struct rtw89_dev *rtwdev)
-{
- struct rtw89_btc *btc = &rtwdev->btc;
- const struct rtw89_chip_info *chip = rtwdev->chip;
- const struct rtw89_mac_ax_coex coex_params = {
- .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
- .direction = RTW89_MAC_AX_COEX_INNER,
- };
-
- /* PTA init */
- rtw89_mac_coex_init(rtwdev, &coex_params);
-
- /* set WL Tx response = Hi-Pri */
- chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true);
- chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true);
-
- /* set rf gnt debug off */
- rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, RFREG_MASK, 0x0);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0);
-
- /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
- if (btc->ant_type == BTC_ANT_SHARED) {
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
- /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x55f);
- } else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5df);
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5df);
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
- rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x5ff);
- }
-
- /* set PTA break table */
- rtw89_write32(rtwdev, R_BTC_BREAK_TABLE, BTC_BREAK_PARAM);
-
- /* enable BT counter 0xda40[16,2] = 2b'11 */
- rtw89_write32_set(rtwdev, R_AX_CSR_MODE, B_AX_BT_CNT_RST | B_AX_STATIS_BT_EN);
- btc->cx.wl.status.map.init_ok = true;
-}
-
-static
-void rtw8852b_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
-{
- u32 bitmap;
- u32 reg;
-
- switch (map) {
- case BTC_PRI_MASK_TX_RESP:
- reg = R_BTC_BT_COEX_MSK_TABLE;
- bitmap = B_BTC_PRI_MASK_TX_RESP_V1;
- break;
- case BTC_PRI_MASK_BEACON:
- reg = R_AX_WL_PRI_MSK;
- bitmap = B_AX_PTA_WL_PRI_MASK_BCNQ;
- break;
- case BTC_PRI_MASK_RX_CCK:
- reg = R_BTC_BT_COEX_MSK_TABLE;
- bitmap = B_BTC_PRI_MASK_RXCCK_V1;
- break;
- default:
- return;
- }
-
- if (state)
- rtw89_write32_set(rtwdev, reg, bitmap);
- else
- rtw89_write32_clr(rtwdev, reg, bitmap);
-}
-
union rtw8852b_btc_wl_txpwr_ctrl {
u32 txpwr_val;
struct {
@@ -2324,185 +700,19 @@ do { \
#undef __write_ctrl
}
-static
-s8 rtw8852b_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
-{
- /* +6 to compensate the offset */
- return clamp_t(s8, val + 6, -100, 0) + 100;
-}
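-
-/* Illustrative mapping: the +6 compensation plus clamp turns a raw BT
- * RSSI in dBm into a 0..100 scale, e.g. val = -60 gives
- * clamp(-54, -100, 0) + 100 = 46, and anything at or below -106 dBm
- * saturates at 0.
- */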
-
-static
-void rtw8852b_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
-{
- /* Feature move to firmware */
-}
-
-static void rtw8852b_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
-{
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x31);
-
- /* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */
- if (state)
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x179);
- else
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x20);
-
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
-}
-
-static void rtw8852b_btc_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level)
-{
- switch (level) {
- case 0: /* default */
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
- break;
- case 1: /* Fix LNA2=5 */
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
- rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
- break;
- }
-}
-
-static void rtw8852b_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
-{
- struct rtw89_btc *btc = &rtwdev->btc;
-
- switch (level) {
- case 0: /* original */
- default:
- rtw8852b_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0);
- btc->dm.wl_lna2 = 0;
- break;
- case 1: /* for FDD free-run */
- rtw8852b_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0);
- btc->dm.wl_lna2 = 0;
- break;
- case 2: /* for BTG Co-Rx */
- rtw8852b_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0);
- btc->dm.wl_lna2 = 1;
- break;
- }
-
- rtw8852b_btc_set_wl_lna2(rtwdev, btc->dm.wl_lna2);
-}
-
-static void rtw8852b_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
- struct rtw89_rx_phy_ppdu *phy_ppdu,
- struct ieee80211_rx_status *status)
-{
- u16 chan = phy_ppdu->chan_idx;
- enum nl80211_band band;
- u8 ch;
-
- if (chan == 0)
- return;
-
- rtw89_decode_chan_idx(rtwdev, chan, &ch, &band);
- status->freq = ieee80211_channel_to_frequency(ch, band);
- status->band = band;
-}
-
-static void rtw8852b_query_ppdu(struct rtw89_dev *rtwdev,
- struct rtw89_rx_phy_ppdu *phy_ppdu,
- struct ieee80211_rx_status *status)
-{
- u8 path;
- u8 *rx_power = phy_ppdu->rssi;
-
- status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
- for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
- status->chains |= BIT(path);
- status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
- }
- if (phy_ppdu->valid)
- rtw8852b_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
-}
-
-static int rtw8852b_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
-{
- int ret;
-
- rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
- B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
- rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x1);
- rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
- rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
- rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
-
- ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xC7,
- FULL_BIT_MASK);
- if (ret)
- return ret;
-
- ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xC7,
- FULL_BIT_MASK);
- if (ret)
- return ret;
-
- rtw89_write8(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_XYN_CYCLE);
-
- return 0;
-}
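-
-/* Note the set -> clear -> set strobe of B_AX_AFC_AFEDIG above: the
- * hardware appears to latch on the edge rather than the level, so the
- * toggling order matters.
- */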
-
-static int rtw8852b_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
-{
- u8 wl_rfc_s0;
- u8 wl_rfc_s1;
- int ret;
-
- rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
- B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
-
- ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, &wl_rfc_s0);
- if (ret)
- return ret;
- wl_rfc_s0 &= ~XTAL_SI_RF00S_EN;
- ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, wl_rfc_s0,
- FULL_BIT_MASK);
- if (ret)
- return ret;
-
- ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, &wl_rfc_s1);
- if (ret)
- return ret;
- wl_rfc_s1 &= ~XTAL_SI_RF10S_EN;
- ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, wl_rfc_s1,
- FULL_BIT_MASK);
- return ret;
-}
-
static const struct rtw89_chip_ops rtw8852b_chip_ops = {
- .enable_bb_rf = rtw8852b_mac_enable_bb_rf,
- .disable_bb_rf = rtw8852b_mac_disable_bb_rf,
+ .enable_bb_rf = rtw8852bx_mac_enable_bb_rf,
+ .disable_bb_rf = rtw8852bx_mac_disable_bb_rf,
.bb_preinit = NULL,
.bb_postinit = NULL,
.bb_reset = rtw8852b_bb_reset,
- .bb_sethw = rtw8852b_bb_sethw,
+ .bb_sethw = rtw8852bx_bb_sethw,
.read_rf = rtw89_phy_read_rf_v1,
.write_rf = rtw89_phy_write_rf_v1,
.set_channel = rtw8852b_set_channel,
.set_channel_help = rtw8852b_set_channel_help,
- .read_efuse = rtw8852b_read_efuse,
- .read_phycap = rtw8852b_read_phycap,
+ .read_efuse = rtw8852bx_read_efuse,
+ .read_phycap = rtw8852bx_read_phycap,
.fem_setup = NULL,
.rfe_gpio = NULL,
.rfk_hw_init = NULL,
@@ -2512,16 +722,16 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.rfk_band_changed = rtw8852b_rfk_band_changed,
.rfk_scan = rtw8852b_rfk_scan,
.rfk_track = rtw8852b_rfk_track,
- .power_trim = rtw8852b_power_trim,
- .set_txpwr = rtw8852b_set_txpwr,
- .set_txpwr_ctrl = rtw8852b_set_txpwr_ctrl,
- .init_txpwr_unit = rtw8852b_init_txpwr_unit,
- .get_thermal = rtw8852b_get_thermal,
- .ctrl_btg_bt_rx = rtw8852b_ctrl_btg_bt_rx,
- .query_ppdu = rtw8852b_query_ppdu,
- .ctrl_nbtg_bt_tx = rtw8852b_ctrl_nbtg_bt_tx,
- .cfg_txrx_path = rtw8852b_bb_cfg_txrx_path,
- .set_txpwr_ul_tb_offset = rtw8852b_set_txpwr_ul_tb_offset,
+ .power_trim = rtw8852bx_power_trim,
+ .set_txpwr = rtw8852bx_set_txpwr,
+ .set_txpwr_ctrl = rtw8852bx_set_txpwr_ctrl,
+ .init_txpwr_unit = rtw8852bx_init_txpwr_unit,
+ .get_thermal = rtw8852bx_get_thermal,
+ .ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx,
+ .query_ppdu = rtw8852bx_query_ppdu,
+ .ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx,
+ .cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path,
+ .set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset,
.pwr_on_func = rtw8852b_pwr_on_func,
.pwr_off_func = rtw8852b_pwr_off_func,
.query_rxdesc = rtw89_core_query_rxdesc,
@@ -2540,13 +750,13 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = {
.h2c_ba_cam = rtw89_fw_h2c_ba_cam,
.btc_set_rfe = rtw8852b_btc_set_rfe,
- .btc_init_cfg = rtw8852b_btc_init_cfg,
- .btc_set_wl_pri = rtw8852b_btc_set_wl_pri,
+ .btc_init_cfg = rtw8852bx_btc_init_cfg,
+ .btc_set_wl_pri = rtw8852bx_btc_set_wl_pri,
.btc_set_wl_txpwr_ctrl = rtw8852b_btc_set_wl_txpwr_ctrl,
- .btc_get_bt_rssi = rtw8852b_btc_get_bt_rssi,
- .btc_update_bt_cnt = rtw8852b_btc_update_bt_cnt,
- .btc_wl_s1_standby = rtw8852b_btc_wl_s1_standby,
- .btc_set_wl_rx_gain = rtw8852b_btc_set_wl_rx_gain,
+ .btc_get_bt_rssi = rtw8852bx_btc_get_bt_rssi,
+ .btc_update_bt_cnt = rtw8852bx_btc_update_bt_cnt,
+ .btc_wl_s1_standby = rtw8852bx_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = rtw8852bx_btc_set_wl_rx_gain,
.btc_set_policy = rtw89_btc_set_policy_v1,
};
@@ -2573,7 +783,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.fifo_size = 196608,
.small_fifo_size = true,
.dle_scc_rsvd_size = 98304,
- .max_amsdu_limit = 3500,
+ .max_amsdu_limit = 5000,
.dis_2g_40m_ul_ofdma = true,
.rsvd_ple_ofst = 0x2f800,
.hfc_param_ini = rtw8852b_hfc_param_ini_pcie,
@@ -2596,7 +806,9 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.dig_table = NULL,
.dig_regs = &rtw8852b_dig_regs,
.tssi_dbw_table = NULL,
+ .support_macid_num = RTW89_MAX_MAC_ID_NUM,
.support_chanctx_num = 0,
+ .support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ),
.support_bandwidths = BIT(NL80211_CHAN_WIDTH_20) |
@@ -2657,7 +869,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8852b_c2h_regs,
.page_regs = &rtw8852b_page_regs,
- .wow_reason_reg = R_AX_C2HREG_DATA3 + 3,
+ .wow_reason_reg = rtw8852b_wow_wakeup_regs,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = &rtw8852b_dcfo_comp,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.h b/drivers/net/wireless/realtek/rtw89/rtw8852b.h
index 4f9b3d476879..5ec7180fd355 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b.h
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.h
@@ -10,128 +10,6 @@
#define RF_PATH_NUM_8852B 2
#define BB_PATH_NUM_8852B 2
-enum rtw8852b_pmac_mode {
- NONE_TEST,
- PKTS_TX,
- PKTS_RX,
- CONT_TX
-};
-
-struct rtw8852b_u_efuse {
- u8 rsvd[0x88];
- u8 mac_addr[ETH_ALEN];
-};
-
-struct rtw8852b_e_efuse {
- u8 mac_addr[ETH_ALEN];
-};
-
-struct rtw8852b_tssi_offset {
- u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM];
- u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM];
- u8 rsvd[7];
- u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM];
-} __packed;
-
-struct rtw8852b_efuse {
- u8 rsvd[0x210];
- struct rtw8852b_tssi_offset path_a_tssi;
- u8 rsvd1[10];
- struct rtw8852b_tssi_offset path_b_tssi;
- u8 rsvd2[94];
- u8 channel_plan;
- u8 xtal_k;
- u8 rsvd3;
- u8 iqk_lck;
- u8 rsvd4[5];
- u8 reg_setting:2;
- u8 tx_diversity:1;
- u8 rx_diversity:2;
- u8 ac_mode:1;
- u8 module_type:2;
- u8 rsvd5;
- u8 shared_ant:1;
- u8 coex_type:3;
- u8 ant_iso:1;
- u8 radio_on_off:1;
- u8 rsvd6:2;
- u8 eeprom_version;
- u8 customer_id;
- u8 tx_bb_swing_2g;
- u8 tx_bb_swing_5g;
- u8 tx_cali_pwr_trk_mode;
- u8 trx_path_selection;
- u8 rfe_type;
- u8 country_code[2];
- u8 rsvd7[3];
- u8 path_a_therm;
- u8 path_b_therm;
- u8 rsvd8[2];
- u8 rx_gain_2g_ofdm;
- u8 rsvd9;
- u8 rx_gain_2g_cck;
- u8 rsvd10;
- u8 rx_gain_5g_low;
- u8 rsvd11;
- u8 rx_gain_5g_mid;
- u8 rsvd12;
- u8 rx_gain_5g_high;
- u8 rsvd13[35];
- u8 path_a_cck_pwr_idx[6];
- u8 path_a_bw40_1tx_pwr_idx[5];
- u8 path_a_ofdm_1tx_pwr_idx_diff:4;
- u8 path_a_bw20_1tx_pwr_idx_diff:4;
- u8 path_a_bw20_2tx_pwr_idx_diff:4;
- u8 path_a_bw40_2tx_pwr_idx_diff:4;
- u8 path_a_cck_2tx_pwr_idx_diff:4;
- u8 path_a_ofdm_2tx_pwr_idx_diff:4;
- u8 rsvd14[0xf2];
- union {
- struct rtw8852b_u_efuse u;
- struct rtw8852b_e_efuse e;
- };
-} __packed;
-
-struct rtw8852b_bb_pmac_info {
- u8 en_pmac_tx:1;
- u8 is_cck:1;
- u8 mode:3;
- u8 rsvd:3;
- u16 tx_cnt;
- u16 period;
- u16 tx_time;
- u8 duty_cycle;
-};
-
-struct rtw8852b_bb_tssi_bak {
- u8 tx_path;
- u8 rx_path;
- u32 p0_rfmode;
- u32 p0_rfmode_ftm;
- u32 p1_rfmode;
- u32 p1_rfmode_ftm;
- s16 tx_pwr; /* S9 */
-};
-
extern const struct rtw89_chip_info rtw8852b_chip_info;
-void rtw8852b_bb_set_plcp_tx(struct rtw89_dev *rtwdev);
-void rtw8852b_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
- struct rtw8852b_bb_pmac_info *tx_info,
- enum rtw89_phy_idx idx);
-void rtw8852b_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
- u16 tx_cnt, u16 period, u16 tx_time,
- enum rtw89_phy_idx idx);
-void rtw8852b_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
- enum rtw89_phy_idx idx);
-void rtw8852b_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path);
-void rtw8852b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
- enum rtw89_rf_path_bit rx_path);
-void rtw8852b_bb_tx_mode_switch(struct rtw89_dev *rtwdev,
- enum rtw89_phy_idx idx, u8 mode);
-void rtw8852b_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
- struct rtw8852b_bb_tssi_bak *bak);
-void rtw8852b_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
- const struct rtw8852b_bb_tssi_bak *bak);
-
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
new file mode 100644
index 000000000000..1745c2882acf
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c
@@ -0,0 +1,2053 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "debug.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852b_common.h"
+#include "util.h"
+
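+/* Register/mask/value triplets that preload an HT20 MCS7 PLCP for
+ * PMAC-mode transmission; replayed by __rtw8852bx_bb_set_plcp_tx().
+ */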
+static const struct rtw89_reg3_def rtw8852bx_pmac_ht20_mcs7_tbl[] = {
+ {0x4580, 0x0000ffff, 0x0},
+ {0x4580, 0xffff0000, 0x0},
+ {0x4584, 0x0000ffff, 0x0},
+ {0x4584, 0xffff0000, 0x0},
+ {0x4580, 0x0000ffff, 0x1},
+ {0x4578, 0x00ffffff, 0x2018b},
+ {0x4570, 0x03ffffff, 0x7},
+ {0x4574, 0x03ffffff, 0x32407},
+ {0x45b8, 0x00000010, 0x0},
+ {0x45b8, 0x00000100, 0x0},
+ {0x45b8, 0x00000080, 0x0},
+ {0x45b8, 0x00000008, 0x0},
+ {0x45a0, 0x0000ff00, 0x0},
+ {0x45a0, 0xff000000, 0x1},
+ {0x45a4, 0x0000ff00, 0x2},
+ {0x45a4, 0xff000000, 0x3},
+ {0x45b8, 0x00000020, 0x0},
+ {0x4568, 0xe0000000, 0x0},
+ {0x45b8, 0x00000002, 0x1},
+ {0x456c, 0xe0000000, 0x0},
+ {0x45b4, 0x00006000, 0x0},
+ {0x45b4, 0x00001800, 0x1},
+ {0x45b8, 0x00000040, 0x0},
+ {0x45b8, 0x00000004, 0x0},
+ {0x45b8, 0x00000200, 0x0},
+ {0x4598, 0xf8000000, 0x0},
+ {0x45b8, 0x00100000, 0x0},
+ {0x45a8, 0x00000fc0, 0x0},
+ {0x45b8, 0x00200000, 0x0},
+ {0x45b0, 0x00000038, 0x0},
+ {0x45b0, 0x000001c0, 0x0},
+ {0x45a0, 0x000000ff, 0x0},
+ {0x45b8, 0x00400000, 0x0},
+ {0x4590, 0x000007ff, 0x0},
+ {0x45b0, 0x00000e00, 0x0},
+ {0x45ac, 0x0000001f, 0x0},
+ {0x45b8, 0x00800000, 0x0},
+ {0x45a8, 0x0003f000, 0x0},
+ {0x45b8, 0x01000000, 0x0},
+ {0x45b0, 0x00007000, 0x0},
+ {0x45b0, 0x00038000, 0x0},
+ {0x45a0, 0x00ff0000, 0x0},
+ {0x45b8, 0x02000000, 0x0},
+ {0x4590, 0x003ff800, 0x0},
+ {0x45b0, 0x001c0000, 0x0},
+ {0x45ac, 0x000003e0, 0x0},
+ {0x45b8, 0x04000000, 0x0},
+ {0x45a8, 0x00fc0000, 0x0},
+ {0x45b8, 0x08000000, 0x0},
+ {0x45b0, 0x00e00000, 0x0},
+ {0x45b0, 0x07000000, 0x0},
+ {0x45a4, 0x000000ff, 0x0},
+ {0x45b8, 0x10000000, 0x0},
+ {0x4594, 0x000007ff, 0x0},
+ {0x45b0, 0x38000000, 0x0},
+ {0x45ac, 0x00007c00, 0x0},
+ {0x45b8, 0x20000000, 0x0},
+ {0x45a8, 0x3f000000, 0x0},
+ {0x45b8, 0x40000000, 0x0},
+ {0x45b4, 0x00000007, 0x0},
+ {0x45b4, 0x00000038, 0x0},
+ {0x45a4, 0x00ff0000, 0x0},
+ {0x45b8, 0x80000000, 0x0},
+ {0x4594, 0x003ff800, 0x0},
+ {0x45b4, 0x000001c0, 0x0},
+ {0x4598, 0xf8000000, 0x0},
+ {0x45b8, 0x00100000, 0x0},
+ {0x45a8, 0x00000fc0, 0x7},
+ {0x45b8, 0x00200000, 0x0},
+ {0x45b0, 0x00000038, 0x0},
+ {0x45b0, 0x000001c0, 0x0},
+ {0x45a0, 0x000000ff, 0x0},
+ {0x45b4, 0x06000000, 0x0},
+ {0x45b0, 0x00000007, 0x0},
+ {0x45b8, 0x00080000, 0x0},
+ {0x45a8, 0x0000003f, 0x0},
+ {0x457c, 0xffe00000, 0x1},
+ {0x4530, 0xffffffff, 0x0},
+ {0x4588, 0x00003fff, 0x0},
+ {0x4598, 0x000001ff, 0x0},
+ {0x4534, 0xffffffff, 0x0},
+ {0x4538, 0xffffffff, 0x0},
+ {0x453c, 0xffffffff, 0x0},
+ {0x4588, 0x0fffc000, 0x0},
+ {0x4598, 0x0003fe00, 0x0},
+ {0x4540, 0xffffffff, 0x0},
+ {0x4544, 0xffffffff, 0x0},
+ {0x4548, 0xffffffff, 0x0},
+ {0x458c, 0x00003fff, 0x0},
+ {0x4598, 0x07fc0000, 0x0},
+ {0x454c, 0xffffffff, 0x0},
+ {0x4550, 0xffffffff, 0x0},
+ {0x4554, 0xffffffff, 0x0},
+ {0x458c, 0x0fffc000, 0x0},
+ {0x459c, 0x000001ff, 0x0},
+ {0x4558, 0xffffffff, 0x0},
+ {0x455c, 0xffffffff, 0x0},
+ {0x4530, 0xffffffff, 0x4e790001},
+ {0x4588, 0x00003fff, 0x0},
+ {0x4598, 0x000001ff, 0x1},
+ {0x4534, 0xffffffff, 0x0},
+ {0x4538, 0xffffffff, 0x4b},
+ {0x45ac, 0x38000000, 0x7},
+ {0x4588, 0xf0000000, 0x0},
+ {0x459c, 0x7e000000, 0x0},
+ {0x45b8, 0x00040000, 0x0},
+ {0x45b8, 0x00020000, 0x0},
+ {0x4590, 0xffc00000, 0x0},
+ {0x45b8, 0x00004000, 0x0},
+ {0x4578, 0xff000000, 0x0},
+ {0x45b8, 0x00000400, 0x0},
+ {0x45b8, 0x00000800, 0x0},
+ {0x45b8, 0x00001000, 0x0},
+ {0x45b8, 0x00002000, 0x0},
+ {0x45b4, 0x00018000, 0x0},
+ {0x45ac, 0x07800000, 0x0},
+ {0x45b4, 0x00000600, 0x2},
+ {0x459c, 0x0001fe00, 0x80},
+ {0x45ac, 0x00078000, 0x3},
+ {0x459c, 0x01fe0000, 0x1},
+};
+
+static const struct rtw89_reg3_def rtw8852bx_btc_preagc_en_defs[] = {
+ {0x46D0, GENMASK(1, 0), 0x3},
+ {0x4790, GENMASK(1, 0), 0x3},
+ {0x4AD4, GENMASK(31, 0), 0xf},
+ {0x4AE0, GENMASK(31, 0), 0xf},
+ {0x4688, GENMASK(31, 24), 0x80},
+ {0x476C, GENMASK(31, 24), 0x80},
+ {0x4694, GENMASK(7, 0), 0x80},
+ {0x4694, GENMASK(15, 8), 0x80},
+ {0x4778, GENMASK(7, 0), 0x80},
+ {0x4778, GENMASK(15, 8), 0x80},
+ {0x4AE4, GENMASK(23, 0), 0x780D1E},
+ {0x4AEC, GENMASK(23, 0), 0x780D1E},
+ {0x469C, GENMASK(31, 26), 0x34},
+ {0x49F0, GENMASK(31, 26), 0x34},
+};
+
+static DECLARE_PHY_REG3_TBL(rtw8852bx_btc_preagc_en_defs);
+
+static const struct rtw89_reg3_def rtw8852bx_btc_preagc_dis_defs[] = {
+ {0x46D0, GENMASK(1, 0), 0x0},
+ {0x4790, GENMASK(1, 0), 0x0},
+ {0x4AD4, GENMASK(31, 0), 0x60},
+ {0x4AE0, GENMASK(31, 0), 0x60},
+ {0x4688, GENMASK(31, 24), 0x1a},
+ {0x476C, GENMASK(31, 24), 0x1a},
+ {0x4694, GENMASK(7, 0), 0x2a},
+ {0x4694, GENMASK(15, 8), 0x2a},
+ {0x4778, GENMASK(7, 0), 0x2a},
+ {0x4778, GENMASK(15, 8), 0x2a},
+ {0x4AE4, GENMASK(23, 0), 0x79E99E},
+ {0x4AEC, GENMASK(23, 0), 0x79E99E},
+ {0x469C, GENMASK(31, 26), 0x26},
+ {0x49F0, GENMASK(31, 26), 0x26},
+};
+
+static DECLARE_PHY_REG3_TBL(rtw8852bx_btc_preagc_dis_defs);
+
+static void rtw8852be_efuse_parsing(struct rtw89_efuse *efuse,
+ struct rtw8852bx_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+ efuse->rfe_type = map->rfe_type;
+ efuse->xtal_cap = map->xtal_k;
+}
+
+static void rtw8852bx_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
+ struct rtw8852bx_efuse *map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ struct rtw8852bx_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
+ u8 i, j;
+
+ tssi->thermal[RF_PATH_A] = map->path_a_therm;
+ tssi->thermal[RF_PATH_B] = map->path_b_therm;
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi,
+ sizeof(ofst[i]->cck_tssi));
+
+ for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n",
+ i, j, tssi->tssi_cck[i][j]);
+
+ memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi,
+ sizeof(ofst[i]->bw40_tssi));
+ memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM,
+ ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g));
+
+ for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n",
+ i, j, tssi->tssi_mcs[i][j]);
+ }
+}
+
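+/* Each efuse gain byte packs two signed 4-bit values: the high nibble
+ * decodes to path A, the low nibble to path B. A byte of 0xff means the
+ * field was never programmed, so the decode reports it as invalid.
+ */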
+static bool _decode_efuse_gain(u8 data, s8 *high, s8 *low)
+{
+ if (high)
+ *high = sign_extend32(FIELD_GET(GENMASK(7, 4), data), 3);
+ if (low)
+ *low = sign_extend32(FIELD_GET(GENMASK(3, 0), data), 3);
+
+ return data != 0xff;
+}
+
+static void rtw8852bx_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev,
+ struct rtw8852bx_efuse *map)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ bool valid = false;
+
+ valid |= _decode_efuse_gain(map->rx_gain_2g_cck,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK]);
+ valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_low,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_mid,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID]);
+ valid |= _decode_efuse_gain(map->rx_gain_5g_high,
+ &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH],
+ &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH]);
+
+ gain->offset_valid = valid;
+}
+
+static int __rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
+ enum rtw89_efuse_block block)
+{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ struct rtw8852bx_efuse *map;
+
+ map = (struct rtw8852bx_efuse *)log_map;
+
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ rtw8852bx_efuse_parsing_tssi(rtwdev, map);
+ rtw8852bx_efuse_parsing_gain_offset(rtwdev, map);
+
+ switch (rtwdev->hci.type) {
+ case RTW89_HCI_TYPE_PCIE:
+ rtw8852be_efuse_parsing(efuse, map);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
+
+ return 0;
+}
+
+static void rtw8852bx_phycap_parsing_power_cal(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+#define PWR_K_CHK_OFFSET 0x5E9
+#define PWR_K_CHK_VALUE 0xAA
+ u32 offset = PWR_K_CHK_OFFSET - rtwdev->chip->phycap_addr;
+
+ if (phycap_map[offset] == PWR_K_CHK_VALUE)
+ rtwdev->efuse.power_k_valid = true;
+}
+
+static void rtw8852bx_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ static const u32 tssi_trim_addr[RF_PATH_NUM_8852BX] = {0x5D6, 0x5AB};
+ u32 addr = rtwdev->chip->phycap_addr;
+ bool pg = false;
+ u32 ofst;
+ u8 i, j;
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) {
+ /* addrs are in decreasing order */
+ ofst = tssi_trim_addr[i] - addr - j;
+ tssi->tssi_trim[i][j] = phycap_map[ofst];
+
+ if (phycap_map[ofst] != 0xff)
+ pg = true;
+ }
+ }
+
+ if (!pg) {
+ memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim));
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM] no PG, set all trim info to 0\n");
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++)
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n",
+ i, j, tssi->tssi_trim[i][j],
+ tssi_trim_addr[i] - j);
+}
+
+static void rtw8852bx_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 thm_trim_addr[RF_PATH_NUM_8852BX] = {0x5DF, 0x5DC};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n",
+ i, info->thermal_trim[i]);
+
+ if (info->thermal_trim[i] != 0xff)
+ info->pg_thermal_trim = true;
+ }
+}
+
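+/* Apply the per-path thermal trim from efuse to RF register RR_TM2.
+ * __thm_setting() remaps the raw trim bits into the layout the
+ * RR_TM2_OFF field expects.
+ */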
+static void rtw8852bx_thermal_trim(struct rtw89_dev *rtwdev)
+{
+#define __thm_setting(raw) \
+({ \
+ u8 __v = (raw); \
+ ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \
+})
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 i, val;
+
+ if (!info->pg_thermal_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ val = __thm_setting(info->thermal_trim[i]);
+ rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n",
+ i, val);
+ }
+#undef __thm_setting
+}
+
+static void rtw8852bx_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 pabias_trim_addr[RF_PATH_NUM_8852BX] = {0x5DE, 0x5DB};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n",
+ i, info->pa_bias_trim[i]);
+
+ if (info->pa_bias_trim[i] != 0xff)
+ info->pg_pa_bias_trim = true;
+ }
+}
+
+static void rtw8852bx_pa_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pabias_2g, pabias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++) {
+ pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
+ pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pabias_2g, pabias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g);
+ }
+}
+
+static void rtw8852bx_phycap_parsing_gain_comp(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ static const u32 comp_addrs[][RTW89_SUBBAND_2GHZ_5GHZ_NR] = {
+ {0x5BB, 0x5BA, 0, 0x5B9, 0x5B8},
+ {0x590, 0x58F, 0, 0x58E, 0x58D},
+ };
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+ u32 phycap_addr = rtwdev->chip->phycap_addr;
+ bool valid = false;
+ int path, i;
+ u8 data;
+
+ for (path = 0; path < 2; path++)
+ for (i = 0; i < RTW89_SUBBAND_2GHZ_5GHZ_NR; i++) {
+ if (comp_addrs[path][i] == 0)
+ continue;
+
+ data = phycap_map[comp_addrs[path][i] - phycap_addr];
+ valid |= _decode_efuse_gain(data, NULL,
+ &gain->comp[path][i]);
+ }
+
+ gain->comp_valid = valid;
+}
+
+static int __rtw8852bx_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ rtw8852bx_phycap_parsing_power_cal(rtwdev, phycap_map);
+ rtw8852bx_phycap_parsing_tssi(rtwdev, phycap_map);
+ rtw8852bx_phycap_parsing_thermal_trim(rtwdev, phycap_map);
+ rtw8852bx_phycap_parsing_pa_bias_trim(rtwdev, phycap_map);
+ rtw8852bx_phycap_parsing_gain_comp(rtwdev, phycap_map);
+
+ return 0;
+}
+
+static void __rtw8852bx_power_trim(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_thermal_trim(rtwdev);
+ rtw8852bx_pa_bias_trim(rtwdev);
+}
+
+static void __rtw8852bx_set_channel_mac(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ u8 mac_idx)
+{
+ u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx);
+ u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx);
+ u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx);
+ u8 txsc20 = 0, txsc40 = 0;
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_80:
+ txsc40 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_40);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_40:
+ txsc20 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_20);
+ break;
+ default:
+ break;
+ }
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1));
+ rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4));
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(0));
+ rtw89_write32(rtwdev, sub_carr, txsc20);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_write8_clr(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK);
+ rtw89_write32(rtwdev, sub_carr, 0);
+ break;
+ default:
+ break;
+ }
+
+ if (chan->channel > 14) {
+ rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE);
+ rtw89_write8_set(rtwdev, chk_rate,
+ B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
+ } else {
+ rtw89_write8_set(rtwdev, chk_rate, B_AX_BAND_MODE);
+ rtw89_write8_clr(rtwdev, chk_rate,
+ B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
+ }
+}
+
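+/* SCO (sample clock offset) compensation thresholds for the CCK Barker
+ * and CCK detectors, indexed by 2 GHz channel number minus one.
+ */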
+static const u32 rtw8852bx_sco_barker_threshold[14] = {
+ 0x1cfea, 0x1d0e1, 0x1d1d7, 0x1d2cd, 0x1d3c3, 0x1d4b9, 0x1d5b0, 0x1d6a6,
+ 0x1d79c, 0x1d892, 0x1d988, 0x1da7f, 0x1db75, 0x1ddc4
+};
+
+static const u32 rtw8852bx_sco_cck_threshold[14] = {
+ 0x27de3, 0x27f35, 0x28088, 0x281da, 0x2832d, 0x2847f, 0x285d2, 0x28724,
+ 0x28877, 0x289c9, 0x28b1c, 0x28c6e, 0x28dc1, 0x290ed
+};
+
+static void rtw8852bx_ctrl_sco_cck(struct rtw89_dev *rtwdev, u8 primary_ch)
+{
+ u8 ch_element = primary_ch - 1;
+
+ rtw89_phy_write32_mask(rtwdev, R_RXSCOBC, B_RXSCOBC_TH,
+ rtw8852bx_sco_barker_threshold[ch_element]);
+ rtw89_phy_write32_mask(rtwdev, R_RXSCOCCK, B_RXSCOCCK_TH,
+ rtw8852bx_sco_cck_threshold[ch_element]);
+}
+
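+/* Map the center channel to the frequency compensation code written to
+ * B_FC0_BW_INV; channels outside the known ranges fall back to 0.
+ */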
+static u8 rtw8852bx_sco_mapping(u8 central_ch)
+{
+ if (central_ch == 1)
+ return 109;
+ else if (central_ch >= 2 && central_ch <= 6)
+ return 108;
+ else if (central_ch >= 7 && central_ch <= 10)
+ return 107;
+ else if (central_ch >= 11 && central_ch <= 14)
+ return 106;
+ else if (central_ch == 36 || central_ch == 38)
+ return 51;
+ else if (central_ch >= 40 && central_ch <= 58)
+ return 50;
+ else if (central_ch >= 60 && central_ch <= 64)
+ return 49;
+ else if (central_ch == 100 || central_ch == 102)
+ return 48;
+ else if (central_ch >= 104 && central_ch <= 126)
+ return 47;
+ else if (central_ch >= 128 && central_ch <= 151)
+ return 46;
+ else if (central_ch >= 153 && central_ch <= 177)
+ return 45;
+ else
+ return 0;
+}
+
+struct rtw8852bx_bb_gain {
+ u32 gain_g[BB_PATH_NUM_8852BX];
+ u32 gain_a[BB_PATH_NUM_8852BX];
+ u32 gain_mask;
+};
+
+static const struct rtw8852bx_bb_gain bb_gain_lna[LNA_GAIN_NUM] = {
+ { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740},
+ .gain_mask = 0xff000000 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x000000ff },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x0000ff00 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744},
+ .gain_mask = 0xff000000 },
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0x000000ff },
+};
+
+static const struct rtw8852bx_bb_gain bb_gain_tia[TIA_GAIN_NUM] = {
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0x00ff0000 },
+ { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748},
+ .gain_mask = 0xff000000 },
+};
+
+static void rtw8852bx_set_gain_error(struct rtw89_dev *rtwdev,
+ enum rtw89_subband subband,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
+ u8 gain_band = rtw89_subband_to_bb_gain_band(subband);
+ s32 val;
+ u32 reg;
+ u32 mask;
+ int i;
+
+ for (i = 0; i < LNA_GAIN_NUM; i++) {
+ if (subband == RTW89_CH_2G)
+ reg = bb_gain_lna[i].gain_g[path];
+ else
+ reg = bb_gain_lna[i].gain_a[path];
+
+ mask = bb_gain_lna[i].gain_mask;
+ val = gain->lna_gain[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+ }
+
+ for (i = 0; i < TIA_GAIN_NUM; i++) {
+ if (subband == RTW89_CH_2G)
+ reg = bb_gain_tia[i].gain_g[path];
+ else
+ reg = bb_gain_tia[i].gain_a[path];
+
+ mask = bb_gain_tia[i].gain_mask;
+ val = gain->tia_gain[gain_band][path][i];
+ rtw89_phy_write32_mask(rtwdev, reg, mask, val);
+ }
+}
+
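+/* 8852BT only: average the two per-path external loss values in the
+ * linear power domain (dB values cannot be averaged directly) and derive
+ * the EDCCA power offset from the result.
+ */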
+static void rtw8852bt_ext_loss_avg_update(struct rtw89_dev *rtwdev,
+ s8 ext_loss_a, s8 ext_loss_b)
+{
+ s8 ext_loss_avg;
+ u64 linear;
+ u8 pwrofst;
+
+ if (ext_loss_a == ext_loss_b) {
+ ext_loss_avg = ext_loss_a;
+ } else {
+ linear = rtw89_db_2_linear(abs(ext_loss_a - ext_loss_b)) + 1;
+ linear = DIV_ROUND_CLOSEST_ULL(linear / 2, 1 << RTW89_LINEAR_FRAC_BITS);
+ ext_loss_avg = rtw89_linear_2_db(linear);
+ ext_loss_avg += min(ext_loss_a, ext_loss_b);
+ }
+
+ pwrofst = max(DIV_ROUND_CLOSEST(ext_loss_avg, 4) + 16, EDCCA_PWROFST_DEFAULT);
+
+ rtw89_phy_write32_mask(rtwdev, R_PWOFST, B_PWOFST, pwrofst);
+}
+
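+/* Program the efuse gain compensation and RSSI/RPL offsets for the
+ * current subband. Offsets are scaled into each register's fixed-point
+ * format and clamped to the s8 range; on 8852BT the resulting per-path
+ * external losses also feed the EDCCA update above.
+ */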
+static void rtw8852bx_set_gain_offset(struct rtw89_dev *rtwdev,
+ enum rtw89_subband subband,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u32 gain_err_addr[2] = {R_P0_AGC_RSVD, R_P1_AGC_RSVD};
+ static const u32 rssi_ofst_addr[2] = {R_PATH0_G_TIA1_LNA6_OP1DB_V1,
+ R_PATH1_G_TIA1_LNA6_OP1DB_V1};
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_phy_efuse_gain *efuse_gain = &rtwdev->efuse_gain;
+ enum rtw89_gain_offset gain_ofdm_band;
+ s8 ext_loss_a = 0, ext_loss_b = 0;
+ s32 offset_a, offset_b;
+ s32 offset_ofdm, offset_cck;
+ s32 tmp;
+ u8 path;
+
+ if (!efuse_gain->comp_valid)
+ goto next;
+
+ for (path = RF_PATH_A; path < BB_PATH_NUM_8852BX; path++) {
+ tmp = efuse_gain->comp[path][subband];
+ tmp = clamp_t(s32, tmp << 2, S8_MIN, S8_MAX);
+ rtw89_phy_write32_mask(rtwdev, gain_err_addr[path], MASKBYTE0, tmp);
+ }
+
+next:
+ if (!efuse_gain->offset_valid)
+ goto ext_loss;
+
+ gain_ofdm_band = rtw89_subband_to_gain_offset_band_of_ofdm(subband);
+
+ offset_a = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band];
+ offset_b = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band];
+
+ tmp = -((offset_a << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2));
+ tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_A], B_PATH0_R_G_OFST_MASK, tmp);
+
+ tmp = -((offset_b << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2));
+ tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_B], B_PATH0_R_G_OFST_MASK, tmp);
+
+ if (hal->antenna_rx == RF_B) {
+ offset_ofdm = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band];
+ offset_cck = -efuse_gain->offset[RF_PATH_B][0];
+ } else {
+ offset_ofdm = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band];
+ offset_cck = -efuse_gain->offset[RF_PATH_A][0];
+ }
+
+ tmp = (offset_ofdm << 4) + efuse_gain->offset_base[RTW89_PHY_0];
+ tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_idx(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx);
+
+ tmp = (offset_ofdm << 4) + efuse_gain->rssi_base[RTW89_PHY_0];
+ tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx);
+
+ if (subband == RTW89_CH_2G) {
+ tmp = (offset_cck << 3) + (efuse_gain->offset_base[RTW89_PHY_0] >> 1);
+ tmp = clamp_t(s32, tmp, S8_MIN >> 1, S8_MAX >> 1);
+ rtw89_phy_write32_mask(rtwdev, R_RX_RPL_OFST,
+ B_RX_RPL_OFST_CCK_MASK, tmp);
+ }
+
+ ext_loss_a = (offset_a << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2);
+ ext_loss_b = (offset_b << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2);
+
+ext_loss:
+ if (rtwdev->chip->chip_id == RTL8852BT)
+ rtw8852bt_ext_loss_avg_update(rtwdev, ext_loss_a, ext_loss_b);
+}
+
+static
+void rtw8852bx_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband)
+{
+ const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax;
+ u8 band = rtw89_subband_to_bb_gain_band(subband);
+ u32 val;
+
+ val = u32_encode_bits((gain->rpl_ofst_20[band][RF_PATH_A] +
+ gain->rpl_ofst_20[band][RF_PATH_B]) >> 1, B_P0_RPL1_20_MASK) |
+ u32_encode_bits((gain->rpl_ofst_40[band][RF_PATH_A][0] +
+ gain->rpl_ofst_40[band][RF_PATH_B][0]) >> 1, B_P0_RPL1_40_MASK) |
+ u32_encode_bits((gain->rpl_ofst_40[band][RF_PATH_A][1] +
+ gain->rpl_ofst_40[band][RF_PATH_B][1]) >> 1, B_P0_RPL1_41_MASK);
+ val >>= B_P0_RPL1_SHIFT;
+ rtw89_phy_write32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_MASK, val);
+
+ val = u32_encode_bits((gain->rpl_ofst_40[band][RF_PATH_A][2] +
+ gain->rpl_ofst_40[band][RF_PATH_B][2]) >> 1, B_P0_RTL2_42_MASK) |
+ u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][0] +
+ gain->rpl_ofst_80[band][RF_PATH_B][0]) >> 1, B_P0_RTL2_80_MASK) |
+ u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][1] +
+ gain->rpl_ofst_80[band][RF_PATH_B][1]) >> 1, B_P0_RTL2_81_MASK) |
+ u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][10] +
+ gain->rpl_ofst_80[band][RF_PATH_B][10]) >> 1, B_P0_RTL2_8A_MASK);
+ rtw89_phy_write32(rtwdev, R_P0_RPL2, val);
+ rtw89_phy_write32(rtwdev, R_P1_RPL2, val);
+
+ val = u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][2] +
+ gain->rpl_ofst_80[band][RF_PATH_B][2]) >> 1, B_P0_RTL3_82_MASK) |
+ u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][3] +
+ gain->rpl_ofst_80[band][RF_PATH_B][3]) >> 1, B_P0_RTL3_83_MASK) |
+ u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][4] +
+ gain->rpl_ofst_80[band][RF_PATH_B][4]) >> 1, B_P0_RTL3_84_MASK) |
+ u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][9] +
+ gain->rpl_ofst_80[band][RF_PATH_B][9]) >> 1, B_P0_RTL3_89_MASK);
+ rtw89_phy_write32(rtwdev, R_P0_RPL3, val);
+ rtw89_phy_write32(rtwdev, R_P1_RPL3, val);
+}
+
+static void rtw8852bx_ctrl_ch(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 central_ch = chan->channel;
+ u8 subband = chan->subband_type;
+ u8 sco_comp;
+ bool is_2g = central_ch <= 14;
+
+ /* Path A */
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_SEL_MSK_V1, 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_SEL_MSK_V1, 0, phy_idx);
+
+ /* Path B */
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1, 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_SEL_MSK_V1, 0, phy_idx);
+
+ /* SCO compensate FC setting */
+ sco_comp = rtw8852bx_sco_mapping(central_ch);
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_INV, sco_comp, phy_idx);
+
+ if (chan->band_type == RTW89_BAND_6G)
+ return;
+
+ /* CCK parameters */
+ if (central_ch == 14) {
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3b13ff);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x1c42de);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfdb0ad);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xf60f6e);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xfd8f92);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0x2d011);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0x1c02c);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xfff00a);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3d23ff);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x29b354);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfc1c8);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xfdb053);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xf86f9a);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0xfaef92);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0xfe5fcc);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xffdff5);
+ }
+
+ rtw8852bx_set_gain_error(rtwdev, subband, RF_PATH_A);
+ rtw8852bx_set_gain_error(rtwdev, subband, RF_PATH_B);
+ rtw8852bx_set_gain_offset(rtwdev, subband, phy_idx);
+ rtw8852bx_set_rxsc_rpl_comp(rtwdev, subband);
+}
+
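+/* 8852B-only ADC setup: select the ADC and wideband ADC configuration
+ * for the requested bandwidth on one RF path. The 8852BT variant uses
+ * rtw8852bt_adc_cfg() below instead.
+ */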
+static void rtw8852b_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path)
+{
+ static const u32 adc_sel[2] = {0xC0EC, 0xC1EC};
+ static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4};
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0);
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to set ADC\n");
+ }
+}
+
+static
+void rtw8852bt_adc_cfg(struct rtw89_dev *rtwdev, u8 bw, u8 path)
+{
+ static const u32 rck_reset_count[2] = {0xC0E8, 0xC1E8};
+ static const u32 adc_op5_bw_sel[2] = {0xC0D8, 0xC1D8};
+ static const u32 adc_sample_td[2] = {0xC0D4, 0xC1D4};
+ static const u32 adc_rst_cycle[2] = {0xC0EC, 0xC1EC};
+ static const u32 decim_filter[2] = {0xC0EC, 0xC1EC};
+ static const u32 rck_offset[2] = {0xC0C4, 0xC1C4};
+ static const u32 rx_adc_clk[2] = {0x12A0, 0x32A0};
+ static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4};
+ static const u32 idac2_1[2] = {0xC0D4, 0xC1D4};
+ static const u32 idac2[2] = {0xC0D4, 0xC1D4};
+ static const u32 upd_clk_adc = 0x704;
+
+ if (rtwdev->chip->chip_id != RTL8852BT)
+ return;
+
+ rtw89_phy_write32_mask(rtwdev, idac2[path], B_P0_CFCH_CTL, 0x8);
+ rtw89_phy_write32_mask(rtwdev, rck_reset_count[path], B_ADCMOD_LP, 0x9);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], B_WDADC_SEL, 0x2);
+ rtw89_phy_write32_mask(rtwdev, rx_adc_clk[path], B_P0_RXCK_ADJ, 0x49);
+ rtw89_phy_write32_mask(rtwdev, decim_filter[path], B_DCIM_FR, 0x0);
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ case RTW89_CHANNEL_WIDTH_10:
+ case RTW89_CHANNEL_WIDTH_20:
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_mask(rtwdev, idac2_1[path], B_P0_CFCH_EN, 0x2);
+ rtw89_phy_write32_mask(rtwdev, adc_sample_td[path], B_P0_CFCH_BW0, 0x3);
+ rtw89_phy_write32_mask(rtwdev, adc_op5_bw_sel[path], B_P0_CFCH_BW1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, rck_offset[path], B_DRCK_MUL, 0x0);
+ /* Tx TSSI ADC update */
+ rtw89_phy_write32_mask(rtwdev, upd_clk_adc, B_RSTB_ASYNC_BW80, 0);
+
+ if (rtwdev->efuse.rfe_type >= 51)
+ rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x2);
+ else
+ rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x3);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_mask(rtwdev, idac2_1[path], B_P0_CFCH_EN, 0x2);
+ rtw89_phy_write32_mask(rtwdev, adc_sample_td[path], B_P0_CFCH_BW0, 0x2);
+ rtw89_phy_write32_mask(rtwdev, adc_op5_bw_sel[path], B_P0_CFCH_BW1, 0x8);
+ rtw89_phy_write32_mask(rtwdev, rck_offset[path], B_DRCK_MUL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x3);
+ /* Tx TSSI ADC update */
+ rtw89_phy_write32_mask(rtwdev, upd_clk_adc, B_RSTB_ASYNC_BW80, 1);
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ rtw89_phy_write32_mask(rtwdev, idac2_1[path], B_P0_CFCH_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, adc_sample_td[path], B_P0_CFCH_BW0, 0x2);
+ rtw89_phy_write32_mask(rtwdev, adc_op5_bw_sel[path], B_P0_CFCH_BW1, 0x4);
+ rtw89_phy_write32_mask(rtwdev, rck_offset[path], B_DRCK_MUL, 0x6);
+ rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x3);
+ /* Tx TSSI ADC update */
+ rtw89_phy_write32_mask(rtwdev, upd_clk_adc, B_RSTB_ASYNC_BW80, 2);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Failed to set ADC\n");
+ break;
+ }
+}
+
+static void rtw8852bx_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 rx_path_0;
+ u32 val;
+
+ rx_path_0 = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, phy_idx);
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
+
+ /* Set RF mode to 3 */
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ if (chip_id == RTL8852BT) {
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_NRBW_EN_V1, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_NRBW_EN_V1, 0x0, phy_idx);
+ }
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
+
+ /* Set RF mode to 3 */
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ if (chip_id == RTL8852BT) {
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_NRBW_EN_V1, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_NRBW_EN_V1, 0x0, phy_idx);
+ }
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx);
+
+ /* Set RF mode to 3 */
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ if (chip_id == RTL8852BT) {
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1,
+ B_PATH0_BAND_NRBW_EN_V1, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1,
+ B_PATH1_BAND_NRBW_EN_V1, 0x1, phy_idx);
+ }
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH,
+ pri_ch, phy_idx);
+
+ /* Set RF mode to 3 */
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx);
+ /* CCK primary channel */
+ if (pri_ch == RTW89_SC_20_UPPER)
+ rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 1);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 0);
+
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x2, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH,
+ pri_ch, phy_idx);
+
+ /* Set RF mode to A (8852BT keeps mode 3) */
+ val = chip_id == RTL8852BT ? 0x333 : 0xaaa;
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, val, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, val, phy_idx);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw,
+ pri_ch);
+ }
+
+ if (chip_id == RTL8852B) {
+ rtw8852b_bw_setting(rtwdev, bw, RF_PATH_A);
+ rtw8852b_bw_setting(rtwdev, bw, RF_PATH_B);
+ } else if (chip_id == RTL8852BT) {
+ rtw8852bt_adc_cfg(rtwdev, bw, RF_PATH_A);
+ rtw8852bt_adc_cfg(rtwdev, bw, RF_PATH_B);
+ }
+
+ if (rx_path_0 == 0x1)
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX,
+ B_P1_RFMODE_ORI_RX_ALL, 0x111, phy_idx);
+ else if (rx_path_0 == 0x2)
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX,
+ B_P0_RFMODE_ORI_RX_ALL, 0x111, phy_idx);
+}
+
+static void rtw8852bx_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en)
+{
+ if (cck_en) {
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1);
+ }
+}
+
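+/* Enable the 5 MHz spectrum mask when the primary 20 MHz channel sits at
+ * the edge of a 40/80 MHz channel, masking either the low or the high
+ * 5 MHz edge depending on the primary channel position.
+ */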
+static void rtw8852bx_5m_mask(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 pri_ch = chan->pri_ch_idx;
+ bool mask_5m_low;
+ bool mask_5m_en;
+
+ switch (chan->band_width) {
+ case RTW89_CHANNEL_WIDTH_40:
+ /* Prich=1: Mask 5M High, Prich=2: Mask 5M Low */
+ mask_5m_en = true;
+ mask_5m_low = pri_ch == RTW89_SC_20_LOWER;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ /* Prich=3: Mask 5M High, Prich=4: Mask 5M Low, Else: Disable */
+ mask_5m_en = pri_ch == RTW89_SC_20_UPMOST ||
+ pri_ch == RTW89_SC_20_LOWEST;
+ mask_5m_low = pri_ch == RTW89_SC_20_LOWEST;
+ break;
+ default:
+ mask_5m_en = false;
+ break;
+ }
+
+ if (!mask_5m_en) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x0);
+ rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1,
+ B_ASSIGN_SBD_OPT_EN_V1, 0x0, phy_idx);
+ return;
+ }
+
+ if (mask_5m_low) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x0);
+ }
+ rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1,
+ B_ASSIGN_SBD_OPT_EN_V1, 0x1, phy_idx);
+}
+
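+/* Toggle the asynchronous BB reset while the hardware SI read/write
+ * triggers are held off, then release the triggers and leave the BB out
+ * of reset.
+ */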
+static void __rtw8852bx_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx);
+ fsleep(1);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx);
+}
+
+static void rtw8852bx_bb_macid_ctrl_init(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ u32 addr;
+
+ for (addr = R_AX_PWR_MACID_LMT_TABLE0;
+ addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4)
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0);
+}
+
+static void __rtw8852bx_bb_sethw(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain;
+
+ rtw89_phy_write32_clr(rtwdev, R_P0_EN_SOUND_WO_NDP, B_P0_EN_SOUND_WO_NDP);
+ rtw89_phy_write32_clr(rtwdev, R_P1_EN_SOUND_WO_NDP, B_P1_EN_SOUND_WO_NDP);
+
+ rtw8852bx_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0);
+
+ /* read these registers after loading BB parameters */
+ gain->offset_base[RTW89_PHY_0] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK);
+ gain->rssi_base[RTW89_PHY_0] =
+ rtw89_phy_read32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK);
+}
+
+static void rtw8852bx_bb_set_pop(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR)
+ rtw89_phy_write32_clr(rtwdev, R_PKT_CTRL, B_PKT_POP_EN);
+}
+
+static u32 rtw8852bt_spur_freq(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan)
+{
+ u8 center_chan = chan->channel;
+
+ switch (chan->band_type) {
+ case RTW89_BAND_5G:
+ if (center_chan == 151 || center_chan == 153 ||
+ center_chan == 155 || center_chan == 163)
+ return 5760;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+#define CARRIER_SPACING_312_5 312500 /* 312.5 kHz */
+#define CARRIER_SPACING_78_125 78125 /* 78.125 kHz */
+#define MAX_TONE_NUM 2048
+
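+/* 8852BT spur elimination: if the current channel has a known spur
+ * (5760 MHz on some 5 GHz channels), point the CSI notch at the spur's
+ * offset in 78.125 kHz tone units, wrapped into the 2048-entry tone
+ * index range; otherwise disable the notch.
+ */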
+static void rtw8852bt_set_csi_tone_idx(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ s32 freq_diff, csi_idx, csi_tone_idx;
+ u32 spur_freq;
+
+ spur_freq = rtw8852bt_spur_freq(rtwdev, chan);
+ if (spur_freq == 0) {
+ rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN_V1, B_SEG0CSI_EN,
+ 0, phy_idx);
+ return;
+ }
+
+ freq_diff = (spur_freq - chan->freq) * 1000000;
+ csi_idx = s32_div_u32_round_closest(freq_diff, CARRIER_SPACING_78_125);
+ s32_div_u32_round_down(csi_idx, MAX_TONE_NUM, &csi_tone_idx);
+
+ rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_V1, B_SEG0CSI_IDX,
+ csi_tone_idx, phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN_V1, B_SEG0CSI_EN, 1, phy_idx);
+}
+
+static
+void __rtw8852bx_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ bool cck_en = chan->channel <= 14;
+ u8 pri_ch_idx = chan->pri_ch_idx;
+ u8 band = chan->band_type, chan_idx;
+
+ if (cck_en)
+ rtw8852bx_ctrl_sco_cck(rtwdev, chan->primary_channel);
+
+ rtw8852bx_ctrl_ch(rtwdev, chan, phy_idx);
+ rtw8852bx_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx);
+ rtw8852bx_ctrl_cck_en(rtwdev, cck_en);
+ if (chip_id == RTL8852BT)
+ rtw8852bt_set_csi_tone_idx(rtwdev, chan, phy_idx);
+ if (chip_id == RTL8852B && chan->band_type == RTW89_BAND_5G) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
+ B_PATH0_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
+ B_PATH0_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
+ B_PATH1_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
+ B_PATH1_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
+ B_BT_DYN_DC_EST_EN_MSK, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0);
+ }
+ chan_idx = rtw89_encode_chan_idx(rtwdev, chan->primary_channel, band);
+ rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx);
+ rtw8852bx_5m_mask(rtwdev, chan, phy_idx);
+ rtw8852bx_bb_set_pop(rtwdev);
+ __rtw8852bx_bb_reset_all(rtwdev, phy_idx);
+}
+
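+/* Build the DPD reference word for one path: convert the reference power
+ * (dB) into an S(10,3) codeword around the 0 dB base (0x27), split it
+ * into a 6-bit RF part (clamped to 15..63) and a 3-bit BB part, and
+ * derive the TSSI offset codeword from the 16 dBm anchor (0x12c).
+ */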
+static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, s16 ref)
+{
+ const u16 tssi_16dbm_cw = 0x12c;
+ const u8 base_cw_0db = 0x27;
+ const s8 ofst_int = 0;
+ s16 pwr_s10_3;
+ s16 rf_pwr_cw;
+ u16 bb_pwr_cw;
+ u32 pwr_cw;
+ u32 tssi_ofst_cw;
+
+ pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
+ bb_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(2, 0));
+ rf_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(8, 3));
+ rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
+ pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
+
+ tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
+ tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
+
+ return u32_encode_bits(tssi_ofst_cw, B_DPD_TSSI_CW) |
+ u32_encode_bits(pwr_cw, B_DPD_PWR_CW) |
+ u32_encode_bits(ref, B_DPD_REF);
+}
+
+static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u32 addr[RF_PATH_NUM_8852BX] = {0x5800, 0x7800};
+ const u32 mask = B_DPD_TSSI_CW | B_DPD_PWR_CW | B_DPD_REF;
+ const u8 ofst_ofdm = 0x4;
+ const u8 ofst_cck = 0x8;
+ const s16 ref_ofdm = 0;
+ const s16 ref_cck = 0;
+ u32 val;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n");
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
+ B_AX_PWR_REF, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
+ val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++)
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
+ phy_idx);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
+ val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
+
+ for (i = 0; i < RF_PATH_NUM_8852BX; i++)
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
+ phy_idx);
+}
+
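+/* Select the CCK TX DFIR coefficient set for 2 GHz: a dedicated set for
+ * channel 14, otherwise "flat" or "sharp" depending on the regulatory
+ * tx shape index, written as eight 32-bit coefficient words.
+ */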
+static void rtw8852bx_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ u8 tx_shape_idx,
+ enum rtw89_phy_idx phy_idx)
+{
+#define __DFIR_CFG_ADDR(i) (R_TXFIR0 + ((i) << 2))
+#define __DFIR_CFG_MASK 0xffffffff
+#define __DFIR_CFG_NR 8
+#define __DECL_DFIR_PARAM(_name, _val...) \
+ static const u32 param_ ## _name[] = {_val}; \
+ static_assert(ARRAY_SIZE(param_ ## _name) == __DFIR_CFG_NR)
+
+ __DECL_DFIR_PARAM(flat,
+ 0x023D23FF, 0x0029B354, 0x000FC1C8, 0x00FDB053,
+ 0x00F86F9A, 0x06FAEF92, 0x00FE5FCC, 0x00FFDFF5);
+ __DECL_DFIR_PARAM(sharp,
+ 0x023D83FF, 0x002C636A, 0x0013F204, 0x00008090,
+ 0x00F87FB0, 0x06F99F83, 0x00FDBFBA, 0x00003FF5);
+ __DECL_DFIR_PARAM(sharp_14,
+ 0x023B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E,
+ 0x00FD8F92, 0x0602D011, 0x0001C02C, 0x00FFF00A);
+ u8 ch = chan->channel;
+ const u32 *param;
+ u32 addr;
+ int i;
+
+ if (ch > 14) {
+ rtw89_warn(rtwdev,
+ "set tx shape dfir by unknown ch: %d on 2G\n", ch);
+ return;
+ }
+
+ if (ch == 14)
+ param = param_sharp_14;
+ else
+ param = tx_shape_idx == 0 ? param_flat : param_sharp;
+
+ for (i = 0; i < __DFIR_CFG_NR; i++) {
+ addr = __DFIR_CFG_ADDR(i);
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "set tx shape dfir: 0x%x: 0x%x\n", addr, param[i]);
+ rtw89_phy_write32_idx(rtwdev, addr, __DFIR_CFG_MASK, param[i],
+ phy_idx);
+ }
+
+#undef __DECL_DFIR_PARAM
+#undef __DFIR_CFG_NR
+#undef __DFIR_CFG_MASK
+#undef __DFIR_CFG_ADDR
+}
+
+static void rtw8852bx_set_tx_shape(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms;
+ u8 band = chan->band_type;
+ u8 regd = rtw89_regd_get(rtwdev, band);
+ u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd];
+ u8 tx_shape_ofdm = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd];
+
+ if (band == RTW89_BAND_2G)
+ rtw8852bx_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx);
+
+ rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG,
+ tx_shape_ofdm);
+}
+
+static void __rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx);
+ rtw8852bx_set_tx_shape(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx);
+ rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx);
+}
+
+static void __rtw8852bx_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_set_txpwr_ref(rtwdev, phy_idx);
+}
+
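+/* Program the UL trigger-based TX power offset: pw_ofst must be within
+ * [-16, 15]; the 2-stream entry uses pw_ofst - 3, floored at -16.
+ */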
+static
+void __rtw8852bx_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx)
+{
+ u32 reg;
+
+ if (pw_ofst < -16 || pw_ofst > 15) {
+ rtw89_warn(rtwdev, "[ULTB] Err pwr_offset=%d\n", pw_ofst);
+ return;
+ }
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN);
+
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst);
+
+ pw_ofst = max_t(s8, pw_ofst - 3, -16);
+ reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst);
+}
+
+static int
+__rtw8852bx_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ int ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf000);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff);
+ if (ret)
+ return ret;
+
+ rtw8852bx_set_txpwr_ul_tb_offset(rtwdev, 0, phy_idx == RTW89_PHY_1 ?
+ RTW89_MAC_1 : RTW89_MAC_0);
+
+ return 0;
+}
+
+static
+void __rtw8852bx_bb_set_plcp_tx(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_reg3_def *def = rtw8852bx_pmac_ht20_mcs7_tbl;
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(rtw8852bx_pmac_ht20_mcs7_tbl); i++, def++)
+ rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
+}
+
+static void rtw8852bx_stop_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852bx_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Stop Tx");
+ if (tx_info->mode == CONT_TX)
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 0, idx);
+ else if (tx_info->mode == PKTS_TX)
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 0, idx);
+}
+
+static void rtw8852bx_start_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852bx_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx)
+{
+ enum rtw8852bx_pmac_mode mode = tx_info->mode;
+ u32 pkt_cnt = tx_info->tx_cnt;
+ u16 period = tx_info->period;
+
+ if (mode == CONT_TX && !tx_info->is_cck) {
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 1, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CTx Start");
+ } else if (mode == PKTS_TX) {
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD,
+ B_PMAC_TX_PRD_MSK, period, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CNT, B_PMAC_TX_CNT_MSK,
+ pkt_cnt, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC PTx Start");
+ }
+
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 0, idx);
+}
+
+static
+void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852bx_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+
+ if (!tx_info->en_pmac_tx) {
+ rtw8852bx_stop_pmac_tx(rtwdev, tx_info, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
+ if (chan->band_type == RTW89_BAND_2G)
+ rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Tx Enable");
+
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0x3f, idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 1, idx);
+ rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, idx);
+
+ rtw8852bx_start_pmac_tx(rtwdev, tx_info, idx);
+}
+
+static
+void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
+ u16 tx_cnt, u16 period, u16 tx_time,
+ enum rtw89_phy_idx idx)
+{
+ struct rtw8852bx_bb_pmac_info tx_info = {0};
+
+ tx_info.en_pmac_tx = enable;
+ tx_info.is_cck = 0;
+ tx_info.mode = PKTS_TX;
+ tx_info.tx_cnt = tx_cnt;
+ tx_info.period = period;
+ tx_info.tx_time = tx_time;
+
+ rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx);
+}
+
+static
+void __rtw8852bx_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
+ enum rtw89_phy_idx idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx PWR = %d", pwr_dbm);
+
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, pwr_dbm, idx);
+}
+
+static
+void __rtw8852bx_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path)
+{
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 7, RTW89_PHY_0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx Path = %d", tx_path);
+
+ if (tx_path == RF_PATH_A) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 1);
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0);
+ } else if (tx_path == RF_PATH_B) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 2);
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0);
+ } else if (tx_path == RF_PATH_AB) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 3);
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 4);
+ } else {
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Error Tx Path");
+ }
+}
+
+static
+void __rtw8852bx_bb_tx_mode_switch(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx idx, u8 mode)
+{
+ if (mode != 0)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Tx mode switch");
+
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_RXMOD, B_PMAC_RXMOD_MSK, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_DPD_EN, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 0, idx);
+}
+
+static
+void __rtw8852bx_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ struct rtw8852bx_bb_tssi_bak *bak)
+{
+ s32 tmp;
+
+ bak->tx_path = rtw89_phy_read32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, idx);
+ bak->rx_path = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, idx);
+ bak->p0_rfmode = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, idx);
+ bak->p0_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, idx);
+ bak->p1_rfmode = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, idx);
+ bak->p1_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, idx);
+ tmp = rtw89_phy_read32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, idx);
+ bak->tx_pwr = sign_extend32(tmp, 8);
+}
+
+static
+void __rtw8852bx_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ const struct rtw8852bx_bb_tssi_bak *bak)
+{
+ rtw89_phy_write32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, bak->tx_path, idx);
+ if (bak->tx_path == RF_AB)
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x4);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x0);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, bak->rx_path, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, bak->p0_rfmode, idx);
+ rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, bak->p0_rfmode_ftm, idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, bak->p1_rfmode, idx);
+ rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, bak->p1_rfmode_ftm, idx);
+ rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, bak->tx_pwr, idx);
+}
+
+static void __rtw8852bx_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_write_reg3_tbl(rtwdev, en ? &rtw8852bx_btc_preagc_en_defs_tbl :
+ &rtw8852bx_btc_preagc_dis_defs_tbl);
+}
+
+static void __rtw8852bx_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (en) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
+ B_PATH0_BT_SHARE_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
+ B_PATH0_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x20);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x30);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
+ B_PATH1_BT_SHARE_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
+ B_PATH1_BTG_PATH_V1, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
+ B_BT_DYN_DC_EST_EN_MSK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1,
+ B_PATH0_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1,
+ B_PATH0_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1,
+ B_PATH1_G_LNA6_OP1DB_V1, 0x1a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1,
+ B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1,
+ B_PATH1_BT_SHARE_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1,
+ B_PATH1_BTG_PATH_V1, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xc);
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1,
+ B_BT_DYN_DC_EST_EN_MSK, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0);
+ }
+}
+
+static
+void __rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path_bit rx_path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u32 rst_mask0;
+ u32 rst_mask1;
+
+ if (rx_path == RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
+ } else if (rx_path == RF_B) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 2);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 2);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 2);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
+ } else if (rx_path == RF_AB) {
+ rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 3);
+ rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 3);
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
+ }
+
+ rtw8852bx_set_gain_offset(rtwdev, chan->subband_type, RTW89_PHY_0);
+
+ if (chan->band_type == RTW89_BAND_2G &&
+ (rx_path == RF_B || rx_path == RF_AB))
+ rtw8852bx_ctrl_btg_bt_rx(rtwdev, true, RTW89_PHY_0);
+ else
+ rtw8852bx_ctrl_btg_bt_rx(rtwdev, false, RTW89_PHY_0);
+
+ rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI;
+ rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI;
+ if (rx_path == RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3);
+ }
+}
+
+static void rtw8852bx_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path_bit rx_path)
+{
+ if (rx_path == RF_A) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
+ B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
+ B_P0_RFMODE_FTM_RX, 0x333);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
+ B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1111111);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
+ B_P1_RFMODE_FTM_RX, 0x111);
+ } else if (rx_path == RF_B) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
+ B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1111111);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
+ B_P0_RFMODE_FTM_RX, 0x111);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
+ B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
+ B_P1_RFMODE_FTM_RX, 0x333);
+ } else if (rx_path == RF_AB) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE,
+ B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX,
+ B_P0_RFMODE_FTM_RX, 0x333);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE,
+ B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX,
+ B_P1_RFMODE_FTM_RX, 0x333);
+ }
+}
+
+static void __rtw8852bx_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ enum rtw89_rf_path_bit rx_path = hal->antenna_rx ? hal->antenna_rx : RF_AB;
+
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path);
+
+ if (rtwdev->hal.rx_nss == 1) {
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1);
+ rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1);
+ }
+
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0x0, RTW89_PHY_0);
+}
+
+static u8 __rtw8852bx_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
+{
+ if (rtwdev->is_tssi_mode[rf_path]) {
+ u32 addr = 0x1c10 + (rf_path << 13);
+
+ return rtw89_phy_read32_mask(rtwdev, addr, 0x3F000000);
+ }
+
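+ /* toggle the thermal meter trigger to latch a fresh reading */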
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+
+ fsleep(200);
+
+ return rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL);
+}
+
+static
+void rtw8852bx_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
+{
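+ /* program one RF LUT entry: enable write, select the group, write the value */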
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x20000);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
+}
+
+static void __rtw8852bx_btc_init_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_mac_ax_coex coex_params = {
+ .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
+ .direction = RTW89_MAC_AX_COEX_INNER,
+ };
+
+ /* PTA init */
+ rtw89_mac_coex_init(rtwdev, &coex_params);
+
+ /* set WL Tx response = Hi-Pri */
+ chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true);
+ chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true);
+
+ /* set rf gnt debug off */
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0);
+
+ /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
+ if (btc->ant_type == BTC_ANT_SHARED) {
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
+ /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x55f);
+ } else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5df);
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5df);
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff);
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x5ff);
+ }
+
+ if (rtwdev->chip->chip_id == RTL8852BT) {
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_RX_GROUP, 0x5df);
+ rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_RX_GROUP, 0x5df);
+ }
+
+ /* set PTA break table */
+ rtw89_write32(rtwdev, R_BTC_BREAK_TABLE, BTC_BREAK_PARAM);
+
+ /* enable BT counters (0xda40 bits 16 and 2 = 2'b11) */
+ rtw89_write32_set(rtwdev, R_AX_CSR_MODE, B_AX_BT_CNT_RST | B_AX_STATIS_BT_EN);
+ btc->cx.wl.status.map.init_ok = true;
+}
+
+static
+void __rtw8852bx_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
+{
+ u32 bitmap;
+ u32 reg;
+
+ switch (map) {
+ case BTC_PRI_MASK_TX_RESP:
+ reg = R_BTC_BT_COEX_MSK_TABLE;
+ bitmap = B_BTC_PRI_MASK_TX_RESP_V1;
+ break;
+ case BTC_PRI_MASK_BEACON:
+ reg = R_AX_WL_PRI_MSK;
+ bitmap = B_AX_PTA_WL_PRI_MASK_BCNQ;
+ break;
+ case BTC_PRI_MASK_RX_CCK:
+ reg = R_BTC_BT_COEX_MSK_TABLE;
+ bitmap = B_BTC_PRI_MASK_RXCCK_V1;
+ break;
+ default:
+ return;
+ }
+
+ if (state)
+ rtw89_write32_set(rtwdev, reg, bitmap);
+ else
+ rtw89_write32_clr(rtwdev, reg, bitmap);
+}
+
+static
+s8 __rtw8852bx_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
+{
+ /* add 6 to compensate for the offset, then map [-100, 0] dBm to [0, 100] */
+ return clamp_t(s8, val + 6, -100, 0) + 100;
+}
+
+static
+void __rtw8852bx_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
+{
+ /* Feature moved to firmware; nothing to do here */
+}
+
+static void __rtw8852bx_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
+{
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x31);
+
+ /* set WL standby = Rx to work around the GNT_BT_Tx 1->0 settling issue */
+ if (state)
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x179);
+ else
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x20);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+}
+
+static void rtw8852bx_btc_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level)
+{
+ switch (level) {
+ case 0: /* default */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ case 1: /* Fix LNA2=5 */
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+ break;
+ }
+}
+
+static void __rtw8852bx_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ switch (level) {
+ case 0: /* original */
+ default:
+ rtw8852bx_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0);
+ btc->dm.wl_lna2 = 0;
+ break;
+ case 1: /* for FDD free-run */
+ rtw8852bx_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0);
+ btc->dm.wl_lna2 = 0;
+ break;
+ case 2: /* for BTG Co-Rx */
+ rtw8852bx_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0);
+ btc->dm.wl_lna2 = 1;
+ break;
+ }
+
+ rtw8852bx_btc_set_wl_lna2(rtwdev, btc->dm.wl_lna2);
+}
+
+static void rtw8852bx_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u16 chan = phy_ppdu->chan_idx;
+ enum nl80211_band band;
+ u8 ch;
+
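+ /* a channel index of 0 means the PPDU reported no valid channel */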
+ if (chan == 0)
+ return;
+
+ rtw89_decode_chan_idx(rtwdev, chan, &ch, &band);
+ status->freq = ieee80211_channel_to_frequency(ch, band);
+ status->band = band;
+}
+
+static void __rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 path;
+ u8 *rx_power = phy_ppdu->rssi;
+
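+ /* report the stronger chain as the overall signal level */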
+ status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B]));
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ status->chains |= BIT(path);
+ status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]);
+ }
+ if (phy_ppdu->valid)
+ rtw8852bx_fill_freq_with_ppdu(rtwdev, phy_ppdu, status);
+}
+
+static int __rtw8852bx_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
+ u32 val32;
+ int ret;
+
+ rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x1);
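+ /* toggle the AFE digital clock enable (set -> clear -> set) */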
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+
+ if (chip_id == RTL8852BT) {
+ val32 = rtw89_read32(rtwdev, R_AX_AFE_OFF_CTRL1);
+ val32 = u32_replace_bits(val32, 0x1, B_AX_S0_LDO_VSEL_F_MASK);
+ val32 = u32_replace_bits(val32, 0x1, B_AX_S1_LDO_VSEL_F_MASK);
+ rtw89_write32(rtwdev, R_AX_AFE_OFF_CTRL1, val32);
+ }
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xC7,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xC7,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ rtw89_write8(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_XYN_CYCLE);
+
+ return 0;
+}
+
+static int __rtw8852bx_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ u8 wl_rfc_s0;
+ u8 wl_rfc_s1;
+ int ret;
+
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, &wl_rfc_s0);
+ if (ret)
+ return ret;
+ wl_rfc_s0 &= ~XTAL_SI_RF00S_EN;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, wl_rfc_s0,
+ FULL_BIT_MASK);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, &wl_rfc_s1);
+ if (ret)
+ return ret;
+ wl_rfc_s1 &= ~XTAL_SI_RF10S_EN;
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, wl_rfc_s1,
+ FULL_BIT_MASK);
+ return ret;
+}
+
+const struct rtw8852bx_info rtw8852bx_info = {
+ .mac_enable_bb_rf = __rtw8852bx_mac_enable_bb_rf,
+ .mac_disable_bb_rf = __rtw8852bx_mac_disable_bb_rf,
+ .bb_sethw = __rtw8852bx_bb_sethw,
+ .bb_reset_all = __rtw8852bx_bb_reset_all,
+ .bb_cfg_txrx_path = __rtw8852bx_bb_cfg_txrx_path,
+ .bb_cfg_tx_path = __rtw8852bx_bb_cfg_tx_path,
+ .bb_ctrl_rx_path = __rtw8852bx_bb_ctrl_rx_path,
+ .bb_set_plcp_tx = __rtw8852bx_bb_set_plcp_tx,
+ .bb_set_power = __rtw8852bx_bb_set_power,
+ .bb_set_pmac_pkt_tx = __rtw8852bx_bb_set_pmac_pkt_tx,
+ .bb_backup_tssi = __rtw8852bx_bb_backup_tssi,
+ .bb_restore_tssi = __rtw8852bx_bb_restore_tssi,
+ .bb_tx_mode_switch = __rtw8852bx_bb_tx_mode_switch,
+ .set_channel_mac = __rtw8852bx_set_channel_mac,
+ .set_channel_bb = __rtw8852bx_set_channel_bb,
+ .ctrl_nbtg_bt_tx = __rtw8852bx_ctrl_nbtg_bt_tx,
+ .ctrl_btg_bt_rx = __rtw8852bx_ctrl_btg_bt_rx,
+ .query_ppdu = __rtw8852bx_query_ppdu,
+ .read_efuse = __rtw8852bx_read_efuse,
+ .read_phycap = __rtw8852bx_read_phycap,
+ .power_trim = __rtw8852bx_power_trim,
+ .set_txpwr = __rtw8852bx_set_txpwr,
+ .set_txpwr_ctrl = __rtw8852bx_set_txpwr_ctrl,
+ .init_txpwr_unit = __rtw8852bx_init_txpwr_unit,
+ .set_txpwr_ul_tb_offset = __rtw8852bx_set_txpwr_ul_tb_offset,
+ .get_thermal = __rtw8852bx_get_thermal,
+ .adc_cfg = rtw8852bt_adc_cfg,
+ .btc_init_cfg = __rtw8852bx_btc_init_cfg,
+ .btc_set_wl_pri = __rtw8852bx_btc_set_wl_pri,
+ .btc_get_bt_rssi = __rtw8852bx_btc_get_bt_rssi,
+ .btc_update_bt_cnt = __rtw8852bx_btc_update_bt_cnt,
+ .btc_wl_s1_standby = __rtw8852bx_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = __rtw8852bx_btc_set_wl_rx_gain,
+};
+EXPORT_SYMBOL(rtw8852bx_info);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852B common routines");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
new file mode 100644
index 000000000000..801e7ab9f4fa
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h
@@ -0,0 +1,388 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852BX_H__
+#define __RTW89_8852BX_H__
+
+#include "core.h"
+
+#define RF_PATH_NUM_8852BX 2
+#define BB_PATH_NUM_8852BX 2
+
+enum rtw8852bx_pmac_mode {
+ NONE_TEST,
+ PKTS_TX,
+ PKTS_RX,
+ CONT_TX
+};
+
+struct rtw8852bx_u_efuse {
+ u8 rsvd[0x88];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852bx_e_efuse {
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852bx_tssi_offset {
+ u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM];
+ u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM];
+ u8 rsvd[7];
+ u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM];
+} __packed;
+
+struct rtw8852bx_efuse {
+ u8 rsvd[0x210];
+ struct rtw8852bx_tssi_offset path_a_tssi;
+ u8 rsvd1[10];
+ struct rtw8852bx_tssi_offset path_b_tssi;
+ u8 rsvd2[94];
+ u8 channel_plan;
+ u8 xtal_k;
+ u8 rsvd3;
+ u8 iqk_lck;
+ u8 rsvd4[5];
+ u8 reg_setting:2;
+ u8 tx_diversity:1;
+ u8 rx_diversity:2;
+ u8 ac_mode:1;
+ u8 module_type:2;
+ u8 rsvd5;
+ u8 shared_ant:1;
+ u8 coex_type:3;
+ u8 ant_iso:1;
+ u8 radio_on_off:1;
+ u8 rsvd6:2;
+ u8 eeprom_version;
+ u8 customer_id;
+ u8 tx_bb_swing_2g;
+ u8 tx_bb_swing_5g;
+ u8 tx_cali_pwr_trk_mode;
+ u8 trx_path_selection;
+ u8 rfe_type;
+ u8 country_code[2];
+ u8 rsvd7[3];
+ u8 path_a_therm;
+ u8 path_b_therm;
+ u8 rsvd8[2];
+ u8 rx_gain_2g_ofdm;
+ u8 rsvd9;
+ u8 rx_gain_2g_cck;
+ u8 rsvd10;
+ u8 rx_gain_5g_low;
+ u8 rsvd11;
+ u8 rx_gain_5g_mid;
+ u8 rsvd12;
+ u8 rx_gain_5g_high;
+ u8 rsvd13[35];
+ u8 path_a_cck_pwr_idx[6];
+ u8 path_a_bw40_1tx_pwr_idx[5];
+ u8 path_a_ofdm_1tx_pwr_idx_diff:4;
+ u8 path_a_bw20_1tx_pwr_idx_diff:4;
+ u8 path_a_bw20_2tx_pwr_idx_diff:4;
+ u8 path_a_bw40_2tx_pwr_idx_diff:4;
+ u8 path_a_cck_2tx_pwr_idx_diff:4;
+ u8 path_a_ofdm_2tx_pwr_idx_diff:4;
+ u8 rsvd14[0xf2];
+ union {
+ struct rtw8852bx_u_efuse u;
+ struct rtw8852bx_e_efuse e;
+ };
+} __packed;
+
+struct rtw8852bx_bb_pmac_info {
+ u8 en_pmac_tx:1;
+ u8 is_cck:1;
+ u8 mode:3;
+ u8 rsvd:3;
+ u16 tx_cnt;
+ u16 period;
+ u16 tx_time;
+ u8 duty_cycle;
+};
+
+struct rtw8852bx_bb_tssi_bak {
+ u8 tx_path;
+ u8 rx_path;
+ u32 p0_rfmode;
+ u32 p0_rfmode_ftm;
+ u32 p1_rfmode;
+ u32 p1_rfmode_ftm;
+ s16 tx_pwr; /* S9 */
+};
+
+struct rtw8852bx_info {
+ int (*mac_enable_bb_rf)(struct rtw89_dev *rtwdev);
+ int (*mac_disable_bb_rf)(struct rtw89_dev *rtwdev);
+ void (*bb_sethw)(struct rtw89_dev *rtwdev);
+ void (*bb_reset_all)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+ void (*bb_cfg_txrx_path)(struct rtw89_dev *rtwdev);
+ void (*bb_cfg_tx_path)(struct rtw89_dev *rtwdev, u8 tx_path);
+ void (*bb_ctrl_rx_path)(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path_bit rx_path);
+ void (*bb_set_plcp_tx)(struct rtw89_dev *rtwdev);
+ void (*bb_set_power)(struct rtw89_dev *rtwdev, s16 pwr_dbm,
+ enum rtw89_phy_idx idx);
+ void (*bb_set_pmac_pkt_tx)(struct rtw89_dev *rtwdev, u8 enable,
+ u16 tx_cnt, u16 period, u16 tx_time,
+ enum rtw89_phy_idx idx);
+ void (*bb_backup_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ struct rtw8852bx_bb_tssi_bak *bak);
+ void (*bb_restore_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ const struct rtw8852bx_bb_tssi_bak *bak);
+ void (*bb_tx_mode_switch)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx idx, u8 mode);
+ void (*set_channel_mac)(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan, u8 mac_idx);
+ void (*set_channel_bb)(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+ void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx);
+ void (*ctrl_btg_bt_rx)(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx);
+ void (*query_ppdu)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status);
+ int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map,
+ enum rtw89_efuse_block block);
+ int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
+ void (*power_trim)(struct rtw89_dev *rtwdev);
+ void (*set_txpwr)(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx);
+ void (*set_txpwr_ctrl)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
+ int (*init_txpwr_unit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+ void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev,
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx);
+ u8 (*get_thermal)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path);
+ void (*adc_cfg)(struct rtw89_dev *rtwdev, u8 bw, u8 path);
+ void (*btc_init_cfg)(struct rtw89_dev *rtwdev);
+ void (*btc_set_wl_pri)(struct rtw89_dev *rtwdev, u8 map, bool state);
+ s8 (*btc_get_bt_rssi)(struct rtw89_dev *rtwdev, s8 val);
+ void (*btc_update_bt_cnt)(struct rtw89_dev *rtwdev);
+ void (*btc_wl_s1_standby)(struct rtw89_dev *rtwdev, bool state);
+ void (*btc_set_wl_rx_gain)(struct rtw89_dev *rtwdev, u32 level);
+};
+
+extern const struct rtw8852bx_info rtw8852bx_info;
+
+static inline
+int rtw8852bx_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ return rtw8852bx_info.mac_enable_bb_rf(rtwdev);
+}
+
+static inline
+int rtw8852bx_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ return rtw8852bx_info.mac_disable_bb_rf(rtwdev);
+}
+
+static inline
+void rtw8852bx_bb_sethw(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_info.bb_sethw(rtwdev);
+}
+
+static inline
+void rtw8852bx_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_info.bb_reset_all(rtwdev, phy_idx);
+}
+
+static inline
+void rtw8852bx_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_info.bb_cfg_txrx_path(rtwdev);
+}
+
+static inline
+void rtw8852bx_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path)
+{
+ rtw8852bx_info.bb_cfg_tx_path(rtwdev, tx_path);
+}
+
+static inline
+void rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path_bit rx_path)
+{
+ rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path);
+}
+
+static inline
+void rtw8852bx_bb_set_plcp_tx(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_info.bb_set_plcp_tx(rtwdev);
+}
+
+static inline
+void rtw8852bx_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
+ enum rtw89_phy_idx idx)
+{
+ rtw8852bx_info.bb_set_power(rtwdev, pwr_dbm, idx);
+}
+
+static inline
+void rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
+ u16 tx_cnt, u16 period, u16 tx_time,
+ enum rtw89_phy_idx idx)
+{
+ rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx);
+}
+
+static inline
+void rtw8852bx_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ struct rtw8852bx_bb_tssi_bak *bak)
+{
+ rtw8852bx_info.bb_backup_tssi(rtwdev, idx, bak);
+}
+
+static inline
+void rtw8852bx_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx,
+ const struct rtw8852bx_bb_tssi_bak *bak)
+{
+ rtw8852bx_info.bb_restore_tssi(rtwdev, idx, bak);
+}
+
+static inline
+void rtw8852bx_bb_tx_mode_switch(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx idx, u8 mode)
+{
+ rtw8852bx_info.bb_tx_mode_switch(rtwdev, idx, mode);
+}
+
+static inline
+void rtw8852bx_set_channel_mac(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan, u8 mac_idx)
+{
+ rtw8852bx_info.set_channel_mac(rtwdev, chan, mac_idx);
+}
+
+static inline
+void rtw8852bx_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_info.set_channel_bb(rtwdev, chan, phy_idx);
+}
+
+static inline
+void rtw8852bx_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_info.ctrl_nbtg_bt_tx(rtwdev, en, phy_idx);
+}
+
+static inline
+void rtw8852bx_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_info.ctrl_btg_bt_rx(rtwdev, en, phy_idx);
+}
+
+static inline
+void rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ rtw8852bx_info.query_ppdu(rtwdev, phy_ppdu, status);
+}
+
+static inline
+int rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map,
+ enum rtw89_efuse_block block)
+{
+ return rtw8852bx_info.read_efuse(rtwdev, log_map, block);
+}
+
+static inline
+int rtw8852bx_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ return rtw8852bx_info.read_phycap(rtwdev, phycap_map);
+}
+
+static inline
+void rtw8852bx_power_trim(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_info.power_trim(rtwdev);
+}
+
+static inline
+void rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev,
+ const struct rtw89_chan *chan,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_info.set_txpwr(rtwdev, chan, phy_idx);
+}
+
+static inline
+void rtw8852bx_set_txpwr_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw8852bx_info.set_txpwr_ctrl(rtwdev, phy_idx);
+}
+
+static inline
+int rtw8852bx_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ return rtw8852bx_info.init_txpwr_unit(rtwdev, phy_idx);
+}
+
+static inline
+void rtw8852bx_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
+ s8 pw_ofst, enum rtw89_mac_idx mac_idx)
+{
+ rtw8852bx_info.set_txpwr_ul_tb_offset(rtwdev, pw_ofst, mac_idx);
+}
+
+static inline
+u8 rtw8852bx_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
+{
+ return rtw8852bx_info.get_thermal(rtwdev, rf_path);
+}
+
+static inline
+void rtw8852bx_adc_cfg(struct rtw89_dev *rtwdev, u8 bw, u8 path)
+{
+ rtw8852bx_info.adc_cfg(rtwdev, bw, path);
+}
+
+static inline
+void rtw8852bx_btc_init_cfg(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_info.btc_init_cfg(rtwdev);
+}
+
+static inline
+void rtw8852bx_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
+{
+ rtw8852bx_info.btc_set_wl_pri(rtwdev, map, state);
+}
+
+static inline
+s8 rtw8852bx_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
+{
+ return rtw8852bx_info.btc_get_bt_rssi(rtwdev, val);
+}
+
+static inline
+void rtw8852bx_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
+{
+ rtw8852bx_info.btc_update_bt_cnt(rtwdev);
+}
+
+static inline
+void rtw8852bx_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
+{
+ rtw8852bx_info.btc_wl_s1_standby(rtwdev, state);
+}
+
+static inline
+void rtw8852bx_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+ rtw8852bx_info.btc_set_wl_rx_gain(rtwdev, level);
+}
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
index 259df67836a0..12354612441c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c
@@ -8,6 +8,7 @@
#include "phy.h"
#include "reg.h"
#include "rtw8852b.h"
+#include "rtw8852b_common.h"
#include "rtw8852b_rfk.h"
#include "rtw8852b_rfk_table.h"
#include "rtw8852b_table.h"
@@ -20,7 +21,7 @@
#define RTW8852B_RF_REL_VERSION 34
#define RTW8852B_DPK_VER 0x0d
#define RTW8852B_DPK_RF_PATH 2
-#define RTW8852B_DPK_KIP_REG_NUM 2
+#define RTW8852B_DPK_KIP_REG_NUM 3
#define _TSSI_DE_MASK GENMASK(21, 12)
#define ADDC_T_AVG 100
@@ -3433,13 +3434,13 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
rx_path = RF_ABCD; /* don't change path, but still set others */
if (enable) {
- rtw8852b_bb_set_plcp_tx(rtwdev);
- rtw8852b_bb_cfg_tx_path(rtwdev, path);
- rtw8852b_bb_ctrl_rx_path(rtwdev, rx_path);
- rtw8852b_bb_set_power(rtwdev, pwr_dbm, phy);
+ rtw8852bx_bb_set_plcp_tx(rtwdev);
+ rtw8852bx_bb_cfg_tx_path(rtwdev, path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
}
- rtw8852b_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
+ rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
}
static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
@@ -3578,7 +3579,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0};
u8 channel = chan->channel;
u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
- struct rtw8852b_bb_tssi_bak tssi_bak;
+ struct rtw8852bx_bb_tssi_bak tssi_bak;
s32 aliment_diff, tssi_cw_default;
u32 start_time, finish_time;
u32 bb_reg_backup[8] = {0};
@@ -3626,7 +3627,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
else
band = TSSI_ALIMK_2G;
- rtw8852b_bb_backup_tssi(rtwdev, phy, &tssi_bak);
+ rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak);
_tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));
rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8);
@@ -3730,8 +3731,8 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
out:
_tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup));
- rtw8852b_bb_restore_tssi(rtwdev, phy, &tssi_bak);
- rtw8852b_bb_tx_mode_switch(rtwdev, phy, 0);
+ rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak);
+ rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);
finish_time = ktime_get_ns();
tssi_info->tssi_alimk_time += finish_time - start_time;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
index ed71364e6437..d8f9d92ca0fb 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c
@@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
.rpwm_addr = R_AX_PCIE_HRPWM,
.cpwm_addr = R_AX_CPWM,
.mit_addr = R_AX_INT_MIT_RX,
+ .wp_sel_addr = 0,
.tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) |
BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) |
BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11),
@@ -63,6 +64,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = {
static const struct rtw89_driver_info rtw89_8852be_info = {
.chip = &rtw8852b_chip_info,
+ .quirks = NULL,
.bus = {
.pci = &rtw8852b_pci_info,
},
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h
new file mode 100644
index 000000000000..6177f36ad667
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852BT_H__
+#define __RTW89_8852BT_H__
+
+#include "core.h"
+
+#define RF_PATH_NUM_8852BT 2
+#define BB_PATH_NUM_8852BT 2
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
new file mode 100644
index 000000000000..fa0e49d58112
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c
@@ -0,0 +1,4019 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852bt.h"
+#include "rtw8852bt_rfk.h"
+#include "rtw8852bt_rfk_table.h"
+#include "rtw8852b_common.h"
+
+#define RTW8852BT_RXDCK_VER 0x1
+#define RTW8852BT_IQK_VER 0x2a
+#define RTW8852BT_SS 2
+#define RTW8852BT_TSSI_PATH_NR 2
+#define RTW8852BT_DPK_VER 0x06
+#define DPK_RF_PATH_MAX_8852BT 2
+
+#define _TSSI_DE_MASK GENMASK(21, 12)
+#define DPK_TXAGC_LOWER 0x2e
+#define DPK_TXAGC_UPPER 0x3f
+#define DPK_TXAGC_INVAL 0xff
+#define RFREG_MASKRXBB 0x003e0
+#define RFREG_MASKMODE 0xf0000
+
+enum rf_mode {
+ RF_SHUT_DOWN = 0x0,
+ RF_STANDBY = 0x1,
+ RF_TX = 0x2,
+ RF_RX = 0x3,
+ RF_TXIQK = 0x4,
+ RF_DPK = 0x5,
+ RF_RXK1 = 0x6,
+ RF_RXK2 = 0x7,
+};
+
+enum rtw8852bt_dpk_id {
+ LBK_RXIQK = 0x06,
+ SYNC = 0x10,
+ MDPK_IDL = 0x11,
+ MDPK_MPA = 0x12,
+ GAIN_LOSS = 0x13,
+ GAIN_CAL = 0x14,
+ DPK_RXAGC = 0x15,
+ KIP_PRESET = 0x16,
+ KIP_RESTORE = 0x17,
+ DPK_TXAGC = 0x19,
+ D_KIP_PRESET = 0x28,
+ D_TXAGC = 0x29,
+ D_RXAGC = 0x2a,
+ D_SYNC = 0x2b,
+ D_GAIN_LOSS = 0x2c,
+ D_MDPK_IDL = 0x2d,
+ D_GAIN_NORM = 0x2f,
+ D_KIP_THERMAL = 0x30,
+ D_KIP_RESTORE = 0x31
+};
+
+enum dpk_agc_step {
+ DPK_AGC_STEP_SYNC_DGAIN,
+ DPK_AGC_STEP_GAIN_ADJ,
+ DPK_AGC_STEP_GAIN_LOSS_IDX,
+ DPK_AGC_STEP_GL_GT_CRITERION,
+ DPK_AGC_STEP_GL_LT_CRITERION,
+ DPK_AGC_STEP_SET_TX_GAIN,
+};
+
+enum rtw8852bt_iqk_type {
+ ID_TXAGC = 0x0,
+ ID_FLOK_COARSE = 0x1,
+ ID_FLOK_FINE = 0x2,
+ ID_TXK = 0x3,
+ ID_RXAGC = 0x4,
+ ID_RXK = 0x5,
+ ID_NBTXK = 0x6,
+ ID_NBRXK = 0x7,
+ ID_FLOK_VBUFFER = 0x8,
+ ID_A_FLOK_COARSE = 0x9,
+ ID_G_FLOK_COARSE = 0xa,
+ ID_A_FLOK_FINE = 0xb,
+ ID_G_FLOK_FINE = 0xc,
+ ID_IQK_RESTORE = 0x10,
+};
+
+enum adc_ck {
+ ADC_NA = 0,
+ ADC_480M = 1,
+ ADC_960M = 2,
+ ADC_1920M = 3,
+};
+
+enum dac_ck {
+ DAC_40M = 0,
+ DAC_80M = 1,
+ DAC_120M = 2,
+ DAC_160M = 3,
+ DAC_240M = 4,
+ DAC_320M = 5,
+ DAC_480M = 6,
+ DAC_960M = 7,
+};
+
+static const u32 _tssi_trigger[RTW8852BT_TSSI_PATH_NR] = {0x5820, 0x7820};
+static const u32 _tssi_cw_rpt_addr[RTW8852BT_TSSI_PATH_NR] = {0x1c18, 0x3c18};
+static const u32 _tssi_cw_default_addr[RTW8852BT_TSSI_PATH_NR][4] = {
+ {0x5634, 0x5630, 0x5630, 0x5630},
+ {0x7634, 0x7630, 0x7630, 0x7630} };
+static const u32 _tssi_cw_default_mask[4] = {
+ 0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff};
+static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852BT] = {0x5858, 0x7858};
+static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852BT] = {0x5860, 0x7860};
+static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852BT] = {0x5838, 0x7838};
+static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852BT] = {0x5840, 0x7840};
+static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852BT] = {0x5848, 0x7848};
+static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852BT] = {0x5850, 0x7850};
+static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852BT] = {0x5828, 0x7828};
+static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852BT] = {0x5830, 0x7830};
+
+static const u32 rtw8852bt_backup_bb_regs[] = {0x2344, 0x5800, 0x7800, 0x0704};
+static const u32 rtw8852bt_backup_rf_regs[] = {
+ 0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x5, 0x10005};
+static const u32 rtw8852bt_backup_kip_regs[] = {
+ 0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec,
+ 0x823c, 0x8224, 0x8220, 0xc1d4, 0xc1d8, 0xc1c4, 0xc1ec};
+
+#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852bt_backup_bb_regs)
+#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852bt_backup_rf_regs)
+#define BACKUP_KIP_REGS_NR ARRAY_SIZE(rtw8852bt_backup_kip_regs)
+
+static void _rfk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1);
+
+ udelay(200);
+
+ dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
+ dpk->bp[path][kidx].ther_dpk);
+}
+
+static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ backup_bb_reg_val[i] =
+ rtw89_phy_read32_mask(rtwdev, rtw8852bt_backup_bb_regs[i], MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]backup bb reg : %x, value =%x\n",
+ rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_backup_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_KIP_REGS_NR; i++) {
+ backup_kip_reg_val[i] =
+ rtw89_phy_read32_mask(rtwdev, rtw8852bt_backup_kip_regs[i],
+ MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
+ rtw8852bt_backup_kip_regs[i], backup_kip_reg_val[i]);
+ }
+}
+
+static
+void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ backup_rf_reg_val[i] =
+ rtw89_read_rf(rtwdev, rf_path, rtw8852bt_backup_rf_regs[i],
+ RFREG_MASK);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup RF S%d 0x%x = %x\n",
+ rf_path, rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
+static void _rfk_reload_bb_reg(struct rtw89_dev *rtwdev, const u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, rtw8852bt_backup_bb_regs[i],
+ MASKDWORD, backup_bb_reg_val[i]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]restore bb reg : %x, value =%x\n",
+ rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_reload_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_KIP_REGS_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, rtw8852bt_backup_kip_regs[i],
+ MASKDWORD, backup_kip_reg_val[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]restore kip reg : %x, value =%x\n",
+ rtw8852bt_backup_kip_regs[i], backup_kip_reg_val[i]);
+ }
+}
+
+static void _rfk_reload_rf_reg(struct rtw89_dev *rtwdev,
+ const u32 backup_rf_reg_val[], u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ rtw89_write_rf(rtwdev, rf_path, rtw8852bt_backup_rf_regs[i],
+ RFREG_MASK, backup_rf_reg_val[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path,
+ rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
+static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u8 val;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n",
+ rtwdev->dbcc_en, phy_idx);
+
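+ /* without DBCC one PHY drives both paths; with DBCC each PHY maps to one path */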
+ if (!rtwdev->dbcc_en) {
+ val = RF_AB;
+ } else {
+ if (phy_idx == RTW89_PHY_0)
+ val = RF_A;
+ else
+ val = RF_B;
+ }
+ return val;
+}
+
+static
+void _txck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force,
+ enum dac_ck ck)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0);
+
+ if (!force)
+ return;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1);
+}
+
+static
+void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force,
+ enum adc_ck ck)
+{
+ u32 bw = 0;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0);
+
+ if (!force)
+ return;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1);
+
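+ /* pick the ADC bandwidth configuration that matches the forced sampling clock */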
+ switch (ck) {
+ case ADC_480M:
+ bw = RTW89_CHANNEL_WIDTH_40;
+ break;
+ case ADC_960M:
+ bw = RTW89_CHANNEL_WIDTH_80;
+ break;
+ case ADC_1920M:
+ bw = RTW89_CHANNEL_WIDTH_160;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s==>Invalid ck", __func__);
+ break;
+ }
+
+ rtw8852bx_adc_cfg(rtwdev, bw, path);
+}
+
+static void _rfk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kpath)
+{
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303);
+ rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff);
+ rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff);
+ rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, 0x1);
+
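+ /* force the DAC/ADC clocks on both paths for calibration */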
+ _txck_force(rtwdev, RF_PATH_A, true, DAC_960M);
+ _txck_force(rtwdev, RF_PATH_B, true, DAC_960M);
+ _rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M);
+ _rxck_force(rtwdev, RF_PATH_B, true, ADC_1920M);
+
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x5);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x3333);
+
+ rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN, MASKLWORD, 0x0000);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXAGC_TH, MASKLWORD, 0x0000);
+}
+
+static void _rfk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kpath)
+{
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x63);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0000);
+ rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
+
+ rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x2);
+}
+
+static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
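+ /* toggle the DCK trigger level to start one RX DC offset calibration */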
+ rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
+ mdelay(1);
+}
+
+static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 path, dck_tune;
+ u32 rf_reg5;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n",
+ RTW8852BT_RXDCK_VER, rtwdev->hal.cv);
+
+ for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
+ rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+ dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);
+
+ if (rtwdev->is_tssi_mode[path])
+ rtw89_phy_write32_mask(rtwdev,
+ R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, 0x1);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+ _set_rx_dck(rtwdev, phy, path);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+ if (rtwdev->is_tssi_mode[path])
+ rtw89_phy_write32_mask(rtwdev,
+ R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, 0x0);
+ }
+}
+
+static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ u32 rf_reg5;
+ u32 rck_val;
+ u32 val;
+ int ret;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);
+
+ rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n",
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
+
+ /* RCK trigger */
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30,
+ false, rtwdev, path, RR_RCKS, BIT(3));
+
+ rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n",
+ rck_val, ret);
+
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK));
+}
+
+static void _drck(struct rtw89_dev *rtwdev)
+{
+ u32 rck_d;
+ u32 val;
+ int ret;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n");
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false,
+ rtwdev, R_DRCK_RES, B_DRCK_POL);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n");
+
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0);
+
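+ /* read back the calibrated code and apply it as the fixed DRCK value */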
+ rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, 0x7c00);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, rck_d);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD));
+}
+
+static void _dack_backup_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+
+ for (i = 0; i < 0x10; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i);
+ dack->msbk_d[0][0][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0);
+
+ rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i);
+ dack->msbk_d[0][1][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1);
+ }
+
+ dack->biask_d[0][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00);
+ dack->biask_d[0][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01);
+
+ dack->dadck_d[0][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00);
+ dack->dadck_d[0][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01);
+}
+
+static void _dack_backup_s1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+
+ for (i = 0; i < 0x10; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i);
+ dack->msbk_d[1][0][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S);
+
+ rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i);
+ dack->msbk_d[1][1][i] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S);
+ }
+
+ dack->biask_d[1][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10);
+ dack->biask_d[1][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11);
+
+ dack->dadck_d[1][0] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10);
+ dack->dadck_d[1][1] =
+ rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11);
+}
+
+static
+void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ if (path == RF_PATH_A) {
+ rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x1);
+ }
+}
+
+static
+void _dack_reload_by_path(struct rtw89_dev *rtwdev, u8 path, u8 index)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 tmp, tmp_offset, tmp_reg;
+ u32 idx_offset, path_offset;
+ u8 i;
+
+ if (index == 0)
+ idx_offset = 0;
+ else
+ idx_offset = 0x14;
+
+ if (path == RF_PATH_A)
+ path_offset = 0;
+ else
+ path_offset = 0x28;
+
+ tmp_offset = idx_offset + path_offset;
+
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_RST, 0x1);
+
+ /* msbk_d: 15/14/13/12 */
+ tmp = 0x0;
+ for (i = 0; i < 4; i++)
+ tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
+ tmp_reg = 0xc200 + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+
+ /* msbk_d: 11/10/9/8 */
+ tmp = 0x0;
+ for (i = 0; i < 4; i++)
+ tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
+ tmp_reg = 0xc204 + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+
+ /* msbk_d: 7/6/5/4 */
+ tmp = 0x0;
+ for (i = 0; i < 4; i++)
+ tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
+ tmp_reg = 0xc208 + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+
+ /* msbk_d: 3/2/1/0 */
+ tmp = 0x0;
+ for (i = 0; i < 4; i++)
+ tmp |= dack->msbk_d[path][index][i] << (i * 8);
+ tmp_reg = 0xc20c + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+
+ /* dadck_d/biask_d */
+ tmp = (dack->biask_d[path][index] << 22) |
+ (dack->dadck_d[path][index] << 14);
+ tmp_reg = 0xc210 + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+
+ /* enable DACK result from reg */
+ rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + tmp_offset, B_DACKN0_EN, 0x1);
+}
+
+static
+void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ u8 i;
+
+ for (i = 0; i < 2; i++)
+ _dack_reload_by_path(rtwdev, path, i);
+}
+
+static bool _dack_s0_poll(struct rtw89_dev *rtwdev)
+{
+ if (rtw89_phy_read32_mask(rtwdev, R_DACK_S0P0, B_DACK_S0P0_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0)
+ return false;
+
+ return true;
+}
+
+static void _dack_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ bool done;
+ int ret;
+
+ _txck_force(rtwdev, RF_PATH_A, true, DAC_160M);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0);
+ udelay(100);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_VAL, 0x30);
+ rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_VAL, 0x30);
+
+ _dack_reset(rtwdev, RF_PATH_A);
+
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1);
+ udelay(1);
+
+ dack->msbk_timeout[0] = false;
+
+ ret = read_poll_timeout_atomic(_dack_s0_poll, done, done,
+ 1, 20000, false, rtwdev);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n");
+ dack->msbk_timeout[0] = true;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0);
+
+ _txck_force(rtwdev, RF_PATH_A, false, DAC_960M);
+ _dack_backup_s0(rtwdev);
+ _dack_reload(rtwdev, RF_PATH_A);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+}
+
+static bool _dack_s1_poll(struct rtw89_dev *rtwdev)
+{
+ if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 ||
+ rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0)
+ return false;
+
+ return true;
+}
+
+static void _dack_s1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ bool done;
+ int ret;
+
+ _txck_force(rtwdev, RF_PATH_B, true, DAC_160M);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0);
+ udelay(100);
+ rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_VAL, 0x30);
+ rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_VAL, 0x30);
+
+ _dack_reset(rtwdev, RF_PATH_B);
+
+ rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1);
+ udelay(1);
+
+ dack->msbk_timeout[1] = false;
+
+ ret = read_poll_timeout_atomic(_dack_s1_poll, done, done,
+ 1, 10000, false, rtwdev);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n");
+ dack->msbk_timeout[1] = true;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0);
+
+ _txck_force(rtwdev, RF_PATH_B, false, DAC_960M);
+ _dack_backup_s1(rtwdev);
+ _dack_reload(rtwdev, RF_PATH_B);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
+}
+
+static void _dack(struct rtw89_dev *rtwdev)
+{
+ _dack_s0(rtwdev);
+ _dack_s1(rtwdev);
+}
+
+static void _dack_dump(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+ u8 t;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->addck_d[0][0], dack->addck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->addck_d[1][0], dack->addck_d[1][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[0][0], dack->dadck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[1][0], dack->dadck_d[1][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
+ dack->biask_d[0][0], dack->biask_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
+ dack->biask_d[1][0], dack->biask_d[1][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[0][0][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[0][1][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[1][0][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[1][1][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+}
+
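+/* ADDCK (ADC DC offset calibration): trigger the calibration on each
+ * path in turn and latch the resulting I/Q codes into addck_d[][] so
+ * they can be re-applied manually by _addck_reload().
+ */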
+static void _addck_ori(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 val;
+ int ret;
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
+ udelay(100);
+
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0);
+ udelay(1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1);
+ dack->addck_timeout[0] = false;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false,
+ rtwdev, R_ADDCKR0, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
+ dack->addck_timeout[0] = true;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0);
+ dack->addck_d[0][0] =
+ rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0);
+ dack->addck_d[0][1] =
+ rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf);
+ udelay(100);
+
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0);
+ udelay(1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1);
+ dack->addck_timeout[1] = false;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val,
+ 1, 10000, false,
+ rtwdev, R_ADDCKR1, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
+ dack->addck_timeout[1] = true;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0);
+ dack->addck_d[1][0] =
+ rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0);
+ dack->addck_d[1][1] =
+ rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
+}
+
+static void _addck_reload(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, dack->addck_d[0][0]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, dack->addck_d[0][1]);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1, dack->addck_d[1][0]);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0, dack->addck_d[1][1]);
+
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3);
+}
+
+static void _dack_manual_off(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x0);
+
+ rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DACKN2_CTL, B_DACKN2_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DACKN3_CTL, B_DACKN3_ON, 0x0);
+}
+
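+/* Overall DAC calibration flow: DRCK, then ADDCK with the RX clocks
+ * forced to 960M, reload of the ADDCK results, and finally the per-path
+ * DACK. The RF paths are parked via RR_RSV1/RR_MOD for the duration and
+ * released afterwards.
+ */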
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
+ dack->dack_done = false;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
+
+ _drck(rtwdev);
+ _dack_manual_off(rtwdev);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1);
+ _rxck_force(rtwdev, RF_PATH_A, true, ADC_960M);
+ _rxck_force(rtwdev, RF_PATH_B, true, ADC_960M);
+ _addck_ori(rtwdev);
+
+ _rxck_force(rtwdev, RF_PATH_A, false, ADC_960M);
+ _rxck_force(rtwdev, RF_PATH_B, false, ADC_960M);
+ _addck_reload(rtwdev);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
+
+ _dack(rtwdev);
+ _dack_dump(rtwdev);
+ dack->dack_done = true;
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x1);
+
+ dack->dack_cnt++;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
+}
+
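+/* NCTL completion is reported in two stages: R_RFK_ST reads back 0x55
+ * once the one-shot command has been consumed, then R_RPT_COM flags
+ * 0x8000 when the report is ready. Only the second timeout marks the
+ * calibration as not ready.
+ */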
+static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
+{
+ bool notready = false;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
+ 10, 8200, false,
+ rtwdev, R_RFK_ST, MASKBYTE0);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n");
+
+ udelay(10);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
+ 10, 400, false,
+ rtwdev, R_RPT_COM, B_RPT_COM_RDY);
+ if (ret) {
+ notready = true;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL2 IQK timeout!!!\n");
+ }
+
+ udelay(10);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
+
+ return notready;
+}
+
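+/* Build and fire a one-shot NCTL command: the calibration type selects
+ * the base opcode, bit (4 + path) selects the RF path, and TXK/RXK fold
+ * the current bandwidth into bits 8..11. The engine is kicked by writing
+ * the command word + 1 to R_NCTL_CFG.
+ */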
+static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path, u8 ktype)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 iqk_cmd;
+ bool fail;
+
+ switch (ktype) {
+ case ID_TXAGC:
+ iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
+ break;
+ case ID_FLOK_COARSE:
+ iqk_cmd = 0x108 | (1 << (4 + path));
+ break;
+ case ID_FLOK_FINE:
+ iqk_cmd = 0x208 | (1 << (4 + path));
+ break;
+ case ID_FLOK_VBUFFER:
+ iqk_cmd = 0x308 | (1 << (4 + path));
+ break;
+ case ID_TXK:
+ iqk_cmd = 0x008 | (1 << (path + 4)) |
+ (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
+ break;
+ case ID_RXAGC:
+ iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
+ break;
+ case ID_RXK:
+ iqk_cmd = 0x008 | (1 << (path + 4)) |
+ (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
+ break;
+ case ID_NBTXK:
+ iqk_cmd = 0x408 | (1 << (4 + path));
+ break;
+ case ID_NBRXK:
+ iqk_cmd = 0x608 | (1 << (4 + path));
+ break;
+ default:
+ return false;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s, iqk_cmd = %x\n",
+ __func__, iqk_cmd + 1);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
+ fail = _iqk_check_cal(rtwdev, path, ktype);
+
+ return fail;
+}
+
+static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x5);
+ udelay(1);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4);
+ udelay(1);
+ break;
+ default:
+ break;
+ }
+}
+
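+/* 2G LOK: the coarse and vbuffer one-shots each run twice, alternating
+ * the TX gain (RR_TXIG_TG 0x0/0x12) and the KIP IQ swing (0x09/0x24)
+ * between rounds. The return value is always false; failures are only
+ * visible in the _iqk_check_cal() debug output.
+ */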
+static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ return false;
+}
+
+static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ return false;
+}
+
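+/* 2G TX IQK: sweep the four TX gain groups. Each group runs a
+ * narrowband TXK first and snapshots the CFIR result; in narrowband
+ * mode (is_nbiqk) the loop stops there, otherwise a wideband TXK
+ * follows. On failure the CFIR falls back to the 0x40000002 default.
+ */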
+static bool _iqk_2g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ static const u32 g_power_range[4] = {0x0, 0x0, 0x0, 0x0};
+ static const u32 g_track_range[4] = {0x4, 0x4, 0x6, 0x6};
+ static const u32 g_gain_bb[4] = {0x08, 0x0e, 0x08, 0x0e};
+ static const u32 g_itqt[4] = {0x09, 0x12, 0x1b, 0x24};
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool notready = false;
+ bool kfail = false;
+ u8 gp;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ for (gp = 0x0; gp < 0x4; gp++) {
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0,
+ g_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1,
+ g_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG,
+ g_gain_bb[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000100, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000010, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000004, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000003, gp);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
+ 0x009);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, g_itqt[gp]);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ iqk_info->nb_txcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
+
+ if (iqk_info->is_nbiqk)
+ break;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, g_itqt[gp]);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n",
+ path, gp, 1 << path, iqk_info->nb_txcfir[path]);
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ iqk_info->nb_txcfir[path] = 0x40000002;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_TXCFIR, 0x0);
+ }
+
+ return kfail;
+}
+
+static bool _iqk_5g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ static const u32 a_power_range[4] = {0x0, 0x0, 0x0, 0x0};
+ static const u32 a_track_range[4] = {0x3, 0x3, 0x6, 0x6};
+ static const u32 a_gain_bb[4] = {0x08, 0x10, 0x08, 0x0e};
+ static const u32 a_itqt[4] = {0x09, 0x12, 0x1b, 0x24};
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool notready = false;
+ bool kfail = false;
+ u8 gp;
+
+ for (gp = 0x0; gp < 0x4; gp++) {
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, a_itqt[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000100, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000010, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000004, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000003, gp);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT,
+ 0x009);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, a_itqt[gp]);
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ iqk_info->nb_txcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
+
+ if (iqk_info->is_nbiqk)
+ break;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ B_KIP_IQP_IQSW, a_itqt[gp]);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n",
+ path, gp, 1 << path, iqk_info->nb_txcfir[path]);
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ iqk_info->nb_txcfir[path] = 0x40000002;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_TXCFIR, 0x0);
+ }
+
+ return kfail;
+}
+
+static void _iqk_adc_fifo_rst(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
+ udelay(10);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
+}
+
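+/* RX clock setup for IQK: 80 MHz channels force the ADCs to 960M,
+ * narrower channels to 480M, with matching sample-delay and filter
+ * bandwidth settings on both paths.
+ */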
+static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
+
+ if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
+ _rxck_force(rtwdev, RF_PATH_A, true, ADC_960M);
+ _rxck_force(rtwdev, RF_PATH_B, true, ADC_960M);
+ udelay(1);
+
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_VAL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
+ B_PATH0_SAMPL_DLY_T_MSK_V1, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
+ B_PATH1_SAMPL_DLY_T_MSK_V1, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
+ B_PATH1_BW_SEL_MSK_V1, 0x8);
+ } else {
+ _rxck_force(rtwdev, RF_PATH_A, true, ADC_480M);
+ _rxck_force(rtwdev, RF_PATH_B, true, ADC_480M);
+ udelay(1);
+
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_VAL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1,
+ B_PATH0_SAMPL_DLY_T_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1,
+ B_PATH1_SAMPL_DLY_T_MSK_V1, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1,
+ B_PATH1_BW_SEL_MSK_V1, 0xf);
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00000780, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00000780, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00007800, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00007800, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_MUL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
+}
+
+static bool _iqk_2g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ static const u32 g_idxrxgain[2] = {0x212, 0x310};
+ static const u32 g_idxattc2[2] = {0x00, 0x20};
+ static const u32 g_idxattc1[2] = {0x3, 0x2};
+ static const u32 g_idxrxagc[2] = {0x0, 0x2};
+ static const u32 g_idx[2] = {0x0, 0x2};
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool notready = false;
+ bool kfail = false;
+ u32 rf_18, tmp;
+ u8 gp;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
+ rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
+
+ for (gp = 0x0; gp < 0x2; gp++) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, g_idxattc2[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, g_idxattc1[gp]);
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000100, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000010, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000007, g_idx[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
+ udelay(200);
+
+ tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb = %x\n", path,
+ rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0));
+
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
+ udelay(200);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+ iqk_info->nb_rxcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD) | 0x2;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n", path,
+ g_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ if (iqk_info->is_nbiqk)
+ break;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ iqk_info->nb_rxcfir[path] = 0x40000002;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_RXCFIR, 0x0);
+ }
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
+
+ return kfail;
+}
+
+static bool _iqk_5g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ static const u32 a_idxrxgain[2] = {0x110, 0x290};
+ static const u32 a_idxattc2[2] = {0x0f, 0x0f};
+ static const u32 a_idxattc1[2] = {0x2, 0x2};
+ static const u32 a_idxrxagc[2] = {0x4, 0x6};
+ static const u32 a_idx[2] = {0x0, 0x2};
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool notready = false;
+ bool kfail = false;
+ u32 rf_18, tmp;
+ u8 gp;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
+ rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
+
+ for (gp = 0x0; gp < 0x2; gp++) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT, a_idxattc2[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2, a_idxattc1[gp]);
+
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000100, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000010, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ 0x00000007, a_idx[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
+ udelay(200);
+
+ tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
+
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb = %x\n", path,
+ rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0));
+
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013);
+ udelay(200);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+ iqk_info->nb_rxcfir[path] =
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD) | 0x2;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n",
+ path, a_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ if (iqk_info->is_nbiqk)
+ break;
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+ }
+
+ if (!notready)
+ kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG);
+
+ if (kfail) {
+ iqk_info->nb_rxcfir[path] = 0x40000002;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_RXCFIR, 0x0);
+ }
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
+
+ return kfail;
+}
+
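+/* Per-path IQK sequence: retry LOK up to three times and fall back to
+ * fixed LOK codes (0x80200) if it keeps failing, then run TXK, switch
+ * the RX clocks, reset the ADC FIFOs and finish with RXK.
+ */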
+static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool lok_result = false;
+ bool txk_result = false;
+ bool rxk_result = false;
+ u8 i;
+
+ for (i = 0; i < 3; i++) {
+ _iqk_txk_setting(rtwdev, path);
+ if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ lok_result = _iqk_2g_lok(rtwdev, phy_idx, path);
+ else
+ lok_result = _iqk_5g_lok(rtwdev, phy_idx, path);
+
+ if (!lok_result)
+ break;
+ }
+
+ if (lok_result) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]!!!!!!!!!!LOK by Pass !!!!!!!!!!!\n");
+ rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
+ rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
+ rtw89_write_rf(rtwdev, path, RR_LOKVB, RFREG_MASK, 0x80200);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x08[00:19] = 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x09[00:19] = 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_RSV2, RFREG_MASK));
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x0a[00:19] = 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK));
+
+ if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ txk_result = _iqk_2g_tx(rtwdev, phy_idx, path);
+ else
+ txk_result = _iqk_5g_tx(rtwdev, phy_idx, path);
+
+ _iqk_rxclk_setting(rtwdev, path);
+ _iqk_adc_fifo_rst(rtwdev, phy_idx, path);
+
+ if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ rxk_result = _iqk_2g_rx(rtwdev, phy_idx, path);
+ else
+ rxk_result = _iqk_5g_rx(rtwdev, phy_idx, path);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]result : lok_= %x, txk_= %x, rxk_= %x\n",
+ lok_result, txk_result, rxk_result);
+}
+
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool get_empty_table = false;
+ u32 reg_rf18;
+ u32 reg_35c;
+ u8 idx;
+
+ for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
+ if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
+ get_empty_table = true;
+ break;
+ }
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx);
+
+ if (!get_empty_table) {
+ idx = iqk_info->iqk_table_idx[path] + 1;
+ if (idx > 1)
+ idx = 0;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx);
+
+ reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN);
+
+ iqk_info->iqk_band[path] = chan->band_type;
+ iqk_info->iqk_bw[path] = chan->band_width;
+ iqk_info->iqk_ch[path] = chan->channel;
+ iqk_info->iqk_mcc_ch[idx][path] = chan->channel;
+ iqk_info->iqk_table_idx[path] = idx;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x, idx = %x\n",
+ path, reg_rf18, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x\n",
+ path, reg_rf18);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x35c= 0x%x\n",
+ path, reg_35c);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n",
+ iqk_info->iqk_times, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n",
+ idx, path, iqk_info->iqk_mcc_ch[idx][path]);
+}
+
+static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ _iqk_by_path(rtwdev, phy_idx, path);
+}
+
+static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);
+
+ if (iqk_info->is_nbiqk) {
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD, iqk_info->nb_txcfir[path]);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD, iqk_info->nb_rxcfir[path]);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8),
+ MASKDWORD, 0x40000000);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD, 0x40000000);
+ }
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD,
+ 0x00000e19 + (path << 4));
+
+ _iqk_check_cal(rtwdev, path, 0x0);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), BIT(28), 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0x3);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
+}
+
+static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0000000);
+ rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x0000001f, 0x03);
+ rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x000003e0, 0x03);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC,
+ B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0000);
+ rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0);
+}
+
+static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
+{
+ u8 idx = 0;
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), 0x00000001, idx);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 0x00000008, idx);
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000000);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
+}
+
+static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_GOT_TXRX, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_GOT_TXRX, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff);
+ rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff);
+ rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, 0x1);
+
+ _txck_force(rtwdev, RF_PATH_A, true, DAC_960M);
+ _txck_force(rtwdev, RF_PATH_B, true, DAC_960M);
+ _rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M);
+ _rxck_force(rtwdev, RF_PATH_B, true, ADC_1920M);
+
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x2);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1);
+ udelay(10);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
+ udelay(10);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
+ udelay(10);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333);
+}
+
+static void _iqk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 idx, path;
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0);
+
+ if (iqk_info->is_iqk_init)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ iqk_info->is_iqk_init = true;
+ iqk_info->is_nbiqk = false;
+ iqk_info->iqk_fft_en = false;
+ iqk_info->iqk_sram_en = false;
+ iqk_info->iqk_cfir_en = false;
+ iqk_info->iqk_xym_en = false;
+ iqk_info->iqk_times = 0x0;
+
+ for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
+ iqk_info->iqk_channel[idx] = 0x0;
+ for (path = 0; path < RTW8852BT_SS; path++) {
+ iqk_info->lok_cor_fail[idx][path] = false;
+ iqk_info->lok_fin_fail[idx][path] = false;
+ iqk_info->iqk_tx_fail[idx][path] = false;
+ iqk_info->iqk_rx_fail[idx][path] = false;
+ iqk_info->iqk_mcc_ch[idx][path] = 0x0;
+ iqk_info->iqk_table_idx[path] = 0x0;
+ }
+ }
+}
+
+static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
+{
+ u32 rf_mode;
+ u8 path;
+ int ret;
+
+ for (path = 0; path < RF_PATH_MAX; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode,
+ rf_mode != 2, 2, 5000, false,
+ rtwdev, path, RR_MOD, RR_MOD_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Wait S%d to Rx mode!! (ret = %d)\n", path, ret);
+ }
+}
+
+static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx,
+ bool is_pause)
+{
+ if (!is_pause)
+ return;
+
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx));
+}
+
+static void _doiqk(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 backup_bb_val[BACKUP_BB_REGS_NR];
+ u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]==========IQK start!!!!!==========\n");
+ iqk_info->iqk_times++;
+ iqk_info->version = RTW8852BT_IQK_VER;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
+ _iqk_get_ch_info(rtwdev, phy_idx, path);
+
+ _rfk_backup_bb_reg(rtwdev, backup_bb_val);
+ _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
+ _iqk_macbb_setting(rtwdev, phy_idx, path);
+ _iqk_preset(rtwdev, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_restore(rtwdev, path);
+ _iqk_afebb_restore(rtwdev, phy_idx, path);
+ _rfk_reload_bb_reg(rtwdev, backup_bb_val);
+ _rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
+}
+
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+{
+ u8 kpath = _kpath(rtwdev, phy_idx);
+
+ switch (kpath) {
+ case RF_A:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ break;
+ case RF_B:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ break;
+ case RF_AB:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ break;
+ default:
+ break;
+ }
+}
+
+static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 val, kidx = dpk->cur_idx[path];
+ bool on = !off;
+
+ val = dpk->is_dpk_enable && on && dpk->bp[path][kidx].path_ok;
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ BIT(24), val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
+ kidx, dpk->is_dpk_enable && on ? "enable" : "disable");
+}
+
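+/* Fire a DPK one-shot and wait for the same two-stage NCTL handshake as
+ * IQK: 0x55 in R_RFK_ST (30 ms budget), then 0x8000 in R_RPT_COM
+ * (2 ms budget).
+ */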
+static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, enum rtw8852bt_dpk_id id)
+{
+ u16 dpk_cmd;
+ u32 val;
+ int ret;
+
+ dpk_cmd = (id << 8) | (0x19 + (path << 4));
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
+ 1, 30000, false,
+ rtwdev, R_RFK_ST, MASKBYTE0);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 1 over 30ms!!!!\n");
+
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000,
+ 1, 2000, false,
+ rtwdev, R_RPT_COM, MASKLWORD);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 2 over 2ms!!!!\n");
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] one-shot for %s = 0x%04x\n",
+ id == 0x06 ? "LBK_RXIQK" :
+ id == 0x10 ? "SYNC" :
+ id == 0x11 ? "MDPK_IDL" :
+ id == 0x12 ? "MDPK_MPA" :
+ id == 0x13 ? "GAIN_LOSS" :
+ id == 0x14 ? "PWR_CAL" :
+ id == 0x15 ? "DPK_RXAGC" :
+ id == 0x16 ? "KIP_PRESET" :
+ id == 0x17 ? "KIP_RESOTRE" :
+ "DPK_TXAGC", dpk_cmd);
+}
+
+static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
+
+ udelay(600);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d RXDCK\n", path);
+}
+
+static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 kidx = dpk->cur_idx[path];
+
+ dpk->bp[path][kidx].band = chan->band_type;
+ dpk->bp[path][kidx].ch = chan->channel;
+ dpk->bp[path][kidx].bw = chan->band_width;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
+ path, dpk->cur_idx[path], phy,
+ rtwdev->is_tssi_mode[path] ? "on" : "off",
+ rtwdev->dbcc_en ? "on" : "off",
+ dpk->bp[path][kidx].band == 0 ? "2G" :
+ dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
+ dpk->bp[path][kidx].ch,
+ dpk->bp[path][kidx].bw == 0 ? "20M" :
+ dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
+}
+
+static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_pause)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, is_pause);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
+ is_pause ? "pause" : "resume");
+}
+
+static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
+
+ if (rtwdev->hal.cv > CHIP_CAV)
+ rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8),
+ B_DPD_COM_OF, 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
+}
+
+static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 cur_rxbb, u32 rf_18)
+{
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd);
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);
+
+ if (cur_rxbb >= 0x11)
+ rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13);
+ else if (cur_rxbb <= 0xa)
+ rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00);
+ else
+ rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05);
+
+ rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014);
+
+ udelay(100);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025);
+
+ _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5);
+}
+
+static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2);
+ rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
+ } else {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5);
+ rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC);
+ rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0);
+ rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800);
+ }
+
+ rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
+ rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);
+}
+
+static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_bypass)
+{
+ if (is_bypass) {
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ B_RXIQC_BYPASS2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ B_RXIQC_BYPASS, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ B_RXIQC_BYPASS2, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ B_RXIQC_BYPASS, 0x0);
+ }
+}
+
+static
+void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0);
+ else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
+}
+
+static void _dpk_table_select(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u8 kidx, u8 gain)
+{
+ u8 val;
+
+ val = 0x80 + kidx * 0x20 + gain * 0x10;
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
+ gain, val);
+}
+
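+/* SYNC is considered failed (returns true) when either DC component
+ * exceeds 200 or the correlation value drops below 170.
+ */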
+static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+#define DPK_SYNC_TH_DC_I 200
+#define DPK_SYNC_TH_DC_Q 200
+#define DPK_SYNC_TH_CORR 170
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 corr_val, corr_idx;
+ u16 dc_i, dc_q;
+ u32 corr, dc;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
+
+ corr = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
+ corr_idx = u32_get_bits(corr, B_PRT_COM_CORI);
+ corr_val = u32_get_bits(corr, B_PRT_COM_CORV);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d Corr_idx / Corr_val = %d / %d\n",
+ path, corr_idx, corr_val);
+
+ dpk->corr_idx[path][kidx] = corr_idx;
+ dpk->corr_val[path][kidx] = corr_val;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
+
+ dc = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
+ dc_i = u32_get_bits(dc, B_PRT_COM_DCI);
+ dc_q = u32_get_bits(dc, B_PRT_COM_DCQ);
+
+ dc_i = abs(sign_extend32(dc_i, 11));
+ dc_q = abs(sign_extend32(dc_q, 11));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
+ path, dc_i, dc_q);
+
+ dpk->dc_i[path][kidx] = dc_i;
+ dpk->dc_q[path][kidx] = dc_q;
+
+ if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
+ corr_val < DPK_SYNC_TH_CORR)
+ return true;
+ else
+ return false;
+}
+
+static void _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_one_shot(rtwdev, phy, path, SYNC);
+}
+
+static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
+{
+ u16 dgain;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0);
+
+ dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain);
+
+ return dgain;
+}
+
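+/* Map the measured digital gain onto an RXBB gain offset using the
+ * bnd[] thresholds. The 0xff..0xf8 returns are negative offsets once
+ * interpreted as s8 (-1..-8); anything above bnd[1] saturates at +6.
+ */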
+static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
+{
+ static const u16 bnd[15] = {
+ 0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556,
+ 0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220
+ };
+ s8 offset;
+
+ if (dgain >= bnd[0])
+ offset = 0x6;
+ else if (bnd[0] > dgain && dgain >= bnd[1])
+ offset = 0x6;
+ else if (bnd[1] > dgain && dgain >= bnd[2])
+ offset = 0x5;
+ else if (bnd[2] > dgain && dgain >= bnd[3])
+ offset = 0x4;
+ else if (bnd[3] > dgain && dgain >= bnd[4])
+ offset = 0x3;
+ else if (bnd[4] > dgain && dgain >= bnd[5])
+ offset = 0x2;
+ else if (bnd[5] > dgain && dgain >= bnd[6])
+ offset = 0x1;
+ else if (bnd[6] > dgain && dgain >= bnd[7])
+ offset = 0x0;
+ else if (bnd[7] > dgain && dgain >= bnd[8])
+ offset = 0xff;
+ else if (bnd[8] > dgain && dgain >= bnd[9])
+ offset = 0xfe;
+ else if (bnd[9] > dgain && dgain >= bnd[10])
+ offset = 0xfd;
+ else if (bnd[10] > dgain && dgain >= bnd[11])
+ offset = 0xfc;
+ else if (bnd[11] > dgain && dgain >= bnd[12])
+ offset = 0xfb;
+ else if (bnd[12] > dgain && dgain >= bnd[13])
+ offset = 0xfa;
+ else if (bnd[13] > dgain && dgain >= bnd[14])
+ offset = 0xf9;
+ else if (bnd[14] > dgain)
+ offset = 0xf8;
+ else
+ offset = 0x0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset);
+
+ return offset;
+}
+
+static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
+
+ return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
+}
+
+static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
+}
+
+static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_tpg_sel(rtwdev, path, kidx);
+ _dpk_one_shot(rtwdev, phy, path, KIP_PRESET);
+}
+
+static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n");
+}
+
+static
+u8 _dpk_txagc_check_8852bt(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 txagc)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (txagc >= dpk->max_dpk_txagc[path])
+ txagc = dpk->max_dpk_txagc[path];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set TxAGC = 0x%x\n", txagc);
+
+ return txagc;
+}
+
+static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 txagc)
+{
+ u8 val;
+
+ val = _dpk_txagc_check_8852bt(rtwdev, path, txagc);
+ rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ _dpk_one_shot(rtwdev, phy, path, DPK_TXAGC);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc);
+}
+
+static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 0x50220);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1);
+ _dpk_one_shot(rtwdev, phy, path, DPK_RXAGC);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0);
+}
+
+static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 txagc, s8 gain_offset)
+{
+ txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK);
+
+ if ((txagc - gain_offset) < DPK_TXAGC_LOWER)
+ txagc = DPK_TXAGC_LOWER;
+ else if ((txagc - gain_offset) > DPK_TXAGC_UPPER)
+ txagc = DPK_TXAGC_UPPER;
+ else
+ txagc = txagc - gain_offset;
+
+ _dpk_kip_set_txagc(rtwdev, phy, path, txagc);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
+ gain_offset, txagc);
+ return txagc;
+}
+
+static bool _dpk_pas_read(struct rtw89_dev *rtwdev, enum rtw89_rf_path path,
+ u8 is_check)
+{
+ u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
+ u8 i;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08);
+
+ if (is_check) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
+ val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+ val1_i = abs(sign_extend32(val1_i, 11));
+ val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+ val1_q = abs(sign_extend32(val1_q, 11));
+
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
+ val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+ val2_i = abs(sign_extend32(val2_i, 11));
+ val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+ val2_q = abs(sign_extend32(val2_q, 11));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
+ phy_div(val1_i * val1_i + val1_q * val1_q,
+ val2_i * val2_i + val2_q * val2_q));
+ } else {
+ for (i = 0; i < 32; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
+ }
+ }
+
+ if (val1_i * val1_i + val1_q * val1_q >=
+ (val2_i * val2_i + val2_q * val2_q) * 8 / 5)
+ return true;
+
+ return false;
+}
+
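+/* DPK AGC state machine: starting from SYNC_DGAIN, adjust RXBB gain and
+ * TXAGC until the gain-loss index lands in the 1..6 window, bounded by
+ * six AGC iterations and a hard 200-step limit. Returns the final
+ * TXAGC, or 0xff if SYNC failed.
+ */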
+static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
+ bool loss_only)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 goout = 0, agc_cnt = 0, limited_rxbb = 0, gl_cnt = 0;
+ u8 tmp_txagc, tmp_rxbb, tmp_gl_idx = 0;
+ u8 step = DPK_AGC_STEP_SYNC_DGAIN;
+ int limit = 200;
+ s8 offset = 0;
+ u16 dgain = 0;
+ u32 rf_18;
+
+ tmp_txagc = init_txagc;
+
+ tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);
+ rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+
+ do {
+ switch (step) {
+ case DPK_AGC_STEP_SYNC_DGAIN:
+ _dpk_sync(rtwdev, phy, path, kidx);
+ if (agc_cnt == 0) {
+ if (chan->band_width < 2)
+ _dpk_bypass_rxcfir(rtwdev, path, true);
+ else
+ _dpk_lbk_rxiqk(rtwdev, phy, path,
+ tmp_rxbb, rf_18);
+ }
+
+ if (_dpk_sync_check(rtwdev, path, kidx)) {
+ tmp_txagc = 0xff;
+ goout = 1;
+ break;
+ }
+
+ dgain = _dpk_dgain_read(rtwdev);
+ offset = _dpk_dgain_mapping(rtwdev, dgain);
+
+ if (loss_only || limited_rxbb || offset == 0)
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ else
+ step = DPK_AGC_STEP_GAIN_ADJ;
+ break;
+ case DPK_AGC_STEP_GAIN_ADJ:
+ tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB);
+
+ if (tmp_rxbb + offset > 0x1f) {
+ tmp_rxbb = 0x1f;
+ limited_rxbb = 1;
+ } else if (tmp_rxbb + offset < 0) {
+ tmp_rxbb = 0;
+ limited_rxbb = 1;
+ } else {
+ tmp_rxbb = tmp_rxbb + offset;
+ }
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB, tmp_rxbb);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_80)
+ _dpk_lbk_rxiqk(rtwdev, phy, path, tmp_rxbb, rf_18);
+ if (dgain > 1922 || dgain < 342)
+ step = DPK_AGC_STEP_SYNC_DGAIN;
+ else
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+
+ agc_cnt++;
+ break;
+ case DPK_AGC_STEP_GAIN_LOSS_IDX:
+ _dpk_gainloss(rtwdev, phy, path, kidx);
+
+ tmp_gl_idx = _dpk_gainloss_read(rtwdev);
+
+ if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, path, true)) ||
+ tmp_gl_idx >= 7)
+ step = DPK_AGC_STEP_GL_GT_CRITERION;
+ else if (tmp_gl_idx == 0)
+ step = DPK_AGC_STEP_GL_LT_CRITERION;
+ else
+ step = DPK_AGC_STEP_SET_TX_GAIN;
+
+ gl_cnt++;
+ break;
+ case DPK_AGC_STEP_GL_GT_CRITERION:
+ if (tmp_txagc == 0x2e ||
+ tmp_txagc == dpk->max_dpk_txagc[path]) {
+ goout = 1;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Txagc@lower bound!!\n");
+ } else {
+ tmp_txagc = _dpk_set_offset(rtwdev, phy, path,
+ tmp_txagc, 0x3);
+ }
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ agc_cnt++;
+ break;
+
+ case DPK_AGC_STEP_GL_LT_CRITERION:
+ if (tmp_txagc == 0x3f || tmp_txagc == dpk->max_dpk_txagc[path]) {
+ goout = 1;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Txagc@upper bound!!\n");
+ } else {
+ tmp_txagc = _dpk_set_offset(rtwdev, phy, path,
+ tmp_txagc, 0xfe);
+ }
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ agc_cnt++;
+ break;
+
+ case DPK_AGC_STEP_SET_TX_GAIN:
+ tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_txagc,
+ tmp_gl_idx);
+ goout = 1;
+ agc_cnt++;
+ break;
+
+ default:
+ goout = 1;
+ break;
+ }
+ } while (!goout && agc_cnt < 6 && limit-- > 0);
+
+ if (gl_cnt >= 6)
+ _dpk_pas_read(rtwdev, path, false);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc, tmp_rxbb);
+
+ return tmp_txagc;
+}
+
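+/* Select the memory-DPD configuration. The (x,y,z) tuples in the case
+ * comments appear to denote the per-order tap split programmed below;
+ * dpk_order is latched per path so _dpk_fill_result() can write it
+ * back via B_DPD_ORDER_V1.
+ */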
+static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u8 order)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ switch (order) {
+ case 0: /* (5,3,1) */
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
+ dpk->dpk_order[path] = 0x3;
+ break;
+ case 1: /* (5,3,0) */
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x0);
+ dpk->dpk_order[path] = 0x1;
+ break;
+ case 2: /* (5,0,0) */
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x0);
+ dpk->dpk_order[path] = 0x0;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Wrong MDPD order!!(0x%x)\n", order);
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n",
+ order == 0x0 ? "(5,3,1)" :
+ order == 0x1 ? "(5,3,0)" : "(5,0,0)");
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Set MDPD order to 0x%x for IDL\n", order);
+}
+
+static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, u8 gain)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 &&
+ dpk->bp[path][kidx].band == RTW89_BAND_5G)
+ _dpk_set_mdpd_para(rtwdev, path, 0x2);
+ else
+ _dpk_set_mdpd_para(rtwdev, path, 0x0);
+
+ _dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
+}
+
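+/* Commit the calibration result: store txagc/pwsf/gain-scale into the
+ * per-path backup slot, program the matching KIP registers, then pulse
+ * B_LOAD_COEF_MDPD to load the MDPD coefficients.
+ */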
+static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 gs = dpk->dpk_gs[phy];
+ u16 pwsf = 0x78;
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), BIT(8), kidx);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n",
+ txagc, pwsf, gs);
+
+ dpk->bp[path][kidx].txagc_dpk = txagc;
+ rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
+ 0x3F << ((gain << 3) + (kidx << 4)), txagc);
+
+ dpk->bp[path][kidx].pwsf = pwsf;
+ rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
+ 0x1FF << (gain << 4), pwsf);
+
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0);
+
+ dpk->bp[path][kidx].gs = gs;
+ if (dpk->dpk_gs[phy] == 0x7f)
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_CH0A + (path << 8) + (kidx << 2),
+ MASKDWORD, 0x007f7f7f);
+ else
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_CH0A + (path << 8) + (kidx << 2),
+ MASKDWORD, 0x005b5b5b);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ B_DPD_ORDER_V1, dpk->dpk_order[path]);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0);
+}
+
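+/* Scan the per-path backup slots for an entry matching the current
+ * band/channel; on a hit, point the coefficient selector at that slot
+ * so the earlier DPK result is reused instead of recalibrating.
+ */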
+static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 idx, cur_band, cur_ch;
+ bool is_reload = false;
+
+ cur_band = chan->band_type;
+ cur_ch = chan->channel;
+
+ for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
+ if (cur_band != dpk->bp[path][idx].band ||
+ cur_ch != dpk->bp[path][idx].ch)
+ continue;
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
+ B_COEF_SEL_MDPD, idx);
+ dpk->cur_idx[path] = idx;
+ is_reload = true;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] reload S%d[%d] success\n", path, idx);
+ }
+
+ return is_reload;
+}
+
+static
+void _rf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb)
+{
+ if (is_bybb)
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+}
+
+static
+void _drf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb)
+{
+ if (is_bybb)
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0);
+}
+
+static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 gain)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 txagc = 0x38, kidx = dpk->cur_idx[path];
+ bool is_fail = false;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx);
+
+ _rf_direct_cntrl(rtwdev, path, false);
+ _drf_direct_cntrl(rtwdev, path, false);
+
+ _dpk_kip_pwr_clk_on(rtwdev, path);
+ _dpk_kip_set_txagc(rtwdev, phy, path, txagc);
+ _dpk_rf_setting(rtwdev, gain, path, kidx);
+ _dpk_rx_dck(rtwdev, phy, path);
+ _dpk_kip_preset(rtwdev, phy, path, kidx);
+ _dpk_kip_set_rxagc(rtwdev, phy, path);
+ _dpk_table_select(rtwdev, path, kidx, gain);
+
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+
+ _rfk_get_thermal(rtwdev, kidx, path);
+
+ if (txagc == 0xff) {
+ is_fail = true;
+ goto _error;
+ }
+
+ _dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, RF_RX);
+ _dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc);
+
+_error:
+ if (!is_fail)
+ dpk->bp[path][kidx].path_ok = 1;
+ else
+ dpk->bp[path][kidx].path_ok = 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
+ is_fail ? "Check" : "Success");
+
+ _dpk_onoff(rtwdev, path, is_fail);
+
+ return is_fail;
+}
+
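+/* Full DPK sequence: check for reloadable results, back up BB/KIP/RF
+ * registers and pause TSSI, run _dpk_main() on each path, then restore
+ * everything and resume TSSI.
+ */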
+static void _dpk_cal_select(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, u8 kpath)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u32 backup_kip_val[BACKUP_KIP_REGS_NR];
+ u32 backup_bb_val[BACKUP_BB_REGS_NR];
+ u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR];
+ bool reloaded[2] = {false};
+ u8 path;
+
+ for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ if (!reloaded[path] && dpk->bp[path][0].ch != 0)
+ dpk->cur_idx[path] = !dpk->cur_idx[path];
+ else
+ _dpk_onoff(rtwdev, path, false);
+ }
+
+ _rfk_backup_bb_reg(rtwdev, backup_bb_val);
+ _rfk_backup_kip_reg(rtwdev, backup_kip_val);
+
+ for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
+ _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path);
+ _dpk_information(rtwdev, phy, path);
+ if (rtwdev->is_tssi_mode[path])
+ _dpk_tssi_pause(rtwdev, path, true);
+ }
+
+ _rfk_bb_afe_setting(rtwdev, phy, path, kpath);
+
+ for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++)
+ _dpk_main(rtwdev, phy, path, 1);
+
+ _rfk_bb_afe_restore(rtwdev, phy, path, kpath);
+
+ _dpk_kip_restore(rtwdev, path);
+ _rfk_reload_bb_reg(rtwdev, backup_bb_val);
+ _rfk_reload_kip_reg(rtwdev, backup_kip_val);
+
+ for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) {
+ _rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path);
+ if (rtwdev->is_tssi_mode[path])
+ _dpk_tssi_pause(rtwdev, path, false);
+ }
+}
+
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_fem_info *fem = &rtwdev->fem;
+
+ if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
+ return true;
+ } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
+ return true;
+ } else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Skip DPK due to 6G_ext_PA exist!!\n");
+ return true;
+ }
+
+ return false;
+}
+
+static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 path, kpath;
+
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < RTW8852BT_SS; path++) {
+ if (kpath & BIT(path))
+ _dpk_onoff(rtwdev, path, true);
+ }
+}
+
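+/* DPK thermal tracking: compare the current thermal reading against
+ * the value captured at calibration time and nudge the power scaling
+ * factor (pwsf) accordingly; the delta is weighted 3/2 on 2 GHz and
+ * 5/2 otherwise. E.g. a 4-step drop in the thermal reading on 2 GHz
+ * shifts pwsf by +6.
+ */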
+static void _dpk_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst;
+ s8 delta_ther[2] = {};
+ u8 trk_idx, txagc_rf;
+ u8 path, kidx;
+ u16 pwsf[2];
+ u8 cur_ther;
+ u32 tmp;
+
+ for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
+ kidx = dpk->cur_idx[path];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
+ path, kidx, dpk->bp[path][kidx].ch);
+
+ cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] thermal now = %d\n", cur_ther);
+
+ if (dpk->bp[path][kidx].ch && cur_ther)
+ delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;
+
+ if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
+ delta_ther[path] = delta_ther[path] * 3 / 2;
+ else
+ delta_ther[path] = delta_ther[path] * 5 / 2;
+
+ txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
+ B_TXAGC_RF);
+
+ if (rtwdev->is_tssi_mode[path]) {
+ trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
+ txagc_rf, trk_idx);
+
+ txagc_bb =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
+ MASKBYTE2);
+ txagc_bb_tp =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13),
+ B_TXAGC_TP);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
+ txagc_bb_tp, txagc_bb);
+
+ txagc_ofst =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
+ MASKBYTE3);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
+ txagc_ofst, delta_ther[path]);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
+ B_DPD_COM_OF);
+ if (tmp == 0x1) {
+ txagc_ofst = 0;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] HW txagc offset mode\n");
+ }
+
+ if (txagc_rf && cur_ther)
+ ini_diff = txagc_ofst + delta_ther[path];
+
+ tmp = rtw89_phy_read32_mask(rtwdev,
+ R_P0_TXDPD + (path << 13),
+ B_P0_TXDPD);
+ if (tmp == 0x0) {
+ pwsf[0] = dpk->bp[path][kidx].pwsf +
+ txagc_bb_tp - txagc_bb + ini_diff;
+ pwsf[1] = dpk->bp[path][kidx].pwsf +
+ txagc_bb_tp - txagc_bb + ini_diff;
+ } else {
+ pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff;
+ pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff;
+ }
+ } else {
+ pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
+ pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
+ }
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS);
+ if (!tmp && txagc_rf) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
+ pwsf[0], pwsf[1]);
+
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_BND + (path << 8) + (kidx << 2),
+ B_DPD_BND_0, pwsf[0]);
+ rtw89_phy_write32_mask(rtwdev,
+ R_DPD_BND + (path << 8) + (kidx << 2),
+ B_DPD_BND_1, pwsf[1]);
+ }
+ }
+}
+
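+/* Decide where the DPD backoff lives: if the OFDM backoff plus TX
+ * scale already reaches 44 (in register units), move the backoff into
+ * the BB by programming a 0dB DPD gain scale (0x7f per byte);
+ * otherwise keep it in DPK with gain scale 0x5b.
+ */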
+static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 tx_scale, ofdm_bkof, path, kpath;
+
+ kpath = _kpath(rtwdev, phy);
+
+ ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM);
+ tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA);
+
+ if (ofdm_bkof + tx_scale >= 44) {
+ /* move dpd backoff to bb, and set dpd backoff to 0 */
+ dpk->dpk_gs[phy] = 0x7f;
+ for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8),
+ B_DPD_CFG, 0x7f7f7f);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Set S%d DPD backoff to 0dB\n", path);
+ }
+ } else {
+ dpk->dpk_gs[phy] = 0x5b;
+ }
+}
+
+static void _tssi_dpk_off(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A, BIT(24), 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0B, BIT(24), 0x0);
+}
+
+static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+
+ if (band == RTW89_BAND_2G)
+ rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
+}
+
+static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852bt_tssi_sys_defs_tbl);
+
+ if (chan->band_width == RTW89_CHANNEL_WIDTH_80)
+ rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x1);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x0);
+
+ if (path == RF_PATH_A)
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852bt_tssi_sys_a_defs_2g_tbl,
+ &rtw8852bt_tssi_sys_a_defs_5g_tbl);
+ else
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852bt_tssi_sys_b_defs_2g_tbl,
+ &rtw8852bt_tssi_sys_b_defs_5g_tbl);
+}
+
+static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852bt_tssi_init_txpwr_defs_a_tbl,
+ &rtw8852bt_tssi_init_txpwr_defs_b_tbl);
+}
+
+static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852bt_tssi_init_txpwr_he_tb_defs_a_tbl,
+ &rtw8852bt_tssi_init_txpwr_he_tb_defs_b_tbl);
+}
+
+static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852bt_tssi_dck_defs_a_tbl,
+ &rtw8852bt_tssi_dck_defs_b_tbl);
+}
+
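+/* Program the 64-entry thermal-offset table for TSSI compensation.
+ * Entries 0..31 hold negated "down" deltas and 63..32 the "up" deltas
+ * from the firmware tracking tables; RTW8852BT_TSSI_GET_VAL packs four
+ * s8 offsets into each 32-bit register write. A thermal reading of
+ * 0xff (no efuse value) falls back to a zeroed table.
+ */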
+static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+#define RTW8852BT_TSSI_GET_VAL(ptr, idx) \
+({ \
+ s8 *__ptr = (ptr); \
+ u8 __idx = (idx), __i, __v; \
+ u32 __val = 0; \
+ for (__i = 0; __i < 4; __i++) { \
+ __v = (__ptr[__idx + __i]); \
+ __val |= (__v << (8 * __i)); \
+ } \
+ __val; \
+})
+ struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk;
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u8 subband = chan->subband_type;
+ const s8 *thm_up_a = NULL;
+ const s8 *thm_down_a = NULL;
+ const s8 *thm_up_b = NULL;
+ const s8 *thm_down_b = NULL;
+ u8 thermal = 0xff;
+ s8 thm_ofst[64] = {0};
+ u32 tmp = 0;
+ u8 i, j;
+
+ switch (subband) {
+ default:
+ case RTW89_CH_2G:
+ thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0];
+ thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0];
+ thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0];
+ thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0];
+ break;
+ case RTW89_CH_5G_BAND_1:
+ thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0];
+ thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0];
+ thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0];
+ thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0];
+ break;
+ case RTW89_CH_5G_BAND_3:
+ thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1];
+ thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1];
+ thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1];
+ thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1];
+ break;
+ case RTW89_CH_5G_BAND_4:
+ thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2];
+ thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2];
+ thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2];
+ thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2];
+ break;
+ }
+
+ if (path == RF_PATH_A) {
+ thermal = tssi_info->thermal[RF_PATH_A];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);
+
+ if (thermal == 0xff) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);
+
+ for (i = 0; i < 64; i += 4) {
+ rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ R_P0_TSSI_BASE + i, 0x0);
+ }
+
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER,
+ thermal);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
+ thermal);
+
+ i = 0;
+ for (j = 0; j < 32; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down_a[i++] :
+ -thm_down_a[DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 63; j >= 32; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up_a[i++] :
+ thm_up_a[DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 64; i += 4) {
+ tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i);
+ rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x5c00 + i, tmp);
+ }
+ }
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
+
+ } else {
+ thermal = tssi_info->thermal[RF_PATH_B];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);
+
+ if (thermal == 0xff) {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);
+
+ for (i = 0; i < 64; i += 4) {
+ rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x7c00 + i, 0x0);
+ }
+
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER,
+ thermal);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
+ thermal);
+
+ i = 0;
+ for (j = 0; j < 32; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down_b[i++] :
+ -thm_down_b[DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 63; j >= 32; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up_b[i++] :
+ thm_up_b[DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 64; i += 4) {
+ tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i);
+ rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x7c00 + i, tmp);
+ }
+ }
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
+ }
+#undef RTW8852BT_TSSI_GET_VAL
+}
+
+static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852bt_tssi_dac_gain_defs_a_tbl,
+ &rtw8852bt_tssi_dac_gain_defs_b_tbl);
+}
+
+static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+
+ if (path == RF_PATH_A)
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852bt_tssi_slope_a_defs_2g_tbl,
+ &rtw8852bt_tssi_slope_a_defs_5g_tbl);
+ else
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852bt_tssi_slope_b_defs_2g_tbl,
+ &rtw8852bt_tssi_slope_b_defs_5g_tbl);
+}
+
+static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, bool all)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ enum rtw89_band band = chan->band_type;
+ const struct rtw89_rfk_tbl *tbl = NULL;
+ u8 ch = chan->channel;
+
+ if (path == RF_PATH_A) {
+ if (band == RTW89_BAND_2G)
+ tbl = &rtw8852bt_tssi_align_a_2g_all_defs_tbl;
+ else if (ch >= 36 && ch <= 64)
+ tbl = &rtw8852bt_tssi_align_a_5g1_all_defs_tbl;
+ else if (ch >= 100 && ch <= 144)
+ tbl = &rtw8852bt_tssi_align_a_5g2_all_defs_tbl;
+ else if (ch >= 149 && ch <= 177)
+ tbl = &rtw8852bt_tssi_align_a_5g3_all_defs_tbl;
+ } else {
+ if (ch >= 1 && ch <= 14)
+ tbl = &rtw8852bt_tssi_align_b_2g_all_defs_tbl;
+ else if (ch >= 36 && ch <= 64)
+ tbl = &rtw8852bt_tssi_align_b_5g1_all_defs_tbl;
+ else if (ch >= 100 && ch <= 144)
+ tbl = &rtw8852bt_tssi_align_b_5g2_all_defs_tbl;
+ else if (ch >= 149 && ch <= 177)
+ tbl = &rtw8852bt_tssi_align_b_5g3_all_defs_tbl;
+ }
+
+ if (tbl)
+ rtw89_rfk_parser(rtwdev, tbl);
+}
+
+static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852bt_tssi_slope_defs_a_tbl,
+ &rtw8852bt_tssi_slope_defs_b_tbl);
+}
+
+static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ if (path == RF_PATH_A)
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0);
+}
+
+static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s path=%d\n", __func__,
+ path);
+
+ if (path == RF_PATH_A)
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
+ B_P0_TSSI_MV_MIX, 0x010);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
+ B_P1_RFCTM_DEL, 0x010);
+}
+
+static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852BT; i++) {
+ _tssi_set_tssi_track(rtwdev, phy, i);
+ _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
+
+ if (i == RF_PATH_A) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG,
+ B_P0_TSSI_MV_CLR, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
+ B_P0_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG,
+ B_P0_TSSI_EN, 0x1);
+ rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
+ RR_TXGA_V1_TRK_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
+ B_P0_TSSI_RFC, 0x3);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
+ B_P0_TSSI_OFT, 0xc0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
+ B_P0_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK,
+ B_P0_TSSI_OFT_EN, 0x1);
+
+ rtwdev->is_tssi_mode[RF_PATH_A] = true;
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG,
+ B_P1_TSSI_MV_CLR, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
+ B_P1_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG,
+ B_P1_TSSI_EN, 0x1);
+ rtw89_write_rf(rtwdev, i, RR_TXGA_V1,
+ RR_TXGA_V1_TRK_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
+ B_P1_TSSI_RFC, 0x3);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
+ B_P1_TSSI_OFT, 0xc0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
+ B_P1_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK,
+ B_P1_TSSI_OFT_EN, 0x1);
+
+ rtwdev->is_tssi_mode[RF_PATH_B] = true;
+ }
+ }
+}
+
+static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1);
+
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+}
+
+static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 13:
+ return 4;
+ case 14:
+ return 5;
+ }
+
+ return 0;
+}
+
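+/* Channels that fall between two OFDM calibration groups are tagged
+ * with TSSI_EXTRA_GROUP(idx); _tssi_get_ofdm_de() then averages the
+ * DE values of groups idx and idx + 1.
+ */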
+#define TSSI_EXTRA_GROUP_BIT (BIT(31))
+#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
+#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
+#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
+#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
+
+static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 14:
+ return 4;
+ case 36 ... 40:
+ return 5;
+ case 41 ... 43:
+ return TSSI_EXTRA_GROUP(5);
+ case 44 ... 48:
+ return 6;
+ case 49 ... 51:
+ return TSSI_EXTRA_GROUP(6);
+ case 52 ... 56:
+ return 7;
+ case 57 ... 59:
+ return TSSI_EXTRA_GROUP(7);
+ case 60 ... 64:
+ return 8;
+ case 100 ... 104:
+ return 9;
+ case 105 ... 107:
+ return TSSI_EXTRA_GROUP(9);
+ case 108 ... 112:
+ return 10;
+ case 113 ... 115:
+ return TSSI_EXTRA_GROUP(10);
+ case 116 ... 120:
+ return 11;
+ case 121 ... 123:
+ return TSSI_EXTRA_GROUP(11);
+ case 124 ... 128:
+ return 12;
+ case 129 ... 131:
+ return TSSI_EXTRA_GROUP(12);
+ case 132 ... 136:
+ return 13;
+ case 137 ... 139:
+ return TSSI_EXTRA_GROUP(13);
+ case 140 ... 144:
+ return 14;
+ case 149 ... 153:
+ return 15;
+ case 154 ... 156:
+ return TSSI_EXTRA_GROUP(15);
+ case 157 ... 161:
+ return 16;
+ case 162 ... 164:
+ return TSSI_EXTRA_GROUP(16);
+ case 165 ... 169:
+ return 17;
+ case 170 ... 172:
+ return TSSI_EXTRA_GROUP(17);
+ case 173 ... 177:
+ return 18;
+ }
+
+ return 0;
+}
+
+static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 8:
+ return 0;
+ case 9 ... 14:
+ return 1;
+ case 36 ... 48:
+ return 2;
+ case 52 ... 64:
+ return 3;
+ case 100 ... 112:
+ return 4;
+ case 116 ... 128:
+ return 5;
+ case 132 ... 144:
+ return 6;
+ case 149 ... 177:
+ return 7;
+ }
+
+ return 0;
+}
+
+static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u32 gidx, gidx_1st, gidx_2nd;
+ s8 de_1st;
+ s8 de_2nd;
+ s8 val;
+
+ gidx = _tssi_get_ofdm_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx);
+
+ if (IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+
+ return val;
+}
+
+static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u32 tgidx, tgidx_1st, tgidx_2nd;
+ s8 tde_1st;
+ s8 tde_2nd;
+ s8 val;
+
+ tgidx = _tssi_get_trim_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+
+ return val;
+}
+
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 ch = chan->channel;
+ u8 gidx;
+ s8 ofdm_de;
+ s8 trim_de;
+ s32 val;
+ u32 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
+ phy, ch);
+
+ for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
+ gidx = _tssi_get_cck_group(rtwdev, ch);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ val = tssi_info->tssi_cck[i][gidx] + trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
+ i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);
+
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
+ _tssi_de_cck_long[i],
+ rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i],
+ _TSSI_DE_MASK));
+
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ val = ofdm_de + trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
+ i, ofdm_de, trim_de);
+
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i],
+ _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
+ _tssi_de_mcs_20m[i],
+ rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i],
+ _TSSI_DE_MASK));
+ }
+}
+
+static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n"
+ "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n",
+ R_TSSI_PA_K1 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_TSSI_PA_K1 + (path << 13)),
+ R_TSSI_PA_K2 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_TSSI_PA_K2 + (path << 13)),
+ R_P0_TSSI_ALIM1 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM1 + (path << 13)),
+ R_P0_TSSI_ALIM3 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM3 + (path << 13)),
+ R_TSSI_PA_K5 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_TSSI_PA_K5 + (path << 13)),
+ R_P0_TSSI_ALIM2 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM2 + (path << 13)),
+ R_P0_TSSI_ALIM4 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM4 + (path << 13)),
+ R_TSSI_PA_K8 + (path << 13),
+ rtw89_phy_read32(rtwdev, R_TSSI_PA_K8 + (path << 13)));
+}
+
+static void _tssi_alimentk_done(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 channel = chan->channel;
+ u8 band;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======>%s phy=%d path=%d\n", __func__, phy, path);
+
+ if (channel >= 1 && channel <= 14)
+ band = TSSI_ALIMK_2G;
+ else if (channel >= 36 && channel <= 64)
+ band = TSSI_ALIMK_5GL;
+ else if (channel >= 100 && channel <= 144)
+ band = TSSI_ALIMK_5GM;
+ else if (channel >= 149 && channel <= 177)
+ band = TSSI_ALIMK_5GH;
+ else
+ band = TSSI_ALIMK_2G;
+
+ if (tssi_info->alignment_done[path][band]) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD,
+ tssi_info->alignment_value[path][band][0]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD,
+ tssi_info->alignment_value[path][band][1]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD,
+ tssi_info->alignment_value[path][band][2]);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD,
+ tssi_info->alignment_value[path][band][3]);
+ }
+
+ _tssi_alimentk_dump_result(rtwdev, path);
+}
+
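+/* Drive calibration TX through the PMAC: when enabling, configure the
+ * PLCP, TX/RX paths and TX power, then start (or stop) a burst of cnt
+ * packets with the given period.
+ */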
+static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm,
+ u8 enable)
+{
+ enum rtw89_rf_path_bit rx_path;
+
+ if (path == RF_PATH_A)
+ rx_path = RF_A;
+ else if (path == RF_PATH_B)
+ rx_path = RF_B;
+ else if (path == RF_PATH_AB)
+ rx_path = RF_AB;
+ else
+ rx_path = RF_ABCD; /* don't change path, but still set others */
+
+ if (enable) {
+ rtw8852bx_bb_set_plcp_tx(rtwdev);
+ rtw8852bx_bb_cfg_tx_path(rtwdev, path);
+ rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path);
+ rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy);
+ }
+
+ rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy);
+}
+
+static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, const u32 reg[],
+ u32 reg_backup[], u32 reg_num)
+{
+ u32 i;
+
+ for (i = 0; i < reg_num; i++) {
+ reg_backup[i] = rtw89_phy_read32_mask(rtwdev, reg[i], MASKDWORD);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI] Backup BB 0x%x = 0x%x\n", reg[i],
+ reg_backup[i]);
+ }
+}
+
+static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, const u32 reg[],
+ u32 reg_backup[], u32 reg_num)
+{
+ u32 i;
+
+ for (i = 0; i < reg_num; i++) {
+ rtw89_phy_write32_mask(rtwdev, reg[i], MASKDWORD, reg_backup[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI] Reload BB 0x%x = 0x%x\n", reg[i],
+ reg_backup[i]);
+ }
+}
+
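+/* Map a channel number to a flat index: 2 GHz channels occupy 0..13,
+ * followed by the 5 GHz channels in steps of two (36..64, 100..144,
+ * 149..177); unknown channels map to index 0.
+ */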
+static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel)
+{
+ u8 channel_index;
+
+ if (channel >= 1 && channel <= 14)
+ channel_index = channel - 1;
+ else if (channel >= 36 && channel <= 64)
+ channel_index = (channel - 36) / 2 + 14;
+ else if (channel >= 100 && channel <= 144)
+ channel_index = ((channel - 100) / 2) + 15 + 14;
+ else if (channel >= 149 && channel <= 177)
+ channel_index = ((channel - 149) / 2) + 38 + 14;
+ else
+ channel_index = 0;
+
+ return channel_index;
+}
+
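+/* Trigger a burst of PMAC TX at each reference power and poll the TSSI
+ * codeword report-ready bit (up to 100 retries of 30us each); returns
+ * false if any report times out.
+ */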
+static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, const s16 *power,
+ u32 *tssi_cw_rpt)
+{
+ u32 tx_counter, tx_counter_tmp;
+ const int retry = 100;
+ u32 tmp;
+ int j, k;
+
+ for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) {
+ rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1);
+
+ tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] 0x%x = 0x%08x path=%d\n",
+ _tssi_trigger[path], tmp, path);
+
+ if (j == 0)
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true);
+ else
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true);
+
+ tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
+ tx_counter_tmp -= tx_counter;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] First HWTXcounter=%d path=%d\n",
+ tx_counter_tmp, path);
+
+ for (k = 0; k < retry; k++) {
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
+ B_TSSI_CWRPT_RDY);
+ if (tmp)
+ break;
+
+ udelay(30);
+
+ tx_counter_tmp =
+ rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
+ tx_counter_tmp -= tx_counter;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n",
+ k, tx_counter_tmp, path);
+ }
+
+ if (k >= retry) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n",
+ k, path);
+
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+ return false;
+ }
+
+ tssi_cw_rpt[j] =
+ rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path],
+ B_TSSI_CWRPT);
+
+ _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false);
+
+ tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
+ tx_counter_tmp -= tx_counter;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] Final HWTXcounter=%d path=%d\n",
+ tx_counter_tmp, path);
+ }
+
+ return true;
+}
+
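+/* TSSI alignment calibration: transmit at fixed reference powers, read
+ * back the TSSI codeword reports and derive alignment offsets.
+ * offset_1 is the deviation of the reported slope from the expected
+ * one (the * 2 appears to scale the power delta into codeword steps);
+ * the remaining offsets apply the same aliment_diff to each slot's
+ * default codeword. Results are cached per band and per channel.
+ */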
+static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4,
+ 0x78e4, 0x49c0, 0x0d18, 0x0d80};
+ static const s16 power_2g[4] = {48, 20, 4, -8};
+ static const s16 power_5g[4] = {48, 20, 4, 4};
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3;
+ u32 tssi_cw_rpt[RTW8852BT_TSSI_PATH_NR] = {};
+ u8 channel = chan->channel;
+ u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel);
+ struct rtw8852bx_bb_tssi_bak tssi_bak;
+ s32 aliment_diff, tssi_cw_default;
+ u32 start_time, finish_time;
+ u32 bb_reg_backup[8] = {};
+ const s16 *power;
+ u8 band;
+ bool ok;
+ u32 tmp;
+ u8 j;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======> %s channel=%d path=%d\n", __func__, channel,
+ path);
+
+ start_time = ktime_get_ns();
+
+ if (chan->band_type == RTW89_BAND_2G)
+ power = power_2g;
+ else
+ power = power_5g;
+
+ if (channel >= 1 && channel <= 14)
+ band = TSSI_ALIMK_2G;
+ else if (channel >= 36 && channel <= 64)
+ band = TSSI_ALIMK_5GL;
+ else if (channel >= 100 && channel <= 144)
+ band = TSSI_ALIMK_5GM;
+ else if (channel >= 149 && channel <= 177)
+ band = TSSI_ALIMK_5GH;
+ else
+ band = TSSI_ALIMK_2G;
+
+ rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak);
+ _tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup,
+ ARRAY_SIZE(bb_reg_backup));
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
+
+ ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt);
+ if (!ok)
+ goto out;
+
+ for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] power[%d]=%d tssi_cw_rpt[%d]=%d\n", j,
+ power[j], j, tssi_cw_rpt[j]);
+ }
+
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1],
+ _tssi_cw_default_mask[1]);
+ tssi_cw_default = sign_extend32(tmp, 8);
+ tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) -
+ tssi_cw_rpt[1] + tssi_cw_default;
+ aliment_diff = tssi_alim_offset_1 - tssi_cw_default;
+
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2],
+ _tssi_cw_default_mask[2]);
+ tssi_cw_default = sign_extend32(tmp, 8);
+ tssi_alim_offset_2 = tssi_cw_default + aliment_diff;
+
+ tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3],
+ _tssi_cw_default_mask[3]);
+ tssi_cw_default = sign_extend32(tmp, 8);
+ tssi_alim_offset_3 = tssi_cw_default + aliment_diff;
+
+ if (path == RF_PATH_A) {
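+ /* B_P1_TSSI_ALIM1x field masks are reused for path A; the P0
+ * and P1 alignment fields appear to share one bit layout
+ */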
+ tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
+ FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
+ FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31),
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11),
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12),
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13));
+ } else {
+ tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) |
+ FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) |
+ FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n",
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13));
+ }
+
+ tssi_info->alignment_done[path][band] = true;
+ tssi_info->alignment_value[path][band][0] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
+ tssi_info->alignment_value[path][band][1] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
+ tssi_info->alignment_value[path][band][2] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
+ tssi_info->alignment_value[path][band][3] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);
+
+ tssi_info->check_backup_aligmk[path][ch_idx] = true;
+ tssi_info->alignment_backup_by_ch[path][ch_idx][0] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD);
+ tssi_info->alignment_backup_by_ch[path][ch_idx][1] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD);
+ tssi_info->alignment_backup_by_ch[path][ch_idx][2] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD);
+ tssi_info->alignment_backup_by_ch[path][ch_idx][3] =
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n",
+ path, band, R_P0_TSSI_ALIM1 + (path << 13),
+ tssi_info->alignment_value[path][band][0]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n",
+ path, band, R_P0_TSSI_ALIM3 + (path << 13),
+ tssi_info->alignment_value[path][band][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n",
+ path, band, R_P0_TSSI_ALIM2 + (path << 13),
+ tssi_info->alignment_value[path][band][2]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n",
+ path, band, R_P0_TSSI_ALIM4 + (path << 13),
+ tssi_info->alignment_value[path][band][3]);
+
+out:
+ _tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup,
+ ARRAY_SIZE(bb_reg_backup));
+ rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak);
+ rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0);
+
+ finish_time = ktime_get_ns();
+ tssi_info->tssi_alimk_time += finish_time - start_time;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[TSSI PA K] %s processing time = %d ns\n", __func__,
+ tssi_info->tssi_alimk_time);
+}
+
+void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 path;
+
+ for (path = 0; path < RF_PATH_NUM_8852BT; path++) {
+ dpk->cur_idx[path] = 0;
+ dpk->max_dpk_txagc[path] = 0x3F;
+ }
+
+ dpk->is_dpk_enable = true;
+ dpk->is_dpk_reload_en = false;
+ _set_dpd_backoff(rtwdev, RTW89_PHY_0);
+}
+
+void rtw8852bt_rck(struct rtw89_dev *rtwdev)
+{
+ u8 path;
+
+ for (path = 0; path < RF_PATH_NUM_8852BT; path++)
+ _rck(rtwdev, path);
+}
+
+void rtw8852bt_dack(struct rtw89_dev *rtwdev)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
+ _dac_cal(rtwdev, false);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
+}
+
+void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u32 tx_en;
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ _iqk_init(rtwdev);
+ _iqk(rtwdev, phy_idx, false);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
+}
+
+void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+ u32 tx_en;
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
+ rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ _rx_dck(rtwdev, phy_idx);
+
+ rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
+}
+
+void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ****** DPK Start (Ver: 0x%x) ******\n", RTW8852BT_DPK_VER);
+
+ if (_dpk_bypass_check(rtwdev, phy_idx))
+ _dpk_force_bypass(rtwdev, phy_idx);
+ else
+ _dpk_cal_select(rtwdev, phy_idx, RF_AB);
+}
+
+void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev)
+{
+ _dpk_track(rtwdev);
+}
+
+void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en)
+{
+ static const u32 reg[2] = {R_DPD_CH0A, R_DPD_CH0B};
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB);
+ u32 reg_backup[2] = {};
+ u32 tx_en;
+ u8 i;
+
+ _tssi_backup_bb_registers(rtwdev, phy, reg, reg_backup, 2);
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
+
+ _tssi_dpk_off(rtwdev, phy);
+ _tssi_disable(rtwdev, phy);
+
+ for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) {
+ _tssi_rf_setting(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i);
+ _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
+ _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
+ _tssi_set_dck(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_dac_gain_tbl(rtwdev, phy, i);
+ _tssi_slope_cal_org(rtwdev, phy, i);
+ _tssi_alignment_default(rtwdev, phy, i, true);
+ _tssi_set_tssi_slope(rtwdev, phy, i);
+
+ rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _tmac_tx_pause(rtwdev, phy, true);
+ if (hwtx_en)
+ _tssi_alimentk(rtwdev, phy, i);
+ _tmac_tx_pause(rtwdev, phy, false);
+ rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en);
+ }
+
+ _tssi_enable(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy);
+
+ _tssi_reload_bb_registers(rtwdev, phy, reg, reg_backup, 2);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
+}
+
+void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 channel = chan->channel;
+ u8 band;
+ u32 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======>%s phy=%d channel=%d\n", __func__, phy, channel);
+
+ if (channel >= 1 && channel <= 14)
+ band = TSSI_ALIMK_2G;
+ else if (channel >= 36 && channel <= 64)
+ band = TSSI_ALIMK_5GL;
+ else if (channel >= 100 && channel <= 144)
+ band = TSSI_ALIMK_5GM;
+ else if (channel >= 149 && channel <= 177)
+ band = TSSI_ALIMK_5GH;
+ else
+ band = TSSI_ALIMK_2G;
+
+ _tssi_disable(rtwdev, phy);
+
+ for (i = RF_PATH_A; i < RTW8852BT_TSSI_PATH_NR; i++) {
+ _tssi_rf_setting(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i);
+
+ if (tssi_info->alignment_done[i][band])
+ _tssi_alimentk_done(rtwdev, phy, i);
+ else
+ _tssi_alignment_default(rtwdev, phy, i, true);
+ }
+
+ _tssi_enable(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy);
+}
+
+static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, bool enable)
+{
+ const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0);
+ u8 channel = chan->channel;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n",
+ __func__, channel);
+
+ if (enable)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
+ __func__,
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, 0xc0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
+
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_A);
+ _tssi_alimentk_done(rtwdev, phy, RF_PATH_B);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n",
+ __func__,
+ rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT),
+ rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "======> %s SCAN_END\n", __func__);
+}
+
+void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
+ enum rtw89_phy_idx phy_idx)
+{
+ if (scan_start)
+ rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true);
+ else
+ rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
new file mode 100644
index 000000000000..09918835c6e8
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852BT_RFK_H__
+#define __RTW89_8852BT_RFK_H__
+
+#include "core.h"
+
+void rtw8852bt_rck(struct rtw89_dev *rtwdev);
+void rtw8852bt_dack(struct rtw89_dev *rtwdev);
+void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev);
+void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev);
+void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en);
+void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
+ enum rtw89_phy_idx phy_idx);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c
new file mode 100644
index 000000000000..782144bb7f49
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#include "rtw8852bt_rfk_table.h"
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_sys_defs[] = {
+ RTW89_DECL_RFK_WM(0x12a8, 0x0000000f, 0x4),
+ RTW89_DECL_RFK_WM(0x32a8, 0x0000000f, 0x4),
+ RTW89_DECL_RFK_WM(0x12bc, 0x000ffff0, 0x5555),
+ RTW89_DECL_RFK_WM(0x32bc, 0x000ffff0, 0x5555),
+ RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x16),
+ RTW89_DECL_RFK_WM(0x0304, 0x000000ff, 0x19),
+ RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x2041),
+ RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x2041),
+ RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x20012041),
+ RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x3),
+ RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x3),
+ RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x601e),
+ RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x601e),
+ RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x4),
+ RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x4),
+ RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x0),
+ RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_sys_a_defs_2g[] = {
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x33),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x33),
+ RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1e),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_a_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_sys_a_defs_5g[] = {
+ RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x44),
+ RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x44),
+ RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x0),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1d),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_a_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_sys_b_defs_2g[] = {
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x33),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x33),
+ RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1e),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_b_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_sys_b_defs_5g[] = {
+ RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x44),
+ RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x44),
+ RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x0),
+ RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1d),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_b_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0),
+ RTW89_DECL_RFK_WM(0x5800, 0xffffffff, 0x003f807f),
+ RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x40),
+ RTW89_DECL_RFK_WM(0x580c, 0x0fffff00, 0x00040),
+ RTW89_DECL_RFK_WM(0x5810, 0xffffffff, 0x59010000),
+ RTW89_DECL_RFK_WM(0x5814, 0x01ffffff, 0x002d000),
+ RTW89_DECL_RFK_WM(0x5814, 0xf8000000, 0x00),
+ RTW89_DECL_RFK_WM(0x5818, 0xffffffff, 0x002c1800),
+ RTW89_DECL_RFK_WM(0x581c, 0x3fffffff, 0x1dc80280),
+ RTW89_DECL_RFK_WM(0x5820, 0xffffffff, 0x00002080),
+ RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5834, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5838, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x5854, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x5858, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x5864, 0x07ffffff, 0x00801ff),
+ RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x16),
+ RTW89_DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58b4, 0x7fffffff, 0x0a002000),
+ RTW89_DECL_RFK_WM(0x58b8, 0x7fffffff, 0x00007628),
+ RTW89_DECL_RFK_WM(0x58bc, 0x07ffffff, 0x7a7807f),
+ RTW89_DECL_RFK_WM(0x58c0, 0xfffe0000, 0x003f),
+ RTW89_DECL_RFK_WM(0x58c4, 0xffffffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x0),
+ RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x58d0, 0x07ffffff, 0x2008101),
+ RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00),
+ RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x0ff),
+ RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x100),
+ RTW89_DECL_RFK_WM(0x58d8, 0xffffffff, 0x8008016c),
+ RTW89_DECL_RFK_WM(0x58dc, 0x0001ffff, 0x0807f),
+ RTW89_DECL_RFK_WM(0x58dc, 0xfff00000, 0xc00),
+ RTW89_DECL_RFK_WM(0x58f0, 0x0003ffff, 0x001ff),
+ RTW89_DECL_RFK_WM(0x58f4, 0x000fffff, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_defs_a);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0),
+ RTW89_DECL_RFK_WM(0x7800, 0xffffffff, 0x003f807f),
+ RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x40),
+ RTW89_DECL_RFK_WM(0x780c, 0x0fffff00, 0x00040),
+ RTW89_DECL_RFK_WM(0x7810, 0xffffffff, 0x59010000),
+ RTW89_DECL_RFK_WM(0x7814, 0x01ffffff, 0x002d000),
+ RTW89_DECL_RFK_WM(0x7814, 0xf8000000, 0x00),
+ RTW89_DECL_RFK_WM(0x7818, 0xffffffff, 0x002c1800),
+ RTW89_DECL_RFK_WM(0x781c, 0x3fffffff, 0x1dc80280),
+ RTW89_DECL_RFK_WM(0x7820, 0xffffffff, 0x00002080),
+ RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x1),
+ RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7834, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7838, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x7854, 0x3fffffff, 0x000115f2),
+ RTW89_DECL_RFK_WM(0x7858, 0x7fffffff, 0x0000121),
+ RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x0),
+ RTW89_DECL_RFK_WM(0x7864, 0x07ffffff, 0x00801ff),
+ RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x16),
+ RTW89_DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78b4, 0x7fffffff, 0x0a002000),
+ RTW89_DECL_RFK_WM(0x78b8, 0x7fffffff, 0x00007628),
+ RTW89_DECL_RFK_WM(0x78bc, 0x07ffffff, 0x7a7807f),
+ RTW89_DECL_RFK_WM(0x78c0, 0xfffe0000, 0x003f),
+ RTW89_DECL_RFK_WM(0x78c4, 0xffffffff, 0x0003ffff),
+ RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x000000),
+ RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x0),
+ RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x78d0, 0x07ffffff, 0x2008101),
+ RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00),
+ RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x0ff),
+ RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x100),
+ RTW89_DECL_RFK_WM(0x78d8, 0xffffffff, 0x8008016c),
+ RTW89_DECL_RFK_WM(0x78dc, 0x0001ffff, 0x0807f),
+ RTW89_DECL_RFK_WM(0x78dc, 0xfff00000, 0xc00),
+ RTW89_DECL_RFK_WM(0x78f0, 0x0003ffff, 0x001ff),
+ RTW89_DECL_RFK_WM(0x78f4, 0x000fffff, 0x000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_defs_b);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_he_tb_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fe),
+ RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x1f),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_he_tb_defs_a);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_he_tb_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fe),
+ RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x1f),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_he_tb_defs_b);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_dck_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x5814, 0x003ff000, 0x0ef),
+ RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dck_defs_a);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_dck_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x000),
+ RTW89_DECL_RFK_WM(0x7814, 0x003ff000, 0x0ef),
+ RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x0),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dck_defs_b);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_dac_gain_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000400, 0x1),
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000fff, 0x000),
+ RTW89_DECL_RFK_WM(0x58b0, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dac_gain_defs_a);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_dac_gain_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x78b0, 0x00000fff, 0x000),
+ RTW89_DECL_RFK_WM(0x78b0, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dac_gain_defs_b);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_slope_a_defs_2g[] = {
+ RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0801008),
+ RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201020),
+ RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0804008),
+ RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08081e28),
+ RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08081e28),
+ RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_a_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_slope_a_defs_5g[] = {
+ RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201019),
+ RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08081808),
+ RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_a_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_slope_b_defs_2g[] = {
+ RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0801008),
+ RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201020),
+ RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0804008),
+ RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08081e28),
+ RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08081e28),
+ RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_b_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_slope_b_defs_5g[] = {
+ RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201019),
+ RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008),
+ RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008),
+ RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808),
+ RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08081808),
+ RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08080808),
+ RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808),
+ RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_b_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_2g_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x029f57c0),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000077),
+ RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x029f5bc0),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000076),
+ RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_2g_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_5g1_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x007ff3d7),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000068),
+ RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_5g1_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_5g2_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00a003db),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000065),
+ RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_5g2_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_5g3_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x01101be2),
+ RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000065),
+ RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_5g3_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_2g_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x023f3fb9),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000075),
+ RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x01df3fb8),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000074),
+ RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_2g_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_5g1_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x010017e0),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000069),
+ RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_5g1_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_5g2_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x01201fe2),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000066),
+ RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_5g2_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_5g3_all_defs[] = {
+ RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721),
+ RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101),
+ RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x01602fe5),
+ RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000068),
+ RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000),
+ RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000),
+ RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_5g3_all_defs);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_slope_defs_a[] = {
+ RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_defs_a);
+
+static const struct rtw89_reg5_def rtw8852bt_tssi_slope_defs_b[] = {
+ RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x1),
+ RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x1),
+ RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x1),
+};
+
+RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_defs_b);
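
Note: the tables above are plain data consumed through the common RFK parser from phy.h (RTW89_DECL_RFK_WM packs an address/mask/data write, RTW89_DECLARE_RFK_TBL wraps the array in a rtw89_rfk_tbl). A minimal sketch of how calibration code might pick and apply one of the per-path, per-band system tables; the helper itself is illustrative and not part of this patch:

/* Illustrative only: apply the TSSI system table matching band/path.
 * rtw89_rfk_parser() and the *_tbl objects come from phy.h and the
 * declarations above; this wrapper is an assumption for the example.
 */
static void rtw8852bt_tssi_apply_sys(struct rtw89_dev *rtwdev,
				     enum rtw89_band band,
				     enum rtw89_rf_path path)
{
	const struct rtw89_rfk_tbl *tbl;

	if (path == RF_PATH_A)
		tbl = band == RTW89_BAND_2G ?
		      &rtw8852bt_tssi_sys_a_defs_2g_tbl :
		      &rtw8852bt_tssi_sys_a_defs_5g_tbl;
	else
		tbl = band == RTW89_BAND_2G ?
		      &rtw8852bt_tssi_sys_b_defs_2g_tbl :
		      &rtw8852bt_tssi_sys_b_defs_5g_tbl;

	rtw89_rfk_parser(rtwdev, tbl);
}
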
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h
new file mode 100644
index 000000000000..beb246237d17
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852BT_RFK_TABLE_H__
+#define __RTW89_8852BT_RFK_TABLE_H__
+
+#include "phy.h"
+
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_a_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_a_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_b_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_b_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_he_tb_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_he_tb_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dck_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dck_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dac_gain_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dac_gain_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_a_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_a_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_b_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_b_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_2g_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_5g1_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_5g2_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_5g3_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_2g_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_5g1_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_5g2_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_5g3_all_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_defs_b_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
index 17e6164855fa..193168dc7b6c 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c
@@ -73,6 +73,10 @@ static const u32 rtw8852c_c2h_regs[RTW89_H2CREG_MAX] = {
R_AX_C2HREG_DATA3_V1
};

+static const u32 rtw8852c_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = {
+ R_AX_C2HREG_DATA3_V1 + 3, R_AX_DBG_WOW,
+};
+
static const struct rtw89_page_regs rtw8852c_page_regs = {
.hci_fc_ctrl = R_AX_HCI_FC_CTRL_V1,
.ch_page_ctrl = R_AX_CH_PAGE_CTRL_V1,
@@ -203,6 +207,9 @@ static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev)
rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APDM_HPDN);
 rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);

+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0,
+ B_AX_OCP_L1_MASK, 0x7);
+
ret = read_poll_timeout(rtw89_read32, val32, val32 & B_AX_RDY_SYSPWR,
1000, 20000, false, rtwdev, R_AX_SYS_PW_CTRL);
if (ret)
@@ -266,7 +273,7 @@ static int rtw8852c_pwr_on_func(struct rtw89_dev *rtwdev)
ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, 0, XTAL_SI_SRAM2RFC);
if (ret)
return ret;
- ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0, XTAL_SI_LDO_LPS);
+ ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_2, 0x10, XTAL_SI_LDO_LPS);
if (ret)
return ret;
ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_XTAL_XMD_4, 0, XTAL_SI_LPS_CAP);
@@ -338,6 +345,7 @@ static int rtw8852c_pwr_off_func(struct rtw89_dev *rtwdev)
 return ret;

 rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_EN_WLON);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, B_AX_FEN_BB_GLB_RSTN | B_AX_FEN_BBRSTB);
rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
B_AX_R_SYM_FEN_WLBBGLB_1 | B_AX_R_SYM_FEN_WLBBFUN_1);
@@ -360,8 +368,11 @@ static int rtw8852c_pwr_off_func(struct rtw89_dev *rtwdev)
if (ret)
 return ret;

- rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, 0x0001A0B0);
+ rtw89_write32(rtwdev, R_AX_WLLPS_CTRL, SW_LPS_OPTION);
rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_XTAL_OFF_A_DIE);
+ rtw89_write32_set(rtwdev, R_AX_SYS_SWR_CTRL1, B_AX_SYM_CTRL_SPS_PWMFREQ);
+ rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0,
+ B_AX_REG_ZCDC_H_MASK, 0x3);
 rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_APFM_SWLPS);

 return 0;
@@ -2816,6 +2827,7 @@ static int rtw8852c_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
static int rtw8852c_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
{
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG);
rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
@@ -2933,7 +2945,9 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.dig_table = NULL,
.dig_regs = &rtw8852c_dig_regs,
.tssi_dbw_table = &rtw89_8852c_tssi_dbw_table,
+ .support_macid_num = RTW89_MAX_MAC_ID_NUM,
.support_chanctx_num = 2,
+ .support_rnr = false,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
@@ -2997,7 +3011,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = {
.c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8852c_c2h_regs,
.page_regs = &rtw8852c_page_regs,
- .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3,
+ .wow_reason_reg = rtw8852c_wow_wakeup_regs,
.cfo_src_fd = false,
.cfo_hw_comp = false,
.dcfo_comp = &rtw8852c_dcfo_comp,
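
Note: with .wow_reason_reg now pointing at a RTW89_WOW_REASON_NUM-sized table (the V1 C2H data register plus R_AX_DBG_WOW), callers index it per wakeup source. A hypothetical reader helper follows; the index parameter and the 8-bit read are assumptions, as neither is shown in this hunk:

/* Hypothetical: the patch only introduces the register table; the
 * index 'i' and the read width below are assumptions for illustration.
 */
static u8 rtw8852c_read_wow_reason(struct rtw89_dev *rtwdev, u8 i)
{
	const struct rtw89_chip_info *chip = rtwdev->chip;

	return rtw89_read8(rtwdev, chip->wow_reason_reg[i]);
}
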
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
index 654e3e5507cb..743f7014bf3e 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c
@@ -4070,12 +4070,11 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev,

 void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
{
struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
- DECLARE_BITMAP(map, RTW89_IQK_CHS_NR) = {};
+ struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V0] = {};
const struct rtw89_chan *chan;
enum rtw89_entity_mode mode;
u8 chan_idx;
u8 idx;
- u8 i;

 mode = rtw89_get_entity_mode(rtwdev);
switch (mode) {
@@ -4087,34 +4086,21 @@ void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_i
break;
}
- for (i = 0; i <= chan_idx; i++) {
- chan = rtw89_chan_get(rtwdev, i);
+ chan = rtw89_chan_get(rtwdev, chan_idx);
- for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
- if (rfk_mcc->ch[idx] == chan->channel &&
- rfk_mcc->band[idx] == chan->band_type) {
- if (i != chan_idx) {
- set_bit(idx, map);
- break;
- }
+ for (idx = 0; idx < ARRAY_SIZE(desc); idx++) {
+ struct rtw89_rfk_chan_desc *p = &desc[idx];
- goto bottom;
- }
- }
- }
+ p->ch = rfk_mcc->ch[idx];
- idx = find_first_zero_bit(map, RTW89_IQK_CHS_NR);
- if (idx == RTW89_IQK_CHS_NR) {
- rtw89_debug(rtwdev, RTW89_DBG_RFK,
- "%s: no empty rfk table; force replace the first\n",
- __func__);
- idx = 0;
+ p->has_band = true;
+ p->band = rfk_mcc->band[idx];
}
+ idx = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan);
+
rfk_mcc->ch[idx] = chan->channel;
rfk_mcc->band[idx] = chan->band_type;
-
-bottom:
rfk_mcc->table_idx = idx;
}
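
Note: the rewrite above drops the per-entity scan and bitmap bookkeeping. Each cached rfk_mcc slot is converted into a rtw89_rfk_chan_desc, and rtw89_rfk_chan_lookup() returns either the slot that already matches the target channel/band or one it deems replaceable. A reduced sketch of that flow with example slot contents; the descriptor layout and lookup call are taken from the hunk above, everything else is illustrative:

/* Illustrative wrapper: the slot values below are made up; the real
 * code fills the descriptors from rtwdev->rfk_mcc as shown above.
 */
static u8 rfk_slot_for_chan(struct rtw89_dev *rtwdev,
			    const struct rtw89_chan *chan)
{
	struct rtw89_rfk_chan_desc desc[2] = {
		{ .ch = 1,  .has_band = true, .band = RTW89_BAND_2G },
		{ .ch = 36, .has_band = true, .band = RTW89_BAND_5G },
	};

	/* Index of the matching slot, else one chosen for reuse. */
	return rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan);
}
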
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
index ab1a0aadc869..24c390b6f3d3 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_table.c
@@ -34521,7 +34521,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][48] = 72,
[0][0][1][0][RTW89_ETSI][48] = 127,
[0][0][1][0][RTW89_MKK][48] = 127,
- [0][0][1][0][RTW89_IC][48] = 127,
+ [0][0][1][0][RTW89_IC][48] = 72,
[0][0][1][0][RTW89_KCC][48] = 127,
[0][0][1][0][RTW89_ACMA][48] = 127,
[0][0][1][0][RTW89_CN][48] = 127,
@@ -34534,7 +34534,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][50] = 72,
[0][0][1][0][RTW89_ETSI][50] = 127,
[0][0][1][0][RTW89_MKK][50] = 127,
- [0][0][1][0][RTW89_IC][50] = 127,
+ [0][0][1][0][RTW89_IC][50] = 72,
[0][0][1][0][RTW89_KCC][50] = 127,
[0][0][1][0][RTW89_ACMA][50] = 127,
[0][0][1][0][RTW89_CN][50] = 127,
@@ -34547,7 +34547,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_FCC][52] = 72,
[0][0][1][0][RTW89_ETSI][52] = 127,
[0][0][1][0][RTW89_MKK][52] = 127,
- [0][0][1][0][RTW89_IC][52] = 127,
+ [0][0][1][0][RTW89_IC][52] = 72,
[0][0][1][0][RTW89_KCC][52] = 127,
[0][0][1][0][RTW89_ACMA][52] = 127,
[0][0][1][0][RTW89_CN][52] = 127,
@@ -34885,7 +34885,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][48] = 48,
[0][1][1][0][RTW89_ETSI][48] = 127,
[0][1][1][0][RTW89_MKK][48] = 127,
- [0][1][1][0][RTW89_IC][48] = 127,
+ [0][1][1][0][RTW89_IC][48] = 48,
[0][1][1][0][RTW89_KCC][48] = 127,
[0][1][1][0][RTW89_ACMA][48] = 127,
[0][1][1][0][RTW89_CN][48] = 127,
@@ -34898,7 +34898,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][50] = 48,
[0][1][1][0][RTW89_ETSI][50] = 127,
[0][1][1][0][RTW89_MKK][50] = 127,
- [0][1][1][0][RTW89_IC][50] = 127,
+ [0][1][1][0][RTW89_IC][50] = 48,
[0][1][1][0][RTW89_KCC][50] = 127,
[0][1][1][0][RTW89_ACMA][50] = 127,
[0][1][1][0][RTW89_CN][50] = 127,
@@ -34911,7 +34911,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_FCC][52] = 48,
[0][1][1][0][RTW89_ETSI][52] = 127,
[0][1][1][0][RTW89_MKK][52] = 127,
- [0][1][1][0][RTW89_IC][52] = 127,
+ [0][1][1][0][RTW89_IC][52] = 48,
[0][1][1][0][RTW89_KCC][52] = 127,
[0][1][1][0][RTW89_ACMA][52] = 127,
[0][1][1][0][RTW89_CN][52] = 127,
@@ -35249,7 +35249,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][48] = 72,
[0][0][2][0][RTW89_ETSI][48] = 127,
[0][0][2][0][RTW89_MKK][48] = 127,
- [0][0][2][0][RTW89_IC][48] = 127,
+ [0][0][2][0][RTW89_IC][48] = 72,
[0][0][2][0][RTW89_KCC][48] = 127,
[0][0][2][0][RTW89_ACMA][48] = 127,
[0][0][2][0][RTW89_CN][48] = 127,
@@ -35262,7 +35262,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][50] = 72,
[0][0][2][0][RTW89_ETSI][50] = 127,
[0][0][2][0][RTW89_MKK][50] = 127,
- [0][0][2][0][RTW89_IC][50] = 127,
+ [0][0][2][0][RTW89_IC][50] = 72,
[0][0][2][0][RTW89_KCC][50] = 127,
[0][0][2][0][RTW89_ACMA][50] = 127,
[0][0][2][0][RTW89_CN][50] = 127,
@@ -35275,7 +35275,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_FCC][52] = 72,
[0][0][2][0][RTW89_ETSI][52] = 127,
[0][0][2][0][RTW89_MKK][52] = 127,
- [0][0][2][0][RTW89_IC][52] = 127,
+ [0][0][2][0][RTW89_IC][52] = 72,
[0][0][2][0][RTW89_KCC][52] = 127,
[0][0][2][0][RTW89_ACMA][52] = 127,
[0][0][2][0][RTW89_CN][52] = 127,
@@ -35613,7 +35613,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][48] = 48,
[0][1][2][0][RTW89_ETSI][48] = 127,
[0][1][2][0][RTW89_MKK][48] = 127,
- [0][1][2][0][RTW89_IC][48] = 127,
+ [0][1][2][0][RTW89_IC][48] = 48,
[0][1][2][0][RTW89_KCC][48] = 127,
[0][1][2][0][RTW89_ACMA][48] = 127,
[0][1][2][0][RTW89_CN][48] = 127,
@@ -35626,7 +35626,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][50] = 50,
[0][1][2][0][RTW89_ETSI][50] = 127,
[0][1][2][0][RTW89_MKK][50] = 127,
- [0][1][2][0][RTW89_IC][50] = 127,
+ [0][1][2][0][RTW89_IC][50] = 50,
[0][1][2][0][RTW89_KCC][50] = 127,
[0][1][2][0][RTW89_ACMA][50] = 127,
[0][1][2][0][RTW89_CN][50] = 127,
@@ -35639,7 +35639,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_FCC][52] = 48,
[0][1][2][0][RTW89_ETSI][52] = 127,
[0][1][2][0][RTW89_MKK][52] = 127,
- [0][1][2][0][RTW89_IC][52] = 127,
+ [0][1][2][0][RTW89_IC][52] = 48,
[0][1][2][0][RTW89_KCC][52] = 127,
[0][1][2][0][RTW89_ACMA][52] = 127,
[0][1][2][0][RTW89_CN][52] = 127,
@@ -35977,7 +35977,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][48] = 48,
[0][1][2][1][RTW89_ETSI][48] = 127,
[0][1][2][1][RTW89_MKK][48] = 127,
- [0][1][2][1][RTW89_IC][48] = 127,
+ [0][1][2][1][RTW89_IC][48] = 48,
[0][1][2][1][RTW89_KCC][48] = 127,
[0][1][2][1][RTW89_ACMA][48] = 127,
[0][1][2][1][RTW89_CN][48] = 127,
@@ -35990,7 +35990,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][50] = 50,
[0][1][2][1][RTW89_ETSI][50] = 127,
[0][1][2][1][RTW89_MKK][50] = 127,
- [0][1][2][1][RTW89_IC][50] = 127,
+ [0][1][2][1][RTW89_IC][50] = 50,
[0][1][2][1][RTW89_KCC][50] = 127,
[0][1][2][1][RTW89_ACMA][50] = 127,
[0][1][2][1][RTW89_CN][50] = 127,
@@ -36003,7 +36003,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_FCC][52] = 48,
[0][1][2][1][RTW89_ETSI][52] = 127,
[0][1][2][1][RTW89_MKK][52] = 127,
- [0][1][2][1][RTW89_IC][52] = 127,
+ [0][1][2][1][RTW89_IC][52] = 48,
[0][1][2][1][RTW89_KCC][52] = 127,
[0][1][2][1][RTW89_ACMA][52] = 127,
[0][1][2][1][RTW89_CN][52] = 127,
@@ -36172,7 +36172,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][47] = 68,
[1][0][2][0][RTW89_ETSI][47] = 127,
[1][0][2][0][RTW89_MKK][47] = 127,
- [1][0][2][0][RTW89_IC][47] = 127,
+ [1][0][2][0][RTW89_IC][47] = 68,
[1][0][2][0][RTW89_KCC][47] = 127,
[1][0][2][0][RTW89_ACMA][47] = 127,
[1][0][2][0][RTW89_CN][47] = 127,
@@ -36185,7 +36185,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_FCC][51] = 68,
[1][0][2][0][RTW89_ETSI][51] = 127,
[1][0][2][0][RTW89_MKK][51] = 127,
- [1][0][2][0][RTW89_IC][51] = 127,
+ [1][0][2][0][RTW89_IC][51] = 68,
[1][0][2][0][RTW89_KCC][51] = 127,
[1][0][2][0][RTW89_ACMA][51] = 127,
[1][0][2][0][RTW89_CN][51] = 127,
@@ -36354,7 +36354,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_FCC][47] = 62,
[1][1][2][0][RTW89_ETSI][47] = 127,
[1][1][2][0][RTW89_MKK][47] = 127,
- [1][1][2][0][RTW89_IC][47] = 127,
+ [1][1][2][0][RTW89_IC][47] = 62,
[1][1][2][0][RTW89_KCC][47] = 127,
[1][1][2][0][RTW89_ACMA][47] = 127,
[1][1][2][0][RTW89_CN][47] = 127,
@@ -36367,7 +36367,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_FCC][51] = 60,
[1][1][2][0][RTW89_ETSI][51] = 127,
[1][1][2][0][RTW89_MKK][51] = 127,
- [1][1][2][0][RTW89_IC][51] = 127,
+ [1][1][2][0][RTW89_IC][51] = 60,
[1][1][2][0][RTW89_KCC][51] = 127,
[1][1][2][0][RTW89_ACMA][51] = 127,
[1][1][2][0][RTW89_CN][51] = 127,
@@ -36536,7 +36536,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_FCC][47] = 62,
[1][1][2][1][RTW89_ETSI][47] = 127,
[1][1][2][1][RTW89_MKK][47] = 127,
- [1][1][2][1][RTW89_IC][47] = 127,
+ [1][1][2][1][RTW89_IC][47] = 62,
[1][1][2][1][RTW89_KCC][47] = 127,
[1][1][2][1][RTW89_ACMA][47] = 127,
[1][1][2][1][RTW89_CN][47] = 127,
@@ -36549,7 +36549,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_FCC][51] = 60,
[1][1][2][1][RTW89_ETSI][51] = 127,
[1][1][2][1][RTW89_MKK][51] = 127,
- [1][1][2][1][RTW89_IC][51] = 127,
+ [1][1][2][1][RTW89_IC][51] = 60,
[1][1][2][1][RTW89_KCC][51] = 127,
[1][1][2][1][RTW89_ACMA][51] = 127,
[1][1][2][1][RTW89_CN][51] = 127,
@@ -36640,7 +36640,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_FCC][49] = 62,
[2][0][2][0][RTW89_ETSI][49] = 127,
[2][0][2][0][RTW89_MKK][49] = 127,
- [2][0][2][0][RTW89_IC][49] = 127,
+ [2][0][2][0][RTW89_IC][49] = 62,
[2][0][2][0][RTW89_KCC][49] = 127,
[2][0][2][0][RTW89_ACMA][49] = 127,
[2][0][2][0][RTW89_CN][49] = 127,
@@ -36731,7 +36731,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_FCC][49] = 62,
[2][1][2][0][RTW89_ETSI][49] = 127,
[2][1][2][0][RTW89_MKK][49] = 127,
- [2][1][2][0][RTW89_IC][49] = 127,
+ [2][1][2][0][RTW89_IC][49] = 62,
[2][1][2][0][RTW89_KCC][49] = 127,
[2][1][2][0][RTW89_ACMA][49] = 127,
[2][1][2][0][RTW89_CN][49] = 127,
@@ -36822,7 +36822,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_FCC][49] = 62,
[2][1][2][1][RTW89_ETSI][49] = 127,
[2][1][2][1][RTW89_MKK][49] = 127,
- [2][1][2][1][RTW89_IC][49] = 127,
+ [2][1][2][1][RTW89_IC][49] = 62,
[2][1][2][1][RTW89_KCC][49] = 127,
[2][1][2][1][RTW89_ACMA][49] = 127,
[2][1][2][1][RTW89_CN][49] = 127,
@@ -36861,7 +36861,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_FCC][45] = 52,
[3][0][2][0][RTW89_ETSI][45] = 127,
[3][0][2][0][RTW89_MKK][45] = 127,
- [3][0][2][0][RTW89_IC][45] = 127,
+ [3][0][2][0][RTW89_IC][45] = 52,
[3][0][2][0][RTW89_KCC][45] = 127,
[3][0][2][0][RTW89_ACMA][45] = 127,
[3][0][2][0][RTW89_CN][45] = 127,
@@ -36900,7 +36900,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_FCC][45] = 46,
[3][1][2][0][RTW89_ETSI][45] = 127,
[3][1][2][0][RTW89_MKK][45] = 127,
- [3][1][2][0][RTW89_IC][45] = 127,
+ [3][1][2][0][RTW89_IC][45] = 46,
[3][1][2][0][RTW89_KCC][45] = 127,
[3][1][2][0][RTW89_ACMA][45] = 127,
[3][1][2][0][RTW89_CN][45] = 127,
@@ -36939,7 +36939,7 @@ const s8 rtw89_8852c_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_FCC][45] = 46,
[3][1][2][1][RTW89_ETSI][45] = 127,
[3][1][2][1][RTW89_MKK][45] = 127,
- [3][1][2][1][RTW89_IC][45] = 127,
+ [3][1][2][1][RTW89_IC][45] = 46,
[3][1][2][1][RTW89_KCC][45] = 127,
[3][1][2][1][RTW89_ACMA][45] = 127,
[3][1][2][1][RTW89_CN][45] = 127,
@@ -36958,1476 +36958,986 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[RTW89_6G_CH_NUM] = {
[0][0][1][0][RTW89_WW][0][0] = 24,
[0][0][1][0][RTW89_WW][1][0] = 24,
- [0][0][1][0][RTW89_WW][2][0] = 56,
[0][0][1][0][RTW89_WW][0][2] = 22,
[0][0][1][0][RTW89_WW][1][2] = 22,
- [0][0][1][0][RTW89_WW][2][2] = 56,
[0][0][1][0][RTW89_WW][0][4] = 22,
[0][0][1][0][RTW89_WW][1][4] = 22,
- [0][0][1][0][RTW89_WW][2][4] = 56,
[0][0][1][0][RTW89_WW][0][6] = 22,
[0][0][1][0][RTW89_WW][1][6] = 22,
- [0][0][1][0][RTW89_WW][2][6] = 56,
[0][0][1][0][RTW89_WW][0][8] = 22,
[0][0][1][0][RTW89_WW][1][8] = 22,
- [0][0][1][0][RTW89_WW][2][8] = 56,
[0][0][1][0][RTW89_WW][0][10] = 22,
[0][0][1][0][RTW89_WW][1][10] = 22,
- [0][0][1][0][RTW89_WW][2][10] = 56,
[0][0][1][0][RTW89_WW][0][12] = 22,
[0][0][1][0][RTW89_WW][1][12] = 22,
- [0][0][1][0][RTW89_WW][2][12] = 56,
[0][0][1][0][RTW89_WW][0][14] = 22,
[0][0][1][0][RTW89_WW][1][14] = 22,
- [0][0][1][0][RTW89_WW][2][14] = 56,
[0][0][1][0][RTW89_WW][0][15] = 22,
[0][0][1][0][RTW89_WW][1][15] = 22,
- [0][0][1][0][RTW89_WW][2][15] = 56,
[0][0][1][0][RTW89_WW][0][17] = 22,
[0][0][1][0][RTW89_WW][1][17] = 22,
- [0][0][1][0][RTW89_WW][2][17] = 56,
[0][0][1][0][RTW89_WW][0][19] = 22,
[0][0][1][0][RTW89_WW][1][19] = 22,
- [0][0][1][0][RTW89_WW][2][19] = 56,
[0][0][1][0][RTW89_WW][0][21] = 22,
[0][0][1][0][RTW89_WW][1][21] = 22,
- [0][0][1][0][RTW89_WW][2][21] = 56,
[0][0][1][0][RTW89_WW][0][23] = 22,
[0][0][1][0][RTW89_WW][1][23] = 22,
- [0][0][1][0][RTW89_WW][2][23] = 70,
[0][0][1][0][RTW89_WW][0][25] = 22,
[0][0][1][0][RTW89_WW][1][25] = 22,
- [0][0][1][0][RTW89_WW][2][25] = 70,
[0][0][1][0][RTW89_WW][0][27] = 22,
[0][0][1][0][RTW89_WW][1][27] = 22,
- [0][0][1][0][RTW89_WW][2][27] = 70,
[0][0][1][0][RTW89_WW][0][29] = 22,
[0][0][1][0][RTW89_WW][1][29] = 22,
- [0][0][1][0][RTW89_WW][2][29] = 70,
[0][0][1][0][RTW89_WW][0][30] = 22,
[0][0][1][0][RTW89_WW][1][30] = 22,
- [0][0][1][0][RTW89_WW][2][30] = 70,
[0][0][1][0][RTW89_WW][0][32] = 22,
[0][0][1][0][RTW89_WW][1][32] = 22,
- [0][0][1][0][RTW89_WW][2][32] = 70,
[0][0][1][0][RTW89_WW][0][34] = 22,
[0][0][1][0][RTW89_WW][1][34] = 22,
- [0][0][1][0][RTW89_WW][2][34] = 70,
[0][0][1][0][RTW89_WW][0][36] = 22,
[0][0][1][0][RTW89_WW][1][36] = 22,
- [0][0][1][0][RTW89_WW][2][36] = 70,
[0][0][1][0][RTW89_WW][0][38] = 22,
[0][0][1][0][RTW89_WW][1][38] = 22,
- [0][0][1][0][RTW89_WW][2][38] = 70,
[0][0][1][0][RTW89_WW][0][40] = 22,
[0][0][1][0][RTW89_WW][1][40] = 22,
- [0][0][1][0][RTW89_WW][2][40] = 70,
[0][0][1][0][RTW89_WW][0][42] = 22,
[0][0][1][0][RTW89_WW][1][42] = 22,
- [0][0][1][0][RTW89_WW][2][42] = 70,
[0][0][1][0][RTW89_WW][0][44] = 22,
[0][0][1][0][RTW89_WW][1][44] = 22,
- [0][0][1][0][RTW89_WW][2][44] = 70,
[0][0][1][0][RTW89_WW][0][45] = 22,
[0][0][1][0][RTW89_WW][1][45] = 22,
- [0][0][1][0][RTW89_WW][2][45] = 70,
[0][0][1][0][RTW89_WW][0][47] = 22,
[0][0][1][0][RTW89_WW][1][47] = 22,
- [0][0][1][0][RTW89_WW][2][47] = 70,
[0][0][1][0][RTW89_WW][0][49] = 24,
[0][0][1][0][RTW89_WW][1][49] = 24,
- [0][0][1][0][RTW89_WW][2][49] = 70,
[0][0][1][0][RTW89_WW][0][51] = 22,
[0][0][1][0][RTW89_WW][1][51] = 22,
- [0][0][1][0][RTW89_WW][2][51] = 70,
[0][0][1][0][RTW89_WW][0][53] = 22,
[0][0][1][0][RTW89_WW][1][53] = 22,
- [0][0][1][0][RTW89_WW][2][53] = 70,
[0][0][1][0][RTW89_WW][0][55] = 22,
[0][0][1][0][RTW89_WW][1][55] = 22,
- [0][0][1][0][RTW89_WW][2][55] = 68,
[0][0][1][0][RTW89_WW][0][57] = 22,
[0][0][1][0][RTW89_WW][1][57] = 22,
- [0][0][1][0][RTW89_WW][2][57] = 68,
[0][0][1][0][RTW89_WW][0][59] = 22,
[0][0][1][0][RTW89_WW][1][59] = 22,
- [0][0][1][0][RTW89_WW][2][59] = 68,
[0][0][1][0][RTW89_WW][0][60] = 22,
[0][0][1][0][RTW89_WW][1][60] = 22,
- [0][0][1][0][RTW89_WW][2][60] = 68,
[0][0][1][0][RTW89_WW][0][62] = 22,
[0][0][1][0][RTW89_WW][1][62] = 22,
- [0][0][1][0][RTW89_WW][2][62] = 68,
[0][0][1][0][RTW89_WW][0][64] = 22,
[0][0][1][0][RTW89_WW][1][64] = 22,
- [0][0][1][0][RTW89_WW][2][64] = 68,
[0][0][1][0][RTW89_WW][0][66] = 22,
[0][0][1][0][RTW89_WW][1][66] = 22,
- [0][0][1][0][RTW89_WW][2][66] = 68,
[0][0][1][0][RTW89_WW][0][68] = 22,
[0][0][1][0][RTW89_WW][1][68] = 22,
- [0][0][1][0][RTW89_WW][2][68] = 68,
[0][0][1][0][RTW89_WW][0][70] = 24,
[0][0][1][0][RTW89_WW][1][70] = 24,
- [0][0][1][0][RTW89_WW][2][70] = 68,
[0][0][1][0][RTW89_WW][0][72] = 22,
[0][0][1][0][RTW89_WW][1][72] = 22,
- [0][0][1][0][RTW89_WW][2][72] = 68,
[0][0][1][0][RTW89_WW][0][74] = 22,
[0][0][1][0][RTW89_WW][1][74] = 22,
- [0][0][1][0][RTW89_WW][2][74] = 68,
[0][0][1][0][RTW89_WW][0][75] = 22,
[0][0][1][0][RTW89_WW][1][75] = 22,
- [0][0][1][0][RTW89_WW][2][75] = 68,
[0][0][1][0][RTW89_WW][0][77] = 22,
[0][0][1][0][RTW89_WW][1][77] = 22,
- [0][0][1][0][RTW89_WW][2][77] = 68,
[0][0][1][0][RTW89_WW][0][79] = 22,
[0][0][1][0][RTW89_WW][1][79] = 22,
- [0][0][1][0][RTW89_WW][2][79] = 68,
[0][0][1][0][RTW89_WW][0][81] = 22,
[0][0][1][0][RTW89_WW][1][81] = 22,
- [0][0][1][0][RTW89_WW][2][81] = 68,
[0][0][1][0][RTW89_WW][0][83] = 22,
[0][0][1][0][RTW89_WW][1][83] = 22,
- [0][0][1][0][RTW89_WW][2][83] = 68,
[0][0][1][0][RTW89_WW][0][85] = 22,
[0][0][1][0][RTW89_WW][1][85] = 22,
- [0][0][1][0][RTW89_WW][2][85] = 68,
[0][0][1][0][RTW89_WW][0][87] = 22,
[0][0][1][0][RTW89_WW][1][87] = 22,
- [0][0][1][0][RTW89_WW][2][87] = 0,
[0][0][1][0][RTW89_WW][0][89] = 22,
[0][0][1][0][RTW89_WW][1][89] = 22,
- [0][0][1][0][RTW89_WW][2][89] = 0,
[0][0][1][0][RTW89_WW][0][90] = 22,
[0][0][1][0][RTW89_WW][1][90] = 22,
- [0][0][1][0][RTW89_WW][2][90] = 0,
[0][0][1][0][RTW89_WW][0][92] = 22,
[0][0][1][0][RTW89_WW][1][92] = 22,
- [0][0][1][0][RTW89_WW][2][92] = 0,
[0][0][1][0][RTW89_WW][0][94] = 22,
[0][0][1][0][RTW89_WW][1][94] = 22,
- [0][0][1][0][RTW89_WW][2][94] = 0,
[0][0][1][0][RTW89_WW][0][96] = 22,
[0][0][1][0][RTW89_WW][1][96] = 22,
- [0][0][1][0][RTW89_WW][2][96] = 0,
[0][0][1][0][RTW89_WW][0][98] = 22,
[0][0][1][0][RTW89_WW][1][98] = 22,
- [0][0][1][0][RTW89_WW][2][98] = 0,
[0][0][1][0][RTW89_WW][0][100] = 22,
[0][0][1][0][RTW89_WW][1][100] = 22,
- [0][0][1][0][RTW89_WW][2][100] = 0,
[0][0][1][0][RTW89_WW][0][102] = 22,
[0][0][1][0][RTW89_WW][1][102] = 22,
- [0][0][1][0][RTW89_WW][2][102] = 0,
[0][0][1][0][RTW89_WW][0][104] = 22,
[0][0][1][0][RTW89_WW][1][104] = 22,
- [0][0][1][0][RTW89_WW][2][104] = 0,
[0][0][1][0][RTW89_WW][0][105] = 22,
[0][0][1][0][RTW89_WW][1][105] = 22,
- [0][0][1][0][RTW89_WW][2][105] = 0,
[0][0][1][0][RTW89_WW][0][107] = 24,
[0][0][1][0][RTW89_WW][1][107] = 24,
- [0][0][1][0][RTW89_WW][2][107] = 0,
[0][0][1][0][RTW89_WW][0][109] = 24,
[0][0][1][0][RTW89_WW][1][109] = 24,
- [0][0][1][0][RTW89_WW][2][109] = 0,
[0][0][1][0][RTW89_WW][0][111] = 0,
[0][0][1][0][RTW89_WW][1][111] = 0,
- [0][0][1][0][RTW89_WW][2][111] = 0,
[0][0][1][0][RTW89_WW][0][113] = 0,
[0][0][1][0][RTW89_WW][1][113] = 0,
- [0][0][1][0][RTW89_WW][2][113] = 0,
[0][0][1][0][RTW89_WW][0][115] = 0,
[0][0][1][0][RTW89_WW][1][115] = 0,
- [0][0][1][0][RTW89_WW][2][115] = 0,
[0][0][1][0][RTW89_WW][0][117] = 0,
[0][0][1][0][RTW89_WW][1][117] = 0,
- [0][0][1][0][RTW89_WW][2][117] = 0,
[0][0][1][0][RTW89_WW][0][119] = 0,
[0][0][1][0][RTW89_WW][1][119] = 0,
- [0][0][1][0][RTW89_WW][2][119] = 0,
[0][1][1][0][RTW89_WW][0][0] = -2,
[0][1][1][0][RTW89_WW][1][0] = -2,
- [0][1][1][0][RTW89_WW][2][0] = 54,
[0][1][1][0][RTW89_WW][0][2] = -4,
[0][1][1][0][RTW89_WW][1][2] = -4,
- [0][1][1][0][RTW89_WW][2][2] = 54,
[0][1][1][0][RTW89_WW][0][4] = -4,
[0][1][1][0][RTW89_WW][1][4] = -4,
- [0][1][1][0][RTW89_WW][2][4] = 54,
[0][1][1][0][RTW89_WW][0][6] = -4,
[0][1][1][0][RTW89_WW][1][6] = -4,
- [0][1][1][0][RTW89_WW][2][6] = 54,
[0][1][1][0][RTW89_WW][0][8] = -4,
[0][1][1][0][RTW89_WW][1][8] = -4,
- [0][1][1][0][RTW89_WW][2][8] = 54,
[0][1][1][0][RTW89_WW][0][10] = -4,
[0][1][1][0][RTW89_WW][1][10] = -4,
- [0][1][1][0][RTW89_WW][2][10] = 54,
[0][1][1][0][RTW89_WW][0][12] = -4,
[0][1][1][0][RTW89_WW][1][12] = -4,
- [0][1][1][0][RTW89_WW][2][12] = 54,
[0][1][1][0][RTW89_WW][0][14] = -4,
[0][1][1][0][RTW89_WW][1][14] = -4,
- [0][1][1][0][RTW89_WW][2][14] = 54,
[0][1][1][0][RTW89_WW][0][15] = -4,
[0][1][1][0][RTW89_WW][1][15] = -4,
- [0][1][1][0][RTW89_WW][2][15] = 54,
[0][1][1][0][RTW89_WW][0][17] = -4,
[0][1][1][0][RTW89_WW][1][17] = -4,
- [0][1][1][0][RTW89_WW][2][17] = 54,
[0][1][1][0][RTW89_WW][0][19] = -4,
[0][1][1][0][RTW89_WW][1][19] = -4,
- [0][1][1][0][RTW89_WW][2][19] = 54,
[0][1][1][0][RTW89_WW][0][21] = -4,
[0][1][1][0][RTW89_WW][1][21] = -4,
- [0][1][1][0][RTW89_WW][2][21] = 54,
[0][1][1][0][RTW89_WW][0][23] = -4,
[0][1][1][0][RTW89_WW][1][23] = -4,
- [0][1][1][0][RTW89_WW][2][23] = 68,
[0][1][1][0][RTW89_WW][0][25] = -4,
[0][1][1][0][RTW89_WW][1][25] = -4,
- [0][1][1][0][RTW89_WW][2][25] = 68,
[0][1][1][0][RTW89_WW][0][27] = -4,
[0][1][1][0][RTW89_WW][1][27] = -4,
- [0][1][1][0][RTW89_WW][2][27] = 68,
[0][1][1][0][RTW89_WW][0][29] = -4,
[0][1][1][0][RTW89_WW][1][29] = -4,
- [0][1][1][0][RTW89_WW][2][29] = 68,
[0][1][1][0][RTW89_WW][0][30] = -4,
[0][1][1][0][RTW89_WW][1][30] = -4,
- [0][1][1][0][RTW89_WW][2][30] = 68,
[0][1][1][0][RTW89_WW][0][32] = -4,
[0][1][1][0][RTW89_WW][1][32] = -4,
- [0][1][1][0][RTW89_WW][2][32] = 68,
[0][1][1][0][RTW89_WW][0][34] = -4,
[0][1][1][0][RTW89_WW][1][34] = -4,
- [0][1][1][0][RTW89_WW][2][34] = 68,
[0][1][1][0][RTW89_WW][0][36] = -4,
[0][1][1][0][RTW89_WW][1][36] = -4,
- [0][1][1][0][RTW89_WW][2][36] = 68,
[0][1][1][0][RTW89_WW][0][38] = -4,
[0][1][1][0][RTW89_WW][1][38] = -4,
- [0][1][1][0][RTW89_WW][2][38] = 68,
[0][1][1][0][RTW89_WW][0][40] = -4,
[0][1][1][0][RTW89_WW][1][40] = -4,
- [0][1][1][0][RTW89_WW][2][40] = 68,
[0][1][1][0][RTW89_WW][0][42] = -4,
[0][1][1][0][RTW89_WW][1][42] = -4,
- [0][1][1][0][RTW89_WW][2][42] = 68,
[0][1][1][0][RTW89_WW][0][44] = -2,
[0][1][1][0][RTW89_WW][1][44] = -2,
- [0][1][1][0][RTW89_WW][2][44] = 68,
[0][1][1][0][RTW89_WW][0][45] = -2,
[0][1][1][0][RTW89_WW][1][45] = -2,
- [0][1][1][0][RTW89_WW][2][45] = 70,
[0][1][1][0][RTW89_WW][0][47] = -2,
[0][1][1][0][RTW89_WW][1][47] = -2,
- [0][1][1][0][RTW89_WW][2][47] = 68,
[0][1][1][0][RTW89_WW][0][49] = -2,
[0][1][1][0][RTW89_WW][1][49] = -2,
- [0][1][1][0][RTW89_WW][2][49] = 68,
[0][1][1][0][RTW89_WW][0][51] = -2,
[0][1][1][0][RTW89_WW][1][51] = -2,
- [0][1][1][0][RTW89_WW][2][51] = 68,
[0][1][1][0][RTW89_WW][0][53] = -2,
[0][1][1][0][RTW89_WW][1][53] = -2,
- [0][1][1][0][RTW89_WW][2][53] = 68,
[0][1][1][0][RTW89_WW][0][55] = -2,
[0][1][1][0][RTW89_WW][1][55] = -2,
- [0][1][1][0][RTW89_WW][2][55] = 68,
[0][1][1][0][RTW89_WW][0][57] = -2,
[0][1][1][0][RTW89_WW][1][57] = -2,
- [0][1][1][0][RTW89_WW][2][57] = 68,
[0][1][1][0][RTW89_WW][0][59] = -2,
[0][1][1][0][RTW89_WW][1][59] = -2,
- [0][1][1][0][RTW89_WW][2][59] = 68,
[0][1][1][0][RTW89_WW][0][60] = -2,
[0][1][1][0][RTW89_WW][1][60] = -2,
- [0][1][1][0][RTW89_WW][2][60] = 68,
[0][1][1][0][RTW89_WW][0][62] = -2,
[0][1][1][0][RTW89_WW][1][62] = -2,
- [0][1][1][0][RTW89_WW][2][62] = 68,
[0][1][1][0][RTW89_WW][0][64] = -2,
[0][1][1][0][RTW89_WW][1][64] = -2,
- [0][1][1][0][RTW89_WW][2][64] = 68,
[0][1][1][0][RTW89_WW][0][66] = -2,
[0][1][1][0][RTW89_WW][1][66] = -2,
- [0][1][1][0][RTW89_WW][2][66] = 68,
[0][1][1][0][RTW89_WW][0][68] = -2,
[0][1][1][0][RTW89_WW][1][68] = -2,
- [0][1][1][0][RTW89_WW][2][68] = 68,
[0][1][1][0][RTW89_WW][0][70] = -2,
[0][1][1][0][RTW89_WW][1][70] = -2,
- [0][1][1][0][RTW89_WW][2][70] = 68,
[0][1][1][0][RTW89_WW][0][72] = -2,
[0][1][1][0][RTW89_WW][1][72] = -2,
- [0][1][1][0][RTW89_WW][2][72] = 68,
[0][1][1][0][RTW89_WW][0][74] = -2,
[0][1][1][0][RTW89_WW][1][74] = -2,
- [0][1][1][0][RTW89_WW][2][74] = 68,
[0][1][1][0][RTW89_WW][0][75] = -2,
[0][1][1][0][RTW89_WW][1][75] = -2,
- [0][1][1][0][RTW89_WW][2][75] = 68,
[0][1][1][0][RTW89_WW][0][77] = -2,
[0][1][1][0][RTW89_WW][1][77] = -2,
- [0][1][1][0][RTW89_WW][2][77] = 68,
[0][1][1][0][RTW89_WW][0][79] = -2,
[0][1][1][0][RTW89_WW][1][79] = -2,
- [0][1][1][0][RTW89_WW][2][79] = 68,
[0][1][1][0][RTW89_WW][0][81] = -2,
[0][1][1][0][RTW89_WW][1][81] = -2,
- [0][1][1][0][RTW89_WW][2][81] = 68,
[0][1][1][0][RTW89_WW][0][83] = -2,
[0][1][1][0][RTW89_WW][1][83] = -2,
- [0][1][1][0][RTW89_WW][2][83] = 68,
[0][1][1][0][RTW89_WW][0][85] = -2,
[0][1][1][0][RTW89_WW][1][85] = -2,
- [0][1][1][0][RTW89_WW][2][85] = 68,
[0][1][1][0][RTW89_WW][0][87] = -2,
[0][1][1][0][RTW89_WW][1][87] = -2,
- [0][1][1][0][RTW89_WW][2][87] = 0,
[0][1][1][0][RTW89_WW][0][89] = -2,
[0][1][1][0][RTW89_WW][1][89] = -2,
- [0][1][1][0][RTW89_WW][2][89] = 0,
[0][1][1][0][RTW89_WW][0][90] = -2,
[0][1][1][0][RTW89_WW][1][90] = -2,
- [0][1][1][0][RTW89_WW][2][90] = 0,
[0][1][1][0][RTW89_WW][0][92] = -2,
[0][1][1][0][RTW89_WW][1][92] = -2,
- [0][1][1][0][RTW89_WW][2][92] = 0,
[0][1][1][0][RTW89_WW][0][94] = -2,
[0][1][1][0][RTW89_WW][1][94] = -2,
- [0][1][1][0][RTW89_WW][2][94] = 0,
[0][1][1][0][RTW89_WW][0][96] = -2,
[0][1][1][0][RTW89_WW][1][96] = -2,
- [0][1][1][0][RTW89_WW][2][96] = 0,
[0][1][1][0][RTW89_WW][0][98] = -2,
[0][1][1][0][RTW89_WW][1][98] = -2,
- [0][1][1][0][RTW89_WW][2][98] = 0,
[0][1][1][0][RTW89_WW][0][100] = -2,
[0][1][1][0][RTW89_WW][1][100] = -2,
- [0][1][1][0][RTW89_WW][2][100] = 0,
[0][1][1][0][RTW89_WW][0][102] = -2,
[0][1][1][0][RTW89_WW][1][102] = -2,
- [0][1][1][0][RTW89_WW][2][102] = 0,
[0][1][1][0][RTW89_WW][0][104] = -2,
[0][1][1][0][RTW89_WW][1][104] = -2,
- [0][1][1][0][RTW89_WW][2][104] = 0,
[0][1][1][0][RTW89_WW][0][105] = -2,
[0][1][1][0][RTW89_WW][1][105] = -2,
- [0][1][1][0][RTW89_WW][2][105] = 0,
[0][1][1][0][RTW89_WW][0][107] = 1,
[0][1][1][0][RTW89_WW][1][107] = 1,
- [0][1][1][0][RTW89_WW][2][107] = 0,
[0][1][1][0][RTW89_WW][0][109] = 1,
[0][1][1][0][RTW89_WW][1][109] = 1,
- [0][1][1][0][RTW89_WW][2][109] = 0,
[0][1][1][0][RTW89_WW][0][111] = 0,
[0][1][1][0][RTW89_WW][1][111] = 0,
- [0][1][1][0][RTW89_WW][2][111] = 0,
[0][1][1][0][RTW89_WW][0][113] = 0,
[0][1][1][0][RTW89_WW][1][113] = 0,
- [0][1][1][0][RTW89_WW][2][113] = 0,
[0][1][1][0][RTW89_WW][0][115] = 0,
[0][1][1][0][RTW89_WW][1][115] = 0,
- [0][1][1][0][RTW89_WW][2][115] = 0,
[0][1][1][0][RTW89_WW][0][117] = 0,
[0][1][1][0][RTW89_WW][1][117] = 0,
- [0][1][1][0][RTW89_WW][2][117] = 0,
[0][1][1][0][RTW89_WW][0][119] = 0,
[0][1][1][0][RTW89_WW][1][119] = 0,
- [0][1][1][0][RTW89_WW][2][119] = 0,
[0][0][2][0][RTW89_WW][0][0] = 24,
[0][0][2][0][RTW89_WW][1][0] = 24,
- [0][0][2][0][RTW89_WW][2][0] = 56,
[0][0][2][0][RTW89_WW][0][2] = 22,
[0][0][2][0][RTW89_WW][1][2] = 22,
- [0][0][2][0][RTW89_WW][2][2] = 56,
[0][0][2][0][RTW89_WW][0][4] = 22,
[0][0][2][0][RTW89_WW][1][4] = 22,
- [0][0][2][0][RTW89_WW][2][4] = 56,
[0][0][2][0][RTW89_WW][0][6] = 22,
[0][0][2][0][RTW89_WW][1][6] = 22,
- [0][0][2][0][RTW89_WW][2][6] = 56,
[0][0][2][0][RTW89_WW][0][8] = 22,
[0][0][2][0][RTW89_WW][1][8] = 22,
- [0][0][2][0][RTW89_WW][2][8] = 56,
[0][0][2][0][RTW89_WW][0][10] = 22,
[0][0][2][0][RTW89_WW][1][10] = 22,
- [0][0][2][0][RTW89_WW][2][10] = 56,
[0][0][2][0][RTW89_WW][0][12] = 22,
[0][0][2][0][RTW89_WW][1][12] = 22,
- [0][0][2][0][RTW89_WW][2][12] = 56,
[0][0][2][0][RTW89_WW][0][14] = 22,
[0][0][2][0][RTW89_WW][1][14] = 22,
- [0][0][2][0][RTW89_WW][2][14] = 56,
[0][0][2][0][RTW89_WW][0][15] = 22,
[0][0][2][0][RTW89_WW][1][15] = 22,
- [0][0][2][0][RTW89_WW][2][15] = 56,
[0][0][2][0][RTW89_WW][0][17] = 22,
[0][0][2][0][RTW89_WW][1][17] = 22,
- [0][0][2][0][RTW89_WW][2][17] = 56,
[0][0][2][0][RTW89_WW][0][19] = 22,
[0][0][2][0][RTW89_WW][1][19] = 22,
- [0][0][2][0][RTW89_WW][2][19] = 56,
[0][0][2][0][RTW89_WW][0][21] = 22,
[0][0][2][0][RTW89_WW][1][21] = 22,
- [0][0][2][0][RTW89_WW][2][21] = 56,
[0][0][2][0][RTW89_WW][0][23] = 22,
[0][0][2][0][RTW89_WW][1][23] = 22,
- [0][0][2][0][RTW89_WW][2][23] = 70,
[0][0][2][0][RTW89_WW][0][25] = 22,
[0][0][2][0][RTW89_WW][1][25] = 22,
- [0][0][2][0][RTW89_WW][2][25] = 70,
[0][0][2][0][RTW89_WW][0][27] = 22,
[0][0][2][0][RTW89_WW][1][27] = 22,
- [0][0][2][0][RTW89_WW][2][27] = 70,
[0][0][2][0][RTW89_WW][0][29] = 22,
[0][0][2][0][RTW89_WW][1][29] = 22,
- [0][0][2][0][RTW89_WW][2][29] = 70,
[0][0][2][0][RTW89_WW][0][30] = 22,
[0][0][2][0][RTW89_WW][1][30] = 22,
- [0][0][2][0][RTW89_WW][2][30] = 70,
[0][0][2][0][RTW89_WW][0][32] = 22,
[0][0][2][0][RTW89_WW][1][32] = 22,
- [0][0][2][0][RTW89_WW][2][32] = 70,
[0][0][2][0][RTW89_WW][0][34] = 22,
[0][0][2][0][RTW89_WW][1][34] = 22,
- [0][0][2][0][RTW89_WW][2][34] = 70,
[0][0][2][0][RTW89_WW][0][36] = 22,
[0][0][2][0][RTW89_WW][1][36] = 22,
- [0][0][2][0][RTW89_WW][2][36] = 70,
[0][0][2][0][RTW89_WW][0][38] = 22,
[0][0][2][0][RTW89_WW][1][38] = 22,
- [0][0][2][0][RTW89_WW][2][38] = 70,
[0][0][2][0][RTW89_WW][0][40] = 22,
[0][0][2][0][RTW89_WW][1][40] = 22,
- [0][0][2][0][RTW89_WW][2][40] = 70,
[0][0][2][0][RTW89_WW][0][42] = 22,
[0][0][2][0][RTW89_WW][1][42] = 22,
- [0][0][2][0][RTW89_WW][2][42] = 70,
[0][0][2][0][RTW89_WW][0][44] = 22,
[0][0][2][0][RTW89_WW][1][44] = 22,
- [0][0][2][0][RTW89_WW][2][44] = 70,
[0][0][2][0][RTW89_WW][0][45] = 22,
[0][0][2][0][RTW89_WW][1][45] = 22,
- [0][0][2][0][RTW89_WW][2][45] = 70,
[0][0][2][0][RTW89_WW][0][47] = 22,
[0][0][2][0][RTW89_WW][1][47] = 22,
- [0][0][2][0][RTW89_WW][2][47] = 70,
[0][0][2][0][RTW89_WW][0][49] = 24,
[0][0][2][0][RTW89_WW][1][49] = 24,
- [0][0][2][0][RTW89_WW][2][49] = 70,
[0][0][2][0][RTW89_WW][0][51] = 22,
[0][0][2][0][RTW89_WW][1][51] = 22,
- [0][0][2][0][RTW89_WW][2][51] = 70,
[0][0][2][0][RTW89_WW][0][53] = 22,
[0][0][2][0][RTW89_WW][1][53] = 22,
- [0][0][2][0][RTW89_WW][2][53] = 70,
[0][0][2][0][RTW89_WW][0][55] = 22,
[0][0][2][0][RTW89_WW][1][55] = 22,
- [0][0][2][0][RTW89_WW][2][55] = 68,
[0][0][2][0][RTW89_WW][0][57] = 22,
[0][0][2][0][RTW89_WW][1][57] = 22,
- [0][0][2][0][RTW89_WW][2][57] = 68,
[0][0][2][0][RTW89_WW][0][59] = 22,
[0][0][2][0][RTW89_WW][1][59] = 22,
- [0][0][2][0][RTW89_WW][2][59] = 68,
[0][0][2][0][RTW89_WW][0][60] = 22,
[0][0][2][0][RTW89_WW][1][60] = 22,
- [0][0][2][0][RTW89_WW][2][60] = 68,
[0][0][2][0][RTW89_WW][0][62] = 22,
[0][0][2][0][RTW89_WW][1][62] = 22,
- [0][0][2][0][RTW89_WW][2][62] = 68,
[0][0][2][0][RTW89_WW][0][64] = 22,
[0][0][2][0][RTW89_WW][1][64] = 22,
- [0][0][2][0][RTW89_WW][2][64] = 68,
[0][0][2][0][RTW89_WW][0][66] = 22,
[0][0][2][0][RTW89_WW][1][66] = 22,
- [0][0][2][0][RTW89_WW][2][66] = 68,
[0][0][2][0][RTW89_WW][0][68] = 22,
[0][0][2][0][RTW89_WW][1][68] = 22,
- [0][0][2][0][RTW89_WW][2][68] = 68,
[0][0][2][0][RTW89_WW][0][70] = 24,
[0][0][2][0][RTW89_WW][1][70] = 24,
- [0][0][2][0][RTW89_WW][2][70] = 68,
[0][0][2][0][RTW89_WW][0][72] = 22,
[0][0][2][0][RTW89_WW][1][72] = 22,
- [0][0][2][0][RTW89_WW][2][72] = 68,
[0][0][2][0][RTW89_WW][0][74] = 22,
[0][0][2][0][RTW89_WW][1][74] = 22,
- [0][0][2][0][RTW89_WW][2][74] = 68,
[0][0][2][0][RTW89_WW][0][75] = 22,
[0][0][2][0][RTW89_WW][1][75] = 22,
- [0][0][2][0][RTW89_WW][2][75] = 68,
[0][0][2][0][RTW89_WW][0][77] = 22,
[0][0][2][0][RTW89_WW][1][77] = 22,
- [0][0][2][0][RTW89_WW][2][77] = 68,
[0][0][2][0][RTW89_WW][0][79] = 22,
[0][0][2][0][RTW89_WW][1][79] = 22,
- [0][0][2][0][RTW89_WW][2][79] = 68,
[0][0][2][0][RTW89_WW][0][81] = 22,
[0][0][2][0][RTW89_WW][1][81] = 22,
- [0][0][2][0][RTW89_WW][2][81] = 68,
[0][0][2][0][RTW89_WW][0][83] = 22,
[0][0][2][0][RTW89_WW][1][83] = 22,
- [0][0][2][0][RTW89_WW][2][83] = 68,
[0][0][2][0][RTW89_WW][0][85] = 22,
[0][0][2][0][RTW89_WW][1][85] = 22,
- [0][0][2][0][RTW89_WW][2][85] = 68,
[0][0][2][0][RTW89_WW][0][87] = 22,
[0][0][2][0][RTW89_WW][1][87] = 22,
- [0][0][2][0][RTW89_WW][2][87] = 0,
[0][0][2][0][RTW89_WW][0][89] = 22,
[0][0][2][0][RTW89_WW][1][89] = 22,
- [0][0][2][0][RTW89_WW][2][89] = 0,
[0][0][2][0][RTW89_WW][0][90] = 22,
[0][0][2][0][RTW89_WW][1][90] = 22,
- [0][0][2][0][RTW89_WW][2][90] = 0,
[0][0][2][0][RTW89_WW][0][92] = 22,
[0][0][2][0][RTW89_WW][1][92] = 22,
- [0][0][2][0][RTW89_WW][2][92] = 0,
[0][0][2][0][RTW89_WW][0][94] = 22,
[0][0][2][0][RTW89_WW][1][94] = 22,
- [0][0][2][0][RTW89_WW][2][94] = 0,
[0][0][2][0][RTW89_WW][0][96] = 22,
[0][0][2][0][RTW89_WW][1][96] = 22,
- [0][0][2][0][RTW89_WW][2][96] = 0,
[0][0][2][0][RTW89_WW][0][98] = 22,
[0][0][2][0][RTW89_WW][1][98] = 22,
- [0][0][2][0][RTW89_WW][2][98] = 0,
[0][0][2][0][RTW89_WW][0][100] = 22,
[0][0][2][0][RTW89_WW][1][100] = 22,
- [0][0][2][0][RTW89_WW][2][100] = 0,
[0][0][2][0][RTW89_WW][0][102] = 22,
[0][0][2][0][RTW89_WW][1][102] = 22,
- [0][0][2][0][RTW89_WW][2][102] = 0,
[0][0][2][0][RTW89_WW][0][104] = 22,
[0][0][2][0][RTW89_WW][1][104] = 22,
- [0][0][2][0][RTW89_WW][2][104] = 0,
[0][0][2][0][RTW89_WW][0][105] = 22,
[0][0][2][0][RTW89_WW][1][105] = 22,
- [0][0][2][0][RTW89_WW][2][105] = 0,
[0][0][2][0][RTW89_WW][0][107] = 24,
[0][0][2][0][RTW89_WW][1][107] = 24,
- [0][0][2][0][RTW89_WW][2][107] = 0,
[0][0][2][0][RTW89_WW][0][109] = 24,
[0][0][2][0][RTW89_WW][1][109] = 24,
- [0][0][2][0][RTW89_WW][2][109] = 0,
[0][0][2][0][RTW89_WW][0][111] = 0,
[0][0][2][0][RTW89_WW][1][111] = 0,
- [0][0][2][0][RTW89_WW][2][111] = 0,
[0][0][2][0][RTW89_WW][0][113] = 0,
[0][0][2][0][RTW89_WW][1][113] = 0,
- [0][0][2][0][RTW89_WW][2][113] = 0,
[0][0][2][0][RTW89_WW][0][115] = 0,
[0][0][2][0][RTW89_WW][1][115] = 0,
- [0][0][2][0][RTW89_WW][2][115] = 0,
[0][0][2][0][RTW89_WW][0][117] = 0,
[0][0][2][0][RTW89_WW][1][117] = 0,
- [0][0][2][0][RTW89_WW][2][117] = 0,
[0][0][2][0][RTW89_WW][0][119] = 0,
[0][0][2][0][RTW89_WW][1][119] = 0,
- [0][0][2][0][RTW89_WW][2][119] = 0,
[0][1][2][0][RTW89_WW][0][0] = -2,
[0][1][2][0][RTW89_WW][1][0] = -2,
- [0][1][2][0][RTW89_WW][2][0] = 54,
[0][1][2][0][RTW89_WW][0][2] = -4,
[0][1][2][0][RTW89_WW][1][2] = -4,
- [0][1][2][0][RTW89_WW][2][2] = 54,
[0][1][2][0][RTW89_WW][0][4] = -4,
[0][1][2][0][RTW89_WW][1][4] = -4,
- [0][1][2][0][RTW89_WW][2][4] = 54,
[0][1][2][0][RTW89_WW][0][6] = -4,
[0][1][2][0][RTW89_WW][1][6] = -4,
- [0][1][2][0][RTW89_WW][2][6] = 54,
[0][1][2][0][RTW89_WW][0][8] = -4,
[0][1][2][0][RTW89_WW][1][8] = -4,
- [0][1][2][0][RTW89_WW][2][8] = 54,
[0][1][2][0][RTW89_WW][0][10] = -4,
[0][1][2][0][RTW89_WW][1][10] = -4,
- [0][1][2][0][RTW89_WW][2][10] = 54,
[0][1][2][0][RTW89_WW][0][12] = -4,
[0][1][2][0][RTW89_WW][1][12] = -4,
- [0][1][2][0][RTW89_WW][2][12] = 54,
[0][1][2][0][RTW89_WW][0][14] = -4,
[0][1][2][0][RTW89_WW][1][14] = -4,
- [0][1][2][0][RTW89_WW][2][14] = 54,
[0][1][2][0][RTW89_WW][0][15] = -4,
[0][1][2][0][RTW89_WW][1][15] = -4,
- [0][1][2][0][RTW89_WW][2][15] = 54,
[0][1][2][0][RTW89_WW][0][17] = -4,
[0][1][2][0][RTW89_WW][1][17] = -4,
- [0][1][2][0][RTW89_WW][2][17] = 54,
[0][1][2][0][RTW89_WW][0][19] = -4,
[0][1][2][0][RTW89_WW][1][19] = -4,
- [0][1][2][0][RTW89_WW][2][19] = 54,
[0][1][2][0][RTW89_WW][0][21] = -4,
[0][1][2][0][RTW89_WW][1][21] = -4,
- [0][1][2][0][RTW89_WW][2][21] = 54,
[0][1][2][0][RTW89_WW][0][23] = -4,
[0][1][2][0][RTW89_WW][1][23] = -4,
- [0][1][2][0][RTW89_WW][2][23] = 68,
[0][1][2][0][RTW89_WW][0][25] = -4,
[0][1][2][0][RTW89_WW][1][25] = -4,
- [0][1][2][0][RTW89_WW][2][25] = 68,
[0][1][2][0][RTW89_WW][0][27] = -4,
[0][1][2][0][RTW89_WW][1][27] = -4,
- [0][1][2][0][RTW89_WW][2][27] = 68,
[0][1][2][0][RTW89_WW][0][29] = -4,
[0][1][2][0][RTW89_WW][1][29] = -4,
- [0][1][2][0][RTW89_WW][2][29] = 68,
[0][1][2][0][RTW89_WW][0][30] = -4,
[0][1][2][0][RTW89_WW][1][30] = -4,
- [0][1][2][0][RTW89_WW][2][30] = 68,
[0][1][2][0][RTW89_WW][0][32] = -4,
[0][1][2][0][RTW89_WW][1][32] = -4,
- [0][1][2][0][RTW89_WW][2][32] = 68,
[0][1][2][0][RTW89_WW][0][34] = -4,
[0][1][2][0][RTW89_WW][1][34] = -4,
- [0][1][2][0][RTW89_WW][2][34] = 68,
[0][1][2][0][RTW89_WW][0][36] = -4,
[0][1][2][0][RTW89_WW][1][36] = -4,
- [0][1][2][0][RTW89_WW][2][36] = 68,
[0][1][2][0][RTW89_WW][0][38] = -4,
[0][1][2][0][RTW89_WW][1][38] = -4,
- [0][1][2][0][RTW89_WW][2][38] = 68,
[0][1][2][0][RTW89_WW][0][40] = -4,
[0][1][2][0][RTW89_WW][1][40] = -4,
- [0][1][2][0][RTW89_WW][2][40] = 68,
[0][1][2][0][RTW89_WW][0][42] = -4,
[0][1][2][0][RTW89_WW][1][42] = -4,
- [0][1][2][0][RTW89_WW][2][42] = 68,
[0][1][2][0][RTW89_WW][0][44] = -2,
[0][1][2][0][RTW89_WW][1][44] = -2,
- [0][1][2][0][RTW89_WW][2][44] = 68,
[0][1][2][0][RTW89_WW][0][45] = -2,
[0][1][2][0][RTW89_WW][1][45] = -2,
- [0][1][2][0][RTW89_WW][2][45] = 70,
[0][1][2][0][RTW89_WW][0][47] = -2,
[0][1][2][0][RTW89_WW][1][47] = -2,
- [0][1][2][0][RTW89_WW][2][47] = 68,
[0][1][2][0][RTW89_WW][0][49] = -2,
[0][1][2][0][RTW89_WW][1][49] = -2,
- [0][1][2][0][RTW89_WW][2][49] = 68,
[0][1][2][0][RTW89_WW][0][51] = -2,
[0][1][2][0][RTW89_WW][1][51] = -2,
- [0][1][2][0][RTW89_WW][2][51] = 68,
[0][1][2][0][RTW89_WW][0][53] = -2,
[0][1][2][0][RTW89_WW][1][53] = -2,
- [0][1][2][0][RTW89_WW][2][53] = 68,
[0][1][2][0][RTW89_WW][0][55] = -2,
[0][1][2][0][RTW89_WW][1][55] = -2,
- [0][1][2][0][RTW89_WW][2][55] = 68,
[0][1][2][0][RTW89_WW][0][57] = -2,
[0][1][2][0][RTW89_WW][1][57] = -2,
- [0][1][2][0][RTW89_WW][2][57] = 68,
[0][1][2][0][RTW89_WW][0][59] = -2,
[0][1][2][0][RTW89_WW][1][59] = -2,
- [0][1][2][0][RTW89_WW][2][59] = 68,
[0][1][2][0][RTW89_WW][0][60] = -2,
[0][1][2][0][RTW89_WW][1][60] = -2,
- [0][1][2][0][RTW89_WW][2][60] = 68,
[0][1][2][0][RTW89_WW][0][62] = -2,
[0][1][2][0][RTW89_WW][1][62] = -2,
- [0][1][2][0][RTW89_WW][2][62] = 68,
[0][1][2][0][RTW89_WW][0][64] = -2,
[0][1][2][0][RTW89_WW][1][64] = -2,
- [0][1][2][0][RTW89_WW][2][64] = 68,
[0][1][2][0][RTW89_WW][0][66] = -2,
[0][1][2][0][RTW89_WW][1][66] = -2,
- [0][1][2][0][RTW89_WW][2][66] = 68,
[0][1][2][0][RTW89_WW][0][68] = -2,
[0][1][2][0][RTW89_WW][1][68] = -2,
- [0][1][2][0][RTW89_WW][2][68] = 68,
[0][1][2][0][RTW89_WW][0][70] = -2,
[0][1][2][0][RTW89_WW][1][70] = -2,
- [0][1][2][0][RTW89_WW][2][70] = 68,
[0][1][2][0][RTW89_WW][0][72] = -2,
[0][1][2][0][RTW89_WW][1][72] = -2,
- [0][1][2][0][RTW89_WW][2][72] = 68,
[0][1][2][0][RTW89_WW][0][74] = -2,
[0][1][2][0][RTW89_WW][1][74] = -2,
- [0][1][2][0][RTW89_WW][2][74] = 68,
[0][1][2][0][RTW89_WW][0][75] = -2,
[0][1][2][0][RTW89_WW][1][75] = -2,
- [0][1][2][0][RTW89_WW][2][75] = 68,
[0][1][2][0][RTW89_WW][0][77] = -2,
[0][1][2][0][RTW89_WW][1][77] = -2,
- [0][1][2][0][RTW89_WW][2][77] = 68,
[0][1][2][0][RTW89_WW][0][79] = -2,
[0][1][2][0][RTW89_WW][1][79] = -2,
- [0][1][2][0][RTW89_WW][2][79] = 68,
[0][1][2][0][RTW89_WW][0][81] = -2,
[0][1][2][0][RTW89_WW][1][81] = -2,
- [0][1][2][0][RTW89_WW][2][81] = 68,
[0][1][2][0][RTW89_WW][0][83] = -2,
[0][1][2][0][RTW89_WW][1][83] = -2,
- [0][1][2][0][RTW89_WW][2][83] = 68,
[0][1][2][0][RTW89_WW][0][85] = -2,
[0][1][2][0][RTW89_WW][1][85] = -2,
- [0][1][2][0][RTW89_WW][2][85] = 68,
[0][1][2][0][RTW89_WW][0][87] = -2,
[0][1][2][0][RTW89_WW][1][87] = -2,
- [0][1][2][0][RTW89_WW][2][87] = 0,
[0][1][2][0][RTW89_WW][0][89] = -2,
[0][1][2][0][RTW89_WW][1][89] = -2,
- [0][1][2][0][RTW89_WW][2][89] = 0,
[0][1][2][0][RTW89_WW][0][90] = -2,
[0][1][2][0][RTW89_WW][1][90] = -2,
- [0][1][2][0][RTW89_WW][2][90] = 0,
[0][1][2][0][RTW89_WW][0][92] = -2,
[0][1][2][0][RTW89_WW][1][92] = -2,
- [0][1][2][0][RTW89_WW][2][92] = 0,
[0][1][2][0][RTW89_WW][0][94] = -2,
[0][1][2][0][RTW89_WW][1][94] = -2,
- [0][1][2][0][RTW89_WW][2][94] = 0,
[0][1][2][0][RTW89_WW][0][96] = -2,
[0][1][2][0][RTW89_WW][1][96] = -2,
- [0][1][2][0][RTW89_WW][2][96] = 0,
[0][1][2][0][RTW89_WW][0][98] = -2,
[0][1][2][0][RTW89_WW][1][98] = -2,
- [0][1][2][0][RTW89_WW][2][98] = 0,
[0][1][2][0][RTW89_WW][0][100] = -2,
[0][1][2][0][RTW89_WW][1][100] = -2,
- [0][1][2][0][RTW89_WW][2][100] = 0,
[0][1][2][0][RTW89_WW][0][102] = -2,
[0][1][2][0][RTW89_WW][1][102] = -2,
- [0][1][2][0][RTW89_WW][2][102] = 0,
[0][1][2][0][RTW89_WW][0][104] = -2,
[0][1][2][0][RTW89_WW][1][104] = -2,
- [0][1][2][0][RTW89_WW][2][104] = 0,
[0][1][2][0][RTW89_WW][0][105] = -2,
[0][1][2][0][RTW89_WW][1][105] = -2,
- [0][1][2][0][RTW89_WW][2][105] = 0,
[0][1][2][0][RTW89_WW][0][107] = 1,
[0][1][2][0][RTW89_WW][1][107] = 1,
- [0][1][2][0][RTW89_WW][2][107] = 0,
[0][1][2][0][RTW89_WW][0][109] = 1,
[0][1][2][0][RTW89_WW][1][109] = 1,
- [0][1][2][0][RTW89_WW][2][109] = 0,
[0][1][2][0][RTW89_WW][0][111] = 0,
[0][1][2][0][RTW89_WW][1][111] = 0,
- [0][1][2][0][RTW89_WW][2][111] = 0,
[0][1][2][0][RTW89_WW][0][113] = 0,
[0][1][2][0][RTW89_WW][1][113] = 0,
- [0][1][2][0][RTW89_WW][2][113] = 0,
[0][1][2][0][RTW89_WW][0][115] = 0,
[0][1][2][0][RTW89_WW][1][115] = 0,
- [0][1][2][0][RTW89_WW][2][115] = 0,
[0][1][2][0][RTW89_WW][0][117] = 0,
[0][1][2][0][RTW89_WW][1][117] = 0,
- [0][1][2][0][RTW89_WW][2][117] = 0,
[0][1][2][0][RTW89_WW][0][119] = 0,
[0][1][2][0][RTW89_WW][1][119] = 0,
- [0][1][2][0][RTW89_WW][2][119] = 0,
[0][1][2][1][RTW89_WW][0][0] = -2,
[0][1][2][1][RTW89_WW][1][0] = -2,
- [0][1][2][1][RTW89_WW][2][0] = 54,
[0][1][2][1][RTW89_WW][0][2] = -4,
[0][1][2][1][RTW89_WW][1][2] = -4,
- [0][1][2][1][RTW89_WW][2][2] = 54,
[0][1][2][1][RTW89_WW][0][4] = -4,
[0][1][2][1][RTW89_WW][1][4] = -4,
- [0][1][2][1][RTW89_WW][2][4] = 54,
[0][1][2][1][RTW89_WW][0][6] = -4,
[0][1][2][1][RTW89_WW][1][6] = -4,
- [0][1][2][1][RTW89_WW][2][6] = 54,
[0][1][2][1][RTW89_WW][0][8] = -4,
[0][1][2][1][RTW89_WW][1][8] = -4,
- [0][1][2][1][RTW89_WW][2][8] = 54,
[0][1][2][1][RTW89_WW][0][10] = -4,
[0][1][2][1][RTW89_WW][1][10] = -4,
- [0][1][2][1][RTW89_WW][2][10] = 54,
[0][1][2][1][RTW89_WW][0][12] = -4,
[0][1][2][1][RTW89_WW][1][12] = -4,
- [0][1][2][1][RTW89_WW][2][12] = 54,
[0][1][2][1][RTW89_WW][0][14] = -4,
[0][1][2][1][RTW89_WW][1][14] = -4,
- [0][1][2][1][RTW89_WW][2][14] = 54,
[0][1][2][1][RTW89_WW][0][15] = -4,
[0][1][2][1][RTW89_WW][1][15] = -4,
- [0][1][2][1][RTW89_WW][2][15] = 54,
[0][1][2][1][RTW89_WW][0][17] = -4,
[0][1][2][1][RTW89_WW][1][17] = -4,
- [0][1][2][1][RTW89_WW][2][17] = 54,
[0][1][2][1][RTW89_WW][0][19] = -4,
[0][1][2][1][RTW89_WW][1][19] = -4,
- [0][1][2][1][RTW89_WW][2][19] = 54,
[0][1][2][1][RTW89_WW][0][21] = -4,
[0][1][2][1][RTW89_WW][1][21] = -4,
- [0][1][2][1][RTW89_WW][2][21] = 54,
[0][1][2][1][RTW89_WW][0][23] = -4,
[0][1][2][1][RTW89_WW][1][23] = -4,
- [0][1][2][1][RTW89_WW][2][23] = 68,
[0][1][2][1][RTW89_WW][0][25] = -4,
[0][1][2][1][RTW89_WW][1][25] = -4,
- [0][1][2][1][RTW89_WW][2][25] = 68,
[0][1][2][1][RTW89_WW][0][27] = -4,
[0][1][2][1][RTW89_WW][1][27] = -4,
- [0][1][2][1][RTW89_WW][2][27] = 68,
[0][1][2][1][RTW89_WW][0][29] = -4,
[0][1][2][1][RTW89_WW][1][29] = -4,
- [0][1][2][1][RTW89_WW][2][29] = 68,
[0][1][2][1][RTW89_WW][0][30] = -4,
[0][1][2][1][RTW89_WW][1][30] = -4,
- [0][1][2][1][RTW89_WW][2][30] = 68,
[0][1][2][1][RTW89_WW][0][32] = -4,
[0][1][2][1][RTW89_WW][1][32] = -4,
- [0][1][2][1][RTW89_WW][2][32] = 68,
[0][1][2][1][RTW89_WW][0][34] = -4,
[0][1][2][1][RTW89_WW][1][34] = -4,
- [0][1][2][1][RTW89_WW][2][34] = 68,
[0][1][2][1][RTW89_WW][0][36] = -4,
[0][1][2][1][RTW89_WW][1][36] = -4,
- [0][1][2][1][RTW89_WW][2][36] = 68,
[0][1][2][1][RTW89_WW][0][38] = -4,
[0][1][2][1][RTW89_WW][1][38] = -4,
- [0][1][2][1][RTW89_WW][2][38] = 68,
[0][1][2][1][RTW89_WW][0][40] = -4,
[0][1][2][1][RTW89_WW][1][40] = -4,
- [0][1][2][1][RTW89_WW][2][40] = 68,
[0][1][2][1][RTW89_WW][0][42] = -4,
[0][1][2][1][RTW89_WW][1][42] = -4,
- [0][1][2][1][RTW89_WW][2][42] = 68,
[0][1][2][1][RTW89_WW][0][44] = -2,
[0][1][2][1][RTW89_WW][1][44] = -2,
- [0][1][2][1][RTW89_WW][2][44] = 68,
[0][1][2][1][RTW89_WW][0][45] = -2,
[0][1][2][1][RTW89_WW][1][45] = -2,
- [0][1][2][1][RTW89_WW][2][45] = 70,
[0][1][2][1][RTW89_WW][0][47] = -2,
[0][1][2][1][RTW89_WW][1][47] = -2,
- [0][1][2][1][RTW89_WW][2][47] = 68,
[0][1][2][1][RTW89_WW][0][49] = -2,
[0][1][2][1][RTW89_WW][1][49] = -2,
- [0][1][2][1][RTW89_WW][2][49] = 68,
[0][1][2][1][RTW89_WW][0][51] = -2,
[0][1][2][1][RTW89_WW][1][51] = -2,
- [0][1][2][1][RTW89_WW][2][51] = 68,
[0][1][2][1][RTW89_WW][0][53] = -2,
[0][1][2][1][RTW89_WW][1][53] = -2,
- [0][1][2][1][RTW89_WW][2][53] = 68,
[0][1][2][1][RTW89_WW][0][55] = -2,
[0][1][2][1][RTW89_WW][1][55] = -2,
- [0][1][2][1][RTW89_WW][2][55] = 68,
[0][1][2][1][RTW89_WW][0][57] = -2,
[0][1][2][1][RTW89_WW][1][57] = -2,
- [0][1][2][1][RTW89_WW][2][57] = 68,
[0][1][2][1][RTW89_WW][0][59] = -2,
[0][1][2][1][RTW89_WW][1][59] = -2,
- [0][1][2][1][RTW89_WW][2][59] = 68,
[0][1][2][1][RTW89_WW][0][60] = -2,
[0][1][2][1][RTW89_WW][1][60] = -2,
- [0][1][2][1][RTW89_WW][2][60] = 68,
[0][1][2][1][RTW89_WW][0][62] = -2,
[0][1][2][1][RTW89_WW][1][62] = -2,
- [0][1][2][1][RTW89_WW][2][62] = 68,
[0][1][2][1][RTW89_WW][0][64] = -2,
[0][1][2][1][RTW89_WW][1][64] = -2,
- [0][1][2][1][RTW89_WW][2][64] = 68,
[0][1][2][1][RTW89_WW][0][66] = -2,
[0][1][2][1][RTW89_WW][1][66] = -2,
- [0][1][2][1][RTW89_WW][2][66] = 68,
[0][1][2][1][RTW89_WW][0][68] = -2,
[0][1][2][1][RTW89_WW][1][68] = -2,
- [0][1][2][1][RTW89_WW][2][68] = 68,
[0][1][2][1][RTW89_WW][0][70] = -2,
[0][1][2][1][RTW89_WW][1][70] = -2,
- [0][1][2][1][RTW89_WW][2][70] = 68,
[0][1][2][1][RTW89_WW][0][72] = -2,
[0][1][2][1][RTW89_WW][1][72] = -2,
- [0][1][2][1][RTW89_WW][2][72] = 68,
[0][1][2][1][RTW89_WW][0][74] = -2,
[0][1][2][1][RTW89_WW][1][74] = -2,
- [0][1][2][1][RTW89_WW][2][74] = 68,
[0][1][2][1][RTW89_WW][0][75] = -2,
[0][1][2][1][RTW89_WW][1][75] = -2,
- [0][1][2][1][RTW89_WW][2][75] = 68,
[0][1][2][1][RTW89_WW][0][77] = -2,
[0][1][2][1][RTW89_WW][1][77] = -2,
- [0][1][2][1][RTW89_WW][2][77] = 68,
[0][1][2][1][RTW89_WW][0][79] = -2,
[0][1][2][1][RTW89_WW][1][79] = -2,
- [0][1][2][1][RTW89_WW][2][79] = 68,
[0][1][2][1][RTW89_WW][0][81] = -2,
[0][1][2][1][RTW89_WW][1][81] = -2,
- [0][1][2][1][RTW89_WW][2][81] = 68,
[0][1][2][1][RTW89_WW][0][83] = -2,
[0][1][2][1][RTW89_WW][1][83] = -2,
- [0][1][2][1][RTW89_WW][2][83] = 68,
[0][1][2][1][RTW89_WW][0][85] = -2,
[0][1][2][1][RTW89_WW][1][85] = -2,
- [0][1][2][1][RTW89_WW][2][85] = 68,
[0][1][2][1][RTW89_WW][0][87] = -2,
[0][1][2][1][RTW89_WW][1][87] = -2,
- [0][1][2][1][RTW89_WW][2][87] = 0,
[0][1][2][1][RTW89_WW][0][89] = -2,
[0][1][2][1][RTW89_WW][1][89] = -2,
- [0][1][2][1][RTW89_WW][2][89] = 0,
[0][1][2][1][RTW89_WW][0][90] = -2,
[0][1][2][1][RTW89_WW][1][90] = -2,
- [0][1][2][1][RTW89_WW][2][90] = 0,
[0][1][2][1][RTW89_WW][0][92] = -2,
[0][1][2][1][RTW89_WW][1][92] = -2,
- [0][1][2][1][RTW89_WW][2][92] = 0,
[0][1][2][1][RTW89_WW][0][94] = -2,
[0][1][2][1][RTW89_WW][1][94] = -2,
- [0][1][2][1][RTW89_WW][2][94] = 0,
[0][1][2][1][RTW89_WW][0][96] = -2,
[0][1][2][1][RTW89_WW][1][96] = -2,
- [0][1][2][1][RTW89_WW][2][96] = 0,
[0][1][2][1][RTW89_WW][0][98] = -2,
[0][1][2][1][RTW89_WW][1][98] = -2,
- [0][1][2][1][RTW89_WW][2][98] = 0,
[0][1][2][1][RTW89_WW][0][100] = -2,
[0][1][2][1][RTW89_WW][1][100] = -2,
- [0][1][2][1][RTW89_WW][2][100] = 0,
[0][1][2][1][RTW89_WW][0][102] = -2,
[0][1][2][1][RTW89_WW][1][102] = -2,
- [0][1][2][1][RTW89_WW][2][102] = 0,
[0][1][2][1][RTW89_WW][0][104] = -2,
[0][1][2][1][RTW89_WW][1][104] = -2,
- [0][1][2][1][RTW89_WW][2][104] = 0,
[0][1][2][1][RTW89_WW][0][105] = -2,
[0][1][2][1][RTW89_WW][1][105] = -2,
- [0][1][2][1][RTW89_WW][2][105] = 0,
[0][1][2][1][RTW89_WW][0][107] = 1,
[0][1][2][1][RTW89_WW][1][107] = 1,
- [0][1][2][1][RTW89_WW][2][107] = 0,
[0][1][2][1][RTW89_WW][0][109] = 1,
[0][1][2][1][RTW89_WW][1][109] = 1,
- [0][1][2][1][RTW89_WW][2][109] = 0,
[0][1][2][1][RTW89_WW][0][111] = 0,
[0][1][2][1][RTW89_WW][1][111] = 0,
- [0][1][2][1][RTW89_WW][2][111] = 0,
[0][1][2][1][RTW89_WW][0][113] = 0,
[0][1][2][1][RTW89_WW][1][113] = 0,
- [0][1][2][1][RTW89_WW][2][113] = 0,
[0][1][2][1][RTW89_WW][0][115] = 0,
[0][1][2][1][RTW89_WW][1][115] = 0,
- [0][1][2][1][RTW89_WW][2][115] = 0,
[0][1][2][1][RTW89_WW][0][117] = 0,
[0][1][2][1][RTW89_WW][1][117] = 0,
- [0][1][2][1][RTW89_WW][2][117] = 0,
[0][1][2][1][RTW89_WW][0][119] = 0,
[0][1][2][1][RTW89_WW][1][119] = 0,
- [0][1][2][1][RTW89_WW][2][119] = 0,
[1][0][2][0][RTW89_WW][0][1] = 24,
[1][0][2][0][RTW89_WW][1][1] = 34,
- [1][0][2][0][RTW89_WW][2][1] = 70,
[1][0][2][0][RTW89_WW][0][5] = 24,
[1][0][2][0][RTW89_WW][1][5] = 34,
- [1][0][2][0][RTW89_WW][2][5] = 70,
[1][0][2][0][RTW89_WW][0][9] = 24,
[1][0][2][0][RTW89_WW][1][9] = 34,
- [1][0][2][0][RTW89_WW][2][9] = 70,
[1][0][2][0][RTW89_WW][0][13] = 24,
[1][0][2][0][RTW89_WW][1][13] = 34,
- [1][0][2][0][RTW89_WW][2][13] = 70,
[1][0][2][0][RTW89_WW][0][16] = 24,
[1][0][2][0][RTW89_WW][1][16] = 34,
- [1][0][2][0][RTW89_WW][2][16] = 70,
[1][0][2][0][RTW89_WW][0][20] = 24,
[1][0][2][0][RTW89_WW][1][20] = 34,
- [1][0][2][0][RTW89_WW][2][20] = 70,
[1][0][2][0][RTW89_WW][0][24] = 26,
[1][0][2][0][RTW89_WW][1][24] = 36,
- [1][0][2][0][RTW89_WW][2][24] = 70,
[1][0][2][0][RTW89_WW][0][28] = 26,
[1][0][2][0][RTW89_WW][1][28] = 34,
- [1][0][2][0][RTW89_WW][2][28] = 70,
[1][0][2][0][RTW89_WW][0][31] = 26,
[1][0][2][0][RTW89_WW][1][31] = 34,
- [1][0][2][0][RTW89_WW][2][31] = 70,
[1][0][2][0][RTW89_WW][0][35] = 26,
[1][0][2][0][RTW89_WW][1][35] = 34,
- [1][0][2][0][RTW89_WW][2][35] = 70,
[1][0][2][0][RTW89_WW][0][39] = 26,
[1][0][2][0][RTW89_WW][1][39] = 34,
- [1][0][2][0][RTW89_WW][2][39] = 70,
[1][0][2][0][RTW89_WW][0][43] = 26,
[1][0][2][0][RTW89_WW][1][43] = 34,
- [1][0][2][0][RTW89_WW][2][43] = 70,
[1][0][2][0][RTW89_WW][0][46] = 34,
[1][0][2][0][RTW89_WW][1][46] = 34,
- [1][0][2][0][RTW89_WW][2][46] = 68,
[1][0][2][0][RTW89_WW][0][50] = 34,
[1][0][2][0][RTW89_WW][1][50] = 34,
- [1][0][2][0][RTW89_WW][2][50] = 68,
[1][0][2][0][RTW89_WW][0][54] = 36,
[1][0][2][0][RTW89_WW][1][54] = 36,
- [1][0][2][0][RTW89_WW][2][54] = 0,
[1][0][2][0][RTW89_WW][0][58] = 36,
[1][0][2][0][RTW89_WW][1][58] = 36,
- [1][0][2][0][RTW89_WW][2][58] = 66,
[1][0][2][0][RTW89_WW][0][61] = 34,
[1][0][2][0][RTW89_WW][1][61] = 34,
- [1][0][2][0][RTW89_WW][2][61] = 66,
[1][0][2][0][RTW89_WW][0][65] = 34,
[1][0][2][0][RTW89_WW][1][65] = 34,
- [1][0][2][0][RTW89_WW][2][65] = 66,
[1][0][2][0][RTW89_WW][0][69] = 34,
[1][0][2][0][RTW89_WW][1][69] = 34,
- [1][0][2][0][RTW89_WW][2][69] = 66,
[1][0][2][0][RTW89_WW][0][73] = 34,
[1][0][2][0][RTW89_WW][1][73] = 34,
- [1][0][2][0][RTW89_WW][2][73] = 66,
[1][0][2][0][RTW89_WW][0][76] = 34,
[1][0][2][0][RTW89_WW][1][76] = 34,
- [1][0][2][0][RTW89_WW][2][76] = 66,
[1][0][2][0][RTW89_WW][0][80] = 34,
[1][0][2][0][RTW89_WW][1][80] = 34,
- [1][0][2][0][RTW89_WW][2][80] = 66,
[1][0][2][0][RTW89_WW][0][84] = 34,
[1][0][2][0][RTW89_WW][1][84] = 34,
- [1][0][2][0][RTW89_WW][2][84] = 66,
[1][0][2][0][RTW89_WW][0][88] = 34,
[1][0][2][0][RTW89_WW][1][88] = 34,
- [1][0][2][0][RTW89_WW][2][88] = 0,
[1][0][2][0][RTW89_WW][0][91] = 36,
[1][0][2][0][RTW89_WW][1][91] = 36,
- [1][0][2][0][RTW89_WW][2][91] = 0,
[1][0][2][0][RTW89_WW][0][95] = 34,
[1][0][2][0][RTW89_WW][1][95] = 34,
- [1][0][2][0][RTW89_WW][2][95] = 0,
[1][0][2][0][RTW89_WW][0][99] = 34,
[1][0][2][0][RTW89_WW][1][99] = 34,
- [1][0][2][0][RTW89_WW][2][99] = 0,
[1][0][2][0][RTW89_WW][0][103] = 34,
[1][0][2][0][RTW89_WW][1][103] = 34,
- [1][0][2][0][RTW89_WW][2][103] = 0,
[1][0][2][0][RTW89_WW][0][106] = 36,
[1][0][2][0][RTW89_WW][1][106] = 36,
- [1][0][2][0][RTW89_WW][2][106] = 0,
[1][0][2][0][RTW89_WW][0][110] = 0,
[1][0][2][0][RTW89_WW][1][110] = 0,
- [1][0][2][0][RTW89_WW][2][110] = 0,
[1][0][2][0][RTW89_WW][0][114] = 0,
[1][0][2][0][RTW89_WW][1][114] = 0,
- [1][0][2][0][RTW89_WW][2][114] = 0,
[1][0][2][0][RTW89_WW][0][118] = 0,
[1][0][2][0][RTW89_WW][1][118] = 0,
- [1][0][2][0][RTW89_WW][2][118] = 0,
[1][1][2][0][RTW89_WW][0][1] = 10,
[1][1][2][0][RTW89_WW][1][1] = 10,
- [1][1][2][0][RTW89_WW][2][1] = 58,
[1][1][2][0][RTW89_WW][0][5] = 10,
[1][1][2][0][RTW89_WW][1][5] = 10,
- [1][1][2][0][RTW89_WW][2][5] = 58,
[1][1][2][0][RTW89_WW][0][9] = 10,
[1][1][2][0][RTW89_WW][1][9] = 10,
- [1][1][2][0][RTW89_WW][2][9] = 58,
[1][1][2][0][RTW89_WW][0][13] = 10,
[1][1][2][0][RTW89_WW][1][13] = 10,
- [1][1][2][0][RTW89_WW][2][13] = 58,
[1][1][2][0][RTW89_WW][0][16] = 10,
[1][1][2][0][RTW89_WW][1][16] = 10,
- [1][1][2][0][RTW89_WW][2][16] = 58,
[1][1][2][0][RTW89_WW][0][20] = 10,
[1][1][2][0][RTW89_WW][1][20] = 10,
- [1][1][2][0][RTW89_WW][2][20] = 58,
[1][1][2][0][RTW89_WW][0][24] = 10,
[1][1][2][0][RTW89_WW][1][24] = 10,
- [1][1][2][0][RTW89_WW][2][24] = 70,
[1][1][2][0][RTW89_WW][0][28] = 10,
[1][1][2][0][RTW89_WW][1][28] = 10,
- [1][1][2][0][RTW89_WW][2][28] = 70,
[1][1][2][0][RTW89_WW][0][31] = 10,
[1][1][2][0][RTW89_WW][1][31] = 10,
- [1][1][2][0][RTW89_WW][2][31] = 70,
[1][1][2][0][RTW89_WW][0][35] = 10,
[1][1][2][0][RTW89_WW][1][35] = 10,
- [1][1][2][0][RTW89_WW][2][35] = 70,
[1][1][2][0][RTW89_WW][0][39] = 10,
[1][1][2][0][RTW89_WW][1][39] = 10,
- [1][1][2][0][RTW89_WW][2][39] = 70,
[1][1][2][0][RTW89_WW][0][43] = 10,
[1][1][2][0][RTW89_WW][1][43] = 10,
- [1][1][2][0][RTW89_WW][2][43] = 70,
[1][1][2][0][RTW89_WW][0][46] = 12,
[1][1][2][0][RTW89_WW][1][46] = 12,
- [1][1][2][0][RTW89_WW][2][46] = 68,
[1][1][2][0][RTW89_WW][0][50] = 12,
[1][1][2][0][RTW89_WW][1][50] = 12,
- [1][1][2][0][RTW89_WW][2][50] = 68,
[1][1][2][0][RTW89_WW][0][54] = 10,
[1][1][2][0][RTW89_WW][1][54] = 10,
- [1][1][2][0][RTW89_WW][2][54] = 0,
[1][1][2][0][RTW89_WW][0][58] = 10,
[1][1][2][0][RTW89_WW][1][58] = 10,
- [1][1][2][0][RTW89_WW][2][58] = 66,
[1][1][2][0][RTW89_WW][0][61] = 10,
[1][1][2][0][RTW89_WW][1][61] = 10,
- [1][1][2][0][RTW89_WW][2][61] = 66,
[1][1][2][0][RTW89_WW][0][65] = 10,
[1][1][2][0][RTW89_WW][1][65] = 10,
- [1][1][2][0][RTW89_WW][2][65] = 66,
[1][1][2][0][RTW89_WW][0][69] = 10,
[1][1][2][0][RTW89_WW][1][69] = 10,
- [1][1][2][0][RTW89_WW][2][69] = 66,
[1][1][2][0][RTW89_WW][0][73] = 10,
[1][1][2][0][RTW89_WW][1][73] = 10,
- [1][1][2][0][RTW89_WW][2][73] = 66,
[1][1][2][0][RTW89_WW][0][76] = 10,
[1][1][2][0][RTW89_WW][1][76] = 10,
- [1][1][2][0][RTW89_WW][2][76] = 66,
[1][1][2][0][RTW89_WW][0][80] = 10,
[1][1][2][0][RTW89_WW][1][80] = 10,
- [1][1][2][0][RTW89_WW][2][80] = 66,
[1][1][2][0][RTW89_WW][0][84] = 10,
[1][1][2][0][RTW89_WW][1][84] = 10,
- [1][1][2][0][RTW89_WW][2][84] = 66,
[1][1][2][0][RTW89_WW][0][88] = 10,
[1][1][2][0][RTW89_WW][1][88] = 10,
- [1][1][2][0][RTW89_WW][2][88] = 0,
[1][1][2][0][RTW89_WW][0][91] = 12,
[1][1][2][0][RTW89_WW][1][91] = 12,
- [1][1][2][0][RTW89_WW][2][91] = 0,
[1][1][2][0][RTW89_WW][0][95] = 10,
[1][1][2][0][RTW89_WW][1][95] = 10,
- [1][1][2][0][RTW89_WW][2][95] = 0,
[1][1][2][0][RTW89_WW][0][99] = 10,
[1][1][2][0][RTW89_WW][1][99] = 10,
- [1][1][2][0][RTW89_WW][2][99] = 0,
[1][1][2][0][RTW89_WW][0][103] = 10,
[1][1][2][0][RTW89_WW][1][103] = 10,
- [1][1][2][0][RTW89_WW][2][103] = 0,
[1][1][2][0][RTW89_WW][0][106] = 12,
[1][1][2][0][RTW89_WW][1][106] = 12,
- [1][1][2][0][RTW89_WW][2][106] = 0,
[1][1][2][0][RTW89_WW][0][110] = 0,
[1][1][2][0][RTW89_WW][1][110] = 0,
- [1][1][2][0][RTW89_WW][2][110] = 0,
[1][1][2][0][RTW89_WW][0][114] = 0,
[1][1][2][0][RTW89_WW][1][114] = 0,
- [1][1][2][0][RTW89_WW][2][114] = 0,
[1][1][2][0][RTW89_WW][0][118] = 0,
[1][1][2][0][RTW89_WW][1][118] = 0,
- [1][1][2][0][RTW89_WW][2][118] = 0,
[1][1][2][1][RTW89_WW][0][1] = 6,
[1][1][2][1][RTW89_WW][1][1] = 10,
- [1][1][2][1][RTW89_WW][2][1] = 58,
[1][1][2][1][RTW89_WW][0][5] = 6,
[1][1][2][1][RTW89_WW][1][5] = 10,
- [1][1][2][1][RTW89_WW][2][5] = 58,
[1][1][2][1][RTW89_WW][0][9] = 6,
[1][1][2][1][RTW89_WW][1][9] = 10,
- [1][1][2][1][RTW89_WW][2][9] = 58,
[1][1][2][1][RTW89_WW][0][13] = 6,
[1][1][2][1][RTW89_WW][1][13] = 10,
- [1][1][2][1][RTW89_WW][2][13] = 58,
[1][1][2][1][RTW89_WW][0][16] = 6,
[1][1][2][1][RTW89_WW][1][16] = 10,
- [1][1][2][1][RTW89_WW][2][16] = 58,
[1][1][2][1][RTW89_WW][0][20] = 6,
[1][1][2][1][RTW89_WW][1][20] = 10,
- [1][1][2][1][RTW89_WW][2][20] = 58,
[1][1][2][1][RTW89_WW][0][24] = 6,
[1][1][2][1][RTW89_WW][1][24] = 10,
- [1][1][2][1][RTW89_WW][2][24] = 70,
[1][1][2][1][RTW89_WW][0][28] = 6,
[1][1][2][1][RTW89_WW][1][28] = 10,
- [1][1][2][1][RTW89_WW][2][28] = 70,
[1][1][2][1][RTW89_WW][0][31] = 6,
[1][1][2][1][RTW89_WW][1][31] = 10,
- [1][1][2][1][RTW89_WW][2][31] = 70,
[1][1][2][1][RTW89_WW][0][35] = 6,
[1][1][2][1][RTW89_WW][1][35] = 10,
- [1][1][2][1][RTW89_WW][2][35] = 70,
[1][1][2][1][RTW89_WW][0][39] = 6,
[1][1][2][1][RTW89_WW][1][39] = 10,
- [1][1][2][1][RTW89_WW][2][39] = 70,
[1][1][2][1][RTW89_WW][0][43] = 6,
[1][1][2][1][RTW89_WW][1][43] = 10,
- [1][1][2][1][RTW89_WW][2][43] = 70,
[1][1][2][1][RTW89_WW][0][46] = 12,
[1][1][2][1][RTW89_WW][1][46] = 12,
- [1][1][2][1][RTW89_WW][2][46] = 68,
[1][1][2][1][RTW89_WW][0][50] = 12,
[1][1][2][1][RTW89_WW][1][50] = 12,
- [1][1][2][1][RTW89_WW][2][50] = 68,
[1][1][2][1][RTW89_WW][0][54] = 10,
[1][1][2][1][RTW89_WW][1][54] = 10,
- [1][1][2][1][RTW89_WW][2][54] = 0,
[1][1][2][1][RTW89_WW][0][58] = 10,
[1][1][2][1][RTW89_WW][1][58] = 10,
- [1][1][2][1][RTW89_WW][2][58] = 66,
[1][1][2][1][RTW89_WW][0][61] = 10,
[1][1][2][1][RTW89_WW][1][61] = 10,
- [1][1][2][1][RTW89_WW][2][61] = 66,
[1][1][2][1][RTW89_WW][0][65] = 10,
[1][1][2][1][RTW89_WW][1][65] = 10,
- [1][1][2][1][RTW89_WW][2][65] = 66,
[1][1][2][1][RTW89_WW][0][69] = 10,
[1][1][2][1][RTW89_WW][1][69] = 10,
- [1][1][2][1][RTW89_WW][2][69] = 66,
[1][1][2][1][RTW89_WW][0][73] = 10,
[1][1][2][1][RTW89_WW][1][73] = 10,
- [1][1][2][1][RTW89_WW][2][73] = 66,
[1][1][2][1][RTW89_WW][0][76] = 10,
[1][1][2][1][RTW89_WW][1][76] = 10,
- [1][1][2][1][RTW89_WW][2][76] = 66,
[1][1][2][1][RTW89_WW][0][80] = 10,
[1][1][2][1][RTW89_WW][1][80] = 10,
- [1][1][2][1][RTW89_WW][2][80] = 66,
[1][1][2][1][RTW89_WW][0][84] = 10,
[1][1][2][1][RTW89_WW][1][84] = 10,
- [1][1][2][1][RTW89_WW][2][84] = 66,
[1][1][2][1][RTW89_WW][0][88] = 10,
[1][1][2][1][RTW89_WW][1][88] = 10,
- [1][1][2][1][RTW89_WW][2][88] = 0,
[1][1][2][1][RTW89_WW][0][91] = 12,
[1][1][2][1][RTW89_WW][1][91] = 12,
- [1][1][2][1][RTW89_WW][2][91] = 0,
[1][1][2][1][RTW89_WW][0][95] = 10,
[1][1][2][1][RTW89_WW][1][95] = 10,
- [1][1][2][1][RTW89_WW][2][95] = 0,
[1][1][2][1][RTW89_WW][0][99] = 10,
[1][1][2][1][RTW89_WW][1][99] = 10,
- [1][1][2][1][RTW89_WW][2][99] = 0,
[1][1][2][1][RTW89_WW][0][103] = 10,
[1][1][2][1][RTW89_WW][1][103] = 10,
- [1][1][2][1][RTW89_WW][2][103] = 0,
[1][1][2][1][RTW89_WW][0][106] = 12,
[1][1][2][1][RTW89_WW][1][106] = 12,
- [1][1][2][1][RTW89_WW][2][106] = 0,
[1][1][2][1][RTW89_WW][0][110] = 0,
[1][1][2][1][RTW89_WW][1][110] = 0,
- [1][1][2][1][RTW89_WW][2][110] = 0,
[1][1][2][1][RTW89_WW][0][114] = 0,
[1][1][2][1][RTW89_WW][1][114] = 0,
- [1][1][2][1][RTW89_WW][2][114] = 0,
[1][1][2][1][RTW89_WW][0][118] = 0,
[1][1][2][1][RTW89_WW][1][118] = 0,
- [1][1][2][1][RTW89_WW][2][118] = 0,
[2][0][2][0][RTW89_WW][0][3] = 24,
[2][0][2][0][RTW89_WW][1][3] = 46,
- [2][0][2][0][RTW89_WW][2][3] = 60,
[2][0][2][0][RTW89_WW][0][11] = 24,
[2][0][2][0][RTW89_WW][1][11] = 46,
- [2][0][2][0][RTW89_WW][2][11] = 60,
[2][0][2][0][RTW89_WW][0][18] = 24,
[2][0][2][0][RTW89_WW][1][18] = 46,
- [2][0][2][0][RTW89_WW][2][18] = 60,
[2][0][2][0][RTW89_WW][0][26] = 24,
[2][0][2][0][RTW89_WW][1][26] = 46,
- [2][0][2][0][RTW89_WW][2][26] = 60,
[2][0][2][0][RTW89_WW][0][33] = 24,
[2][0][2][0][RTW89_WW][1][33] = 46,
- [2][0][2][0][RTW89_WW][2][33] = 60,
[2][0][2][0][RTW89_WW][0][41] = 24,
[2][0][2][0][RTW89_WW][1][41] = 46,
- [2][0][2][0][RTW89_WW][2][41] = 60,
[2][0][2][0][RTW89_WW][0][48] = 46,
[2][0][2][0][RTW89_WW][1][48] = 46,
- [2][0][2][0][RTW89_WW][2][48] = 60,
[2][0][2][0][RTW89_WW][0][56] = 46,
[2][0][2][0][RTW89_WW][1][56] = 46,
- [2][0][2][0][RTW89_WW][2][56] = 58,
[2][0][2][0][RTW89_WW][0][63] = 46,
[2][0][2][0][RTW89_WW][1][63] = 46,
- [2][0][2][0][RTW89_WW][2][63] = 58,
[2][0][2][0][RTW89_WW][0][71] = 46,
[2][0][2][0][RTW89_WW][1][71] = 46,
- [2][0][2][0][RTW89_WW][2][71] = 58,
[2][0][2][0][RTW89_WW][0][78] = 46,
[2][0][2][0][RTW89_WW][1][78] = 46,
- [2][0][2][0][RTW89_WW][2][78] = 58,
[2][0][2][0][RTW89_WW][0][86] = 46,
[2][0][2][0][RTW89_WW][1][86] = 46,
- [2][0][2][0][RTW89_WW][2][86] = 0,
[2][0][2][0][RTW89_WW][0][93] = 46,
[2][0][2][0][RTW89_WW][1][93] = 46,
- [2][0][2][0][RTW89_WW][2][93] = 0,
[2][0][2][0][RTW89_WW][0][101] = 44,
[2][0][2][0][RTW89_WW][1][101] = 44,
- [2][0][2][0][RTW89_WW][2][101] = 0,
[2][0][2][0][RTW89_WW][0][108] = 0,
[2][0][2][0][RTW89_WW][1][108] = 0,
- [2][0][2][0][RTW89_WW][2][108] = 0,
[2][0][2][0][RTW89_WW][0][116] = 0,
[2][0][2][0][RTW89_WW][1][116] = 0,
- [2][0][2][0][RTW89_WW][2][116] = 0,
[2][1][2][0][RTW89_WW][0][3] = 12,
[2][1][2][0][RTW89_WW][1][3] = 22,
- [2][1][2][0][RTW89_WW][2][3] = 50,
[2][1][2][0][RTW89_WW][0][11] = 12,
[2][1][2][0][RTW89_WW][1][11] = 20,
- [2][1][2][0][RTW89_WW][2][11] = 50,
[2][1][2][0][RTW89_WW][0][18] = 12,
[2][1][2][0][RTW89_WW][1][18] = 20,
- [2][1][2][0][RTW89_WW][2][18] = 50,
[2][1][2][0][RTW89_WW][0][26] = 12,
[2][1][2][0][RTW89_WW][1][26] = 20,
- [2][1][2][0][RTW89_WW][2][26] = 60,
[2][1][2][0][RTW89_WW][0][33] = 12,
[2][1][2][0][RTW89_WW][1][33] = 20,
- [2][1][2][0][RTW89_WW][2][33] = 60,
[2][1][2][0][RTW89_WW][0][41] = 12,
[2][1][2][0][RTW89_WW][1][41] = 22,
- [2][1][2][0][RTW89_WW][2][41] = 60,
[2][1][2][0][RTW89_WW][0][48] = 22,
[2][1][2][0][RTW89_WW][1][48] = 22,
- [2][1][2][0][RTW89_WW][2][48] = 60,
[2][1][2][0][RTW89_WW][0][56] = 20,
[2][1][2][0][RTW89_WW][1][56] = 20,
- [2][1][2][0][RTW89_WW][2][56] = 56,
[2][1][2][0][RTW89_WW][0][63] = 22,
[2][1][2][0][RTW89_WW][1][63] = 22,
- [2][1][2][0][RTW89_WW][2][63] = 58,
[2][1][2][0][RTW89_WW][0][71] = 20,
[2][1][2][0][RTW89_WW][1][71] = 20,
- [2][1][2][0][RTW89_WW][2][71] = 58,
[2][1][2][0][RTW89_WW][0][78] = 20,
[2][1][2][0][RTW89_WW][1][78] = 20,
- [2][1][2][0][RTW89_WW][2][78] = 58,
[2][1][2][0][RTW89_WW][0][86] = 20,
[2][1][2][0][RTW89_WW][1][86] = 20,
- [2][1][2][0][RTW89_WW][2][86] = 0,
[2][1][2][0][RTW89_WW][0][93] = 22,
[2][1][2][0][RTW89_WW][1][93] = 22,
- [2][1][2][0][RTW89_WW][2][93] = 0,
[2][1][2][0][RTW89_WW][0][101] = 22,
[2][1][2][0][RTW89_WW][1][101] = 22,
- [2][1][2][0][RTW89_WW][2][101] = 0,
[2][1][2][0][RTW89_WW][0][108] = 0,
[2][1][2][0][RTW89_WW][1][108] = 0,
- [2][1][2][0][RTW89_WW][2][108] = 0,
[2][1][2][0][RTW89_WW][0][116] = 0,
[2][1][2][0][RTW89_WW][1][116] = 0,
- [2][1][2][0][RTW89_WW][2][116] = 0,
[2][1][2][1][RTW89_WW][0][3] = 6,
[2][1][2][1][RTW89_WW][1][3] = 22,
- [2][1][2][1][RTW89_WW][2][3] = 50,
[2][1][2][1][RTW89_WW][0][11] = 6,
[2][1][2][1][RTW89_WW][1][11] = 20,
- [2][1][2][1][RTW89_WW][2][11] = 50,
[2][1][2][1][RTW89_WW][0][18] = 6,
[2][1][2][1][RTW89_WW][1][18] = 20,
- [2][1][2][1][RTW89_WW][2][18] = 50,
[2][1][2][1][RTW89_WW][0][26] = 6,
[2][1][2][1][RTW89_WW][1][26] = 20,
- [2][1][2][1][RTW89_WW][2][26] = 60,
[2][1][2][1][RTW89_WW][0][33] = 6,
[2][1][2][1][RTW89_WW][1][33] = 20,
- [2][1][2][1][RTW89_WW][2][33] = 60,
[2][1][2][1][RTW89_WW][0][41] = 6,
[2][1][2][1][RTW89_WW][1][41] = 22,
- [2][1][2][1][RTW89_WW][2][41] = 60,
[2][1][2][1][RTW89_WW][0][48] = 22,
[2][1][2][1][RTW89_WW][1][48] = 22,
- [2][1][2][1][RTW89_WW][2][48] = 60,
[2][1][2][1][RTW89_WW][0][56] = 20,
[2][1][2][1][RTW89_WW][1][56] = 20,
- [2][1][2][1][RTW89_WW][2][56] = 56,
[2][1][2][1][RTW89_WW][0][63] = 22,
[2][1][2][1][RTW89_WW][1][63] = 22,
- [2][1][2][1][RTW89_WW][2][63] = 58,
[2][1][2][1][RTW89_WW][0][71] = 20,
[2][1][2][1][RTW89_WW][1][71] = 20,
- [2][1][2][1][RTW89_WW][2][71] = 58,
[2][1][2][1][RTW89_WW][0][78] = 20,
[2][1][2][1][RTW89_WW][1][78] = 20,
- [2][1][2][1][RTW89_WW][2][78] = 58,
[2][1][2][1][RTW89_WW][0][86] = 20,
[2][1][2][1][RTW89_WW][1][86] = 20,
- [2][1][2][1][RTW89_WW][2][86] = 0,
[2][1][2][1][RTW89_WW][0][93] = 22,
[2][1][2][1][RTW89_WW][1][93] = 22,
- [2][1][2][1][RTW89_WW][2][93] = 0,
[2][1][2][1][RTW89_WW][0][101] = 22,
[2][1][2][1][RTW89_WW][1][101] = 22,
- [2][1][2][1][RTW89_WW][2][101] = 0,
[2][1][2][1][RTW89_WW][0][108] = 0,
[2][1][2][1][RTW89_WW][1][108] = 0,
- [2][1][2][1][RTW89_WW][2][108] = 0,
[2][1][2][1][RTW89_WW][0][116] = 0,
[2][1][2][1][RTW89_WW][1][116] = 0,
- [2][1][2][1][RTW89_WW][2][116] = 0,
[3][0][2][0][RTW89_WW][0][7] = 22,
[3][0][2][0][RTW89_WW][1][7] = 42,
- [3][0][2][0][RTW89_WW][2][7] = 52,
[3][0][2][0][RTW89_WW][0][22] = 20,
[3][0][2][0][RTW89_WW][1][22] = 42,
- [3][0][2][0][RTW89_WW][2][22] = 52,
[3][0][2][0][RTW89_WW][0][37] = 20,
[3][0][2][0][RTW89_WW][1][37] = 42,
- [3][0][2][0][RTW89_WW][2][37] = 52,
[3][0][2][0][RTW89_WW][0][52] = 54,
[3][0][2][0][RTW89_WW][1][52] = 54,
- [3][0][2][0][RTW89_WW][2][52] = 56,
[3][0][2][0][RTW89_WW][0][67] = 54,
[3][0][2][0][RTW89_WW][1][67] = 54,
- [3][0][2][0][RTW89_WW][2][67] = 54,
[3][0][2][0][RTW89_WW][0][82] = 26,
[3][0][2][0][RTW89_WW][1][82] = 26,
- [3][0][2][0][RTW89_WW][2][82] = 0,
[3][0][2][0][RTW89_WW][0][97] = 26,
[3][0][2][0][RTW89_WW][1][97] = 26,
- [3][0][2][0][RTW89_WW][2][97] = 0,
[3][0][2][0][RTW89_WW][0][112] = 0,
[3][0][2][0][RTW89_WW][1][112] = 0,
- [3][0][2][0][RTW89_WW][2][112] = 0,
[3][1][2][0][RTW89_WW][0][7] = 10,
[3][1][2][0][RTW89_WW][1][7] = 32,
- [3][1][2][0][RTW89_WW][2][7] = 46,
[3][1][2][0][RTW89_WW][0][22] = 8,
[3][1][2][0][RTW89_WW][1][22] = 30,
- [3][1][2][0][RTW89_WW][2][22] = 52,
[3][1][2][0][RTW89_WW][0][37] = 8,
[3][1][2][0][RTW89_WW][1][37] = 30,
- [3][1][2][0][RTW89_WW][2][37] = 52,
[3][1][2][0][RTW89_WW][0][52] = 30,
[3][1][2][0][RTW89_WW][1][52] = 30,
- [3][1][2][0][RTW89_WW][2][52] = 56,
[3][1][2][0][RTW89_WW][0][67] = 32,
[3][1][2][0][RTW89_WW][1][67] = 32,
- [3][1][2][0][RTW89_WW][2][67] = 54,
[3][1][2][0][RTW89_WW][0][82] = 24,
[3][1][2][0][RTW89_WW][1][82] = 24,
- [3][1][2][0][RTW89_WW][2][82] = 0,
[3][1][2][0][RTW89_WW][0][97] = 24,
[3][1][2][0][RTW89_WW][1][97] = 24,
- [3][1][2][0][RTW89_WW][2][97] = 0,
[3][1][2][0][RTW89_WW][0][112] = 0,
[3][1][2][0][RTW89_WW][1][112] = 0,
- [3][1][2][0][RTW89_WW][2][112] = 0,
[3][1][2][1][RTW89_WW][0][7] = 6,
[3][1][2][1][RTW89_WW][1][7] = 32,
- [3][1][2][1][RTW89_WW][2][7] = 46,
[3][1][2][1][RTW89_WW][0][22] = 6,
[3][1][2][1][RTW89_WW][1][22] = 30,
- [3][1][2][1][RTW89_WW][2][22] = 52,
[3][1][2][1][RTW89_WW][0][37] = 6,
[3][1][2][1][RTW89_WW][1][37] = 30,
- [3][1][2][1][RTW89_WW][2][37] = 52,
[3][1][2][1][RTW89_WW][0][52] = 30,
[3][1][2][1][RTW89_WW][1][52] = 30,
- [3][1][2][1][RTW89_WW][2][52] = 56,
[3][1][2][1][RTW89_WW][0][67] = 32,
[3][1][2][1][RTW89_WW][1][67] = 32,
- [3][1][2][1][RTW89_WW][2][67] = 54,
[3][1][2][1][RTW89_WW][0][82] = 24,
[3][1][2][1][RTW89_WW][1][82] = 24,
- [3][1][2][1][RTW89_WW][2][82] = 0,
[3][1][2][1][RTW89_WW][0][97] = 24,
[3][1][2][1][RTW89_WW][1][97] = 24,
- [3][1][2][1][RTW89_WW][2][97] = 0,
[3][1][2][1][RTW89_WW][0][112] = 0,
[3][1][2][1][RTW89_WW][1][112] = 0,
- [3][1][2][1][RTW89_WW][2][112] = 0,
[0][0][1][0][RTW89_FCC][1][0] = 24,
- [0][0][1][0][RTW89_FCC][2][0] = 56,
[0][0][1][0][RTW89_ETSI][1][0] = 66,
[0][0][1][0][RTW89_ETSI][0][0] = 28,
[0][0][1][0][RTW89_MKK][1][0] = 66,
[0][0][1][0][RTW89_MKK][0][0] = 26,
[0][0][1][0][RTW89_IC][1][0] = 24,
- [0][0][1][0][RTW89_IC][2][0] = 56,
[0][0][1][0][RTW89_KCC][1][0] = 24,
[0][0][1][0][RTW89_KCC][0][0] = 24,
[0][0][1][0][RTW89_ACMA][1][0] = 66,
@@ -38440,13 +37950,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][0] = 56,
[0][0][1][0][RTW89_THAILAND][0][0] = 24,
[0][0][1][0][RTW89_FCC][1][2] = 22,
- [0][0][1][0][RTW89_FCC][2][2] = 56,
[0][0][1][0][RTW89_ETSI][1][2] = 66,
[0][0][1][0][RTW89_ETSI][0][2] = 28,
[0][0][1][0][RTW89_MKK][1][2] = 66,
[0][0][1][0][RTW89_MKK][0][2] = 26,
[0][0][1][0][RTW89_IC][1][2] = 22,
- [0][0][1][0][RTW89_IC][2][2] = 56,
[0][0][1][0][RTW89_KCC][1][2] = 24,
[0][0][1][0][RTW89_KCC][0][2] = 24,
[0][0][1][0][RTW89_ACMA][1][2] = 66,
@@ -38459,13 +37967,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][2] = 56,
[0][0][1][0][RTW89_THAILAND][0][2] = 22,
[0][0][1][0][RTW89_FCC][1][4] = 22,
- [0][0][1][0][RTW89_FCC][2][4] = 56,
[0][0][1][0][RTW89_ETSI][1][4] = 66,
[0][0][1][0][RTW89_ETSI][0][4] = 28,
[0][0][1][0][RTW89_MKK][1][4] = 66,
[0][0][1][0][RTW89_MKK][0][4] = 26,
[0][0][1][0][RTW89_IC][1][4] = 22,
- [0][0][1][0][RTW89_IC][2][4] = 56,
[0][0][1][0][RTW89_KCC][1][4] = 24,
[0][0][1][0][RTW89_KCC][0][4] = 24,
[0][0][1][0][RTW89_ACMA][1][4] = 66,
@@ -38478,13 +37984,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][4] = 56,
[0][0][1][0][RTW89_THAILAND][0][4] = 22,
[0][0][1][0][RTW89_FCC][1][6] = 22,
- [0][0][1][0][RTW89_FCC][2][6] = 56,
[0][0][1][0][RTW89_ETSI][1][6] = 66,
[0][0][1][0][RTW89_ETSI][0][6] = 28,
[0][0][1][0][RTW89_MKK][1][6] = 66,
[0][0][1][0][RTW89_MKK][0][6] = 26,
[0][0][1][0][RTW89_IC][1][6] = 22,
- [0][0][1][0][RTW89_IC][2][6] = 56,
[0][0][1][0][RTW89_KCC][1][6] = 24,
[0][0][1][0][RTW89_KCC][0][6] = 24,
[0][0][1][0][RTW89_ACMA][1][6] = 66,
@@ -38497,13 +38001,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][6] = 56,
[0][0][1][0][RTW89_THAILAND][0][6] = 22,
[0][0][1][0][RTW89_FCC][1][8] = 22,
- [0][0][1][0][RTW89_FCC][2][8] = 56,
[0][0][1][0][RTW89_ETSI][1][8] = 66,
[0][0][1][0][RTW89_ETSI][0][8] = 28,
[0][0][1][0][RTW89_MKK][1][8] = 66,
[0][0][1][0][RTW89_MKK][0][8] = 26,
[0][0][1][0][RTW89_IC][1][8] = 22,
- [0][0][1][0][RTW89_IC][2][8] = 56,
[0][0][1][0][RTW89_KCC][1][8] = 24,
[0][0][1][0][RTW89_KCC][0][8] = 24,
[0][0][1][0][RTW89_ACMA][1][8] = 66,
@@ -38516,13 +38018,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][8] = 56,
[0][0][1][0][RTW89_THAILAND][0][8] = 22,
[0][0][1][0][RTW89_FCC][1][10] = 22,
- [0][0][1][0][RTW89_FCC][2][10] = 56,
[0][0][1][0][RTW89_ETSI][1][10] = 66,
[0][0][1][0][RTW89_ETSI][0][10] = 28,
[0][0][1][0][RTW89_MKK][1][10] = 66,
[0][0][1][0][RTW89_MKK][0][10] = 26,
[0][0][1][0][RTW89_IC][1][10] = 22,
- [0][0][1][0][RTW89_IC][2][10] = 56,
[0][0][1][0][RTW89_KCC][1][10] = 24,
[0][0][1][0][RTW89_KCC][0][10] = 24,
[0][0][1][0][RTW89_ACMA][1][10] = 66,
@@ -38535,13 +38035,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][10] = 56,
[0][0][1][0][RTW89_THAILAND][0][10] = 22,
[0][0][1][0][RTW89_FCC][1][12] = 22,
- [0][0][1][0][RTW89_FCC][2][12] = 56,
[0][0][1][0][RTW89_ETSI][1][12] = 66,
[0][0][1][0][RTW89_ETSI][0][12] = 28,
[0][0][1][0][RTW89_MKK][1][12] = 66,
[0][0][1][0][RTW89_MKK][0][12] = 26,
[0][0][1][0][RTW89_IC][1][12] = 22,
- [0][0][1][0][RTW89_IC][2][12] = 56,
[0][0][1][0][RTW89_KCC][1][12] = 24,
[0][0][1][0][RTW89_KCC][0][12] = 24,
[0][0][1][0][RTW89_ACMA][1][12] = 66,
@@ -38554,13 +38052,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][12] = 56,
[0][0][1][0][RTW89_THAILAND][0][12] = 22,
[0][0][1][0][RTW89_FCC][1][14] = 22,
- [0][0][1][0][RTW89_FCC][2][14] = 56,
[0][0][1][0][RTW89_ETSI][1][14] = 66,
[0][0][1][0][RTW89_ETSI][0][14] = 28,
[0][0][1][0][RTW89_MKK][1][14] = 66,
[0][0][1][0][RTW89_MKK][0][14] = 26,
[0][0][1][0][RTW89_IC][1][14] = 22,
- [0][0][1][0][RTW89_IC][2][14] = 56,
[0][0][1][0][RTW89_KCC][1][14] = 24,
[0][0][1][0][RTW89_KCC][0][14] = 24,
[0][0][1][0][RTW89_ACMA][1][14] = 66,
@@ -38573,13 +38069,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][14] = 56,
[0][0][1][0][RTW89_THAILAND][0][14] = 22,
[0][0][1][0][RTW89_FCC][1][15] = 22,
- [0][0][1][0][RTW89_FCC][2][15] = 56,
[0][0][1][0][RTW89_ETSI][1][15] = 66,
[0][0][1][0][RTW89_ETSI][0][15] = 28,
[0][0][1][0][RTW89_MKK][1][15] = 66,
[0][0][1][0][RTW89_MKK][0][15] = 26,
[0][0][1][0][RTW89_IC][1][15] = 22,
- [0][0][1][0][RTW89_IC][2][15] = 56,
[0][0][1][0][RTW89_KCC][1][15] = 24,
[0][0][1][0][RTW89_KCC][0][15] = 24,
[0][0][1][0][RTW89_ACMA][1][15] = 66,
@@ -38592,13 +38086,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][15] = 56,
[0][0][1][0][RTW89_THAILAND][0][15] = 22,
[0][0][1][0][RTW89_FCC][1][17] = 22,
- [0][0][1][0][RTW89_FCC][2][17] = 56,
[0][0][1][0][RTW89_ETSI][1][17] = 66,
[0][0][1][0][RTW89_ETSI][0][17] = 28,
[0][0][1][0][RTW89_MKK][1][17] = 66,
[0][0][1][0][RTW89_MKK][0][17] = 26,
[0][0][1][0][RTW89_IC][1][17] = 22,
- [0][0][1][0][RTW89_IC][2][17] = 56,
[0][0][1][0][RTW89_KCC][1][17] = 24,
[0][0][1][0][RTW89_KCC][0][17] = 24,
[0][0][1][0][RTW89_ACMA][1][17] = 66,
@@ -38611,13 +38103,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][17] = 56,
[0][0][1][0][RTW89_THAILAND][0][17] = 22,
[0][0][1][0][RTW89_FCC][1][19] = 22,
- [0][0][1][0][RTW89_FCC][2][19] = 56,
[0][0][1][0][RTW89_ETSI][1][19] = 66,
[0][0][1][0][RTW89_ETSI][0][19] = 28,
[0][0][1][0][RTW89_MKK][1][19] = 66,
[0][0][1][0][RTW89_MKK][0][19] = 26,
[0][0][1][0][RTW89_IC][1][19] = 22,
- [0][0][1][0][RTW89_IC][2][19] = 56,
[0][0][1][0][RTW89_KCC][1][19] = 24,
[0][0][1][0][RTW89_KCC][0][19] = 24,
[0][0][1][0][RTW89_ACMA][1][19] = 66,
@@ -38630,13 +38120,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][19] = 56,
[0][0][1][0][RTW89_THAILAND][0][19] = 22,
[0][0][1][0][RTW89_FCC][1][21] = 22,
- [0][0][1][0][RTW89_FCC][2][21] = 56,
[0][0][1][0][RTW89_ETSI][1][21] = 66,
[0][0][1][0][RTW89_ETSI][0][21] = 28,
[0][0][1][0][RTW89_MKK][1][21] = 66,
[0][0][1][0][RTW89_MKK][0][21] = 26,
[0][0][1][0][RTW89_IC][1][21] = 22,
- [0][0][1][0][RTW89_IC][2][21] = 56,
[0][0][1][0][RTW89_KCC][1][21] = 24,
[0][0][1][0][RTW89_KCC][0][21] = 24,
[0][0][1][0][RTW89_ACMA][1][21] = 66,
@@ -38649,13 +38137,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][21] = 56,
[0][0][1][0][RTW89_THAILAND][0][21] = 22,
[0][0][1][0][RTW89_FCC][1][23] = 22,
- [0][0][1][0][RTW89_FCC][2][23] = 70,
[0][0][1][0][RTW89_ETSI][1][23] = 66,
[0][0][1][0][RTW89_ETSI][0][23] = 28,
[0][0][1][0][RTW89_MKK][1][23] = 66,
[0][0][1][0][RTW89_MKK][0][23] = 26,
[0][0][1][0][RTW89_IC][1][23] = 22,
- [0][0][1][0][RTW89_IC][2][23] = 70,
[0][0][1][0][RTW89_KCC][1][23] = 24,
[0][0][1][0][RTW89_KCC][0][23] = 26,
[0][0][1][0][RTW89_ACMA][1][23] = 66,
@@ -38668,13 +38154,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][23] = 66,
[0][0][1][0][RTW89_THAILAND][0][23] = 22,
[0][0][1][0][RTW89_FCC][1][25] = 22,
- [0][0][1][0][RTW89_FCC][2][25] = 70,
[0][0][1][0][RTW89_ETSI][1][25] = 66,
[0][0][1][0][RTW89_ETSI][0][25] = 28,
[0][0][1][0][RTW89_MKK][1][25] = 66,
[0][0][1][0][RTW89_MKK][0][25] = 26,
[0][0][1][0][RTW89_IC][1][25] = 22,
- [0][0][1][0][RTW89_IC][2][25] = 70,
[0][0][1][0][RTW89_KCC][1][25] = 24,
[0][0][1][0][RTW89_KCC][0][25] = 26,
[0][0][1][0][RTW89_ACMA][1][25] = 66,
@@ -38687,13 +38171,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][25] = 66,
[0][0][1][0][RTW89_THAILAND][0][25] = 22,
[0][0][1][0][RTW89_FCC][1][27] = 22,
- [0][0][1][0][RTW89_FCC][2][27] = 70,
[0][0][1][0][RTW89_ETSI][1][27] = 66,
[0][0][1][0][RTW89_ETSI][0][27] = 28,
[0][0][1][0][RTW89_MKK][1][27] = 66,
[0][0][1][0][RTW89_MKK][0][27] = 26,
[0][0][1][0][RTW89_IC][1][27] = 22,
- [0][0][1][0][RTW89_IC][2][27] = 70,
[0][0][1][0][RTW89_KCC][1][27] = 24,
[0][0][1][0][RTW89_KCC][0][27] = 26,
[0][0][1][0][RTW89_ACMA][1][27] = 66,
@@ -38706,13 +38188,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][27] = 66,
[0][0][1][0][RTW89_THAILAND][0][27] = 22,
[0][0][1][0][RTW89_FCC][1][29] = 22,
- [0][0][1][0][RTW89_FCC][2][29] = 70,
[0][0][1][0][RTW89_ETSI][1][29] = 66,
[0][0][1][0][RTW89_ETSI][0][29] = 28,
[0][0][1][0][RTW89_MKK][1][29] = 66,
[0][0][1][0][RTW89_MKK][0][29] = 26,
[0][0][1][0][RTW89_IC][1][29] = 22,
- [0][0][1][0][RTW89_IC][2][29] = 70,
[0][0][1][0][RTW89_KCC][1][29] = 24,
[0][0][1][0][RTW89_KCC][0][29] = 26,
[0][0][1][0][RTW89_ACMA][1][29] = 66,
@@ -38725,13 +38205,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][29] = 66,
[0][0][1][0][RTW89_THAILAND][0][29] = 22,
[0][0][1][0][RTW89_FCC][1][30] = 22,
- [0][0][1][0][RTW89_FCC][2][30] = 70,
[0][0][1][0][RTW89_ETSI][1][30] = 66,
[0][0][1][0][RTW89_ETSI][0][30] = 28,
[0][0][1][0][RTW89_MKK][1][30] = 66,
[0][0][1][0][RTW89_MKK][0][30] = 26,
[0][0][1][0][RTW89_IC][1][30] = 22,
- [0][0][1][0][RTW89_IC][2][30] = 70,
[0][0][1][0][RTW89_KCC][1][30] = 24,
[0][0][1][0][RTW89_KCC][0][30] = 26,
[0][0][1][0][RTW89_ACMA][1][30] = 66,
@@ -38744,13 +38222,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][30] = 66,
[0][0][1][0][RTW89_THAILAND][0][30] = 22,
[0][0][1][0][RTW89_FCC][1][32] = 22,
- [0][0][1][0][RTW89_FCC][2][32] = 70,
[0][0][1][0][RTW89_ETSI][1][32] = 66,
[0][0][1][0][RTW89_ETSI][0][32] = 28,
[0][0][1][0][RTW89_MKK][1][32] = 66,
[0][0][1][0][RTW89_MKK][0][32] = 26,
[0][0][1][0][RTW89_IC][1][32] = 22,
- [0][0][1][0][RTW89_IC][2][32] = 70,
[0][0][1][0][RTW89_KCC][1][32] = 24,
[0][0][1][0][RTW89_KCC][0][32] = 26,
[0][0][1][0][RTW89_ACMA][1][32] = 66,
@@ -38763,13 +38239,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][32] = 66,
[0][0][1][0][RTW89_THAILAND][0][32] = 22,
[0][0][1][0][RTW89_FCC][1][34] = 22,
- [0][0][1][0][RTW89_FCC][2][34] = 70,
[0][0][1][0][RTW89_ETSI][1][34] = 66,
[0][0][1][0][RTW89_ETSI][0][34] = 28,
[0][0][1][0][RTW89_MKK][1][34] = 66,
[0][0][1][0][RTW89_MKK][0][34] = 26,
[0][0][1][0][RTW89_IC][1][34] = 22,
- [0][0][1][0][RTW89_IC][2][34] = 70,
[0][0][1][0][RTW89_KCC][1][34] = 24,
[0][0][1][0][RTW89_KCC][0][34] = 26,
[0][0][1][0][RTW89_ACMA][1][34] = 66,
@@ -38782,13 +38256,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][34] = 66,
[0][0][1][0][RTW89_THAILAND][0][34] = 22,
[0][0][1][0][RTW89_FCC][1][36] = 22,
- [0][0][1][0][RTW89_FCC][2][36] = 70,
[0][0][1][0][RTW89_ETSI][1][36] = 66,
[0][0][1][0][RTW89_ETSI][0][36] = 28,
[0][0][1][0][RTW89_MKK][1][36] = 66,
[0][0][1][0][RTW89_MKK][0][36] = 26,
[0][0][1][0][RTW89_IC][1][36] = 22,
- [0][0][1][0][RTW89_IC][2][36] = 70,
[0][0][1][0][RTW89_KCC][1][36] = 24,
[0][0][1][0][RTW89_KCC][0][36] = 26,
[0][0][1][0][RTW89_ACMA][1][36] = 66,
@@ -38801,13 +38273,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][36] = 66,
[0][0][1][0][RTW89_THAILAND][0][36] = 22,
[0][0][1][0][RTW89_FCC][1][38] = 22,
- [0][0][1][0][RTW89_FCC][2][38] = 70,
[0][0][1][0][RTW89_ETSI][1][38] = 66,
[0][0][1][0][RTW89_ETSI][0][38] = 28,
[0][0][1][0][RTW89_MKK][1][38] = 66,
[0][0][1][0][RTW89_MKK][0][38] = 26,
[0][0][1][0][RTW89_IC][1][38] = 22,
- [0][0][1][0][RTW89_IC][2][38] = 70,
[0][0][1][0][RTW89_KCC][1][38] = 24,
[0][0][1][0][RTW89_KCC][0][38] = 26,
[0][0][1][0][RTW89_ACMA][1][38] = 66,
@@ -38820,13 +38290,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][38] = 66,
[0][0][1][0][RTW89_THAILAND][0][38] = 22,
[0][0][1][0][RTW89_FCC][1][40] = 22,
- [0][0][1][0][RTW89_FCC][2][40] = 70,
[0][0][1][0][RTW89_ETSI][1][40] = 66,
[0][0][1][0][RTW89_ETSI][0][40] = 28,
[0][0][1][0][RTW89_MKK][1][40] = 66,
[0][0][1][0][RTW89_MKK][0][40] = 26,
[0][0][1][0][RTW89_IC][1][40] = 22,
- [0][0][1][0][RTW89_IC][2][40] = 70,
[0][0][1][0][RTW89_KCC][1][40] = 24,
[0][0][1][0][RTW89_KCC][0][40] = 26,
[0][0][1][0][RTW89_ACMA][1][40] = 66,
@@ -38839,13 +38307,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][40] = 66,
[0][0][1][0][RTW89_THAILAND][0][40] = 22,
[0][0][1][0][RTW89_FCC][1][42] = 22,
- [0][0][1][0][RTW89_FCC][2][42] = 70,
[0][0][1][0][RTW89_ETSI][1][42] = 66,
[0][0][1][0][RTW89_ETSI][0][42] = 28,
[0][0][1][0][RTW89_MKK][1][42] = 66,
[0][0][1][0][RTW89_MKK][0][42] = 26,
[0][0][1][0][RTW89_IC][1][42] = 22,
- [0][0][1][0][RTW89_IC][2][42] = 70,
[0][0][1][0][RTW89_KCC][1][42] = 24,
[0][0][1][0][RTW89_KCC][0][42] = 26,
[0][0][1][0][RTW89_ACMA][1][42] = 66,
@@ -38858,13 +38324,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][42] = 66,
[0][0][1][0][RTW89_THAILAND][0][42] = 22,
[0][0][1][0][RTW89_FCC][1][44] = 22,
- [0][0][1][0][RTW89_FCC][2][44] = 70,
[0][0][1][0][RTW89_ETSI][1][44] = 66,
[0][0][1][0][RTW89_ETSI][0][44] = 30,
[0][0][1][0][RTW89_MKK][1][44] = 44,
[0][0][1][0][RTW89_MKK][0][44] = 28,
[0][0][1][0][RTW89_IC][1][44] = 22,
- [0][0][1][0][RTW89_IC][2][44] = 70,
[0][0][1][0][RTW89_KCC][1][44] = 24,
[0][0][1][0][RTW89_KCC][0][44] = 26,
[0][0][1][0][RTW89_ACMA][1][44] = 66,
@@ -38877,13 +38341,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][44] = 68,
[0][0][1][0][RTW89_THAILAND][0][44] = 22,
[0][0][1][0][RTW89_FCC][1][45] = 22,
- [0][0][1][0][RTW89_FCC][2][45] = 127,
[0][0][1][0][RTW89_ETSI][1][45] = 127,
[0][0][1][0][RTW89_ETSI][0][45] = 127,
[0][0][1][0][RTW89_MKK][1][45] = 127,
[0][0][1][0][RTW89_MKK][0][45] = 127,
[0][0][1][0][RTW89_IC][1][45] = 22,
- [0][0][1][0][RTW89_IC][2][45] = 70,
[0][0][1][0][RTW89_KCC][1][45] = 24,
[0][0][1][0][RTW89_KCC][0][45] = 127,
[0][0][1][0][RTW89_ACMA][1][45] = 127,
@@ -38896,13 +38358,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][45] = 127,
[0][0][1][0][RTW89_THAILAND][0][45] = 127,
[0][0][1][0][RTW89_FCC][1][47] = 22,
- [0][0][1][0][RTW89_FCC][2][47] = 127,
[0][0][1][0][RTW89_ETSI][1][47] = 127,
[0][0][1][0][RTW89_ETSI][0][47] = 127,
[0][0][1][0][RTW89_MKK][1][47] = 127,
[0][0][1][0][RTW89_MKK][0][47] = 127,
[0][0][1][0][RTW89_IC][1][47] = 22,
- [0][0][1][0][RTW89_IC][2][47] = 70,
[0][0][1][0][RTW89_KCC][1][47] = 24,
[0][0][1][0][RTW89_KCC][0][47] = 127,
[0][0][1][0][RTW89_ACMA][1][47] = 127,
@@ -38915,13 +38375,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][47] = 127,
[0][0][1][0][RTW89_THAILAND][0][47] = 127,
[0][0][1][0][RTW89_FCC][1][49] = 24,
- [0][0][1][0][RTW89_FCC][2][49] = 127,
[0][0][1][0][RTW89_ETSI][1][49] = 127,
[0][0][1][0][RTW89_ETSI][0][49] = 127,
[0][0][1][0][RTW89_MKK][1][49] = 127,
[0][0][1][0][RTW89_MKK][0][49] = 127,
[0][0][1][0][RTW89_IC][1][49] = 24,
- [0][0][1][0][RTW89_IC][2][49] = 70,
[0][0][1][0][RTW89_KCC][1][49] = 24,
[0][0][1][0][RTW89_KCC][0][49] = 127,
[0][0][1][0][RTW89_ACMA][1][49] = 127,
@@ -38934,13 +38392,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][49] = 127,
[0][0][1][0][RTW89_THAILAND][0][49] = 127,
[0][0][1][0][RTW89_FCC][1][51] = 22,
- [0][0][1][0][RTW89_FCC][2][51] = 127,
[0][0][1][0][RTW89_ETSI][1][51] = 127,
[0][0][1][0][RTW89_ETSI][0][51] = 127,
[0][0][1][0][RTW89_MKK][1][51] = 127,
[0][0][1][0][RTW89_MKK][0][51] = 127,
[0][0][1][0][RTW89_IC][1][51] = 22,
- [0][0][1][0][RTW89_IC][2][51] = 70,
[0][0][1][0][RTW89_KCC][1][51] = 24,
[0][0][1][0][RTW89_KCC][0][51] = 127,
[0][0][1][0][RTW89_ACMA][1][51] = 127,
@@ -38953,13 +38409,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][51] = 127,
[0][0][1][0][RTW89_THAILAND][0][51] = 127,
[0][0][1][0][RTW89_FCC][1][53] = 22,
- [0][0][1][0][RTW89_FCC][2][53] = 127,
[0][0][1][0][RTW89_ETSI][1][53] = 127,
[0][0][1][0][RTW89_ETSI][0][53] = 127,
[0][0][1][0][RTW89_MKK][1][53] = 127,
[0][0][1][0][RTW89_MKK][0][53] = 127,
[0][0][1][0][RTW89_IC][1][53] = 22,
- [0][0][1][0][RTW89_IC][2][53] = 70,
[0][0][1][0][RTW89_KCC][1][53] = 24,
[0][0][1][0][RTW89_KCC][0][53] = 127,
[0][0][1][0][RTW89_ACMA][1][53] = 127,
@@ -38972,13 +38426,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][53] = 127,
[0][0][1][0][RTW89_THAILAND][0][53] = 127,
[0][0][1][0][RTW89_FCC][1][55] = 22,
- [0][0][1][0][RTW89_FCC][2][55] = 68,
[0][0][1][0][RTW89_ETSI][1][55] = 127,
[0][0][1][0][RTW89_ETSI][0][55] = 127,
[0][0][1][0][RTW89_MKK][1][55] = 127,
[0][0][1][0][RTW89_MKK][0][55] = 127,
[0][0][1][0][RTW89_IC][1][55] = 22,
- [0][0][1][0][RTW89_IC][2][55] = 68,
[0][0][1][0][RTW89_KCC][1][55] = 26,
[0][0][1][0][RTW89_KCC][0][55] = 127,
[0][0][1][0][RTW89_ACMA][1][55] = 127,
@@ -38991,13 +38443,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][55] = 127,
[0][0][1][0][RTW89_THAILAND][0][55] = 127,
[0][0][1][0][RTW89_FCC][1][57] = 22,
- [0][0][1][0][RTW89_FCC][2][57] = 68,
[0][0][1][0][RTW89_ETSI][1][57] = 127,
[0][0][1][0][RTW89_ETSI][0][57] = 127,
[0][0][1][0][RTW89_MKK][1][57] = 127,
[0][0][1][0][RTW89_MKK][0][57] = 127,
[0][0][1][0][RTW89_IC][1][57] = 22,
- [0][0][1][0][RTW89_IC][2][57] = 68,
[0][0][1][0][RTW89_KCC][1][57] = 26,
[0][0][1][0][RTW89_KCC][0][57] = 127,
[0][0][1][0][RTW89_ACMA][1][57] = 127,
@@ -39010,13 +38460,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][57] = 127,
[0][0][1][0][RTW89_THAILAND][0][57] = 127,
[0][0][1][0][RTW89_FCC][1][59] = 22,
- [0][0][1][0][RTW89_FCC][2][59] = 68,
[0][0][1][0][RTW89_ETSI][1][59] = 127,
[0][0][1][0][RTW89_ETSI][0][59] = 127,
[0][0][1][0][RTW89_MKK][1][59] = 127,
[0][0][1][0][RTW89_MKK][0][59] = 127,
[0][0][1][0][RTW89_IC][1][59] = 22,
- [0][0][1][0][RTW89_IC][2][59] = 68,
[0][0][1][0][RTW89_KCC][1][59] = 26,
[0][0][1][0][RTW89_KCC][0][59] = 127,
[0][0][1][0][RTW89_ACMA][1][59] = 127,
@@ -39029,13 +38477,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][59] = 127,
[0][0][1][0][RTW89_THAILAND][0][59] = 127,
[0][0][1][0][RTW89_FCC][1][60] = 22,
- [0][0][1][0][RTW89_FCC][2][60] = 68,
[0][0][1][0][RTW89_ETSI][1][60] = 127,
[0][0][1][0][RTW89_ETSI][0][60] = 127,
[0][0][1][0][RTW89_MKK][1][60] = 127,
[0][0][1][0][RTW89_MKK][0][60] = 127,
[0][0][1][0][RTW89_IC][1][60] = 22,
- [0][0][1][0][RTW89_IC][2][60] = 68,
[0][0][1][0][RTW89_KCC][1][60] = 26,
[0][0][1][0][RTW89_KCC][0][60] = 127,
[0][0][1][0][RTW89_ACMA][1][60] = 127,
@@ -39048,13 +38494,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][60] = 127,
[0][0][1][0][RTW89_THAILAND][0][60] = 127,
[0][0][1][0][RTW89_FCC][1][62] = 22,
- [0][0][1][0][RTW89_FCC][2][62] = 68,
[0][0][1][0][RTW89_ETSI][1][62] = 127,
[0][0][1][0][RTW89_ETSI][0][62] = 127,
[0][0][1][0][RTW89_MKK][1][62] = 127,
[0][0][1][0][RTW89_MKK][0][62] = 127,
[0][0][1][0][RTW89_IC][1][62] = 22,
- [0][0][1][0][RTW89_IC][2][62] = 68,
[0][0][1][0][RTW89_KCC][1][62] = 26,
[0][0][1][0][RTW89_KCC][0][62] = 127,
[0][0][1][0][RTW89_ACMA][1][62] = 127,
@@ -39067,13 +38511,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][62] = 127,
[0][0][1][0][RTW89_THAILAND][0][62] = 127,
[0][0][1][0][RTW89_FCC][1][64] = 22,
- [0][0][1][0][RTW89_FCC][2][64] = 68,
[0][0][1][0][RTW89_ETSI][1][64] = 127,
[0][0][1][0][RTW89_ETSI][0][64] = 127,
[0][0][1][0][RTW89_MKK][1][64] = 127,
[0][0][1][0][RTW89_MKK][0][64] = 127,
[0][0][1][0][RTW89_IC][1][64] = 22,
- [0][0][1][0][RTW89_IC][2][64] = 68,
[0][0][1][0][RTW89_KCC][1][64] = 26,
[0][0][1][0][RTW89_KCC][0][64] = 127,
[0][0][1][0][RTW89_ACMA][1][64] = 127,
@@ -39086,13 +38528,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][64] = 127,
[0][0][1][0][RTW89_THAILAND][0][64] = 127,
[0][0][1][0][RTW89_FCC][1][66] = 22,
- [0][0][1][0][RTW89_FCC][2][66] = 68,
[0][0][1][0][RTW89_ETSI][1][66] = 127,
[0][0][1][0][RTW89_ETSI][0][66] = 127,
[0][0][1][0][RTW89_MKK][1][66] = 127,
[0][0][1][0][RTW89_MKK][0][66] = 127,
[0][0][1][0][RTW89_IC][1][66] = 22,
- [0][0][1][0][RTW89_IC][2][66] = 68,
[0][0][1][0][RTW89_KCC][1][66] = 26,
[0][0][1][0][RTW89_KCC][0][66] = 127,
[0][0][1][0][RTW89_ACMA][1][66] = 127,
@@ -39105,13 +38545,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][66] = 127,
[0][0][1][0][RTW89_THAILAND][0][66] = 127,
[0][0][1][0][RTW89_FCC][1][68] = 22,
- [0][0][1][0][RTW89_FCC][2][68] = 68,
[0][0][1][0][RTW89_ETSI][1][68] = 127,
[0][0][1][0][RTW89_ETSI][0][68] = 127,
[0][0][1][0][RTW89_MKK][1][68] = 127,
[0][0][1][0][RTW89_MKK][0][68] = 127,
[0][0][1][0][RTW89_IC][1][68] = 22,
- [0][0][1][0][RTW89_IC][2][68] = 68,
[0][0][1][0][RTW89_KCC][1][68] = 26,
[0][0][1][0][RTW89_KCC][0][68] = 127,
[0][0][1][0][RTW89_ACMA][1][68] = 127,
@@ -39124,13 +38562,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][68] = 127,
[0][0][1][0][RTW89_THAILAND][0][68] = 127,
[0][0][1][0][RTW89_FCC][1][70] = 24,
- [0][0][1][0][RTW89_FCC][2][70] = 68,
[0][0][1][0][RTW89_ETSI][1][70] = 127,
[0][0][1][0][RTW89_ETSI][0][70] = 127,
[0][0][1][0][RTW89_MKK][1][70] = 127,
[0][0][1][0][RTW89_MKK][0][70] = 127,
[0][0][1][0][RTW89_IC][1][70] = 24,
- [0][0][1][0][RTW89_IC][2][70] = 68,
[0][0][1][0][RTW89_KCC][1][70] = 26,
[0][0][1][0][RTW89_KCC][0][70] = 127,
[0][0][1][0][RTW89_ACMA][1][70] = 127,
@@ -39143,13 +38579,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][70] = 127,
[0][0][1][0][RTW89_THAILAND][0][70] = 127,
[0][0][1][0][RTW89_FCC][1][72] = 22,
- [0][0][1][0][RTW89_FCC][2][72] = 68,
[0][0][1][0][RTW89_ETSI][1][72] = 127,
[0][0][1][0][RTW89_ETSI][0][72] = 127,
[0][0][1][0][RTW89_MKK][1][72] = 127,
[0][0][1][0][RTW89_MKK][0][72] = 127,
[0][0][1][0][RTW89_IC][1][72] = 22,
- [0][0][1][0][RTW89_IC][2][72] = 68,
[0][0][1][0][RTW89_KCC][1][72] = 26,
[0][0][1][0][RTW89_KCC][0][72] = 127,
[0][0][1][0][RTW89_ACMA][1][72] = 127,
@@ -39162,13 +38596,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][72] = 127,
[0][0][1][0][RTW89_THAILAND][0][72] = 127,
[0][0][1][0][RTW89_FCC][1][74] = 22,
- [0][0][1][0][RTW89_FCC][2][74] = 68,
[0][0][1][0][RTW89_ETSI][1][74] = 127,
[0][0][1][0][RTW89_ETSI][0][74] = 127,
[0][0][1][0][RTW89_MKK][1][74] = 127,
[0][0][1][0][RTW89_MKK][0][74] = 127,
[0][0][1][0][RTW89_IC][1][74] = 22,
- [0][0][1][0][RTW89_IC][2][74] = 68,
[0][0][1][0][RTW89_KCC][1][74] = 26,
[0][0][1][0][RTW89_KCC][0][74] = 127,
[0][0][1][0][RTW89_ACMA][1][74] = 127,
@@ -39181,13 +38613,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][74] = 127,
[0][0][1][0][RTW89_THAILAND][0][74] = 127,
[0][0][1][0][RTW89_FCC][1][75] = 22,
- [0][0][1][0][RTW89_FCC][2][75] = 68,
[0][0][1][0][RTW89_ETSI][1][75] = 127,
[0][0][1][0][RTW89_ETSI][0][75] = 127,
[0][0][1][0][RTW89_MKK][1][75] = 127,
[0][0][1][0][RTW89_MKK][0][75] = 127,
[0][0][1][0][RTW89_IC][1][75] = 22,
- [0][0][1][0][RTW89_IC][2][75] = 68,
[0][0][1][0][RTW89_KCC][1][75] = 26,
[0][0][1][0][RTW89_KCC][0][75] = 127,
[0][0][1][0][RTW89_ACMA][1][75] = 127,
@@ -39200,13 +38630,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][75] = 127,
[0][0][1][0][RTW89_THAILAND][0][75] = 127,
[0][0][1][0][RTW89_FCC][1][77] = 22,
- [0][0][1][0][RTW89_FCC][2][77] = 68,
[0][0][1][0][RTW89_ETSI][1][77] = 127,
[0][0][1][0][RTW89_ETSI][0][77] = 127,
[0][0][1][0][RTW89_MKK][1][77] = 127,
[0][0][1][0][RTW89_MKK][0][77] = 127,
[0][0][1][0][RTW89_IC][1][77] = 22,
- [0][0][1][0][RTW89_IC][2][77] = 68,
[0][0][1][0][RTW89_KCC][1][77] = 26,
[0][0][1][0][RTW89_KCC][0][77] = 127,
[0][0][1][0][RTW89_ACMA][1][77] = 127,
@@ -39219,13 +38647,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][77] = 127,
[0][0][1][0][RTW89_THAILAND][0][77] = 127,
[0][0][1][0][RTW89_FCC][1][79] = 22,
- [0][0][1][0][RTW89_FCC][2][79] = 68,
[0][0][1][0][RTW89_ETSI][1][79] = 127,
[0][0][1][0][RTW89_ETSI][0][79] = 127,
[0][0][1][0][RTW89_MKK][1][79] = 127,
[0][0][1][0][RTW89_MKK][0][79] = 127,
[0][0][1][0][RTW89_IC][1][79] = 22,
- [0][0][1][0][RTW89_IC][2][79] = 68,
[0][0][1][0][RTW89_KCC][1][79] = 26,
[0][0][1][0][RTW89_KCC][0][79] = 127,
[0][0][1][0][RTW89_ACMA][1][79] = 127,
@@ -39238,13 +38664,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][79] = 127,
[0][0][1][0][RTW89_THAILAND][0][79] = 127,
[0][0][1][0][RTW89_FCC][1][81] = 22,
- [0][0][1][0][RTW89_FCC][2][81] = 68,
[0][0][1][0][RTW89_ETSI][1][81] = 127,
[0][0][1][0][RTW89_ETSI][0][81] = 127,
[0][0][1][0][RTW89_MKK][1][81] = 127,
[0][0][1][0][RTW89_MKK][0][81] = 127,
[0][0][1][0][RTW89_IC][1][81] = 22,
- [0][0][1][0][RTW89_IC][2][81] = 68,
[0][0][1][0][RTW89_KCC][1][81] = 26,
[0][0][1][0][RTW89_KCC][0][81] = 127,
[0][0][1][0][RTW89_ACMA][1][81] = 127,
@@ -39257,13 +38681,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][81] = 127,
[0][0][1][0][RTW89_THAILAND][0][81] = 127,
[0][0][1][0][RTW89_FCC][1][83] = 22,
- [0][0][1][0][RTW89_FCC][2][83] = 68,
[0][0][1][0][RTW89_ETSI][1][83] = 127,
[0][0][1][0][RTW89_ETSI][0][83] = 127,
[0][0][1][0][RTW89_MKK][1][83] = 127,
[0][0][1][0][RTW89_MKK][0][83] = 127,
[0][0][1][0][RTW89_IC][1][83] = 22,
- [0][0][1][0][RTW89_IC][2][83] = 68,
[0][0][1][0][RTW89_KCC][1][83] = 32,
[0][0][1][0][RTW89_KCC][0][83] = 127,
[0][0][1][0][RTW89_ACMA][1][83] = 127,
@@ -39276,13 +38698,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][83] = 127,
[0][0][1][0][RTW89_THAILAND][0][83] = 127,
[0][0][1][0][RTW89_FCC][1][85] = 22,
- [0][0][1][0][RTW89_FCC][2][85] = 68,
[0][0][1][0][RTW89_ETSI][1][85] = 127,
[0][0][1][0][RTW89_ETSI][0][85] = 127,
[0][0][1][0][RTW89_MKK][1][85] = 127,
[0][0][1][0][RTW89_MKK][0][85] = 127,
[0][0][1][0][RTW89_IC][1][85] = 22,
- [0][0][1][0][RTW89_IC][2][85] = 68,
[0][0][1][0][RTW89_KCC][1][85] = 32,
[0][0][1][0][RTW89_KCC][0][85] = 127,
[0][0][1][0][RTW89_ACMA][1][85] = 127,
@@ -39295,13 +38715,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][85] = 127,
[0][0][1][0][RTW89_THAILAND][0][85] = 127,
[0][0][1][0][RTW89_FCC][1][87] = 22,
- [0][0][1][0][RTW89_FCC][2][87] = 127,
[0][0][1][0][RTW89_ETSI][1][87] = 127,
[0][0][1][0][RTW89_ETSI][0][87] = 127,
[0][0][1][0][RTW89_MKK][1][87] = 127,
[0][0][1][0][RTW89_MKK][0][87] = 127,
[0][0][1][0][RTW89_IC][1][87] = 22,
- [0][0][1][0][RTW89_IC][2][87] = 127,
[0][0][1][0][RTW89_KCC][1][87] = 32,
[0][0][1][0][RTW89_KCC][0][87] = 127,
[0][0][1][0][RTW89_ACMA][1][87] = 127,
@@ -39314,13 +38732,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][87] = 127,
[0][0][1][0][RTW89_THAILAND][0][87] = 127,
[0][0][1][0][RTW89_FCC][1][89] = 22,
- [0][0][1][0][RTW89_FCC][2][89] = 127,
[0][0][1][0][RTW89_ETSI][1][89] = 127,
[0][0][1][0][RTW89_ETSI][0][89] = 127,
[0][0][1][0][RTW89_MKK][1][89] = 127,
[0][0][1][0][RTW89_MKK][0][89] = 127,
[0][0][1][0][RTW89_IC][1][89] = 22,
- [0][0][1][0][RTW89_IC][2][89] = 127,
[0][0][1][0][RTW89_KCC][1][89] = 32,
[0][0][1][0][RTW89_KCC][0][89] = 127,
[0][0][1][0][RTW89_ACMA][1][89] = 127,
@@ -39333,13 +38749,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][89] = 127,
[0][0][1][0][RTW89_THAILAND][0][89] = 127,
[0][0][1][0][RTW89_FCC][1][90] = 22,
- [0][0][1][0][RTW89_FCC][2][90] = 127,
[0][0][1][0][RTW89_ETSI][1][90] = 127,
[0][0][1][0][RTW89_ETSI][0][90] = 127,
[0][0][1][0][RTW89_MKK][1][90] = 127,
[0][0][1][0][RTW89_MKK][0][90] = 127,
[0][0][1][0][RTW89_IC][1][90] = 22,
- [0][0][1][0][RTW89_IC][2][90] = 127,
[0][0][1][0][RTW89_KCC][1][90] = 32,
[0][0][1][0][RTW89_KCC][0][90] = 127,
[0][0][1][0][RTW89_ACMA][1][90] = 127,
@@ -39352,13 +38766,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][90] = 127,
[0][0][1][0][RTW89_THAILAND][0][90] = 127,
[0][0][1][0][RTW89_FCC][1][92] = 22,
- [0][0][1][0][RTW89_FCC][2][92] = 127,
[0][0][1][0][RTW89_ETSI][1][92] = 127,
[0][0][1][0][RTW89_ETSI][0][92] = 127,
[0][0][1][0][RTW89_MKK][1][92] = 127,
[0][0][1][0][RTW89_MKK][0][92] = 127,
[0][0][1][0][RTW89_IC][1][92] = 22,
- [0][0][1][0][RTW89_IC][2][92] = 127,
[0][0][1][0][RTW89_KCC][1][92] = 32,
[0][0][1][0][RTW89_KCC][0][92] = 127,
[0][0][1][0][RTW89_ACMA][1][92] = 127,
@@ -39371,13 +38783,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][92] = 127,
[0][0][1][0][RTW89_THAILAND][0][92] = 127,
[0][0][1][0][RTW89_FCC][1][94] = 22,
- [0][0][1][0][RTW89_FCC][2][94] = 127,
[0][0][1][0][RTW89_ETSI][1][94] = 127,
[0][0][1][0][RTW89_ETSI][0][94] = 127,
[0][0][1][0][RTW89_MKK][1][94] = 127,
[0][0][1][0][RTW89_MKK][0][94] = 127,
[0][0][1][0][RTW89_IC][1][94] = 22,
- [0][0][1][0][RTW89_IC][2][94] = 127,
[0][0][1][0][RTW89_KCC][1][94] = 32,
[0][0][1][0][RTW89_KCC][0][94] = 127,
[0][0][1][0][RTW89_ACMA][1][94] = 127,
@@ -39390,13 +38800,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][94] = 127,
[0][0][1][0][RTW89_THAILAND][0][94] = 127,
[0][0][1][0][RTW89_FCC][1][96] = 22,
- [0][0][1][0][RTW89_FCC][2][96] = 127,
[0][0][1][0][RTW89_ETSI][1][96] = 127,
[0][0][1][0][RTW89_ETSI][0][96] = 127,
[0][0][1][0][RTW89_MKK][1][96] = 127,
[0][0][1][0][RTW89_MKK][0][96] = 127,
[0][0][1][0][RTW89_IC][1][96] = 22,
- [0][0][1][0][RTW89_IC][2][96] = 127,
[0][0][1][0][RTW89_KCC][1][96] = 32,
[0][0][1][0][RTW89_KCC][0][96] = 127,
[0][0][1][0][RTW89_ACMA][1][96] = 127,
@@ -39409,13 +38817,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][96] = 127,
[0][0][1][0][RTW89_THAILAND][0][96] = 127,
[0][0][1][0][RTW89_FCC][1][98] = 22,
- [0][0][1][0][RTW89_FCC][2][98] = 127,
[0][0][1][0][RTW89_ETSI][1][98] = 127,
[0][0][1][0][RTW89_ETSI][0][98] = 127,
[0][0][1][0][RTW89_MKK][1][98] = 127,
[0][0][1][0][RTW89_MKK][0][98] = 127,
[0][0][1][0][RTW89_IC][1][98] = 22,
- [0][0][1][0][RTW89_IC][2][98] = 127,
[0][0][1][0][RTW89_KCC][1][98] = 32,
[0][0][1][0][RTW89_KCC][0][98] = 127,
[0][0][1][0][RTW89_ACMA][1][98] = 127,
@@ -39428,13 +38834,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][98] = 127,
[0][0][1][0][RTW89_THAILAND][0][98] = 127,
[0][0][1][0][RTW89_FCC][1][100] = 22,
- [0][0][1][0][RTW89_FCC][2][100] = 127,
[0][0][1][0][RTW89_ETSI][1][100] = 127,
[0][0][1][0][RTW89_ETSI][0][100] = 127,
[0][0][1][0][RTW89_MKK][1][100] = 127,
[0][0][1][0][RTW89_MKK][0][100] = 127,
[0][0][1][0][RTW89_IC][1][100] = 22,
- [0][0][1][0][RTW89_IC][2][100] = 127,
[0][0][1][0][RTW89_KCC][1][100] = 32,
[0][0][1][0][RTW89_KCC][0][100] = 127,
[0][0][1][0][RTW89_ACMA][1][100] = 127,
@@ -39447,13 +38851,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][100] = 127,
[0][0][1][0][RTW89_THAILAND][0][100] = 127,
[0][0][1][0][RTW89_FCC][1][102] = 22,
- [0][0][1][0][RTW89_FCC][2][102] = 127,
[0][0][1][0][RTW89_ETSI][1][102] = 127,
[0][0][1][0][RTW89_ETSI][0][102] = 127,
[0][0][1][0][RTW89_MKK][1][102] = 127,
[0][0][1][0][RTW89_MKK][0][102] = 127,
[0][0][1][0][RTW89_IC][1][102] = 22,
- [0][0][1][0][RTW89_IC][2][102] = 127,
[0][0][1][0][RTW89_KCC][1][102] = 32,
[0][0][1][0][RTW89_KCC][0][102] = 127,
[0][0][1][0][RTW89_ACMA][1][102] = 127,
@@ -39466,13 +38868,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][102] = 127,
[0][0][1][0][RTW89_THAILAND][0][102] = 127,
[0][0][1][0][RTW89_FCC][1][104] = 22,
- [0][0][1][0][RTW89_FCC][2][104] = 127,
[0][0][1][0][RTW89_ETSI][1][104] = 127,
[0][0][1][0][RTW89_ETSI][0][104] = 127,
[0][0][1][0][RTW89_MKK][1][104] = 127,
[0][0][1][0][RTW89_MKK][0][104] = 127,
[0][0][1][0][RTW89_IC][1][104] = 22,
- [0][0][1][0][RTW89_IC][2][104] = 127,
[0][0][1][0][RTW89_KCC][1][104] = 32,
[0][0][1][0][RTW89_KCC][0][104] = 127,
[0][0][1][0][RTW89_ACMA][1][104] = 127,
@@ -39485,13 +38885,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][104] = 127,
[0][0][1][0][RTW89_THAILAND][0][104] = 127,
[0][0][1][0][RTW89_FCC][1][105] = 22,
- [0][0][1][0][RTW89_FCC][2][105] = 127,
[0][0][1][0][RTW89_ETSI][1][105] = 127,
[0][0][1][0][RTW89_ETSI][0][105] = 127,
[0][0][1][0][RTW89_MKK][1][105] = 127,
[0][0][1][0][RTW89_MKK][0][105] = 127,
[0][0][1][0][RTW89_IC][1][105] = 22,
- [0][0][1][0][RTW89_IC][2][105] = 127,
[0][0][1][0][RTW89_KCC][1][105] = 32,
[0][0][1][0][RTW89_KCC][0][105] = 127,
[0][0][1][0][RTW89_ACMA][1][105] = 127,
@@ -39504,13 +38902,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][105] = 127,
[0][0][1][0][RTW89_THAILAND][0][105] = 127,
[0][0][1][0][RTW89_FCC][1][107] = 24,
- [0][0][1][0][RTW89_FCC][2][107] = 127,
[0][0][1][0][RTW89_ETSI][1][107] = 127,
[0][0][1][0][RTW89_ETSI][0][107] = 127,
[0][0][1][0][RTW89_MKK][1][107] = 127,
[0][0][1][0][RTW89_MKK][0][107] = 127,
[0][0][1][0][RTW89_IC][1][107] = 24,
- [0][0][1][0][RTW89_IC][2][107] = 127,
[0][0][1][0][RTW89_KCC][1][107] = 32,
[0][0][1][0][RTW89_KCC][0][107] = 127,
[0][0][1][0][RTW89_ACMA][1][107] = 127,
@@ -39523,13 +38919,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][107] = 127,
[0][0][1][0][RTW89_THAILAND][0][107] = 127,
[0][0][1][0][RTW89_FCC][1][109] = 24,
- [0][0][1][0][RTW89_FCC][2][109] = 127,
[0][0][1][0][RTW89_ETSI][1][109] = 127,
[0][0][1][0][RTW89_ETSI][0][109] = 127,
[0][0][1][0][RTW89_MKK][1][109] = 127,
[0][0][1][0][RTW89_MKK][0][109] = 127,
[0][0][1][0][RTW89_IC][1][109] = 24,
- [0][0][1][0][RTW89_IC][2][109] = 127,
[0][0][1][0][RTW89_KCC][1][109] = 32,
[0][0][1][0][RTW89_KCC][0][109] = 127,
[0][0][1][0][RTW89_ACMA][1][109] = 127,
@@ -39542,13 +38936,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][109] = 127,
[0][0][1][0][RTW89_THAILAND][0][109] = 127,
[0][0][1][0][RTW89_FCC][1][111] = 127,
- [0][0][1][0][RTW89_FCC][2][111] = 127,
[0][0][1][0][RTW89_ETSI][1][111] = 127,
[0][0][1][0][RTW89_ETSI][0][111] = 127,
[0][0][1][0][RTW89_MKK][1][111] = 127,
[0][0][1][0][RTW89_MKK][0][111] = 127,
[0][0][1][0][RTW89_IC][1][111] = 127,
- [0][0][1][0][RTW89_IC][2][111] = 127,
[0][0][1][0][RTW89_KCC][1][111] = 127,
[0][0][1][0][RTW89_KCC][0][111] = 127,
[0][0][1][0][RTW89_ACMA][1][111] = 127,
@@ -39561,13 +38953,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][111] = 127,
[0][0][1][0][RTW89_THAILAND][0][111] = 127,
[0][0][1][0][RTW89_FCC][1][113] = 127,
- [0][0][1][0][RTW89_FCC][2][113] = 127,
[0][0][1][0][RTW89_ETSI][1][113] = 127,
[0][0][1][0][RTW89_ETSI][0][113] = 127,
[0][0][1][0][RTW89_MKK][1][113] = 127,
[0][0][1][0][RTW89_MKK][0][113] = 127,
[0][0][1][0][RTW89_IC][1][113] = 127,
- [0][0][1][0][RTW89_IC][2][113] = 127,
[0][0][1][0][RTW89_KCC][1][113] = 127,
[0][0][1][0][RTW89_KCC][0][113] = 127,
[0][0][1][0][RTW89_ACMA][1][113] = 127,
@@ -39580,13 +38970,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][113] = 127,
[0][0][1][0][RTW89_THAILAND][0][113] = 127,
[0][0][1][0][RTW89_FCC][1][115] = 127,
- [0][0][1][0][RTW89_FCC][2][115] = 127,
[0][0][1][0][RTW89_ETSI][1][115] = 127,
[0][0][1][0][RTW89_ETSI][0][115] = 127,
[0][0][1][0][RTW89_MKK][1][115] = 127,
[0][0][1][0][RTW89_MKK][0][115] = 127,
[0][0][1][0][RTW89_IC][1][115] = 127,
- [0][0][1][0][RTW89_IC][2][115] = 127,
[0][0][1][0][RTW89_KCC][1][115] = 127,
[0][0][1][0][RTW89_KCC][0][115] = 127,
[0][0][1][0][RTW89_ACMA][1][115] = 127,
@@ -39599,13 +38987,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][115] = 127,
[0][0][1][0][RTW89_THAILAND][0][115] = 127,
[0][0][1][0][RTW89_FCC][1][117] = 127,
- [0][0][1][0][RTW89_FCC][2][117] = 127,
[0][0][1][0][RTW89_ETSI][1][117] = 127,
[0][0][1][0][RTW89_ETSI][0][117] = 127,
[0][0][1][0][RTW89_MKK][1][117] = 127,
[0][0][1][0][RTW89_MKK][0][117] = 127,
[0][0][1][0][RTW89_IC][1][117] = 127,
- [0][0][1][0][RTW89_IC][2][117] = 127,
[0][0][1][0][RTW89_KCC][1][117] = 127,
[0][0][1][0][RTW89_KCC][0][117] = 127,
[0][0][1][0][RTW89_ACMA][1][117] = 127,
@@ -39618,13 +39004,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][117] = 127,
[0][0][1][0][RTW89_THAILAND][0][117] = 127,
[0][0][1][0][RTW89_FCC][1][119] = 127,
- [0][0][1][0][RTW89_FCC][2][119] = 127,
[0][0][1][0][RTW89_ETSI][1][119] = 127,
[0][0][1][0][RTW89_ETSI][0][119] = 127,
[0][0][1][0][RTW89_MKK][1][119] = 127,
[0][0][1][0][RTW89_MKK][0][119] = 127,
[0][0][1][0][RTW89_IC][1][119] = 127,
- [0][0][1][0][RTW89_IC][2][119] = 127,
[0][0][1][0][RTW89_KCC][1][119] = 127,
[0][0][1][0][RTW89_KCC][0][119] = 127,
[0][0][1][0][RTW89_ACMA][1][119] = 127,
@@ -39637,13 +39021,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][1][0][RTW89_THAILAND][1][119] = 127,
[0][0][1][0][RTW89_THAILAND][0][119] = 127,
[0][1][1][0][RTW89_FCC][1][0] = -2,
- [0][1][1][0][RTW89_FCC][2][0] = 54,
[0][1][1][0][RTW89_ETSI][1][0] = 54,
[0][1][1][0][RTW89_ETSI][0][0] = 18,
[0][1][1][0][RTW89_MKK][1][0] = 56,
[0][1][1][0][RTW89_MKK][0][0] = 16,
[0][1][1][0][RTW89_IC][1][0] = -2,
- [0][1][1][0][RTW89_IC][2][0] = 54,
[0][1][1][0][RTW89_KCC][1][0] = 12,
[0][1][1][0][RTW89_KCC][0][0] = 10,
[0][1][1][0][RTW89_ACMA][1][0] = 54,
@@ -39656,13 +39038,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][0] = 44,
[0][1][1][0][RTW89_THAILAND][0][0] = -2,
[0][1][1][0][RTW89_FCC][1][2] = -4,
- [0][1][1][0][RTW89_FCC][2][2] = 54,
[0][1][1][0][RTW89_ETSI][1][2] = 54,
[0][1][1][0][RTW89_ETSI][0][2] = 18,
[0][1][1][0][RTW89_MKK][1][2] = 54,
[0][1][1][0][RTW89_MKK][0][2] = 16,
[0][1][1][0][RTW89_IC][1][2] = -4,
- [0][1][1][0][RTW89_IC][2][2] = 54,
[0][1][1][0][RTW89_KCC][1][2] = 12,
[0][1][1][0][RTW89_KCC][0][2] = 12,
[0][1][1][0][RTW89_ACMA][1][2] = 54,
@@ -39675,13 +39055,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][2] = 44,
[0][1][1][0][RTW89_THAILAND][0][2] = -4,
[0][1][1][0][RTW89_FCC][1][4] = -4,
- [0][1][1][0][RTW89_FCC][2][4] = 54,
[0][1][1][0][RTW89_ETSI][1][4] = 54,
[0][1][1][0][RTW89_ETSI][0][4] = 18,
[0][1][1][0][RTW89_MKK][1][4] = 54,
[0][1][1][0][RTW89_MKK][0][4] = 16,
[0][1][1][0][RTW89_IC][1][4] = -4,
- [0][1][1][0][RTW89_IC][2][4] = 54,
[0][1][1][0][RTW89_KCC][1][4] = 12,
[0][1][1][0][RTW89_KCC][0][4] = 12,
[0][1][1][0][RTW89_ACMA][1][4] = 54,
@@ -39694,13 +39072,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][4] = 44,
[0][1][1][0][RTW89_THAILAND][0][4] = -4,
[0][1][1][0][RTW89_FCC][1][6] = -4,
- [0][1][1][0][RTW89_FCC][2][6] = 54,
[0][1][1][0][RTW89_ETSI][1][6] = 54,
[0][1][1][0][RTW89_ETSI][0][6] = 18,
[0][1][1][0][RTW89_MKK][1][6] = 54,
[0][1][1][0][RTW89_MKK][0][6] = 16,
[0][1][1][0][RTW89_IC][1][6] = -4,
- [0][1][1][0][RTW89_IC][2][6] = 54,
[0][1][1][0][RTW89_KCC][1][6] = 12,
[0][1][1][0][RTW89_KCC][0][6] = 12,
[0][1][1][0][RTW89_ACMA][1][6] = 54,
@@ -39713,13 +39089,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][6] = 44,
[0][1][1][0][RTW89_THAILAND][0][6] = -4,
[0][1][1][0][RTW89_FCC][1][8] = -4,
- [0][1][1][0][RTW89_FCC][2][8] = 54,
[0][1][1][0][RTW89_ETSI][1][8] = 54,
[0][1][1][0][RTW89_ETSI][0][8] = 18,
[0][1][1][0][RTW89_MKK][1][8] = 54,
[0][1][1][0][RTW89_MKK][0][8] = 16,
[0][1][1][0][RTW89_IC][1][8] = -4,
- [0][1][1][0][RTW89_IC][2][8] = 54,
[0][1][1][0][RTW89_KCC][1][8] = 12,
[0][1][1][0][RTW89_KCC][0][8] = 12,
[0][1][1][0][RTW89_ACMA][1][8] = 54,
@@ -39732,13 +39106,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][8] = 44,
[0][1][1][0][RTW89_THAILAND][0][8] = -4,
[0][1][1][0][RTW89_FCC][1][10] = -4,
- [0][1][1][0][RTW89_FCC][2][10] = 54,
[0][1][1][0][RTW89_ETSI][1][10] = 54,
[0][1][1][0][RTW89_ETSI][0][10] = 18,
[0][1][1][0][RTW89_MKK][1][10] = 54,
[0][1][1][0][RTW89_MKK][0][10] = 16,
[0][1][1][0][RTW89_IC][1][10] = -4,
- [0][1][1][0][RTW89_IC][2][10] = 54,
[0][1][1][0][RTW89_KCC][1][10] = 12,
[0][1][1][0][RTW89_KCC][0][10] = 12,
[0][1][1][0][RTW89_ACMA][1][10] = 54,
@@ -39751,13 +39123,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][10] = 44,
[0][1][1][0][RTW89_THAILAND][0][10] = -4,
[0][1][1][0][RTW89_FCC][1][12] = -4,
- [0][1][1][0][RTW89_FCC][2][12] = 54,
[0][1][1][0][RTW89_ETSI][1][12] = 54,
[0][1][1][0][RTW89_ETSI][0][12] = 18,
[0][1][1][0][RTW89_MKK][1][12] = 54,
[0][1][1][0][RTW89_MKK][0][12] = 16,
[0][1][1][0][RTW89_IC][1][12] = -4,
- [0][1][1][0][RTW89_IC][2][12] = 54,
[0][1][1][0][RTW89_KCC][1][12] = 12,
[0][1][1][0][RTW89_KCC][0][12] = 12,
[0][1][1][0][RTW89_ACMA][1][12] = 54,
@@ -39770,13 +39140,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][12] = 44,
[0][1][1][0][RTW89_THAILAND][0][12] = -4,
[0][1][1][0][RTW89_FCC][1][14] = -4,
- [0][1][1][0][RTW89_FCC][2][14] = 54,
[0][1][1][0][RTW89_ETSI][1][14] = 54,
[0][1][1][0][RTW89_ETSI][0][14] = 18,
[0][1][1][0][RTW89_MKK][1][14] = 54,
[0][1][1][0][RTW89_MKK][0][14] = 16,
[0][1][1][0][RTW89_IC][1][14] = -4,
- [0][1][1][0][RTW89_IC][2][14] = 54,
[0][1][1][0][RTW89_KCC][1][14] = 12,
[0][1][1][0][RTW89_KCC][0][14] = 12,
[0][1][1][0][RTW89_ACMA][1][14] = 54,
@@ -39789,13 +39157,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][14] = 44,
[0][1][1][0][RTW89_THAILAND][0][14] = -4,
[0][1][1][0][RTW89_FCC][1][15] = -4,
- [0][1][1][0][RTW89_FCC][2][15] = 54,
[0][1][1][0][RTW89_ETSI][1][15] = 54,
[0][1][1][0][RTW89_ETSI][0][15] = 18,
[0][1][1][0][RTW89_MKK][1][15] = 54,
[0][1][1][0][RTW89_MKK][0][15] = 16,
[0][1][1][0][RTW89_IC][1][15] = -4,
- [0][1][1][0][RTW89_IC][2][15] = 54,
[0][1][1][0][RTW89_KCC][1][15] = 12,
[0][1][1][0][RTW89_KCC][0][15] = 12,
[0][1][1][0][RTW89_ACMA][1][15] = 54,
@@ -39808,13 +39174,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][15] = 44,
[0][1][1][0][RTW89_THAILAND][0][15] = -4,
[0][1][1][0][RTW89_FCC][1][17] = -4,
- [0][1][1][0][RTW89_FCC][2][17] = 54,
[0][1][1][0][RTW89_ETSI][1][17] = 54,
[0][1][1][0][RTW89_ETSI][0][17] = 18,
[0][1][1][0][RTW89_MKK][1][17] = 54,
[0][1][1][0][RTW89_MKK][0][17] = 16,
[0][1][1][0][RTW89_IC][1][17] = -4,
- [0][1][1][0][RTW89_IC][2][17] = 54,
[0][1][1][0][RTW89_KCC][1][17] = 12,
[0][1][1][0][RTW89_KCC][0][17] = 12,
[0][1][1][0][RTW89_ACMA][1][17] = 54,
@@ -39827,13 +39191,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][17] = 44,
[0][1][1][0][RTW89_THAILAND][0][17] = -4,
[0][1][1][0][RTW89_FCC][1][19] = -4,
- [0][1][1][0][RTW89_FCC][2][19] = 54,
[0][1][1][0][RTW89_ETSI][1][19] = 54,
[0][1][1][0][RTW89_ETSI][0][19] = 18,
[0][1][1][0][RTW89_MKK][1][19] = 54,
[0][1][1][0][RTW89_MKK][0][19] = 16,
[0][1][1][0][RTW89_IC][1][19] = -4,
- [0][1][1][0][RTW89_IC][2][19] = 54,
[0][1][1][0][RTW89_KCC][1][19] = 12,
[0][1][1][0][RTW89_KCC][0][19] = 12,
[0][1][1][0][RTW89_ACMA][1][19] = 54,
@@ -39846,13 +39208,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][19] = 44,
[0][1][1][0][RTW89_THAILAND][0][19] = -4,
[0][1][1][0][RTW89_FCC][1][21] = -4,
- [0][1][1][0][RTW89_FCC][2][21] = 54,
[0][1][1][0][RTW89_ETSI][1][21] = 54,
[0][1][1][0][RTW89_ETSI][0][21] = 18,
[0][1][1][0][RTW89_MKK][1][21] = 54,
[0][1][1][0][RTW89_MKK][0][21] = 16,
[0][1][1][0][RTW89_IC][1][21] = -4,
- [0][1][1][0][RTW89_IC][2][21] = 54,
[0][1][1][0][RTW89_KCC][1][21] = 12,
[0][1][1][0][RTW89_KCC][0][21] = 12,
[0][1][1][0][RTW89_ACMA][1][21] = 54,
@@ -39865,13 +39225,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][21] = 44,
[0][1][1][0][RTW89_THAILAND][0][21] = -4,
[0][1][1][0][RTW89_FCC][1][23] = -4,
- [0][1][1][0][RTW89_FCC][2][23] = 68,
[0][1][1][0][RTW89_ETSI][1][23] = 54,
[0][1][1][0][RTW89_ETSI][0][23] = 18,
[0][1][1][0][RTW89_MKK][1][23] = 54,
[0][1][1][0][RTW89_MKK][0][23] = 16,
[0][1][1][0][RTW89_IC][1][23] = -4,
- [0][1][1][0][RTW89_IC][2][23] = 68,
[0][1][1][0][RTW89_KCC][1][23] = 12,
[0][1][1][0][RTW89_KCC][0][23] = 10,
[0][1][1][0][RTW89_ACMA][1][23] = 54,
@@ -39884,13 +39242,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][23] = 44,
[0][1][1][0][RTW89_THAILAND][0][23] = -4,
[0][1][1][0][RTW89_FCC][1][25] = -4,
- [0][1][1][0][RTW89_FCC][2][25] = 68,
[0][1][1][0][RTW89_ETSI][1][25] = 54,
[0][1][1][0][RTW89_ETSI][0][25] = 18,
[0][1][1][0][RTW89_MKK][1][25] = 54,
[0][1][1][0][RTW89_MKK][0][25] = 16,
[0][1][1][0][RTW89_IC][1][25] = -4,
- [0][1][1][0][RTW89_IC][2][25] = 68,
[0][1][1][0][RTW89_KCC][1][25] = 12,
[0][1][1][0][RTW89_KCC][0][25] = 14,
[0][1][1][0][RTW89_ACMA][1][25] = 54,
@@ -39903,13 +39259,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][25] = 42,
[0][1][1][0][RTW89_THAILAND][0][25] = -4,
[0][1][1][0][RTW89_FCC][1][27] = -4,
- [0][1][1][0][RTW89_FCC][2][27] = 68,
[0][1][1][0][RTW89_ETSI][1][27] = 54,
[0][1][1][0][RTW89_ETSI][0][27] = 18,
[0][1][1][0][RTW89_MKK][1][27] = 54,
[0][1][1][0][RTW89_MKK][0][27] = 16,
[0][1][1][0][RTW89_IC][1][27] = -4,
- [0][1][1][0][RTW89_IC][2][27] = 68,
[0][1][1][0][RTW89_KCC][1][27] = 12,
[0][1][1][0][RTW89_KCC][0][27] = 14,
[0][1][1][0][RTW89_ACMA][1][27] = 54,
@@ -39922,13 +39276,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][27] = 42,
[0][1][1][0][RTW89_THAILAND][0][27] = -4,
[0][1][1][0][RTW89_FCC][1][29] = -4,
- [0][1][1][0][RTW89_FCC][2][29] = 68,
[0][1][1][0][RTW89_ETSI][1][29] = 54,
[0][1][1][0][RTW89_ETSI][0][29] = 18,
[0][1][1][0][RTW89_MKK][1][29] = 54,
[0][1][1][0][RTW89_MKK][0][29] = 16,
[0][1][1][0][RTW89_IC][1][29] = -4,
- [0][1][1][0][RTW89_IC][2][29] = 68,
[0][1][1][0][RTW89_KCC][1][29] = 12,
[0][1][1][0][RTW89_KCC][0][29] = 14,
[0][1][1][0][RTW89_ACMA][1][29] = 54,
@@ -39941,13 +39293,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][29] = 42,
[0][1][1][0][RTW89_THAILAND][0][29] = -4,
[0][1][1][0][RTW89_FCC][1][30] = -4,
- [0][1][1][0][RTW89_FCC][2][30] = 68,
[0][1][1][0][RTW89_ETSI][1][30] = 54,
[0][1][1][0][RTW89_ETSI][0][30] = 18,
[0][1][1][0][RTW89_MKK][1][30] = 54,
[0][1][1][0][RTW89_MKK][0][30] = 16,
[0][1][1][0][RTW89_IC][1][30] = -4,
- [0][1][1][0][RTW89_IC][2][30] = 68,
[0][1][1][0][RTW89_KCC][1][30] = 12,
[0][1][1][0][RTW89_KCC][0][30] = 14,
[0][1][1][0][RTW89_ACMA][1][30] = 54,
@@ -39960,13 +39310,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][30] = 42,
[0][1][1][0][RTW89_THAILAND][0][30] = -4,
[0][1][1][0][RTW89_FCC][1][32] = -4,
- [0][1][1][0][RTW89_FCC][2][32] = 68,
[0][1][1][0][RTW89_ETSI][1][32] = 54,
[0][1][1][0][RTW89_ETSI][0][32] = 18,
[0][1][1][0][RTW89_MKK][1][32] = 54,
[0][1][1][0][RTW89_MKK][0][32] = 16,
[0][1][1][0][RTW89_IC][1][32] = -4,
- [0][1][1][0][RTW89_IC][2][32] = 68,
[0][1][1][0][RTW89_KCC][1][32] = 12,
[0][1][1][0][RTW89_KCC][0][32] = 14,
[0][1][1][0][RTW89_ACMA][1][32] = 54,
@@ -39979,13 +39327,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][32] = 42,
[0][1][1][0][RTW89_THAILAND][0][32] = -4,
[0][1][1][0][RTW89_FCC][1][34] = -4,
- [0][1][1][0][RTW89_FCC][2][34] = 68,
[0][1][1][0][RTW89_ETSI][1][34] = 54,
[0][1][1][0][RTW89_ETSI][0][34] = 18,
[0][1][1][0][RTW89_MKK][1][34] = 54,
[0][1][1][0][RTW89_MKK][0][34] = 16,
[0][1][1][0][RTW89_IC][1][34] = -4,
- [0][1][1][0][RTW89_IC][2][34] = 68,
[0][1][1][0][RTW89_KCC][1][34] = 12,
[0][1][1][0][RTW89_KCC][0][34] = 14,
[0][1][1][0][RTW89_ACMA][1][34] = 54,
@@ -39998,13 +39344,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][34] = 42,
[0][1][1][0][RTW89_THAILAND][0][34] = -4,
[0][1][1][0][RTW89_FCC][1][36] = -4,
- [0][1][1][0][RTW89_FCC][2][36] = 68,
[0][1][1][0][RTW89_ETSI][1][36] = 54,
[0][1][1][0][RTW89_ETSI][0][36] = 18,
[0][1][1][0][RTW89_MKK][1][36] = 54,
[0][1][1][0][RTW89_MKK][0][36] = 16,
[0][1][1][0][RTW89_IC][1][36] = -4,
- [0][1][1][0][RTW89_IC][2][36] = 68,
[0][1][1][0][RTW89_KCC][1][36] = 12,
[0][1][1][0][RTW89_KCC][0][36] = 14,
[0][1][1][0][RTW89_ACMA][1][36] = 54,
@@ -40017,13 +39361,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][36] = 42,
[0][1][1][0][RTW89_THAILAND][0][36] = -4,
[0][1][1][0][RTW89_FCC][1][38] = -4,
- [0][1][1][0][RTW89_FCC][2][38] = 68,
[0][1][1][0][RTW89_ETSI][1][38] = 54,
[0][1][1][0][RTW89_ETSI][0][38] = 18,
[0][1][1][0][RTW89_MKK][1][38] = 54,
[0][1][1][0][RTW89_MKK][0][38] = 16,
[0][1][1][0][RTW89_IC][1][38] = -4,
- [0][1][1][0][RTW89_IC][2][38] = 68,
[0][1][1][0][RTW89_KCC][1][38] = 12,
[0][1][1][0][RTW89_KCC][0][38] = 14,
[0][1][1][0][RTW89_ACMA][1][38] = 54,
@@ -40036,13 +39378,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][38] = 42,
[0][1][1][0][RTW89_THAILAND][0][38] = -4,
[0][1][1][0][RTW89_FCC][1][40] = -4,
- [0][1][1][0][RTW89_FCC][2][40] = 68,
[0][1][1][0][RTW89_ETSI][1][40] = 54,
[0][1][1][0][RTW89_ETSI][0][40] = 18,
[0][1][1][0][RTW89_MKK][1][40] = 54,
[0][1][1][0][RTW89_MKK][0][40] = 16,
[0][1][1][0][RTW89_IC][1][40] = -4,
- [0][1][1][0][RTW89_IC][2][40] = 68,
[0][1][1][0][RTW89_KCC][1][40] = 12,
[0][1][1][0][RTW89_KCC][0][40] = 14,
[0][1][1][0][RTW89_ACMA][1][40] = 54,
@@ -40055,13 +39395,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][40] = 42,
[0][1][1][0][RTW89_THAILAND][0][40] = -4,
[0][1][1][0][RTW89_FCC][1][42] = -4,
- [0][1][1][0][RTW89_FCC][2][42] = 68,
[0][1][1][0][RTW89_ETSI][1][42] = 54,
[0][1][1][0][RTW89_ETSI][0][42] = 18,
[0][1][1][0][RTW89_MKK][1][42] = 54,
[0][1][1][0][RTW89_MKK][0][42] = 16,
[0][1][1][0][RTW89_IC][1][42] = -4,
- [0][1][1][0][RTW89_IC][2][42] = 68,
[0][1][1][0][RTW89_KCC][1][42] = 12,
[0][1][1][0][RTW89_KCC][0][42] = 14,
[0][1][1][0][RTW89_ACMA][1][42] = 54,
@@ -40074,13 +39412,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][42] = 42,
[0][1][1][0][RTW89_THAILAND][0][42] = -4,
[0][1][1][0][RTW89_FCC][1][44] = -2,
- [0][1][1][0][RTW89_FCC][2][44] = 68,
[0][1][1][0][RTW89_ETSI][1][44] = 54,
[0][1][1][0][RTW89_ETSI][0][44] = 18,
[0][1][1][0][RTW89_MKK][1][44] = 34,
[0][1][1][0][RTW89_MKK][0][44] = 16,
[0][1][1][0][RTW89_IC][1][44] = -2,
- [0][1][1][0][RTW89_IC][2][44] = 68,
[0][1][1][0][RTW89_KCC][1][44] = 12,
[0][1][1][0][RTW89_KCC][0][44] = 12,
[0][1][1][0][RTW89_ACMA][1][44] = 54,
@@ -40093,13 +39429,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][44] = 42,
[0][1][1][0][RTW89_THAILAND][0][44] = -2,
[0][1][1][0][RTW89_FCC][1][45] = -2,
- [0][1][1][0][RTW89_FCC][2][45] = 127,
[0][1][1][0][RTW89_ETSI][1][45] = 127,
[0][1][1][0][RTW89_ETSI][0][45] = 127,
[0][1][1][0][RTW89_MKK][1][45] = 127,
[0][1][1][0][RTW89_MKK][0][45] = 127,
[0][1][1][0][RTW89_IC][1][45] = -2,
- [0][1][1][0][RTW89_IC][2][45] = 70,
[0][1][1][0][RTW89_KCC][1][45] = 12,
[0][1][1][0][RTW89_KCC][0][45] = 127,
[0][1][1][0][RTW89_ACMA][1][45] = 127,
@@ -40112,13 +39446,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][45] = 127,
[0][1][1][0][RTW89_THAILAND][0][45] = 127,
[0][1][1][0][RTW89_FCC][1][47] = -2,
- [0][1][1][0][RTW89_FCC][2][47] = 127,
[0][1][1][0][RTW89_ETSI][1][47] = 127,
[0][1][1][0][RTW89_ETSI][0][47] = 127,
[0][1][1][0][RTW89_MKK][1][47] = 127,
[0][1][1][0][RTW89_MKK][0][47] = 127,
[0][1][1][0][RTW89_IC][1][47] = -2,
- [0][1][1][0][RTW89_IC][2][47] = 68,
[0][1][1][0][RTW89_KCC][1][47] = 12,
[0][1][1][0][RTW89_KCC][0][47] = 127,
[0][1][1][0][RTW89_ACMA][1][47] = 127,
@@ -40131,13 +39463,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][47] = 127,
[0][1][1][0][RTW89_THAILAND][0][47] = 127,
[0][1][1][0][RTW89_FCC][1][49] = -2,
- [0][1][1][0][RTW89_FCC][2][49] = 127,
[0][1][1][0][RTW89_ETSI][1][49] = 127,
[0][1][1][0][RTW89_ETSI][0][49] = 127,
[0][1][1][0][RTW89_MKK][1][49] = 127,
[0][1][1][0][RTW89_MKK][0][49] = 127,
[0][1][1][0][RTW89_IC][1][49] = -2,
- [0][1][1][0][RTW89_IC][2][49] = 68,
[0][1][1][0][RTW89_KCC][1][49] = 12,
[0][1][1][0][RTW89_KCC][0][49] = 127,
[0][1][1][0][RTW89_ACMA][1][49] = 127,
@@ -40150,13 +39480,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][49] = 127,
[0][1][1][0][RTW89_THAILAND][0][49] = 127,
[0][1][1][0][RTW89_FCC][1][51] = -2,
- [0][1][1][0][RTW89_FCC][2][51] = 127,
[0][1][1][0][RTW89_ETSI][1][51] = 127,
[0][1][1][0][RTW89_ETSI][0][51] = 127,
[0][1][1][0][RTW89_MKK][1][51] = 127,
[0][1][1][0][RTW89_MKK][0][51] = 127,
[0][1][1][0][RTW89_IC][1][51] = -2,
- [0][1][1][0][RTW89_IC][2][51] = 68,
[0][1][1][0][RTW89_KCC][1][51] = 12,
[0][1][1][0][RTW89_KCC][0][51] = 127,
[0][1][1][0][RTW89_ACMA][1][51] = 127,
@@ -40169,13 +39497,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][51] = 127,
[0][1][1][0][RTW89_THAILAND][0][51] = 127,
[0][1][1][0][RTW89_FCC][1][53] = -2,
- [0][1][1][0][RTW89_FCC][2][53] = 127,
[0][1][1][0][RTW89_ETSI][1][53] = 127,
[0][1][1][0][RTW89_ETSI][0][53] = 127,
[0][1][1][0][RTW89_MKK][1][53] = 127,
[0][1][1][0][RTW89_MKK][0][53] = 127,
[0][1][1][0][RTW89_IC][1][53] = -2,
- [0][1][1][0][RTW89_IC][2][53] = 68,
[0][1][1][0][RTW89_KCC][1][53] = 12,
[0][1][1][0][RTW89_KCC][0][53] = 127,
[0][1][1][0][RTW89_ACMA][1][53] = 127,
@@ -40188,13 +39514,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][53] = 127,
[0][1][1][0][RTW89_THAILAND][0][53] = 127,
[0][1][1][0][RTW89_FCC][1][55] = -2,
- [0][1][1][0][RTW89_FCC][2][55] = 68,
[0][1][1][0][RTW89_ETSI][1][55] = 127,
[0][1][1][0][RTW89_ETSI][0][55] = 127,
[0][1][1][0][RTW89_MKK][1][55] = 127,
[0][1][1][0][RTW89_MKK][0][55] = 127,
[0][1][1][0][RTW89_IC][1][55] = -2,
- [0][1][1][0][RTW89_IC][2][55] = 68,
[0][1][1][0][RTW89_KCC][1][55] = 12,
[0][1][1][0][RTW89_KCC][0][55] = 127,
[0][1][1][0][RTW89_ACMA][1][55] = 127,
@@ -40207,13 +39531,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][55] = 127,
[0][1][1][0][RTW89_THAILAND][0][55] = 127,
[0][1][1][0][RTW89_FCC][1][57] = -2,
- [0][1][1][0][RTW89_FCC][2][57] = 68,
[0][1][1][0][RTW89_ETSI][1][57] = 127,
[0][1][1][0][RTW89_ETSI][0][57] = 127,
[0][1][1][0][RTW89_MKK][1][57] = 127,
[0][1][1][0][RTW89_MKK][0][57] = 127,
[0][1][1][0][RTW89_IC][1][57] = -2,
- [0][1][1][0][RTW89_IC][2][57] = 68,
[0][1][1][0][RTW89_KCC][1][57] = 12,
[0][1][1][0][RTW89_KCC][0][57] = 127,
[0][1][1][0][RTW89_ACMA][1][57] = 127,
@@ -40226,13 +39548,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][57] = 127,
[0][1][1][0][RTW89_THAILAND][0][57] = 127,
[0][1][1][0][RTW89_FCC][1][59] = -2,
- [0][1][1][0][RTW89_FCC][2][59] = 68,
[0][1][1][0][RTW89_ETSI][1][59] = 127,
[0][1][1][0][RTW89_ETSI][0][59] = 127,
[0][1][1][0][RTW89_MKK][1][59] = 127,
[0][1][1][0][RTW89_MKK][0][59] = 127,
[0][1][1][0][RTW89_IC][1][59] = -2,
- [0][1][1][0][RTW89_IC][2][59] = 68,
[0][1][1][0][RTW89_KCC][1][59] = 12,
[0][1][1][0][RTW89_KCC][0][59] = 127,
[0][1][1][0][RTW89_ACMA][1][59] = 127,
@@ -40245,13 +39565,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][59] = 127,
[0][1][1][0][RTW89_THAILAND][0][59] = 127,
[0][1][1][0][RTW89_FCC][1][60] = -2,
- [0][1][1][0][RTW89_FCC][2][60] = 68,
[0][1][1][0][RTW89_ETSI][1][60] = 127,
[0][1][1][0][RTW89_ETSI][0][60] = 127,
[0][1][1][0][RTW89_MKK][1][60] = 127,
[0][1][1][0][RTW89_MKK][0][60] = 127,
[0][1][1][0][RTW89_IC][1][60] = -2,
- [0][1][1][0][RTW89_IC][2][60] = 68,
[0][1][1][0][RTW89_KCC][1][60] = 12,
[0][1][1][0][RTW89_KCC][0][60] = 127,
[0][1][1][0][RTW89_ACMA][1][60] = 127,
@@ -40264,13 +39582,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][60] = 127,
[0][1][1][0][RTW89_THAILAND][0][60] = 127,
[0][1][1][0][RTW89_FCC][1][62] = -2,
- [0][1][1][0][RTW89_FCC][2][62] = 68,
[0][1][1][0][RTW89_ETSI][1][62] = 127,
[0][1][1][0][RTW89_ETSI][0][62] = 127,
[0][1][1][0][RTW89_MKK][1][62] = 127,
[0][1][1][0][RTW89_MKK][0][62] = 127,
[0][1][1][0][RTW89_IC][1][62] = -2,
- [0][1][1][0][RTW89_IC][2][62] = 68,
[0][1][1][0][RTW89_KCC][1][62] = 12,
[0][1][1][0][RTW89_KCC][0][62] = 127,
[0][1][1][0][RTW89_ACMA][1][62] = 127,
@@ -40283,13 +39599,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][62] = 127,
[0][1][1][0][RTW89_THAILAND][0][62] = 127,
[0][1][1][0][RTW89_FCC][1][64] = -2,
- [0][1][1][0][RTW89_FCC][2][64] = 68,
[0][1][1][0][RTW89_ETSI][1][64] = 127,
[0][1][1][0][RTW89_ETSI][0][64] = 127,
[0][1][1][0][RTW89_MKK][1][64] = 127,
[0][1][1][0][RTW89_MKK][0][64] = 127,
[0][1][1][0][RTW89_IC][1][64] = -2,
- [0][1][1][0][RTW89_IC][2][64] = 68,
[0][1][1][0][RTW89_KCC][1][64] = 12,
[0][1][1][0][RTW89_KCC][0][64] = 127,
[0][1][1][0][RTW89_ACMA][1][64] = 127,
@@ -40302,13 +39616,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][64] = 127,
[0][1][1][0][RTW89_THAILAND][0][64] = 127,
[0][1][1][0][RTW89_FCC][1][66] = -2,
- [0][1][1][0][RTW89_FCC][2][66] = 68,
[0][1][1][0][RTW89_ETSI][1][66] = 127,
[0][1][1][0][RTW89_ETSI][0][66] = 127,
[0][1][1][0][RTW89_MKK][1][66] = 127,
[0][1][1][0][RTW89_MKK][0][66] = 127,
[0][1][1][0][RTW89_IC][1][66] = -2,
- [0][1][1][0][RTW89_IC][2][66] = 68,
[0][1][1][0][RTW89_KCC][1][66] = 12,
[0][1][1][0][RTW89_KCC][0][66] = 127,
[0][1][1][0][RTW89_ACMA][1][66] = 127,
@@ -40321,13 +39633,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][66] = 127,
[0][1][1][0][RTW89_THAILAND][0][66] = 127,
[0][1][1][0][RTW89_FCC][1][68] = -2,
- [0][1][1][0][RTW89_FCC][2][68] = 68,
[0][1][1][0][RTW89_ETSI][1][68] = 127,
[0][1][1][0][RTW89_ETSI][0][68] = 127,
[0][1][1][0][RTW89_MKK][1][68] = 127,
[0][1][1][0][RTW89_MKK][0][68] = 127,
[0][1][1][0][RTW89_IC][1][68] = -2,
- [0][1][1][0][RTW89_IC][2][68] = 68,
[0][1][1][0][RTW89_KCC][1][68] = 12,
[0][1][1][0][RTW89_KCC][0][68] = 127,
[0][1][1][0][RTW89_ACMA][1][68] = 127,
@@ -40340,13 +39650,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][68] = 127,
[0][1][1][0][RTW89_THAILAND][0][68] = 127,
[0][1][1][0][RTW89_FCC][1][70] = -2,
- [0][1][1][0][RTW89_FCC][2][70] = 68,
[0][1][1][0][RTW89_ETSI][1][70] = 127,
[0][1][1][0][RTW89_ETSI][0][70] = 127,
[0][1][1][0][RTW89_MKK][1][70] = 127,
[0][1][1][0][RTW89_MKK][0][70] = 127,
[0][1][1][0][RTW89_IC][1][70] = -2,
- [0][1][1][0][RTW89_IC][2][70] = 68,
[0][1][1][0][RTW89_KCC][1][70] = 12,
[0][1][1][0][RTW89_KCC][0][70] = 127,
[0][1][1][0][RTW89_ACMA][1][70] = 127,
@@ -40359,13 +39667,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][70] = 127,
[0][1][1][0][RTW89_THAILAND][0][70] = 127,
[0][1][1][0][RTW89_FCC][1][72] = -2,
- [0][1][1][0][RTW89_FCC][2][72] = 68,
[0][1][1][0][RTW89_ETSI][1][72] = 127,
[0][1][1][0][RTW89_ETSI][0][72] = 127,
[0][1][1][0][RTW89_MKK][1][72] = 127,
[0][1][1][0][RTW89_MKK][0][72] = 127,
[0][1][1][0][RTW89_IC][1][72] = -2,
- [0][1][1][0][RTW89_IC][2][72] = 68,
[0][1][1][0][RTW89_KCC][1][72] = 12,
[0][1][1][0][RTW89_KCC][0][72] = 127,
[0][1][1][0][RTW89_ACMA][1][72] = 127,
@@ -40378,13 +39684,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][72] = 127,
[0][1][1][0][RTW89_THAILAND][0][72] = 127,
[0][1][1][0][RTW89_FCC][1][74] = -2,
- [0][1][1][0][RTW89_FCC][2][74] = 68,
[0][1][1][0][RTW89_ETSI][1][74] = 127,
[0][1][1][0][RTW89_ETSI][0][74] = 127,
[0][1][1][0][RTW89_MKK][1][74] = 127,
[0][1][1][0][RTW89_MKK][0][74] = 127,
[0][1][1][0][RTW89_IC][1][74] = -2,
- [0][1][1][0][RTW89_IC][2][74] = 68,
[0][1][1][0][RTW89_KCC][1][74] = 12,
[0][1][1][0][RTW89_KCC][0][74] = 127,
[0][1][1][0][RTW89_ACMA][1][74] = 127,
@@ -40397,13 +39701,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][74] = 127,
[0][1][1][0][RTW89_THAILAND][0][74] = 127,
[0][1][1][0][RTW89_FCC][1][75] = -2,
- [0][1][1][0][RTW89_FCC][2][75] = 68,
[0][1][1][0][RTW89_ETSI][1][75] = 127,
[0][1][1][0][RTW89_ETSI][0][75] = 127,
[0][1][1][0][RTW89_MKK][1][75] = 127,
[0][1][1][0][RTW89_MKK][0][75] = 127,
[0][1][1][0][RTW89_IC][1][75] = -2,
- [0][1][1][0][RTW89_IC][2][75] = 68,
[0][1][1][0][RTW89_KCC][1][75] = 12,
[0][1][1][0][RTW89_KCC][0][75] = 127,
[0][1][1][0][RTW89_ACMA][1][75] = 127,
@@ -40416,13 +39718,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][75] = 127,
[0][1][1][0][RTW89_THAILAND][0][75] = 127,
[0][1][1][0][RTW89_FCC][1][77] = -2,
- [0][1][1][0][RTW89_FCC][2][77] = 68,
[0][1][1][0][RTW89_ETSI][1][77] = 127,
[0][1][1][0][RTW89_ETSI][0][77] = 127,
[0][1][1][0][RTW89_MKK][1][77] = 127,
[0][1][1][0][RTW89_MKK][0][77] = 127,
[0][1][1][0][RTW89_IC][1][77] = -2,
- [0][1][1][0][RTW89_IC][2][77] = 68,
[0][1][1][0][RTW89_KCC][1][77] = 12,
[0][1][1][0][RTW89_KCC][0][77] = 127,
[0][1][1][0][RTW89_ACMA][1][77] = 127,
@@ -40435,13 +39735,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][77] = 127,
[0][1][1][0][RTW89_THAILAND][0][77] = 127,
[0][1][1][0][RTW89_FCC][1][79] = -2,
- [0][1][1][0][RTW89_FCC][2][79] = 68,
[0][1][1][0][RTW89_ETSI][1][79] = 127,
[0][1][1][0][RTW89_ETSI][0][79] = 127,
[0][1][1][0][RTW89_MKK][1][79] = 127,
[0][1][1][0][RTW89_MKK][0][79] = 127,
[0][1][1][0][RTW89_IC][1][79] = -2,
- [0][1][1][0][RTW89_IC][2][79] = 68,
[0][1][1][0][RTW89_KCC][1][79] = 12,
[0][1][1][0][RTW89_KCC][0][79] = 127,
[0][1][1][0][RTW89_ACMA][1][79] = 127,
@@ -40454,13 +39752,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][79] = 127,
[0][1][1][0][RTW89_THAILAND][0][79] = 127,
[0][1][1][0][RTW89_FCC][1][81] = -2,
- [0][1][1][0][RTW89_FCC][2][81] = 68,
[0][1][1][0][RTW89_ETSI][1][81] = 127,
[0][1][1][0][RTW89_ETSI][0][81] = 127,
[0][1][1][0][RTW89_MKK][1][81] = 127,
[0][1][1][0][RTW89_MKK][0][81] = 127,
[0][1][1][0][RTW89_IC][1][81] = -2,
- [0][1][1][0][RTW89_IC][2][81] = 68,
[0][1][1][0][RTW89_KCC][1][81] = 12,
[0][1][1][0][RTW89_KCC][0][81] = 127,
[0][1][1][0][RTW89_ACMA][1][81] = 127,
@@ -40473,13 +39769,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][81] = 127,
[0][1][1][0][RTW89_THAILAND][0][81] = 127,
[0][1][1][0][RTW89_FCC][1][83] = -2,
- [0][1][1][0][RTW89_FCC][2][83] = 68,
[0][1][1][0][RTW89_ETSI][1][83] = 127,
[0][1][1][0][RTW89_ETSI][0][83] = 127,
[0][1][1][0][RTW89_MKK][1][83] = 127,
[0][1][1][0][RTW89_MKK][0][83] = 127,
[0][1][1][0][RTW89_IC][1][83] = -2,
- [0][1][1][0][RTW89_IC][2][83] = 68,
[0][1][1][0][RTW89_KCC][1][83] = 20,
[0][1][1][0][RTW89_KCC][0][83] = 127,
[0][1][1][0][RTW89_ACMA][1][83] = 127,
@@ -40492,13 +39786,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][83] = 127,
[0][1][1][0][RTW89_THAILAND][0][83] = 127,
[0][1][1][0][RTW89_FCC][1][85] = -2,
- [0][1][1][0][RTW89_FCC][2][85] = 68,
[0][1][1][0][RTW89_ETSI][1][85] = 127,
[0][1][1][0][RTW89_ETSI][0][85] = 127,
[0][1][1][0][RTW89_MKK][1][85] = 127,
[0][1][1][0][RTW89_MKK][0][85] = 127,
[0][1][1][0][RTW89_IC][1][85] = -2,
- [0][1][1][0][RTW89_IC][2][85] = 68,
[0][1][1][0][RTW89_KCC][1][85] = 20,
[0][1][1][0][RTW89_KCC][0][85] = 127,
[0][1][1][0][RTW89_ACMA][1][85] = 127,
@@ -40511,13 +39803,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][85] = 127,
[0][1][1][0][RTW89_THAILAND][0][85] = 127,
[0][1][1][0][RTW89_FCC][1][87] = -2,
- [0][1][1][0][RTW89_FCC][2][87] = 127,
[0][1][1][0][RTW89_ETSI][1][87] = 127,
[0][1][1][0][RTW89_ETSI][0][87] = 127,
[0][1][1][0][RTW89_MKK][1][87] = 127,
[0][1][1][0][RTW89_MKK][0][87] = 127,
[0][1][1][0][RTW89_IC][1][87] = -2,
- [0][1][1][0][RTW89_IC][2][87] = 127,
[0][1][1][0][RTW89_KCC][1][87] = 20,
[0][1][1][0][RTW89_KCC][0][87] = 127,
[0][1][1][0][RTW89_ACMA][1][87] = 127,
@@ -40530,13 +39820,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][87] = 127,
[0][1][1][0][RTW89_THAILAND][0][87] = 127,
[0][1][1][0][RTW89_FCC][1][89] = -2,
- [0][1][1][0][RTW89_FCC][2][89] = 127,
[0][1][1][0][RTW89_ETSI][1][89] = 127,
[0][1][1][0][RTW89_ETSI][0][89] = 127,
[0][1][1][0][RTW89_MKK][1][89] = 127,
[0][1][1][0][RTW89_MKK][0][89] = 127,
[0][1][1][0][RTW89_IC][1][89] = -2,
- [0][1][1][0][RTW89_IC][2][89] = 127,
[0][1][1][0][RTW89_KCC][1][89] = 20,
[0][1][1][0][RTW89_KCC][0][89] = 127,
[0][1][1][0][RTW89_ACMA][1][89] = 127,
@@ -40549,13 +39837,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][89] = 127,
[0][1][1][0][RTW89_THAILAND][0][89] = 127,
[0][1][1][0][RTW89_FCC][1][90] = -2,
- [0][1][1][0][RTW89_FCC][2][90] = 127,
[0][1][1][0][RTW89_ETSI][1][90] = 127,
[0][1][1][0][RTW89_ETSI][0][90] = 127,
[0][1][1][0][RTW89_MKK][1][90] = 127,
[0][1][1][0][RTW89_MKK][0][90] = 127,
[0][1][1][0][RTW89_IC][1][90] = -2,
- [0][1][1][0][RTW89_IC][2][90] = 127,
[0][1][1][0][RTW89_KCC][1][90] = 20,
[0][1][1][0][RTW89_KCC][0][90] = 127,
[0][1][1][0][RTW89_ACMA][1][90] = 127,
@@ -40568,13 +39854,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][90] = 127,
[0][1][1][0][RTW89_THAILAND][0][90] = 127,
[0][1][1][0][RTW89_FCC][1][92] = -2,
- [0][1][1][0][RTW89_FCC][2][92] = 127,
[0][1][1][0][RTW89_ETSI][1][92] = 127,
[0][1][1][0][RTW89_ETSI][0][92] = 127,
[0][1][1][0][RTW89_MKK][1][92] = 127,
[0][1][1][0][RTW89_MKK][0][92] = 127,
[0][1][1][0][RTW89_IC][1][92] = -2,
- [0][1][1][0][RTW89_IC][2][92] = 127,
[0][1][1][0][RTW89_KCC][1][92] = 20,
[0][1][1][0][RTW89_KCC][0][92] = 127,
[0][1][1][0][RTW89_ACMA][1][92] = 127,
@@ -40587,13 +39871,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][92] = 127,
[0][1][1][0][RTW89_THAILAND][0][92] = 127,
[0][1][1][0][RTW89_FCC][1][94] = -2,
- [0][1][1][0][RTW89_FCC][2][94] = 127,
[0][1][1][0][RTW89_ETSI][1][94] = 127,
[0][1][1][0][RTW89_ETSI][0][94] = 127,
[0][1][1][0][RTW89_MKK][1][94] = 127,
[0][1][1][0][RTW89_MKK][0][94] = 127,
[0][1][1][0][RTW89_IC][1][94] = -2,
- [0][1][1][0][RTW89_IC][2][94] = 127,
[0][1][1][0][RTW89_KCC][1][94] = 20,
[0][1][1][0][RTW89_KCC][0][94] = 127,
[0][1][1][0][RTW89_ACMA][1][94] = 127,
@@ -40606,13 +39888,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][94] = 127,
[0][1][1][0][RTW89_THAILAND][0][94] = 127,
[0][1][1][0][RTW89_FCC][1][96] = -2,
- [0][1][1][0][RTW89_FCC][2][96] = 127,
[0][1][1][0][RTW89_ETSI][1][96] = 127,
[0][1][1][0][RTW89_ETSI][0][96] = 127,
[0][1][1][0][RTW89_MKK][1][96] = 127,
[0][1][1][0][RTW89_MKK][0][96] = 127,
[0][1][1][0][RTW89_IC][1][96] = -2,
- [0][1][1][0][RTW89_IC][2][96] = 127,
[0][1][1][0][RTW89_KCC][1][96] = 20,
[0][1][1][0][RTW89_KCC][0][96] = 127,
[0][1][1][0][RTW89_ACMA][1][96] = 127,
@@ -40625,13 +39905,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][96] = 127,
[0][1][1][0][RTW89_THAILAND][0][96] = 127,
[0][1][1][0][RTW89_FCC][1][98] = -2,
- [0][1][1][0][RTW89_FCC][2][98] = 127,
[0][1][1][0][RTW89_ETSI][1][98] = 127,
[0][1][1][0][RTW89_ETSI][0][98] = 127,
[0][1][1][0][RTW89_MKK][1][98] = 127,
[0][1][1][0][RTW89_MKK][0][98] = 127,
[0][1][1][0][RTW89_IC][1][98] = -2,
- [0][1][1][0][RTW89_IC][2][98] = 127,
[0][1][1][0][RTW89_KCC][1][98] = 20,
[0][1][1][0][RTW89_KCC][0][98] = 127,
[0][1][1][0][RTW89_ACMA][1][98] = 127,
@@ -40644,13 +39922,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][98] = 127,
[0][1][1][0][RTW89_THAILAND][0][98] = 127,
[0][1][1][0][RTW89_FCC][1][100] = -2,
- [0][1][1][0][RTW89_FCC][2][100] = 127,
[0][1][1][0][RTW89_ETSI][1][100] = 127,
[0][1][1][0][RTW89_ETSI][0][100] = 127,
[0][1][1][0][RTW89_MKK][1][100] = 127,
[0][1][1][0][RTW89_MKK][0][100] = 127,
[0][1][1][0][RTW89_IC][1][100] = -2,
- [0][1][1][0][RTW89_IC][2][100] = 127,
[0][1][1][0][RTW89_KCC][1][100] = 20,
[0][1][1][0][RTW89_KCC][0][100] = 127,
[0][1][1][0][RTW89_ACMA][1][100] = 127,
@@ -40663,13 +39939,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][100] = 127,
[0][1][1][0][RTW89_THAILAND][0][100] = 127,
[0][1][1][0][RTW89_FCC][1][102] = -2,
- [0][1][1][0][RTW89_FCC][2][102] = 127,
[0][1][1][0][RTW89_ETSI][1][102] = 127,
[0][1][1][0][RTW89_ETSI][0][102] = 127,
[0][1][1][0][RTW89_MKK][1][102] = 127,
[0][1][1][0][RTW89_MKK][0][102] = 127,
[0][1][1][0][RTW89_IC][1][102] = -2,
- [0][1][1][0][RTW89_IC][2][102] = 127,
[0][1][1][0][RTW89_KCC][1][102] = 20,
[0][1][1][0][RTW89_KCC][0][102] = 127,
[0][1][1][0][RTW89_ACMA][1][102] = 127,
@@ -40682,13 +39956,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][102] = 127,
[0][1][1][0][RTW89_THAILAND][0][102] = 127,
[0][1][1][0][RTW89_FCC][1][104] = -2,
- [0][1][1][0][RTW89_FCC][2][104] = 127,
[0][1][1][0][RTW89_ETSI][1][104] = 127,
[0][1][1][0][RTW89_ETSI][0][104] = 127,
[0][1][1][0][RTW89_MKK][1][104] = 127,
[0][1][1][0][RTW89_MKK][0][104] = 127,
[0][1][1][0][RTW89_IC][1][104] = -2,
- [0][1][1][0][RTW89_IC][2][104] = 127,
[0][1][1][0][RTW89_KCC][1][104] = 20,
[0][1][1][0][RTW89_KCC][0][104] = 127,
[0][1][1][0][RTW89_ACMA][1][104] = 127,
@@ -40701,13 +39973,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][104] = 127,
[0][1][1][0][RTW89_THAILAND][0][104] = 127,
[0][1][1][0][RTW89_FCC][1][105] = -2,
- [0][1][1][0][RTW89_FCC][2][105] = 127,
[0][1][1][0][RTW89_ETSI][1][105] = 127,
[0][1][1][0][RTW89_ETSI][0][105] = 127,
[0][1][1][0][RTW89_MKK][1][105] = 127,
[0][1][1][0][RTW89_MKK][0][105] = 127,
[0][1][1][0][RTW89_IC][1][105] = -2,
- [0][1][1][0][RTW89_IC][2][105] = 127,
[0][1][1][0][RTW89_KCC][1][105] = 20,
[0][1][1][0][RTW89_KCC][0][105] = 127,
[0][1][1][0][RTW89_ACMA][1][105] = 127,
@@ -40720,13 +39990,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][105] = 127,
[0][1][1][0][RTW89_THAILAND][0][105] = 127,
[0][1][1][0][RTW89_FCC][1][107] = 1,
- [0][1][1][0][RTW89_FCC][2][107] = 127,
[0][1][1][0][RTW89_ETSI][1][107] = 127,
[0][1][1][0][RTW89_ETSI][0][107] = 127,
[0][1][1][0][RTW89_MKK][1][107] = 127,
[0][1][1][0][RTW89_MKK][0][107] = 127,
[0][1][1][0][RTW89_IC][1][107] = 1,
- [0][1][1][0][RTW89_IC][2][107] = 127,
[0][1][1][0][RTW89_KCC][1][107] = 20,
[0][1][1][0][RTW89_KCC][0][107] = 127,
[0][1][1][0][RTW89_ACMA][1][107] = 127,
@@ -40739,13 +40007,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][107] = 127,
[0][1][1][0][RTW89_THAILAND][0][107] = 127,
[0][1][1][0][RTW89_FCC][1][109] = 1,
- [0][1][1][0][RTW89_FCC][2][109] = 127,
[0][1][1][0][RTW89_ETSI][1][109] = 127,
[0][1][1][0][RTW89_ETSI][0][109] = 127,
[0][1][1][0][RTW89_MKK][1][109] = 127,
[0][1][1][0][RTW89_MKK][0][109] = 127,
[0][1][1][0][RTW89_IC][1][109] = 1,
- [0][1][1][0][RTW89_IC][2][109] = 127,
[0][1][1][0][RTW89_KCC][1][109] = 20,
[0][1][1][0][RTW89_KCC][0][109] = 127,
[0][1][1][0][RTW89_ACMA][1][109] = 127,
@@ -40758,13 +40024,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][109] = 127,
[0][1][1][0][RTW89_THAILAND][0][109] = 127,
[0][1][1][0][RTW89_FCC][1][111] = 127,
- [0][1][1][0][RTW89_FCC][2][111] = 127,
[0][1][1][0][RTW89_ETSI][1][111] = 127,
[0][1][1][0][RTW89_ETSI][0][111] = 127,
[0][1][1][0][RTW89_MKK][1][111] = 127,
[0][1][1][0][RTW89_MKK][0][111] = 127,
[0][1][1][0][RTW89_IC][1][111] = 127,
- [0][1][1][0][RTW89_IC][2][111] = 127,
[0][1][1][0][RTW89_KCC][1][111] = 127,
[0][1][1][0][RTW89_KCC][0][111] = 127,
[0][1][1][0][RTW89_ACMA][1][111] = 127,
@@ -40777,13 +40041,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][111] = 127,
[0][1][1][0][RTW89_THAILAND][0][111] = 127,
[0][1][1][0][RTW89_FCC][1][113] = 127,
- [0][1][1][0][RTW89_FCC][2][113] = 127,
[0][1][1][0][RTW89_ETSI][1][113] = 127,
[0][1][1][0][RTW89_ETSI][0][113] = 127,
[0][1][1][0][RTW89_MKK][1][113] = 127,
[0][1][1][0][RTW89_MKK][0][113] = 127,
[0][1][1][0][RTW89_IC][1][113] = 127,
- [0][1][1][0][RTW89_IC][2][113] = 127,
[0][1][1][0][RTW89_KCC][1][113] = 127,
[0][1][1][0][RTW89_KCC][0][113] = 127,
[0][1][1][0][RTW89_ACMA][1][113] = 127,
@@ -40796,13 +40058,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][113] = 127,
[0][1][1][0][RTW89_THAILAND][0][113] = 127,
[0][1][1][0][RTW89_FCC][1][115] = 127,
- [0][1][1][0][RTW89_FCC][2][115] = 127,
[0][1][1][0][RTW89_ETSI][1][115] = 127,
[0][1][1][0][RTW89_ETSI][0][115] = 127,
[0][1][1][0][RTW89_MKK][1][115] = 127,
[0][1][1][0][RTW89_MKK][0][115] = 127,
[0][1][1][0][RTW89_IC][1][115] = 127,
- [0][1][1][0][RTW89_IC][2][115] = 127,
[0][1][1][0][RTW89_KCC][1][115] = 127,
[0][1][1][0][RTW89_KCC][0][115] = 127,
[0][1][1][0][RTW89_ACMA][1][115] = 127,
@@ -40815,13 +40075,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][115] = 127,
[0][1][1][0][RTW89_THAILAND][0][115] = 127,
[0][1][1][0][RTW89_FCC][1][117] = 127,
- [0][1][1][0][RTW89_FCC][2][117] = 127,
[0][1][1][0][RTW89_ETSI][1][117] = 127,
[0][1][1][0][RTW89_ETSI][0][117] = 127,
[0][1][1][0][RTW89_MKK][1][117] = 127,
[0][1][1][0][RTW89_MKK][0][117] = 127,
[0][1][1][0][RTW89_IC][1][117] = 127,
- [0][1][1][0][RTW89_IC][2][117] = 127,
[0][1][1][0][RTW89_KCC][1][117] = 127,
[0][1][1][0][RTW89_KCC][0][117] = 127,
[0][1][1][0][RTW89_ACMA][1][117] = 127,
@@ -40834,13 +40092,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][117] = 127,
[0][1][1][0][RTW89_THAILAND][0][117] = 127,
[0][1][1][0][RTW89_FCC][1][119] = 127,
- [0][1][1][0][RTW89_FCC][2][119] = 127,
[0][1][1][0][RTW89_ETSI][1][119] = 127,
[0][1][1][0][RTW89_ETSI][0][119] = 127,
[0][1][1][0][RTW89_MKK][1][119] = 127,
[0][1][1][0][RTW89_MKK][0][119] = 127,
[0][1][1][0][RTW89_IC][1][119] = 127,
- [0][1][1][0][RTW89_IC][2][119] = 127,
[0][1][1][0][RTW89_KCC][1][119] = 127,
[0][1][1][0][RTW89_KCC][0][119] = 127,
[0][1][1][0][RTW89_ACMA][1][119] = 127,
@@ -40853,13 +40109,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][1][0][RTW89_THAILAND][1][119] = 127,
[0][1][1][0][RTW89_THAILAND][0][119] = 127,
[0][0][2][0][RTW89_FCC][1][0] = 24,
- [0][0][2][0][RTW89_FCC][2][0] = 56,
[0][0][2][0][RTW89_ETSI][1][0] = 66,
[0][0][2][0][RTW89_ETSI][0][0] = 28,
[0][0][2][0][RTW89_MKK][1][0] = 66,
[0][0][2][0][RTW89_MKK][0][0] = 26,
[0][0][2][0][RTW89_IC][1][0] = 24,
- [0][0][2][0][RTW89_IC][2][0] = 56,
[0][0][2][0][RTW89_KCC][1][0] = 24,
[0][0][2][0][RTW89_KCC][0][0] = 24,
[0][0][2][0][RTW89_ACMA][1][0] = 66,
@@ -40872,13 +40126,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][0] = 56,
[0][0][2][0][RTW89_THAILAND][0][0] = 24,
[0][0][2][0][RTW89_FCC][1][2] = 22,
- [0][0][2][0][RTW89_FCC][2][2] = 56,
[0][0][2][0][RTW89_ETSI][1][2] = 66,
[0][0][2][0][RTW89_ETSI][0][2] = 28,
[0][0][2][0][RTW89_MKK][1][2] = 66,
[0][0][2][0][RTW89_MKK][0][2] = 26,
[0][0][2][0][RTW89_IC][1][2] = 22,
- [0][0][2][0][RTW89_IC][2][2] = 56,
[0][0][2][0][RTW89_KCC][1][2] = 24,
[0][0][2][0][RTW89_KCC][0][2] = 24,
[0][0][2][0][RTW89_ACMA][1][2] = 66,
@@ -40891,13 +40143,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][2] = 56,
[0][0][2][0][RTW89_THAILAND][0][2] = 22,
[0][0][2][0][RTW89_FCC][1][4] = 22,
- [0][0][2][0][RTW89_FCC][2][4] = 56,
[0][0][2][0][RTW89_ETSI][1][4] = 66,
[0][0][2][0][RTW89_ETSI][0][4] = 28,
[0][0][2][0][RTW89_MKK][1][4] = 66,
[0][0][2][0][RTW89_MKK][0][4] = 26,
[0][0][2][0][RTW89_IC][1][4] = 22,
- [0][0][2][0][RTW89_IC][2][4] = 56,
[0][0][2][0][RTW89_KCC][1][4] = 24,
[0][0][2][0][RTW89_KCC][0][4] = 24,
[0][0][2][0][RTW89_ACMA][1][4] = 66,
@@ -40910,13 +40160,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][4] = 56,
[0][0][2][0][RTW89_THAILAND][0][4] = 22,
[0][0][2][0][RTW89_FCC][1][6] = 22,
- [0][0][2][0][RTW89_FCC][2][6] = 56,
[0][0][2][0][RTW89_ETSI][1][6] = 66,
[0][0][2][0][RTW89_ETSI][0][6] = 28,
[0][0][2][0][RTW89_MKK][1][6] = 66,
[0][0][2][0][RTW89_MKK][0][6] = 26,
[0][0][2][0][RTW89_IC][1][6] = 22,
- [0][0][2][0][RTW89_IC][2][6] = 56,
[0][0][2][0][RTW89_KCC][1][6] = 24,
[0][0][2][0][RTW89_KCC][0][6] = 24,
[0][0][2][0][RTW89_ACMA][1][6] = 66,
@@ -40929,13 +40177,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][6] = 56,
[0][0][2][0][RTW89_THAILAND][0][6] = 22,
[0][0][2][0][RTW89_FCC][1][8] = 22,
- [0][0][2][0][RTW89_FCC][2][8] = 56,
[0][0][2][0][RTW89_ETSI][1][8] = 66,
[0][0][2][0][RTW89_ETSI][0][8] = 28,
[0][0][2][0][RTW89_MKK][1][8] = 66,
[0][0][2][0][RTW89_MKK][0][8] = 26,
[0][0][2][0][RTW89_IC][1][8] = 22,
- [0][0][2][0][RTW89_IC][2][8] = 56,
[0][0][2][0][RTW89_KCC][1][8] = 24,
[0][0][2][0][RTW89_KCC][0][8] = 24,
[0][0][2][0][RTW89_ACMA][1][8] = 66,
@@ -40948,13 +40194,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][8] = 56,
[0][0][2][0][RTW89_THAILAND][0][8] = 22,
[0][0][2][0][RTW89_FCC][1][10] = 22,
- [0][0][2][0][RTW89_FCC][2][10] = 56,
[0][0][2][0][RTW89_ETSI][1][10] = 66,
[0][0][2][0][RTW89_ETSI][0][10] = 28,
[0][0][2][0][RTW89_MKK][1][10] = 66,
[0][0][2][0][RTW89_MKK][0][10] = 26,
[0][0][2][0][RTW89_IC][1][10] = 22,
- [0][0][2][0][RTW89_IC][2][10] = 56,
[0][0][2][0][RTW89_KCC][1][10] = 24,
[0][0][2][0][RTW89_KCC][0][10] = 24,
[0][0][2][0][RTW89_ACMA][1][10] = 66,
@@ -40967,13 +40211,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][10] = 56,
[0][0][2][0][RTW89_THAILAND][0][10] = 22,
[0][0][2][0][RTW89_FCC][1][12] = 22,
- [0][0][2][0][RTW89_FCC][2][12] = 56,
[0][0][2][0][RTW89_ETSI][1][12] = 66,
[0][0][2][0][RTW89_ETSI][0][12] = 28,
[0][0][2][0][RTW89_MKK][1][12] = 66,
[0][0][2][0][RTW89_MKK][0][12] = 26,
[0][0][2][0][RTW89_IC][1][12] = 22,
- [0][0][2][0][RTW89_IC][2][12] = 56,
[0][0][2][0][RTW89_KCC][1][12] = 24,
[0][0][2][0][RTW89_KCC][0][12] = 24,
[0][0][2][0][RTW89_ACMA][1][12] = 66,
@@ -40986,13 +40228,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][12] = 56,
[0][0][2][0][RTW89_THAILAND][0][12] = 22,
[0][0][2][0][RTW89_FCC][1][14] = 22,
- [0][0][2][0][RTW89_FCC][2][14] = 56,
[0][0][2][0][RTW89_ETSI][1][14] = 66,
[0][0][2][0][RTW89_ETSI][0][14] = 28,
[0][0][2][0][RTW89_MKK][1][14] = 66,
[0][0][2][0][RTW89_MKK][0][14] = 26,
[0][0][2][0][RTW89_IC][1][14] = 22,
- [0][0][2][0][RTW89_IC][2][14] = 56,
[0][0][2][0][RTW89_KCC][1][14] = 24,
[0][0][2][0][RTW89_KCC][0][14] = 24,
[0][0][2][0][RTW89_ACMA][1][14] = 66,
@@ -41005,13 +40245,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][14] = 56,
[0][0][2][0][RTW89_THAILAND][0][14] = 22,
[0][0][2][0][RTW89_FCC][1][15] = 22,
- [0][0][2][0][RTW89_FCC][2][15] = 56,
[0][0][2][0][RTW89_ETSI][1][15] = 66,
[0][0][2][0][RTW89_ETSI][0][15] = 28,
[0][0][2][0][RTW89_MKK][1][15] = 66,
[0][0][2][0][RTW89_MKK][0][15] = 26,
[0][0][2][0][RTW89_IC][1][15] = 22,
- [0][0][2][0][RTW89_IC][2][15] = 56,
[0][0][2][0][RTW89_KCC][1][15] = 24,
[0][0][2][0][RTW89_KCC][0][15] = 24,
[0][0][2][0][RTW89_ACMA][1][15] = 66,
@@ -41024,13 +40262,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][15] = 56,
[0][0][2][0][RTW89_THAILAND][0][15] = 22,
[0][0][2][0][RTW89_FCC][1][17] = 22,
- [0][0][2][0][RTW89_FCC][2][17] = 56,
[0][0][2][0][RTW89_ETSI][1][17] = 66,
[0][0][2][0][RTW89_ETSI][0][17] = 28,
[0][0][2][0][RTW89_MKK][1][17] = 66,
[0][0][2][0][RTW89_MKK][0][17] = 26,
[0][0][2][0][RTW89_IC][1][17] = 22,
- [0][0][2][0][RTW89_IC][2][17] = 56,
[0][0][2][0][RTW89_KCC][1][17] = 24,
[0][0][2][0][RTW89_KCC][0][17] = 24,
[0][0][2][0][RTW89_ACMA][1][17] = 66,
@@ -41043,13 +40279,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][17] = 56,
[0][0][2][0][RTW89_THAILAND][0][17] = 22,
[0][0][2][0][RTW89_FCC][1][19] = 22,
- [0][0][2][0][RTW89_FCC][2][19] = 56,
[0][0][2][0][RTW89_ETSI][1][19] = 66,
[0][0][2][0][RTW89_ETSI][0][19] = 28,
[0][0][2][0][RTW89_MKK][1][19] = 66,
[0][0][2][0][RTW89_MKK][0][19] = 26,
[0][0][2][0][RTW89_IC][1][19] = 22,
- [0][0][2][0][RTW89_IC][2][19] = 56,
[0][0][2][0][RTW89_KCC][1][19] = 24,
[0][0][2][0][RTW89_KCC][0][19] = 24,
[0][0][2][0][RTW89_ACMA][1][19] = 66,
@@ -41062,13 +40296,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][19] = 56,
[0][0][2][0][RTW89_THAILAND][0][19] = 22,
[0][0][2][0][RTW89_FCC][1][21] = 22,
- [0][0][2][0][RTW89_FCC][2][21] = 56,
[0][0][2][0][RTW89_ETSI][1][21] = 66,
[0][0][2][0][RTW89_ETSI][0][21] = 28,
[0][0][2][0][RTW89_MKK][1][21] = 66,
[0][0][2][0][RTW89_MKK][0][21] = 26,
[0][0][2][0][RTW89_IC][1][21] = 22,
- [0][0][2][0][RTW89_IC][2][21] = 56,
[0][0][2][0][RTW89_KCC][1][21] = 24,
[0][0][2][0][RTW89_KCC][0][21] = 24,
[0][0][2][0][RTW89_ACMA][1][21] = 66,
@@ -41081,13 +40313,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][21] = 56,
[0][0][2][0][RTW89_THAILAND][0][21] = 22,
[0][0][2][0][RTW89_FCC][1][23] = 22,
- [0][0][2][0][RTW89_FCC][2][23] = 70,
[0][0][2][0][RTW89_ETSI][1][23] = 66,
[0][0][2][0][RTW89_ETSI][0][23] = 28,
[0][0][2][0][RTW89_MKK][1][23] = 66,
[0][0][2][0][RTW89_MKK][0][23] = 26,
[0][0][2][0][RTW89_IC][1][23] = 22,
- [0][0][2][0][RTW89_IC][2][23] = 70,
[0][0][2][0][RTW89_KCC][1][23] = 24,
[0][0][2][0][RTW89_KCC][0][23] = 26,
[0][0][2][0][RTW89_ACMA][1][23] = 66,
@@ -41100,13 +40330,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][23] = 66,
[0][0][2][0][RTW89_THAILAND][0][23] = 22,
[0][0][2][0][RTW89_FCC][1][25] = 22,
- [0][0][2][0][RTW89_FCC][2][25] = 70,
[0][0][2][0][RTW89_ETSI][1][25] = 66,
[0][0][2][0][RTW89_ETSI][0][25] = 28,
[0][0][2][0][RTW89_MKK][1][25] = 66,
[0][0][2][0][RTW89_MKK][0][25] = 26,
[0][0][2][0][RTW89_IC][1][25] = 22,
- [0][0][2][0][RTW89_IC][2][25] = 70,
[0][0][2][0][RTW89_KCC][1][25] = 24,
[0][0][2][0][RTW89_KCC][0][25] = 26,
[0][0][2][0][RTW89_ACMA][1][25] = 66,
@@ -41119,13 +40347,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][25] = 66,
[0][0][2][0][RTW89_THAILAND][0][25] = 22,
[0][0][2][0][RTW89_FCC][1][27] = 22,
- [0][0][2][0][RTW89_FCC][2][27] = 70,
[0][0][2][0][RTW89_ETSI][1][27] = 66,
[0][0][2][0][RTW89_ETSI][0][27] = 28,
[0][0][2][0][RTW89_MKK][1][27] = 66,
[0][0][2][0][RTW89_MKK][0][27] = 26,
[0][0][2][0][RTW89_IC][1][27] = 22,
- [0][0][2][0][RTW89_IC][2][27] = 70,
[0][0][2][0][RTW89_KCC][1][27] = 24,
[0][0][2][0][RTW89_KCC][0][27] = 26,
[0][0][2][0][RTW89_ACMA][1][27] = 66,
@@ -41138,13 +40364,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][27] = 66,
[0][0][2][0][RTW89_THAILAND][0][27] = 22,
[0][0][2][0][RTW89_FCC][1][29] = 22,
- [0][0][2][0][RTW89_FCC][2][29] = 70,
[0][0][2][0][RTW89_ETSI][1][29] = 66,
[0][0][2][0][RTW89_ETSI][0][29] = 28,
[0][0][2][0][RTW89_MKK][1][29] = 66,
[0][0][2][0][RTW89_MKK][0][29] = 26,
[0][0][2][0][RTW89_IC][1][29] = 22,
- [0][0][2][0][RTW89_IC][2][29] = 70,
[0][0][2][0][RTW89_KCC][1][29] = 24,
[0][0][2][0][RTW89_KCC][0][29] = 26,
[0][0][2][0][RTW89_ACMA][1][29] = 66,
@@ -41157,13 +40381,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][29] = 66,
[0][0][2][0][RTW89_THAILAND][0][29] = 22,
[0][0][2][0][RTW89_FCC][1][30] = 22,
- [0][0][2][0][RTW89_FCC][2][30] = 70,
[0][0][2][0][RTW89_ETSI][1][30] = 66,
[0][0][2][0][RTW89_ETSI][0][30] = 28,
[0][0][2][0][RTW89_MKK][1][30] = 66,
[0][0][2][0][RTW89_MKK][0][30] = 26,
[0][0][2][0][RTW89_IC][1][30] = 22,
- [0][0][2][0][RTW89_IC][2][30] = 70,
[0][0][2][0][RTW89_KCC][1][30] = 24,
[0][0][2][0][RTW89_KCC][0][30] = 26,
[0][0][2][0][RTW89_ACMA][1][30] = 66,
@@ -41176,13 +40398,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][30] = 66,
[0][0][2][0][RTW89_THAILAND][0][30] = 22,
[0][0][2][0][RTW89_FCC][1][32] = 22,
- [0][0][2][0][RTW89_FCC][2][32] = 70,
[0][0][2][0][RTW89_ETSI][1][32] = 66,
[0][0][2][0][RTW89_ETSI][0][32] = 28,
[0][0][2][0][RTW89_MKK][1][32] = 66,
[0][0][2][0][RTW89_MKK][0][32] = 26,
[0][0][2][0][RTW89_IC][1][32] = 22,
- [0][0][2][0][RTW89_IC][2][32] = 70,
[0][0][2][0][RTW89_KCC][1][32] = 24,
[0][0][2][0][RTW89_KCC][0][32] = 26,
[0][0][2][0][RTW89_ACMA][1][32] = 66,
@@ -41195,13 +40415,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][32] = 66,
[0][0][2][0][RTW89_THAILAND][0][32] = 22,
[0][0][2][0][RTW89_FCC][1][34] = 22,
- [0][0][2][0][RTW89_FCC][2][34] = 70,
[0][0][2][0][RTW89_ETSI][1][34] = 66,
[0][0][2][0][RTW89_ETSI][0][34] = 28,
[0][0][2][0][RTW89_MKK][1][34] = 66,
[0][0][2][0][RTW89_MKK][0][34] = 26,
[0][0][2][0][RTW89_IC][1][34] = 22,
- [0][0][2][0][RTW89_IC][2][34] = 70,
[0][0][2][0][RTW89_KCC][1][34] = 24,
[0][0][2][0][RTW89_KCC][0][34] = 26,
[0][0][2][0][RTW89_ACMA][1][34] = 66,
@@ -41214,13 +40432,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][34] = 66,
[0][0][2][0][RTW89_THAILAND][0][34] = 22,
[0][0][2][0][RTW89_FCC][1][36] = 22,
- [0][0][2][0][RTW89_FCC][2][36] = 70,
[0][0][2][0][RTW89_ETSI][1][36] = 66,
[0][0][2][0][RTW89_ETSI][0][36] = 28,
[0][0][2][0][RTW89_MKK][1][36] = 66,
[0][0][2][0][RTW89_MKK][0][36] = 26,
[0][0][2][0][RTW89_IC][1][36] = 22,
- [0][0][2][0][RTW89_IC][2][36] = 70,
[0][0][2][0][RTW89_KCC][1][36] = 24,
[0][0][2][0][RTW89_KCC][0][36] = 26,
[0][0][2][0][RTW89_ACMA][1][36] = 66,
@@ -41233,13 +40449,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][36] = 66,
[0][0][2][0][RTW89_THAILAND][0][36] = 22,
[0][0][2][0][RTW89_FCC][1][38] = 22,
- [0][0][2][0][RTW89_FCC][2][38] = 70,
[0][0][2][0][RTW89_ETSI][1][38] = 66,
[0][0][2][0][RTW89_ETSI][0][38] = 28,
[0][0][2][0][RTW89_MKK][1][38] = 66,
[0][0][2][0][RTW89_MKK][0][38] = 26,
[0][0][2][0][RTW89_IC][1][38] = 22,
- [0][0][2][0][RTW89_IC][2][38] = 70,
[0][0][2][0][RTW89_KCC][1][38] = 24,
[0][0][2][0][RTW89_KCC][0][38] = 26,
[0][0][2][0][RTW89_ACMA][1][38] = 66,
@@ -41252,13 +40466,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][38] = 66,
[0][0][2][0][RTW89_THAILAND][0][38] = 22,
[0][0][2][0][RTW89_FCC][1][40] = 22,
- [0][0][2][0][RTW89_FCC][2][40] = 70,
[0][0][2][0][RTW89_ETSI][1][40] = 66,
[0][0][2][0][RTW89_ETSI][0][40] = 28,
[0][0][2][0][RTW89_MKK][1][40] = 66,
[0][0][2][0][RTW89_MKK][0][40] = 26,
[0][0][2][0][RTW89_IC][1][40] = 22,
- [0][0][2][0][RTW89_IC][2][40] = 70,
[0][0][2][0][RTW89_KCC][1][40] = 24,
[0][0][2][0][RTW89_KCC][0][40] = 26,
[0][0][2][0][RTW89_ACMA][1][40] = 66,
@@ -41271,13 +40483,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][40] = 66,
[0][0][2][0][RTW89_THAILAND][0][40] = 22,
[0][0][2][0][RTW89_FCC][1][42] = 22,
- [0][0][2][0][RTW89_FCC][2][42] = 70,
[0][0][2][0][RTW89_ETSI][1][42] = 66,
[0][0][2][0][RTW89_ETSI][0][42] = 28,
[0][0][2][0][RTW89_MKK][1][42] = 66,
[0][0][2][0][RTW89_MKK][0][42] = 26,
[0][0][2][0][RTW89_IC][1][42] = 22,
- [0][0][2][0][RTW89_IC][2][42] = 70,
[0][0][2][0][RTW89_KCC][1][42] = 24,
[0][0][2][0][RTW89_KCC][0][42] = 26,
[0][0][2][0][RTW89_ACMA][1][42] = 66,
@@ -41290,13 +40500,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][42] = 66,
[0][0][2][0][RTW89_THAILAND][0][42] = 22,
[0][0][2][0][RTW89_FCC][1][44] = 22,
- [0][0][2][0][RTW89_FCC][2][44] = 70,
[0][0][2][0][RTW89_ETSI][1][44] = 66,
[0][0][2][0][RTW89_ETSI][0][44] = 30,
[0][0][2][0][RTW89_MKK][1][44] = 44,
[0][0][2][0][RTW89_MKK][0][44] = 28,
[0][0][2][0][RTW89_IC][1][44] = 22,
- [0][0][2][0][RTW89_IC][2][44] = 70,
[0][0][2][0][RTW89_KCC][1][44] = 24,
[0][0][2][0][RTW89_KCC][0][44] = 26,
[0][0][2][0][RTW89_ACMA][1][44] = 66,
@@ -41309,13 +40517,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][44] = 68,
[0][0][2][0][RTW89_THAILAND][0][44] = 22,
[0][0][2][0][RTW89_FCC][1][45] = 22,
- [0][0][2][0][RTW89_FCC][2][45] = 127,
[0][0][2][0][RTW89_ETSI][1][45] = 127,
[0][0][2][0][RTW89_ETSI][0][45] = 127,
[0][0][2][0][RTW89_MKK][1][45] = 127,
[0][0][2][0][RTW89_MKK][0][45] = 127,
[0][0][2][0][RTW89_IC][1][45] = 22,
- [0][0][2][0][RTW89_IC][2][45] = 70,
[0][0][2][0][RTW89_KCC][1][45] = 24,
[0][0][2][0][RTW89_KCC][0][45] = 127,
[0][0][2][0][RTW89_ACMA][1][45] = 127,
@@ -41328,13 +40534,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][45] = 127,
[0][0][2][0][RTW89_THAILAND][0][45] = 127,
[0][0][2][0][RTW89_FCC][1][47] = 22,
- [0][0][2][0][RTW89_FCC][2][47] = 127,
[0][0][2][0][RTW89_ETSI][1][47] = 127,
[0][0][2][0][RTW89_ETSI][0][47] = 127,
[0][0][2][0][RTW89_MKK][1][47] = 127,
[0][0][2][0][RTW89_MKK][0][47] = 127,
[0][0][2][0][RTW89_IC][1][47] = 22,
- [0][0][2][0][RTW89_IC][2][47] = 70,
[0][0][2][0][RTW89_KCC][1][47] = 24,
[0][0][2][0][RTW89_KCC][0][47] = 127,
[0][0][2][0][RTW89_ACMA][1][47] = 127,
@@ -41347,13 +40551,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][47] = 127,
[0][0][2][0][RTW89_THAILAND][0][47] = 127,
[0][0][2][0][RTW89_FCC][1][49] = 24,
- [0][0][2][0][RTW89_FCC][2][49] = 127,
[0][0][2][0][RTW89_ETSI][1][49] = 127,
[0][0][2][0][RTW89_ETSI][0][49] = 127,
[0][0][2][0][RTW89_MKK][1][49] = 127,
[0][0][2][0][RTW89_MKK][0][49] = 127,
[0][0][2][0][RTW89_IC][1][49] = 24,
- [0][0][2][0][RTW89_IC][2][49] = 70,
[0][0][2][0][RTW89_KCC][1][49] = 24,
[0][0][2][0][RTW89_KCC][0][49] = 127,
[0][0][2][0][RTW89_ACMA][1][49] = 127,
@@ -41366,13 +40568,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][49] = 127,
[0][0][2][0][RTW89_THAILAND][0][49] = 127,
[0][0][2][0][RTW89_FCC][1][51] = 22,
- [0][0][2][0][RTW89_FCC][2][51] = 127,
[0][0][2][0][RTW89_ETSI][1][51] = 127,
[0][0][2][0][RTW89_ETSI][0][51] = 127,
[0][0][2][0][RTW89_MKK][1][51] = 127,
[0][0][2][0][RTW89_MKK][0][51] = 127,
[0][0][2][0][RTW89_IC][1][51] = 22,
- [0][0][2][0][RTW89_IC][2][51] = 70,
[0][0][2][0][RTW89_KCC][1][51] = 24,
[0][0][2][0][RTW89_KCC][0][51] = 127,
[0][0][2][0][RTW89_ACMA][1][51] = 127,
@@ -41385,13 +40585,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][51] = 127,
[0][0][2][0][RTW89_THAILAND][0][51] = 127,
[0][0][2][0][RTW89_FCC][1][53] = 22,
- [0][0][2][0][RTW89_FCC][2][53] = 127,
[0][0][2][0][RTW89_ETSI][1][53] = 127,
[0][0][2][0][RTW89_ETSI][0][53] = 127,
[0][0][2][0][RTW89_MKK][1][53] = 127,
[0][0][2][0][RTW89_MKK][0][53] = 127,
[0][0][2][0][RTW89_IC][1][53] = 22,
- [0][0][2][0][RTW89_IC][2][53] = 70,
[0][0][2][0][RTW89_KCC][1][53] = 24,
[0][0][2][0][RTW89_KCC][0][53] = 127,
[0][0][2][0][RTW89_ACMA][1][53] = 127,
@@ -41404,13 +40602,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][53] = 127,
[0][0][2][0][RTW89_THAILAND][0][53] = 127,
[0][0][2][0][RTW89_FCC][1][55] = 22,
- [0][0][2][0][RTW89_FCC][2][55] = 68,
[0][0][2][0][RTW89_ETSI][1][55] = 127,
[0][0][2][0][RTW89_ETSI][0][55] = 127,
[0][0][2][0][RTW89_MKK][1][55] = 127,
[0][0][2][0][RTW89_MKK][0][55] = 127,
[0][0][2][0][RTW89_IC][1][55] = 22,
- [0][0][2][0][RTW89_IC][2][55] = 68,
[0][0][2][0][RTW89_KCC][1][55] = 26,
[0][0][2][0][RTW89_KCC][0][55] = 127,
[0][0][2][0][RTW89_ACMA][1][55] = 127,
@@ -41423,13 +40619,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][55] = 127,
[0][0][2][0][RTW89_THAILAND][0][55] = 127,
[0][0][2][0][RTW89_FCC][1][57] = 22,
- [0][0][2][0][RTW89_FCC][2][57] = 68,
[0][0][2][0][RTW89_ETSI][1][57] = 127,
[0][0][2][0][RTW89_ETSI][0][57] = 127,
[0][0][2][0][RTW89_MKK][1][57] = 127,
[0][0][2][0][RTW89_MKK][0][57] = 127,
[0][0][2][0][RTW89_IC][1][57] = 22,
- [0][0][2][0][RTW89_IC][2][57] = 68,
[0][0][2][0][RTW89_KCC][1][57] = 26,
[0][0][2][0][RTW89_KCC][0][57] = 127,
[0][0][2][0][RTW89_ACMA][1][57] = 127,
@@ -41442,13 +40636,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][57] = 127,
[0][0][2][0][RTW89_THAILAND][0][57] = 127,
[0][0][2][0][RTW89_FCC][1][59] = 22,
- [0][0][2][0][RTW89_FCC][2][59] = 68,
[0][0][2][0][RTW89_ETSI][1][59] = 127,
[0][0][2][0][RTW89_ETSI][0][59] = 127,
[0][0][2][0][RTW89_MKK][1][59] = 127,
[0][0][2][0][RTW89_MKK][0][59] = 127,
[0][0][2][0][RTW89_IC][1][59] = 22,
- [0][0][2][0][RTW89_IC][2][59] = 68,
[0][0][2][0][RTW89_KCC][1][59] = 26,
[0][0][2][0][RTW89_KCC][0][59] = 127,
[0][0][2][0][RTW89_ACMA][1][59] = 127,
@@ -41461,13 +40653,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][59] = 127,
[0][0][2][0][RTW89_THAILAND][0][59] = 127,
[0][0][2][0][RTW89_FCC][1][60] = 22,
- [0][0][2][0][RTW89_FCC][2][60] = 68,
[0][0][2][0][RTW89_ETSI][1][60] = 127,
[0][0][2][0][RTW89_ETSI][0][60] = 127,
[0][0][2][0][RTW89_MKK][1][60] = 127,
[0][0][2][0][RTW89_MKK][0][60] = 127,
[0][0][2][0][RTW89_IC][1][60] = 22,
- [0][0][2][0][RTW89_IC][2][60] = 68,
[0][0][2][0][RTW89_KCC][1][60] = 26,
[0][0][2][0][RTW89_KCC][0][60] = 127,
[0][0][2][0][RTW89_ACMA][1][60] = 127,
@@ -41480,13 +40670,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][60] = 127,
[0][0][2][0][RTW89_THAILAND][0][60] = 127,
[0][0][2][0][RTW89_FCC][1][62] = 22,
- [0][0][2][0][RTW89_FCC][2][62] = 68,
[0][0][2][0][RTW89_ETSI][1][62] = 127,
[0][0][2][0][RTW89_ETSI][0][62] = 127,
[0][0][2][0][RTW89_MKK][1][62] = 127,
[0][0][2][0][RTW89_MKK][0][62] = 127,
[0][0][2][0][RTW89_IC][1][62] = 22,
- [0][0][2][0][RTW89_IC][2][62] = 68,
[0][0][2][0][RTW89_KCC][1][62] = 26,
[0][0][2][0][RTW89_KCC][0][62] = 127,
[0][0][2][0][RTW89_ACMA][1][62] = 127,
@@ -41499,13 +40687,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][62] = 127,
[0][0][2][0][RTW89_THAILAND][0][62] = 127,
[0][0][2][0][RTW89_FCC][1][64] = 22,
- [0][0][2][0][RTW89_FCC][2][64] = 68,
[0][0][2][0][RTW89_ETSI][1][64] = 127,
[0][0][2][0][RTW89_ETSI][0][64] = 127,
[0][0][2][0][RTW89_MKK][1][64] = 127,
[0][0][2][0][RTW89_MKK][0][64] = 127,
[0][0][2][0][RTW89_IC][1][64] = 22,
- [0][0][2][0][RTW89_IC][2][64] = 68,
[0][0][2][0][RTW89_KCC][1][64] = 26,
[0][0][2][0][RTW89_KCC][0][64] = 127,
[0][0][2][0][RTW89_ACMA][1][64] = 127,
@@ -41518,13 +40704,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][64] = 127,
[0][0][2][0][RTW89_THAILAND][0][64] = 127,
[0][0][2][0][RTW89_FCC][1][66] = 22,
- [0][0][2][0][RTW89_FCC][2][66] = 68,
[0][0][2][0][RTW89_ETSI][1][66] = 127,
[0][0][2][0][RTW89_ETSI][0][66] = 127,
[0][0][2][0][RTW89_MKK][1][66] = 127,
[0][0][2][0][RTW89_MKK][0][66] = 127,
[0][0][2][0][RTW89_IC][1][66] = 22,
- [0][0][2][0][RTW89_IC][2][66] = 68,
[0][0][2][0][RTW89_KCC][1][66] = 26,
[0][0][2][0][RTW89_KCC][0][66] = 127,
[0][0][2][0][RTW89_ACMA][1][66] = 127,
@@ -41537,13 +40721,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][66] = 127,
[0][0][2][0][RTW89_THAILAND][0][66] = 127,
[0][0][2][0][RTW89_FCC][1][68] = 22,
- [0][0][2][0][RTW89_FCC][2][68] = 68,
[0][0][2][0][RTW89_ETSI][1][68] = 127,
[0][0][2][0][RTW89_ETSI][0][68] = 127,
[0][0][2][0][RTW89_MKK][1][68] = 127,
[0][0][2][0][RTW89_MKK][0][68] = 127,
[0][0][2][0][RTW89_IC][1][68] = 22,
- [0][0][2][0][RTW89_IC][2][68] = 68,
[0][0][2][0][RTW89_KCC][1][68] = 26,
[0][0][2][0][RTW89_KCC][0][68] = 127,
[0][0][2][0][RTW89_ACMA][1][68] = 127,
@@ -41556,13 +40738,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][68] = 127,
[0][0][2][0][RTW89_THAILAND][0][68] = 127,
[0][0][2][0][RTW89_FCC][1][70] = 24,
- [0][0][2][0][RTW89_FCC][2][70] = 68,
[0][0][2][0][RTW89_ETSI][1][70] = 127,
[0][0][2][0][RTW89_ETSI][0][70] = 127,
[0][0][2][0][RTW89_MKK][1][70] = 127,
[0][0][2][0][RTW89_MKK][0][70] = 127,
[0][0][2][0][RTW89_IC][1][70] = 24,
- [0][0][2][0][RTW89_IC][2][70] = 68,
[0][0][2][0][RTW89_KCC][1][70] = 26,
[0][0][2][0][RTW89_KCC][0][70] = 127,
[0][0][2][0][RTW89_ACMA][1][70] = 127,
@@ -41575,13 +40755,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][70] = 127,
[0][0][2][0][RTW89_THAILAND][0][70] = 127,
[0][0][2][0][RTW89_FCC][1][72] = 22,
- [0][0][2][0][RTW89_FCC][2][72] = 68,
[0][0][2][0][RTW89_ETSI][1][72] = 127,
[0][0][2][0][RTW89_ETSI][0][72] = 127,
[0][0][2][0][RTW89_MKK][1][72] = 127,
[0][0][2][0][RTW89_MKK][0][72] = 127,
[0][0][2][0][RTW89_IC][1][72] = 22,
- [0][0][2][0][RTW89_IC][2][72] = 68,
[0][0][2][0][RTW89_KCC][1][72] = 26,
[0][0][2][0][RTW89_KCC][0][72] = 127,
[0][0][2][0][RTW89_ACMA][1][72] = 127,
@@ -41594,13 +40772,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][72] = 127,
[0][0][2][0][RTW89_THAILAND][0][72] = 127,
[0][0][2][0][RTW89_FCC][1][74] = 22,
- [0][0][2][0][RTW89_FCC][2][74] = 68,
[0][0][2][0][RTW89_ETSI][1][74] = 127,
[0][0][2][0][RTW89_ETSI][0][74] = 127,
[0][0][2][0][RTW89_MKK][1][74] = 127,
[0][0][2][0][RTW89_MKK][0][74] = 127,
[0][0][2][0][RTW89_IC][1][74] = 22,
- [0][0][2][0][RTW89_IC][2][74] = 68,
[0][0][2][0][RTW89_KCC][1][74] = 26,
[0][0][2][0][RTW89_KCC][0][74] = 127,
[0][0][2][0][RTW89_ACMA][1][74] = 127,
@@ -41613,13 +40789,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][74] = 127,
[0][0][2][0][RTW89_THAILAND][0][74] = 127,
[0][0][2][0][RTW89_FCC][1][75] = 22,
- [0][0][2][0][RTW89_FCC][2][75] = 68,
[0][0][2][0][RTW89_ETSI][1][75] = 127,
[0][0][2][0][RTW89_ETSI][0][75] = 127,
[0][0][2][0][RTW89_MKK][1][75] = 127,
[0][0][2][0][RTW89_MKK][0][75] = 127,
[0][0][2][0][RTW89_IC][1][75] = 22,
- [0][0][2][0][RTW89_IC][2][75] = 68,
[0][0][2][0][RTW89_KCC][1][75] = 26,
[0][0][2][0][RTW89_KCC][0][75] = 127,
[0][0][2][0][RTW89_ACMA][1][75] = 127,
@@ -41632,13 +40806,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][75] = 127,
[0][0][2][0][RTW89_THAILAND][0][75] = 127,
[0][0][2][0][RTW89_FCC][1][77] = 22,
- [0][0][2][0][RTW89_FCC][2][77] = 68,
[0][0][2][0][RTW89_ETSI][1][77] = 127,
[0][0][2][0][RTW89_ETSI][0][77] = 127,
[0][0][2][0][RTW89_MKK][1][77] = 127,
[0][0][2][0][RTW89_MKK][0][77] = 127,
[0][0][2][0][RTW89_IC][1][77] = 22,
- [0][0][2][0][RTW89_IC][2][77] = 68,
[0][0][2][0][RTW89_KCC][1][77] = 26,
[0][0][2][0][RTW89_KCC][0][77] = 127,
[0][0][2][0][RTW89_ACMA][1][77] = 127,
@@ -41651,13 +40823,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][77] = 127,
[0][0][2][0][RTW89_THAILAND][0][77] = 127,
[0][0][2][0][RTW89_FCC][1][79] = 22,
- [0][0][2][0][RTW89_FCC][2][79] = 68,
[0][0][2][0][RTW89_ETSI][1][79] = 127,
[0][0][2][0][RTW89_ETSI][0][79] = 127,
[0][0][2][0][RTW89_MKK][1][79] = 127,
[0][0][2][0][RTW89_MKK][0][79] = 127,
[0][0][2][0][RTW89_IC][1][79] = 22,
- [0][0][2][0][RTW89_IC][2][79] = 68,
[0][0][2][0][RTW89_KCC][1][79] = 26,
[0][0][2][0][RTW89_KCC][0][79] = 127,
[0][0][2][0][RTW89_ACMA][1][79] = 127,
@@ -41670,13 +40840,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][79] = 127,
[0][0][2][0][RTW89_THAILAND][0][79] = 127,
[0][0][2][0][RTW89_FCC][1][81] = 22,
- [0][0][2][0][RTW89_FCC][2][81] = 68,
[0][0][2][0][RTW89_ETSI][1][81] = 127,
[0][0][2][0][RTW89_ETSI][0][81] = 127,
[0][0][2][0][RTW89_MKK][1][81] = 127,
[0][0][2][0][RTW89_MKK][0][81] = 127,
[0][0][2][0][RTW89_IC][1][81] = 22,
- [0][0][2][0][RTW89_IC][2][81] = 68,
[0][0][2][0][RTW89_KCC][1][81] = 26,
[0][0][2][0][RTW89_KCC][0][81] = 127,
[0][0][2][0][RTW89_ACMA][1][81] = 127,
@@ -41689,13 +40857,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][81] = 127,
[0][0][2][0][RTW89_THAILAND][0][81] = 127,
[0][0][2][0][RTW89_FCC][1][83] = 22,
- [0][0][2][0][RTW89_FCC][2][83] = 68,
[0][0][2][0][RTW89_ETSI][1][83] = 127,
[0][0][2][0][RTW89_ETSI][0][83] = 127,
[0][0][2][0][RTW89_MKK][1][83] = 127,
[0][0][2][0][RTW89_MKK][0][83] = 127,
[0][0][2][0][RTW89_IC][1][83] = 22,
- [0][0][2][0][RTW89_IC][2][83] = 68,
[0][0][2][0][RTW89_KCC][1][83] = 32,
[0][0][2][0][RTW89_KCC][0][83] = 127,
[0][0][2][0][RTW89_ACMA][1][83] = 127,
@@ -41708,13 +40874,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][83] = 127,
[0][0][2][0][RTW89_THAILAND][0][83] = 127,
[0][0][2][0][RTW89_FCC][1][85] = 22,
- [0][0][2][0][RTW89_FCC][2][85] = 68,
[0][0][2][0][RTW89_ETSI][1][85] = 127,
[0][0][2][0][RTW89_ETSI][0][85] = 127,
[0][0][2][0][RTW89_MKK][1][85] = 127,
[0][0][2][0][RTW89_MKK][0][85] = 127,
[0][0][2][0][RTW89_IC][1][85] = 22,
- [0][0][2][0][RTW89_IC][2][85] = 68,
[0][0][2][0][RTW89_KCC][1][85] = 32,
[0][0][2][0][RTW89_KCC][0][85] = 127,
[0][0][2][0][RTW89_ACMA][1][85] = 127,
@@ -41727,13 +40891,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][85] = 127,
[0][0][2][0][RTW89_THAILAND][0][85] = 127,
[0][0][2][0][RTW89_FCC][1][87] = 22,
- [0][0][2][0][RTW89_FCC][2][87] = 127,
[0][0][2][0][RTW89_ETSI][1][87] = 127,
[0][0][2][0][RTW89_ETSI][0][87] = 127,
[0][0][2][0][RTW89_MKK][1][87] = 127,
[0][0][2][0][RTW89_MKK][0][87] = 127,
[0][0][2][0][RTW89_IC][1][87] = 22,
- [0][0][2][0][RTW89_IC][2][87] = 127,
[0][0][2][0][RTW89_KCC][1][87] = 32,
[0][0][2][0][RTW89_KCC][0][87] = 127,
[0][0][2][0][RTW89_ACMA][1][87] = 127,
@@ -41746,13 +40908,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][87] = 127,
[0][0][2][0][RTW89_THAILAND][0][87] = 127,
[0][0][2][0][RTW89_FCC][1][89] = 22,
- [0][0][2][0][RTW89_FCC][2][89] = 127,
[0][0][2][0][RTW89_ETSI][1][89] = 127,
[0][0][2][0][RTW89_ETSI][0][89] = 127,
[0][0][2][0][RTW89_MKK][1][89] = 127,
[0][0][2][0][RTW89_MKK][0][89] = 127,
[0][0][2][0][RTW89_IC][1][89] = 22,
- [0][0][2][0][RTW89_IC][2][89] = 127,
[0][0][2][0][RTW89_KCC][1][89] = 32,
[0][0][2][0][RTW89_KCC][0][89] = 127,
[0][0][2][0][RTW89_ACMA][1][89] = 127,
@@ -41765,13 +40925,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][89] = 127,
[0][0][2][0][RTW89_THAILAND][0][89] = 127,
[0][0][2][0][RTW89_FCC][1][90] = 22,
- [0][0][2][0][RTW89_FCC][2][90] = 127,
[0][0][2][0][RTW89_ETSI][1][90] = 127,
[0][0][2][0][RTW89_ETSI][0][90] = 127,
[0][0][2][0][RTW89_MKK][1][90] = 127,
[0][0][2][0][RTW89_MKK][0][90] = 127,
[0][0][2][0][RTW89_IC][1][90] = 22,
- [0][0][2][0][RTW89_IC][2][90] = 127,
[0][0][2][0][RTW89_KCC][1][90] = 32,
[0][0][2][0][RTW89_KCC][0][90] = 127,
[0][0][2][0][RTW89_ACMA][1][90] = 127,
@@ -41784,13 +40942,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][90] = 127,
[0][0][2][0][RTW89_THAILAND][0][90] = 127,
[0][0][2][0][RTW89_FCC][1][92] = 22,
- [0][0][2][0][RTW89_FCC][2][92] = 127,
[0][0][2][0][RTW89_ETSI][1][92] = 127,
[0][0][2][0][RTW89_ETSI][0][92] = 127,
[0][0][2][0][RTW89_MKK][1][92] = 127,
[0][0][2][0][RTW89_MKK][0][92] = 127,
[0][0][2][0][RTW89_IC][1][92] = 22,
- [0][0][2][0][RTW89_IC][2][92] = 127,
[0][0][2][0][RTW89_KCC][1][92] = 32,
[0][0][2][0][RTW89_KCC][0][92] = 127,
[0][0][2][0][RTW89_ACMA][1][92] = 127,
@@ -41803,13 +40959,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][92] = 127,
[0][0][2][0][RTW89_THAILAND][0][92] = 127,
[0][0][2][0][RTW89_FCC][1][94] = 22,
- [0][0][2][0][RTW89_FCC][2][94] = 127,
[0][0][2][0][RTW89_ETSI][1][94] = 127,
[0][0][2][0][RTW89_ETSI][0][94] = 127,
[0][0][2][0][RTW89_MKK][1][94] = 127,
[0][0][2][0][RTW89_MKK][0][94] = 127,
[0][0][2][0][RTW89_IC][1][94] = 22,
- [0][0][2][0][RTW89_IC][2][94] = 127,
[0][0][2][0][RTW89_KCC][1][94] = 32,
[0][0][2][0][RTW89_KCC][0][94] = 127,
[0][0][2][0][RTW89_ACMA][1][94] = 127,
@@ -41822,13 +40976,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][94] = 127,
[0][0][2][0][RTW89_THAILAND][0][94] = 127,
[0][0][2][0][RTW89_FCC][1][96] = 22,
- [0][0][2][0][RTW89_FCC][2][96] = 127,
[0][0][2][0][RTW89_ETSI][1][96] = 127,
[0][0][2][0][RTW89_ETSI][0][96] = 127,
[0][0][2][0][RTW89_MKK][1][96] = 127,
[0][0][2][0][RTW89_MKK][0][96] = 127,
[0][0][2][0][RTW89_IC][1][96] = 22,
- [0][0][2][0][RTW89_IC][2][96] = 127,
[0][0][2][0][RTW89_KCC][1][96] = 32,
[0][0][2][0][RTW89_KCC][0][96] = 127,
[0][0][2][0][RTW89_ACMA][1][96] = 127,
@@ -41841,13 +40993,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][96] = 127,
[0][0][2][0][RTW89_THAILAND][0][96] = 127,
[0][0][2][0][RTW89_FCC][1][98] = 22,
- [0][0][2][0][RTW89_FCC][2][98] = 127,
[0][0][2][0][RTW89_ETSI][1][98] = 127,
[0][0][2][0][RTW89_ETSI][0][98] = 127,
[0][0][2][0][RTW89_MKK][1][98] = 127,
[0][0][2][0][RTW89_MKK][0][98] = 127,
[0][0][2][0][RTW89_IC][1][98] = 22,
- [0][0][2][0][RTW89_IC][2][98] = 127,
[0][0][2][0][RTW89_KCC][1][98] = 32,
[0][0][2][0][RTW89_KCC][0][98] = 127,
[0][0][2][0][RTW89_ACMA][1][98] = 127,
@@ -41860,13 +41010,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][98] = 127,
[0][0][2][0][RTW89_THAILAND][0][98] = 127,
[0][0][2][0][RTW89_FCC][1][100] = 22,
- [0][0][2][0][RTW89_FCC][2][100] = 127,
[0][0][2][0][RTW89_ETSI][1][100] = 127,
[0][0][2][0][RTW89_ETSI][0][100] = 127,
[0][0][2][0][RTW89_MKK][1][100] = 127,
[0][0][2][0][RTW89_MKK][0][100] = 127,
[0][0][2][0][RTW89_IC][1][100] = 22,
- [0][0][2][0][RTW89_IC][2][100] = 127,
[0][0][2][0][RTW89_KCC][1][100] = 32,
[0][0][2][0][RTW89_KCC][0][100] = 127,
[0][0][2][0][RTW89_ACMA][1][100] = 127,
@@ -41879,13 +41027,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][100] = 127,
[0][0][2][0][RTW89_THAILAND][0][100] = 127,
[0][0][2][0][RTW89_FCC][1][102] = 22,
- [0][0][2][0][RTW89_FCC][2][102] = 127,
[0][0][2][0][RTW89_ETSI][1][102] = 127,
[0][0][2][0][RTW89_ETSI][0][102] = 127,
[0][0][2][0][RTW89_MKK][1][102] = 127,
[0][0][2][0][RTW89_MKK][0][102] = 127,
[0][0][2][0][RTW89_IC][1][102] = 22,
- [0][0][2][0][RTW89_IC][2][102] = 127,
[0][0][2][0][RTW89_KCC][1][102] = 32,
[0][0][2][0][RTW89_KCC][0][102] = 127,
[0][0][2][0][RTW89_ACMA][1][102] = 127,
@@ -41898,13 +41044,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][102] = 127,
[0][0][2][0][RTW89_THAILAND][0][102] = 127,
[0][0][2][0][RTW89_FCC][1][104] = 22,
- [0][0][2][0][RTW89_FCC][2][104] = 127,
[0][0][2][0][RTW89_ETSI][1][104] = 127,
[0][0][2][0][RTW89_ETSI][0][104] = 127,
[0][0][2][0][RTW89_MKK][1][104] = 127,
[0][0][2][0][RTW89_MKK][0][104] = 127,
[0][0][2][0][RTW89_IC][1][104] = 22,
- [0][0][2][0][RTW89_IC][2][104] = 127,
[0][0][2][0][RTW89_KCC][1][104] = 32,
[0][0][2][0][RTW89_KCC][0][104] = 127,
[0][0][2][0][RTW89_ACMA][1][104] = 127,
@@ -41917,13 +41061,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][104] = 127,
[0][0][2][0][RTW89_THAILAND][0][104] = 127,
[0][0][2][0][RTW89_FCC][1][105] = 22,
- [0][0][2][0][RTW89_FCC][2][105] = 127,
[0][0][2][0][RTW89_ETSI][1][105] = 127,
[0][0][2][0][RTW89_ETSI][0][105] = 127,
[0][0][2][0][RTW89_MKK][1][105] = 127,
[0][0][2][0][RTW89_MKK][0][105] = 127,
[0][0][2][0][RTW89_IC][1][105] = 22,
- [0][0][2][0][RTW89_IC][2][105] = 127,
[0][0][2][0][RTW89_KCC][1][105] = 32,
[0][0][2][0][RTW89_KCC][0][105] = 127,
[0][0][2][0][RTW89_ACMA][1][105] = 127,
@@ -41936,13 +41078,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][105] = 127,
[0][0][2][0][RTW89_THAILAND][0][105] = 127,
[0][0][2][0][RTW89_FCC][1][107] = 24,
- [0][0][2][0][RTW89_FCC][2][107] = 127,
[0][0][2][0][RTW89_ETSI][1][107] = 127,
[0][0][2][0][RTW89_ETSI][0][107] = 127,
[0][0][2][0][RTW89_MKK][1][107] = 127,
[0][0][2][0][RTW89_MKK][0][107] = 127,
[0][0][2][0][RTW89_IC][1][107] = 24,
- [0][0][2][0][RTW89_IC][2][107] = 127,
[0][0][2][0][RTW89_KCC][1][107] = 32,
[0][0][2][0][RTW89_KCC][0][107] = 127,
[0][0][2][0][RTW89_ACMA][1][107] = 127,
@@ -41955,13 +41095,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][107] = 127,
[0][0][2][0][RTW89_THAILAND][0][107] = 127,
[0][0][2][0][RTW89_FCC][1][109] = 24,
- [0][0][2][0][RTW89_FCC][2][109] = 127,
[0][0][2][0][RTW89_ETSI][1][109] = 127,
[0][0][2][0][RTW89_ETSI][0][109] = 127,
[0][0][2][0][RTW89_MKK][1][109] = 127,
[0][0][2][0][RTW89_MKK][0][109] = 127,
[0][0][2][0][RTW89_IC][1][109] = 24,
- [0][0][2][0][RTW89_IC][2][109] = 127,
[0][0][2][0][RTW89_KCC][1][109] = 32,
[0][0][2][0][RTW89_KCC][0][109] = 127,
[0][0][2][0][RTW89_ACMA][1][109] = 127,
@@ -41974,13 +41112,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][109] = 127,
[0][0][2][0][RTW89_THAILAND][0][109] = 127,
[0][0][2][0][RTW89_FCC][1][111] = 127,
- [0][0][2][0][RTW89_FCC][2][111] = 127,
[0][0][2][0][RTW89_ETSI][1][111] = 127,
[0][0][2][0][RTW89_ETSI][0][111] = 127,
[0][0][2][0][RTW89_MKK][1][111] = 127,
[0][0][2][0][RTW89_MKK][0][111] = 127,
[0][0][2][0][RTW89_IC][1][111] = 127,
- [0][0][2][0][RTW89_IC][2][111] = 127,
[0][0][2][0][RTW89_KCC][1][111] = 127,
[0][0][2][0][RTW89_KCC][0][111] = 127,
[0][0][2][0][RTW89_ACMA][1][111] = 127,
@@ -41993,13 +41129,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][111] = 127,
[0][0][2][0][RTW89_THAILAND][0][111] = 127,
[0][0][2][0][RTW89_FCC][1][113] = 127,
- [0][0][2][0][RTW89_FCC][2][113] = 127,
[0][0][2][0][RTW89_ETSI][1][113] = 127,
[0][0][2][0][RTW89_ETSI][0][113] = 127,
[0][0][2][0][RTW89_MKK][1][113] = 127,
[0][0][2][0][RTW89_MKK][0][113] = 127,
[0][0][2][0][RTW89_IC][1][113] = 127,
- [0][0][2][0][RTW89_IC][2][113] = 127,
[0][0][2][0][RTW89_KCC][1][113] = 127,
[0][0][2][0][RTW89_KCC][0][113] = 127,
[0][0][2][0][RTW89_ACMA][1][113] = 127,
@@ -42012,13 +41146,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][113] = 127,
[0][0][2][0][RTW89_THAILAND][0][113] = 127,
[0][0][2][0][RTW89_FCC][1][115] = 127,
- [0][0][2][0][RTW89_FCC][2][115] = 127,
[0][0][2][0][RTW89_ETSI][1][115] = 127,
[0][0][2][0][RTW89_ETSI][0][115] = 127,
[0][0][2][0][RTW89_MKK][1][115] = 127,
[0][0][2][0][RTW89_MKK][0][115] = 127,
[0][0][2][0][RTW89_IC][1][115] = 127,
- [0][0][2][0][RTW89_IC][2][115] = 127,
[0][0][2][0][RTW89_KCC][1][115] = 127,
[0][0][2][0][RTW89_KCC][0][115] = 127,
[0][0][2][0][RTW89_ACMA][1][115] = 127,
@@ -42031,13 +41163,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][115] = 127,
[0][0][2][0][RTW89_THAILAND][0][115] = 127,
[0][0][2][0][RTW89_FCC][1][117] = 127,
- [0][0][2][0][RTW89_FCC][2][117] = 127,
[0][0][2][0][RTW89_ETSI][1][117] = 127,
[0][0][2][0][RTW89_ETSI][0][117] = 127,
[0][0][2][0][RTW89_MKK][1][117] = 127,
[0][0][2][0][RTW89_MKK][0][117] = 127,
[0][0][2][0][RTW89_IC][1][117] = 127,
- [0][0][2][0][RTW89_IC][2][117] = 127,
[0][0][2][0][RTW89_KCC][1][117] = 127,
[0][0][2][0][RTW89_KCC][0][117] = 127,
[0][0][2][0][RTW89_ACMA][1][117] = 127,
@@ -42050,13 +41180,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][117] = 127,
[0][0][2][0][RTW89_THAILAND][0][117] = 127,
[0][0][2][0][RTW89_FCC][1][119] = 127,
- [0][0][2][0][RTW89_FCC][2][119] = 127,
[0][0][2][0][RTW89_ETSI][1][119] = 127,
[0][0][2][0][RTW89_ETSI][0][119] = 127,
[0][0][2][0][RTW89_MKK][1][119] = 127,
[0][0][2][0][RTW89_MKK][0][119] = 127,
[0][0][2][0][RTW89_IC][1][119] = 127,
- [0][0][2][0][RTW89_IC][2][119] = 127,
[0][0][2][0][RTW89_KCC][1][119] = 127,
[0][0][2][0][RTW89_KCC][0][119] = 127,
[0][0][2][0][RTW89_ACMA][1][119] = 127,
@@ -42069,13 +41197,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][0][2][0][RTW89_THAILAND][1][119] = 127,
[0][0][2][0][RTW89_THAILAND][0][119] = 127,
[0][1][2][0][RTW89_FCC][1][0] = -2,
- [0][1][2][0][RTW89_FCC][2][0] = 54,
[0][1][2][0][RTW89_ETSI][1][0] = 54,
[0][1][2][0][RTW89_ETSI][0][0] = 18,
[0][1][2][0][RTW89_MKK][1][0] = 56,
[0][1][2][0][RTW89_MKK][0][0] = 16,
[0][1][2][0][RTW89_IC][1][0] = -2,
- [0][1][2][0][RTW89_IC][2][0] = 54,
[0][1][2][0][RTW89_KCC][1][0] = 12,
[0][1][2][0][RTW89_KCC][0][0] = 10,
[0][1][2][0][RTW89_ACMA][1][0] = 54,
@@ -42088,13 +41214,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][0] = 44,
[0][1][2][0][RTW89_THAILAND][0][0] = -2,
[0][1][2][0][RTW89_FCC][1][2] = -4,
- [0][1][2][0][RTW89_FCC][2][2] = 54,
[0][1][2][0][RTW89_ETSI][1][2] = 54,
[0][1][2][0][RTW89_ETSI][0][2] = 18,
[0][1][2][0][RTW89_MKK][1][2] = 54,
[0][1][2][0][RTW89_MKK][0][2] = 16,
[0][1][2][0][RTW89_IC][1][2] = -4,
- [0][1][2][0][RTW89_IC][2][2] = 54,
[0][1][2][0][RTW89_KCC][1][2] = 12,
[0][1][2][0][RTW89_KCC][0][2] = 12,
[0][1][2][0][RTW89_ACMA][1][2] = 54,
@@ -42107,13 +41231,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][2] = 44,
[0][1][2][0][RTW89_THAILAND][0][2] = -4,
[0][1][2][0][RTW89_FCC][1][4] = -4,
- [0][1][2][0][RTW89_FCC][2][4] = 54,
[0][1][2][0][RTW89_ETSI][1][4] = 54,
[0][1][2][0][RTW89_ETSI][0][4] = 18,
[0][1][2][0][RTW89_MKK][1][4] = 54,
[0][1][2][0][RTW89_MKK][0][4] = 16,
[0][1][2][0][RTW89_IC][1][4] = -4,
- [0][1][2][0][RTW89_IC][2][4] = 54,
[0][1][2][0][RTW89_KCC][1][4] = 12,
[0][1][2][0][RTW89_KCC][0][4] = 12,
[0][1][2][0][RTW89_ACMA][1][4] = 54,
@@ -42126,13 +41248,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][4] = 44,
[0][1][2][0][RTW89_THAILAND][0][4] = -4,
[0][1][2][0][RTW89_FCC][1][6] = -4,
- [0][1][2][0][RTW89_FCC][2][6] = 54,
[0][1][2][0][RTW89_ETSI][1][6] = 54,
[0][1][2][0][RTW89_ETSI][0][6] = 18,
[0][1][2][0][RTW89_MKK][1][6] = 54,
[0][1][2][0][RTW89_MKK][0][6] = 16,
[0][1][2][0][RTW89_IC][1][6] = -4,
- [0][1][2][0][RTW89_IC][2][6] = 54,
[0][1][2][0][RTW89_KCC][1][6] = 12,
[0][1][2][0][RTW89_KCC][0][6] = 12,
[0][1][2][0][RTW89_ACMA][1][6] = 54,
@@ -42145,13 +41265,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][6] = 44,
[0][1][2][0][RTW89_THAILAND][0][6] = -4,
[0][1][2][0][RTW89_FCC][1][8] = -4,
- [0][1][2][0][RTW89_FCC][2][8] = 54,
[0][1][2][0][RTW89_ETSI][1][8] = 54,
[0][1][2][0][RTW89_ETSI][0][8] = 18,
[0][1][2][0][RTW89_MKK][1][8] = 54,
[0][1][2][0][RTW89_MKK][0][8] = 16,
[0][1][2][0][RTW89_IC][1][8] = -4,
- [0][1][2][0][RTW89_IC][2][8] = 54,
[0][1][2][0][RTW89_KCC][1][8] = 12,
[0][1][2][0][RTW89_KCC][0][8] = 12,
[0][1][2][0][RTW89_ACMA][1][8] = 54,
@@ -42164,13 +41282,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][8] = 44,
[0][1][2][0][RTW89_THAILAND][0][8] = -4,
[0][1][2][0][RTW89_FCC][1][10] = -4,
- [0][1][2][0][RTW89_FCC][2][10] = 54,
[0][1][2][0][RTW89_ETSI][1][10] = 54,
[0][1][2][0][RTW89_ETSI][0][10] = 18,
[0][1][2][0][RTW89_MKK][1][10] = 54,
[0][1][2][0][RTW89_MKK][0][10] = 16,
[0][1][2][0][RTW89_IC][1][10] = -4,
- [0][1][2][0][RTW89_IC][2][10] = 54,
[0][1][2][0][RTW89_KCC][1][10] = 12,
[0][1][2][0][RTW89_KCC][0][10] = 12,
[0][1][2][0][RTW89_ACMA][1][10] = 54,
@@ -42183,13 +41299,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][10] = 44,
[0][1][2][0][RTW89_THAILAND][0][10] = -4,
[0][1][2][0][RTW89_FCC][1][12] = -4,
- [0][1][2][0][RTW89_FCC][2][12] = 54,
[0][1][2][0][RTW89_ETSI][1][12] = 54,
[0][1][2][0][RTW89_ETSI][0][12] = 18,
[0][1][2][0][RTW89_MKK][1][12] = 54,
[0][1][2][0][RTW89_MKK][0][12] = 16,
[0][1][2][0][RTW89_IC][1][12] = -4,
- [0][1][2][0][RTW89_IC][2][12] = 54,
[0][1][2][0][RTW89_KCC][1][12] = 12,
[0][1][2][0][RTW89_KCC][0][12] = 12,
[0][1][2][0][RTW89_ACMA][1][12] = 54,
@@ -42202,13 +41316,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][12] = 44,
[0][1][2][0][RTW89_THAILAND][0][12] = -4,
[0][1][2][0][RTW89_FCC][1][14] = -4,
- [0][1][2][0][RTW89_FCC][2][14] = 54,
[0][1][2][0][RTW89_ETSI][1][14] = 54,
[0][1][2][0][RTW89_ETSI][0][14] = 18,
[0][1][2][0][RTW89_MKK][1][14] = 54,
[0][1][2][0][RTW89_MKK][0][14] = 16,
[0][1][2][0][RTW89_IC][1][14] = -4,
- [0][1][2][0][RTW89_IC][2][14] = 54,
[0][1][2][0][RTW89_KCC][1][14] = 12,
[0][1][2][0][RTW89_KCC][0][14] = 12,
[0][1][2][0][RTW89_ACMA][1][14] = 54,
@@ -42221,13 +41333,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][14] = 44,
[0][1][2][0][RTW89_THAILAND][0][14] = -4,
[0][1][2][0][RTW89_FCC][1][15] = -4,
- [0][1][2][0][RTW89_FCC][2][15] = 54,
[0][1][2][0][RTW89_ETSI][1][15] = 54,
[0][1][2][0][RTW89_ETSI][0][15] = 18,
[0][1][2][0][RTW89_MKK][1][15] = 54,
[0][1][2][0][RTW89_MKK][0][15] = 16,
[0][1][2][0][RTW89_IC][1][15] = -4,
- [0][1][2][0][RTW89_IC][2][15] = 54,
[0][1][2][0][RTW89_KCC][1][15] = 12,
[0][1][2][0][RTW89_KCC][0][15] = 12,
[0][1][2][0][RTW89_ACMA][1][15] = 54,
@@ -42240,13 +41350,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][15] = 44,
[0][1][2][0][RTW89_THAILAND][0][15] = -4,
[0][1][2][0][RTW89_FCC][1][17] = -4,
- [0][1][2][0][RTW89_FCC][2][17] = 54,
[0][1][2][0][RTW89_ETSI][1][17] = 54,
[0][1][2][0][RTW89_ETSI][0][17] = 18,
[0][1][2][0][RTW89_MKK][1][17] = 54,
[0][1][2][0][RTW89_MKK][0][17] = 16,
[0][1][2][0][RTW89_IC][1][17] = -4,
- [0][1][2][0][RTW89_IC][2][17] = 54,
[0][1][2][0][RTW89_KCC][1][17] = 12,
[0][1][2][0][RTW89_KCC][0][17] = 12,
[0][1][2][0][RTW89_ACMA][1][17] = 54,
@@ -42259,13 +41367,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][17] = 44,
[0][1][2][0][RTW89_THAILAND][0][17] = -4,
[0][1][2][0][RTW89_FCC][1][19] = -4,
- [0][1][2][0][RTW89_FCC][2][19] = 54,
[0][1][2][0][RTW89_ETSI][1][19] = 54,
[0][1][2][0][RTW89_ETSI][0][19] = 18,
[0][1][2][0][RTW89_MKK][1][19] = 54,
[0][1][2][0][RTW89_MKK][0][19] = 16,
[0][1][2][0][RTW89_IC][1][19] = -4,
- [0][1][2][0][RTW89_IC][2][19] = 54,
[0][1][2][0][RTW89_KCC][1][19] = 12,
[0][1][2][0][RTW89_KCC][0][19] = 12,
[0][1][2][0][RTW89_ACMA][1][19] = 54,
@@ -42278,13 +41384,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][19] = 44,
[0][1][2][0][RTW89_THAILAND][0][19] = -4,
[0][1][2][0][RTW89_FCC][1][21] = -4,
- [0][1][2][0][RTW89_FCC][2][21] = 54,
[0][1][2][0][RTW89_ETSI][1][21] = 54,
[0][1][2][0][RTW89_ETSI][0][21] = 18,
[0][1][2][0][RTW89_MKK][1][21] = 54,
[0][1][2][0][RTW89_MKK][0][21] = 16,
[0][1][2][0][RTW89_IC][1][21] = -4,
- [0][1][2][0][RTW89_IC][2][21] = 54,
[0][1][2][0][RTW89_KCC][1][21] = 12,
[0][1][2][0][RTW89_KCC][0][21] = 12,
[0][1][2][0][RTW89_ACMA][1][21] = 54,
@@ -42297,13 +41401,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][21] = 44,
[0][1][2][0][RTW89_THAILAND][0][21] = -4,
[0][1][2][0][RTW89_FCC][1][23] = -4,
- [0][1][2][0][RTW89_FCC][2][23] = 68,
[0][1][2][0][RTW89_ETSI][1][23] = 54,
[0][1][2][0][RTW89_ETSI][0][23] = 18,
[0][1][2][0][RTW89_MKK][1][23] = 54,
[0][1][2][0][RTW89_MKK][0][23] = 16,
[0][1][2][0][RTW89_IC][1][23] = -4,
- [0][1][2][0][RTW89_IC][2][23] = 68,
[0][1][2][0][RTW89_KCC][1][23] = 12,
[0][1][2][0][RTW89_KCC][0][23] = 10,
[0][1][2][0][RTW89_ACMA][1][23] = 54,
@@ -42316,13 +41418,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][23] = 44,
[0][1][2][0][RTW89_THAILAND][0][23] = -4,
[0][1][2][0][RTW89_FCC][1][25] = -4,
- [0][1][2][0][RTW89_FCC][2][25] = 68,
[0][1][2][0][RTW89_ETSI][1][25] = 54,
[0][1][2][0][RTW89_ETSI][0][25] = 18,
[0][1][2][0][RTW89_MKK][1][25] = 54,
[0][1][2][0][RTW89_MKK][0][25] = 16,
[0][1][2][0][RTW89_IC][1][25] = -4,
- [0][1][2][0][RTW89_IC][2][25] = 68,
[0][1][2][0][RTW89_KCC][1][25] = 12,
[0][1][2][0][RTW89_KCC][0][25] = 14,
[0][1][2][0][RTW89_ACMA][1][25] = 54,
@@ -42335,13 +41435,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][25] = 42,
[0][1][2][0][RTW89_THAILAND][0][25] = -4,
[0][1][2][0][RTW89_FCC][1][27] = -4,
- [0][1][2][0][RTW89_FCC][2][27] = 68,
[0][1][2][0][RTW89_ETSI][1][27] = 54,
[0][1][2][0][RTW89_ETSI][0][27] = 18,
[0][1][2][0][RTW89_MKK][1][27] = 54,
[0][1][2][0][RTW89_MKK][0][27] = 16,
[0][1][2][0][RTW89_IC][1][27] = -4,
- [0][1][2][0][RTW89_IC][2][27] = 68,
[0][1][2][0][RTW89_KCC][1][27] = 12,
[0][1][2][0][RTW89_KCC][0][27] = 14,
[0][1][2][0][RTW89_ACMA][1][27] = 54,
@@ -42354,13 +41452,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][27] = 42,
[0][1][2][0][RTW89_THAILAND][0][27] = -4,
[0][1][2][0][RTW89_FCC][1][29] = -4,
- [0][1][2][0][RTW89_FCC][2][29] = 68,
[0][1][2][0][RTW89_ETSI][1][29] = 54,
[0][1][2][0][RTW89_ETSI][0][29] = 18,
[0][1][2][0][RTW89_MKK][1][29] = 54,
[0][1][2][0][RTW89_MKK][0][29] = 16,
[0][1][2][0][RTW89_IC][1][29] = -4,
- [0][1][2][0][RTW89_IC][2][29] = 68,
[0][1][2][0][RTW89_KCC][1][29] = 12,
[0][1][2][0][RTW89_KCC][0][29] = 14,
[0][1][2][0][RTW89_ACMA][1][29] = 54,
@@ -42373,13 +41469,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][29] = 42,
[0][1][2][0][RTW89_THAILAND][0][29] = -4,
[0][1][2][0][RTW89_FCC][1][30] = -4,
- [0][1][2][0][RTW89_FCC][2][30] = 68,
[0][1][2][0][RTW89_ETSI][1][30] = 54,
[0][1][2][0][RTW89_ETSI][0][30] = 18,
[0][1][2][0][RTW89_MKK][1][30] = 54,
[0][1][2][0][RTW89_MKK][0][30] = 16,
[0][1][2][0][RTW89_IC][1][30] = -4,
- [0][1][2][0][RTW89_IC][2][30] = 68,
[0][1][2][0][RTW89_KCC][1][30] = 12,
[0][1][2][0][RTW89_KCC][0][30] = 14,
[0][1][2][0][RTW89_ACMA][1][30] = 54,
@@ -42392,13 +41486,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][30] = 42,
[0][1][2][0][RTW89_THAILAND][0][30] = -4,
[0][1][2][0][RTW89_FCC][1][32] = -4,
- [0][1][2][0][RTW89_FCC][2][32] = 68,
[0][1][2][0][RTW89_ETSI][1][32] = 54,
[0][1][2][0][RTW89_ETSI][0][32] = 18,
[0][1][2][0][RTW89_MKK][1][32] = 54,
[0][1][2][0][RTW89_MKK][0][32] = 16,
[0][1][2][0][RTW89_IC][1][32] = -4,
- [0][1][2][0][RTW89_IC][2][32] = 68,
[0][1][2][0][RTW89_KCC][1][32] = 12,
[0][1][2][0][RTW89_KCC][0][32] = 14,
[0][1][2][0][RTW89_ACMA][1][32] = 54,
@@ -42411,13 +41503,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][32] = 42,
[0][1][2][0][RTW89_THAILAND][0][32] = -4,
[0][1][2][0][RTW89_FCC][1][34] = -4,
- [0][1][2][0][RTW89_FCC][2][34] = 68,
[0][1][2][0][RTW89_ETSI][1][34] = 54,
[0][1][2][0][RTW89_ETSI][0][34] = 18,
[0][1][2][0][RTW89_MKK][1][34] = 54,
[0][1][2][0][RTW89_MKK][0][34] = 16,
[0][1][2][0][RTW89_IC][1][34] = -4,
- [0][1][2][0][RTW89_IC][2][34] = 68,
[0][1][2][0][RTW89_KCC][1][34] = 12,
[0][1][2][0][RTW89_KCC][0][34] = 14,
[0][1][2][0][RTW89_ACMA][1][34] = 54,
@@ -42430,13 +41520,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][34] = 42,
[0][1][2][0][RTW89_THAILAND][0][34] = -4,
[0][1][2][0][RTW89_FCC][1][36] = -4,
- [0][1][2][0][RTW89_FCC][2][36] = 68,
[0][1][2][0][RTW89_ETSI][1][36] = 54,
[0][1][2][0][RTW89_ETSI][0][36] = 18,
[0][1][2][0][RTW89_MKK][1][36] = 54,
[0][1][2][0][RTW89_MKK][0][36] = 16,
[0][1][2][0][RTW89_IC][1][36] = -4,
- [0][1][2][0][RTW89_IC][2][36] = 68,
[0][1][2][0][RTW89_KCC][1][36] = 12,
[0][1][2][0][RTW89_KCC][0][36] = 14,
[0][1][2][0][RTW89_ACMA][1][36] = 54,
@@ -42449,13 +41537,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][36] = 42,
[0][1][2][0][RTW89_THAILAND][0][36] = -4,
[0][1][2][0][RTW89_FCC][1][38] = -4,
- [0][1][2][0][RTW89_FCC][2][38] = 68,
[0][1][2][0][RTW89_ETSI][1][38] = 54,
[0][1][2][0][RTW89_ETSI][0][38] = 18,
[0][1][2][0][RTW89_MKK][1][38] = 54,
[0][1][2][0][RTW89_MKK][0][38] = 16,
[0][1][2][0][RTW89_IC][1][38] = -4,
- [0][1][2][0][RTW89_IC][2][38] = 68,
[0][1][2][0][RTW89_KCC][1][38] = 12,
[0][1][2][0][RTW89_KCC][0][38] = 14,
[0][1][2][0][RTW89_ACMA][1][38] = 54,
@@ -42468,13 +41554,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][38] = 42,
[0][1][2][0][RTW89_THAILAND][0][38] = -4,
[0][1][2][0][RTW89_FCC][1][40] = -4,
- [0][1][2][0][RTW89_FCC][2][40] = 68,
[0][1][2][0][RTW89_ETSI][1][40] = 54,
[0][1][2][0][RTW89_ETSI][0][40] = 18,
[0][1][2][0][RTW89_MKK][1][40] = 54,
[0][1][2][0][RTW89_MKK][0][40] = 16,
[0][1][2][0][RTW89_IC][1][40] = -4,
- [0][1][2][0][RTW89_IC][2][40] = 68,
[0][1][2][0][RTW89_KCC][1][40] = 12,
[0][1][2][0][RTW89_KCC][0][40] = 14,
[0][1][2][0][RTW89_ACMA][1][40] = 54,
@@ -42487,13 +41571,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][40] = 42,
[0][1][2][0][RTW89_THAILAND][0][40] = -4,
[0][1][2][0][RTW89_FCC][1][42] = -4,
- [0][1][2][0][RTW89_FCC][2][42] = 68,
[0][1][2][0][RTW89_ETSI][1][42] = 54,
[0][1][2][0][RTW89_ETSI][0][42] = 18,
[0][1][2][0][RTW89_MKK][1][42] = 54,
[0][1][2][0][RTW89_MKK][0][42] = 16,
[0][1][2][0][RTW89_IC][1][42] = -4,
- [0][1][2][0][RTW89_IC][2][42] = 68,
[0][1][2][0][RTW89_KCC][1][42] = 12,
[0][1][2][0][RTW89_KCC][0][42] = 14,
[0][1][2][0][RTW89_ACMA][1][42] = 54,
@@ -42506,13 +41588,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][42] = 42,
[0][1][2][0][RTW89_THAILAND][0][42] = -4,
[0][1][2][0][RTW89_FCC][1][44] = -2,
- [0][1][2][0][RTW89_FCC][2][44] = 68,
[0][1][2][0][RTW89_ETSI][1][44] = 54,
[0][1][2][0][RTW89_ETSI][0][44] = 18,
[0][1][2][0][RTW89_MKK][1][44] = 34,
[0][1][2][0][RTW89_MKK][0][44] = 16,
[0][1][2][0][RTW89_IC][1][44] = -2,
- [0][1][2][0][RTW89_IC][2][44] = 68,
[0][1][2][0][RTW89_KCC][1][44] = 12,
[0][1][2][0][RTW89_KCC][0][44] = 12,
[0][1][2][0][RTW89_ACMA][1][44] = 54,
@@ -42525,13 +41605,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][44] = 42,
[0][1][2][0][RTW89_THAILAND][0][44] = -2,
[0][1][2][0][RTW89_FCC][1][45] = -2,
- [0][1][2][0][RTW89_FCC][2][45] = 127,
[0][1][2][0][RTW89_ETSI][1][45] = 127,
[0][1][2][0][RTW89_ETSI][0][45] = 127,
[0][1][2][0][RTW89_MKK][1][45] = 127,
[0][1][2][0][RTW89_MKK][0][45] = 127,
[0][1][2][0][RTW89_IC][1][45] = -2,
- [0][1][2][0][RTW89_IC][2][45] = 70,
[0][1][2][0][RTW89_KCC][1][45] = 12,
[0][1][2][0][RTW89_KCC][0][45] = 127,
[0][1][2][0][RTW89_ACMA][1][45] = 127,
@@ -42544,13 +41622,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][45] = 127,
[0][1][2][0][RTW89_THAILAND][0][45] = 127,
[0][1][2][0][RTW89_FCC][1][47] = -2,
- [0][1][2][0][RTW89_FCC][2][47] = 127,
[0][1][2][0][RTW89_ETSI][1][47] = 127,
[0][1][2][0][RTW89_ETSI][0][47] = 127,
[0][1][2][0][RTW89_MKK][1][47] = 127,
[0][1][2][0][RTW89_MKK][0][47] = 127,
[0][1][2][0][RTW89_IC][1][47] = -2,
- [0][1][2][0][RTW89_IC][2][47] = 68,
[0][1][2][0][RTW89_KCC][1][47] = 12,
[0][1][2][0][RTW89_KCC][0][47] = 127,
[0][1][2][0][RTW89_ACMA][1][47] = 127,
@@ -42563,13 +41639,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][47] = 127,
[0][1][2][0][RTW89_THAILAND][0][47] = 127,
[0][1][2][0][RTW89_FCC][1][49] = -2,
- [0][1][2][0][RTW89_FCC][2][49] = 127,
[0][1][2][0][RTW89_ETSI][1][49] = 127,
[0][1][2][0][RTW89_ETSI][0][49] = 127,
[0][1][2][0][RTW89_MKK][1][49] = 127,
[0][1][2][0][RTW89_MKK][0][49] = 127,
[0][1][2][0][RTW89_IC][1][49] = -2,
- [0][1][2][0][RTW89_IC][2][49] = 68,
[0][1][2][0][RTW89_KCC][1][49] = 12,
[0][1][2][0][RTW89_KCC][0][49] = 127,
[0][1][2][0][RTW89_ACMA][1][49] = 127,
@@ -42582,13 +41656,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][49] = 127,
[0][1][2][0][RTW89_THAILAND][0][49] = 127,
[0][1][2][0][RTW89_FCC][1][51] = -2,
- [0][1][2][0][RTW89_FCC][2][51] = 127,
[0][1][2][0][RTW89_ETSI][1][51] = 127,
[0][1][2][0][RTW89_ETSI][0][51] = 127,
[0][1][2][0][RTW89_MKK][1][51] = 127,
[0][1][2][0][RTW89_MKK][0][51] = 127,
[0][1][2][0][RTW89_IC][1][51] = -2,
- [0][1][2][0][RTW89_IC][2][51] = 68,
[0][1][2][0][RTW89_KCC][1][51] = 12,
[0][1][2][0][RTW89_KCC][0][51] = 127,
[0][1][2][0][RTW89_ACMA][1][51] = 127,
@@ -42601,13 +41673,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][51] = 127,
[0][1][2][0][RTW89_THAILAND][0][51] = 127,
[0][1][2][0][RTW89_FCC][1][53] = -2,
- [0][1][2][0][RTW89_FCC][2][53] = 127,
[0][1][2][0][RTW89_ETSI][1][53] = 127,
[0][1][2][0][RTW89_ETSI][0][53] = 127,
[0][1][2][0][RTW89_MKK][1][53] = 127,
[0][1][2][0][RTW89_MKK][0][53] = 127,
[0][1][2][0][RTW89_IC][1][53] = -2,
- [0][1][2][0][RTW89_IC][2][53] = 68,
[0][1][2][0][RTW89_KCC][1][53] = 12,
[0][1][2][0][RTW89_KCC][0][53] = 127,
[0][1][2][0][RTW89_ACMA][1][53] = 127,
@@ -42620,13 +41690,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][53] = 127,
[0][1][2][0][RTW89_THAILAND][0][53] = 127,
[0][1][2][0][RTW89_FCC][1][55] = -2,
- [0][1][2][0][RTW89_FCC][2][55] = 68,
[0][1][2][0][RTW89_ETSI][1][55] = 127,
[0][1][2][0][RTW89_ETSI][0][55] = 127,
[0][1][2][0][RTW89_MKK][1][55] = 127,
[0][1][2][0][RTW89_MKK][0][55] = 127,
[0][1][2][0][RTW89_IC][1][55] = -2,
- [0][1][2][0][RTW89_IC][2][55] = 68,
[0][1][2][0][RTW89_KCC][1][55] = 12,
[0][1][2][0][RTW89_KCC][0][55] = 127,
[0][1][2][0][RTW89_ACMA][1][55] = 127,
@@ -42639,13 +41707,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][55] = 127,
[0][1][2][0][RTW89_THAILAND][0][55] = 127,
[0][1][2][0][RTW89_FCC][1][57] = -2,
- [0][1][2][0][RTW89_FCC][2][57] = 68,
[0][1][2][0][RTW89_ETSI][1][57] = 127,
[0][1][2][0][RTW89_ETSI][0][57] = 127,
[0][1][2][0][RTW89_MKK][1][57] = 127,
[0][1][2][0][RTW89_MKK][0][57] = 127,
[0][1][2][0][RTW89_IC][1][57] = -2,
- [0][1][2][0][RTW89_IC][2][57] = 68,
[0][1][2][0][RTW89_KCC][1][57] = 12,
[0][1][2][0][RTW89_KCC][0][57] = 127,
[0][1][2][0][RTW89_ACMA][1][57] = 127,
@@ -42658,13 +41724,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][57] = 127,
[0][1][2][0][RTW89_THAILAND][0][57] = 127,
[0][1][2][0][RTW89_FCC][1][59] = -2,
- [0][1][2][0][RTW89_FCC][2][59] = 68,
[0][1][2][0][RTW89_ETSI][1][59] = 127,
[0][1][2][0][RTW89_ETSI][0][59] = 127,
[0][1][2][0][RTW89_MKK][1][59] = 127,
[0][1][2][0][RTW89_MKK][0][59] = 127,
[0][1][2][0][RTW89_IC][1][59] = -2,
- [0][1][2][0][RTW89_IC][2][59] = 68,
[0][1][2][0][RTW89_KCC][1][59] = 12,
[0][1][2][0][RTW89_KCC][0][59] = 127,
[0][1][2][0][RTW89_ACMA][1][59] = 127,
@@ -42677,13 +41741,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][59] = 127,
[0][1][2][0][RTW89_THAILAND][0][59] = 127,
[0][1][2][0][RTW89_FCC][1][60] = -2,
- [0][1][2][0][RTW89_FCC][2][60] = 68,
[0][1][2][0][RTW89_ETSI][1][60] = 127,
[0][1][2][0][RTW89_ETSI][0][60] = 127,
[0][1][2][0][RTW89_MKK][1][60] = 127,
[0][1][2][0][RTW89_MKK][0][60] = 127,
[0][1][2][0][RTW89_IC][1][60] = -2,
- [0][1][2][0][RTW89_IC][2][60] = 68,
[0][1][2][0][RTW89_KCC][1][60] = 12,
[0][1][2][0][RTW89_KCC][0][60] = 127,
[0][1][2][0][RTW89_ACMA][1][60] = 127,
@@ -42696,13 +41758,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][60] = 127,
[0][1][2][0][RTW89_THAILAND][0][60] = 127,
[0][1][2][0][RTW89_FCC][1][62] = -2,
- [0][1][2][0][RTW89_FCC][2][62] = 68,
[0][1][2][0][RTW89_ETSI][1][62] = 127,
[0][1][2][0][RTW89_ETSI][0][62] = 127,
[0][1][2][0][RTW89_MKK][1][62] = 127,
[0][1][2][0][RTW89_MKK][0][62] = 127,
[0][1][2][0][RTW89_IC][1][62] = -2,
- [0][1][2][0][RTW89_IC][2][62] = 68,
[0][1][2][0][RTW89_KCC][1][62] = 12,
[0][1][2][0][RTW89_KCC][0][62] = 127,
[0][1][2][0][RTW89_ACMA][1][62] = 127,
@@ -42715,13 +41775,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][62] = 127,
[0][1][2][0][RTW89_THAILAND][0][62] = 127,
[0][1][2][0][RTW89_FCC][1][64] = -2,
- [0][1][2][0][RTW89_FCC][2][64] = 68,
[0][1][2][0][RTW89_ETSI][1][64] = 127,
[0][1][2][0][RTW89_ETSI][0][64] = 127,
[0][1][2][0][RTW89_MKK][1][64] = 127,
[0][1][2][0][RTW89_MKK][0][64] = 127,
[0][1][2][0][RTW89_IC][1][64] = -2,
- [0][1][2][0][RTW89_IC][2][64] = 68,
[0][1][2][0][RTW89_KCC][1][64] = 12,
[0][1][2][0][RTW89_KCC][0][64] = 127,
[0][1][2][0][RTW89_ACMA][1][64] = 127,
@@ -42734,13 +41792,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][64] = 127,
[0][1][2][0][RTW89_THAILAND][0][64] = 127,
[0][1][2][0][RTW89_FCC][1][66] = -2,
- [0][1][2][0][RTW89_FCC][2][66] = 68,
[0][1][2][0][RTW89_ETSI][1][66] = 127,
[0][1][2][0][RTW89_ETSI][0][66] = 127,
[0][1][2][0][RTW89_MKK][1][66] = 127,
[0][1][2][0][RTW89_MKK][0][66] = 127,
[0][1][2][0][RTW89_IC][1][66] = -2,
- [0][1][2][0][RTW89_IC][2][66] = 68,
[0][1][2][0][RTW89_KCC][1][66] = 12,
[0][1][2][0][RTW89_KCC][0][66] = 127,
[0][1][2][0][RTW89_ACMA][1][66] = 127,
@@ -42753,13 +41809,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][66] = 127,
[0][1][2][0][RTW89_THAILAND][0][66] = 127,
[0][1][2][0][RTW89_FCC][1][68] = -2,
- [0][1][2][0][RTW89_FCC][2][68] = 68,
[0][1][2][0][RTW89_ETSI][1][68] = 127,
[0][1][2][0][RTW89_ETSI][0][68] = 127,
[0][1][2][0][RTW89_MKK][1][68] = 127,
[0][1][2][0][RTW89_MKK][0][68] = 127,
[0][1][2][0][RTW89_IC][1][68] = -2,
- [0][1][2][0][RTW89_IC][2][68] = 68,
[0][1][2][0][RTW89_KCC][1][68] = 12,
[0][1][2][0][RTW89_KCC][0][68] = 127,
[0][1][2][0][RTW89_ACMA][1][68] = 127,
@@ -42772,13 +41826,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][68] = 127,
[0][1][2][0][RTW89_THAILAND][0][68] = 127,
[0][1][2][0][RTW89_FCC][1][70] = -2,
- [0][1][2][0][RTW89_FCC][2][70] = 68,
[0][1][2][0][RTW89_ETSI][1][70] = 127,
[0][1][2][0][RTW89_ETSI][0][70] = 127,
[0][1][2][0][RTW89_MKK][1][70] = 127,
[0][1][2][0][RTW89_MKK][0][70] = 127,
[0][1][2][0][RTW89_IC][1][70] = -2,
- [0][1][2][0][RTW89_IC][2][70] = 68,
[0][1][2][0][RTW89_KCC][1][70] = 12,
[0][1][2][0][RTW89_KCC][0][70] = 127,
[0][1][2][0][RTW89_ACMA][1][70] = 127,
@@ -42791,13 +41843,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][70] = 127,
[0][1][2][0][RTW89_THAILAND][0][70] = 127,
[0][1][2][0][RTW89_FCC][1][72] = -2,
- [0][1][2][0][RTW89_FCC][2][72] = 68,
[0][1][2][0][RTW89_ETSI][1][72] = 127,
[0][1][2][0][RTW89_ETSI][0][72] = 127,
[0][1][2][0][RTW89_MKK][1][72] = 127,
[0][1][2][0][RTW89_MKK][0][72] = 127,
[0][1][2][0][RTW89_IC][1][72] = -2,
- [0][1][2][0][RTW89_IC][2][72] = 68,
[0][1][2][0][RTW89_KCC][1][72] = 12,
[0][1][2][0][RTW89_KCC][0][72] = 127,
[0][1][2][0][RTW89_ACMA][1][72] = 127,
@@ -42810,13 +41860,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][72] = 127,
[0][1][2][0][RTW89_THAILAND][0][72] = 127,
[0][1][2][0][RTW89_FCC][1][74] = -2,
- [0][1][2][0][RTW89_FCC][2][74] = 68,
[0][1][2][0][RTW89_ETSI][1][74] = 127,
[0][1][2][0][RTW89_ETSI][0][74] = 127,
[0][1][2][0][RTW89_MKK][1][74] = 127,
[0][1][2][0][RTW89_MKK][0][74] = 127,
[0][1][2][0][RTW89_IC][1][74] = -2,
- [0][1][2][0][RTW89_IC][2][74] = 68,
[0][1][2][0][RTW89_KCC][1][74] = 12,
[0][1][2][0][RTW89_KCC][0][74] = 127,
[0][1][2][0][RTW89_ACMA][1][74] = 127,
@@ -42829,13 +41877,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][74] = 127,
[0][1][2][0][RTW89_THAILAND][0][74] = 127,
[0][1][2][0][RTW89_FCC][1][75] = -2,
- [0][1][2][0][RTW89_FCC][2][75] = 68,
[0][1][2][0][RTW89_ETSI][1][75] = 127,
[0][1][2][0][RTW89_ETSI][0][75] = 127,
[0][1][2][0][RTW89_MKK][1][75] = 127,
[0][1][2][0][RTW89_MKK][0][75] = 127,
[0][1][2][0][RTW89_IC][1][75] = -2,
- [0][1][2][0][RTW89_IC][2][75] = 68,
[0][1][2][0][RTW89_KCC][1][75] = 12,
[0][1][2][0][RTW89_KCC][0][75] = 127,
[0][1][2][0][RTW89_ACMA][1][75] = 127,
@@ -42848,13 +41894,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][75] = 127,
[0][1][2][0][RTW89_THAILAND][0][75] = 127,
[0][1][2][0][RTW89_FCC][1][77] = -2,
- [0][1][2][0][RTW89_FCC][2][77] = 68,
[0][1][2][0][RTW89_ETSI][1][77] = 127,
[0][1][2][0][RTW89_ETSI][0][77] = 127,
[0][1][2][0][RTW89_MKK][1][77] = 127,
[0][1][2][0][RTW89_MKK][0][77] = 127,
[0][1][2][0][RTW89_IC][1][77] = -2,
- [0][1][2][0][RTW89_IC][2][77] = 68,
[0][1][2][0][RTW89_KCC][1][77] = 12,
[0][1][2][0][RTW89_KCC][0][77] = 127,
[0][1][2][0][RTW89_ACMA][1][77] = 127,
@@ -42867,13 +41911,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][77] = 127,
[0][1][2][0][RTW89_THAILAND][0][77] = 127,
[0][1][2][0][RTW89_FCC][1][79] = -2,
- [0][1][2][0][RTW89_FCC][2][79] = 68,
[0][1][2][0][RTW89_ETSI][1][79] = 127,
[0][1][2][0][RTW89_ETSI][0][79] = 127,
[0][1][2][0][RTW89_MKK][1][79] = 127,
[0][1][2][0][RTW89_MKK][0][79] = 127,
[0][1][2][0][RTW89_IC][1][79] = -2,
- [0][1][2][0][RTW89_IC][2][79] = 68,
[0][1][2][0][RTW89_KCC][1][79] = 12,
[0][1][2][0][RTW89_KCC][0][79] = 127,
[0][1][2][0][RTW89_ACMA][1][79] = 127,
@@ -42886,13 +41928,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][79] = 127,
[0][1][2][0][RTW89_THAILAND][0][79] = 127,
[0][1][2][0][RTW89_FCC][1][81] = -2,
- [0][1][2][0][RTW89_FCC][2][81] = 68,
[0][1][2][0][RTW89_ETSI][1][81] = 127,
[0][1][2][0][RTW89_ETSI][0][81] = 127,
[0][1][2][0][RTW89_MKK][1][81] = 127,
[0][1][2][0][RTW89_MKK][0][81] = 127,
[0][1][2][0][RTW89_IC][1][81] = -2,
- [0][1][2][0][RTW89_IC][2][81] = 68,
[0][1][2][0][RTW89_KCC][1][81] = 12,
[0][1][2][0][RTW89_KCC][0][81] = 127,
[0][1][2][0][RTW89_ACMA][1][81] = 127,
@@ -42905,13 +41945,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][81] = 127,
[0][1][2][0][RTW89_THAILAND][0][81] = 127,
[0][1][2][0][RTW89_FCC][1][83] = -2,
- [0][1][2][0][RTW89_FCC][2][83] = 68,
[0][1][2][0][RTW89_ETSI][1][83] = 127,
[0][1][2][0][RTW89_ETSI][0][83] = 127,
[0][1][2][0][RTW89_MKK][1][83] = 127,
[0][1][2][0][RTW89_MKK][0][83] = 127,
[0][1][2][0][RTW89_IC][1][83] = -2,
- [0][1][2][0][RTW89_IC][2][83] = 68,
[0][1][2][0][RTW89_KCC][1][83] = 20,
[0][1][2][0][RTW89_KCC][0][83] = 127,
[0][1][2][0][RTW89_ACMA][1][83] = 127,
@@ -42924,13 +41962,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][83] = 127,
[0][1][2][0][RTW89_THAILAND][0][83] = 127,
[0][1][2][0][RTW89_FCC][1][85] = -2,
- [0][1][2][0][RTW89_FCC][2][85] = 68,
[0][1][2][0][RTW89_ETSI][1][85] = 127,
[0][1][2][0][RTW89_ETSI][0][85] = 127,
[0][1][2][0][RTW89_MKK][1][85] = 127,
[0][1][2][0][RTW89_MKK][0][85] = 127,
[0][1][2][0][RTW89_IC][1][85] = -2,
- [0][1][2][0][RTW89_IC][2][85] = 68,
[0][1][2][0][RTW89_KCC][1][85] = 20,
[0][1][2][0][RTW89_KCC][0][85] = 127,
[0][1][2][0][RTW89_ACMA][1][85] = 127,
@@ -42943,13 +41979,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][85] = 127,
[0][1][2][0][RTW89_THAILAND][0][85] = 127,
[0][1][2][0][RTW89_FCC][1][87] = -2,
- [0][1][2][0][RTW89_FCC][2][87] = 127,
[0][1][2][0][RTW89_ETSI][1][87] = 127,
[0][1][2][0][RTW89_ETSI][0][87] = 127,
[0][1][2][0][RTW89_MKK][1][87] = 127,
[0][1][2][0][RTW89_MKK][0][87] = 127,
[0][1][2][0][RTW89_IC][1][87] = -2,
- [0][1][2][0][RTW89_IC][2][87] = 127,
[0][1][2][0][RTW89_KCC][1][87] = 20,
[0][1][2][0][RTW89_KCC][0][87] = 127,
[0][1][2][0][RTW89_ACMA][1][87] = 127,
@@ -42962,13 +41996,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][87] = 127,
[0][1][2][0][RTW89_THAILAND][0][87] = 127,
[0][1][2][0][RTW89_FCC][1][89] = -2,
- [0][1][2][0][RTW89_FCC][2][89] = 127,
[0][1][2][0][RTW89_ETSI][1][89] = 127,
[0][1][2][0][RTW89_ETSI][0][89] = 127,
[0][1][2][0][RTW89_MKK][1][89] = 127,
[0][1][2][0][RTW89_MKK][0][89] = 127,
[0][1][2][0][RTW89_IC][1][89] = -2,
- [0][1][2][0][RTW89_IC][2][89] = 127,
[0][1][2][0][RTW89_KCC][1][89] = 20,
[0][1][2][0][RTW89_KCC][0][89] = 127,
[0][1][2][0][RTW89_ACMA][1][89] = 127,
@@ -42981,13 +42013,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][89] = 127,
[0][1][2][0][RTW89_THAILAND][0][89] = 127,
[0][1][2][0][RTW89_FCC][1][90] = -2,
- [0][1][2][0][RTW89_FCC][2][90] = 127,
[0][1][2][0][RTW89_ETSI][1][90] = 127,
[0][1][2][0][RTW89_ETSI][0][90] = 127,
[0][1][2][0][RTW89_MKK][1][90] = 127,
[0][1][2][0][RTW89_MKK][0][90] = 127,
[0][1][2][0][RTW89_IC][1][90] = -2,
- [0][1][2][0][RTW89_IC][2][90] = 127,
[0][1][2][0][RTW89_KCC][1][90] = 20,
[0][1][2][0][RTW89_KCC][0][90] = 127,
[0][1][2][0][RTW89_ACMA][1][90] = 127,
@@ -43000,13 +42030,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][90] = 127,
[0][1][2][0][RTW89_THAILAND][0][90] = 127,
[0][1][2][0][RTW89_FCC][1][92] = -2,
- [0][1][2][0][RTW89_FCC][2][92] = 127,
[0][1][2][0][RTW89_ETSI][1][92] = 127,
[0][1][2][0][RTW89_ETSI][0][92] = 127,
[0][1][2][0][RTW89_MKK][1][92] = 127,
[0][1][2][0][RTW89_MKK][0][92] = 127,
[0][1][2][0][RTW89_IC][1][92] = -2,
- [0][1][2][0][RTW89_IC][2][92] = 127,
[0][1][2][0][RTW89_KCC][1][92] = 20,
[0][1][2][0][RTW89_KCC][0][92] = 127,
[0][1][2][0][RTW89_ACMA][1][92] = 127,
@@ -43019,13 +42047,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][92] = 127,
[0][1][2][0][RTW89_THAILAND][0][92] = 127,
[0][1][2][0][RTW89_FCC][1][94] = -2,
- [0][1][2][0][RTW89_FCC][2][94] = 127,
[0][1][2][0][RTW89_ETSI][1][94] = 127,
[0][1][2][0][RTW89_ETSI][0][94] = 127,
[0][1][2][0][RTW89_MKK][1][94] = 127,
[0][1][2][0][RTW89_MKK][0][94] = 127,
[0][1][2][0][RTW89_IC][1][94] = -2,
- [0][1][2][0][RTW89_IC][2][94] = 127,
[0][1][2][0][RTW89_KCC][1][94] = 20,
[0][1][2][0][RTW89_KCC][0][94] = 127,
[0][1][2][0][RTW89_ACMA][1][94] = 127,
@@ -43038,13 +42064,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][94] = 127,
[0][1][2][0][RTW89_THAILAND][0][94] = 127,
[0][1][2][0][RTW89_FCC][1][96] = -2,
- [0][1][2][0][RTW89_FCC][2][96] = 127,
[0][1][2][0][RTW89_ETSI][1][96] = 127,
[0][1][2][0][RTW89_ETSI][0][96] = 127,
[0][1][2][0][RTW89_MKK][1][96] = 127,
[0][1][2][0][RTW89_MKK][0][96] = 127,
[0][1][2][0][RTW89_IC][1][96] = -2,
- [0][1][2][0][RTW89_IC][2][96] = 127,
[0][1][2][0][RTW89_KCC][1][96] = 20,
[0][1][2][0][RTW89_KCC][0][96] = 127,
[0][1][2][0][RTW89_ACMA][1][96] = 127,
@@ -43057,13 +42081,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][96] = 127,
[0][1][2][0][RTW89_THAILAND][0][96] = 127,
[0][1][2][0][RTW89_FCC][1][98] = -2,
- [0][1][2][0][RTW89_FCC][2][98] = 127,
[0][1][2][0][RTW89_ETSI][1][98] = 127,
[0][1][2][0][RTW89_ETSI][0][98] = 127,
[0][1][2][0][RTW89_MKK][1][98] = 127,
[0][1][2][0][RTW89_MKK][0][98] = 127,
[0][1][2][0][RTW89_IC][1][98] = -2,
- [0][1][2][0][RTW89_IC][2][98] = 127,
[0][1][2][0][RTW89_KCC][1][98] = 20,
[0][1][2][0][RTW89_KCC][0][98] = 127,
[0][1][2][0][RTW89_ACMA][1][98] = 127,
@@ -43076,13 +42098,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][98] = 127,
[0][1][2][0][RTW89_THAILAND][0][98] = 127,
[0][1][2][0][RTW89_FCC][1][100] = -2,
- [0][1][2][0][RTW89_FCC][2][100] = 127,
[0][1][2][0][RTW89_ETSI][1][100] = 127,
[0][1][2][0][RTW89_ETSI][0][100] = 127,
[0][1][2][0][RTW89_MKK][1][100] = 127,
[0][1][2][0][RTW89_MKK][0][100] = 127,
[0][1][2][0][RTW89_IC][1][100] = -2,
- [0][1][2][0][RTW89_IC][2][100] = 127,
[0][1][2][0][RTW89_KCC][1][100] = 20,
[0][1][2][0][RTW89_KCC][0][100] = 127,
[0][1][2][0][RTW89_ACMA][1][100] = 127,
@@ -43095,13 +42115,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][100] = 127,
[0][1][2][0][RTW89_THAILAND][0][100] = 127,
[0][1][2][0][RTW89_FCC][1][102] = -2,
- [0][1][2][0][RTW89_FCC][2][102] = 127,
[0][1][2][0][RTW89_ETSI][1][102] = 127,
[0][1][2][0][RTW89_ETSI][0][102] = 127,
[0][1][2][0][RTW89_MKK][1][102] = 127,
[0][1][2][0][RTW89_MKK][0][102] = 127,
[0][1][2][0][RTW89_IC][1][102] = -2,
- [0][1][2][0][RTW89_IC][2][102] = 127,
[0][1][2][0][RTW89_KCC][1][102] = 20,
[0][1][2][0][RTW89_KCC][0][102] = 127,
[0][1][2][0][RTW89_ACMA][1][102] = 127,
@@ -43114,13 +42132,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][102] = 127,
[0][1][2][0][RTW89_THAILAND][0][102] = 127,
[0][1][2][0][RTW89_FCC][1][104] = -2,
- [0][1][2][0][RTW89_FCC][2][104] = 127,
[0][1][2][0][RTW89_ETSI][1][104] = 127,
[0][1][2][0][RTW89_ETSI][0][104] = 127,
[0][1][2][0][RTW89_MKK][1][104] = 127,
[0][1][2][0][RTW89_MKK][0][104] = 127,
[0][1][2][0][RTW89_IC][1][104] = -2,
- [0][1][2][0][RTW89_IC][2][104] = 127,
[0][1][2][0][RTW89_KCC][1][104] = 20,
[0][1][2][0][RTW89_KCC][0][104] = 127,
[0][1][2][0][RTW89_ACMA][1][104] = 127,
@@ -43133,13 +42149,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][104] = 127,
[0][1][2][0][RTW89_THAILAND][0][104] = 127,
[0][1][2][0][RTW89_FCC][1][105] = -2,
- [0][1][2][0][RTW89_FCC][2][105] = 127,
[0][1][2][0][RTW89_ETSI][1][105] = 127,
[0][1][2][0][RTW89_ETSI][0][105] = 127,
[0][1][2][0][RTW89_MKK][1][105] = 127,
[0][1][2][0][RTW89_MKK][0][105] = 127,
[0][1][2][0][RTW89_IC][1][105] = -2,
- [0][1][2][0][RTW89_IC][2][105] = 127,
[0][1][2][0][RTW89_KCC][1][105] = 20,
[0][1][2][0][RTW89_KCC][0][105] = 127,
[0][1][2][0][RTW89_ACMA][1][105] = 127,
@@ -43152,13 +42166,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][105] = 127,
[0][1][2][0][RTW89_THAILAND][0][105] = 127,
[0][1][2][0][RTW89_FCC][1][107] = 1,
- [0][1][2][0][RTW89_FCC][2][107] = 127,
[0][1][2][0][RTW89_ETSI][1][107] = 127,
[0][1][2][0][RTW89_ETSI][0][107] = 127,
[0][1][2][0][RTW89_MKK][1][107] = 127,
[0][1][2][0][RTW89_MKK][0][107] = 127,
[0][1][2][0][RTW89_IC][1][107] = 1,
- [0][1][2][0][RTW89_IC][2][107] = 127,
[0][1][2][0][RTW89_KCC][1][107] = 20,
[0][1][2][0][RTW89_KCC][0][107] = 127,
[0][1][2][0][RTW89_ACMA][1][107] = 127,
@@ -43171,13 +42183,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][107] = 127,
[0][1][2][0][RTW89_THAILAND][0][107] = 127,
[0][1][2][0][RTW89_FCC][1][109] = 1,
- [0][1][2][0][RTW89_FCC][2][109] = 127,
[0][1][2][0][RTW89_ETSI][1][109] = 127,
[0][1][2][0][RTW89_ETSI][0][109] = 127,
[0][1][2][0][RTW89_MKK][1][109] = 127,
[0][1][2][0][RTW89_MKK][0][109] = 127,
[0][1][2][0][RTW89_IC][1][109] = 1,
- [0][1][2][0][RTW89_IC][2][109] = 127,
[0][1][2][0][RTW89_KCC][1][109] = 20,
[0][1][2][0][RTW89_KCC][0][109] = 127,
[0][1][2][0][RTW89_ACMA][1][109] = 127,
@@ -43190,13 +42200,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][109] = 127,
[0][1][2][0][RTW89_THAILAND][0][109] = 127,
[0][1][2][0][RTW89_FCC][1][111] = 127,
- [0][1][2][0][RTW89_FCC][2][111] = 127,
[0][1][2][0][RTW89_ETSI][1][111] = 127,
[0][1][2][0][RTW89_ETSI][0][111] = 127,
[0][1][2][0][RTW89_MKK][1][111] = 127,
[0][1][2][0][RTW89_MKK][0][111] = 127,
[0][1][2][0][RTW89_IC][1][111] = 127,
- [0][1][2][0][RTW89_IC][2][111] = 127,
[0][1][2][0][RTW89_KCC][1][111] = 127,
[0][1][2][0][RTW89_KCC][0][111] = 127,
[0][1][2][0][RTW89_ACMA][1][111] = 127,
@@ -43209,13 +42217,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][111] = 127,
[0][1][2][0][RTW89_THAILAND][0][111] = 127,
[0][1][2][0][RTW89_FCC][1][113] = 127,
- [0][1][2][0][RTW89_FCC][2][113] = 127,
[0][1][2][0][RTW89_ETSI][1][113] = 127,
[0][1][2][0][RTW89_ETSI][0][113] = 127,
[0][1][2][0][RTW89_MKK][1][113] = 127,
[0][1][2][0][RTW89_MKK][0][113] = 127,
[0][1][2][0][RTW89_IC][1][113] = 127,
- [0][1][2][0][RTW89_IC][2][113] = 127,
[0][1][2][0][RTW89_KCC][1][113] = 127,
[0][1][2][0][RTW89_KCC][0][113] = 127,
[0][1][2][0][RTW89_ACMA][1][113] = 127,
@@ -43228,13 +42234,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][113] = 127,
[0][1][2][0][RTW89_THAILAND][0][113] = 127,
[0][1][2][0][RTW89_FCC][1][115] = 127,
- [0][1][2][0][RTW89_FCC][2][115] = 127,
[0][1][2][0][RTW89_ETSI][1][115] = 127,
[0][1][2][0][RTW89_ETSI][0][115] = 127,
[0][1][2][0][RTW89_MKK][1][115] = 127,
[0][1][2][0][RTW89_MKK][0][115] = 127,
[0][1][2][0][RTW89_IC][1][115] = 127,
- [0][1][2][0][RTW89_IC][2][115] = 127,
[0][1][2][0][RTW89_KCC][1][115] = 127,
[0][1][2][0][RTW89_KCC][0][115] = 127,
[0][1][2][0][RTW89_ACMA][1][115] = 127,
@@ -43247,13 +42251,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][115] = 127,
[0][1][2][0][RTW89_THAILAND][0][115] = 127,
[0][1][2][0][RTW89_FCC][1][117] = 127,
- [0][1][2][0][RTW89_FCC][2][117] = 127,
[0][1][2][0][RTW89_ETSI][1][117] = 127,
[0][1][2][0][RTW89_ETSI][0][117] = 127,
[0][1][2][0][RTW89_MKK][1][117] = 127,
[0][1][2][0][RTW89_MKK][0][117] = 127,
[0][1][2][0][RTW89_IC][1][117] = 127,
- [0][1][2][0][RTW89_IC][2][117] = 127,
[0][1][2][0][RTW89_KCC][1][117] = 127,
[0][1][2][0][RTW89_KCC][0][117] = 127,
[0][1][2][0][RTW89_ACMA][1][117] = 127,
@@ -43266,13 +42268,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][117] = 127,
[0][1][2][0][RTW89_THAILAND][0][117] = 127,
[0][1][2][0][RTW89_FCC][1][119] = 127,
- [0][1][2][0][RTW89_FCC][2][119] = 127,
[0][1][2][0][RTW89_ETSI][1][119] = 127,
[0][1][2][0][RTW89_ETSI][0][119] = 127,
[0][1][2][0][RTW89_MKK][1][119] = 127,
[0][1][2][0][RTW89_MKK][0][119] = 127,
[0][1][2][0][RTW89_IC][1][119] = 127,
- [0][1][2][0][RTW89_IC][2][119] = 127,
[0][1][2][0][RTW89_KCC][1][119] = 127,
[0][1][2][0][RTW89_KCC][0][119] = 127,
[0][1][2][0][RTW89_ACMA][1][119] = 127,
@@ -43285,13 +42285,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][0][RTW89_THAILAND][1][119] = 127,
[0][1][2][0][RTW89_THAILAND][0][119] = 127,
[0][1][2][1][RTW89_FCC][1][0] = -2,
- [0][1][2][1][RTW89_FCC][2][0] = 54,
[0][1][2][1][RTW89_ETSI][1][0] = 42,
[0][1][2][1][RTW89_ETSI][0][0] = 6,
[0][1][2][1][RTW89_MKK][1][0] = 56,
[0][1][2][1][RTW89_MKK][0][0] = 16,
[0][1][2][1][RTW89_IC][1][0] = -2,
- [0][1][2][1][RTW89_IC][2][0] = 54,
[0][1][2][1][RTW89_KCC][1][0] = 12,
[0][1][2][1][RTW89_KCC][0][0] = 10,
[0][1][2][1][RTW89_ACMA][1][0] = 42,
@@ -43304,13 +42302,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][0] = 44,
[0][1][2][1][RTW89_THAILAND][0][0] = -2,
[0][1][2][1][RTW89_FCC][1][2] = -4,
- [0][1][2][1][RTW89_FCC][2][2] = 54,
[0][1][2][1][RTW89_ETSI][1][2] = 42,
[0][1][2][1][RTW89_ETSI][0][2] = 6,
[0][1][2][1][RTW89_MKK][1][2] = 54,
[0][1][2][1][RTW89_MKK][0][2] = 16,
[0][1][2][1][RTW89_IC][1][2] = -4,
- [0][1][2][1][RTW89_IC][2][2] = 54,
[0][1][2][1][RTW89_KCC][1][2] = 12,
[0][1][2][1][RTW89_KCC][0][2] = 12,
[0][1][2][1][RTW89_ACMA][1][2] = 42,
@@ -43323,13 +42319,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][2] = 44,
[0][1][2][1][RTW89_THAILAND][0][2] = -4,
[0][1][2][1][RTW89_FCC][1][4] = -4,
- [0][1][2][1][RTW89_FCC][2][4] = 54,
[0][1][2][1][RTW89_ETSI][1][4] = 42,
[0][1][2][1][RTW89_ETSI][0][4] = 6,
[0][1][2][1][RTW89_MKK][1][4] = 54,
[0][1][2][1][RTW89_MKK][0][4] = 16,
[0][1][2][1][RTW89_IC][1][4] = -4,
- [0][1][2][1][RTW89_IC][2][4] = 54,
[0][1][2][1][RTW89_KCC][1][4] = 12,
[0][1][2][1][RTW89_KCC][0][4] = 12,
[0][1][2][1][RTW89_ACMA][1][4] = 42,
@@ -43342,13 +42336,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][4] = 44,
[0][1][2][1][RTW89_THAILAND][0][4] = -4,
[0][1][2][1][RTW89_FCC][1][6] = -4,
- [0][1][2][1][RTW89_FCC][2][6] = 54,
[0][1][2][1][RTW89_ETSI][1][6] = 42,
[0][1][2][1][RTW89_ETSI][0][6] = 6,
[0][1][2][1][RTW89_MKK][1][6] = 54,
[0][1][2][1][RTW89_MKK][0][6] = 16,
[0][1][2][1][RTW89_IC][1][6] = -4,
- [0][1][2][1][RTW89_IC][2][6] = 54,
[0][1][2][1][RTW89_KCC][1][6] = 12,
[0][1][2][1][RTW89_KCC][0][6] = 12,
[0][1][2][1][RTW89_ACMA][1][6] = 42,
@@ -43361,13 +42353,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][6] = 44,
[0][1][2][1][RTW89_THAILAND][0][6] = -4,
[0][1][2][1][RTW89_FCC][1][8] = -4,
- [0][1][2][1][RTW89_FCC][2][8] = 54,
[0][1][2][1][RTW89_ETSI][1][8] = 42,
[0][1][2][1][RTW89_ETSI][0][8] = 6,
[0][1][2][1][RTW89_MKK][1][8] = 54,
[0][1][2][1][RTW89_MKK][0][8] = 16,
[0][1][2][1][RTW89_IC][1][8] = -4,
- [0][1][2][1][RTW89_IC][2][8] = 54,
[0][1][2][1][RTW89_KCC][1][8] = 12,
[0][1][2][1][RTW89_KCC][0][8] = 12,
[0][1][2][1][RTW89_ACMA][1][8] = 42,
@@ -43380,13 +42370,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][8] = 44,
[0][1][2][1][RTW89_THAILAND][0][8] = -4,
[0][1][2][1][RTW89_FCC][1][10] = -4,
- [0][1][2][1][RTW89_FCC][2][10] = 54,
[0][1][2][1][RTW89_ETSI][1][10] = 42,
[0][1][2][1][RTW89_ETSI][0][10] = 6,
[0][1][2][1][RTW89_MKK][1][10] = 54,
[0][1][2][1][RTW89_MKK][0][10] = 16,
[0][1][2][1][RTW89_IC][1][10] = -4,
- [0][1][2][1][RTW89_IC][2][10] = 54,
[0][1][2][1][RTW89_KCC][1][10] = 12,
[0][1][2][1][RTW89_KCC][0][10] = 12,
[0][1][2][1][RTW89_ACMA][1][10] = 42,
@@ -43399,13 +42387,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][10] = 44,
[0][1][2][1][RTW89_THAILAND][0][10] = -4,
[0][1][2][1][RTW89_FCC][1][12] = -4,
- [0][1][2][1][RTW89_FCC][2][12] = 54,
[0][1][2][1][RTW89_ETSI][1][12] = 42,
[0][1][2][1][RTW89_ETSI][0][12] = 6,
[0][1][2][1][RTW89_MKK][1][12] = 54,
[0][1][2][1][RTW89_MKK][0][12] = 16,
[0][1][2][1][RTW89_IC][1][12] = -4,
- [0][1][2][1][RTW89_IC][2][12] = 54,
[0][1][2][1][RTW89_KCC][1][12] = 12,
[0][1][2][1][RTW89_KCC][0][12] = 12,
[0][1][2][1][RTW89_ACMA][1][12] = 42,
@@ -43418,13 +42404,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][12] = 44,
[0][1][2][1][RTW89_THAILAND][0][12] = -4,
[0][1][2][1][RTW89_FCC][1][14] = -4,
- [0][1][2][1][RTW89_FCC][2][14] = 54,
[0][1][2][1][RTW89_ETSI][1][14] = 42,
[0][1][2][1][RTW89_ETSI][0][14] = 6,
[0][1][2][1][RTW89_MKK][1][14] = 54,
[0][1][2][1][RTW89_MKK][0][14] = 16,
[0][1][2][1][RTW89_IC][1][14] = -4,
- [0][1][2][1][RTW89_IC][2][14] = 54,
[0][1][2][1][RTW89_KCC][1][14] = 12,
[0][1][2][1][RTW89_KCC][0][14] = 12,
[0][1][2][1][RTW89_ACMA][1][14] = 42,
@@ -43437,13 +42421,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][14] = 44,
[0][1][2][1][RTW89_THAILAND][0][14] = -4,
[0][1][2][1][RTW89_FCC][1][15] = -4,
- [0][1][2][1][RTW89_FCC][2][15] = 54,
[0][1][2][1][RTW89_ETSI][1][15] = 42,
[0][1][2][1][RTW89_ETSI][0][15] = 6,
[0][1][2][1][RTW89_MKK][1][15] = 54,
[0][1][2][1][RTW89_MKK][0][15] = 16,
[0][1][2][1][RTW89_IC][1][15] = -4,
- [0][1][2][1][RTW89_IC][2][15] = 54,
[0][1][2][1][RTW89_KCC][1][15] = 12,
[0][1][2][1][RTW89_KCC][0][15] = 12,
[0][1][2][1][RTW89_ACMA][1][15] = 42,
@@ -43456,13 +42438,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][15] = 44,
[0][1][2][1][RTW89_THAILAND][0][15] = -4,
[0][1][2][1][RTW89_FCC][1][17] = -4,
- [0][1][2][1][RTW89_FCC][2][17] = 54,
[0][1][2][1][RTW89_ETSI][1][17] = 42,
[0][1][2][1][RTW89_ETSI][0][17] = 6,
[0][1][2][1][RTW89_MKK][1][17] = 54,
[0][1][2][1][RTW89_MKK][0][17] = 16,
[0][1][2][1][RTW89_IC][1][17] = -4,
- [0][1][2][1][RTW89_IC][2][17] = 54,
[0][1][2][1][RTW89_KCC][1][17] = 12,
[0][1][2][1][RTW89_KCC][0][17] = 12,
[0][1][2][1][RTW89_ACMA][1][17] = 42,
@@ -43475,13 +42455,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][17] = 44,
[0][1][2][1][RTW89_THAILAND][0][17] = -4,
[0][1][2][1][RTW89_FCC][1][19] = -4,
- [0][1][2][1][RTW89_FCC][2][19] = 54,
[0][1][2][1][RTW89_ETSI][1][19] = 42,
[0][1][2][1][RTW89_ETSI][0][19] = 6,
[0][1][2][1][RTW89_MKK][1][19] = 54,
[0][1][2][1][RTW89_MKK][0][19] = 16,
[0][1][2][1][RTW89_IC][1][19] = -4,
- [0][1][2][1][RTW89_IC][2][19] = 54,
[0][1][2][1][RTW89_KCC][1][19] = 12,
[0][1][2][1][RTW89_KCC][0][19] = 12,
[0][1][2][1][RTW89_ACMA][1][19] = 42,
@@ -43494,13 +42472,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][19] = 44,
[0][1][2][1][RTW89_THAILAND][0][19] = -4,
[0][1][2][1][RTW89_FCC][1][21] = -4,
- [0][1][2][1][RTW89_FCC][2][21] = 54,
[0][1][2][1][RTW89_ETSI][1][21] = 42,
[0][1][2][1][RTW89_ETSI][0][21] = 6,
[0][1][2][1][RTW89_MKK][1][21] = 54,
[0][1][2][1][RTW89_MKK][0][21] = 16,
[0][1][2][1][RTW89_IC][1][21] = -4,
- [0][1][2][1][RTW89_IC][2][21] = 54,
[0][1][2][1][RTW89_KCC][1][21] = 12,
[0][1][2][1][RTW89_KCC][0][21] = 12,
[0][1][2][1][RTW89_ACMA][1][21] = 42,
@@ -43513,13 +42489,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][21] = 44,
[0][1][2][1][RTW89_THAILAND][0][21] = -4,
[0][1][2][1][RTW89_FCC][1][23] = -4,
- [0][1][2][1][RTW89_FCC][2][23] = 68,
[0][1][2][1][RTW89_ETSI][1][23] = 42,
[0][1][2][1][RTW89_ETSI][0][23] = 6,
[0][1][2][1][RTW89_MKK][1][23] = 54,
[0][1][2][1][RTW89_MKK][0][23] = 16,
[0][1][2][1][RTW89_IC][1][23] = -4,
- [0][1][2][1][RTW89_IC][2][23] = 68,
[0][1][2][1][RTW89_KCC][1][23] = 12,
[0][1][2][1][RTW89_KCC][0][23] = 10,
[0][1][2][1][RTW89_ACMA][1][23] = 42,
@@ -43532,13 +42506,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][23] = 44,
[0][1][2][1][RTW89_THAILAND][0][23] = -4,
[0][1][2][1][RTW89_FCC][1][25] = -4,
- [0][1][2][1][RTW89_FCC][2][25] = 68,
[0][1][2][1][RTW89_ETSI][1][25] = 42,
[0][1][2][1][RTW89_ETSI][0][25] = 6,
[0][1][2][1][RTW89_MKK][1][25] = 54,
[0][1][2][1][RTW89_MKK][0][25] = 16,
[0][1][2][1][RTW89_IC][1][25] = -4,
- [0][1][2][1][RTW89_IC][2][25] = 68,
[0][1][2][1][RTW89_KCC][1][25] = 12,
[0][1][2][1][RTW89_KCC][0][25] = 14,
[0][1][2][1][RTW89_ACMA][1][25] = 42,
@@ -43551,13 +42523,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][25] = 42,
[0][1][2][1][RTW89_THAILAND][0][25] = -4,
[0][1][2][1][RTW89_FCC][1][27] = -4,
- [0][1][2][1][RTW89_FCC][2][27] = 68,
[0][1][2][1][RTW89_ETSI][1][27] = 42,
[0][1][2][1][RTW89_ETSI][0][27] = 6,
[0][1][2][1][RTW89_MKK][1][27] = 54,
[0][1][2][1][RTW89_MKK][0][27] = 16,
[0][1][2][1][RTW89_IC][1][27] = -4,
- [0][1][2][1][RTW89_IC][2][27] = 68,
[0][1][2][1][RTW89_KCC][1][27] = 12,
[0][1][2][1][RTW89_KCC][0][27] = 14,
[0][1][2][1][RTW89_ACMA][1][27] = 42,
@@ -43570,13 +42540,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][27] = 42,
[0][1][2][1][RTW89_THAILAND][0][27] = -4,
[0][1][2][1][RTW89_FCC][1][29] = -4,
- [0][1][2][1][RTW89_FCC][2][29] = 68,
[0][1][2][1][RTW89_ETSI][1][29] = 42,
[0][1][2][1][RTW89_ETSI][0][29] = 6,
[0][1][2][1][RTW89_MKK][1][29] = 54,
[0][1][2][1][RTW89_MKK][0][29] = 16,
[0][1][2][1][RTW89_IC][1][29] = -4,
- [0][1][2][1][RTW89_IC][2][29] = 68,
[0][1][2][1][RTW89_KCC][1][29] = 12,
[0][1][2][1][RTW89_KCC][0][29] = 14,
[0][1][2][1][RTW89_ACMA][1][29] = 42,
@@ -43589,13 +42557,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][29] = 42,
[0][1][2][1][RTW89_THAILAND][0][29] = -4,
[0][1][2][1][RTW89_FCC][1][30] = -4,
- [0][1][2][1][RTW89_FCC][2][30] = 68,
[0][1][2][1][RTW89_ETSI][1][30] = 42,
[0][1][2][1][RTW89_ETSI][0][30] = 6,
[0][1][2][1][RTW89_MKK][1][30] = 54,
[0][1][2][1][RTW89_MKK][0][30] = 16,
[0][1][2][1][RTW89_IC][1][30] = -4,
- [0][1][2][1][RTW89_IC][2][30] = 68,
[0][1][2][1][RTW89_KCC][1][30] = 12,
[0][1][2][1][RTW89_KCC][0][30] = 14,
[0][1][2][1][RTW89_ACMA][1][30] = 42,
@@ -43608,13 +42574,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][30] = 42,
[0][1][2][1][RTW89_THAILAND][0][30] = -4,
[0][1][2][1][RTW89_FCC][1][32] = -4,
- [0][1][2][1][RTW89_FCC][2][32] = 68,
[0][1][2][1][RTW89_ETSI][1][32] = 42,
[0][1][2][1][RTW89_ETSI][0][32] = 6,
[0][1][2][1][RTW89_MKK][1][32] = 54,
[0][1][2][1][RTW89_MKK][0][32] = 16,
[0][1][2][1][RTW89_IC][1][32] = -4,
- [0][1][2][1][RTW89_IC][2][32] = 68,
[0][1][2][1][RTW89_KCC][1][32] = 12,
[0][1][2][1][RTW89_KCC][0][32] = 14,
[0][1][2][1][RTW89_ACMA][1][32] = 42,
@@ -43627,13 +42591,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][32] = 42,
[0][1][2][1][RTW89_THAILAND][0][32] = -4,
[0][1][2][1][RTW89_FCC][1][34] = -4,
- [0][1][2][1][RTW89_FCC][2][34] = 68,
[0][1][2][1][RTW89_ETSI][1][34] = 42,
[0][1][2][1][RTW89_ETSI][0][34] = 6,
[0][1][2][1][RTW89_MKK][1][34] = 54,
[0][1][2][1][RTW89_MKK][0][34] = 16,
[0][1][2][1][RTW89_IC][1][34] = -4,
- [0][1][2][1][RTW89_IC][2][34] = 68,
[0][1][2][1][RTW89_KCC][1][34] = 12,
[0][1][2][1][RTW89_KCC][0][34] = 14,
[0][1][2][1][RTW89_ACMA][1][34] = 42,
@@ -43646,13 +42608,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][34] = 42,
[0][1][2][1][RTW89_THAILAND][0][34] = -4,
[0][1][2][1][RTW89_FCC][1][36] = -4,
- [0][1][2][1][RTW89_FCC][2][36] = 68,
[0][1][2][1][RTW89_ETSI][1][36] = 42,
[0][1][2][1][RTW89_ETSI][0][36] = 6,
[0][1][2][1][RTW89_MKK][1][36] = 54,
[0][1][2][1][RTW89_MKK][0][36] = 16,
[0][1][2][1][RTW89_IC][1][36] = -4,
- [0][1][2][1][RTW89_IC][2][36] = 68,
[0][1][2][1][RTW89_KCC][1][36] = 12,
[0][1][2][1][RTW89_KCC][0][36] = 14,
[0][1][2][1][RTW89_ACMA][1][36] = 42,
@@ -43665,13 +42625,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][36] = 42,
[0][1][2][1][RTW89_THAILAND][0][36] = -4,
[0][1][2][1][RTW89_FCC][1][38] = -4,
- [0][1][2][1][RTW89_FCC][2][38] = 68,
[0][1][2][1][RTW89_ETSI][1][38] = 42,
[0][1][2][1][RTW89_ETSI][0][38] = 6,
[0][1][2][1][RTW89_MKK][1][38] = 54,
[0][1][2][1][RTW89_MKK][0][38] = 16,
[0][1][2][1][RTW89_IC][1][38] = -4,
- [0][1][2][1][RTW89_IC][2][38] = 68,
[0][1][2][1][RTW89_KCC][1][38] = 12,
[0][1][2][1][RTW89_KCC][0][38] = 14,
[0][1][2][1][RTW89_ACMA][1][38] = 42,
@@ -43684,13 +42642,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][38] = 42,
[0][1][2][1][RTW89_THAILAND][0][38] = -4,
[0][1][2][1][RTW89_FCC][1][40] = -4,
- [0][1][2][1][RTW89_FCC][2][40] = 68,
[0][1][2][1][RTW89_ETSI][1][40] = 42,
[0][1][2][1][RTW89_ETSI][0][40] = 6,
[0][1][2][1][RTW89_MKK][1][40] = 54,
[0][1][2][1][RTW89_MKK][0][40] = 16,
[0][1][2][1][RTW89_IC][1][40] = -4,
- [0][1][2][1][RTW89_IC][2][40] = 68,
[0][1][2][1][RTW89_KCC][1][40] = 12,
[0][1][2][1][RTW89_KCC][0][40] = 14,
[0][1][2][1][RTW89_ACMA][1][40] = 42,
@@ -43703,13 +42659,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][40] = 42,
[0][1][2][1][RTW89_THAILAND][0][40] = -4,
[0][1][2][1][RTW89_FCC][1][42] = -4,
- [0][1][2][1][RTW89_FCC][2][42] = 68,
[0][1][2][1][RTW89_ETSI][1][42] = 42,
[0][1][2][1][RTW89_ETSI][0][42] = 6,
[0][1][2][1][RTW89_MKK][1][42] = 54,
[0][1][2][1][RTW89_MKK][0][42] = 16,
[0][1][2][1][RTW89_IC][1][42] = -4,
- [0][1][2][1][RTW89_IC][2][42] = 68,
[0][1][2][1][RTW89_KCC][1][42] = 12,
[0][1][2][1][RTW89_KCC][0][42] = 14,
[0][1][2][1][RTW89_ACMA][1][42] = 42,
@@ -43722,13 +42676,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][42] = 42,
[0][1][2][1][RTW89_THAILAND][0][42] = -4,
[0][1][2][1][RTW89_FCC][1][44] = -2,
- [0][1][2][1][RTW89_FCC][2][44] = 68,
[0][1][2][1][RTW89_ETSI][1][44] = 42,
[0][1][2][1][RTW89_ETSI][0][44] = 6,
[0][1][2][1][RTW89_MKK][1][44] = 34,
[0][1][2][1][RTW89_MKK][0][44] = 16,
[0][1][2][1][RTW89_IC][1][44] = -2,
- [0][1][2][1][RTW89_IC][2][44] = 68,
[0][1][2][1][RTW89_KCC][1][44] = 12,
[0][1][2][1][RTW89_KCC][0][44] = 12,
[0][1][2][1][RTW89_ACMA][1][44] = 42,
@@ -43741,13 +42693,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][44] = 42,
[0][1][2][1][RTW89_THAILAND][0][44] = -2,
[0][1][2][1][RTW89_FCC][1][45] = -2,
- [0][1][2][1][RTW89_FCC][2][45] = 127,
[0][1][2][1][RTW89_ETSI][1][45] = 127,
[0][1][2][1][RTW89_ETSI][0][45] = 127,
[0][1][2][1][RTW89_MKK][1][45] = 127,
[0][1][2][1][RTW89_MKK][0][45] = 127,
[0][1][2][1][RTW89_IC][1][45] = -2,
- [0][1][2][1][RTW89_IC][2][45] = 70,
[0][1][2][1][RTW89_KCC][1][45] = 12,
[0][1][2][1][RTW89_KCC][0][45] = 127,
[0][1][2][1][RTW89_ACMA][1][45] = 127,
@@ -43760,13 +42710,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][45] = 127,
[0][1][2][1][RTW89_THAILAND][0][45] = 127,
[0][1][2][1][RTW89_FCC][1][47] = -2,
- [0][1][2][1][RTW89_FCC][2][47] = 127,
[0][1][2][1][RTW89_ETSI][1][47] = 127,
[0][1][2][1][RTW89_ETSI][0][47] = 127,
[0][1][2][1][RTW89_MKK][1][47] = 127,
[0][1][2][1][RTW89_MKK][0][47] = 127,
[0][1][2][1][RTW89_IC][1][47] = -2,
- [0][1][2][1][RTW89_IC][2][47] = 68,
[0][1][2][1][RTW89_KCC][1][47] = 12,
[0][1][2][1][RTW89_KCC][0][47] = 127,
[0][1][2][1][RTW89_ACMA][1][47] = 127,
@@ -43779,13 +42727,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][47] = 127,
[0][1][2][1][RTW89_THAILAND][0][47] = 127,
[0][1][2][1][RTW89_FCC][1][49] = -2,
- [0][1][2][1][RTW89_FCC][2][49] = 127,
[0][1][2][1][RTW89_ETSI][1][49] = 127,
[0][1][2][1][RTW89_ETSI][0][49] = 127,
[0][1][2][1][RTW89_MKK][1][49] = 127,
[0][1][2][1][RTW89_MKK][0][49] = 127,
[0][1][2][1][RTW89_IC][1][49] = -2,
- [0][1][2][1][RTW89_IC][2][49] = 68,
[0][1][2][1][RTW89_KCC][1][49] = 12,
[0][1][2][1][RTW89_KCC][0][49] = 127,
[0][1][2][1][RTW89_ACMA][1][49] = 127,
@@ -43798,13 +42744,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][49] = 127,
[0][1][2][1][RTW89_THAILAND][0][49] = 127,
[0][1][2][1][RTW89_FCC][1][51] = -2,
- [0][1][2][1][RTW89_FCC][2][51] = 127,
[0][1][2][1][RTW89_ETSI][1][51] = 127,
[0][1][2][1][RTW89_ETSI][0][51] = 127,
[0][1][2][1][RTW89_MKK][1][51] = 127,
[0][1][2][1][RTW89_MKK][0][51] = 127,
[0][1][2][1][RTW89_IC][1][51] = -2,
- [0][1][2][1][RTW89_IC][2][51] = 68,
[0][1][2][1][RTW89_KCC][1][51] = 12,
[0][1][2][1][RTW89_KCC][0][51] = 127,
[0][1][2][1][RTW89_ACMA][1][51] = 127,
@@ -43817,13 +42761,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][51] = 127,
[0][1][2][1][RTW89_THAILAND][0][51] = 127,
[0][1][2][1][RTW89_FCC][1][53] = -2,
- [0][1][2][1][RTW89_FCC][2][53] = 127,
[0][1][2][1][RTW89_ETSI][1][53] = 127,
[0][1][2][1][RTW89_ETSI][0][53] = 127,
[0][1][2][1][RTW89_MKK][1][53] = 127,
[0][1][2][1][RTW89_MKK][0][53] = 127,
[0][1][2][1][RTW89_IC][1][53] = -2,
- [0][1][2][1][RTW89_IC][2][53] = 68,
[0][1][2][1][RTW89_KCC][1][53] = 12,
[0][1][2][1][RTW89_KCC][0][53] = 127,
[0][1][2][1][RTW89_ACMA][1][53] = 127,
@@ -43836,13 +42778,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][53] = 127,
[0][1][2][1][RTW89_THAILAND][0][53] = 127,
[0][1][2][1][RTW89_FCC][1][55] = -2,
- [0][1][2][1][RTW89_FCC][2][55] = 68,
[0][1][2][1][RTW89_ETSI][1][55] = 127,
[0][1][2][1][RTW89_ETSI][0][55] = 127,
[0][1][2][1][RTW89_MKK][1][55] = 127,
[0][1][2][1][RTW89_MKK][0][55] = 127,
[0][1][2][1][RTW89_IC][1][55] = -2,
- [0][1][2][1][RTW89_IC][2][55] = 68,
[0][1][2][1][RTW89_KCC][1][55] = 12,
[0][1][2][1][RTW89_KCC][0][55] = 127,
[0][1][2][1][RTW89_ACMA][1][55] = 127,
@@ -43855,13 +42795,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][55] = 127,
[0][1][2][1][RTW89_THAILAND][0][55] = 127,
[0][1][2][1][RTW89_FCC][1][57] = -2,
- [0][1][2][1][RTW89_FCC][2][57] = 68,
[0][1][2][1][RTW89_ETSI][1][57] = 127,
[0][1][2][1][RTW89_ETSI][0][57] = 127,
[0][1][2][1][RTW89_MKK][1][57] = 127,
[0][1][2][1][RTW89_MKK][0][57] = 127,
[0][1][2][1][RTW89_IC][1][57] = -2,
- [0][1][2][1][RTW89_IC][2][57] = 68,
[0][1][2][1][RTW89_KCC][1][57] = 12,
[0][1][2][1][RTW89_KCC][0][57] = 127,
[0][1][2][1][RTW89_ACMA][1][57] = 127,
@@ -43874,13 +42812,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][57] = 127,
[0][1][2][1][RTW89_THAILAND][0][57] = 127,
[0][1][2][1][RTW89_FCC][1][59] = -2,
- [0][1][2][1][RTW89_FCC][2][59] = 68,
[0][1][2][1][RTW89_ETSI][1][59] = 127,
[0][1][2][1][RTW89_ETSI][0][59] = 127,
[0][1][2][1][RTW89_MKK][1][59] = 127,
[0][1][2][1][RTW89_MKK][0][59] = 127,
[0][1][2][1][RTW89_IC][1][59] = -2,
- [0][1][2][1][RTW89_IC][2][59] = 68,
[0][1][2][1][RTW89_KCC][1][59] = 12,
[0][1][2][1][RTW89_KCC][0][59] = 127,
[0][1][2][1][RTW89_ACMA][1][59] = 127,
@@ -43893,13 +42829,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][59] = 127,
[0][1][2][1][RTW89_THAILAND][0][59] = 127,
[0][1][2][1][RTW89_FCC][1][60] = -2,
- [0][1][2][1][RTW89_FCC][2][60] = 68,
[0][1][2][1][RTW89_ETSI][1][60] = 127,
[0][1][2][1][RTW89_ETSI][0][60] = 127,
[0][1][2][1][RTW89_MKK][1][60] = 127,
[0][1][2][1][RTW89_MKK][0][60] = 127,
[0][1][2][1][RTW89_IC][1][60] = -2,
- [0][1][2][1][RTW89_IC][2][60] = 68,
[0][1][2][1][RTW89_KCC][1][60] = 12,
[0][1][2][1][RTW89_KCC][0][60] = 127,
[0][1][2][1][RTW89_ACMA][1][60] = 127,
@@ -43912,13 +42846,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][60] = 127,
[0][1][2][1][RTW89_THAILAND][0][60] = 127,
[0][1][2][1][RTW89_FCC][1][62] = -2,
- [0][1][2][1][RTW89_FCC][2][62] = 68,
[0][1][2][1][RTW89_ETSI][1][62] = 127,
[0][1][2][1][RTW89_ETSI][0][62] = 127,
[0][1][2][1][RTW89_MKK][1][62] = 127,
[0][1][2][1][RTW89_MKK][0][62] = 127,
[0][1][2][1][RTW89_IC][1][62] = -2,
- [0][1][2][1][RTW89_IC][2][62] = 68,
[0][1][2][1][RTW89_KCC][1][62] = 12,
[0][1][2][1][RTW89_KCC][0][62] = 127,
[0][1][2][1][RTW89_ACMA][1][62] = 127,
@@ -43931,13 +42863,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][62] = 127,
[0][1][2][1][RTW89_THAILAND][0][62] = 127,
[0][1][2][1][RTW89_FCC][1][64] = -2,
- [0][1][2][1][RTW89_FCC][2][64] = 68,
[0][1][2][1][RTW89_ETSI][1][64] = 127,
[0][1][2][1][RTW89_ETSI][0][64] = 127,
[0][1][2][1][RTW89_MKK][1][64] = 127,
[0][1][2][1][RTW89_MKK][0][64] = 127,
[0][1][2][1][RTW89_IC][1][64] = -2,
- [0][1][2][1][RTW89_IC][2][64] = 68,
[0][1][2][1][RTW89_KCC][1][64] = 12,
[0][1][2][1][RTW89_KCC][0][64] = 127,
[0][1][2][1][RTW89_ACMA][1][64] = 127,
@@ -43950,13 +42880,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][64] = 127,
[0][1][2][1][RTW89_THAILAND][0][64] = 127,
[0][1][2][1][RTW89_FCC][1][66] = -2,
- [0][1][2][1][RTW89_FCC][2][66] = 68,
[0][1][2][1][RTW89_ETSI][1][66] = 127,
[0][1][2][1][RTW89_ETSI][0][66] = 127,
[0][1][2][1][RTW89_MKK][1][66] = 127,
[0][1][2][1][RTW89_MKK][0][66] = 127,
[0][1][2][1][RTW89_IC][1][66] = -2,
- [0][1][2][1][RTW89_IC][2][66] = 68,
[0][1][2][1][RTW89_KCC][1][66] = 12,
[0][1][2][1][RTW89_KCC][0][66] = 127,
[0][1][2][1][RTW89_ACMA][1][66] = 127,
@@ -43969,13 +42897,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][66] = 127,
[0][1][2][1][RTW89_THAILAND][0][66] = 127,
[0][1][2][1][RTW89_FCC][1][68] = -2,
- [0][1][2][1][RTW89_FCC][2][68] = 68,
[0][1][2][1][RTW89_ETSI][1][68] = 127,
[0][1][2][1][RTW89_ETSI][0][68] = 127,
[0][1][2][1][RTW89_MKK][1][68] = 127,
[0][1][2][1][RTW89_MKK][0][68] = 127,
[0][1][2][1][RTW89_IC][1][68] = -2,
- [0][1][2][1][RTW89_IC][2][68] = 68,
[0][1][2][1][RTW89_KCC][1][68] = 12,
[0][1][2][1][RTW89_KCC][0][68] = 127,
[0][1][2][1][RTW89_ACMA][1][68] = 127,
@@ -43988,13 +42914,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][68] = 127,
[0][1][2][1][RTW89_THAILAND][0][68] = 127,
[0][1][2][1][RTW89_FCC][1][70] = -2,
- [0][1][2][1][RTW89_FCC][2][70] = 68,
[0][1][2][1][RTW89_ETSI][1][70] = 127,
[0][1][2][1][RTW89_ETSI][0][70] = 127,
[0][1][2][1][RTW89_MKK][1][70] = 127,
[0][1][2][1][RTW89_MKK][0][70] = 127,
[0][1][2][1][RTW89_IC][1][70] = -2,
- [0][1][2][1][RTW89_IC][2][70] = 68,
[0][1][2][1][RTW89_KCC][1][70] = 12,
[0][1][2][1][RTW89_KCC][0][70] = 127,
[0][1][2][1][RTW89_ACMA][1][70] = 127,
@@ -44007,13 +42931,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][70] = 127,
[0][1][2][1][RTW89_THAILAND][0][70] = 127,
[0][1][2][1][RTW89_FCC][1][72] = -2,
- [0][1][2][1][RTW89_FCC][2][72] = 68,
[0][1][2][1][RTW89_ETSI][1][72] = 127,
[0][1][2][1][RTW89_ETSI][0][72] = 127,
[0][1][2][1][RTW89_MKK][1][72] = 127,
[0][1][2][1][RTW89_MKK][0][72] = 127,
[0][1][2][1][RTW89_IC][1][72] = -2,
- [0][1][2][1][RTW89_IC][2][72] = 68,
[0][1][2][1][RTW89_KCC][1][72] = 12,
[0][1][2][1][RTW89_KCC][0][72] = 127,
[0][1][2][1][RTW89_ACMA][1][72] = 127,
@@ -44026,13 +42948,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][72] = 127,
[0][1][2][1][RTW89_THAILAND][0][72] = 127,
[0][1][2][1][RTW89_FCC][1][74] = -2,
- [0][1][2][1][RTW89_FCC][2][74] = 68,
[0][1][2][1][RTW89_ETSI][1][74] = 127,
[0][1][2][1][RTW89_ETSI][0][74] = 127,
[0][1][2][1][RTW89_MKK][1][74] = 127,
[0][1][2][1][RTW89_MKK][0][74] = 127,
[0][1][2][1][RTW89_IC][1][74] = -2,
- [0][1][2][1][RTW89_IC][2][74] = 68,
[0][1][2][1][RTW89_KCC][1][74] = 12,
[0][1][2][1][RTW89_KCC][0][74] = 127,
[0][1][2][1][RTW89_ACMA][1][74] = 127,
@@ -44045,13 +42965,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][74] = 127,
[0][1][2][1][RTW89_THAILAND][0][74] = 127,
[0][1][2][1][RTW89_FCC][1][75] = -2,
- [0][1][2][1][RTW89_FCC][2][75] = 68,
[0][1][2][1][RTW89_ETSI][1][75] = 127,
[0][1][2][1][RTW89_ETSI][0][75] = 127,
[0][1][2][1][RTW89_MKK][1][75] = 127,
[0][1][2][1][RTW89_MKK][0][75] = 127,
[0][1][2][1][RTW89_IC][1][75] = -2,
- [0][1][2][1][RTW89_IC][2][75] = 68,
[0][1][2][1][RTW89_KCC][1][75] = 12,
[0][1][2][1][RTW89_KCC][0][75] = 127,
[0][1][2][1][RTW89_ACMA][1][75] = 127,
@@ -44064,13 +42982,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][75] = 127,
[0][1][2][1][RTW89_THAILAND][0][75] = 127,
[0][1][2][1][RTW89_FCC][1][77] = -2,
- [0][1][2][1][RTW89_FCC][2][77] = 68,
[0][1][2][1][RTW89_ETSI][1][77] = 127,
[0][1][2][1][RTW89_ETSI][0][77] = 127,
[0][1][2][1][RTW89_MKK][1][77] = 127,
[0][1][2][1][RTW89_MKK][0][77] = 127,
[0][1][2][1][RTW89_IC][1][77] = -2,
- [0][1][2][1][RTW89_IC][2][77] = 68,
[0][1][2][1][RTW89_KCC][1][77] = 12,
[0][1][2][1][RTW89_KCC][0][77] = 127,
[0][1][2][1][RTW89_ACMA][1][77] = 127,
@@ -44083,13 +42999,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][77] = 127,
[0][1][2][1][RTW89_THAILAND][0][77] = 127,
[0][1][2][1][RTW89_FCC][1][79] = -2,
- [0][1][2][1][RTW89_FCC][2][79] = 68,
[0][1][2][1][RTW89_ETSI][1][79] = 127,
[0][1][2][1][RTW89_ETSI][0][79] = 127,
[0][1][2][1][RTW89_MKK][1][79] = 127,
[0][1][2][1][RTW89_MKK][0][79] = 127,
[0][1][2][1][RTW89_IC][1][79] = -2,
- [0][1][2][1][RTW89_IC][2][79] = 68,
[0][1][2][1][RTW89_KCC][1][79] = 12,
[0][1][2][1][RTW89_KCC][0][79] = 127,
[0][1][2][1][RTW89_ACMA][1][79] = 127,
@@ -44102,13 +43016,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][79] = 127,
[0][1][2][1][RTW89_THAILAND][0][79] = 127,
[0][1][2][1][RTW89_FCC][1][81] = -2,
- [0][1][2][1][RTW89_FCC][2][81] = 68,
[0][1][2][1][RTW89_ETSI][1][81] = 127,
[0][1][2][1][RTW89_ETSI][0][81] = 127,
[0][1][2][1][RTW89_MKK][1][81] = 127,
[0][1][2][1][RTW89_MKK][0][81] = 127,
[0][1][2][1][RTW89_IC][1][81] = -2,
- [0][1][2][1][RTW89_IC][2][81] = 68,
[0][1][2][1][RTW89_KCC][1][81] = 12,
[0][1][2][1][RTW89_KCC][0][81] = 127,
[0][1][2][1][RTW89_ACMA][1][81] = 127,
@@ -44121,13 +43033,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][81] = 127,
[0][1][2][1][RTW89_THAILAND][0][81] = 127,
[0][1][2][1][RTW89_FCC][1][83] = -2,
- [0][1][2][1][RTW89_FCC][2][83] = 68,
[0][1][2][1][RTW89_ETSI][1][83] = 127,
[0][1][2][1][RTW89_ETSI][0][83] = 127,
[0][1][2][1][RTW89_MKK][1][83] = 127,
[0][1][2][1][RTW89_MKK][0][83] = 127,
[0][1][2][1][RTW89_IC][1][83] = -2,
- [0][1][2][1][RTW89_IC][2][83] = 68,
[0][1][2][1][RTW89_KCC][1][83] = 20,
[0][1][2][1][RTW89_KCC][0][83] = 127,
[0][1][2][1][RTW89_ACMA][1][83] = 127,
@@ -44140,13 +43050,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][83] = 127,
[0][1][2][1][RTW89_THAILAND][0][83] = 127,
[0][1][2][1][RTW89_FCC][1][85] = -2,
- [0][1][2][1][RTW89_FCC][2][85] = 68,
[0][1][2][1][RTW89_ETSI][1][85] = 127,
[0][1][2][1][RTW89_ETSI][0][85] = 127,
[0][1][2][1][RTW89_MKK][1][85] = 127,
[0][1][2][1][RTW89_MKK][0][85] = 127,
[0][1][2][1][RTW89_IC][1][85] = -2,
- [0][1][2][1][RTW89_IC][2][85] = 68,
[0][1][2][1][RTW89_KCC][1][85] = 20,
[0][1][2][1][RTW89_KCC][0][85] = 127,
[0][1][2][1][RTW89_ACMA][1][85] = 127,
@@ -44159,13 +43067,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][85] = 127,
[0][1][2][1][RTW89_THAILAND][0][85] = 127,
[0][1][2][1][RTW89_FCC][1][87] = -2,
- [0][1][2][1][RTW89_FCC][2][87] = 127,
[0][1][2][1][RTW89_ETSI][1][87] = 127,
[0][1][2][1][RTW89_ETSI][0][87] = 127,
[0][1][2][1][RTW89_MKK][1][87] = 127,
[0][1][2][1][RTW89_MKK][0][87] = 127,
[0][1][2][1][RTW89_IC][1][87] = -2,
- [0][1][2][1][RTW89_IC][2][87] = 127,
[0][1][2][1][RTW89_KCC][1][87] = 20,
[0][1][2][1][RTW89_KCC][0][87] = 127,
[0][1][2][1][RTW89_ACMA][1][87] = 127,
@@ -44178,13 +43084,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][87] = 127,
[0][1][2][1][RTW89_THAILAND][0][87] = 127,
[0][1][2][1][RTW89_FCC][1][89] = -2,
- [0][1][2][1][RTW89_FCC][2][89] = 127,
[0][1][2][1][RTW89_ETSI][1][89] = 127,
[0][1][2][1][RTW89_ETSI][0][89] = 127,
[0][1][2][1][RTW89_MKK][1][89] = 127,
[0][1][2][1][RTW89_MKK][0][89] = 127,
[0][1][2][1][RTW89_IC][1][89] = -2,
- [0][1][2][1][RTW89_IC][2][89] = 127,
[0][1][2][1][RTW89_KCC][1][89] = 20,
[0][1][2][1][RTW89_KCC][0][89] = 127,
[0][1][2][1][RTW89_ACMA][1][89] = 127,
@@ -44197,13 +43101,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][89] = 127,
[0][1][2][1][RTW89_THAILAND][0][89] = 127,
[0][1][2][1][RTW89_FCC][1][90] = -2,
- [0][1][2][1][RTW89_FCC][2][90] = 127,
[0][1][2][1][RTW89_ETSI][1][90] = 127,
[0][1][2][1][RTW89_ETSI][0][90] = 127,
[0][1][2][1][RTW89_MKK][1][90] = 127,
[0][1][2][1][RTW89_MKK][0][90] = 127,
[0][1][2][1][RTW89_IC][1][90] = -2,
- [0][1][2][1][RTW89_IC][2][90] = 127,
[0][1][2][1][RTW89_KCC][1][90] = 20,
[0][1][2][1][RTW89_KCC][0][90] = 127,
[0][1][2][1][RTW89_ACMA][1][90] = 127,
@@ -44216,13 +43118,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][90] = 127,
[0][1][2][1][RTW89_THAILAND][0][90] = 127,
[0][1][2][1][RTW89_FCC][1][92] = -2,
- [0][1][2][1][RTW89_FCC][2][92] = 127,
[0][1][2][1][RTW89_ETSI][1][92] = 127,
[0][1][2][1][RTW89_ETSI][0][92] = 127,
[0][1][2][1][RTW89_MKK][1][92] = 127,
[0][1][2][1][RTW89_MKK][0][92] = 127,
[0][1][2][1][RTW89_IC][1][92] = -2,
- [0][1][2][1][RTW89_IC][2][92] = 127,
[0][1][2][1][RTW89_KCC][1][92] = 20,
[0][1][2][1][RTW89_KCC][0][92] = 127,
[0][1][2][1][RTW89_ACMA][1][92] = 127,
@@ -44235,13 +43135,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][92] = 127,
[0][1][2][1][RTW89_THAILAND][0][92] = 127,
[0][1][2][1][RTW89_FCC][1][94] = -2,
- [0][1][2][1][RTW89_FCC][2][94] = 127,
[0][1][2][1][RTW89_ETSI][1][94] = 127,
[0][1][2][1][RTW89_ETSI][0][94] = 127,
[0][1][2][1][RTW89_MKK][1][94] = 127,
[0][1][2][1][RTW89_MKK][0][94] = 127,
[0][1][2][1][RTW89_IC][1][94] = -2,
- [0][1][2][1][RTW89_IC][2][94] = 127,
[0][1][2][1][RTW89_KCC][1][94] = 20,
[0][1][2][1][RTW89_KCC][0][94] = 127,
[0][1][2][1][RTW89_ACMA][1][94] = 127,
@@ -44254,13 +43152,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][94] = 127,
[0][1][2][1][RTW89_THAILAND][0][94] = 127,
[0][1][2][1][RTW89_FCC][1][96] = -2,
- [0][1][2][1][RTW89_FCC][2][96] = 127,
[0][1][2][1][RTW89_ETSI][1][96] = 127,
[0][1][2][1][RTW89_ETSI][0][96] = 127,
[0][1][2][1][RTW89_MKK][1][96] = 127,
[0][1][2][1][RTW89_MKK][0][96] = 127,
[0][1][2][1][RTW89_IC][1][96] = -2,
- [0][1][2][1][RTW89_IC][2][96] = 127,
[0][1][2][1][RTW89_KCC][1][96] = 20,
[0][1][2][1][RTW89_KCC][0][96] = 127,
[0][1][2][1][RTW89_ACMA][1][96] = 127,
@@ -44273,13 +43169,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][96] = 127,
[0][1][2][1][RTW89_THAILAND][0][96] = 127,
[0][1][2][1][RTW89_FCC][1][98] = -2,
- [0][1][2][1][RTW89_FCC][2][98] = 127,
[0][1][2][1][RTW89_ETSI][1][98] = 127,
[0][1][2][1][RTW89_ETSI][0][98] = 127,
[0][1][2][1][RTW89_MKK][1][98] = 127,
[0][1][2][1][RTW89_MKK][0][98] = 127,
[0][1][2][1][RTW89_IC][1][98] = -2,
- [0][1][2][1][RTW89_IC][2][98] = 127,
[0][1][2][1][RTW89_KCC][1][98] = 20,
[0][1][2][1][RTW89_KCC][0][98] = 127,
[0][1][2][1][RTW89_ACMA][1][98] = 127,
@@ -44292,13 +43186,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][98] = 127,
[0][1][2][1][RTW89_THAILAND][0][98] = 127,
[0][1][2][1][RTW89_FCC][1][100] = -2,
- [0][1][2][1][RTW89_FCC][2][100] = 127,
[0][1][2][1][RTW89_ETSI][1][100] = 127,
[0][1][2][1][RTW89_ETSI][0][100] = 127,
[0][1][2][1][RTW89_MKK][1][100] = 127,
[0][1][2][1][RTW89_MKK][0][100] = 127,
[0][1][2][1][RTW89_IC][1][100] = -2,
- [0][1][2][1][RTW89_IC][2][100] = 127,
[0][1][2][1][RTW89_KCC][1][100] = 20,
[0][1][2][1][RTW89_KCC][0][100] = 127,
[0][1][2][1][RTW89_ACMA][1][100] = 127,
@@ -44311,13 +43203,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][100] = 127,
[0][1][2][1][RTW89_THAILAND][0][100] = 127,
[0][1][2][1][RTW89_FCC][1][102] = -2,
- [0][1][2][1][RTW89_FCC][2][102] = 127,
[0][1][2][1][RTW89_ETSI][1][102] = 127,
[0][1][2][1][RTW89_ETSI][0][102] = 127,
[0][1][2][1][RTW89_MKK][1][102] = 127,
[0][1][2][1][RTW89_MKK][0][102] = 127,
[0][1][2][1][RTW89_IC][1][102] = -2,
- [0][1][2][1][RTW89_IC][2][102] = 127,
[0][1][2][1][RTW89_KCC][1][102] = 20,
[0][1][2][1][RTW89_KCC][0][102] = 127,
[0][1][2][1][RTW89_ACMA][1][102] = 127,
@@ -44330,13 +43220,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][102] = 127,
[0][1][2][1][RTW89_THAILAND][0][102] = 127,
[0][1][2][1][RTW89_FCC][1][104] = -2,
- [0][1][2][1][RTW89_FCC][2][104] = 127,
[0][1][2][1][RTW89_ETSI][1][104] = 127,
[0][1][2][1][RTW89_ETSI][0][104] = 127,
[0][1][2][1][RTW89_MKK][1][104] = 127,
[0][1][2][1][RTW89_MKK][0][104] = 127,
[0][1][2][1][RTW89_IC][1][104] = -2,
- [0][1][2][1][RTW89_IC][2][104] = 127,
[0][1][2][1][RTW89_KCC][1][104] = 20,
[0][1][2][1][RTW89_KCC][0][104] = 127,
[0][1][2][1][RTW89_ACMA][1][104] = 127,
@@ -44349,13 +43237,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][104] = 127,
[0][1][2][1][RTW89_THAILAND][0][104] = 127,
[0][1][2][1][RTW89_FCC][1][105] = -2,
- [0][1][2][1][RTW89_FCC][2][105] = 127,
[0][1][2][1][RTW89_ETSI][1][105] = 127,
[0][1][2][1][RTW89_ETSI][0][105] = 127,
[0][1][2][1][RTW89_MKK][1][105] = 127,
[0][1][2][1][RTW89_MKK][0][105] = 127,
[0][1][2][1][RTW89_IC][1][105] = -2,
- [0][1][2][1][RTW89_IC][2][105] = 127,
[0][1][2][1][RTW89_KCC][1][105] = 20,
[0][1][2][1][RTW89_KCC][0][105] = 127,
[0][1][2][1][RTW89_ACMA][1][105] = 127,
@@ -44368,13 +43254,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][105] = 127,
[0][1][2][1][RTW89_THAILAND][0][105] = 127,
[0][1][2][1][RTW89_FCC][1][107] = 1,
- [0][1][2][1][RTW89_FCC][2][107] = 127,
[0][1][2][1][RTW89_ETSI][1][107] = 127,
[0][1][2][1][RTW89_ETSI][0][107] = 127,
[0][1][2][1][RTW89_MKK][1][107] = 127,
[0][1][2][1][RTW89_MKK][0][107] = 127,
[0][1][2][1][RTW89_IC][1][107] = 1,
- [0][1][2][1][RTW89_IC][2][107] = 127,
[0][1][2][1][RTW89_KCC][1][107] = 20,
[0][1][2][1][RTW89_KCC][0][107] = 127,
[0][1][2][1][RTW89_ACMA][1][107] = 127,
@@ -44387,13 +43271,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][107] = 127,
[0][1][2][1][RTW89_THAILAND][0][107] = 127,
[0][1][2][1][RTW89_FCC][1][109] = 1,
- [0][1][2][1][RTW89_FCC][2][109] = 127,
[0][1][2][1][RTW89_ETSI][1][109] = 127,
[0][1][2][1][RTW89_ETSI][0][109] = 127,
[0][1][2][1][RTW89_MKK][1][109] = 127,
[0][1][2][1][RTW89_MKK][0][109] = 127,
[0][1][2][1][RTW89_IC][1][109] = 1,
- [0][1][2][1][RTW89_IC][2][109] = 127,
[0][1][2][1][RTW89_KCC][1][109] = 20,
[0][1][2][1][RTW89_KCC][0][109] = 127,
[0][1][2][1][RTW89_ACMA][1][109] = 127,
@@ -44406,13 +43288,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][109] = 127,
[0][1][2][1][RTW89_THAILAND][0][109] = 127,
[0][1][2][1][RTW89_FCC][1][111] = 127,
- [0][1][2][1][RTW89_FCC][2][111] = 127,
[0][1][2][1][RTW89_ETSI][1][111] = 127,
[0][1][2][1][RTW89_ETSI][0][111] = 127,
[0][1][2][1][RTW89_MKK][1][111] = 127,
[0][1][2][1][RTW89_MKK][0][111] = 127,
[0][1][2][1][RTW89_IC][1][111] = 127,
- [0][1][2][1][RTW89_IC][2][111] = 127,
[0][1][2][1][RTW89_KCC][1][111] = 127,
[0][1][2][1][RTW89_KCC][0][111] = 127,
[0][1][2][1][RTW89_ACMA][1][111] = 127,
@@ -44425,13 +43305,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][111] = 127,
[0][1][2][1][RTW89_THAILAND][0][111] = 127,
[0][1][2][1][RTW89_FCC][1][113] = 127,
- [0][1][2][1][RTW89_FCC][2][113] = 127,
[0][1][2][1][RTW89_ETSI][1][113] = 127,
[0][1][2][1][RTW89_ETSI][0][113] = 127,
[0][1][2][1][RTW89_MKK][1][113] = 127,
[0][1][2][1][RTW89_MKK][0][113] = 127,
[0][1][2][1][RTW89_IC][1][113] = 127,
- [0][1][2][1][RTW89_IC][2][113] = 127,
[0][1][2][1][RTW89_KCC][1][113] = 127,
[0][1][2][1][RTW89_KCC][0][113] = 127,
[0][1][2][1][RTW89_ACMA][1][113] = 127,
@@ -44444,13 +43322,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][113] = 127,
[0][1][2][1][RTW89_THAILAND][0][113] = 127,
[0][1][2][1][RTW89_FCC][1][115] = 127,
- [0][1][2][1][RTW89_FCC][2][115] = 127,
[0][1][2][1][RTW89_ETSI][1][115] = 127,
[0][1][2][1][RTW89_ETSI][0][115] = 127,
[0][1][2][1][RTW89_MKK][1][115] = 127,
[0][1][2][1][RTW89_MKK][0][115] = 127,
[0][1][2][1][RTW89_IC][1][115] = 127,
- [0][1][2][1][RTW89_IC][2][115] = 127,
[0][1][2][1][RTW89_KCC][1][115] = 127,
[0][1][2][1][RTW89_KCC][0][115] = 127,
[0][1][2][1][RTW89_ACMA][1][115] = 127,
@@ -44463,13 +43339,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][115] = 127,
[0][1][2][1][RTW89_THAILAND][0][115] = 127,
[0][1][2][1][RTW89_FCC][1][117] = 127,
- [0][1][2][1][RTW89_FCC][2][117] = 127,
[0][1][2][1][RTW89_ETSI][1][117] = 127,
[0][1][2][1][RTW89_ETSI][0][117] = 127,
[0][1][2][1][RTW89_MKK][1][117] = 127,
[0][1][2][1][RTW89_MKK][0][117] = 127,
[0][1][2][1][RTW89_IC][1][117] = 127,
- [0][1][2][1][RTW89_IC][2][117] = 127,
[0][1][2][1][RTW89_KCC][1][117] = 127,
[0][1][2][1][RTW89_KCC][0][117] = 127,
[0][1][2][1][RTW89_ACMA][1][117] = 127,
@@ -44482,13 +43356,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][117] = 127,
[0][1][2][1][RTW89_THAILAND][0][117] = 127,
[0][1][2][1][RTW89_FCC][1][119] = 127,
- [0][1][2][1][RTW89_FCC][2][119] = 127,
[0][1][2][1][RTW89_ETSI][1][119] = 127,
[0][1][2][1][RTW89_ETSI][0][119] = 127,
[0][1][2][1][RTW89_MKK][1][119] = 127,
[0][1][2][1][RTW89_MKK][0][119] = 127,
[0][1][2][1][RTW89_IC][1][119] = 127,
- [0][1][2][1][RTW89_IC][2][119] = 127,
[0][1][2][1][RTW89_KCC][1][119] = 127,
[0][1][2][1][RTW89_KCC][0][119] = 127,
[0][1][2][1][RTW89_ACMA][1][119] = 127,
@@ -44501,13 +43373,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[0][1][2][1][RTW89_THAILAND][1][119] = 127,
[0][1][2][1][RTW89_THAILAND][0][119] = 127,
[1][0][2][0][RTW89_FCC][1][1] = 34,
- [1][0][2][0][RTW89_FCC][2][1] = 70,
[1][0][2][0][RTW89_ETSI][1][1] = 66,
[1][0][2][0][RTW89_ETSI][0][1] = 30,
[1][0][2][0][RTW89_MKK][1][1] = 62,
[1][0][2][0][RTW89_MKK][0][1] = 26,
[1][0][2][0][RTW89_IC][1][1] = 34,
- [1][0][2][0][RTW89_IC][2][1] = 70,
[1][0][2][0][RTW89_KCC][1][1] = 40,
[1][0][2][0][RTW89_KCC][0][1] = 24,
[1][0][2][0][RTW89_ACMA][1][1] = 66,
@@ -44520,13 +43390,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][1] = 68,
[1][0][2][0][RTW89_THAILAND][0][1] = 30,
[1][0][2][0][RTW89_FCC][1][5] = 34,
- [1][0][2][0][RTW89_FCC][2][5] = 70,
[1][0][2][0][RTW89_ETSI][1][5] = 66,
[1][0][2][0][RTW89_ETSI][0][5] = 30,
[1][0][2][0][RTW89_MKK][1][5] = 62,
[1][0][2][0][RTW89_MKK][0][5] = 26,
[1][0][2][0][RTW89_IC][1][5] = 34,
- [1][0][2][0][RTW89_IC][2][5] = 70,
[1][0][2][0][RTW89_KCC][1][5] = 40,
[1][0][2][0][RTW89_KCC][0][5] = 24,
[1][0][2][0][RTW89_ACMA][1][5] = 66,
@@ -44539,13 +43407,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][5] = 68,
[1][0][2][0][RTW89_THAILAND][0][5] = 30,
[1][0][2][0][RTW89_FCC][1][9] = 34,
- [1][0][2][0][RTW89_FCC][2][9] = 70,
[1][0][2][0][RTW89_ETSI][1][9] = 66,
[1][0][2][0][RTW89_ETSI][0][9] = 30,
[1][0][2][0][RTW89_MKK][1][9] = 62,
[1][0][2][0][RTW89_MKK][0][9] = 26,
[1][0][2][0][RTW89_IC][1][9] = 34,
- [1][0][2][0][RTW89_IC][2][9] = 70,
[1][0][2][0][RTW89_KCC][1][9] = 40,
[1][0][2][0][RTW89_KCC][0][9] = 24,
[1][0][2][0][RTW89_ACMA][1][9] = 66,
@@ -44558,13 +43424,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][9] = 68,
[1][0][2][0][RTW89_THAILAND][0][9] = 30,
[1][0][2][0][RTW89_FCC][1][13] = 34,
- [1][0][2][0][RTW89_FCC][2][13] = 70,
[1][0][2][0][RTW89_ETSI][1][13] = 66,
[1][0][2][0][RTW89_ETSI][0][13] = 30,
[1][0][2][0][RTW89_MKK][1][13] = 62,
[1][0][2][0][RTW89_MKK][0][13] = 26,
[1][0][2][0][RTW89_IC][1][13] = 34,
- [1][0][2][0][RTW89_IC][2][13] = 70,
[1][0][2][0][RTW89_KCC][1][13] = 40,
[1][0][2][0][RTW89_KCC][0][13] = 24,
[1][0][2][0][RTW89_ACMA][1][13] = 66,
@@ -44577,13 +43441,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][13] = 68,
[1][0][2][0][RTW89_THAILAND][0][13] = 30,
[1][0][2][0][RTW89_FCC][1][16] = 34,
- [1][0][2][0][RTW89_FCC][2][16] = 70,
[1][0][2][0][RTW89_ETSI][1][16] = 66,
[1][0][2][0][RTW89_ETSI][0][16] = 30,
[1][0][2][0][RTW89_MKK][1][16] = 62,
[1][0][2][0][RTW89_MKK][0][16] = 26,
[1][0][2][0][RTW89_IC][1][16] = 34,
- [1][0][2][0][RTW89_IC][2][16] = 70,
[1][0][2][0][RTW89_KCC][1][16] = 40,
[1][0][2][0][RTW89_KCC][0][16] = 24,
[1][0][2][0][RTW89_ACMA][1][16] = 66,
@@ -44596,13 +43458,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][16] = 68,
[1][0][2][0][RTW89_THAILAND][0][16] = 30,
[1][0][2][0][RTW89_FCC][1][20] = 34,
- [1][0][2][0][RTW89_FCC][2][20] = 70,
[1][0][2][0][RTW89_ETSI][1][20] = 66,
[1][0][2][0][RTW89_ETSI][0][20] = 30,
[1][0][2][0][RTW89_MKK][1][20] = 62,
[1][0][2][0][RTW89_MKK][0][20] = 26,
[1][0][2][0][RTW89_IC][1][20] = 34,
- [1][0][2][0][RTW89_IC][2][20] = 70,
[1][0][2][0][RTW89_KCC][1][20] = 40,
[1][0][2][0][RTW89_KCC][0][20] = 24,
[1][0][2][0][RTW89_ACMA][1][20] = 66,
@@ -44615,13 +43475,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][20] = 68,
[1][0][2][0][RTW89_THAILAND][0][20] = 30,
[1][0][2][0][RTW89_FCC][1][24] = 36,
- [1][0][2][0][RTW89_FCC][2][24] = 70,
[1][0][2][0][RTW89_ETSI][1][24] = 66,
[1][0][2][0][RTW89_ETSI][0][24] = 30,
[1][0][2][0][RTW89_MKK][1][24] = 64,
[1][0][2][0][RTW89_MKK][0][24] = 28,
[1][0][2][0][RTW89_IC][1][24] = 36,
- [1][0][2][0][RTW89_IC][2][24] = 70,
[1][0][2][0][RTW89_KCC][1][24] = 40,
[1][0][2][0][RTW89_KCC][0][24] = 26,
[1][0][2][0][RTW89_ACMA][1][24] = 66,
@@ -44634,13 +43492,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][24] = 68,
[1][0][2][0][RTW89_THAILAND][0][24] = 30,
[1][0][2][0][RTW89_FCC][1][28] = 34,
- [1][0][2][0][RTW89_FCC][2][28] = 70,
[1][0][2][0][RTW89_ETSI][1][28] = 66,
[1][0][2][0][RTW89_ETSI][0][28] = 30,
[1][0][2][0][RTW89_MKK][1][28] = 64,
[1][0][2][0][RTW89_MKK][0][28] = 26,
[1][0][2][0][RTW89_IC][1][28] = 34,
- [1][0][2][0][RTW89_IC][2][28] = 70,
[1][0][2][0][RTW89_KCC][1][28] = 40,
[1][0][2][0][RTW89_KCC][0][28] = 26,
[1][0][2][0][RTW89_ACMA][1][28] = 66,
@@ -44653,13 +43509,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][28] = 68,
[1][0][2][0][RTW89_THAILAND][0][28] = 30,
[1][0][2][0][RTW89_FCC][1][31] = 34,
- [1][0][2][0][RTW89_FCC][2][31] = 70,
[1][0][2][0][RTW89_ETSI][1][31] = 66,
[1][0][2][0][RTW89_ETSI][0][31] = 30,
[1][0][2][0][RTW89_MKK][1][31] = 64,
[1][0][2][0][RTW89_MKK][0][31] = 26,
[1][0][2][0][RTW89_IC][1][31] = 34,
- [1][0][2][0][RTW89_IC][2][31] = 70,
[1][0][2][0][RTW89_KCC][1][31] = 40,
[1][0][2][0][RTW89_KCC][0][31] = 26,
[1][0][2][0][RTW89_ACMA][1][31] = 66,
@@ -44672,13 +43526,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][31] = 68,
[1][0][2][0][RTW89_THAILAND][0][31] = 30,
[1][0][2][0][RTW89_FCC][1][35] = 34,
- [1][0][2][0][RTW89_FCC][2][35] = 70,
[1][0][2][0][RTW89_ETSI][1][35] = 66,
[1][0][2][0][RTW89_ETSI][0][35] = 30,
[1][0][2][0][RTW89_MKK][1][35] = 64,
[1][0][2][0][RTW89_MKK][0][35] = 26,
[1][0][2][0][RTW89_IC][1][35] = 34,
- [1][0][2][0][RTW89_IC][2][35] = 70,
[1][0][2][0][RTW89_KCC][1][35] = 40,
[1][0][2][0][RTW89_KCC][0][35] = 26,
[1][0][2][0][RTW89_ACMA][1][35] = 66,
@@ -44691,13 +43543,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][35] = 68,
[1][0][2][0][RTW89_THAILAND][0][35] = 30,
[1][0][2][0][RTW89_FCC][1][39] = 34,
- [1][0][2][0][RTW89_FCC][2][39] = 70,
[1][0][2][0][RTW89_ETSI][1][39] = 66,
[1][0][2][0][RTW89_ETSI][0][39] = 30,
[1][0][2][0][RTW89_MKK][1][39] = 64,
[1][0][2][0][RTW89_MKK][0][39] = 26,
[1][0][2][0][RTW89_IC][1][39] = 34,
- [1][0][2][0][RTW89_IC][2][39] = 70,
[1][0][2][0][RTW89_KCC][1][39] = 40,
[1][0][2][0][RTW89_KCC][0][39] = 26,
[1][0][2][0][RTW89_ACMA][1][39] = 66,
@@ -44710,13 +43560,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][39] = 68,
[1][0][2][0][RTW89_THAILAND][0][39] = 30,
[1][0][2][0][RTW89_FCC][1][43] = 34,
- [1][0][2][0][RTW89_FCC][2][43] = 70,
[1][0][2][0][RTW89_ETSI][1][43] = 66,
[1][0][2][0][RTW89_ETSI][0][43] = 30,
[1][0][2][0][RTW89_MKK][1][43] = 64,
[1][0][2][0][RTW89_MKK][0][43] = 26,
[1][0][2][0][RTW89_IC][1][43] = 34,
- [1][0][2][0][RTW89_IC][2][43] = 70,
[1][0][2][0][RTW89_KCC][1][43] = 40,
[1][0][2][0][RTW89_KCC][0][43] = 26,
[1][0][2][0][RTW89_ACMA][1][43] = 66,
@@ -44729,13 +43577,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][43] = 68,
[1][0][2][0][RTW89_THAILAND][0][43] = 30,
[1][0][2][0][RTW89_FCC][1][46] = 34,
- [1][0][2][0][RTW89_FCC][2][46] = 127,
[1][0][2][0][RTW89_ETSI][1][46] = 127,
[1][0][2][0][RTW89_ETSI][0][46] = 127,
[1][0][2][0][RTW89_MKK][1][46] = 127,
[1][0][2][0][RTW89_MKK][0][46] = 127,
[1][0][2][0][RTW89_IC][1][46] = 34,
- [1][0][2][0][RTW89_IC][2][46] = 68,
[1][0][2][0][RTW89_KCC][1][46] = 40,
[1][0][2][0][RTW89_KCC][0][46] = 127,
[1][0][2][0][RTW89_ACMA][1][46] = 127,
@@ -44748,13 +43594,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][46] = 127,
[1][0][2][0][RTW89_THAILAND][0][46] = 127,
[1][0][2][0][RTW89_FCC][1][50] = 34,
- [1][0][2][0][RTW89_FCC][2][50] = 127,
[1][0][2][0][RTW89_ETSI][1][50] = 127,
[1][0][2][0][RTW89_ETSI][0][50] = 127,
[1][0][2][0][RTW89_MKK][1][50] = 127,
[1][0][2][0][RTW89_MKK][0][50] = 127,
[1][0][2][0][RTW89_IC][1][50] = 34,
- [1][0][2][0][RTW89_IC][2][50] = 68,
[1][0][2][0][RTW89_KCC][1][50] = 40,
[1][0][2][0][RTW89_KCC][0][50] = 127,
[1][0][2][0][RTW89_ACMA][1][50] = 127,
@@ -44767,13 +43611,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][50] = 127,
[1][0][2][0][RTW89_THAILAND][0][50] = 127,
[1][0][2][0][RTW89_FCC][1][54] = 36,
- [1][0][2][0][RTW89_FCC][2][54] = 127,
[1][0][2][0][RTW89_ETSI][1][54] = 127,
[1][0][2][0][RTW89_ETSI][0][54] = 127,
[1][0][2][0][RTW89_MKK][1][54] = 127,
[1][0][2][0][RTW89_MKK][0][54] = 127,
[1][0][2][0][RTW89_IC][1][54] = 36,
- [1][0][2][0][RTW89_IC][2][54] = 127,
[1][0][2][0][RTW89_KCC][1][54] = 40,
[1][0][2][0][RTW89_KCC][0][54] = 127,
[1][0][2][0][RTW89_ACMA][1][54] = 127,
@@ -44786,13 +43628,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][54] = 127,
[1][0][2][0][RTW89_THAILAND][0][54] = 127,
[1][0][2][0][RTW89_FCC][1][58] = 36,
- [1][0][2][0][RTW89_FCC][2][58] = 66,
[1][0][2][0][RTW89_ETSI][1][58] = 127,
[1][0][2][0][RTW89_ETSI][0][58] = 127,
[1][0][2][0][RTW89_MKK][1][58] = 127,
[1][0][2][0][RTW89_MKK][0][58] = 127,
[1][0][2][0][RTW89_IC][1][58] = 36,
- [1][0][2][0][RTW89_IC][2][58] = 66,
[1][0][2][0][RTW89_KCC][1][58] = 40,
[1][0][2][0][RTW89_KCC][0][58] = 127,
[1][0][2][0][RTW89_ACMA][1][58] = 127,
@@ -44805,13 +43645,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][58] = 127,
[1][0][2][0][RTW89_THAILAND][0][58] = 127,
[1][0][2][0][RTW89_FCC][1][61] = 34,
- [1][0][2][0][RTW89_FCC][2][61] = 66,
[1][0][2][0][RTW89_ETSI][1][61] = 127,
[1][0][2][0][RTW89_ETSI][0][61] = 127,
[1][0][2][0][RTW89_MKK][1][61] = 127,
[1][0][2][0][RTW89_MKK][0][61] = 127,
[1][0][2][0][RTW89_IC][1][61] = 34,
- [1][0][2][0][RTW89_IC][2][61] = 66,
[1][0][2][0][RTW89_KCC][1][61] = 40,
[1][0][2][0][RTW89_KCC][0][61] = 127,
[1][0][2][0][RTW89_ACMA][1][61] = 127,
@@ -44824,13 +43662,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][61] = 127,
[1][0][2][0][RTW89_THAILAND][0][61] = 127,
[1][0][2][0][RTW89_FCC][1][65] = 34,
- [1][0][2][0][RTW89_FCC][2][65] = 66,
[1][0][2][0][RTW89_ETSI][1][65] = 127,
[1][0][2][0][RTW89_ETSI][0][65] = 127,
[1][0][2][0][RTW89_MKK][1][65] = 127,
[1][0][2][0][RTW89_MKK][0][65] = 127,
[1][0][2][0][RTW89_IC][1][65] = 34,
- [1][0][2][0][RTW89_IC][2][65] = 66,
[1][0][2][0][RTW89_KCC][1][65] = 40,
[1][0][2][0][RTW89_KCC][0][65] = 127,
[1][0][2][0][RTW89_ACMA][1][65] = 127,
@@ -44843,13 +43679,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][65] = 127,
[1][0][2][0][RTW89_THAILAND][0][65] = 127,
[1][0][2][0][RTW89_FCC][1][69] = 34,
- [1][0][2][0][RTW89_FCC][2][69] = 66,
[1][0][2][0][RTW89_ETSI][1][69] = 127,
[1][0][2][0][RTW89_ETSI][0][69] = 127,
[1][0][2][0][RTW89_MKK][1][69] = 127,
[1][0][2][0][RTW89_MKK][0][69] = 127,
[1][0][2][0][RTW89_IC][1][69] = 34,
- [1][0][2][0][RTW89_IC][2][69] = 66,
[1][0][2][0][RTW89_KCC][1][69] = 40,
[1][0][2][0][RTW89_KCC][0][69] = 127,
[1][0][2][0][RTW89_ACMA][1][69] = 127,
@@ -44862,13 +43696,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][69] = 127,
[1][0][2][0][RTW89_THAILAND][0][69] = 127,
[1][0][2][0][RTW89_FCC][1][73] = 34,
- [1][0][2][0][RTW89_FCC][2][73] = 66,
[1][0][2][0][RTW89_ETSI][1][73] = 127,
[1][0][2][0][RTW89_ETSI][0][73] = 127,
[1][0][2][0][RTW89_MKK][1][73] = 127,
[1][0][2][0][RTW89_MKK][0][73] = 127,
[1][0][2][0][RTW89_IC][1][73] = 34,
- [1][0][2][0][RTW89_IC][2][73] = 66,
[1][0][2][0][RTW89_KCC][1][73] = 40,
[1][0][2][0][RTW89_KCC][0][73] = 127,
[1][0][2][0][RTW89_ACMA][1][73] = 127,
@@ -44881,13 +43713,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][73] = 127,
[1][0][2][0][RTW89_THAILAND][0][73] = 127,
[1][0][2][0][RTW89_FCC][1][76] = 34,
- [1][0][2][0][RTW89_FCC][2][76] = 66,
[1][0][2][0][RTW89_ETSI][1][76] = 127,
[1][0][2][0][RTW89_ETSI][0][76] = 127,
[1][0][2][0][RTW89_MKK][1][76] = 127,
[1][0][2][0][RTW89_MKK][0][76] = 127,
[1][0][2][0][RTW89_IC][1][76] = 34,
- [1][0][2][0][RTW89_IC][2][76] = 66,
[1][0][2][0][RTW89_KCC][1][76] = 40,
[1][0][2][0][RTW89_KCC][0][76] = 127,
[1][0][2][0][RTW89_ACMA][1][76] = 127,
@@ -44900,13 +43730,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][76] = 127,
[1][0][2][0][RTW89_THAILAND][0][76] = 127,
[1][0][2][0][RTW89_FCC][1][80] = 34,
- [1][0][2][0][RTW89_FCC][2][80] = 66,
[1][0][2][0][RTW89_ETSI][1][80] = 127,
[1][0][2][0][RTW89_ETSI][0][80] = 127,
[1][0][2][0][RTW89_MKK][1][80] = 127,
[1][0][2][0][RTW89_MKK][0][80] = 127,
[1][0][2][0][RTW89_IC][1][80] = 34,
- [1][0][2][0][RTW89_IC][2][80] = 66,
[1][0][2][0][RTW89_KCC][1][80] = 42,
[1][0][2][0][RTW89_KCC][0][80] = 127,
[1][0][2][0][RTW89_ACMA][1][80] = 127,
@@ -44919,13 +43747,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][80] = 127,
[1][0][2][0][RTW89_THAILAND][0][80] = 127,
[1][0][2][0][RTW89_FCC][1][84] = 34,
- [1][0][2][0][RTW89_FCC][2][84] = 66,
[1][0][2][0][RTW89_ETSI][1][84] = 127,
[1][0][2][0][RTW89_ETSI][0][84] = 127,
[1][0][2][0][RTW89_MKK][1][84] = 127,
[1][0][2][0][RTW89_MKK][0][84] = 127,
[1][0][2][0][RTW89_IC][1][84] = 34,
- [1][0][2][0][RTW89_IC][2][84] = 66,
[1][0][2][0][RTW89_KCC][1][84] = 42,
[1][0][2][0][RTW89_KCC][0][84] = 127,
[1][0][2][0][RTW89_ACMA][1][84] = 127,
@@ -44938,13 +43764,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][84] = 127,
[1][0][2][0][RTW89_THAILAND][0][84] = 127,
[1][0][2][0][RTW89_FCC][1][88] = 34,
- [1][0][2][0][RTW89_FCC][2][88] = 127,
[1][0][2][0][RTW89_ETSI][1][88] = 127,
[1][0][2][0][RTW89_ETSI][0][88] = 127,
[1][0][2][0][RTW89_MKK][1][88] = 127,
[1][0][2][0][RTW89_MKK][0][88] = 127,
[1][0][2][0][RTW89_IC][1][88] = 34,
- [1][0][2][0][RTW89_IC][2][88] = 127,
[1][0][2][0][RTW89_KCC][1][88] = 42,
[1][0][2][0][RTW89_KCC][0][88] = 127,
[1][0][2][0][RTW89_ACMA][1][88] = 127,
@@ -44957,13 +43781,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][88] = 127,
[1][0][2][0][RTW89_THAILAND][0][88] = 127,
[1][0][2][0][RTW89_FCC][1][91] = 36,
- [1][0][2][0][RTW89_FCC][2][91] = 127,
[1][0][2][0][RTW89_ETSI][1][91] = 127,
[1][0][2][0][RTW89_ETSI][0][91] = 127,
[1][0][2][0][RTW89_MKK][1][91] = 127,
[1][0][2][0][RTW89_MKK][0][91] = 127,
[1][0][2][0][RTW89_IC][1][91] = 36,
- [1][0][2][0][RTW89_IC][2][91] = 127,
[1][0][2][0][RTW89_KCC][1][91] = 42,
[1][0][2][0][RTW89_KCC][0][91] = 127,
[1][0][2][0][RTW89_ACMA][1][91] = 127,
@@ -44976,13 +43798,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][91] = 127,
[1][0][2][0][RTW89_THAILAND][0][91] = 127,
[1][0][2][0][RTW89_FCC][1][95] = 34,
- [1][0][2][0][RTW89_FCC][2][95] = 127,
[1][0][2][0][RTW89_ETSI][1][95] = 127,
[1][0][2][0][RTW89_ETSI][0][95] = 127,
[1][0][2][0][RTW89_MKK][1][95] = 127,
[1][0][2][0][RTW89_MKK][0][95] = 127,
[1][0][2][0][RTW89_IC][1][95] = 34,
- [1][0][2][0][RTW89_IC][2][95] = 127,
[1][0][2][0][RTW89_KCC][1][95] = 42,
[1][0][2][0][RTW89_KCC][0][95] = 127,
[1][0][2][0][RTW89_ACMA][1][95] = 127,
@@ -44995,13 +43815,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][95] = 127,
[1][0][2][0][RTW89_THAILAND][0][95] = 127,
[1][0][2][0][RTW89_FCC][1][99] = 34,
- [1][0][2][0][RTW89_FCC][2][99] = 127,
[1][0][2][0][RTW89_ETSI][1][99] = 127,
[1][0][2][0][RTW89_ETSI][0][99] = 127,
[1][0][2][0][RTW89_MKK][1][99] = 127,
[1][0][2][0][RTW89_MKK][0][99] = 127,
[1][0][2][0][RTW89_IC][1][99] = 34,
- [1][0][2][0][RTW89_IC][2][99] = 127,
[1][0][2][0][RTW89_KCC][1][99] = 42,
[1][0][2][0][RTW89_KCC][0][99] = 127,
[1][0][2][0][RTW89_ACMA][1][99] = 127,
@@ -45014,13 +43832,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][99] = 127,
[1][0][2][0][RTW89_THAILAND][0][99] = 127,
[1][0][2][0][RTW89_FCC][1][103] = 34,
- [1][0][2][0][RTW89_FCC][2][103] = 127,
[1][0][2][0][RTW89_ETSI][1][103] = 127,
[1][0][2][0][RTW89_ETSI][0][103] = 127,
[1][0][2][0][RTW89_MKK][1][103] = 127,
[1][0][2][0][RTW89_MKK][0][103] = 127,
[1][0][2][0][RTW89_IC][1][103] = 34,
- [1][0][2][0][RTW89_IC][2][103] = 127,
[1][0][2][0][RTW89_KCC][1][103] = 42,
[1][0][2][0][RTW89_KCC][0][103] = 127,
[1][0][2][0][RTW89_ACMA][1][103] = 127,
@@ -45033,13 +43849,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][103] = 127,
[1][0][2][0][RTW89_THAILAND][0][103] = 127,
[1][0][2][0][RTW89_FCC][1][106] = 36,
- [1][0][2][0][RTW89_FCC][2][106] = 127,
[1][0][2][0][RTW89_ETSI][1][106] = 127,
[1][0][2][0][RTW89_ETSI][0][106] = 127,
[1][0][2][0][RTW89_MKK][1][106] = 127,
[1][0][2][0][RTW89_MKK][0][106] = 127,
[1][0][2][0][RTW89_IC][1][106] = 36,
- [1][0][2][0][RTW89_IC][2][106] = 127,
[1][0][2][0][RTW89_KCC][1][106] = 42,
[1][0][2][0][RTW89_KCC][0][106] = 127,
[1][0][2][0][RTW89_ACMA][1][106] = 127,
@@ -45052,13 +43866,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][106] = 127,
[1][0][2][0][RTW89_THAILAND][0][106] = 127,
[1][0][2][0][RTW89_FCC][1][110] = 127,
- [1][0][2][0][RTW89_FCC][2][110] = 127,
[1][0][2][0][RTW89_ETSI][1][110] = 127,
[1][0][2][0][RTW89_ETSI][0][110] = 127,
[1][0][2][0][RTW89_MKK][1][110] = 127,
[1][0][2][0][RTW89_MKK][0][110] = 127,
[1][0][2][0][RTW89_IC][1][110] = 127,
- [1][0][2][0][RTW89_IC][2][110] = 127,
[1][0][2][0][RTW89_KCC][1][110] = 127,
[1][0][2][0][RTW89_KCC][0][110] = 127,
[1][0][2][0][RTW89_ACMA][1][110] = 127,
@@ -45071,13 +43883,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][110] = 127,
[1][0][2][0][RTW89_THAILAND][0][110] = 127,
[1][0][2][0][RTW89_FCC][1][114] = 127,
- [1][0][2][0][RTW89_FCC][2][114] = 127,
[1][0][2][0][RTW89_ETSI][1][114] = 127,
[1][0][2][0][RTW89_ETSI][0][114] = 127,
[1][0][2][0][RTW89_MKK][1][114] = 127,
[1][0][2][0][RTW89_MKK][0][114] = 127,
[1][0][2][0][RTW89_IC][1][114] = 127,
- [1][0][2][0][RTW89_IC][2][114] = 127,
[1][0][2][0][RTW89_KCC][1][114] = 127,
[1][0][2][0][RTW89_KCC][0][114] = 127,
[1][0][2][0][RTW89_ACMA][1][114] = 127,
@@ -45090,13 +43900,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][114] = 127,
[1][0][2][0][RTW89_THAILAND][0][114] = 127,
[1][0][2][0][RTW89_FCC][1][118] = 127,
- [1][0][2][0][RTW89_FCC][2][118] = 127,
[1][0][2][0][RTW89_ETSI][1][118] = 127,
[1][0][2][0][RTW89_ETSI][0][118] = 127,
[1][0][2][0][RTW89_MKK][1][118] = 127,
[1][0][2][0][RTW89_MKK][0][118] = 127,
[1][0][2][0][RTW89_IC][1][118] = 127,
- [1][0][2][0][RTW89_IC][2][118] = 127,
[1][0][2][0][RTW89_KCC][1][118] = 127,
[1][0][2][0][RTW89_KCC][0][118] = 127,
[1][0][2][0][RTW89_ACMA][1][118] = 127,
@@ -45109,13 +43917,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][0][2][0][RTW89_THAILAND][1][118] = 127,
[1][0][2][0][RTW89_THAILAND][0][118] = 127,
[1][1][2][0][RTW89_FCC][1][1] = 10,
- [1][1][2][0][RTW89_FCC][2][1] = 58,
[1][1][2][0][RTW89_ETSI][1][1] = 54,
[1][1][2][0][RTW89_ETSI][0][1] = 18,
[1][1][2][0][RTW89_MKK][1][1] = 52,
[1][1][2][0][RTW89_MKK][0][1] = 12,
[1][1][2][0][RTW89_IC][1][1] = 10,
- [1][1][2][0][RTW89_IC][2][1] = 58,
[1][1][2][0][RTW89_KCC][1][1] = 28,
[1][1][2][0][RTW89_KCC][0][1] = 12,
[1][1][2][0][RTW89_ACMA][1][1] = 54,
@@ -45128,13 +43934,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][1] = 46,
[1][1][2][0][RTW89_THAILAND][0][1] = 10,
[1][1][2][0][RTW89_FCC][1][5] = 10,
- [1][1][2][0][RTW89_FCC][2][5] = 58,
[1][1][2][0][RTW89_ETSI][1][5] = 54,
[1][1][2][0][RTW89_ETSI][0][5] = 16,
[1][1][2][0][RTW89_MKK][1][5] = 52,
[1][1][2][0][RTW89_MKK][0][5] = 12,
[1][1][2][0][RTW89_IC][1][5] = 10,
- [1][1][2][0][RTW89_IC][2][5] = 58,
[1][1][2][0][RTW89_KCC][1][5] = 28,
[1][1][2][0][RTW89_KCC][0][5] = 12,
[1][1][2][0][RTW89_ACMA][1][5] = 54,
@@ -45147,13 +43951,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][5] = 46,
[1][1][2][0][RTW89_THAILAND][0][5] = 10,
[1][1][2][0][RTW89_FCC][1][9] = 10,
- [1][1][2][0][RTW89_FCC][2][9] = 58,
[1][1][2][0][RTW89_ETSI][1][9] = 54,
[1][1][2][0][RTW89_ETSI][0][9] = 16,
[1][1][2][0][RTW89_MKK][1][9] = 52,
[1][1][2][0][RTW89_MKK][0][9] = 12,
[1][1][2][0][RTW89_IC][1][9] = 10,
- [1][1][2][0][RTW89_IC][2][9] = 58,
[1][1][2][0][RTW89_KCC][1][9] = 28,
[1][1][2][0][RTW89_KCC][0][9] = 12,
[1][1][2][0][RTW89_ACMA][1][9] = 54,
@@ -45166,13 +43968,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][9] = 46,
[1][1][2][0][RTW89_THAILAND][0][9] = 10,
[1][1][2][0][RTW89_FCC][1][13] = 10,
- [1][1][2][0][RTW89_FCC][2][13] = 58,
[1][1][2][0][RTW89_ETSI][1][13] = 54,
[1][1][2][0][RTW89_ETSI][0][13] = 16,
[1][1][2][0][RTW89_MKK][1][13] = 52,
[1][1][2][0][RTW89_MKK][0][13] = 12,
[1][1][2][0][RTW89_IC][1][13] = 10,
- [1][1][2][0][RTW89_IC][2][13] = 58,
[1][1][2][0][RTW89_KCC][1][13] = 28,
[1][1][2][0][RTW89_KCC][0][13] = 12,
[1][1][2][0][RTW89_ACMA][1][13] = 54,
@@ -45185,13 +43985,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][13] = 46,
[1][1][2][0][RTW89_THAILAND][0][13] = 10,
[1][1][2][0][RTW89_FCC][1][16] = 10,
- [1][1][2][0][RTW89_FCC][2][16] = 58,
[1][1][2][0][RTW89_ETSI][1][16] = 54,
[1][1][2][0][RTW89_ETSI][0][16] = 16,
[1][1][2][0][RTW89_MKK][1][16] = 52,
[1][1][2][0][RTW89_MKK][0][16] = 12,
[1][1][2][0][RTW89_IC][1][16] = 10,
- [1][1][2][0][RTW89_IC][2][16] = 58,
[1][1][2][0][RTW89_KCC][1][16] = 28,
[1][1][2][0][RTW89_KCC][0][16] = 12,
[1][1][2][0][RTW89_ACMA][1][16] = 54,
@@ -45204,13 +44002,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][16] = 46,
[1][1][2][0][RTW89_THAILAND][0][16] = 10,
[1][1][2][0][RTW89_FCC][1][20] = 10,
- [1][1][2][0][RTW89_FCC][2][20] = 58,
[1][1][2][0][RTW89_ETSI][1][20] = 54,
[1][1][2][0][RTW89_ETSI][0][20] = 16,
[1][1][2][0][RTW89_MKK][1][20] = 52,
[1][1][2][0][RTW89_MKK][0][20] = 12,
[1][1][2][0][RTW89_IC][1][20] = 10,
- [1][1][2][0][RTW89_IC][2][20] = 58,
[1][1][2][0][RTW89_KCC][1][20] = 28,
[1][1][2][0][RTW89_KCC][0][20] = 12,
[1][1][2][0][RTW89_ACMA][1][20] = 54,
@@ -45223,13 +44019,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][20] = 46,
[1][1][2][0][RTW89_THAILAND][0][20] = 10,
[1][1][2][0][RTW89_FCC][1][24] = 10,
- [1][1][2][0][RTW89_FCC][2][24] = 70,
[1][1][2][0][RTW89_ETSI][1][24] = 54,
[1][1][2][0][RTW89_ETSI][0][24] = 16,
[1][1][2][0][RTW89_MKK][1][24] = 54,
[1][1][2][0][RTW89_MKK][0][24] = 14,
[1][1][2][0][RTW89_IC][1][24] = 10,
- [1][1][2][0][RTW89_IC][2][24] = 70,
[1][1][2][0][RTW89_KCC][1][24] = 28,
[1][1][2][0][RTW89_KCC][0][24] = 12,
[1][1][2][0][RTW89_ACMA][1][24] = 54,
@@ -45242,13 +44036,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][24] = 46,
[1][1][2][0][RTW89_THAILAND][0][24] = 10,
[1][1][2][0][RTW89_FCC][1][28] = 10,
- [1][1][2][0][RTW89_FCC][2][28] = 70,
[1][1][2][0][RTW89_ETSI][1][28] = 54,
[1][1][2][0][RTW89_ETSI][0][28] = 16,
[1][1][2][0][RTW89_MKK][1][28] = 52,
[1][1][2][0][RTW89_MKK][0][28] = 14,
[1][1][2][0][RTW89_IC][1][28] = 10,
- [1][1][2][0][RTW89_IC][2][28] = 70,
[1][1][2][0][RTW89_KCC][1][28] = 28,
[1][1][2][0][RTW89_KCC][0][28] = 14,
[1][1][2][0][RTW89_ACMA][1][28] = 54,
@@ -45261,13 +44053,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][28] = 46,
[1][1][2][0][RTW89_THAILAND][0][28] = 10,
[1][1][2][0][RTW89_FCC][1][31] = 10,
- [1][1][2][0][RTW89_FCC][2][31] = 70,
[1][1][2][0][RTW89_ETSI][1][31] = 54,
[1][1][2][0][RTW89_ETSI][0][31] = 16,
[1][1][2][0][RTW89_MKK][1][31] = 52,
[1][1][2][0][RTW89_MKK][0][31] = 14,
[1][1][2][0][RTW89_IC][1][31] = 10,
- [1][1][2][0][RTW89_IC][2][31] = 70,
[1][1][2][0][RTW89_KCC][1][31] = 28,
[1][1][2][0][RTW89_KCC][0][31] = 14,
[1][1][2][0][RTW89_ACMA][1][31] = 54,
@@ -45280,13 +44070,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][31] = 46,
[1][1][2][0][RTW89_THAILAND][0][31] = 10,
[1][1][2][0][RTW89_FCC][1][35] = 10,
- [1][1][2][0][RTW89_FCC][2][35] = 70,
[1][1][2][0][RTW89_ETSI][1][35] = 54,
[1][1][2][0][RTW89_ETSI][0][35] = 16,
[1][1][2][0][RTW89_MKK][1][35] = 52,
[1][1][2][0][RTW89_MKK][0][35] = 14,
[1][1][2][0][RTW89_IC][1][35] = 10,
- [1][1][2][0][RTW89_IC][2][35] = 70,
[1][1][2][0][RTW89_KCC][1][35] = 28,
[1][1][2][0][RTW89_KCC][0][35] = 14,
[1][1][2][0][RTW89_ACMA][1][35] = 54,
@@ -45299,13 +44087,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][35] = 46,
[1][1][2][0][RTW89_THAILAND][0][35] = 10,
[1][1][2][0][RTW89_FCC][1][39] = 10,
- [1][1][2][0][RTW89_FCC][2][39] = 70,
[1][1][2][0][RTW89_ETSI][1][39] = 54,
[1][1][2][0][RTW89_ETSI][0][39] = 16,
[1][1][2][0][RTW89_MKK][1][39] = 52,
[1][1][2][0][RTW89_MKK][0][39] = 14,
[1][1][2][0][RTW89_IC][1][39] = 10,
- [1][1][2][0][RTW89_IC][2][39] = 70,
[1][1][2][0][RTW89_KCC][1][39] = 28,
[1][1][2][0][RTW89_KCC][0][39] = 14,
[1][1][2][0][RTW89_ACMA][1][39] = 54,
@@ -45318,13 +44104,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][39] = 46,
[1][1][2][0][RTW89_THAILAND][0][39] = 10,
[1][1][2][0][RTW89_FCC][1][43] = 10,
- [1][1][2][0][RTW89_FCC][2][43] = 70,
[1][1][2][0][RTW89_ETSI][1][43] = 54,
[1][1][2][0][RTW89_ETSI][0][43] = 16,
[1][1][2][0][RTW89_MKK][1][43] = 52,
[1][1][2][0][RTW89_MKK][0][43] = 14,
[1][1][2][0][RTW89_IC][1][43] = 10,
- [1][1][2][0][RTW89_IC][2][43] = 70,
[1][1][2][0][RTW89_KCC][1][43] = 28,
[1][1][2][0][RTW89_KCC][0][43] = 14,
[1][1][2][0][RTW89_ACMA][1][43] = 54,
@@ -45337,13 +44121,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][43] = 46,
[1][1][2][0][RTW89_THAILAND][0][43] = 10,
[1][1][2][0][RTW89_FCC][1][46] = 12,
- [1][1][2][0][RTW89_FCC][2][46] = 127,
[1][1][2][0][RTW89_ETSI][1][46] = 127,
[1][1][2][0][RTW89_ETSI][0][46] = 127,
[1][1][2][0][RTW89_MKK][1][46] = 127,
[1][1][2][0][RTW89_MKK][0][46] = 127,
[1][1][2][0][RTW89_IC][1][46] = 12,
- [1][1][2][0][RTW89_IC][2][46] = 68,
[1][1][2][0][RTW89_KCC][1][46] = 28,
[1][1][2][0][RTW89_KCC][0][46] = 127,
[1][1][2][0][RTW89_ACMA][1][46] = 127,
@@ -45356,13 +44138,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][46] = 127,
[1][1][2][0][RTW89_THAILAND][0][46] = 127,
[1][1][2][0][RTW89_FCC][1][50] = 12,
- [1][1][2][0][RTW89_FCC][2][50] = 127,
[1][1][2][0][RTW89_ETSI][1][50] = 127,
[1][1][2][0][RTW89_ETSI][0][50] = 127,
[1][1][2][0][RTW89_MKK][1][50] = 127,
[1][1][2][0][RTW89_MKK][0][50] = 127,
[1][1][2][0][RTW89_IC][1][50] = 12,
- [1][1][2][0][RTW89_IC][2][50] = 68,
[1][1][2][0][RTW89_KCC][1][50] = 28,
[1][1][2][0][RTW89_KCC][0][50] = 127,
[1][1][2][0][RTW89_ACMA][1][50] = 127,
@@ -45375,13 +44155,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][50] = 127,
[1][1][2][0][RTW89_THAILAND][0][50] = 127,
[1][1][2][0][RTW89_FCC][1][54] = 10,
- [1][1][2][0][RTW89_FCC][2][54] = 127,
[1][1][2][0][RTW89_ETSI][1][54] = 127,
[1][1][2][0][RTW89_ETSI][0][54] = 127,
[1][1][2][0][RTW89_MKK][1][54] = 127,
[1][1][2][0][RTW89_MKK][0][54] = 127,
[1][1][2][0][RTW89_IC][1][54] = 10,
- [1][1][2][0][RTW89_IC][2][54] = 127,
[1][1][2][0][RTW89_KCC][1][54] = 28,
[1][1][2][0][RTW89_KCC][0][54] = 127,
[1][1][2][0][RTW89_ACMA][1][54] = 127,
@@ -45394,13 +44172,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][54] = 127,
[1][1][2][0][RTW89_THAILAND][0][54] = 127,
[1][1][2][0][RTW89_FCC][1][58] = 10,
- [1][1][2][0][RTW89_FCC][2][58] = 66,
[1][1][2][0][RTW89_ETSI][1][58] = 127,
[1][1][2][0][RTW89_ETSI][0][58] = 127,
[1][1][2][0][RTW89_MKK][1][58] = 127,
[1][1][2][0][RTW89_MKK][0][58] = 127,
[1][1][2][0][RTW89_IC][1][58] = 10,
- [1][1][2][0][RTW89_IC][2][58] = 66,
[1][1][2][0][RTW89_KCC][1][58] = 28,
[1][1][2][0][RTW89_KCC][0][58] = 127,
[1][1][2][0][RTW89_ACMA][1][58] = 127,
@@ -45413,13 +44189,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][58] = 127,
[1][1][2][0][RTW89_THAILAND][0][58] = 127,
[1][1][2][0][RTW89_FCC][1][61] = 10,
- [1][1][2][0][RTW89_FCC][2][61] = 66,
[1][1][2][0][RTW89_ETSI][1][61] = 127,
[1][1][2][0][RTW89_ETSI][0][61] = 127,
[1][1][2][0][RTW89_MKK][1][61] = 127,
[1][1][2][0][RTW89_MKK][0][61] = 127,
[1][1][2][0][RTW89_IC][1][61] = 10,
- [1][1][2][0][RTW89_IC][2][61] = 66,
[1][1][2][0][RTW89_KCC][1][61] = 28,
[1][1][2][0][RTW89_KCC][0][61] = 127,
[1][1][2][0][RTW89_ACMA][1][61] = 127,
@@ -45432,13 +44206,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][61] = 127,
[1][1][2][0][RTW89_THAILAND][0][61] = 127,
[1][1][2][0][RTW89_FCC][1][65] = 10,
- [1][1][2][0][RTW89_FCC][2][65] = 66,
[1][1][2][0][RTW89_ETSI][1][65] = 127,
[1][1][2][0][RTW89_ETSI][0][65] = 127,
[1][1][2][0][RTW89_MKK][1][65] = 127,
[1][1][2][0][RTW89_MKK][0][65] = 127,
[1][1][2][0][RTW89_IC][1][65] = 10,
- [1][1][2][0][RTW89_IC][2][65] = 66,
[1][1][2][0][RTW89_KCC][1][65] = 28,
[1][1][2][0][RTW89_KCC][0][65] = 127,
[1][1][2][0][RTW89_ACMA][1][65] = 127,
@@ -45451,13 +44223,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][65] = 127,
[1][1][2][0][RTW89_THAILAND][0][65] = 127,
[1][1][2][0][RTW89_FCC][1][69] = 10,
- [1][1][2][0][RTW89_FCC][2][69] = 66,
[1][1][2][0][RTW89_ETSI][1][69] = 127,
[1][1][2][0][RTW89_ETSI][0][69] = 127,
[1][1][2][0][RTW89_MKK][1][69] = 127,
[1][1][2][0][RTW89_MKK][0][69] = 127,
[1][1][2][0][RTW89_IC][1][69] = 10,
- [1][1][2][0][RTW89_IC][2][69] = 66,
[1][1][2][0][RTW89_KCC][1][69] = 28,
[1][1][2][0][RTW89_KCC][0][69] = 127,
[1][1][2][0][RTW89_ACMA][1][69] = 127,
@@ -45470,13 +44240,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][69] = 127,
[1][1][2][0][RTW89_THAILAND][0][69] = 127,
[1][1][2][0][RTW89_FCC][1][73] = 10,
- [1][1][2][0][RTW89_FCC][2][73] = 66,
[1][1][2][0][RTW89_ETSI][1][73] = 127,
[1][1][2][0][RTW89_ETSI][0][73] = 127,
[1][1][2][0][RTW89_MKK][1][73] = 127,
[1][1][2][0][RTW89_MKK][0][73] = 127,
[1][1][2][0][RTW89_IC][1][73] = 10,
- [1][1][2][0][RTW89_IC][2][73] = 66,
[1][1][2][0][RTW89_KCC][1][73] = 28,
[1][1][2][0][RTW89_KCC][0][73] = 127,
[1][1][2][0][RTW89_ACMA][1][73] = 127,
@@ -45489,13 +44257,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][73] = 127,
[1][1][2][0][RTW89_THAILAND][0][73] = 127,
[1][1][2][0][RTW89_FCC][1][76] = 10,
- [1][1][2][0][RTW89_FCC][2][76] = 66,
[1][1][2][0][RTW89_ETSI][1][76] = 127,
[1][1][2][0][RTW89_ETSI][0][76] = 127,
[1][1][2][0][RTW89_MKK][1][76] = 127,
[1][1][2][0][RTW89_MKK][0][76] = 127,
[1][1][2][0][RTW89_IC][1][76] = 10,
- [1][1][2][0][RTW89_IC][2][76] = 66,
[1][1][2][0][RTW89_KCC][1][76] = 28,
[1][1][2][0][RTW89_KCC][0][76] = 127,
[1][1][2][0][RTW89_ACMA][1][76] = 127,
@@ -45508,13 +44274,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][76] = 127,
[1][1][2][0][RTW89_THAILAND][0][76] = 127,
[1][1][2][0][RTW89_FCC][1][80] = 10,
- [1][1][2][0][RTW89_FCC][2][80] = 66,
[1][1][2][0][RTW89_ETSI][1][80] = 127,
[1][1][2][0][RTW89_ETSI][0][80] = 127,
[1][1][2][0][RTW89_MKK][1][80] = 127,
[1][1][2][0][RTW89_MKK][0][80] = 127,
[1][1][2][0][RTW89_IC][1][80] = 10,
- [1][1][2][0][RTW89_IC][2][80] = 66,
[1][1][2][0][RTW89_KCC][1][80] = 32,
[1][1][2][0][RTW89_KCC][0][80] = 127,
[1][1][2][0][RTW89_ACMA][1][80] = 127,
@@ -45527,13 +44291,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][80] = 127,
[1][1][2][0][RTW89_THAILAND][0][80] = 127,
[1][1][2][0][RTW89_FCC][1][84] = 10,
- [1][1][2][0][RTW89_FCC][2][84] = 66,
[1][1][2][0][RTW89_ETSI][1][84] = 127,
[1][1][2][0][RTW89_ETSI][0][84] = 127,
[1][1][2][0][RTW89_MKK][1][84] = 127,
[1][1][2][0][RTW89_MKK][0][84] = 127,
[1][1][2][0][RTW89_IC][1][84] = 10,
- [1][1][2][0][RTW89_IC][2][84] = 66,
[1][1][2][0][RTW89_KCC][1][84] = 32,
[1][1][2][0][RTW89_KCC][0][84] = 127,
[1][1][2][0][RTW89_ACMA][1][84] = 127,
@@ -45546,13 +44308,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][84] = 127,
[1][1][2][0][RTW89_THAILAND][0][84] = 127,
[1][1][2][0][RTW89_FCC][1][88] = 10,
- [1][1][2][0][RTW89_FCC][2][88] = 127,
[1][1][2][0][RTW89_ETSI][1][88] = 127,
[1][1][2][0][RTW89_ETSI][0][88] = 127,
[1][1][2][0][RTW89_MKK][1][88] = 127,
[1][1][2][0][RTW89_MKK][0][88] = 127,
[1][1][2][0][RTW89_IC][1][88] = 10,
- [1][1][2][0][RTW89_IC][2][88] = 127,
[1][1][2][0][RTW89_KCC][1][88] = 32,
[1][1][2][0][RTW89_KCC][0][88] = 127,
[1][1][2][0][RTW89_ACMA][1][88] = 127,
@@ -45565,13 +44325,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][88] = 127,
[1][1][2][0][RTW89_THAILAND][0][88] = 127,
[1][1][2][0][RTW89_FCC][1][91] = 12,
- [1][1][2][0][RTW89_FCC][2][91] = 127,
[1][1][2][0][RTW89_ETSI][1][91] = 127,
[1][1][2][0][RTW89_ETSI][0][91] = 127,
[1][1][2][0][RTW89_MKK][1][91] = 127,
[1][1][2][0][RTW89_MKK][0][91] = 127,
[1][1][2][0][RTW89_IC][1][91] = 12,
- [1][1][2][0][RTW89_IC][2][91] = 127,
[1][1][2][0][RTW89_KCC][1][91] = 32,
[1][1][2][0][RTW89_KCC][0][91] = 127,
[1][1][2][0][RTW89_ACMA][1][91] = 127,
@@ -45584,13 +44342,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][91] = 127,
[1][1][2][0][RTW89_THAILAND][0][91] = 127,
[1][1][2][0][RTW89_FCC][1][95] = 10,
- [1][1][2][0][RTW89_FCC][2][95] = 127,
[1][1][2][0][RTW89_ETSI][1][95] = 127,
[1][1][2][0][RTW89_ETSI][0][95] = 127,
[1][1][2][0][RTW89_MKK][1][95] = 127,
[1][1][2][0][RTW89_MKK][0][95] = 127,
[1][1][2][0][RTW89_IC][1][95] = 10,
- [1][1][2][0][RTW89_IC][2][95] = 127,
[1][1][2][0][RTW89_KCC][1][95] = 32,
[1][1][2][0][RTW89_KCC][0][95] = 127,
[1][1][2][0][RTW89_ACMA][1][95] = 127,
@@ -45603,13 +44359,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][95] = 127,
[1][1][2][0][RTW89_THAILAND][0][95] = 127,
[1][1][2][0][RTW89_FCC][1][99] = 10,
- [1][1][2][0][RTW89_FCC][2][99] = 127,
[1][1][2][0][RTW89_ETSI][1][99] = 127,
[1][1][2][0][RTW89_ETSI][0][99] = 127,
[1][1][2][0][RTW89_MKK][1][99] = 127,
[1][1][2][0][RTW89_MKK][0][99] = 127,
[1][1][2][0][RTW89_IC][1][99] = 10,
- [1][1][2][0][RTW89_IC][2][99] = 127,
[1][1][2][0][RTW89_KCC][1][99] = 32,
[1][1][2][0][RTW89_KCC][0][99] = 127,
[1][1][2][0][RTW89_ACMA][1][99] = 127,
@@ -45622,13 +44376,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][99] = 127,
[1][1][2][0][RTW89_THAILAND][0][99] = 127,
[1][1][2][0][RTW89_FCC][1][103] = 10,
- [1][1][2][0][RTW89_FCC][2][103] = 127,
[1][1][2][0][RTW89_ETSI][1][103] = 127,
[1][1][2][0][RTW89_ETSI][0][103] = 127,
[1][1][2][0][RTW89_MKK][1][103] = 127,
[1][1][2][0][RTW89_MKK][0][103] = 127,
[1][1][2][0][RTW89_IC][1][103] = 10,
- [1][1][2][0][RTW89_IC][2][103] = 127,
[1][1][2][0][RTW89_KCC][1][103] = 32,
[1][1][2][0][RTW89_KCC][0][103] = 127,
[1][1][2][0][RTW89_ACMA][1][103] = 127,
@@ -45641,13 +44393,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][103] = 127,
[1][1][2][0][RTW89_THAILAND][0][103] = 127,
[1][1][2][0][RTW89_FCC][1][106] = 12,
- [1][1][2][0][RTW89_FCC][2][106] = 127,
[1][1][2][0][RTW89_ETSI][1][106] = 127,
[1][1][2][0][RTW89_ETSI][0][106] = 127,
[1][1][2][0][RTW89_MKK][1][106] = 127,
[1][1][2][0][RTW89_MKK][0][106] = 127,
[1][1][2][0][RTW89_IC][1][106] = 12,
- [1][1][2][0][RTW89_IC][2][106] = 127,
[1][1][2][0][RTW89_KCC][1][106] = 32,
[1][1][2][0][RTW89_KCC][0][106] = 127,
[1][1][2][0][RTW89_ACMA][1][106] = 127,
@@ -45660,13 +44410,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][106] = 127,
[1][1][2][0][RTW89_THAILAND][0][106] = 127,
[1][1][2][0][RTW89_FCC][1][110] = 127,
- [1][1][2][0][RTW89_FCC][2][110] = 127,
[1][1][2][0][RTW89_ETSI][1][110] = 127,
[1][1][2][0][RTW89_ETSI][0][110] = 127,
[1][1][2][0][RTW89_MKK][1][110] = 127,
[1][1][2][0][RTW89_MKK][0][110] = 127,
[1][1][2][0][RTW89_IC][1][110] = 127,
- [1][1][2][0][RTW89_IC][2][110] = 127,
[1][1][2][0][RTW89_KCC][1][110] = 127,
[1][1][2][0][RTW89_KCC][0][110] = 127,
[1][1][2][0][RTW89_ACMA][1][110] = 127,
@@ -45679,13 +44427,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][110] = 127,
[1][1][2][0][RTW89_THAILAND][0][110] = 127,
[1][1][2][0][RTW89_FCC][1][114] = 127,
- [1][1][2][0][RTW89_FCC][2][114] = 127,
[1][1][2][0][RTW89_ETSI][1][114] = 127,
[1][1][2][0][RTW89_ETSI][0][114] = 127,
[1][1][2][0][RTW89_MKK][1][114] = 127,
[1][1][2][0][RTW89_MKK][0][114] = 127,
[1][1][2][0][RTW89_IC][1][114] = 127,
- [1][1][2][0][RTW89_IC][2][114] = 127,
[1][1][2][0][RTW89_KCC][1][114] = 127,
[1][1][2][0][RTW89_KCC][0][114] = 127,
[1][1][2][0][RTW89_ACMA][1][114] = 127,
@@ -45698,13 +44444,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][114] = 127,
[1][1][2][0][RTW89_THAILAND][0][114] = 127,
[1][1][2][0][RTW89_FCC][1][118] = 127,
- [1][1][2][0][RTW89_FCC][2][118] = 127,
[1][1][2][0][RTW89_ETSI][1][118] = 127,
[1][1][2][0][RTW89_ETSI][0][118] = 127,
[1][1][2][0][RTW89_MKK][1][118] = 127,
[1][1][2][0][RTW89_MKK][0][118] = 127,
[1][1][2][0][RTW89_IC][1][118] = 127,
- [1][1][2][0][RTW89_IC][2][118] = 127,
[1][1][2][0][RTW89_KCC][1][118] = 127,
[1][1][2][0][RTW89_KCC][0][118] = 127,
[1][1][2][0][RTW89_ACMA][1][118] = 127,
@@ -45717,13 +44461,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][0][RTW89_THAILAND][1][118] = 127,
[1][1][2][0][RTW89_THAILAND][0][118] = 127,
[1][1][2][1][RTW89_FCC][1][1] = 10,
- [1][1][2][1][RTW89_FCC][2][1] = 58,
[1][1][2][1][RTW89_ETSI][1][1] = 42,
[1][1][2][1][RTW89_ETSI][0][1] = 6,
[1][1][2][1][RTW89_MKK][1][1] = 52,
[1][1][2][1][RTW89_MKK][0][1] = 12,
[1][1][2][1][RTW89_IC][1][1] = 10,
- [1][1][2][1][RTW89_IC][2][1] = 58,
[1][1][2][1][RTW89_KCC][1][1] = 28,
[1][1][2][1][RTW89_KCC][0][1] = 12,
[1][1][2][1][RTW89_ACMA][1][1] = 42,
@@ -45736,13 +44478,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][1] = 46,
[1][1][2][1][RTW89_THAILAND][0][1] = 6,
[1][1][2][1][RTW89_FCC][1][5] = 10,
- [1][1][2][1][RTW89_FCC][2][5] = 58,
[1][1][2][1][RTW89_ETSI][1][5] = 42,
[1][1][2][1][RTW89_ETSI][0][5] = 6,
[1][1][2][1][RTW89_MKK][1][5] = 52,
[1][1][2][1][RTW89_MKK][0][5] = 12,
[1][1][2][1][RTW89_IC][1][5] = 10,
- [1][1][2][1][RTW89_IC][2][5] = 58,
[1][1][2][1][RTW89_KCC][1][5] = 28,
[1][1][2][1][RTW89_KCC][0][5] = 12,
[1][1][2][1][RTW89_ACMA][1][5] = 42,
@@ -45755,13 +44495,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][5] = 46,
[1][1][2][1][RTW89_THAILAND][0][5] = 6,
[1][1][2][1][RTW89_FCC][1][9] = 10,
- [1][1][2][1][RTW89_FCC][2][9] = 58,
[1][1][2][1][RTW89_ETSI][1][9] = 42,
[1][1][2][1][RTW89_ETSI][0][9] = 6,
[1][1][2][1][RTW89_MKK][1][9] = 52,
[1][1][2][1][RTW89_MKK][0][9] = 12,
[1][1][2][1][RTW89_IC][1][9] = 10,
- [1][1][2][1][RTW89_IC][2][9] = 58,
[1][1][2][1][RTW89_KCC][1][9] = 28,
[1][1][2][1][RTW89_KCC][0][9] = 12,
[1][1][2][1][RTW89_ACMA][1][9] = 42,
@@ -45774,13 +44512,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][9] = 46,
[1][1][2][1][RTW89_THAILAND][0][9] = 6,
[1][1][2][1][RTW89_FCC][1][13] = 10,
- [1][1][2][1][RTW89_FCC][2][13] = 58,
[1][1][2][1][RTW89_ETSI][1][13] = 42,
[1][1][2][1][RTW89_ETSI][0][13] = 6,
[1][1][2][1][RTW89_MKK][1][13] = 52,
[1][1][2][1][RTW89_MKK][0][13] = 12,
[1][1][2][1][RTW89_IC][1][13] = 10,
- [1][1][2][1][RTW89_IC][2][13] = 58,
[1][1][2][1][RTW89_KCC][1][13] = 28,
[1][1][2][1][RTW89_KCC][0][13] = 12,
[1][1][2][1][RTW89_ACMA][1][13] = 42,
@@ -45793,13 +44529,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][13] = 46,
[1][1][2][1][RTW89_THAILAND][0][13] = 6,
[1][1][2][1][RTW89_FCC][1][16] = 10,
- [1][1][2][1][RTW89_FCC][2][16] = 58,
[1][1][2][1][RTW89_ETSI][1][16] = 42,
[1][1][2][1][RTW89_ETSI][0][16] = 6,
[1][1][2][1][RTW89_MKK][1][16] = 52,
[1][1][2][1][RTW89_MKK][0][16] = 12,
[1][1][2][1][RTW89_IC][1][16] = 10,
- [1][1][2][1][RTW89_IC][2][16] = 58,
[1][1][2][1][RTW89_KCC][1][16] = 28,
[1][1][2][1][RTW89_KCC][0][16] = 12,
[1][1][2][1][RTW89_ACMA][1][16] = 42,
@@ -45812,13 +44546,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][16] = 46,
[1][1][2][1][RTW89_THAILAND][0][16] = 6,
[1][1][2][1][RTW89_FCC][1][20] = 10,
- [1][1][2][1][RTW89_FCC][2][20] = 58,
[1][1][2][1][RTW89_ETSI][1][20] = 42,
[1][1][2][1][RTW89_ETSI][0][20] = 6,
[1][1][2][1][RTW89_MKK][1][20] = 52,
[1][1][2][1][RTW89_MKK][0][20] = 12,
[1][1][2][1][RTW89_IC][1][20] = 10,
- [1][1][2][1][RTW89_IC][2][20] = 58,
[1][1][2][1][RTW89_KCC][1][20] = 28,
[1][1][2][1][RTW89_KCC][0][20] = 12,
[1][1][2][1][RTW89_ACMA][1][20] = 42,
@@ -45831,13 +44563,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][20] = 46,
[1][1][2][1][RTW89_THAILAND][0][20] = 6,
[1][1][2][1][RTW89_FCC][1][24] = 10,
- [1][1][2][1][RTW89_FCC][2][24] = 70,
[1][1][2][1][RTW89_ETSI][1][24] = 42,
[1][1][2][1][RTW89_ETSI][0][24] = 6,
[1][1][2][1][RTW89_MKK][1][24] = 54,
[1][1][2][1][RTW89_MKK][0][24] = 14,
[1][1][2][1][RTW89_IC][1][24] = 10,
- [1][1][2][1][RTW89_IC][2][24] = 70,
[1][1][2][1][RTW89_KCC][1][24] = 28,
[1][1][2][1][RTW89_KCC][0][24] = 12,
[1][1][2][1][RTW89_ACMA][1][24] = 42,
@@ -45850,13 +44580,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][24] = 46,
[1][1][2][1][RTW89_THAILAND][0][24] = 6,
[1][1][2][1][RTW89_FCC][1][28] = 10,
- [1][1][2][1][RTW89_FCC][2][28] = 70,
[1][1][2][1][RTW89_ETSI][1][28] = 42,
[1][1][2][1][RTW89_ETSI][0][28] = 6,
[1][1][2][1][RTW89_MKK][1][28] = 52,
[1][1][2][1][RTW89_MKK][0][28] = 14,
[1][1][2][1][RTW89_IC][1][28] = 10,
- [1][1][2][1][RTW89_IC][2][28] = 70,
[1][1][2][1][RTW89_KCC][1][28] = 28,
[1][1][2][1][RTW89_KCC][0][28] = 14,
[1][1][2][1][RTW89_ACMA][1][28] = 42,
@@ -45869,13 +44597,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][28] = 46,
[1][1][2][1][RTW89_THAILAND][0][28] = 6,
[1][1][2][1][RTW89_FCC][1][31] = 10,
- [1][1][2][1][RTW89_FCC][2][31] = 70,
[1][1][2][1][RTW89_ETSI][1][31] = 42,
[1][1][2][1][RTW89_ETSI][0][31] = 6,
[1][1][2][1][RTW89_MKK][1][31] = 52,
[1][1][2][1][RTW89_MKK][0][31] = 14,
[1][1][2][1][RTW89_IC][1][31] = 10,
- [1][1][2][1][RTW89_IC][2][31] = 70,
[1][1][2][1][RTW89_KCC][1][31] = 28,
[1][1][2][1][RTW89_KCC][0][31] = 14,
[1][1][2][1][RTW89_ACMA][1][31] = 42,
@@ -45888,13 +44614,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][31] = 46,
[1][1][2][1][RTW89_THAILAND][0][31] = 6,
[1][1][2][1][RTW89_FCC][1][35] = 10,
- [1][1][2][1][RTW89_FCC][2][35] = 70,
[1][1][2][1][RTW89_ETSI][1][35] = 42,
[1][1][2][1][RTW89_ETSI][0][35] = 6,
[1][1][2][1][RTW89_MKK][1][35] = 52,
[1][1][2][1][RTW89_MKK][0][35] = 14,
[1][1][2][1][RTW89_IC][1][35] = 10,
- [1][1][2][1][RTW89_IC][2][35] = 70,
[1][1][2][1][RTW89_KCC][1][35] = 28,
[1][1][2][1][RTW89_KCC][0][35] = 14,
[1][1][2][1][RTW89_ACMA][1][35] = 42,
@@ -45907,13 +44631,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][35] = 46,
[1][1][2][1][RTW89_THAILAND][0][35] = 6,
[1][1][2][1][RTW89_FCC][1][39] = 10,
- [1][1][2][1][RTW89_FCC][2][39] = 70,
[1][1][2][1][RTW89_ETSI][1][39] = 42,
[1][1][2][1][RTW89_ETSI][0][39] = 6,
[1][1][2][1][RTW89_MKK][1][39] = 52,
[1][1][2][1][RTW89_MKK][0][39] = 14,
[1][1][2][1][RTW89_IC][1][39] = 10,
- [1][1][2][1][RTW89_IC][2][39] = 70,
[1][1][2][1][RTW89_KCC][1][39] = 28,
[1][1][2][1][RTW89_KCC][0][39] = 14,
[1][1][2][1][RTW89_ACMA][1][39] = 42,
@@ -45926,13 +44648,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][39] = 46,
[1][1][2][1][RTW89_THAILAND][0][39] = 6,
[1][1][2][1][RTW89_FCC][1][43] = 10,
- [1][1][2][1][RTW89_FCC][2][43] = 70,
[1][1][2][1][RTW89_ETSI][1][43] = 42,
[1][1][2][1][RTW89_ETSI][0][43] = 6,
[1][1][2][1][RTW89_MKK][1][43] = 52,
[1][1][2][1][RTW89_MKK][0][43] = 14,
[1][1][2][1][RTW89_IC][1][43] = 10,
- [1][1][2][1][RTW89_IC][2][43] = 70,
[1][1][2][1][RTW89_KCC][1][43] = 28,
[1][1][2][1][RTW89_KCC][0][43] = 14,
[1][1][2][1][RTW89_ACMA][1][43] = 42,
@@ -45945,13 +44665,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][43] = 46,
[1][1][2][1][RTW89_THAILAND][0][43] = 6,
[1][1][2][1][RTW89_FCC][1][46] = 12,
- [1][1][2][1][RTW89_FCC][2][46] = 127,
[1][1][2][1][RTW89_ETSI][1][46] = 127,
[1][1][2][1][RTW89_ETSI][0][46] = 127,
[1][1][2][1][RTW89_MKK][1][46] = 127,
[1][1][2][1][RTW89_MKK][0][46] = 127,
[1][1][2][1][RTW89_IC][1][46] = 12,
- [1][1][2][1][RTW89_IC][2][46] = 68,
[1][1][2][1][RTW89_KCC][1][46] = 28,
[1][1][2][1][RTW89_KCC][0][46] = 127,
[1][1][2][1][RTW89_ACMA][1][46] = 127,
@@ -45964,13 +44682,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][46] = 127,
[1][1][2][1][RTW89_THAILAND][0][46] = 127,
[1][1][2][1][RTW89_FCC][1][50] = 12,
- [1][1][2][1][RTW89_FCC][2][50] = 127,
[1][1][2][1][RTW89_ETSI][1][50] = 127,
[1][1][2][1][RTW89_ETSI][0][50] = 127,
[1][1][2][1][RTW89_MKK][1][50] = 127,
[1][1][2][1][RTW89_MKK][0][50] = 127,
[1][1][2][1][RTW89_IC][1][50] = 12,
- [1][1][2][1][RTW89_IC][2][50] = 68,
[1][1][2][1][RTW89_KCC][1][50] = 28,
[1][1][2][1][RTW89_KCC][0][50] = 127,
[1][1][2][1][RTW89_ACMA][1][50] = 127,
@@ -45983,13 +44699,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][50] = 127,
[1][1][2][1][RTW89_THAILAND][0][50] = 127,
[1][1][2][1][RTW89_FCC][1][54] = 10,
- [1][1][2][1][RTW89_FCC][2][54] = 127,
[1][1][2][1][RTW89_ETSI][1][54] = 127,
[1][1][2][1][RTW89_ETSI][0][54] = 127,
[1][1][2][1][RTW89_MKK][1][54] = 127,
[1][1][2][1][RTW89_MKK][0][54] = 127,
[1][1][2][1][RTW89_IC][1][54] = 10,
- [1][1][2][1][RTW89_IC][2][54] = 127,
[1][1][2][1][RTW89_KCC][1][54] = 28,
[1][1][2][1][RTW89_KCC][0][54] = 127,
[1][1][2][1][RTW89_ACMA][1][54] = 127,
@@ -46002,13 +44716,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][54] = 127,
[1][1][2][1][RTW89_THAILAND][0][54] = 127,
[1][1][2][1][RTW89_FCC][1][58] = 10,
- [1][1][2][1][RTW89_FCC][2][58] = 66,
[1][1][2][1][RTW89_ETSI][1][58] = 127,
[1][1][2][1][RTW89_ETSI][0][58] = 127,
[1][1][2][1][RTW89_MKK][1][58] = 127,
[1][1][2][1][RTW89_MKK][0][58] = 127,
[1][1][2][1][RTW89_IC][1][58] = 10,
- [1][1][2][1][RTW89_IC][2][58] = 66,
[1][1][2][1][RTW89_KCC][1][58] = 28,
[1][1][2][1][RTW89_KCC][0][58] = 127,
[1][1][2][1][RTW89_ACMA][1][58] = 127,
@@ -46021,13 +44733,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][58] = 127,
[1][1][2][1][RTW89_THAILAND][0][58] = 127,
[1][1][2][1][RTW89_FCC][1][61] = 10,
- [1][1][2][1][RTW89_FCC][2][61] = 66,
[1][1][2][1][RTW89_ETSI][1][61] = 127,
[1][1][2][1][RTW89_ETSI][0][61] = 127,
[1][1][2][1][RTW89_MKK][1][61] = 127,
[1][1][2][1][RTW89_MKK][0][61] = 127,
[1][1][2][1][RTW89_IC][1][61] = 10,
- [1][1][2][1][RTW89_IC][2][61] = 66,
[1][1][2][1][RTW89_KCC][1][61] = 28,
[1][1][2][1][RTW89_KCC][0][61] = 127,
[1][1][2][1][RTW89_ACMA][1][61] = 127,
@@ -46040,13 +44750,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][61] = 127,
[1][1][2][1][RTW89_THAILAND][0][61] = 127,
[1][1][2][1][RTW89_FCC][1][65] = 10,
- [1][1][2][1][RTW89_FCC][2][65] = 66,
[1][1][2][1][RTW89_ETSI][1][65] = 127,
[1][1][2][1][RTW89_ETSI][0][65] = 127,
[1][1][2][1][RTW89_MKK][1][65] = 127,
[1][1][2][1][RTW89_MKK][0][65] = 127,
[1][1][2][1][RTW89_IC][1][65] = 10,
- [1][1][2][1][RTW89_IC][2][65] = 66,
[1][1][2][1][RTW89_KCC][1][65] = 28,
[1][1][2][1][RTW89_KCC][0][65] = 127,
[1][1][2][1][RTW89_ACMA][1][65] = 127,
@@ -46059,13 +44767,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][65] = 127,
[1][1][2][1][RTW89_THAILAND][0][65] = 127,
[1][1][2][1][RTW89_FCC][1][69] = 10,
- [1][1][2][1][RTW89_FCC][2][69] = 66,
[1][1][2][1][RTW89_ETSI][1][69] = 127,
[1][1][2][1][RTW89_ETSI][0][69] = 127,
[1][1][2][1][RTW89_MKK][1][69] = 127,
[1][1][2][1][RTW89_MKK][0][69] = 127,
[1][1][2][1][RTW89_IC][1][69] = 10,
- [1][1][2][1][RTW89_IC][2][69] = 66,
[1][1][2][1][RTW89_KCC][1][69] = 28,
[1][1][2][1][RTW89_KCC][0][69] = 127,
[1][1][2][1][RTW89_ACMA][1][69] = 127,
@@ -46078,13 +44784,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][69] = 127,
[1][1][2][1][RTW89_THAILAND][0][69] = 127,
[1][1][2][1][RTW89_FCC][1][73] = 10,
- [1][1][2][1][RTW89_FCC][2][73] = 66,
[1][1][2][1][RTW89_ETSI][1][73] = 127,
[1][1][2][1][RTW89_ETSI][0][73] = 127,
[1][1][2][1][RTW89_MKK][1][73] = 127,
[1][1][2][1][RTW89_MKK][0][73] = 127,
[1][1][2][1][RTW89_IC][1][73] = 10,
- [1][1][2][1][RTW89_IC][2][73] = 66,
[1][1][2][1][RTW89_KCC][1][73] = 28,
[1][1][2][1][RTW89_KCC][0][73] = 127,
[1][1][2][1][RTW89_ACMA][1][73] = 127,
@@ -46097,13 +44801,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][73] = 127,
[1][1][2][1][RTW89_THAILAND][0][73] = 127,
[1][1][2][1][RTW89_FCC][1][76] = 10,
- [1][1][2][1][RTW89_FCC][2][76] = 66,
[1][1][2][1][RTW89_ETSI][1][76] = 127,
[1][1][2][1][RTW89_ETSI][0][76] = 127,
[1][1][2][1][RTW89_MKK][1][76] = 127,
[1][1][2][1][RTW89_MKK][0][76] = 127,
[1][1][2][1][RTW89_IC][1][76] = 10,
- [1][1][2][1][RTW89_IC][2][76] = 66,
[1][1][2][1][RTW89_KCC][1][76] = 28,
[1][1][2][1][RTW89_KCC][0][76] = 127,
[1][1][2][1][RTW89_ACMA][1][76] = 127,
@@ -46116,13 +44818,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][76] = 127,
[1][1][2][1][RTW89_THAILAND][0][76] = 127,
[1][1][2][1][RTW89_FCC][1][80] = 10,
- [1][1][2][1][RTW89_FCC][2][80] = 66,
[1][1][2][1][RTW89_ETSI][1][80] = 127,
[1][1][2][1][RTW89_ETSI][0][80] = 127,
[1][1][2][1][RTW89_MKK][1][80] = 127,
[1][1][2][1][RTW89_MKK][0][80] = 127,
[1][1][2][1][RTW89_IC][1][80] = 10,
- [1][1][2][1][RTW89_IC][2][80] = 66,
[1][1][2][1][RTW89_KCC][1][80] = 32,
[1][1][2][1][RTW89_KCC][0][80] = 127,
[1][1][2][1][RTW89_ACMA][1][80] = 127,
@@ -46135,13 +44835,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][80] = 127,
[1][1][2][1][RTW89_THAILAND][0][80] = 127,
[1][1][2][1][RTW89_FCC][1][84] = 10,
- [1][1][2][1][RTW89_FCC][2][84] = 66,
[1][1][2][1][RTW89_ETSI][1][84] = 127,
[1][1][2][1][RTW89_ETSI][0][84] = 127,
[1][1][2][1][RTW89_MKK][1][84] = 127,
[1][1][2][1][RTW89_MKK][0][84] = 127,
[1][1][2][1][RTW89_IC][1][84] = 10,
- [1][1][2][1][RTW89_IC][2][84] = 66,
[1][1][2][1][RTW89_KCC][1][84] = 32,
[1][1][2][1][RTW89_KCC][0][84] = 127,
[1][1][2][1][RTW89_ACMA][1][84] = 127,
@@ -46154,13 +44852,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][84] = 127,
[1][1][2][1][RTW89_THAILAND][0][84] = 127,
[1][1][2][1][RTW89_FCC][1][88] = 10,
- [1][1][2][1][RTW89_FCC][2][88] = 127,
[1][1][2][1][RTW89_ETSI][1][88] = 127,
[1][1][2][1][RTW89_ETSI][0][88] = 127,
[1][1][2][1][RTW89_MKK][1][88] = 127,
[1][1][2][1][RTW89_MKK][0][88] = 127,
[1][1][2][1][RTW89_IC][1][88] = 10,
- [1][1][2][1][RTW89_IC][2][88] = 127,
[1][1][2][1][RTW89_KCC][1][88] = 32,
[1][1][2][1][RTW89_KCC][0][88] = 127,
[1][1][2][1][RTW89_ACMA][1][88] = 127,
@@ -46173,13 +44869,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][88] = 127,
[1][1][2][1][RTW89_THAILAND][0][88] = 127,
[1][1][2][1][RTW89_FCC][1][91] = 12,
- [1][1][2][1][RTW89_FCC][2][91] = 127,
[1][1][2][1][RTW89_ETSI][1][91] = 127,
[1][1][2][1][RTW89_ETSI][0][91] = 127,
[1][1][2][1][RTW89_MKK][1][91] = 127,
[1][1][2][1][RTW89_MKK][0][91] = 127,
[1][1][2][1][RTW89_IC][1][91] = 12,
- [1][1][2][1][RTW89_IC][2][91] = 127,
[1][1][2][1][RTW89_KCC][1][91] = 32,
[1][1][2][1][RTW89_KCC][0][91] = 127,
[1][1][2][1][RTW89_ACMA][1][91] = 127,
@@ -46192,13 +44886,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][91] = 127,
[1][1][2][1][RTW89_THAILAND][0][91] = 127,
[1][1][2][1][RTW89_FCC][1][95] = 10,
- [1][1][2][1][RTW89_FCC][2][95] = 127,
[1][1][2][1][RTW89_ETSI][1][95] = 127,
[1][1][2][1][RTW89_ETSI][0][95] = 127,
[1][1][2][1][RTW89_MKK][1][95] = 127,
[1][1][2][1][RTW89_MKK][0][95] = 127,
[1][1][2][1][RTW89_IC][1][95] = 10,
- [1][1][2][1][RTW89_IC][2][95] = 127,
[1][1][2][1][RTW89_KCC][1][95] = 32,
[1][1][2][1][RTW89_KCC][0][95] = 127,
[1][1][2][1][RTW89_ACMA][1][95] = 127,
@@ -46211,13 +44903,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][95] = 127,
[1][1][2][1][RTW89_THAILAND][0][95] = 127,
[1][1][2][1][RTW89_FCC][1][99] = 10,
- [1][1][2][1][RTW89_FCC][2][99] = 127,
[1][1][2][1][RTW89_ETSI][1][99] = 127,
[1][1][2][1][RTW89_ETSI][0][99] = 127,
[1][1][2][1][RTW89_MKK][1][99] = 127,
[1][1][2][1][RTW89_MKK][0][99] = 127,
[1][1][2][1][RTW89_IC][1][99] = 10,
- [1][1][2][1][RTW89_IC][2][99] = 127,
[1][1][2][1][RTW89_KCC][1][99] = 32,
[1][1][2][1][RTW89_KCC][0][99] = 127,
[1][1][2][1][RTW89_ACMA][1][99] = 127,
@@ -46230,13 +44920,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][99] = 127,
[1][1][2][1][RTW89_THAILAND][0][99] = 127,
[1][1][2][1][RTW89_FCC][1][103] = 10,
- [1][1][2][1][RTW89_FCC][2][103] = 127,
[1][1][2][1][RTW89_ETSI][1][103] = 127,
[1][1][2][1][RTW89_ETSI][0][103] = 127,
[1][1][2][1][RTW89_MKK][1][103] = 127,
[1][1][2][1][RTW89_MKK][0][103] = 127,
[1][1][2][1][RTW89_IC][1][103] = 10,
- [1][1][2][1][RTW89_IC][2][103] = 127,
[1][1][2][1][RTW89_KCC][1][103] = 32,
[1][1][2][1][RTW89_KCC][0][103] = 127,
[1][1][2][1][RTW89_ACMA][1][103] = 127,
@@ -46249,13 +44937,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][103] = 127,
[1][1][2][1][RTW89_THAILAND][0][103] = 127,
[1][1][2][1][RTW89_FCC][1][106] = 12,
- [1][1][2][1][RTW89_FCC][2][106] = 127,
[1][1][2][1][RTW89_ETSI][1][106] = 127,
[1][1][2][1][RTW89_ETSI][0][106] = 127,
[1][1][2][1][RTW89_MKK][1][106] = 127,
[1][1][2][1][RTW89_MKK][0][106] = 127,
[1][1][2][1][RTW89_IC][1][106] = 12,
- [1][1][2][1][RTW89_IC][2][106] = 127,
[1][1][2][1][RTW89_KCC][1][106] = 32,
[1][1][2][1][RTW89_KCC][0][106] = 127,
[1][1][2][1][RTW89_ACMA][1][106] = 127,
@@ -46268,13 +44954,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][106] = 127,
[1][1][2][1][RTW89_THAILAND][0][106] = 127,
[1][1][2][1][RTW89_FCC][1][110] = 127,
- [1][1][2][1][RTW89_FCC][2][110] = 127,
[1][1][2][1][RTW89_ETSI][1][110] = 127,
[1][1][2][1][RTW89_ETSI][0][110] = 127,
[1][1][2][1][RTW89_MKK][1][110] = 127,
[1][1][2][1][RTW89_MKK][0][110] = 127,
[1][1][2][1][RTW89_IC][1][110] = 127,
- [1][1][2][1][RTW89_IC][2][110] = 127,
[1][1][2][1][RTW89_KCC][1][110] = 127,
[1][1][2][1][RTW89_KCC][0][110] = 127,
[1][1][2][1][RTW89_ACMA][1][110] = 127,
@@ -46287,13 +44971,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][110] = 127,
[1][1][2][1][RTW89_THAILAND][0][110] = 127,
[1][1][2][1][RTW89_FCC][1][114] = 127,
- [1][1][2][1][RTW89_FCC][2][114] = 127,
[1][1][2][1][RTW89_ETSI][1][114] = 127,
[1][1][2][1][RTW89_ETSI][0][114] = 127,
[1][1][2][1][RTW89_MKK][1][114] = 127,
[1][1][2][1][RTW89_MKK][0][114] = 127,
[1][1][2][1][RTW89_IC][1][114] = 127,
- [1][1][2][1][RTW89_IC][2][114] = 127,
[1][1][2][1][RTW89_KCC][1][114] = 127,
[1][1][2][1][RTW89_KCC][0][114] = 127,
[1][1][2][1][RTW89_ACMA][1][114] = 127,
@@ -46306,13 +44988,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][114] = 127,
[1][1][2][1][RTW89_THAILAND][0][114] = 127,
[1][1][2][1][RTW89_FCC][1][118] = 127,
- [1][1][2][1][RTW89_FCC][2][118] = 127,
[1][1][2][1][RTW89_ETSI][1][118] = 127,
[1][1][2][1][RTW89_ETSI][0][118] = 127,
[1][1][2][1][RTW89_MKK][1][118] = 127,
[1][1][2][1][RTW89_MKK][0][118] = 127,
[1][1][2][1][RTW89_IC][1][118] = 127,
- [1][1][2][1][RTW89_IC][2][118] = 127,
[1][1][2][1][RTW89_KCC][1][118] = 127,
[1][1][2][1][RTW89_KCC][0][118] = 127,
[1][1][2][1][RTW89_ACMA][1][118] = 127,
@@ -46325,13 +45005,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[1][1][2][1][RTW89_THAILAND][1][118] = 127,
[1][1][2][1][RTW89_THAILAND][0][118] = 127,
[2][0][2][0][RTW89_FCC][1][3] = 46,
- [2][0][2][0][RTW89_FCC][2][3] = 60,
[2][0][2][0][RTW89_ETSI][1][3] = 58,
[2][0][2][0][RTW89_ETSI][0][3] = 30,
[2][0][2][0][RTW89_MKK][1][3] = 58,
[2][0][2][0][RTW89_MKK][0][3] = 26,
[2][0][2][0][RTW89_IC][1][3] = 46,
- [2][0][2][0][RTW89_IC][2][3] = 60,
[2][0][2][0][RTW89_KCC][1][3] = 50,
[2][0][2][0][RTW89_KCC][0][3] = 24,
[2][0][2][0][RTW89_ACMA][1][3] = 58,
@@ -46344,13 +45022,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][3] = 58,
[2][0][2][0][RTW89_THAILAND][0][3] = 30,
[2][0][2][0][RTW89_FCC][1][11] = 46,
- [2][0][2][0][RTW89_FCC][2][11] = 60,
[2][0][2][0][RTW89_ETSI][1][11] = 58,
[2][0][2][0][RTW89_ETSI][0][11] = 30,
[2][0][2][0][RTW89_MKK][1][11] = 58,
[2][0][2][0][RTW89_MKK][0][11] = 24,
[2][0][2][0][RTW89_IC][1][11] = 46,
- [2][0][2][0][RTW89_IC][2][11] = 60,
[2][0][2][0][RTW89_KCC][1][11] = 50,
[2][0][2][0][RTW89_KCC][0][11] = 24,
[2][0][2][0][RTW89_ACMA][1][11] = 58,
@@ -46363,13 +45039,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][11] = 58,
[2][0][2][0][RTW89_THAILAND][0][11] = 30,
[2][0][2][0][RTW89_FCC][1][18] = 46,
- [2][0][2][0][RTW89_FCC][2][18] = 60,
[2][0][2][0][RTW89_ETSI][1][18] = 58,
[2][0][2][0][RTW89_ETSI][0][18] = 30,
[2][0][2][0][RTW89_MKK][1][18] = 58,
[2][0][2][0][RTW89_MKK][0][18] = 24,
[2][0][2][0][RTW89_IC][1][18] = 46,
- [2][0][2][0][RTW89_IC][2][18] = 60,
[2][0][2][0][RTW89_KCC][1][18] = 50,
[2][0][2][0][RTW89_KCC][0][18] = 24,
[2][0][2][0][RTW89_ACMA][1][18] = 58,
@@ -46382,13 +45056,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][18] = 58,
[2][0][2][0][RTW89_THAILAND][0][18] = 30,
[2][0][2][0][RTW89_FCC][1][26] = 46,
- [2][0][2][0][RTW89_FCC][2][26] = 60,
[2][0][2][0][RTW89_ETSI][1][26] = 58,
[2][0][2][0][RTW89_ETSI][0][26] = 30,
[2][0][2][0][RTW89_MKK][1][26] = 58,
[2][0][2][0][RTW89_MKK][0][26] = 24,
[2][0][2][0][RTW89_IC][1][26] = 46,
- [2][0][2][0][RTW89_IC][2][26] = 60,
[2][0][2][0][RTW89_KCC][1][26] = 50,
[2][0][2][0][RTW89_KCC][0][26] = 26,
[2][0][2][0][RTW89_ACMA][1][26] = 58,
@@ -46401,13 +45073,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][26] = 58,
[2][0][2][0][RTW89_THAILAND][0][26] = 30,
[2][0][2][0][RTW89_FCC][1][33] = 46,
- [2][0][2][0][RTW89_FCC][2][33] = 60,
[2][0][2][0][RTW89_ETSI][1][33] = 58,
[2][0][2][0][RTW89_ETSI][0][33] = 30,
[2][0][2][0][RTW89_MKK][1][33] = 58,
[2][0][2][0][RTW89_MKK][0][33] = 24,
[2][0][2][0][RTW89_IC][1][33] = 46,
- [2][0][2][0][RTW89_IC][2][33] = 60,
[2][0][2][0][RTW89_KCC][1][33] = 50,
[2][0][2][0][RTW89_KCC][0][33] = 24,
[2][0][2][0][RTW89_ACMA][1][33] = 58,
@@ -46420,13 +45090,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][33] = 58,
[2][0][2][0][RTW89_THAILAND][0][33] = 30,
[2][0][2][0][RTW89_FCC][1][41] = 46,
- [2][0][2][0][RTW89_FCC][2][41] = 60,
[2][0][2][0][RTW89_ETSI][1][41] = 58,
[2][0][2][0][RTW89_ETSI][0][41] = 30,
[2][0][2][0][RTW89_MKK][1][41] = 58,
[2][0][2][0][RTW89_MKK][0][41] = 24,
[2][0][2][0][RTW89_IC][1][41] = 46,
- [2][0][2][0][RTW89_IC][2][41] = 60,
[2][0][2][0][RTW89_KCC][1][41] = 50,
[2][0][2][0][RTW89_KCC][0][41] = 24,
[2][0][2][0][RTW89_ACMA][1][41] = 58,
@@ -46439,13 +45107,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][41] = 58,
[2][0][2][0][RTW89_THAILAND][0][41] = 30,
[2][0][2][0][RTW89_FCC][1][48] = 46,
- [2][0][2][0][RTW89_FCC][2][48] = 127,
[2][0][2][0][RTW89_ETSI][1][48] = 127,
[2][0][2][0][RTW89_ETSI][0][48] = 127,
[2][0][2][0][RTW89_MKK][1][48] = 127,
[2][0][2][0][RTW89_MKK][0][48] = 127,
[2][0][2][0][RTW89_IC][1][48] = 46,
- [2][0][2][0][RTW89_IC][2][48] = 60,
[2][0][2][0][RTW89_KCC][1][48] = 48,
[2][0][2][0][RTW89_KCC][0][48] = 127,
[2][0][2][0][RTW89_ACMA][1][48] = 127,
@@ -46458,13 +45124,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][48] = 127,
[2][0][2][0][RTW89_THAILAND][0][48] = 127,
[2][0][2][0][RTW89_FCC][1][56] = 46,
- [2][0][2][0][RTW89_FCC][2][56] = 127,
[2][0][2][0][RTW89_ETSI][1][56] = 127,
[2][0][2][0][RTW89_ETSI][0][56] = 127,
[2][0][2][0][RTW89_MKK][1][56] = 127,
[2][0][2][0][RTW89_MKK][0][56] = 127,
[2][0][2][0][RTW89_IC][1][56] = 46,
- [2][0][2][0][RTW89_IC][2][56] = 58,
[2][0][2][0][RTW89_KCC][1][56] = 48,
[2][0][2][0][RTW89_KCC][0][56] = 127,
[2][0][2][0][RTW89_ACMA][1][56] = 127,
@@ -46477,13 +45141,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][56] = 127,
[2][0][2][0][RTW89_THAILAND][0][56] = 127,
[2][0][2][0][RTW89_FCC][1][63] = 46,
- [2][0][2][0][RTW89_FCC][2][63] = 58,
[2][0][2][0][RTW89_ETSI][1][63] = 127,
[2][0][2][0][RTW89_ETSI][0][63] = 127,
[2][0][2][0][RTW89_MKK][1][63] = 127,
[2][0][2][0][RTW89_MKK][0][63] = 127,
[2][0][2][0][RTW89_IC][1][63] = 46,
- [2][0][2][0][RTW89_IC][2][63] = 58,
[2][0][2][0][RTW89_KCC][1][63] = 48,
[2][0][2][0][RTW89_KCC][0][63] = 127,
[2][0][2][0][RTW89_ACMA][1][63] = 127,
@@ -46496,13 +45158,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][63] = 127,
[2][0][2][0][RTW89_THAILAND][0][63] = 127,
[2][0][2][0][RTW89_FCC][1][71] = 46,
- [2][0][2][0][RTW89_FCC][2][71] = 58,
[2][0][2][0][RTW89_ETSI][1][71] = 127,
[2][0][2][0][RTW89_ETSI][0][71] = 127,
[2][0][2][0][RTW89_MKK][1][71] = 127,
[2][0][2][0][RTW89_MKK][0][71] = 127,
[2][0][2][0][RTW89_IC][1][71] = 46,
- [2][0][2][0][RTW89_IC][2][71] = 58,
[2][0][2][0][RTW89_KCC][1][71] = 48,
[2][0][2][0][RTW89_KCC][0][71] = 127,
[2][0][2][0][RTW89_ACMA][1][71] = 127,
@@ -46515,13 +45175,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][71] = 127,
[2][0][2][0][RTW89_THAILAND][0][71] = 127,
[2][0][2][0][RTW89_FCC][1][78] = 46,
- [2][0][2][0][RTW89_FCC][2][78] = 58,
[2][0][2][0][RTW89_ETSI][1][78] = 127,
[2][0][2][0][RTW89_ETSI][0][78] = 127,
[2][0][2][0][RTW89_MKK][1][78] = 127,
[2][0][2][0][RTW89_MKK][0][78] = 127,
[2][0][2][0][RTW89_IC][1][78] = 46,
- [2][0][2][0][RTW89_IC][2][78] = 58,
[2][0][2][0][RTW89_KCC][1][78] = 52,
[2][0][2][0][RTW89_KCC][0][78] = 127,
[2][0][2][0][RTW89_ACMA][1][78] = 127,
@@ -46534,13 +45192,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][78] = 127,
[2][0][2][0][RTW89_THAILAND][0][78] = 127,
[2][0][2][0][RTW89_FCC][1][86] = 46,
- [2][0][2][0][RTW89_FCC][2][86] = 127,
[2][0][2][0][RTW89_ETSI][1][86] = 127,
[2][0][2][0][RTW89_ETSI][0][86] = 127,
[2][0][2][0][RTW89_MKK][1][86] = 127,
[2][0][2][0][RTW89_MKK][0][86] = 127,
[2][0][2][0][RTW89_IC][1][86] = 46,
- [2][0][2][0][RTW89_IC][2][86] = 127,
[2][0][2][0][RTW89_KCC][1][86] = 52,
[2][0][2][0][RTW89_KCC][0][86] = 127,
[2][0][2][0][RTW89_ACMA][1][86] = 127,
@@ -46553,13 +45209,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][86] = 127,
[2][0][2][0][RTW89_THAILAND][0][86] = 127,
[2][0][2][0][RTW89_FCC][1][93] = 46,
- [2][0][2][0][RTW89_FCC][2][93] = 127,
[2][0][2][0][RTW89_ETSI][1][93] = 127,
[2][0][2][0][RTW89_ETSI][0][93] = 127,
[2][0][2][0][RTW89_MKK][1][93] = 127,
[2][0][2][0][RTW89_MKK][0][93] = 127,
[2][0][2][0][RTW89_IC][1][93] = 46,
- [2][0][2][0][RTW89_IC][2][93] = 127,
[2][0][2][0][RTW89_KCC][1][93] = 50,
[2][0][2][0][RTW89_KCC][0][93] = 127,
[2][0][2][0][RTW89_ACMA][1][93] = 127,
@@ -46572,13 +45226,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][93] = 127,
[2][0][2][0][RTW89_THAILAND][0][93] = 127,
[2][0][2][0][RTW89_FCC][1][101] = 44,
- [2][0][2][0][RTW89_FCC][2][101] = 127,
[2][0][2][0][RTW89_ETSI][1][101] = 127,
[2][0][2][0][RTW89_ETSI][0][101] = 127,
[2][0][2][0][RTW89_MKK][1][101] = 127,
[2][0][2][0][RTW89_MKK][0][101] = 127,
[2][0][2][0][RTW89_IC][1][101] = 44,
- [2][0][2][0][RTW89_IC][2][101] = 127,
[2][0][2][0][RTW89_KCC][1][101] = 50,
[2][0][2][0][RTW89_KCC][0][101] = 127,
[2][0][2][0][RTW89_ACMA][1][101] = 127,
@@ -46591,13 +45243,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][101] = 127,
[2][0][2][0][RTW89_THAILAND][0][101] = 127,
[2][0][2][0][RTW89_FCC][1][108] = 127,
- [2][0][2][0][RTW89_FCC][2][108] = 127,
[2][0][2][0][RTW89_ETSI][1][108] = 127,
[2][0][2][0][RTW89_ETSI][0][108] = 127,
[2][0][2][0][RTW89_MKK][1][108] = 127,
[2][0][2][0][RTW89_MKK][0][108] = 127,
[2][0][2][0][RTW89_IC][1][108] = 127,
- [2][0][2][0][RTW89_IC][2][108] = 127,
[2][0][2][0][RTW89_KCC][1][108] = 127,
[2][0][2][0][RTW89_KCC][0][108] = 127,
[2][0][2][0][RTW89_ACMA][1][108] = 127,
@@ -46610,13 +45260,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][108] = 127,
[2][0][2][0][RTW89_THAILAND][0][108] = 127,
[2][0][2][0][RTW89_FCC][1][116] = 127,
- [2][0][2][0][RTW89_FCC][2][116] = 127,
[2][0][2][0][RTW89_ETSI][1][116] = 127,
[2][0][2][0][RTW89_ETSI][0][116] = 127,
[2][0][2][0][RTW89_MKK][1][116] = 127,
[2][0][2][0][RTW89_MKK][0][116] = 127,
[2][0][2][0][RTW89_IC][1][116] = 127,
- [2][0][2][0][RTW89_IC][2][116] = 127,
[2][0][2][0][RTW89_KCC][1][116] = 127,
[2][0][2][0][RTW89_KCC][0][116] = 127,
[2][0][2][0][RTW89_ACMA][1][116] = 127,
@@ -46629,13 +45277,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][0][2][0][RTW89_THAILAND][1][116] = 127,
[2][0][2][0][RTW89_THAILAND][0][116] = 127,
[2][1][2][0][RTW89_FCC][1][3] = 22,
- [2][1][2][0][RTW89_FCC][2][3] = 50,
[2][1][2][0][RTW89_ETSI][1][3] = 54,
[2][1][2][0][RTW89_ETSI][0][3] = 16,
[2][1][2][0][RTW89_MKK][1][3] = 52,
[2][1][2][0][RTW89_MKK][0][3] = 14,
[2][1][2][0][RTW89_IC][1][3] = 22,
- [2][1][2][0][RTW89_IC][2][3] = 50,
[2][1][2][0][RTW89_KCC][1][3] = 38,
[2][1][2][0][RTW89_KCC][0][3] = 12,
[2][1][2][0][RTW89_ACMA][1][3] = 54,
@@ -46648,13 +45294,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][3] = 46,
[2][1][2][0][RTW89_THAILAND][0][3] = 18,
[2][1][2][0][RTW89_FCC][1][11] = 20,
- [2][1][2][0][RTW89_FCC][2][11] = 50,
[2][1][2][0][RTW89_ETSI][1][11] = 54,
[2][1][2][0][RTW89_ETSI][0][11] = 16,
[2][1][2][0][RTW89_MKK][1][11] = 52,
[2][1][2][0][RTW89_MKK][0][11] = 12,
[2][1][2][0][RTW89_IC][1][11] = 20,
- [2][1][2][0][RTW89_IC][2][11] = 50,
[2][1][2][0][RTW89_KCC][1][11] = 38,
[2][1][2][0][RTW89_KCC][0][11] = 12,
[2][1][2][0][RTW89_ACMA][1][11] = 54,
@@ -46667,13 +45311,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][11] = 46,
[2][1][2][0][RTW89_THAILAND][0][11] = 18,
[2][1][2][0][RTW89_FCC][1][18] = 20,
- [2][1][2][0][RTW89_FCC][2][18] = 50,
[2][1][2][0][RTW89_ETSI][1][18] = 54,
[2][1][2][0][RTW89_ETSI][0][18] = 16,
[2][1][2][0][RTW89_MKK][1][18] = 52,
[2][1][2][0][RTW89_MKK][0][18] = 12,
[2][1][2][0][RTW89_IC][1][18] = 20,
- [2][1][2][0][RTW89_IC][2][18] = 50,
[2][1][2][0][RTW89_KCC][1][18] = 38,
[2][1][2][0][RTW89_KCC][0][18] = 12,
[2][1][2][0][RTW89_ACMA][1][18] = 54,
@@ -46686,13 +45328,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][18] = 46,
[2][1][2][0][RTW89_THAILAND][0][18] = 18,
[2][1][2][0][RTW89_FCC][1][26] = 20,
- [2][1][2][0][RTW89_FCC][2][26] = 60,
[2][1][2][0][RTW89_ETSI][1][26] = 54,
[2][1][2][0][RTW89_ETSI][0][26] = 16,
[2][1][2][0][RTW89_MKK][1][26] = 52,
[2][1][2][0][RTW89_MKK][0][26] = 12,
[2][1][2][0][RTW89_IC][1][26] = 20,
- [2][1][2][0][RTW89_IC][2][26] = 60,
[2][1][2][0][RTW89_KCC][1][26] = 38,
[2][1][2][0][RTW89_KCC][0][26] = 12,
[2][1][2][0][RTW89_ACMA][1][26] = 54,
@@ -46705,13 +45345,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][26] = 46,
[2][1][2][0][RTW89_THAILAND][0][26] = 18,
[2][1][2][0][RTW89_FCC][1][33] = 20,
- [2][1][2][0][RTW89_FCC][2][33] = 60,
[2][1][2][0][RTW89_ETSI][1][33] = 54,
[2][1][2][0][RTW89_ETSI][0][33] = 16,
[2][1][2][0][RTW89_MKK][1][33] = 48,
[2][1][2][0][RTW89_MKK][0][33] = 12,
[2][1][2][0][RTW89_IC][1][33] = 20,
- [2][1][2][0][RTW89_IC][2][33] = 60,
[2][1][2][0][RTW89_KCC][1][33] = 38,
[2][1][2][0][RTW89_KCC][0][33] = 12,
[2][1][2][0][RTW89_ACMA][1][33] = 54,
@@ -46724,13 +45362,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][33] = 46,
[2][1][2][0][RTW89_THAILAND][0][33] = 18,
[2][1][2][0][RTW89_FCC][1][41] = 22,
- [2][1][2][0][RTW89_FCC][2][41] = 60,
[2][1][2][0][RTW89_ETSI][1][41] = 54,
[2][1][2][0][RTW89_ETSI][0][41] = 18,
[2][1][2][0][RTW89_MKK][1][41] = 48,
[2][1][2][0][RTW89_MKK][0][41] = 12,
[2][1][2][0][RTW89_IC][1][41] = 22,
- [2][1][2][0][RTW89_IC][2][41] = 60,
[2][1][2][0][RTW89_KCC][1][41] = 38,
[2][1][2][0][RTW89_KCC][0][41] = 12,
[2][1][2][0][RTW89_ACMA][1][41] = 54,
@@ -46743,13 +45379,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][41] = 46,
[2][1][2][0][RTW89_THAILAND][0][41] = 18,
[2][1][2][0][RTW89_FCC][1][48] = 22,
- [2][1][2][0][RTW89_FCC][2][48] = 127,
[2][1][2][0][RTW89_ETSI][1][48] = 127,
[2][1][2][0][RTW89_ETSI][0][48] = 127,
[2][1][2][0][RTW89_MKK][1][48] = 127,
[2][1][2][0][RTW89_MKK][0][48] = 127,
[2][1][2][0][RTW89_IC][1][48] = 22,
- [2][1][2][0][RTW89_IC][2][48] = 60,
[2][1][2][0][RTW89_KCC][1][48] = 38,
[2][1][2][0][RTW89_KCC][0][48] = 127,
[2][1][2][0][RTW89_ACMA][1][48] = 127,
@@ -46762,13 +45396,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][48] = 127,
[2][1][2][0][RTW89_THAILAND][0][48] = 127,
[2][1][2][0][RTW89_FCC][1][56] = 20,
- [2][1][2][0][RTW89_FCC][2][56] = 127,
[2][1][2][0][RTW89_ETSI][1][56] = 127,
[2][1][2][0][RTW89_ETSI][0][56] = 127,
[2][1][2][0][RTW89_MKK][1][56] = 127,
[2][1][2][0][RTW89_MKK][0][56] = 127,
[2][1][2][0][RTW89_IC][1][56] = 20,
- [2][1][2][0][RTW89_IC][2][56] = 56,
[2][1][2][0][RTW89_KCC][1][56] = 38,
[2][1][2][0][RTW89_KCC][0][56] = 127,
[2][1][2][0][RTW89_ACMA][1][56] = 127,
@@ -46781,13 +45413,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][56] = 127,
[2][1][2][0][RTW89_THAILAND][0][56] = 127,
[2][1][2][0][RTW89_FCC][1][63] = 22,
- [2][1][2][0][RTW89_FCC][2][63] = 58,
[2][1][2][0][RTW89_ETSI][1][63] = 127,
[2][1][2][0][RTW89_ETSI][0][63] = 127,
[2][1][2][0][RTW89_MKK][1][63] = 127,
[2][1][2][0][RTW89_MKK][0][63] = 127,
[2][1][2][0][RTW89_IC][1][63] = 22,
- [2][1][2][0][RTW89_IC][2][63] = 58,
[2][1][2][0][RTW89_KCC][1][63] = 38,
[2][1][2][0][RTW89_KCC][0][63] = 127,
[2][1][2][0][RTW89_ACMA][1][63] = 127,
@@ -46800,13 +45430,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][63] = 127,
[2][1][2][0][RTW89_THAILAND][0][63] = 127,
[2][1][2][0][RTW89_FCC][1][71] = 20,
- [2][1][2][0][RTW89_FCC][2][71] = 58,
[2][1][2][0][RTW89_ETSI][1][71] = 127,
[2][1][2][0][RTW89_ETSI][0][71] = 127,
[2][1][2][0][RTW89_MKK][1][71] = 127,
[2][1][2][0][RTW89_MKK][0][71] = 127,
[2][1][2][0][RTW89_IC][1][71] = 20,
- [2][1][2][0][RTW89_IC][2][71] = 58,
[2][1][2][0][RTW89_KCC][1][71] = 38,
[2][1][2][0][RTW89_KCC][0][71] = 127,
[2][1][2][0][RTW89_ACMA][1][71] = 127,
@@ -46819,13 +45447,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][71] = 127,
[2][1][2][0][RTW89_THAILAND][0][71] = 127,
[2][1][2][0][RTW89_FCC][1][78] = 20,
- [2][1][2][0][RTW89_FCC][2][78] = 58,
[2][1][2][0][RTW89_ETSI][1][78] = 127,
[2][1][2][0][RTW89_ETSI][0][78] = 127,
[2][1][2][0][RTW89_MKK][1][78] = 127,
[2][1][2][0][RTW89_MKK][0][78] = 127,
[2][1][2][0][RTW89_IC][1][78] = 20,
- [2][1][2][0][RTW89_IC][2][78] = 58,
[2][1][2][0][RTW89_KCC][1][78] = 38,
[2][1][2][0][RTW89_KCC][0][78] = 127,
[2][1][2][0][RTW89_ACMA][1][78] = 127,
@@ -46838,13 +45464,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][78] = 127,
[2][1][2][0][RTW89_THAILAND][0][78] = 127,
[2][1][2][0][RTW89_FCC][1][86] = 20,
- [2][1][2][0][RTW89_FCC][2][86] = 127,
[2][1][2][0][RTW89_ETSI][1][86] = 127,
[2][1][2][0][RTW89_ETSI][0][86] = 127,
[2][1][2][0][RTW89_MKK][1][86] = 127,
[2][1][2][0][RTW89_MKK][0][86] = 127,
[2][1][2][0][RTW89_IC][1][86] = 20,
- [2][1][2][0][RTW89_IC][2][86] = 127,
[2][1][2][0][RTW89_KCC][1][86] = 38,
[2][1][2][0][RTW89_KCC][0][86] = 127,
[2][1][2][0][RTW89_ACMA][1][86] = 127,
@@ -46857,13 +45481,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][86] = 127,
[2][1][2][0][RTW89_THAILAND][0][86] = 127,
[2][1][2][0][RTW89_FCC][1][93] = 22,
- [2][1][2][0][RTW89_FCC][2][93] = 127,
[2][1][2][0][RTW89_ETSI][1][93] = 127,
[2][1][2][0][RTW89_ETSI][0][93] = 127,
[2][1][2][0][RTW89_MKK][1][93] = 127,
[2][1][2][0][RTW89_MKK][0][93] = 127,
[2][1][2][0][RTW89_IC][1][93] = 22,
- [2][1][2][0][RTW89_IC][2][93] = 127,
[2][1][2][0][RTW89_KCC][1][93] = 38,
[2][1][2][0][RTW89_KCC][0][93] = 127,
[2][1][2][0][RTW89_ACMA][1][93] = 127,
@@ -46876,13 +45498,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][93] = 127,
[2][1][2][0][RTW89_THAILAND][0][93] = 127,
[2][1][2][0][RTW89_FCC][1][101] = 22,
- [2][1][2][0][RTW89_FCC][2][101] = 127,
[2][1][2][0][RTW89_ETSI][1][101] = 127,
[2][1][2][0][RTW89_ETSI][0][101] = 127,
[2][1][2][0][RTW89_MKK][1][101] = 127,
[2][1][2][0][RTW89_MKK][0][101] = 127,
[2][1][2][0][RTW89_IC][1][101] = 22,
- [2][1][2][0][RTW89_IC][2][101] = 127,
[2][1][2][0][RTW89_KCC][1][101] = 38,
[2][1][2][0][RTW89_KCC][0][101] = 127,
[2][1][2][0][RTW89_ACMA][1][101] = 127,
@@ -46895,13 +45515,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][101] = 127,
[2][1][2][0][RTW89_THAILAND][0][101] = 127,
[2][1][2][0][RTW89_FCC][1][108] = 127,
- [2][1][2][0][RTW89_FCC][2][108] = 127,
[2][1][2][0][RTW89_ETSI][1][108] = 127,
[2][1][2][0][RTW89_ETSI][0][108] = 127,
[2][1][2][0][RTW89_MKK][1][108] = 127,
[2][1][2][0][RTW89_MKK][0][108] = 127,
[2][1][2][0][RTW89_IC][1][108] = 127,
- [2][1][2][0][RTW89_IC][2][108] = 127,
[2][1][2][0][RTW89_KCC][1][108] = 127,
[2][1][2][0][RTW89_KCC][0][108] = 127,
[2][1][2][0][RTW89_ACMA][1][108] = 127,
@@ -46914,13 +45532,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][108] = 127,
[2][1][2][0][RTW89_THAILAND][0][108] = 127,
[2][1][2][0][RTW89_FCC][1][116] = 127,
- [2][1][2][0][RTW89_FCC][2][116] = 127,
[2][1][2][0][RTW89_ETSI][1][116] = 127,
[2][1][2][0][RTW89_ETSI][0][116] = 127,
[2][1][2][0][RTW89_MKK][1][116] = 127,
[2][1][2][0][RTW89_MKK][0][116] = 127,
[2][1][2][0][RTW89_IC][1][116] = 127,
- [2][1][2][0][RTW89_IC][2][116] = 127,
[2][1][2][0][RTW89_KCC][1][116] = 127,
[2][1][2][0][RTW89_KCC][0][116] = 127,
[2][1][2][0][RTW89_ACMA][1][116] = 127,
@@ -46933,13 +45549,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][0][RTW89_THAILAND][1][116] = 127,
[2][1][2][0][RTW89_THAILAND][0][116] = 127,
[2][1][2][1][RTW89_FCC][1][3] = 22,
- [2][1][2][1][RTW89_FCC][2][3] = 50,
[2][1][2][1][RTW89_ETSI][1][3] = 42,
[2][1][2][1][RTW89_ETSI][0][3] = 6,
[2][1][2][1][RTW89_MKK][1][3] = 52,
[2][1][2][1][RTW89_MKK][0][3] = 14,
[2][1][2][1][RTW89_IC][1][3] = 22,
- [2][1][2][1][RTW89_IC][2][3] = 50,
[2][1][2][1][RTW89_KCC][1][3] = 38,
[2][1][2][1][RTW89_KCC][0][3] = 12,
[2][1][2][1][RTW89_ACMA][1][3] = 42,
@@ -46952,13 +45566,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][3] = 46,
[2][1][2][1][RTW89_THAILAND][0][3] = 6,
[2][1][2][1][RTW89_FCC][1][11] = 20,
- [2][1][2][1][RTW89_FCC][2][11] = 50,
[2][1][2][1][RTW89_ETSI][1][11] = 42,
[2][1][2][1][RTW89_ETSI][0][11] = 6,
[2][1][2][1][RTW89_MKK][1][11] = 52,
[2][1][2][1][RTW89_MKK][0][11] = 12,
[2][1][2][1][RTW89_IC][1][11] = 20,
- [2][1][2][1][RTW89_IC][2][11] = 50,
[2][1][2][1][RTW89_KCC][1][11] = 38,
[2][1][2][1][RTW89_KCC][0][11] = 12,
[2][1][2][1][RTW89_ACMA][1][11] = 42,
@@ -46971,13 +45583,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][11] = 46,
[2][1][2][1][RTW89_THAILAND][0][11] = 6,
[2][1][2][1][RTW89_FCC][1][18] = 20,
- [2][1][2][1][RTW89_FCC][2][18] = 50,
[2][1][2][1][RTW89_ETSI][1][18] = 42,
[2][1][2][1][RTW89_ETSI][0][18] = 6,
[2][1][2][1][RTW89_MKK][1][18] = 52,
[2][1][2][1][RTW89_MKK][0][18] = 12,
[2][1][2][1][RTW89_IC][1][18] = 20,
- [2][1][2][1][RTW89_IC][2][18] = 50,
[2][1][2][1][RTW89_KCC][1][18] = 38,
[2][1][2][1][RTW89_KCC][0][18] = 12,
[2][1][2][1][RTW89_ACMA][1][18] = 42,
@@ -46990,13 +45600,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][18] = 46,
[2][1][2][1][RTW89_THAILAND][0][18] = 6,
[2][1][2][1][RTW89_FCC][1][26] = 20,
- [2][1][2][1][RTW89_FCC][2][26] = 60,
[2][1][2][1][RTW89_ETSI][1][26] = 42,
[2][1][2][1][RTW89_ETSI][0][26] = 6,
[2][1][2][1][RTW89_MKK][1][26] = 52,
[2][1][2][1][RTW89_MKK][0][26] = 12,
[2][1][2][1][RTW89_IC][1][26] = 20,
- [2][1][2][1][RTW89_IC][2][26] = 60,
[2][1][2][1][RTW89_KCC][1][26] = 38,
[2][1][2][1][RTW89_KCC][0][26] = 12,
[2][1][2][1][RTW89_ACMA][1][26] = 42,
@@ -47009,13 +45617,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][26] = 46,
[2][1][2][1][RTW89_THAILAND][0][26] = 6,
[2][1][2][1][RTW89_FCC][1][33] = 20,
- [2][1][2][1][RTW89_FCC][2][33] = 60,
[2][1][2][1][RTW89_ETSI][1][33] = 42,
[2][1][2][1][RTW89_ETSI][0][33] = 6,
[2][1][2][1][RTW89_MKK][1][33] = 48,
[2][1][2][1][RTW89_MKK][0][33] = 12,
[2][1][2][1][RTW89_IC][1][33] = 20,
- [2][1][2][1][RTW89_IC][2][33] = 60,
[2][1][2][1][RTW89_KCC][1][33] = 38,
[2][1][2][1][RTW89_KCC][0][33] = 12,
[2][1][2][1][RTW89_ACMA][1][33] = 42,
@@ -47028,13 +45634,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][33] = 46,
[2][1][2][1][RTW89_THAILAND][0][33] = 6,
[2][1][2][1][RTW89_FCC][1][41] = 22,
- [2][1][2][1][RTW89_FCC][2][41] = 60,
[2][1][2][1][RTW89_ETSI][1][41] = 42,
[2][1][2][1][RTW89_ETSI][0][41] = 6,
[2][1][2][1][RTW89_MKK][1][41] = 48,
[2][1][2][1][RTW89_MKK][0][41] = 12,
[2][1][2][1][RTW89_IC][1][41] = 22,
- [2][1][2][1][RTW89_IC][2][41] = 60,
[2][1][2][1][RTW89_KCC][1][41] = 38,
[2][1][2][1][RTW89_KCC][0][41] = 12,
[2][1][2][1][RTW89_ACMA][1][41] = 42,
@@ -47047,13 +45651,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][41] = 46,
[2][1][2][1][RTW89_THAILAND][0][41] = 6,
[2][1][2][1][RTW89_FCC][1][48] = 22,
- [2][1][2][1][RTW89_FCC][2][48] = 127,
[2][1][2][1][RTW89_ETSI][1][48] = 127,
[2][1][2][1][RTW89_ETSI][0][48] = 127,
[2][1][2][1][RTW89_MKK][1][48] = 127,
[2][1][2][1][RTW89_MKK][0][48] = 127,
[2][1][2][1][RTW89_IC][1][48] = 22,
- [2][1][2][1][RTW89_IC][2][48] = 60,
[2][1][2][1][RTW89_KCC][1][48] = 38,
[2][1][2][1][RTW89_KCC][0][48] = 127,
[2][1][2][1][RTW89_ACMA][1][48] = 127,
@@ -47066,13 +45668,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][48] = 127,
[2][1][2][1][RTW89_THAILAND][0][48] = 127,
[2][1][2][1][RTW89_FCC][1][56] = 20,
- [2][1][2][1][RTW89_FCC][2][56] = 127,
[2][1][2][1][RTW89_ETSI][1][56] = 127,
[2][1][2][1][RTW89_ETSI][0][56] = 127,
[2][1][2][1][RTW89_MKK][1][56] = 127,
[2][1][2][1][RTW89_MKK][0][56] = 127,
[2][1][2][1][RTW89_IC][1][56] = 20,
- [2][1][2][1][RTW89_IC][2][56] = 56,
[2][1][2][1][RTW89_KCC][1][56] = 38,
[2][1][2][1][RTW89_KCC][0][56] = 127,
[2][1][2][1][RTW89_ACMA][1][56] = 127,
@@ -47085,13 +45685,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][56] = 127,
[2][1][2][1][RTW89_THAILAND][0][56] = 127,
[2][1][2][1][RTW89_FCC][1][63] = 22,
- [2][1][2][1][RTW89_FCC][2][63] = 58,
[2][1][2][1][RTW89_ETSI][1][63] = 127,
[2][1][2][1][RTW89_ETSI][0][63] = 127,
[2][1][2][1][RTW89_MKK][1][63] = 127,
[2][1][2][1][RTW89_MKK][0][63] = 127,
[2][1][2][1][RTW89_IC][1][63] = 22,
- [2][1][2][1][RTW89_IC][2][63] = 58,
[2][1][2][1][RTW89_KCC][1][63] = 38,
[2][1][2][1][RTW89_KCC][0][63] = 127,
[2][1][2][1][RTW89_ACMA][1][63] = 127,
@@ -47104,13 +45702,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][63] = 127,
[2][1][2][1][RTW89_THAILAND][0][63] = 127,
[2][1][2][1][RTW89_FCC][1][71] = 20,
- [2][1][2][1][RTW89_FCC][2][71] = 58,
[2][1][2][1][RTW89_ETSI][1][71] = 127,
[2][1][2][1][RTW89_ETSI][0][71] = 127,
[2][1][2][1][RTW89_MKK][1][71] = 127,
[2][1][2][1][RTW89_MKK][0][71] = 127,
[2][1][2][1][RTW89_IC][1][71] = 20,
- [2][1][2][1][RTW89_IC][2][71] = 58,
[2][1][2][1][RTW89_KCC][1][71] = 38,
[2][1][2][1][RTW89_KCC][0][71] = 127,
[2][1][2][1][RTW89_ACMA][1][71] = 127,
@@ -47123,13 +45719,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][71] = 127,
[2][1][2][1][RTW89_THAILAND][0][71] = 127,
[2][1][2][1][RTW89_FCC][1][78] = 20,
- [2][1][2][1][RTW89_FCC][2][78] = 58,
[2][1][2][1][RTW89_ETSI][1][78] = 127,
[2][1][2][1][RTW89_ETSI][0][78] = 127,
[2][1][2][1][RTW89_MKK][1][78] = 127,
[2][1][2][1][RTW89_MKK][0][78] = 127,
[2][1][2][1][RTW89_IC][1][78] = 20,
- [2][1][2][1][RTW89_IC][2][78] = 58,
[2][1][2][1][RTW89_KCC][1][78] = 38,
[2][1][2][1][RTW89_KCC][0][78] = 127,
[2][1][2][1][RTW89_ACMA][1][78] = 127,
@@ -47142,13 +45736,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][78] = 127,
[2][1][2][1][RTW89_THAILAND][0][78] = 127,
[2][1][2][1][RTW89_FCC][1][86] = 20,
- [2][1][2][1][RTW89_FCC][2][86] = 127,
[2][1][2][1][RTW89_ETSI][1][86] = 127,
[2][1][2][1][RTW89_ETSI][0][86] = 127,
[2][1][2][1][RTW89_MKK][1][86] = 127,
[2][1][2][1][RTW89_MKK][0][86] = 127,
[2][1][2][1][RTW89_IC][1][86] = 20,
- [2][1][2][1][RTW89_IC][2][86] = 127,
[2][1][2][1][RTW89_KCC][1][86] = 38,
[2][1][2][1][RTW89_KCC][0][86] = 127,
[2][1][2][1][RTW89_ACMA][1][86] = 127,
@@ -47161,13 +45753,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][86] = 127,
[2][1][2][1][RTW89_THAILAND][0][86] = 127,
[2][1][2][1][RTW89_FCC][1][93] = 22,
- [2][1][2][1][RTW89_FCC][2][93] = 127,
[2][1][2][1][RTW89_ETSI][1][93] = 127,
[2][1][2][1][RTW89_ETSI][0][93] = 127,
[2][1][2][1][RTW89_MKK][1][93] = 127,
[2][1][2][1][RTW89_MKK][0][93] = 127,
[2][1][2][1][RTW89_IC][1][93] = 22,
- [2][1][2][1][RTW89_IC][2][93] = 127,
[2][1][2][1][RTW89_KCC][1][93] = 38,
[2][1][2][1][RTW89_KCC][0][93] = 127,
[2][1][2][1][RTW89_ACMA][1][93] = 127,
@@ -47180,13 +45770,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][93] = 127,
[2][1][2][1][RTW89_THAILAND][0][93] = 127,
[2][1][2][1][RTW89_FCC][1][101] = 22,
- [2][1][2][1][RTW89_FCC][2][101] = 127,
[2][1][2][1][RTW89_ETSI][1][101] = 127,
[2][1][2][1][RTW89_ETSI][0][101] = 127,
[2][1][2][1][RTW89_MKK][1][101] = 127,
[2][1][2][1][RTW89_MKK][0][101] = 127,
[2][1][2][1][RTW89_IC][1][101] = 22,
- [2][1][2][1][RTW89_IC][2][101] = 127,
[2][1][2][1][RTW89_KCC][1][101] = 38,
[2][1][2][1][RTW89_KCC][0][101] = 127,
[2][1][2][1][RTW89_ACMA][1][101] = 127,
@@ -47199,13 +45787,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][101] = 127,
[2][1][2][1][RTW89_THAILAND][0][101] = 127,
[2][1][2][1][RTW89_FCC][1][108] = 127,
- [2][1][2][1][RTW89_FCC][2][108] = 127,
[2][1][2][1][RTW89_ETSI][1][108] = 127,
[2][1][2][1][RTW89_ETSI][0][108] = 127,
[2][1][2][1][RTW89_MKK][1][108] = 127,
[2][1][2][1][RTW89_MKK][0][108] = 127,
[2][1][2][1][RTW89_IC][1][108] = 127,
- [2][1][2][1][RTW89_IC][2][108] = 127,
[2][1][2][1][RTW89_KCC][1][108] = 127,
[2][1][2][1][RTW89_KCC][0][108] = 127,
[2][1][2][1][RTW89_ACMA][1][108] = 127,
@@ -47218,13 +45804,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][108] = 127,
[2][1][2][1][RTW89_THAILAND][0][108] = 127,
[2][1][2][1][RTW89_FCC][1][116] = 127,
- [2][1][2][1][RTW89_FCC][2][116] = 127,
[2][1][2][1][RTW89_ETSI][1][116] = 127,
[2][1][2][1][RTW89_ETSI][0][116] = 127,
[2][1][2][1][RTW89_MKK][1][116] = 127,
[2][1][2][1][RTW89_MKK][0][116] = 127,
[2][1][2][1][RTW89_IC][1][116] = 127,
- [2][1][2][1][RTW89_IC][2][116] = 127,
[2][1][2][1][RTW89_KCC][1][116] = 127,
[2][1][2][1][RTW89_KCC][0][116] = 127,
[2][1][2][1][RTW89_ACMA][1][116] = 127,
@@ -47237,13 +45821,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[2][1][2][1][RTW89_THAILAND][1][116] = 127,
[2][1][2][1][RTW89_THAILAND][0][116] = 127,
[3][0][2][0][RTW89_FCC][1][7] = 52,
- [3][0][2][0][RTW89_FCC][2][7] = 52,
[3][0][2][0][RTW89_ETSI][1][7] = 50,
[3][0][2][0][RTW89_ETSI][0][7] = 30,
[3][0][2][0][RTW89_MKK][1][7] = 50,
[3][0][2][0][RTW89_MKK][0][7] = 22,
[3][0][2][0][RTW89_IC][1][7] = 52,
- [3][0][2][0][RTW89_IC][2][7] = 52,
[3][0][2][0][RTW89_KCC][1][7] = 42,
[3][0][2][0][RTW89_KCC][0][7] = 24,
[3][0][2][0][RTW89_ACMA][1][7] = 50,
@@ -47256,13 +45838,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][7] = 50,
[3][0][2][0][RTW89_THAILAND][0][7] = 30,
[3][0][2][0][RTW89_FCC][1][22] = 52,
- [3][0][2][0][RTW89_FCC][2][22] = 52,
[3][0][2][0][RTW89_ETSI][1][22] = 50,
[3][0][2][0][RTW89_ETSI][0][22] = 30,
[3][0][2][0][RTW89_MKK][1][22] = 50,
[3][0][2][0][RTW89_MKK][0][22] = 20,
[3][0][2][0][RTW89_IC][1][22] = 52,
- [3][0][2][0][RTW89_IC][2][22] = 52,
[3][0][2][0][RTW89_KCC][1][22] = 42,
[3][0][2][0][RTW89_KCC][0][22] = 24,
[3][0][2][0][RTW89_ACMA][1][22] = 50,
@@ -47275,13 +45855,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][22] = 50,
[3][0][2][0][RTW89_THAILAND][0][22] = 30,
[3][0][2][0][RTW89_FCC][1][37] = 52,
- [3][0][2][0][RTW89_FCC][2][37] = 52,
[3][0][2][0][RTW89_ETSI][1][37] = 50,
[3][0][2][0][RTW89_ETSI][0][37] = 30,
[3][0][2][0][RTW89_MKK][1][37] = 50,
[3][0][2][0][RTW89_MKK][0][37] = 20,
[3][0][2][0][RTW89_IC][1][37] = 52,
- [3][0][2][0][RTW89_IC][2][37] = 52,
[3][0][2][0][RTW89_KCC][1][37] = 42,
[3][0][2][0][RTW89_KCC][0][37] = 24,
[3][0][2][0][RTW89_ACMA][1][37] = 50,
@@ -47294,13 +45872,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][37] = 50,
[3][0][2][0][RTW89_THAILAND][0][37] = 30,
[3][0][2][0][RTW89_FCC][1][52] = 54,
- [3][0][2][0][RTW89_FCC][2][52] = 127,
[3][0][2][0][RTW89_ETSI][1][52] = 127,
[3][0][2][0][RTW89_ETSI][0][52] = 127,
[3][0][2][0][RTW89_MKK][1][52] = 127,
[3][0][2][0][RTW89_MKK][0][52] = 127,
[3][0][2][0][RTW89_IC][1][52] = 54,
- [3][0][2][0][RTW89_IC][2][52] = 56,
[3][0][2][0][RTW89_KCC][1][52] = 56,
[3][0][2][0][RTW89_KCC][0][52] = 127,
[3][0][2][0][RTW89_ACMA][1][52] = 127,
@@ -47313,13 +45889,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][52] = 127,
[3][0][2][0][RTW89_THAILAND][0][52] = 127,
[3][0][2][0][RTW89_FCC][1][67] = 54,
- [3][0][2][0][RTW89_FCC][2][67] = 54,
[3][0][2][0][RTW89_ETSI][1][67] = 127,
[3][0][2][0][RTW89_ETSI][0][67] = 127,
[3][0][2][0][RTW89_MKK][1][67] = 127,
[3][0][2][0][RTW89_MKK][0][67] = 127,
[3][0][2][0][RTW89_IC][1][67] = 54,
- [3][0][2][0][RTW89_IC][2][67] = 54,
[3][0][2][0][RTW89_KCC][1][67] = 54,
[3][0][2][0][RTW89_KCC][0][67] = 127,
[3][0][2][0][RTW89_ACMA][1][67] = 127,
@@ -47332,13 +45906,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][67] = 127,
[3][0][2][0][RTW89_THAILAND][0][67] = 127,
[3][0][2][0][RTW89_FCC][1][82] = 46,
- [3][0][2][0][RTW89_FCC][2][82] = 127,
[3][0][2][0][RTW89_ETSI][1][82] = 127,
[3][0][2][0][RTW89_ETSI][0][82] = 127,
[3][0][2][0][RTW89_MKK][1][82] = 127,
[3][0][2][0][RTW89_MKK][0][82] = 127,
[3][0][2][0][RTW89_IC][1][82] = 46,
- [3][0][2][0][RTW89_IC][2][82] = 127,
[3][0][2][0][RTW89_KCC][1][82] = 26,
[3][0][2][0][RTW89_KCC][0][82] = 127,
[3][0][2][0][RTW89_ACMA][1][82] = 127,
@@ -47351,13 +45923,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][82] = 127,
[3][0][2][0][RTW89_THAILAND][0][82] = 127,
[3][0][2][0][RTW89_FCC][1][97] = 40,
- [3][0][2][0][RTW89_FCC][2][97] = 127,
[3][0][2][0][RTW89_ETSI][1][97] = 127,
[3][0][2][0][RTW89_ETSI][0][97] = 127,
[3][0][2][0][RTW89_MKK][1][97] = 127,
[3][0][2][0][RTW89_MKK][0][97] = 127,
[3][0][2][0][RTW89_IC][1][97] = 40,
- [3][0][2][0][RTW89_IC][2][97] = 127,
[3][0][2][0][RTW89_KCC][1][97] = 26,
[3][0][2][0][RTW89_KCC][0][97] = 127,
[3][0][2][0][RTW89_ACMA][1][97] = 127,
@@ -47370,13 +45940,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][97] = 127,
[3][0][2][0][RTW89_THAILAND][0][97] = 127,
[3][0][2][0][RTW89_FCC][1][112] = 127,
- [3][0][2][0][RTW89_FCC][2][112] = 127,
[3][0][2][0][RTW89_ETSI][1][112] = 127,
[3][0][2][0][RTW89_ETSI][0][112] = 127,
[3][0][2][0][RTW89_MKK][1][112] = 127,
[3][0][2][0][RTW89_MKK][0][112] = 127,
[3][0][2][0][RTW89_IC][1][112] = 127,
- [3][0][2][0][RTW89_IC][2][112] = 127,
[3][0][2][0][RTW89_KCC][1][112] = 127,
[3][0][2][0][RTW89_KCC][0][112] = 127,
[3][0][2][0][RTW89_ACMA][1][112] = 127,
@@ -47389,13 +45957,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][0][2][0][RTW89_THAILAND][1][112] = 127,
[3][0][2][0][RTW89_THAILAND][0][112] = 127,
[3][1][2][0][RTW89_FCC][1][7] = 32,
- [3][1][2][0][RTW89_FCC][2][7] = 46,
[3][1][2][0][RTW89_ETSI][1][7] = 50,
[3][1][2][0][RTW89_ETSI][0][7] = 18,
[3][1][2][0][RTW89_MKK][1][7] = 38,
[3][1][2][0][RTW89_MKK][0][7] = 10,
[3][1][2][0][RTW89_IC][1][7] = 32,
- [3][1][2][0][RTW89_IC][2][7] = 46,
[3][1][2][0][RTW89_KCC][1][7] = 40,
[3][1][2][0][RTW89_KCC][0][7] = 12,
[3][1][2][0][RTW89_ACMA][1][7] = 50,
@@ -47408,13 +45974,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][7] = 46,
[3][1][2][0][RTW89_THAILAND][0][7] = 18,
[3][1][2][0][RTW89_FCC][1][22] = 30,
- [3][1][2][0][RTW89_FCC][2][22] = 52,
[3][1][2][0][RTW89_ETSI][1][22] = 46,
[3][1][2][0][RTW89_ETSI][0][22] = 16,
[3][1][2][0][RTW89_MKK][1][22] = 48,
[3][1][2][0][RTW89_MKK][0][22] = 8,
[3][1][2][0][RTW89_IC][1][22] = 30,
- [3][1][2][0][RTW89_IC][2][22] = 52,
[3][1][2][0][RTW89_KCC][1][22] = 40,
[3][1][2][0][RTW89_KCC][0][22] = 12,
[3][1][2][0][RTW89_ACMA][1][22] = 46,
@@ -47427,13 +45991,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][22] = 46,
[3][1][2][0][RTW89_THAILAND][0][22] = 18,
[3][1][2][0][RTW89_FCC][1][37] = 30,
- [3][1][2][0][RTW89_FCC][2][37] = 52,
[3][1][2][0][RTW89_ETSI][1][37] = 46,
[3][1][2][0][RTW89_ETSI][0][37] = 16,
[3][1][2][0][RTW89_MKK][1][37] = 48,
[3][1][2][0][RTW89_MKK][0][37] = 8,
[3][1][2][0][RTW89_IC][1][37] = 30,
- [3][1][2][0][RTW89_IC][2][37] = 52,
[3][1][2][0][RTW89_KCC][1][37] = 40,
[3][1][2][0][RTW89_KCC][0][37] = 12,
[3][1][2][0][RTW89_ACMA][1][37] = 46,
@@ -47446,13 +46008,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][37] = 46,
[3][1][2][0][RTW89_THAILAND][0][37] = 18,
[3][1][2][0][RTW89_FCC][1][52] = 30,
- [3][1][2][0][RTW89_FCC][2][52] = 127,
[3][1][2][0][RTW89_ETSI][1][52] = 127,
[3][1][2][0][RTW89_ETSI][0][52] = 127,
[3][1][2][0][RTW89_MKK][1][52] = 127,
[3][1][2][0][RTW89_MKK][0][52] = 127,
[3][1][2][0][RTW89_IC][1][52] = 30,
- [3][1][2][0][RTW89_IC][2][52] = 56,
[3][1][2][0][RTW89_KCC][1][52] = 48,
[3][1][2][0][RTW89_KCC][0][52] = 127,
[3][1][2][0][RTW89_ACMA][1][52] = 127,
@@ -47465,13 +46025,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][52] = 127,
[3][1][2][0][RTW89_THAILAND][0][52] = 127,
[3][1][2][0][RTW89_FCC][1][67] = 32,
- [3][1][2][0][RTW89_FCC][2][67] = 54,
[3][1][2][0][RTW89_ETSI][1][67] = 127,
[3][1][2][0][RTW89_ETSI][0][67] = 127,
[3][1][2][0][RTW89_MKK][1][67] = 127,
[3][1][2][0][RTW89_MKK][0][67] = 127,
[3][1][2][0][RTW89_IC][1][67] = 32,
- [3][1][2][0][RTW89_IC][2][67] = 54,
[3][1][2][0][RTW89_KCC][1][67] = 48,
[3][1][2][0][RTW89_KCC][0][67] = 127,
[3][1][2][0][RTW89_ACMA][1][67] = 127,
@@ -47484,13 +46042,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][67] = 127,
[3][1][2][0][RTW89_THAILAND][0][67] = 127,
[3][1][2][0][RTW89_FCC][1][82] = 32,
- [3][1][2][0][RTW89_FCC][2][82] = 127,
[3][1][2][0][RTW89_ETSI][1][82] = 127,
[3][1][2][0][RTW89_ETSI][0][82] = 127,
[3][1][2][0][RTW89_MKK][1][82] = 127,
[3][1][2][0][RTW89_MKK][0][82] = 127,
[3][1][2][0][RTW89_IC][1][82] = 32,
- [3][1][2][0][RTW89_IC][2][82] = 127,
[3][1][2][0][RTW89_KCC][1][82] = 24,
[3][1][2][0][RTW89_KCC][0][82] = 127,
[3][1][2][0][RTW89_ACMA][1][82] = 127,
@@ -47503,13 +46059,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][82] = 127,
[3][1][2][0][RTW89_THAILAND][0][82] = 127,
[3][1][2][0][RTW89_FCC][1][97] = 32,
- [3][1][2][0][RTW89_FCC][2][97] = 127,
[3][1][2][0][RTW89_ETSI][1][97] = 127,
[3][1][2][0][RTW89_ETSI][0][97] = 127,
[3][1][2][0][RTW89_MKK][1][97] = 127,
[3][1][2][0][RTW89_MKK][0][97] = 127,
[3][1][2][0][RTW89_IC][1][97] = 32,
- [3][1][2][0][RTW89_IC][2][97] = 127,
[3][1][2][0][RTW89_KCC][1][97] = 24,
[3][1][2][0][RTW89_KCC][0][97] = 127,
[3][1][2][0][RTW89_ACMA][1][97] = 127,
@@ -47522,13 +46076,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][97] = 127,
[3][1][2][0][RTW89_THAILAND][0][97] = 127,
[3][1][2][0][RTW89_FCC][1][112] = 127,
- [3][1][2][0][RTW89_FCC][2][112] = 127,
[3][1][2][0][RTW89_ETSI][1][112] = 127,
[3][1][2][0][RTW89_ETSI][0][112] = 127,
[3][1][2][0][RTW89_MKK][1][112] = 127,
[3][1][2][0][RTW89_MKK][0][112] = 127,
[3][1][2][0][RTW89_IC][1][112] = 127,
- [3][1][2][0][RTW89_IC][2][112] = 127,
[3][1][2][0][RTW89_KCC][1][112] = 127,
[3][1][2][0][RTW89_KCC][0][112] = 127,
[3][1][2][0][RTW89_ACMA][1][112] = 127,
@@ -47541,13 +46093,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][0][RTW89_THAILAND][1][112] = 127,
[3][1][2][0][RTW89_THAILAND][0][112] = 127,
[3][1][2][1][RTW89_FCC][1][7] = 32,
- [3][1][2][1][RTW89_FCC][2][7] = 46,
[3][1][2][1][RTW89_ETSI][1][7] = 42,
[3][1][2][1][RTW89_ETSI][0][7] = 6,
[3][1][2][1][RTW89_MKK][1][7] = 38,
[3][1][2][1][RTW89_MKK][0][7] = 10,
[3][1][2][1][RTW89_IC][1][7] = 32,
- [3][1][2][1][RTW89_IC][2][7] = 46,
[3][1][2][1][RTW89_KCC][1][7] = 40,
[3][1][2][1][RTW89_KCC][0][7] = 12,
[3][1][2][1][RTW89_ACMA][1][7] = 42,
@@ -47560,13 +46110,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][7] = 46,
[3][1][2][1][RTW89_THAILAND][0][7] = 6,
[3][1][2][1][RTW89_FCC][1][22] = 30,
- [3][1][2][1][RTW89_FCC][2][22] = 52,
[3][1][2][1][RTW89_ETSI][1][22] = 42,
[3][1][2][1][RTW89_ETSI][0][22] = 6,
[3][1][2][1][RTW89_MKK][1][22] = 48,
[3][1][2][1][RTW89_MKK][0][22] = 8,
[3][1][2][1][RTW89_IC][1][22] = 30,
- [3][1][2][1][RTW89_IC][2][22] = 52,
[3][1][2][1][RTW89_KCC][1][22] = 40,
[3][1][2][1][RTW89_KCC][0][22] = 12,
[3][1][2][1][RTW89_ACMA][1][22] = 42,
@@ -47579,13 +46127,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][22] = 46,
[3][1][2][1][RTW89_THAILAND][0][22] = 6,
[3][1][2][1][RTW89_FCC][1][37] = 30,
- [3][1][2][1][RTW89_FCC][2][37] = 52,
[3][1][2][1][RTW89_ETSI][1][37] = 42,
[3][1][2][1][RTW89_ETSI][0][37] = 6,
[3][1][2][1][RTW89_MKK][1][37] = 48,
[3][1][2][1][RTW89_MKK][0][37] = 8,
[3][1][2][1][RTW89_IC][1][37] = 30,
- [3][1][2][1][RTW89_IC][2][37] = 52,
[3][1][2][1][RTW89_KCC][1][37] = 40,
[3][1][2][1][RTW89_KCC][0][37] = 12,
[3][1][2][1][RTW89_ACMA][1][37] = 42,
@@ -47598,13 +46144,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][37] = 46,
[3][1][2][1][RTW89_THAILAND][0][37] = 6,
[3][1][2][1][RTW89_FCC][1][52] = 30,
- [3][1][2][1][RTW89_FCC][2][52] = 127,
[3][1][2][1][RTW89_ETSI][1][52] = 127,
[3][1][2][1][RTW89_ETSI][0][52] = 127,
[3][1][2][1][RTW89_MKK][1][52] = 127,
[3][1][2][1][RTW89_MKK][0][52] = 127,
[3][1][2][1][RTW89_IC][1][52] = 30,
- [3][1][2][1][RTW89_IC][2][52] = 56,
[3][1][2][1][RTW89_KCC][1][52] = 48,
[3][1][2][1][RTW89_KCC][0][52] = 127,
[3][1][2][1][RTW89_ACMA][1][52] = 127,
@@ -47617,13 +46161,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][52] = 127,
[3][1][2][1][RTW89_THAILAND][0][52] = 127,
[3][1][2][1][RTW89_FCC][1][67] = 32,
- [3][1][2][1][RTW89_FCC][2][67] = 54,
[3][1][2][1][RTW89_ETSI][1][67] = 127,
[3][1][2][1][RTW89_ETSI][0][67] = 127,
[3][1][2][1][RTW89_MKK][1][67] = 127,
[3][1][2][1][RTW89_MKK][0][67] = 127,
[3][1][2][1][RTW89_IC][1][67] = 32,
- [3][1][2][1][RTW89_IC][2][67] = 54,
[3][1][2][1][RTW89_KCC][1][67] = 48,
[3][1][2][1][RTW89_KCC][0][67] = 127,
[3][1][2][1][RTW89_ACMA][1][67] = 127,
@@ -47636,13 +46178,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][67] = 127,
[3][1][2][1][RTW89_THAILAND][0][67] = 127,
[3][1][2][1][RTW89_FCC][1][82] = 32,
- [3][1][2][1][RTW89_FCC][2][82] = 127,
[3][1][2][1][RTW89_ETSI][1][82] = 127,
[3][1][2][1][RTW89_ETSI][0][82] = 127,
[3][1][2][1][RTW89_MKK][1][82] = 127,
[3][1][2][1][RTW89_MKK][0][82] = 127,
[3][1][2][1][RTW89_IC][1][82] = 32,
- [3][1][2][1][RTW89_IC][2][82] = 127,
[3][1][2][1][RTW89_KCC][1][82] = 24,
[3][1][2][1][RTW89_KCC][0][82] = 127,
[3][1][2][1][RTW89_ACMA][1][82] = 127,
@@ -47655,13 +46195,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][82] = 127,
[3][1][2][1][RTW89_THAILAND][0][82] = 127,
[3][1][2][1][RTW89_FCC][1][97] = 32,
- [3][1][2][1][RTW89_FCC][2][97] = 127,
[3][1][2][1][RTW89_ETSI][1][97] = 127,
[3][1][2][1][RTW89_ETSI][0][97] = 127,
[3][1][2][1][RTW89_MKK][1][97] = 127,
[3][1][2][1][RTW89_MKK][0][97] = 127,
[3][1][2][1][RTW89_IC][1][97] = 32,
- [3][1][2][1][RTW89_IC][2][97] = 127,
[3][1][2][1][RTW89_KCC][1][97] = 24,
[3][1][2][1][RTW89_KCC][0][97] = 127,
[3][1][2][1][RTW89_ACMA][1][97] = 127,
@@ -47674,13 +46212,11 @@ const s8 rtw89_8852c_txpwr_lmt_6g[RTW89_6G_BW_NUM][RTW89_NTX_NUM]
[3][1][2][1][RTW89_THAILAND][1][97] = 127,
[3][1][2][1][RTW89_THAILAND][0][97] = 127,
[3][1][2][1][RTW89_FCC][1][112] = 127,
- [3][1][2][1][RTW89_FCC][2][112] = 127,
[3][1][2][1][RTW89_ETSI][1][112] = 127,
[3][1][2][1][RTW89_ETSI][0][112] = 127,
[3][1][2][1][RTW89_MKK][1][112] = 127,
[3][1][2][1][RTW89_MKK][0][112] = 127,
[3][1][2][1][RTW89_IC][1][112] = 127,
- [3][1][2][1][RTW89_IC][2][112] = 127,
[3][1][2][1][RTW89_KCC][1][112] = 127,
[3][1][2][1][RTW89_KCC][0][112] = 127,
[3][1][2][1][RTW89_ACMA][1][112] = 127,
@@ -49374,7 +47910,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][48] = 46,
[0][0][RTW89_ETSI][48] = 127,
[0][0][RTW89_MKK][48] = 127,
- [0][0][RTW89_IC][48] = 127,
+ [0][0][RTW89_IC][48] = 46,
[0][0][RTW89_KCC][48] = 127,
[0][0][RTW89_ACMA][48] = 127,
[0][0][RTW89_CN][48] = 127,
@@ -49387,7 +47923,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][50] = 44,
[0][0][RTW89_ETSI][50] = 127,
[0][0][RTW89_MKK][50] = 127,
- [0][0][RTW89_IC][50] = 127,
+ [0][0][RTW89_IC][50] = 44,
[0][0][RTW89_KCC][50] = 127,
[0][0][RTW89_ACMA][50] = 127,
[0][0][RTW89_CN][50] = 127,
@@ -49400,7 +47936,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_FCC][52] = 34,
[0][0][RTW89_ETSI][52] = 127,
[0][0][RTW89_MKK][52] = 127,
- [0][0][RTW89_IC][52] = 127,
+ [0][0][RTW89_IC][52] = 34,
[0][0][RTW89_KCC][52] = 127,
[0][0][RTW89_ACMA][52] = 127,
[0][0][RTW89_CN][52] = 127,
@@ -49738,7 +48274,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][48] = 20,
[0][1][RTW89_ETSI][48] = 127,
[0][1][RTW89_MKK][48] = 127,
- [0][1][RTW89_IC][48] = 127,
+ [0][1][RTW89_IC][48] = 20,
[0][1][RTW89_KCC][48] = 127,
[0][1][RTW89_ACMA][48] = 127,
[0][1][RTW89_CN][48] = 127,
@@ -49751,7 +48287,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][50] = 20,
[0][1][RTW89_ETSI][50] = 127,
[0][1][RTW89_MKK][50] = 127,
- [0][1][RTW89_IC][50] = 127,
+ [0][1][RTW89_IC][50] = 20,
[0][1][RTW89_KCC][50] = 127,
[0][1][RTW89_ACMA][50] = 127,
[0][1][RTW89_CN][50] = 127,
@@ -49764,7 +48300,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_FCC][52] = 8,
[0][1][RTW89_ETSI][52] = 127,
[0][1][RTW89_MKK][52] = 127,
- [0][1][RTW89_IC][52] = 127,
+ [0][1][RTW89_IC][52] = 8,
[0][1][RTW89_KCC][52] = 127,
[0][1][RTW89_ACMA][52] = 127,
[0][1][RTW89_CN][52] = 127,
@@ -50102,7 +48638,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][48] = 56,
[1][0][RTW89_ETSI][48] = 127,
[1][0][RTW89_MKK][48] = 127,
- [1][0][RTW89_IC][48] = 127,
+ [1][0][RTW89_IC][48] = 56,
[1][0][RTW89_KCC][48] = 127,
[1][0][RTW89_ACMA][48] = 127,
[1][0][RTW89_CN][48] = 127,
@@ -50115,7 +48651,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][50] = 58,
[1][0][RTW89_ETSI][50] = 127,
[1][0][RTW89_MKK][50] = 127,
- [1][0][RTW89_IC][50] = 127,
+ [1][0][RTW89_IC][50] = 58,
[1][0][RTW89_KCC][50] = 127,
[1][0][RTW89_ACMA][50] = 127,
[1][0][RTW89_CN][50] = 127,
@@ -50128,7 +48664,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_FCC][52] = 56,
[1][0][RTW89_ETSI][52] = 127,
[1][0][RTW89_MKK][52] = 127,
- [1][0][RTW89_IC][52] = 127,
+ [1][0][RTW89_IC][52] = 56,
[1][0][RTW89_KCC][52] = 127,
[1][0][RTW89_ACMA][52] = 127,
[1][0][RTW89_CN][52] = 127,
@@ -50466,7 +49002,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][48] = 34,
[1][1][RTW89_ETSI][48] = 127,
[1][1][RTW89_MKK][48] = 127,
- [1][1][RTW89_IC][48] = 127,
+ [1][1][RTW89_IC][48] = 34,
[1][1][RTW89_KCC][48] = 127,
[1][1][RTW89_ACMA][48] = 127,
[1][1][RTW89_CN][48] = 127,
@@ -50479,7 +49015,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][50] = 34,
[1][1][RTW89_ETSI][50] = 127,
[1][1][RTW89_MKK][50] = 127,
- [1][1][RTW89_IC][50] = 127,
+ [1][1][RTW89_IC][50] = 34,
[1][1][RTW89_KCC][50] = 127,
[1][1][RTW89_ACMA][50] = 127,
[1][1][RTW89_CN][50] = 127,
@@ -50492,7 +49028,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_FCC][52] = 30,
[1][1][RTW89_ETSI][52] = 127,
[1][1][RTW89_MKK][52] = 127,
- [1][1][RTW89_IC][52] = 127,
+ [1][1][RTW89_IC][52] = 30,
[1][1][RTW89_KCC][52] = 127,
[1][1][RTW89_ACMA][52] = 127,
[1][1][RTW89_CN][52] = 127,
@@ -50830,7 +49366,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][48] = 64,
[2][0][RTW89_ETSI][48] = 127,
[2][0][RTW89_MKK][48] = 127,
- [2][0][RTW89_IC][48] = 127,
+ [2][0][RTW89_IC][48] = 64,
[2][0][RTW89_KCC][48] = 127,
[2][0][RTW89_ACMA][48] = 127,
[2][0][RTW89_CN][48] = 127,
@@ -50843,7 +49379,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][50] = 64,
[2][0][RTW89_ETSI][50] = 127,
[2][0][RTW89_MKK][50] = 127,
- [2][0][RTW89_IC][50] = 127,
+ [2][0][RTW89_IC][50] = 64,
[2][0][RTW89_KCC][50] = 127,
[2][0][RTW89_ACMA][50] = 127,
[2][0][RTW89_CN][50] = 127,
@@ -50856,7 +49392,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_FCC][52] = 64,
[2][0][RTW89_ETSI][52] = 127,
[2][0][RTW89_MKK][52] = 127,
- [2][0][RTW89_IC][52] = 127,
+ [2][0][RTW89_IC][52] = 64,
[2][0][RTW89_KCC][52] = 127,
[2][0][RTW89_ACMA][52] = 127,
[2][0][RTW89_CN][52] = 127,
@@ -51194,7 +49730,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][48] = 40,
[2][1][RTW89_ETSI][48] = 127,
[2][1][RTW89_MKK][48] = 127,
- [2][1][RTW89_IC][48] = 127,
+ [2][1][RTW89_IC][48] = 40,
[2][1][RTW89_KCC][48] = 127,
[2][1][RTW89_ACMA][48] = 127,
[2][1][RTW89_CN][48] = 127,
@@ -51207,7 +49743,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][50] = 40,
[2][1][RTW89_ETSI][50] = 127,
[2][1][RTW89_MKK][50] = 127,
- [2][1][RTW89_IC][50] = 127,
+ [2][1][RTW89_IC][50] = 40,
[2][1][RTW89_KCC][50] = 127,
[2][1][RTW89_ACMA][50] = 127,
[2][1][RTW89_CN][50] = 127,
@@ -51220,7 +49756,7 @@ const s8 rtw89_8852c_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_FCC][52] = 40,
[2][1][RTW89_ETSI][52] = 127,
[2][1][RTW89_MKK][52] = 127,
- [2][1][RTW89_IC][52] = 127,
+ [2][1][RTW89_IC][52] = 40,
[2][1][RTW89_KCC][52] = 127,
[2][1][RTW89_ACMA][52] = 127,
[2][1][RTW89_CN][52] = 127,
@@ -51238,1164 +49774,778 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[RTW89_6G_CH_NUM] = {
[0][0][RTW89_WW][0][0] = -16,
[0][0][RTW89_WW][1][0] = -16,
- [0][0][RTW89_WW][2][0] = 44,
[0][0][RTW89_WW][0][2] = -18,
[0][0][RTW89_WW][1][2] = -18,
- [0][0][RTW89_WW][2][2] = 44,
[0][0][RTW89_WW][0][4] = -18,
[0][0][RTW89_WW][1][4] = -18,
- [0][0][RTW89_WW][2][4] = 44,
[0][0][RTW89_WW][0][6] = -18,
[0][0][RTW89_WW][1][6] = -18,
- [0][0][RTW89_WW][2][6] = 44,
[0][0][RTW89_WW][0][8] = -18,
[0][0][RTW89_WW][1][8] = -18,
- [0][0][RTW89_WW][2][8] = 44,
[0][0][RTW89_WW][0][10] = -18,
[0][0][RTW89_WW][1][10] = -18,
- [0][0][RTW89_WW][2][10] = 44,
[0][0][RTW89_WW][0][12] = -18,
[0][0][RTW89_WW][1][12] = -18,
- [0][0][RTW89_WW][2][12] = 44,
[0][0][RTW89_WW][0][14] = -18,
[0][0][RTW89_WW][1][14] = -18,
- [0][0][RTW89_WW][2][14] = 44,
[0][0][RTW89_WW][0][15] = -18,
[0][0][RTW89_WW][1][15] = -18,
- [0][0][RTW89_WW][2][15] = 44,
[0][0][RTW89_WW][0][17] = -18,
[0][0][RTW89_WW][1][17] = -18,
- [0][0][RTW89_WW][2][17] = 44,
[0][0][RTW89_WW][0][19] = -18,
[0][0][RTW89_WW][1][19] = -18,
- [0][0][RTW89_WW][2][19] = 44,
[0][0][RTW89_WW][0][21] = -18,
[0][0][RTW89_WW][1][21] = -18,
- [0][0][RTW89_WW][2][21] = 44,
[0][0][RTW89_WW][0][23] = -18,
[0][0][RTW89_WW][1][23] = -18,
- [0][0][RTW89_WW][2][23] = 54,
[0][0][RTW89_WW][0][25] = -18,
[0][0][RTW89_WW][1][25] = -18,
- [0][0][RTW89_WW][2][25] = 54,
[0][0][RTW89_WW][0][27] = -18,
[0][0][RTW89_WW][1][27] = -18,
- [0][0][RTW89_WW][2][27] = 54,
[0][0][RTW89_WW][0][29] = -18,
[0][0][RTW89_WW][1][29] = -18,
- [0][0][RTW89_WW][2][29] = 54,
[0][0][RTW89_WW][0][30] = -18,
[0][0][RTW89_WW][1][30] = -18,
- [0][0][RTW89_WW][2][30] = 54,
[0][0][RTW89_WW][0][32] = -18,
[0][0][RTW89_WW][1][32] = -18,
- [0][0][RTW89_WW][2][32] = 54,
[0][0][RTW89_WW][0][34] = -18,
[0][0][RTW89_WW][1][34] = -18,
- [0][0][RTW89_WW][2][34] = 54,
[0][0][RTW89_WW][0][36] = -18,
[0][0][RTW89_WW][1][36] = -18,
- [0][0][RTW89_WW][2][36] = 54,
[0][0][RTW89_WW][0][38] = -18,
[0][0][RTW89_WW][1][38] = -18,
- [0][0][RTW89_WW][2][38] = 54,
[0][0][RTW89_WW][0][40] = -18,
[0][0][RTW89_WW][1][40] = -18,
- [0][0][RTW89_WW][2][40] = 54,
[0][0][RTW89_WW][0][42] = -18,
[0][0][RTW89_WW][1][42] = -18,
- [0][0][RTW89_WW][2][42] = 54,
[0][0][RTW89_WW][0][44] = -16,
[0][0][RTW89_WW][1][44] = -16,
- [0][0][RTW89_WW][2][44] = 56,
[0][0][RTW89_WW][0][45] = -16,
[0][0][RTW89_WW][1][45] = -16,
- [0][0][RTW89_WW][2][45] = 56,
[0][0][RTW89_WW][0][47] = -18,
[0][0][RTW89_WW][1][47] = -18,
- [0][0][RTW89_WW][2][47] = 56,
[0][0][RTW89_WW][0][49] = -18,
[0][0][RTW89_WW][1][49] = -18,
- [0][0][RTW89_WW][2][49] = 56,
[0][0][RTW89_WW][0][51] = -18,
[0][0][RTW89_WW][1][51] = -18,
- [0][0][RTW89_WW][2][51] = 56,
[0][0][RTW89_WW][0][53] = -16,
[0][0][RTW89_WW][1][53] = -16,
- [0][0][RTW89_WW][2][53] = 56,
[0][0][RTW89_WW][0][55] = -18,
[0][0][RTW89_WW][1][55] = -18,
- [0][0][RTW89_WW][2][55] = 56,
[0][0][RTW89_WW][0][57] = -18,
[0][0][RTW89_WW][1][57] = -18,
- [0][0][RTW89_WW][2][57] = 56,
[0][0][RTW89_WW][0][59] = -18,
[0][0][RTW89_WW][1][59] = -18,
- [0][0][RTW89_WW][2][59] = 56,
[0][0][RTW89_WW][0][60] = -18,
[0][0][RTW89_WW][1][60] = -18,
- [0][0][RTW89_WW][2][60] = 56,
[0][0][RTW89_WW][0][62] = -18,
[0][0][RTW89_WW][1][62] = -18,
- [0][0][RTW89_WW][2][62] = 56,
[0][0][RTW89_WW][0][64] = -18,
[0][0][RTW89_WW][1][64] = -18,
- [0][0][RTW89_WW][2][64] = 56,
[0][0][RTW89_WW][0][66] = -18,
[0][0][RTW89_WW][1][66] = -18,
- [0][0][RTW89_WW][2][66] = 56,
[0][0][RTW89_WW][0][68] = -18,
[0][0][RTW89_WW][1][68] = -18,
- [0][0][RTW89_WW][2][68] = 56,
[0][0][RTW89_WW][0][70] = -16,
[0][0][RTW89_WW][1][70] = -16,
- [0][0][RTW89_WW][2][70] = 56,
[0][0][RTW89_WW][0][72] = -18,
[0][0][RTW89_WW][1][72] = -18,
- [0][0][RTW89_WW][2][72] = 56,
[0][0][RTW89_WW][0][74] = -18,
[0][0][RTW89_WW][1][74] = -18,
- [0][0][RTW89_WW][2][74] = 56,
[0][0][RTW89_WW][0][75] = -18,
[0][0][RTW89_WW][1][75] = -18,
- [0][0][RTW89_WW][2][75] = 56,
[0][0][RTW89_WW][0][77] = -18,
[0][0][RTW89_WW][1][77] = -18,
- [0][0][RTW89_WW][2][77] = 56,
[0][0][RTW89_WW][0][79] = -18,
[0][0][RTW89_WW][1][79] = -18,
- [0][0][RTW89_WW][2][79] = 56,
[0][0][RTW89_WW][0][81] = -18,
[0][0][RTW89_WW][1][81] = -18,
- [0][0][RTW89_WW][2][81] = 56,
[0][0][RTW89_WW][0][83] = -18,
[0][0][RTW89_WW][1][83] = -18,
- [0][0][RTW89_WW][2][83] = 56,
[0][0][RTW89_WW][0][85] = -18,
[0][0][RTW89_WW][1][85] = -18,
- [0][0][RTW89_WW][2][85] = 56,
[0][0][RTW89_WW][0][87] = -16,
[0][0][RTW89_WW][1][87] = -16,
- [0][0][RTW89_WW][2][87] = 0,
[0][0][RTW89_WW][0][89] = -16,
[0][0][RTW89_WW][1][89] = -16,
- [0][0][RTW89_WW][2][89] = 0,
[0][0][RTW89_WW][0][90] = -16,
[0][0][RTW89_WW][1][90] = -16,
- [0][0][RTW89_WW][2][90] = 0,
[0][0][RTW89_WW][0][92] = -16,
[0][0][RTW89_WW][1][92] = -16,
- [0][0][RTW89_WW][2][92] = 0,
[0][0][RTW89_WW][0][94] = -16,
[0][0][RTW89_WW][1][94] = -16,
- [0][0][RTW89_WW][2][94] = 0,
[0][0][RTW89_WW][0][96] = -16,
[0][0][RTW89_WW][1][96] = -16,
- [0][0][RTW89_WW][2][96] = 0,
[0][0][RTW89_WW][0][98] = -16,
[0][0][RTW89_WW][1][98] = -16,
- [0][0][RTW89_WW][2][98] = 0,
[0][0][RTW89_WW][0][100] = -16,
[0][0][RTW89_WW][1][100] = -16,
- [0][0][RTW89_WW][2][100] = 0,
[0][0][RTW89_WW][0][102] = -16,
[0][0][RTW89_WW][1][102] = -16,
- [0][0][RTW89_WW][2][102] = 0,
[0][0][RTW89_WW][0][104] = -16,
[0][0][RTW89_WW][1][104] = -16,
- [0][0][RTW89_WW][2][104] = 0,
[0][0][RTW89_WW][0][105] = -16,
[0][0][RTW89_WW][1][105] = -16,
- [0][0][RTW89_WW][2][105] = 0,
[0][0][RTW89_WW][0][107] = -12,
[0][0][RTW89_WW][1][107] = -12,
- [0][0][RTW89_WW][2][107] = 0,
[0][0][RTW89_WW][0][109] = -12,
[0][0][RTW89_WW][1][109] = -12,
- [0][0][RTW89_WW][2][109] = 0,
[0][0][RTW89_WW][0][111] = 0,
[0][0][RTW89_WW][1][111] = 0,
- [0][0][RTW89_WW][2][111] = 0,
[0][0][RTW89_WW][0][113] = 0,
[0][0][RTW89_WW][1][113] = 0,
- [0][0][RTW89_WW][2][113] = 0,
[0][0][RTW89_WW][0][115] = 0,
[0][0][RTW89_WW][1][115] = 0,
- [0][0][RTW89_WW][2][115] = 0,
[0][0][RTW89_WW][0][117] = 0,
[0][0][RTW89_WW][1][117] = 0,
- [0][0][RTW89_WW][2][117] = 0,
[0][0][RTW89_WW][0][119] = 0,
[0][0][RTW89_WW][1][119] = 0,
- [0][0][RTW89_WW][2][119] = 0,
[0][1][RTW89_WW][0][0] = -40,
[0][1][RTW89_WW][1][0] = -40,
- [0][1][RTW89_WW][2][0] = 32,
[0][1][RTW89_WW][0][2] = -40,
[0][1][RTW89_WW][1][2] = -40,
- [0][1][RTW89_WW][2][2] = 32,
[0][1][RTW89_WW][0][4] = -40,
[0][1][RTW89_WW][1][4] = -40,
- [0][1][RTW89_WW][2][4] = 32,
[0][1][RTW89_WW][0][6] = -40,
[0][1][RTW89_WW][1][6] = -40,
- [0][1][RTW89_WW][2][6] = 32,
[0][1][RTW89_WW][0][8] = -40,
[0][1][RTW89_WW][1][8] = -40,
- [0][1][RTW89_WW][2][8] = 32,
[0][1][RTW89_WW][0][10] = -40,
[0][1][RTW89_WW][1][10] = -40,
- [0][1][RTW89_WW][2][10] = 32,
[0][1][RTW89_WW][0][12] = -40,
[0][1][RTW89_WW][1][12] = -40,
- [0][1][RTW89_WW][2][12] = 32,
[0][1][RTW89_WW][0][14] = -40,
[0][1][RTW89_WW][1][14] = -40,
- [0][1][RTW89_WW][2][14] = 32,
[0][1][RTW89_WW][0][15] = -40,
[0][1][RTW89_WW][1][15] = -40,
- [0][1][RTW89_WW][2][15] = 32,
[0][1][RTW89_WW][0][17] = -40,
[0][1][RTW89_WW][1][17] = -40,
- [0][1][RTW89_WW][2][17] = 32,
[0][1][RTW89_WW][0][19] = -40,
[0][1][RTW89_WW][1][19] = -40,
- [0][1][RTW89_WW][2][19] = 32,
[0][1][RTW89_WW][0][21] = -40,
[0][1][RTW89_WW][1][21] = -40,
- [0][1][RTW89_WW][2][21] = 32,
[0][1][RTW89_WW][0][23] = -40,
[0][1][RTW89_WW][1][23] = -40,
- [0][1][RTW89_WW][2][23] = 32,
[0][1][RTW89_WW][0][25] = -40,
[0][1][RTW89_WW][1][25] = -40,
- [0][1][RTW89_WW][2][25] = 32,
[0][1][RTW89_WW][0][27] = -40,
[0][1][RTW89_WW][1][27] = -40,
- [0][1][RTW89_WW][2][27] = 32,
[0][1][RTW89_WW][0][29] = -40,
[0][1][RTW89_WW][1][29] = -40,
- [0][1][RTW89_WW][2][29] = 32,
[0][1][RTW89_WW][0][30] = -40,
[0][1][RTW89_WW][1][30] = -40,
- [0][1][RTW89_WW][2][30] = 32,
[0][1][RTW89_WW][0][32] = -40,
[0][1][RTW89_WW][1][32] = -40,
- [0][1][RTW89_WW][2][32] = 32,
[0][1][RTW89_WW][0][34] = -40,
[0][1][RTW89_WW][1][34] = -40,
- [0][1][RTW89_WW][2][34] = 32,
[0][1][RTW89_WW][0][36] = -40,
[0][1][RTW89_WW][1][36] = -40,
- [0][1][RTW89_WW][2][36] = 32,
[0][1][RTW89_WW][0][38] = -40,
[0][1][RTW89_WW][1][38] = -40,
- [0][1][RTW89_WW][2][38] = 32,
[0][1][RTW89_WW][0][40] = -40,
[0][1][RTW89_WW][1][40] = -40,
- [0][1][RTW89_WW][2][40] = 32,
[0][1][RTW89_WW][0][42] = -40,
[0][1][RTW89_WW][1][42] = -40,
- [0][1][RTW89_WW][2][42] = 32,
[0][1][RTW89_WW][0][44] = -40,
[0][1][RTW89_WW][1][44] = -40,
- [0][1][RTW89_WW][2][44] = 32,
[0][1][RTW89_WW][0][45] = -40,
[0][1][RTW89_WW][1][45] = -40,
- [0][1][RTW89_WW][2][45] = 32,
[0][1][RTW89_WW][0][47] = -40,
[0][1][RTW89_WW][1][47] = -40,
- [0][1][RTW89_WW][2][47] = 32,
[0][1][RTW89_WW][0][49] = -40,
[0][1][RTW89_WW][1][49] = -40,
- [0][1][RTW89_WW][2][49] = 32,
[0][1][RTW89_WW][0][51] = -40,
[0][1][RTW89_WW][1][51] = -40,
- [0][1][RTW89_WW][2][51] = 32,
[0][1][RTW89_WW][0][53] = -40,
[0][1][RTW89_WW][1][53] = -40,
- [0][1][RTW89_WW][2][53] = 32,
[0][1][RTW89_WW][0][55] = -40,
[0][1][RTW89_WW][1][55] = -40,
- [0][1][RTW89_WW][2][55] = 30,
[0][1][RTW89_WW][0][57] = -40,
[0][1][RTW89_WW][1][57] = -40,
- [0][1][RTW89_WW][2][57] = 30,
[0][1][RTW89_WW][0][59] = -40,
[0][1][RTW89_WW][1][59] = -40,
- [0][1][RTW89_WW][2][59] = 30,
[0][1][RTW89_WW][0][60] = -40,
[0][1][RTW89_WW][1][60] = -40,
- [0][1][RTW89_WW][2][60] = 30,
[0][1][RTW89_WW][0][62] = -40,
[0][1][RTW89_WW][1][62] = -40,
- [0][1][RTW89_WW][2][62] = 30,
[0][1][RTW89_WW][0][64] = -40,
[0][1][RTW89_WW][1][64] = -40,
- [0][1][RTW89_WW][2][64] = 30,
[0][1][RTW89_WW][0][66] = -40,
[0][1][RTW89_WW][1][66] = -40,
- [0][1][RTW89_WW][2][66] = 30,
[0][1][RTW89_WW][0][68] = -40,
[0][1][RTW89_WW][1][68] = -40,
- [0][1][RTW89_WW][2][68] = 30,
[0][1][RTW89_WW][0][70] = -38,
[0][1][RTW89_WW][1][70] = -38,
- [0][1][RTW89_WW][2][70] = 30,
[0][1][RTW89_WW][0][72] = -38,
[0][1][RTW89_WW][1][72] = -38,
- [0][1][RTW89_WW][2][72] = 30,
[0][1][RTW89_WW][0][74] = -38,
[0][1][RTW89_WW][1][74] = -38,
- [0][1][RTW89_WW][2][74] = 30,
[0][1][RTW89_WW][0][75] = -38,
[0][1][RTW89_WW][1][75] = -38,
- [0][1][RTW89_WW][2][75] = 30,
[0][1][RTW89_WW][0][77] = -38,
[0][1][RTW89_WW][1][77] = -38,
- [0][1][RTW89_WW][2][77] = 30,
[0][1][RTW89_WW][0][79] = -38,
[0][1][RTW89_WW][1][79] = -38,
- [0][1][RTW89_WW][2][79] = 30,
[0][1][RTW89_WW][0][81] = -38,
[0][1][RTW89_WW][1][81] = -38,
- [0][1][RTW89_WW][2][81] = 30,
[0][1][RTW89_WW][0][83] = -38,
[0][1][RTW89_WW][1][83] = -38,
- [0][1][RTW89_WW][2][83] = 30,
[0][1][RTW89_WW][0][85] = -38,
[0][1][RTW89_WW][1][85] = -38,
- [0][1][RTW89_WW][2][85] = 30,
[0][1][RTW89_WW][0][87] = -40,
[0][1][RTW89_WW][1][87] = -40,
- [0][1][RTW89_WW][2][87] = 0,
[0][1][RTW89_WW][0][89] = -38,
[0][1][RTW89_WW][1][89] = -38,
- [0][1][RTW89_WW][2][89] = 0,
[0][1][RTW89_WW][0][90] = -38,
[0][1][RTW89_WW][1][90] = -38,
- [0][1][RTW89_WW][2][90] = 0,
[0][1][RTW89_WW][0][92] = -38,
[0][1][RTW89_WW][1][92] = -38,
- [0][1][RTW89_WW][2][92] = 0,
[0][1][RTW89_WW][0][94] = -38,
[0][1][RTW89_WW][1][94] = -38,
- [0][1][RTW89_WW][2][94] = 0,
[0][1][RTW89_WW][0][96] = -38,
[0][1][RTW89_WW][1][96] = -38,
- [0][1][RTW89_WW][2][96] = 0,
[0][1][RTW89_WW][0][98] = -38,
[0][1][RTW89_WW][1][98] = -38,
- [0][1][RTW89_WW][2][98] = 0,
[0][1][RTW89_WW][0][100] = -38,
[0][1][RTW89_WW][1][100] = -38,
- [0][1][RTW89_WW][2][100] = 0,
[0][1][RTW89_WW][0][102] = -38,
[0][1][RTW89_WW][1][102] = -38,
- [0][1][RTW89_WW][2][102] = 0,
[0][1][RTW89_WW][0][104] = -38,
[0][1][RTW89_WW][1][104] = -38,
- [0][1][RTW89_WW][2][104] = 0,
[0][1][RTW89_WW][0][105] = -38,
[0][1][RTW89_WW][1][105] = -38,
- [0][1][RTW89_WW][2][105] = 0,
[0][1][RTW89_WW][0][107] = -34,
[0][1][RTW89_WW][1][107] = -34,
- [0][1][RTW89_WW][2][107] = 0,
[0][1][RTW89_WW][0][109] = -34,
[0][1][RTW89_WW][1][109] = -34,
- [0][1][RTW89_WW][2][109] = 0,
[0][1][RTW89_WW][0][111] = 0,
[0][1][RTW89_WW][1][111] = 0,
- [0][1][RTW89_WW][2][111] = 0,
[0][1][RTW89_WW][0][113] = 0,
[0][1][RTW89_WW][1][113] = 0,
- [0][1][RTW89_WW][2][113] = 0,
[0][1][RTW89_WW][0][115] = 0,
[0][1][RTW89_WW][1][115] = 0,
- [0][1][RTW89_WW][2][115] = 0,
[0][1][RTW89_WW][0][117] = 0,
[0][1][RTW89_WW][1][117] = 0,
- [0][1][RTW89_WW][2][117] = 0,
[0][1][RTW89_WW][0][119] = 0,
[0][1][RTW89_WW][1][119] = 0,
- [0][1][RTW89_WW][2][119] = 0,
[1][0][RTW89_WW][0][0] = -4,
[1][0][RTW89_WW][1][0] = -4,
- [1][0][RTW89_WW][2][0] = 52,
[1][0][RTW89_WW][0][2] = -4,
[1][0][RTW89_WW][1][2] = -4,
- [1][0][RTW89_WW][2][2] = 52,
[1][0][RTW89_WW][0][4] = -4,
[1][0][RTW89_WW][1][4] = -4,
- [1][0][RTW89_WW][2][4] = 52,
[1][0][RTW89_WW][0][6] = -4,
[1][0][RTW89_WW][1][6] = -4,
- [1][0][RTW89_WW][2][6] = 52,
[1][0][RTW89_WW][0][8] = -4,
[1][0][RTW89_WW][1][8] = -4,
- [1][0][RTW89_WW][2][8] = 52,
[1][0][RTW89_WW][0][10] = -4,
[1][0][RTW89_WW][1][10] = -4,
- [1][0][RTW89_WW][2][10] = 52,
[1][0][RTW89_WW][0][12] = -4,
[1][0][RTW89_WW][1][12] = -4,
- [1][0][RTW89_WW][2][12] = 52,
[1][0][RTW89_WW][0][14] = -4,
[1][0][RTW89_WW][1][14] = -4,
- [1][0][RTW89_WW][2][14] = 52,
[1][0][RTW89_WW][0][15] = -4,
[1][0][RTW89_WW][1][15] = -4,
- [1][0][RTW89_WW][2][15] = 52,
[1][0][RTW89_WW][0][17] = -4,
[1][0][RTW89_WW][1][17] = -4,
- [1][0][RTW89_WW][2][17] = 52,
[1][0][RTW89_WW][0][19] = -4,
[1][0][RTW89_WW][1][19] = -4,
- [1][0][RTW89_WW][2][19] = 52,
[1][0][RTW89_WW][0][21] = -4,
[1][0][RTW89_WW][1][21] = -4,
- [1][0][RTW89_WW][2][21] = 52,
[1][0][RTW89_WW][0][23] = -4,
[1][0][RTW89_WW][1][23] = -4,
- [1][0][RTW89_WW][2][23] = 66,
[1][0][RTW89_WW][0][25] = -4,
[1][0][RTW89_WW][1][25] = -4,
- [1][0][RTW89_WW][2][25] = 66,
[1][0][RTW89_WW][0][27] = -4,
[1][0][RTW89_WW][1][27] = -4,
- [1][0][RTW89_WW][2][27] = 66,
[1][0][RTW89_WW][0][29] = -4,
[1][0][RTW89_WW][1][29] = -4,
- [1][0][RTW89_WW][2][29] = 66,
[1][0][RTW89_WW][0][30] = -4,
[1][0][RTW89_WW][1][30] = -4,
- [1][0][RTW89_WW][2][30] = 66,
[1][0][RTW89_WW][0][32] = -4,
[1][0][RTW89_WW][1][32] = -4,
- [1][0][RTW89_WW][2][32] = 66,
[1][0][RTW89_WW][0][34] = -4,
[1][0][RTW89_WW][1][34] = -4,
- [1][0][RTW89_WW][2][34] = 66,
[1][0][RTW89_WW][0][36] = -4,
[1][0][RTW89_WW][1][36] = -4,
- [1][0][RTW89_WW][2][36] = 66,
[1][0][RTW89_WW][0][38] = -4,
[1][0][RTW89_WW][1][38] = -4,
- [1][0][RTW89_WW][2][38] = 66,
[1][0][RTW89_WW][0][40] = -4,
[1][0][RTW89_WW][1][40] = -4,
- [1][0][RTW89_WW][2][40] = 66,
[1][0][RTW89_WW][0][42] = -4,
[1][0][RTW89_WW][1][42] = -4,
- [1][0][RTW89_WW][2][42] = 66,
[1][0][RTW89_WW][0][44] = -4,
[1][0][RTW89_WW][1][44] = -4,
- [1][0][RTW89_WW][2][44] = 66,
[1][0][RTW89_WW][0][45] = -4,
[1][0][RTW89_WW][1][45] = -4,
- [1][0][RTW89_WW][2][45] = 68,
[1][0][RTW89_WW][0][47] = -4,
[1][0][RTW89_WW][1][47] = -4,
- [1][0][RTW89_WW][2][47] = 68,
[1][0][RTW89_WW][0][49] = -4,
[1][0][RTW89_WW][1][49] = -4,
- [1][0][RTW89_WW][2][49] = 68,
[1][0][RTW89_WW][0][51] = -4,
[1][0][RTW89_WW][1][51] = -4,
- [1][0][RTW89_WW][2][51] = 68,
[1][0][RTW89_WW][0][53] = -4,
[1][0][RTW89_WW][1][53] = -4,
- [1][0][RTW89_WW][2][53] = 68,
[1][0][RTW89_WW][0][55] = -4,
[1][0][RTW89_WW][1][55] = -4,
- [1][0][RTW89_WW][2][55] = 68,
[1][0][RTW89_WW][0][57] = -4,
[1][0][RTW89_WW][1][57] = -4,
- [1][0][RTW89_WW][2][57] = 68,
[1][0][RTW89_WW][0][59] = -4,
[1][0][RTW89_WW][1][59] = -4,
- [1][0][RTW89_WW][2][59] = 68,
[1][0][RTW89_WW][0][60] = -4,
[1][0][RTW89_WW][1][60] = -4,
- [1][0][RTW89_WW][2][60] = 68,
[1][0][RTW89_WW][0][62] = -4,
[1][0][RTW89_WW][1][62] = -4,
- [1][0][RTW89_WW][2][62] = 68,
[1][0][RTW89_WW][0][64] = -4,
[1][0][RTW89_WW][1][64] = -4,
- [1][0][RTW89_WW][2][64] = 68,
[1][0][RTW89_WW][0][66] = -4,
[1][0][RTW89_WW][1][66] = -4,
- [1][0][RTW89_WW][2][66] = 68,
[1][0][RTW89_WW][0][68] = -4,
[1][0][RTW89_WW][1][68] = -4,
- [1][0][RTW89_WW][2][68] = 68,
[1][0][RTW89_WW][0][70] = -4,
[1][0][RTW89_WW][1][70] = -4,
- [1][0][RTW89_WW][2][70] = 68,
[1][0][RTW89_WW][0][72] = -4,
[1][0][RTW89_WW][1][72] = -4,
- [1][0][RTW89_WW][2][72] = 68,
[1][0][RTW89_WW][0][74] = -4,
[1][0][RTW89_WW][1][74] = -4,
- [1][0][RTW89_WW][2][74] = 68,
[1][0][RTW89_WW][0][75] = -4,
[1][0][RTW89_WW][1][75] = -4,
- [1][0][RTW89_WW][2][75] = 68,
[1][0][RTW89_WW][0][77] = -4,
[1][0][RTW89_WW][1][77] = -4,
- [1][0][RTW89_WW][2][77] = 68,
[1][0][RTW89_WW][0][79] = -4,
[1][0][RTW89_WW][1][79] = -4,
- [1][0][RTW89_WW][2][79] = 68,
[1][0][RTW89_WW][0][81] = -4,
[1][0][RTW89_WW][1][81] = -4,
- [1][0][RTW89_WW][2][81] = 68,
[1][0][RTW89_WW][0][83] = -4,
[1][0][RTW89_WW][1][83] = -4,
- [1][0][RTW89_WW][2][83] = 68,
[1][0][RTW89_WW][0][85] = -4,
[1][0][RTW89_WW][1][85] = -4,
- [1][0][RTW89_WW][2][85] = 68,
[1][0][RTW89_WW][0][87] = -4,
[1][0][RTW89_WW][1][87] = -4,
- [1][0][RTW89_WW][2][87] = 0,
[1][0][RTW89_WW][0][89] = -4,
[1][0][RTW89_WW][1][89] = -4,
- [1][0][RTW89_WW][2][89] = 0,
[1][0][RTW89_WW][0][90] = -4,
[1][0][RTW89_WW][1][90] = -4,
- [1][0][RTW89_WW][2][90] = 0,
[1][0][RTW89_WW][0][92] = -4,
[1][0][RTW89_WW][1][92] = -4,
- [1][0][RTW89_WW][2][92] = 0,
[1][0][RTW89_WW][0][94] = -4,
[1][0][RTW89_WW][1][94] = -4,
- [1][0][RTW89_WW][2][94] = 0,
[1][0][RTW89_WW][0][96] = -4,
[1][0][RTW89_WW][1][96] = -4,
- [1][0][RTW89_WW][2][96] = 0,
[1][0][RTW89_WW][0][98] = -4,
[1][0][RTW89_WW][1][98] = -4,
- [1][0][RTW89_WW][2][98] = 0,
[1][0][RTW89_WW][0][100] = -4,
[1][0][RTW89_WW][1][100] = -4,
- [1][0][RTW89_WW][2][100] = 0,
[1][0][RTW89_WW][0][102] = -4,
[1][0][RTW89_WW][1][102] = -4,
- [1][0][RTW89_WW][2][102] = 0,
[1][0][RTW89_WW][0][104] = -4,
[1][0][RTW89_WW][1][104] = -4,
- [1][0][RTW89_WW][2][104] = 0,
[1][0][RTW89_WW][0][105] = -4,
[1][0][RTW89_WW][1][105] = -4,
- [1][0][RTW89_WW][2][105] = 0,
[1][0][RTW89_WW][0][107] = -2,
[1][0][RTW89_WW][1][107] = -2,
- [1][0][RTW89_WW][2][107] = 0,
[1][0][RTW89_WW][0][109] = 2,
[1][0][RTW89_WW][1][109] = 2,
- [1][0][RTW89_WW][2][109] = 0,
[1][0][RTW89_WW][0][111] = 0,
[1][0][RTW89_WW][1][111] = 0,
- [1][0][RTW89_WW][2][111] = 0,
[1][0][RTW89_WW][0][113] = 0,
[1][0][RTW89_WW][1][113] = 0,
- [1][0][RTW89_WW][2][113] = 0,
[1][0][RTW89_WW][0][115] = 0,
[1][0][RTW89_WW][1][115] = 0,
- [1][0][RTW89_WW][2][115] = 0,
[1][0][RTW89_WW][0][117] = 0,
[1][0][RTW89_WW][1][117] = 0,
- [1][0][RTW89_WW][2][117] = 0,
[1][0][RTW89_WW][0][119] = 0,
[1][0][RTW89_WW][1][119] = 0,
- [1][0][RTW89_WW][2][119] = 0,
[1][1][RTW89_WW][0][0] = -26,
[1][1][RTW89_WW][1][0] = -26,
- [1][1][RTW89_WW][2][0] = 44,
[1][1][RTW89_WW][0][2] = -28,
[1][1][RTW89_WW][1][2] = -28,
- [1][1][RTW89_WW][2][2] = 44,
[1][1][RTW89_WW][0][4] = -28,
[1][1][RTW89_WW][1][4] = -28,
- [1][1][RTW89_WW][2][4] = 44,
[1][1][RTW89_WW][0][6] = -28,
[1][1][RTW89_WW][1][6] = -28,
- [1][1][RTW89_WW][2][6] = 44,
[1][1][RTW89_WW][0][8] = -28,
[1][1][RTW89_WW][1][8] = -28,
- [1][1][RTW89_WW][2][8] = 44,
[1][1][RTW89_WW][0][10] = -28,
[1][1][RTW89_WW][1][10] = -28,
- [1][1][RTW89_WW][2][10] = 44,
[1][1][RTW89_WW][0][12] = -28,
[1][1][RTW89_WW][1][12] = -28,
- [1][1][RTW89_WW][2][12] = 44,
[1][1][RTW89_WW][0][14] = -28,
[1][1][RTW89_WW][1][14] = -28,
- [1][1][RTW89_WW][2][14] = 44,
[1][1][RTW89_WW][0][15] = -28,
[1][1][RTW89_WW][1][15] = -28,
- [1][1][RTW89_WW][2][15] = 44,
[1][1][RTW89_WW][0][17] = -28,
[1][1][RTW89_WW][1][17] = -28,
- [1][1][RTW89_WW][2][17] = 44,
[1][1][RTW89_WW][0][19] = -28,
[1][1][RTW89_WW][1][19] = -28,
- [1][1][RTW89_WW][2][19] = 44,
[1][1][RTW89_WW][0][21] = -28,
[1][1][RTW89_WW][1][21] = -28,
- [1][1][RTW89_WW][2][21] = 44,
[1][1][RTW89_WW][0][23] = -28,
[1][1][RTW89_WW][1][23] = -28,
- [1][1][RTW89_WW][2][23] = 44,
[1][1][RTW89_WW][0][25] = -28,
[1][1][RTW89_WW][1][25] = -28,
- [1][1][RTW89_WW][2][25] = 44,
[1][1][RTW89_WW][0][27] = -28,
[1][1][RTW89_WW][1][27] = -28,
- [1][1][RTW89_WW][2][27] = 44,
[1][1][RTW89_WW][0][29] = -28,
[1][1][RTW89_WW][1][29] = -28,
- [1][1][RTW89_WW][2][29] = 44,
[1][1][RTW89_WW][0][30] = -28,
[1][1][RTW89_WW][1][30] = -28,
- [1][1][RTW89_WW][2][30] = 44,
[1][1][RTW89_WW][0][32] = -28,
[1][1][RTW89_WW][1][32] = -28,
- [1][1][RTW89_WW][2][32] = 44,
[1][1][RTW89_WW][0][34] = -28,
[1][1][RTW89_WW][1][34] = -28,
- [1][1][RTW89_WW][2][34] = 44,
[1][1][RTW89_WW][0][36] = -28,
[1][1][RTW89_WW][1][36] = -28,
- [1][1][RTW89_WW][2][36] = 44,
[1][1][RTW89_WW][0][38] = -28,
[1][1][RTW89_WW][1][38] = -28,
- [1][1][RTW89_WW][2][38] = 44,
[1][1][RTW89_WW][0][40] = -28,
[1][1][RTW89_WW][1][40] = -28,
- [1][1][RTW89_WW][2][40] = 44,
[1][1][RTW89_WW][0][42] = -28,
[1][1][RTW89_WW][1][42] = -28,
- [1][1][RTW89_WW][2][42] = 44,
[1][1][RTW89_WW][0][44] = -28,
[1][1][RTW89_WW][1][44] = -28,
- [1][1][RTW89_WW][2][44] = 44,
[1][1][RTW89_WW][0][45] = -26,
[1][1][RTW89_WW][1][45] = -26,
- [1][1][RTW89_WW][2][45] = 44,
[1][1][RTW89_WW][0][47] = -28,
[1][1][RTW89_WW][1][47] = -28,
- [1][1][RTW89_WW][2][47] = 44,
[1][1][RTW89_WW][0][49] = -28,
[1][1][RTW89_WW][1][49] = -28,
- [1][1][RTW89_WW][2][49] = 44,
[1][1][RTW89_WW][0][51] = -28,
[1][1][RTW89_WW][1][51] = -28,
- [1][1][RTW89_WW][2][51] = 44,
[1][1][RTW89_WW][0][53] = -26,
[1][1][RTW89_WW][1][53] = -26,
- [1][1][RTW89_WW][2][53] = 44,
[1][1][RTW89_WW][0][55] = -28,
[1][1][RTW89_WW][1][55] = -28,
- [1][1][RTW89_WW][2][55] = 44,
[1][1][RTW89_WW][0][57] = -28,
[1][1][RTW89_WW][1][57] = -28,
- [1][1][RTW89_WW][2][57] = 44,
[1][1][RTW89_WW][0][59] = -28,
[1][1][RTW89_WW][1][59] = -28,
- [1][1][RTW89_WW][2][59] = 44,
[1][1][RTW89_WW][0][60] = -28,
[1][1][RTW89_WW][1][60] = -28,
- [1][1][RTW89_WW][2][60] = 44,
[1][1][RTW89_WW][0][62] = -28,
[1][1][RTW89_WW][1][62] = -28,
- [1][1][RTW89_WW][2][62] = 44,
[1][1][RTW89_WW][0][64] = -28,
[1][1][RTW89_WW][1][64] = -28,
- [1][1][RTW89_WW][2][64] = 44,
[1][1][RTW89_WW][0][66] = -28,
[1][1][RTW89_WW][1][66] = -28,
- [1][1][RTW89_WW][2][66] = 44,
[1][1][RTW89_WW][0][68] = -28,
[1][1][RTW89_WW][1][68] = -28,
- [1][1][RTW89_WW][2][68] = 44,
[1][1][RTW89_WW][0][70] = -26,
[1][1][RTW89_WW][1][70] = -26,
- [1][1][RTW89_WW][2][70] = 44,
[1][1][RTW89_WW][0][72] = -28,
[1][1][RTW89_WW][1][72] = -28,
- [1][1][RTW89_WW][2][72] = 44,
[1][1][RTW89_WW][0][74] = -28,
[1][1][RTW89_WW][1][74] = -28,
- [1][1][RTW89_WW][2][74] = 44,
[1][1][RTW89_WW][0][75] = -28,
[1][1][RTW89_WW][1][75] = -28,
- [1][1][RTW89_WW][2][75] = 44,
[1][1][RTW89_WW][0][77] = -28,
[1][1][RTW89_WW][1][77] = -28,
- [1][1][RTW89_WW][2][77] = 44,
[1][1][RTW89_WW][0][79] = -28,
[1][1][RTW89_WW][1][79] = -28,
- [1][1][RTW89_WW][2][79] = 44,
[1][1][RTW89_WW][0][81] = -28,
[1][1][RTW89_WW][1][81] = -28,
- [1][1][RTW89_WW][2][81] = 44,
[1][1][RTW89_WW][0][83] = -28,
[1][1][RTW89_WW][1][83] = -28,
- [1][1][RTW89_WW][2][83] = 44,
[1][1][RTW89_WW][0][85] = -28,
[1][1][RTW89_WW][1][85] = -28,
- [1][1][RTW89_WW][2][85] = 44,
[1][1][RTW89_WW][0][87] = -28,
[1][1][RTW89_WW][1][87] = -28,
- [1][1][RTW89_WW][2][87] = 0,
[1][1][RTW89_WW][0][89] = -26,
[1][1][RTW89_WW][1][89] = -26,
- [1][1][RTW89_WW][2][89] = 0,
[1][1][RTW89_WW][0][90] = -26,
[1][1][RTW89_WW][1][90] = -26,
- [1][1][RTW89_WW][2][90] = 0,
[1][1][RTW89_WW][0][92] = -26,
[1][1][RTW89_WW][1][92] = -26,
- [1][1][RTW89_WW][2][92] = 0,
[1][1][RTW89_WW][0][94] = -26,
[1][1][RTW89_WW][1][94] = -26,
- [1][1][RTW89_WW][2][94] = 0,
[1][1][RTW89_WW][0][96] = -26,
[1][1][RTW89_WW][1][96] = -26,
- [1][1][RTW89_WW][2][96] = 0,
[1][1][RTW89_WW][0][98] = -26,
[1][1][RTW89_WW][1][98] = -26,
- [1][1][RTW89_WW][2][98] = 0,
[1][1][RTW89_WW][0][100] = -26,
[1][1][RTW89_WW][1][100] = -26,
- [1][1][RTW89_WW][2][100] = 0,
[1][1][RTW89_WW][0][102] = -26,
[1][1][RTW89_WW][1][102] = -26,
- [1][1][RTW89_WW][2][102] = 0,
[1][1][RTW89_WW][0][104] = -26,
[1][1][RTW89_WW][1][104] = -26,
- [1][1][RTW89_WW][2][104] = 0,
[1][1][RTW89_WW][0][105] = -26,
[1][1][RTW89_WW][1][105] = -26,
- [1][1][RTW89_WW][2][105] = 0,
[1][1][RTW89_WW][0][107] = -22,
[1][1][RTW89_WW][1][107] = -22,
- [1][1][RTW89_WW][2][107] = 0,
[1][1][RTW89_WW][0][109] = -22,
[1][1][RTW89_WW][1][109] = -22,
- [1][1][RTW89_WW][2][109] = 0,
[1][1][RTW89_WW][0][111] = 0,
[1][1][RTW89_WW][1][111] = 0,
- [1][1][RTW89_WW][2][111] = 0,
[1][1][RTW89_WW][0][113] = 0,
[1][1][RTW89_WW][1][113] = 0,
- [1][1][RTW89_WW][2][113] = 0,
[1][1][RTW89_WW][0][115] = 0,
[1][1][RTW89_WW][1][115] = 0,
- [1][1][RTW89_WW][2][115] = 0,
[1][1][RTW89_WW][0][117] = 0,
[1][1][RTW89_WW][1][117] = 0,
- [1][1][RTW89_WW][2][117] = 0,
[1][1][RTW89_WW][0][119] = 0,
[1][1][RTW89_WW][1][119] = 0,
- [1][1][RTW89_WW][2][119] = 0,
[2][0][RTW89_WW][0][0] = -2,
[2][0][RTW89_WW][1][0] = -2,
- [2][0][RTW89_WW][2][0] = 60,
[2][0][RTW89_WW][0][2] = -2,
[2][0][RTW89_WW][1][2] = -2,
- [2][0][RTW89_WW][2][2] = 60,
[2][0][RTW89_WW][0][4] = -2,
[2][0][RTW89_WW][1][4] = -2,
- [2][0][RTW89_WW][2][4] = 60,
[2][0][RTW89_WW][0][6] = -2,
[2][0][RTW89_WW][1][6] = -2,
- [2][0][RTW89_WW][2][6] = 60,
[2][0][RTW89_WW][0][8] = -2,
[2][0][RTW89_WW][1][8] = -2,
- [2][0][RTW89_WW][2][8] = 60,
[2][0][RTW89_WW][0][10] = -2,
[2][0][RTW89_WW][1][10] = -2,
- [2][0][RTW89_WW][2][10] = 60,
[2][0][RTW89_WW][0][12] = -2,
[2][0][RTW89_WW][1][12] = -2,
- [2][0][RTW89_WW][2][12] = 60,
[2][0][RTW89_WW][0][14] = -2,
[2][0][RTW89_WW][1][14] = -2,
- [2][0][RTW89_WW][2][14] = 60,
[2][0][RTW89_WW][0][15] = -2,
[2][0][RTW89_WW][1][15] = -2,
- [2][0][RTW89_WW][2][15] = 60,
[2][0][RTW89_WW][0][17] = -2,
[2][0][RTW89_WW][1][17] = -2,
- [2][0][RTW89_WW][2][17] = 60,
[2][0][RTW89_WW][0][19] = -2,
[2][0][RTW89_WW][1][19] = -2,
- [2][0][RTW89_WW][2][19] = 60,
[2][0][RTW89_WW][0][21] = -2,
[2][0][RTW89_WW][1][21] = -2,
- [2][0][RTW89_WW][2][21] = 60,
[2][0][RTW89_WW][0][23] = -2,
[2][0][RTW89_WW][1][23] = -2,
- [2][0][RTW89_WW][2][23] = 70,
[2][0][RTW89_WW][0][25] = -2,
[2][0][RTW89_WW][1][25] = -2,
- [2][0][RTW89_WW][2][25] = 70,
[2][0][RTW89_WW][0][27] = -2,
[2][0][RTW89_WW][1][27] = -2,
- [2][0][RTW89_WW][2][27] = 70,
[2][0][RTW89_WW][0][29] = -2,
[2][0][RTW89_WW][1][29] = -2,
- [2][0][RTW89_WW][2][29] = 70,
[2][0][RTW89_WW][0][30] = -2,
[2][0][RTW89_WW][1][30] = -2,
- [2][0][RTW89_WW][2][30] = 70,
[2][0][RTW89_WW][0][32] = -2,
[2][0][RTW89_WW][1][32] = -2,
- [2][0][RTW89_WW][2][32] = 70,
[2][0][RTW89_WW][0][34] = -2,
[2][0][RTW89_WW][1][34] = -2,
- [2][0][RTW89_WW][2][34] = 70,
[2][0][RTW89_WW][0][36] = -2,
[2][0][RTW89_WW][1][36] = -2,
- [2][0][RTW89_WW][2][36] = 70,
[2][0][RTW89_WW][0][38] = -2,
[2][0][RTW89_WW][1][38] = -2,
- [2][0][RTW89_WW][2][38] = 70,
[2][0][RTW89_WW][0][40] = -2,
[2][0][RTW89_WW][1][40] = -2,
- [2][0][RTW89_WW][2][40] = 70,
[2][0][RTW89_WW][0][42] = -2,
[2][0][RTW89_WW][1][42] = -2,
- [2][0][RTW89_WW][2][42] = 70,
[2][0][RTW89_WW][0][44] = -2,
[2][0][RTW89_WW][1][44] = -2,
- [2][0][RTW89_WW][2][44] = 70,
[2][0][RTW89_WW][0][45] = -2,
[2][0][RTW89_WW][1][45] = -2,
- [2][0][RTW89_WW][2][45] = 70,
[2][0][RTW89_WW][0][47] = -2,
[2][0][RTW89_WW][1][47] = -2,
- [2][0][RTW89_WW][2][47] = 70,
[2][0][RTW89_WW][0][49] = -2,
[2][0][RTW89_WW][1][49] = -2,
- [2][0][RTW89_WW][2][49] = 70,
[2][0][RTW89_WW][0][51] = -2,
[2][0][RTW89_WW][1][51] = -2,
- [2][0][RTW89_WW][2][51] = 70,
[2][0][RTW89_WW][0][53] = -2,
[2][0][RTW89_WW][1][53] = -2,
- [2][0][RTW89_WW][2][53] = 70,
[2][0][RTW89_WW][0][55] = -2,
[2][0][RTW89_WW][1][55] = -2,
- [2][0][RTW89_WW][2][55] = 68,
[2][0][RTW89_WW][0][57] = -2,
[2][0][RTW89_WW][1][57] = -2,
- [2][0][RTW89_WW][2][57] = 68,
[2][0][RTW89_WW][0][59] = -2,
[2][0][RTW89_WW][1][59] = -2,
- [2][0][RTW89_WW][2][59] = 68,
[2][0][RTW89_WW][0][60] = -2,
[2][0][RTW89_WW][1][60] = -2,
- [2][0][RTW89_WW][2][60] = 68,
[2][0][RTW89_WW][0][62] = -2,
[2][0][RTW89_WW][1][62] = -2,
- [2][0][RTW89_WW][2][62] = 68,
[2][0][RTW89_WW][0][64] = -2,
[2][0][RTW89_WW][1][64] = -2,
- [2][0][RTW89_WW][2][64] = 68,
[2][0][RTW89_WW][0][66] = -2,
[2][0][RTW89_WW][1][66] = -2,
- [2][0][RTW89_WW][2][66] = 68,
[2][0][RTW89_WW][0][68] = -2,
[2][0][RTW89_WW][1][68] = -2,
- [2][0][RTW89_WW][2][68] = 68,
[2][0][RTW89_WW][0][70] = -2,
[2][0][RTW89_WW][1][70] = -2,
- [2][0][RTW89_WW][2][70] = 68,
[2][0][RTW89_WW][0][72] = -2,
[2][0][RTW89_WW][1][72] = -2,
- [2][0][RTW89_WW][2][72] = 68,
[2][0][RTW89_WW][0][74] = -2,
[2][0][RTW89_WW][1][74] = -2,
- [2][0][RTW89_WW][2][74] = 68,
[2][0][RTW89_WW][0][75] = -2,
[2][0][RTW89_WW][1][75] = -2,
- [2][0][RTW89_WW][2][75] = 68,
[2][0][RTW89_WW][0][77] = -2,
[2][0][RTW89_WW][1][77] = -2,
- [2][0][RTW89_WW][2][77] = 68,
[2][0][RTW89_WW][0][79] = -2,
[2][0][RTW89_WW][1][79] = -2,
- [2][0][RTW89_WW][2][79] = 68,
[2][0][RTW89_WW][0][81] = -2,
[2][0][RTW89_WW][1][81] = -2,
- [2][0][RTW89_WW][2][81] = 68,
[2][0][RTW89_WW][0][83] = -2,
[2][0][RTW89_WW][1][83] = -2,
- [2][0][RTW89_WW][2][83] = 68,
[2][0][RTW89_WW][0][85] = -2,
[2][0][RTW89_WW][1][85] = -2,
- [2][0][RTW89_WW][2][85] = 68,
[2][0][RTW89_WW][0][87] = -2,
[2][0][RTW89_WW][1][87] = -2,
- [2][0][RTW89_WW][2][87] = 0,
[2][0][RTW89_WW][0][89] = -2,
[2][0][RTW89_WW][1][89] = -2,
- [2][0][RTW89_WW][2][89] = 0,
[2][0][RTW89_WW][0][90] = -2,
[2][0][RTW89_WW][1][90] = -2,
- [2][0][RTW89_WW][2][90] = 0,
[2][0][RTW89_WW][0][92] = -2,
[2][0][RTW89_WW][1][92] = -2,
- [2][0][RTW89_WW][2][92] = 0,
[2][0][RTW89_WW][0][94] = -2,
[2][0][RTW89_WW][1][94] = -2,
- [2][0][RTW89_WW][2][94] = 0,
[2][0][RTW89_WW][0][96] = -2,
[2][0][RTW89_WW][1][96] = -2,
- [2][0][RTW89_WW][2][96] = 0,
[2][0][RTW89_WW][0][98] = -2,
[2][0][RTW89_WW][1][98] = -2,
- [2][0][RTW89_WW][2][98] = 0,
[2][0][RTW89_WW][0][100] = -2,
[2][0][RTW89_WW][1][100] = -2,
- [2][0][RTW89_WW][2][100] = 0,
[2][0][RTW89_WW][0][102] = -2,
[2][0][RTW89_WW][1][102] = -2,
- [2][0][RTW89_WW][2][102] = 0,
[2][0][RTW89_WW][0][104] = -2,
[2][0][RTW89_WW][1][104] = -2,
- [2][0][RTW89_WW][2][104] = 0,
[2][0][RTW89_WW][0][105] = -2,
[2][0][RTW89_WW][1][105] = -2,
- [2][0][RTW89_WW][2][105] = 0,
[2][0][RTW89_WW][0][107] = -2,
[2][0][RTW89_WW][1][107] = -2,
- [2][0][RTW89_WW][2][107] = 0,
[2][0][RTW89_WW][0][109] = 12,
[2][0][RTW89_WW][1][109] = 12,
- [2][0][RTW89_WW][2][109] = 0,
[2][0][RTW89_WW][0][111] = 0,
[2][0][RTW89_WW][1][111] = 0,
- [2][0][RTW89_WW][2][111] = 0,
[2][0][RTW89_WW][0][113] = 0,
[2][0][RTW89_WW][1][113] = 0,
- [2][0][RTW89_WW][2][113] = 0,
[2][0][RTW89_WW][0][115] = 0,
[2][0][RTW89_WW][1][115] = 0,
- [2][0][RTW89_WW][2][115] = 0,
[2][0][RTW89_WW][0][117] = 0,
[2][0][RTW89_WW][1][117] = 0,
- [2][0][RTW89_WW][2][117] = 0,
[2][0][RTW89_WW][0][119] = 0,
[2][0][RTW89_WW][1][119] = 0,
- [2][0][RTW89_WW][2][119] = 0,
[2][1][RTW89_WW][0][0] = -16,
[2][1][RTW89_WW][1][0] = -16,
- [2][1][RTW89_WW][2][0] = 54,
[2][1][RTW89_WW][0][2] = -16,
[2][1][RTW89_WW][1][2] = -16,
- [2][1][RTW89_WW][2][2] = 54,
[2][1][RTW89_WW][0][4] = -16,
[2][1][RTW89_WW][1][4] = -16,
- [2][1][RTW89_WW][2][4] = 54,
[2][1][RTW89_WW][0][6] = -16,
[2][1][RTW89_WW][1][6] = -16,
- [2][1][RTW89_WW][2][6] = 54,
[2][1][RTW89_WW][0][8] = -16,
[2][1][RTW89_WW][1][8] = -16,
- [2][1][RTW89_WW][2][8] = 54,
[2][1][RTW89_WW][0][10] = -16,
[2][1][RTW89_WW][1][10] = -16,
- [2][1][RTW89_WW][2][10] = 54,
[2][1][RTW89_WW][0][12] = -16,
[2][1][RTW89_WW][1][12] = -16,
- [2][1][RTW89_WW][2][12] = 54,
[2][1][RTW89_WW][0][14] = -16,
[2][1][RTW89_WW][1][14] = -16,
- [2][1][RTW89_WW][2][14] = 54,
[2][1][RTW89_WW][0][15] = -16,
[2][1][RTW89_WW][1][15] = -16,
- [2][1][RTW89_WW][2][15] = 54,
[2][1][RTW89_WW][0][17] = -16,
[2][1][RTW89_WW][1][17] = -16,
- [2][1][RTW89_WW][2][17] = 54,
[2][1][RTW89_WW][0][19] = -16,
[2][1][RTW89_WW][1][19] = -16,
- [2][1][RTW89_WW][2][19] = 54,
[2][1][RTW89_WW][0][21] = -16,
[2][1][RTW89_WW][1][21] = -16,
- [2][1][RTW89_WW][2][21] = 54,
[2][1][RTW89_WW][0][23] = -16,
[2][1][RTW89_WW][1][23] = -16,
- [2][1][RTW89_WW][2][23] = 54,
[2][1][RTW89_WW][0][25] = -16,
[2][1][RTW89_WW][1][25] = -16,
- [2][1][RTW89_WW][2][25] = 54,
[2][1][RTW89_WW][0][27] = -16,
[2][1][RTW89_WW][1][27] = -16,
- [2][1][RTW89_WW][2][27] = 54,
[2][1][RTW89_WW][0][29] = -16,
[2][1][RTW89_WW][1][29] = -16,
- [2][1][RTW89_WW][2][29] = 54,
[2][1][RTW89_WW][0][30] = -16,
[2][1][RTW89_WW][1][30] = -16,
- [2][1][RTW89_WW][2][30] = 54,
[2][1][RTW89_WW][0][32] = -16,
[2][1][RTW89_WW][1][32] = -16,
- [2][1][RTW89_WW][2][32] = 54,
[2][1][RTW89_WW][0][34] = -16,
[2][1][RTW89_WW][1][34] = -16,
- [2][1][RTW89_WW][2][34] = 54,
[2][1][RTW89_WW][0][36] = -16,
[2][1][RTW89_WW][1][36] = -16,
- [2][1][RTW89_WW][2][36] = 54,
[2][1][RTW89_WW][0][38] = -16,
[2][1][RTW89_WW][1][38] = -16,
- [2][1][RTW89_WW][2][38] = 54,
[2][1][RTW89_WW][0][40] = -16,
[2][1][RTW89_WW][1][40] = -16,
- [2][1][RTW89_WW][2][40] = 54,
[2][1][RTW89_WW][0][42] = -16,
[2][1][RTW89_WW][1][42] = -16,
- [2][1][RTW89_WW][2][42] = 54,
[2][1][RTW89_WW][0][44] = -16,
[2][1][RTW89_WW][1][44] = -16,
- [2][1][RTW89_WW][2][44] = 54,
[2][1][RTW89_WW][0][45] = -16,
[2][1][RTW89_WW][1][45] = -16,
- [2][1][RTW89_WW][2][45] = 56,
[2][1][RTW89_WW][0][47] = -16,
[2][1][RTW89_WW][1][47] = -16,
- [2][1][RTW89_WW][2][47] = 56,
[2][1][RTW89_WW][0][49] = -16,
[2][1][RTW89_WW][1][49] = -16,
- [2][1][RTW89_WW][2][49] = 56,
[2][1][RTW89_WW][0][51] = -16,
[2][1][RTW89_WW][1][51] = -16,
- [2][1][RTW89_WW][2][51] = 56,
[2][1][RTW89_WW][0][53] = -16,
[2][1][RTW89_WW][1][53] = -16,
- [2][1][RTW89_WW][2][53] = 56,
[2][1][RTW89_WW][0][55] = -16,
[2][1][RTW89_WW][1][55] = -16,
- [2][1][RTW89_WW][2][55] = 54,
[2][1][RTW89_WW][0][57] = -16,
[2][1][RTW89_WW][1][57] = -16,
- [2][1][RTW89_WW][2][57] = 54,
[2][1][RTW89_WW][0][59] = -16,
[2][1][RTW89_WW][1][59] = -16,
- [2][1][RTW89_WW][2][59] = 54,
[2][1][RTW89_WW][0][60] = -16,
[2][1][RTW89_WW][1][60] = -16,
- [2][1][RTW89_WW][2][60] = 54,
[2][1][RTW89_WW][0][62] = -16,
[2][1][RTW89_WW][1][62] = -16,
- [2][1][RTW89_WW][2][62] = 54,
[2][1][RTW89_WW][0][64] = -16,
[2][1][RTW89_WW][1][64] = -16,
- [2][1][RTW89_WW][2][64] = 54,
[2][1][RTW89_WW][0][66] = -16,
[2][1][RTW89_WW][1][66] = -16,
- [2][1][RTW89_WW][2][66] = 54,
[2][1][RTW89_WW][0][68] = -16,
[2][1][RTW89_WW][1][68] = -16,
- [2][1][RTW89_WW][2][68] = 54,
[2][1][RTW89_WW][0][70] = -16,
[2][1][RTW89_WW][1][70] = -16,
- [2][1][RTW89_WW][2][70] = 56,
[2][1][RTW89_WW][0][72] = -16,
[2][1][RTW89_WW][1][72] = -16,
- [2][1][RTW89_WW][2][72] = 56,
[2][1][RTW89_WW][0][74] = -16,
[2][1][RTW89_WW][1][74] = -16,
- [2][1][RTW89_WW][2][74] = 56,
[2][1][RTW89_WW][0][75] = -16,
[2][1][RTW89_WW][1][75] = -16,
- [2][1][RTW89_WW][2][75] = 56,
[2][1][RTW89_WW][0][77] = -16,
[2][1][RTW89_WW][1][77] = -16,
- [2][1][RTW89_WW][2][77] = 56,
[2][1][RTW89_WW][0][79] = -16,
[2][1][RTW89_WW][1][79] = -16,
- [2][1][RTW89_WW][2][79] = 56,
[2][1][RTW89_WW][0][81] = -16,
[2][1][RTW89_WW][1][81] = -16,
- [2][1][RTW89_WW][2][81] = 56,
[2][1][RTW89_WW][0][83] = -16,
[2][1][RTW89_WW][1][83] = -16,
- [2][1][RTW89_WW][2][83] = 56,
[2][1][RTW89_WW][0][85] = -18,
[2][1][RTW89_WW][1][85] = -18,
- [2][1][RTW89_WW][2][85] = 56,
[2][1][RTW89_WW][0][87] = -16,
[2][1][RTW89_WW][1][87] = -16,
- [2][1][RTW89_WW][2][87] = 0,
[2][1][RTW89_WW][0][89] = -16,
[2][1][RTW89_WW][1][89] = -16,
- [2][1][RTW89_WW][2][89] = 0,
[2][1][RTW89_WW][0][90] = -16,
[2][1][RTW89_WW][1][90] = -16,
- [2][1][RTW89_WW][2][90] = 0,
[2][1][RTW89_WW][0][92] = -16,
[2][1][RTW89_WW][1][92] = -16,
- [2][1][RTW89_WW][2][92] = 0,
[2][1][RTW89_WW][0][94] = -16,
[2][1][RTW89_WW][1][94] = -16,
- [2][1][RTW89_WW][2][94] = 0,
[2][1][RTW89_WW][0][96] = -16,
[2][1][RTW89_WW][1][96] = -16,
- [2][1][RTW89_WW][2][96] = 0,
[2][1][RTW89_WW][0][98] = -16,
[2][1][RTW89_WW][1][98] = -16,
- [2][1][RTW89_WW][2][98] = 0,
[2][1][RTW89_WW][0][100] = -16,
[2][1][RTW89_WW][1][100] = -16,
- [2][1][RTW89_WW][2][100] = 0,
[2][1][RTW89_WW][0][102] = -16,
[2][1][RTW89_WW][1][102] = -16,
- [2][1][RTW89_WW][2][102] = 0,
[2][1][RTW89_WW][0][104] = -16,
[2][1][RTW89_WW][1][104] = -16,
- [2][1][RTW89_WW][2][104] = 0,
[2][1][RTW89_WW][0][105] = -16,
[2][1][RTW89_WW][1][105] = -16,
- [2][1][RTW89_WW][2][105] = 0,
[2][1][RTW89_WW][0][107] = -14,
[2][1][RTW89_WW][1][107] = -14,
- [2][1][RTW89_WW][2][107] = 0,
[2][1][RTW89_WW][0][109] = -10,
[2][1][RTW89_WW][1][109] = -10,
- [2][1][RTW89_WW][2][109] = 0,
[2][1][RTW89_WW][0][111] = 0,
[2][1][RTW89_WW][1][111] = 0,
- [2][1][RTW89_WW][2][111] = 0,
[2][1][RTW89_WW][0][113] = 0,
[2][1][RTW89_WW][1][113] = 0,
- [2][1][RTW89_WW][2][113] = 0,
[2][1][RTW89_WW][0][115] = 0,
[2][1][RTW89_WW][1][115] = 0,
- [2][1][RTW89_WW][2][115] = 0,
[2][1][RTW89_WW][0][117] = 0,
[2][1][RTW89_WW][1][117] = 0,
- [2][1][RTW89_WW][2][117] = 0,
[2][1][RTW89_WW][0][119] = 0,
[2][1][RTW89_WW][1][119] = 0,
- [2][1][RTW89_WW][2][119] = 0,
[0][0][RTW89_FCC][1][0] = -16,
- [0][0][RTW89_FCC][2][0] = 44,
[0][0][RTW89_ETSI][1][0] = 32,
[0][0][RTW89_ETSI][0][0] = -8,
[0][0][RTW89_MKK][1][0] = 30,
[0][0][RTW89_MKK][0][0] = -8,
[0][0][RTW89_IC][1][0] = -16,
- [0][0][RTW89_IC][2][0] = 44,
[0][0][RTW89_KCC][1][0] = -2,
[0][0][RTW89_KCC][0][0] = -2,
[0][0][RTW89_ACMA][1][0] = 32,
@@ -52408,13 +50558,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][0] = 30,
[0][0][RTW89_THAILAND][0][0] = -16,
[0][0][RTW89_FCC][1][2] = -18,
- [0][0][RTW89_FCC][2][2] = 44,
[0][0][RTW89_ETSI][1][2] = 32,
[0][0][RTW89_ETSI][0][2] = -8,
[0][0][RTW89_MKK][1][2] = 30,
[0][0][RTW89_MKK][0][2] = -8,
[0][0][RTW89_IC][1][2] = -18,
- [0][0][RTW89_IC][2][2] = 44,
[0][0][RTW89_KCC][1][2] = -2,
[0][0][RTW89_KCC][0][2] = -2,
[0][0][RTW89_ACMA][1][2] = 32,
@@ -52427,13 +50575,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][2] = 30,
[0][0][RTW89_THAILAND][0][2] = -18,
[0][0][RTW89_FCC][1][4] = -18,
- [0][0][RTW89_FCC][2][4] = 44,
[0][0][RTW89_ETSI][1][4] = 32,
[0][0][RTW89_ETSI][0][4] = -8,
[0][0][RTW89_MKK][1][4] = 30,
[0][0][RTW89_MKK][0][4] = -8,
[0][0][RTW89_IC][1][4] = -18,
- [0][0][RTW89_IC][2][4] = 44,
[0][0][RTW89_KCC][1][4] = -2,
[0][0][RTW89_KCC][0][4] = -2,
[0][0][RTW89_ACMA][1][4] = 32,
@@ -52446,13 +50592,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][4] = 30,
[0][0][RTW89_THAILAND][0][4] = -18,
[0][0][RTW89_FCC][1][6] = -18,
- [0][0][RTW89_FCC][2][6] = 44,
[0][0][RTW89_ETSI][1][6] = 32,
[0][0][RTW89_ETSI][0][6] = -8,
[0][0][RTW89_MKK][1][6] = 30,
[0][0][RTW89_MKK][0][6] = -8,
[0][0][RTW89_IC][1][6] = -18,
- [0][0][RTW89_IC][2][6] = 44,
[0][0][RTW89_KCC][1][6] = -2,
[0][0][RTW89_KCC][0][6] = -2,
[0][0][RTW89_ACMA][1][6] = 32,
@@ -52465,13 +50609,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][6] = 30,
[0][0][RTW89_THAILAND][0][6] = -18,
[0][0][RTW89_FCC][1][8] = -18,
- [0][0][RTW89_FCC][2][8] = 44,
[0][0][RTW89_ETSI][1][8] = 32,
[0][0][RTW89_ETSI][0][8] = -8,
[0][0][RTW89_MKK][1][8] = 30,
[0][0][RTW89_MKK][0][8] = -8,
[0][0][RTW89_IC][1][8] = -18,
- [0][0][RTW89_IC][2][8] = 44,
[0][0][RTW89_KCC][1][8] = -2,
[0][0][RTW89_KCC][0][8] = -2,
[0][0][RTW89_ACMA][1][8] = 32,
@@ -52484,13 +50626,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][8] = 30,
[0][0][RTW89_THAILAND][0][8] = -18,
[0][0][RTW89_FCC][1][10] = -18,
- [0][0][RTW89_FCC][2][10] = 44,
[0][0][RTW89_ETSI][1][10] = 32,
[0][0][RTW89_ETSI][0][10] = -8,
[0][0][RTW89_MKK][1][10] = 30,
[0][0][RTW89_MKK][0][10] = -8,
[0][0][RTW89_IC][1][10] = -18,
- [0][0][RTW89_IC][2][10] = 44,
[0][0][RTW89_KCC][1][10] = -2,
[0][0][RTW89_KCC][0][10] = -2,
[0][0][RTW89_ACMA][1][10] = 32,
@@ -52503,13 +50643,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][10] = 30,
[0][0][RTW89_THAILAND][0][10] = -18,
[0][0][RTW89_FCC][1][12] = -18,
- [0][0][RTW89_FCC][2][12] = 44,
[0][0][RTW89_ETSI][1][12] = 32,
[0][0][RTW89_ETSI][0][12] = -8,
[0][0][RTW89_MKK][1][12] = 30,
[0][0][RTW89_MKK][0][12] = -8,
[0][0][RTW89_IC][1][12] = -18,
- [0][0][RTW89_IC][2][12] = 44,
[0][0][RTW89_KCC][1][12] = -2,
[0][0][RTW89_KCC][0][12] = -2,
[0][0][RTW89_ACMA][1][12] = 32,
@@ -52522,13 +50660,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][12] = 30,
[0][0][RTW89_THAILAND][0][12] = -18,
[0][0][RTW89_FCC][1][14] = -18,
- [0][0][RTW89_FCC][2][14] = 44,
[0][0][RTW89_ETSI][1][14] = 32,
[0][0][RTW89_ETSI][0][14] = -8,
[0][0][RTW89_MKK][1][14] = 30,
[0][0][RTW89_MKK][0][14] = -8,
[0][0][RTW89_IC][1][14] = -18,
- [0][0][RTW89_IC][2][14] = 44,
[0][0][RTW89_KCC][1][14] = -2,
[0][0][RTW89_KCC][0][14] = -2,
[0][0][RTW89_ACMA][1][14] = 32,
@@ -52541,13 +50677,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][14] = 30,
[0][0][RTW89_THAILAND][0][14] = -18,
[0][0][RTW89_FCC][1][15] = -18,
- [0][0][RTW89_FCC][2][15] = 44,
[0][0][RTW89_ETSI][1][15] = 32,
[0][0][RTW89_ETSI][0][15] = -8,
[0][0][RTW89_MKK][1][15] = 30,
[0][0][RTW89_MKK][0][15] = -8,
[0][0][RTW89_IC][1][15] = -18,
- [0][0][RTW89_IC][2][15] = 44,
[0][0][RTW89_KCC][1][15] = -2,
[0][0][RTW89_KCC][0][15] = -2,
[0][0][RTW89_ACMA][1][15] = 32,
@@ -52560,13 +50694,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][15] = 30,
[0][0][RTW89_THAILAND][0][15] = -18,
[0][0][RTW89_FCC][1][17] = -18,
- [0][0][RTW89_FCC][2][17] = 44,
[0][0][RTW89_ETSI][1][17] = 32,
[0][0][RTW89_ETSI][0][17] = -8,
[0][0][RTW89_MKK][1][17] = 30,
[0][0][RTW89_MKK][0][17] = -8,
[0][0][RTW89_IC][1][17] = -18,
- [0][0][RTW89_IC][2][17] = 44,
[0][0][RTW89_KCC][1][17] = -2,
[0][0][RTW89_KCC][0][17] = -2,
[0][0][RTW89_ACMA][1][17] = 32,
@@ -52579,13 +50711,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][17] = 30,
[0][0][RTW89_THAILAND][0][17] = -18,
[0][0][RTW89_FCC][1][19] = -18,
- [0][0][RTW89_FCC][2][19] = 44,
[0][0][RTW89_ETSI][1][19] = 32,
[0][0][RTW89_ETSI][0][19] = -8,
[0][0][RTW89_MKK][1][19] = 30,
[0][0][RTW89_MKK][0][19] = -8,
[0][0][RTW89_IC][1][19] = -18,
- [0][0][RTW89_IC][2][19] = 44,
[0][0][RTW89_KCC][1][19] = -2,
[0][0][RTW89_KCC][0][19] = -2,
[0][0][RTW89_ACMA][1][19] = 32,
@@ -52598,13 +50728,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][19] = 30,
[0][0][RTW89_THAILAND][0][19] = -18,
[0][0][RTW89_FCC][1][21] = -18,
- [0][0][RTW89_FCC][2][21] = 44,
[0][0][RTW89_ETSI][1][21] = 32,
[0][0][RTW89_ETSI][0][21] = -8,
[0][0][RTW89_MKK][1][21] = 30,
[0][0][RTW89_MKK][0][21] = -8,
[0][0][RTW89_IC][1][21] = -18,
- [0][0][RTW89_IC][2][21] = 44,
[0][0][RTW89_KCC][1][21] = -2,
[0][0][RTW89_KCC][0][21] = -2,
[0][0][RTW89_ACMA][1][21] = 32,
@@ -52617,13 +50745,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][21] = 30,
[0][0][RTW89_THAILAND][0][21] = -18,
[0][0][RTW89_FCC][1][23] = -18,
- [0][0][RTW89_FCC][2][23] = 54,
[0][0][RTW89_ETSI][1][23] = 32,
[0][0][RTW89_ETSI][0][23] = -8,
[0][0][RTW89_MKK][1][23] = 30,
[0][0][RTW89_MKK][0][23] = -8,
[0][0][RTW89_IC][1][23] = -18,
- [0][0][RTW89_IC][2][23] = 54,
[0][0][RTW89_KCC][1][23] = -2,
[0][0][RTW89_KCC][0][23] = -2,
[0][0][RTW89_ACMA][1][23] = 32,
@@ -52636,13 +50762,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][23] = 30,
[0][0][RTW89_THAILAND][0][23] = -18,
[0][0][RTW89_FCC][1][25] = -18,
- [0][0][RTW89_FCC][2][25] = 54,
[0][0][RTW89_ETSI][1][25] = 32,
[0][0][RTW89_ETSI][0][25] = -8,
[0][0][RTW89_MKK][1][25] = 30,
[0][0][RTW89_MKK][0][25] = -8,
[0][0][RTW89_IC][1][25] = -18,
- [0][0][RTW89_IC][2][25] = 54,
[0][0][RTW89_KCC][1][25] = -2,
[0][0][RTW89_KCC][0][25] = -2,
[0][0][RTW89_ACMA][1][25] = 32,
@@ -52655,13 +50779,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][25] = 30,
[0][0][RTW89_THAILAND][0][25] = -18,
[0][0][RTW89_FCC][1][27] = -18,
- [0][0][RTW89_FCC][2][27] = 54,
[0][0][RTW89_ETSI][1][27] = 32,
[0][0][RTW89_ETSI][0][27] = -8,
[0][0][RTW89_MKK][1][27] = 30,
[0][0][RTW89_MKK][0][27] = -8,
[0][0][RTW89_IC][1][27] = -18,
- [0][0][RTW89_IC][2][27] = 54,
[0][0][RTW89_KCC][1][27] = -2,
[0][0][RTW89_KCC][0][27] = -2,
[0][0][RTW89_ACMA][1][27] = 32,
@@ -52674,13 +50796,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][27] = 30,
[0][0][RTW89_THAILAND][0][27] = -18,
[0][0][RTW89_FCC][1][29] = -18,
- [0][0][RTW89_FCC][2][29] = 54,
[0][0][RTW89_ETSI][1][29] = 32,
[0][0][RTW89_ETSI][0][29] = -8,
[0][0][RTW89_MKK][1][29] = 30,
[0][0][RTW89_MKK][0][29] = -8,
[0][0][RTW89_IC][1][29] = -18,
- [0][0][RTW89_IC][2][29] = 54,
[0][0][RTW89_KCC][1][29] = -2,
[0][0][RTW89_KCC][0][29] = -2,
[0][0][RTW89_ACMA][1][29] = 32,
@@ -52693,13 +50813,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][29] = 30,
[0][0][RTW89_THAILAND][0][29] = -18,
[0][0][RTW89_FCC][1][30] = -18,
- [0][0][RTW89_FCC][2][30] = 54,
[0][0][RTW89_ETSI][1][30] = 32,
[0][0][RTW89_ETSI][0][30] = -8,
[0][0][RTW89_MKK][1][30] = 30,
[0][0][RTW89_MKK][0][30] = -8,
[0][0][RTW89_IC][1][30] = -18,
- [0][0][RTW89_IC][2][30] = 54,
[0][0][RTW89_KCC][1][30] = -2,
[0][0][RTW89_KCC][0][30] = -2,
[0][0][RTW89_ACMA][1][30] = 32,
@@ -52712,13 +50830,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][30] = 30,
[0][0][RTW89_THAILAND][0][30] = -18,
[0][0][RTW89_FCC][1][32] = -18,
- [0][0][RTW89_FCC][2][32] = 54,
[0][0][RTW89_ETSI][1][32] = 32,
[0][0][RTW89_ETSI][0][32] = -8,
[0][0][RTW89_MKK][1][32] = 30,
[0][0][RTW89_MKK][0][32] = -8,
[0][0][RTW89_IC][1][32] = -18,
- [0][0][RTW89_IC][2][32] = 54,
[0][0][RTW89_KCC][1][32] = -2,
[0][0][RTW89_KCC][0][32] = -2,
[0][0][RTW89_ACMA][1][32] = 32,
@@ -52731,13 +50847,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][32] = 30,
[0][0][RTW89_THAILAND][0][32] = -18,
[0][0][RTW89_FCC][1][34] = -18,
- [0][0][RTW89_FCC][2][34] = 54,
[0][0][RTW89_ETSI][1][34] = 32,
[0][0][RTW89_ETSI][0][34] = -8,
[0][0][RTW89_MKK][1][34] = 30,
[0][0][RTW89_MKK][0][34] = -8,
[0][0][RTW89_IC][1][34] = -18,
- [0][0][RTW89_IC][2][34] = 54,
[0][0][RTW89_KCC][1][34] = -2,
[0][0][RTW89_KCC][0][34] = -2,
[0][0][RTW89_ACMA][1][34] = 32,
@@ -52750,13 +50864,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][34] = 30,
[0][0][RTW89_THAILAND][0][34] = -18,
[0][0][RTW89_FCC][1][36] = -18,
- [0][0][RTW89_FCC][2][36] = 54,
[0][0][RTW89_ETSI][1][36] = 32,
[0][0][RTW89_ETSI][0][36] = -8,
[0][0][RTW89_MKK][1][36] = 30,
[0][0][RTW89_MKK][0][36] = -8,
[0][0][RTW89_IC][1][36] = -18,
- [0][0][RTW89_IC][2][36] = 54,
[0][0][RTW89_KCC][1][36] = -2,
[0][0][RTW89_KCC][0][36] = -2,
[0][0][RTW89_ACMA][1][36] = 32,
@@ -52769,13 +50881,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][36] = 30,
[0][0][RTW89_THAILAND][0][36] = -18,
[0][0][RTW89_FCC][1][38] = -18,
- [0][0][RTW89_FCC][2][38] = 54,
[0][0][RTW89_ETSI][1][38] = 32,
[0][0][RTW89_ETSI][0][38] = -8,
[0][0][RTW89_MKK][1][38] = 30,
[0][0][RTW89_MKK][0][38] = -8,
[0][0][RTW89_IC][1][38] = -18,
- [0][0][RTW89_IC][2][38] = 54,
[0][0][RTW89_KCC][1][38] = -2,
[0][0][RTW89_KCC][0][38] = -2,
[0][0][RTW89_ACMA][1][38] = 32,
@@ -52788,13 +50898,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][38] = 30,
[0][0][RTW89_THAILAND][0][38] = -18,
[0][0][RTW89_FCC][1][40] = -18,
- [0][0][RTW89_FCC][2][40] = 54,
[0][0][RTW89_ETSI][1][40] = 32,
[0][0][RTW89_ETSI][0][40] = -8,
[0][0][RTW89_MKK][1][40] = 30,
[0][0][RTW89_MKK][0][40] = -8,
[0][0][RTW89_IC][1][40] = -18,
- [0][0][RTW89_IC][2][40] = 54,
[0][0][RTW89_KCC][1][40] = -2,
[0][0][RTW89_KCC][0][40] = -2,
[0][0][RTW89_ACMA][1][40] = 32,
@@ -52807,13 +50915,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][40] = 30,
[0][0][RTW89_THAILAND][0][40] = -18,
[0][0][RTW89_FCC][1][42] = -18,
- [0][0][RTW89_FCC][2][42] = 54,
[0][0][RTW89_ETSI][1][42] = 32,
[0][0][RTW89_ETSI][0][42] = -8,
[0][0][RTW89_MKK][1][42] = 30,
[0][0][RTW89_MKK][0][42] = -8,
[0][0][RTW89_IC][1][42] = -18,
- [0][0][RTW89_IC][2][42] = 54,
[0][0][RTW89_KCC][1][42] = -2,
[0][0][RTW89_KCC][0][42] = -2,
[0][0][RTW89_ACMA][1][42] = 32,
@@ -52826,13 +50932,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][42] = 30,
[0][0][RTW89_THAILAND][0][42] = -18,
[0][0][RTW89_FCC][1][44] = -16,
- [0][0][RTW89_FCC][2][44] = 56,
[0][0][RTW89_ETSI][1][44] = 32,
[0][0][RTW89_ETSI][0][44] = -6,
[0][0][RTW89_MKK][1][44] = 8,
[0][0][RTW89_MKK][0][44] = -10,
[0][0][RTW89_IC][1][44] = -16,
- [0][0][RTW89_IC][2][44] = 56,
[0][0][RTW89_KCC][1][44] = -2,
[0][0][RTW89_KCC][0][44] = -2,
[0][0][RTW89_ACMA][1][44] = 32,
@@ -52845,13 +50949,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][44] = 30,
[0][0][RTW89_THAILAND][0][44] = -16,
[0][0][RTW89_FCC][1][45] = -16,
- [0][0][RTW89_FCC][2][45] = 127,
[0][0][RTW89_ETSI][1][45] = 127,
[0][0][RTW89_ETSI][0][45] = 127,
[0][0][RTW89_MKK][1][45] = 127,
[0][0][RTW89_MKK][0][45] = 127,
[0][0][RTW89_IC][1][45] = -16,
- [0][0][RTW89_IC][2][45] = 56,
[0][0][RTW89_KCC][1][45] = -2,
[0][0][RTW89_KCC][0][45] = 127,
[0][0][RTW89_ACMA][1][45] = 127,
@@ -52864,13 +50966,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][45] = 127,
[0][0][RTW89_THAILAND][0][45] = 127,
[0][0][RTW89_FCC][1][47] = -18,
- [0][0][RTW89_FCC][2][47] = 127,
[0][0][RTW89_ETSI][1][47] = 127,
[0][0][RTW89_ETSI][0][47] = 127,
[0][0][RTW89_MKK][1][47] = 127,
[0][0][RTW89_MKK][0][47] = 127,
[0][0][RTW89_IC][1][47] = -18,
- [0][0][RTW89_IC][2][47] = 56,
[0][0][RTW89_KCC][1][47] = -2,
[0][0][RTW89_KCC][0][47] = 127,
[0][0][RTW89_ACMA][1][47] = 127,
@@ -52883,13 +50983,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][47] = 127,
[0][0][RTW89_THAILAND][0][47] = 127,
[0][0][RTW89_FCC][1][49] = -18,
- [0][0][RTW89_FCC][2][49] = 127,
[0][0][RTW89_ETSI][1][49] = 127,
[0][0][RTW89_ETSI][0][49] = 127,
[0][0][RTW89_MKK][1][49] = 127,
[0][0][RTW89_MKK][0][49] = 127,
[0][0][RTW89_IC][1][49] = -18,
- [0][0][RTW89_IC][2][49] = 56,
[0][0][RTW89_KCC][1][49] = -2,
[0][0][RTW89_KCC][0][49] = 127,
[0][0][RTW89_ACMA][1][49] = 127,
@@ -52902,13 +51000,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][49] = 127,
[0][0][RTW89_THAILAND][0][49] = 127,
[0][0][RTW89_FCC][1][51] = -18,
- [0][0][RTW89_FCC][2][51] = 127,
[0][0][RTW89_ETSI][1][51] = 127,
[0][0][RTW89_ETSI][0][51] = 127,
[0][0][RTW89_MKK][1][51] = 127,
[0][0][RTW89_MKK][0][51] = 127,
[0][0][RTW89_IC][1][51] = -18,
- [0][0][RTW89_IC][2][51] = 56,
[0][0][RTW89_KCC][1][51] = -2,
[0][0][RTW89_KCC][0][51] = 127,
[0][0][RTW89_ACMA][1][51] = 127,
@@ -52921,13 +51017,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][51] = 127,
[0][0][RTW89_THAILAND][0][51] = 127,
[0][0][RTW89_FCC][1][53] = -16,
- [0][0][RTW89_FCC][2][53] = 127,
[0][0][RTW89_ETSI][1][53] = 127,
[0][0][RTW89_ETSI][0][53] = 127,
[0][0][RTW89_MKK][1][53] = 127,
[0][0][RTW89_MKK][0][53] = 127,
[0][0][RTW89_IC][1][53] = -16,
- [0][0][RTW89_IC][2][53] = 56,
[0][0][RTW89_KCC][1][53] = -2,
[0][0][RTW89_KCC][0][53] = 127,
[0][0][RTW89_ACMA][1][53] = 127,
@@ -52940,13 +51034,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][53] = 127,
[0][0][RTW89_THAILAND][0][53] = 127,
[0][0][RTW89_FCC][1][55] = -18,
- [0][0][RTW89_FCC][2][55] = 56,
[0][0][RTW89_ETSI][1][55] = 127,
[0][0][RTW89_ETSI][0][55] = 127,
[0][0][RTW89_MKK][1][55] = 127,
[0][0][RTW89_MKK][0][55] = 127,
[0][0][RTW89_IC][1][55] = -18,
- [0][0][RTW89_IC][2][55] = 56,
[0][0][RTW89_KCC][1][55] = -2,
[0][0][RTW89_KCC][0][55] = 127,
[0][0][RTW89_ACMA][1][55] = 127,
@@ -52959,13 +51051,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][55] = 127,
[0][0][RTW89_THAILAND][0][55] = 127,
[0][0][RTW89_FCC][1][57] = -18,
- [0][0][RTW89_FCC][2][57] = 56,
[0][0][RTW89_ETSI][1][57] = 127,
[0][0][RTW89_ETSI][0][57] = 127,
[0][0][RTW89_MKK][1][57] = 127,
[0][0][RTW89_MKK][0][57] = 127,
[0][0][RTW89_IC][1][57] = -18,
- [0][0][RTW89_IC][2][57] = 56,
[0][0][RTW89_KCC][1][57] = -2,
[0][0][RTW89_KCC][0][57] = 127,
[0][0][RTW89_ACMA][1][57] = 127,
@@ -52978,13 +51068,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][57] = 127,
[0][0][RTW89_THAILAND][0][57] = 127,
[0][0][RTW89_FCC][1][59] = -18,
- [0][0][RTW89_FCC][2][59] = 56,
[0][0][RTW89_ETSI][1][59] = 127,
[0][0][RTW89_ETSI][0][59] = 127,
[0][0][RTW89_MKK][1][59] = 127,
[0][0][RTW89_MKK][0][59] = 127,
[0][0][RTW89_IC][1][59] = -18,
- [0][0][RTW89_IC][2][59] = 56,
[0][0][RTW89_KCC][1][59] = -2,
[0][0][RTW89_KCC][0][59] = 127,
[0][0][RTW89_ACMA][1][59] = 127,
@@ -52997,13 +51085,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][59] = 127,
[0][0][RTW89_THAILAND][0][59] = 127,
[0][0][RTW89_FCC][1][60] = -18,
- [0][0][RTW89_FCC][2][60] = 56,
[0][0][RTW89_ETSI][1][60] = 127,
[0][0][RTW89_ETSI][0][60] = 127,
[0][0][RTW89_MKK][1][60] = 127,
[0][0][RTW89_MKK][0][60] = 127,
[0][0][RTW89_IC][1][60] = -18,
- [0][0][RTW89_IC][2][60] = 56,
[0][0][RTW89_KCC][1][60] = -2,
[0][0][RTW89_KCC][0][60] = 127,
[0][0][RTW89_ACMA][1][60] = 127,
@@ -53016,13 +51102,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][60] = 127,
[0][0][RTW89_THAILAND][0][60] = 127,
[0][0][RTW89_FCC][1][62] = -18,
- [0][0][RTW89_FCC][2][62] = 56,
[0][0][RTW89_ETSI][1][62] = 127,
[0][0][RTW89_ETSI][0][62] = 127,
[0][0][RTW89_MKK][1][62] = 127,
[0][0][RTW89_MKK][0][62] = 127,
[0][0][RTW89_IC][1][62] = -18,
- [0][0][RTW89_IC][2][62] = 56,
[0][0][RTW89_KCC][1][62] = -2,
[0][0][RTW89_KCC][0][62] = 127,
[0][0][RTW89_ACMA][1][62] = 127,
@@ -53035,13 +51119,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][62] = 127,
[0][0][RTW89_THAILAND][0][62] = 127,
[0][0][RTW89_FCC][1][64] = -18,
- [0][0][RTW89_FCC][2][64] = 56,
[0][0][RTW89_ETSI][1][64] = 127,
[0][0][RTW89_ETSI][0][64] = 127,
[0][0][RTW89_MKK][1][64] = 127,
[0][0][RTW89_MKK][0][64] = 127,
[0][0][RTW89_IC][1][64] = -18,
- [0][0][RTW89_IC][2][64] = 56,
[0][0][RTW89_KCC][1][64] = -2,
[0][0][RTW89_KCC][0][64] = 127,
[0][0][RTW89_ACMA][1][64] = 127,
@@ -53054,13 +51136,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][64] = 127,
[0][0][RTW89_THAILAND][0][64] = 127,
[0][0][RTW89_FCC][1][66] = -18,
- [0][0][RTW89_FCC][2][66] = 56,
[0][0][RTW89_ETSI][1][66] = 127,
[0][0][RTW89_ETSI][0][66] = 127,
[0][0][RTW89_MKK][1][66] = 127,
[0][0][RTW89_MKK][0][66] = 127,
[0][0][RTW89_IC][1][66] = -18,
- [0][0][RTW89_IC][2][66] = 56,
[0][0][RTW89_KCC][1][66] = -2,
[0][0][RTW89_KCC][0][66] = 127,
[0][0][RTW89_ACMA][1][66] = 127,
@@ -53073,13 +51153,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][66] = 127,
[0][0][RTW89_THAILAND][0][66] = 127,
[0][0][RTW89_FCC][1][68] = -18,
- [0][0][RTW89_FCC][2][68] = 56,
[0][0][RTW89_ETSI][1][68] = 127,
[0][0][RTW89_ETSI][0][68] = 127,
[0][0][RTW89_MKK][1][68] = 127,
[0][0][RTW89_MKK][0][68] = 127,
[0][0][RTW89_IC][1][68] = -18,
- [0][0][RTW89_IC][2][68] = 56,
[0][0][RTW89_KCC][1][68] = -2,
[0][0][RTW89_KCC][0][68] = 127,
[0][0][RTW89_ACMA][1][68] = 127,
@@ -53092,13 +51170,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][68] = 127,
[0][0][RTW89_THAILAND][0][68] = 127,
[0][0][RTW89_FCC][1][70] = -16,
- [0][0][RTW89_FCC][2][70] = 56,
[0][0][RTW89_ETSI][1][70] = 127,
[0][0][RTW89_ETSI][0][70] = 127,
[0][0][RTW89_MKK][1][70] = 127,
[0][0][RTW89_MKK][0][70] = 127,
[0][0][RTW89_IC][1][70] = -16,
- [0][0][RTW89_IC][2][70] = 56,
[0][0][RTW89_KCC][1][70] = -2,
[0][0][RTW89_KCC][0][70] = 127,
[0][0][RTW89_ACMA][1][70] = 127,
@@ -53111,13 +51187,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][70] = 127,
[0][0][RTW89_THAILAND][0][70] = 127,
[0][0][RTW89_FCC][1][72] = -18,
- [0][0][RTW89_FCC][2][72] = 56,
[0][0][RTW89_ETSI][1][72] = 127,
[0][0][RTW89_ETSI][0][72] = 127,
[0][0][RTW89_MKK][1][72] = 127,
[0][0][RTW89_MKK][0][72] = 127,
[0][0][RTW89_IC][1][72] = -18,
- [0][0][RTW89_IC][2][72] = 56,
[0][0][RTW89_KCC][1][72] = -2,
[0][0][RTW89_KCC][0][72] = 127,
[0][0][RTW89_ACMA][1][72] = 127,
@@ -53130,13 +51204,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][72] = 127,
[0][0][RTW89_THAILAND][0][72] = 127,
[0][0][RTW89_FCC][1][74] = -18,
- [0][0][RTW89_FCC][2][74] = 56,
[0][0][RTW89_ETSI][1][74] = 127,
[0][0][RTW89_ETSI][0][74] = 127,
[0][0][RTW89_MKK][1][74] = 127,
[0][0][RTW89_MKK][0][74] = 127,
[0][0][RTW89_IC][1][74] = -18,
- [0][0][RTW89_IC][2][74] = 56,
[0][0][RTW89_KCC][1][74] = -2,
[0][0][RTW89_KCC][0][74] = 127,
[0][0][RTW89_ACMA][1][74] = 127,
@@ -53149,13 +51221,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][74] = 127,
[0][0][RTW89_THAILAND][0][74] = 127,
[0][0][RTW89_FCC][1][75] = -18,
- [0][0][RTW89_FCC][2][75] = 56,
[0][0][RTW89_ETSI][1][75] = 127,
[0][0][RTW89_ETSI][0][75] = 127,
[0][0][RTW89_MKK][1][75] = 127,
[0][0][RTW89_MKK][0][75] = 127,
[0][0][RTW89_IC][1][75] = -18,
- [0][0][RTW89_IC][2][75] = 56,
[0][0][RTW89_KCC][1][75] = -2,
[0][0][RTW89_KCC][0][75] = 127,
[0][0][RTW89_ACMA][1][75] = 127,
@@ -53168,13 +51238,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][75] = 127,
[0][0][RTW89_THAILAND][0][75] = 127,
[0][0][RTW89_FCC][1][77] = -18,
- [0][0][RTW89_FCC][2][77] = 56,
[0][0][RTW89_ETSI][1][77] = 127,
[0][0][RTW89_ETSI][0][77] = 127,
[0][0][RTW89_MKK][1][77] = 127,
[0][0][RTW89_MKK][0][77] = 127,
[0][0][RTW89_IC][1][77] = -18,
- [0][0][RTW89_IC][2][77] = 56,
[0][0][RTW89_KCC][1][77] = -2,
[0][0][RTW89_KCC][0][77] = 127,
[0][0][RTW89_ACMA][1][77] = 127,
@@ -53187,13 +51255,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][77] = 127,
[0][0][RTW89_THAILAND][0][77] = 127,
[0][0][RTW89_FCC][1][79] = -18,
- [0][0][RTW89_FCC][2][79] = 56,
[0][0][RTW89_ETSI][1][79] = 127,
[0][0][RTW89_ETSI][0][79] = 127,
[0][0][RTW89_MKK][1][79] = 127,
[0][0][RTW89_MKK][0][79] = 127,
[0][0][RTW89_IC][1][79] = -18,
- [0][0][RTW89_IC][2][79] = 56,
[0][0][RTW89_KCC][1][79] = -2,
[0][0][RTW89_KCC][0][79] = 127,
[0][0][RTW89_ACMA][1][79] = 127,
@@ -53206,13 +51272,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][79] = 127,
[0][0][RTW89_THAILAND][0][79] = 127,
[0][0][RTW89_FCC][1][81] = -18,
- [0][0][RTW89_FCC][2][81] = 56,
[0][0][RTW89_ETSI][1][81] = 127,
[0][0][RTW89_ETSI][0][81] = 127,
[0][0][RTW89_MKK][1][81] = 127,
[0][0][RTW89_MKK][0][81] = 127,
[0][0][RTW89_IC][1][81] = -18,
- [0][0][RTW89_IC][2][81] = 56,
[0][0][RTW89_KCC][1][81] = -2,
[0][0][RTW89_KCC][0][81] = 127,
[0][0][RTW89_ACMA][1][81] = 127,
@@ -53225,13 +51289,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][81] = 127,
[0][0][RTW89_THAILAND][0][81] = 127,
[0][0][RTW89_FCC][1][83] = -18,
- [0][0][RTW89_FCC][2][83] = 56,
[0][0][RTW89_ETSI][1][83] = 127,
[0][0][RTW89_ETSI][0][83] = 127,
[0][0][RTW89_MKK][1][83] = 127,
[0][0][RTW89_MKK][0][83] = 127,
[0][0][RTW89_IC][1][83] = -18,
- [0][0][RTW89_IC][2][83] = 56,
[0][0][RTW89_KCC][1][83] = -2,
[0][0][RTW89_KCC][0][83] = 127,
[0][0][RTW89_ACMA][1][83] = 127,
@@ -53244,13 +51306,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][83] = 127,
[0][0][RTW89_THAILAND][0][83] = 127,
[0][0][RTW89_FCC][1][85] = -18,
- [0][0][RTW89_FCC][2][85] = 56,
[0][0][RTW89_ETSI][1][85] = 127,
[0][0][RTW89_ETSI][0][85] = 127,
[0][0][RTW89_MKK][1][85] = 127,
[0][0][RTW89_MKK][0][85] = 127,
[0][0][RTW89_IC][1][85] = -18,
- [0][0][RTW89_IC][2][85] = 56,
[0][0][RTW89_KCC][1][85] = -2,
[0][0][RTW89_KCC][0][85] = 127,
[0][0][RTW89_ACMA][1][85] = 127,
@@ -53263,13 +51323,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][85] = 127,
[0][0][RTW89_THAILAND][0][85] = 127,
[0][0][RTW89_FCC][1][87] = -16,
- [0][0][RTW89_FCC][2][87] = 127,
[0][0][RTW89_ETSI][1][87] = 127,
[0][0][RTW89_ETSI][0][87] = 127,
[0][0][RTW89_MKK][1][87] = 127,
[0][0][RTW89_MKK][0][87] = 127,
[0][0][RTW89_IC][1][87] = -16,
- [0][0][RTW89_IC][2][87] = 127,
[0][0][RTW89_KCC][1][87] = -2,
[0][0][RTW89_KCC][0][87] = 127,
[0][0][RTW89_ACMA][1][87] = 127,
@@ -53282,13 +51340,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][87] = 127,
[0][0][RTW89_THAILAND][0][87] = 127,
[0][0][RTW89_FCC][1][89] = -16,
- [0][0][RTW89_FCC][2][89] = 127,
[0][0][RTW89_ETSI][1][89] = 127,
[0][0][RTW89_ETSI][0][89] = 127,
[0][0][RTW89_MKK][1][89] = 127,
[0][0][RTW89_MKK][0][89] = 127,
[0][0][RTW89_IC][1][89] = -16,
- [0][0][RTW89_IC][2][89] = 127,
[0][0][RTW89_KCC][1][89] = -2,
[0][0][RTW89_KCC][0][89] = 127,
[0][0][RTW89_ACMA][1][89] = 127,
@@ -53301,13 +51357,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][89] = 127,
[0][0][RTW89_THAILAND][0][89] = 127,
[0][0][RTW89_FCC][1][90] = -16,
- [0][0][RTW89_FCC][2][90] = 127,
[0][0][RTW89_ETSI][1][90] = 127,
[0][0][RTW89_ETSI][0][90] = 127,
[0][0][RTW89_MKK][1][90] = 127,
[0][0][RTW89_MKK][0][90] = 127,
[0][0][RTW89_IC][1][90] = -16,
- [0][0][RTW89_IC][2][90] = 127,
[0][0][RTW89_KCC][1][90] = -2,
[0][0][RTW89_KCC][0][90] = 127,
[0][0][RTW89_ACMA][1][90] = 127,
@@ -53320,13 +51374,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][90] = 127,
[0][0][RTW89_THAILAND][0][90] = 127,
[0][0][RTW89_FCC][1][92] = -16,
- [0][0][RTW89_FCC][2][92] = 127,
[0][0][RTW89_ETSI][1][92] = 127,
[0][0][RTW89_ETSI][0][92] = 127,
[0][0][RTW89_MKK][1][92] = 127,
[0][0][RTW89_MKK][0][92] = 127,
[0][0][RTW89_IC][1][92] = -16,
- [0][0][RTW89_IC][2][92] = 127,
[0][0][RTW89_KCC][1][92] = -2,
[0][0][RTW89_KCC][0][92] = 127,
[0][0][RTW89_ACMA][1][92] = 127,
@@ -53339,13 +51391,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][92] = 127,
[0][0][RTW89_THAILAND][0][92] = 127,
[0][0][RTW89_FCC][1][94] = -16,
- [0][0][RTW89_FCC][2][94] = 127,
[0][0][RTW89_ETSI][1][94] = 127,
[0][0][RTW89_ETSI][0][94] = 127,
[0][0][RTW89_MKK][1][94] = 127,
[0][0][RTW89_MKK][0][94] = 127,
[0][0][RTW89_IC][1][94] = -16,
- [0][0][RTW89_IC][2][94] = 127,
[0][0][RTW89_KCC][1][94] = -2,
[0][0][RTW89_KCC][0][94] = 127,
[0][0][RTW89_ACMA][1][94] = 127,
@@ -53358,13 +51408,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][94] = 127,
[0][0][RTW89_THAILAND][0][94] = 127,
[0][0][RTW89_FCC][1][96] = -16,
- [0][0][RTW89_FCC][2][96] = 127,
[0][0][RTW89_ETSI][1][96] = 127,
[0][0][RTW89_ETSI][0][96] = 127,
[0][0][RTW89_MKK][1][96] = 127,
[0][0][RTW89_MKK][0][96] = 127,
[0][0][RTW89_IC][1][96] = -16,
- [0][0][RTW89_IC][2][96] = 127,
[0][0][RTW89_KCC][1][96] = -2,
[0][0][RTW89_KCC][0][96] = 127,
[0][0][RTW89_ACMA][1][96] = 127,
@@ -53377,13 +51425,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][96] = 127,
[0][0][RTW89_THAILAND][0][96] = 127,
[0][0][RTW89_FCC][1][98] = -16,
- [0][0][RTW89_FCC][2][98] = 127,
[0][0][RTW89_ETSI][1][98] = 127,
[0][0][RTW89_ETSI][0][98] = 127,
[0][0][RTW89_MKK][1][98] = 127,
[0][0][RTW89_MKK][0][98] = 127,
[0][0][RTW89_IC][1][98] = -16,
- [0][0][RTW89_IC][2][98] = 127,
[0][0][RTW89_KCC][1][98] = -2,
[0][0][RTW89_KCC][0][98] = 127,
[0][0][RTW89_ACMA][1][98] = 127,
@@ -53396,13 +51442,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][98] = 127,
[0][0][RTW89_THAILAND][0][98] = 127,
[0][0][RTW89_FCC][1][100] = -16,
- [0][0][RTW89_FCC][2][100] = 127,
[0][0][RTW89_ETSI][1][100] = 127,
[0][0][RTW89_ETSI][0][100] = 127,
[0][0][RTW89_MKK][1][100] = 127,
[0][0][RTW89_MKK][0][100] = 127,
[0][0][RTW89_IC][1][100] = -16,
- [0][0][RTW89_IC][2][100] = 127,
[0][0][RTW89_KCC][1][100] = -2,
[0][0][RTW89_KCC][0][100] = 127,
[0][0][RTW89_ACMA][1][100] = 127,
@@ -53415,13 +51459,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][100] = 127,
[0][0][RTW89_THAILAND][0][100] = 127,
[0][0][RTW89_FCC][1][102] = -16,
- [0][0][RTW89_FCC][2][102] = 127,
[0][0][RTW89_ETSI][1][102] = 127,
[0][0][RTW89_ETSI][0][102] = 127,
[0][0][RTW89_MKK][1][102] = 127,
[0][0][RTW89_MKK][0][102] = 127,
[0][0][RTW89_IC][1][102] = -16,
- [0][0][RTW89_IC][2][102] = 127,
[0][0][RTW89_KCC][1][102] = -2,
[0][0][RTW89_KCC][0][102] = 127,
[0][0][RTW89_ACMA][1][102] = 127,
@@ -53434,13 +51476,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][102] = 127,
[0][0][RTW89_THAILAND][0][102] = 127,
[0][0][RTW89_FCC][1][104] = -16,
- [0][0][RTW89_FCC][2][104] = 127,
[0][0][RTW89_ETSI][1][104] = 127,
[0][0][RTW89_ETSI][0][104] = 127,
[0][0][RTW89_MKK][1][104] = 127,
[0][0][RTW89_MKK][0][104] = 127,
[0][0][RTW89_IC][1][104] = -16,
- [0][0][RTW89_IC][2][104] = 127,
[0][0][RTW89_KCC][1][104] = -2,
[0][0][RTW89_KCC][0][104] = 127,
[0][0][RTW89_ACMA][1][104] = 127,
@@ -53453,13 +51493,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][104] = 127,
[0][0][RTW89_THAILAND][0][104] = 127,
[0][0][RTW89_FCC][1][105] = -16,
- [0][0][RTW89_FCC][2][105] = 127,
[0][0][RTW89_ETSI][1][105] = 127,
[0][0][RTW89_ETSI][0][105] = 127,
[0][0][RTW89_MKK][1][105] = 127,
[0][0][RTW89_MKK][0][105] = 127,
[0][0][RTW89_IC][1][105] = -16,
- [0][0][RTW89_IC][2][105] = 127,
[0][0][RTW89_KCC][1][105] = -2,
[0][0][RTW89_KCC][0][105] = 127,
[0][0][RTW89_ACMA][1][105] = 127,
@@ -53472,13 +51510,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][105] = 127,
[0][0][RTW89_THAILAND][0][105] = 127,
[0][0][RTW89_FCC][1][107] = -12,
- [0][0][RTW89_FCC][2][107] = 127,
[0][0][RTW89_ETSI][1][107] = 127,
[0][0][RTW89_ETSI][0][107] = 127,
[0][0][RTW89_MKK][1][107] = 127,
[0][0][RTW89_MKK][0][107] = 127,
[0][0][RTW89_IC][1][107] = -12,
- [0][0][RTW89_IC][2][107] = 127,
[0][0][RTW89_KCC][1][107] = -2,
[0][0][RTW89_KCC][0][107] = 127,
[0][0][RTW89_ACMA][1][107] = 127,
@@ -53491,13 +51527,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][107] = 127,
[0][0][RTW89_THAILAND][0][107] = 127,
[0][0][RTW89_FCC][1][109] = -12,
- [0][0][RTW89_FCC][2][109] = 127,
[0][0][RTW89_ETSI][1][109] = 127,
[0][0][RTW89_ETSI][0][109] = 127,
[0][0][RTW89_MKK][1][109] = 127,
[0][0][RTW89_MKK][0][109] = 127,
[0][0][RTW89_IC][1][109] = -12,
- [0][0][RTW89_IC][2][109] = 127,
[0][0][RTW89_KCC][1][109] = 127,
[0][0][RTW89_KCC][0][109] = 127,
[0][0][RTW89_ACMA][1][109] = 127,
@@ -53510,13 +51544,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][109] = 127,
[0][0][RTW89_THAILAND][0][109] = 127,
[0][0][RTW89_FCC][1][111] = 127,
- [0][0][RTW89_FCC][2][111] = 127,
[0][0][RTW89_ETSI][1][111] = 127,
[0][0][RTW89_ETSI][0][111] = 127,
[0][0][RTW89_MKK][1][111] = 127,
[0][0][RTW89_MKK][0][111] = 127,
[0][0][RTW89_IC][1][111] = 127,
- [0][0][RTW89_IC][2][111] = 127,
[0][0][RTW89_KCC][1][111] = 127,
[0][0][RTW89_KCC][0][111] = 127,
[0][0][RTW89_ACMA][1][111] = 127,
@@ -53529,13 +51561,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][111] = 127,
[0][0][RTW89_THAILAND][0][111] = 127,
[0][0][RTW89_FCC][1][113] = 127,
- [0][0][RTW89_FCC][2][113] = 127,
[0][0][RTW89_ETSI][1][113] = 127,
[0][0][RTW89_ETSI][0][113] = 127,
[0][0][RTW89_MKK][1][113] = 127,
[0][0][RTW89_MKK][0][113] = 127,
[0][0][RTW89_IC][1][113] = 127,
- [0][0][RTW89_IC][2][113] = 127,
[0][0][RTW89_KCC][1][113] = 127,
[0][0][RTW89_KCC][0][113] = 127,
[0][0][RTW89_ACMA][1][113] = 127,
@@ -53548,13 +51578,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][113] = 127,
[0][0][RTW89_THAILAND][0][113] = 127,
[0][0][RTW89_FCC][1][115] = 127,
- [0][0][RTW89_FCC][2][115] = 127,
[0][0][RTW89_ETSI][1][115] = 127,
[0][0][RTW89_ETSI][0][115] = 127,
[0][0][RTW89_MKK][1][115] = 127,
[0][0][RTW89_MKK][0][115] = 127,
[0][0][RTW89_IC][1][115] = 127,
- [0][0][RTW89_IC][2][115] = 127,
[0][0][RTW89_KCC][1][115] = 127,
[0][0][RTW89_KCC][0][115] = 127,
[0][0][RTW89_ACMA][1][115] = 127,
@@ -53567,13 +51595,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][115] = 127,
[0][0][RTW89_THAILAND][0][115] = 127,
[0][0][RTW89_FCC][1][117] = 127,
- [0][0][RTW89_FCC][2][117] = 127,
[0][0][RTW89_ETSI][1][117] = 127,
[0][0][RTW89_ETSI][0][117] = 127,
[0][0][RTW89_MKK][1][117] = 127,
[0][0][RTW89_MKK][0][117] = 127,
[0][0][RTW89_IC][1][117] = 127,
- [0][0][RTW89_IC][2][117] = 127,
[0][0][RTW89_KCC][1][117] = 127,
[0][0][RTW89_KCC][0][117] = 127,
[0][0][RTW89_ACMA][1][117] = 127,
@@ -53586,13 +51612,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][117] = 127,
[0][0][RTW89_THAILAND][0][117] = 127,
[0][0][RTW89_FCC][1][119] = 127,
- [0][0][RTW89_FCC][2][119] = 127,
[0][0][RTW89_ETSI][1][119] = 127,
[0][0][RTW89_ETSI][0][119] = 127,
[0][0][RTW89_MKK][1][119] = 127,
[0][0][RTW89_MKK][0][119] = 127,
[0][0][RTW89_IC][1][119] = 127,
- [0][0][RTW89_IC][2][119] = 127,
[0][0][RTW89_KCC][1][119] = 127,
[0][0][RTW89_KCC][0][119] = 127,
[0][0][RTW89_ACMA][1][119] = 127,
@@ -53605,13 +51629,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][0][RTW89_THAILAND][1][119] = 127,
[0][0][RTW89_THAILAND][0][119] = 127,
[0][1][RTW89_FCC][1][0] = -40,
- [0][1][RTW89_FCC][2][0] = 32,
[0][1][RTW89_ETSI][1][0] = 20,
[0][1][RTW89_ETSI][0][0] = -18,
[0][1][RTW89_MKK][1][0] = 18,
[0][1][RTW89_MKK][0][0] = -20,
[0][1][RTW89_IC][1][0] = -40,
- [0][1][RTW89_IC][2][0] = 32,
[0][1][RTW89_KCC][1][0] = -14,
[0][1][RTW89_KCC][0][0] = -14,
[0][1][RTW89_ACMA][1][0] = 20,
@@ -53624,13 +51646,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][0] = 6,
[0][1][RTW89_THAILAND][0][0] = -40,
[0][1][RTW89_FCC][1][2] = -40,
- [0][1][RTW89_FCC][2][2] = 32,
[0][1][RTW89_ETSI][1][2] = 20,
[0][1][RTW89_ETSI][0][2] = -18,
[0][1][RTW89_MKK][1][2] = 18,
[0][1][RTW89_MKK][0][2] = -22,
[0][1][RTW89_IC][1][2] = -40,
- [0][1][RTW89_IC][2][2] = 32,
[0][1][RTW89_KCC][1][2] = -14,
[0][1][RTW89_KCC][0][2] = -14,
[0][1][RTW89_ACMA][1][2] = 20,
@@ -53643,13 +51663,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][2] = 6,
[0][1][RTW89_THAILAND][0][2] = -40,
[0][1][RTW89_FCC][1][4] = -40,
- [0][1][RTW89_FCC][2][4] = 32,
[0][1][RTW89_ETSI][1][4] = 20,
[0][1][RTW89_ETSI][0][4] = -18,
[0][1][RTW89_MKK][1][4] = 18,
[0][1][RTW89_MKK][0][4] = -22,
[0][1][RTW89_IC][1][4] = -40,
- [0][1][RTW89_IC][2][4] = 32,
[0][1][RTW89_KCC][1][4] = -14,
[0][1][RTW89_KCC][0][4] = -14,
[0][1][RTW89_ACMA][1][4] = 20,
@@ -53662,13 +51680,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][4] = 6,
[0][1][RTW89_THAILAND][0][4] = -40,
[0][1][RTW89_FCC][1][6] = -40,
- [0][1][RTW89_FCC][2][6] = 32,
[0][1][RTW89_ETSI][1][6] = 20,
[0][1][RTW89_ETSI][0][6] = -18,
[0][1][RTW89_MKK][1][6] = 18,
[0][1][RTW89_MKK][0][6] = -22,
[0][1][RTW89_IC][1][6] = -40,
- [0][1][RTW89_IC][2][6] = 32,
[0][1][RTW89_KCC][1][6] = -14,
[0][1][RTW89_KCC][0][6] = -14,
[0][1][RTW89_ACMA][1][6] = 20,
@@ -53681,13 +51697,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][6] = 6,
[0][1][RTW89_THAILAND][0][6] = -40,
[0][1][RTW89_FCC][1][8] = -40,
- [0][1][RTW89_FCC][2][8] = 32,
[0][1][RTW89_ETSI][1][8] = 20,
[0][1][RTW89_ETSI][0][8] = -18,
[0][1][RTW89_MKK][1][8] = 18,
[0][1][RTW89_MKK][0][8] = -22,
[0][1][RTW89_IC][1][8] = -40,
- [0][1][RTW89_IC][2][8] = 32,
[0][1][RTW89_KCC][1][8] = -14,
[0][1][RTW89_KCC][0][8] = -14,
[0][1][RTW89_ACMA][1][8] = 20,
@@ -53700,13 +51714,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][8] = 6,
[0][1][RTW89_THAILAND][0][8] = -40,
[0][1][RTW89_FCC][1][10] = -40,
- [0][1][RTW89_FCC][2][10] = 32,
[0][1][RTW89_ETSI][1][10] = 20,
[0][1][RTW89_ETSI][0][10] = -18,
[0][1][RTW89_MKK][1][10] = 18,
[0][1][RTW89_MKK][0][10] = -22,
[0][1][RTW89_IC][1][10] = -40,
- [0][1][RTW89_IC][2][10] = 32,
[0][1][RTW89_KCC][1][10] = -14,
[0][1][RTW89_KCC][0][10] = -14,
[0][1][RTW89_ACMA][1][10] = 20,
@@ -53719,13 +51731,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][10] = 6,
[0][1][RTW89_THAILAND][0][10] = -40,
[0][1][RTW89_FCC][1][12] = -40,
- [0][1][RTW89_FCC][2][12] = 32,
[0][1][RTW89_ETSI][1][12] = 20,
[0][1][RTW89_ETSI][0][12] = -18,
[0][1][RTW89_MKK][1][12] = 18,
[0][1][RTW89_MKK][0][12] = -22,
[0][1][RTW89_IC][1][12] = -40,
- [0][1][RTW89_IC][2][12] = 32,
[0][1][RTW89_KCC][1][12] = -14,
[0][1][RTW89_KCC][0][12] = -14,
[0][1][RTW89_ACMA][1][12] = 20,
@@ -53738,13 +51748,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][12] = 6,
[0][1][RTW89_THAILAND][0][12] = -40,
[0][1][RTW89_FCC][1][14] = -40,
- [0][1][RTW89_FCC][2][14] = 32,
[0][1][RTW89_ETSI][1][14] = 20,
[0][1][RTW89_ETSI][0][14] = -18,
[0][1][RTW89_MKK][1][14] = 18,
[0][1][RTW89_MKK][0][14] = -22,
[0][1][RTW89_IC][1][14] = -40,
- [0][1][RTW89_IC][2][14] = 32,
[0][1][RTW89_KCC][1][14] = -14,
[0][1][RTW89_KCC][0][14] = -14,
[0][1][RTW89_ACMA][1][14] = 20,
@@ -53757,13 +51765,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][14] = 6,
[0][1][RTW89_THAILAND][0][14] = -40,
[0][1][RTW89_FCC][1][15] = -40,
- [0][1][RTW89_FCC][2][15] = 32,
[0][1][RTW89_ETSI][1][15] = 20,
[0][1][RTW89_ETSI][0][15] = -18,
[0][1][RTW89_MKK][1][15] = 18,
[0][1][RTW89_MKK][0][15] = -22,
[0][1][RTW89_IC][1][15] = -40,
- [0][1][RTW89_IC][2][15] = 32,
[0][1][RTW89_KCC][1][15] = -14,
[0][1][RTW89_KCC][0][15] = -14,
[0][1][RTW89_ACMA][1][15] = 20,
@@ -53776,13 +51782,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][15] = 6,
[0][1][RTW89_THAILAND][0][15] = -40,
[0][1][RTW89_FCC][1][17] = -40,
- [0][1][RTW89_FCC][2][17] = 32,
[0][1][RTW89_ETSI][1][17] = 20,
[0][1][RTW89_ETSI][0][17] = -18,
[0][1][RTW89_MKK][1][17] = 18,
[0][1][RTW89_MKK][0][17] = -22,
[0][1][RTW89_IC][1][17] = -40,
- [0][1][RTW89_IC][2][17] = 32,
[0][1][RTW89_KCC][1][17] = -14,
[0][1][RTW89_KCC][0][17] = -14,
[0][1][RTW89_ACMA][1][17] = 20,
@@ -53795,13 +51799,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][17] = 6,
[0][1][RTW89_THAILAND][0][17] = -40,
[0][1][RTW89_FCC][1][19] = -40,
- [0][1][RTW89_FCC][2][19] = 32,
[0][1][RTW89_ETSI][1][19] = 20,
[0][1][RTW89_ETSI][0][19] = -18,
[0][1][RTW89_MKK][1][19] = 18,
[0][1][RTW89_MKK][0][19] = -22,
[0][1][RTW89_IC][1][19] = -40,
- [0][1][RTW89_IC][2][19] = 32,
[0][1][RTW89_KCC][1][19] = -14,
[0][1][RTW89_KCC][0][19] = -14,
[0][1][RTW89_ACMA][1][19] = 20,
@@ -53814,13 +51816,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][19] = 6,
[0][1][RTW89_THAILAND][0][19] = -40,
[0][1][RTW89_FCC][1][21] = -40,
- [0][1][RTW89_FCC][2][21] = 32,
[0][1][RTW89_ETSI][1][21] = 20,
[0][1][RTW89_ETSI][0][21] = -18,
[0][1][RTW89_MKK][1][21] = 18,
[0][1][RTW89_MKK][0][21] = -22,
[0][1][RTW89_IC][1][21] = -40,
- [0][1][RTW89_IC][2][21] = 32,
[0][1][RTW89_KCC][1][21] = -14,
[0][1][RTW89_KCC][0][21] = -14,
[0][1][RTW89_ACMA][1][21] = 20,
@@ -53833,13 +51833,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][21] = 6,
[0][1][RTW89_THAILAND][0][21] = -40,
[0][1][RTW89_FCC][1][23] = -40,
- [0][1][RTW89_FCC][2][23] = 32,
[0][1][RTW89_ETSI][1][23] = 20,
[0][1][RTW89_ETSI][0][23] = -18,
[0][1][RTW89_MKK][1][23] = 18,
[0][1][RTW89_MKK][0][23] = -22,
[0][1][RTW89_IC][1][23] = -40,
- [0][1][RTW89_IC][2][23] = 32,
[0][1][RTW89_KCC][1][23] = -14,
[0][1][RTW89_KCC][0][23] = -14,
[0][1][RTW89_ACMA][1][23] = 20,
@@ -53852,13 +51850,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][23] = 6,
[0][1][RTW89_THAILAND][0][23] = -40,
[0][1][RTW89_FCC][1][25] = -40,
- [0][1][RTW89_FCC][2][25] = 32,
[0][1][RTW89_ETSI][1][25] = 20,
[0][1][RTW89_ETSI][0][25] = -18,
[0][1][RTW89_MKK][1][25] = -4,
[0][1][RTW89_MKK][0][25] = -22,
[0][1][RTW89_IC][1][25] = -40,
- [0][1][RTW89_IC][2][25] = 32,
[0][1][RTW89_KCC][1][25] = -14,
[0][1][RTW89_KCC][0][25] = -14,
[0][1][RTW89_ACMA][1][25] = 20,
@@ -53871,13 +51867,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][25] = 6,
[0][1][RTW89_THAILAND][0][25] = -40,
[0][1][RTW89_FCC][1][27] = -40,
- [0][1][RTW89_FCC][2][27] = 32,
[0][1][RTW89_ETSI][1][27] = 20,
[0][1][RTW89_ETSI][0][27] = -18,
[0][1][RTW89_MKK][1][27] = -4,
[0][1][RTW89_MKK][0][27] = -22,
[0][1][RTW89_IC][1][27] = -40,
- [0][1][RTW89_IC][2][27] = 32,
[0][1][RTW89_KCC][1][27] = -14,
[0][1][RTW89_KCC][0][27] = -14,
[0][1][RTW89_ACMA][1][27] = 20,
@@ -53890,13 +51884,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][27] = 6,
[0][1][RTW89_THAILAND][0][27] = -40,
[0][1][RTW89_FCC][1][29] = -40,
- [0][1][RTW89_FCC][2][29] = 32,
[0][1][RTW89_ETSI][1][29] = 20,
[0][1][RTW89_ETSI][0][29] = -18,
[0][1][RTW89_MKK][1][29] = -4,
[0][1][RTW89_MKK][0][29] = -22,
[0][1][RTW89_IC][1][29] = -40,
- [0][1][RTW89_IC][2][29] = 32,
[0][1][RTW89_KCC][1][29] = -14,
[0][1][RTW89_KCC][0][29] = -14,
[0][1][RTW89_ACMA][1][29] = 20,
@@ -53909,13 +51901,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][29] = 6,
[0][1][RTW89_THAILAND][0][29] = -40,
[0][1][RTW89_FCC][1][30] = -40,
- [0][1][RTW89_FCC][2][30] = 32,
[0][1][RTW89_ETSI][1][30] = 20,
[0][1][RTW89_ETSI][0][30] = -18,
[0][1][RTW89_MKK][1][30] = -4,
[0][1][RTW89_MKK][0][30] = -22,
[0][1][RTW89_IC][1][30] = -40,
- [0][1][RTW89_IC][2][30] = 32,
[0][1][RTW89_KCC][1][30] = -14,
[0][1][RTW89_KCC][0][30] = -14,
[0][1][RTW89_ACMA][1][30] = 20,
@@ -53928,13 +51918,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][30] = 6,
[0][1][RTW89_THAILAND][0][30] = -40,
[0][1][RTW89_FCC][1][32] = -40,
- [0][1][RTW89_FCC][2][32] = 32,
[0][1][RTW89_ETSI][1][32] = 20,
[0][1][RTW89_ETSI][0][32] = -18,
[0][1][RTW89_MKK][1][32] = -4,
[0][1][RTW89_MKK][0][32] = -22,
[0][1][RTW89_IC][1][32] = -40,
- [0][1][RTW89_IC][2][32] = 32,
[0][1][RTW89_KCC][1][32] = -14,
[0][1][RTW89_KCC][0][32] = -14,
[0][1][RTW89_ACMA][1][32] = 20,
@@ -53947,13 +51935,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][32] = 6,
[0][1][RTW89_THAILAND][0][32] = -40,
[0][1][RTW89_FCC][1][34] = -40,
- [0][1][RTW89_FCC][2][34] = 32,
[0][1][RTW89_ETSI][1][34] = 20,
[0][1][RTW89_ETSI][0][34] = -18,
[0][1][RTW89_MKK][1][34] = -4,
[0][1][RTW89_MKK][0][34] = -22,
[0][1][RTW89_IC][1][34] = -40,
- [0][1][RTW89_IC][2][34] = 32,
[0][1][RTW89_KCC][1][34] = -14,
[0][1][RTW89_KCC][0][34] = -14,
[0][1][RTW89_ACMA][1][34] = 20,
@@ -53966,13 +51952,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][34] = 6,
[0][1][RTW89_THAILAND][0][34] = -40,
[0][1][RTW89_FCC][1][36] = -40,
- [0][1][RTW89_FCC][2][36] = 32,
[0][1][RTW89_ETSI][1][36] = 20,
[0][1][RTW89_ETSI][0][36] = -18,
[0][1][RTW89_MKK][1][36] = -4,
[0][1][RTW89_MKK][0][36] = -22,
[0][1][RTW89_IC][1][36] = -40,
- [0][1][RTW89_IC][2][36] = 32,
[0][1][RTW89_KCC][1][36] = -14,
[0][1][RTW89_KCC][0][36] = -14,
[0][1][RTW89_ACMA][1][36] = 20,
@@ -53985,13 +51969,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][36] = 6,
[0][1][RTW89_THAILAND][0][36] = -40,
[0][1][RTW89_FCC][1][38] = -40,
- [0][1][RTW89_FCC][2][38] = 32,
[0][1][RTW89_ETSI][1][38] = 20,
[0][1][RTW89_ETSI][0][38] = -18,
[0][1][RTW89_MKK][1][38] = -4,
[0][1][RTW89_MKK][0][38] = -22,
[0][1][RTW89_IC][1][38] = -40,
- [0][1][RTW89_IC][2][38] = 32,
[0][1][RTW89_KCC][1][38] = -14,
[0][1][RTW89_KCC][0][38] = -14,
[0][1][RTW89_ACMA][1][38] = 20,
@@ -54004,13 +51986,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][38] = 6,
[0][1][RTW89_THAILAND][0][38] = -40,
[0][1][RTW89_FCC][1][40] = -40,
- [0][1][RTW89_FCC][2][40] = 32,
[0][1][RTW89_ETSI][1][40] = 20,
[0][1][RTW89_ETSI][0][40] = -18,
[0][1][RTW89_MKK][1][40] = -4,
[0][1][RTW89_MKK][0][40] = -22,
[0][1][RTW89_IC][1][40] = -40,
- [0][1][RTW89_IC][2][40] = 32,
[0][1][RTW89_KCC][1][40] = -14,
[0][1][RTW89_KCC][0][40] = -14,
[0][1][RTW89_ACMA][1][40] = 20,
@@ -54023,13 +52003,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][40] = 6,
[0][1][RTW89_THAILAND][0][40] = -40,
[0][1][RTW89_FCC][1][42] = -40,
- [0][1][RTW89_FCC][2][42] = 32,
[0][1][RTW89_ETSI][1][42] = 20,
[0][1][RTW89_ETSI][0][42] = -18,
[0][1][RTW89_MKK][1][42] = -4,
[0][1][RTW89_MKK][0][42] = -22,
[0][1][RTW89_IC][1][42] = -40,
- [0][1][RTW89_IC][2][42] = 32,
[0][1][RTW89_KCC][1][42] = -14,
[0][1][RTW89_KCC][0][42] = -14,
[0][1][RTW89_ACMA][1][42] = 20,
@@ -54042,13 +52020,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][42] = 6,
[0][1][RTW89_THAILAND][0][42] = -40,
[0][1][RTW89_FCC][1][44] = -40,
- [0][1][RTW89_FCC][2][44] = 32,
[0][1][RTW89_ETSI][1][44] = 20,
[0][1][RTW89_ETSI][0][44] = -18,
[0][1][RTW89_MKK][1][44] = -4,
[0][1][RTW89_MKK][0][44] = -22,
[0][1][RTW89_IC][1][44] = -40,
- [0][1][RTW89_IC][2][44] = 32,
[0][1][RTW89_KCC][1][44] = -14,
[0][1][RTW89_KCC][0][44] = -14,
[0][1][RTW89_ACMA][1][44] = 20,
@@ -54061,13 +52037,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][44] = 6,
[0][1][RTW89_THAILAND][0][44] = -40,
[0][1][RTW89_FCC][1][45] = -40,
- [0][1][RTW89_FCC][2][45] = 127,
[0][1][RTW89_ETSI][1][45] = 127,
[0][1][RTW89_ETSI][0][45] = 127,
[0][1][RTW89_MKK][1][45] = 127,
[0][1][RTW89_MKK][0][45] = 127,
[0][1][RTW89_IC][1][45] = -40,
- [0][1][RTW89_IC][2][45] = 32,
[0][1][RTW89_KCC][1][45] = -14,
[0][1][RTW89_KCC][0][45] = 127,
[0][1][RTW89_ACMA][1][45] = 127,
@@ -54080,13 +52054,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][45] = 127,
[0][1][RTW89_THAILAND][0][45] = 127,
[0][1][RTW89_FCC][1][47] = -40,
- [0][1][RTW89_FCC][2][47] = 127,
[0][1][RTW89_ETSI][1][47] = 127,
[0][1][RTW89_ETSI][0][47] = 127,
[0][1][RTW89_MKK][1][47] = 127,
[0][1][RTW89_MKK][0][47] = 127,
[0][1][RTW89_IC][1][47] = -40,
- [0][1][RTW89_IC][2][47] = 32,
[0][1][RTW89_KCC][1][47] = -14,
[0][1][RTW89_KCC][0][47] = 127,
[0][1][RTW89_ACMA][1][47] = 127,
@@ -54099,13 +52071,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][47] = 127,
[0][1][RTW89_THAILAND][0][47] = 127,
[0][1][RTW89_FCC][1][49] = -40,
- [0][1][RTW89_FCC][2][49] = 127,
[0][1][RTW89_ETSI][1][49] = 127,
[0][1][RTW89_ETSI][0][49] = 127,
[0][1][RTW89_MKK][1][49] = 127,
[0][1][RTW89_MKK][0][49] = 127,
[0][1][RTW89_IC][1][49] = -40,
- [0][1][RTW89_IC][2][49] = 32,
[0][1][RTW89_KCC][1][49] = -14,
[0][1][RTW89_KCC][0][49] = 127,
[0][1][RTW89_ACMA][1][49] = 127,
@@ -54118,13 +52088,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][49] = 127,
[0][1][RTW89_THAILAND][0][49] = 127,
[0][1][RTW89_FCC][1][51] = -40,
- [0][1][RTW89_FCC][2][51] = 127,
[0][1][RTW89_ETSI][1][51] = 127,
[0][1][RTW89_ETSI][0][51] = 127,
[0][1][RTW89_MKK][1][51] = 127,
[0][1][RTW89_MKK][0][51] = 127,
[0][1][RTW89_IC][1][51] = -40,
- [0][1][RTW89_IC][2][51] = 32,
[0][1][RTW89_KCC][1][51] = -14,
[0][1][RTW89_KCC][0][51] = 127,
[0][1][RTW89_ACMA][1][51] = 127,
@@ -54137,13 +52105,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][51] = 127,
[0][1][RTW89_THAILAND][0][51] = 127,
[0][1][RTW89_FCC][1][53] = -40,
- [0][1][RTW89_FCC][2][53] = 127,
[0][1][RTW89_ETSI][1][53] = 127,
[0][1][RTW89_ETSI][0][53] = 127,
[0][1][RTW89_MKK][1][53] = 127,
[0][1][RTW89_MKK][0][53] = 127,
[0][1][RTW89_IC][1][53] = -40,
- [0][1][RTW89_IC][2][53] = 32,
[0][1][RTW89_KCC][1][53] = -14,
[0][1][RTW89_KCC][0][53] = 127,
[0][1][RTW89_ACMA][1][53] = 127,
@@ -54156,13 +52122,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][53] = 127,
[0][1][RTW89_THAILAND][0][53] = 127,
[0][1][RTW89_FCC][1][55] = -40,
- [0][1][RTW89_FCC][2][55] = 30,
[0][1][RTW89_ETSI][1][55] = 127,
[0][1][RTW89_ETSI][0][55] = 127,
[0][1][RTW89_MKK][1][55] = 127,
[0][1][RTW89_MKK][0][55] = 127,
[0][1][RTW89_IC][1][55] = -40,
- [0][1][RTW89_IC][2][55] = 30,
[0][1][RTW89_KCC][1][55] = -14,
[0][1][RTW89_KCC][0][55] = 127,
[0][1][RTW89_ACMA][1][55] = 127,
@@ -54175,13 +52139,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][55] = 127,
[0][1][RTW89_THAILAND][0][55] = 127,
[0][1][RTW89_FCC][1][57] = -40,
- [0][1][RTW89_FCC][2][57] = 30,
[0][1][RTW89_ETSI][1][57] = 127,
[0][1][RTW89_ETSI][0][57] = 127,
[0][1][RTW89_MKK][1][57] = 127,
[0][1][RTW89_MKK][0][57] = 127,
[0][1][RTW89_IC][1][57] = -40,
- [0][1][RTW89_IC][2][57] = 30,
[0][1][RTW89_KCC][1][57] = -14,
[0][1][RTW89_KCC][0][57] = 127,
[0][1][RTW89_ACMA][1][57] = 127,
@@ -54194,13 +52156,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][57] = 127,
[0][1][RTW89_THAILAND][0][57] = 127,
[0][1][RTW89_FCC][1][59] = -40,
- [0][1][RTW89_FCC][2][59] = 30,
[0][1][RTW89_ETSI][1][59] = 127,
[0][1][RTW89_ETSI][0][59] = 127,
[0][1][RTW89_MKK][1][59] = 127,
[0][1][RTW89_MKK][0][59] = 127,
[0][1][RTW89_IC][1][59] = -40,
- [0][1][RTW89_IC][2][59] = 30,
[0][1][RTW89_KCC][1][59] = -14,
[0][1][RTW89_KCC][0][59] = 127,
[0][1][RTW89_ACMA][1][59] = 127,
@@ -54213,13 +52173,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][59] = 127,
[0][1][RTW89_THAILAND][0][59] = 127,
[0][1][RTW89_FCC][1][60] = -40,
- [0][1][RTW89_FCC][2][60] = 30,
[0][1][RTW89_ETSI][1][60] = 127,
[0][1][RTW89_ETSI][0][60] = 127,
[0][1][RTW89_MKK][1][60] = 127,
[0][1][RTW89_MKK][0][60] = 127,
[0][1][RTW89_IC][1][60] = -40,
- [0][1][RTW89_IC][2][60] = 30,
[0][1][RTW89_KCC][1][60] = -14,
[0][1][RTW89_KCC][0][60] = 127,
[0][1][RTW89_ACMA][1][60] = 127,
@@ -54232,13 +52190,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][60] = 127,
[0][1][RTW89_THAILAND][0][60] = 127,
[0][1][RTW89_FCC][1][62] = -40,
- [0][1][RTW89_FCC][2][62] = 30,
[0][1][RTW89_ETSI][1][62] = 127,
[0][1][RTW89_ETSI][0][62] = 127,
[0][1][RTW89_MKK][1][62] = 127,
[0][1][RTW89_MKK][0][62] = 127,
[0][1][RTW89_IC][1][62] = -40,
- [0][1][RTW89_IC][2][62] = 30,
[0][1][RTW89_KCC][1][62] = -14,
[0][1][RTW89_KCC][0][62] = 127,
[0][1][RTW89_ACMA][1][62] = 127,
@@ -54251,13 +52207,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][62] = 127,
[0][1][RTW89_THAILAND][0][62] = 127,
[0][1][RTW89_FCC][1][64] = -40,
- [0][1][RTW89_FCC][2][64] = 30,
[0][1][RTW89_ETSI][1][64] = 127,
[0][1][RTW89_ETSI][0][64] = 127,
[0][1][RTW89_MKK][1][64] = 127,
[0][1][RTW89_MKK][0][64] = 127,
[0][1][RTW89_IC][1][64] = -40,
- [0][1][RTW89_IC][2][64] = 30,
[0][1][RTW89_KCC][1][64] = -14,
[0][1][RTW89_KCC][0][64] = 127,
[0][1][RTW89_ACMA][1][64] = 127,
@@ -54270,13 +52224,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][64] = 127,
[0][1][RTW89_THAILAND][0][64] = 127,
[0][1][RTW89_FCC][1][66] = -40,
- [0][1][RTW89_FCC][2][66] = 30,
[0][1][RTW89_ETSI][1][66] = 127,
[0][1][RTW89_ETSI][0][66] = 127,
[0][1][RTW89_MKK][1][66] = 127,
[0][1][RTW89_MKK][0][66] = 127,
[0][1][RTW89_IC][1][66] = -40,
- [0][1][RTW89_IC][2][66] = 30,
[0][1][RTW89_KCC][1][66] = -14,
[0][1][RTW89_KCC][0][66] = 127,
[0][1][RTW89_ACMA][1][66] = 127,
@@ -54289,13 +52241,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][66] = 127,
[0][1][RTW89_THAILAND][0][66] = 127,
[0][1][RTW89_FCC][1][68] = -40,
- [0][1][RTW89_FCC][2][68] = 30,
[0][1][RTW89_ETSI][1][68] = 127,
[0][1][RTW89_ETSI][0][68] = 127,
[0][1][RTW89_MKK][1][68] = 127,
[0][1][RTW89_MKK][0][68] = 127,
[0][1][RTW89_IC][1][68] = -40,
- [0][1][RTW89_IC][2][68] = 30,
[0][1][RTW89_KCC][1][68] = -14,
[0][1][RTW89_KCC][0][68] = 127,
[0][1][RTW89_ACMA][1][68] = 127,
@@ -54308,13 +52258,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][68] = 127,
[0][1][RTW89_THAILAND][0][68] = 127,
[0][1][RTW89_FCC][1][70] = -38,
- [0][1][RTW89_FCC][2][70] = 30,
[0][1][RTW89_ETSI][1][70] = 127,
[0][1][RTW89_ETSI][0][70] = 127,
[0][1][RTW89_MKK][1][70] = 127,
[0][1][RTW89_MKK][0][70] = 127,
[0][1][RTW89_IC][1][70] = -38,
- [0][1][RTW89_IC][2][70] = 30,
[0][1][RTW89_KCC][1][70] = -14,
[0][1][RTW89_KCC][0][70] = 127,
[0][1][RTW89_ACMA][1][70] = 127,
@@ -54327,13 +52275,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][70] = 127,
[0][1][RTW89_THAILAND][0][70] = 127,
[0][1][RTW89_FCC][1][72] = -38,
- [0][1][RTW89_FCC][2][72] = 30,
[0][1][RTW89_ETSI][1][72] = 127,
[0][1][RTW89_ETSI][0][72] = 127,
[0][1][RTW89_MKK][1][72] = 127,
[0][1][RTW89_MKK][0][72] = 127,
[0][1][RTW89_IC][1][72] = -38,
- [0][1][RTW89_IC][2][72] = 30,
[0][1][RTW89_KCC][1][72] = -14,
[0][1][RTW89_KCC][0][72] = 127,
[0][1][RTW89_ACMA][1][72] = 127,
@@ -54346,13 +52292,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][72] = 127,
[0][1][RTW89_THAILAND][0][72] = 127,
[0][1][RTW89_FCC][1][74] = -38,
- [0][1][RTW89_FCC][2][74] = 30,
[0][1][RTW89_ETSI][1][74] = 127,
[0][1][RTW89_ETSI][0][74] = 127,
[0][1][RTW89_MKK][1][74] = 127,
[0][1][RTW89_MKK][0][74] = 127,
[0][1][RTW89_IC][1][74] = -38,
- [0][1][RTW89_IC][2][74] = 30,
[0][1][RTW89_KCC][1][74] = -14,
[0][1][RTW89_KCC][0][74] = 127,
[0][1][RTW89_ACMA][1][74] = 127,
@@ -54365,13 +52309,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][74] = 127,
[0][1][RTW89_THAILAND][0][74] = 127,
[0][1][RTW89_FCC][1][75] = -38,
- [0][1][RTW89_FCC][2][75] = 30,
[0][1][RTW89_ETSI][1][75] = 127,
[0][1][RTW89_ETSI][0][75] = 127,
[0][1][RTW89_MKK][1][75] = 127,
[0][1][RTW89_MKK][0][75] = 127,
[0][1][RTW89_IC][1][75] = -38,
- [0][1][RTW89_IC][2][75] = 30,
[0][1][RTW89_KCC][1][75] = -14,
[0][1][RTW89_KCC][0][75] = 127,
[0][1][RTW89_ACMA][1][75] = 127,
@@ -54384,13 +52326,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][75] = 127,
[0][1][RTW89_THAILAND][0][75] = 127,
[0][1][RTW89_FCC][1][77] = -38,
- [0][1][RTW89_FCC][2][77] = 30,
[0][1][RTW89_ETSI][1][77] = 127,
[0][1][RTW89_ETSI][0][77] = 127,
[0][1][RTW89_MKK][1][77] = 127,
[0][1][RTW89_MKK][0][77] = 127,
[0][1][RTW89_IC][1][77] = -38,
- [0][1][RTW89_IC][2][77] = 30,
[0][1][RTW89_KCC][1][77] = -14,
[0][1][RTW89_KCC][0][77] = 127,
[0][1][RTW89_ACMA][1][77] = 127,
@@ -54403,13 +52343,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][77] = 127,
[0][1][RTW89_THAILAND][0][77] = 127,
[0][1][RTW89_FCC][1][79] = -38,
- [0][1][RTW89_FCC][2][79] = 30,
[0][1][RTW89_ETSI][1][79] = 127,
[0][1][RTW89_ETSI][0][79] = 127,
[0][1][RTW89_MKK][1][79] = 127,
[0][1][RTW89_MKK][0][79] = 127,
[0][1][RTW89_IC][1][79] = -38,
- [0][1][RTW89_IC][2][79] = 30,
[0][1][RTW89_KCC][1][79] = -14,
[0][1][RTW89_KCC][0][79] = 127,
[0][1][RTW89_ACMA][1][79] = 127,
@@ -54422,13 +52360,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][79] = 127,
[0][1][RTW89_THAILAND][0][79] = 127,
[0][1][RTW89_FCC][1][81] = -38,
- [0][1][RTW89_FCC][2][81] = 30,
[0][1][RTW89_ETSI][1][81] = 127,
[0][1][RTW89_ETSI][0][81] = 127,
[0][1][RTW89_MKK][1][81] = 127,
[0][1][RTW89_MKK][0][81] = 127,
[0][1][RTW89_IC][1][81] = -38,
- [0][1][RTW89_IC][2][81] = 30,
[0][1][RTW89_KCC][1][81] = -14,
[0][1][RTW89_KCC][0][81] = 127,
[0][1][RTW89_ACMA][1][81] = 127,
@@ -54441,13 +52377,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][81] = 127,
[0][1][RTW89_THAILAND][0][81] = 127,
[0][1][RTW89_FCC][1][83] = -38,
- [0][1][RTW89_FCC][2][83] = 30,
[0][1][RTW89_ETSI][1][83] = 127,
[0][1][RTW89_ETSI][0][83] = 127,
[0][1][RTW89_MKK][1][83] = 127,
[0][1][RTW89_MKK][0][83] = 127,
[0][1][RTW89_IC][1][83] = -38,
- [0][1][RTW89_IC][2][83] = 30,
[0][1][RTW89_KCC][1][83] = -14,
[0][1][RTW89_KCC][0][83] = 127,
[0][1][RTW89_ACMA][1][83] = 127,
@@ -54460,13 +52394,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][83] = 127,
[0][1][RTW89_THAILAND][0][83] = 127,
[0][1][RTW89_FCC][1][85] = -38,
- [0][1][RTW89_FCC][2][85] = 30,
[0][1][RTW89_ETSI][1][85] = 127,
[0][1][RTW89_ETSI][0][85] = 127,
[0][1][RTW89_MKK][1][85] = 127,
[0][1][RTW89_MKK][0][85] = 127,
[0][1][RTW89_IC][1][85] = -38,
- [0][1][RTW89_IC][2][85] = 30,
[0][1][RTW89_KCC][1][85] = -14,
[0][1][RTW89_KCC][0][85] = 127,
[0][1][RTW89_ACMA][1][85] = 127,
@@ -54479,13 +52411,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][85] = 127,
[0][1][RTW89_THAILAND][0][85] = 127,
[0][1][RTW89_FCC][1][87] = -40,
- [0][1][RTW89_FCC][2][87] = 127,
[0][1][RTW89_ETSI][1][87] = 127,
[0][1][RTW89_ETSI][0][87] = 127,
[0][1][RTW89_MKK][1][87] = 127,
[0][1][RTW89_MKK][0][87] = 127,
[0][1][RTW89_IC][1][87] = -40,
- [0][1][RTW89_IC][2][87] = 127,
[0][1][RTW89_KCC][1][87] = -14,
[0][1][RTW89_KCC][0][87] = 127,
[0][1][RTW89_ACMA][1][87] = 127,
@@ -54498,13 +52428,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][87] = 127,
[0][1][RTW89_THAILAND][0][87] = 127,
[0][1][RTW89_FCC][1][89] = -38,
- [0][1][RTW89_FCC][2][89] = 127,
[0][1][RTW89_ETSI][1][89] = 127,
[0][1][RTW89_ETSI][0][89] = 127,
[0][1][RTW89_MKK][1][89] = 127,
[0][1][RTW89_MKK][0][89] = 127,
[0][1][RTW89_IC][1][89] = -38,
- [0][1][RTW89_IC][2][89] = 127,
[0][1][RTW89_KCC][1][89] = -14,
[0][1][RTW89_KCC][0][89] = 127,
[0][1][RTW89_ACMA][1][89] = 127,
@@ -54517,13 +52445,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][89] = 127,
[0][1][RTW89_THAILAND][0][89] = 127,
[0][1][RTW89_FCC][1][90] = -38,
- [0][1][RTW89_FCC][2][90] = 127,
[0][1][RTW89_ETSI][1][90] = 127,
[0][1][RTW89_ETSI][0][90] = 127,
[0][1][RTW89_MKK][1][90] = 127,
[0][1][RTW89_MKK][0][90] = 127,
[0][1][RTW89_IC][1][90] = -38,
- [0][1][RTW89_IC][2][90] = 127,
[0][1][RTW89_KCC][1][90] = -14,
[0][1][RTW89_KCC][0][90] = 127,
[0][1][RTW89_ACMA][1][90] = 127,
@@ -54536,13 +52462,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][90] = 127,
[0][1][RTW89_THAILAND][0][90] = 127,
[0][1][RTW89_FCC][1][92] = -38,
- [0][1][RTW89_FCC][2][92] = 127,
[0][1][RTW89_ETSI][1][92] = 127,
[0][1][RTW89_ETSI][0][92] = 127,
[0][1][RTW89_MKK][1][92] = 127,
[0][1][RTW89_MKK][0][92] = 127,
[0][1][RTW89_IC][1][92] = -38,
- [0][1][RTW89_IC][2][92] = 127,
[0][1][RTW89_KCC][1][92] = -14,
[0][1][RTW89_KCC][0][92] = 127,
[0][1][RTW89_ACMA][1][92] = 127,
@@ -54555,13 +52479,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][92] = 127,
[0][1][RTW89_THAILAND][0][92] = 127,
[0][1][RTW89_FCC][1][94] = -38,
- [0][1][RTW89_FCC][2][94] = 127,
[0][1][RTW89_ETSI][1][94] = 127,
[0][1][RTW89_ETSI][0][94] = 127,
[0][1][RTW89_MKK][1][94] = 127,
[0][1][RTW89_MKK][0][94] = 127,
[0][1][RTW89_IC][1][94] = -38,
- [0][1][RTW89_IC][2][94] = 127,
[0][1][RTW89_KCC][1][94] = -14,
[0][1][RTW89_KCC][0][94] = 127,
[0][1][RTW89_ACMA][1][94] = 127,
@@ -54574,13 +52496,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][94] = 127,
[0][1][RTW89_THAILAND][0][94] = 127,
[0][1][RTW89_FCC][1][96] = -38,
- [0][1][RTW89_FCC][2][96] = 127,
[0][1][RTW89_ETSI][1][96] = 127,
[0][1][RTW89_ETSI][0][96] = 127,
[0][1][RTW89_MKK][1][96] = 127,
[0][1][RTW89_MKK][0][96] = 127,
[0][1][RTW89_IC][1][96] = -38,
- [0][1][RTW89_IC][2][96] = 127,
[0][1][RTW89_KCC][1][96] = -14,
[0][1][RTW89_KCC][0][96] = 127,
[0][1][RTW89_ACMA][1][96] = 127,
@@ -54593,13 +52513,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][96] = 127,
[0][1][RTW89_THAILAND][0][96] = 127,
[0][1][RTW89_FCC][1][98] = -38,
- [0][1][RTW89_FCC][2][98] = 127,
[0][1][RTW89_ETSI][1][98] = 127,
[0][1][RTW89_ETSI][0][98] = 127,
[0][1][RTW89_MKK][1][98] = 127,
[0][1][RTW89_MKK][0][98] = 127,
[0][1][RTW89_IC][1][98] = -38,
- [0][1][RTW89_IC][2][98] = 127,
[0][1][RTW89_KCC][1][98] = -14,
[0][1][RTW89_KCC][0][98] = 127,
[0][1][RTW89_ACMA][1][98] = 127,
@@ -54612,13 +52530,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][98] = 127,
[0][1][RTW89_THAILAND][0][98] = 127,
[0][1][RTW89_FCC][1][100] = -38,
- [0][1][RTW89_FCC][2][100] = 127,
[0][1][RTW89_ETSI][1][100] = 127,
[0][1][RTW89_ETSI][0][100] = 127,
[0][1][RTW89_MKK][1][100] = 127,
[0][1][RTW89_MKK][0][100] = 127,
[0][1][RTW89_IC][1][100] = -38,
- [0][1][RTW89_IC][2][100] = 127,
[0][1][RTW89_KCC][1][100] = -14,
[0][1][RTW89_KCC][0][100] = 127,
[0][1][RTW89_ACMA][1][100] = 127,
@@ -54631,13 +52547,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][100] = 127,
[0][1][RTW89_THAILAND][0][100] = 127,
[0][1][RTW89_FCC][1][102] = -38,
- [0][1][RTW89_FCC][2][102] = 127,
[0][1][RTW89_ETSI][1][102] = 127,
[0][1][RTW89_ETSI][0][102] = 127,
[0][1][RTW89_MKK][1][102] = 127,
[0][1][RTW89_MKK][0][102] = 127,
[0][1][RTW89_IC][1][102] = -38,
- [0][1][RTW89_IC][2][102] = 127,
[0][1][RTW89_KCC][1][102] = -14,
[0][1][RTW89_KCC][0][102] = 127,
[0][1][RTW89_ACMA][1][102] = 127,
@@ -54650,13 +52564,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][102] = 127,
[0][1][RTW89_THAILAND][0][102] = 127,
[0][1][RTW89_FCC][1][104] = -38,
- [0][1][RTW89_FCC][2][104] = 127,
[0][1][RTW89_ETSI][1][104] = 127,
[0][1][RTW89_ETSI][0][104] = 127,
[0][1][RTW89_MKK][1][104] = 127,
[0][1][RTW89_MKK][0][104] = 127,
[0][1][RTW89_IC][1][104] = -38,
- [0][1][RTW89_IC][2][104] = 127,
[0][1][RTW89_KCC][1][104] = -14,
[0][1][RTW89_KCC][0][104] = 127,
[0][1][RTW89_ACMA][1][104] = 127,
@@ -54669,13 +52581,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][104] = 127,
[0][1][RTW89_THAILAND][0][104] = 127,
[0][1][RTW89_FCC][1][105] = -38,
- [0][1][RTW89_FCC][2][105] = 127,
[0][1][RTW89_ETSI][1][105] = 127,
[0][1][RTW89_ETSI][0][105] = 127,
[0][1][RTW89_MKK][1][105] = 127,
[0][1][RTW89_MKK][0][105] = 127,
[0][1][RTW89_IC][1][105] = -38,
- [0][1][RTW89_IC][2][105] = 127,
[0][1][RTW89_KCC][1][105] = -14,
[0][1][RTW89_KCC][0][105] = 127,
[0][1][RTW89_ACMA][1][105] = 127,
@@ -54688,13 +52598,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][105] = 127,
[0][1][RTW89_THAILAND][0][105] = 127,
[0][1][RTW89_FCC][1][107] = -34,
- [0][1][RTW89_FCC][2][107] = 127,
[0][1][RTW89_ETSI][1][107] = 127,
[0][1][RTW89_ETSI][0][107] = 127,
[0][1][RTW89_MKK][1][107] = 127,
[0][1][RTW89_MKK][0][107] = 127,
[0][1][RTW89_IC][1][107] = -34,
- [0][1][RTW89_IC][2][107] = 127,
[0][1][RTW89_KCC][1][107] = -14,
[0][1][RTW89_KCC][0][107] = 127,
[0][1][RTW89_ACMA][1][107] = 127,
@@ -54707,13 +52615,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][107] = 127,
[0][1][RTW89_THAILAND][0][107] = 127,
[0][1][RTW89_FCC][1][109] = -34,
- [0][1][RTW89_FCC][2][109] = 127,
[0][1][RTW89_ETSI][1][109] = 127,
[0][1][RTW89_ETSI][0][109] = 127,
[0][1][RTW89_MKK][1][109] = 127,
[0][1][RTW89_MKK][0][109] = 127,
[0][1][RTW89_IC][1][109] = -34,
- [0][1][RTW89_IC][2][109] = 127,
[0][1][RTW89_KCC][1][109] = 127,
[0][1][RTW89_KCC][0][109] = 127,
[0][1][RTW89_ACMA][1][109] = 127,
@@ -54726,13 +52632,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][109] = 127,
[0][1][RTW89_THAILAND][0][109] = 127,
[0][1][RTW89_FCC][1][111] = 127,
- [0][1][RTW89_FCC][2][111] = 127,
[0][1][RTW89_ETSI][1][111] = 127,
[0][1][RTW89_ETSI][0][111] = 127,
[0][1][RTW89_MKK][1][111] = 127,
[0][1][RTW89_MKK][0][111] = 127,
[0][1][RTW89_IC][1][111] = 127,
- [0][1][RTW89_IC][2][111] = 127,
[0][1][RTW89_KCC][1][111] = 127,
[0][1][RTW89_KCC][0][111] = 127,
[0][1][RTW89_ACMA][1][111] = 127,
@@ -54745,13 +52649,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][111] = 127,
[0][1][RTW89_THAILAND][0][111] = 127,
[0][1][RTW89_FCC][1][113] = 127,
- [0][1][RTW89_FCC][2][113] = 127,
[0][1][RTW89_ETSI][1][113] = 127,
[0][1][RTW89_ETSI][0][113] = 127,
[0][1][RTW89_MKK][1][113] = 127,
[0][1][RTW89_MKK][0][113] = 127,
[0][1][RTW89_IC][1][113] = 127,
- [0][1][RTW89_IC][2][113] = 127,
[0][1][RTW89_KCC][1][113] = 127,
[0][1][RTW89_KCC][0][113] = 127,
[0][1][RTW89_ACMA][1][113] = 127,
@@ -54764,13 +52666,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][113] = 127,
[0][1][RTW89_THAILAND][0][113] = 127,
[0][1][RTW89_FCC][1][115] = 127,
- [0][1][RTW89_FCC][2][115] = 127,
[0][1][RTW89_ETSI][1][115] = 127,
[0][1][RTW89_ETSI][0][115] = 127,
[0][1][RTW89_MKK][1][115] = 127,
[0][1][RTW89_MKK][0][115] = 127,
[0][1][RTW89_IC][1][115] = 127,
- [0][1][RTW89_IC][2][115] = 127,
[0][1][RTW89_KCC][1][115] = 127,
[0][1][RTW89_KCC][0][115] = 127,
[0][1][RTW89_ACMA][1][115] = 127,
@@ -54783,13 +52683,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][115] = 127,
[0][1][RTW89_THAILAND][0][115] = 127,
[0][1][RTW89_FCC][1][117] = 127,
- [0][1][RTW89_FCC][2][117] = 127,
[0][1][RTW89_ETSI][1][117] = 127,
[0][1][RTW89_ETSI][0][117] = 127,
[0][1][RTW89_MKK][1][117] = 127,
[0][1][RTW89_MKK][0][117] = 127,
[0][1][RTW89_IC][1][117] = 127,
- [0][1][RTW89_IC][2][117] = 127,
[0][1][RTW89_KCC][1][117] = 127,
[0][1][RTW89_KCC][0][117] = 127,
[0][1][RTW89_ACMA][1][117] = 127,
@@ -54802,13 +52700,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][117] = 127,
[0][1][RTW89_THAILAND][0][117] = 127,
[0][1][RTW89_FCC][1][119] = 127,
- [0][1][RTW89_FCC][2][119] = 127,
[0][1][RTW89_ETSI][1][119] = 127,
[0][1][RTW89_ETSI][0][119] = 127,
[0][1][RTW89_MKK][1][119] = 127,
[0][1][RTW89_MKK][0][119] = 127,
[0][1][RTW89_IC][1][119] = 127,
- [0][1][RTW89_IC][2][119] = 127,
[0][1][RTW89_KCC][1][119] = 127,
[0][1][RTW89_KCC][0][119] = 127,
[0][1][RTW89_ACMA][1][119] = 127,
@@ -54821,13 +52717,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[0][1][RTW89_THAILAND][1][119] = 127,
[0][1][RTW89_THAILAND][0][119] = 127,
[1][0][RTW89_FCC][1][0] = -4,
- [1][0][RTW89_FCC][2][0] = 52,
[1][0][RTW89_ETSI][1][0] = 46,
[1][0][RTW89_ETSI][0][0] = 6,
[1][0][RTW89_MKK][1][0] = 42,
[1][0][RTW89_MKK][0][0] = 2,
[1][0][RTW89_IC][1][0] = -4,
- [1][0][RTW89_IC][2][0] = 52,
[1][0][RTW89_KCC][1][0] = -2,
[1][0][RTW89_KCC][0][0] = -2,
[1][0][RTW89_ACMA][1][0] = 46,
@@ -54840,13 +52734,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][0] = 42,
[1][0][RTW89_THAILAND][0][0] = -4,
[1][0][RTW89_FCC][1][2] = -4,
- [1][0][RTW89_FCC][2][2] = 52,
[1][0][RTW89_ETSI][1][2] = 46,
[1][0][RTW89_ETSI][0][2] = 6,
[1][0][RTW89_MKK][1][2] = 42,
[1][0][RTW89_MKK][0][2] = 2,
[1][0][RTW89_IC][1][2] = -4,
- [1][0][RTW89_IC][2][2] = 52,
[1][0][RTW89_KCC][1][2] = -2,
[1][0][RTW89_KCC][0][2] = -2,
[1][0][RTW89_ACMA][1][2] = 46,
@@ -54859,13 +52751,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][2] = 42,
[1][0][RTW89_THAILAND][0][2] = -4,
[1][0][RTW89_FCC][1][4] = -4,
- [1][0][RTW89_FCC][2][4] = 52,
[1][0][RTW89_ETSI][1][4] = 46,
[1][0][RTW89_ETSI][0][4] = 6,
[1][0][RTW89_MKK][1][4] = 42,
[1][0][RTW89_MKK][0][4] = 2,
[1][0][RTW89_IC][1][4] = -4,
- [1][0][RTW89_IC][2][4] = 52,
[1][0][RTW89_KCC][1][4] = -2,
[1][0][RTW89_KCC][0][4] = -2,
[1][0][RTW89_ACMA][1][4] = 46,
@@ -54878,13 +52768,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][4] = 42,
[1][0][RTW89_THAILAND][0][4] = -4,
[1][0][RTW89_FCC][1][6] = -4,
- [1][0][RTW89_FCC][2][6] = 52,
[1][0][RTW89_ETSI][1][6] = 46,
[1][0][RTW89_ETSI][0][6] = 6,
[1][0][RTW89_MKK][1][6] = 42,
[1][0][RTW89_MKK][0][6] = 2,
[1][0][RTW89_IC][1][6] = -4,
- [1][0][RTW89_IC][2][6] = 52,
[1][0][RTW89_KCC][1][6] = -2,
[1][0][RTW89_KCC][0][6] = -2,
[1][0][RTW89_ACMA][1][6] = 46,
@@ -54897,13 +52785,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][6] = 42,
[1][0][RTW89_THAILAND][0][6] = -4,
[1][0][RTW89_FCC][1][8] = -4,
- [1][0][RTW89_FCC][2][8] = 52,
[1][0][RTW89_ETSI][1][8] = 46,
[1][0][RTW89_ETSI][0][8] = 6,
[1][0][RTW89_MKK][1][8] = 42,
[1][0][RTW89_MKK][0][8] = 2,
[1][0][RTW89_IC][1][8] = -4,
- [1][0][RTW89_IC][2][8] = 52,
[1][0][RTW89_KCC][1][8] = -2,
[1][0][RTW89_KCC][0][8] = -2,
[1][0][RTW89_ACMA][1][8] = 46,
@@ -54916,13 +52802,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][8] = 42,
[1][0][RTW89_THAILAND][0][8] = -4,
[1][0][RTW89_FCC][1][10] = -4,
- [1][0][RTW89_FCC][2][10] = 52,
[1][0][RTW89_ETSI][1][10] = 46,
[1][0][RTW89_ETSI][0][10] = 6,
[1][0][RTW89_MKK][1][10] = 42,
[1][0][RTW89_MKK][0][10] = 2,
[1][0][RTW89_IC][1][10] = -4,
- [1][0][RTW89_IC][2][10] = 52,
[1][0][RTW89_KCC][1][10] = -2,
[1][0][RTW89_KCC][0][10] = -2,
[1][0][RTW89_ACMA][1][10] = 46,
@@ -54935,13 +52819,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][10] = 42,
[1][0][RTW89_THAILAND][0][10] = -4,
[1][0][RTW89_FCC][1][12] = -4,
- [1][0][RTW89_FCC][2][12] = 52,
[1][0][RTW89_ETSI][1][12] = 46,
[1][0][RTW89_ETSI][0][12] = 6,
[1][0][RTW89_MKK][1][12] = 42,
[1][0][RTW89_MKK][0][12] = 2,
[1][0][RTW89_IC][1][12] = -4,
- [1][0][RTW89_IC][2][12] = 52,
[1][0][RTW89_KCC][1][12] = -2,
[1][0][RTW89_KCC][0][12] = -2,
[1][0][RTW89_ACMA][1][12] = 46,
@@ -54954,13 +52836,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][12] = 42,
[1][0][RTW89_THAILAND][0][12] = -4,
[1][0][RTW89_FCC][1][14] = -4,
- [1][0][RTW89_FCC][2][14] = 52,
[1][0][RTW89_ETSI][1][14] = 46,
[1][0][RTW89_ETSI][0][14] = 6,
[1][0][RTW89_MKK][1][14] = 42,
[1][0][RTW89_MKK][0][14] = 2,
[1][0][RTW89_IC][1][14] = -4,
- [1][0][RTW89_IC][2][14] = 52,
[1][0][RTW89_KCC][1][14] = -2,
[1][0][RTW89_KCC][0][14] = -2,
[1][0][RTW89_ACMA][1][14] = 46,
@@ -54973,13 +52853,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][14] = 42,
[1][0][RTW89_THAILAND][0][14] = -4,
[1][0][RTW89_FCC][1][15] = -4,
- [1][0][RTW89_FCC][2][15] = 52,
[1][0][RTW89_ETSI][1][15] = 46,
[1][0][RTW89_ETSI][0][15] = 6,
[1][0][RTW89_MKK][1][15] = 42,
[1][0][RTW89_MKK][0][15] = 2,
[1][0][RTW89_IC][1][15] = -4,
- [1][0][RTW89_IC][2][15] = 52,
[1][0][RTW89_KCC][1][15] = -2,
[1][0][RTW89_KCC][0][15] = -2,
[1][0][RTW89_ACMA][1][15] = 46,
@@ -54992,13 +52870,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][15] = 42,
[1][0][RTW89_THAILAND][0][15] = -4,
[1][0][RTW89_FCC][1][17] = -4,
- [1][0][RTW89_FCC][2][17] = 52,
[1][0][RTW89_ETSI][1][17] = 46,
[1][0][RTW89_ETSI][0][17] = 6,
[1][0][RTW89_MKK][1][17] = 42,
[1][0][RTW89_MKK][0][17] = 2,
[1][0][RTW89_IC][1][17] = -4,
- [1][0][RTW89_IC][2][17] = 52,
[1][0][RTW89_KCC][1][17] = -2,
[1][0][RTW89_KCC][0][17] = -2,
[1][0][RTW89_ACMA][1][17] = 46,
@@ -55011,13 +52887,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][17] = 42,
[1][0][RTW89_THAILAND][0][17] = -4,
[1][0][RTW89_FCC][1][19] = -4,
- [1][0][RTW89_FCC][2][19] = 52,
[1][0][RTW89_ETSI][1][19] = 46,
[1][0][RTW89_ETSI][0][19] = 6,
[1][0][RTW89_MKK][1][19] = 42,
[1][0][RTW89_MKK][0][19] = 2,
[1][0][RTW89_IC][1][19] = -4,
- [1][0][RTW89_IC][2][19] = 52,
[1][0][RTW89_KCC][1][19] = -2,
[1][0][RTW89_KCC][0][19] = -2,
[1][0][RTW89_ACMA][1][19] = 46,
@@ -55030,13 +52904,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][19] = 42,
[1][0][RTW89_THAILAND][0][19] = -4,
[1][0][RTW89_FCC][1][21] = -4,
- [1][0][RTW89_FCC][2][21] = 52,
[1][0][RTW89_ETSI][1][21] = 46,
[1][0][RTW89_ETSI][0][21] = 6,
[1][0][RTW89_MKK][1][21] = 42,
[1][0][RTW89_MKK][0][21] = 2,
[1][0][RTW89_IC][1][21] = -4,
- [1][0][RTW89_IC][2][21] = 52,
[1][0][RTW89_KCC][1][21] = -2,
[1][0][RTW89_KCC][0][21] = -2,
[1][0][RTW89_ACMA][1][21] = 46,
@@ -55049,13 +52921,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][21] = 42,
[1][0][RTW89_THAILAND][0][21] = -4,
[1][0][RTW89_FCC][1][23] = -4,
- [1][0][RTW89_FCC][2][23] = 66,
[1][0][RTW89_ETSI][1][23] = 46,
[1][0][RTW89_ETSI][0][23] = 6,
[1][0][RTW89_MKK][1][23] = 42,
[1][0][RTW89_MKK][0][23] = 2,
[1][0][RTW89_IC][1][23] = -4,
- [1][0][RTW89_IC][2][23] = 66,
[1][0][RTW89_KCC][1][23] = -2,
[1][0][RTW89_KCC][0][23] = -2,
[1][0][RTW89_ACMA][1][23] = 46,
@@ -55068,13 +52938,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][23] = 42,
[1][0][RTW89_THAILAND][0][23] = -4,
[1][0][RTW89_FCC][1][25] = -4,
- [1][0][RTW89_FCC][2][25] = 66,
[1][0][RTW89_ETSI][1][25] = 46,
[1][0][RTW89_ETSI][0][25] = 6,
[1][0][RTW89_MKK][1][25] = 42,
[1][0][RTW89_MKK][0][25] = 2,
[1][0][RTW89_IC][1][25] = -4,
- [1][0][RTW89_IC][2][25] = 66,
[1][0][RTW89_KCC][1][25] = -2,
[1][0][RTW89_KCC][0][25] = -2,
[1][0][RTW89_ACMA][1][25] = 46,
@@ -55087,13 +52955,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][25] = 42,
[1][0][RTW89_THAILAND][0][25] = -4,
[1][0][RTW89_FCC][1][27] = -4,
- [1][0][RTW89_FCC][2][27] = 66,
[1][0][RTW89_ETSI][1][27] = 46,
[1][0][RTW89_ETSI][0][27] = 6,
[1][0][RTW89_MKK][1][27] = 42,
[1][0][RTW89_MKK][0][27] = 2,
[1][0][RTW89_IC][1][27] = -4,
- [1][0][RTW89_IC][2][27] = 66,
[1][0][RTW89_KCC][1][27] = -2,
[1][0][RTW89_KCC][0][27] = -2,
[1][0][RTW89_ACMA][1][27] = 46,
@@ -55106,13 +52972,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][27] = 42,
[1][0][RTW89_THAILAND][0][27] = -4,
[1][0][RTW89_FCC][1][29] = -4,
- [1][0][RTW89_FCC][2][29] = 66,
[1][0][RTW89_ETSI][1][29] = 46,
[1][0][RTW89_ETSI][0][29] = 6,
[1][0][RTW89_MKK][1][29] = 42,
[1][0][RTW89_MKK][0][29] = 2,
[1][0][RTW89_IC][1][29] = -4,
- [1][0][RTW89_IC][2][29] = 66,
[1][0][RTW89_KCC][1][29] = -2,
[1][0][RTW89_KCC][0][29] = -2,
[1][0][RTW89_ACMA][1][29] = 46,
@@ -55125,13 +52989,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][29] = 42,
[1][0][RTW89_THAILAND][0][29] = -4,
[1][0][RTW89_FCC][1][30] = -4,
- [1][0][RTW89_FCC][2][30] = 66,
[1][0][RTW89_ETSI][1][30] = 46,
[1][0][RTW89_ETSI][0][30] = 6,
[1][0][RTW89_MKK][1][30] = 42,
[1][0][RTW89_MKK][0][30] = 2,
[1][0][RTW89_IC][1][30] = -4,
- [1][0][RTW89_IC][2][30] = 66,
[1][0][RTW89_KCC][1][30] = -2,
[1][0][RTW89_KCC][0][30] = -2,
[1][0][RTW89_ACMA][1][30] = 46,
@@ -55144,13 +53006,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][30] = 42,
[1][0][RTW89_THAILAND][0][30] = -4,
[1][0][RTW89_FCC][1][32] = -4,
- [1][0][RTW89_FCC][2][32] = 66,
[1][0][RTW89_ETSI][1][32] = 46,
[1][0][RTW89_ETSI][0][32] = 6,
[1][0][RTW89_MKK][1][32] = 42,
[1][0][RTW89_MKK][0][32] = 2,
[1][0][RTW89_IC][1][32] = -4,
- [1][0][RTW89_IC][2][32] = 66,
[1][0][RTW89_KCC][1][32] = -2,
[1][0][RTW89_KCC][0][32] = -2,
[1][0][RTW89_ACMA][1][32] = 46,
@@ -55163,13 +53023,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][32] = 42,
[1][0][RTW89_THAILAND][0][32] = -4,
[1][0][RTW89_FCC][1][34] = -4,
- [1][0][RTW89_FCC][2][34] = 66,
[1][0][RTW89_ETSI][1][34] = 46,
[1][0][RTW89_ETSI][0][34] = 6,
[1][0][RTW89_MKK][1][34] = 42,
[1][0][RTW89_MKK][0][34] = 2,
[1][0][RTW89_IC][1][34] = -4,
- [1][0][RTW89_IC][2][34] = 66,
[1][0][RTW89_KCC][1][34] = -2,
[1][0][RTW89_KCC][0][34] = -2,
[1][0][RTW89_ACMA][1][34] = 46,
@@ -55182,13 +53040,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][34] = 42,
[1][0][RTW89_THAILAND][0][34] = -4,
[1][0][RTW89_FCC][1][36] = -4,
- [1][0][RTW89_FCC][2][36] = 66,
[1][0][RTW89_ETSI][1][36] = 46,
[1][0][RTW89_ETSI][0][36] = 6,
[1][0][RTW89_MKK][1][36] = 42,
[1][0][RTW89_MKK][0][36] = 2,
[1][0][RTW89_IC][1][36] = -4,
- [1][0][RTW89_IC][2][36] = 66,
[1][0][RTW89_KCC][1][36] = -2,
[1][0][RTW89_KCC][0][36] = -2,
[1][0][RTW89_ACMA][1][36] = 46,
@@ -55201,13 +53057,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][36] = 42,
[1][0][RTW89_THAILAND][0][36] = -4,
[1][0][RTW89_FCC][1][38] = -4,
- [1][0][RTW89_FCC][2][38] = 66,
[1][0][RTW89_ETSI][1][38] = 46,
[1][0][RTW89_ETSI][0][38] = 6,
[1][0][RTW89_MKK][1][38] = 42,
[1][0][RTW89_MKK][0][38] = 2,
[1][0][RTW89_IC][1][38] = -4,
- [1][0][RTW89_IC][2][38] = 66,
[1][0][RTW89_KCC][1][38] = -2,
[1][0][RTW89_KCC][0][38] = -2,
[1][0][RTW89_ACMA][1][38] = 46,
@@ -55220,13 +53074,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][38] = 42,
[1][0][RTW89_THAILAND][0][38] = -4,
[1][0][RTW89_FCC][1][40] = -4,
- [1][0][RTW89_FCC][2][40] = 66,
[1][0][RTW89_ETSI][1][40] = 46,
[1][0][RTW89_ETSI][0][40] = 6,
[1][0][RTW89_MKK][1][40] = 42,
[1][0][RTW89_MKK][0][40] = 2,
[1][0][RTW89_IC][1][40] = -4,
- [1][0][RTW89_IC][2][40] = 66,
[1][0][RTW89_KCC][1][40] = -2,
[1][0][RTW89_KCC][0][40] = -2,
[1][0][RTW89_ACMA][1][40] = 46,
@@ -55239,13 +53091,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][40] = 42,
[1][0][RTW89_THAILAND][0][40] = -4,
[1][0][RTW89_FCC][1][42] = -4,
- [1][0][RTW89_FCC][2][42] = 66,
[1][0][RTW89_ETSI][1][42] = 46,
[1][0][RTW89_ETSI][0][42] = 6,
[1][0][RTW89_MKK][1][42] = 42,
[1][0][RTW89_MKK][0][42] = 2,
[1][0][RTW89_IC][1][42] = -4,
- [1][0][RTW89_IC][2][42] = 66,
[1][0][RTW89_KCC][1][42] = -2,
[1][0][RTW89_KCC][0][42] = -2,
[1][0][RTW89_ACMA][1][42] = 46,
@@ -55258,13 +53108,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][42] = 42,
[1][0][RTW89_THAILAND][0][42] = -4,
[1][0][RTW89_FCC][1][44] = -4,
- [1][0][RTW89_FCC][2][44] = 66,
[1][0][RTW89_ETSI][1][44] = 46,
[1][0][RTW89_ETSI][0][44] = 8,
[1][0][RTW89_MKK][1][44] = 22,
[1][0][RTW89_MKK][0][44] = 4,
[1][0][RTW89_IC][1][44] = -4,
- [1][0][RTW89_IC][2][44] = 66,
[1][0][RTW89_KCC][1][44] = -2,
[1][0][RTW89_KCC][0][44] = -2,
[1][0][RTW89_ACMA][1][44] = 46,
@@ -55277,13 +53125,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][44] = 42,
[1][0][RTW89_THAILAND][0][44] = -4,
[1][0][RTW89_FCC][1][45] = -4,
- [1][0][RTW89_FCC][2][45] = 127,
[1][0][RTW89_ETSI][1][45] = 127,
[1][0][RTW89_ETSI][0][45] = 127,
[1][0][RTW89_MKK][1][45] = 127,
[1][0][RTW89_MKK][0][45] = 127,
[1][0][RTW89_IC][1][45] = -4,
- [1][0][RTW89_IC][2][45] = 68,
[1][0][RTW89_KCC][1][45] = -2,
[1][0][RTW89_KCC][0][45] = 127,
[1][0][RTW89_ACMA][1][45] = 127,
@@ -55296,13 +53142,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][45] = 127,
[1][0][RTW89_THAILAND][0][45] = 127,
[1][0][RTW89_FCC][1][47] = -4,
- [1][0][RTW89_FCC][2][47] = 127,
[1][0][RTW89_ETSI][1][47] = 127,
[1][0][RTW89_ETSI][0][47] = 127,
[1][0][RTW89_MKK][1][47] = 127,
[1][0][RTW89_MKK][0][47] = 127,
[1][0][RTW89_IC][1][47] = -4,
- [1][0][RTW89_IC][2][47] = 68,
[1][0][RTW89_KCC][1][47] = -2,
[1][0][RTW89_KCC][0][47] = 127,
[1][0][RTW89_ACMA][1][47] = 127,
@@ -55315,13 +53159,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][47] = 127,
[1][0][RTW89_THAILAND][0][47] = 127,
[1][0][RTW89_FCC][1][49] = -4,
- [1][0][RTW89_FCC][2][49] = 127,
[1][0][RTW89_ETSI][1][49] = 127,
[1][0][RTW89_ETSI][0][49] = 127,
[1][0][RTW89_MKK][1][49] = 127,
[1][0][RTW89_MKK][0][49] = 127,
[1][0][RTW89_IC][1][49] = -4,
- [1][0][RTW89_IC][2][49] = 68,
[1][0][RTW89_KCC][1][49] = -2,
[1][0][RTW89_KCC][0][49] = 127,
[1][0][RTW89_ACMA][1][49] = 127,
@@ -55334,13 +53176,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][49] = 127,
[1][0][RTW89_THAILAND][0][49] = 127,
[1][0][RTW89_FCC][1][51] = -4,
- [1][0][RTW89_FCC][2][51] = 127,
[1][0][RTW89_ETSI][1][51] = 127,
[1][0][RTW89_ETSI][0][51] = 127,
[1][0][RTW89_MKK][1][51] = 127,
[1][0][RTW89_MKK][0][51] = 127,
[1][0][RTW89_IC][1][51] = -4,
- [1][0][RTW89_IC][2][51] = 68,
[1][0][RTW89_KCC][1][51] = -2,
[1][0][RTW89_KCC][0][51] = 127,
[1][0][RTW89_ACMA][1][51] = 127,
@@ -55353,13 +53193,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][51] = 127,
[1][0][RTW89_THAILAND][0][51] = 127,
[1][0][RTW89_FCC][1][53] = -4,
- [1][0][RTW89_FCC][2][53] = 127,
[1][0][RTW89_ETSI][1][53] = 127,
[1][0][RTW89_ETSI][0][53] = 127,
[1][0][RTW89_MKK][1][53] = 127,
[1][0][RTW89_MKK][0][53] = 127,
[1][0][RTW89_IC][1][53] = -4,
- [1][0][RTW89_IC][2][53] = 68,
[1][0][RTW89_KCC][1][53] = -2,
[1][0][RTW89_KCC][0][53] = 127,
[1][0][RTW89_ACMA][1][53] = 127,
@@ -55372,13 +53210,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][53] = 127,
[1][0][RTW89_THAILAND][0][53] = 127,
[1][0][RTW89_FCC][1][55] = -4,
- [1][0][RTW89_FCC][2][55] = 68,
[1][0][RTW89_ETSI][1][55] = 127,
[1][0][RTW89_ETSI][0][55] = 127,
[1][0][RTW89_MKK][1][55] = 127,
[1][0][RTW89_MKK][0][55] = 127,
[1][0][RTW89_IC][1][55] = -4,
- [1][0][RTW89_IC][2][55] = 68,
[1][0][RTW89_KCC][1][55] = -2,
[1][0][RTW89_KCC][0][55] = 127,
[1][0][RTW89_ACMA][1][55] = 127,
@@ -55391,13 +53227,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][55] = 127,
[1][0][RTW89_THAILAND][0][55] = 127,
[1][0][RTW89_FCC][1][57] = -4,
- [1][0][RTW89_FCC][2][57] = 68,
[1][0][RTW89_ETSI][1][57] = 127,
[1][0][RTW89_ETSI][0][57] = 127,
[1][0][RTW89_MKK][1][57] = 127,
[1][0][RTW89_MKK][0][57] = 127,
[1][0][RTW89_IC][1][57] = -4,
- [1][0][RTW89_IC][2][57] = 68,
[1][0][RTW89_KCC][1][57] = -2,
[1][0][RTW89_KCC][0][57] = 127,
[1][0][RTW89_ACMA][1][57] = 127,
@@ -55410,13 +53244,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][57] = 127,
[1][0][RTW89_THAILAND][0][57] = 127,
[1][0][RTW89_FCC][1][59] = -4,
- [1][0][RTW89_FCC][2][59] = 68,
[1][0][RTW89_ETSI][1][59] = 127,
[1][0][RTW89_ETSI][0][59] = 127,
[1][0][RTW89_MKK][1][59] = 127,
[1][0][RTW89_MKK][0][59] = 127,
[1][0][RTW89_IC][1][59] = -4,
- [1][0][RTW89_IC][2][59] = 68,
[1][0][RTW89_KCC][1][59] = -2,
[1][0][RTW89_KCC][0][59] = 127,
[1][0][RTW89_ACMA][1][59] = 127,
@@ -55429,13 +53261,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][59] = 127,
[1][0][RTW89_THAILAND][0][59] = 127,
[1][0][RTW89_FCC][1][60] = -4,
- [1][0][RTW89_FCC][2][60] = 68,
[1][0][RTW89_ETSI][1][60] = 127,
[1][0][RTW89_ETSI][0][60] = 127,
[1][0][RTW89_MKK][1][60] = 127,
[1][0][RTW89_MKK][0][60] = 127,
[1][0][RTW89_IC][1][60] = -4,
- [1][0][RTW89_IC][2][60] = 68,
[1][0][RTW89_KCC][1][60] = -2,
[1][0][RTW89_KCC][0][60] = 127,
[1][0][RTW89_ACMA][1][60] = 127,
@@ -55448,13 +53278,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][60] = 127,
[1][0][RTW89_THAILAND][0][60] = 127,
[1][0][RTW89_FCC][1][62] = -4,
- [1][0][RTW89_FCC][2][62] = 68,
[1][0][RTW89_ETSI][1][62] = 127,
[1][0][RTW89_ETSI][0][62] = 127,
[1][0][RTW89_MKK][1][62] = 127,
[1][0][RTW89_MKK][0][62] = 127,
[1][0][RTW89_IC][1][62] = -4,
- [1][0][RTW89_IC][2][62] = 68,
[1][0][RTW89_KCC][1][62] = -2,
[1][0][RTW89_KCC][0][62] = 127,
[1][0][RTW89_ACMA][1][62] = 127,
@@ -55467,13 +53295,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][62] = 127,
[1][0][RTW89_THAILAND][0][62] = 127,
[1][0][RTW89_FCC][1][64] = -4,
- [1][0][RTW89_FCC][2][64] = 68,
[1][0][RTW89_ETSI][1][64] = 127,
[1][0][RTW89_ETSI][0][64] = 127,
[1][0][RTW89_MKK][1][64] = 127,
[1][0][RTW89_MKK][0][64] = 127,
[1][0][RTW89_IC][1][64] = -4,
- [1][0][RTW89_IC][2][64] = 68,
[1][0][RTW89_KCC][1][64] = -2,
[1][0][RTW89_KCC][0][64] = 127,
[1][0][RTW89_ACMA][1][64] = 127,
@@ -55486,13 +53312,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][64] = 127,
[1][0][RTW89_THAILAND][0][64] = 127,
[1][0][RTW89_FCC][1][66] = -4,
- [1][0][RTW89_FCC][2][66] = 68,
[1][0][RTW89_ETSI][1][66] = 127,
[1][0][RTW89_ETSI][0][66] = 127,
[1][0][RTW89_MKK][1][66] = 127,
[1][0][RTW89_MKK][0][66] = 127,
[1][0][RTW89_IC][1][66] = -4,
- [1][0][RTW89_IC][2][66] = 68,
[1][0][RTW89_KCC][1][66] = -2,
[1][0][RTW89_KCC][0][66] = 127,
[1][0][RTW89_ACMA][1][66] = 127,
@@ -55505,13 +53329,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][66] = 127,
[1][0][RTW89_THAILAND][0][66] = 127,
[1][0][RTW89_FCC][1][68] = -4,
- [1][0][RTW89_FCC][2][68] = 68,
[1][0][RTW89_ETSI][1][68] = 127,
[1][0][RTW89_ETSI][0][68] = 127,
[1][0][RTW89_MKK][1][68] = 127,
[1][0][RTW89_MKK][0][68] = 127,
[1][0][RTW89_IC][1][68] = -4,
- [1][0][RTW89_IC][2][68] = 68,
[1][0][RTW89_KCC][1][68] = -2,
[1][0][RTW89_KCC][0][68] = 127,
[1][0][RTW89_ACMA][1][68] = 127,
@@ -55524,13 +53346,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][68] = 127,
[1][0][RTW89_THAILAND][0][68] = 127,
[1][0][RTW89_FCC][1][70] = -4,
- [1][0][RTW89_FCC][2][70] = 68,
[1][0][RTW89_ETSI][1][70] = 127,
[1][0][RTW89_ETSI][0][70] = 127,
[1][0][RTW89_MKK][1][70] = 127,
[1][0][RTW89_MKK][0][70] = 127,
[1][0][RTW89_IC][1][70] = -4,
- [1][0][RTW89_IC][2][70] = 68,
[1][0][RTW89_KCC][1][70] = -2,
[1][0][RTW89_KCC][0][70] = 127,
[1][0][RTW89_ACMA][1][70] = 127,
@@ -55543,13 +53363,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][70] = 127,
[1][0][RTW89_THAILAND][0][70] = 127,
[1][0][RTW89_FCC][1][72] = -4,
- [1][0][RTW89_FCC][2][72] = 68,
[1][0][RTW89_ETSI][1][72] = 127,
[1][0][RTW89_ETSI][0][72] = 127,
[1][0][RTW89_MKK][1][72] = 127,
[1][0][RTW89_MKK][0][72] = 127,
[1][0][RTW89_IC][1][72] = -4,
- [1][0][RTW89_IC][2][72] = 68,
[1][0][RTW89_KCC][1][72] = -2,
[1][0][RTW89_KCC][0][72] = 127,
[1][0][RTW89_ACMA][1][72] = 127,
@@ -55562,13 +53380,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][72] = 127,
[1][0][RTW89_THAILAND][0][72] = 127,
[1][0][RTW89_FCC][1][74] = -4,
- [1][0][RTW89_FCC][2][74] = 68,
[1][0][RTW89_ETSI][1][74] = 127,
[1][0][RTW89_ETSI][0][74] = 127,
[1][0][RTW89_MKK][1][74] = 127,
[1][0][RTW89_MKK][0][74] = 127,
[1][0][RTW89_IC][1][74] = -4,
- [1][0][RTW89_IC][2][74] = 68,
[1][0][RTW89_KCC][1][74] = -2,
[1][0][RTW89_KCC][0][74] = 127,
[1][0][RTW89_ACMA][1][74] = 127,
@@ -55581,13 +53397,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][74] = 127,
[1][0][RTW89_THAILAND][0][74] = 127,
[1][0][RTW89_FCC][1][75] = -4,
- [1][0][RTW89_FCC][2][75] = 68,
[1][0][RTW89_ETSI][1][75] = 127,
[1][0][RTW89_ETSI][0][75] = 127,
[1][0][RTW89_MKK][1][75] = 127,
[1][0][RTW89_MKK][0][75] = 127,
[1][0][RTW89_IC][1][75] = -4,
- [1][0][RTW89_IC][2][75] = 68,
[1][0][RTW89_KCC][1][75] = -2,
[1][0][RTW89_KCC][0][75] = 127,
[1][0][RTW89_ACMA][1][75] = 127,
@@ -55600,13 +53414,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][75] = 127,
[1][0][RTW89_THAILAND][0][75] = 127,
[1][0][RTW89_FCC][1][77] = -4,
- [1][0][RTW89_FCC][2][77] = 68,
[1][0][RTW89_ETSI][1][77] = 127,
[1][0][RTW89_ETSI][0][77] = 127,
[1][0][RTW89_MKK][1][77] = 127,
[1][0][RTW89_MKK][0][77] = 127,
[1][0][RTW89_IC][1][77] = -4,
- [1][0][RTW89_IC][2][77] = 68,
[1][0][RTW89_KCC][1][77] = -2,
[1][0][RTW89_KCC][0][77] = 127,
[1][0][RTW89_ACMA][1][77] = 127,
@@ -55619,13 +53431,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][77] = 127,
[1][0][RTW89_THAILAND][0][77] = 127,
[1][0][RTW89_FCC][1][79] = -4,
- [1][0][RTW89_FCC][2][79] = 68,
[1][0][RTW89_ETSI][1][79] = 127,
[1][0][RTW89_ETSI][0][79] = 127,
[1][0][RTW89_MKK][1][79] = 127,
[1][0][RTW89_MKK][0][79] = 127,
[1][0][RTW89_IC][1][79] = -4,
- [1][0][RTW89_IC][2][79] = 68,
[1][0][RTW89_KCC][1][79] = -2,
[1][0][RTW89_KCC][0][79] = 127,
[1][0][RTW89_ACMA][1][79] = 127,
@@ -55638,13 +53448,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][79] = 127,
[1][0][RTW89_THAILAND][0][79] = 127,
[1][0][RTW89_FCC][1][81] = -4,
- [1][0][RTW89_FCC][2][81] = 68,
[1][0][RTW89_ETSI][1][81] = 127,
[1][0][RTW89_ETSI][0][81] = 127,
[1][0][RTW89_MKK][1][81] = 127,
[1][0][RTW89_MKK][0][81] = 127,
[1][0][RTW89_IC][1][81] = -4,
- [1][0][RTW89_IC][2][81] = 68,
[1][0][RTW89_KCC][1][81] = -2,
[1][0][RTW89_KCC][0][81] = 127,
[1][0][RTW89_ACMA][1][81] = 127,
@@ -55657,13 +53465,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][81] = 127,
[1][0][RTW89_THAILAND][0][81] = 127,
[1][0][RTW89_FCC][1][83] = -4,
- [1][0][RTW89_FCC][2][83] = 68,
[1][0][RTW89_ETSI][1][83] = 127,
[1][0][RTW89_ETSI][0][83] = 127,
[1][0][RTW89_MKK][1][83] = 127,
[1][0][RTW89_MKK][0][83] = 127,
[1][0][RTW89_IC][1][83] = -4,
- [1][0][RTW89_IC][2][83] = 68,
[1][0][RTW89_KCC][1][83] = -2,
[1][0][RTW89_KCC][0][83] = 127,
[1][0][RTW89_ACMA][1][83] = 127,
@@ -55676,13 +53482,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][83] = 127,
[1][0][RTW89_THAILAND][0][83] = 127,
[1][0][RTW89_FCC][1][85] = -4,
- [1][0][RTW89_FCC][2][85] = 68,
[1][0][RTW89_ETSI][1][85] = 127,
[1][0][RTW89_ETSI][0][85] = 127,
[1][0][RTW89_MKK][1][85] = 127,
[1][0][RTW89_MKK][0][85] = 127,
[1][0][RTW89_IC][1][85] = -4,
- [1][0][RTW89_IC][2][85] = 68,
[1][0][RTW89_KCC][1][85] = -2,
[1][0][RTW89_KCC][0][85] = 127,
[1][0][RTW89_ACMA][1][85] = 127,
@@ -55695,13 +53499,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][85] = 127,
[1][0][RTW89_THAILAND][0][85] = 127,
[1][0][RTW89_FCC][1][87] = -4,
- [1][0][RTW89_FCC][2][87] = 127,
[1][0][RTW89_ETSI][1][87] = 127,
[1][0][RTW89_ETSI][0][87] = 127,
[1][0][RTW89_MKK][1][87] = 127,
[1][0][RTW89_MKK][0][87] = 127,
[1][0][RTW89_IC][1][87] = -4,
- [1][0][RTW89_IC][2][87] = 127,
[1][0][RTW89_KCC][1][87] = -2,
[1][0][RTW89_KCC][0][87] = 127,
[1][0][RTW89_ACMA][1][87] = 127,
@@ -55714,13 +53516,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][87] = 127,
[1][0][RTW89_THAILAND][0][87] = 127,
[1][0][RTW89_FCC][1][89] = -4,
- [1][0][RTW89_FCC][2][89] = 127,
[1][0][RTW89_ETSI][1][89] = 127,
[1][0][RTW89_ETSI][0][89] = 127,
[1][0][RTW89_MKK][1][89] = 127,
[1][0][RTW89_MKK][0][89] = 127,
[1][0][RTW89_IC][1][89] = -4,
- [1][0][RTW89_IC][2][89] = 127,
[1][0][RTW89_KCC][1][89] = -2,
[1][0][RTW89_KCC][0][89] = 127,
[1][0][RTW89_ACMA][1][89] = 127,
@@ -55733,13 +53533,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][89] = 127,
[1][0][RTW89_THAILAND][0][89] = 127,
[1][0][RTW89_FCC][1][90] = -4,
- [1][0][RTW89_FCC][2][90] = 127,
[1][0][RTW89_ETSI][1][90] = 127,
[1][0][RTW89_ETSI][0][90] = 127,
[1][0][RTW89_MKK][1][90] = 127,
[1][0][RTW89_MKK][0][90] = 127,
[1][0][RTW89_IC][1][90] = -4,
- [1][0][RTW89_IC][2][90] = 127,
[1][0][RTW89_KCC][1][90] = -2,
[1][0][RTW89_KCC][0][90] = 127,
[1][0][RTW89_ACMA][1][90] = 127,
@@ -55752,13 +53550,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][90] = 127,
[1][0][RTW89_THAILAND][0][90] = 127,
[1][0][RTW89_FCC][1][92] = -4,
- [1][0][RTW89_FCC][2][92] = 127,
[1][0][RTW89_ETSI][1][92] = 127,
[1][0][RTW89_ETSI][0][92] = 127,
[1][0][RTW89_MKK][1][92] = 127,
[1][0][RTW89_MKK][0][92] = 127,
[1][0][RTW89_IC][1][92] = -4,
- [1][0][RTW89_IC][2][92] = 127,
[1][0][RTW89_KCC][1][92] = -2,
[1][0][RTW89_KCC][0][92] = 127,
[1][0][RTW89_ACMA][1][92] = 127,
@@ -55771,13 +53567,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][92] = 127,
[1][0][RTW89_THAILAND][0][92] = 127,
[1][0][RTW89_FCC][1][94] = -4,
- [1][0][RTW89_FCC][2][94] = 127,
[1][0][RTW89_ETSI][1][94] = 127,
[1][0][RTW89_ETSI][0][94] = 127,
[1][0][RTW89_MKK][1][94] = 127,
[1][0][RTW89_MKK][0][94] = 127,
[1][0][RTW89_IC][1][94] = -4,
- [1][0][RTW89_IC][2][94] = 127,
[1][0][RTW89_KCC][1][94] = -2,
[1][0][RTW89_KCC][0][94] = 127,
[1][0][RTW89_ACMA][1][94] = 127,
@@ -55790,13 +53584,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][94] = 127,
[1][0][RTW89_THAILAND][0][94] = 127,
[1][0][RTW89_FCC][1][96] = -4,
- [1][0][RTW89_FCC][2][96] = 127,
[1][0][RTW89_ETSI][1][96] = 127,
[1][0][RTW89_ETSI][0][96] = 127,
[1][0][RTW89_MKK][1][96] = 127,
[1][0][RTW89_MKK][0][96] = 127,
[1][0][RTW89_IC][1][96] = -4,
- [1][0][RTW89_IC][2][96] = 127,
[1][0][RTW89_KCC][1][96] = -2,
[1][0][RTW89_KCC][0][96] = 127,
[1][0][RTW89_ACMA][1][96] = 127,
@@ -55809,13 +53601,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][96] = 127,
[1][0][RTW89_THAILAND][0][96] = 127,
[1][0][RTW89_FCC][1][98] = -4,
- [1][0][RTW89_FCC][2][98] = 127,
[1][0][RTW89_ETSI][1][98] = 127,
[1][0][RTW89_ETSI][0][98] = 127,
[1][0][RTW89_MKK][1][98] = 127,
[1][0][RTW89_MKK][0][98] = 127,
[1][0][RTW89_IC][1][98] = -4,
- [1][0][RTW89_IC][2][98] = 127,
[1][0][RTW89_KCC][1][98] = -2,
[1][0][RTW89_KCC][0][98] = 127,
[1][0][RTW89_ACMA][1][98] = 127,
@@ -55828,13 +53618,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][98] = 127,
[1][0][RTW89_THAILAND][0][98] = 127,
[1][0][RTW89_FCC][1][100] = -4,
- [1][0][RTW89_FCC][2][100] = 127,
[1][0][RTW89_ETSI][1][100] = 127,
[1][0][RTW89_ETSI][0][100] = 127,
[1][0][RTW89_MKK][1][100] = 127,
[1][0][RTW89_MKK][0][100] = 127,
[1][0][RTW89_IC][1][100] = -4,
- [1][0][RTW89_IC][2][100] = 127,
[1][0][RTW89_KCC][1][100] = -2,
[1][0][RTW89_KCC][0][100] = 127,
[1][0][RTW89_ACMA][1][100] = 127,
@@ -55847,13 +53635,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][100] = 127,
[1][0][RTW89_THAILAND][0][100] = 127,
[1][0][RTW89_FCC][1][102] = -4,
- [1][0][RTW89_FCC][2][102] = 127,
[1][0][RTW89_ETSI][1][102] = 127,
[1][0][RTW89_ETSI][0][102] = 127,
[1][0][RTW89_MKK][1][102] = 127,
[1][0][RTW89_MKK][0][102] = 127,
[1][0][RTW89_IC][1][102] = -4,
- [1][0][RTW89_IC][2][102] = 127,
[1][0][RTW89_KCC][1][102] = -2,
[1][0][RTW89_KCC][0][102] = 127,
[1][0][RTW89_ACMA][1][102] = 127,
@@ -55866,13 +53652,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][102] = 127,
[1][0][RTW89_THAILAND][0][102] = 127,
[1][0][RTW89_FCC][1][104] = -4,
- [1][0][RTW89_FCC][2][104] = 127,
[1][0][RTW89_ETSI][1][104] = 127,
[1][0][RTW89_ETSI][0][104] = 127,
[1][0][RTW89_MKK][1][104] = 127,
[1][0][RTW89_MKK][0][104] = 127,
[1][0][RTW89_IC][1][104] = -4,
- [1][0][RTW89_IC][2][104] = 127,
[1][0][RTW89_KCC][1][104] = -2,
[1][0][RTW89_KCC][0][104] = 127,
[1][0][RTW89_ACMA][1][104] = 127,
@@ -55885,13 +53669,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][104] = 127,
[1][0][RTW89_THAILAND][0][104] = 127,
[1][0][RTW89_FCC][1][105] = -4,
- [1][0][RTW89_FCC][2][105] = 127,
[1][0][RTW89_ETSI][1][105] = 127,
[1][0][RTW89_ETSI][0][105] = 127,
[1][0][RTW89_MKK][1][105] = 127,
[1][0][RTW89_MKK][0][105] = 127,
[1][0][RTW89_IC][1][105] = -4,
- [1][0][RTW89_IC][2][105] = 127,
[1][0][RTW89_KCC][1][105] = -2,
[1][0][RTW89_KCC][0][105] = 127,
[1][0][RTW89_ACMA][1][105] = 127,
@@ -55904,13 +53686,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][105] = 127,
[1][0][RTW89_THAILAND][0][105] = 127,
[1][0][RTW89_FCC][1][107] = 1,
- [1][0][RTW89_FCC][2][107] = 127,
[1][0][RTW89_ETSI][1][107] = 127,
[1][0][RTW89_ETSI][0][107] = 127,
[1][0][RTW89_MKK][1][107] = 127,
[1][0][RTW89_MKK][0][107] = 127,
[1][0][RTW89_IC][1][107] = 1,
- [1][0][RTW89_IC][2][107] = 127,
[1][0][RTW89_KCC][1][107] = -2,
[1][0][RTW89_KCC][0][107] = 127,
[1][0][RTW89_ACMA][1][107] = 127,
@@ -55923,13 +53703,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][107] = 127,
[1][0][RTW89_THAILAND][0][107] = 127,
[1][0][RTW89_FCC][1][109] = 2,
- [1][0][RTW89_FCC][2][109] = 127,
[1][0][RTW89_ETSI][1][109] = 127,
[1][0][RTW89_ETSI][0][109] = 127,
[1][0][RTW89_MKK][1][109] = 127,
[1][0][RTW89_MKK][0][109] = 127,
[1][0][RTW89_IC][1][109] = 2,
- [1][0][RTW89_IC][2][109] = 127,
[1][0][RTW89_KCC][1][109] = 127,
[1][0][RTW89_KCC][0][109] = 127,
[1][0][RTW89_ACMA][1][109] = 127,
@@ -55942,13 +53720,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][109] = 127,
[1][0][RTW89_THAILAND][0][109] = 127,
[1][0][RTW89_FCC][1][111] = 127,
- [1][0][RTW89_FCC][2][111] = 127,
[1][0][RTW89_ETSI][1][111] = 127,
[1][0][RTW89_ETSI][0][111] = 127,
[1][0][RTW89_MKK][1][111] = 127,
[1][0][RTW89_MKK][0][111] = 127,
[1][0][RTW89_IC][1][111] = 127,
- [1][0][RTW89_IC][2][111] = 127,
[1][0][RTW89_KCC][1][111] = 127,
[1][0][RTW89_KCC][0][111] = 127,
[1][0][RTW89_ACMA][1][111] = 127,
@@ -55961,13 +53737,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][111] = 127,
[1][0][RTW89_THAILAND][0][111] = 127,
[1][0][RTW89_FCC][1][113] = 127,
- [1][0][RTW89_FCC][2][113] = 127,
[1][0][RTW89_ETSI][1][113] = 127,
[1][0][RTW89_ETSI][0][113] = 127,
[1][0][RTW89_MKK][1][113] = 127,
[1][0][RTW89_MKK][0][113] = 127,
[1][0][RTW89_IC][1][113] = 127,
- [1][0][RTW89_IC][2][113] = 127,
[1][0][RTW89_KCC][1][113] = 127,
[1][0][RTW89_KCC][0][113] = 127,
[1][0][RTW89_ACMA][1][113] = 127,
@@ -55980,13 +53754,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][113] = 127,
[1][0][RTW89_THAILAND][0][113] = 127,
[1][0][RTW89_FCC][1][115] = 127,
- [1][0][RTW89_FCC][2][115] = 127,
[1][0][RTW89_ETSI][1][115] = 127,
[1][0][RTW89_ETSI][0][115] = 127,
[1][0][RTW89_MKK][1][115] = 127,
[1][0][RTW89_MKK][0][115] = 127,
[1][0][RTW89_IC][1][115] = 127,
- [1][0][RTW89_IC][2][115] = 127,
[1][0][RTW89_KCC][1][115] = 127,
[1][0][RTW89_KCC][0][115] = 127,
[1][0][RTW89_ACMA][1][115] = 127,
@@ -55999,13 +53771,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][115] = 127,
[1][0][RTW89_THAILAND][0][115] = 127,
[1][0][RTW89_FCC][1][117] = 127,
- [1][0][RTW89_FCC][2][117] = 127,
[1][0][RTW89_ETSI][1][117] = 127,
[1][0][RTW89_ETSI][0][117] = 127,
[1][0][RTW89_MKK][1][117] = 127,
[1][0][RTW89_MKK][0][117] = 127,
[1][0][RTW89_IC][1][117] = 127,
- [1][0][RTW89_IC][2][117] = 127,
[1][0][RTW89_KCC][1][117] = 127,
[1][0][RTW89_KCC][0][117] = 127,
[1][0][RTW89_ACMA][1][117] = 127,
@@ -56018,13 +53788,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][117] = 127,
[1][0][RTW89_THAILAND][0][117] = 127,
[1][0][RTW89_FCC][1][119] = 127,
- [1][0][RTW89_FCC][2][119] = 127,
[1][0][RTW89_ETSI][1][119] = 127,
[1][0][RTW89_ETSI][0][119] = 127,
[1][0][RTW89_MKK][1][119] = 127,
[1][0][RTW89_MKK][0][119] = 127,
[1][0][RTW89_IC][1][119] = 127,
- [1][0][RTW89_IC][2][119] = 127,
[1][0][RTW89_KCC][1][119] = 127,
[1][0][RTW89_KCC][0][119] = 127,
[1][0][RTW89_ACMA][1][119] = 127,
@@ -56037,13 +53805,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][0][RTW89_THAILAND][1][119] = 127,
[1][0][RTW89_THAILAND][0][119] = 127,
[1][1][RTW89_FCC][1][0] = -26,
- [1][1][RTW89_FCC][2][0] = 44,
[1][1][RTW89_ETSI][1][0] = 32,
[1][1][RTW89_ETSI][0][0] = -6,
[1][1][RTW89_MKK][1][0] = 30,
[1][1][RTW89_MKK][0][0] = -10,
[1][1][RTW89_IC][1][0] = -26,
- [1][1][RTW89_IC][2][0] = 44,
[1][1][RTW89_KCC][1][0] = -14,
[1][1][RTW89_KCC][0][0] = -14,
[1][1][RTW89_ACMA][1][0] = 32,
@@ -56056,13 +53822,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][0] = 18,
[1][1][RTW89_THAILAND][0][0] = -26,
[1][1][RTW89_FCC][1][2] = -28,
- [1][1][RTW89_FCC][2][2] = 44,
[1][1][RTW89_ETSI][1][2] = 32,
[1][1][RTW89_ETSI][0][2] = -6,
[1][1][RTW89_MKK][1][2] = 30,
[1][1][RTW89_MKK][0][2] = -10,
[1][1][RTW89_IC][1][2] = -28,
- [1][1][RTW89_IC][2][2] = 44,
[1][1][RTW89_KCC][1][2] = -14,
[1][1][RTW89_KCC][0][2] = -14,
[1][1][RTW89_ACMA][1][2] = 32,
@@ -56075,13 +53839,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][2] = 18,
[1][1][RTW89_THAILAND][0][2] = -28,
[1][1][RTW89_FCC][1][4] = -28,
- [1][1][RTW89_FCC][2][4] = 44,
[1][1][RTW89_ETSI][1][4] = 32,
[1][1][RTW89_ETSI][0][4] = -6,
[1][1][RTW89_MKK][1][4] = 30,
[1][1][RTW89_MKK][0][4] = -10,
[1][1][RTW89_IC][1][4] = -28,
- [1][1][RTW89_IC][2][4] = 44,
[1][1][RTW89_KCC][1][4] = -14,
[1][1][RTW89_KCC][0][4] = -14,
[1][1][RTW89_ACMA][1][4] = 32,
@@ -56094,13 +53856,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][4] = 18,
[1][1][RTW89_THAILAND][0][4] = -28,
[1][1][RTW89_FCC][1][6] = -28,
- [1][1][RTW89_FCC][2][6] = 44,
[1][1][RTW89_ETSI][1][6] = 32,
[1][1][RTW89_ETSI][0][6] = -6,
[1][1][RTW89_MKK][1][6] = 30,
[1][1][RTW89_MKK][0][6] = -10,
[1][1][RTW89_IC][1][6] = -28,
- [1][1][RTW89_IC][2][6] = 44,
[1][1][RTW89_KCC][1][6] = -14,
[1][1][RTW89_KCC][0][6] = -14,
[1][1][RTW89_ACMA][1][6] = 32,
@@ -56113,13 +53873,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][6] = 18,
[1][1][RTW89_THAILAND][0][6] = -28,
[1][1][RTW89_FCC][1][8] = -28,
- [1][1][RTW89_FCC][2][8] = 44,
[1][1][RTW89_ETSI][1][8] = 32,
[1][1][RTW89_ETSI][0][8] = -6,
[1][1][RTW89_MKK][1][8] = 30,
[1][1][RTW89_MKK][0][8] = -10,
[1][1][RTW89_IC][1][8] = -28,
- [1][1][RTW89_IC][2][8] = 44,
[1][1][RTW89_KCC][1][8] = -14,
[1][1][RTW89_KCC][0][8] = -14,
[1][1][RTW89_ACMA][1][8] = 32,
@@ -56132,13 +53890,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][8] = 18,
[1][1][RTW89_THAILAND][0][8] = -28,
[1][1][RTW89_FCC][1][10] = -28,
- [1][1][RTW89_FCC][2][10] = 44,
[1][1][RTW89_ETSI][1][10] = 32,
[1][1][RTW89_ETSI][0][10] = -6,
[1][1][RTW89_MKK][1][10] = 30,
[1][1][RTW89_MKK][0][10] = -10,
[1][1][RTW89_IC][1][10] = -28,
- [1][1][RTW89_IC][2][10] = 44,
[1][1][RTW89_KCC][1][10] = -14,
[1][1][RTW89_KCC][0][10] = -14,
[1][1][RTW89_ACMA][1][10] = 32,
@@ -56151,13 +53907,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][10] = 18,
[1][1][RTW89_THAILAND][0][10] = -28,
[1][1][RTW89_FCC][1][12] = -28,
- [1][1][RTW89_FCC][2][12] = 44,
[1][1][RTW89_ETSI][1][12] = 32,
[1][1][RTW89_ETSI][0][12] = -6,
[1][1][RTW89_MKK][1][12] = 30,
[1][1][RTW89_MKK][0][12] = -10,
[1][1][RTW89_IC][1][12] = -28,
- [1][1][RTW89_IC][2][12] = 44,
[1][1][RTW89_KCC][1][12] = -14,
[1][1][RTW89_KCC][0][12] = -14,
[1][1][RTW89_ACMA][1][12] = 32,
@@ -56170,13 +53924,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][12] = 18,
[1][1][RTW89_THAILAND][0][12] = -28,
[1][1][RTW89_FCC][1][14] = -28,
- [1][1][RTW89_FCC][2][14] = 44,
[1][1][RTW89_ETSI][1][14] = 32,
[1][1][RTW89_ETSI][0][14] = -6,
[1][1][RTW89_MKK][1][14] = 30,
[1][1][RTW89_MKK][0][14] = -10,
[1][1][RTW89_IC][1][14] = -28,
- [1][1][RTW89_IC][2][14] = 44,
[1][1][RTW89_KCC][1][14] = -14,
[1][1][RTW89_KCC][0][14] = -14,
[1][1][RTW89_ACMA][1][14] = 32,
@@ -56189,13 +53941,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][14] = 18,
[1][1][RTW89_THAILAND][0][14] = -28,
[1][1][RTW89_FCC][1][15] = -28,
- [1][1][RTW89_FCC][2][15] = 44,
[1][1][RTW89_ETSI][1][15] = 32,
[1][1][RTW89_ETSI][0][15] = -6,
[1][1][RTW89_MKK][1][15] = 30,
[1][1][RTW89_MKK][0][15] = -10,
[1][1][RTW89_IC][1][15] = -28,
- [1][1][RTW89_IC][2][15] = 44,
[1][1][RTW89_KCC][1][15] = -14,
[1][1][RTW89_KCC][0][15] = -14,
[1][1][RTW89_ACMA][1][15] = 32,
@@ -56208,13 +53958,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][15] = 18,
[1][1][RTW89_THAILAND][0][15] = -28,
[1][1][RTW89_FCC][1][17] = -28,
- [1][1][RTW89_FCC][2][17] = 44,
[1][1][RTW89_ETSI][1][17] = 32,
[1][1][RTW89_ETSI][0][17] = -6,
[1][1][RTW89_MKK][1][17] = 30,
[1][1][RTW89_MKK][0][17] = -10,
[1][1][RTW89_IC][1][17] = -28,
- [1][1][RTW89_IC][2][17] = 44,
[1][1][RTW89_KCC][1][17] = -14,
[1][1][RTW89_KCC][0][17] = -14,
[1][1][RTW89_ACMA][1][17] = 32,
@@ -56227,13 +53975,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][17] = 18,
[1][1][RTW89_THAILAND][0][17] = -28,
[1][1][RTW89_FCC][1][19] = -28,
- [1][1][RTW89_FCC][2][19] = 44,
[1][1][RTW89_ETSI][1][19] = 32,
[1][1][RTW89_ETSI][0][19] = -6,
[1][1][RTW89_MKK][1][19] = 30,
[1][1][RTW89_MKK][0][19] = -10,
[1][1][RTW89_IC][1][19] = -28,
- [1][1][RTW89_IC][2][19] = 44,
[1][1][RTW89_KCC][1][19] = -14,
[1][1][RTW89_KCC][0][19] = -14,
[1][1][RTW89_ACMA][1][19] = 32,
@@ -56246,13 +53992,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][19] = 18,
[1][1][RTW89_THAILAND][0][19] = -28,
[1][1][RTW89_FCC][1][21] = -28,
- [1][1][RTW89_FCC][2][21] = 44,
[1][1][RTW89_ETSI][1][21] = 32,
[1][1][RTW89_ETSI][0][21] = -6,
[1][1][RTW89_MKK][1][21] = 30,
[1][1][RTW89_MKK][0][21] = -10,
[1][1][RTW89_IC][1][21] = -28,
- [1][1][RTW89_IC][2][21] = 44,
[1][1][RTW89_KCC][1][21] = -14,
[1][1][RTW89_KCC][0][21] = -14,
[1][1][RTW89_ACMA][1][21] = 32,
@@ -56265,13 +54009,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][21] = 18,
[1][1][RTW89_THAILAND][0][21] = -28,
[1][1][RTW89_FCC][1][23] = -28,
- [1][1][RTW89_FCC][2][23] = 44,
[1][1][RTW89_ETSI][1][23] = 32,
[1][1][RTW89_ETSI][0][23] = -6,
[1][1][RTW89_MKK][1][23] = 32,
[1][1][RTW89_MKK][0][23] = -10,
[1][1][RTW89_IC][1][23] = -28,
- [1][1][RTW89_IC][2][23] = 44,
[1][1][RTW89_KCC][1][23] = -14,
[1][1][RTW89_KCC][0][23] = -14,
[1][1][RTW89_ACMA][1][23] = 32,
@@ -56284,13 +54026,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][23] = 18,
[1][1][RTW89_THAILAND][0][23] = -28,
[1][1][RTW89_FCC][1][25] = -28,
- [1][1][RTW89_FCC][2][25] = 44,
[1][1][RTW89_ETSI][1][25] = 32,
[1][1][RTW89_ETSI][0][25] = -6,
[1][1][RTW89_MKK][1][25] = 32,
[1][1][RTW89_MKK][0][25] = -10,
[1][1][RTW89_IC][1][25] = -28,
- [1][1][RTW89_IC][2][25] = 44,
[1][1][RTW89_KCC][1][25] = -14,
[1][1][RTW89_KCC][0][25] = -14,
[1][1][RTW89_ACMA][1][25] = 32,
@@ -56303,13 +54043,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][25] = 18,
[1][1][RTW89_THAILAND][0][25] = -28,
[1][1][RTW89_FCC][1][27] = -28,
- [1][1][RTW89_FCC][2][27] = 44,
[1][1][RTW89_ETSI][1][27] = 32,
[1][1][RTW89_ETSI][0][27] = -6,
[1][1][RTW89_MKK][1][27] = 32,
[1][1][RTW89_MKK][0][27] = -10,
[1][1][RTW89_IC][1][27] = -28,
- [1][1][RTW89_IC][2][27] = 44,
[1][1][RTW89_KCC][1][27] = -14,
[1][1][RTW89_KCC][0][27] = -14,
[1][1][RTW89_ACMA][1][27] = 32,
@@ -56322,13 +54060,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][27] = 18,
[1][1][RTW89_THAILAND][0][27] = -28,
[1][1][RTW89_FCC][1][29] = -28,
- [1][1][RTW89_FCC][2][29] = 44,
[1][1][RTW89_ETSI][1][29] = 32,
[1][1][RTW89_ETSI][0][29] = -6,
[1][1][RTW89_MKK][1][29] = 32,
[1][1][RTW89_MKK][0][29] = -10,
[1][1][RTW89_IC][1][29] = -28,
- [1][1][RTW89_IC][2][29] = 44,
[1][1][RTW89_KCC][1][29] = -14,
[1][1][RTW89_KCC][0][29] = -14,
[1][1][RTW89_ACMA][1][29] = 32,
@@ -56341,13 +54077,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][29] = 18,
[1][1][RTW89_THAILAND][0][29] = -28,
[1][1][RTW89_FCC][1][30] = -28,
- [1][1][RTW89_FCC][2][30] = 44,
[1][1][RTW89_ETSI][1][30] = 32,
[1][1][RTW89_ETSI][0][30] = -6,
[1][1][RTW89_MKK][1][30] = 32,
[1][1][RTW89_MKK][0][30] = -10,
[1][1][RTW89_IC][1][30] = -28,
- [1][1][RTW89_IC][2][30] = 44,
[1][1][RTW89_KCC][1][30] = -14,
[1][1][RTW89_KCC][0][30] = -14,
[1][1][RTW89_ACMA][1][30] = 32,
@@ -56360,13 +54094,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][30] = 18,
[1][1][RTW89_THAILAND][0][30] = -28,
[1][1][RTW89_FCC][1][32] = -28,
- [1][1][RTW89_FCC][2][32] = 44,
[1][1][RTW89_ETSI][1][32] = 32,
[1][1][RTW89_ETSI][0][32] = -6,
[1][1][RTW89_MKK][1][32] = 32,
[1][1][RTW89_MKK][0][32] = -10,
[1][1][RTW89_IC][1][32] = -28,
- [1][1][RTW89_IC][2][32] = 44,
[1][1][RTW89_KCC][1][32] = -14,
[1][1][RTW89_KCC][0][32] = -14,
[1][1][RTW89_ACMA][1][32] = 32,
@@ -56379,13 +54111,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][32] = 18,
[1][1][RTW89_THAILAND][0][32] = -28,
[1][1][RTW89_FCC][1][34] = -28,
- [1][1][RTW89_FCC][2][34] = 44,
[1][1][RTW89_ETSI][1][34] = 32,
[1][1][RTW89_ETSI][0][34] = -6,
[1][1][RTW89_MKK][1][34] = 32,
[1][1][RTW89_MKK][0][34] = -10,
[1][1][RTW89_IC][1][34] = -28,
- [1][1][RTW89_IC][2][34] = 44,
[1][1][RTW89_KCC][1][34] = -14,
[1][1][RTW89_KCC][0][34] = -14,
[1][1][RTW89_ACMA][1][34] = 32,
@@ -56398,13 +54128,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][34] = 18,
[1][1][RTW89_THAILAND][0][34] = -28,
[1][1][RTW89_FCC][1][36] = -28,
- [1][1][RTW89_FCC][2][36] = 44,
[1][1][RTW89_ETSI][1][36] = 32,
[1][1][RTW89_ETSI][0][36] = -6,
[1][1][RTW89_MKK][1][36] = 32,
[1][1][RTW89_MKK][0][36] = -10,
[1][1][RTW89_IC][1][36] = -28,
- [1][1][RTW89_IC][2][36] = 44,
[1][1][RTW89_KCC][1][36] = -14,
[1][1][RTW89_KCC][0][36] = -14,
[1][1][RTW89_ACMA][1][36] = 32,
@@ -56417,13 +54145,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][36] = 18,
[1][1][RTW89_THAILAND][0][36] = -28,
[1][1][RTW89_FCC][1][38] = -28,
- [1][1][RTW89_FCC][2][38] = 44,
[1][1][RTW89_ETSI][1][38] = 32,
[1][1][RTW89_ETSI][0][38] = -6,
[1][1][RTW89_MKK][1][38] = 32,
[1][1][RTW89_MKK][0][38] = -10,
[1][1][RTW89_IC][1][38] = -28,
- [1][1][RTW89_IC][2][38] = 44,
[1][1][RTW89_KCC][1][38] = -14,
[1][1][RTW89_KCC][0][38] = -14,
[1][1][RTW89_ACMA][1][38] = 32,
@@ -56436,13 +54162,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][38] = 18,
[1][1][RTW89_THAILAND][0][38] = -28,
[1][1][RTW89_FCC][1][40] = -28,
- [1][1][RTW89_FCC][2][40] = 44,
[1][1][RTW89_ETSI][1][40] = 32,
[1][1][RTW89_ETSI][0][40] = -6,
[1][1][RTW89_MKK][1][40] = 32,
[1][1][RTW89_MKK][0][40] = -10,
[1][1][RTW89_IC][1][40] = -28,
- [1][1][RTW89_IC][2][40] = 44,
[1][1][RTW89_KCC][1][40] = -14,
[1][1][RTW89_KCC][0][40] = -14,
[1][1][RTW89_ACMA][1][40] = 32,
@@ -56455,13 +54179,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][40] = 18,
[1][1][RTW89_THAILAND][0][40] = -28,
[1][1][RTW89_FCC][1][42] = -28,
- [1][1][RTW89_FCC][2][42] = 44,
[1][1][RTW89_ETSI][1][42] = 32,
[1][1][RTW89_ETSI][0][42] = -6,
[1][1][RTW89_MKK][1][42] = 32,
[1][1][RTW89_MKK][0][42] = -10,
[1][1][RTW89_IC][1][42] = -28,
- [1][1][RTW89_IC][2][42] = 44,
[1][1][RTW89_KCC][1][42] = -14,
[1][1][RTW89_KCC][0][42] = -14,
[1][1][RTW89_ACMA][1][42] = 32,
@@ -56474,13 +54196,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][42] = 18,
[1][1][RTW89_THAILAND][0][42] = -28,
[1][1][RTW89_FCC][1][44] = -28,
- [1][1][RTW89_FCC][2][44] = 44,
[1][1][RTW89_ETSI][1][44] = 34,
[1][1][RTW89_ETSI][0][44] = -4,
[1][1][RTW89_MKK][1][44] = 4,
[1][1][RTW89_MKK][0][44] = -8,
[1][1][RTW89_IC][1][44] = -28,
- [1][1][RTW89_IC][2][44] = 44,
[1][1][RTW89_KCC][1][44] = -14,
[1][1][RTW89_KCC][0][44] = -14,
[1][1][RTW89_ACMA][1][44] = 34,
@@ -56493,13 +54213,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][44] = 18,
[1][1][RTW89_THAILAND][0][44] = -28,
[1][1][RTW89_FCC][1][45] = -26,
- [1][1][RTW89_FCC][2][45] = 127,
[1][1][RTW89_ETSI][1][45] = 127,
[1][1][RTW89_ETSI][0][45] = 127,
[1][1][RTW89_MKK][1][45] = 127,
[1][1][RTW89_MKK][0][45] = 127,
[1][1][RTW89_IC][1][45] = -26,
- [1][1][RTW89_IC][2][45] = 44,
[1][1][RTW89_KCC][1][45] = -14,
[1][1][RTW89_KCC][0][45] = 127,
[1][1][RTW89_ACMA][1][45] = 127,
@@ -56512,13 +54230,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][45] = 127,
[1][1][RTW89_THAILAND][0][45] = 127,
[1][1][RTW89_FCC][1][47] = -28,
- [1][1][RTW89_FCC][2][47] = 127,
[1][1][RTW89_ETSI][1][47] = 127,
[1][1][RTW89_ETSI][0][47] = 127,
[1][1][RTW89_MKK][1][47] = 127,
[1][1][RTW89_MKK][0][47] = 127,
[1][1][RTW89_IC][1][47] = -28,
- [1][1][RTW89_IC][2][47] = 44,
[1][1][RTW89_KCC][1][47] = -14,
[1][1][RTW89_KCC][0][47] = 127,
[1][1][RTW89_ACMA][1][47] = 127,
@@ -56531,13 +54247,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][47] = 127,
[1][1][RTW89_THAILAND][0][47] = 127,
[1][1][RTW89_FCC][1][49] = -28,
- [1][1][RTW89_FCC][2][49] = 127,
[1][1][RTW89_ETSI][1][49] = 127,
[1][1][RTW89_ETSI][0][49] = 127,
[1][1][RTW89_MKK][1][49] = 127,
[1][1][RTW89_MKK][0][49] = 127,
[1][1][RTW89_IC][1][49] = -28,
- [1][1][RTW89_IC][2][49] = 44,
[1][1][RTW89_KCC][1][49] = -14,
[1][1][RTW89_KCC][0][49] = 127,
[1][1][RTW89_ACMA][1][49] = 127,
@@ -56550,13 +54264,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][49] = 127,
[1][1][RTW89_THAILAND][0][49] = 127,
[1][1][RTW89_FCC][1][51] = -28,
- [1][1][RTW89_FCC][2][51] = 127,
[1][1][RTW89_ETSI][1][51] = 127,
[1][1][RTW89_ETSI][0][51] = 127,
[1][1][RTW89_MKK][1][51] = 127,
[1][1][RTW89_MKK][0][51] = 127,
[1][1][RTW89_IC][1][51] = -28,
- [1][1][RTW89_IC][2][51] = 44,
[1][1][RTW89_KCC][1][51] = -14,
[1][1][RTW89_KCC][0][51] = 127,
[1][1][RTW89_ACMA][1][51] = 127,
@@ -56569,13 +54281,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][51] = 127,
[1][1][RTW89_THAILAND][0][51] = 127,
[1][1][RTW89_FCC][1][53] = -26,
- [1][1][RTW89_FCC][2][53] = 127,
[1][1][RTW89_ETSI][1][53] = 127,
[1][1][RTW89_ETSI][0][53] = 127,
[1][1][RTW89_MKK][1][53] = 127,
[1][1][RTW89_MKK][0][53] = 127,
[1][1][RTW89_IC][1][53] = -26,
- [1][1][RTW89_IC][2][53] = 44,
[1][1][RTW89_KCC][1][53] = -14,
[1][1][RTW89_KCC][0][53] = 127,
[1][1][RTW89_ACMA][1][53] = 127,
@@ -56588,13 +54298,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][53] = 127,
[1][1][RTW89_THAILAND][0][53] = 127,
[1][1][RTW89_FCC][1][55] = -28,
- [1][1][RTW89_FCC][2][55] = 44,
[1][1][RTW89_ETSI][1][55] = 127,
[1][1][RTW89_ETSI][0][55] = 127,
[1][1][RTW89_MKK][1][55] = 127,
[1][1][RTW89_MKK][0][55] = 127,
[1][1][RTW89_IC][1][55] = -28,
- [1][1][RTW89_IC][2][55] = 44,
[1][1][RTW89_KCC][1][55] = -14,
[1][1][RTW89_KCC][0][55] = 127,
[1][1][RTW89_ACMA][1][55] = 127,
@@ -56607,13 +54315,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][55] = 127,
[1][1][RTW89_THAILAND][0][55] = 127,
[1][1][RTW89_FCC][1][57] = -28,
- [1][1][RTW89_FCC][2][57] = 44,
[1][1][RTW89_ETSI][1][57] = 127,
[1][1][RTW89_ETSI][0][57] = 127,
[1][1][RTW89_MKK][1][57] = 127,
[1][1][RTW89_MKK][0][57] = 127,
[1][1][RTW89_IC][1][57] = -28,
- [1][1][RTW89_IC][2][57] = 44,
[1][1][RTW89_KCC][1][57] = -14,
[1][1][RTW89_KCC][0][57] = 127,
[1][1][RTW89_ACMA][1][57] = 127,
@@ -56626,13 +54332,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][57] = 127,
[1][1][RTW89_THAILAND][0][57] = 127,
[1][1][RTW89_FCC][1][59] = -28,
- [1][1][RTW89_FCC][2][59] = 44,
[1][1][RTW89_ETSI][1][59] = 127,
[1][1][RTW89_ETSI][0][59] = 127,
[1][1][RTW89_MKK][1][59] = 127,
[1][1][RTW89_MKK][0][59] = 127,
[1][1][RTW89_IC][1][59] = -28,
- [1][1][RTW89_IC][2][59] = 44,
[1][1][RTW89_KCC][1][59] = -14,
[1][1][RTW89_KCC][0][59] = 127,
[1][1][RTW89_ACMA][1][59] = 127,
@@ -56645,13 +54349,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][59] = 127,
[1][1][RTW89_THAILAND][0][59] = 127,
[1][1][RTW89_FCC][1][60] = -28,
- [1][1][RTW89_FCC][2][60] = 44,
[1][1][RTW89_ETSI][1][60] = 127,
[1][1][RTW89_ETSI][0][60] = 127,
[1][1][RTW89_MKK][1][60] = 127,
[1][1][RTW89_MKK][0][60] = 127,
[1][1][RTW89_IC][1][60] = -28,
- [1][1][RTW89_IC][2][60] = 44,
[1][1][RTW89_KCC][1][60] = -14,
[1][1][RTW89_KCC][0][60] = 127,
[1][1][RTW89_ACMA][1][60] = 127,
@@ -56664,13 +54366,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][60] = 127,
[1][1][RTW89_THAILAND][0][60] = 127,
[1][1][RTW89_FCC][1][62] = -28,
- [1][1][RTW89_FCC][2][62] = 44,
[1][1][RTW89_ETSI][1][62] = 127,
[1][1][RTW89_ETSI][0][62] = 127,
[1][1][RTW89_MKK][1][62] = 127,
[1][1][RTW89_MKK][0][62] = 127,
[1][1][RTW89_IC][1][62] = -28,
- [1][1][RTW89_IC][2][62] = 44,
[1][1][RTW89_KCC][1][62] = -14,
[1][1][RTW89_KCC][0][62] = 127,
[1][1][RTW89_ACMA][1][62] = 127,
@@ -56683,13 +54383,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][62] = 127,
[1][1][RTW89_THAILAND][0][62] = 127,
[1][1][RTW89_FCC][1][64] = -28,
- [1][1][RTW89_FCC][2][64] = 44,
[1][1][RTW89_ETSI][1][64] = 127,
[1][1][RTW89_ETSI][0][64] = 127,
[1][1][RTW89_MKK][1][64] = 127,
[1][1][RTW89_MKK][0][64] = 127,
[1][1][RTW89_IC][1][64] = -28,
- [1][1][RTW89_IC][2][64] = 44,
[1][1][RTW89_KCC][1][64] = -14,
[1][1][RTW89_KCC][0][64] = 127,
[1][1][RTW89_ACMA][1][64] = 127,
@@ -56702,13 +54400,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][64] = 127,
[1][1][RTW89_THAILAND][0][64] = 127,
[1][1][RTW89_FCC][1][66] = -28,
- [1][1][RTW89_FCC][2][66] = 44,
[1][1][RTW89_ETSI][1][66] = 127,
[1][1][RTW89_ETSI][0][66] = 127,
[1][1][RTW89_MKK][1][66] = 127,
[1][1][RTW89_MKK][0][66] = 127,
[1][1][RTW89_IC][1][66] = -28,
- [1][1][RTW89_IC][2][66] = 44,
[1][1][RTW89_KCC][1][66] = -14,
[1][1][RTW89_KCC][0][66] = 127,
[1][1][RTW89_ACMA][1][66] = 127,
@@ -56721,13 +54417,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][66] = 127,
[1][1][RTW89_THAILAND][0][66] = 127,
[1][1][RTW89_FCC][1][68] = -28,
- [1][1][RTW89_FCC][2][68] = 44,
[1][1][RTW89_ETSI][1][68] = 127,
[1][1][RTW89_ETSI][0][68] = 127,
[1][1][RTW89_MKK][1][68] = 127,
[1][1][RTW89_MKK][0][68] = 127,
[1][1][RTW89_IC][1][68] = -28,
- [1][1][RTW89_IC][2][68] = 44,
[1][1][RTW89_KCC][1][68] = -14,
[1][1][RTW89_KCC][0][68] = 127,
[1][1][RTW89_ACMA][1][68] = 127,
@@ -56740,13 +54434,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][68] = 127,
[1][1][RTW89_THAILAND][0][68] = 127,
[1][1][RTW89_FCC][1][70] = -26,
- [1][1][RTW89_FCC][2][70] = 44,
[1][1][RTW89_ETSI][1][70] = 127,
[1][1][RTW89_ETSI][0][70] = 127,
[1][1][RTW89_MKK][1][70] = 127,
[1][1][RTW89_MKK][0][70] = 127,
[1][1][RTW89_IC][1][70] = -26,
- [1][1][RTW89_IC][2][70] = 44,
[1][1][RTW89_KCC][1][70] = -14,
[1][1][RTW89_KCC][0][70] = 127,
[1][1][RTW89_ACMA][1][70] = 127,
@@ -56759,13 +54451,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][70] = 127,
[1][1][RTW89_THAILAND][0][70] = 127,
[1][1][RTW89_FCC][1][72] = -28,
- [1][1][RTW89_FCC][2][72] = 44,
[1][1][RTW89_ETSI][1][72] = 127,
[1][1][RTW89_ETSI][0][72] = 127,
[1][1][RTW89_MKK][1][72] = 127,
[1][1][RTW89_MKK][0][72] = 127,
[1][1][RTW89_IC][1][72] = -28,
- [1][1][RTW89_IC][2][72] = 44,
[1][1][RTW89_KCC][1][72] = -14,
[1][1][RTW89_KCC][0][72] = 127,
[1][1][RTW89_ACMA][1][72] = 127,
@@ -56778,13 +54468,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][72] = 127,
[1][1][RTW89_THAILAND][0][72] = 127,
[1][1][RTW89_FCC][1][74] = -28,
- [1][1][RTW89_FCC][2][74] = 44,
[1][1][RTW89_ETSI][1][74] = 127,
[1][1][RTW89_ETSI][0][74] = 127,
[1][1][RTW89_MKK][1][74] = 127,
[1][1][RTW89_MKK][0][74] = 127,
[1][1][RTW89_IC][1][74] = -28,
- [1][1][RTW89_IC][2][74] = 44,
[1][1][RTW89_KCC][1][74] = -14,
[1][1][RTW89_KCC][0][74] = 127,
[1][1][RTW89_ACMA][1][74] = 127,
@@ -56797,13 +54485,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][74] = 127,
[1][1][RTW89_THAILAND][0][74] = 127,
[1][1][RTW89_FCC][1][75] = -28,
- [1][1][RTW89_FCC][2][75] = 44,
[1][1][RTW89_ETSI][1][75] = 127,
[1][1][RTW89_ETSI][0][75] = 127,
[1][1][RTW89_MKK][1][75] = 127,
[1][1][RTW89_MKK][0][75] = 127,
[1][1][RTW89_IC][1][75] = -28,
- [1][1][RTW89_IC][2][75] = 44,
[1][1][RTW89_KCC][1][75] = -14,
[1][1][RTW89_KCC][0][75] = 127,
[1][1][RTW89_ACMA][1][75] = 127,
@@ -56816,13 +54502,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][75] = 127,
[1][1][RTW89_THAILAND][0][75] = 127,
[1][1][RTW89_FCC][1][77] = -28,
- [1][1][RTW89_FCC][2][77] = 44,
[1][1][RTW89_ETSI][1][77] = 127,
[1][1][RTW89_ETSI][0][77] = 127,
[1][1][RTW89_MKK][1][77] = 127,
[1][1][RTW89_MKK][0][77] = 127,
[1][1][RTW89_IC][1][77] = -28,
- [1][1][RTW89_IC][2][77] = 44,
[1][1][RTW89_KCC][1][77] = -14,
[1][1][RTW89_KCC][0][77] = 127,
[1][1][RTW89_ACMA][1][77] = 127,
@@ -56835,13 +54519,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][77] = 127,
[1][1][RTW89_THAILAND][0][77] = 127,
[1][1][RTW89_FCC][1][79] = -28,
- [1][1][RTW89_FCC][2][79] = 44,
[1][1][RTW89_ETSI][1][79] = 127,
[1][1][RTW89_ETSI][0][79] = 127,
[1][1][RTW89_MKK][1][79] = 127,
[1][1][RTW89_MKK][0][79] = 127,
[1][1][RTW89_IC][1][79] = -28,
- [1][1][RTW89_IC][2][79] = 44,
[1][1][RTW89_KCC][1][79] = -14,
[1][1][RTW89_KCC][0][79] = 127,
[1][1][RTW89_ACMA][1][79] = 127,
@@ -56854,13 +54536,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][79] = 127,
[1][1][RTW89_THAILAND][0][79] = 127,
[1][1][RTW89_FCC][1][81] = -28,
- [1][1][RTW89_FCC][2][81] = 44,
[1][1][RTW89_ETSI][1][81] = 127,
[1][1][RTW89_ETSI][0][81] = 127,
[1][1][RTW89_MKK][1][81] = 127,
[1][1][RTW89_MKK][0][81] = 127,
[1][1][RTW89_IC][1][81] = -28,
- [1][1][RTW89_IC][2][81] = 44,
[1][1][RTW89_KCC][1][81] = -14,
[1][1][RTW89_KCC][0][81] = 127,
[1][1][RTW89_ACMA][1][81] = 127,
@@ -56873,13 +54553,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][81] = 127,
[1][1][RTW89_THAILAND][0][81] = 127,
[1][1][RTW89_FCC][1][83] = -28,
- [1][1][RTW89_FCC][2][83] = 44,
[1][1][RTW89_ETSI][1][83] = 127,
[1][1][RTW89_ETSI][0][83] = 127,
[1][1][RTW89_MKK][1][83] = 127,
[1][1][RTW89_MKK][0][83] = 127,
[1][1][RTW89_IC][1][83] = -28,
- [1][1][RTW89_IC][2][83] = 44,
[1][1][RTW89_KCC][1][83] = -14,
[1][1][RTW89_KCC][0][83] = 127,
[1][1][RTW89_ACMA][1][83] = 127,
@@ -56892,13 +54570,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][83] = 127,
[1][1][RTW89_THAILAND][0][83] = 127,
[1][1][RTW89_FCC][1][85] = -28,
- [1][1][RTW89_FCC][2][85] = 44,
[1][1][RTW89_ETSI][1][85] = 127,
[1][1][RTW89_ETSI][0][85] = 127,
[1][1][RTW89_MKK][1][85] = 127,
[1][1][RTW89_MKK][0][85] = 127,
[1][1][RTW89_IC][1][85] = -28,
- [1][1][RTW89_IC][2][85] = 44,
[1][1][RTW89_KCC][1][85] = -14,
[1][1][RTW89_KCC][0][85] = 127,
[1][1][RTW89_ACMA][1][85] = 127,
@@ -56911,13 +54587,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][85] = 127,
[1][1][RTW89_THAILAND][0][85] = 127,
[1][1][RTW89_FCC][1][87] = -28,
- [1][1][RTW89_FCC][2][87] = 127,
[1][1][RTW89_ETSI][1][87] = 127,
[1][1][RTW89_ETSI][0][87] = 127,
[1][1][RTW89_MKK][1][87] = 127,
[1][1][RTW89_MKK][0][87] = 127,
[1][1][RTW89_IC][1][87] = -28,
- [1][1][RTW89_IC][2][87] = 127,
[1][1][RTW89_KCC][1][87] = -14,
[1][1][RTW89_KCC][0][87] = 127,
[1][1][RTW89_ACMA][1][87] = 127,
@@ -56930,13 +54604,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][87] = 127,
[1][1][RTW89_THAILAND][0][87] = 127,
[1][1][RTW89_FCC][1][89] = -26,
- [1][1][RTW89_FCC][2][89] = 127,
[1][1][RTW89_ETSI][1][89] = 127,
[1][1][RTW89_ETSI][0][89] = 127,
[1][1][RTW89_MKK][1][89] = 127,
[1][1][RTW89_MKK][0][89] = 127,
[1][1][RTW89_IC][1][89] = -26,
- [1][1][RTW89_IC][2][89] = 127,
[1][1][RTW89_KCC][1][89] = -14,
[1][1][RTW89_KCC][0][89] = 127,
[1][1][RTW89_ACMA][1][89] = 127,
@@ -56949,13 +54621,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][89] = 127,
[1][1][RTW89_THAILAND][0][89] = 127,
[1][1][RTW89_FCC][1][90] = -26,
- [1][1][RTW89_FCC][2][90] = 127,
[1][1][RTW89_ETSI][1][90] = 127,
[1][1][RTW89_ETSI][0][90] = 127,
[1][1][RTW89_MKK][1][90] = 127,
[1][1][RTW89_MKK][0][90] = 127,
[1][1][RTW89_IC][1][90] = -26,
- [1][1][RTW89_IC][2][90] = 127,
[1][1][RTW89_KCC][1][90] = -14,
[1][1][RTW89_KCC][0][90] = 127,
[1][1][RTW89_ACMA][1][90] = 127,
@@ -56968,13 +54638,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][90] = 127,
[1][1][RTW89_THAILAND][0][90] = 127,
[1][1][RTW89_FCC][1][92] = -26,
- [1][1][RTW89_FCC][2][92] = 127,
[1][1][RTW89_ETSI][1][92] = 127,
[1][1][RTW89_ETSI][0][92] = 127,
[1][1][RTW89_MKK][1][92] = 127,
[1][1][RTW89_MKK][0][92] = 127,
[1][1][RTW89_IC][1][92] = -26,
- [1][1][RTW89_IC][2][92] = 127,
[1][1][RTW89_KCC][1][92] = -14,
[1][1][RTW89_KCC][0][92] = 127,
[1][1][RTW89_ACMA][1][92] = 127,
@@ -56987,13 +54655,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][92] = 127,
[1][1][RTW89_THAILAND][0][92] = 127,
[1][1][RTW89_FCC][1][94] = -26,
- [1][1][RTW89_FCC][2][94] = 127,
[1][1][RTW89_ETSI][1][94] = 127,
[1][1][RTW89_ETSI][0][94] = 127,
[1][1][RTW89_MKK][1][94] = 127,
[1][1][RTW89_MKK][0][94] = 127,
[1][1][RTW89_IC][1][94] = -26,
- [1][1][RTW89_IC][2][94] = 127,
[1][1][RTW89_KCC][1][94] = -14,
[1][1][RTW89_KCC][0][94] = 127,
[1][1][RTW89_ACMA][1][94] = 127,
@@ -57006,13 +54672,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][94] = 127,
[1][1][RTW89_THAILAND][0][94] = 127,
[1][1][RTW89_FCC][1][96] = -26,
- [1][1][RTW89_FCC][2][96] = 127,
[1][1][RTW89_ETSI][1][96] = 127,
[1][1][RTW89_ETSI][0][96] = 127,
[1][1][RTW89_MKK][1][96] = 127,
[1][1][RTW89_MKK][0][96] = 127,
[1][1][RTW89_IC][1][96] = -26,
- [1][1][RTW89_IC][2][96] = 127,
[1][1][RTW89_KCC][1][96] = -14,
[1][1][RTW89_KCC][0][96] = 127,
[1][1][RTW89_ACMA][1][96] = 127,
@@ -57025,13 +54689,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][96] = 127,
[1][1][RTW89_THAILAND][0][96] = 127,
[1][1][RTW89_FCC][1][98] = -26,
- [1][1][RTW89_FCC][2][98] = 127,
[1][1][RTW89_ETSI][1][98] = 127,
[1][1][RTW89_ETSI][0][98] = 127,
[1][1][RTW89_MKK][1][98] = 127,
[1][1][RTW89_MKK][0][98] = 127,
[1][1][RTW89_IC][1][98] = -26,
- [1][1][RTW89_IC][2][98] = 127,
[1][1][RTW89_KCC][1][98] = -14,
[1][1][RTW89_KCC][0][98] = 127,
[1][1][RTW89_ACMA][1][98] = 127,
@@ -57044,13 +54706,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][98] = 127,
[1][1][RTW89_THAILAND][0][98] = 127,
[1][1][RTW89_FCC][1][100] = -26,
- [1][1][RTW89_FCC][2][100] = 127,
[1][1][RTW89_ETSI][1][100] = 127,
[1][1][RTW89_ETSI][0][100] = 127,
[1][1][RTW89_MKK][1][100] = 127,
[1][1][RTW89_MKK][0][100] = 127,
[1][1][RTW89_IC][1][100] = -26,
- [1][1][RTW89_IC][2][100] = 127,
[1][1][RTW89_KCC][1][100] = -14,
[1][1][RTW89_KCC][0][100] = 127,
[1][1][RTW89_ACMA][1][100] = 127,
@@ -57063,13 +54723,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][100] = 127,
[1][1][RTW89_THAILAND][0][100] = 127,
[1][1][RTW89_FCC][1][102] = -26,
- [1][1][RTW89_FCC][2][102] = 127,
[1][1][RTW89_ETSI][1][102] = 127,
[1][1][RTW89_ETSI][0][102] = 127,
[1][1][RTW89_MKK][1][102] = 127,
[1][1][RTW89_MKK][0][102] = 127,
[1][1][RTW89_IC][1][102] = -26,
- [1][1][RTW89_IC][2][102] = 127,
[1][1][RTW89_KCC][1][102] = -14,
[1][1][RTW89_KCC][0][102] = 127,
[1][1][RTW89_ACMA][1][102] = 127,
@@ -57082,13 +54740,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][102] = 127,
[1][1][RTW89_THAILAND][0][102] = 127,
[1][1][RTW89_FCC][1][104] = -26,
- [1][1][RTW89_FCC][2][104] = 127,
[1][1][RTW89_ETSI][1][104] = 127,
[1][1][RTW89_ETSI][0][104] = 127,
[1][1][RTW89_MKK][1][104] = 127,
[1][1][RTW89_MKK][0][104] = 127,
[1][1][RTW89_IC][1][104] = -26,
- [1][1][RTW89_IC][2][104] = 127,
[1][1][RTW89_KCC][1][104] = -14,
[1][1][RTW89_KCC][0][104] = 127,
[1][1][RTW89_ACMA][1][104] = 127,
@@ -57101,13 +54757,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][104] = 127,
[1][1][RTW89_THAILAND][0][104] = 127,
[1][1][RTW89_FCC][1][105] = -26,
- [1][1][RTW89_FCC][2][105] = 127,
[1][1][RTW89_ETSI][1][105] = 127,
[1][1][RTW89_ETSI][0][105] = 127,
[1][1][RTW89_MKK][1][105] = 127,
[1][1][RTW89_MKK][0][105] = 127,
[1][1][RTW89_IC][1][105] = -26,
- [1][1][RTW89_IC][2][105] = 127,
[1][1][RTW89_KCC][1][105] = -14,
[1][1][RTW89_KCC][0][105] = 127,
[1][1][RTW89_ACMA][1][105] = 127,
@@ -57120,13 +54774,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][105] = 127,
[1][1][RTW89_THAILAND][0][105] = 127,
[1][1][RTW89_FCC][1][107] = -22,
- [1][1][RTW89_FCC][2][107] = 127,
[1][1][RTW89_ETSI][1][107] = 127,
[1][1][RTW89_ETSI][0][107] = 127,
[1][1][RTW89_MKK][1][107] = 127,
[1][1][RTW89_MKK][0][107] = 127,
[1][1][RTW89_IC][1][107] = -22,
- [1][1][RTW89_IC][2][107] = 127,
[1][1][RTW89_KCC][1][107] = -14,
[1][1][RTW89_KCC][0][107] = 127,
[1][1][RTW89_ACMA][1][107] = 127,
@@ -57139,13 +54791,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][107] = 127,
[1][1][RTW89_THAILAND][0][107] = 127,
[1][1][RTW89_FCC][1][109] = -22,
- [1][1][RTW89_FCC][2][109] = 127,
[1][1][RTW89_ETSI][1][109] = 127,
[1][1][RTW89_ETSI][0][109] = 127,
[1][1][RTW89_MKK][1][109] = 127,
[1][1][RTW89_MKK][0][109] = 127,
[1][1][RTW89_IC][1][109] = -22,
- [1][1][RTW89_IC][2][109] = 127,
[1][1][RTW89_KCC][1][109] = 127,
[1][1][RTW89_KCC][0][109] = 127,
[1][1][RTW89_ACMA][1][109] = 127,
@@ -57158,13 +54808,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][109] = 127,
[1][1][RTW89_THAILAND][0][109] = 127,
[1][1][RTW89_FCC][1][111] = 127,
- [1][1][RTW89_FCC][2][111] = 127,
[1][1][RTW89_ETSI][1][111] = 127,
[1][1][RTW89_ETSI][0][111] = 127,
[1][1][RTW89_MKK][1][111] = 127,
[1][1][RTW89_MKK][0][111] = 127,
[1][1][RTW89_IC][1][111] = 127,
- [1][1][RTW89_IC][2][111] = 127,
[1][1][RTW89_KCC][1][111] = 127,
[1][1][RTW89_KCC][0][111] = 127,
[1][1][RTW89_ACMA][1][111] = 127,
@@ -57177,13 +54825,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][111] = 127,
[1][1][RTW89_THAILAND][0][111] = 127,
[1][1][RTW89_FCC][1][113] = 127,
- [1][1][RTW89_FCC][2][113] = 127,
[1][1][RTW89_ETSI][1][113] = 127,
[1][1][RTW89_ETSI][0][113] = 127,
[1][1][RTW89_MKK][1][113] = 127,
[1][1][RTW89_MKK][0][113] = 127,
[1][1][RTW89_IC][1][113] = 127,
- [1][1][RTW89_IC][2][113] = 127,
[1][1][RTW89_KCC][1][113] = 127,
[1][1][RTW89_KCC][0][113] = 127,
[1][1][RTW89_ACMA][1][113] = 127,
@@ -57196,13 +54842,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][113] = 127,
[1][1][RTW89_THAILAND][0][113] = 127,
[1][1][RTW89_FCC][1][115] = 127,
- [1][1][RTW89_FCC][2][115] = 127,
[1][1][RTW89_ETSI][1][115] = 127,
[1][1][RTW89_ETSI][0][115] = 127,
[1][1][RTW89_MKK][1][115] = 127,
[1][1][RTW89_MKK][0][115] = 127,
[1][1][RTW89_IC][1][115] = 127,
- [1][1][RTW89_IC][2][115] = 127,
[1][1][RTW89_KCC][1][115] = 127,
[1][1][RTW89_KCC][0][115] = 127,
[1][1][RTW89_ACMA][1][115] = 127,
@@ -57215,13 +54859,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][115] = 127,
[1][1][RTW89_THAILAND][0][115] = 127,
[1][1][RTW89_FCC][1][117] = 127,
- [1][1][RTW89_FCC][2][117] = 127,
[1][1][RTW89_ETSI][1][117] = 127,
[1][1][RTW89_ETSI][0][117] = 127,
[1][1][RTW89_MKK][1][117] = 127,
[1][1][RTW89_MKK][0][117] = 127,
[1][1][RTW89_IC][1][117] = 127,
- [1][1][RTW89_IC][2][117] = 127,
[1][1][RTW89_KCC][1][117] = 127,
[1][1][RTW89_KCC][0][117] = 127,
[1][1][RTW89_ACMA][1][117] = 127,
@@ -57234,13 +54876,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][117] = 127,
[1][1][RTW89_THAILAND][0][117] = 127,
[1][1][RTW89_FCC][1][119] = 127,
- [1][1][RTW89_FCC][2][119] = 127,
[1][1][RTW89_ETSI][1][119] = 127,
[1][1][RTW89_ETSI][0][119] = 127,
[1][1][RTW89_MKK][1][119] = 127,
[1][1][RTW89_MKK][0][119] = 127,
[1][1][RTW89_IC][1][119] = 127,
- [1][1][RTW89_IC][2][119] = 127,
[1][1][RTW89_KCC][1][119] = 127,
[1][1][RTW89_KCC][0][119] = 127,
[1][1][RTW89_ACMA][1][119] = 127,
@@ -57253,13 +54893,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[1][1][RTW89_THAILAND][1][119] = 127,
[1][1][RTW89_THAILAND][0][119] = 127,
[2][0][RTW89_FCC][1][0] = 8,
- [2][0][RTW89_FCC][2][0] = 60,
[2][0][RTW89_ETSI][1][0] = 56,
[2][0][RTW89_ETSI][0][0] = 18,
[2][0][RTW89_MKK][1][0] = 54,
[2][0][RTW89_MKK][0][0] = 14,
[2][0][RTW89_IC][1][0] = 8,
- [2][0][RTW89_IC][2][0] = 60,
[2][0][RTW89_KCC][1][0] = -2,
[2][0][RTW89_KCC][0][0] = -2,
[2][0][RTW89_ACMA][1][0] = 56,
@@ -57272,13 +54910,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][0] = 52,
[2][0][RTW89_THAILAND][0][0] = 8,
[2][0][RTW89_FCC][1][2] = 8,
- [2][0][RTW89_FCC][2][2] = 60,
[2][0][RTW89_ETSI][1][2] = 56,
[2][0][RTW89_ETSI][0][2] = 18,
[2][0][RTW89_MKK][1][2] = 54,
[2][0][RTW89_MKK][0][2] = 14,
[2][0][RTW89_IC][1][2] = 8,
- [2][0][RTW89_IC][2][2] = 60,
[2][0][RTW89_KCC][1][2] = -2,
[2][0][RTW89_KCC][0][2] = -2,
[2][0][RTW89_ACMA][1][2] = 56,
@@ -57291,13 +54927,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][2] = 52,
[2][0][RTW89_THAILAND][0][2] = 8,
[2][0][RTW89_FCC][1][4] = 8,
- [2][0][RTW89_FCC][2][4] = 60,
[2][0][RTW89_ETSI][1][4] = 56,
[2][0][RTW89_ETSI][0][4] = 18,
[2][0][RTW89_MKK][1][4] = 54,
[2][0][RTW89_MKK][0][4] = 14,
[2][0][RTW89_IC][1][4] = 8,
- [2][0][RTW89_IC][2][4] = 60,
[2][0][RTW89_KCC][1][4] = -2,
[2][0][RTW89_KCC][0][4] = -2,
[2][0][RTW89_ACMA][1][4] = 56,
@@ -57310,13 +54944,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][4] = 52,
[2][0][RTW89_THAILAND][0][4] = 8,
[2][0][RTW89_FCC][1][6] = 8,
- [2][0][RTW89_FCC][2][6] = 60,
[2][0][RTW89_ETSI][1][6] = 56,
[2][0][RTW89_ETSI][0][6] = 18,
[2][0][RTW89_MKK][1][6] = 54,
[2][0][RTW89_MKK][0][6] = 14,
[2][0][RTW89_IC][1][6] = 8,
- [2][0][RTW89_IC][2][6] = 60,
[2][0][RTW89_KCC][1][6] = -2,
[2][0][RTW89_KCC][0][6] = -2,
[2][0][RTW89_ACMA][1][6] = 56,
@@ -57329,13 +54961,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][6] = 52,
[2][0][RTW89_THAILAND][0][6] = 8,
[2][0][RTW89_FCC][1][8] = 8,
- [2][0][RTW89_FCC][2][8] = 60,
[2][0][RTW89_ETSI][1][8] = 56,
[2][0][RTW89_ETSI][0][8] = 18,
[2][0][RTW89_MKK][1][8] = 54,
[2][0][RTW89_MKK][0][8] = 14,
[2][0][RTW89_IC][1][8] = 8,
- [2][0][RTW89_IC][2][8] = 60,
[2][0][RTW89_KCC][1][8] = -2,
[2][0][RTW89_KCC][0][8] = -2,
[2][0][RTW89_ACMA][1][8] = 56,
@@ -57348,13 +54978,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][8] = 52,
[2][0][RTW89_THAILAND][0][8] = 8,
[2][0][RTW89_FCC][1][10] = 8,
- [2][0][RTW89_FCC][2][10] = 60,
[2][0][RTW89_ETSI][1][10] = 56,
[2][0][RTW89_ETSI][0][10] = 18,
[2][0][RTW89_MKK][1][10] = 54,
[2][0][RTW89_MKK][0][10] = 14,
[2][0][RTW89_IC][1][10] = 8,
- [2][0][RTW89_IC][2][10] = 60,
[2][0][RTW89_KCC][1][10] = -2,
[2][0][RTW89_KCC][0][10] = -2,
[2][0][RTW89_ACMA][1][10] = 56,
@@ -57367,13 +54995,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][10] = 52,
[2][0][RTW89_THAILAND][0][10] = 8,
[2][0][RTW89_FCC][1][12] = 8,
- [2][0][RTW89_FCC][2][12] = 60,
[2][0][RTW89_ETSI][1][12] = 56,
[2][0][RTW89_ETSI][0][12] = 18,
[2][0][RTW89_MKK][1][12] = 54,
[2][0][RTW89_MKK][0][12] = 14,
[2][0][RTW89_IC][1][12] = 8,
- [2][0][RTW89_IC][2][12] = 60,
[2][0][RTW89_KCC][1][12] = -2,
[2][0][RTW89_KCC][0][12] = -2,
[2][0][RTW89_ACMA][1][12] = 56,
@@ -57386,13 +55012,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][12] = 52,
[2][0][RTW89_THAILAND][0][12] = 8,
[2][0][RTW89_FCC][1][14] = 8,
- [2][0][RTW89_FCC][2][14] = 60,
[2][0][RTW89_ETSI][1][14] = 56,
[2][0][RTW89_ETSI][0][14] = 18,
[2][0][RTW89_MKK][1][14] = 54,
[2][0][RTW89_MKK][0][14] = 14,
[2][0][RTW89_IC][1][14] = 8,
- [2][0][RTW89_IC][2][14] = 60,
[2][0][RTW89_KCC][1][14] = -2,
[2][0][RTW89_KCC][0][14] = -2,
[2][0][RTW89_ACMA][1][14] = 56,
@@ -57405,13 +55029,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][14] = 52,
[2][0][RTW89_THAILAND][0][14] = 8,
[2][0][RTW89_FCC][1][15] = 8,
- [2][0][RTW89_FCC][2][15] = 60,
[2][0][RTW89_ETSI][1][15] = 56,
[2][0][RTW89_ETSI][0][15] = 18,
[2][0][RTW89_MKK][1][15] = 54,
[2][0][RTW89_MKK][0][15] = 14,
[2][0][RTW89_IC][1][15] = 8,
- [2][0][RTW89_IC][2][15] = 60,
[2][0][RTW89_KCC][1][15] = -2,
[2][0][RTW89_KCC][0][15] = -2,
[2][0][RTW89_ACMA][1][15] = 56,
@@ -57424,13 +55046,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][15] = 52,
[2][0][RTW89_THAILAND][0][15] = 8,
[2][0][RTW89_FCC][1][17] = 8,
- [2][0][RTW89_FCC][2][17] = 60,
[2][0][RTW89_ETSI][1][17] = 56,
[2][0][RTW89_ETSI][0][17] = 18,
[2][0][RTW89_MKK][1][17] = 54,
[2][0][RTW89_MKK][0][17] = 14,
[2][0][RTW89_IC][1][17] = 8,
- [2][0][RTW89_IC][2][17] = 60,
[2][0][RTW89_KCC][1][17] = -2,
[2][0][RTW89_KCC][0][17] = -2,
[2][0][RTW89_ACMA][1][17] = 56,
@@ -57443,13 +55063,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][17] = 52,
[2][0][RTW89_THAILAND][0][17] = 8,
[2][0][RTW89_FCC][1][19] = 8,
- [2][0][RTW89_FCC][2][19] = 60,
[2][0][RTW89_ETSI][1][19] = 56,
[2][0][RTW89_ETSI][0][19] = 18,
[2][0][RTW89_MKK][1][19] = 54,
[2][0][RTW89_MKK][0][19] = 14,
[2][0][RTW89_IC][1][19] = 8,
- [2][0][RTW89_IC][2][19] = 60,
[2][0][RTW89_KCC][1][19] = -2,
[2][0][RTW89_KCC][0][19] = -2,
[2][0][RTW89_ACMA][1][19] = 56,
@@ -57462,13 +55080,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][19] = 52,
[2][0][RTW89_THAILAND][0][19] = 8,
[2][0][RTW89_FCC][1][21] = 8,
- [2][0][RTW89_FCC][2][21] = 60,
[2][0][RTW89_ETSI][1][21] = 56,
[2][0][RTW89_ETSI][0][21] = 18,
[2][0][RTW89_MKK][1][21] = 54,
[2][0][RTW89_MKK][0][21] = 14,
[2][0][RTW89_IC][1][21] = 8,
- [2][0][RTW89_IC][2][21] = 60,
[2][0][RTW89_KCC][1][21] = -2,
[2][0][RTW89_KCC][0][21] = -2,
[2][0][RTW89_ACMA][1][21] = 56,
@@ -57481,13 +55097,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][21] = 52,
[2][0][RTW89_THAILAND][0][21] = 8,
[2][0][RTW89_FCC][1][23] = 8,
- [2][0][RTW89_FCC][2][23] = 70,
[2][0][RTW89_ETSI][1][23] = 56,
[2][0][RTW89_ETSI][0][23] = 18,
[2][0][RTW89_MKK][1][23] = 56,
[2][0][RTW89_MKK][0][23] = 14,
[2][0][RTW89_IC][1][23] = 8,
- [2][0][RTW89_IC][2][23] = 70,
[2][0][RTW89_KCC][1][23] = -2,
[2][0][RTW89_KCC][0][23] = -2,
[2][0][RTW89_ACMA][1][23] = 56,
@@ -57500,13 +55114,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][23] = 52,
[2][0][RTW89_THAILAND][0][23] = 8,
[2][0][RTW89_FCC][1][25] = 8,
- [2][0][RTW89_FCC][2][25] = 70,
[2][0][RTW89_ETSI][1][25] = 56,
[2][0][RTW89_ETSI][0][25] = 18,
[2][0][RTW89_MKK][1][25] = 56,
[2][0][RTW89_MKK][0][25] = 14,
[2][0][RTW89_IC][1][25] = 8,
- [2][0][RTW89_IC][2][25] = 70,
[2][0][RTW89_KCC][1][25] = -2,
[2][0][RTW89_KCC][0][25] = -2,
[2][0][RTW89_ACMA][1][25] = 56,
@@ -57519,13 +55131,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][25] = 52,
[2][0][RTW89_THAILAND][0][25] = 8,
[2][0][RTW89_FCC][1][27] = 8,
- [2][0][RTW89_FCC][2][27] = 70,
[2][0][RTW89_ETSI][1][27] = 56,
[2][0][RTW89_ETSI][0][27] = 18,
[2][0][RTW89_MKK][1][27] = 56,
[2][0][RTW89_MKK][0][27] = 14,
[2][0][RTW89_IC][1][27] = 8,
- [2][0][RTW89_IC][2][27] = 70,
[2][0][RTW89_KCC][1][27] = -2,
[2][0][RTW89_KCC][0][27] = -2,
[2][0][RTW89_ACMA][1][27] = 56,
@@ -57538,13 +55148,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][27] = 52,
[2][0][RTW89_THAILAND][0][27] = 8,
[2][0][RTW89_FCC][1][29] = 8,
- [2][0][RTW89_FCC][2][29] = 70,
[2][0][RTW89_ETSI][1][29] = 56,
[2][0][RTW89_ETSI][0][29] = 18,
[2][0][RTW89_MKK][1][29] = 56,
[2][0][RTW89_MKK][0][29] = 14,
[2][0][RTW89_IC][1][29] = 8,
- [2][0][RTW89_IC][2][29] = 70,
[2][0][RTW89_KCC][1][29] = -2,
[2][0][RTW89_KCC][0][29] = -2,
[2][0][RTW89_ACMA][1][29] = 56,
@@ -57557,13 +55165,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][29] = 52,
[2][0][RTW89_THAILAND][0][29] = 8,
[2][0][RTW89_FCC][1][30] = 8,
- [2][0][RTW89_FCC][2][30] = 70,
[2][0][RTW89_ETSI][1][30] = 56,
[2][0][RTW89_ETSI][0][30] = 18,
[2][0][RTW89_MKK][1][30] = 56,
[2][0][RTW89_MKK][0][30] = 14,
[2][0][RTW89_IC][1][30] = 8,
- [2][0][RTW89_IC][2][30] = 70,
[2][0][RTW89_KCC][1][30] = -2,
[2][0][RTW89_KCC][0][30] = -2,
[2][0][RTW89_ACMA][1][30] = 56,
@@ -57576,13 +55182,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][30] = 52,
[2][0][RTW89_THAILAND][0][30] = 8,
[2][0][RTW89_FCC][1][32] = 8,
- [2][0][RTW89_FCC][2][32] = 70,
[2][0][RTW89_ETSI][1][32] = 56,
[2][0][RTW89_ETSI][0][32] = 18,
[2][0][RTW89_MKK][1][32] = 56,
[2][0][RTW89_MKK][0][32] = 14,
[2][0][RTW89_IC][1][32] = 8,
- [2][0][RTW89_IC][2][32] = 70,
[2][0][RTW89_KCC][1][32] = -2,
[2][0][RTW89_KCC][0][32] = -2,
[2][0][RTW89_ACMA][1][32] = 56,
@@ -57595,13 +55199,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][32] = 52,
[2][0][RTW89_THAILAND][0][32] = 8,
[2][0][RTW89_FCC][1][34] = 8,
- [2][0][RTW89_FCC][2][34] = 70,
[2][0][RTW89_ETSI][1][34] = 56,
[2][0][RTW89_ETSI][0][34] = 18,
[2][0][RTW89_MKK][1][34] = 56,
[2][0][RTW89_MKK][0][34] = 14,
[2][0][RTW89_IC][1][34] = 8,
- [2][0][RTW89_IC][2][34] = 70,
[2][0][RTW89_KCC][1][34] = -2,
[2][0][RTW89_KCC][0][34] = -2,
[2][0][RTW89_ACMA][1][34] = 56,
@@ -57614,13 +55216,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][34] = 52,
[2][0][RTW89_THAILAND][0][34] = 8,
[2][0][RTW89_FCC][1][36] = 8,
- [2][0][RTW89_FCC][2][36] = 70,
[2][0][RTW89_ETSI][1][36] = 56,
[2][0][RTW89_ETSI][0][36] = 18,
[2][0][RTW89_MKK][1][36] = 56,
[2][0][RTW89_MKK][0][36] = 14,
[2][0][RTW89_IC][1][36] = 8,
- [2][0][RTW89_IC][2][36] = 70,
[2][0][RTW89_KCC][1][36] = -2,
[2][0][RTW89_KCC][0][36] = -2,
[2][0][RTW89_ACMA][1][36] = 56,
@@ -57633,13 +55233,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][36] = 52,
[2][0][RTW89_THAILAND][0][36] = 8,
[2][0][RTW89_FCC][1][38] = 8,
- [2][0][RTW89_FCC][2][38] = 70,
[2][0][RTW89_ETSI][1][38] = 56,
[2][0][RTW89_ETSI][0][38] = 18,
[2][0][RTW89_MKK][1][38] = 56,
[2][0][RTW89_MKK][0][38] = 14,
[2][0][RTW89_IC][1][38] = 8,
- [2][0][RTW89_IC][2][38] = 70,
[2][0][RTW89_KCC][1][38] = -2,
[2][0][RTW89_KCC][0][38] = -2,
[2][0][RTW89_ACMA][1][38] = 56,
@@ -57652,13 +55250,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][38] = 52,
[2][0][RTW89_THAILAND][0][38] = 8,
[2][0][RTW89_FCC][1][40] = 8,
- [2][0][RTW89_FCC][2][40] = 70,
[2][0][RTW89_ETSI][1][40] = 56,
[2][0][RTW89_ETSI][0][40] = 18,
[2][0][RTW89_MKK][1][40] = 56,
[2][0][RTW89_MKK][0][40] = 14,
[2][0][RTW89_IC][1][40] = 8,
- [2][0][RTW89_IC][2][40] = 70,
[2][0][RTW89_KCC][1][40] = -2,
[2][0][RTW89_KCC][0][40] = -2,
[2][0][RTW89_ACMA][1][40] = 56,
@@ -57671,13 +55267,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][40] = 52,
[2][0][RTW89_THAILAND][0][40] = 8,
[2][0][RTW89_FCC][1][42] = 8,
- [2][0][RTW89_FCC][2][42] = 70,
[2][0][RTW89_ETSI][1][42] = 56,
[2][0][RTW89_ETSI][0][42] = 18,
[2][0][RTW89_MKK][1][42] = 56,
[2][0][RTW89_MKK][0][42] = 14,
[2][0][RTW89_IC][1][42] = 8,
- [2][0][RTW89_IC][2][42] = 70,
[2][0][RTW89_KCC][1][42] = -2,
[2][0][RTW89_KCC][0][42] = -2,
[2][0][RTW89_ACMA][1][42] = 56,
@@ -57690,13 +55284,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][42] = 52,
[2][0][RTW89_THAILAND][0][42] = 8,
[2][0][RTW89_FCC][1][44] = 8,
- [2][0][RTW89_FCC][2][44] = 70,
[2][0][RTW89_ETSI][1][44] = 56,
[2][0][RTW89_ETSI][0][44] = 18,
[2][0][RTW89_MKK][1][44] = 32,
[2][0][RTW89_MKK][0][44] = 14,
[2][0][RTW89_IC][1][44] = 8,
- [2][0][RTW89_IC][2][44] = 70,
[2][0][RTW89_KCC][1][44] = -2,
[2][0][RTW89_KCC][0][44] = -2,
[2][0][RTW89_ACMA][1][44] = 56,
@@ -57709,13 +55301,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][44] = 52,
[2][0][RTW89_THAILAND][0][44] = 8,
[2][0][RTW89_FCC][1][45] = 8,
- [2][0][RTW89_FCC][2][45] = 127,
[2][0][RTW89_ETSI][1][45] = 127,
[2][0][RTW89_ETSI][0][45] = 127,
[2][0][RTW89_MKK][1][45] = 127,
[2][0][RTW89_MKK][0][45] = 127,
[2][0][RTW89_IC][1][45] = 8,
- [2][0][RTW89_IC][2][45] = 70,
[2][0][RTW89_KCC][1][45] = -2,
[2][0][RTW89_KCC][0][45] = 127,
[2][0][RTW89_ACMA][1][45] = 127,
@@ -57728,13 +55318,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][45] = 127,
[2][0][RTW89_THAILAND][0][45] = 127,
[2][0][RTW89_FCC][1][47] = 8,
- [2][0][RTW89_FCC][2][47] = 127,
[2][0][RTW89_ETSI][1][47] = 127,
[2][0][RTW89_ETSI][0][47] = 127,
[2][0][RTW89_MKK][1][47] = 127,
[2][0][RTW89_MKK][0][47] = 127,
[2][0][RTW89_IC][1][47] = 8,
- [2][0][RTW89_IC][2][47] = 70,
[2][0][RTW89_KCC][1][47] = -2,
[2][0][RTW89_KCC][0][47] = 127,
[2][0][RTW89_ACMA][1][47] = 127,
@@ -57747,13 +55335,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][47] = 127,
[2][0][RTW89_THAILAND][0][47] = 127,
[2][0][RTW89_FCC][1][49] = 8,
- [2][0][RTW89_FCC][2][49] = 127,
[2][0][RTW89_ETSI][1][49] = 127,
[2][0][RTW89_ETSI][0][49] = 127,
[2][0][RTW89_MKK][1][49] = 127,
[2][0][RTW89_MKK][0][49] = 127,
[2][0][RTW89_IC][1][49] = 8,
- [2][0][RTW89_IC][2][49] = 70,
[2][0][RTW89_KCC][1][49] = -2,
[2][0][RTW89_KCC][0][49] = 127,
[2][0][RTW89_ACMA][1][49] = 127,
@@ -57766,13 +55352,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][49] = 127,
[2][0][RTW89_THAILAND][0][49] = 127,
[2][0][RTW89_FCC][1][51] = 8,
- [2][0][RTW89_FCC][2][51] = 127,
[2][0][RTW89_ETSI][1][51] = 127,
[2][0][RTW89_ETSI][0][51] = 127,
[2][0][RTW89_MKK][1][51] = 127,
[2][0][RTW89_MKK][0][51] = 127,
[2][0][RTW89_IC][1][51] = 8,
- [2][0][RTW89_IC][2][51] = 70,
[2][0][RTW89_KCC][1][51] = -2,
[2][0][RTW89_KCC][0][51] = 127,
[2][0][RTW89_ACMA][1][51] = 127,
@@ -57785,13 +55369,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][51] = 127,
[2][0][RTW89_THAILAND][0][51] = 127,
[2][0][RTW89_FCC][1][53] = 8,
- [2][0][RTW89_FCC][2][53] = 127,
[2][0][RTW89_ETSI][1][53] = 127,
[2][0][RTW89_ETSI][0][53] = 127,
[2][0][RTW89_MKK][1][53] = 127,
[2][0][RTW89_MKK][0][53] = 127,
[2][0][RTW89_IC][1][53] = 8,
- [2][0][RTW89_IC][2][53] = 70,
[2][0][RTW89_KCC][1][53] = -2,
[2][0][RTW89_KCC][0][53] = 127,
[2][0][RTW89_ACMA][1][53] = 127,
@@ -57804,13 +55386,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][53] = 127,
[2][0][RTW89_THAILAND][0][53] = 127,
[2][0][RTW89_FCC][1][55] = 8,
- [2][0][RTW89_FCC][2][55] = 68,
[2][0][RTW89_ETSI][1][55] = 127,
[2][0][RTW89_ETSI][0][55] = 127,
[2][0][RTW89_MKK][1][55] = 127,
[2][0][RTW89_MKK][0][55] = 127,
[2][0][RTW89_IC][1][55] = 8,
- [2][0][RTW89_IC][2][55] = 68,
[2][0][RTW89_KCC][1][55] = -2,
[2][0][RTW89_KCC][0][55] = 127,
[2][0][RTW89_ACMA][1][55] = 127,
@@ -57823,13 +55403,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][55] = 127,
[2][0][RTW89_THAILAND][0][55] = 127,
[2][0][RTW89_FCC][1][57] = 8,
- [2][0][RTW89_FCC][2][57] = 68,
[2][0][RTW89_ETSI][1][57] = 127,
[2][0][RTW89_ETSI][0][57] = 127,
[2][0][RTW89_MKK][1][57] = 127,
[2][0][RTW89_MKK][0][57] = 127,
[2][0][RTW89_IC][1][57] = 8,
- [2][0][RTW89_IC][2][57] = 68,
[2][0][RTW89_KCC][1][57] = -2,
[2][0][RTW89_KCC][0][57] = 127,
[2][0][RTW89_ACMA][1][57] = 127,
@@ -57842,13 +55420,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][57] = 127,
[2][0][RTW89_THAILAND][0][57] = 127,
[2][0][RTW89_FCC][1][59] = 8,
- [2][0][RTW89_FCC][2][59] = 68,
[2][0][RTW89_ETSI][1][59] = 127,
[2][0][RTW89_ETSI][0][59] = 127,
[2][0][RTW89_MKK][1][59] = 127,
[2][0][RTW89_MKK][0][59] = 127,
[2][0][RTW89_IC][1][59] = 8,
- [2][0][RTW89_IC][2][59] = 68,
[2][0][RTW89_KCC][1][59] = -2,
[2][0][RTW89_KCC][0][59] = 127,
[2][0][RTW89_ACMA][1][59] = 127,
@@ -57861,13 +55437,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][59] = 127,
[2][0][RTW89_THAILAND][0][59] = 127,
[2][0][RTW89_FCC][1][60] = 8,
- [2][0][RTW89_FCC][2][60] = 68,
[2][0][RTW89_ETSI][1][60] = 127,
[2][0][RTW89_ETSI][0][60] = 127,
[2][0][RTW89_MKK][1][60] = 127,
[2][0][RTW89_MKK][0][60] = 127,
[2][0][RTW89_IC][1][60] = 8,
- [2][0][RTW89_IC][2][60] = 68,
[2][0][RTW89_KCC][1][60] = -2,
[2][0][RTW89_KCC][0][60] = 127,
[2][0][RTW89_ACMA][1][60] = 127,
@@ -57880,13 +55454,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][60] = 127,
[2][0][RTW89_THAILAND][0][60] = 127,
[2][0][RTW89_FCC][1][62] = 8,
- [2][0][RTW89_FCC][2][62] = 68,
[2][0][RTW89_ETSI][1][62] = 127,
[2][0][RTW89_ETSI][0][62] = 127,
[2][0][RTW89_MKK][1][62] = 127,
[2][0][RTW89_MKK][0][62] = 127,
[2][0][RTW89_IC][1][62] = 8,
- [2][0][RTW89_IC][2][62] = 68,
[2][0][RTW89_KCC][1][62] = -2,
[2][0][RTW89_KCC][0][62] = 127,
[2][0][RTW89_ACMA][1][62] = 127,
@@ -57899,13 +55471,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][62] = 127,
[2][0][RTW89_THAILAND][0][62] = 127,
[2][0][RTW89_FCC][1][64] = 8,
- [2][0][RTW89_FCC][2][64] = 68,
[2][0][RTW89_ETSI][1][64] = 127,
[2][0][RTW89_ETSI][0][64] = 127,
[2][0][RTW89_MKK][1][64] = 127,
[2][0][RTW89_MKK][0][64] = 127,
[2][0][RTW89_IC][1][64] = 8,
- [2][0][RTW89_IC][2][64] = 68,
[2][0][RTW89_KCC][1][64] = -2,
[2][0][RTW89_KCC][0][64] = 127,
[2][0][RTW89_ACMA][1][64] = 127,
@@ -57918,13 +55488,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][64] = 127,
[2][0][RTW89_THAILAND][0][64] = 127,
[2][0][RTW89_FCC][1][66] = 8,
- [2][0][RTW89_FCC][2][66] = 68,
[2][0][RTW89_ETSI][1][66] = 127,
[2][0][RTW89_ETSI][0][66] = 127,
[2][0][RTW89_MKK][1][66] = 127,
[2][0][RTW89_MKK][0][66] = 127,
[2][0][RTW89_IC][1][66] = 8,
- [2][0][RTW89_IC][2][66] = 68,
[2][0][RTW89_KCC][1][66] = -2,
[2][0][RTW89_KCC][0][66] = 127,
[2][0][RTW89_ACMA][1][66] = 127,
@@ -57937,13 +55505,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][66] = 127,
[2][0][RTW89_THAILAND][0][66] = 127,
[2][0][RTW89_FCC][1][68] = 8,
- [2][0][RTW89_FCC][2][68] = 68,
[2][0][RTW89_ETSI][1][68] = 127,
[2][0][RTW89_ETSI][0][68] = 127,
[2][0][RTW89_MKK][1][68] = 127,
[2][0][RTW89_MKK][0][68] = 127,
[2][0][RTW89_IC][1][68] = 8,
- [2][0][RTW89_IC][2][68] = 68,
[2][0][RTW89_KCC][1][68] = -2,
[2][0][RTW89_KCC][0][68] = 127,
[2][0][RTW89_ACMA][1][68] = 127,
@@ -57956,13 +55522,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][68] = 127,
[2][0][RTW89_THAILAND][0][68] = 127,
[2][0][RTW89_FCC][1][70] = 8,
- [2][0][RTW89_FCC][2][70] = 68,
[2][0][RTW89_ETSI][1][70] = 127,
[2][0][RTW89_ETSI][0][70] = 127,
[2][0][RTW89_MKK][1][70] = 127,
[2][0][RTW89_MKK][0][70] = 127,
[2][0][RTW89_IC][1][70] = 8,
- [2][0][RTW89_IC][2][70] = 68,
[2][0][RTW89_KCC][1][70] = -2,
[2][0][RTW89_KCC][0][70] = 127,
[2][0][RTW89_ACMA][1][70] = 127,
@@ -57975,13 +55539,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][70] = 127,
[2][0][RTW89_THAILAND][0][70] = 127,
[2][0][RTW89_FCC][1][72] = 8,
- [2][0][RTW89_FCC][2][72] = 68,
[2][0][RTW89_ETSI][1][72] = 127,
[2][0][RTW89_ETSI][0][72] = 127,
[2][0][RTW89_MKK][1][72] = 127,
[2][0][RTW89_MKK][0][72] = 127,
[2][0][RTW89_IC][1][72] = 8,
- [2][0][RTW89_IC][2][72] = 68,
[2][0][RTW89_KCC][1][72] = -2,
[2][0][RTW89_KCC][0][72] = 127,
[2][0][RTW89_ACMA][1][72] = 127,
@@ -57994,13 +55556,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][72] = 127,
[2][0][RTW89_THAILAND][0][72] = 127,
[2][0][RTW89_FCC][1][74] = 8,
- [2][0][RTW89_FCC][2][74] = 68,
[2][0][RTW89_ETSI][1][74] = 127,
[2][0][RTW89_ETSI][0][74] = 127,
[2][0][RTW89_MKK][1][74] = 127,
[2][0][RTW89_MKK][0][74] = 127,
[2][0][RTW89_IC][1][74] = 8,
- [2][0][RTW89_IC][2][74] = 68,
[2][0][RTW89_KCC][1][74] = -2,
[2][0][RTW89_KCC][0][74] = 127,
[2][0][RTW89_ACMA][1][74] = 127,
@@ -58013,13 +55573,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][74] = 127,
[2][0][RTW89_THAILAND][0][74] = 127,
[2][0][RTW89_FCC][1][75] = 8,
- [2][0][RTW89_FCC][2][75] = 68,
[2][0][RTW89_ETSI][1][75] = 127,
[2][0][RTW89_ETSI][0][75] = 127,
[2][0][RTW89_MKK][1][75] = 127,
[2][0][RTW89_MKK][0][75] = 127,
[2][0][RTW89_IC][1][75] = 8,
- [2][0][RTW89_IC][2][75] = 68,
[2][0][RTW89_KCC][1][75] = -2,
[2][0][RTW89_KCC][0][75] = 127,
[2][0][RTW89_ACMA][1][75] = 127,
@@ -58032,13 +55590,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][75] = 127,
[2][0][RTW89_THAILAND][0][75] = 127,
[2][0][RTW89_FCC][1][77] = 8,
- [2][0][RTW89_FCC][2][77] = 68,
[2][0][RTW89_ETSI][1][77] = 127,
[2][0][RTW89_ETSI][0][77] = 127,
[2][0][RTW89_MKK][1][77] = 127,
[2][0][RTW89_MKK][0][77] = 127,
[2][0][RTW89_IC][1][77] = 8,
- [2][0][RTW89_IC][2][77] = 68,
[2][0][RTW89_KCC][1][77] = -2,
[2][0][RTW89_KCC][0][77] = 127,
[2][0][RTW89_ACMA][1][77] = 127,
@@ -58051,13 +55607,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][77] = 127,
[2][0][RTW89_THAILAND][0][77] = 127,
[2][0][RTW89_FCC][1][79] = 8,
- [2][0][RTW89_FCC][2][79] = 68,
[2][0][RTW89_ETSI][1][79] = 127,
[2][0][RTW89_ETSI][0][79] = 127,
[2][0][RTW89_MKK][1][79] = 127,
[2][0][RTW89_MKK][0][79] = 127,
[2][0][RTW89_IC][1][79] = 8,
- [2][0][RTW89_IC][2][79] = 68,
[2][0][RTW89_KCC][1][79] = -2,
[2][0][RTW89_KCC][0][79] = 127,
[2][0][RTW89_ACMA][1][79] = 127,
@@ -58070,13 +55624,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][79] = 127,
[2][0][RTW89_THAILAND][0][79] = 127,
[2][0][RTW89_FCC][1][81] = 8,
- [2][0][RTW89_FCC][2][81] = 68,
[2][0][RTW89_ETSI][1][81] = 127,
[2][0][RTW89_ETSI][0][81] = 127,
[2][0][RTW89_MKK][1][81] = 127,
[2][0][RTW89_MKK][0][81] = 127,
[2][0][RTW89_IC][1][81] = 8,
- [2][0][RTW89_IC][2][81] = 68,
[2][0][RTW89_KCC][1][81] = -2,
[2][0][RTW89_KCC][0][81] = 127,
[2][0][RTW89_ACMA][1][81] = 127,
@@ -58089,13 +55641,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][81] = 127,
[2][0][RTW89_THAILAND][0][81] = 127,
[2][0][RTW89_FCC][1][83] = 8,
- [2][0][RTW89_FCC][2][83] = 68,
[2][0][RTW89_ETSI][1][83] = 127,
[2][0][RTW89_ETSI][0][83] = 127,
[2][0][RTW89_MKK][1][83] = 127,
[2][0][RTW89_MKK][0][83] = 127,
[2][0][RTW89_IC][1][83] = 8,
- [2][0][RTW89_IC][2][83] = 68,
[2][0][RTW89_KCC][1][83] = -2,
[2][0][RTW89_KCC][0][83] = 127,
[2][0][RTW89_ACMA][1][83] = 127,
@@ -58108,13 +55658,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][83] = 127,
[2][0][RTW89_THAILAND][0][83] = 127,
[2][0][RTW89_FCC][1][85] = 8,
- [2][0][RTW89_FCC][2][85] = 68,
[2][0][RTW89_ETSI][1][85] = 127,
[2][0][RTW89_ETSI][0][85] = 127,
[2][0][RTW89_MKK][1][85] = 127,
[2][0][RTW89_MKK][0][85] = 127,
[2][0][RTW89_IC][1][85] = 8,
- [2][0][RTW89_IC][2][85] = 68,
[2][0][RTW89_KCC][1][85] = -2,
[2][0][RTW89_KCC][0][85] = 127,
[2][0][RTW89_ACMA][1][85] = 127,
@@ -58127,13 +55675,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][85] = 127,
[2][0][RTW89_THAILAND][0][85] = 127,
[2][0][RTW89_FCC][1][87] = 8,
- [2][0][RTW89_FCC][2][87] = 127,
[2][0][RTW89_ETSI][1][87] = 127,
[2][0][RTW89_ETSI][0][87] = 127,
[2][0][RTW89_MKK][1][87] = 127,
[2][0][RTW89_MKK][0][87] = 127,
[2][0][RTW89_IC][1][87] = 8,
- [2][0][RTW89_IC][2][87] = 127,
[2][0][RTW89_KCC][1][87] = -2,
[2][0][RTW89_KCC][0][87] = 127,
[2][0][RTW89_ACMA][1][87] = 127,
@@ -58146,13 +55692,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][87] = 127,
[2][0][RTW89_THAILAND][0][87] = 127,
[2][0][RTW89_FCC][1][89] = 8,
- [2][0][RTW89_FCC][2][89] = 127,
[2][0][RTW89_ETSI][1][89] = 127,
[2][0][RTW89_ETSI][0][89] = 127,
[2][0][RTW89_MKK][1][89] = 127,
[2][0][RTW89_MKK][0][89] = 127,
[2][0][RTW89_IC][1][89] = 8,
- [2][0][RTW89_IC][2][89] = 127,
[2][0][RTW89_KCC][1][89] = -2,
[2][0][RTW89_KCC][0][89] = 127,
[2][0][RTW89_ACMA][1][89] = 127,
@@ -58165,13 +55709,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][89] = 127,
[2][0][RTW89_THAILAND][0][89] = 127,
[2][0][RTW89_FCC][1][90] = 8,
- [2][0][RTW89_FCC][2][90] = 127,
[2][0][RTW89_ETSI][1][90] = 127,
[2][0][RTW89_ETSI][0][90] = 127,
[2][0][RTW89_MKK][1][90] = 127,
[2][0][RTW89_MKK][0][90] = 127,
[2][0][RTW89_IC][1][90] = 8,
- [2][0][RTW89_IC][2][90] = 127,
[2][0][RTW89_KCC][1][90] = -2,
[2][0][RTW89_KCC][0][90] = 127,
[2][0][RTW89_ACMA][1][90] = 127,
@@ -58184,13 +55726,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][90] = 127,
[2][0][RTW89_THAILAND][0][90] = 127,
[2][0][RTW89_FCC][1][92] = 8,
- [2][0][RTW89_FCC][2][92] = 127,
[2][0][RTW89_ETSI][1][92] = 127,
[2][0][RTW89_ETSI][0][92] = 127,
[2][0][RTW89_MKK][1][92] = 127,
[2][0][RTW89_MKK][0][92] = 127,
[2][0][RTW89_IC][1][92] = 8,
- [2][0][RTW89_IC][2][92] = 127,
[2][0][RTW89_KCC][1][92] = -2,
[2][0][RTW89_KCC][0][92] = 127,
[2][0][RTW89_ACMA][1][92] = 127,
@@ -58203,13 +55743,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][92] = 127,
[2][0][RTW89_THAILAND][0][92] = 127,
[2][0][RTW89_FCC][1][94] = 8,
- [2][0][RTW89_FCC][2][94] = 127,
[2][0][RTW89_ETSI][1][94] = 127,
[2][0][RTW89_ETSI][0][94] = 127,
[2][0][RTW89_MKK][1][94] = 127,
[2][0][RTW89_MKK][0][94] = 127,
[2][0][RTW89_IC][1][94] = 8,
- [2][0][RTW89_IC][2][94] = 127,
[2][0][RTW89_KCC][1][94] = -2,
[2][0][RTW89_KCC][0][94] = 127,
[2][0][RTW89_ACMA][1][94] = 127,
@@ -58222,13 +55760,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][94] = 127,
[2][0][RTW89_THAILAND][0][94] = 127,
[2][0][RTW89_FCC][1][96] = 8,
- [2][0][RTW89_FCC][2][96] = 127,
[2][0][RTW89_ETSI][1][96] = 127,
[2][0][RTW89_ETSI][0][96] = 127,
[2][0][RTW89_MKK][1][96] = 127,
[2][0][RTW89_MKK][0][96] = 127,
[2][0][RTW89_IC][1][96] = 8,
- [2][0][RTW89_IC][2][96] = 127,
[2][0][RTW89_KCC][1][96] = -2,
[2][0][RTW89_KCC][0][96] = 127,
[2][0][RTW89_ACMA][1][96] = 127,
@@ -58241,13 +55777,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][96] = 127,
[2][0][RTW89_THAILAND][0][96] = 127,
[2][0][RTW89_FCC][1][98] = 8,
- [2][0][RTW89_FCC][2][98] = 127,
[2][0][RTW89_ETSI][1][98] = 127,
[2][0][RTW89_ETSI][0][98] = 127,
[2][0][RTW89_MKK][1][98] = 127,
[2][0][RTW89_MKK][0][98] = 127,
[2][0][RTW89_IC][1][98] = 8,
- [2][0][RTW89_IC][2][98] = 127,
[2][0][RTW89_KCC][1][98] = -2,
[2][0][RTW89_KCC][0][98] = 127,
[2][0][RTW89_ACMA][1][98] = 127,
@@ -58260,13 +55794,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][98] = 127,
[2][0][RTW89_THAILAND][0][98] = 127,
[2][0][RTW89_FCC][1][100] = 8,
- [2][0][RTW89_FCC][2][100] = 127,
[2][0][RTW89_ETSI][1][100] = 127,
[2][0][RTW89_ETSI][0][100] = 127,
[2][0][RTW89_MKK][1][100] = 127,
[2][0][RTW89_MKK][0][100] = 127,
[2][0][RTW89_IC][1][100] = 8,
- [2][0][RTW89_IC][2][100] = 127,
[2][0][RTW89_KCC][1][100] = -2,
[2][0][RTW89_KCC][0][100] = 127,
[2][0][RTW89_ACMA][1][100] = 127,
@@ -58279,13 +55811,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][100] = 127,
[2][0][RTW89_THAILAND][0][100] = 127,
[2][0][RTW89_FCC][1][102] = 8,
- [2][0][RTW89_FCC][2][102] = 127,
[2][0][RTW89_ETSI][1][102] = 127,
[2][0][RTW89_ETSI][0][102] = 127,
[2][0][RTW89_MKK][1][102] = 127,
[2][0][RTW89_MKK][0][102] = 127,
[2][0][RTW89_IC][1][102] = 8,
- [2][0][RTW89_IC][2][102] = 127,
[2][0][RTW89_KCC][1][102] = -2,
[2][0][RTW89_KCC][0][102] = 127,
[2][0][RTW89_ACMA][1][102] = 127,
@@ -58298,13 +55828,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][102] = 127,
[2][0][RTW89_THAILAND][0][102] = 127,
[2][0][RTW89_FCC][1][104] = 8,
- [2][0][RTW89_FCC][2][104] = 127,
[2][0][RTW89_ETSI][1][104] = 127,
[2][0][RTW89_ETSI][0][104] = 127,
[2][0][RTW89_MKK][1][104] = 127,
[2][0][RTW89_MKK][0][104] = 127,
[2][0][RTW89_IC][1][104] = 8,
- [2][0][RTW89_IC][2][104] = 127,
[2][0][RTW89_KCC][1][104] = -2,
[2][0][RTW89_KCC][0][104] = 127,
[2][0][RTW89_ACMA][1][104] = 127,
@@ -58317,13 +55845,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][104] = 127,
[2][0][RTW89_THAILAND][0][104] = 127,
[2][0][RTW89_FCC][1][105] = 8,
- [2][0][RTW89_FCC][2][105] = 127,
[2][0][RTW89_ETSI][1][105] = 127,
[2][0][RTW89_ETSI][0][105] = 127,
[2][0][RTW89_MKK][1][105] = 127,
[2][0][RTW89_MKK][0][105] = 127,
[2][0][RTW89_IC][1][105] = 8,
- [2][0][RTW89_IC][2][105] = 127,
[2][0][RTW89_KCC][1][105] = -2,
[2][0][RTW89_KCC][0][105] = 127,
[2][0][RTW89_ACMA][1][105] = 127,
@@ -58336,13 +55862,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][105] = 127,
[2][0][RTW89_THAILAND][0][105] = 127,
[2][0][RTW89_FCC][1][107] = 10,
- [2][0][RTW89_FCC][2][107] = 127,
[2][0][RTW89_ETSI][1][107] = 127,
[2][0][RTW89_ETSI][0][107] = 127,
[2][0][RTW89_MKK][1][107] = 127,
[2][0][RTW89_MKK][0][107] = 127,
[2][0][RTW89_IC][1][107] = 10,
- [2][0][RTW89_IC][2][107] = 127,
[2][0][RTW89_KCC][1][107] = -2,
[2][0][RTW89_KCC][0][107] = 127,
[2][0][RTW89_ACMA][1][107] = 127,
@@ -58355,13 +55879,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][107] = 127,
[2][0][RTW89_THAILAND][0][107] = 127,
[2][0][RTW89_FCC][1][109] = 12,
- [2][0][RTW89_FCC][2][109] = 127,
[2][0][RTW89_ETSI][1][109] = 127,
[2][0][RTW89_ETSI][0][109] = 127,
[2][0][RTW89_MKK][1][109] = 127,
[2][0][RTW89_MKK][0][109] = 127,
[2][0][RTW89_IC][1][109] = 12,
- [2][0][RTW89_IC][2][109] = 127,
[2][0][RTW89_KCC][1][109] = 127,
[2][0][RTW89_KCC][0][109] = 127,
[2][0][RTW89_ACMA][1][109] = 127,
@@ -58374,13 +55896,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][109] = 127,
[2][0][RTW89_THAILAND][0][109] = 127,
[2][0][RTW89_FCC][1][111] = 127,
- [2][0][RTW89_FCC][2][111] = 127,
[2][0][RTW89_ETSI][1][111] = 127,
[2][0][RTW89_ETSI][0][111] = 127,
[2][0][RTW89_MKK][1][111] = 127,
[2][0][RTW89_MKK][0][111] = 127,
[2][0][RTW89_IC][1][111] = 127,
- [2][0][RTW89_IC][2][111] = 127,
[2][0][RTW89_KCC][1][111] = 127,
[2][0][RTW89_KCC][0][111] = 127,
[2][0][RTW89_ACMA][1][111] = 127,
@@ -58393,13 +55913,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][111] = 127,
[2][0][RTW89_THAILAND][0][111] = 127,
[2][0][RTW89_FCC][1][113] = 127,
- [2][0][RTW89_FCC][2][113] = 127,
[2][0][RTW89_ETSI][1][113] = 127,
[2][0][RTW89_ETSI][0][113] = 127,
[2][0][RTW89_MKK][1][113] = 127,
[2][0][RTW89_MKK][0][113] = 127,
[2][0][RTW89_IC][1][113] = 127,
- [2][0][RTW89_IC][2][113] = 127,
[2][0][RTW89_KCC][1][113] = 127,
[2][0][RTW89_KCC][0][113] = 127,
[2][0][RTW89_ACMA][1][113] = 127,
@@ -58412,13 +55930,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][113] = 127,
[2][0][RTW89_THAILAND][0][113] = 127,
[2][0][RTW89_FCC][1][115] = 127,
- [2][0][RTW89_FCC][2][115] = 127,
[2][0][RTW89_ETSI][1][115] = 127,
[2][0][RTW89_ETSI][0][115] = 127,
[2][0][RTW89_MKK][1][115] = 127,
[2][0][RTW89_MKK][0][115] = 127,
[2][0][RTW89_IC][1][115] = 127,
- [2][0][RTW89_IC][2][115] = 127,
[2][0][RTW89_KCC][1][115] = 127,
[2][0][RTW89_KCC][0][115] = 127,
[2][0][RTW89_ACMA][1][115] = 127,
@@ -58431,13 +55947,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][115] = 127,
[2][0][RTW89_THAILAND][0][115] = 127,
[2][0][RTW89_FCC][1][117] = 127,
- [2][0][RTW89_FCC][2][117] = 127,
[2][0][RTW89_ETSI][1][117] = 127,
[2][0][RTW89_ETSI][0][117] = 127,
[2][0][RTW89_MKK][1][117] = 127,
[2][0][RTW89_MKK][0][117] = 127,
[2][0][RTW89_IC][1][117] = 127,
- [2][0][RTW89_IC][2][117] = 127,
[2][0][RTW89_KCC][1][117] = 127,
[2][0][RTW89_KCC][0][117] = 127,
[2][0][RTW89_ACMA][1][117] = 127,
@@ -58450,13 +55964,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][117] = 127,
[2][0][RTW89_THAILAND][0][117] = 127,
[2][0][RTW89_FCC][1][119] = 127,
- [2][0][RTW89_FCC][2][119] = 127,
[2][0][RTW89_ETSI][1][119] = 127,
[2][0][RTW89_ETSI][0][119] = 127,
[2][0][RTW89_MKK][1][119] = 127,
[2][0][RTW89_MKK][0][119] = 127,
[2][0][RTW89_IC][1][119] = 127,
- [2][0][RTW89_IC][2][119] = 127,
[2][0][RTW89_KCC][1][119] = 127,
[2][0][RTW89_KCC][0][119] = 127,
[2][0][RTW89_ACMA][1][119] = 127,
@@ -58469,13 +55981,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][0][RTW89_THAILAND][1][119] = 127,
[2][0][RTW89_THAILAND][0][119] = 127,
[2][1][RTW89_FCC][1][0] = -16,
- [2][1][RTW89_FCC][2][0] = 54,
[2][1][RTW89_ETSI][1][0] = 44,
[2][1][RTW89_ETSI][0][0] = 6,
[2][1][RTW89_MKK][1][0] = 42,
[2][1][RTW89_MKK][0][0] = 2,
[2][1][RTW89_IC][1][0] = -16,
- [2][1][RTW89_IC][2][0] = 54,
[2][1][RTW89_KCC][1][0] = -14,
[2][1][RTW89_KCC][0][0] = -14,
[2][1][RTW89_ACMA][1][0] = 44,
@@ -58488,13 +55998,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][0] = 28,
[2][1][RTW89_THAILAND][0][0] = -16,
[2][1][RTW89_FCC][1][2] = -16,
- [2][1][RTW89_FCC][2][2] = 54,
[2][1][RTW89_ETSI][1][2] = 44,
[2][1][RTW89_ETSI][0][2] = 6,
[2][1][RTW89_MKK][1][2] = 40,
[2][1][RTW89_MKK][0][2] = 2,
[2][1][RTW89_IC][1][2] = -16,
- [2][1][RTW89_IC][2][2] = 54,
[2][1][RTW89_KCC][1][2] = -14,
[2][1][RTW89_KCC][0][2] = -14,
[2][1][RTW89_ACMA][1][2] = 44,
@@ -58507,13 +56015,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][2] = 28,
[2][1][RTW89_THAILAND][0][2] = -16,
[2][1][RTW89_FCC][1][4] = -16,
- [2][1][RTW89_FCC][2][4] = 54,
[2][1][RTW89_ETSI][1][4] = 44,
[2][1][RTW89_ETSI][0][4] = 6,
[2][1][RTW89_MKK][1][4] = 40,
[2][1][RTW89_MKK][0][4] = 2,
[2][1][RTW89_IC][1][4] = -16,
- [2][1][RTW89_IC][2][4] = 54,
[2][1][RTW89_KCC][1][4] = -14,
[2][1][RTW89_KCC][0][4] = -14,
[2][1][RTW89_ACMA][1][4] = 44,
@@ -58526,13 +56032,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][4] = 28,
[2][1][RTW89_THAILAND][0][4] = -16,
[2][1][RTW89_FCC][1][6] = -16,
- [2][1][RTW89_FCC][2][6] = 54,
[2][1][RTW89_ETSI][1][6] = 44,
[2][1][RTW89_ETSI][0][6] = 6,
[2][1][RTW89_MKK][1][6] = 40,
[2][1][RTW89_MKK][0][6] = 2,
[2][1][RTW89_IC][1][6] = -16,
- [2][1][RTW89_IC][2][6] = 54,
[2][1][RTW89_KCC][1][6] = -14,
[2][1][RTW89_KCC][0][6] = -14,
[2][1][RTW89_ACMA][1][6] = 44,
@@ -58545,13 +56049,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][6] = 28,
[2][1][RTW89_THAILAND][0][6] = -16,
[2][1][RTW89_FCC][1][8] = -16,
- [2][1][RTW89_FCC][2][8] = 54,
[2][1][RTW89_ETSI][1][8] = 44,
[2][1][RTW89_ETSI][0][8] = 6,
[2][1][RTW89_MKK][1][8] = 40,
[2][1][RTW89_MKK][0][8] = 2,
[2][1][RTW89_IC][1][8] = -16,
- [2][1][RTW89_IC][2][8] = 54,
[2][1][RTW89_KCC][1][8] = -14,
[2][1][RTW89_KCC][0][8] = -14,
[2][1][RTW89_ACMA][1][8] = 44,
@@ -58564,13 +56066,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][8] = 28,
[2][1][RTW89_THAILAND][0][8] = -16,
[2][1][RTW89_FCC][1][10] = -16,
- [2][1][RTW89_FCC][2][10] = 54,
[2][1][RTW89_ETSI][1][10] = 44,
[2][1][RTW89_ETSI][0][10] = 6,
[2][1][RTW89_MKK][1][10] = 40,
[2][1][RTW89_MKK][0][10] = 2,
[2][1][RTW89_IC][1][10] = -16,
- [2][1][RTW89_IC][2][10] = 54,
[2][1][RTW89_KCC][1][10] = -14,
[2][1][RTW89_KCC][0][10] = -14,
[2][1][RTW89_ACMA][1][10] = 44,
@@ -58583,13 +56083,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][10] = 28,
[2][1][RTW89_THAILAND][0][10] = -16,
[2][1][RTW89_FCC][1][12] = -16,
- [2][1][RTW89_FCC][2][12] = 54,
[2][1][RTW89_ETSI][1][12] = 44,
[2][1][RTW89_ETSI][0][12] = 6,
[2][1][RTW89_MKK][1][12] = 40,
[2][1][RTW89_MKK][0][12] = 2,
[2][1][RTW89_IC][1][12] = -16,
- [2][1][RTW89_IC][2][12] = 54,
[2][1][RTW89_KCC][1][12] = -14,
[2][1][RTW89_KCC][0][12] = -14,
[2][1][RTW89_ACMA][1][12] = 44,
@@ -58602,13 +56100,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][12] = 28,
[2][1][RTW89_THAILAND][0][12] = -16,
[2][1][RTW89_FCC][1][14] = -16,
- [2][1][RTW89_FCC][2][14] = 54,
[2][1][RTW89_ETSI][1][14] = 44,
[2][1][RTW89_ETSI][0][14] = 6,
[2][1][RTW89_MKK][1][14] = 40,
[2][1][RTW89_MKK][0][14] = 2,
[2][1][RTW89_IC][1][14] = -16,
- [2][1][RTW89_IC][2][14] = 54,
[2][1][RTW89_KCC][1][14] = -14,
[2][1][RTW89_KCC][0][14] = -14,
[2][1][RTW89_ACMA][1][14] = 44,
@@ -58621,13 +56117,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][14] = 28,
[2][1][RTW89_THAILAND][0][14] = -16,
[2][1][RTW89_FCC][1][15] = -16,
- [2][1][RTW89_FCC][2][15] = 54,
[2][1][RTW89_ETSI][1][15] = 44,
[2][1][RTW89_ETSI][0][15] = 6,
[2][1][RTW89_MKK][1][15] = 40,
[2][1][RTW89_MKK][0][15] = 2,
[2][1][RTW89_IC][1][15] = -16,
- [2][1][RTW89_IC][2][15] = 54,
[2][1][RTW89_KCC][1][15] = -14,
[2][1][RTW89_KCC][0][15] = -14,
[2][1][RTW89_ACMA][1][15] = 44,
@@ -58640,13 +56134,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][15] = 28,
[2][1][RTW89_THAILAND][0][15] = -16,
[2][1][RTW89_FCC][1][17] = -16,
- [2][1][RTW89_FCC][2][17] = 54,
[2][1][RTW89_ETSI][1][17] = 44,
[2][1][RTW89_ETSI][0][17] = 6,
[2][1][RTW89_MKK][1][17] = 40,
[2][1][RTW89_MKK][0][17] = 2,
[2][1][RTW89_IC][1][17] = -16,
- [2][1][RTW89_IC][2][17] = 54,
[2][1][RTW89_KCC][1][17] = -14,
[2][1][RTW89_KCC][0][17] = -14,
[2][1][RTW89_ACMA][1][17] = 44,
@@ -58659,13 +56151,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][17] = 28,
[2][1][RTW89_THAILAND][0][17] = -16,
[2][1][RTW89_FCC][1][19] = -16,
- [2][1][RTW89_FCC][2][19] = 54,
[2][1][RTW89_ETSI][1][19] = 44,
[2][1][RTW89_ETSI][0][19] = 6,
[2][1][RTW89_MKK][1][19] = 40,
[2][1][RTW89_MKK][0][19] = 2,
[2][1][RTW89_IC][1][19] = -16,
- [2][1][RTW89_IC][2][19] = 54,
[2][1][RTW89_KCC][1][19] = -14,
[2][1][RTW89_KCC][0][19] = -14,
[2][1][RTW89_ACMA][1][19] = 44,
@@ -58678,13 +56168,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][19] = 28,
[2][1][RTW89_THAILAND][0][19] = -16,
[2][1][RTW89_FCC][1][21] = -16,
- [2][1][RTW89_FCC][2][21] = 54,
[2][1][RTW89_ETSI][1][21] = 44,
[2][1][RTW89_ETSI][0][21] = 6,
[2][1][RTW89_MKK][1][21] = 40,
[2][1][RTW89_MKK][0][21] = 2,
[2][1][RTW89_IC][1][21] = -16,
- [2][1][RTW89_IC][2][21] = 54,
[2][1][RTW89_KCC][1][21] = -14,
[2][1][RTW89_KCC][0][21] = -14,
[2][1][RTW89_ACMA][1][21] = 44,
@@ -58697,13 +56185,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][21] = 28,
[2][1][RTW89_THAILAND][0][21] = -16,
[2][1][RTW89_FCC][1][23] = -16,
- [2][1][RTW89_FCC][2][23] = 54,
[2][1][RTW89_ETSI][1][23] = 44,
[2][1][RTW89_ETSI][0][23] = 6,
[2][1][RTW89_MKK][1][23] = 40,
[2][1][RTW89_MKK][0][23] = 2,
[2][1][RTW89_IC][1][23] = -16,
- [2][1][RTW89_IC][2][23] = 54,
[2][1][RTW89_KCC][1][23] = -14,
[2][1][RTW89_KCC][0][23] = -14,
[2][1][RTW89_ACMA][1][23] = 44,
@@ -58716,13 +56202,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][23] = 30,
[2][1][RTW89_THAILAND][0][23] = -16,
[2][1][RTW89_FCC][1][25] = -16,
- [2][1][RTW89_FCC][2][25] = 54,
[2][1][RTW89_ETSI][1][25] = 44,
[2][1][RTW89_ETSI][0][25] = 6,
[2][1][RTW89_MKK][1][25] = 40,
[2][1][RTW89_MKK][0][25] = 2,
[2][1][RTW89_IC][1][25] = -16,
- [2][1][RTW89_IC][2][25] = 54,
[2][1][RTW89_KCC][1][25] = -14,
[2][1][RTW89_KCC][0][25] = -14,
[2][1][RTW89_ACMA][1][25] = 44,
@@ -58735,13 +56219,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][25] = 28,
[2][1][RTW89_THAILAND][0][25] = -16,
[2][1][RTW89_FCC][1][27] = -16,
- [2][1][RTW89_FCC][2][27] = 54,
[2][1][RTW89_ETSI][1][27] = 44,
[2][1][RTW89_ETSI][0][27] = 6,
[2][1][RTW89_MKK][1][27] = 40,
[2][1][RTW89_MKK][0][27] = 2,
[2][1][RTW89_IC][1][27] = -16,
- [2][1][RTW89_IC][2][27] = 54,
[2][1][RTW89_KCC][1][27] = -14,
[2][1][RTW89_KCC][0][27] = -14,
[2][1][RTW89_ACMA][1][27] = 44,
@@ -58754,13 +56236,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][27] = 28,
[2][1][RTW89_THAILAND][0][27] = -16,
[2][1][RTW89_FCC][1][29] = -16,
- [2][1][RTW89_FCC][2][29] = 54,
[2][1][RTW89_ETSI][1][29] = 44,
[2][1][RTW89_ETSI][0][29] = 6,
[2][1][RTW89_MKK][1][29] = 40,
[2][1][RTW89_MKK][0][29] = 2,
[2][1][RTW89_IC][1][29] = -16,
- [2][1][RTW89_IC][2][29] = 54,
[2][1][RTW89_KCC][1][29] = -14,
[2][1][RTW89_KCC][0][29] = -14,
[2][1][RTW89_ACMA][1][29] = 44,
@@ -58773,13 +56253,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][29] = 28,
[2][1][RTW89_THAILAND][0][29] = -16,
[2][1][RTW89_FCC][1][30] = -16,
- [2][1][RTW89_FCC][2][30] = 54,
[2][1][RTW89_ETSI][1][30] = 44,
[2][1][RTW89_ETSI][0][30] = 6,
[2][1][RTW89_MKK][1][30] = 40,
[2][1][RTW89_MKK][0][30] = 2,
[2][1][RTW89_IC][1][30] = -16,
- [2][1][RTW89_IC][2][30] = 54,
[2][1][RTW89_KCC][1][30] = -14,
[2][1][RTW89_KCC][0][30] = -14,
[2][1][RTW89_ACMA][1][30] = 44,
@@ -58792,13 +56270,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][30] = 28,
[2][1][RTW89_THAILAND][0][30] = -16,
[2][1][RTW89_FCC][1][32] = -16,
- [2][1][RTW89_FCC][2][32] = 54,
[2][1][RTW89_ETSI][1][32] = 44,
[2][1][RTW89_ETSI][0][32] = 6,
[2][1][RTW89_MKK][1][32] = 40,
[2][1][RTW89_MKK][0][32] = 2,
[2][1][RTW89_IC][1][32] = -16,
- [2][1][RTW89_IC][2][32] = 54,
[2][1][RTW89_KCC][1][32] = -14,
[2][1][RTW89_KCC][0][32] = -14,
[2][1][RTW89_ACMA][1][32] = 44,
@@ -58811,13 +56287,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][32] = 28,
[2][1][RTW89_THAILAND][0][32] = -16,
[2][1][RTW89_FCC][1][34] = -16,
- [2][1][RTW89_FCC][2][34] = 54,
[2][1][RTW89_ETSI][1][34] = 44,
[2][1][RTW89_ETSI][0][34] = 6,
[2][1][RTW89_MKK][1][34] = 40,
[2][1][RTW89_MKK][0][34] = 2,
[2][1][RTW89_IC][1][34] = -16,
- [2][1][RTW89_IC][2][34] = 54,
[2][1][RTW89_KCC][1][34] = -14,
[2][1][RTW89_KCC][0][34] = -14,
[2][1][RTW89_ACMA][1][34] = 44,
@@ -58830,13 +56304,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][34] = 28,
[2][1][RTW89_THAILAND][0][34] = -16,
[2][1][RTW89_FCC][1][36] = -16,
- [2][1][RTW89_FCC][2][36] = 54,
[2][1][RTW89_ETSI][1][36] = 44,
[2][1][RTW89_ETSI][0][36] = 6,
[2][1][RTW89_MKK][1][36] = 40,
[2][1][RTW89_MKK][0][36] = 2,
[2][1][RTW89_IC][1][36] = -16,
- [2][1][RTW89_IC][2][36] = 54,
[2][1][RTW89_KCC][1][36] = -14,
[2][1][RTW89_KCC][0][36] = -14,
[2][1][RTW89_ACMA][1][36] = 44,
@@ -58849,13 +56321,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][36] = 28,
[2][1][RTW89_THAILAND][0][36] = -16,
[2][1][RTW89_FCC][1][38] = -16,
- [2][1][RTW89_FCC][2][38] = 54,
[2][1][RTW89_ETSI][1][38] = 44,
[2][1][RTW89_ETSI][0][38] = 6,
[2][1][RTW89_MKK][1][38] = 40,
[2][1][RTW89_MKK][0][38] = 2,
[2][1][RTW89_IC][1][38] = -16,
- [2][1][RTW89_IC][2][38] = 54,
[2][1][RTW89_KCC][1][38] = -14,
[2][1][RTW89_KCC][0][38] = -14,
[2][1][RTW89_ACMA][1][38] = 44,
@@ -58868,13 +56338,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][38] = 28,
[2][1][RTW89_THAILAND][0][38] = -16,
[2][1][RTW89_FCC][1][40] = -16,
- [2][1][RTW89_FCC][2][40] = 54,
[2][1][RTW89_ETSI][1][40] = 44,
[2][1][RTW89_ETSI][0][40] = 6,
[2][1][RTW89_MKK][1][40] = 40,
[2][1][RTW89_MKK][0][40] = 2,
[2][1][RTW89_IC][1][40] = -16,
- [2][1][RTW89_IC][2][40] = 54,
[2][1][RTW89_KCC][1][40] = -14,
[2][1][RTW89_KCC][0][40] = -14,
[2][1][RTW89_ACMA][1][40] = 44,
@@ -58887,13 +56355,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][40] = 28,
[2][1][RTW89_THAILAND][0][40] = -16,
[2][1][RTW89_FCC][1][42] = -16,
- [2][1][RTW89_FCC][2][42] = 54,
[2][1][RTW89_ETSI][1][42] = 44,
[2][1][RTW89_ETSI][0][42] = 6,
[2][1][RTW89_MKK][1][42] = 40,
[2][1][RTW89_MKK][0][42] = 2,
[2][1][RTW89_IC][1][42] = -16,
- [2][1][RTW89_IC][2][42] = 54,
[2][1][RTW89_KCC][1][42] = -14,
[2][1][RTW89_KCC][0][42] = -14,
[2][1][RTW89_ACMA][1][42] = 44,
@@ -58906,13 +56372,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][42] = 28,
[2][1][RTW89_THAILAND][0][42] = -16,
[2][1][RTW89_FCC][1][44] = -16,
- [2][1][RTW89_FCC][2][44] = 54,
[2][1][RTW89_ETSI][1][44] = 44,
[2][1][RTW89_ETSI][0][44] = 6,
[2][1][RTW89_MKK][1][44] = 16,
[2][1][RTW89_MKK][0][44] = 2,
[2][1][RTW89_IC][1][44] = -16,
- [2][1][RTW89_IC][2][44] = 54,
[2][1][RTW89_KCC][1][44] = -14,
[2][1][RTW89_KCC][0][44] = -14,
[2][1][RTW89_ACMA][1][44] = 44,
@@ -58925,13 +56389,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][44] = 28,
[2][1][RTW89_THAILAND][0][44] = -16,
[2][1][RTW89_FCC][1][45] = -16,
- [2][1][RTW89_FCC][2][45] = 127,
[2][1][RTW89_ETSI][1][45] = 127,
[2][1][RTW89_ETSI][0][45] = 127,
[2][1][RTW89_MKK][1][45] = 127,
[2][1][RTW89_MKK][0][45] = 127,
[2][1][RTW89_IC][1][45] = -16,
- [2][1][RTW89_IC][2][45] = 56,
[2][1][RTW89_KCC][1][45] = -14,
[2][1][RTW89_KCC][0][45] = 127,
[2][1][RTW89_ACMA][1][45] = 127,
@@ -58944,13 +56406,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][45] = 127,
[2][1][RTW89_THAILAND][0][45] = 127,
[2][1][RTW89_FCC][1][47] = -16,
- [2][1][RTW89_FCC][2][47] = 127,
[2][1][RTW89_ETSI][1][47] = 127,
[2][1][RTW89_ETSI][0][47] = 127,
[2][1][RTW89_MKK][1][47] = 127,
[2][1][RTW89_MKK][0][47] = 127,
[2][1][RTW89_IC][1][47] = -16,
- [2][1][RTW89_IC][2][47] = 56,
[2][1][RTW89_KCC][1][47] = -14,
[2][1][RTW89_KCC][0][47] = 127,
[2][1][RTW89_ACMA][1][47] = 127,
@@ -58963,13 +56423,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][47] = 127,
[2][1][RTW89_THAILAND][0][47] = 127,
[2][1][RTW89_FCC][1][49] = -16,
- [2][1][RTW89_FCC][2][49] = 127,
[2][1][RTW89_ETSI][1][49] = 127,
[2][1][RTW89_ETSI][0][49] = 127,
[2][1][RTW89_MKK][1][49] = 127,
[2][1][RTW89_MKK][0][49] = 127,
[2][1][RTW89_IC][1][49] = -16,
- [2][1][RTW89_IC][2][49] = 56,
[2][1][RTW89_KCC][1][49] = -14,
[2][1][RTW89_KCC][0][49] = 127,
[2][1][RTW89_ACMA][1][49] = 127,
@@ -58982,13 +56440,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][49] = 127,
[2][1][RTW89_THAILAND][0][49] = 127,
[2][1][RTW89_FCC][1][51] = -16,
- [2][1][RTW89_FCC][2][51] = 127,
[2][1][RTW89_ETSI][1][51] = 127,
[2][1][RTW89_ETSI][0][51] = 127,
[2][1][RTW89_MKK][1][51] = 127,
[2][1][RTW89_MKK][0][51] = 127,
[2][1][RTW89_IC][1][51] = -16,
- [2][1][RTW89_IC][2][51] = 56,
[2][1][RTW89_KCC][1][51] = -14,
[2][1][RTW89_KCC][0][51] = 127,
[2][1][RTW89_ACMA][1][51] = 127,
@@ -59001,13 +56457,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][51] = 127,
[2][1][RTW89_THAILAND][0][51] = 127,
[2][1][RTW89_FCC][1][53] = -16,
- [2][1][RTW89_FCC][2][53] = 127,
[2][1][RTW89_ETSI][1][53] = 127,
[2][1][RTW89_ETSI][0][53] = 127,
[2][1][RTW89_MKK][1][53] = 127,
[2][1][RTW89_MKK][0][53] = 127,
[2][1][RTW89_IC][1][53] = -16,
- [2][1][RTW89_IC][2][53] = 56,
[2][1][RTW89_KCC][1][53] = -14,
[2][1][RTW89_KCC][0][53] = 127,
[2][1][RTW89_ACMA][1][53] = 127,
@@ -59020,13 +56474,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][53] = 127,
[2][1][RTW89_THAILAND][0][53] = 127,
[2][1][RTW89_FCC][1][55] = -16,
- [2][1][RTW89_FCC][2][55] = 54,
[2][1][RTW89_ETSI][1][55] = 127,
[2][1][RTW89_ETSI][0][55] = 127,
[2][1][RTW89_MKK][1][55] = 127,
[2][1][RTW89_MKK][0][55] = 127,
[2][1][RTW89_IC][1][55] = -16,
- [2][1][RTW89_IC][2][55] = 54,
[2][1][RTW89_KCC][1][55] = -14,
[2][1][RTW89_KCC][0][55] = 127,
[2][1][RTW89_ACMA][1][55] = 127,
@@ -59039,13 +56491,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][55] = 127,
[2][1][RTW89_THAILAND][0][55] = 127,
[2][1][RTW89_FCC][1][57] = -16,
- [2][1][RTW89_FCC][2][57] = 54,
[2][1][RTW89_ETSI][1][57] = 127,
[2][1][RTW89_ETSI][0][57] = 127,
[2][1][RTW89_MKK][1][57] = 127,
[2][1][RTW89_MKK][0][57] = 127,
[2][1][RTW89_IC][1][57] = -16,
- [2][1][RTW89_IC][2][57] = 54,
[2][1][RTW89_KCC][1][57] = -14,
[2][1][RTW89_KCC][0][57] = 127,
[2][1][RTW89_ACMA][1][57] = 127,
@@ -59058,13 +56508,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][57] = 127,
[2][1][RTW89_THAILAND][0][57] = 127,
[2][1][RTW89_FCC][1][59] = -16,
- [2][1][RTW89_FCC][2][59] = 54,
[2][1][RTW89_ETSI][1][59] = 127,
[2][1][RTW89_ETSI][0][59] = 127,
[2][1][RTW89_MKK][1][59] = 127,
[2][1][RTW89_MKK][0][59] = 127,
[2][1][RTW89_IC][1][59] = -16,
- [2][1][RTW89_IC][2][59] = 54,
[2][1][RTW89_KCC][1][59] = -14,
[2][1][RTW89_KCC][0][59] = 127,
[2][1][RTW89_ACMA][1][59] = 127,
@@ -59077,13 +56525,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][59] = 127,
[2][1][RTW89_THAILAND][0][59] = 127,
[2][1][RTW89_FCC][1][60] = -16,
- [2][1][RTW89_FCC][2][60] = 54,
[2][1][RTW89_ETSI][1][60] = 127,
[2][1][RTW89_ETSI][0][60] = 127,
[2][1][RTW89_MKK][1][60] = 127,
[2][1][RTW89_MKK][0][60] = 127,
[2][1][RTW89_IC][1][60] = -16,
- [2][1][RTW89_IC][2][60] = 54,
[2][1][RTW89_KCC][1][60] = -14,
[2][1][RTW89_KCC][0][60] = 127,
[2][1][RTW89_ACMA][1][60] = 127,
@@ -59096,13 +56542,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][60] = 127,
[2][1][RTW89_THAILAND][0][60] = 127,
[2][1][RTW89_FCC][1][62] = -16,
- [2][1][RTW89_FCC][2][62] = 54,
[2][1][RTW89_ETSI][1][62] = 127,
[2][1][RTW89_ETSI][0][62] = 127,
[2][1][RTW89_MKK][1][62] = 127,
[2][1][RTW89_MKK][0][62] = 127,
[2][1][RTW89_IC][1][62] = -16,
- [2][1][RTW89_IC][2][62] = 54,
[2][1][RTW89_KCC][1][62] = -14,
[2][1][RTW89_KCC][0][62] = 127,
[2][1][RTW89_ACMA][1][62] = 127,
@@ -59115,13 +56559,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][62] = 127,
[2][1][RTW89_THAILAND][0][62] = 127,
[2][1][RTW89_FCC][1][64] = -16,
- [2][1][RTW89_FCC][2][64] = 54,
[2][1][RTW89_ETSI][1][64] = 127,
[2][1][RTW89_ETSI][0][64] = 127,
[2][1][RTW89_MKK][1][64] = 127,
[2][1][RTW89_MKK][0][64] = 127,
[2][1][RTW89_IC][1][64] = -16,
- [2][1][RTW89_IC][2][64] = 54,
[2][1][RTW89_KCC][1][64] = -14,
[2][1][RTW89_KCC][0][64] = 127,
[2][1][RTW89_ACMA][1][64] = 127,
@@ -59134,13 +56576,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][64] = 127,
[2][1][RTW89_THAILAND][0][64] = 127,
[2][1][RTW89_FCC][1][66] = -16,
- [2][1][RTW89_FCC][2][66] = 54,
[2][1][RTW89_ETSI][1][66] = 127,
[2][1][RTW89_ETSI][0][66] = 127,
[2][1][RTW89_MKK][1][66] = 127,
[2][1][RTW89_MKK][0][66] = 127,
[2][1][RTW89_IC][1][66] = -16,
- [2][1][RTW89_IC][2][66] = 54,
[2][1][RTW89_KCC][1][66] = -14,
[2][1][RTW89_KCC][0][66] = 127,
[2][1][RTW89_ACMA][1][66] = 127,
@@ -59153,13 +56593,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][66] = 127,
[2][1][RTW89_THAILAND][0][66] = 127,
[2][1][RTW89_FCC][1][68] = -16,
- [2][1][RTW89_FCC][2][68] = 54,
[2][1][RTW89_ETSI][1][68] = 127,
[2][1][RTW89_ETSI][0][68] = 127,
[2][1][RTW89_MKK][1][68] = 127,
[2][1][RTW89_MKK][0][68] = 127,
[2][1][RTW89_IC][1][68] = -16,
- [2][1][RTW89_IC][2][68] = 54,
[2][1][RTW89_KCC][1][68] = -14,
[2][1][RTW89_KCC][0][68] = 127,
[2][1][RTW89_ACMA][1][68] = 127,
@@ -59172,13 +56610,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][68] = 127,
[2][1][RTW89_THAILAND][0][68] = 127,
[2][1][RTW89_FCC][1][70] = -16,
- [2][1][RTW89_FCC][2][70] = 56,
[2][1][RTW89_ETSI][1][70] = 127,
[2][1][RTW89_ETSI][0][70] = 127,
[2][1][RTW89_MKK][1][70] = 127,
[2][1][RTW89_MKK][0][70] = 127,
[2][1][RTW89_IC][1][70] = -16,
- [2][1][RTW89_IC][2][70] = 56,
[2][1][RTW89_KCC][1][70] = -14,
[2][1][RTW89_KCC][0][70] = 127,
[2][1][RTW89_ACMA][1][70] = 127,
@@ -59191,13 +56627,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][70] = 127,
[2][1][RTW89_THAILAND][0][70] = 127,
[2][1][RTW89_FCC][1][72] = -16,
- [2][1][RTW89_FCC][2][72] = 56,
[2][1][RTW89_ETSI][1][72] = 127,
[2][1][RTW89_ETSI][0][72] = 127,
[2][1][RTW89_MKK][1][72] = 127,
[2][1][RTW89_MKK][0][72] = 127,
[2][1][RTW89_IC][1][72] = -16,
- [2][1][RTW89_IC][2][72] = 56,
[2][1][RTW89_KCC][1][72] = -14,
[2][1][RTW89_KCC][0][72] = 127,
[2][1][RTW89_ACMA][1][72] = 127,
@@ -59210,13 +56644,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][72] = 127,
[2][1][RTW89_THAILAND][0][72] = 127,
[2][1][RTW89_FCC][1][74] = -16,
- [2][1][RTW89_FCC][2][74] = 56,
[2][1][RTW89_ETSI][1][74] = 127,
[2][1][RTW89_ETSI][0][74] = 127,
[2][1][RTW89_MKK][1][74] = 127,
[2][1][RTW89_MKK][0][74] = 127,
[2][1][RTW89_IC][1][74] = -16,
- [2][1][RTW89_IC][2][74] = 56,
[2][1][RTW89_KCC][1][74] = -14,
[2][1][RTW89_KCC][0][74] = 127,
[2][1][RTW89_ACMA][1][74] = 127,
@@ -59229,13 +56661,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][74] = 127,
[2][1][RTW89_THAILAND][0][74] = 127,
[2][1][RTW89_FCC][1][75] = -16,
- [2][1][RTW89_FCC][2][75] = 56,
[2][1][RTW89_ETSI][1][75] = 127,
[2][1][RTW89_ETSI][0][75] = 127,
[2][1][RTW89_MKK][1][75] = 127,
[2][1][RTW89_MKK][0][75] = 127,
[2][1][RTW89_IC][1][75] = -16,
- [2][1][RTW89_IC][2][75] = 56,
[2][1][RTW89_KCC][1][75] = -14,
[2][1][RTW89_KCC][0][75] = 127,
[2][1][RTW89_ACMA][1][75] = 127,
@@ -59248,13 +56678,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][75] = 127,
[2][1][RTW89_THAILAND][0][75] = 127,
[2][1][RTW89_FCC][1][77] = -16,
- [2][1][RTW89_FCC][2][77] = 56,
[2][1][RTW89_ETSI][1][77] = 127,
[2][1][RTW89_ETSI][0][77] = 127,
[2][1][RTW89_MKK][1][77] = 127,
[2][1][RTW89_MKK][0][77] = 127,
[2][1][RTW89_IC][1][77] = -16,
- [2][1][RTW89_IC][2][77] = 56,
[2][1][RTW89_KCC][1][77] = -14,
[2][1][RTW89_KCC][0][77] = 127,
[2][1][RTW89_ACMA][1][77] = 127,
@@ -59267,13 +56695,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][77] = 127,
[2][1][RTW89_THAILAND][0][77] = 127,
[2][1][RTW89_FCC][1][79] = -16,
- [2][1][RTW89_FCC][2][79] = 56,
[2][1][RTW89_ETSI][1][79] = 127,
[2][1][RTW89_ETSI][0][79] = 127,
[2][1][RTW89_MKK][1][79] = 127,
[2][1][RTW89_MKK][0][79] = 127,
[2][1][RTW89_IC][1][79] = -16,
- [2][1][RTW89_IC][2][79] = 56,
[2][1][RTW89_KCC][1][79] = -14,
[2][1][RTW89_KCC][0][79] = 127,
[2][1][RTW89_ACMA][1][79] = 127,
@@ -59286,13 +56712,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][79] = 127,
[2][1][RTW89_THAILAND][0][79] = 127,
[2][1][RTW89_FCC][1][81] = -16,
- [2][1][RTW89_FCC][2][81] = 56,
[2][1][RTW89_ETSI][1][81] = 127,
[2][1][RTW89_ETSI][0][81] = 127,
[2][1][RTW89_MKK][1][81] = 127,
[2][1][RTW89_MKK][0][81] = 127,
[2][1][RTW89_IC][1][81] = -16,
- [2][1][RTW89_IC][2][81] = 56,
[2][1][RTW89_KCC][1][81] = -14,
[2][1][RTW89_KCC][0][81] = 127,
[2][1][RTW89_ACMA][1][81] = 127,
@@ -59305,13 +56729,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][81] = 127,
[2][1][RTW89_THAILAND][0][81] = 127,
[2][1][RTW89_FCC][1][83] = -16,
- [2][1][RTW89_FCC][2][83] = 56,
[2][1][RTW89_ETSI][1][83] = 127,
[2][1][RTW89_ETSI][0][83] = 127,
[2][1][RTW89_MKK][1][83] = 127,
[2][1][RTW89_MKK][0][83] = 127,
[2][1][RTW89_IC][1][83] = -16,
- [2][1][RTW89_IC][2][83] = 56,
[2][1][RTW89_KCC][1][83] = -14,
[2][1][RTW89_KCC][0][83] = 127,
[2][1][RTW89_ACMA][1][83] = 127,
@@ -59324,13 +56746,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][83] = 127,
[2][1][RTW89_THAILAND][0][83] = 127,
[2][1][RTW89_FCC][1][85] = -18,
- [2][1][RTW89_FCC][2][85] = 56,
[2][1][RTW89_ETSI][1][85] = 127,
[2][1][RTW89_ETSI][0][85] = 127,
[2][1][RTW89_MKK][1][85] = 127,
[2][1][RTW89_MKK][0][85] = 127,
[2][1][RTW89_IC][1][85] = -18,
- [2][1][RTW89_IC][2][85] = 56,
[2][1][RTW89_KCC][1][85] = -14,
[2][1][RTW89_KCC][0][85] = 127,
[2][1][RTW89_ACMA][1][85] = 127,
@@ -59343,13 +56763,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][85] = 127,
[2][1][RTW89_THAILAND][0][85] = 127,
[2][1][RTW89_FCC][1][87] = -16,
- [2][1][RTW89_FCC][2][87] = 127,
[2][1][RTW89_ETSI][1][87] = 127,
[2][1][RTW89_ETSI][0][87] = 127,
[2][1][RTW89_MKK][1][87] = 127,
[2][1][RTW89_MKK][0][87] = 127,
[2][1][RTW89_IC][1][87] = -16,
- [2][1][RTW89_IC][2][87] = 127,
[2][1][RTW89_KCC][1][87] = -14,
[2][1][RTW89_KCC][0][87] = 127,
[2][1][RTW89_ACMA][1][87] = 127,
@@ -59362,13 +56780,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][87] = 127,
[2][1][RTW89_THAILAND][0][87] = 127,
[2][1][RTW89_FCC][1][89] = -16,
- [2][1][RTW89_FCC][2][89] = 127,
[2][1][RTW89_ETSI][1][89] = 127,
[2][1][RTW89_ETSI][0][89] = 127,
[2][1][RTW89_MKK][1][89] = 127,
[2][1][RTW89_MKK][0][89] = 127,
[2][1][RTW89_IC][1][89] = -16,
- [2][1][RTW89_IC][2][89] = 127,
[2][1][RTW89_KCC][1][89] = -14,
[2][1][RTW89_KCC][0][89] = 127,
[2][1][RTW89_ACMA][1][89] = 127,
@@ -59381,13 +56797,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][89] = 127,
[2][1][RTW89_THAILAND][0][89] = 127,
[2][1][RTW89_FCC][1][90] = -16,
- [2][1][RTW89_FCC][2][90] = 127,
[2][1][RTW89_ETSI][1][90] = 127,
[2][1][RTW89_ETSI][0][90] = 127,
[2][1][RTW89_MKK][1][90] = 127,
[2][1][RTW89_MKK][0][90] = 127,
[2][1][RTW89_IC][1][90] = -16,
- [2][1][RTW89_IC][2][90] = 127,
[2][1][RTW89_KCC][1][90] = -14,
[2][1][RTW89_KCC][0][90] = 127,
[2][1][RTW89_ACMA][1][90] = 127,
@@ -59400,13 +56814,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][90] = 127,
[2][1][RTW89_THAILAND][0][90] = 127,
[2][1][RTW89_FCC][1][92] = -16,
- [2][1][RTW89_FCC][2][92] = 127,
[2][1][RTW89_ETSI][1][92] = 127,
[2][1][RTW89_ETSI][0][92] = 127,
[2][1][RTW89_MKK][1][92] = 127,
[2][1][RTW89_MKK][0][92] = 127,
[2][1][RTW89_IC][1][92] = -16,
- [2][1][RTW89_IC][2][92] = 127,
[2][1][RTW89_KCC][1][92] = -14,
[2][1][RTW89_KCC][0][92] = 127,
[2][1][RTW89_ACMA][1][92] = 127,
@@ -59419,13 +56831,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][92] = 127,
[2][1][RTW89_THAILAND][0][92] = 127,
[2][1][RTW89_FCC][1][94] = -16,
- [2][1][RTW89_FCC][2][94] = 127,
[2][1][RTW89_ETSI][1][94] = 127,
[2][1][RTW89_ETSI][0][94] = 127,
[2][1][RTW89_MKK][1][94] = 127,
[2][1][RTW89_MKK][0][94] = 127,
[2][1][RTW89_IC][1][94] = -16,
- [2][1][RTW89_IC][2][94] = 127,
[2][1][RTW89_KCC][1][94] = -14,
[2][1][RTW89_KCC][0][94] = 127,
[2][1][RTW89_ACMA][1][94] = 127,
@@ -59438,13 +56848,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][94] = 127,
[2][1][RTW89_THAILAND][0][94] = 127,
[2][1][RTW89_FCC][1][96] = -16,
- [2][1][RTW89_FCC][2][96] = 127,
[2][1][RTW89_ETSI][1][96] = 127,
[2][1][RTW89_ETSI][0][96] = 127,
[2][1][RTW89_MKK][1][96] = 127,
[2][1][RTW89_MKK][0][96] = 127,
[2][1][RTW89_IC][1][96] = -16,
- [2][1][RTW89_IC][2][96] = 127,
[2][1][RTW89_KCC][1][96] = -14,
[2][1][RTW89_KCC][0][96] = 127,
[2][1][RTW89_ACMA][1][96] = 127,
@@ -59457,13 +56865,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][96] = 127,
[2][1][RTW89_THAILAND][0][96] = 127,
[2][1][RTW89_FCC][1][98] = -16,
- [2][1][RTW89_FCC][2][98] = 127,
[2][1][RTW89_ETSI][1][98] = 127,
[2][1][RTW89_ETSI][0][98] = 127,
[2][1][RTW89_MKK][1][98] = 127,
[2][1][RTW89_MKK][0][98] = 127,
[2][1][RTW89_IC][1][98] = -16,
- [2][1][RTW89_IC][2][98] = 127,
[2][1][RTW89_KCC][1][98] = -14,
[2][1][RTW89_KCC][0][98] = 127,
[2][1][RTW89_ACMA][1][98] = 127,
@@ -59476,13 +56882,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][98] = 127,
[2][1][RTW89_THAILAND][0][98] = 127,
[2][1][RTW89_FCC][1][100] = -16,
- [2][1][RTW89_FCC][2][100] = 127,
[2][1][RTW89_ETSI][1][100] = 127,
[2][1][RTW89_ETSI][0][100] = 127,
[2][1][RTW89_MKK][1][100] = 127,
[2][1][RTW89_MKK][0][100] = 127,
[2][1][RTW89_IC][1][100] = -16,
- [2][1][RTW89_IC][2][100] = 127,
[2][1][RTW89_KCC][1][100] = -14,
[2][1][RTW89_KCC][0][100] = 127,
[2][1][RTW89_ACMA][1][100] = 127,
@@ -59495,13 +56899,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][100] = 127,
[2][1][RTW89_THAILAND][0][100] = 127,
[2][1][RTW89_FCC][1][102] = -16,
- [2][1][RTW89_FCC][2][102] = 127,
[2][1][RTW89_ETSI][1][102] = 127,
[2][1][RTW89_ETSI][0][102] = 127,
[2][1][RTW89_MKK][1][102] = 127,
[2][1][RTW89_MKK][0][102] = 127,
[2][1][RTW89_IC][1][102] = -16,
- [2][1][RTW89_IC][2][102] = 127,
[2][1][RTW89_KCC][1][102] = -14,
[2][1][RTW89_KCC][0][102] = 127,
[2][1][RTW89_ACMA][1][102] = 127,
@@ -59514,13 +56916,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][102] = 127,
[2][1][RTW89_THAILAND][0][102] = 127,
[2][1][RTW89_FCC][1][104] = -16,
- [2][1][RTW89_FCC][2][104] = 127,
[2][1][RTW89_ETSI][1][104] = 127,
[2][1][RTW89_ETSI][0][104] = 127,
[2][1][RTW89_MKK][1][104] = 127,
[2][1][RTW89_MKK][0][104] = 127,
[2][1][RTW89_IC][1][104] = -16,
- [2][1][RTW89_IC][2][104] = 127,
[2][1][RTW89_KCC][1][104] = -14,
[2][1][RTW89_KCC][0][104] = 127,
[2][1][RTW89_ACMA][1][104] = 127,
@@ -59533,13 +56933,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][104] = 127,
[2][1][RTW89_THAILAND][0][104] = 127,
[2][1][RTW89_FCC][1][105] = -16,
- [2][1][RTW89_FCC][2][105] = 127,
[2][1][RTW89_ETSI][1][105] = 127,
[2][1][RTW89_ETSI][0][105] = 127,
[2][1][RTW89_MKK][1][105] = 127,
[2][1][RTW89_MKK][0][105] = 127,
[2][1][RTW89_IC][1][105] = -16,
- [2][1][RTW89_IC][2][105] = 127,
[2][1][RTW89_KCC][1][105] = -14,
[2][1][RTW89_KCC][0][105] = 127,
[2][1][RTW89_ACMA][1][105] = 127,
@@ -59552,13 +56950,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][105] = 127,
[2][1][RTW89_THAILAND][0][105] = 127,
[2][1][RTW89_FCC][1][107] = -12,
- [2][1][RTW89_FCC][2][107] = 127,
[2][1][RTW89_ETSI][1][107] = 127,
[2][1][RTW89_ETSI][0][107] = 127,
[2][1][RTW89_MKK][1][107] = 127,
[2][1][RTW89_MKK][0][107] = 127,
[2][1][RTW89_IC][1][107] = -12,
- [2][1][RTW89_IC][2][107] = 127,
[2][1][RTW89_KCC][1][107] = -14,
[2][1][RTW89_KCC][0][107] = 127,
[2][1][RTW89_ACMA][1][107] = 127,
@@ -59571,13 +56967,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][107] = 127,
[2][1][RTW89_THAILAND][0][107] = 127,
[2][1][RTW89_FCC][1][109] = -10,
- [2][1][RTW89_FCC][2][109] = 127,
[2][1][RTW89_ETSI][1][109] = 127,
[2][1][RTW89_ETSI][0][109] = 127,
[2][1][RTW89_MKK][1][109] = 127,
[2][1][RTW89_MKK][0][109] = 127,
[2][1][RTW89_IC][1][109] = -10,
- [2][1][RTW89_IC][2][109] = 127,
[2][1][RTW89_KCC][1][109] = 127,
[2][1][RTW89_KCC][0][109] = 127,
[2][1][RTW89_ACMA][1][109] = 127,
@@ -59590,13 +56984,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][109] = 127,
[2][1][RTW89_THAILAND][0][109] = 127,
[2][1][RTW89_FCC][1][111] = 127,
- [2][1][RTW89_FCC][2][111] = 127,
[2][1][RTW89_ETSI][1][111] = 127,
[2][1][RTW89_ETSI][0][111] = 127,
[2][1][RTW89_MKK][1][111] = 127,
[2][1][RTW89_MKK][0][111] = 127,
[2][1][RTW89_IC][1][111] = 127,
- [2][1][RTW89_IC][2][111] = 127,
[2][1][RTW89_KCC][1][111] = 127,
[2][1][RTW89_KCC][0][111] = 127,
[2][1][RTW89_ACMA][1][111] = 127,
@@ -59609,13 +57001,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][111] = 127,
[2][1][RTW89_THAILAND][0][111] = 127,
[2][1][RTW89_FCC][1][113] = 127,
- [2][1][RTW89_FCC][2][113] = 127,
[2][1][RTW89_ETSI][1][113] = 127,
[2][1][RTW89_ETSI][0][113] = 127,
[2][1][RTW89_MKK][1][113] = 127,
[2][1][RTW89_MKK][0][113] = 127,
[2][1][RTW89_IC][1][113] = 127,
- [2][1][RTW89_IC][2][113] = 127,
[2][1][RTW89_KCC][1][113] = 127,
[2][1][RTW89_KCC][0][113] = 127,
[2][1][RTW89_ACMA][1][113] = 127,
@@ -59628,13 +57018,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][113] = 127,
[2][1][RTW89_THAILAND][0][113] = 127,
[2][1][RTW89_FCC][1][115] = 127,
- [2][1][RTW89_FCC][2][115] = 127,
[2][1][RTW89_ETSI][1][115] = 127,
[2][1][RTW89_ETSI][0][115] = 127,
[2][1][RTW89_MKK][1][115] = 127,
[2][1][RTW89_MKK][0][115] = 127,
[2][1][RTW89_IC][1][115] = 127,
- [2][1][RTW89_IC][2][115] = 127,
[2][1][RTW89_KCC][1][115] = 127,
[2][1][RTW89_KCC][0][115] = 127,
[2][1][RTW89_ACMA][1][115] = 127,
@@ -59647,13 +57035,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][115] = 127,
[2][1][RTW89_THAILAND][0][115] = 127,
[2][1][RTW89_FCC][1][117] = 127,
- [2][1][RTW89_FCC][2][117] = 127,
[2][1][RTW89_ETSI][1][117] = 127,
[2][1][RTW89_ETSI][0][117] = 127,
[2][1][RTW89_MKK][1][117] = 127,
[2][1][RTW89_MKK][0][117] = 127,
[2][1][RTW89_IC][1][117] = 127,
- [2][1][RTW89_IC][2][117] = 127,
[2][1][RTW89_KCC][1][117] = 127,
[2][1][RTW89_KCC][0][117] = 127,
[2][1][RTW89_ACMA][1][117] = 127,
@@ -59666,13 +57052,11 @@ const s8 rtw89_8852c_txpwr_lmt_ru_6g[RTW89_RU_NUM][RTW89_NTX_NUM]
[2][1][RTW89_THAILAND][1][117] = 127,
[2][1][RTW89_THAILAND][0][117] = 127,
[2][1][RTW89_FCC][1][119] = 127,
- [2][1][RTW89_FCC][2][119] = 127,
[2][1][RTW89_ETSI][1][119] = 127,
[2][1][RTW89_ETSI][0][119] = 127,
[2][1][RTW89_MKK][1][119] = 127,
[2][1][RTW89_MKK][0][119] = 127,
[2][1][RTW89_IC][1][119] = 127,
- [2][1][RTW89_IC][2][119] = 127,
[2][1][RTW89_KCC][1][119] = 127,
[2][1][RTW89_KCC][0][119] = 127,
[2][1][RTW89_ACMA][1][119] = 127,
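Every removal in the hunks above drops the power-category index 2 column of rtw89_8852c_txpwr_lmt_ru_6g while keeping the index 0 and 1 columns, and 127 (S8_MAX) recurs as the "entry not usable" sentinel. A minimal sketch of the lookup pattern these entries imply — the helper name, parameter names, and the fallback value are assumptions, not the driver's actual API:

/* Hedged sketch: index order as seen in this table is
 * [ru][ntx][regulatory domain][power category][channel index]. */
static s8 lmt_ru_6g_lookup(u8 ru, u8 ntx, u8 regd, u8 pwr_cat, u8 ch_idx)
{
	s8 lmt = rtw89_8852c_txpwr_lmt_ru_6g[ru][ntx][regd][pwr_cat][ch_idx];

	return lmt == 127 ? 0 : lmt;	/* 127 = unusable entry (assumed) */
}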
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
index 583ea673a4f5..8aaad7d58c0d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c
@@ -55,6 +55,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.rpwm_addr = R_AX_PCIE_HRPWM_V1,
.cpwm_addr = R_AX_PCIE_CRPWM,
.mit_addr = R_AX_INT_MIT_RX_V1,
+ .wp_sel_addr = R_AX_WP_ADDR_H_SEL0_3,
.tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = &rtw8852c_bd_idx_addr_low_power,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1,
@@ -68,8 +69,31 @@ static const struct rtw89_pci_info rtw8852c_pci_info = {
.recognize_intrs = rtw89_pci_recognize_intrs_v1,
};
+static const struct dmi_system_id rtw8852c_pci_quirks[] = {
+ {
+ .ident = "Dell Inc. Vostro 16 5640",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 16 5640"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "0CA0"),
+ },
+ .driver_data = (void *)RTW89_QUIRK_PCI_BER,
+ },
+ {
+ .ident = "Dell Inc. Inspiron 16 5640",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 16 5640"),
+ DMI_MATCH(DMI_PRODUCT_SKU, "0C9F"),
+ },
+ .driver_data = (void *)RTW89_QUIRK_PCI_BER,
+ },
+ {},
+};
+
static const struct rtw89_driver_info rtw89_8852ce_info = {
.chip = &rtw8852c_chip_info,
+ .quirks = rtw8852c_pci_quirks,
.bus = {
.pci = &rtw8852c_pci_info,
},
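The quirk table added above keys RTW89_QUIRK_PCI_BER off exact DMI vendor, product, and SKU strings for the two affected Dell laptops. A hedged sketch of how such a table is typically consumed via the standard <linux/dmi.h> helper; the wrapper name is illustrative, not the driver's actual entry point:

#include <linux/dmi.h>

/* dmi_first_match() returns the first entry whose DMI strings all match
 * the running platform, or NULL when none do. */
static unsigned long rtw89_resolve_quirks(const struct dmi_system_id *quirks)
{
	const struct dmi_system_id *match = dmi_first_match(quirks);

	return match ? (unsigned long)match->driver_data : 0;
}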
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
index 708132d5be2a..2af568a3264d 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a.c
@@ -85,6 +85,10 @@ static const u32 rtw8922a_c2h_regs[RTW89_H2CREG_MAX] = {
R_BE_C2HREG_DATA3
};
+static const u32 rtw8922a_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = {
+ R_AX_C2HREG_DATA3_V1 + 3, R_BE_DBG_WOW,
+};
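This array replaces the scalar wow_reason_reg (see the rtw8922a_chip_info change further down in this diff), so the wakeup-reason register can differ per source. Illustrative only — the index origin is an assumption:

/* Hedged sketch: pick the reason register per array entry, then read
 * the 8-bit wakeup reason code. */
static u8 rtw8922a_read_wow_reason(struct rtw89_dev *rtwdev, unsigned int idx)
{
	return rtw89_read8(rtwdev, rtwdev->chip->wow_reason_reg[idx]);
}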
+
static const struct rtw89_page_regs rtw8922a_page_regs = {
.hci_fc_ctrl = R_BE_HCI_FC_CTRL,
.ch_page_ctrl = R_BE_CH_PAGE_CTRL,
@@ -2126,7 +2130,7 @@ static void rtw8922a_bb_cfg_txrx_path(struct rtw89_dev *rtwdev)
rtw8922a_hal_reset(rtwdev, RTW89_PHY_0, RTW89_MAC_0, band, &tx_en0, false);
if (rtwdev->dbcc_en)
rtw8922a_hal_reset(rtwdev, RTW89_PHY_1, RTW89_MAC_1, band,
- &tx_en0, false);
+ &tx_en1, false);
}
static u8 rtw8922a_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
@@ -2257,6 +2261,138 @@ static void rtw8922a_btc_init_cfg(struct rtw89_dev *rtwdev)
btc->cx.wl.status.map.init_ok = true;
}
+static void
+rtw8922a_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val)
+{
+ u16 ctrl_all_time = u32_get_bits(txpwr_val, GENMASK(15, 0));
+ u16 ctrl_gnt_bt = u32_get_bits(txpwr_val, GENMASK(31, 16));
+
+ switch (ctrl_all_time) {
+ case 0xffff:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_RATE_CTRL,
+ B_BE_FORCE_PWR_BY_RATE_EN, 0x0);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_RATE_CTRL,
+ B_BE_FORCE_PWR_BY_RATE_VAL, 0x0);
+ break;
+ default:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_RATE_CTRL,
+ B_BE_FORCE_PWR_BY_RATE_VAL, ctrl_all_time);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_RATE_CTRL,
+ B_BE_FORCE_PWR_BY_RATE_EN, 0x1);
+ break;
+ }
+
+ switch (ctrl_gnt_bt) {
+ case 0xffff:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_REG_CTRL,
+ B_BE_PWR_BT_EN, 0x0);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_COEX_CTRL,
+ B_BE_PWR_BT_VAL, 0x0);
+ break;
+ default:
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_COEX_CTRL,
+ B_BE_PWR_BT_VAL, ctrl_gnt_bt);
+ rtw89_mac_txpwr_write32_mask(rtwdev, RTW89_PHY_0, R_BE_PWR_REG_CTRL,
+ B_BE_PWR_BT_EN, 0x1);
+ break;
+ }
+}
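rtw8922a_btc_set_wl_txpwr_ctrl() above decodes a packed u32: bits 15:0 force a by-rate TX power value, bits 31:16 cap power while BT holds the grant line, and 0xffff in either half restores hardware defaults. A hedged sketch of composing such a value; the mask names are illustrative, not taken from the driver:

#include <linux/bitfield.h>

#define BTC_TXPWR_ALL_TIME	GENMASK(15, 0)	/* illustrative names */
#define BTC_TXPWR_GNT_BT	GENMASK(31, 16)

static inline u32 btc_pack_txpwr(u16 all_time, u16 gnt_bt)	/* 0xffff = defaults */
{
	return u32_encode_bits(all_time, BTC_TXPWR_ALL_TIME) |
	       u32_encode_bits(gnt_bt, BTC_TXPWR_GNT_BT);
}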
+
+static
+s8 rtw8922a_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
+{
+ return clamp_t(s8, val, -100, 0) + 100;
+}
+
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8922a_rf_ul[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */
+ {255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+ {255, 1, 0, 7}, /* IDs below are for non-shared-antenna free-run */
+ {6, 1, 0, 7},
+ {13, 1, 0, 7},
+ {13, 1, 0, 7}
+};
+
+static const struct rtw89_btc_rf_trx_para rtw89_btc_8922a_rf_dl[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+ {255, 1, 0, 7}, /* IDs below are for non-shared-antenna free-run */
+ {255, 1, 0, 7},
+ {255, 1, 0, 7},
+ {255, 1, 0, 7}
+};
+
+static const u8 rtw89_btc_8922a_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {60, 50, 40, 30};
+static const u8 rtw89_btc_8922a_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {50, 40, 30, 20};
+
+static const struct rtw89_btc_fbtc_mreg rtw89_btc_8922a_mon_reg[] = {
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe300),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe320),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe324),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe328),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe32c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe330),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe334),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe338),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe344),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe348),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe34c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xe350),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0x11a2c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0x11a50),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x980),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x660),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x1660),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x418c),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x518c),
+};
+
+static
+void rtw8922a_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
+{
+ /* Feature moved to firmware */
+}
+
+static
+void rtw8922a_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
+{
+ if (!state) {
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x0c110);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x01018);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x00000);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c110);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x01018);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
+ } else {
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x0c110);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x09018);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x00000);
+
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD1, RFREG_MASK, 0x0c110);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWD0, RFREG_MASK, 0x09018);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_LUTWE, RFREG_MASK, 0x00000);
+ }
+}
+
+static void rtw8922a_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+}
+
static void rtw8922a_fill_freq_with_ppdu(struct rtw89_dev *rtwdev,
struct rtw89_rx_phy_ppdu *phy_ppdu,
struct ieee80211_rx_status *status)
@@ -2367,6 +2503,13 @@ static const struct rtw89_chip_ops rtw8922a_chip_ops = {
.btc_set_rfe = rtw8922a_btc_set_rfe,
.btc_init_cfg = rtw8922a_btc_init_cfg,
+ .btc_set_wl_pri = NULL,
+ .btc_set_wl_txpwr_ctrl = rtw8922a_btc_set_wl_txpwr_ctrl,
+ .btc_get_bt_rssi = rtw8922a_btc_get_bt_rssi,
+ .btc_update_bt_cnt = rtw8922a_btc_update_bt_cnt,
+ .btc_wl_s1_standby = rtw8922a_btc_wl_s1_standby,
+ .btc_set_wl_rx_gain = rtw8922a_btc_set_wl_rx_gain,
+ .btc_set_policy = rtw89_btc_set_policy_v1,
};
const struct rtw89_chip_info rtw8922a_chip_info = {
@@ -2405,7 +2548,9 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.dig_table = NULL,
.dig_regs = &rtw8922a_dig_regs,
.tssi_dbw_table = NULL,
+ .support_macid_num = 32,
.support_chanctx_num = 2,
+ .support_rnr = true,
.support_bands = BIT(NL80211_BAND_2GHZ) |
BIT(NL80211_BAND_5GHZ) |
BIT(NL80211_BAND_6GHZ),
@@ -2436,7 +2581,22 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.efuse_blocks = rtw8922a_efuse_blocks,
.phycap_addr = 0x1700,
.phycap_size = 0x38,
-
+ .para_ver = 0xf,
+ .wlcx_desired = 0x07110000,
+ .btcx_desired = 0x7,
+ .scbd = 0x1,
+ .mailbox = 0x1,
+
+ .afh_guard_ch = 6,
+ .wl_rssi_thres = rtw89_btc_8922a_wl_rssi_thres,
+ .bt_rssi_thres = rtw89_btc_8922a_bt_rssi_thres,
+ .rssi_tol = 2,
+ .mon_reg_num = ARRAY_SIZE(rtw89_btc_8922a_mon_reg),
+ .mon_reg = rtw89_btc_8922a_mon_reg,
+ .rf_para_ulink_num = ARRAY_SIZE(rtw89_btc_8922a_rf_ul),
+ .rf_para_ulink = rtw89_btc_8922a_rf_ul,
+ .rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8922a_rf_dl),
+ .rf_para_dlink = rtw89_btc_8922a_rf_dl,
.ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
BIT(RTW89_PS_MODE_CLK_GATED) |
BIT(RTW89_PS_MODE_PWR_GATED),
@@ -2453,7 +2613,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = {
.c2h_counter_reg = {R_BE_UDM1 + 1, B_BE_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8},
.c2h_regs = rtw8922a_c2h_regs,
.page_regs = &rtw8922a_page_regs,
- .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3,
+ .wow_reason_reg = rtw8922a_wow_wakeup_regs,
.cfo_src_fd = true,
.cfo_hw_comp = true,
.dcfo_comp = NULL,
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
index 2a371829268c..0ebcb06ae848 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c
@@ -255,6 +255,7 @@ static void rtw8922a_chlk_ktbl_sel(struct rtw89_dev *rtwdev, u8 kpath, u8 idx)
static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
{
struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc;
+ struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V1] = {};
enum rtw89_sub_entity_idx sub_entity_idx;
const struct rtw89_chan *chan;
enum rtw89_entity_mode mode;
@@ -265,16 +266,28 @@ static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev)
switch (mode) {
case RTW89_ENTITY_MODE_MCC_PREPARE:
sub_entity_idx = RTW89_SUB_ENTITY_1;
- tbl_sel = 1;
break;
default:
sub_entity_idx = RTW89_SUB_ENTITY_0;
- tbl_sel = 0;
break;
}
chan = rtw89_chan_get(rtwdev, sub_entity_idx);
+ for (tbl_sel = 0; tbl_sel < ARRAY_SIZE(desc); tbl_sel++) {
+ struct rtw89_rfk_chan_desc *p = &desc[tbl_sel];
+
+ p->ch = rfk_mcc->ch[tbl_sel];
+
+ p->has_band = true;
+ p->band = rfk_mcc->band[tbl_sel];
+
+ p->has_bw = true;
+ p->bw = rfk_mcc->bw[tbl_sel];
+ }
+
+ tbl_sel = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan);
+
rfk_mcc->ch[tbl_sel] = chan->channel;
rfk_mcc->band[tbl_sel] = chan->band_type;
rfk_mcc->bw[tbl_sel] = chan->band_width;
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
index 4981b657bd7b..47f855a7a268 100644
--- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
+++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c
@@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
.rpwm_addr = R_BE_PCIE_HRPWM,
.cpwm_addr = R_BE_PCIE_CRPWM,
.mit_addr = R_BE_PCIE_MIT_CH_EN,
+ .wp_sel_addr = R_BE_WP_ADDR_H_SEL0_3_V1,
.tx_dma_ch_mask = 0,
.bd_idx_addr_low_power = NULL,
.dma_addr_set = &rtw89_pci_ch_dma_addr_set_be,
@@ -61,6 +62,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = {
static const struct rtw89_driver_info rtw89_8922ae_info = {
.chip = &rtw8922a_chip_info,
+ .quirks = NULL,
.bus = {
.pci = &rtw8922a_pci_info,
},
diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h
index bd7a657188d9..4ae081d2d3b4 100644
--- a/drivers/net/wireless/realtek/rtw89/sar.h
+++ b/drivers/net/wireless/realtek/rtw89/sar.h
@@ -7,8 +7,8 @@
#include "core.h"
-#define RTW89_SAR_TXPWR_MAC_MAX S8_MAX
-#define RTW89_SAR_TXPWR_MAC_MIN S8_MIN
+#define RTW89_SAR_TXPWR_MAC_MAX 63
+#define RTW89_SAR_TXPWR_MAC_MIN -64
struct rtw89_sar_handler {
const char *descr_sar_source;
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
index 99896d85d2f8..5fc2faa9ba5a 100644
--- a/drivers/net/wireless/realtek/rtw89/ser.c
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -308,9 +308,13 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
{
- struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
- struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+ struct rtw89_vif *target_rtwvif = (struct rtw89_vif *)data;
struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_dev *rtwdev = rtwvif->rtwdev;
+
+ if (rtwvif != target_rtwvif)
+ return;
if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
index c467a80ffa88..3882938c0893 100644
--- a/drivers/net/wireless/realtek/rtw89/txrx.h
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -113,6 +113,8 @@ static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate)
#define RTW89_TXWD_INFO0_GI_LTF GENMASK(27, 25)
#define RTW89_TXWD_INFO0_DATA_RATE GENMASK(24, 16)
#define RTW89_TXWD_INFO0_DATA_ER BIT(15)
+#define RTW89_TXWD_INFO0_DATA_STBC BIT(12)
+#define RTW89_TXWD_INFO0_DATA_LDPC BIT(11)
#define RTW89_TXWD_INFO0_DISDATAFB BIT(10)
#define RTW89_TXWD_INFO0_DATA_BW_ER BIT(8)
#define RTW89_TXWD_INFO0_MULTIPORT_ID GENMASK(6, 4)
@@ -556,6 +558,8 @@ struct rtw89_phy_sts_ie0 {
#define RTW89_PHY_STS_IE01_W2_AVG_SNR GENMASK(5, 0)
#define RTW89_PHY_STS_IE01_W2_EVM_MAX GENMASK(15, 8)
#define RTW89_PHY_STS_IE01_W2_EVM_MIN GENMASK(23, 16)
+#define RTW89_PHY_STS_IE01_W2_LDPC BIT(28)
+#define RTW89_PHY_STS_IE01_W2_STBC BIT(30)
enum rtw89_tx_channel {
RTW89_TXCH_ACH0 = 0,
diff --git a/drivers/net/wireless/realtek/rtw89/util.c b/drivers/net/wireless/realtek/rtw89/util.c
new file mode 100644
index 000000000000..e71956ce9853
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/util.c
@@ -0,0 +1,106 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2024 Realtek Corporation
+ */
+
+#include "util.h"
+
+#define FRAC_ROWS 3
+#define FRAC_ROW_MAX (FRAC_ROWS - 1)
+#define NORM_ROW_MIN FRAC_ROWS
+
+static const u32 db_invert_table[12][8] = {
+ /* rows 0~2 in unit of U(32,3) */
+ {10, 13, 16, 20, 25, 32, 40, 50},
+ {64, 80, 101, 128, 160, 201, 256, 318},
+ {401, 505, 635, 800, 1007, 1268, 1596, 2010},
+ /* rows 3~11 in unit of U(32,0) */
+ {316, 398, 501, 631, 794, 1000, 1259, 1585},
+ {1995, 2512, 3162, 3981, 5012, 6310, 7943, 10000},
+ {12589, 15849, 19953, 25119, 31623, 39811, 50119, 63098},
+ {79433, 100000, 125893, 158489, 199526, 251189, 316228, 398107},
+ {501187, 630957, 794328, 1000000, 1258925, 1584893, 1995262, 2511886},
+	{3162278, 3981072, 5011872, 6309573, 7943282, 10000000, 12589254,
+ 15848932},
+ {19952623, 25118864, 31622777, 39810717, 50118723, 63095734, 79432823,
+ 100000000},
+ {125892541, 158489319, 199526232, 251188643, 316227766, 398107171,
+ 501187234, 630957345},
+ {794328235, 1000000000, 1258925412, 1584893192, 1995262315, 2511886432U,
+ 3162277660U, 3981071706U},
+};
+
+u32 rtw89_linear_2_db(u64 val)
+{
+ u8 i, j;
+ u32 dB;
+
+ for (i = 0; i < 12; i++) {
+ for (j = 0; j < 8; j++) {
+ if (i <= FRAC_ROW_MAX &&
+ (val << RTW89_LINEAR_FRAC_BITS) <= db_invert_table[i][j])
+ goto cnt;
+ else if (i > FRAC_ROW_MAX && val <= db_invert_table[i][j])
+ goto cnt;
+ }
+ }
+
+ return 96; /* maximum 96 dB */
+
+cnt:
+ /* special cases */
+ if (j == 0 && i == 0)
+ goto end;
+
+ if (i == NORM_ROW_MIN && j == 0) {
+ if (db_invert_table[NORM_ROW_MIN][0] - val >
+ val - (db_invert_table[FRAC_ROW_MAX][7] >> RTW89_LINEAR_FRAC_BITS)) {
+ i = FRAC_ROW_MAX;
+ j = 7;
+ }
+ goto end;
+ }
+
+ if (i <= FRAC_ROW_MAX)
+ val <<= RTW89_LINEAR_FRAC_BITS;
+
+ /* compare difference to get precise dB */
+ if (j == 0) {
+ if (db_invert_table[i][j] - val >
+ val - db_invert_table[i - 1][7]) {
+ i--;
+ j = 7;
+ }
+ } else {
+ if (db_invert_table[i][j] - val >
+ val - db_invert_table[i][j - 1]) {
+ j--;
+ }
+ }
+end:
+ dB = (i << 3) + j + 1;
+
+ return dB;
+}
+EXPORT_SYMBOL(rtw89_linear_2_db);
+
+u64 rtw89_db_2_linear(u32 db)
+{
+ u64 linear;
+ u8 i, j;
+
+ if (db > 96)
+ db = 96;
+ else if (db < 1)
+ return 1;
+
+ i = (db - 1) >> 3;
+ j = (db - 1) & 0x7;
+
+ linear = db_invert_table[i][j];
+
+ if (i >= NORM_ROW_MIN)
+ linear = linear << RTW89_LINEAR_FRAC_BITS;
+
+ return linear;
+}
+EXPORT_SYMBOL(rtw89_db_2_linear);
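A quick trace of the two helpers confirms the U(32,3) fixed-point convention; the values below are an editor's worked example, not part of the patch:

/* Worked example (editor's sketch, not part of the patch):
 *
 * rtw89_db_2_linear(30):
 *   i = (30 - 1) >> 3 = 3, j = (30 - 1) & 0x7 = 5
 *   db_invert_table[3][5] = 1000; since i >= NORM_ROW_MIN the result
 *   is shifted: 1000 << RTW89_LINEAR_FRAC_BITS = 8000, i.e. 1000.0
 *   in U(32,3) fixed point.
 *
 * rtw89_linear_2_db(1000):
 *   rows 0..2 compare against val << 3 = 8000, which exceeds the last
 *   fractional entry (2010); the scan stops at db_invert_table[3][5],
 *   giving dB = (3 << 3) + 5 + 1 = 30.
 */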
diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h
index e2ed4565025d..e82e7df052d8 100644
--- a/drivers/net/wireless/realtek/rtw89/util.h
+++ b/drivers/net/wireless/realtek/rtw89/util.h
@@ -6,6 +6,8 @@
#include "core.h"
+#define RTW89_LINEAR_FRAC_BITS 3
+
#define rtw89_iterate_vifs_bh(rtwdev, iterator, data) \
ieee80211_iterate_active_interfaces_atomic((rtwdev)->hw, \
IEEE80211_IFACE_ITER_NORMAL, iterator, data)
@@ -55,4 +57,7 @@ static inline void ether_addr_copy_mask(u8 *dst, const u8 *src, u8 mask)
}
}
+u32 rtw89_linear_2_db(u64 linear);
+u64 rtw89_db_2_linear(u32 db);
+
#endif
diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c
index ccad026defb5..9882064ef68d 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.c
+++ b/drivers/net/wireless/realtek/rtw89/wow.c
@@ -12,6 +12,668 @@
#include "util.h"
#include "wow.h"
+void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ const u8 *rsn, *ies = mgmt->u.assoc_req.variable;
+ struct rtw89_rsn_ie *rsn_ie;
+
+ rsn = cfg80211_find_ie(WLAN_EID_RSN, ies, skb->len);
+ if (!rsn)
+ return;
+
+ rsn_ie = (struct rtw89_rsn_ie *)rsn;
+ rtw_wow->akm = rsn_ie->akm_cipher_suite.type;
+}
+
+#define RTW89_CIPHER_INFO_DEF(cipher) \
+ {WLAN_CIPHER_SUITE_ ## cipher, .fw_alg = RTW89_WOW_FW_ALG_ ## cipher, \
+ .len = WLAN_KEY_LEN_ ## cipher}
+
+static const struct rtw89_cipher_info rtw89_cipher_info_defs[] = {
+ RTW89_CIPHER_INFO_DEF(WEP40),
+ RTW89_CIPHER_INFO_DEF(WEP104),
+ RTW89_CIPHER_INFO_DEF(TKIP),
+ RTW89_CIPHER_INFO_DEF(CCMP),
+ RTW89_CIPHER_INFO_DEF(GCMP),
+ RTW89_CIPHER_INFO_DEF(CCMP_256),
+ RTW89_CIPHER_INFO_DEF(GCMP_256),
+ RTW89_CIPHER_INFO_DEF(AES_CMAC),
+};
+
+#undef RTW89_CIPHER_INFO_DEF
+
+static const
+struct rtw89_cipher_info *rtw89_cipher_alg_recognize(u32 cipher)
+{
+ const struct rtw89_cipher_info *cipher_info_defs;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rtw89_cipher_info_defs); i++) {
+ cipher_info_defs = &rtw89_cipher_info_defs[i];
+ if (cipher_info_defs->cipher == cipher)
+ return cipher_info_defs;
+ }
+
+ return NULL;
+}
+
+static int _pn_to_iv(struct rtw89_dev *rtwdev, struct ieee80211_key_conf *key,
+ u8 *iv, u64 pn, u8 key_idx)
+{
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ iv[0] = u64_get_bits(pn, RTW89_KEY_PN_1);
+ iv[1] = (u64_get_bits(pn, RTW89_KEY_PN_1) | 0x20) & 0x7f;
+ iv[2] = u64_get_bits(pn, RTW89_KEY_PN_0);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ iv[0] = u64_get_bits(pn, RTW89_KEY_PN_0);
+ iv[1] = u64_get_bits(pn, RTW89_KEY_PN_1);
+ iv[2] = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ iv[3] = BIT(5) | ((key_idx & 0x3) << 6);
+ iv[4] = u64_get_bits(pn, RTW89_KEY_PN_2);
+ iv[5] = u64_get_bits(pn, RTW89_KEY_PN_3);
+ iv[6] = u64_get_bits(pn, RTW89_KEY_PN_4);
+ iv[7] = u64_get_bits(pn, RTW89_KEY_PN_5);
+
+ return 0;
+}
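The layout produced above matches the on-air IEEE 802.11 IV/Ext-IV header bytes; a concrete trace (editor's sketch, not part of the patch):

/* Example: CCMP, pn = 0x012345678901, key_idx = 1:
 *
 *   iv[0] = 0x01   PN0
 *   iv[1] = 0x89   PN1
 *   iv[2] = 0x00   reserved
 *   iv[3] = 0x60   BIT(5) (Ext IV) | key_idx << 6
 *   iv[4] = 0x67   PN2
 *   iv[5] = 0x45   PN3
 *   iv[6] = 0x23   PN4
 *   iv[7] = 0x01   PN5
 */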
+
+static int rtw89_rx_pn_to_iv(struct rtw89_dev *rtwdev,
+ struct ieee80211_key_conf *key,
+ u8 *iv)
+{
+ struct ieee80211_key_seq seq;
+ int err;
+ u64 pn;
+
+ ieee80211_get_key_rx_seq(key, 0, &seq);
+
+ /* seq.ccmp.pn[] is BE order array */
+ pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) |
+ u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) |
+ u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) |
+ u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) |
+ u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) |
+ u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0);
+
+ err = _pn_to_iv(rtwdev, key, iv, pn, key->keyidx);
+ if (err)
+ return err;
+
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d pn-%llx to iv-%*ph\n",
+ __func__, key->keyidx, pn, 8, iv);
+
+ return 0;
+}
+
+static int rtw89_tx_pn_to_iv(struct rtw89_dev *rtwdev,
+ struct ieee80211_key_conf *key,
+ u8 *iv)
+{
+ int err;
+ u64 pn;
+
+ pn = atomic64_inc_return(&key->tx_pn);
+ err = _pn_to_iv(rtwdev, key, iv, pn, key->keyidx);
+ if (err)
+ return err;
+
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d pn-%llx to iv-%*ph\n",
+ __func__, key->keyidx, pn, 8, iv);
+
+ return 0;
+}
+
+static int _iv_to_pn(struct rtw89_dev *rtwdev, u8 *iv, u64 *pn, u8 *key_id,
+ struct ieee80211_key_conf *key)
+{
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ *pn = u64_encode_bits(iv[2], RTW89_KEY_PN_0) |
+ u64_encode_bits(iv[0], RTW89_KEY_PN_1);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ *pn = u64_encode_bits(iv[0], RTW89_KEY_PN_0) |
+ u64_encode_bits(iv[1], RTW89_KEY_PN_1);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *pn |= u64_encode_bits(iv[4], RTW89_KEY_PN_2) |
+ u64_encode_bits(iv[5], RTW89_KEY_PN_3) |
+ u64_encode_bits(iv[6], RTW89_KEY_PN_4) |
+ u64_encode_bits(iv[7], RTW89_KEY_PN_5);
+
+ if (key_id)
+ *key_id = *(iv + 3) >> 6;
+
+ return 0;
+}
+
+static int rtw89_rx_iv_to_pn(struct rtw89_dev *rtwdev,
+ struct ieee80211_key_conf *key,
+ u8 *iv)
+{
+ struct ieee80211_key_seq seq;
+ int err;
+ u64 pn;
+
+ err = _iv_to_pn(rtwdev, iv, &pn, NULL, key);
+ if (err)
+ return err;
+
+ /* seq.ccmp.pn[] is BE order array */
+ seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5);
+ seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4);
+ seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3);
+ seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2);
+ seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1);
+ seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0);
+
+ ieee80211_set_key_rx_seq(key, 0, &seq);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d iv-%*ph to pn-%*ph\n",
+ __func__, key->keyidx, 8, iv, 6, seq.ccmp.pn);
+
+ return 0;
+}
+
+static int rtw89_tx_iv_to_pn(struct rtw89_dev *rtwdev,
+ struct ieee80211_key_conf *key,
+ u8 *iv)
+{
+ int err;
+ u64 pn;
+
+ err = _iv_to_pn(rtwdev, iv, &pn, NULL, key);
+ if (err)
+ return err;
+
+ atomic64_set(&key->tx_pn, pn);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d iv-%*ph to pn-%llx\n",
+ __func__, key->keyidx, 8, iv, pn);
+
+ return 0;
+}
+
+static int rtw89_rx_pn_get_pmf(struct rtw89_dev *rtwdev,
+ struct ieee80211_key_conf *key,
+ struct rtw89_wow_gtk_info *gtk_info)
+{
+ struct ieee80211_key_seq seq;
+ u64 pn;
+
+ if (key->keyidx == 4)
+ memcpy(gtk_info->igtk[0], key->key, key->keylen);
+ else if (key->keyidx == 5)
+ memcpy(gtk_info->igtk[1], key->key, key->keylen);
+ else
+ return -EINVAL;
+
+ ieee80211_get_key_rx_seq(key, 0, &seq);
+
+ /* seq.ccmp.pn[] is BE order array */
+ pn = u64_encode_bits(seq.ccmp.pn[0], RTW89_KEY_PN_5) |
+ u64_encode_bits(seq.ccmp.pn[1], RTW89_KEY_PN_4) |
+ u64_encode_bits(seq.ccmp.pn[2], RTW89_KEY_PN_3) |
+ u64_encode_bits(seq.ccmp.pn[3], RTW89_KEY_PN_2) |
+ u64_encode_bits(seq.ccmp.pn[4], RTW89_KEY_PN_1) |
+ u64_encode_bits(seq.ccmp.pn[5], RTW89_KEY_PN_0);
+ gtk_info->ipn = cpu_to_le64(pn);
+ gtk_info->igtk_keyid = cpu_to_le32(key->keyidx);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d pn-%llx\n",
+ __func__, key->keyidx, pn);
+
+ return 0;
+}
+
+static int rtw89_rx_pn_set_pmf(struct rtw89_dev *rtwdev,
+ struct ieee80211_key_conf *key,
+ u64 pn)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+ struct ieee80211_key_seq seq;
+
+ if (key->keyidx != aoac_rpt->igtk_key_id)
+ return 0;
+
+ /* seq.ccmp.pn[] is BE order array */
+ seq.ccmp.pn[0] = u64_get_bits(pn, RTW89_KEY_PN_5);
+ seq.ccmp.pn[1] = u64_get_bits(pn, RTW89_KEY_PN_4);
+ seq.ccmp.pn[2] = u64_get_bits(pn, RTW89_KEY_PN_3);
+ seq.ccmp.pn[3] = u64_get_bits(pn, RTW89_KEY_PN_2);
+ seq.ccmp.pn[4] = u64_get_bits(pn, RTW89_KEY_PN_1);
+ seq.ccmp.pn[5] = u64_get_bits(pn, RTW89_KEY_PN_0);
+
+ ieee80211_set_key_rx_seq(key, 0, &seq);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s key %d pn-%*ph\n",
+ __func__, key->keyidx, 6, seq.ccmp.pn);
+
+ return 0;
+}
+
+static void rtw89_wow_get_key_info_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_key_info *key_info = &rtw_wow->key_info;
+ struct rtw89_wow_gtk_info *gtk_info = &rtw_wow->gtk_info;
+ const struct rtw89_cipher_info *cipher_info;
+ bool *err = data;
+ int ret;
+
+ cipher_info = rtw89_cipher_alg_recognize(key->cipher);
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (sta) {
+ ret = rtw89_tx_pn_to_iv(rtwdev, key,
+ key_info->ptk_tx_iv);
+ if (ret)
+ goto err;
+ ret = rtw89_rx_pn_to_iv(rtwdev, key,
+ key_info->ptk_rx_iv);
+ if (ret)
+ goto err;
+
+ rtw_wow->ptk_alg = cipher_info->fw_alg;
+ rtw_wow->ptk_keyidx = key->keyidx;
+ } else {
+ ret = rtw89_rx_pn_to_iv(rtwdev, key,
+ key_info->gtk_rx_iv[key->keyidx]);
+ if (ret)
+ goto err;
+
+ rtw_wow->gtk_alg = cipher_info->fw_alg;
+ key_info->gtk_keyidx = key->keyidx;
+ }
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ ret = rtw89_rx_pn_get_pmf(rtwdev, key, gtk_info);
+ if (ret)
+ goto err;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+		/* WEP only sets a group key in mac80211, but the firmware
+		 * needs both the pairwise key and the group key to be set.
+		 */
+ rtw_wow->ptk_alg = cipher_info->fw_alg;
+ rtw_wow->ptk_keyidx = key->keyidx;
+ rtw_wow->gtk_alg = cipher_info->fw_alg;
+ key_info->gtk_keyidx = key->keyidx;
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "unsupport cipher %x\n",
+ key->cipher);
+ goto err;
+ }
+
+ return;
+err:
+ *err = true;
+}
+
+static void rtw89_wow_set_key_info_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+ struct rtw89_set_key_info_iter_data *iter_data = data;
+ bool update_tx_key_info = iter_data->rx_ready;
+ int ret;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ case WLAN_CIPHER_SUITE_CCMP:
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ if (sta && !update_tx_key_info) {
+ ret = rtw89_rx_iv_to_pn(rtwdev, key,
+ aoac_rpt->ptk_rx_iv);
+ if (ret)
+ goto err;
+ }
+
+ if (sta && update_tx_key_info) {
+ ret = rtw89_tx_iv_to_pn(rtwdev, key,
+ aoac_rpt->ptk_tx_iv);
+ if (ret)
+ goto err;
+ }
+
+ if (!sta && !update_tx_key_info) {
+ ret = rtw89_rx_iv_to_pn(rtwdev, key,
+ aoac_rpt->gtk_rx_iv[key->keyidx]);
+ if (ret)
+ goto err;
+ }
+
+ if (!sta && update_tx_key_info && aoac_rpt->rekey_ok)
+ iter_data->gtk_cipher = key->cipher;
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ if (update_tx_key_info) {
+ if (aoac_rpt->rekey_ok)
+ iter_data->igtk_cipher = key->cipher;
+ } else {
+ ret = rtw89_rx_pn_set_pmf(rtwdev, key,
+ aoac_rpt->igtk_ipn);
+ if (ret)
+ goto err;
+ }
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "unsupport cipher %x\n",
+ key->cipher);
+ goto err;
+ }
+
+ return;
+
+err:
+ iter_data->error = true;
+}
+
+static void rtw89_wow_key_clear(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+
+ memset(&rtw_wow->aoac_rpt, 0, sizeof(rtw_wow->aoac_rpt));
+ memset(&rtw_wow->gtk_info, 0, sizeof(rtw_wow->gtk_info));
+ memset(&rtw_wow->key_info, 0, sizeof(rtw_wow->key_info));
+ rtw_wow->ptk_alg = 0;
+ rtw_wow->gtk_alg = 0;
+}
+
+static void rtw89_wow_construct_key_info(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_key_info *key_info = &rtw_wow->key_info;
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ bool err = false;
+
+ rcu_read_lock();
+ ieee80211_iter_keys_rcu(rtwdev->hw, wow_vif,
+ rtw89_wow_get_key_info_iter, &err);
+ rcu_read_unlock();
+
+ if (err) {
+ rtw89_wow_key_clear(rtwdev);
+ return;
+ }
+
+ key_info->valid_check = RTW89_WOW_VALID_CHECK;
+ key_info->symbol_check_en = RTW89_WOW_SYMBOL_CHK_PTK |
+ RTW89_WOW_SYMBOL_CHK_GTK;
+}
+
+static void rtw89_wow_debug_aoac_rpt(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+
+ if (!rtw89_debug_is_enabled(rtwdev, RTW89_DBG_WOW))
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] rpt_ver = %d\n",
+ aoac_rpt->rpt_ver);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] sec_type = %d\n",
+ aoac_rpt->sec_type);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] key_idx = %d\n",
+ aoac_rpt->key_idx);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] pattern_idx = %d\n",
+ aoac_rpt->pattern_idx);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] rekey_ok = %d\n",
+ aoac_rpt->rekey_ok);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] ptk_tx_iv = %*ph\n",
+ 8, aoac_rpt->ptk_tx_iv);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW,
+ "[aoac_rpt] eapol_key_replay_count = %*ph\n",
+ 8, aoac_rpt->eapol_key_replay_count);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] ptk_rx_iv = %*ph\n",
+ 8, aoac_rpt->ptk_rx_iv);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] gtk_rx_iv[0] = %*ph\n",
+ 8, aoac_rpt->gtk_rx_iv[0]);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] gtk_rx_iv[1] = %*ph\n",
+ 8, aoac_rpt->gtk_rx_iv[1]);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] gtk_rx_iv[2] = %*ph\n",
+ 8, aoac_rpt->gtk_rx_iv[2]);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] gtk_rx_iv[3] = %*ph\n",
+ 8, aoac_rpt->gtk_rx_iv[3]);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] igtk_key_id = %llu\n",
+ aoac_rpt->igtk_key_id);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] igtk_ipn = %llu\n",
+ aoac_rpt->igtk_ipn);
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "[aoac_rpt] igtk = %*ph\n",
+ 32, aoac_rpt->igtk);
+}
+
+static int rtw89_wow_get_aoac_rpt_reg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+ struct rtw89_mac_c2h_info c2h_info = {};
+ struct rtw89_mac_h2c_info h2c_info = {};
+ u8 igtk_ipn[8];
+ u8 key_idx;
+ int ret;
+
+ h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_1;
+ h2c_info.content_len = 2;
+ ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
+ if (ret)
+ return ret;
+
+ aoac_rpt->key_idx =
+ u32_get_bits(c2h_info.u.c2hreg[0], RTW89_C2HREG_AOAC_RPT_1_W0_KEY_IDX);
+ key_idx = aoac_rpt->key_idx;
+ aoac_rpt->gtk_rx_iv[key_idx][0] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_1_W1_IV_0);
+ aoac_rpt->gtk_rx_iv[key_idx][1] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_1_W1_IV_1);
+ aoac_rpt->gtk_rx_iv[key_idx][2] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_1_W1_IV_2);
+ aoac_rpt->gtk_rx_iv[key_idx][3] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_1_W1_IV_3);
+ aoac_rpt->gtk_rx_iv[key_idx][4] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_1_W2_IV_4);
+ aoac_rpt->gtk_rx_iv[key_idx][5] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_1_W2_IV_5);
+ aoac_rpt->gtk_rx_iv[key_idx][6] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_1_W2_IV_6);
+ aoac_rpt->gtk_rx_iv[key_idx][7] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_1_W2_IV_7);
+ aoac_rpt->ptk_rx_iv[0] =
+ u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_0);
+ aoac_rpt->ptk_rx_iv[1] =
+ u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_1);
+ aoac_rpt->ptk_rx_iv[2] =
+ u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_2);
+ aoac_rpt->ptk_rx_iv[3] =
+ u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_1_W3_PTK_IV_3);
+
+ h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_AOAC_RPT_2;
+ h2c_info.content_len = 2;
+ ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
+ if (ret)
+ return ret;
+
+ aoac_rpt->ptk_rx_iv[4] =
+ u32_get_bits(c2h_info.u.c2hreg[0], RTW89_C2HREG_AOAC_RPT_2_W0_PTK_IV_4);
+ aoac_rpt->ptk_rx_iv[5] =
+ u32_get_bits(c2h_info.u.c2hreg[0], RTW89_C2HREG_AOAC_RPT_2_W0_PTK_IV_5);
+ aoac_rpt->ptk_rx_iv[6] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_2_W1_PTK_IV_6);
+ aoac_rpt->ptk_rx_iv[7] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_2_W1_PTK_IV_7);
+ igtk_ipn[0] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_2_W1_IGTK_IPN_IV_0);
+ igtk_ipn[1] =
+ u32_get_bits(c2h_info.u.c2hreg[1], RTW89_C2HREG_AOAC_RPT_2_W1_IGTK_IPN_IV_1);
+ igtk_ipn[2] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_2);
+ igtk_ipn[3] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_3);
+ igtk_ipn[4] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_4);
+ igtk_ipn[5] =
+ u32_get_bits(c2h_info.u.c2hreg[2], RTW89_C2HREG_AOAC_RPT_2_W2_IGTK_IPN_IV_5);
+ igtk_ipn[6] =
+ u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_6);
+ igtk_ipn[7] =
+ u32_get_bits(c2h_info.u.c2hreg[3], RTW89_C2HREG_AOAC_RPT_2_W3_IGTK_IPN_IV_7);
+ aoac_rpt->igtk_ipn = u64_encode_bits(igtk_ipn[0], RTW89_IGTK_IPN_0) |
+ u64_encode_bits(igtk_ipn[1], RTW89_IGTK_IPN_1) |
+ u64_encode_bits(igtk_ipn[2], RTW89_IGTK_IPN_2) |
+ u64_encode_bits(igtk_ipn[3], RTW89_IGTK_IPN_3) |
+ u64_encode_bits(igtk_ipn[4], RTW89_IGTK_IPN_4) |
+ u64_encode_bits(igtk_ipn[5], RTW89_IGTK_IPN_5) |
+ u64_encode_bits(igtk_ipn[6], RTW89_IGTK_IPN_6) |
+ u64_encode_bits(igtk_ipn[7], RTW89_IGTK_IPN_7);
+
+ return 0;
+}
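The u64_encode_bits() chain at the end is plain little-endian byte packing; for instance (editor's sketch, not part of the patch):

/* igtk_ipn[] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
 * packs into aoac_rpt->igtk_ipn = 0x0807060504030201, with
 * igtk_ipn[0] landing in bits 7:0 (RTW89_IGTK_IPN_0).
 */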
+
+static int rtw89_wow_get_aoac_rpt(struct rtw89_dev *rtwdev, bool rx_ready)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ int ret;
+
+ if (!rtw_wow->ptk_alg)
+ return -EPERM;
+
+ if (!rx_ready) {
+ ret = rtw89_wow_get_aoac_rpt_reg(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to get aoac rpt by reg\n");
+ return ret;
+ }
+ } else {
+ ret = rtw89_fw_h2c_wow_request_aoac(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to get aoac rpt by pkt\n");
+ return ret;
+ }
+ }
+
+ rtw89_wow_debug_aoac_rpt(rtwdev);
+
+ return 0;
+}
+
+static struct ieee80211_key_conf *rtw89_wow_gtk_rekey(struct rtw89_dev *rtwdev,
+ u32 cipher, u8 keyidx, u8 *gtk)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ const struct rtw89_cipher_info *cipher_info;
+ struct ieee80211_key_conf *rekey_conf;
+ struct ieee80211_key_conf *key;
+ u8 sz;
+
+ cipher_info = rtw89_cipher_alg_recognize(cipher);
+ sz = struct_size(rekey_conf, key, cipher_info->len);
+ rekey_conf = kmalloc(sz, GFP_KERNEL);
+ if (!rekey_conf)
+ return NULL;
+
+ rekey_conf->cipher = cipher;
+ rekey_conf->keyidx = keyidx;
+ rekey_conf->keylen = cipher_info->len;
+ memcpy(rekey_conf->key, gtk,
+ flex_array_size(rekey_conf, key, cipher_info->len));
+
+	/* ieee80211_gtk_rekey_add() will call set_key(), therefore we
+	 * need to unlock the mutex first.
+	 */
+ mutex_unlock(&rtwdev->mutex);
+ key = ieee80211_gtk_rekey_add(wow_vif, rekey_conf, -1);
+ mutex_lock(&rtwdev->mutex);
+
+ kfree(rekey_conf);
+ if (IS_ERR(key)) {
+ rtw89_err(rtwdev, "ieee80211_gtk_rekey_add failed\n");
+ return NULL;
+ }
+
+ return key;
+}
+
+static void rtw89_wow_update_key_info(struct rtw89_dev *rtwdev, bool rx_ready)
+{
+ struct ieee80211_vif *wow_vif = rtwdev->wow.wow_vif;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
+ struct rtw89_set_key_info_iter_data data = {.error = false,
+ .rx_ready = rx_ready};
+ struct ieee80211_key_conf *key;
+
+ rcu_read_lock();
+ ieee80211_iter_keys_rcu(rtwdev->hw, wow_vif,
+ rtw89_wow_set_key_info_iter, &data);
+ rcu_read_unlock();
+
+ if (data.error) {
+ rtw89_debug(rtwdev, RTW89_DBG_WOW, "%s error\n", __func__);
+ return;
+ }
+
+ if (!data.gtk_cipher)
+ return;
+
+ key = rtw89_wow_gtk_rekey(rtwdev, data.gtk_cipher, aoac_rpt->key_idx,
+ aoac_rpt->gtk);
+ if (!key)
+ return;
+
+ rtw89_rx_iv_to_pn(rtwdev, key,
+ aoac_rpt->gtk_rx_iv[key->keyidx]);
+
+ if (!data.igtk_cipher)
+ return;
+
+ key = rtw89_wow_gtk_rekey(rtwdev, data.igtk_cipher, aoac_rpt->igtk_key_id,
+ aoac_rpt->igtk);
+ if (!key)
+ return;
+
+ rtw89_rx_pn_set_pmf(rtwdev, key, aoac_rpt->igtk_ipn);
+ ieee80211_gtk_rekey_notify(wow_vif, wow_vif->bss_conf.bssid,
+ aoac_rpt->eapol_key_replay_count,
+ GFP_KERNEL);
+}
+
static void rtw89_wow_leave_deep_ps(struct rtw89_dev *rtwdev)
{
__rtw89_leave_ps_mode(rtwdev);
@@ -59,13 +721,20 @@ static void rtw89_wow_set_rx_filter(struct rtw89_dev *rtwdev, bool enable)
static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
{
- u32 wow_reason_reg = rtwdev->chip->wow_reason_reg;
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt;
struct cfg80211_wowlan_nd_info nd_info;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
+ u32 wow_reason_reg;
u8 reason;
+ if (RTW89_CHK_FW_FEATURE(WOW_REASON_V1, &rtwdev->fw))
+ wow_reason_reg = rtwdev->chip->wow_reason_reg[RTW89_WOW_REASON_V1];
+ else
+ wow_reason_reg = rtwdev->chip->wow_reason_reg[RTW89_WOW_REASON_V0];
+
reason = rtw89_read8(rtwdev, wow_reason_reg);
switch (reason) {
case RTW89_WOW_RSN_RX_DEAUTH:
@@ -85,10 +754,7 @@ static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev)
rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: Rx gtk rekey\n");
break;
case RTW89_WOW_RSN_RX_PATTERN_MATCH:
- /* Current firmware and driver don't report pattern index
- * Use pattern_idx to 0 defaultly.
- */
- wakeup.pattern_idx = 0;
+ wakeup.pattern_idx = aoac_rpt->pattern_idx;
rtw89_debug(rtwdev, RTW89_DBG_WOW, "WOW: Rx pattern match packet\n");
break;
case RTW89_WOW_RSN_RX_NLO:
@@ -454,17 +1120,21 @@ static int rtw89_wow_check_fw_status(struct rtw89_dev *rtwdev, bool wow_enable)
static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
{
enum rtw89_fw_type fw_type = wow ? RTW89_FW_WOWLAN : RTW89_FW_NORMAL;
+ enum rtw89_chip_gen chip_gen = rtwdev->chip->chip_gen;
struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
struct ieee80211_vif *wow_vif = rtw_wow->wow_vif;
struct rtw89_vif *rtwvif = (struct rtw89_vif *)wow_vif->drv_priv;
+ enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id;
const struct rtw89_chip_info *chip = rtwdev->chip;
bool include_bb = !!chip->bbmcu_nr;
+ bool disable_intr_for_dlfw = false;
struct ieee80211_sta *wow_sta;
struct rtw89_sta *rtwsta = NULL;
bool is_conn = true;
int ret;
- rtw89_hci_disable_intr(rtwdev);
+ if (chip_id == RTL8852C || chip_id == RTL8922A)
+ disable_intr_for_dlfw = true;
wow_sta = ieee80211_find_sta(wow_vif, rtwvif->bssid);
if (wow_sta)
@@ -472,12 +1142,18 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
else
is_conn = false;
+ if (disable_intr_for_dlfw)
+ rtw89_hci_disable_intr(rtwdev);
+
ret = rtw89_fw_download(rtwdev, fw_type, include_bb);
if (ret) {
rtw89_warn(rtwdev, "download fw failed\n");
return ret;
}
+ if (disable_intr_for_dlfw)
+ rtw89_hci_enable_intr(rtwdev);
+
rtw89_phy_init_rf_reg(rtwdev, true);
ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta,
@@ -519,8 +1195,10 @@ static int rtw89_wow_swap_fw(struct rtw89_dev *rtwdev, bool wow)
rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, wow_vif);
}
+ if (chip_gen == RTW89_CHIP_BE)
+ rtw89_phy_rfk_pre_ntfy_and_wait(rtwdev, RTW89_PHY_0, 5);
+
rtw89_mac_hw_mgnt_sec(rtwdev, wow);
- rtw89_hci_enable_intr(rtwdev);
return 0;
}
@@ -595,19 +1273,38 @@ static int rtw89_wow_disable_trx_pre(struct rtw89_dev *rtwdev)
rtw89_err(rtwdev, "failed to config mac\n");
return ret;
}
+
+	/* Before enabling the interrupt, get the AOAC report via registers,
+	 * since RX is not enabled yet; the RX-related IVs must be synced from
+	 * firmware to mac80211 before the driver receives any RX packets.
+	 * After enabling the interrupt, the AOAC report can be fetched via
+	 * H2C/C2H packets, which also carry the TX IV and the complete rekey
+	 * info, so update the TX-related IV and the new GTK info if a rekey
+	 * happened.
+	 */
+ ret = rtw89_wow_get_aoac_rpt(rtwdev, false);
+ if (!ret)
+ rtw89_wow_update_key_info(rtwdev, false);
+
rtw89_hci_enable_intr(rtwdev);
+ ret = rtw89_wow_get_aoac_rpt(rtwdev, true);
+ if (!ret)
+ rtw89_wow_update_key_info(rtwdev, true);
return 0;
}
static int rtw89_wow_disable_trx_post(struct rtw89_dev *rtwdev)
{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+ struct ieee80211_vif *vif = rtw_wow->wow_vif;
int ret;
ret = rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
if (ret)
rtw89_err(rtwdev, "cfg ppdu status\n");
+ rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true);
+
return ret;
}
@@ -618,6 +1315,7 @@ static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev)
int ret;
rtw89_wow_pattern_write(rtwdev);
+ rtw89_wow_construct_key_info(rtwdev);
ret = rtw89_fw_h2c_keep_alive(rtwdev, rtwvif, true);
if (ret) {
@@ -631,6 +1329,16 @@ static int rtw89_wow_fw_start(struct rtw89_dev *rtwdev)
goto out;
}
+ ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, true);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to enable GTK offload\n");
+ goto out;
+ }
+
+ ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, true);
+ if (ret)
+ rtw89_warn(rtwdev, "wow: failed to enable arp offload\n");
+
ret = rtw89_wow_cfg_wake(rtwdev, true);
if (ret) {
rtw89_err(rtwdev, "wow: failed to config wake\n");
@@ -667,6 +1375,17 @@ static int rtw89_wow_fw_stop(struct rtw89_dev *rtwdev)
goto out;
}
+ ret = rtw89_fw_h2c_wow_gtk_ofld(rtwdev, rtwvif, false);
+ if (ret) {
+ rtw89_err(rtwdev, "wow: failed to disable GTK offload\n");
+ goto out;
+ }
+
+ ret = rtw89_fw_h2c_arp_offload(rtwdev, rtwvif, false);
+ if (ret)
+ rtw89_warn(rtwdev, "wow: failed to disable arp offload\n");
+
+ rtw89_wow_key_clear(rtwdev);
rtw89_fw_release_general_pkt_list(rtwdev, true);
ret = rtw89_wow_cfg_wake(rtwdev, false);
diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h
index a2f7b2e3cdb4..0d90add0e88d 100644
--- a/drivers/net/wireless/realtek/rtw89/wow.h
+++ b/drivers/net/wireless/realtek/rtw89/wow.h
@@ -5,6 +5,26 @@
#ifndef __RTW89_WOW_H__
#define __RTW89_WOW_H__
+#define RTW89_KEY_PN_0 GENMASK_ULL(7, 0)
+#define RTW89_KEY_PN_1 GENMASK_ULL(15, 8)
+#define RTW89_KEY_PN_2 GENMASK_ULL(23, 16)
+#define RTW89_KEY_PN_3 GENMASK_ULL(31, 24)
+#define RTW89_KEY_PN_4 GENMASK_ULL(39, 32)
+#define RTW89_KEY_PN_5 GENMASK_ULL(47, 40)
+
+#define RTW89_IGTK_IPN_0 GENMASK_ULL(7, 0)
+#define RTW89_IGTK_IPN_1 GENMASK_ULL(15, 8)
+#define RTW89_IGTK_IPN_2 GENMASK_ULL(23, 16)
+#define RTW89_IGTK_IPN_3 GENMASK_ULL(31, 24)
+#define RTW89_IGTK_IPN_4 GENMASK_ULL(39, 32)
+#define RTW89_IGTK_IPN_5 GENMASK_ULL(47, 40)
+#define RTW89_IGTK_IPN_6 GENMASK_ULL(55, 48)
+#define RTW89_IGTK_IPN_7 GENMASK_ULL(63, 56)
+
+#define RTW89_WOW_VALID_CHECK 0xDD
+#define RTW89_WOW_SYMBOL_CHK_PTK BIT(0)
+#define RTW89_WOW_SYMBOL_CHK_GTK BIT(1)
+
enum rtw89_wake_reason {
RTW89_WOW_RSN_RX_PTK_REKEY = 0x1,
RTW89_WOW_RSN_RX_GTK_REKEY = 0x2,
@@ -15,7 +35,74 @@ enum rtw89_wake_reason {
RTW89_WOW_RSN_RX_NLO = 0x55,
};
+enum rtw89_fw_alg {
+ RTW89_WOW_FW_ALG_WEP40 = 0x1,
+ RTW89_WOW_FW_ALG_WEP104 = 0x2,
+ RTW89_WOW_FW_ALG_TKIP = 0x3,
+ RTW89_WOW_FW_ALG_CCMP = 0x6,
+ RTW89_WOW_FW_ALG_CCMP_256 = 0x7,
+ RTW89_WOW_FW_ALG_GCMP = 0x8,
+ RTW89_WOW_FW_ALG_GCMP_256 = 0x9,
+ RTW89_WOW_FW_ALG_AES_CMAC = 0xa,
+};
+
+struct rtw89_cipher_suite {
+ u8 oui[3];
+ u8 type;
+} __packed;
+
+struct rtw89_rsn_ie {
+ u8 tag_number;
+ u8 tag_length;
+ __le16 rsn_version;
+ struct rtw89_cipher_suite group_cipher_suite;
+ __le16 pairwise_cipher_suite_cnt;
+ struct rtw89_cipher_suite pairwise_cipher_suite;
+ __le16 akm_cipher_suite_cnt;
+ struct rtw89_cipher_suite akm_cipher_suite;
+} __packed;
+
+struct rtw89_cipher_info {
+ u32 cipher;
+ u8 fw_alg;
+ enum ieee80211_key_len len;
+};
+
+struct rtw89_set_key_info_iter_data {
+ u32 gtk_cipher;
+ u32 igtk_cipher;
+ bool rx_ready;
+ bool error;
+};
+
+static inline int rtw89_wow_get_sec_hdr_len(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_wow_param *rtw_wow = &rtwdev->wow;
+
+ if (!(rtwdev->chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)))
+ return 0;
+
+ switch (rtw_wow->ptk_alg) {
+ case RTW89_WOW_FW_ALG_WEP40:
+ return 4;
+ case RTW89_WOW_FW_ALG_TKIP:
+ case RTW89_WOW_FW_ALG_CCMP:
+ case RTW89_WOW_FW_ALG_GCMP_256:
+ return 8;
+ default:
+ return 0;
+ }
+}
+
+#ifdef CONFIG_PM
int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan);
int rtw89_wow_resume(struct rtw89_dev *rtwdev);
+void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb);
+#else
+static inline
+void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb)
+{
+}
+#endif
#endif
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index 211fa25b9a78..3425a473b9a1 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -410,10 +410,11 @@ static int rsi_mac80211_start(struct ieee80211_hw *hw)
/**
* rsi_mac80211_stop() - This is the last handler that 802.11 module calls.
* @hw: Pointer to the ieee80211_hw structure.
+ * @suspend: true if this was called from the suspend flow.
*
* Return: None.
*/
-static void rsi_mac80211_stop(struct ieee80211_hw *hw)
+static void rsi_mac80211_stop(struct ieee80211_hw *hw, bool suspend)
{
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index 8e7b757475d2..1e578533e473 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -1519,36 +1519,7 @@ static struct sdio_driver rsi_driver = {
}
#endif
};
-
-/**
- * rsi_module_init() - This function registers the sdio module.
- * @void: Void.
- *
- * Return: 0 on success.
- */
-static int rsi_module_init(void)
-{
- int ret;
-
- ret = sdio_register_driver(&rsi_driver);
- rsi_dbg(INIT_ZONE, "%s: Registering driver\n", __func__);
- return ret;
-}
-
-/**
- * rsi_module_exit() - This function unregisters the sdio module.
- * @void: Void.
- *
- * Return: None.
- */
-static void rsi_module_exit(void)
-{
- sdio_unregister_driver(&rsi_driver);
- rsi_dbg(INFO_ZONE, "%s: Unregistering driver\n", __func__);
-}
-
-module_init(rsi_module_init);
-module_exit(rsi_module_exit);
+module_sdio_driver(rsi_driver);
MODULE_AUTHOR("Redpine Signals Inc");
MODULE_DESCRIPTION("Common SDIO layer for RSI drivers");
diff --git a/drivers/net/wireless/silabs/wfx/bus_sdio.c b/drivers/net/wireless/silabs/wfx/bus_sdio.c
index 909d5f346a01..f290eecde773 100644
--- a/drivers/net/wireless/silabs/wfx/bus_sdio.c
+++ b/drivers/net/wireless/silabs/wfx/bus_sdio.c
@@ -267,7 +267,6 @@ struct sdio_driver wfx_sdio_driver = {
.probe = wfx_sdio_probe,
.remove = wfx_sdio_remove,
.drv = {
- .owner = THIS_MODULE,
.of_match_table = wfx_sdio_of_match,
}
};
diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c
index a904602f02ce..216d43c8bd6e 100644
--- a/drivers/net/wireless/silabs/wfx/sta.c
+++ b/drivers/net/wireless/silabs/wfx/sta.c
@@ -805,7 +805,7 @@ int wfx_start(struct ieee80211_hw *hw)
return 0;
}
-void wfx_stop(struct ieee80211_hw *hw)
+void wfx_stop(struct ieee80211_hw *hw, bool suspend)
{
struct wfx_dev *wdev = hw->priv;
diff --git a/drivers/net/wireless/silabs/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h
index c478ddcb934b..7817c7c6f3dd 100644
--- a/drivers/net/wireless/silabs/wfx/sta.h
+++ b/drivers/net/wireless/silabs/wfx/sta.h
@@ -20,7 +20,7 @@ struct wfx_sta_priv {
/* mac80211 interface */
int wfx_start(struct ieee80211_hw *hw);
-void wfx_stop(struct ieee80211_hw *hw);
+void wfx_stop(struct ieee80211_hw *hw, bool suspend);
int wfx_config(struct ieee80211_hw *hw, u32 changed);
int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value);
void wfx_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx);
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 8ef1d06b9bbd..c259da8161e4 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -90,7 +90,7 @@ out:
return ret;
}
-void cw1200_stop(struct ieee80211_hw *dev)
+void cw1200_stop(struct ieee80211_hw *dev, bool suspend)
{
struct cw1200_common *priv = dev->priv;
LIST_HEAD(list);
diff --git a/drivers/net/wireless/st/cw1200/sta.h b/drivers/net/wireless/st/cw1200/sta.h
index a49f187c7049..b955b92cfd73 100644
--- a/drivers/net/wireless/st/cw1200/sta.h
+++ b/drivers/net/wireless/st/cw1200/sta.h
@@ -13,7 +13,7 @@
/* mac80211 API */
int cw1200_start(struct ieee80211_hw *dev);
-void cw1200_stop(struct ieee80211_hw *dev);
+void cw1200_stop(struct ieee80211_hw *dev, bool suspend);
int cw1200_add_interface(struct ieee80211_hw *dev,
struct ieee80211_vif *vif);
void cw1200_remove_interface(struct ieee80211_hw *dev,
diff --git a/drivers/net/wireless/ti/wl1251/acx.h b/drivers/net/wireless/ti/wl1251/acx.h
index 1da6ab664e41..af5ec7f12231 100644
--- a/drivers/net/wireless/ti/wl1251/acx.h
+++ b/drivers/net/wireless/ti/wl1251/acx.h
@@ -229,7 +229,7 @@ struct acx_rx_msdu_lifetime {
* === ==========
* 31:12 Reserved - Always equal to 0.
* 11 Association - When set, the WiLink receives all association
- * related frames (association request/response, reassocation
+ * related frames (association request/response, reassociation
* request/response, and disassociation). When clear, these frames
* are discarded.
* 10 Auth/De auth - When set, the WiLink receives all authentication
diff --git a/drivers/net/wireless/ti/wl1251/cmd.h b/drivers/net/wireless/ti/wl1251/cmd.h
index e5874186f9d7..39159201b97e 100644
--- a/drivers/net/wireless/ti/wl1251/cmd.h
+++ b/drivers/net/wireless/ti/wl1251/cmd.h
@@ -89,8 +89,6 @@ enum wl1251_commands {
struct wl1251_cmd_header {
u16 id;
u16 status;
- /* payload */
- u8 data[];
} __packed;
struct wl1251_command {
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 0da2d29dd7bd..bb53d681c11b 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -415,7 +415,7 @@ out:
return ret;
}
-static void wl1251_op_stop(struct ieee80211_hw *hw)
+static void wl1251_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct wl1251 *wl = hw->priv;
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 4e5b351f80f0..c705081249d6 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -323,25 +323,7 @@ static struct sdio_driver wl1251_sdio_driver = {
.remove = wl1251_sdio_remove,
.drv.pm = &wl1251_sdio_pm_ops,
};
-
-static int __init wl1251_sdio_init(void)
-{
- int err;
-
- err = sdio_register_driver(&wl1251_sdio_driver);
- if (err)
- wl1251_error("failed to register sdio driver: %d", err);
- return err;
-}
-
-static void __exit wl1251_sdio_exit(void)
-{
- sdio_unregister_driver(&wl1251_sdio_driver);
- wl1251_notice("unloaded");
-}
-
-module_init(wl1251_sdio_init);
-module_exit(wl1251_sdio_exit);
+module_sdio_driver(wl1251_sdio_driver);
MODULE_DESCRIPTION("TI WL1251 SDIO helpers");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/ti/wl1251/wl12xx_80211.h b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
index 7e28fe435b43..3d5b0df5b231 100644
--- a/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wl1251/wl12xx_80211.h
@@ -65,7 +65,6 @@ struct ieee80211_header {
u8 sa[ETH_ALEN];
u8 bssid[ETH_ALEN];
__le16 seq_ctl;
- u8 payload[];
} __packed;
struct wl12xx_ie_header {
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index 2ccac1cdec01..39d8eebb9b6e 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -1177,8 +1177,49 @@ static int wl18xx_hw_init(struct wl1271 *wl)
return ret;
}
-static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
- struct wl_fw_status *fw_status)
+static void wl18xx_convert_fw_status_8_9_1(struct wl1271 *wl,
+ void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ struct wl18xx_fw_status_8_9_1 *int_fw_status = raw_fw_status;
+
+ fw_status->intr = le32_to_cpu(int_fw_status->intr);
+ fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
+ fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
+ fw_status->tx_results_counter = int_fw_status->tx_results_counter;
+ fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
+
+ fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
+ fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
+ fw_status->link_fast_bitmap =
+ le32_to_cpu(int_fw_status->link_fast_bitmap);
+ fw_status->total_released_blks =
+ le32_to_cpu(int_fw_status->total_released_blks);
+ fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
+
+ fw_status->counters.tx_released_pkts =
+ int_fw_status->counters.tx_released_pkts;
+ fw_status->counters.tx_lnk_free_pkts =
+ int_fw_status->counters.tx_lnk_free_pkts;
+ fw_status->counters.tx_lnk_sec_pn16 =
+ int_fw_status->counters.tx_lnk_sec_pn16;
+ fw_status->counters.tx_voice_released_blks =
+ int_fw_status->counters.tx_voice_released_blks;
+ fw_status->counters.tx_last_rate =
+ int_fw_status->counters.tx_last_rate;
+ fw_status->counters.tx_last_rate_mbps =
+ int_fw_status->counters.tx_last_rate_mbps;
+ fw_status->counters.hlid =
+ int_fw_status->counters.hlid;
+
+ fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
+
+ fw_status->priv = &int_fw_status->priv;
+}
+
+static void wl18xx_convert_fw_status_8_9_0(struct wl1271 *wl,
+ void *raw_fw_status,
+ struct wl_fw_status *fw_status)
{
struct wl18xx_fw_status *int_fw_status = raw_fw_status;
@@ -1214,6 +1255,15 @@ static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
fw_status->priv = &int_fw_status->priv;
}
+static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ if (wl->chip.fw_ver[FW_VER_MAJOR] == 0)
+ wl18xx_convert_fw_status_8_9_0(wl, raw_fw_status, fw_status);
+ else
+ wl18xx_convert_fw_status_8_9_1(wl, raw_fw_status, fw_status);
+}
+
static void wl18xx_set_tx_desc_csum(struct wl1271 *wl,
struct wl1271_tx_hw_descr *desc,
struct sk_buff *skb)
@@ -1515,12 +1565,29 @@ static int wl18xx_handle_static_data(struct wl1271 *wl,
{
struct wl18xx_static_data_priv *static_data_priv =
(struct wl18xx_static_data_priv *) static_data->priv;
+ size_t fw_status_len;
strscpy(wl->chip.phy_fw_ver_str, static_data_priv->phy_version,
sizeof(wl->chip.phy_fw_ver_str));
wl1271_info("PHY firmware version: %s", static_data_priv->phy_version);
+ /* Adjust the firmware status size according to the firmware version */
+ if (wl->chip.fw_ver[FW_VER_MAJOR] == 0)
+ fw_status_len = sizeof(struct wl18xx_fw_status);
+ else
+ fw_status_len = sizeof(struct wl18xx_fw_status_8_9_1);
+
+ if (wl->fw_status_len != fw_status_len) {
+ void *new_status = krealloc(wl->raw_fw_status, fw_status_len,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!new_status)
+ return -ENOMEM;
+
+ wl->raw_fw_status = new_status;
+ wl->fw_status_len = fw_status_len;
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index 55d9b0861c53..beef393853ef 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -129,6 +129,14 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
wl1271_free_tx_id(wl, id);
}
+static u8 wl18xx_next_tx_idx(u8 idx)
+{
+ if (++idx >= WL18XX_FW_MAX_TX_STATUS_DESC)
+ idx = 0;
+
+ return idx;
+}
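The helper gives the same wrap-around behavior as the old modulo expression without a per-iteration division (editor's sketch, not part of the patch; the ring size shown is illustrative):

/* With a ring of, say, 8 descriptors, wl18xx_next_tx_idx() walks
 * ... 6, 7, 0, 1 ... exactly like the previous
 * (i + 1) % WL18XX_FW_MAX_TX_STATUS_DESC form.
 */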
+
void wl18xx_tx_immediate_complete(struct wl1271 *wl)
{
struct wl18xx_fw_status_priv *status_priv =
@@ -161,9 +169,8 @@ void wl18xx_tx_immediate_complete(struct wl1271 *wl)
return;
}
- for (i = priv->last_fw_rls_idx;
- i != status_priv->fw_release_idx;
- i = (i + 1) % WL18XX_FW_MAX_TX_STATUS_DESC) {
+ for (i = priv->last_fw_rls_idx; i != status_priv->fw_release_idx;
+ i = wl18xx_next_tx_idx(i)) {
wl18xx_tx_complete_packet(wl,
status_priv->released_tx_desc[i]);
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index b642e0c437bb..de6c671c4be6 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -13,7 +13,7 @@
/* minimum FW required for driver */
#define WL18XX_CHIP_VER 8
#define WL18XX_IFTYPE_VER 9
-#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE
+#define WL18XX_MAJOR_VER 0
#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE
#define WL18XX_MINOR_VER 58
@@ -155,6 +155,66 @@ struct wl18xx_fw_status {
struct wl18xx_fw_status_priv priv;
} __packed;
+struct wl18xx_fw_packet_counters_8_9_1 {
+ /* Cumulative counter of released packets per AC */
+ u8 tx_released_pkts[NUM_TX_QUEUES];
+
+ /* Cumulative counter of freed packets per HLID */
+ u8 tx_lnk_free_pkts[WL18XX_MAX_LINKS];
+
+ /* PN16 of last TKIP/AES seq-num per HLID */
+ __le16 tx_lnk_sec_pn16[WL18XX_MAX_LINKS];
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
+
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+
+ /* Tx rate or Tx rate estimate pre-calculated by fw in mbps units */
+ u8 tx_last_rate_mbps;
+
+ /* hlid for which the rates were reported */
+ u8 hlid;
+} __packed;
+
+/* FW status registers */
+struct wl18xx_fw_status_8_9_1 {
+ __le32 intr;
+ u8 fw_rx_counter;
+ u8 drv_rx_counter;
+ u8 reserved;
+ u8 tx_results_counter;
+ __le32 rx_pkt_descs[WL18XX_NUM_RX_DESCRIPTORS];
+
+ __le32 fw_localtime;
+
+ /*
+ * A bitmap (where each bit represents a single HLID)
+ * to indicate if the station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /*
+ * A bitmap (where each bit represents a single HLID) to indicate
+ * if the station is in Fast mode
+ */
+ __le32 link_fast_bitmap;
+
+ /* Cumulative counter of total released mem blocks since FW-reset */
+ __le32 total_released_blks;
+
+ /* Size (in Memory Blocks) of TX pool */
+ __le32 tx_total;
+
+ struct wl18xx_fw_packet_counters_8_9_1 counters;
+
+ __le32 log_start_addr;
+
+ /* Private status to be used by the lower drivers */
+ struct wl18xx_fw_status_priv priv;
+} __packed;
+
#define WL18XX_PHY_VERSION_MAX_LEN 20
struct wl18xx_static_data_priv {
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index a939fd89a7f5..cd8ad0fe59cc 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -333,6 +333,14 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
wl->links[link].wlvif = wlvif;
/*
+ * Take the last sec_pn16 value from the current FW status. On recovery,
+ * we might not have fw_status yet, and tx_lnk_sec_pn16[] will be NULL.
+ */
+ if (wl->fw_status->counters.tx_lnk_sec_pn16)
+ wl->links[link].prev_sec_pn16 =
+ le16_to_cpu(wl->fw_status->counters.tx_lnk_sec_pn16[link]);
+
+ /*
* Take saved value for total freed packets from wlvif, in case this is
* recovery/resume
*/
@@ -360,6 +368,7 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
wl->links[*hlid].allocated_pkts = 0;
wl->links[*hlid].prev_freed_pkts = 0;
+ wl->links[*hlid].prev_sec_pn16 = 0;
wl->links[*hlid].ba_bitmap = 0;
eth_zero_addr(wl->links[*hlid].addr);
@@ -1566,13 +1575,6 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
wlvif->band));
- if (!cmd->supported_rates) {
- wl1271_debug(DEBUG_CMD,
- "peer has no supported rates yet, configuring basic rates: 0x%x",
- wlvif->basic_rate_set);
- cmd->supported_rates = cpu_to_le32(wlvif->basic_rate_set);
- }
-
wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x",
cmd->supported_rates, sta->uapsd_queues);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index f2609d5b6bf7..4c2f2608ef3b 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -208,8 +208,6 @@ enum cmd_templ {
struct wl1271_cmd_header {
__le16 id;
__le16 status;
- /* payload */
- u8 data[];
} __packed;
#define WL1271_CMD_MAX_PARAMS 572
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 2499dc908305..6c3a8ea9613e 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -83,7 +83,7 @@ int wlcore_event_fw_logger(struct wl1271 *wl)
/* Copy initial part up to the end of ring buffer */
len = min(actual_len, available_len);
wl12xx_copy_fwlog(wl, &buffer[start_loc], len);
- clear_ptr = addr_ptr + start_loc + actual_len;
+ clear_ptr = addr_ptr + start_loc + len;
if (clear_ptr == buff_end_ptr)
clear_ptr = buff_start_ptr;
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index ef12169f8044..0c77b8524160 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -379,6 +379,8 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
+ struct wl12xx_vif *wlvifsta;
+ struct wl12xx_vif *wlvifap;
struct wl12xx_vif *wlvif;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
@@ -392,7 +394,7 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
if (ret < 0)
return ret;
- wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
+ wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, status);
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
@@ -410,23 +412,100 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
}
+ /* Find an authorized STA vif */
+ wlvifsta = NULL;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ if (wlvif->sta.hlid != WL12XX_INVALID_LINK_ID &&
+ test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags)) {
+ wlvifsta = wlvif;
+ break;
+ }
+ }
+
+ /* Find a started AP vif */
+ wlvifap = NULL;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
+ wlvif->inconn_count == 0 &&
+ test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ wlvifap = wlvif;
+ break;
+ }
+ }
for_each_set_bit(i, wl->links_map, wl->num_links) {
- u8 diff;
+ u16 diff16, sec_pn16;
+ u8 diff, tx_lnk_free_pkts;
+
lnk = &wl->links[i];
/* prevent wrap-around in freed-packets counter */
- diff = (status->counters.tx_lnk_free_pkts[i] -
- lnk->prev_freed_pkts) & 0xff;
+ tx_lnk_free_pkts = status->counters.tx_lnk_free_pkts[i];
+ diff = (tx_lnk_free_pkts - lnk->prev_freed_pkts) & 0xff;
+
+ if (diff) {
+ lnk->allocated_pkts -= diff;
+ lnk->prev_freed_pkts = tx_lnk_free_pkts;
+ }
+
+ /* Get the current sec_pn16 value if present */
+ if (status->counters.tx_lnk_sec_pn16)
+ sec_pn16 = __le16_to_cpu(status->counters.tx_lnk_sec_pn16[i]);
+ else
+ sec_pn16 = 0;
+ /* prevent wrap-around in pn16 counter */
+ diff16 = (sec_pn16 - lnk->prev_sec_pn16) & 0xffff;
- if (diff == 0)
+ /* FIXME: since free_pkts is an 8-bit counter of packets that
+ * rolls over, it can become zero. If it is zero, then we
+ * omit processing below. Is that really correct?
+ */
+ if (tx_lnk_free_pkts == 0)
continue;
- lnk->allocated_pkts -= diff;
- lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
+ /* For a station that has an authorized link: */
+ if (wlvifsta && wlvifsta->sta.hlid == i) {
+ if (wlvifsta->encryption_type == KEY_TKIP ||
+ wlvifsta->encryption_type == KEY_AES) {
+ if (diff16) {
+ lnk->prev_sec_pn16 = sec_pn16;
+ /* accumulate the total_freed_pkts
+ * counter according to the PN from
+ * firmware
+ */
+ lnk->total_freed_pkts += diff16;
+ }
+ } else {
+ if (diff)
+ /* accumulate the total_freed_pkts
+ * counter according to the free packets
+ * count from firmware
+ */
+ lnk->total_freed_pkts += diff;
+ }
+ }
- /* accumulate the prev_freed_pkts counter */
- lnk->total_freed_pkts += diff;
+ /* For an AP that has been started */
+ if (wlvifap && test_bit(i, wlvifap->ap.sta_hlid_map)) {
+ if (wlvifap->encryption_type == KEY_TKIP ||
+ wlvifap->encryption_type == KEY_AES) {
+ if (diff16) {
+ lnk->prev_sec_pn16 = sec_pn16;
+ /* accumulate the total_freed_pkts
+ * counter according to the PN from
+ * firmware
+ */
+ lnk->total_freed_pkts += diff16;
+ }
+ } else {
+ if (diff)
+ /* accumulate the total_freed_pkts
+ * counter according to the free packets
+ * count from firmware
+ */
+ lnk->total_freed_pkts += diff;
+ }
+ }
}
/* prevent wrap-around in total blocks counter */
@@ -2006,7 +2085,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
-static void wlcore_op_stop(struct ieee80211_hw *hw)
+static void wlcore_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct wl1271 *wl = hw->priv;
@@ -3537,6 +3616,10 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
return ret;
}
+ /* Store AP encryption key type */
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wlvif->encryption_type = key_type;
+
/*
* reconfiguring arp response if the unicast (or common)
* encryption key type was changed
@@ -5139,19 +5222,23 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
/* Add station (AP mode) */
if (is_ap &&
- old_state == IEEE80211_STA_NOTEXIST &&
- new_state == IEEE80211_STA_NONE) {
+ old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
ret = wl12xx_sta_add(wl, wlvif, sta);
if (ret)
return ret;
+ wl_sta->fw_added = true;
+
wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
}
/* Remove station (AP mode) */
if (is_ap &&
- old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST) {
+ old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH) {
+ wl_sta->fw_added = false;
+
/* must not fail */
wl12xx_sta_remove(wl, wlvif, sta);
@@ -5165,11 +5252,6 @@ static int wl12xx_update_sta_state(struct wl1271 *wl,
if (ret < 0)
return ret;
- /* reconfigure rates */
- ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
- if (ret < 0)
- return ret;
-
ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap,
true,
wl_sta->hlid);
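The fw_status hunk above extends the existing 8-bit freed-packets accounting with a 16-bit security-PN counter, and both use the same masked-subtraction idiom to survive wrap-around. A standalone illustration of that idiom:

#include <stdint.h>
#include <stdio.h>

/* Difference between two samples of a free-running counter, tolerant
 * of wrap-around: unsigned subtraction is modular, so masking to the
 * counter width recovers the number of increments since last sample.
 */
static uint8_t diff8(uint8_t cur, uint8_t prev)
{
    return (uint8_t)((cur - prev) & 0xff);
}

static uint16_t diff16(uint16_t cur, uint16_t prev)
{
    return (uint16_t)((cur - prev) & 0xffff);
}

int main(void)
{
    /* counter wrapped from 250 past 255 to 4: 10 increments happened */
    printf("%u\n", diff8(4, 250));    /* prints 10 */
    printf("%u\n", diff16(3, 65530)); /* prints 9 */
    return 0;
}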
diff --git a/drivers/net/wireless/ti/wlcore/sysfs.c b/drivers/net/wireless/ti/wlcore/sysfs.c
index f0c7e09b314d..c07acfcbbd9c 100644
--- a/drivers/net/wireless/ti/wlcore/sysfs.c
+++ b/drivers/net/wireless/ti/wlcore/sysfs.c
@@ -19,11 +19,8 @@ static ssize_t bt_coex_state_show(struct device *dev,
struct wl1271 *wl = dev_get_drvdata(dev);
ssize_t len;
- len = PAGE_SIZE;
-
mutex_lock(&wl->mutex);
- len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
- wl->sg_enabled);
+ len = sysfs_emit(buf, "%d\n\n0 - off\n1 - on\n", wl->sg_enabled);
mutex_unlock(&wl->mutex);
return len;
@@ -78,13 +75,11 @@ static ssize_t hw_pg_ver_show(struct device *dev,
struct wl1271 *wl = dev_get_drvdata(dev);
ssize_t len;
- len = PAGE_SIZE;
-
mutex_lock(&wl->mutex);
if (wl->hw_pg_ver >= 0)
- len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
+ len = sysfs_emit(buf, "%d\n", wl->hw_pg_ver);
else
- len = snprintf(buf, len, "n/a\n");
+ len = sysfs_emit(buf, "n/a\n");
mutex_unlock(&wl->mutex);
return len;
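sysfs_emit() replaces the snprintf(buf, PAGE_SIZE, ...) pattern above: it knows that show() buffers are a full page, checks the buffer's alignment, and bounds the write internally, so callers no longer carry a length around. A kernel-style sketch of the resulting callback shape (the attribute is illustrative, not one of this driver's):

/* Illustrative show() callback using sysfs_emit(); not a real attribute. */
static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
    struct wl1271 *wl = dev_get_drvdata(dev);
    ssize_t len;

    mutex_lock(&wl->mutex);
    len = sysfs_emit(buf, "%d\n", wl->sg_enabled);
    mutex_unlock(&wl->mutex);

    return len;
}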
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 7bd3ce2f0804..464587d16ab2 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -140,11 +140,8 @@ EXPORT_SYMBOL(wl12xx_is_dummy_packet);
static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, struct ieee80211_sta *sta)
{
- if (sta) {
- struct wl1271_station *wl_sta;
-
- wl_sta = (struct wl1271_station *)sta->drv_priv;
- return wl_sta->hlid;
+ if (sta && wl1271_station(sta)->fw_added) {
+ return wl1271_station(sta)->hlid;
} else {
struct ieee80211_hdr *hdr;
diff --git a/drivers/net/wireless/ti/wlcore/wl12xx_80211.h b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
index 1dd7ecc11f86..602915c4da26 100644
--- a/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
+++ b/drivers/net/wireless/ti/wlcore/wl12xx_80211.h
@@ -66,7 +66,6 @@ struct ieee80211_header {
u8 sa[ETH_ALEN];
u8 bssid[ETH_ALEN];
__le16 seq_ctl;
- u8 payload[];
} __packed;
struct wl12xx_ie_header {
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index eefae3f867b9..5bdcb341629c 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -151,6 +151,9 @@ struct wl_fw_status {
*/
u8 *tx_lnk_free_pkts;
+ /* PN16 of last TKIP/AES seq-num per HLID */
+ __le16 *tx_lnk_sec_pn16;
+
/* Cumulative counter of released Voice memory blocks */
u8 tx_voice_released_blks;
@@ -259,6 +262,7 @@ struct wl1271_link {
/* accounting for allocated / freed packets in FW */
u8 allocated_pkts;
u8 prev_freed_pkts;
+ u16 prev_sec_pn16;
u8 addr[ETH_ALEN];
@@ -324,6 +328,7 @@ struct wl12xx_rx_filter {
struct wl1271_station {
u8 hlid;
+ bool fw_added;
bool in_connection;
/*
@@ -335,6 +340,11 @@ struct wl1271_station {
u64 total_freed_pkts;
};
+static inline struct wl1271_station *wl1271_station(struct ieee80211_sta *sta)
+{
+ return (struct wl1271_station *)sta->drv_priv;
+}
+
struct wl12xx_vif {
struct wl1271 *wl;
struct list_head list;
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c
index 59e1fc0018df..d86e6ff4523d 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.c
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.c
@@ -69,6 +69,10 @@ static bool mlo;
module_param(mlo, bool, 0444);
MODULE_PARM_DESC(mlo, "Support MLO");
+static bool multi_radio;
+module_param(multi_radio, bool, 0444);
+MODULE_PARM_DESC(multi_radio, "Support Multiple Radios per wiphy");
+
/**
* enum hwsim_regtest - the type of regulatory tests we offer
*
@@ -216,7 +220,7 @@ static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = {
struct hwsim_vif_priv {
u32 magic;
- u32 skip_beacons;
+ u32 skip_beacons[IEEE80211_MLD_MAX_NUM_LINKS];
u8 bssid[ETH_ALEN];
bool assoc;
bool bcn_en;
@@ -669,6 +673,10 @@ struct mac80211_hwsim_data {
struct ieee80211_iface_limit if_limits[3];
int n_if_limits;
+ struct ieee80211_iface_combination if_combination_radio;
+ struct wiphy_radio_freq_range radio_range[NUM_NL80211_BANDS];
+ struct wiphy_radio radio[NUM_NL80211_BANDS];
+
u32 ciphers[ARRAY_SIZE(hwsim_ciphers)];
struct mac_address addresses[2];
@@ -917,6 +925,7 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
[HWSIM_ATTR_PMSR_SUPPORT] = NLA_POLICY_NESTED(hwsim_pmsr_capa_policy),
[HWSIM_ATTR_PMSR_RESULT] = NLA_POLICY_NESTED(hwsim_pmsr_peers_result_policy),
+ [HWSIM_ATTR_MULTI_RADIO] = { .type = NLA_FLAG },
};
#if IS_REACHABLE(CONFIG_VIRTIO)
@@ -1721,6 +1730,9 @@ static void mac80211_hwsim_rx(struct mac80211_hwsim_data *data,
sp->active_links_rx &= ~BIT(link_id);
else
sp->active_links_rx |= BIT(link_id);
+
+ rx_status->link_valid = true;
+ rx_status->link_id = link_id;
}
rcu_read_unlock();
}
@@ -2095,7 +2107,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
}
-static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
+static void mac80211_hwsim_stop(struct ieee80211_hw *hw, bool suspend)
{
struct mac80211_hwsim_data *data = hw->priv;
int i;
@@ -2133,13 +2145,16 @@ static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
}
#ifdef CONFIG_MAC80211_DEBUGFS
-static void mac80211_hwsim_vif_add_debugfs(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static void
+mac80211_hwsim_link_add_debugfs(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *link_conf,
+ struct dentry *dir)
{
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
- debugfs_create_u32("skip_beacons", 0600, vif->debugfs_dir,
- &vp->skip_beacons);
+ debugfs_create_u32("skip_beacons", 0600, dir,
+ &vp->skip_beacons[link_conf->link_id]);
}
#endif
@@ -2214,8 +2229,8 @@ static void __mac80211_hwsim_beacon_tx(struct ieee80211_bss_conf *link_conf,
/* TODO: get MCS */
int bitrate = 100;
- if (vp->skip_beacons) {
- vp->skip_beacons--;
+ if (vp->skip_beacons[link_conf->link_id]) {
+ vp->skip_beacons[link_conf->link_id]--;
dev_kfree_skb(skb);
return;
}
@@ -2307,6 +2322,10 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif, link_id))
ieee80211_csa_finish(vif, link_id);
+
+ if (link_conf->color_change_active &&
+ ieee80211_beacon_cntdwn_is_complete(vif, link_id))
+ ieee80211_color_change_finish(vif, link_id);
}
static enum hrtimer_restart
@@ -2351,6 +2370,7 @@ static const char * const hwsim_chanwidths[] = {
[NL80211_CHAN_WIDTH_4] = "4MHz",
[NL80211_CHAN_WIDTH_8] = "8MHz",
[NL80211_CHAN_WIDTH_16] = "16MHz",
+ [NL80211_CHAN_WIDTH_320] = "eht320",
};
static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
@@ -3250,7 +3270,7 @@ static int mac80211_hwsim_switch_vif_chanctx(struct ieee80211_hw *hw,
hwsim_clear_chanctx_magic(vifs[i].old_ctx);
break;
default:
- WARN_ON("Invalid mode");
+ WARN(1, "Invalid mode %d\n", mode);
}
}
return 0;
@@ -3922,7 +3942,7 @@ out:
#ifdef CONFIG_MAC80211_DEBUGFS
#define HWSIM_DEBUGFS_OPS \
- .vif_add_debugfs = mac80211_hwsim_vif_add_debugfs,
+ .link_add_debugfs = mac80211_hwsim_link_add_debugfs,
#else
#define HWSIM_DEBUGFS_OPS
#endif
@@ -4007,6 +4027,7 @@ struct hwsim_new_radio_params {
bool reg_strict;
bool p2p_device;
bool use_chanctx;
+ bool multi_radio;
bool destroy_on_close;
const char *hwname;
bool no_vif;
@@ -4083,6 +4104,12 @@ static int append_radio_msg(struct sk_buff *skb, int id,
return ret;
}
+ if (param->multi_radio) {
+ ret = nla_put_flag(skb, HWSIM_ATTR_MULTI_RADIO);
+ if (ret < 0)
+ return ret;
+ }
+
if (param->hwname) {
ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME,
strlen(param->hwname), param->hwname);
@@ -4122,7 +4149,8 @@ out_err:
static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
{
- .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .types_mask = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -4229,7 +4257,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
},
},
{
- .types_mask = BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -4380,8 +4409,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = {
static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
{
- /* TODO: should we support other types, e.g., P2P? */
- .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .types_mask = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -4505,7 +4534,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
},
},
{
- .types_mask = BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.he_cap = {
.has_he = true,
.he_cap_elem = {
@@ -4676,8 +4706,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = {
static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
{
- /* TODO: should we support other types, e.g., P2P? */
- .types_mask = BIT(NL80211_IFTYPE_STATION),
+ .types_mask = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
.he_6ghz_capa = {
.capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START |
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP |
@@ -4822,7 +4852,8 @@ static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = {
},
},
{
- .types_mask = BIT(NL80211_IFTYPE_AP),
+ .types_mask = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO),
.he_6ghz_capa = {
.capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START |
IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP |
@@ -5099,6 +5130,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
struct net *net;
int idx, i;
int n_limits = 0;
+ int n_bands = 0;
if (WARN_ON(param->channels > 1 && !param->use_chanctx))
return -EINVAL;
@@ -5202,22 +5234,22 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
n_limits++;
}
+ data->if_combination.radar_detect_widths =
+ BIT(NL80211_CHAN_WIDTH_5) |
+ BIT(NL80211_CHAN_WIDTH_10) |
+ BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160);
+
if (data->use_chanctx) {
hw->wiphy->max_scan_ssids = 255;
hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
hw->wiphy->max_remain_on_channel_duration = 1000;
- data->if_combination.radar_detect_widths = 0;
data->if_combination.num_different_channels = data->channels;
} else {
data->if_combination.num_different_channels = 1;
- data->if_combination.radar_detect_widths =
- BIT(NL80211_CHAN_WIDTH_5) |
- BIT(NL80211_CHAN_WIDTH_10) |
- BIT(NL80211_CHAN_WIDTH_20_NOHT) |
- BIT(NL80211_CHAN_WIDTH_20) |
- BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80) |
- BIT(NL80211_CHAN_WIDTH_160);
}
if (!n_limits) {
@@ -5313,6 +5345,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
wiphy_ext_feature_set(hw->wiphy,
NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT);
+ wiphy_ext_feature_set(hw->wiphy,
+ NL80211_EXT_FEATURE_BSS_COLOR);
hw->wiphy->interface_modes = param->iftypes;
@@ -5333,6 +5367,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) {
struct ieee80211_supported_band *sband = &data->bands[band];
+ struct wiphy_radio_freq_range *radio_range;
+ const struct ieee80211_channel *c;
+ struct wiphy_radio *radio;
sband->band = band;
@@ -5406,8 +5443,36 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
mac80211_hwsim_sband_capab(sband);
hw->wiphy->bands[band] = sband;
+
+ if (!param->multi_radio)
+ continue;
+
+ c = sband->channels;
+ radio_range = &data->radio_range[n_bands];
+ radio_range->start_freq = ieee80211_channel_to_khz(c) - 10000;
+
+ c += sband->n_channels - 1;
+ radio_range->end_freq = ieee80211_channel_to_khz(c) + 10000;
+
+ radio = &data->radio[n_bands++];
+ radio->freq_range = radio_range;
+ radio->n_freq_range = 1;
+ radio->iface_combinations = &data->if_combination_radio;
+ radio->n_iface_combinations = 1;
+ }
+
+ if (param->multi_radio) {
+ hw->wiphy->radio = data->radio;
+ hw->wiphy->n_radio = n_bands;
+
+ memcpy(&data->if_combination_radio, &data->if_combination,
+ sizeof(data->if_combination));
+ data->if_combination.num_different_channels *= n_bands;
}
+ if (data->use_chanctx)
+ data->if_combination.radar_detect_widths = 0;
+
/* By default all radios belong to the first group */
data->group = 1;
mutex_init(&data->mutex);
@@ -6025,6 +6090,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
else
param.use_chanctx = (param.channels > 1);
+ if (info->attrs[HWSIM_ATTR_MULTI_RADIO])
+ param.multi_radio = true;
+
if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2])
param.reg_alpha2 =
nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]);
@@ -6105,7 +6173,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
param.mlo = info->attrs[HWSIM_ATTR_MLO_SUPPORT];
- if (param.mlo)
+ if (param.mlo || param.multi_radio)
param.use_chanctx = true;
if (info->attrs[HWSIM_ATTR_RADIO_NAME]) {
@@ -6558,17 +6626,13 @@ static void hwsim_virtio_rx_done(struct virtqueue *vq)
static int init_vqs(struct virtio_device *vdev)
{
- vq_callback_t *callbacks[HWSIM_NUM_VQS] = {
- [HWSIM_VQ_TX] = hwsim_virtio_tx_done,
- [HWSIM_VQ_RX] = hwsim_virtio_rx_done,
- };
- const char *names[HWSIM_NUM_VQS] = {
- [HWSIM_VQ_TX] = "tx",
- [HWSIM_VQ_RX] = "rx",
+ struct virtqueue_info vqs_info[HWSIM_NUM_VQS] = {
+ [HWSIM_VQ_TX] = { "tx", hwsim_virtio_tx_done },
+ [HWSIM_VQ_RX] = { "rx", hwsim_virtio_rx_done },
};
return virtio_find_vqs(vdev, HWSIM_NUM_VQS,
- hwsim_vqs, callbacks, names, NULL);
+ hwsim_vqs, vqs_info, NULL);
}
static int fill_vq(struct virtqueue *vq)
@@ -6662,7 +6726,6 @@ MODULE_DEVICE_TABLE(virtio, id_table);
static struct virtio_driver virtio_hwsim = {
.driver.name = KBUILD_MODNAME,
- .driver.owner = THIS_MODULE,
.id_table = id_table,
.probe = hwsim_virtio_probe,
.remove = hwsim_virtio_remove,
@@ -6751,11 +6814,11 @@ static int __init init_mac80211_hwsim(void)
param.regd = &hwsim_world_regdom_custom_01;
break;
case HWSIM_REGTEST_CUSTOM_WORLD:
- param.regd = &hwsim_world_regdom_custom_01;
+ param.regd = &hwsim_world_regdom_custom_03;
break;
case HWSIM_REGTEST_CUSTOM_WORLD_2:
if (i == 0)
- param.regd = &hwsim_world_regdom_custom_01;
+ param.regd = &hwsim_world_regdom_custom_03;
else if (i == 1)
param.regd = &hwsim_world_regdom_custom_02;
break;
@@ -6799,7 +6862,8 @@ static int __init init_mac80211_hwsim(void)
param.p2p_device = support_p2p_device;
param.mlo = mlo;
- param.use_chanctx = channels > 1 || mlo;
+ param.multi_radio = multi_radio;
+ param.use_chanctx = channels > 1 || mlo || multi_radio;
param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK;
if (param.p2p_device)
param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
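In the multi-radio path above, each per-band frequency range is derived from the first and last channel of the band, padded by 10 MHz (10000 kHz) on either side so the range covers the outermost channels' full width. A plain-C mimic of that computation (the kernel code uses ieee80211_channel_to_khz() on struct ieee80211_channel; the types here are stand-ins):

#include <stdint.h>
#include <stdio.h>

struct chan { uint32_t center_mhz; };  /* stand-in for ieee80211_channel */

struct freq_range { uint32_t start_khz, end_khz; };

static struct freq_range band_range(const struct chan *chans, int n)
{
    struct freq_range r;

    /* 10 MHz guard below the first and above the last channel center */
    r.start_khz = chans[0].center_mhz * 1000 - 10000;
    r.end_khz = chans[n - 1].center_mhz * 1000 + 10000;
    return r;
}

int main(void)
{
    struct chan ch2ghz[] = { { 2412 }, { 2437 }, { 2462 } };
    struct freq_range r = band_range(ch2ghz, 3);

    printf("%u..%u kHz\n", r.start_khz, r.end_khz); /* 2402000..2472000 */
    return 0;
}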
diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.h b/drivers/net/wireless/virtual/mac80211_hwsim.h
index 21b1afd83dc1..f32fc3a492b0 100644
--- a/drivers/net/wireless/virtual/mac80211_hwsim.h
+++ b/drivers/net/wireless/virtual/mac80211_hwsim.h
@@ -157,6 +157,9 @@ enum hwsim_commands {
* to provide details about peer measurement request (nl80211_peer_measurement_attrs)
* @HWSIM_ATTR_PMSR_RESULT: nested attributed used with %HWSIM_CMD_REPORT_PMSR
* to provide peer measurement result (nl80211_peer_measurement_attrs)
+ * @HWSIM_ATTR_MULTI_RADIO: Register multiple wiphy radios (flag).
+ * Adds one radio for each band. Number of supported channels will be set for
+ * each radio instead of for the wiphy.
* @__HWSIM_ATTR_MAX: enum limit
*/
enum hwsim_attrs {
@@ -189,6 +192,7 @@ enum hwsim_attrs {
HWSIM_ATTR_PMSR_SUPPORT,
HWSIM_ATTR_PMSR_REQUEST,
HWSIM_ATTR_PMSR_RESULT,
+ HWSIM_ATTR_MULTI_RADIO,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
@@ -257,7 +261,7 @@ enum hwsim_tx_rate_flags {
};
/**
- * struct hwsim_tx_rate - rate selection/status
+ * struct hwsim_tx_rate_flag - rate selection/status
*
* @idx: rate index to attempt to send with
* @flags: the rate flags according to &enum hwsim_tx_rate_flags
@@ -295,7 +299,7 @@ enum hwsim_vqs {
};
/**
- * enum hwsim_rate_info -- bitrate information.
+ * enum hwsim_rate_info_attributes - bitrate information.
*
* Information about a receiving or transmitting bitrate
* that can be mapped to struct rate_info
diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c
index 6a84ec58d618..4ee374080466 100644
--- a/drivers/net/wireless/virtual/virt_wifi.c
+++ b/drivers/net/wireless/virtual/virt_wifi.c
@@ -136,6 +136,9 @@ static struct ieee80211_supported_band band_5ghz = {
/* Assigned at module init. Guaranteed locally-administered and unicast. */
static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {};
+#define VIRT_WIFI_SSID "VirtWifi"
+#define VIRT_WIFI_SSID_LEN 8
+
static void virt_wifi_inform_bss(struct wiphy *wiphy)
{
u64 tsf = div_u64(ktime_get_boottime_ns(), 1000);
@@ -146,8 +149,8 @@ static void virt_wifi_inform_bss(struct wiphy *wiphy)
u8 ssid[8];
} __packed ssid = {
.tag = WLAN_EID_SSID,
- .len = 8,
- .ssid = "VirtWifi",
+ .len = VIRT_WIFI_SSID_LEN,
+ .ssid = VIRT_WIFI_SSID,
};
informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz,
@@ -213,6 +216,8 @@ struct virt_wifi_netdev_priv {
struct net_device *upperdev;
u32 tx_packets;
u32 tx_failed;
+ u32 connect_requested_ssid_len;
+ u8 connect_requested_ssid[IEEE80211_MAX_SSID_LEN];
u8 connect_requested_bss[ETH_ALEN];
bool is_up;
bool is_connected;
@@ -229,6 +234,12 @@ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev,
if (priv->being_deleted || !priv->is_up)
return -EBUSY;
+ if (!sme->ssid)
+ return -EINVAL;
+
+ priv->connect_requested_ssid_len = sme->ssid_len;
+ memcpy(priv->connect_requested_ssid, sme->ssid, sme->ssid_len);
+
could_schedule = schedule_delayed_work(&priv->connect, HZ * 2);
if (!could_schedule)
return -EBUSY;
@@ -252,12 +263,15 @@ static void virt_wifi_connect_complete(struct work_struct *work)
container_of(work, struct virt_wifi_netdev_priv, connect.work);
u8 *requested_bss = priv->connect_requested_bss;
bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid);
+ bool right_ssid = priv->connect_requested_ssid_len == VIRT_WIFI_SSID_LEN &&
+ !memcmp(priv->connect_requested_ssid, VIRT_WIFI_SSID,
+ priv->connect_requested_ssid_len);
u16 status = WLAN_STATUS_SUCCESS;
if (is_zero_ether_addr(requested_bss))
requested_bss = NULL;
- if (!priv->is_up || (requested_bss && !right_addr))
+ if (!priv->is_up || (requested_bss && !right_addr) || !right_ssid)
status = WLAN_STATUS_UNSPECIFIED_FAILURE;
else
priv->is_connected = true;
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
index 900c063bd724..f90c33d19b39 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c
@@ -326,7 +326,7 @@ out:
return r;
}
-void zd_op_stop(struct ieee80211_hw *hw)
+void zd_op_stop(struct ieee80211_hw *hw, bool suspend)
{
struct zd_mac *mac = zd_hw_mac(hw);
struct zd_chip *chip = &mac->chip;
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.h b/drivers/net/wireless/zydas/zd1211rw/zd_mac.h
index 5ff84bdc5a4c..053748a474ec 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.h
@@ -303,7 +303,7 @@ void zd_mac_tx_failed(struct urb *urb);
void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
int zd_op_start(struct ieee80211_hw *hw);
-void zd_op_stop(struct ieee80211_hw *hw);
+void zd_op_stop(struct ieee80211_hw *hw, bool suspend);
int zd_restore_settings(struct zd_mac *mac);
#ifdef DEBUG
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index f3b567a13ded..a8a94edf2a70 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -1476,7 +1476,7 @@ static void zd_usb_stop(struct zd_usb *usb)
{
dev_dbg_f(zd_usb_dev(usb), "\n");
- zd_op_stop(zd_usb_to_hw(usb));
+ zd_op_stop(zd_usb_to_hw(usb), false);
zd_usb_disable_tx(usb);
zd_usb_disable_rx(usb);
@@ -1698,7 +1698,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
int r, i, req_len, actual_req_len, try_count = 0;
struct usb_device *udev;
struct usb_req_read_regs *req = NULL;
- unsigned long timeout;
+ unsigned long time_left;
bool retry = false;
if (count < 1) {
@@ -1748,9 +1748,9 @@ retry_read:
goto error;
}
- timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion,
- msecs_to_jiffies(50));
- if (!timeout) {
+ time_left = wait_for_completion_timeout(&usb->intr.read_regs.completion,
+ msecs_to_jiffies(50));
+ if (!time_left) {
disable_read_regs_int(usb);
dev_dbg_f(zd_usb_dev(usb), "read timed out\n");
r = -ETIMEDOUT;
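The zd_usb.c rename reflects what wait_for_completion_timeout() actually returns: 0 on timeout, otherwise the number of jiffies remaining, so time_left is the honest name. A kernel-style sketch of the idiom (not standalone; the function name is hypothetical):

/* wait_for_completion_timeout() returns 0 if the wait timed out,
 * otherwise the jiffies left before the deadline.
 */
static int example_wait(struct completion *done)
{
    unsigned long time_left;

    time_left = wait_for_completion_timeout(done, msecs_to_jiffies(50));
    if (!time_left)
        return -ETIMEDOUT;  /* never completed within 50 ms */

    return 0;               /* completed with time_left jiffies spare */
}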
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
index 2fe724d623c0..33d6342124bc 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
@@ -33,7 +33,8 @@ static int ipc_devlink_get_param(struct devlink *dl, u32 id,
/* Set the param values for the specific param ID's */
static int ipc_devlink_set_param(struct devlink *dl, u32 id,
- struct devlink_param_gset_ctx *ctx)
+ struct devlink_param_gset_ctx *ctx,
+ struct netlink_ext_ack *extack)
{
struct iosm_devlink *ipc_devlink = devlink_priv(dl);
@@ -210,7 +211,7 @@ static int ipc_devlink_create_region(struct iosm_devlink *devlink)
rc = PTR_ERR(devlink->cd_regions[i]);
dev_err(devlink->dev, "Devlink region fail,err %d", rc);
/* Delete previously created regions */
- for ( ; i >= 0; i--)
+ for (i--; i >= 0; i--)
devlink_region_destroy(devlink->cd_regions[i]);
goto region_create_fail;
}
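The iosm hunk above fixes the unwind loop after a failed devlink_region_create(): at that point index i names the region that failed and was never created, so cleanup must start at i - 1. A runnable illustration of the off-by-one:

#include <stdio.h>

int main(void)
{
    int i = 3;  /* loop index at failure time; region 3 was not created */

    /* The buggy form, for ( ; i >= 0; i--), would also "destroy" the
     * failed index 3. The fixed form skips it:
     */
    for (i--; i >= 0; i--)
        printf("destroy region %d\n", i);  /* prints 2, 1, 0 */

    return 0;
}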
diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c
index 3f72ae943b29..d5a9360323d2 100644
--- a/drivers/net/wwan/mhi_wwan_mbim.c
+++ b/drivers/net/wwan/mhi_wwan_mbim.c
@@ -42,6 +42,8 @@
#define MHI_MBIM_LINK_HASH_SIZE 8
#define LINK_HASH(session) ((session) % MHI_MBIM_LINK_HASH_SIZE)
+#define WDS_BIND_MUX_DATA_PORT_MUX_ID 112
+
struct mhi_mbim_link {
struct mhi_mbim_context *mbim;
struct net_device *ndev;
@@ -93,6 +95,15 @@ static struct mhi_mbim_link *mhi_mbim_get_link_rcu(struct mhi_mbim_context *mbim
return NULL;
}
+static int mhi_mbim_get_link_mux_id(struct mhi_controller *cntrl)
+{
+ if (strcmp(cntrl->name, "foxconn-dw5934e") == 0 ||
+ strcmp(cntrl->name, "foxconn-t99w515") == 0)
+ return WDS_BIND_MUX_DATA_PORT_MUX_ID;
+
+ return 0;
+}
+
static struct sk_buff *mbim_tx_fixup(struct sk_buff *skb, unsigned int session,
u16 tx_seq)
{
@@ -596,7 +607,7 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
{
struct mhi_controller *cntrl = mhi_dev->mhi_cntrl;
struct mhi_mbim_context *mbim;
- int err;
+ int err, link_id;
mbim = devm_kzalloc(&mhi_dev->dev, sizeof(*mbim), GFP_KERNEL);
if (!mbim)
@@ -617,8 +628,11 @@ static int mhi_mbim_probe(struct mhi_device *mhi_dev, const struct mhi_device_id
/* Number of transfer descriptors determines size of the queue */
mbim->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);
+ /* Get the corresponding mux_id from mhi */
+ link_id = mhi_mbim_get_link_mux_id(cntrl);
+
/* Register wwan link ops with MHI controller representing WWAN instance */
- return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, 0);
+ return wwan_register_ops(&cntrl->mhi_dev->dev, &mhi_mbim_wwan_ops, mbim, link_id);
}
static void mhi_mbim_remove(struct mhi_device *mhi_dev)
@@ -648,7 +662,6 @@ static struct mhi_driver mhi_mbim_driver = {
.id_table = mhi_mbim_id_table,
.driver = {
.name = "mhi_wwan_mbim",
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.c b/drivers/net/wwan/t7xx/t7xx_netdev.c
index 3ef4a8a4f8fd..91fa082e9cab 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.c
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.c
@@ -253,22 +253,27 @@ static void t7xx_ccmni_wwan_setup(struct net_device *dev)
dev->netdev_ops = &ccmni_netdev_ops;
}
-static void t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
+static int t7xx_init_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
{
int i;
/* one HW, but shared with multiple net devices,
* so add a dummy device for NAPI.
*/
- init_dummy_netdev(&ctlb->dummy_dev);
+ ctlb->dummy_dev = alloc_netdev_dummy(0);
+ if (!ctlb->dummy_dev)
+ return -ENOMEM;
+
atomic_set(&ctlb->napi_usr_refcnt, 0);
ctlb->is_napi_en = false;
for (i = 0; i < RXQ_NUM; i++) {
ctlb->napi[i] = &ctlb->hif_ctrl->rxq[i].napi;
- netif_napi_add_weight(&ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
+ netif_napi_add_weight(ctlb->dummy_dev, ctlb->napi[i], t7xx_dpmaif_napi_rx_poll,
NIC_NAPI_POLL_BUDGET);
}
+
+ return 0;
}
static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
@@ -279,6 +284,7 @@ static void t7xx_uninit_netdev_napi(struct t7xx_ccmni_ctrl *ctlb)
netif_napi_del(ctlb->napi[i]);
ctlb->napi[i] = NULL;
}
+ free_netdev(ctlb->dummy_dev);
}
static int t7xx_ccmni_wwan_newlink(void *ctxt, struct net_device *dev, u32 if_id,
@@ -480,6 +486,7 @@ int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
{
struct device *dev = &t7xx_dev->pdev->dev;
struct t7xx_ccmni_ctrl *ctlb;
+ int ret;
ctlb = devm_kzalloc(dev, sizeof(*ctlb), GFP_KERNEL);
if (!ctlb)
@@ -495,7 +502,12 @@ int t7xx_ccmni_init(struct t7xx_pci_dev *t7xx_dev)
if (!ctlb->hif_ctrl)
return -ENOMEM;
- t7xx_init_netdev_napi(ctlb);
+ ret = t7xx_init_netdev_napi(ctlb);
+ if (ret) {
+ t7xx_dpmaif_hif_exit(ctlb->hif_ctrl);
+ return ret;
+ }
+
init_md_status_notifier(t7xx_dev);
return 0;
}
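The t7xx hunks convert the embedded dummy NAPI device into a heap allocation via alloc_netdev_dummy(), which can fail; init therefore grows an error path, and the matching free_netdev() lands in the uninit path. A kernel-style sketch of the shape, based on the hunks above (condensed, not a drop-in):

/* Dummy netdev as NAPI anchor, heap-allocated so init can fail cleanly. */
static int example_napi_init(struct t7xx_ccmni_ctrl *ctlb)
{
    ctlb->dummy_dev = alloc_netdev_dummy(0);
    if (!ctlb->dummy_dev)
        return -ENOMEM;  /* caller must unwind anything done earlier */

    /* ... netif_napi_add_weight(ctlb->dummy_dev, ...) per RX queue ... */
    return 0;
}

static void example_napi_uninit(struct t7xx_ccmni_ctrl *ctlb)
{
    /* ... netif_napi_del() per RX queue ... */
    free_netdev(ctlb->dummy_dev);  /* pairs with alloc_netdev_dummy() */
}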
diff --git a/drivers/net/wwan/t7xx/t7xx_netdev.h b/drivers/net/wwan/t7xx/t7xx_netdev.h
index f5ed6f99a145..b18312f49844 100644
--- a/drivers/net/wwan/t7xx/t7xx_netdev.h
+++ b/drivers/net/wwan/t7xx/t7xx_netdev.h
@@ -48,7 +48,7 @@ struct t7xx_ccmni_ctrl {
unsigned int md_sta;
struct t7xx_fsm_notifier md_status_notify;
bool wwan_is_registered;
- struct net_device dummy_dev;
+ struct net_device *dummy_dev;
struct napi_struct *napi[RXQ_NUM];
atomic_t napi_usr_refcnt;
bool is_napi_en;
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index e0b1e7a616ca..10a8c1080b10 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -852,6 +852,7 @@ static void t7xx_pci_remove(struct pci_dev *pdev)
static const struct pci_device_id t7xx_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x4d75) },
+ { PCI_DEVICE(0x14c0, 0x4d75) }, /* Dell DW5933e */
{ }
};
MODULE_DEVICE_TABLE(pci, t7xx_pci_table);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 1fcbd83f7ff2..17421da139f2 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -390,9 +390,8 @@ bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
void xenvif_carrier_on(struct xenvif *vif);
-/* Callback from stack when TX packet can be released */
-void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf,
- bool zerocopy_success);
+/* Callbacks from stack when TX packet can be released */
+extern const struct ubuf_info_ops xenvif_ubuf_ops;
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 7cff90aa8d24..325fcb3d1075 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -358,7 +358,7 @@ static int xenvif_change_mtu(struct net_device *dev, int mtu)
if (mtu > max)
return -EINVAL;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
}
@@ -593,7 +593,7 @@ int xenvif_init_queue(struct xenvif_queue *queue)
for (i = 0; i < MAX_PENDING_REQS; i++) {
queue->pending_tx_info[i].callback_struct = (struct ubuf_info_msgzc)
- { { .callback = xenvif_zerocopy_callback },
+ { { .ops = &xenvif_ubuf_ops },
{ { .ctx = NULL,
.desc = i } } };
queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
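The MTU setter above switches to WRITE_ONCE() because dev->mtu can be read locklessly on the data path; the same change appears in xen-netfront below. Pairing the marked write with READ_ONCE() on the reader side keeps the access tear-free and documents the intentional data race. An illustrative sketch (EXAMPLE_MAX_MTU and both functions are stand-ins):

/* Marked write in the configuration path, marked read on the
 * lockless fast path.
 */
static int example_change_mtu(struct net_device *dev, int mtu)
{
    if (mtu > EXAMPLE_MAX_MTU)
        return -EINVAL;

    WRITE_ONCE(dev->mtu, mtu);          /* store cannot be torn */
    return 0;
}

static bool example_frame_fits(const struct net_device *dev,
                               unsigned int len)
{
    return len <= READ_ONCE(dev->mtu);  /* lockless reader */
}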
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index ef76850d9bcd..5836995d6774 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -38,6 +38,7 @@
#include <linux/if_vlan.h>
#include <linux/udp.h>
#include <linux/highmem.h>
+#include <linux/skbuff_ref.h>
#include <net/tcp.h>
@@ -1156,7 +1157,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s
uarg = skb_shinfo(skb)->destructor_arg;
/* increase inflight counter to offset decrement in callback */
atomic_inc(&queue->inflight_packets);
- uarg->callback(NULL, uarg, true);
+ uarg->ops->complete(NULL, uarg, true);
skb_shinfo(skb)->destructor_arg = NULL;
/* Fill the skb with the new (local) frags. */
@@ -1278,8 +1279,9 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
return work_done;
}
-void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
- bool zerocopy_success)
+static void xenvif_zerocopy_callback(struct sk_buff *skb,
+ struct ubuf_info *ubuf_base,
+ bool zerocopy_success)
{
unsigned long flags;
pending_ring_idx_t index;
@@ -1312,6 +1314,10 @@ void xenvif_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *ubuf_base,
xenvif_skb_zerocopy_complete(queue);
}
+const struct ubuf_info_ops xenvif_ubuf_ops = {
+ .complete = xenvif_zerocopy_callback,
+};
+
static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
{
struct gnttab_unmap_grant_ref *gop;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 8d2aee88526c..4265c1cd0ff7 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1376,7 +1376,7 @@ static int xennet_change_mtu(struct net_device *dev, int mtu)
if (mtu > max)
return -EINVAL;
- dev->mtu = mtu;
+ WRITE_ONCE(dev->mtu, mtu);
return 0;
}