author     Felix Fietkau <nbd@nbd.name>    2023-11-17 17:59:29 +0100
committer  Felix Fietkau <nbd@nbd.name>    2023-12-08 14:29:54 +0100
commit     6407ef8d2bcb4a0a6284de09cd77bd1868c1d6ea (patch)
tree       3ac06a7a371a68205e76b0bcf62e09ef0608dea6 /target
parent     0c1cffd00ecbf3dde3c7f6437920bd41e6e268e5 (diff)
download   openwrt-6407ef8d2bcb4a0a6284de09cd77bd1868c1d6ea.tar.gz
           openwrt-6407ef8d2bcb4a0a6284de09cd77bd1868c1d6ea.tar.bz2
           openwrt-6407ef8d2bcb4a0a6284de09cd77bd1868c1d6ea.zip
kernel: backport upstream mediatek WED changes
Reorder and update existing patches

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Diffstat (limited to 'target')
-rw-r--r-- target/linux/generic/backport-5.15/722-v6.0-net-ethernet-mtk_eth_wed-add-axi-bus-support.patch | 13
-rw-r--r-- target/linux/generic/backport-5.15/728-v6.1-02-net-ethernet-mtk_eth_wed-add-missing-put_device-in-m.patch | 4
-rw-r--r-- target/linux/generic/backport-5.15/728-v6.1-03-net-ethernet-mtk_eth_wed-add-missing-of_node_put.patch | 6
-rw-r--r-- target/linux/generic/backport-5.15/729-02-v6.1-net-ethernet-mtk_wed-introduce-wed-wo-support.patch | 2
-rw-r--r-- target/linux/generic/backport-5.15/729-03-v6.1-net-ethernet-mtk_wed-rename-tx_wdma-array-in-rx_wdma.patch | 6
-rw-r--r-- target/linux/generic/backport-5.15/729-04-v6.1-net-ethernet-mtk_wed-add-configure-wed-wo-support.patch | 48
-rw-r--r-- target/linux/generic/backport-5.15/729-09-v6.2-net-ethernet-mtk_wed-add-wcid-overwritten-support-fo.patch | 4
-rw-r--r-- target/linux/generic/backport-5.15/729-10-v6.2-net-ethernet-mtk_wed-return-status-value-in-mtk_wdma.patch | 2
-rw-r--r-- target/linux/generic/backport-5.15/729-12-v6.2-net-ethernet-mtk_wed-update-mtk_wed_stop.patch | 2
-rw-r--r-- target/linux/generic/backport-5.15/729-13-v6.2-net-ethernet-mtk_wed-add-mtk_wed_rx_reset-routine.patch | 14
-rw-r--r-- target/linux/generic/backport-5.15/729-14-v6.2-net-ethernet-mtk_wed-add-reset-to-tx_ring_setup-call.patch | 10
-rw-r--r-- target/linux/generic/backport-5.15/729-23-v6.3-net-ethernet-mtk_wed-add-reset-to-rx_ring_setup-call.patch | 8
-rw-r--r-- target/linux/generic/backport-5.15/730-15-v6.3-net-ethernet-mtk_wed-No-need-to-clear-memory-after-a.patch | 2
-rw-r--r-- target/linux/generic/backport-5.15/730-17-v6.3-net-ethernet-mtk_wed-fix-possible-deadlock-if-mtk_we.patch | 2
-rw-r--r-- target/linux/generic/backport-5.15/750-v6.5-05-net-ethernet-mtk_eth_soc-add-version-in-mtk_soc_data.patch | 4
-rw-r--r-- target/linux/generic/backport-5.15/751-01-v6.4-net-ethernet-mtk_eth_soc-add-code-for-offloading-flo.patch (renamed from target/linux/generic/pending-6.1/736-01-net-ethernet-mtk_eth_soc-add-code-for-offloading-flo.patch) | 2
-rw-r--r-- target/linux/generic/backport-5.15/751-02-v6.4-net-ethernet-mediatek-mtk_ppe-prefer-newly-added-l2-.patch (renamed from target/linux/generic/pending-5.15/736-02-net-ethernet-mediatek-mtk_ppe-prefer-newly-added-l2-.patch) | 2
-rw-r--r-- target/linux/generic/backport-5.15/751-03-v6.4-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch (renamed from target/linux/generic/pending-6.1/736-03-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch) | 18
-rw-r--r-- target/linux/generic/backport-5.15/751-04-v6.4-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch (renamed from target/linux/generic/pending-5.15/736-04-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch) | 22
-rw-r--r-- target/linux/generic/backport-5.15/752-01-v6.6-net-ethernet-mtk_wed-add-some-more-info-in-wed_txinf.patch | 45
-rw-r--r-- target/linux/generic/backport-5.15/752-02-v6.6-net-ethernet-mtk_wed-minor-change-in-wed_-tx-rx-info.patch | 47
-rw-r--r-- target/linux/generic/backport-5.15/752-03-v6.6-net-ethernet-mtk_eth_soc-rely-on-mtk_pse_port-defini.patch | 29
-rw-r--r-- target/linux/generic/backport-5.15/752-04-v6.6-net-ethernet-mtk_wed-check-update_wo_rx_stats-in-mtk.patch | 26
-rw-r--r-- target/linux/generic/backport-5.15/752-05-v6.7-net-ethernet-mtk_wed-do-not-assume-offload-callbacks.patch | 68
-rw-r--r-- target/linux/generic/backport-5.15/752-06-v6.7-net-ethernet-mtk_wed-introduce-versioning-utility-ro.patch | 232
-rw-r--r-- target/linux/generic/backport-5.15/752-07-v6.7-net-ethernet-mtk_wed-do-not-configure-rx-offload-if-.patch | 234
-rw-r--r-- target/linux/generic/backport-5.15/752-08-v6.7-net-ethernet-mtk_wed-rename-mtk_rxbm_desc-in-mtk_wed.patch | 52
-rw-r--r-- target/linux/generic/backport-5.15/752-09-v6.7-net-ethernet-mtk_wed-introduce-mtk_wed_buf-structure.patch | 87
-rw-r--r-- target/linux/generic/backport-5.15/752-10-v6.7-net-ethernet-mtk_wed-move-mem_region-array-out-of-mt.patch | 88
-rw-r--r-- target/linux/generic/backport-5.15/752-11-v6.7-net-ethernet-mtk_wed-make-memory-region-optional.patch | 71
-rw-r--r-- target/linux/generic/backport-5.15/752-12-v6.7-net-ethernet-mtk_wed-fix-EXT_INT_STATUS_RX_FBUF-defi.patch | 27
-rw-r--r-- target/linux/generic/backport-5.15/752-13-v6.7-net-ethernet-mtk_wed-add-mtk_wed_soc_data-structure.patch | 217
-rw-r--r-- target/linux/generic/backport-5.15/752-14-v6.7-net-ethernet-mtk_wed-introduce-WED-support-for-MT798.patch | 1280
-rw-r--r-- target/linux/generic/backport-5.15/752-15-v6.7-net-ethernet-mtk_wed-refactor-mtk_wed_check_wfdma_rx.patch | 95
-rw-r--r-- target/linux/generic/backport-5.15/752-16-v6.7-net-ethernet-mtk_wed-introduce-partial-AMSDU-offload.patch | 465
-rw-r--r-- target/linux/generic/backport-5.15/752-17-v6.7-net-ethernet-mtk_wed-introduce-hw_rro-support-for-MT.patch | 483
-rw-r--r-- target/linux/generic/backport-5.15/752-18-v6.7-net-ethernet-mtk_wed-debugfs-move-wed_v2-specific-re.patch | 78
-rw-r--r-- target/linux/generic/backport-5.15/752-19-v6.7-net-ethernet-mtk_wed-debugfs-add-WED-3.0-debugfs-ent.patch | 432
-rw-r--r-- target/linux/generic/backport-5.15/752-20-v6.7-net-ethernet-mtk_wed-add-wed-3.0-reset-support.patch | 587
-rw-r--r-- target/linux/generic/backport-5.15/764-01-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch (renamed from target/linux/generic/backport-5.15/751-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/764-02-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch (renamed from target/linux/generic/backport-5.15/752-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/764-03-v5.17-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch (renamed from target/linux/generic/backport-5.15/753-v5.17-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/764-04-v5.17-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch (renamed from target/linux/generic/backport-5.15/754-v5.17-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/764-05-v5.17-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch (renamed from target/linux/generic/backport-5.15/755-v5.17-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/764-06-v5.17-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch (renamed from target/linux/generic/backport-5.15/756-v5.17-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/764-07-v5.17-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch (renamed from target/linux/generic/backport-5.15/757-v5.17-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/764-08-v5.17-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch (renamed from target/linux/generic/backport-5.15/758-v5.17-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/764-09-v5.17-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch (renamed from target/linux/generic/backport-5.15/759-v5.17-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/764-10-v5.17-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch (renamed from target/linux/generic/backport-5.15/760-v5.17-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/764-11-v5.17-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch (renamed from target/linux/generic/backport-5.15/761-v5.17-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/764-12-v5.17-net-next-net-dsa-qca8k-add-support-for-mirror-mode.patch (renamed from target/linux/generic/backport-5.15/762-v5.17-net-next-net-dsa-qca8k-add-support-for-mirror-mode.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/764-13-v5.17-net-next-net-dsa-qca8k-add-LAG-support.patch (renamed from target/linux/generic/backport-5.15/763-v5.17-net-next-net-dsa-qca8k-add-LAG-support.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/764-14-v5.17-net-next-net-dsa-qca8k-fix-warning-in-LAG-feature.patch (renamed from target/linux/generic/backport-5.15/764-v5.17-net-next-net-dsa-qca8k-fix-warning-in-LAG-feature.patch) | 0
-rw-r--r-- target/linux/generic/backport-5.15/794-v6.2-net-core-Allow-live-renaming-when-an-interface-is-up.patch | 6
-rw-r--r-- target/linux/generic/backport-6.1/751-01-v6.4-net-ethernet-mtk_eth_soc-add-code-for-offloading-flo.patch (renamed from target/linux/generic/pending-5.15/736-01-net-ethernet-mtk_eth_soc-add-code-for-offloading-flo.patch) | 11
-rw-r--r-- target/linux/generic/backport-6.1/751-02-v6.4-net-ethernet-mediatek-mtk_ppe-prefer-newly-added-l2-.patch (renamed from target/linux/generic/pending-6.1/736-02-net-ethernet-mediatek-mtk_ppe-prefer-newly-added-l2-.patch) | 0
-rw-r--r-- target/linux/generic/backport-6.1/751-03-v6.4-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch (renamed from target/linux/generic/pending-5.15/736-03-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch) | 21
-rw-r--r-- target/linux/generic/backport-6.1/751-04-v6.4-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch (renamed from target/linux/generic/pending-6.1/736-04-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch) | 22
-rw-r--r-- target/linux/generic/backport-6.1/752-01-v6.6-net-ethernet-mtk_wed-add-some-more-info-in-wed_txinf.patch | 45
-rw-r--r-- target/linux/generic/backport-6.1/752-02-v6.6-net-ethernet-mtk_wed-minor-change-in-wed_-tx-rx-info.patch | 47
-rw-r--r-- target/linux/generic/backport-6.1/752-03-v6.6-net-ethernet-mtk_eth_soc-rely-on-mtk_pse_port-defini.patch | 29
-rw-r--r-- target/linux/generic/backport-6.1/752-04-v6.6-net-ethernet-mtk_wed-check-update_wo_rx_stats-in-mtk.patch | 26
-rw-r--r-- target/linux/generic/backport-6.1/752-05-v6.7-net-ethernet-mtk_wed-do-not-assume-offload-callbacks.patch | 68
-rw-r--r-- target/linux/generic/backport-6.1/752-06-v6.7-net-ethernet-mtk_wed-introduce-versioning-utility-ro.patch | 232
-rw-r--r-- target/linux/generic/backport-6.1/752-07-v6.7-net-ethernet-mtk_wed-do-not-configure-rx-offload-if-.patch | 234
-rw-r--r-- target/linux/generic/backport-6.1/752-08-v6.7-net-ethernet-mtk_wed-rename-mtk_rxbm_desc-in-mtk_wed.patch | 52
-rw-r--r-- target/linux/generic/backport-6.1/752-09-v6.7-net-ethernet-mtk_wed-introduce-mtk_wed_buf-structure.patch | 87
-rw-r--r-- target/linux/generic/backport-6.1/752-10-v6.7-net-ethernet-mtk_wed-move-mem_region-array-out-of-mt.patch | 88
-rw-r--r-- target/linux/generic/backport-6.1/752-11-v6.7-net-ethernet-mtk_wed-make-memory-region-optional.patch | 71
-rw-r--r-- target/linux/generic/backport-6.1/752-12-v6.7-net-ethernet-mtk_wed-fix-EXT_INT_STATUS_RX_FBUF-defi.patch | 27
-rw-r--r-- target/linux/generic/backport-6.1/752-13-v6.7-net-ethernet-mtk_wed-add-mtk_wed_soc_data-structure.patch | 217
-rw-r--r-- target/linux/generic/backport-6.1/752-14-v6.7-net-ethernet-mtk_wed-introduce-WED-support-for-MT798.patch | 1280
-rw-r--r-- target/linux/generic/backport-6.1/752-15-v6.7-net-ethernet-mtk_wed-refactor-mtk_wed_check_wfdma_rx.patch | 95
-rw-r--r-- target/linux/generic/backport-6.1/752-16-v6.7-net-ethernet-mtk_wed-introduce-partial-AMSDU-offload.patch | 465
-rw-r--r-- target/linux/generic/backport-6.1/752-17-v6.7-net-ethernet-mtk_wed-introduce-hw_rro-support-for-MT.patch | 483
-rw-r--r-- target/linux/generic/backport-6.1/752-18-v6.7-net-ethernet-mtk_wed-debugfs-move-wed_v2-specific-re.patch | 78
-rw-r--r-- target/linux/generic/backport-6.1/752-19-v6.7-net-ethernet-mtk_wed-debugfs-add-WED-3.0-debugfs-ent.patch | 432
-rw-r--r-- target/linux/generic/backport-6.1/752-20-v6.7-net-ethernet-mtk_wed-add-wed-3.0-reset-support.patch | 587
-rw-r--r-- target/linux/generic/backport-6.1/794-v6.2-net-core-Allow-live-renaming-when-an-interface-is-up.patch | 6
-rw-r--r-- target/linux/generic/hack-5.15/721-net-add-packet-mangeling.patch | 10
-rw-r--r-- target/linux/generic/hack-6.1/721-net-add-packet-mangeling.patch | 8
-rw-r--r-- target/linux/generic/pending-5.15/680-NET-skip-GRO-for-foreign-MAC-addresses.patch | 2
-rw-r--r-- target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch | 6
-rw-r--r-- target/linux/generic/pending-5.15/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch | 6
-rw-r--r-- target/linux/generic/pending-5.15/732-03-net-ethernet-mtk_eth_soc-fix-remaining-throughput-re.patch | 4
-rw-r--r-- target/linux/generic/pending-5.15/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch | 2
-rw-r--r-- target/linux/generic/pending-5.15/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch | 20
-rw-r--r-- target/linux/generic/pending-5.15/760-net-core-add-optional-threading-for-backlog-processi.patch | 2
-rw-r--r-- target/linux/generic/pending-6.1/680-NET-skip-GRO-for-foreign-MAC-addresses.patch | 2
-rw-r--r-- target/linux/generic/pending-6.1/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch | 2
-rw-r--r-- target/linux/generic/pending-6.1/731-net-permit-ieee80211_ptr-even-with-no-CFG82111-suppo.patch | 2
-rw-r--r-- target/linux/generic/pending-6.1/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch | 6
-rw-r--r-- target/linux/generic/pending-6.1/732-03-net-ethernet-mtk_eth_soc-fix-remaining-throughput-re.patch | 4
-rw-r--r-- target/linux/generic/pending-6.1/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch | 2
-rw-r--r-- target/linux/generic/pending-6.1/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch | 20
-rw-r--r-- target/linux/generic/pending-6.1/760-net-core-add-optional-threading-for-backlog-processi.patch | 2
-rw-r--r-- target/linux/mediatek/patches-5.15/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch | 6
-rw-r--r-- target/linux/mediatek/patches-5.15/940-net-ethernet-mtk_wed-rename-mtk_wed_get_memory_regio.patch | 23
-rw-r--r-- target/linux/mediatek/patches-5.15/942-net-ethernet-mtk_wed-move-cpuboot-in-a-dedicated-dts.patch | 54
-rw-r--r-- target/linux/mediatek/patches-5.15/943-net-ethernet-mtk_wed-move-ilm-a-dedicated-dts-node.patch | 87
-rw-r--r-- target/linux/mediatek/patches-5.15/944-net-ethernet-mtk_wed-move-dlm-a-dedicated-dts-node.patch | 4
-rw-r--r-- target/linux/mediatek/patches-6.1/940-net-ethernet-mtk_wed-rename-mtk_wed_get_memory_regio.patch | 23
-rw-r--r-- target/linux/mediatek/patches-6.1/942-net-ethernet-mtk_wed-move-cpuboot-in-a-dedicated-dts.patch | 54
-rw-r--r-- target/linux/mediatek/patches-6.1/943-net-ethernet-mtk_wed-move-ilm-a-dedicated-dts-node.patch | 87
-rw-r--r-- target/linux/mediatek/patches-6.1/944-net-ethernet-mtk_wed-move-dlm-a-dedicated-dts-node.patch | 4
105 files changed, 9606 insertions, 359 deletions
diff --git a/target/linux/generic/backport-5.15/722-v6.0-net-ethernet-mtk_eth_wed-add-axi-bus-support.patch b/target/linux/generic/backport-5.15/722-v6.0-net-ethernet-mtk_eth_wed-add-axi-bus-support.patch
index 70b7bad31f..f4b78a2798 100644
--- a/target/linux/generic/backport-5.15/722-v6.0-net-ethernet-mtk_eth_wed-add-axi-bus-support.patch
+++ b/target/linux/generic/backport-5.15/722-v6.0-net-ethernet-mtk_eth_wed-add-axi-bus-support.patch
@@ -82,7 +82,7 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
if (!hw_list[!hw->index]->wed_dev &&
hw->eth->dma_dev != hw->eth->dev)
-@@ -356,40 +380,54 @@ mtk_wed_detach(struct mtk_wed_device *de
+@@ -356,40 +380,47 @@ mtk_wed_detach(struct mtk_wed_device *de
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
@@ -97,7 +97,6 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+ case MTK_WED_BUS_PCIE: {
+ struct device_node *np = dev->hw->eth->dev->of_node;
+ struct regmap *regs;
-+ u32 val;
+
+ regs = syscon_regmap_lookup_by_phandle(np,
+ "mediatek,wed-pcie");
@@ -139,20 +138,14 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
+ wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+
-+ val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
-+ val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+
-+ val = wed_r32(dev, MTK_WED_PCIE_CFG_INTM);
-+ val = wed_r32(dev, MTK_WED_PCIE_CFG_BASE);
-+
+ /* pcie interrupt status trigger register */
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+ wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
+
+ /* pola setting */
-+ val = wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
+ break;
@@ -168,7 +161,7 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
}
static void
-@@ -800,12 +838,14 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -800,12 +831,14 @@ mtk_wed_attach(struct mtk_wed_device *de
__releases(RCU)
{
struct mtk_wed_hw *hw;
@@ -184,7 +177,7 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
!try_module_get(THIS_MODULE))
ret = -ENODEV;
-@@ -823,8 +863,10 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -823,8 +856,10 @@ mtk_wed_attach(struct mtk_wed_device *de
goto out;
}
diff --git a/target/linux/generic/backport-5.15/728-v6.1-02-net-ethernet-mtk_eth_wed-add-missing-put_device-in-m.patch b/target/linux/generic/backport-5.15/728-v6.1-02-net-ethernet-mtk_eth_wed-add-missing-put_device-in-m.patch
index ef5374dcc5..4f0b78f110 100644
--- a/target/linux/generic/backport-5.15/728-v6.1-02-net-ethernet-mtk_eth_wed-add-missing-put_device-in-m.patch
+++ b/target/linux/generic/backport-5.15/728-v6.1-02-net-ethernet-mtk_eth_wed-add-missing-put_device-in-m.patch
@@ -16,7 +16,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -1084,11 +1084,11 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1077,11 +1077,11 @@ void mtk_wed_add_hw(struct device_node *
get_device(&pdev->dev);
irq = platform_get_irq(pdev, 0);
if (irq < 0)
@@ -30,7 +30,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
-@@ -1131,8 +1131,14 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1124,8 +1124,14 @@ void mtk_wed_add_hw(struct device_node *
hw_list[index] = hw;
diff --git a/target/linux/generic/backport-5.15/728-v6.1-03-net-ethernet-mtk_eth_wed-add-missing-of_node_put.patch b/target/linux/generic/backport-5.15/728-v6.1-03-net-ethernet-mtk_eth_wed-add-missing-of_node_put.patch
index 0a452d4a7d..32f62aaed2 100644
--- a/target/linux/generic/backport-5.15/728-v6.1-03-net-ethernet-mtk_eth_wed-add-missing-of_node_put.patch
+++ b/target/linux/generic/backport-5.15/728-v6.1-03-net-ethernet-mtk_eth_wed-add-missing-of_node_put.patch
@@ -15,7 +15,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -1079,7 +1079,7 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1072,7 +1072,7 @@ void mtk_wed_add_hw(struct device_node *
pdev = of_find_device_by_node(np);
if (!pdev)
@@ -24,7 +24,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
get_device(&pdev->dev);
irq = platform_get_irq(pdev, 0);
-@@ -1139,6 +1139,8 @@ unlock:
+@@ -1132,6 +1132,8 @@ unlock:
mutex_unlock(&hw_lock);
err_put_device:
put_device(&pdev->dev);
@@ -33,7 +33,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
}
void mtk_wed_exit(void)
-@@ -1159,6 +1161,7 @@ void mtk_wed_exit(void)
+@@ -1152,6 +1154,7 @@ void mtk_wed_exit(void)
hw_list[i] = NULL;
debugfs_remove(hw->debugfs_dir);
put_device(hw->dev);
diff --git a/target/linux/generic/backport-5.15/729-02-v6.1-net-ethernet-mtk_wed-introduce-wed-wo-support.patch b/target/linux/generic/backport-5.15/729-02-v6.1-net-ethernet-mtk_wed-introduce-wed-wo-support.patch
index dbd7e30fbb..fd5f45df2a 100644
--- a/target/linux/generic/backport-5.15/729-02-v6.1-net-ethernet-mtk_wed-introduce-wed-wo-support.patch
+++ b/target/linux/generic/backport-5.15/729-02-v6.1-net-ethernet-mtk_wed-introduce-wed-wo-support.patch
@@ -44,7 +44,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
struct device_node *wlan_node;
-@@ -885,9 +888,11 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -878,9 +881,11 @@ mtk_wed_attach(struct mtk_wed_device *de
}
mtk_wed_hw_init_early(dev);
diff --git a/target/linux/generic/backport-5.15/729-03-v6.1-net-ethernet-mtk_wed-rename-tx_wdma-array-in-rx_wdma.patch b/target/linux/generic/backport-5.15/729-03-v6.1-net-ethernet-mtk_wed-rename-tx_wdma-array-in-rx_wdma.patch
index ffd6bc3589..a002a5f851 100644
--- a/target/linux/generic/backport-5.15/729-03-v6.1-net-ethernet-mtk_wed-rename-tx_wdma-array-in-rx_wdma.patch
+++ b/target/linux/generic/backport-5.15/729-03-v6.1-net-ethernet-mtk_wed-rename-tx_wdma-array-in-rx_wdma.patch
@@ -23,7 +23,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
}
static void
-@@ -695,10 +695,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device
+@@ -688,10 +688,10 @@ mtk_wed_ring_alloc(struct mtk_wed_device
}
static int
@@ -36,7 +36,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size))
return -ENOMEM;
-@@ -812,9 +812,9 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -805,9 +805,9 @@ mtk_wed_start(struct mtk_wed_device *dev
{
int i;
@@ -49,7 +49,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
-@@ -923,7 +923,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
+@@ -916,7 +916,7 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
sizeof(*ring->desc)))
return -ENOMEM;
diff --git a/target/linux/generic/backport-5.15/729-04-v6.1-net-ethernet-mtk_wed-add-configure-wed-wo-support.patch b/target/linux/generic/backport-5.15/729-04-v6.1-net-ethernet-mtk_wed-add-configure-wed-wo-support.patch
index 4c34d0cb33..eca29739b4 100644
--- a/target/linux/generic/backport-5.15/729-04-v6.1-net-ethernet-mtk_wed-add-configure-wed-wo-support.patch
+++ b/target/linux/generic/backport-5.15/729-04-v6.1-net-ethernet-mtk_wed-add-configure-wed-wo-support.patch
@@ -409,7 +409,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
struct device_node *wlan_node;
-@@ -441,10 +667,12 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+@@ -434,10 +660,12 @@ mtk_wed_set_wpdma(struct mtk_wed_device
} else {
mtk_wed_bus_init(dev);
@@ -426,7 +426,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
}
}
-@@ -494,6 +722,132 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
+@@ -487,6 +715,132 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
}
}
@@ -559,7 +559,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
-@@ -505,11 +859,11 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+@@ -498,11 +852,11 @@ mtk_wed_hw_init(struct mtk_wed_device *d
wed_w32(dev, MTK_WED_TX_BM_CTRL,
MTK_WED_TX_BM_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
@@ -573,7 +573,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
-@@ -536,9 +890,9 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+@@ -529,9 +883,9 @@ mtk_wed_hw_init(struct mtk_wed_device *d
wed_w32(dev, MTK_WED_TX_TKID_CTRL,
MTK_WED_TX_TKID_CTRL_PAUSE |
FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
@@ -585,7 +585,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
MTK_WED_TX_TKID_DYN_THR_HI);
-@@ -546,18 +900,28 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+@@ -539,18 +893,28 @@ mtk_wed_hw_init(struct mtk_wed_device *d
mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
@@ -617,7 +617,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
{
void *head = (void *)ring->desc;
int i;
-@@ -567,7 +931,10 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
+@@ -560,7 +924,10 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
desc->buf0 = 0;
@@ -629,7 +629,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
desc->buf1 = 0;
desc->info = 0;
}
-@@ -623,7 +990,8 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -616,7 +983,8 @@ mtk_wed_reset_dma(struct mtk_wed_device
if (!dev->tx_ring[i].desc)
continue;
@@ -639,7 +639,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
}
if (mtk_wed_poll_busy(dev))
-@@ -641,6 +1009,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -634,6 +1002,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
@@ -649,7 +649,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
-@@ -675,12 +1046,11 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -668,12 +1039,11 @@ mtk_wed_reset_dma(struct mtk_wed_device
MTK_WED_WPDMA_RESET_IDX_RX);
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
}
@@ -663,7 +663,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
{
ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
&ring->desc_phys, GFP_KERNEL);
-@@ -689,7 +1059,7 @@ mtk_wed_ring_alloc(struct mtk_wed_device
+@@ -682,7 +1052,7 @@ mtk_wed_ring_alloc(struct mtk_wed_device
ring->desc_size = desc_size;
ring->size = size;
@@ -672,7 +672,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return 0;
}
-@@ -698,9 +1068,14 @@ static int
+@@ -691,9 +1061,14 @@ static int
mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size)
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
@@ -689,7 +689,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
-@@ -717,6 +1092,60 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
+@@ -710,6 +1085,60 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
return 0;
}
@@ -750,7 +750,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
static void
mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
{
-@@ -739,6 +1168,8 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+@@ -732,6 +1161,8 @@ mtk_wed_configure_irq(struct mtk_wed_dev
wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
} else {
@@ -759,7 +759,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
/* initail tx interrupt trigger */
wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
-@@ -757,6 +1188,16 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+@@ -750,6 +1181,16 @@ mtk_wed_configure_irq(struct mtk_wed_dev
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
dev->wlan.txfree_tbit));
@@ -776,7 +776,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
wed_set(dev, MTK_WED_WDMA_INT_CTRL,
FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
-@@ -794,9 +1235,15 @@ mtk_wed_dma_enable(struct mtk_wed_device
+@@ -787,9 +1228,15 @@ mtk_wed_dma_enable(struct mtk_wed_device
wdma_set(dev, MTK_WDMA_GLO_CFG,
MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
} else {
@@ -792,7 +792,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
-@@ -804,6 +1251,15 @@ mtk_wed_dma_enable(struct mtk_wed_device
+@@ -797,6 +1244,15 @@ mtk_wed_dma_enable(struct mtk_wed_device
wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
@@ -808,7 +808,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
}
}
-@@ -829,7 +1285,19 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -822,7 +1278,19 @@ mtk_wed_start(struct mtk_wed_device *dev
val |= BIT(0) | (BIT(1) * !!dev->hw->index);
regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
} else {
@@ -829,7 +829,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
}
mtk_wed_dma_enable(dev);
-@@ -863,7 +1331,7 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -856,7 +1324,7 @@ mtk_wed_attach(struct mtk_wed_device *de
if (!hw) {
module_put(THIS_MODULE);
ret = -ENODEV;
@@ -838,7 +838,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
}
device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
-@@ -876,15 +1344,24 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -869,15 +1337,24 @@ mtk_wed_attach(struct mtk_wed_device *de
dev->dev = hw->dev;
dev->irq = hw->irq;
dev->wdma_idx = hw->index;
@@ -866,7 +866,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
}
mtk_wed_hw_init_early(dev);
-@@ -893,8 +1370,10 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -886,8 +1363,10 @@ mtk_wed_attach(struct mtk_wed_device *de
BIT(hw->index), 0);
else
ret = mtk_wed_wo_init(hw);
@@ -878,7 +878,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
mutex_unlock(&hw_lock);
return ret;
-@@ -917,10 +1396,11 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
+@@ -910,10 +1389,11 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
* WDMA RX.
*/
@@ -892,7 +892,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
return -ENOMEM;
if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE))
-@@ -967,6 +1447,37 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
+@@ -960,6 +1440,37 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
return 0;
}
@@ -930,7 +930,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
static u32
mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
{
-@@ -1063,7 +1574,9 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1056,7 +1567,9 @@ void mtk_wed_add_hw(struct device_node *
static const struct mtk_wed_ops wed_ops = {
.attach = mtk_wed_attach,
.tx_ring_setup = mtk_wed_tx_ring_setup,
@@ -940,7 +940,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
.start = mtk_wed_start,
.stop = mtk_wed_stop,
.reset_dma = mtk_wed_reset_dma,
-@@ -1072,6 +1585,7 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1065,6 +1578,7 @@ void mtk_wed_add_hw(struct device_node *
.irq_get = mtk_wed_irq_get,
.irq_set_mask = mtk_wed_irq_set_mask,
.detach = mtk_wed_detach,
diff --git a/target/linux/generic/backport-5.15/729-09-v6.2-net-ethernet-mtk_wed-add-wcid-overwritten-support-fo.patch b/target/linux/generic/backport-5.15/729-09-v6.2-net-ethernet-mtk_wed-add-wcid-overwritten-support-fo.patch
index d91d829911..117ccc0902 100644
--- a/target/linux/generic/backport-5.15/729-09-v6.2-net-ethernet-mtk_wed-add-wcid-overwritten-support-fo.patch
+++ b/target/linux/generic/backport-5.15/729-09-v6.2-net-ethernet-mtk_wed-add-wcid-overwritten-support-fo.patch
@@ -27,7 +27,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
}
static void
-@@ -1297,9 +1297,10 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -1290,9 +1290,10 @@ mtk_wed_start(struct mtk_wed_device *dev
if (mtk_wed_rro_cfg(dev))
return;
@@ -39,7 +39,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
mtk_wed_dma_enable(dev);
dev->running = true;
}
-@@ -1365,11 +1366,13 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -1358,11 +1359,13 @@ mtk_wed_attach(struct mtk_wed_device *de
}
mtk_wed_hw_init_early(dev);
diff --git a/target/linux/generic/backport-5.15/729-10-v6.2-net-ethernet-mtk_wed-return-status-value-in-mtk_wdma.patch b/target/linux/generic/backport-5.15/729-10-v6.2-net-ethernet-mtk_wed-return-status-value-in-mtk_wdma.patch
index d97bb715e0..ec58c3fc57 100644
--- a/target/linux/generic/backport-5.15/729-10-v6.2-net-ethernet-mtk_wed-return-status-value-in-mtk_wdma.patch
+++ b/target/linux/generic/backport-5.15/729-10-v6.2-net-ethernet-mtk_wed-return-status-value-in-mtk_wdma.patch
@@ -70,7 +70,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
}
if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
-@@ -1006,11 +1009,7 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -999,11 +1002,7 @@ mtk_wed_reset_dma(struct mtk_wed_device
wed_w32(dev, MTK_WED_RESET_IDX, 0);
}
diff --git a/target/linux/generic/backport-5.15/729-12-v6.2-net-ethernet-mtk_wed-update-mtk_wed_stop.patch b/target/linux/generic/backport-5.15/729-12-v6.2-net-ethernet-mtk_wed-update-mtk_wed_stop.patch
index dfc0f8c3f3..f4e842d515 100644
--- a/target/linux/generic/backport-5.15/729-12-v6.2-net-ethernet-mtk_wed-update-mtk_wed_stop.patch
+++ b/target/linux/generic/backport-5.15/729-12-v6.2-net-ethernet-mtk_wed-update-mtk_wed_stop.patch
@@ -67,7 +67,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
mtk_wdma_rx_reset(dev);
mtk_wed_reset(dev, MTK_WED_RESET_WED);
-@@ -677,7 +691,7 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
+@@ -670,7 +684,7 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
{
u32 mask, set;
diff --git a/target/linux/generic/backport-5.15/729-13-v6.2-net-ethernet-mtk_wed-add-mtk_wed_rx_reset-routine.patch b/target/linux/generic/backport-5.15/729-13-v6.2-net-ethernet-mtk_wed-add-mtk_wed_rx_reset-routine.patch
index 2205fea513..a0fc9da99e 100644
--- a/target/linux/generic/backport-5.15/729-13-v6.2-net-ethernet-mtk_wed-add-mtk_wed_rx_reset-routine.patch
+++ b/target/linux/generic/backport-5.15/729-13-v6.2-net-ethernet-mtk_wed-add-mtk_wed_rx_reset-routine.patch
@@ -13,7 +13,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -951,42 +951,130 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
+@@ -944,42 +944,130 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
}
static u32
@@ -170,7 +170,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
}
static void
-@@ -1004,19 +1092,23 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -997,19 +1085,23 @@ mtk_wed_reset_dma(struct mtk_wed_device
true);
}
@@ -201,7 +201,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
-@@ -1033,6 +1125,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -1026,6 +1118,9 @@ mtk_wed_reset_dma(struct mtk_wed_device
MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
}
@@ -211,7 +211,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
for (i = 0; i < 100; i++) {
val = wed_r32(dev, MTK_WED_TX_BM_INTF);
if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
-@@ -1040,8 +1135,19 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -1033,8 +1128,19 @@ mtk_wed_reset_dma(struct mtk_wed_device
}
mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
@@ -231,7 +231,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
if (busy) {
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
-@@ -1052,6 +1158,17 @@ mtk_wed_reset_dma(struct mtk_wed_device
+@@ -1045,6 +1151,17 @@ mtk_wed_reset_dma(struct mtk_wed_device
MTK_WED_WPDMA_RESET_IDX_RX);
wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
}
@@ -249,7 +249,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
}
static int
-@@ -1274,6 +1391,9 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -1267,6 +1384,9 @@ mtk_wed_start(struct mtk_wed_device *dev
{
int i;
@@ -259,7 +259,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
if (!dev->rx_wdma[i].desc)
mtk_wed_wdma_rx_ring_setup(dev, i, 16);
-@@ -1362,10 +1482,6 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -1355,10 +1475,6 @@ mtk_wed_attach(struct mtk_wed_device *de
goto out;
if (mtk_wed_get_rx_capa(dev)) {
diff --git a/target/linux/generic/backport-5.15/729-14-v6.2-net-ethernet-mtk_wed-add-reset-to-tx_ring_setup-call.patch b/target/linux/generic/backport-5.15/729-14-v6.2-net-ethernet-mtk_wed-add-reset-to-tx_ring_setup-call.patch
index 602483bcb8..4404971cc7 100644
--- a/target/linux/generic/backport-5.15/729-14-v6.2-net-ethernet-mtk_wed-add-reset-to-tx_ring_setup-call.patch
+++ b/target/linux/generic/backport-5.15/729-14-v6.2-net-ethernet-mtk_wed-add-reset-to-tx_ring_setup-call.patch
@@ -14,7 +14,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -1188,7 +1188,8 @@ mtk_wed_ring_alloc(struct mtk_wed_device
+@@ -1181,7 +1181,8 @@ mtk_wed_ring_alloc(struct mtk_wed_device
}
static int
@@ -24,7 +24,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
-@@ -1197,8 +1198,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
+@@ -1190,8 +1191,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
return -EINVAL;
wdma = &dev->rx_wdma[idx];
@@ -35,7 +35,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
-@@ -1396,7 +1397,7 @@ mtk_wed_start(struct mtk_wed_device *dev
+@@ -1389,7 +1390,7 @@ mtk_wed_start(struct mtk_wed_device *dev
for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
if (!dev->rx_wdma[i].desc)
@@ -44,7 +44,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
mtk_wed_hw_init(dev);
mtk_wed_configure_irq(dev, irq_mask);
-@@ -1505,7 +1506,8 @@ unlock:
+@@ -1498,7 +1499,8 @@ unlock:
}
static int
@@ -54,7 +54,7 @@ Signed-off-by: Paolo Abeni <pabeni@redhat.com>
{
struct mtk_wed_ring *ring = &dev->tx_ring[idx];
-@@ -1524,11 +1526,12 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
+@@ -1517,11 +1519,12 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
return -EINVAL;
diff --git a/target/linux/generic/backport-5.15/729-23-v6.3-net-ethernet-mtk_wed-add-reset-to-rx_ring_setup-call.patch b/target/linux/generic/backport-5.15/729-23-v6.3-net-ethernet-mtk_wed-add-reset-to-rx_ring_setup-call.patch
index cf81acf491..c63628da99 100644
--- a/target/linux/generic/backport-5.15/729-23-v6.3-net-ethernet-mtk_wed-add-reset-to-rx_ring_setup-call.patch
+++ b/target/linux/generic/backport-5.15/729-23-v6.3-net-ethernet-mtk_wed-add-reset-to-rx_ring_setup-call.patch
@@ -17,7 +17,7 @@ Signed-off-by: Jakub Kicinski <kuba@kernel.org>
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -1259,7 +1259,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
+@@ -1252,7 +1252,8 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
}
static int
@@ -27,7 +27,7 @@ Signed-off-by: Jakub Kicinski <kuba@kernel.org>
{
u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
struct mtk_wed_ring *wdma;
-@@ -1268,8 +1269,8 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
+@@ -1261,8 +1262,8 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
return -EINVAL;
wdma = &dev->tx_wdma[idx];
@@ -38,7 +38,7 @@ Signed-off-by: Jakub Kicinski <kuba@kernel.org>
return -ENOMEM;
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
-@@ -1279,6 +1280,9 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
+@@ -1272,6 +1273,9 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
@@ -48,7 +48,7 @@ Signed-off-by: Jakub Kicinski <kuba@kernel.org>
if (!idx) {
wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
wdma->desc_phys);
-@@ -1618,18 +1622,20 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
+@@ -1611,18 +1615,20 @@ mtk_wed_txfree_ring_setup(struct mtk_wed
}
static int
diff --git a/target/linux/generic/backport-5.15/730-15-v6.3-net-ethernet-mtk_wed-No-need-to-clear-memory-after-a.patch b/target/linux/generic/backport-5.15/730-15-v6.3-net-ethernet-mtk_wed-No-need-to-clear-memory-after-a.patch
index 74a77ddaca..a3bb1c5db7 100644
--- a/target/linux/generic/backport-5.15/730-15-v6.3-net-ethernet-mtk_wed-No-need-to-clear-memory-after-a.patch
+++ b/target/linux/generic/backport-5.15/730-15-v6.3-net-ethernet-mtk_wed-No-need-to-clear-memory-after-a.patch
@@ -16,7 +16,7 @@ Signed-off-by: Jakub Kicinski <kuba@kernel.org>
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -786,7 +786,6 @@ mtk_wed_rro_ring_alloc(struct mtk_wed_de
+@@ -779,7 +779,6 @@ mtk_wed_rro_ring_alloc(struct mtk_wed_de
ring->desc_size = sizeof(*ring->desc);
ring->size = size;
diff --git a/target/linux/generic/backport-5.15/730-17-v6.3-net-ethernet-mtk_wed-fix-possible-deadlock-if-mtk_we.patch b/target/linux/generic/backport-5.15/730-17-v6.3-net-ethernet-mtk_wed-fix-possible-deadlock-if-mtk_we.patch
index d1c5fb6656..0afe7106e5 100644
--- a/target/linux/generic/backport-5.15/730-17-v6.3-net-ethernet-mtk_wed-fix-possible-deadlock-if-mtk_we.patch
+++ b/target/linux/generic/backport-5.15/730-17-v6.3-net-ethernet-mtk_wed-fix-possible-deadlock-if-mtk_we.patch
@@ -43,7 +43,7 @@ Signed-off-by: Jakub Kicinski <kuba@kernel.org>
mutex_unlock(&hw_lock);
}
-@@ -1545,8 +1550,10 @@ mtk_wed_attach(struct mtk_wed_device *de
+@@ -1538,8 +1543,10 @@ mtk_wed_attach(struct mtk_wed_device *de
ret = mtk_wed_wo_init(hw);
}
out:
diff --git a/target/linux/generic/backport-5.15/750-v6.5-05-net-ethernet-mtk_eth_soc-add-version-in-mtk_soc_data.patch b/target/linux/generic/backport-5.15/750-v6.5-05-net-ethernet-mtk_eth_soc-add-version-in-mtk_soc_data.patch
index d1d692002d..40f27d4feb 100644
--- a/target/linux/generic/backport-5.15/750-v6.5-05-net-ethernet-mtk_eth_soc-add-version-in-mtk_soc_data.patch
+++ b/target/linux/generic/backport-5.15/750-v6.5-05-net-ethernet-mtk_eth_soc-add-version-in-mtk_soc_data.patch
@@ -530,7 +530,7 @@ Signed-off-by: Jakub Kicinski <kuba@kernel.org>
pse_port = 8;
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -1091,7 +1091,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+@@ -1084,7 +1084,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *
} else {
struct mtk_eth *eth = dev->hw->eth;
@@ -539,7 +539,7 @@ Signed-off-by: Jakub Kicinski <kuba@kernel.org>
wed_set(dev, MTK_WED_RESET_IDX,
MTK_WED_RESET_IDX_RX_V2);
else
-@@ -1813,7 +1813,7 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1806,7 +1806,7 @@ void mtk_wed_add_hw(struct device_node *
hw->wdma = wdma;
hw->index = index;
hw->irq = irq;
diff --git a/target/linux/generic/pending-6.1/736-01-net-ethernet-mtk_eth_soc-add-code-for-offloading-flo.patch b/target/linux/generic/backport-5.15/751-01-v6.4-net-ethernet-mtk_eth_soc-add-code-for-offloading-flo.patch
index 4b97b3d47c..21d0f045d9 100644
--- a/target/linux/generic/pending-6.1/736-01-net-ethernet-mtk_eth_soc-add-code-for-offloading-flo.patch
+++ b/target/linux/generic/backport-5.15/751-01-v6.4-net-ethernet-mtk_eth_soc-add-code-for-offloading-flo.patch
@@ -19,7 +19,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -1451,6 +1451,9 @@ int mtk_gmac_rgmii_path_setup(struct mtk
+@@ -1432,6 +1432,9 @@ int mtk_gmac_rgmii_path_setup(struct mtk
int mtk_eth_offload_init(struct mtk_eth *eth);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
diff --git a/target/linux/generic/pending-5.15/736-02-net-ethernet-mediatek-mtk_ppe-prefer-newly-added-l2-.patch b/target/linux/generic/backport-5.15/751-02-v6.4-net-ethernet-mediatek-mtk_ppe-prefer-newly-added-l2-.patch
index e89d4cd97b..84b768bd79 100644
--- a/target/linux/generic/pending-5.15/736-02-net-ethernet-mediatek-mtk_ppe-prefer-newly-added-l2-.patch
+++ b/target/linux/generic/backport-5.15/751-02-v6.4-net-ethernet-mediatek-mtk_ppe-prefer-newly-added-l2-.patch
@@ -12,7 +12,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
-@@ -663,10 +663,20 @@ void mtk_foe_entry_clear(struct mtk_ppe
+@@ -662,10 +662,20 @@ void mtk_foe_entry_clear(struct mtk_ppe
static int
mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
diff --git a/target/linux/generic/pending-6.1/736-03-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch b/target/linux/generic/backport-5.15/751-03-v6.4-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch
index ecdb345d69..a9f82ca3cb 100644
--- a/target/linux/generic/pending-6.1/736-03-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch
+++ b/target/linux/generic/backport-5.15/751-03-v6.4-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch
@@ -15,7 +15,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
-@@ -477,42 +477,43 @@ int mtk_foe_entry_set_queue(struct mtk_e
+@@ -482,42 +482,43 @@ int mtk_foe_entry_set_queue(struct mtk_e
return 0;
}
@@ -75,7 +75,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
hwe->ib1 &= ~MTK_FOE_IB1_STATE;
-@@ -532,7 +533,8 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
+@@ -537,7 +538,8 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
return;
@@ -85,7 +85,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
kfree(entry);
}
-@@ -548,66 +550,55 @@ static int __mtk_foe_entry_idle_time(str
+@@ -553,66 +555,55 @@ static int __mtk_foe_entry_idle_time(str
return now - timestamp;
}
@@ -181,7 +181,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
}
static void
-@@ -650,7 +641,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
+@@ -655,7 +646,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
spin_lock_bh(&ppe_lock);
@@ -191,7 +191,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
spin_unlock_bh(&ppe_lock);
}
-@@ -697,8 +689,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
+@@ -702,8 +694,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
{
const struct mtk_soc_data *soc = ppe->eth->soc;
struct mtk_flow_entry *flow_info;
@@ -201,7 +201,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
int type;
-@@ -706,30 +698,30 @@ mtk_foe_entry_commit_subflow(struct mtk_
+@@ -711,30 +703,30 @@ mtk_foe_entry_commit_subflow(struct mtk_
if (!flow_info)
return;
@@ -242,7 +242,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
}
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
-@@ -739,9 +731,11 @@ void __mtk_ppe_check_skb(struct mtk_ppe
+@@ -744,9 +736,11 @@ void __mtk_ppe_check_skb(struct mtk_ppe
struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
struct mtk_flow_entry *entry;
struct mtk_foe_bridge key = {};
@@ -254,7 +254,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
u8 *tag;
spin_lock_bh(&ppe_lock);
-@@ -749,20 +743,14 @@ void __mtk_ppe_check_skb(struct mtk_ppe
+@@ -754,20 +748,14 @@ void __mtk_ppe_check_skb(struct mtk_ppe
if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
goto out;
@@ -281,7 +281,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
continue;
}
-@@ -813,9 +801,17 @@ out:
+@@ -816,9 +804,17 @@ out:
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
diff --git a/target/linux/generic/pending-5.15/736-04-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch b/target/linux/generic/backport-5.15/751-04-v6.4-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch
index 983b77d609..2ea6d341b0 100644
--- a/target/linux/generic/pending-5.15/736-04-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch
+++ b/target/linux/generic/backport-5.15/751-04-v6.4-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch
@@ -14,7 +14,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
-@@ -80,9 +80,9 @@ static int mtk_ppe_mib_wait_busy(struct
+@@ -79,9 +79,9 @@ static int mtk_ppe_mib_wait_busy(struct
int ret;
u32 val;
@@ -27,7 +27,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
if (ret)
dev_err(ppe->dev, "MIB table busy");
-@@ -90,17 +90,31 @@ static int mtk_ppe_mib_wait_busy(struct
+@@ -89,17 +89,31 @@ static int mtk_ppe_mib_wait_busy(struct
return ret;
}
@@ -61,7 +61,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
-@@ -109,19 +123,19 @@ static int mtk_mib_entry_read(struct mtk
+@@ -108,19 +122,19 @@ static int mtk_mib_entry_read(struct mtk
if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
/* 64 bit for each counter */
u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
@@ -86,7 +86,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
}
static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
-@@ -526,13 +540,6 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
+@@ -525,13 +539,6 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
dma_wmb();
mtk_ppe_cache_clear(ppe);
@@ -100,7 +100,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
}
entry->hash = 0xffff;
-@@ -557,11 +564,14 @@ static int __mtk_foe_entry_idle_time(str
+@@ -556,11 +563,14 @@ static int __mtk_foe_entry_idle_time(str
}
static bool
@@ -116,7 +116,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
int len;
if (hash == 0xffff)
-@@ -572,18 +582,35 @@ mtk_flow_entry_update(struct mtk_ppe *pp
+@@ -571,18 +581,35 @@ mtk_flow_entry_update(struct mtk_ppe *pp
memcpy(&foe, hwe, len);
if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
@@ -155,7 +155,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
struct mtk_flow_entry *cur;
struct hlist_node *tmp;
int idle;
-@@ -592,7 +619,9 @@ mtk_flow_entry_update_l2(struct mtk_ppe
+@@ -591,7 +618,9 @@ mtk_flow_entry_update_l2(struct mtk_ppe
hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) {
int cur_idle;
@@ -166,7 +166,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
__mtk_foe_entry_clear(ppe, entry, false);
continue;
}
-@@ -607,10 +636,29 @@ mtk_flow_entry_update_l2(struct mtk_ppe
+@@ -606,10 +635,29 @@ mtk_flow_entry_update_l2(struct mtk_ppe
}
}
@@ -196,7 +196,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
struct mtk_eth *eth = ppe->eth;
u16 timestamp = mtk_eth_timestamp(eth);
struct mtk_foe_entry *hwe;
-@@ -641,6 +689,12 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
+@@ -640,6 +688,12 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
dma_wmb();
@@ -209,7 +209,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
mtk_ppe_cache_clear(ppe);
}
-@@ -805,21 +859,6 @@ out:
+@@ -802,21 +856,6 @@ out:
spin_unlock_bh(&ppe_lock);
}
@@ -231,7 +231,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
{
if (!ppe)
-@@ -847,32 +886,6 @@ int mtk_ppe_prepare_reset(struct mtk_ppe
+@@ -844,32 +883,6 @@ int mtk_ppe_prepare_reset(struct mtk_ppe
return mtk_ppe_wait_busy(ppe);
}
diff --git a/target/linux/generic/backport-5.15/752-01-v6.6-net-ethernet-mtk_wed-add-some-more-info-in-wed_txinf.patch b/target/linux/generic/backport-5.15/752-01-v6.6-net-ethernet-mtk_wed-add-some-more-info-in-wed_txinf.patch
new file mode 100644
index 0000000000..a224b62624
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-01-v6.6-net-ethernet-mtk_wed-add-some-more-info-in-wed_txinf.patch
@@ -0,0 +1,45 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sun, 27 Aug 2023 19:31:41 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: add some more info in wed_txinfo_show
+ handler
+
+Add some new info in Wireless Ethernet Dispatcher wed_txinfo_show
+debugfs handler useful during debugging.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://lore.kernel.org/r/3390292655d568180b73d2a25576f61aa63310e5.1693157377.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -127,8 +127,17 @@ wed_txinfo_show(struct seq_file *s, void
+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
+
+- DUMP_STR("TX FREE"),
++ DUMP_STR("WED TX FREE"),
+ DUMP_WED(WED_RX_MIB(0)),
++ DUMP_WED_RING(WED_RING_RX(0)),
++ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)),
++ DUMP_WED(WED_RX_MIB(1)),
++ DUMP_WED_RING(WED_RING_RX(1)),
++ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)),
++
++ DUMP_STR("WED WPDMA TX FREE"),
++ DUMP_WED_RING(WED_WPDMA_RING_RX(0)),
++ DUMP_WED_RING(WED_WPDMA_RING_RX(1)),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -266,6 +266,8 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
+ #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
++#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
++#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
+
+ #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
+ #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
diff --git a/target/linux/generic/backport-5.15/752-02-v6.6-net-ethernet-mtk_wed-minor-change-in-wed_-tx-rx-info.patch b/target/linux/generic/backport-5.15/752-02-v6.6-net-ethernet-mtk_wed-minor-change-in-wed_-tx-rx-info.patch
new file mode 100644
index 0000000000..df6edfdf94
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-02-v6.6-net-ethernet-mtk_wed-minor-change-in-wed_-tx-rx-info.patch
@@ -0,0 +1,47 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sun, 27 Aug 2023 19:33:47 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: minor change in wed_{tx,rx}info_show
+
+No functional changes, just cosmetic ones.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://lore.kernel.org/r/71e046c72a978745f0435af265dda610aa9bfbcf.1693157578.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -84,7 +84,6 @@ dump_wed_regs(struct seq_file *s, struct
+ }
+ }
+
+-
+ static int
+ wed_txinfo_show(struct seq_file *s, void *data)
+ {
+@@ -142,10 +141,8 @@ wed_txinfo_show(struct seq_file *s, void
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+- if (!dev)
+- return 0;
+-
+- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+ }
+@@ -217,10 +214,8 @@ wed_rxinfo_show(struct seq_file *s, void
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+- if (!dev)
+- return 0;
+-
+- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+ }
diff --git a/target/linux/generic/backport-5.15/752-03-v6.6-net-ethernet-mtk_eth_soc-rely-on-mtk_pse_port-defini.patch b/target/linux/generic/backport-5.15/752-03-v6.6-net-ethernet-mtk_eth_soc-rely-on-mtk_pse_port-defini.patch
new file mode 100644
index 0000000000..0bf9dea24f
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-03-v6.6-net-ethernet-mtk_eth_soc-rely-on-mtk_pse_port-defini.patch
@@ -0,0 +1,29 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue, 12 Sep 2023 10:22:56 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on mtk_pse_port definitions
+ in mtk_flow_set_output_device
+
+Similar to ethernet ports, rely on mtk_pse_port definitions for
+pse wdma ports as well.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/b86bdb717e963e3246c1dec5f736c810703cf056.1694506814.git.lorenzo@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -196,10 +196,10 @@ mtk_flow_set_output_device(struct mtk_et
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ switch (info.wdma_idx) {
+ case 0:
+- pse_port = 8;
++ pse_port = PSE_WDMA0_PORT;
+ break;
+ case 1:
+- pse_port = 9;
++ pse_port = PSE_WDMA1_PORT;
+ break;
+ default:
+ return -EINVAL;
diff --git a/target/linux/generic/backport-5.15/752-04-v6.6-net-ethernet-mtk_wed-check-update_wo_rx_stats-in-mtk.patch b/target/linux/generic/backport-5.15/752-04-v6.6-net-ethernet-mtk_wed-check-update_wo_rx_stats-in-mtk.patch
new file mode 100644
index 0000000000..c99e1334d4
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-04-v6.6-net-ethernet-mtk_wed-check-update_wo_rx_stats-in-mtk.patch
@@ -0,0 +1,26 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue, 12 Sep 2023 10:28:00 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: check update_wo_rx_stats in
+ mtk_wed_update_rx_stats()
+
+Check if update_wo_rx_stats function pointer is properly set in
+mtk_wed_update_rx_stats routine before accessing it.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/b0d233386e059bccb59f18f69afb79a7806e5ded.1694507226.git.lorenzo@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -68,6 +68,9 @@ mtk_wed_update_rx_stats(struct mtk_wed_d
+ struct mtk_wed_wo_rx_stats *stats;
+ int i;
+
++ if (!wed->wlan.update_wo_rx_stats)
++ return;
++
+ if (count * sizeof(*stats) > skb->len - sizeof(u32))
+ return;
+
diff --git a/target/linux/generic/backport-5.15/752-05-v6.7-net-ethernet-mtk_wed-do-not-assume-offload-callbacks.patch b/target/linux/generic/backport-5.15/752-05-v6.7-net-ethernet-mtk_wed-do-not-assume-offload-callbacks.patch
new file mode 100644
index 0000000000..cd7fb92e20
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-05-v6.7-net-ethernet-mtk_wed-do-not-assume-offload-callbacks.patch
@@ -0,0 +1,68 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Wed, 13 Sep 2023 20:42:47 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: do not assume offload callbacks are
+ always set
+
+Check if wlan.offload_enable and wlan.offload_disable callbacks are set
+in mtk_wed_flow_add/mtk_wed_flow_remove since mt7996 will not rely
+on them.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -1712,19 +1712,20 @@ mtk_wed_irq_set_mask(struct mtk_wed_devi
+ int mtk_wed_flow_add(int index)
+ {
+ struct mtk_wed_hw *hw = hw_list[index];
+- int ret;
++ int ret = 0;
+
+- if (!hw || !hw->wed_dev)
+- return -ENODEV;
++ mutex_lock(&hw_lock);
+
+- if (hw->num_flows) {
+- hw->num_flows++;
+- return 0;
++ if (!hw || !hw->wed_dev) {
++ ret = -ENODEV;
++ goto out;
+ }
+
+- mutex_lock(&hw_lock);
+- if (!hw->wed_dev) {
+- ret = -ENODEV;
++ if (!hw->wed_dev->wlan.offload_enable)
++ goto out;
++
++ if (hw->num_flows) {
++ hw->num_flows++;
+ goto out;
+ }
+
+@@ -1743,14 +1744,15 @@ void mtk_wed_flow_remove(int index)
+ {
+ struct mtk_wed_hw *hw = hw_list[index];
+
+- if (!hw)
+- return;
++ mutex_lock(&hw_lock);
+
+- if (--hw->num_flows)
+- return;
++ if (!hw || !hw->wed_dev)
++ goto out;
+
+- mutex_lock(&hw_lock);
+- if (!hw->wed_dev)
++ if (!hw->wed_dev->wlan.offload_disable)
++ goto out;
++
++ if (--hw->num_flows)
+ goto out;
+
+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
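The hunks above boil down to a defensive pattern: take the lock, bail out early when the device or an optional callback is missing, and only then touch the flow refcount. The following standalone C sketch mirrors that shape under stated assumptions — struct hw_slot, demo_flow_add and demo_enable are invented for illustration and are not part of the driver:

/* Illustrative only: guard optional callbacks under a lock before
 * touching a refcount, mirroring the flow_add shape in the hunk above. */
#include <pthread.h>
#include <stdio.h>

struct hw_slot {
	int num_flows;
	int (*offload_enable)(struct hw_slot *hw);	/* may be NULL */
	void (*offload_disable)(struct hw_slot *hw);	/* may be NULL */
};

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;

static int demo_flow_add(struct hw_slot *hw)
{
	int ret = 0;

	pthread_mutex_lock(&hw_lock);
	if (!hw) {			/* no device bound yet */
		ret = -1;
		goto out;
	}
	if (!hw->offload_enable)	/* callback is optional: nothing to do */
		goto out;
	if (hw->num_flows) {		/* already enabled, just take a reference */
		hw->num_flows++;
		goto out;
	}
	ret = hw->offload_enable(hw);
	if (!ret)
		hw->num_flows++;
out:
	pthread_mutex_unlock(&hw_lock);
	return ret;
}

static int demo_enable(struct hw_slot *hw)
{
	(void)hw;
	return 0;
}

int main(void)
{
	struct hw_slot hw = { .offload_enable = demo_enable };
	int ret = demo_flow_add(&hw);

	printf("flow add: %d, flows: %d\n", ret, hw.num_flows);
	return 0;
}

Built with cc -pthread, the sketch prints one active flow; with offload_enable left NULL it returns early without touching the count, which is the behaviour the patch adds for mt7996.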
diff --git a/target/linux/generic/backport-5.15/752-06-v6.7-net-ethernet-mtk_wed-introduce-versioning-utility-ro.patch b/target/linux/generic/backport-5.15/752-06-v6.7-net-ethernet-mtk_wed-introduce-versioning-utility-ro.patch
new file mode 100644
index 0000000000..2948188650
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-06-v6.7-net-ethernet-mtk_wed-introduce-versioning-utility-ro.patch
@@ -0,0 +1,232 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:05 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce versioning utility routines
+
+Similar to mtk_eth_soc, introduce the following wed versioning
+utility routines:
+- mtk_wed_is_v1
+- mtk_wed_is_v2
+
+This is a preliminary patch to introduce WED support for the MT7988 SoC.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -277,7 +277,7 @@ mtk_wed_assign(struct mtk_wed_device *de
+ if (!hw->wed_dev)
+ goto out;
+
+- if (hw->version == 1)
++ if (mtk_wed_is_v1(hw))
+ return NULL;
+
+ /* MT7986 WED devices do not have any pcie slot restrictions */
+@@ -358,7 +358,7 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_d
+ desc->buf0 = cpu_to_le32(buf_phys);
+ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size) |
+@@ -497,7 +497,7 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
+ {
+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+@@ -576,7 +576,7 @@ mtk_wed_dma_disable(struct mtk_wed_devic
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+@@ -605,7 +605,7 @@ mtk_wed_stop(struct mtk_wed_device *dev)
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ return;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
+@@ -624,7 +624,7 @@ mtk_wed_deinit(struct mtk_wed_device *de
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ return;
+
+ wed_clr(dev, MTK_WED_CTRL,
+@@ -730,7 +730,7 @@ mtk_wed_bus_init(struct mtk_wed_device *
+ static void
+ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
+ {
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ } else {
+ mtk_wed_bus_init(dev);
+@@ -761,7 +761,7 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ u32 offset = dev->hw->index ? 0x04000400 : 0;
+
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+@@ -934,7 +934,7 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_TX_BM_TKID,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+@@ -967,7 +967,7 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+@@ -1217,7 +1217,7 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ }
+
+ dev->init_done = false;
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ return;
+
+ if (!busy) {
+@@ -1343,7 +1343,7 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
+ MTK_WED_PCIE_INT_TRIGGER_STATUS);
+
+@@ -1416,7 +1416,7 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ } else {
+@@ -1465,7 +1465,7 @@ mtk_wed_start(struct mtk_wed_device *dev
+
+ mtk_wed_set_ext_int(dev, true);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
+ dev->hw->index);
+@@ -1550,7 +1550,7 @@ mtk_wed_attach(struct mtk_wed_device *de
+ }
+
+ mtk_wed_hw_init_early(dev);
+- if (hw->version == 1) {
++ if (mtk_wed_is_v1(hw)) {
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), 0);
+ } else {
+@@ -1618,7 +1618,7 @@ static int
+ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+ {
+ struct mtk_wed_ring *ring = &dev->txfree_ring;
+- int i, index = dev->hw->version == 1;
++ int i, index = mtk_wed_is_v1(dev->hw);
+
+ /*
+ * For txfree event handling, the same DMA ring is shared between WED
+@@ -1676,7 +1676,7 @@ mtk_wed_irq_get(struct mtk_wed_device *d
+ {
+ u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+@@ -1843,7 +1843,7 @@ mtk_wed_setup_tc(struct mtk_wed_device *
+ {
+ struct mtk_wed_hw *hw = wed->hw;
+
+- if (hw->version < 2)
++ if (mtk_wed_is_v1(hw))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+@@ -1917,9 +1917,9 @@ void mtk_wed_add_hw(struct device_node *
+ hw->wdma = wdma;
+ hw->index = index;
+ hw->irq = irq;
+- hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;
++ hw->version = eth->soc->version;
+
+- if (hw->version == 1) {
++ if (mtk_wed_is_v1(hw)) {
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,pcie-mirror");
+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -40,6 +40,16 @@ struct mtk_wdma_info {
+ };
+
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
++static inline bool mtk_wed_is_v1(struct mtk_wed_hw *hw)
++{
++ return hw->version == 1;
++}
++
++static inline bool mtk_wed_is_v2(struct mtk_wed_hw *hw)
++{
++ return hw->version == 2;
++}
++
+ static inline void
+ wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+ {
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -263,7 +263,7 @@ void mtk_wed_hw_add_debugfs(struct mtk_w
+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+- if (hw->version != 1)
++ if (!mtk_wed_is_v1(hw))
+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
+ &wed_rxinfo_fops);
+ }
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -207,7 +207,7 @@ int mtk_wed_mcu_msg_update(struct mtk_we
+ {
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ return 0;
+
+ if (WARN_ON(!wo))
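The helpers simply wrap the raw version compare so call sites read as a capability check instead of a magic number. A minimal standalone C illustration of the same idea — the demo_* names and main() are invented for the example, not taken from mtk_wed:

/* Illustrative only: tiny stand-ins for the mtk_wed_is_v1/v2 helpers. */
#include <stdbool.h>
#include <stdio.h>

struct demo_hw {
	int version;
};

static inline bool demo_is_v1(const struct demo_hw *hw)
{
	return hw->version == 1;
}

static inline bool demo_is_v2(const struct demo_hw *hw)
{
	return hw->version == 2;
}

int main(void)
{
	struct demo_hw hw = { .version = 2 };

	/* call sites now express intent instead of comparing raw integers */
	printf("v1: %d, v2: %d\n", demo_is_v1(&hw), demo_is_v2(&hw));
	return 0;
}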
diff --git a/target/linux/generic/backport-5.15/752-07-v6.7-net-ethernet-mtk_wed-do-not-configure-rx-offload-if-.patch b/target/linux/generic/backport-5.15/752-07-v6.7-net-ethernet-mtk_wed-do-not-configure-rx-offload-if-.patch
new file mode 100644
index 0000000000..bc34aa33a9
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-07-v6.7-net-ethernet-mtk_wed-do-not-configure-rx-offload-if-.patch
@@ -0,0 +1,234 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:06 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: do not configure rx offload if not
+ supported
+
+Check if rx offload is supported by running the mtk_wed_get_rx_capa routine
+before configuring it. This is a preliminary patch to introduce Wireless
+Ethernet Dispatcher (WED) support for MT7988 SoC.
+
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -605,7 +605,7 @@ mtk_wed_stop(struct mtk_wed_device *dev)
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+- if (mtk_wed_is_v1(dev->hw))
++ if (!mtk_wed_get_rx_capa(dev))
+ return;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
+@@ -732,16 +732,21 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+ {
+ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+- } else {
+- mtk_wed_bus_init(dev);
+-
+- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
+- wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
+- wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+- wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
+- wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+- wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
++ return;
+ }
++
++ mtk_wed_bus_init(dev);
++
++ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
++ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
++ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
++ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
++
++ if (!mtk_wed_get_rx_capa(dev))
++ return;
++
++ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
++ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
+ }
+
+ static void
+@@ -973,15 +978,17 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ } else {
+ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+- /* rx hw init */
+- wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+- MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+- MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+- wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+-
+- mtk_wed_rx_buffer_hw_init(dev);
+- mtk_wed_rro_hw_init(dev);
+- mtk_wed_route_qm_hw_init(dev);
++ if (mtk_wed_get_rx_capa(dev)) {
++ /* rx hw init */
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
++ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
++ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
++
++ mtk_wed_rx_buffer_hw_init(dev);
++ mtk_wed_rro_hw_init(dev);
++ mtk_wed_route_qm_hw_init(dev);
++ }
+ }
+
+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
+@@ -1353,8 +1360,6 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+ } else {
+- wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
+- GENMASK(1, 0));
+ /* initail tx interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
+@@ -1373,15 +1378,20 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
+ dev->wlan.txfree_tbit));
+
+- wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
+- MTK_WED_WPDMA_INT_CTRL_RX0_EN |
+- MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
+- MTK_WED_WPDMA_INT_CTRL_RX1_EN |
+- MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
+- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
+- dev->wlan.rx_tbit[0]) |
+- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
+- dev->wlan.rx_tbit[1]));
++ if (mtk_wed_get_rx_capa(dev)) {
++ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
++ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
++ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
++ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
++ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
++ dev->wlan.rx_tbit[0]) |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
++ dev->wlan.rx_tbit[1]));
++
++ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
++ GENMASK(1, 0));
++ }
+
+ wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
+ wed_set(dev, MTK_WED_WDMA_INT_CTRL,
+@@ -1400,6 +1410,8 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+ static void
+ mtk_wed_dma_enable(struct mtk_wed_device *dev)
+ {
++ int i;
++
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+
+ wed_set(dev, MTK_WED_GLO_CFG,
+@@ -1419,33 +1431,33 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ if (mtk_wed_is_v1(dev->hw)) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+- } else {
+- int i;
++ return;
++ }
+
+- wed_set(dev, MTK_WED_WPDMA_CTRL,
+- MTK_WED_WPDMA_CTRL_SDL1_FIXED);
++ wed_set(dev, MTK_WED_WPDMA_CTRL,
++ MTK_WED_WPDMA_CTRL_SDL1_FIXED);
++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
++ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
++ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+
+- wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+- MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
+- MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
++ if (!mtk_wed_get_rx_capa(dev))
++ return;
+
+- wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+- MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+- MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+-
+- wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+- MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+- MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
++ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
++ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
++ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+
+- wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+- MTK_WED_WPDMA_RX_D_RX_DRV_EN |
+- FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
+- FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
+- 0x2));
++ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
++ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
++ 0x2));
+
+- for (i = 0; i < MTK_WED_RX_QUEUES; i++)
+- mtk_wed_check_wfdma_rx_fill(dev, i);
+- }
++ for (i = 0; i < MTK_WED_RX_QUEUES; i++)
++ mtk_wed_check_wfdma_rx_fill(dev, i);
+ }
+
+ static void
+@@ -1472,7 +1484,7 @@ mtk_wed_start(struct mtk_wed_device *dev
+
+ val |= BIT(0) | (BIT(1) * !!dev->hw->index);
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+- } else {
++ } else if (mtk_wed_get_rx_capa(dev)) {
+ /* driver set mid ready and only once */
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1,
+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+@@ -1484,7 +1496,6 @@ mtk_wed_start(struct mtk_wed_device *dev
+
+ if (mtk_wed_rro_cfg(dev))
+ return;
+-
+ }
+
+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
+@@ -1550,13 +1561,14 @@ mtk_wed_attach(struct mtk_wed_device *de
+ }
+
+ mtk_wed_hw_init_early(dev);
+- if (mtk_wed_is_v1(hw)) {
++ if (mtk_wed_is_v1(hw))
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), 0);
+- } else {
++ else
+ dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
++
++ if (mtk_wed_get_rx_capa(dev))
+ ret = mtk_wed_wo_init(hw);
+- }
+ out:
+ if (ret) {
+ dev_err(dev->hw->dev, "failed to attach wed device\n");
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -207,7 +207,7 @@ int mtk_wed_mcu_msg_update(struct mtk_we
+ {
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+
+- if (mtk_wed_is_v1(dev->hw))
++ if (!mtk_wed_get_rx_capa(dev))
+ return 0;
+
+ if (WARN_ON(!wo))
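The recurring shape in this patch is "probe the capability, then configure", rather than keying every RX-side write off the hardware revision. A rough standalone C sketch of gating a setup step on such a probe — demo_get_rx_capa and friends are made-up stand-ins, not the driver API:

/* Illustrative only: gate optional RX-offload setup on a capability probe. */
#include <stdbool.h>
#include <stdio.h>

struct demo_dev {
	bool rx_offload;	/* would come from a hardware/firmware probe */
};

static bool demo_get_rx_capa(const struct demo_dev *dev)
{
	return dev->rx_offload;
}

static void demo_configure(struct demo_dev *dev)
{
	printf("common tx-side setup\n");	/* done for every revision */

	if (!demo_get_rx_capa(dev))
		return;			/* skip the whole RX offload block */

	printf("programming rx offload rings\n");
}

int main(void)
{
	struct demo_dev dev = { .rx_offload = false };

	demo_configure(&dev);
	return 0;
}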
diff --git a/target/linux/generic/backport-5.15/752-08-v6.7-net-ethernet-mtk_wed-rename-mtk_rxbm_desc-in-mtk_wed.patch b/target/linux/generic/backport-5.15/752-08-v6.7-net-ethernet-mtk_wed-rename-mtk_rxbm_desc-in-mtk_wed.patch
new file mode 100644
index 0000000000..d83434fb2c
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-08-v6.7-net-ethernet-mtk_wed-rename-mtk_rxbm_desc-in-mtk_wed.patch
@@ -0,0 +1,52 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:07 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: rename mtk_rxbm_desc in
+ mtk_wed_bm_desc
+
+Rename the mtk_rxbm_desc structure to mtk_wed_bm_desc since it will be
+used even on the tx side by the MT7988 SoC.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -421,7 +421,7 @@ free_pagelist:
+ static int
+ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
+ {
+- struct mtk_rxbm_desc *desc;
++ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+
+ dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
+@@ -441,7 +441,7 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_d
+ static void
+ mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
+ {
+- struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
++ struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc;
+
+ if (!desc)
+ return;
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -45,7 +45,7 @@ enum mtk_wed_wo_cmd {
+ MTK_WED_WO_CMD_WED_END
+ };
+
+-struct mtk_rxbm_desc {
++struct mtk_wed_bm_desc {
+ __le32 buf0;
+ __le32 token;
+ } __packed __aligned(4);
+@@ -105,7 +105,7 @@ struct mtk_wed_device {
+ struct {
+ int size;
+ struct page_frag_cache rx_page;
+- struct mtk_rxbm_desc *desc;
++ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+ } rx_buf_ring;
+
diff --git a/target/linux/generic/backport-5.15/752-09-v6.7-net-ethernet-mtk_wed-introduce-mtk_wed_buf-structure.patch b/target/linux/generic/backport-5.15/752-09-v6.7-net-ethernet-mtk_wed-introduce-mtk_wed_buf-structure.patch
new file mode 100644
index 0000000000..8000a8759e
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-09-v6.7-net-ethernet-mtk_wed-introduce-mtk_wed_buf-structure.patch
@@ -0,0 +1,87 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:08 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce mtk_wed_buf structure
+
+Introduce the mtk_wed_buf structure to store both the virtual and physical
+addresses allocated in the mtk_wed_tx_buffer_alloc() routine. This is a
+preliminary patch to add WED support for the MT7988 SoC, since it relies
+on a different dma descriptor layout that does not store page dma
+addresses.
+
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -299,9 +299,9 @@ out:
+ static int
+ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
+ {
++ struct mtk_wed_buf *page_list;
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+- void **page_list;
+ int token = dev->wlan.token_start;
+ int ring_size;
+ int n_pages;
+@@ -342,7 +342,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_d
+ return -ENOMEM;
+ }
+
+- page_list[page_idx++] = page;
++ page_list[page_idx].p = page;
++ page_list[page_idx++].phy_addr = page_phys;
+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+@@ -386,8 +387,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_d
+ static void
+ mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
+ {
++ struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages;
+ struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
+- void **page_list = dev->tx_buf_ring.pages;
+ int page_idx;
+ int i;
+
+@@ -399,13 +400,12 @@ mtk_wed_free_tx_buffer(struct mtk_wed_de
+
+ for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
+ i += MTK_WED_BUF_PER_PAGE) {
+- void *page = page_list[page_idx++];
+- dma_addr_t buf_addr;
++ dma_addr_t buf_addr = page_list[page_idx].phy_addr;
++ void *page = page_list[page_idx++].p;
+
+ if (!page)
+ break;
+
+- buf_addr = le32_to_cpu(desc[i].buf0);
+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -76,6 +76,11 @@ struct mtk_wed_wo_rx_stats {
+ __le32 rx_drop_cnt;
+ };
+
++struct mtk_wed_buf {
++ void *p;
++ dma_addr_t phy_addr;
++};
++
+ struct mtk_wed_device {
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ const struct mtk_wed_ops *ops;
+@@ -97,7 +102,7 @@ struct mtk_wed_device {
+
+ struct {
+ int size;
+- void **pages;
++ struct mtk_wed_buf *pages;
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ } tx_buf_ring;
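The point of the new pairing is visible in the free path above: once both the page pointer and its DMA address are recorded at allocation time, teardown no longer has to read the address back out of a descriptor whose layout may differ per SoC. A hypothetical userspace sketch of keeping such pairs — the dma_addr_t typedef and the demo_* helpers are stand-ins, not kernel API:

/* Illustrative only: record both CPU and DMA addresses at allocation time
 * so the free path never has to parse descriptors. */
#include <stdint.h>
#include <stdlib.h>

typedef uint64_t dma_addr_t;	/* userspace stand-in for the kernel type */

struct demo_buf {
	void *p;		/* CPU (virtual) address */
	dma_addr_t phy_addr;	/* bus address recorded when the page is mapped */
};

static void demo_free_all(struct demo_buf *list, int n)
{
	for (int i = 0; i < n; i++) {
		if (!list[i].p)
			break;
		/* list[i].phy_addr would feed dma_unmap_page() here */
		free(list[i].p);
	}
	free(list);
}

int main(void)
{
	struct demo_buf *list = calloc(4, sizeof(*list));

	if (!list)
		return 1;
	list[0].p = malloc(64);
	list[0].phy_addr = 0x1000;	/* pretend bus address */
	demo_free_all(list, 4);
	return 0;
}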
diff --git a/target/linux/generic/backport-5.15/752-10-v6.7-net-ethernet-mtk_wed-move-mem_region-array-out-of-mt.patch b/target/linux/generic/backport-5.15/752-10-v6.7-net-ethernet-mtk_wed-move-mem_region-array-out-of-mt.patch
new file mode 100644
index 0000000000..98d782b1d0
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-10-v6.7-net-ethernet-mtk_wed-move-mem_region-array-out-of-mt.patch
@@ -0,0 +1,88 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:09 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: move mem_region array out of
+ mtk_wed_mcu_load_firmware
+
+Remove the mtk_wed_wo_memory_region boot member from the mtk_wed_wo struct.
+This is a preliminary patch to introduce WED support for MT7988 SoC.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -16,14 +16,30 @@
+ #include "mtk_wed_wo.h"
+ #include "mtk_wed.h"
+
++static struct mtk_wed_wo_memory_region mem_region[] = {
++ [MTK_WED_WO_REGION_EMI] = {
++ .name = "wo-emi",
++ },
++ [MTK_WED_WO_REGION_ILM] = {
++ .name = "wo-ilm",
++ },
++ [MTK_WED_WO_REGION_DATA] = {
++ .name = "wo-data",
++ .shared = true,
++ },
++ [MTK_WED_WO_REGION_BOOT] = {
++ .name = "wo-boot",
++ },
++};
++
+ static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
+ {
+- return readl(wo->boot.addr + reg);
++ return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ }
+
+ static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+ {
+- writel(val, wo->boot.addr + reg);
++ writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ }
+
+ static struct sk_buff *
+@@ -294,18 +310,6 @@ next:
+ static int
+ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
+ {
+- static struct mtk_wed_wo_memory_region mem_region[] = {
+- [MTK_WED_WO_REGION_EMI] = {
+- .name = "wo-emi",
+- },
+- [MTK_WED_WO_REGION_ILM] = {
+- .name = "wo-ilm",
+- },
+- [MTK_WED_WO_REGION_DATA] = {
+- .name = "wo-data",
+- .shared = true,
+- },
+- };
+ const struct mtk_wed_fw_trailer *trailer;
+ const struct firmware *fw;
+ const char *fw_name;
+@@ -319,11 +323,6 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ return ret;
+ }
+
+- wo->boot.name = "wo-boot";
+- ret = mtk_wed_get_memory_region(wo, &wo->boot);
+- if (ret)
+- return ret;
+-
+ /* set dummy cr */
+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
+ wo->hw->index + 1);
+--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+@@ -228,7 +228,6 @@ struct mtk_wed_wo_queue {
+
+ struct mtk_wed_wo {
+ struct mtk_wed_hw *hw;
+- struct mtk_wed_wo_memory_region boot;
+
+ struct mtk_wed_wo_queue q_tx;
+ struct mtk_wed_wo_queue q_rx;
diff --git a/target/linux/generic/backport-5.15/752-11-v6.7-net-ethernet-mtk_wed-make-memory-region-optional.patch b/target/linux/generic/backport-5.15/752-11-v6.7-net-ethernet-mtk_wed-make-memory-region-optional.patch
new file mode 100644
index 0000000000..48b0d02049
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-11-v6.7-net-ethernet-mtk_wed-make-memory-region-optional.patch
@@ -0,0 +1,71 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:10 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: make memory region optional
+
+Make the mtk_wed_wo_memory_region regions optional.
+This is a preliminary patch to introduce Wireless Ethernet Dispatcher
+support for MT7988 SoC since MT7988 WED fw image will have a different
+layout.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -234,19 +234,13 @@ int mtk_wed_mcu_msg_update(struct mtk_we
+ }
+
+ static int
+-mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
++mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index,
+ struct mtk_wed_wo_memory_region *region)
+ {
+ struct reserved_mem *rmem;
+ struct device_node *np;
+- int index;
+
+- index = of_property_match_string(wo->hw->node, "memory-region-names",
+- region->name);
+- if (index < 0)
+- return index;
+-
+- np = of_parse_phandle(wo->hw->node, "memory-region", index);
++ np = of_parse_phandle(hw->node, "memory-region", index);
+ if (!np)
+ return -ENODEV;
+
+@@ -258,7 +252,7 @@ mtk_wed_get_memory_region(struct mtk_wed
+
+ region->phy_addr = rmem->base;
+ region->size = rmem->size;
+- region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size);
++ region->addr = devm_ioremap(hw->dev, region->phy_addr, region->size);
+
+ return !region->addr ? -EINVAL : 0;
+ }
+@@ -271,6 +265,9 @@ mtk_wed_mcu_run_firmware(struct mtk_wed_
+ const struct mtk_wed_fw_trailer *trailer;
+ const struct mtk_wed_fw_region *fw_region;
+
++ if (!region->phy_addr || !region->size)
++ return 0;
++
+ trailer_ptr = fw->data + fw->size - sizeof(*trailer);
+ trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
+ region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
+@@ -318,7 +315,13 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+
+ /* load firmware region metadata */
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+- ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
++ int index = of_property_match_string(wo->hw->node,
++ "memory-region-names",
++ mem_region[i].name);
++ if (index < 0)
++ continue;
++
++ ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]);
+ if (ret)
+ return ret;
+ }
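The two hunks cooperate: the load loop skips any region whose name is absent from memory-region-names, and mtk_wed_mcu_run_firmware() treats a region without a base or size as a no-op. A loose standalone C sketch of that "optional table entry" idiom, where demo_lookup stands in for the OF property lookup and only the region names are taken from the table above:

/* Illustrative only: skip table entries whose name is not listed, the way
 * the firmware loader now skips absent memory regions. */
#include <stdio.h>
#include <string.h>

struct demo_region {
	const char *name;
	unsigned long phy_addr;
	unsigned long size;
};

/* pretend "memory-region-names" lookup: -1 means the name is not listed */
static int demo_lookup(const char *name)
{
	static const char * const present[] = { "wo-emi", "wo-data" };

	for (unsigned int i = 0; i < sizeof(present) / sizeof(present[0]); i++)
		if (!strcmp(present[i], name))
			return (int)i;
	return -1;
}

int main(void)
{
	struct demo_region regions[] = {
		{ .name = "wo-emi" }, { .name = "wo-ilm" },
		{ .name = "wo-data" }, { .name = "wo-boot" },
	};

	for (unsigned int i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
		if (demo_lookup(regions[i].name) < 0)
			continue;	/* region is optional: just skip it */
		printf("would map %s\n", regions[i].name);
	}
	return 0;
}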
diff --git a/target/linux/generic/backport-5.15/752-12-v6.7-net-ethernet-mtk_wed-fix-EXT_INT_STATUS_RX_FBUF-defi.patch b/target/linux/generic/backport-5.15/752-12-v6.7-net-ethernet-mtk_wed-fix-EXT_INT_STATUS_RX_FBUF-defi.patch
new file mode 100644
index 0000000000..878e8fe996
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-12-v6.7-net-ethernet-mtk_wed-fix-EXT_INT_STATUS_RX_FBUF-defi.patch
@@ -0,0 +1,27 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:11 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: fix EXT_INT_STATUS_RX_FBUF
+ definitions for MT7986 SoC
+
+Fix MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH and
+MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH definitions for MT7986 (MT7986 is
+the only SoC to use them).
+
+Fixes: de84a090d99a ("net: ethernet: mtk_eth_wed: add wed support for mt7986 chipset")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -64,8 +64,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
diff --git a/target/linux/generic/backport-5.15/752-13-v6.7-net-ethernet-mtk_wed-add-mtk_wed_soc_data-structure.patch b/target/linux/generic/backport-5.15/752-13-v6.7-net-ethernet-mtk_wed-add-mtk_wed_soc_data-structure.patch
new file mode 100644
index 0000000000..c43114fb5b
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-13-v6.7-net-ethernet-mtk_wed-add-mtk_wed_soc_data-structure.patch
@@ -0,0 +1,217 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:12 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: add mtk_wed_soc_data structure
+
+Introduce mtk_wed_soc_data utility structure to contain per-SoC
+definitions.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -48,6 +48,26 @@ struct mtk_wed_flow_block_priv {
+ struct net_device *dev;
+ };
+
++static const struct mtk_wed_soc_data mt7622_data = {
++ .regmap = {
++ .tx_bm_tkid = 0x088,
++ .wpdma_rx_ring0 = 0x770,
++ .reset_idx_tx_mask = GENMASK(3, 0),
++ .reset_idx_rx_mask = GENMASK(17, 16),
++ },
++ .wdma_desc_size = sizeof(struct mtk_wdma_desc),
++};
++
++static const struct mtk_wed_soc_data mt7986_data = {
++ .regmap = {
++ .tx_bm_tkid = 0x0c8,
++ .wpdma_rx_ring0 = 0x770,
++ .reset_idx_tx_mask = GENMASK(1, 0),
++ .reset_idx_rx_mask = GENMASK(7, 6),
++ },
++ .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
++};
++
+ static void
+ wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+ {
+@@ -746,7 +766,7 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+ return;
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+- wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
++ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
+ }
+
+ static void
+@@ -940,22 +960,10 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+ if (mtk_wed_is_v1(dev->hw)) {
+- wed_w32(dev, MTK_WED_TX_BM_TKID,
+- FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+- dev->wlan.token_start) |
+- FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+- dev->wlan.token_start +
+- dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+ MTK_WED_TX_BM_DYN_THR_HI);
+ } else {
+- wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
+- FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+- dev->wlan.token_start) |
+- FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+- dev->wlan.token_start +
+- dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
+ MTK_WED_TX_BM_DYN_THR_HI_V2);
+@@ -970,6 +978,11 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+ MTK_WED_TX_TKID_DYN_THR_HI);
+ }
+
++ wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid,
++ FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) |
++ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
++ dev->wlan.token_start + dev->wlan.nbuf - 1));
++
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+ if (mtk_wed_is_v1(dev->hw)) {
+@@ -1104,13 +1117,8 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
+ } else {
+- struct mtk_eth *eth = dev->hw->eth;
+-
+- if (mtk_is_netsys_v2_or_greater(eth))
+- wed_set(dev, MTK_WED_RESET_IDX,
+- MTK_WED_RESET_IDX_RX_V2);
+- else
+- wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
++ wed_set(dev, MTK_WED_RESET_IDX,
++ dev->hw->soc->regmap.reset_idx_rx_mask);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+@@ -1163,7 +1171,8 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
+ } else {
+- wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
++ wed_w32(dev, MTK_WED_RESET_IDX,
++ dev->hw->soc->regmap.reset_idx_tx_mask);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+@@ -1255,7 +1264,6 @@ static int
+ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
+ {
+- u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+ struct mtk_wed_ring *wdma;
+
+ if (idx >= ARRAY_SIZE(dev->rx_wdma))
+@@ -1263,7 +1271,7 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
+
+ wdma = &dev->rx_wdma[idx];
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+- desc_size, true))
++ dev->hw->soc->wdma_desc_size, true))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+@@ -1284,7 +1292,6 @@ static int
+ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
+ {
+- u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+ struct mtk_wed_ring *wdma;
+
+ if (idx >= ARRAY_SIZE(dev->tx_wdma))
+@@ -1292,7 +1299,7 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
+
+ wdma = &dev->tx_wdma[idx];
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+- desc_size, true))
++ dev->hw->soc->wdma_desc_size, true))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+@@ -1931,7 +1938,12 @@ void mtk_wed_add_hw(struct device_node *
+ hw->irq = irq;
+ hw->version = eth->soc->version;
+
+- if (mtk_wed_is_v1(hw)) {
++ switch (hw->version) {
++ case 2:
++ hw->soc = &mt7986_data;
++ break;
++ default:
++ case 1:
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,pcie-mirror");
+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+@@ -1945,6 +1957,8 @@ void mtk_wed_add_hw(struct device_node *
+ regmap_write(hw->mirror, 0, 0);
+ regmap_write(hw->mirror, 4, 0);
+ }
++ hw->soc = &mt7622_data;
++ break;
+ }
+
+ mtk_wed_hw_add_debugfs(hw);
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -12,7 +12,18 @@
+ struct mtk_eth;
+ struct mtk_wed_wo;
+
++struct mtk_wed_soc_data {
++ struct {
++ u32 tx_bm_tkid;
++ u32 wpdma_rx_ring0;
++ u32 reset_idx_tx_mask;
++ u32 reset_idx_rx_mask;
++ } regmap;
++ u32 wdma_desc_size;
++};
++
+ struct mtk_wed_hw {
++ const struct mtk_wed_soc_data *soc;
+ struct device_node *node;
+ struct mtk_eth *eth;
+ struct regmap *regs;
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -100,8 +100,6 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_TX_BM_BASE 0x084
+
+-#define MTK_WED_TX_BM_TKID 0x088
+-#define MTK_WED_TX_BM_TKID_V2 0x0c8
+ #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
+ #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
+
+@@ -160,9 +158,6 @@ struct mtk_wdma_desc {
+ #define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+ #define MTK_WED_RESET_IDX 0x20c
+-#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
+-#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
+-#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6)
+ #define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)
+
+ #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
+@@ -286,7 +281,6 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
+
+ #define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
+-#define MTK_WED_WPDMA_RX_RING 0x770
+
+ #define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
+ #define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
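The structure replaces scattered version checks around register offsets with one per-SoC table chosen at probe time, so every later register access goes through the same pointer. In the compact userspace C sketch below, the 0x088/0x0c8 token-ID offsets come from the hunk above, while the demo_* names, main() and the descriptor sizes are illustrative only:

/* Illustrative only: pick a per-SoC constant table once instead of
 * branching on the version at every register access. */
#include <stdio.h>

struct demo_soc_data {
	struct {
		unsigned int tx_bm_tkid;
		unsigned int wpdma_rx_ring0;
	} regmap;
	unsigned int wdma_desc_size;
};

static const struct demo_soc_data demo_v1_data = {
	.regmap = { .tx_bm_tkid = 0x088, .wpdma_rx_ring0 = 0x770 },
	.wdma_desc_size = 16,	/* sizes here are illustrative */
};

static const struct demo_soc_data demo_v2_data = {
	.regmap = { .tx_bm_tkid = 0x0c8, .wpdma_rx_ring0 = 0x770 },
	.wdma_desc_size = 32,
};

int main(void)
{
	int version = 2;
	const struct demo_soc_data *soc;

	/* chosen once at "probe" time, used everywhere afterwards */
	soc = (version == 2) ? &demo_v2_data : &demo_v1_data;
	printf("tkid reg 0x%03x, wdma desc size %u\n",
	       soc->regmap.tx_bm_tkid, soc->wdma_desc_size);
	return 0;
}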
diff --git a/target/linux/generic/backport-5.15/752-14-v6.7-net-ethernet-mtk_wed-introduce-WED-support-for-MT798.patch b/target/linux/generic/backport-5.15/752-14-v6.7-net-ethernet-mtk_wed-introduce-WED-support-for-MT798.patch
new file mode 100644
index 0000000000..f53b822224
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-14-v6.7-net-ethernet-mtk_wed-introduce-WED-support-for-MT798.patch
@@ -0,0 +1,1280 @@
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:13 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce WED support for MT7988
+
+Similar to MT7986 and MT7622, enable the Wireless Ethernet Dispatcher for
+MT7988 in order to offload traffic forwarded from LAN/WLAN to WLAN/LAN.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -195,6 +195,7 @@ static const struct mtk_reg_map mt7988_r
+ .wdma_base = {
+ [0] = 0x4800,
+ [1] = 0x4c00,
++ [2] = 0x5000,
+ },
+ .pse_iq_sta = 0x0180,
+ .pse_oq_sta = 0x01a0,
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -1129,7 +1129,7 @@ struct mtk_reg_map {
+ u32 gdm1_cnt;
+ u32 gdma_to_ppe0;
+ u32 ppe_base;
+- u32 wdma_base[2];
++ u32 wdma_base[3];
+ u32 pse_iq_sta;
+ u32 pse_oq_sta;
+ };
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -201,6 +201,9 @@ mtk_flow_set_output_device(struct mtk_et
+ case 1:
+ pse_port = PSE_WDMA1_PORT;
+ break;
++ case 2:
++ pse_port = PSE_WDMA2_PORT;
++ break;
+ default:
+ return -EINVAL;
+ }
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -16,17 +16,19 @@
+ #include <net/flow_offload.h>
+ #include <net/pkt_cls.h>
+ #include "mtk_eth_soc.h"
+-#include "mtk_wed_regs.h"
+ #include "mtk_wed.h"
+ #include "mtk_ppe.h"
+ #include "mtk_wed_wo.h"
+
+ #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
+
+-#define MTK_WED_PKT_SIZE 1900
++#define MTK_WED_PKT_SIZE 1920
+ #define MTK_WED_BUF_SIZE 2048
++#define MTK_WED_PAGE_BUF_SIZE 128
+ #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
++#define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
+ #define MTK_WED_RX_RING_SIZE 1536
++#define MTK_WED_RX_PG_BM_CNT 8192
+
+ #define MTK_WED_TX_RING_SIZE 2048
+ #define MTK_WED_WDMA_RING_SIZE 1024
+@@ -40,7 +42,10 @@
+ #define MTK_WED_RRO_QUE_CNT 8192
+ #define MTK_WED_MIOD_ENTRY_CNT 128
+
+-static struct mtk_wed_hw *hw_list[2];
++#define MTK_WED_TX_BM_DMA_SIZE 65536
++#define MTK_WED_TX_BM_PKT_CNT 32768
++
++static struct mtk_wed_hw *hw_list[3];
+ static DEFINE_MUTEX(hw_lock);
+
+ struct mtk_wed_flow_block_priv {
+@@ -55,6 +60,7 @@ static const struct mtk_wed_soc_data mt7
+ .reset_idx_tx_mask = GENMASK(3, 0),
+ .reset_idx_rx_mask = GENMASK(17, 16),
+ },
++ .tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
+ .wdma_desc_size = sizeof(struct mtk_wdma_desc),
+ };
+
+@@ -65,6 +71,18 @@ static const struct mtk_wed_soc_data mt7
+ .reset_idx_tx_mask = GENMASK(1, 0),
+ .reset_idx_rx_mask = GENMASK(7, 6),
+ },
++ .tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
++ .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
++};
++
++static const struct mtk_wed_soc_data mt7988_data = {
++ .regmap = {
++ .tx_bm_tkid = 0x0c8,
++ .wpdma_rx_ring0 = 0x7d0,
++ .reset_idx_tx_mask = GENMASK(1, 0),
++ .reset_idx_rx_mask = GENMASK(7, 6),
++ },
++ .tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc),
+ .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
+ };
+
+@@ -319,33 +337,38 @@ out:
+ static int
+ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
+ {
++ u32 desc_size = dev->hw->soc->tx_ring_desc_size;
++ int i, page_idx = 0, n_pages, ring_size;
++ int token = dev->wlan.token_start;
+ struct mtk_wed_buf *page_list;
+- struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+- int token = dev->wlan.token_start;
+- int ring_size;
+- int n_pages;
+- int i, page_idx;
++ void *desc_ptr;
+
+- ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+- n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
++ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
++ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
++ dev->tx_buf_ring.size = ring_size;
++ } else {
++ dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE;
++ ring_size = MTK_WED_TX_BM_PKT_CNT;
++ }
++ n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE;
+
+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+- dev->tx_buf_ring.size = ring_size;
+ dev->tx_buf_ring.pages = page_list;
+
+- desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
+- &desc_phys, GFP_KERNEL);
+- if (!desc)
++ desc_ptr = dma_alloc_coherent(dev->hw->dev,
++ dev->tx_buf_ring.size * desc_size,
++ &desc_phys, GFP_KERNEL);
++ if (!desc_ptr)
+ return -ENOMEM;
+
+- dev->tx_buf_ring.desc = desc;
++ dev->tx_buf_ring.desc = desc_ptr;
+ dev->tx_buf_ring.desc_phys = desc_phys;
+
+- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
++ for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
+ dma_addr_t page_phys, buf_phys;
+ struct page *page;
+ void *buf;
+@@ -371,28 +394,31 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_d
+ buf_phys = page_phys;
+
+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
+- u32 txd_size;
+- u32 ctrl;
+-
+- txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
++ struct mtk_wdma_desc *desc = desc_ptr;
+
+ desc->buf0 = cpu_to_le32(buf_phys);
+- desc->buf1 = cpu_to_le32(buf_phys + txd_size);
++ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
++ u32 txd_size, ctrl;
+
+- if (mtk_wed_is_v1(dev->hw))
+- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+- MTK_WED_BUF_SIZE - txd_size) |
+- MTK_WDMA_DESC_CTRL_LAST_SEG1;
+- else
+- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
+- MTK_WED_BUF_SIZE - txd_size) |
+- MTK_WDMA_DESC_CTRL_LAST_SEG0;
+- desc->ctrl = cpu_to_le32(ctrl);
+- desc->info = 0;
+- desc++;
++ txd_size = dev->wlan.init_buf(buf, buf_phys,
++ token++);
++ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
++ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size);
++ if (mtk_wed_is_v1(dev->hw))
++ ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG1 |
++ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
++ MTK_WED_BUF_SIZE - txd_size);
++ else
++ ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 |
++ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
++ MTK_WED_BUF_SIZE - txd_size);
++ desc->ctrl = cpu_to_le32(ctrl);
++ desc->info = 0;
++ } else {
++ desc->ctrl = cpu_to_le32(token << 16);
++ }
+
++ desc_ptr += desc_size;
+ buf += MTK_WED_BUF_SIZE;
+ buf_phys += MTK_WED_BUF_SIZE;
+ }
+@@ -408,31 +434,31 @@ static void
+ mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
+ {
+ struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages;
+- struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
+- int page_idx;
+- int i;
++ struct mtk_wed_hw *hw = dev->hw;
++ int i, page_idx = 0;
+
+ if (!page_list)
+ return;
+
+- if (!desc)
++ if (!dev->tx_buf_ring.desc)
+ goto free_pagelist;
+
+- for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
+- i += MTK_WED_BUF_PER_PAGE) {
+- dma_addr_t buf_addr = page_list[page_idx].phy_addr;
++ for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
++ dma_addr_t page_phy = page_list[page_idx].phy_addr;
+ void *page = page_list[page_idx++].p;
+
+ if (!page)
+ break;
+
+- dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
++ dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+ }
+
+- dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
+- desc, dev->tx_buf_ring.desc_phys);
++ dma_free_coherent(dev->hw->dev,
++ dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size,
++ dev->tx_buf_ring.desc,
++ dev->tx_buf_ring.desc_phys);
+
+ free_pagelist:
+ kfree(page_list);
+@@ -517,13 +543,23 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
+ {
+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+- if (mtk_wed_is_v1(dev->hw))
++ switch (dev->hw->version) {
++ case 1:
+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+- else
++ break;
++ case 2:
+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
++ break;
++ case 3:
++ mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
++ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
++ break;
++ default:
++ break;
++ }
+
+ if (!dev->hw->num_flows)
+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+@@ -535,6 +571,9 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
+ static void
+ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
+ {
++ if (!mtk_wed_is_v2(dev->hw))
++ return;
++
+ if (enable) {
+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ wed_w32(dev, MTK_WED_TXP_DW1,
+@@ -609,6 +648,14 @@ mtk_wed_dma_disable(struct mtk_wed_devic
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
++
++ if (mtk_wed_is_v3_or_greater(dev->hw) &&
++ mtk_wed_get_rx_capa(dev)) {
++ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
++ MTK_WDMA_PREF_TX_CFG_PREF_EN);
++ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
++ MTK_WDMA_PREF_RX_CFG_PREF_EN);
++ }
+ }
+
+ mtk_wed_set_512_support(dev, false);
+@@ -651,6 +698,14 @@ mtk_wed_deinit(struct mtk_wed_device *de
+ MTK_WED_CTRL_RX_ROUTE_QM_EN |
+ MTK_WED_CTRL_WED_RX_BM_EN |
+ MTK_WED_CTRL_RX_RRO_QM_EN);
++
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
++ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU);
++ wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
++ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
++ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
++ }
+ }
+
+ static void
+@@ -700,21 +755,37 @@ mtk_wed_detach(struct mtk_wed_device *de
+ mutex_unlock(&hw_lock);
+ }
+
+-#define PCIE_BASE_ADDR0 0x11280000
+ static void
+ mtk_wed_bus_init(struct mtk_wed_device *dev)
+ {
+ switch (dev->wlan.bus_type) {
+ case MTK_WED_BUS_PCIE: {
+ struct device_node *np = dev->hw->eth->dev->of_node;
+- struct regmap *regs;
+
+- regs = syscon_regmap_lookup_by_phandle(np,
+- "mediatek,wed-pcie");
+- if (IS_ERR(regs))
+- break;
++ if (mtk_wed_is_v2(dev->hw)) {
++ struct regmap *regs;
++
++ regs = syscon_regmap_lookup_by_phandle(np,
++ "mediatek,wed-pcie");
++ if (IS_ERR(regs))
++ break;
+
+- regmap_update_bits(regs, 0, BIT(0), BIT(0));
++ regmap_update_bits(regs, 0, BIT(0), BIT(0));
++ }
++
++ if (dev->wlan.msi) {
++ wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
++ dev->hw->pcie_base | 0xc08);
++ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
++ dev->hw->pcie_base | 0xc04);
++ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
++ } else {
++ wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
++ dev->hw->pcie_base | 0x180);
++ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
++ dev->hw->pcie_base | 0x184);
++ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
++ }
+
+ wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
+@@ -722,19 +793,9 @@ mtk_wed_bus_init(struct mtk_wed_device *
+ /* pcie interrupt control: pola/source selection */
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
+- wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+-
+- wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
+- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+-
+- /* pcie interrupt status trigger register */
+- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+- wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
+-
+- /* pola setting */
+- wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+- MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
++ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
++ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL,
++ dev->hw->index));
+ break;
+ }
+ case MTK_WED_BUS_AXI:
+@@ -772,18 +833,19 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+ static void
+ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
+ {
+- u32 mask, set;
++ u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
++ u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;
+
+ mtk_wed_deinit(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+ mtk_wed_set_wpdma(dev);
+
+- mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
+- MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+- MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
+- set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
+- MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
+- MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
++ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
++ mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
++ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
++ set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
++ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
++ }
+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+ if (mtk_wed_is_v1(dev->hw)) {
+@@ -931,11 +993,18 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_
+ }
+
+ /* configure RX_ROUTE_QM */
+- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
+- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+- FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
+- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++ if (mtk_wed_is_v2(dev->hw)) {
++ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
++ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
++ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT,
++ 0x3 + dev->hw->index));
++ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++ } else {
++ wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
++ FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT,
++ 0x3 + dev->hw->index));
++ }
+ /* enable RX_ROUTE_QM */
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+ }
+@@ -948,22 +1017,30 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+
+ dev->init_done = true;
+ mtk_wed_set_ext_int(dev, false);
+- wed_w32(dev, MTK_WED_TX_BM_CTRL,
+- MTK_WED_TX_BM_CTRL_PAUSE |
+- FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
+- dev->tx_buf_ring.size / 128) |
+- FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
+- MTK_WED_TX_RING_SIZE / 256));
+
+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
+-
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+ if (mtk_wed_is_v1(dev->hw)) {
++ wed_w32(dev, MTK_WED_TX_BM_CTRL,
++ MTK_WED_TX_BM_CTRL_PAUSE |
++ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
++ dev->tx_buf_ring.size / 128) |
++ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
++ MTK_WED_TX_RING_SIZE / 256));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+ MTK_WED_TX_BM_DYN_THR_HI);
+- } else {
++ } else if (mtk_wed_is_v2(dev->hw)) {
++ wed_w32(dev, MTK_WED_TX_BM_CTRL,
++ MTK_WED_TX_BM_CTRL_PAUSE |
++ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
++ dev->tx_buf_ring.size / 128) |
++ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
++ MTK_WED_TX_RING_SIZE / 256));
++ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
++ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
++ MTK_WED_TX_TKID_DYN_THR_HI);
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
+ MTK_WED_TX_BM_DYN_THR_HI_V2);
+@@ -973,9 +1050,6 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+ dev->tx_buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
+ dev->tx_buf_ring.size / 128));
+- wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+- FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+- MTK_WED_TX_TKID_DYN_THR_HI);
+ }
+
+ wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid,
+@@ -985,26 +1059,62 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ /* switch to new bm architecture */
++ wed_clr(dev, MTK_WED_TX_BM_CTRL,
++ MTK_WED_TX_BM_CTRL_LEGACY_EN);
++
++ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
++ MTK_WED_TX_TKID_CTRL_PAUSE |
++ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3,
++ dev->wlan.nbuf / 128) |
++ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3,
++ dev->wlan.nbuf / 128));
++ /* return SKBID + SDP back to bm */
++ wed_set(dev, MTK_WED_TX_TKID_CTRL,
++ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
++
++ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR,
++ MTK_WED_TX_BM_PKT_CNT |
++ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
++ }
++
+ if (mtk_wed_is_v1(dev->hw)) {
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+- } else {
+- wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+- if (mtk_wed_get_rx_capa(dev)) {
+- /* rx hw init */
+- wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+- MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+- MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+- wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+-
+- mtk_wed_rx_buffer_hw_init(dev);
+- mtk_wed_rro_hw_init(dev);
+- mtk_wed_route_qm_hw_init(dev);
+- }
++ } else if (mtk_wed_get_rx_capa(dev)) {
++ /* rx hw init */
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
++ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
++ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
++
++ /* reset prefetch index of ring */
++ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
++ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
++ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++
++ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
++ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
++ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++
++ /* reset prefetch FIFO of ring */
++ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
++ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0);
++
++ mtk_wed_rx_buffer_hw_init(dev);
++ mtk_wed_rro_hw_init(dev);
++ mtk_wed_route_qm_hw_init(dev);
+ }
+
+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
++ if (!mtk_wed_is_v1(dev->hw))
++ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+ }
+
+ static void
+@@ -1302,6 +1412,24 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
+ dev->hw->soc->wdma_desc_size, true))
+ return -ENOMEM;
+
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ struct mtk_wdma_desc *desc = wdma->desc;
++ int i;
++
++ for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) {
++ desc->buf0 = 0;
++ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
++ desc->buf1 = 0;
++ desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE);
++ desc++;
++ desc->buf0 = 0;
++ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
++ desc->buf1 = 0;
++ desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE);
++ desc++;
++ }
++ }
++
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
+@@ -1367,6 +1495,9 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+ } else {
++ if (mtk_wed_is_v3_or_greater(dev->hw))
++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
++
+ /* initail tx interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
+@@ -1419,33 +1550,60 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ {
+ int i;
+
+- wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
++ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
++ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
++ wdma_set(dev, MTK_WDMA_GLO_CFG,
++ MTK_WDMA_GLO_CFG_TX_DMA_EN |
++ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
++ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
++ wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED);
++ } else {
++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
++ MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
++ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
++ }
+
+ wed_set(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+- wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+- MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
++
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+- wdma_set(dev, MTK_WDMA_GLO_CFG,
+- MTK_WDMA_GLO_CFG_TX_DMA_EN |
+- MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+-
+ if (mtk_wed_is_v1(dev->hw)) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ return;
+ }
+
+- wed_set(dev, MTK_WED_WPDMA_CTRL,
+- MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
++
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
++ FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
++ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
++ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);
++
++ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
++
++ wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++ }
++
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+@@ -1457,11 +1615,22 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+
++ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
+- FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
+- 0x2));
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2));
++
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_EN |
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));
++
++ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
++ wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
++ wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
++ }
+
+ for (i = 0; i < MTK_WED_RX_QUEUES; i++)
+ mtk_wed_check_wfdma_rx_fill(dev, i);
+@@ -1501,6 +1670,12 @@ mtk_wed_start(struct mtk_wed_device *dev
+ wed_r32(dev, MTK_WED_EXT_INT_MASK1);
+ wed_r32(dev, MTK_WED_EXT_INT_MASK2);
+
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_w32(dev, MTK_WED_EXT_INT_MASK3,
++ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
++ wed_r32(dev, MTK_WED_EXT_INT_MASK3);
++ }
++
+ if (mtk_wed_rro_cfg(dev))
+ return;
+ }
+@@ -1552,6 +1727,7 @@ mtk_wed_attach(struct mtk_wed_device *de
+ dev->irq = hw->irq;
+ dev->wdma_idx = hw->index;
+ dev->version = hw->version;
++ dev->hw->pcie_base = mtk_wed_get_pcie_base(dev);
+
+ if (hw->eth->dma_dev == hw->eth->dev &&
+ of_dma_is_coherent(hw->eth->dev->of_node))
+@@ -1619,6 +1795,23 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
+ ring->reg_base = MTK_WED_RING_TX(idx);
+ ring->wpdma = regs;
+
++ if (mtk_wed_is_v3_or_greater(dev->hw) && idx == 1) {
++ /* reset prefetch index */
++ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
++ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
++
++ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
++ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
++
++ /* reset prefetch FIFO */
++ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
++ MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
++ MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
++ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0);
++ }
++
+ /* WED -> WPDMA */
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
+@@ -1693,15 +1886,13 @@ mtk_wed_rx_ring_setup(struct mtk_wed_dev
+ static u32
+ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
+ {
+- u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
++ u32 val, ext_mask;
+
+- if (mtk_wed_is_v1(dev->hw))
+- ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
++ if (mtk_wed_is_v3_or_greater(dev->hw))
++ ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
++ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ else
+- ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+- MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+- MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+- MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
++ ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
+@@ -1942,6 +2133,9 @@ void mtk_wed_add_hw(struct device_node *
+ case 2:
+ hw->soc = &mt7986_data;
+ break;
++ case 3:
++ hw->soc = &mt7988_data;
++ break;
+ default:
+ case 1:
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -9,6 +9,8 @@
+ #include <linux/regmap.h>
+ #include <linux/netdevice.h>
+
++#include "mtk_wed_regs.h"
++
+ struct mtk_eth;
+ struct mtk_wed_wo;
+
+@@ -19,6 +21,7 @@ struct mtk_wed_soc_data {
+ u32 reset_idx_tx_mask;
+ u32 reset_idx_rx_mask;
+ } regmap;
++ u32 tx_ring_desc_size;
+ u32 wdma_desc_size;
+ };
+
+@@ -35,6 +38,7 @@ struct mtk_wed_hw {
+ struct dentry *debugfs_dir;
+ struct mtk_wed_device *wed_dev;
+ struct mtk_wed_wo *wed_wo;
++ u32 pcie_base;
+ u32 debugfs_reg;
+ u32 num_flows;
+ u8 version;
+@@ -61,6 +65,16 @@ static inline bool mtk_wed_is_v2(struct
+ return hw->version == 2;
+ }
+
++static inline bool mtk_wed_is_v3(struct mtk_wed_hw *hw)
++{
++ return hw->version == 3;
++}
++
++static inline bool mtk_wed_is_v3_or_greater(struct mtk_wed_hw *hw)
++{
++ return hw->version > 2;
++}
++
+ static inline void
+ wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+ {
+@@ -143,6 +157,21 @@ wpdma_txfree_w32(struct mtk_wed_device *
+ writel(val, dev->txfree_ring.wpdma + reg);
+ }
+
++static inline u32 mtk_wed_get_pcie_base(struct mtk_wed_device *dev)
++{
++ if (!mtk_wed_is_v3_or_greater(dev->hw))
++ return MTK_WED_PCIE_BASE;
++
++ switch (dev->hw->index) {
++ case 1:
++ return MTK_WED_PCIE_BASE1;
++ case 2:
++ return MTK_WED_PCIE_BASE2;
++ default:
++ return MTK_WED_PCIE_BASE0;
++ }
++}
++
+ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index);
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -331,10 +331,22 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ wo->hw->index + 1);
+
+ /* load firmware */
+- if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed"))
+- fw_name = MT7981_FIRMWARE_WO;
+- else
+- fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0;
++ switch (wo->hw->version) {
++ case 2:
++ if (of_device_is_compatible(wo->hw->node,
++ "mediatek,mt7981-wed"))
++ fw_name = MT7981_FIRMWARE_WO;
++ else
++ fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1
++ : MT7986_FIRMWARE_WO0;
++ break;
++ case 3:
++ fw_name = wo->hw->index ? MT7988_FIRMWARE_WO1
++ : MT7988_FIRMWARE_WO0;
++ break;
++ default:
++ return -EINVAL;
++ }
+
+ ret = request_firmware(&fw, fw_name, wo->hw->dev);
+ if (ret)
+@@ -355,15 +367,16 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ }
+
+ /* set the start address */
+- boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
+- : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
++ if (!mtk_wed_is_v3_or_greater(wo->hw) && wo->hw->index)
++ boot_cr = MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR;
++ else
++ boot_cr = MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
+ wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
+ /* wo firmware reset */
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
+
+- val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
+- val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK
+- : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
++ val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR) |
++ MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
+ out:
+ release_firmware(fw);
+@@ -398,3 +411,5 @@ int mtk_wed_mcu_init(struct mtk_wed_wo *
+ MODULE_FIRMWARE(MT7981_FIRMWARE_WO);
+ MODULE_FIRMWARE(MT7986_FIRMWARE_WO0);
+ MODULE_FIRMWARE(MT7986_FIRMWARE_WO1);
++MODULE_FIRMWARE(MT7988_FIRMWARE_WO0);
++MODULE_FIRMWARE(MT7988_FIRMWARE_WO1);
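The mcu hunk above wraps the existing compatible-string check in a switch on the
WED hardware version and adds the MT7988 images. A minimal standalone sketch of
that selection logic, where the is_mt7981 flag stands in for the
of_device_is_compatible() check and the paths match the MT7988_FIRMWARE_WO*
defines added to mtk_wed_wo.h further down in this patch:

    #include <stddef.h>

    /* sketch only: mirrors the switch added to mtk_wed_mcu_load_firmware() */
    static const char *wo_firmware_name(int version, int index, int is_mt7981)
    {
            switch (version) {
            case 2:
                    if (is_mt7981)
                            return "mediatek/mt7981_wo.bin";
                    return index ? "mediatek/mt7986_wo_1.bin"
                                 : "mediatek/mt7986_wo_0.bin";
            case 3:
                    return index ? "mediatek/mt7988_wo_1.bin"
                                 : "mediatek/mt7988_wo_0.bin";
            default:
                    return NULL; /* the driver returns -EINVAL here */
            }
    }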
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -13,6 +13,9 @@
+ #define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
+ #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
+
++#define MTK_WDMA_TXD0_DESC_INFO_DMA_DONE BIT(29)
++#define MTK_WDMA_TXD1_DESC_INFO_DMA_DONE BIT(31)
++
+ struct mtk_wdma_desc {
+ __le32 buf0;
+ __le32 ctrl;
+@@ -37,6 +40,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
+ #define MTK_WED_RESET_RX_RRO_QM BIT(20)
+ #define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
++#define MTK_WED_RESET_TX_AMSDU BIT(22)
+ #define MTK_WED_RESET_WED BIT(31)
+
+ #define MTK_WED_CTRL 0x00c
+@@ -44,6 +48,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
+ #define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
+ #define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
++#define MTK_WED_CTRL_WED_RX_IND_CMD_EN BIT(5)
++#define MTK_WED_CTRL_WED_RX_PG_BM_EN BIT(6)
++#define MTK_WED_CTRL_WED_RX_PG_BM_BUSY BIT(7)
+ #define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
+ #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
+ #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
+@@ -54,9 +61,14 @@ struct mtk_wdma_desc {
+ #define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
+ #define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
+ #define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
++#define MTK_WED_CTRL_TX_TKID_ALI_EN BIT(20)
++#define MTK_WED_CTRL_TX_TKID_ALI_BUSY BIT(21)
++#define MTK_WED_CTRL_TX_AMSDU_EN BIT(22)
++#define MTK_WED_CTRL_TX_AMSDU_BUSY BIT(23)
+ #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
+ #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
+ #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
++#define MTK_WED_CTRL_FLD_MIB_RD_CLR BIT(28)
+
+ #define MTK_WED_EXT_INT_STATUS 0x020
+ #define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
+@@ -89,6 +101,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_MASK 0x028
+ #define MTK_WED_EXT_INT_MASK1 0x02c
+ #define MTK_WED_EXT_INT_MASK2 0x030
++#define MTK_WED_EXT_INT_MASK3 0x034
+
+ #define MTK_WED_STATUS 0x060
+ #define MTK_WED_STATUS_TX GENMASK(15, 8)
+@@ -96,9 +109,14 @@ struct mtk_wdma_desc {
+ #define MTK_WED_TX_BM_CTRL 0x080
+ #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+ #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
++#define MTK_WED_TX_BM_CTRL_LEGACY_EN BIT(26)
++#define MTK_WED_TX_TKID_CTRL_FREE_FORMAT BIT(27)
+ #define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
+
+ #define MTK_WED_TX_BM_BASE 0x084
++#define MTK_WED_TX_BM_INIT_PTR 0x088
++#define MTK_WED_TX_BM_SW_TAIL_IDX GENMASK(16, 0)
++#define MTK_WED_TX_BM_INIT_SW_TAIL_IDX BIT(16)
+
+ #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
+ #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
+@@ -122,6 +140,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+ #define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+
++#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3 GENMASK(7, 0)
++#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3 GENMASK(23, 16)
++
+ #define MTK_WED_TX_TKID_DYN_THR 0x0e0
+ #define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
+ #define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
+@@ -199,12 +220,15 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
+-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(15, 12)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4 BIT(18)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
+-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK BIT(20)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
+ #define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
++#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST BIT(25)
+ #define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
++#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK BIT(30)
+
+ #define MTK_WED_WPDMA_RESET_IDX 0x50c
+ #define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
+@@ -250,9 +274,10 @@ struct mtk_wdma_desc {
+ #define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
+
+ #define MTK_WED_PCIE_INT_CTRL 0x57c
+-#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
+-#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
+ #define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
++#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
++#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
++#define MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER BIT(21)
+
+ #define MTK_WED_WPDMA_CFG_BASE 0x580
+ #define MTK_WED_WPDMA_CFG_INT_MASK 0x584
+@@ -286,6 +311,20 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
+ #define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
+
++#define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4
++#define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0)
++#define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8)
++#define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16)
++
++#define MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX 0x7b8
++#define MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR BIT(15)
++
++#define MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX 0x7bc
++
++#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG 0x7c0
++#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR BIT(0)
++#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR BIT(16)
++
+ #define MTK_WED_WDMA_RING_TX 0x800
+
+ #define MTK_WED_WDMA_TX_MIB 0x810
+@@ -293,6 +332,18 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
+ #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
+
++#define MTK_WED_WDMA_RX_PREF_CFG 0x950
++#define MTK_WED_WDMA_RX_PREF_EN BIT(0)
++#define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8)
++#define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16)
++#define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24)
++#define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25)
++#define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26)
++
++#define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C
++#define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0)
++#define MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR BIT(16)
++
+ #define MTK_WED_WDMA_GLO_CFG 0xa04
+ #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
+ #define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
+@@ -325,6 +376,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
+
+ #define MTK_WED_WDMA_INT_CTRL 0xa2c
++#define MTK_WED_WDMA_INT_POLL_PRD GENMASK(7, 0)
+ #define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
+
+ #define MTK_WED_WDMA_CFG_BASE 0xaa0
+@@ -388,6 +440,18 @@ struct mtk_wdma_desc {
+ #define MTK_WDMA_INT_GRP1 0x250
+ #define MTK_WDMA_INT_GRP2 0x254
+
++#define MTK_WDMA_PREF_TX_CFG 0x2d0
++#define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0)
++
++#define MTK_WDMA_PREF_RX_CFG 0x2dc
++#define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0)
++
++#define MTK_WDMA_WRBK_TX_CFG 0x300
++#define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30)
++
++#define MTK_WDMA_WRBK_RX_CFG 0x344
++#define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30)
++
+ #define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
+ #define MTK_PCIE_MIRROR_MAP_EN BIT(0)
+ #define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
+@@ -401,6 +465,30 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
+ #define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
+
++#define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c
++#define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28
++#define MTK_WED_RTQM_IGRS0_I2H_PKT_CNT(_n) (0xb2c + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS0_FDROP_CNT 0xb34
++
++#define MTK_WED_RTQM_IGRS1_I2HW_DMAD_CNT 0xb44
++#define MTK_WED_RTQM_IGRS1_I2H_DMAD_CNT(_n) (0xb48 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS1_I2HW_PKT_CNT 0xb50
++#define MTK_WED_RTQM_IGRS1_I2H_PKT_CNT(_n) (0xb54 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS1_FDROP_CNT 0xb5c
++
++#define MTK_WED_RTQM_IGRS2_I2HW_DMAD_CNT 0xb6c
++#define MTK_WED_RTQM_IGRS2_I2H_DMAD_CNT(_n) (0xb70 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS2_I2HW_PKT_CNT 0xb78
++#define MTK_WED_RTQM_IGRS2_I2H_PKT_CNT(_n) (0xb7c + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS2_FDROP_CNT 0xb84
++
++#define MTK_WED_RTQM_IGRS3_I2HW_DMAD_CNT 0xb94
++#define MTK_WED_RTQM_IGRS3_I2H_DMAD_CNT(_n) (0xb98 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS3_I2HW_PKT_CNT 0xba0
++#define MTK_WED_RTQM_IGRS3_I2H_PKT_CNT(_n) (0xba4 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS3_FDROP_CNT 0xbac
++
+ #define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
+ #define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
+ #define MTK_WED_RTQM_Q2N_MIB 0xb80
+@@ -409,6 +497,24 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RTQM_Q2B_MIB 0xb8c
+ #define MTK_WED_RTQM_PFDBK_MIB 0xb90
+
++#define MTK_WED_RTQM_ENQ_CFG0 0xbb8
++#define MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT GENMASK(15, 12)
++
++#define MTK_WED_RTQM_FDROP_MIB 0xb84
++#define MTK_WED_RTQM_ENQ_I2Q_DMAD_CNT 0xbbc
++#define MTK_WED_RTQM_ENQ_I2N_DMAD_CNT 0xbc0
++#define MTK_WED_RTQM_ENQ_I2Q_PKT_CNT 0xbc4
++#define MTK_WED_RTQM_ENQ_I2N_PKT_CNT 0xbc8
++#define MTK_WED_RTQM_ENQ_USED_ENTRY_CNT 0xbcc
++#define MTK_WED_RTQM_ENQ_ERR_CNT 0xbd0
++
++#define MTK_WED_RTQM_DEQ_DMAD_CNT 0xbd8
++#define MTK_WED_RTQM_DEQ_Q2I_DMAD_CNT 0xbdc
++#define MTK_WED_RTQM_DEQ_PKT_CNT 0xbe0
++#define MTK_WED_RTQM_DEQ_Q2I_PKT_CNT 0xbe4
++#define MTK_WED_RTQM_DEQ_USED_PFDBK_CNT 0xbe8
++#define MTK_WED_RTQM_DEQ_ERR_CNT 0xbec
++
+ #define MTK_WED_RROQM_GLO_CFG 0xc04
+ #define MTK_WED_RROQM_RST_IDX 0xc08
+ #define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
+@@ -458,7 +564,116 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RX_BM_INTF 0xd9c
+ #define MTK_WED_RX_BM_ERR_STS 0xda8
+
++#define MTK_RRO_IND_CMD_SIGNATURE 0xe00
++#define MTK_RRO_IND_CMD_DMA_IDX GENMASK(11, 0)
++#define MTK_RRO_IND_CMD_MAGIC_CNT GENMASK(30, 28)
++
++#define MTK_WED_IND_CMD_RX_CTRL0 0xe04
++#define MTK_WED_IND_CMD_PROC_IDX GENMASK(11, 0)
++#define MTK_WED_IND_CMD_PREFETCH_FREE_CNT GENMASK(19, 16)
++#define MTK_WED_IND_CMD_MAGIC_CNT GENMASK(30, 28)
++
++#define MTK_WED_IND_CMD_RX_CTRL1 0xe08
++#define MTK_WED_IND_CMD_RX_CTRL2 0xe0c
++#define MTK_WED_IND_CMD_MAX_CNT GENMASK(11, 0)
++#define MTK_WED_IND_CMD_BASE_M GENMASK(19, 16)
++
++#define MTK_WED_RRO_CFG0 0xe10
++#define MTK_WED_RRO_CFG1 0xe14
++#define MTK_WED_RRO_CFG1_MAX_WIN_SZ GENMASK(31, 29)
++#define MTK_WED_RRO_CFG1_ACK_SN_BASE_M GENMASK(19, 16)
++#define MTK_WED_RRO_CFG1_PARTICL_SE_ID GENMASK(11, 0)
++
++#define MTK_WED_ADDR_ELEM_CFG0 0xe18
++#define MTK_WED_ADDR_ELEM_CFG1 0xe1c
++#define MTK_WED_ADDR_ELEM_PREFETCH_FREE_CNT GENMASK(19, 16)
++
++#define MTK_WED_ADDR_ELEM_TBL_CFG 0xe20
++#define MTK_WED_ADDR_ELEM_TBL_OFFSET GENMASK(6, 0)
++#define MTK_WED_ADDR_ELEM_TBL_RD_RDY BIT(28)
++#define MTK_WED_ADDR_ELEM_TBL_WR_RDY BIT(29)
++#define MTK_WED_ADDR_ELEM_TBL_RD BIT(30)
++#define MTK_WED_ADDR_ELEM_TBL_WR BIT(31)
++
++#define MTK_WED_RADDR_ELEM_TBL_WDATA 0xe24
++#define MTK_WED_RADDR_ELEM_TBL_RDATA 0xe28
++
++#define MTK_WED_PN_CHECK_CFG 0xe30
++#define MTK_WED_PN_CHECK_SE_ID GENMASK(11, 0)
++#define MTK_WED_PN_CHECK_RD_RDY BIT(28)
++#define MTK_WED_PN_CHECK_WR_RDY BIT(29)
++#define MTK_WED_PN_CHECK_RD BIT(30)
++#define MTK_WED_PN_CHECK_WR BIT(31)
++
++#define MTK_WED_PN_CHECK_WDATA_M 0xe38
++#define MTK_WED_PN_CHECK_IS_FIRST BIT(17)
++
++#define MTK_WED_RRO_MSDU_PG_RING_CFG(_n) (0xe44 + (_n) * 0x8)
++
++#define MTK_WED_RRO_MSDU_PG_RING2_CFG 0xe58
++#define MTK_WED_RRO_MSDU_PG_DRV_CLR BIT(26)
++#define MTK_WED_RRO_MSDU_PG_DRV_EN BIT(31)
++
++#define MTK_WED_RRO_MSDU_PG_CTRL0(_n) (0xe5c + (_n) * 0xc)
++#define MTK_WED_RRO_MSDU_PG_CTRL1(_n) (0xe60 + (_n) * 0xc)
++#define MTK_WED_RRO_MSDU_PG_CTRL2(_n) (0xe64 + (_n) * 0xc)
++
++#define MTK_WED_RRO_RX_D_RX(_n) (0xe80 + (_n) * 0x10)
++
++#define MTK_WED_RRO_RX_MAGIC_CNT BIT(13)
++
++#define MTK_WED_RRO_RX_D_CFG(_n) (0xea0 + (_n) * 0x4)
++#define MTK_WED_RRO_RX_D_DRV_CLR BIT(26)
++#define MTK_WED_RRO_RX_D_DRV_EN BIT(31)
++
++#define MTK_WED_RRO_PG_BM_RX_DMAM 0xeb0
++#define MTK_WED_RRO_PG_BM_RX_SDL0 GENMASK(13, 0)
++
++#define MTK_WED_RRO_PG_BM_BASE 0xeb4
++#define MTK_WED_RRO_PG_BM_INIT_PTR 0xeb8
++#define MTK_WED_RRO_PG_BM_SW_TAIL_IDX GENMASK(15, 0)
++#define MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX BIT(16)
++
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX 0xeec
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN BIT(0)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR BIT(1)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG GENMASK(6, 2)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN BIT(8)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR BIT(9)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG GENMASK(14, 10)
++
++#define MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG 0xef4
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN BIT(0)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR BIT(1)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG GENMASK(6, 2)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN BIT(8)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR BIT(9)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG GENMASK(14, 10)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN BIT(16)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18)
++
++#define MTK_WED_RX_IND_CMD_CNT0 0xf20
++#define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31)
++
++#define MTK_WED_RX_IND_CMD_CNT(_n) (0xf20 + (_n) * 0x4)
++#define MTK_WED_IND_CMD_MAGIC_CNT_FAIL_CNT GENMASK(15, 0)
++
++#define MTK_WED_RX_ADDR_ELEM_CNT(_n) (0xf48 + (_n) * 0x4)
++#define MTK_WED_ADDR_ELEM_SIG_FAIL_CNT GENMASK(15, 0)
++#define MTK_WED_ADDR_ELEM_FIRST_SIG_FAIL_CNT GENMASK(31, 16)
++#define MTK_WED_ADDR_ELEM_ACKSN_CNT GENMASK(27, 0)
++
++#define MTK_WED_RX_MSDU_PG_CNT(_n) (0xf5c + (_n) * 0x4)
++
++#define MTK_WED_RX_PN_CHK_CNT 0xf70
++#define MTK_WED_PN_CHK_FAIL_CNT GENMASK(15, 0)
++
+ #define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
+ #define MTK_WED_PCIE_INT_MASK 0x0
+
++#define MTK_WED_PCIE_BASE 0x11280000
++#define MTK_WED_PCIE_BASE0 0x11300000
++#define MTK_WED_PCIE_BASE1 0x11310000
++#define MTK_WED_PCIE_BASE2 0x11290000
+ #endif
+--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+@@ -91,6 +91,8 @@ enum mtk_wed_dummy_cr_idx {
+ #define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
+ #define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin"
+ #define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin"
++#define MT7988_FIRMWARE_WO0 "mediatek/mt7988_wo_0.bin"
++#define MT7988_FIRMWARE_WO1 "mediatek/mt7988_wo_1.bin"
+
+ #define MTK_WO_MCU_CFG_LS_BASE 0
+ #define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000)
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -139,6 +139,8 @@ struct mtk_wed_device {
+ u32 wpdma_rx;
+
+ bool wcid_512;
++ bool hw_rro;
++ bool msi;
+
+ u16 token_start;
+ unsigned int nbuf;
+@@ -212,10 +214,12 @@ mtk_wed_device_attach(struct mtk_wed_dev
+ return ret;
+ }
+
+-static inline bool
+-mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
++static inline bool mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
+ {
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
++ if (dev->version == 3)
++ return dev->wlan.hw_rro;
++
+ return dev->version != 1;
+ #else
+ return false;
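The hunk above changes what mtk_wed_get_rx_capa() reports: WED v1 never has rx
offload, v2 always has it, and v3 now requires the WLAN driver to advertise
hardware RRO support through wlan.hw_rro. A pure-logic sketch of that rule:

    #include <stdbool.h>

    /* sketch of the capability rule; the driver reads dev->version and
     * dev->wlan.hw_rro inside mtk_wed_get_rx_capa() */
    static bool wed_rx_capa(int version, bool wlan_hw_rro)
    {
            if (version == 3)
                    return wlan_hw_rro;
            return version != 1; /* v1: no rx offload, v2: always */
    }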
diff --git a/target/linux/generic/backport-5.15/752-15-v6.7-net-ethernet-mtk_wed-refactor-mtk_wed_check_wfdma_rx.patch b/target/linux/generic/backport-5.15/752-15-v6.7-net-ethernet-mtk_wed-refactor-mtk_wed_check_wfdma_rx.patch
new file mode 100644
index 0000000000..e91ae69d08
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-15-v6.7-net-ethernet-mtk_wed-refactor-mtk_wed_check_wfdma_rx.patch
@@ -0,0 +1,95 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:14 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: refactor mtk_wed_check_wfdma_rx_fill
+ routine
+
+Refactor mtk_wed_check_wfdma_rx_fill() so that it can be reused when adding
+HW receive offload support for the MT7988 SoC.
+
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
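The refactor below narrows the helper to pure ring polling: it takes a ring
pointer, retries the CPU-index read up to three times and returns 0 or
-ETIMEDOUT, while the caller now owns the MTK_WED_RING_CONFIGURED check and the
wifi-side MTK_WFMDA_RX_DMA_EN write. A rough standalone model of the polling
contract, with read_cpu_idx() standing in for the readl() of
MTK_WED_RING_OFS_CPU_IDX:

    #include <errno.h>

    #define RX_RING_SIZE 1536 /* MTK_WED_RX_RING_SIZE */

    /* sketch only: success means the CPU index reached the last entry
     * within three polls; the driver sleeps between attempts */
    static int check_wfdma_rx_fill(unsigned int (*read_cpu_idx)(void))
    {
            int i;

            for (i = 0; i < 3; i++) {
                    if (read_cpu_idx() == RX_RING_SIZE - 1)
                            return 0;
            }

            return -ETIMEDOUT;
    }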
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -585,22 +585,15 @@ mtk_wed_set_512_support(struct mtk_wed_d
+ }
+ }
+
+-#define MTK_WFMDA_RX_DMA_EN BIT(2)
+-static void
+-mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
++static int
++mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev,
++ struct mtk_wed_ring *ring)
+ {
+- u32 val;
+ int i;
+
+- if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
+- return; /* queue is not configured by mt76 */
+-
+ for (i = 0; i < 3; i++) {
+- u32 cur_idx;
++ u32 cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX);
+
+- cur_idx = wed_r32(dev,
+- MTK_WED_WPDMA_RING_RX_DATA(idx) +
+- MTK_WED_RING_OFS_CPU_IDX);
+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
+ break;
+
+@@ -609,12 +602,10 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_w
+
+ if (i == 3) {
+ dev_err(dev->hw->dev, "rx dma enable failed\n");
+- return;
++ return -ETIMEDOUT;
+ }
+
+- val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
+- MTK_WFMDA_RX_DMA_EN;
+- wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
++ return 0;
+ }
+
+ static void
+@@ -1545,6 +1536,7 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+ }
+
++#define MTK_WFMDA_RX_DMA_EN BIT(2)
+ static void
+ mtk_wed_dma_enable(struct mtk_wed_device *dev)
+ {
+@@ -1632,8 +1624,26 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
+ }
+
+- for (i = 0; i < MTK_WED_RX_QUEUES; i++)
+- mtk_wed_check_wfdma_rx_fill(dev, i);
++ for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
++ struct mtk_wed_ring *ring = &dev->rx_ring[i];
++ u32 val;
++
++ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
++ continue; /* queue is not configured by mt76 */
++
++ if (mtk_wed_check_wfdma_rx_fill(dev, ring)) {
++ dev_err(dev->hw->dev,
++ "rx_ring(%d) dma enable failed\n", i);
++ continue;
++ }
++
++ val = wifi_r32(dev,
++ dev->wlan.wpdma_rx_glo -
++ dev->wlan.phy_base) | MTK_WFMDA_RX_DMA_EN;
++ wifi_w32(dev,
++ dev->wlan.wpdma_rx_glo - dev->wlan.phy_base,
++ val);
++ }
+ }
+
+ static void
diff --git a/target/linux/generic/backport-5.15/752-16-v6.7-net-ethernet-mtk_wed-introduce-partial-AMSDU-offload.patch b/target/linux/generic/backport-5.15/752-16-v6.7-net-ethernet-mtk_wed-introduce-partial-AMSDU-offload.patch
new file mode 100644
index 0000000000..21a4e0759f
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-16-v6.7-net-ethernet-mtk_wed-introduce-partial-AMSDU-offload.patch
@@ -0,0 +1,465 @@
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:15 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce partial AMSDU offload
+ support for MT7988
+
+Introduce partial AMSDU offload support for the MT7988 SoC in order to
+merge, in hardware, packets belonging to the same AMSDU before passing
+them to the WLAN NIC.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
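Besides the new allocator (MTK_WED_AMSDU_NPAGES = 32 DMA-mapped HIF TXD
segments of MTK_WED_AMSDU_BUF_SIZE, i.e. PAGE_SIZE << 4), the patch programs
per-station AMSDU limits where the maximum AMSDU length is written in 256-byte
units (amsdu_max_len >> 8). A rough model of the MTK_WED_AMSDU_STA_INFO_INIT
value built in mtk_wed_amsdu_init(), with bit positions taken from the GENMASK
definitions added to mtk_wed_regs.h in this patch; FIELD_PREP additionally
masks each value to its field width:

    #include <stdint.h>

    #define AMSDU_STA_WTBL_HDRT_MODE (1u << 0) /* MTK_WED_AMSDU_STA_WTBL_HDRT_MODE */
    #define AMSDU_STA_RMVL           (1u << 1) /* MTK_WED_AMSDU_STA_RMVL */
    #define AMSDU_STA_MAX_LEN_SHIFT  2         /* GENMASK(7, 2) */
    #define AMSDU_STA_MAX_NUM_SHIFT  8         /* GENMASK(11, 8) */

    /* sketch of the register value; inputs come from dev->wlan */
    static uint32_t amsdu_sta_info_init(uint32_t amsdu_max_len,
                                        uint32_t amsdu_max_subframes)
    {
            return AMSDU_STA_RMVL | AMSDU_STA_WTBL_HDRT_MODE |
                   ((amsdu_max_len >> 8) << AMSDU_STA_MAX_LEN_SHIFT) |
                   (amsdu_max_subframes << AMSDU_STA_MAX_NUM_SHIFT);
    }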
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -438,7 +438,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_e
+ }
+
+ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+- int wdma_idx, int txq, int bss, int wcid)
++ int wdma_idx, int txq, int bss, int wcid,
++ bool amsdu_en)
+ {
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+@@ -450,6 +451,7 @@ int mtk_foe_entry_set_wdma(struct mtk_et
+ MTK_FOE_IB2_WDMA_WINFO_V2;
+ l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
+ FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
++ l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en);
+ break;
+ case 2:
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -88,13 +88,13 @@ enum {
+ #define MTK_FOE_WINFO_BSS_V3 GENMASK(23, 16)
+ #define MTK_FOE_WINFO_WCID_V3 GENMASK(15, 0)
+
+-#define MTK_FOE_WINFO_PAO_USR_INFO GENMASK(15, 0)
+-#define MTK_FOE_WINFO_PAO_TID GENMASK(19, 16)
+-#define MTK_FOE_WINFO_PAO_IS_FIXEDRATE BIT(20)
+-#define MTK_FOE_WINFO_PAO_IS_PRIOR BIT(21)
+-#define MTK_FOE_WINFO_PAO_IS_SP BIT(22)
+-#define MTK_FOE_WINFO_PAO_HF BIT(23)
+-#define MTK_FOE_WINFO_PAO_AMSDU_EN BIT(24)
++#define MTK_FOE_WINFO_AMSDU_USR_INFO GENMASK(15, 0)
++#define MTK_FOE_WINFO_AMSDU_TID GENMASK(19, 16)
++#define MTK_FOE_WINFO_AMSDU_IS_FIXEDRATE BIT(20)
++#define MTK_FOE_WINFO_AMSDU_IS_PRIOR BIT(21)
++#define MTK_FOE_WINFO_AMSDU_IS_SP BIT(22)
++#define MTK_FOE_WINFO_AMSDU_HF BIT(23)
++#define MTK_FOE_WINFO_AMSDU_EN BIT(24)
+
+ enum {
+ MTK_FOE_STATE_INVALID,
+@@ -123,7 +123,7 @@ struct mtk_foe_mac_info {
+
+ /* netsys_v3 */
+ u32 w3info;
+- u32 wpao;
++ u32 amsdu;
+ };
+
+ /* software-only entry type */
+@@ -393,7 +393,8 @@ int mtk_foe_entry_set_vlan(struct mtk_et
+ int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid);
+ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+- int wdma_idx, int txq, int bss, int wcid);
++ int wdma_idx, int txq, int bss, int wcid,
++ bool amsdu_en);
+ int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ unsigned int queue);
+ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -111,6 +111,7 @@ mtk_flow_get_wdma_info(struct net_device
+ info->queue = path->mtk_wdma.queue;
+ info->bss = path->mtk_wdma.bss;
+ info->wcid = path->mtk_wdma.wcid;
++ info->amsdu = path->mtk_wdma.amsdu;
+
+ return 0;
+ }
+@@ -192,7 +193,7 @@ mtk_flow_set_output_device(struct mtk_et
+
+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+ mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
+- info.bss, info.wcid);
++ info.bss, info.wcid, info.amsdu);
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ switch (info.wdma_idx) {
+ case 0:
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -29,6 +29,8 @@
+ #define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
+ #define MTK_WED_RX_RING_SIZE 1536
+ #define MTK_WED_RX_PG_BM_CNT 8192
++#define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4)
++#define MTK_WED_AMSDU_NPAGES 32
+
+ #define MTK_WED_TX_RING_SIZE 2048
+ #define MTK_WED_WDMA_RING_SIZE 1024
+@@ -172,6 +174,23 @@ mtk_wdma_rx_reset(struct mtk_wed_device
+ return ret;
+ }
+
++static u32
++mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
++{
++ return !!(wed_r32(dev, reg) & mask);
++}
++
++static int
++mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
++{
++ int sleep = 15000;
++ int timeout = 100 * sleep;
++ u32 val;
++
++ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
++ timeout, false, dev, reg, mask);
++}
++
+ static void
+ mtk_wdma_tx_reset(struct mtk_wed_device *dev)
+ {
+@@ -335,6 +354,118 @@ out:
+ }
+
+ static int
++mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev)
++{
++ struct mtk_wed_hw *hw = dev->hw;
++ struct mtk_wed_amsdu *wed_amsdu;
++ int i;
++
++ if (!mtk_wed_is_v3_or_greater(hw))
++ return 0;
++
++ wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES,
++ sizeof(*wed_amsdu), GFP_KERNEL);
++ if (!wed_amsdu)
++ return -ENOMEM;
++
++ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
++ void *ptr;
++
++ /* each segment is 64K */
++ ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
++ __GFP_ZERO | __GFP_COMP |
++ GFP_DMA32,
++ get_order(MTK_WED_AMSDU_BUF_SIZE));
++ if (!ptr)
++ goto error;
++
++ wed_amsdu[i].txd = ptr;
++ wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr,
++ MTK_WED_AMSDU_BUF_SIZE,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy))
++ goto error;
++ }
++ dev->hw->wed_amsdu = wed_amsdu;
++
++ return 0;
++
++error:
++ for (i--; i >= 0; i--)
++ dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy,
++ MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
++ return -ENOMEM;
++}
++
++static void
++mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev)
++{
++ struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
++ int i;
++
++ if (!wed_amsdu)
++ return;
++
++ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
++ dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy,
++ MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
++ free_pages((unsigned long)wed_amsdu[i].txd,
++ get_order(MTK_WED_AMSDU_BUF_SIZE));
++ }
++}
++
++static int
++mtk_wed_amsdu_init(struct mtk_wed_device *dev)
++{
++ struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
++ int i, ret;
++
++ if (!wed_amsdu)
++ return 0;
++
++ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++)
++ wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i),
++ wed_amsdu[i].txd_phy);
++
++ /* init all sta parameter */
++ wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL |
++ MTK_WED_AMSDU_STA_WTBL_HDRT_MODE |
++ FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN,
++ dev->wlan.amsdu_max_len >> 8) |
++ FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM,
++ dev->wlan.amsdu_max_subframes));
++
++ wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT);
++
++ ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO,
++ MTK_WED_AMSDU_STA_INFO_DO_INIT);
++ if (ret) {
++ dev_err(dev->hw->dev, "amsdu initialization failed\n");
++ return ret;
++ }
++
++ /* init partial amsdu offload txd src */
++ wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG,
++ FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index));
++
++ /* init qmem */
++ wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET);
++ ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29));
++ if (ret) {
++ pr_info("%s: amsdu qmem initialization failed\n", __func__);
++ return ret;
++ }
++
++ /* eagle E1 PCIE1 tx ring 22 flow control issue */
++ if (dev->wlan.id == 0x7991)
++ wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING);
++
++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
++
++ return 0;
++}
++
++static int
+ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
+ {
+ u32 desc_size = dev->hw->soc->tx_ring_desc_size;
+@@ -708,6 +839,7 @@ __mtk_wed_detach(struct mtk_wed_device *
+
+ mtk_wdma_rx_reset(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
++ mtk_wed_amsdu_free_buffer(dev);
+ mtk_wed_free_tx_buffer(dev);
+ mtk_wed_free_tx_rings(dev);
+
+@@ -1128,23 +1260,6 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
+ }
+ }
+
+-static u32
+-mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+-{
+- return !!(wed_r32(dev, reg) & mask);
+-}
+-
+-static int
+-mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+-{
+- int sleep = 15000;
+- int timeout = 100 * sleep;
+- u32 val;
+-
+- return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+- timeout, false, dev, reg, mask);
+-}
+-
+ static int
+ mtk_wed_rx_reset(struct mtk_wed_device *dev)
+ {
+@@ -1691,6 +1806,7 @@ mtk_wed_start(struct mtk_wed_device *dev
+ }
+
+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
++ mtk_wed_amsdu_init(dev);
+
+ mtk_wed_dma_enable(dev);
+ dev->running = true;
+@@ -1747,6 +1863,10 @@ mtk_wed_attach(struct mtk_wed_device *de
+ if (ret)
+ goto out;
+
++ ret = mtk_wed_amsdu_buffer_alloc(dev);
++ if (ret)
++ goto out;
++
+ if (mtk_wed_get_rx_capa(dev)) {
+ ret = mtk_wed_rro_alloc(dev);
+ if (ret)
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -25,6 +25,11 @@ struct mtk_wed_soc_data {
+ u32 wdma_desc_size;
+ };
+
++struct mtk_wed_amsdu {
++ void *txd;
++ dma_addr_t txd_phy;
++};
++
+ struct mtk_wed_hw {
+ const struct mtk_wed_soc_data *soc;
+ struct device_node *node;
+@@ -38,6 +43,7 @@ struct mtk_wed_hw {
+ struct dentry *debugfs_dir;
+ struct mtk_wed_device *wed_dev;
+ struct mtk_wed_wo *wed_wo;
++ struct mtk_wed_amsdu *wed_amsdu;
+ u32 pcie_base;
+ u32 debugfs_reg;
+ u32 num_flows;
+@@ -52,6 +58,7 @@ struct mtk_wdma_info {
+ u8 queue;
+ u16 wcid;
+ u8 bss;
++ u8 amsdu;
+ };
+
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -672,6 +672,82 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
+ #define MTK_WED_PCIE_INT_MASK 0x0
+
++#define MTK_WED_AMSDU_FIFO 0x1800
++#define MTK_WED_AMSDU_IS_PRIOR0_RING BIT(10)
++
++#define MTK_WED_AMSDU_STA_INFO 0x01810
++#define MTK_WED_AMSDU_STA_INFO_DO_INIT BIT(0)
++#define MTK_WED_AMSDU_STA_INFO_SET_INIT BIT(1)
++
++#define MTK_WED_AMSDU_STA_INFO_INIT 0x01814
++#define MTK_WED_AMSDU_STA_WTBL_HDRT_MODE BIT(0)
++#define MTK_WED_AMSDU_STA_RMVL BIT(1)
++#define MTK_WED_AMSDU_STA_MAX_AMSDU_LEN GENMASK(7, 2)
++#define MTK_WED_AMSDU_STA_MAX_AMSDU_NUM GENMASK(11, 8)
++
++#define MTK_WED_AMSDU_HIFTXD_BASE_L(_n) (0x1980 + (_n) * 0x4)
++
++#define MTK_WED_AMSDU_PSE 0x1910
++#define MTK_WED_AMSDU_PSE_RESET BIT(16)
++
++#define MTK_WED_AMSDU_HIFTXD_CFG 0x1968
++#define MTK_WED_AMSDU_HIFTXD_SRC GENMASK(16, 15)
++
++#define MTK_WED_MON_AMSDU_FIFO_DMAD 0x1a34
++
++#define MTK_WED_MON_AMSDU_ENG_DMAD(_n) (0x1a80 + (_n) * 0x50)
++#define MTK_WED_MON_AMSDU_ENG_QFPL(_n) (0x1a84 + (_n) * 0x50)
++#define MTK_WED_MON_AMSDU_ENG_QENI(_n) (0x1a88 + (_n) * 0x50)
++#define MTK_WED_MON_AMSDU_ENG_QENO(_n) (0x1a8c + (_n) * 0x50)
++#define MTK_WED_MON_AMSDU_ENG_MERG(_n) (0x1a90 + (_n) * 0x50)
++
++#define MTK_WED_MON_AMSDU_ENG_CNT8(_n) (0x1a94 + (_n) * 0x50)
++#define MTK_WED_AMSDU_ENG_MAX_QGPP_CNT GENMASK(10, 0)
++#define MTK_WED_AMSDU_ENG_MAX_PL_CNT GENMASK(27, 16)
++
++#define MTK_WED_MON_AMSDU_ENG_CNT9(_n) (0x1a98 + (_n) * 0x50)
++#define MTK_WED_AMSDU_ENG_CUR_ENTRY GENMASK(10, 0)
++#define MTK_WED_AMSDU_ENG_MAX_BUF_MERGED GENMASK(20, 16)
++#define MTK_WED_AMSDU_ENG_MAX_MSDU_MERGED GENMASK(28, 24)
++
++#define MTK_WED_MON_AMSDU_QMEM_STS1 0x1e04
++
++#define MTK_WED_MON_AMSDU_QMEM_CNT(_n) (0x1e0c + (_n) * 0x4)
++#define MTK_WED_AMSDU_QMEM_FQ_CNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_SP_QCNT GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID0_QCNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID1_QCNT GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID2_QCNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID3_QCNT GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID4_QCNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID5_QCNT GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID6_QCNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID7_QCNT GENMASK(11, 0)
++
++#define MTK_WED_MON_AMSDU_QMEM_PTR(_n) (0x1e20 + (_n) * 0x4)
++#define MTK_WED_AMSDU_QMEM_FQ_HEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_SP_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID0_QHEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID1_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID2_QHEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID3_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID4_QHEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID5_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID6_QHEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID7_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_FQ_TAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_SP_QTAIL GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID0_QTAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID1_QTAIL GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID2_QTAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID3_QTAIL GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID4_QTAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID5_QTAIL GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID6_QTAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID7_QTAIL GENMASK(11, 0)
++
++#define MTK_WED_MON_AMSDU_HIFTXD_FETCH_MSDU(_n) (0x1ec4 + (_n) * 0x4)
++
+ #define MTK_WED_PCIE_BASE 0x11280000
+ #define MTK_WED_PCIE_BASE0 0x11300000
+ #define MTK_WED_PCIE_BASE1 0x11310000
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -906,6 +906,7 @@ struct net_device_path {
+ u8 queue;
+ u16 wcid;
+ u8 bss;
++ u8 amsdu;
+ } mtk_wdma;
+ };
+ };
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -129,6 +129,7 @@ struct mtk_wed_device {
+ enum mtk_wed_bus_tye bus_type;
+ void __iomem *base;
+ u32 phy_base;
++ u32 id;
+
+ u32 wpdma_phys;
+ u32 wpdma_int;
+@@ -147,10 +148,12 @@ struct mtk_wed_device {
+ unsigned int rx_nbuf;
+ unsigned int rx_npkt;
+ unsigned int rx_size;
++ unsigned int amsdu_max_len;
+
+ u8 tx_tbit[MTK_WED_TX_QUEUES];
+ u8 rx_tbit[MTK_WED_RX_QUEUES];
+ u8 txfree_tbit;
++ u8 amsdu_max_subframes;
+
+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+ int (*offload_enable)(struct mtk_wed_device *wed);
+@@ -224,6 +227,15 @@ static inline bool mtk_wed_get_rx_capa(s
+ #else
+ return false;
+ #endif
++}
++
++static inline bool mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev)
++{
++#ifdef CONFIG_NET_MEDIATEK_SOC_WED
++ return dev->version == 3;
++#else
++ return false;
++#endif
+ }
+
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
diff --git a/target/linux/generic/backport-5.15/752-17-v6.7-net-ethernet-mtk_wed-introduce-hw_rro-support-for-MT.patch b/target/linux/generic/backport-5.15/752-17-v6.7-net-ethernet-mtk_wed-introduce-hw_rro-support-for-MT.patch
new file mode 100644
index 0000000000..0cf4c18875
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-17-v6.7-net-ethernet-mtk_wed-introduce-hw_rro-support-for-MT.patch
@@ -0,0 +1,483 @@
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:16 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce hw_rro support for MT7988
+
+The MT7988 SoC supports 802.11 receive reordering offload in hardware,
+while the MT7986 SoC implements it in the firmware running on the MCU.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
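The hw_rro path below also builds a second buffer pool for the reorder pages:
MTK_WED_RX_PG_BM_CNT (8192) buffers of MTK_WED_PAGE_BUF_SIZE (128 bytes) are
carved out of whole pages, MTK_WED_RX_BUF_PER_PAGE slots per page, and every bm
descriptor's buf0 receives the DMA address of one slot. A simplified model of
that fill loop, assuming 4 KiB pages (32 slots per page, 256 pages in total);
the driver stores the address with cpu_to_le32():

    #include <stdint.h>

    #define RX_PG_BM_CNT 8192 /* MTK_WED_RX_PG_BM_CNT */
    #define PG_BUF_SIZE  128  /* MTK_WED_PAGE_BUF_SIZE */
    #define PAGE_SZ      4096 /* assumed page size */
    #define BUF_PER_PAGE (PAGE_SZ / PG_BUF_SIZE)

    /* sketch: page_phys[] holds the dma_map_page() address of each page,
     * buf0[] models the buf0 field of the RX_PG_BM_CNT descriptors */
    static void fill_hwrro_descs(uint32_t *buf0, const uint64_t *page_phys)
    {
            int i, s, page_idx = 0;

            for (i = 0; i < RX_PG_BM_CNT; i += BUF_PER_PAGE, page_idx++)
                    for (s = 0; s < BUF_PER_PAGE; s++)
                            buf0[i + s] = (uint32_t)(page_phys[page_idx] +
                                                     s * PG_BUF_SIZE);
    }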
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -26,7 +26,7 @@
+ #define MTK_WED_BUF_SIZE 2048
+ #define MTK_WED_PAGE_BUF_SIZE 128
+ #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
+-#define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
++#define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE)
+ #define MTK_WED_RX_RING_SIZE 1536
+ #define MTK_WED_RX_PG_BM_CNT 8192
+ #define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4)
+@@ -596,6 +596,68 @@ free_pagelist:
+ }
+
+ static int
++mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
++{
++ int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE;
++ struct mtk_wed_buf *page_list;
++ struct mtk_wed_bm_desc *desc;
++ dma_addr_t desc_phys;
++ int i, page_idx = 0;
++
++ if (!dev->wlan.hw_rro)
++ return 0;
++
++ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
++ if (!page_list)
++ return -ENOMEM;
++
++ dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
++ dev->hw_rro.pages = page_list;
++ desc = dma_alloc_coherent(dev->hw->dev,
++ dev->wlan.rx_nbuf * sizeof(*desc),
++ &desc_phys, GFP_KERNEL);
++ if (!desc)
++ return -ENOMEM;
++
++ dev->hw_rro.desc = desc;
++ dev->hw_rro.desc_phys = desc_phys;
++
++ for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
++ dma_addr_t page_phys, buf_phys;
++ struct page *page;
++ int s;
++
++ page = __dev_alloc_page(GFP_KERNEL);
++ if (!page)
++ return -ENOMEM;
++
++ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
++ DMA_BIDIRECTIONAL);
++ if (dma_mapping_error(dev->hw->dev, page_phys)) {
++ __free_page(page);
++ return -ENOMEM;
++ }
++
++ page_list[page_idx].p = page;
++ page_list[page_idx++].phy_addr = page_phys;
++ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
++ DMA_BIDIRECTIONAL);
++
++ buf_phys = page_phys;
++ for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
++ desc->buf0 = cpu_to_le32(buf_phys);
++ buf_phys += MTK_WED_PAGE_BUF_SIZE;
++ desc++;
++ }
++
++ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
++ DMA_BIDIRECTIONAL);
++ }
++
++ return 0;
++}
++
++static int
+ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
+ {
+ struct mtk_wed_bm_desc *desc;
+@@ -612,7 +674,42 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_d
+ dev->rx_buf_ring.desc_phys = desc_phys;
+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
+
+- return 0;
++ return mtk_wed_hwrro_buffer_alloc(dev);
++}
++
++static void
++mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev)
++{
++ struct mtk_wed_buf *page_list = dev->hw_rro.pages;
++ struct mtk_wed_bm_desc *desc = dev->hw_rro.desc;
++ int i, page_idx = 0;
++
++ if (!dev->wlan.hw_rro)
++ return;
++
++ if (!page_list)
++ return;
++
++ if (!desc)
++ goto free_pagelist;
++
++ for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
++ dma_addr_t buf_addr = page_list[page_idx].phy_addr;
++ void *page = page_list[page_idx++].p;
++
++ if (!page)
++ break;
++
++ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
++ DMA_BIDIRECTIONAL);
++ __free_page(page);
++ }
++
++ dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc),
++ desc, dev->hw_rro.desc_phys);
++
++free_pagelist:
++ kfree(page_list);
+ }
+
+ static void
+@@ -626,6 +723,28 @@ mtk_wed_free_rx_buffer(struct mtk_wed_de
+ dev->wlan.release_rx_buf(dev);
+ dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
+ desc, dev->rx_buf_ring.desc_phys);
++
++ mtk_wed_hwrro_free_buffer(dev);
++}
++
++static void
++mtk_wed_hwrro_init(struct mtk_wed_device *dev)
++{
++ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
++ return;
++
++ wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
++ FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
++
++ wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys);
++
++ wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
++ MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
++ FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
++ MTK_WED_RX_PG_BM_CNT));
++
++ /* enable rx_page_bm to fetch dmad */
++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
+ }
+
+ static void
+@@ -639,6 +758,8 @@ mtk_wed_rx_buffer_hw_init(struct mtk_wed
+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
++
++ mtk_wed_hwrro_init(dev);
+ }
+
+ static void
+@@ -934,6 +1055,8 @@ mtk_wed_bus_init(struct mtk_wed_device *
+ static void
+ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
+ {
++ int i;
++
+ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ return;
+@@ -951,6 +1074,15 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
++
++ if (!dev->wlan.hw_rro)
++ return;
++
++ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
++ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
++ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
++ dev->wlan.wpdma_rx_pg + i * 0x10);
+ }
+
+ static void
+@@ -1762,6 +1894,165 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ }
+
+ static void
++mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset)
++{
++ int i;
++
++ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
++ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
++
++ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
++ return;
++
++ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_CLR);
++
++ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
++ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
++ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
++ dev->wlan.rro_rx_tbit[0]) |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
++ dev->wlan.rro_rx_tbit[1]));
++
++ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
++ dev->wlan.rx_pg_tbit[0]) |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
++ dev->wlan.rx_pg_tbit[1]) |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
++ dev->wlan.rx_pg_tbit[2]));
++
++ /* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
++ * WM FWDL completed, otherwise RRO_MSDU_PG ring may broken
++ */
++ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_EN);
++
++ for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
++ struct mtk_wed_ring *ring = &dev->rx_rro_ring[i];
++
++ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
++ continue;
++
++ if (mtk_wed_check_wfdma_rx_fill(dev, ring))
++ dev_err(dev->hw->dev,
++ "rx_rro_ring(%d) initialization failed\n", i);
++ }
++
++ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
++ struct mtk_wed_ring *ring = &dev->rx_page_ring[i];
++
++ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
++ continue;
++
++ if (mtk_wed_check_wfdma_rx_fill(dev, ring))
++ dev_err(dev->hw->dev,
++ "rx_page_ring(%d) initialization failed\n", i);
++ }
++}
++
++static void
++mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx,
++ void __iomem *regs)
++{
++ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
++
++ ring->wpdma = regs;
++ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
++ readl(regs));
++ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
++ readl(regs + MTK_WED_RING_OFS_COUNT));
++ ring->flags |= MTK_WED_RING_CONFIGURED;
++}
++
++static void
++mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
++{
++ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
++
++ ring->wpdma = regs;
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
++ readl(regs));
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
++ readl(regs + MTK_WED_RING_OFS_COUNT));
++ ring->flags |= MTK_WED_RING_CONFIGURED;
++}
++
++static int
++mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
++{
++ struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
++ u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
++ int i, count = 0;
++
++ ring->wpdma = regs;
++ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
++ readl(regs) & 0xfffffff0);
++
++ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
++ readl(regs + MTK_WED_RING_OFS_COUNT));
++
++ /* ack sn cr */
++ wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
++ dev->wlan.ind_cmd.ack_sn_addr);
++ wed_w32(dev, MTK_WED_RRO_CFG1,
++ FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
++ dev->wlan.ind_cmd.win_size) |
++ FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
++ dev->wlan.ind_cmd.particular_sid));
++
++ /* particular session addr element */
++ wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0,
++ dev->wlan.ind_cmd.particular_se_phys);
++
++ for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
++ wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
++ dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
++ wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
++ MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
++
++ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
++ while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100)
++ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
++ if (count >= 100)
++ dev_err(dev->hw->dev,
++ "write ba session base failed\n");
++ }
++
++ /* pn check init */
++ for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
++ wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
++ MTK_WED_PN_CHECK_IS_FIRST);
++
++ wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
++ FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
++
++ count = 0;
++ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
++ while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100)
++ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
++ if (count >= 100)
++ dev_err(dev->hw->dev,
++ "session(%d) initialization failed\n", i);
++ }
++
++ wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
++
++ return 0;
++}
++
++static void
+ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+ {
+ int i;
+@@ -2215,6 +2506,10 @@ void mtk_wed_add_hw(struct device_node *
+ .detach = mtk_wed_detach,
+ .ppe_check = mtk_wed_ppe_check,
+ .setup_tc = mtk_wed_setup_tc,
++ .start_hw_rro = mtk_wed_start_hw_rro,
++ .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
++ .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
++ .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
+ };
+ struct device_node *eth_np = eth->dev->of_node;
+ struct platform_device *pdev;
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -10,6 +10,7 @@
+
+ #define MTK_WED_TX_QUEUES 2
+ #define MTK_WED_RX_QUEUES 2
++#define MTK_WED_RX_PAGE_QUEUES 3
+
+ #define WED_WO_STA_REC 0x6
+
+@@ -99,6 +100,9 @@ struct mtk_wed_device {
+ struct mtk_wed_ring txfree_ring;
+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
+ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
++ struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
++ struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
++ struct mtk_wed_ring ind_cmd_ring;
+
+ struct {
+ int size;
+@@ -120,6 +124,13 @@ struct mtk_wed_device {
+ dma_addr_t fdbk_phys;
+ } rro;
+
++ struct {
++ int size;
++ struct mtk_wed_buf *pages;
++ struct mtk_wed_bm_desc *desc;
++ dma_addr_t desc_phys;
++ } hw_rro;
++
+ /* filled by driver: */
+ struct {
+ union {
+@@ -138,6 +149,8 @@ struct mtk_wed_device {
+ u32 wpdma_txfree;
+ u32 wpdma_rx_glo;
+ u32 wpdma_rx;
++ u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
++ u32 wpdma_rx_pg;
+
+ bool wcid_512;
+ bool hw_rro;
+@@ -152,9 +165,20 @@ struct mtk_wed_device {
+
+ u8 tx_tbit[MTK_WED_TX_QUEUES];
+ u8 rx_tbit[MTK_WED_RX_QUEUES];
++ u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
++ u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
+ u8 txfree_tbit;
+ u8 amsdu_max_subframes;
+
++ struct {
++ u8 se_group_nums;
++ u16 win_size;
++ u16 particular_sid;
++ u32 ack_sn_addr;
++ dma_addr_t particular_se_phys;
++ dma_addr_t addr_elem_phys[1024];
++ } ind_cmd;
++
+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+ int (*offload_enable)(struct mtk_wed_device *wed);
+ void (*offload_disable)(struct mtk_wed_device *wed);
+@@ -193,6 +217,14 @@ struct mtk_wed_ops {
+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+ int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
+ enum tc_setup_type type, void *type_data);
++ void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask,
++ bool reset);
++ void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
++ void __iomem *regs);
++ void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
++ void __iomem *regs);
++ int (*ind_rx_ring_setup)(struct mtk_wed_device *dev,
++ void __iomem *regs);
+ };
+
+ extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+@@ -264,6 +296,15 @@ static inline bool mtk_wed_is_amsdu_supp
+ #define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
+ #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
+ (_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
++#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \
++ (_dev)->ops->start_hw_rro(_dev, _mask, _reset)
++#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
++ (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
++#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
++ (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
++#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
++ (_dev)->ops->ind_rx_ring_setup(_dev, _regs)
++
+ #else
+ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+ {
+@@ -283,6 +324,10 @@ static inline bool mtk_wed_device_active
+ #define mtk_wed_device_stop(_dev) do {} while (0)
+ #define mtk_wed_device_dma_reset(_dev) do {} while (0)
+ #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
++#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0)
++#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
++#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
++#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
+ #endif
+
+ #endif
diff --git a/target/linux/generic/backport-5.15/752-18-v6.7-net-ethernet-mtk_wed-debugfs-move-wed_v2-specific-re.patch b/target/linux/generic/backport-5.15/752-18-v6.7-net-ethernet-mtk_wed-debugfs-move-wed_v2-specific-re.patch
new file mode 100644
index 0000000000..5ea43a4445
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-18-v6.7-net-ethernet-mtk_wed-debugfs-move-wed_v2-specific-re.patch
@@ -0,0 +1,78 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:17 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: debugfs: move wed_v2 specific regs
+ out of regs array
+
+Move WED 2.0-specific debugfs entries out of the regs array. This is a
+preliminary patch to introduce WED 3.0 debugfs info.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -151,7 +151,7 @@ DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
+ static int
+ wed_rxinfo_show(struct seq_file *s, void *data)
+ {
+- static const struct reg_dump regs[] = {
++ static const struct reg_dump regs_common[] = {
+ DUMP_STR("WPDMA RX"),
+ DUMP_WPDMA_RX_RING(0),
+ DUMP_WPDMA_RX_RING(1),
+@@ -169,7 +169,7 @@ wed_rxinfo_show(struct seq_file *s, void
+ DUMP_WED_RING(WED_RING_RX_DATA(0)),
+ DUMP_WED_RING(WED_RING_RX_DATA(1)),
+
+- DUMP_STR("WED RRO"),
++ DUMP_STR("WED WO RRO"),
+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
+ DUMP_WED(WED_RROQM_MID_MIB),
+ DUMP_WED(WED_RROQM_MOD_MIB),
+@@ -180,17 +180,6 @@ wed_rxinfo_show(struct seq_file *s, void
+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
+
+- DUMP_STR("WED Route QM"),
+- DUMP_WED(WED_RTQM_R2H_MIB(0)),
+- DUMP_WED(WED_RTQM_R2Q_MIB(0)),
+- DUMP_WED(WED_RTQM_Q2H_MIB(0)),
+- DUMP_WED(WED_RTQM_R2H_MIB(1)),
+- DUMP_WED(WED_RTQM_R2Q_MIB(1)),
+- DUMP_WED(WED_RTQM_Q2H_MIB(1)),
+- DUMP_WED(WED_RTQM_Q2N_MIB),
+- DUMP_WED(WED_RTQM_Q2B_MIB),
+- DUMP_WED(WED_RTQM_PFDBK_MIB),
+-
+ DUMP_STR("WED WDMA TX"),
+ DUMP_WED(WED_WDMA_TX_MIB),
+ DUMP_WED_RING(WED_WDMA_RING_TX),
+@@ -211,11 +200,25 @@ wed_rxinfo_show(struct seq_file *s, void
+ DUMP_WED(WED_RX_BM_INTF),
+ DUMP_WED(WED_RX_BM_ERR_STS),
+ };
++ static const struct reg_dump regs_wed_v2[] = {
++ DUMP_STR("WED Route QM"),
++ DUMP_WED(WED_RTQM_R2H_MIB(0)),
++ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
++ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
++ DUMP_WED(WED_RTQM_R2H_MIB(1)),
++ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
++ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
++ DUMP_WED(WED_RTQM_Q2N_MIB),
++ DUMP_WED(WED_RTQM_Q2B_MIB),
++ DUMP_WED(WED_RTQM_PFDBK_MIB),
++ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+- if (dev)
+- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++ if (dev) {
++ dump_wed_regs(s, dev, regs_common, ARRAY_SIZE(regs_common));
++ dump_wed_regs(s, dev, regs_wed_v2, ARRAY_SIZE(regs_wed_v2));
++ }
+
+ return 0;
+ }
diff --git a/target/linux/generic/backport-5.15/752-19-v6.7-net-ethernet-mtk_wed-debugfs-add-WED-3.0-debugfs-ent.patch b/target/linux/generic/backport-5.15/752-19-v6.7-net-ethernet-mtk_wed-debugfs-add-WED-3.0-debugfs-ent.patch
new file mode 100644
index 0000000000..f491d2fd80
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-19-v6.7-net-ethernet-mtk_wed-debugfs-add-WED-3.0-debugfs-ent.patch
@@ -0,0 +1,432 @@
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:18 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: debugfs: add WED 3.0 debugfs entries
+
+Introduce WED 3.0 debugfs entries useful for debugging.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -11,6 +11,7 @@ struct reg_dump {
+ u16 offset;
+ u8 type;
+ u8 base;
++ u32 mask;
+ };
+
+ enum {
+@@ -25,6 +26,8 @@ enum {
+
+ #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
+ #define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
++#define DUMP_REG_MASK(_reg, _mask) \
++ { #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask }
+ #define DUMP_RING(_prefix, _base, ...) \
+ { _prefix " BASE", _base, __VA_ARGS__ }, \
+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
+@@ -32,6 +35,7 @@ enum {
+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
+
+ #define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
++#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask)
+ #define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
+
+ #define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
+@@ -212,12 +216,58 @@ wed_rxinfo_show(struct seq_file *s, void
+ DUMP_WED(WED_RTQM_Q2B_MIB),
+ DUMP_WED(WED_RTQM_PFDBK_MIB),
+ };
++ static const struct reg_dump regs_wed_v3[] = {
++ DUMP_STR("WED RX RRO DATA"),
++ DUMP_WED_RING(WED_RRO_RX_D_RX(0)),
++ DUMP_WED_RING(WED_RRO_RX_D_RX(1)),
++
++ DUMP_STR("WED RX MSDU PAGE"),
++ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(0)),
++ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(1)),
++ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(2)),
++
++ DUMP_STR("WED RX IND CMD"),
++ DUMP_WED(WED_IND_CMD_RX_CTRL1),
++ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL2, WED_IND_CMD_MAX_CNT),
++ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_PROC_IDX),
++ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_DMA_IDX),
++ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_MAGIC_CNT),
++ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_MAGIC_CNT),
++ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0,
++ WED_IND_CMD_PREFETCH_FREE_CNT),
++ DUMP_WED_MASK(WED_RRO_CFG1, WED_RRO_CFG1_PARTICL_SE_ID),
++
++ DUMP_STR("WED ADDR ELEM"),
++ DUMP_WED(WED_ADDR_ELEM_CFG0),
++ DUMP_WED_MASK(WED_ADDR_ELEM_CFG1,
++ WED_ADDR_ELEM_PREFETCH_FREE_CNT),
++
++ DUMP_STR("WED Route QM"),
++ DUMP_WED(WED_RTQM_ENQ_I2Q_DMAD_CNT),
++ DUMP_WED(WED_RTQM_ENQ_I2N_DMAD_CNT),
++ DUMP_WED(WED_RTQM_ENQ_I2Q_PKT_CNT),
++ DUMP_WED(WED_RTQM_ENQ_I2N_PKT_CNT),
++ DUMP_WED(WED_RTQM_ENQ_USED_ENTRY_CNT),
++ DUMP_WED(WED_RTQM_ENQ_ERR_CNT),
++
++ DUMP_WED(WED_RTQM_DEQ_DMAD_CNT),
++ DUMP_WED(WED_RTQM_DEQ_Q2I_DMAD_CNT),
++ DUMP_WED(WED_RTQM_DEQ_PKT_CNT),
++ DUMP_WED(WED_RTQM_DEQ_Q2I_PKT_CNT),
++ DUMP_WED(WED_RTQM_DEQ_USED_PFDBK_CNT),
++ DUMP_WED(WED_RTQM_DEQ_ERR_CNT),
++ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (dev) {
+ dump_wed_regs(s, dev, regs_common, ARRAY_SIZE(regs_common));
+- dump_wed_regs(s, dev, regs_wed_v2, ARRAY_SIZE(regs_wed_v2));
++ if (mtk_wed_is_v2(hw))
++ dump_wed_regs(s, dev,
++ regs_wed_v2, ARRAY_SIZE(regs_wed_v2));
++ else
++ dump_wed_regs(s, dev,
++ regs_wed_v3, ARRAY_SIZE(regs_wed_v3));
+ }
+
+ return 0;
+@@ -225,6 +275,314 @@ wed_rxinfo_show(struct seq_file *s, void
+ DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
+
+ static int
++wed_amsdu_show(struct seq_file *s, void *data)
++{
++ static const struct reg_dump regs[] = {
++ DUMP_STR("WED AMDSU INFO"),
++ DUMP_WED(WED_MON_AMSDU_FIFO_DMAD),
++
++ DUMP_STR("WED AMDSU ENG0 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(0)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(0)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(0)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(0)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(0)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG1 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(1)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(1)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(1)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(1)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(1)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG2 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(2)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(2)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(2)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(2)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(2)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG3 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(3)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(3)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(3)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(3)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(3)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG4 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(4)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(4)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(4)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(4)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(4)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG5 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(5)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(5)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(5)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(5)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(5)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG6 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(6)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(6)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(6)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(6)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(6)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG7 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(7)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(7)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(7)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(7)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(7)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG8 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(8)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(8)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(8)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(8)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(8)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED QMEM INFO"),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_FQ_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_SP_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID0_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID1_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID2_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID3_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID4_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID5_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID6_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID7_QCNT),
++
++ DUMP_STR("WED QMEM HEAD INFO"),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_FQ_HEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_SP_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID0_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID1_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID2_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID3_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID4_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID5_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID6_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID7_QHEAD),
++
++ DUMP_STR("WED QMEM TAIL INFO"),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_FQ_TAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_SP_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID0_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID1_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID2_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID3_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID4_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID5_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID6_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID7_QTAIL),
++
++ DUMP_STR("WED HIFTXD MSDU INFO"),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(1)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(2)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(3)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(4)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(5)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(6)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(7)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(8)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(9)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(10)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(11)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(12)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(13)),
++ };
++ struct mtk_wed_hw *hw = s->private;
++ struct mtk_wed_device *dev = hw->wed_dev;
++
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++
++ return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(wed_amsdu);
++
++static int
++wed_rtqm_show(struct seq_file *s, void *data)
++{
++ static const struct reg_dump regs[] = {
++ DUMP_STR("WED Route QM IGRS0(N2H + Recycle)"),
++ DUMP_WED(WED_RTQM_IGRS0_I2HW_DMAD_CNT),
++ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS0_I2HW_PKT_CNT),
++ DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS0_FDROP_CNT),
++
++ DUMP_STR("WED Route QM IGRS1(Legacy)"),
++ DUMP_WED(WED_RTQM_IGRS1_I2HW_DMAD_CNT),
++ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS1_I2HW_PKT_CNT),
++ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS1_FDROP_CNT),
++
++ DUMP_STR("WED Route QM IGRS2(RRO3.0)"),
++ DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT),
++ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS2_I2HW_PKT_CNT),
++ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS2_FDROP_CNT),
++
++ DUMP_STR("WED Route QM IGRS3(DEBUG)"),
++ DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT),
++ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS3_I2HW_PKT_CNT),
++ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS3_FDROP_CNT),
++ };
++ struct mtk_wed_hw *hw = s->private;
++ struct mtk_wed_device *dev = hw->wed_dev;
++
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++
++ return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(wed_rtqm);
++
++static int
++wed_rro_show(struct seq_file *s, void *data)
++{
++ static const struct reg_dump regs[] = {
++ DUMP_STR("RRO/IND CMD CNT"),
++ DUMP_WED(WED_RX_IND_CMD_CNT(1)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(2)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(3)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(4)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(5)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(6)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(7)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(8)),
++ DUMP_WED_MASK(WED_RX_IND_CMD_CNT(9),
++ WED_IND_CMD_MAGIC_CNT_FAIL_CNT),
++
++ DUMP_WED(WED_RX_ADDR_ELEM_CNT(0)),
++ DUMP_WED_MASK(WED_RX_ADDR_ELEM_CNT(1),
++ WED_ADDR_ELEM_SIG_FAIL_CNT),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(1)),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(2)),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(3)),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(4)),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(5)),
++ DUMP_WED_MASK(WED_RX_PN_CHK_CNT,
++ WED_PN_CHK_FAIL_CNT),
++ };
++ struct mtk_wed_hw *hw = s->private;
++ struct mtk_wed_device *dev = hw->wed_dev;
++
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++
++ return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(wed_rro);
++
++static int
+ mtk_wed_reg_set(void *data, u64 val)
+ {
+ struct mtk_wed_hw *hw = data;
+@@ -266,7 +624,16 @@ void mtk_wed_hw_add_debugfs(struct mtk_w
+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+- if (!mtk_wed_is_v1(hw))
++ if (!mtk_wed_is_v1(hw)) {
+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
+ &wed_rxinfo_fops);
++ if (mtk_wed_is_v3_or_greater(hw)) {
++ debugfs_create_file_unsafe("amsdu", 0400, dir, hw,
++ &wed_amsdu_fops);
++ debugfs_create_file_unsafe("rtqm", 0400, dir, hw,
++ &wed_rtqm_fops);
++ debugfs_create_file_unsafe("rro", 0400, dir, hw,
++ &wed_rro_fops);
++ }
++ }
+ }
diff --git a/target/linux/generic/backport-5.15/752-20-v6.7-net-ethernet-mtk_wed-add-wed-3.0-reset-support.patch b/target/linux/generic/backport-5.15/752-20-v6.7-net-ethernet-mtk_wed-add-wed-3.0-reset-support.patch
new file mode 100644
index 0000000000..aaaabf05e8
--- /dev/null
+++ b/target/linux/generic/backport-5.15/752-20-v6.7-net-ethernet-mtk_wed-add-wed-3.0-reset-support.patch
@@ -0,0 +1,587 @@
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:19 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: add wed 3.0 reset support
+
+Introduce support for resetting Wireless Ethernet Dispatcher 3.0
+available on the MT7988 SoC.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -148,6 +148,90 @@ mtk_wdma_read_reset(struct mtk_wed_devic
+ return wdma_r32(dev, MTK_WDMA_GLO_CFG);
+ }
+
++static void
++mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev)
++{
++ u32 status;
++
++ if (!mtk_wed_is_v3_or_greater(dev->hw))
++ return;
++
++ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
++ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
++ dev_err(dev->hw->dev, "rx reset failed\n");
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
++ dev_err(dev->hw->dev, "rx reset failed\n");
++
++ wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
++ wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
++ dev_err(dev->hw->dev, "rx reset failed\n");
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
++ dev_err(dev->hw->dev, "rx reset failed\n");
++
++ /* prefetch FIFO */
++ wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
++ MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
++ MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
++ wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
++ MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
++ MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
++
++ /* core FIFO */
++ wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
++ wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
++
++ /* writeback FIFO */
++ wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
++ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++ wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
++ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++
++ wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
++ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
++ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++
++ /* prefetch ring status */
++ wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
++ MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
++ MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
++
++ /* writeback ring status */
++ wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
++ MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
++ MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
++}
++
+ static int
+ mtk_wdma_rx_reset(struct mtk_wed_device *dev)
+ {
+@@ -160,6 +244,7 @@ mtk_wdma_rx_reset(struct mtk_wed_device
+ if (ret)
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
++ mtk_wdma_v3_rx_reset(dev);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+@@ -192,6 +277,84 @@ mtk_wed_poll_busy(struct mtk_wed_device
+ }
+
+ static void
++mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev)
++{
++ u32 status;
++
++ if (!mtk_wed_is_v3_or_greater(dev->hw))
++ return;
++
++ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
++ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
++ dev_err(dev->hw->dev, "tx reset failed\n");
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
++ dev_err(dev->hw->dev, "tx reset failed\n");
++
++ wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
++ wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
++ dev_err(dev->hw->dev, "tx reset failed\n");
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
++ dev_err(dev->hw->dev, "tx reset failed\n");
++
++ /* prefetch FIFO */
++ wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
++ MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
++ MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
++ wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
++ MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
++ MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
++
++ /* core FIFO */
++ wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
++ wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
++
++ /* writeback FIFO */
++ wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
++ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++ wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
++ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++
++ wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
++ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
++ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++
++ /* prefetch ring status */
++ wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
++ MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
++ MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
++
++ /* writeback ring status */
++ wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
++ MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
++ MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
++}
++
++static void
+ mtk_wdma_tx_reset(struct mtk_wed_device *dev)
+ {
+ u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
+@@ -202,6 +365,7 @@ mtk_wdma_tx_reset(struct mtk_wed_device
+ !(status & mask), 0, 10000))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
++ mtk_wdma_v3_tx_reset(dev);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+@@ -1405,13 +1569,33 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ if (ret)
+ return ret;
+
++ if (dev->wlan.hw_rro) {
++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
++ mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS,
++ MTK_WED_RX_IND_CMD_BUSY);
++ mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG);
++ }
++
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
++ if (!ret && mtk_wed_is_v3_or_greater(dev->hw))
++ ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_BUSY);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
+ } else {
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ /* 1.a. disable prefetch HW */
++ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_EN);
++ mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_BUSY);
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
++ MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL);
++ }
++
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+@@ -1439,23 +1623,52 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+ }
+
++ if (dev->wlan.hw_rro) {
++ /* disable rro msdu page drv */
++ wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_EN);
++
++ /* disable rro data drv */
++ wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
++
++ /* rro msdu page drv reset */
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_CLR);
++ mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_CLR);
++
++ /* rro data drv reset */
++ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2),
++ MTK_WED_RRO_RX_D_DRV_CLR);
++ mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2),
++ MTK_WED_RRO_RX_D_DRV_CLR);
++ }
++
+ /* reset route qm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
+- if (ret)
++ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+- else
+- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+- MTK_WED_RTQM_Q_RST);
++ } else if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_set(dev, MTK_WED_RTQM_RST, BIT(0));
++ wed_clr(dev, MTK_WED_RTQM_RST, BIT(0));
++ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
++ } else {
++ wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++ }
+
+ /* reset tx wdma */
+ mtk_wdma_tx_reset(dev);
+
+ /* reset tx wdma drv */
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
+- mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+- MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
++ if (mtk_wed_is_v3_or_greater(dev->hw))
++ mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS,
++ MTK_WED_WPDMA_STATUS_TX_DRV);
++ else
++ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
++ MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
+
+ /* reset wed rx dma */
+@@ -1476,6 +1689,14 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ MTK_WED_CTRL_WED_RX_BM_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+
++ if (dev->wlan.hw_rro) {
++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
++ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
++ MTK_WED_CTRL_WED_RX_PG_BM_BUSY);
++ wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
++ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
++ }
++
+ /* wo change to enable state */
+ val = MTK_WED_WO_STATE_ENABLE;
+ ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+@@ -1493,6 +1714,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ false);
+ }
+ mtk_wed_free_rx_buffer(dev);
++ mtk_wed_hwrro_free_buffer(dev);
+
+ return 0;
+ }
+@@ -1526,15 +1748,41 @@ mtk_wed_reset_dma(struct mtk_wed_device
+
+ /* 2. reset WDMA rx DMA */
+ busy = !!mtk_wdma_rx_reset(dev);
+- wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE |
++ wed_r32(dev, MTK_WED_WDMA_GLO_CFG);
++ val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN;
++ wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val);
++ } else {
++ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
++ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
++ }
++
+ if (!busy)
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
++ if (!busy && mtk_wed_is_v3_or_greater(dev->hw))
++ busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_BUSY);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
+ } else {
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ /* 1.a. disable prefetch HW */
++ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_EN);
++ mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_BUSY);
++ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
++
++ /* 2. Reset dma index */
++ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
++ MTK_WED_WDMA_RESET_IDX_RX_ALL);
++ }
++
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
+@@ -1550,8 +1798,13 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ for (i = 0; i < 100; i++) {
+- val = wed_r32(dev, MTK_WED_TX_BM_INTF);
+- if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
++ if (mtk_wed_is_v1(dev->hw))
++ val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP,
++ wed_r32(dev, MTK_WED_TX_BM_INTF));
++ else
++ val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP,
++ wed_r32(dev, MTK_WED_TX_TKID_INTF));
++ if (val == 0x40)
+ break;
+ }
+
+@@ -1573,6 +1826,8 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
++ if (mtk_wed_is_v3_or_greater(dev->hw))
++ wed_w32(dev, MTK_WED_RX1_CTRL2, 0);
+ } else {
+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
+ MTK_WED_WPDMA_RESET_IDX_TX |
+@@ -1589,7 +1844,14 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+- mtk_wed_rx_reset(dev);
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ /* reset amsdu engine */
++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
++ mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU);
++ }
++
++ if (mtk_wed_get_rx_capa(dev))
++ mtk_wed_rx_reset(dev);
+ }
+
+ static int
+@@ -1841,6 +2103,7 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
+
+ wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++ wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
+ }
+
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+@@ -1904,6 +2167,12 @@ mtk_wed_start_hw_rro(struct mtk_wed_devi
+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
+ return;
+
++ if (reset) {
++ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_EN);
++ return;
++ }
++
+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_CLR);
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -28,6 +28,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RESET 0x008
+ #define MTK_WED_RESET_TX_BM BIT(0)
+ #define MTK_WED_RESET_RX_BM BIT(1)
++#define MTK_WED_RESET_RX_PG_BM BIT(2)
++#define MTK_WED_RESET_RRO_RX_TO_PG BIT(3)
+ #define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
+ #define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
+ #define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
+@@ -106,6 +108,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_STATUS 0x060
+ #define MTK_WED_STATUS_TX GENMASK(15, 8)
+
++#define MTK_WED_WPDMA_STATUS 0x068
++#define MTK_WED_WPDMA_STATUS_TX_DRV GENMASK(15, 8)
++
+ #define MTK_WED_TX_BM_CTRL 0x080
+ #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+ #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+@@ -140,6 +145,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+ #define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+
++#define MTK_WED_TX_TKID_INTF 0x0dc
++#define MTK_WED_TX_TKID_INTF_TKFIFO_FDEP GENMASK(25, 16)
++
+ #define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3 GENMASK(7, 0)
+ #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3 GENMASK(23, 16)
+
+@@ -190,6 +198,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
+
+ #define MTK_WED_SCR0 0x3c0
++#define MTK_WED_RX1_CTRL2 0x418
+ #define MTK_WED_WPDMA_INT_TRIGGER 0x504
+ #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
+ #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
+@@ -303,6 +312,7 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
+ #define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
++#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL BIT(20)
+ #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
+
+ #define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
+@@ -313,6 +323,7 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4
+ #define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0)
++#define MTK_WED_WPDMA_RX_D_PREF_BUSY BIT(1)
+ #define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8)
+ #define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16)
+
+@@ -334,11 +345,13 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WDMA_RX_PREF_CFG 0x950
+ #define MTK_WED_WDMA_RX_PREF_EN BIT(0)
++#define MTK_WED_WDMA_RX_PREF_BUSY BIT(1)
+ #define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8)
+ #define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16)
+ #define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24)
+ #define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25)
+ #define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26)
++#define MTK_WED_WDMA_RX_PREF_DDONE2_BUSY BIT(27)
+
+ #define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C
+ #define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0)
+@@ -367,6 +380,7 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WDMA_RESET_IDX 0xa08
+ #define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
++#define MTK_WED_WDMA_RESET_IDX_RX_ALL BIT(20)
+ #define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
+
+ #define MTK_WED_WDMA_INT_CLR 0xa24
+@@ -437,21 +451,62 @@ struct mtk_wdma_desc {
+ #define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
+ #define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
+
++#define MTK_WDMA_XDMA_TX_FIFO_CFG 0x238
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR BIT(0)
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR BIT(4)
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR BIT(8)
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR BIT(12)
++
++#define MTK_WDMA_XDMA_RX_FIFO_CFG 0x23c
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR BIT(0)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR BIT(4)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR BIT(8)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR BIT(12)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR BIT(15)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR BIT(18)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR BIT(21)
++
+ #define MTK_WDMA_INT_GRP1 0x250
+ #define MTK_WDMA_INT_GRP2 0x254
+
+ #define MTK_WDMA_PREF_TX_CFG 0x2d0
+ #define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0)
++#define MTK_WDMA_PREF_TX_CFG_PREF_BUSY BIT(1)
+
+ #define MTK_WDMA_PREF_RX_CFG 0x2dc
+ #define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0)
++#define MTK_WDMA_PREF_RX_CFG_PREF_BUSY BIT(1)
++
++#define MTK_WDMA_PREF_RX_FIFO_CFG 0x2e0
++#define MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR BIT(0)
++#define MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR BIT(16)
++
++#define MTK_WDMA_PREF_TX_FIFO_CFG 0x2d4
++#define MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR BIT(0)
++#define MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR BIT(16)
++
++#define MTK_WDMA_PREF_SIDX_CFG 0x2e4
++#define MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
++#define MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
+
+ #define MTK_WDMA_WRBK_TX_CFG 0x300
++#define MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY BIT(0)
+ #define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30)
+
++#define MTK_WDMA_WRBK_TX_FIFO_CFG(_n) (0x304 + (_n) * 0x4)
++#define MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR BIT(0)
++
+ #define MTK_WDMA_WRBK_RX_CFG 0x344
++#define MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY BIT(0)
+ #define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30)
+
++#define MTK_WDMA_WRBK_RX_FIFO_CFG(_n) (0x348 + (_n) * 0x4)
++#define MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR BIT(0)
++
++#define MTK_WDMA_WRBK_SIDX_CFG 0x388
++#define MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
++#define MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
++
+ #define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
+ #define MTK_PCIE_MIRROR_MAP_EN BIT(0)
+ #define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
+@@ -465,6 +520,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
+ #define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
+
++#define MTK_WED_RTQM_RST 0xb04
++
+ #define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c
+ #define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4)
+ #define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28
+@@ -653,6 +710,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17)
+ #define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18)
+
++#define MTK_WED_RRO_RX_HW_STS 0xf00
++#define MTK_WED_RX_IND_CMD_BUSY GENMASK(31, 0)
++
+ #define MTK_WED_RX_IND_CMD_CNT0 0xf20
+ #define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31)
+
diff --git a/target/linux/generic/backport-5.15/751-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch b/target/linux/generic/backport-5.15/764-01-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch
index df9518d86c..df9518d86c 100644
--- a/target/linux/generic/backport-5.15/751-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch
+++ b/target/linux/generic/backport-5.15/764-01-v5.16-net-dsa-qca8k-fix-internal-delay-applied-to-the-wrong-PAD.patch
diff --git a/target/linux/generic/backport-5.15/752-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch b/target/linux/generic/backport-5.15/764-02-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch
index 7348d93ec4..7348d93ec4 100644
--- a/target/linux/generic/backport-5.15/752-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch
+++ b/target/linux/generic/backport-5.15/764-02-v5.16-net-dsa-qca8k-fix-MTU-calculation.patch
diff --git a/target/linux/generic/backport-5.15/753-v5.17-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch b/target/linux/generic/backport-5.15/764-03-v5.17-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch
index f477b1b929..f477b1b929 100644
--- a/target/linux/generic/backport-5.15/753-v5.17-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch
+++ b/target/linux/generic/backport-5.15/764-03-v5.17-net-next-net-dsa-qca8k-remove-redundant-check-in-parse_port_config.patch
diff --git a/target/linux/generic/backport-5.15/754-v5.17-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch b/target/linux/generic/backport-5.15/764-04-v5.17-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch
index 2cea88089d..2cea88089d 100644
--- a/target/linux/generic/backport-5.15/754-v5.17-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch
+++ b/target/linux/generic/backport-5.15/764-04-v5.17-net-next-net-dsa-qca8k-convert-to-GENMASK_FIELD_PREP_FIELD_GET.patch
diff --git a/target/linux/generic/backport-5.15/755-v5.17-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch b/target/linux/generic/backport-5.15/764-05-v5.17-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch
index 8c39b8ea29..8c39b8ea29 100644
--- a/target/linux/generic/backport-5.15/755-v5.17-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch
+++ b/target/linux/generic/backport-5.15/764-05-v5.17-net-next-net-dsa-qca8k-remove-extra-mutex_init-in-qca8k_setup.patch
diff --git a/target/linux/generic/backport-5.15/756-v5.17-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch b/target/linux/generic/backport-5.15/764-06-v5.17-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch
index 44d938c53e..44d938c53e 100644
--- a/target/linux/generic/backport-5.15/756-v5.17-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch
+++ b/target/linux/generic/backport-5.15/764-06-v5.17-net-next-net-dsa-qca8k-move-regmap-init-in-probe-and-set-it.patch
diff --git a/target/linux/generic/backport-5.15/757-v5.17-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch b/target/linux/generic/backport-5.15/764-07-v5.17-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch
index 4ca9c8ba41..4ca9c8ba41 100644
--- a/target/linux/generic/backport-5.15/757-v5.17-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch
+++ b/target/linux/generic/backport-5.15/764-07-v5.17-net-next-net-dsa-qca8k-initial-conversion-to-regmap-heper.patch
diff --git a/target/linux/generic/backport-5.15/758-v5.17-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch b/target/linux/generic/backport-5.15/764-08-v5.17-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch
index c8c050933b..c8c050933b 100644
--- a/target/linux/generic/backport-5.15/758-v5.17-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch
+++ b/target/linux/generic/backport-5.15/764-08-v5.17-net-next-net-dsa-qca8k-add-additional-MIB-counter-and-.patch
diff --git a/target/linux/generic/backport-5.15/759-v5.17-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch b/target/linux/generic/backport-5.15/764-09-v5.17-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch
index 8ad7ab472d..8ad7ab472d 100644
--- a/target/linux/generic/backport-5.15/759-v5.17-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch
+++ b/target/linux/generic/backport-5.15/764-09-v5.17-net-next-net-dsa-qca8k-add-support-for-port-fast-aging.patch
diff --git a/target/linux/generic/backport-5.15/760-v5.17-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch b/target/linux/generic/backport-5.15/764-10-v5.17-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch
index 659e482405..659e482405 100644
--- a/target/linux/generic/backport-5.15/760-v5.17-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch
+++ b/target/linux/generic/backport-5.15/764-10-v5.17-net-next-net-dsa-qca8k-add-set_ageing_time-support.patch
diff --git a/target/linux/generic/backport-5.15/761-v5.17-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch b/target/linux/generic/backport-5.15/764-11-v5.17-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch
index 8b97939ecb..8b97939ecb 100644
--- a/target/linux/generic/backport-5.15/761-v5.17-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch
+++ b/target/linux/generic/backport-5.15/764-11-v5.17-net-next-net-dsa-qca8k-add-support-for-mdb_add-del.patch
diff --git a/target/linux/generic/backport-5.15/762-v5.17-net-next-net-dsa-qca8k-add-support-for-mirror-mode.patch b/target/linux/generic/backport-5.15/764-12-v5.17-net-next-net-dsa-qca8k-add-support-for-mirror-mode.patch
index dc5a22935f..dc5a22935f 100644
--- a/target/linux/generic/backport-5.15/762-v5.17-net-next-net-dsa-qca8k-add-support-for-mirror-mode.patch
+++ b/target/linux/generic/backport-5.15/764-12-v5.17-net-next-net-dsa-qca8k-add-support-for-mirror-mode.patch
diff --git a/target/linux/generic/backport-5.15/763-v5.17-net-next-net-dsa-qca8k-add-LAG-support.patch b/target/linux/generic/backport-5.15/764-13-v5.17-net-next-net-dsa-qca8k-add-LAG-support.patch
index b53f1288d5..b53f1288d5 100644
--- a/target/linux/generic/backport-5.15/763-v5.17-net-next-net-dsa-qca8k-add-LAG-support.patch
+++ b/target/linux/generic/backport-5.15/764-13-v5.17-net-next-net-dsa-qca8k-add-LAG-support.patch
diff --git a/target/linux/generic/backport-5.15/764-v5.17-net-next-net-dsa-qca8k-fix-warning-in-LAG-feature.patch b/target/linux/generic/backport-5.15/764-14-v5.17-net-next-net-dsa-qca8k-fix-warning-in-LAG-feature.patch
index 7d811be11c..7d811be11c 100644
--- a/target/linux/generic/backport-5.15/764-v5.17-net-next-net-dsa-qca8k-fix-warning-in-LAG-feature.patch
+++ b/target/linux/generic/backport-5.15/764-14-v5.17-net-next-net-dsa-qca8k-fix-warning-in-LAG-feature.patch
diff --git a/target/linux/generic/backport-5.15/794-v6.2-net-core-Allow-live-renaming-when-an-interface-is-up.patch b/target/linux/generic/backport-5.15/794-v6.2-net-core-Allow-live-renaming-when-an-interface-is-up.patch
index 582e1ce2ef..b0860db266 100644
--- a/target/linux/generic/backport-5.15/794-v6.2-net-core-Allow-live-renaming-when-an-interface-is-up.patch
+++ b/target/linux/generic/backport-5.15/794-v6.2-net-core-Allow-live-renaming-when-an-interface-is-up.patch
@@ -46,7 +46,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -1642,7 +1642,6 @@ struct net_device_ops {
+@@ -1643,7 +1643,6 @@ struct net_device_ops {
* @IFF_FAILOVER: device is a failover master device
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
* @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
@@ -54,7 +54,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
* @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
* skb_headlen(skb) == 0 (data starts from frag0)
*/
-@@ -1677,7 +1676,7 @@ enum netdev_priv_flags {
+@@ -1678,7 +1677,7 @@ enum netdev_priv_flags {
IFF_FAILOVER = 1<<27,
IFF_FAILOVER_SLAVE = 1<<28,
IFF_L3MDEV_RX_HANDLER = 1<<29,
@@ -63,7 +63,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
};
-@@ -1711,7 +1710,6 @@ enum netdev_priv_flags {
+@@ -1712,7 +1711,6 @@ enum netdev_priv_flags {
#define IFF_FAILOVER IFF_FAILOVER
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
diff --git a/target/linux/generic/pending-5.15/736-01-net-ethernet-mtk_eth_soc-add-code-for-offloading-flo.patch b/target/linux/generic/backport-6.1/751-01-v6.4-net-ethernet-mtk_eth_soc-add-code-for-offloading-flo.patch
index caee22d2e9..46da5b283f 100644
--- a/target/linux/generic/pending-5.15/736-01-net-ethernet-mtk_eth_soc-add-code-for-offloading-flo.patch
+++ b/target/linux/generic/backport-6.1/751-01-v6.4-net-ethernet-mtk_eth_soc-add-code-for-offloading-flo.patch
@@ -11,10 +11,15 @@ PPE device.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.h | 3 +
+ .../net/ethernet/mediatek/mtk_ppe_offload.c | 37 ++++---
+ drivers/net/ethernet/mediatek/mtk_wed.c | 101 ++++++++++++++++++
+ include/linux/soc/mediatek/mtk_wed.h | 6 ++
+ 4 files changed, 133 insertions(+), 14 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
-@@ -1448,6 +1448,9 @@ int mtk_gmac_rgmii_path_setup(struct mtk
+@@ -1435,6 +1435,9 @@ int mtk_gmac_rgmii_path_setup(struct mtk
int mtk_eth_offload_init(struct mtk_eth *eth);
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
@@ -120,7 +125,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
static void
wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
{
-@@ -1760,6 +1767,99 @@ out:
+@@ -1753,6 +1760,99 @@ out:
mutex_unlock(&hw_lock);
}
@@ -220,7 +225,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
void __iomem *wdma, phys_addr_t wdma_phy,
int index)
-@@ -1779,6 +1879,7 @@ void mtk_wed_add_hw(struct device_node *
+@@ -1772,6 +1872,7 @@ void mtk_wed_add_hw(struct device_node *
.irq_set_mask = mtk_wed_irq_set_mask,
.detach = mtk_wed_detach,
.ppe_check = mtk_wed_ppe_check,
diff --git a/target/linux/generic/pending-6.1/736-02-net-ethernet-mediatek-mtk_ppe-prefer-newly-added-l2-.patch b/target/linux/generic/backport-6.1/751-02-v6.4-net-ethernet-mediatek-mtk_ppe-prefer-newly-added-l2-.patch
index 74cb562a51..74cb562a51 100644
--- a/target/linux/generic/pending-6.1/736-02-net-ethernet-mediatek-mtk_ppe-prefer-newly-added-l2-.patch
+++ b/target/linux/generic/backport-6.1/751-02-v6.4-net-ethernet-mediatek-mtk_ppe-prefer-newly-added-l2-.patch
diff --git a/target/linux/generic/pending-5.15/736-03-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch b/target/linux/generic/backport-6.1/751-03-v6.4-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch
index c8be7a9e2b..8b8a8e11c7 100644
--- a/target/linux/generic/pending-5.15/736-03-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch
+++ b/target/linux/generic/backport-6.1/751-03-v6.4-net-ethernet-mtk_eth_soc-improve-keeping-track-of-of.patch
@@ -9,10 +9,13 @@ flow accounting support.
Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
+ drivers/net/ethernet/mediatek/mtk_ppe.c | 162 ++++++++++++------------
+ drivers/net/ethernet/mediatek/mtk_ppe.h | 15 +--
+ 2 files changed, 86 insertions(+), 91 deletions(-)
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
-@@ -483,42 +483,43 @@ int mtk_foe_entry_set_queue(struct mtk_e
+@@ -476,42 +476,43 @@ int mtk_foe_entry_set_queue(struct mtk_e
return 0;
}
@@ -72,7 +75,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
hwe->ib1 &= ~MTK_FOE_IB1_STATE;
-@@ -538,7 +539,8 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
+@@ -531,7 +532,8 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
return;
@@ -82,7 +85,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
kfree(entry);
}
-@@ -554,66 +556,55 @@ static int __mtk_foe_entry_idle_time(str
+@@ -547,66 +549,55 @@ static int __mtk_foe_entry_idle_time(str
return now - timestamp;
}
@@ -178,7 +181,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
}
static void
-@@ -656,7 +647,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
+@@ -649,7 +640,8 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
spin_lock_bh(&ppe_lock);
@@ -188,7 +191,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
spin_unlock_bh(&ppe_lock);
}
-@@ -703,8 +695,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
+@@ -696,8 +688,8 @@ mtk_foe_entry_commit_subflow(struct mtk_
{
const struct mtk_soc_data *soc = ppe->eth->soc;
struct mtk_flow_entry *flow_info;
@@ -198,7 +201,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
int type;
-@@ -712,30 +704,30 @@ mtk_foe_entry_commit_subflow(struct mtk_
+@@ -705,30 +697,30 @@ mtk_foe_entry_commit_subflow(struct mtk_
if (!flow_info)
return;
@@ -239,7 +242,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
}
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
-@@ -745,9 +737,11 @@ void __mtk_ppe_check_skb(struct mtk_ppe
+@@ -738,9 +730,11 @@ void __mtk_ppe_check_skb(struct mtk_ppe
struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
struct mtk_flow_entry *entry;
struct mtk_foe_bridge key = {};
@@ -251,7 +254,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
u8 *tag;
spin_lock_bh(&ppe_lock);
-@@ -755,20 +749,14 @@ void __mtk_ppe_check_skb(struct mtk_ppe
+@@ -748,20 +742,14 @@ void __mtk_ppe_check_skb(struct mtk_ppe
if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
goto out;
@@ -278,7 +281,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
continue;
}
-@@ -819,9 +807,17 @@ out:
+@@ -810,9 +798,17 @@ out:
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
{
diff --git a/target/linux/generic/pending-6.1/736-04-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch b/target/linux/generic/backport-6.1/751-04-v6.4-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch
index d1c153c086..e20f94f1d4 100644
--- a/target/linux/generic/pending-6.1/736-04-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch
+++ b/target/linux/generic/backport-6.1/751-04-v6.4-net-ethernet-mediatek-fix-ppe-flow-accounting-for-L2.patch
@@ -14,7 +14,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
-@@ -80,9 +80,9 @@ static int mtk_ppe_mib_wait_busy(struct
+@@ -79,9 +79,9 @@ static int mtk_ppe_mib_wait_busy(struct
int ret;
u32 val;
@@ -27,7 +27,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
if (ret)
dev_err(ppe->dev, "MIB table busy");
-@@ -90,17 +90,31 @@ static int mtk_ppe_mib_wait_busy(struct
+@@ -89,17 +89,31 @@ static int mtk_ppe_mib_wait_busy(struct
return ret;
}
@@ -61,7 +61,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
-@@ -109,19 +123,19 @@ static int mtk_mib_entry_read(struct mtk
+@@ -108,19 +122,19 @@ static int mtk_mib_entry_read(struct mtk
if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
/* 64 bit for each counter */
u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
@@ -86,7 +86,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
}
static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
-@@ -520,13 +534,6 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
+@@ -519,13 +533,6 @@ __mtk_foe_entry_clear(struct mtk_ppe *pp
hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
dma_wmb();
mtk_ppe_cache_clear(ppe);
@@ -100,7 +100,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
}
entry->hash = 0xffff;
-@@ -551,11 +558,14 @@ static int __mtk_foe_entry_idle_time(str
+@@ -550,11 +557,14 @@ static int __mtk_foe_entry_idle_time(str
}
static bool
@@ -116,7 +116,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
int len;
if (hash == 0xffff)
-@@ -566,18 +576,35 @@ mtk_flow_entry_update(struct mtk_ppe *pp
+@@ -565,18 +575,35 @@ mtk_flow_entry_update(struct mtk_ppe *pp
memcpy(&foe, hwe, len);
if (!mtk_flow_entry_match(ppe->eth, entry, &foe, len) ||
@@ -155,7 +155,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
struct mtk_flow_entry *cur;
struct hlist_node *tmp;
int idle;
-@@ -586,7 +613,9 @@ mtk_flow_entry_update_l2(struct mtk_ppe
+@@ -585,7 +612,9 @@ mtk_flow_entry_update_l2(struct mtk_ppe
hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_list) {
int cur_idle;
@@ -166,7 +166,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
__mtk_foe_entry_clear(ppe, entry, false);
continue;
}
-@@ -601,10 +630,29 @@ mtk_flow_entry_update_l2(struct mtk_ppe
+@@ -600,10 +629,29 @@ mtk_flow_entry_update_l2(struct mtk_ppe
}
}
@@ -196,7 +196,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
struct mtk_eth *eth = ppe->eth;
u16 timestamp = mtk_eth_timestamp(eth);
struct mtk_foe_entry *hwe;
-@@ -635,6 +683,12 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
+@@ -634,6 +682,12 @@ __mtk_foe_entry_commit(struct mtk_ppe *p
dma_wmb();
@@ -209,7 +209,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
mtk_ppe_cache_clear(ppe);
}
-@@ -799,21 +853,6 @@ out:
+@@ -796,21 +850,6 @@ out:
spin_unlock_bh(&ppe_lock);
}
@@ -231,7 +231,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
{
if (!ppe)
-@@ -841,32 +880,6 @@ int mtk_ppe_prepare_reset(struct mtk_ppe
+@@ -838,32 +877,6 @@ int mtk_ppe_prepare_reset(struct mtk_ppe
return mtk_ppe_wait_busy(ppe);
}
diff --git a/target/linux/generic/backport-6.1/752-01-v6.6-net-ethernet-mtk_wed-add-some-more-info-in-wed_txinf.patch b/target/linux/generic/backport-6.1/752-01-v6.6-net-ethernet-mtk_wed-add-some-more-info-in-wed_txinf.patch
new file mode 100644
index 0000000000..a224b62624
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-01-v6.6-net-ethernet-mtk_wed-add-some-more-info-in-wed_txinf.patch
@@ -0,0 +1,45 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sun, 27 Aug 2023 19:31:41 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: add some more info in wed_txinfo_show
+ handler
+
+Add some new info to the Wireless Ethernet Dispatcher wed_txinfo_show
+debugfs handler that is useful during debugging.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://lore.kernel.org/r/3390292655d568180b73d2a25576f61aa63310e5.1693157377.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -127,8 +127,17 @@ wed_txinfo_show(struct seq_file *s, void
+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
+
+- DUMP_STR("TX FREE"),
++ DUMP_STR("WED TX FREE"),
+ DUMP_WED(WED_RX_MIB(0)),
++ DUMP_WED_RING(WED_RING_RX(0)),
++ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)),
++ DUMP_WED(WED_RX_MIB(1)),
++ DUMP_WED_RING(WED_RING_RX(1)),
++ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)),
++
++ DUMP_STR("WED WPDMA TX FREE"),
++ DUMP_WED_RING(WED_WPDMA_RING_RX(0)),
++ DUMP_WED_RING(WED_WPDMA_RING_RX(1)),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -266,6 +266,8 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
+ #define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
++#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
++#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
+
+ #define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
+ #define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
diff --git a/target/linux/generic/backport-6.1/752-02-v6.6-net-ethernet-mtk_wed-minor-change-in-wed_-tx-rx-info.patch b/target/linux/generic/backport-6.1/752-02-v6.6-net-ethernet-mtk_wed-minor-change-in-wed_-tx-rx-info.patch
new file mode 100644
index 0000000000..df6edfdf94
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-02-v6.6-net-ethernet-mtk_wed-minor-change-in-wed_-tx-rx-info.patch
@@ -0,0 +1,47 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Sun, 27 Aug 2023 19:33:47 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: minor change in wed_{tx,rx}info_show
+
+No functional changes, just cosmetic ones.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://lore.kernel.org/r/71e046c72a978745f0435af265dda610aa9bfbcf.1693157578.git.lorenzo@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -84,7 +84,6 @@ dump_wed_regs(struct seq_file *s, struct
+ }
+ }
+
+-
+ static int
+ wed_txinfo_show(struct seq_file *s, void *data)
+ {
+@@ -142,10 +141,8 @@ wed_txinfo_show(struct seq_file *s, void
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+- if (!dev)
+- return 0;
+-
+- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+ }
+@@ -217,10 +214,8 @@ wed_rxinfo_show(struct seq_file *s, void
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+- if (!dev)
+- return 0;
+-
+- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+ }
diff --git a/target/linux/generic/backport-6.1/752-03-v6.6-net-ethernet-mtk_eth_soc-rely-on-mtk_pse_port-defini.patch b/target/linux/generic/backport-6.1/752-03-v6.6-net-ethernet-mtk_eth_soc-rely-on-mtk_pse_port-defini.patch
new file mode 100644
index 0000000000..0bf9dea24f
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-03-v6.6-net-ethernet-mtk_eth_soc-rely-on-mtk_pse_port-defini.patch
@@ -0,0 +1,29 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue, 12 Sep 2023 10:22:56 +0200
+Subject: [PATCH] net: ethernet: mtk_eth_soc: rely on mtk_pse_port definitions
+ in mtk_flow_set_output_device
+
+Similar to ethernet ports, rely on mtk_pse_port definitions for
+pse wdma ports as well.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/b86bdb717e963e3246c1dec5f736c810703cf056.1694506814.git.lorenzo@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -196,10 +196,10 @@ mtk_flow_set_output_device(struct mtk_et
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ switch (info.wdma_idx) {
+ case 0:
+- pse_port = 8;
++ pse_port = PSE_WDMA0_PORT;
+ break;
+ case 1:
+- pse_port = 9;
++ pse_port = PSE_WDMA1_PORT;
+ break;
+ default:
+ return -EINVAL;
diff --git a/target/linux/generic/backport-6.1/752-04-v6.6-net-ethernet-mtk_wed-check-update_wo_rx_stats-in-mtk.patch b/target/linux/generic/backport-6.1/752-04-v6.6-net-ethernet-mtk_wed-check-update_wo_rx_stats-in-mtk.patch
new file mode 100644
index 0000000000..c99e1334d4
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-04-v6.6-net-ethernet-mtk_wed-check-update_wo_rx_stats-in-mtk.patch
@@ -0,0 +1,26 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Tue, 12 Sep 2023 10:28:00 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: check update_wo_rx_stats in
+ mtk_wed_update_rx_stats()
+
+Check if the update_wo_rx_stats function pointer is properly set in the
+mtk_wed_update_rx_stats routine before accessing it.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/b0d233386e059bccb59f18f69afb79a7806e5ded.1694507226.git.lorenzo@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -68,6 +68,9 @@ mtk_wed_update_rx_stats(struct mtk_wed_d
+ struct mtk_wed_wo_rx_stats *stats;
+ int i;
+
++ if (!wed->wlan.update_wo_rx_stats)
++ return;
++
+ if (count * sizeof(*stats) > skb->len - sizeof(u32))
+ return;
+
diff --git a/target/linux/generic/backport-6.1/752-05-v6.7-net-ethernet-mtk_wed-do-not-assume-offload-callbacks.patch b/target/linux/generic/backport-6.1/752-05-v6.7-net-ethernet-mtk_wed-do-not-assume-offload-callbacks.patch
new file mode 100644
index 0000000000..cd7fb92e20
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-05-v6.7-net-ethernet-mtk_wed-do-not-assume-offload-callbacks.patch
@@ -0,0 +1,68 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Wed, 13 Sep 2023 20:42:47 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: do not assume offload callbacks are
+ always set
+
+Check if wlan.offload_enable and wlan.offload_disable callbacks are set
+in mtk_wed_flow_add/mtk_wed_flow_remove since mt7996 will not rely
+on them.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -1712,19 +1712,20 @@ mtk_wed_irq_set_mask(struct mtk_wed_devi
+ int mtk_wed_flow_add(int index)
+ {
+ struct mtk_wed_hw *hw = hw_list[index];
+- int ret;
++ int ret = 0;
+
+- if (!hw || !hw->wed_dev)
+- return -ENODEV;
++ mutex_lock(&hw_lock);
+
+- if (hw->num_flows) {
+- hw->num_flows++;
+- return 0;
++ if (!hw || !hw->wed_dev) {
++ ret = -ENODEV;
++ goto out;
+ }
+
+- mutex_lock(&hw_lock);
+- if (!hw->wed_dev) {
+- ret = -ENODEV;
++ if (!hw->wed_dev->wlan.offload_enable)
++ goto out;
++
++ if (hw->num_flows) {
++ hw->num_flows++;
+ goto out;
+ }
+
+@@ -1743,14 +1744,15 @@ void mtk_wed_flow_remove(int index)
+ {
+ struct mtk_wed_hw *hw = hw_list[index];
+
+- if (!hw)
+- return;
++ mutex_lock(&hw_lock);
+
+- if (--hw->num_flows)
+- return;
++ if (!hw || !hw->wed_dev)
++ goto out;
+
+- mutex_lock(&hw_lock);
+- if (!hw->wed_dev)
++ if (!hw->wed_dev->wlan.offload_disable)
++ goto out;
++
++ if (--hw->num_flows)
+ goto out;
+
+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
diff --git a/target/linux/generic/backport-6.1/752-06-v6.7-net-ethernet-mtk_wed-introduce-versioning-utility-ro.patch b/target/linux/generic/backport-6.1/752-06-v6.7-net-ethernet-mtk_wed-introduce-versioning-utility-ro.patch
new file mode 100644
index 0000000000..2948188650
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-06-v6.7-net-ethernet-mtk_wed-introduce-versioning-utility-ro.patch
@@ -0,0 +1,232 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:05 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce versioning utility routines
+
+Similar to mtk_eth_soc, introduce the following wed versioning
+utility routines:
+- mtk_wed_is_v1
+- mtk_wed_is_v2
+
+This is a preliminary patch to introduce WED support for the MT7988 SoC.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -277,7 +277,7 @@ mtk_wed_assign(struct mtk_wed_device *de
+ if (!hw->wed_dev)
+ goto out;
+
+- if (hw->version == 1)
++ if (mtk_wed_is_v1(hw))
+ return NULL;
+
+ /* MT7986 WED devices do not have any pcie slot restrictions */
+@@ -358,7 +358,7 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_d
+ desc->buf0 = cpu_to_le32(buf_phys);
+ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size) |
+@@ -497,7 +497,7 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
+ {
+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+@@ -576,7 +576,7 @@ mtk_wed_dma_disable(struct mtk_wed_devic
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+@@ -605,7 +605,7 @@ mtk_wed_stop(struct mtk_wed_device *dev)
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ return;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
+@@ -624,7 +624,7 @@ mtk_wed_deinit(struct mtk_wed_device *de
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ return;
+
+ wed_clr(dev, MTK_WED_CTRL,
+@@ -730,7 +730,7 @@ mtk_wed_bus_init(struct mtk_wed_device *
+ static void
+ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
+ {
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ } else {
+ mtk_wed_bus_init(dev);
+@@ -761,7 +761,7 @@ mtk_wed_hw_init_early(struct mtk_wed_dev
+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ u32 offset = dev->hw->index ? 0x04000400 : 0;
+
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+@@ -934,7 +934,7 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_TX_BM_TKID,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+@@ -967,7 +967,7 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+@@ -1217,7 +1217,7 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ }
+
+ dev->init_done = false;
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ return;
+
+ if (!busy) {
+@@ -1343,7 +1343,7 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
+ MTK_WED_PCIE_INT_TRIGGER_STATUS);
+
+@@ -1416,7 +1416,7 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ } else {
+@@ -1465,7 +1465,7 @@ mtk_wed_start(struct mtk_wed_device *dev
+
+ mtk_wed_set_ext_int(dev, true);
+
+- if (dev->hw->version == 1) {
++ if (mtk_wed_is_v1(dev->hw)) {
+ u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
+ dev->hw->index);
+@@ -1550,7 +1550,7 @@ mtk_wed_attach(struct mtk_wed_device *de
+ }
+
+ mtk_wed_hw_init_early(dev);
+- if (hw->version == 1) {
++ if (mtk_wed_is_v1(hw)) {
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), 0);
+ } else {
+@@ -1618,7 +1618,7 @@ static int
+ mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+ {
+ struct mtk_wed_ring *ring = &dev->txfree_ring;
+- int i, index = dev->hw->version == 1;
++ int i, index = mtk_wed_is_v1(dev->hw);
+
+ /*
+ * For txfree event handling, the same DMA ring is shared between WED
+@@ -1676,7 +1676,7 @@ mtk_wed_irq_get(struct mtk_wed_device *d
+ {
+ u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+@@ -1843,7 +1843,7 @@ mtk_wed_setup_tc(struct mtk_wed_device *
+ {
+ struct mtk_wed_hw *hw = wed->hw;
+
+- if (hw->version < 2)
++ if (mtk_wed_is_v1(hw))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+@@ -1917,9 +1917,9 @@ void mtk_wed_add_hw(struct device_node *
+ hw->wdma = wdma;
+ hw->index = index;
+ hw->irq = irq;
+- hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;
++ hw->version = eth->soc->version;
+
+- if (hw->version == 1) {
++ if (mtk_wed_is_v1(hw)) {
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,pcie-mirror");
+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -40,6 +40,16 @@ struct mtk_wdma_info {
+ };
+
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
++static inline bool mtk_wed_is_v1(struct mtk_wed_hw *hw)
++{
++ return hw->version == 1;
++}
++
++static inline bool mtk_wed_is_v2(struct mtk_wed_hw *hw)
++{
++ return hw->version == 2;
++}
++
+ static inline void
+ wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+ {
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -263,7 +263,7 @@ void mtk_wed_hw_add_debugfs(struct mtk_w
+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+- if (hw->version != 1)
++ if (!mtk_wed_is_v1(hw))
+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
+ &wed_rxinfo_fops);
+ }
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -207,7 +207,7 @@ int mtk_wed_mcu_msg_update(struct mtk_we
+ {
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+
+- if (dev->hw->version == 1)
++ if (mtk_wed_is_v1(dev->hw))
+ return 0;
+
+ if (WARN_ON(!wo))
diff --git a/target/linux/generic/backport-6.1/752-07-v6.7-net-ethernet-mtk_wed-do-not-configure-rx-offload-if-.patch b/target/linux/generic/backport-6.1/752-07-v6.7-net-ethernet-mtk_wed-do-not-configure-rx-offload-if-.patch
new file mode 100644
index 0000000000..bc34aa33a9
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-07-v6.7-net-ethernet-mtk_wed-do-not-configure-rx-offload-if-.patch
@@ -0,0 +1,234 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:06 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: do not configure rx offload if not
+ supported
+
+Check if rx offload is supported by running the mtk_wed_get_rx_capa routine
+before configuring it. This is a preliminary patch to introduce Wireless
+Ethernet Dispatcher (WED) support for the MT7988 SoC.
+
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -605,7 +605,7 @@ mtk_wed_stop(struct mtk_wed_device *dev)
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+- if (mtk_wed_is_v1(dev->hw))
++ if (!mtk_wed_get_rx_capa(dev))
+ return;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
+@@ -732,16 +732,21 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+ {
+ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+- } else {
+- mtk_wed_bus_init(dev);
+-
+- wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
+- wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
+- wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+- wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
+- wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+- wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
++ return;
+ }
++
++ mtk_wed_bus_init(dev);
++
++ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
++ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
++ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
++ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
++
++ if (!mtk_wed_get_rx_capa(dev))
++ return;
++
++ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
++ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
+ }
+
+ static void
+@@ -973,15 +978,17 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ } else {
+ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+- /* rx hw init */
+- wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+- MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+- MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+- wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+-
+- mtk_wed_rx_buffer_hw_init(dev);
+- mtk_wed_rro_hw_init(dev);
+- mtk_wed_route_qm_hw_init(dev);
++ if (mtk_wed_get_rx_capa(dev)) {
++ /* rx hw init */
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
++ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
++ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
++
++ mtk_wed_rx_buffer_hw_init(dev);
++ mtk_wed_rro_hw_init(dev);
++ mtk_wed_route_qm_hw_init(dev);
++ }
+ }
+
+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
+@@ -1353,8 +1360,6 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+ } else {
+- wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
+- GENMASK(1, 0));
+ /* initail tx interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
+@@ -1373,15 +1378,20 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
+ dev->wlan.txfree_tbit));
+
+- wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
+- MTK_WED_WPDMA_INT_CTRL_RX0_EN |
+- MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
+- MTK_WED_WPDMA_INT_CTRL_RX1_EN |
+- MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
+- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
+- dev->wlan.rx_tbit[0]) |
+- FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
+- dev->wlan.rx_tbit[1]));
++ if (mtk_wed_get_rx_capa(dev)) {
++ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
++ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
++ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
++ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
++ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
++ dev->wlan.rx_tbit[0]) |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
++ dev->wlan.rx_tbit[1]));
++
++ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
++ GENMASK(1, 0));
++ }
+
+ wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
+ wed_set(dev, MTK_WED_WDMA_INT_CTRL,
+@@ -1400,6 +1410,8 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+ static void
+ mtk_wed_dma_enable(struct mtk_wed_device *dev)
+ {
++ int i;
++
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+
+ wed_set(dev, MTK_WED_GLO_CFG,
+@@ -1419,33 +1431,33 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ if (mtk_wed_is_v1(dev->hw)) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+- } else {
+- int i;
++ return;
++ }
+
+- wed_set(dev, MTK_WED_WPDMA_CTRL,
+- MTK_WED_WPDMA_CTRL_SDL1_FIXED);
++ wed_set(dev, MTK_WED_WPDMA_CTRL,
++ MTK_WED_WPDMA_CTRL_SDL1_FIXED);
++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
++ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
++ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+
+- wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+- MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
+- MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
++ if (!mtk_wed_get_rx_capa(dev))
++ return;
+
+- wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+- MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+- MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+-
+- wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+- MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+- MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
++ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
++ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
++ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+
+- wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+- MTK_WED_WPDMA_RX_D_RX_DRV_EN |
+- FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
+- FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
+- 0x2));
++ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
++ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
++ 0x2));
+
+- for (i = 0; i < MTK_WED_RX_QUEUES; i++)
+- mtk_wed_check_wfdma_rx_fill(dev, i);
+- }
++ for (i = 0; i < MTK_WED_RX_QUEUES; i++)
++ mtk_wed_check_wfdma_rx_fill(dev, i);
+ }
+
+ static void
+@@ -1472,7 +1484,7 @@ mtk_wed_start(struct mtk_wed_device *dev
+
+ val |= BIT(0) | (BIT(1) * !!dev->hw->index);
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+- } else {
++ } else if (mtk_wed_get_rx_capa(dev)) {
+ /* driver set mid ready and only once */
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1,
+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+@@ -1484,7 +1496,6 @@ mtk_wed_start(struct mtk_wed_device *dev
+
+ if (mtk_wed_rro_cfg(dev))
+ return;
+-
+ }
+
+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
+@@ -1550,13 +1561,14 @@ mtk_wed_attach(struct mtk_wed_device *de
+ }
+
+ mtk_wed_hw_init_early(dev);
+- if (mtk_wed_is_v1(hw)) {
++ if (mtk_wed_is_v1(hw))
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), 0);
+- } else {
++ else
+ dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
++
++ if (mtk_wed_get_rx_capa(dev))
+ ret = mtk_wed_wo_init(hw);
+- }
+ out:
+ if (ret) {
+ dev_err(dev->hw->dev, "failed to attach wed device\n");
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -207,7 +207,7 @@ int mtk_wed_mcu_msg_update(struct mtk_we
+ {
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+
+- if (mtk_wed_is_v1(dev->hw))
++ if (!mtk_wed_get_rx_capa(dev))
+ return 0;
+
+ if (WARN_ON(!wo))
diff --git a/target/linux/generic/backport-6.1/752-08-v6.7-net-ethernet-mtk_wed-rename-mtk_rxbm_desc-in-mtk_wed.patch b/target/linux/generic/backport-6.1/752-08-v6.7-net-ethernet-mtk_wed-rename-mtk_rxbm_desc-in-mtk_wed.patch
new file mode 100644
index 0000000000..d83434fb2c
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-08-v6.7-net-ethernet-mtk_wed-rename-mtk_rxbm_desc-in-mtk_wed.patch
@@ -0,0 +1,52 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:07 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: rename mtk_rxbm_desc in
+ mtk_wed_bm_desc
+
+Rename the mtk_rxbm_desc structure to mtk_wed_bm_desc since it will be used
+even on the tx side by the MT7988 SoC.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -421,7 +421,7 @@ free_pagelist:
+ static int
+ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
+ {
+- struct mtk_rxbm_desc *desc;
++ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+
+ dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
+@@ -441,7 +441,7 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_d
+ static void
+ mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
+ {
+- struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
++ struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc;
+
+ if (!desc)
+ return;
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -45,7 +45,7 @@ enum mtk_wed_wo_cmd {
+ MTK_WED_WO_CMD_WED_END
+ };
+
+-struct mtk_rxbm_desc {
++struct mtk_wed_bm_desc {
+ __le32 buf0;
+ __le32 token;
+ } __packed __aligned(4);
+@@ -105,7 +105,7 @@ struct mtk_wed_device {
+ struct {
+ int size;
+ struct page_frag_cache rx_page;
+- struct mtk_rxbm_desc *desc;
++ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+ } rx_buf_ring;
+
diff --git a/target/linux/generic/backport-6.1/752-09-v6.7-net-ethernet-mtk_wed-introduce-mtk_wed_buf-structure.patch b/target/linux/generic/backport-6.1/752-09-v6.7-net-ethernet-mtk_wed-introduce-mtk_wed_buf-structure.patch
new file mode 100644
index 0000000000..8000a8759e
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-09-v6.7-net-ethernet-mtk_wed-introduce-mtk_wed_buf-structure.patch
@@ -0,0 +1,87 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:08 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce mtk_wed_buf structure
+
+Introduce the mtk_wed_buf structure to store both the virtual and physical
+addresses allocated in the mtk_wed_tx_buffer_alloc() routine. This is a
+preliminary patch to add WED support for the MT7988 SoC, since it relies on
+a different dma descriptor layout that does not store page dma addresses.
+
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -299,9 +299,9 @@ out:
+ static int
+ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
+ {
++ struct mtk_wed_buf *page_list;
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+- void **page_list;
+ int token = dev->wlan.token_start;
+ int ring_size;
+ int n_pages;
+@@ -342,7 +342,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_d
+ return -ENOMEM;
+ }
+
+- page_list[page_idx++] = page;
++ page_list[page_idx].p = page;
++ page_list[page_idx++].phy_addr = page_phys;
+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+@@ -386,8 +387,8 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_d
+ static void
+ mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
+ {
++ struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages;
+ struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
+- void **page_list = dev->tx_buf_ring.pages;
+ int page_idx;
+ int i;
+
+@@ -399,13 +400,12 @@ mtk_wed_free_tx_buffer(struct mtk_wed_de
+
+ for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
+ i += MTK_WED_BUF_PER_PAGE) {
+- void *page = page_list[page_idx++];
+- dma_addr_t buf_addr;
++ dma_addr_t buf_addr = page_list[page_idx].phy_addr;
++ void *page = page_list[page_idx++].p;
+
+ if (!page)
+ break;
+
+- buf_addr = le32_to_cpu(desc[i].buf0);
+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -76,6 +76,11 @@ struct mtk_wed_wo_rx_stats {
+ __le32 rx_drop_cnt;
+ };
+
++struct mtk_wed_buf {
++ void *p;
++ dma_addr_t phy_addr;
++};
++
+ struct mtk_wed_device {
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ const struct mtk_wed_ops *ops;
+@@ -97,7 +102,7 @@ struct mtk_wed_device {
+
+ struct {
+ int size;
+- void **pages;
++ struct mtk_wed_buf *pages;
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ } tx_buf_ring;
diff --git a/target/linux/generic/backport-6.1/752-10-v6.7-net-ethernet-mtk_wed-move-mem_region-array-out-of-mt.patch b/target/linux/generic/backport-6.1/752-10-v6.7-net-ethernet-mtk_wed-move-mem_region-array-out-of-mt.patch
new file mode 100644
index 0000000000..98d782b1d0
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-10-v6.7-net-ethernet-mtk_wed-move-mem_region-array-out-of-mt.patch
@@ -0,0 +1,88 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:09 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: move mem_region array out of
+ mtk_wed_mcu_load_firmware
+
+Remove the mtk_wed_wo_memory_region boot structure from mtk_wed_wo.
+This is a preliminary patch to introduce WED support for MT7988 SoC.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -16,14 +16,30 @@
+ #include "mtk_wed_wo.h"
+ #include "mtk_wed.h"
+
++static struct mtk_wed_wo_memory_region mem_region[] = {
++ [MTK_WED_WO_REGION_EMI] = {
++ .name = "wo-emi",
++ },
++ [MTK_WED_WO_REGION_ILM] = {
++ .name = "wo-ilm",
++ },
++ [MTK_WED_WO_REGION_DATA] = {
++ .name = "wo-data",
++ .shared = true,
++ },
++ [MTK_WED_WO_REGION_BOOT] = {
++ .name = "wo-boot",
++ },
++};
++
+ static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
+ {
+- return readl(wo->boot.addr + reg);
++ return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ }
+
+ static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+ {
+- writel(val, wo->boot.addr + reg);
++ writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ }
+
+ static struct sk_buff *
+@@ -294,18 +310,6 @@ next:
+ static int
+ mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
+ {
+- static struct mtk_wed_wo_memory_region mem_region[] = {
+- [MTK_WED_WO_REGION_EMI] = {
+- .name = "wo-emi",
+- },
+- [MTK_WED_WO_REGION_ILM] = {
+- .name = "wo-ilm",
+- },
+- [MTK_WED_WO_REGION_DATA] = {
+- .name = "wo-data",
+- .shared = true,
+- },
+- };
+ const struct mtk_wed_fw_trailer *trailer;
+ const struct firmware *fw;
+ const char *fw_name;
+@@ -319,11 +323,6 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ return ret;
+ }
+
+- wo->boot.name = "wo-boot";
+- ret = mtk_wed_get_memory_region(wo, &wo->boot);
+- if (ret)
+- return ret;
+-
+ /* set dummy cr */
+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
+ wo->hw->index + 1);
+--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+@@ -228,7 +228,6 @@ struct mtk_wed_wo_queue {
+
+ struct mtk_wed_wo {
+ struct mtk_wed_hw *hw;
+- struct mtk_wed_wo_memory_region boot;
+
+ struct mtk_wed_wo_queue q_tx;
+ struct mtk_wed_wo_queue q_rx;
diff --git a/target/linux/generic/backport-6.1/752-11-v6.7-net-ethernet-mtk_wed-make-memory-region-optional.patch b/target/linux/generic/backport-6.1/752-11-v6.7-net-ethernet-mtk_wed-make-memory-region-optional.patch
new file mode 100644
index 0000000000..48b0d02049
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-11-v6.7-net-ethernet-mtk_wed-make-memory-region-optional.patch
@@ -0,0 +1,71 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:10 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: make memory region optional
+
+Make mtk_wed_wo_memory_region optional.
+This is a preliminary patch to introduce Wireless Ethernet Dispatcher
+support for the MT7988 SoC, since the MT7988 WED fw image will have a
+different layout.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -234,19 +234,13 @@ int mtk_wed_mcu_msg_update(struct mtk_we
+ }
+
+ static int
+-mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
++mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index,
+ struct mtk_wed_wo_memory_region *region)
+ {
+ struct reserved_mem *rmem;
+ struct device_node *np;
+- int index;
+
+- index = of_property_match_string(wo->hw->node, "memory-region-names",
+- region->name);
+- if (index < 0)
+- return index;
+-
+- np = of_parse_phandle(wo->hw->node, "memory-region", index);
++ np = of_parse_phandle(hw->node, "memory-region", index);
+ if (!np)
+ return -ENODEV;
+
+@@ -258,7 +252,7 @@ mtk_wed_get_memory_region(struct mtk_wed
+
+ region->phy_addr = rmem->base;
+ region->size = rmem->size;
+- region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size);
++ region->addr = devm_ioremap(hw->dev, region->phy_addr, region->size);
+
+ return !region->addr ? -EINVAL : 0;
+ }
+@@ -271,6 +265,9 @@ mtk_wed_mcu_run_firmware(struct mtk_wed_
+ const struct mtk_wed_fw_trailer *trailer;
+ const struct mtk_wed_fw_region *fw_region;
+
++ if (!region->phy_addr || !region->size)
++ return 0;
++
+ trailer_ptr = fw->data + fw->size - sizeof(*trailer);
+ trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
+ region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
+@@ -318,7 +315,13 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+
+ /* load firmware region metadata */
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+- ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
++ int index = of_property_match_string(wo->hw->node,
++ "memory-region-names",
++ mem_region[i].name);
++ if (index < 0)
++ continue;
++
++ ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]);
+ if (ret)
+ return ret;
+ }
diff --git a/target/linux/generic/backport-6.1/752-12-v6.7-net-ethernet-mtk_wed-fix-EXT_INT_STATUS_RX_FBUF-defi.patch b/target/linux/generic/backport-6.1/752-12-v6.7-net-ethernet-mtk_wed-fix-EXT_INT_STATUS_RX_FBUF-defi.patch
new file mode 100644
index 0000000000..878e8fe996
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-12-v6.7-net-ethernet-mtk_wed-fix-EXT_INT_STATUS_RX_FBUF-defi.patch
@@ -0,0 +1,27 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:11 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: fix EXT_INT_STATUS_RX_FBUF
+ definitions for MT7986 SoC
+
+Fix MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH and
+MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH definitions for MT7986 (MT7986 is
+the only SoC to use them).
+
+Fixes: de84a090d99a ("net: ethernet: mtk_eth_wed: add wed support for mt7986 chipset")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -64,8 +64,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
+ #define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(12)
+-#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(13)
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */
++#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
+ #define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
diff --git a/target/linux/generic/backport-6.1/752-13-v6.7-net-ethernet-mtk_wed-add-mtk_wed_soc_data-structure.patch b/target/linux/generic/backport-6.1/752-13-v6.7-net-ethernet-mtk_wed-add-mtk_wed_soc_data-structure.patch
new file mode 100644
index 0000000000..c43114fb5b
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-13-v6.7-net-ethernet-mtk_wed-add-mtk_wed_soc_data-structure.patch
@@ -0,0 +1,217 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:12 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: add mtk_wed_soc_data structure
+
+Introduce mtk_wed_soc_data utility structure to contain per-SoC
+definitions.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -48,6 +48,26 @@ struct mtk_wed_flow_block_priv {
+ struct net_device *dev;
+ };
+
++static const struct mtk_wed_soc_data mt7622_data = {
++ .regmap = {
++ .tx_bm_tkid = 0x088,
++ .wpdma_rx_ring0 = 0x770,
++ .reset_idx_tx_mask = GENMASK(3, 0),
++ .reset_idx_rx_mask = GENMASK(17, 16),
++ },
++ .wdma_desc_size = sizeof(struct mtk_wdma_desc),
++};
++
++static const struct mtk_wed_soc_data mt7986_data = {
++ .regmap = {
++ .tx_bm_tkid = 0x0c8,
++ .wpdma_rx_ring0 = 0x770,
++ .reset_idx_tx_mask = GENMASK(1, 0),
++ .reset_idx_rx_mask = GENMASK(7, 6),
++ },
++ .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
++};
++
+ static void
+ wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+ {
+@@ -746,7 +766,7 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+ return;
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+- wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
++ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
+ }
+
+ static void
+@@ -940,22 +960,10 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+ if (mtk_wed_is_v1(dev->hw)) {
+- wed_w32(dev, MTK_WED_TX_BM_TKID,
+- FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+- dev->wlan.token_start) |
+- FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+- dev->wlan.token_start +
+- dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+ MTK_WED_TX_BM_DYN_THR_HI);
+ } else {
+- wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
+- FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+- dev->wlan.token_start) |
+- FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+- dev->wlan.token_start +
+- dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
+ MTK_WED_TX_BM_DYN_THR_HI_V2);
+@@ -970,6 +978,11 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+ MTK_WED_TX_TKID_DYN_THR_HI);
+ }
+
++ wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid,
++ FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) |
++ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
++ dev->wlan.token_start + dev->wlan.nbuf - 1));
++
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+ if (mtk_wed_is_v1(dev->hw)) {
+@@ -1104,13 +1117,8 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
+ } else {
+- struct mtk_eth *eth = dev->hw->eth;
+-
+- if (mtk_is_netsys_v2_or_greater(eth))
+- wed_set(dev, MTK_WED_RESET_IDX,
+- MTK_WED_RESET_IDX_RX_V2);
+- else
+- wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
++ wed_set(dev, MTK_WED_RESET_IDX,
++ dev->hw->soc->regmap.reset_idx_rx_mask);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+@@ -1163,7 +1171,8 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
+ } else {
+- wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
++ wed_w32(dev, MTK_WED_RESET_IDX,
++ dev->hw->soc->regmap.reset_idx_tx_mask);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+@@ -1255,7 +1264,6 @@ static int
+ mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
+ {
+- u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+ struct mtk_wed_ring *wdma;
+
+ if (idx >= ARRAY_SIZE(dev->rx_wdma))
+@@ -1263,7 +1271,7 @@ mtk_wed_wdma_rx_ring_setup(struct mtk_we
+
+ wdma = &dev->rx_wdma[idx];
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+- desc_size, true))
++ dev->hw->soc->wdma_desc_size, true))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+@@ -1284,7 +1292,6 @@ static int
+ mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
+ {
+- u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+ struct mtk_wed_ring *wdma;
+
+ if (idx >= ARRAY_SIZE(dev->tx_wdma))
+@@ -1292,7 +1299,7 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
+
+ wdma = &dev->tx_wdma[idx];
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+- desc_size, true))
++ dev->hw->soc->wdma_desc_size, true))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+@@ -1931,7 +1938,12 @@ void mtk_wed_add_hw(struct device_node *
+ hw->irq = irq;
+ hw->version = eth->soc->version;
+
+- if (mtk_wed_is_v1(hw)) {
++ switch (hw->version) {
++ case 2:
++ hw->soc = &mt7986_data;
++ break;
++ default:
++ case 1:
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,pcie-mirror");
+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+@@ -1945,6 +1957,8 @@ void mtk_wed_add_hw(struct device_node *
+ regmap_write(hw->mirror, 0, 0);
+ regmap_write(hw->mirror, 4, 0);
+ }
++ hw->soc = &mt7622_data;
++ break;
+ }
+
+ mtk_wed_hw_add_debugfs(hw);
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -12,7 +12,18 @@
+ struct mtk_eth;
+ struct mtk_wed_wo;
+
++struct mtk_wed_soc_data {
++ struct {
++ u32 tx_bm_tkid;
++ u32 wpdma_rx_ring0;
++ u32 reset_idx_tx_mask;
++ u32 reset_idx_rx_mask;
++ } regmap;
++ u32 wdma_desc_size;
++};
++
+ struct mtk_wed_hw {
++ const struct mtk_wed_soc_data *soc;
+ struct device_node *node;
+ struct mtk_eth *eth;
+ struct regmap *regs;
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -100,8 +100,6 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_TX_BM_BASE 0x084
+
+-#define MTK_WED_TX_BM_TKID 0x088
+-#define MTK_WED_TX_BM_TKID_V2 0x0c8
+ #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
+ #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
+
+@@ -160,9 +158,6 @@ struct mtk_wdma_desc {
+ #define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+ #define MTK_WED_RESET_IDX 0x20c
+-#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
+-#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
+-#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6)
+ #define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)
+
+ #define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
+@@ -286,7 +281,6 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
+
+ #define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
+-#define MTK_WED_WPDMA_RX_RING 0x770
+
+ #define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
+ #define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
diff --git a/target/linux/generic/backport-6.1/752-14-v6.7-net-ethernet-mtk_wed-introduce-WED-support-for-MT798.patch b/target/linux/generic/backport-6.1/752-14-v6.7-net-ethernet-mtk_wed-introduce-WED-support-for-MT798.patch
new file mode 100644
index 0000000000..f874899c5b
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-14-v6.7-net-ethernet-mtk_wed-introduce-WED-support-for-MT798.patch
@@ -0,0 +1,1280 @@
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:13 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce WED support for MT7988
+
+Similar to MT7986 and MT7622, enable the Wireless Ethernet Dispatcher for
+MT7988 in order to offload traffic forwarded from LAN/WLAN to WLAN/LAN.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -195,6 +195,7 @@ static const struct mtk_reg_map mt7988_r
+ .wdma_base = {
+ [0] = 0x4800,
+ [1] = 0x4c00,
++ [2] = 0x5000,
+ },
+ .pse_iq_sta = 0x0180,
+ .pse_oq_sta = 0x01a0,
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+@@ -1132,7 +1132,7 @@ struct mtk_reg_map {
+ u32 gdm1_cnt;
+ u32 gdma_to_ppe;
+ u32 ppe_base;
+- u32 wdma_base[2];
++ u32 wdma_base[3];
+ u32 pse_iq_sta;
+ u32 pse_oq_sta;
+ };
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -201,6 +201,9 @@ mtk_flow_set_output_device(struct mtk_et
+ case 1:
+ pse_port = PSE_WDMA1_PORT;
+ break;
++ case 2:
++ pse_port = PSE_WDMA2_PORT;
++ break;
+ default:
+ return -EINVAL;
+ }
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -16,17 +16,19 @@
+ #include <net/flow_offload.h>
+ #include <net/pkt_cls.h>
+ #include "mtk_eth_soc.h"
+-#include "mtk_wed_regs.h"
+ #include "mtk_wed.h"
+ #include "mtk_ppe.h"
+ #include "mtk_wed_wo.h"
+
+ #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
+
+-#define MTK_WED_PKT_SIZE 1900
++#define MTK_WED_PKT_SIZE 1920
+ #define MTK_WED_BUF_SIZE 2048
++#define MTK_WED_PAGE_BUF_SIZE 128
+ #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
++#define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
+ #define MTK_WED_RX_RING_SIZE 1536
++#define MTK_WED_RX_PG_BM_CNT 8192
+
+ #define MTK_WED_TX_RING_SIZE 2048
+ #define MTK_WED_WDMA_RING_SIZE 1024
+@@ -40,7 +42,10 @@
+ #define MTK_WED_RRO_QUE_CNT 8192
+ #define MTK_WED_MIOD_ENTRY_CNT 128
+
+-static struct mtk_wed_hw *hw_list[2];
++#define MTK_WED_TX_BM_DMA_SIZE 65536
++#define MTK_WED_TX_BM_PKT_CNT 32768
++
++static struct mtk_wed_hw *hw_list[3];
+ static DEFINE_MUTEX(hw_lock);
+
+ struct mtk_wed_flow_block_priv {
+@@ -55,6 +60,7 @@ static const struct mtk_wed_soc_data mt7
+ .reset_idx_tx_mask = GENMASK(3, 0),
+ .reset_idx_rx_mask = GENMASK(17, 16),
+ },
++ .tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
+ .wdma_desc_size = sizeof(struct mtk_wdma_desc),
+ };
+
+@@ -65,6 +71,18 @@ static const struct mtk_wed_soc_data mt7
+ .reset_idx_tx_mask = GENMASK(1, 0),
+ .reset_idx_rx_mask = GENMASK(7, 6),
+ },
++ .tx_ring_desc_size = sizeof(struct mtk_wdma_desc),
++ .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
++};
++
++static const struct mtk_wed_soc_data mt7988_data = {
++ .regmap = {
++ .tx_bm_tkid = 0x0c8,
++ .wpdma_rx_ring0 = 0x7d0,
++ .reset_idx_tx_mask = GENMASK(1, 0),
++ .reset_idx_rx_mask = GENMASK(7, 6),
++ },
++ .tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc),
+ .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc),
+ };
+
+@@ -319,33 +337,38 @@ out:
+ static int
+ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
+ {
++ u32 desc_size = dev->hw->soc->tx_ring_desc_size;
++ int i, page_idx = 0, n_pages, ring_size;
++ int token = dev->wlan.token_start;
+ struct mtk_wed_buf *page_list;
+- struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+- int token = dev->wlan.token_start;
+- int ring_size;
+- int n_pages;
+- int i, page_idx;
++ void *desc_ptr;
+
+- ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+- n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
++ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
++ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
++ dev->tx_buf_ring.size = ring_size;
++ } else {
++ dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE;
++ ring_size = MTK_WED_TX_BM_PKT_CNT;
++ }
++ n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE;
+
+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+- dev->tx_buf_ring.size = ring_size;
+ dev->tx_buf_ring.pages = page_list;
+
+- desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
+- &desc_phys, GFP_KERNEL);
+- if (!desc)
++ desc_ptr = dma_alloc_coherent(dev->hw->dev,
++ dev->tx_buf_ring.size * desc_size,
++ &desc_phys, GFP_KERNEL);
++ if (!desc_ptr)
+ return -ENOMEM;
+
+- dev->tx_buf_ring.desc = desc;
++ dev->tx_buf_ring.desc = desc_ptr;
+ dev->tx_buf_ring.desc_phys = desc_phys;
+
+- for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
++ for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
+ dma_addr_t page_phys, buf_phys;
+ struct page *page;
+ void *buf;
+@@ -371,28 +394,31 @@ mtk_wed_tx_buffer_alloc(struct mtk_wed_d
+ buf_phys = page_phys;
+
+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
+- u32 txd_size;
+- u32 ctrl;
+-
+- txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
++ struct mtk_wdma_desc *desc = desc_ptr;
+
+ desc->buf0 = cpu_to_le32(buf_phys);
+- desc->buf1 = cpu_to_le32(buf_phys + txd_size);
++ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
++ u32 txd_size, ctrl;
+
+- if (mtk_wed_is_v1(dev->hw))
+- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+- MTK_WED_BUF_SIZE - txd_size) |
+- MTK_WDMA_DESC_CTRL_LAST_SEG1;
+- else
+- ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+- FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
+- MTK_WED_BUF_SIZE - txd_size) |
+- MTK_WDMA_DESC_CTRL_LAST_SEG0;
+- desc->ctrl = cpu_to_le32(ctrl);
+- desc->info = 0;
+- desc++;
++ txd_size = dev->wlan.init_buf(buf, buf_phys,
++ token++);
++ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
++ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size);
++ if (mtk_wed_is_v1(dev->hw))
++ ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG1 |
++ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
++ MTK_WED_BUF_SIZE - txd_size);
++ else
++ ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 |
++ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
++ MTK_WED_BUF_SIZE - txd_size);
++ desc->ctrl = cpu_to_le32(ctrl);
++ desc->info = 0;
++ } else {
++ desc->ctrl = cpu_to_le32(token << 16);
++ }
+
++ desc_ptr += desc_size;
+ buf += MTK_WED_BUF_SIZE;
+ buf_phys += MTK_WED_BUF_SIZE;
+ }
+@@ -408,31 +434,31 @@ static void
+ mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
+ {
+ struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages;
+- struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
+- int page_idx;
+- int i;
++ struct mtk_wed_hw *hw = dev->hw;
++ int i, page_idx = 0;
+
+ if (!page_list)
+ return;
+
+- if (!desc)
++ if (!dev->tx_buf_ring.desc)
+ goto free_pagelist;
+
+- for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
+- i += MTK_WED_BUF_PER_PAGE) {
+- dma_addr_t buf_addr = page_list[page_idx].phy_addr;
++ for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
++ dma_addr_t page_phy = page_list[page_idx].phy_addr;
+ void *page = page_list[page_idx++].p;
+
+ if (!page)
+ break;
+
+- dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
++ dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+ }
+
+- dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
+- desc, dev->tx_buf_ring.desc_phys);
++ dma_free_coherent(dev->hw->dev,
++ dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size,
++ dev->tx_buf_ring.desc,
++ dev->tx_buf_ring.desc_phys);
+
+ free_pagelist:
+ kfree(page_list);
+@@ -517,13 +543,23 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
+ {
+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+- if (mtk_wed_is_v1(dev->hw))
++ switch (dev->hw->version) {
++ case 1:
+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+- else
++ break;
++ case 2:
+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
++ break;
++ case 3:
++ mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
++ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
++ break;
++ default:
++ break;
++ }
+
+ if (!dev->hw->num_flows)
+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+@@ -535,6 +571,9 @@ mtk_wed_set_ext_int(struct mtk_wed_devic
+ static void
+ mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
+ {
++ if (!mtk_wed_is_v2(dev->hw))
++ return;
++
+ if (enable) {
+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ wed_w32(dev, MTK_WED_TXP_DW1,
+@@ -609,6 +648,14 @@ mtk_wed_dma_disable(struct mtk_wed_devic
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
++
++ if (mtk_wed_is_v3_or_greater(dev->hw) &&
++ mtk_wed_get_rx_capa(dev)) {
++ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
++ MTK_WDMA_PREF_TX_CFG_PREF_EN);
++ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
++ MTK_WDMA_PREF_RX_CFG_PREF_EN);
++ }
+ }
+
+ mtk_wed_set_512_support(dev, false);
+@@ -651,6 +698,14 @@ mtk_wed_deinit(struct mtk_wed_device *de
+ MTK_WED_CTRL_RX_ROUTE_QM_EN |
+ MTK_WED_CTRL_WED_RX_BM_EN |
+ MTK_WED_CTRL_RX_RRO_QM_EN);
++
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
++ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU);
++ wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
++ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
++ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
++ }
+ }
+
+ static void
+@@ -700,21 +755,37 @@ mtk_wed_detach(struct mtk_wed_device *de
+ mutex_unlock(&hw_lock);
+ }
+
+-#define PCIE_BASE_ADDR0 0x11280000
+ static void
+ mtk_wed_bus_init(struct mtk_wed_device *dev)
+ {
+ switch (dev->wlan.bus_type) {
+ case MTK_WED_BUS_PCIE: {
+ struct device_node *np = dev->hw->eth->dev->of_node;
+- struct regmap *regs;
+
+- regs = syscon_regmap_lookup_by_phandle(np,
+- "mediatek,wed-pcie");
+- if (IS_ERR(regs))
+- break;
++ if (mtk_wed_is_v2(dev->hw)) {
++ struct regmap *regs;
++
++ regs = syscon_regmap_lookup_by_phandle(np,
++ "mediatek,wed-pcie");
++ if (IS_ERR(regs))
++ break;
+
+- regmap_update_bits(regs, 0, BIT(0), BIT(0));
++ regmap_update_bits(regs, 0, BIT(0), BIT(0));
++ }
++
++ if (dev->wlan.msi) {
++ wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
++ dev->hw->pcie_base | 0xc08);
++ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
++ dev->hw->pcie_base | 0xc04);
++ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
++ } else {
++ wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
++ dev->hw->pcie_base | 0x180);
++ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
++ dev->hw->pcie_base | 0x184);
++ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
++ }
+
+ wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
+@@ -722,19 +793,9 @@ mtk_wed_bus_init(struct mtk_wed_device *
+ /* pcie interrupt control: pola/source selection */
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+- FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
+- wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+-
+- wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
+- wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+-
+- /* pcie interrupt status trigger register */
+- wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+- wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
+-
+- /* pola setting */
+- wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+- MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
++ MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
++ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL,
++ dev->hw->index));
+ break;
+ }
+ case MTK_WED_BUS_AXI:
+@@ -772,18 +833,19 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+ static void
+ mtk_wed_hw_init_early(struct mtk_wed_device *dev)
+ {
+- u32 mask, set;
++ u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
++ u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;
+
+ mtk_wed_deinit(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+ mtk_wed_set_wpdma(dev);
+
+- mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
+- MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+- MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
+- set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
+- MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
+- MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
++ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
++ mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
++ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
++ set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
++ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
++ }
+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+ if (mtk_wed_is_v1(dev->hw)) {
+@@ -931,11 +993,18 @@ mtk_wed_route_qm_hw_init(struct mtk_wed_
+ }
+
+ /* configure RX_ROUTE_QM */
+- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
+- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+- FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
+- wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++ if (mtk_wed_is_v2(dev->hw)) {
++ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
++ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
++ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT,
++ 0x3 + dev->hw->index));
++ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++ } else {
++ wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
++ FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT,
++ 0x3 + dev->hw->index));
++ }
+ /* enable RX_ROUTE_QM */
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+ }
+@@ -948,22 +1017,30 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+
+ dev->init_done = true;
+ mtk_wed_set_ext_int(dev, false);
+- wed_w32(dev, MTK_WED_TX_BM_CTRL,
+- MTK_WED_TX_BM_CTRL_PAUSE |
+- FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
+- dev->tx_buf_ring.size / 128) |
+- FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
+- MTK_WED_TX_RING_SIZE / 256));
+
+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
+-
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+ if (mtk_wed_is_v1(dev->hw)) {
++ wed_w32(dev, MTK_WED_TX_BM_CTRL,
++ MTK_WED_TX_BM_CTRL_PAUSE |
++ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
++ dev->tx_buf_ring.size / 128) |
++ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
++ MTK_WED_TX_RING_SIZE / 256));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+ MTK_WED_TX_BM_DYN_THR_HI);
+- } else {
++ } else if (mtk_wed_is_v2(dev->hw)) {
++ wed_w32(dev, MTK_WED_TX_BM_CTRL,
++ MTK_WED_TX_BM_CTRL_PAUSE |
++ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
++ dev->tx_buf_ring.size / 128) |
++ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
++ MTK_WED_TX_RING_SIZE / 256));
++ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
++ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
++ MTK_WED_TX_TKID_DYN_THR_HI);
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
+ MTK_WED_TX_BM_DYN_THR_HI_V2);
+@@ -973,9 +1050,6 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+ dev->tx_buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
+ dev->tx_buf_ring.size / 128));
+- wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+- FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+- MTK_WED_TX_TKID_DYN_THR_HI);
+ }
+
+ wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid,
+@@ -985,26 +1059,62 @@ mtk_wed_hw_init(struct mtk_wed_device *d
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ /* switch to new bm architecture */
++ wed_clr(dev, MTK_WED_TX_BM_CTRL,
++ MTK_WED_TX_BM_CTRL_LEGACY_EN);
++
++ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
++ MTK_WED_TX_TKID_CTRL_PAUSE |
++ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3,
++ dev->wlan.nbuf / 128) |
++ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3,
++ dev->wlan.nbuf / 128));
++ /* return SKBID + SDP back to bm */
++ wed_set(dev, MTK_WED_TX_TKID_CTRL,
++ MTK_WED_TX_TKID_CTRL_FREE_FORMAT);
++
++ wed_w32(dev, MTK_WED_TX_BM_INIT_PTR,
++ MTK_WED_TX_BM_PKT_CNT |
++ MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
++ }
++
+ if (mtk_wed_is_v1(dev->hw)) {
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+- } else {
+- wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+- if (mtk_wed_get_rx_capa(dev)) {
+- /* rx hw init */
+- wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+- MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+- MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+- wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+-
+- mtk_wed_rx_buffer_hw_init(dev);
+- mtk_wed_rro_hw_init(dev);
+- mtk_wed_route_qm_hw_init(dev);
+- }
++ } else if (mtk_wed_get_rx_capa(dev)) {
++ /* rx hw init */
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
++ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
++ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
++
++ /* reset prefetch index of ring */
++ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
++ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
++ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++
++ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
++ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
++ MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
++
++ /* reset prefetch FIFO of ring */
++ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
++ MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0);
++
++ mtk_wed_rx_buffer_hw_init(dev);
++ mtk_wed_rro_hw_init(dev);
++ mtk_wed_route_qm_hw_init(dev);
+ }
+
+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
++ if (!mtk_wed_is_v1(dev->hw))
++ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+ }
+
+ static void
+@@ -1302,6 +1412,24 @@ mtk_wed_wdma_tx_ring_setup(struct mtk_we
+ dev->hw->soc->wdma_desc_size, true))
+ return -ENOMEM;
+
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ struct mtk_wdma_desc *desc = wdma->desc;
++ int i;
++
++ for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) {
++ desc->buf0 = 0;
++ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
++ desc->buf1 = 0;
++ desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE);
++ desc++;
++ desc->buf0 = 0;
++ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
++ desc->buf1 = 0;
++ desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE);
++ desc++;
++ }
++ }
++
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
+@@ -1367,6 +1495,9 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+ } else {
++ if (mtk_wed_is_v3_or_greater(dev->hw))
++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN);
++
+ /* initail tx interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
+@@ -1419,33 +1550,60 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ {
+ int i;
+
+- wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
++ if (!mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
++ MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
++ wdma_set(dev, MTK_WDMA_GLO_CFG,
++ MTK_WDMA_GLO_CFG_TX_DMA_EN |
++ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
++ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
++ wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED);
++ } else {
++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN |
++ MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR);
++ wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
++ }
+
+ wed_set(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+- wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+- MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+- MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
++
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+- wdma_set(dev, MTK_WDMA_GLO_CFG,
+- MTK_WDMA_GLO_CFG_TX_DMA_EN |
+- MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+- MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+-
+ if (mtk_wed_is_v1(dev->hw)) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ return;
+ }
+
+- wed_set(dev, MTK_WED_WPDMA_CTRL,
+- MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
++
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) |
++ FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8));
++ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
++ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN);
++
++ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST);
++ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
++ MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK |
++ MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
++
++ wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++ }
++
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+@@ -1457,11 +1615,22 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+
++ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN);
+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
+- FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
+- 0x2));
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2));
++
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_EN |
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) |
++ FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8));
++
++ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
++ wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
++ wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
++ }
+
+ for (i = 0; i < MTK_WED_RX_QUEUES; i++)
+ mtk_wed_check_wfdma_rx_fill(dev, i);
+@@ -1501,6 +1670,12 @@ mtk_wed_start(struct mtk_wed_device *dev
+ wed_r32(dev, MTK_WED_EXT_INT_MASK1);
+ wed_r32(dev, MTK_WED_EXT_INT_MASK2);
+
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_w32(dev, MTK_WED_EXT_INT_MASK3,
++ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
++ wed_r32(dev, MTK_WED_EXT_INT_MASK3);
++ }
++
+ if (mtk_wed_rro_cfg(dev))
+ return;
+ }
+@@ -1552,6 +1727,7 @@ mtk_wed_attach(struct mtk_wed_device *de
+ dev->irq = hw->irq;
+ dev->wdma_idx = hw->index;
+ dev->version = hw->version;
++ dev->hw->pcie_base = mtk_wed_get_pcie_base(dev);
+
+ if (hw->eth->dma_dev == hw->eth->dev &&
+ of_dma_is_coherent(hw->eth->dev->of_node))
+@@ -1619,6 +1795,23 @@ mtk_wed_tx_ring_setup(struct mtk_wed_dev
+ ring->reg_base = MTK_WED_RING_TX(idx);
+ ring->wpdma = regs;
+
++ if (mtk_wed_is_v3_or_greater(dev->hw) && idx == 1) {
++ /* reset prefetch index */
++ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
++ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
++
++ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR |
++ MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR);
++
++ /* reset prefetch FIFO */
++ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG,
++ MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR |
++ MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR);
++ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0);
++ }
++
+ /* WED -> WPDMA */
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
+@@ -1693,15 +1886,13 @@ mtk_wed_rx_ring_setup(struct mtk_wed_dev
+ static u32
+ mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
+ {
+- u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
++ u32 val, ext_mask;
+
+- if (mtk_wed_is_v1(dev->hw))
+- ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
++ if (mtk_wed_is_v3_or_greater(dev->hw))
++ ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
++ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ else
+- ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+- MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+- MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+- MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
++ ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
+@@ -1942,6 +2133,9 @@ void mtk_wed_add_hw(struct device_node *
+ case 2:
+ hw->soc = &mt7986_data;
+ break;
++ case 3:
++ hw->soc = &mt7988_data;
++ break;
+ default:
+ case 1:
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -9,6 +9,8 @@
+ #include <linux/regmap.h>
+ #include <linux/netdevice.h>
+
++#include "mtk_wed_regs.h"
++
+ struct mtk_eth;
+ struct mtk_wed_wo;
+
+@@ -19,6 +21,7 @@ struct mtk_wed_soc_data {
+ u32 reset_idx_tx_mask;
+ u32 reset_idx_rx_mask;
+ } regmap;
++ u32 tx_ring_desc_size;
+ u32 wdma_desc_size;
+ };
+
+@@ -35,6 +38,7 @@ struct mtk_wed_hw {
+ struct dentry *debugfs_dir;
+ struct mtk_wed_device *wed_dev;
+ struct mtk_wed_wo *wed_wo;
++ u32 pcie_base;
+ u32 debugfs_reg;
+ u32 num_flows;
+ u8 version;
+@@ -61,6 +65,16 @@ static inline bool mtk_wed_is_v2(struct
+ return hw->version == 2;
+ }
+
++static inline bool mtk_wed_is_v3(struct mtk_wed_hw *hw)
++{
++ return hw->version == 3;
++}
++
++static inline bool mtk_wed_is_v3_or_greater(struct mtk_wed_hw *hw)
++{
++ return hw->version > 2;
++}
++
+ static inline void
+ wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+ {
+@@ -143,6 +157,21 @@ wpdma_txfree_w32(struct mtk_wed_device *
+ writel(val, dev->txfree_ring.wpdma + reg);
+ }
+
++static inline u32 mtk_wed_get_pcie_base(struct mtk_wed_device *dev)
++{
++ if (!mtk_wed_is_v3_or_greater(dev->hw))
++ return MTK_WED_PCIE_BASE;
++
++ switch (dev->hw->index) {
++ case 1:
++ return MTK_WED_PCIE_BASE1;
++ case 2:
++ return MTK_WED_PCIE_BASE2;
++ default:
++ return MTK_WED_PCIE_BASE0;
++ }
++}
++
+ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index);
+--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+@@ -331,10 +331,22 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ wo->hw->index + 1);
+
+ /* load firmware */
+- if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed"))
+- fw_name = MT7981_FIRMWARE_WO;
+- else
+- fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0;
++ switch (wo->hw->version) {
++ case 2:
++ if (of_device_is_compatible(wo->hw->node,
++ "mediatek,mt7981-wed"))
++ fw_name = MT7981_FIRMWARE_WO;
++ else
++ fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1
++ : MT7986_FIRMWARE_WO0;
++ break;
++ case 3:
++ fw_name = wo->hw->index ? MT7988_FIRMWARE_WO1
++ : MT7988_FIRMWARE_WO0;
++ break;
++ default:
++ return -EINVAL;
++ }
+
+ ret = request_firmware(&fw, fw_name, wo->hw->dev);
+ if (ret)
+@@ -355,15 +367,16 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ }
+
+ /* set the start address */
+- boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
+- : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
++ if (!mtk_wed_is_v3_or_greater(wo->hw) && wo->hw->index)
++ boot_cr = MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR;
++ else
++ boot_cr = MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
+ wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
+ /* wo firmware reset */
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
+
+- val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
+- val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK
+- : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
++ val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR) |
++ MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
+ out:
+ release_firmware(fw);
+@@ -398,3 +411,5 @@ int mtk_wed_mcu_init(struct mtk_wed_wo *
+ MODULE_FIRMWARE(MT7981_FIRMWARE_WO);
+ MODULE_FIRMWARE(MT7986_FIRMWARE_WO0);
+ MODULE_FIRMWARE(MT7986_FIRMWARE_WO1);
++MODULE_FIRMWARE(MT7988_FIRMWARE_WO0);
++MODULE_FIRMWARE(MT7988_FIRMWARE_WO1);
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -13,6 +13,9 @@
+ #define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
+ #define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
+
++#define MTK_WDMA_TXD0_DESC_INFO_DMA_DONE BIT(29)
++#define MTK_WDMA_TXD1_DESC_INFO_DMA_DONE BIT(31)
++
+ struct mtk_wdma_desc {
+ __le32 buf0;
+ __le32 ctrl;
+@@ -37,6 +40,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
+ #define MTK_WED_RESET_RX_RRO_QM BIT(20)
+ #define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
++#define MTK_WED_RESET_TX_AMSDU BIT(22)
+ #define MTK_WED_RESET_WED BIT(31)
+
+ #define MTK_WED_CTRL 0x00c
+@@ -44,6 +48,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
+ #define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
+ #define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
++#define MTK_WED_CTRL_WED_RX_IND_CMD_EN BIT(5)
++#define MTK_WED_CTRL_WED_RX_PG_BM_EN BIT(6)
++#define MTK_WED_CTRL_WED_RX_PG_BM_BUSY BIT(7)
+ #define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
+ #define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
+ #define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
+@@ -54,9 +61,14 @@ struct mtk_wdma_desc {
+ #define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
+ #define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
+ #define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
++#define MTK_WED_CTRL_TX_TKID_ALI_EN BIT(20)
++#define MTK_WED_CTRL_TX_TKID_ALI_BUSY BIT(21)
++#define MTK_WED_CTRL_TX_AMSDU_EN BIT(22)
++#define MTK_WED_CTRL_TX_AMSDU_BUSY BIT(23)
+ #define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
+ #define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
+ #define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
++#define MTK_WED_CTRL_FLD_MIB_RD_CLR BIT(28)
+
+ #define MTK_WED_EXT_INT_STATUS 0x020
+ #define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
+@@ -89,6 +101,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_EXT_INT_MASK 0x028
+ #define MTK_WED_EXT_INT_MASK1 0x02c
+ #define MTK_WED_EXT_INT_MASK2 0x030
++#define MTK_WED_EXT_INT_MASK3 0x034
+
+ #define MTK_WED_STATUS 0x060
+ #define MTK_WED_STATUS_TX GENMASK(15, 8)
+@@ -96,9 +109,14 @@ struct mtk_wdma_desc {
+ #define MTK_WED_TX_BM_CTRL 0x080
+ #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+ #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
++#define MTK_WED_TX_BM_CTRL_LEGACY_EN BIT(26)
++#define MTK_WED_TX_TKID_CTRL_FREE_FORMAT BIT(27)
+ #define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
+
+ #define MTK_WED_TX_BM_BASE 0x084
++#define MTK_WED_TX_BM_INIT_PTR 0x088
++#define MTK_WED_TX_BM_SW_TAIL_IDX GENMASK(16, 0)
++#define MTK_WED_TX_BM_INIT_SW_TAIL_IDX BIT(16)
+
+ #define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
+ #define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
+@@ -122,6 +140,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+ #define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+
++#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3 GENMASK(7, 0)
++#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3 GENMASK(23, 16)
++
+ #define MTK_WED_TX_TKID_DYN_THR 0x0e0
+ #define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
+ #define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
+@@ -199,12 +220,15 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
+-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(15, 12)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4 BIT(18)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
+-#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
++#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK BIT(20)
+ #define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
+ #define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
++#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST BIT(25)
+ #define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
++#define MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK BIT(30)
+
+ #define MTK_WED_WPDMA_RESET_IDX 0x50c
+ #define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
+@@ -250,9 +274,10 @@ struct mtk_wdma_desc {
+ #define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
+
+ #define MTK_WED_PCIE_INT_CTRL 0x57c
+-#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
+-#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
+ #define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
++#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
++#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
++#define MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER BIT(21)
+
+ #define MTK_WED_WPDMA_CFG_BASE 0x580
+ #define MTK_WED_WPDMA_CFG_INT_MASK 0x584
+@@ -286,6 +311,20 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
+ #define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
+
++#define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4
++#define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0)
++#define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8)
++#define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16)
++
++#define MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX 0x7b8
++#define MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR BIT(15)
++
++#define MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX 0x7bc
++
++#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG 0x7c0
++#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR BIT(0)
++#define MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR BIT(16)
++
+ #define MTK_WED_WDMA_RING_TX 0x800
+
+ #define MTK_WED_WDMA_TX_MIB 0x810
+@@ -293,6 +332,18 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
+ #define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
+
++#define MTK_WED_WDMA_RX_PREF_CFG 0x950
++#define MTK_WED_WDMA_RX_PREF_EN BIT(0)
++#define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8)
++#define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16)
++#define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24)
++#define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25)
++#define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26)
++
++#define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C
++#define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0)
++#define MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR BIT(16)
++
+ #define MTK_WED_WDMA_GLO_CFG 0xa04
+ #define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
+ #define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
+@@ -325,6 +376,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
+
+ #define MTK_WED_WDMA_INT_CTRL 0xa2c
++#define MTK_WED_WDMA_INT_POLL_PRD GENMASK(7, 0)
+ #define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
+
+ #define MTK_WED_WDMA_CFG_BASE 0xaa0
+@@ -388,6 +440,18 @@ struct mtk_wdma_desc {
+ #define MTK_WDMA_INT_GRP1 0x250
+ #define MTK_WDMA_INT_GRP2 0x254
+
++#define MTK_WDMA_PREF_TX_CFG 0x2d0
++#define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0)
++
++#define MTK_WDMA_PREF_RX_CFG 0x2dc
++#define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0)
++
++#define MTK_WDMA_WRBK_TX_CFG 0x300
++#define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30)
++
++#define MTK_WDMA_WRBK_RX_CFG 0x344
++#define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30)
++
+ #define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
+ #define MTK_PCIE_MIRROR_MAP_EN BIT(0)
+ #define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
+@@ -401,6 +465,30 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
+ #define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
+
++#define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c
++#define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28
++#define MTK_WED_RTQM_IGRS0_I2H_PKT_CNT(_n) (0xb2c + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS0_FDROP_CNT 0xb34
++
++#define MTK_WED_RTQM_IGRS1_I2HW_DMAD_CNT 0xb44
++#define MTK_WED_RTQM_IGRS1_I2H_DMAD_CNT(_n) (0xb48 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS1_I2HW_PKT_CNT 0xb50
++#define MTK_WED_RTQM_IGRS1_I2H_PKT_CNT(_n) (0xb54 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS1_FDROP_CNT 0xb5c
++
++#define MTK_WED_RTQM_IGRS2_I2HW_DMAD_CNT 0xb6c
++#define MTK_WED_RTQM_IGRS2_I2H_DMAD_CNT(_n) (0xb70 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS2_I2HW_PKT_CNT 0xb78
++#define MTK_WED_RTQM_IGRS2_I2H_PKT_CNT(_n) (0xb7c + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS2_FDROP_CNT 0xb84
++
++#define MTK_WED_RTQM_IGRS3_I2HW_DMAD_CNT 0xb94
++#define MTK_WED_RTQM_IGRS3_I2H_DMAD_CNT(_n) (0xb98 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS3_I2HW_PKT_CNT 0xba0
++#define MTK_WED_RTQM_IGRS3_I2H_PKT_CNT(_n) (0xba4 + (_n) * 0x4)
++#define MTK_WED_RTQM_IGRS3_FDROP_CNT 0xbac
++
+ #define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
+ #define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
+ #define MTK_WED_RTQM_Q2N_MIB 0xb80
+@@ -409,6 +497,24 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RTQM_Q2B_MIB 0xb8c
+ #define MTK_WED_RTQM_PFDBK_MIB 0xb90
+
++#define MTK_WED_RTQM_ENQ_CFG0 0xbb8
++#define MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT GENMASK(15, 12)
++
++#define MTK_WED_RTQM_FDROP_MIB 0xb84
++#define MTK_WED_RTQM_ENQ_I2Q_DMAD_CNT 0xbbc
++#define MTK_WED_RTQM_ENQ_I2N_DMAD_CNT 0xbc0
++#define MTK_WED_RTQM_ENQ_I2Q_PKT_CNT 0xbc4
++#define MTK_WED_RTQM_ENQ_I2N_PKT_CNT 0xbc8
++#define MTK_WED_RTQM_ENQ_USED_ENTRY_CNT 0xbcc
++#define MTK_WED_RTQM_ENQ_ERR_CNT 0xbd0
++
++#define MTK_WED_RTQM_DEQ_DMAD_CNT 0xbd8
++#define MTK_WED_RTQM_DEQ_Q2I_DMAD_CNT 0xbdc
++#define MTK_WED_RTQM_DEQ_PKT_CNT 0xbe0
++#define MTK_WED_RTQM_DEQ_Q2I_PKT_CNT 0xbe4
++#define MTK_WED_RTQM_DEQ_USED_PFDBK_CNT 0xbe8
++#define MTK_WED_RTQM_DEQ_ERR_CNT 0xbec
++
+ #define MTK_WED_RROQM_GLO_CFG 0xc04
+ #define MTK_WED_RROQM_RST_IDX 0xc08
+ #define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
+@@ -458,7 +564,116 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RX_BM_INTF 0xd9c
+ #define MTK_WED_RX_BM_ERR_STS 0xda8
+
++#define MTK_RRO_IND_CMD_SIGNATURE 0xe00
++#define MTK_RRO_IND_CMD_DMA_IDX GENMASK(11, 0)
++#define MTK_RRO_IND_CMD_MAGIC_CNT GENMASK(30, 28)
++
++#define MTK_WED_IND_CMD_RX_CTRL0 0xe04
++#define MTK_WED_IND_CMD_PROC_IDX GENMASK(11, 0)
++#define MTK_WED_IND_CMD_PREFETCH_FREE_CNT GENMASK(19, 16)
++#define MTK_WED_IND_CMD_MAGIC_CNT GENMASK(30, 28)
++
++#define MTK_WED_IND_CMD_RX_CTRL1 0xe08
++#define MTK_WED_IND_CMD_RX_CTRL2 0xe0c
++#define MTK_WED_IND_CMD_MAX_CNT GENMASK(11, 0)
++#define MTK_WED_IND_CMD_BASE_M GENMASK(19, 16)
++
++#define MTK_WED_RRO_CFG0 0xe10
++#define MTK_WED_RRO_CFG1 0xe14
++#define MTK_WED_RRO_CFG1_MAX_WIN_SZ GENMASK(31, 29)
++#define MTK_WED_RRO_CFG1_ACK_SN_BASE_M GENMASK(19, 16)
++#define MTK_WED_RRO_CFG1_PARTICL_SE_ID GENMASK(11, 0)
++
++#define MTK_WED_ADDR_ELEM_CFG0 0xe18
++#define MTK_WED_ADDR_ELEM_CFG1 0xe1c
++#define MTK_WED_ADDR_ELEM_PREFETCH_FREE_CNT GENMASK(19, 16)
++
++#define MTK_WED_ADDR_ELEM_TBL_CFG 0xe20
++#define MTK_WED_ADDR_ELEM_TBL_OFFSET GENMASK(6, 0)
++#define MTK_WED_ADDR_ELEM_TBL_RD_RDY BIT(28)
++#define MTK_WED_ADDR_ELEM_TBL_WR_RDY BIT(29)
++#define MTK_WED_ADDR_ELEM_TBL_RD BIT(30)
++#define MTK_WED_ADDR_ELEM_TBL_WR BIT(31)
++
++#define MTK_WED_RADDR_ELEM_TBL_WDATA 0xe24
++#define MTK_WED_RADDR_ELEM_TBL_RDATA 0xe28
++
++#define MTK_WED_PN_CHECK_CFG 0xe30
++#define MTK_WED_PN_CHECK_SE_ID GENMASK(11, 0)
++#define MTK_WED_PN_CHECK_RD_RDY BIT(28)
++#define MTK_WED_PN_CHECK_WR_RDY BIT(29)
++#define MTK_WED_PN_CHECK_RD BIT(30)
++#define MTK_WED_PN_CHECK_WR BIT(31)
++
++#define MTK_WED_PN_CHECK_WDATA_M 0xe38
++#define MTK_WED_PN_CHECK_IS_FIRST BIT(17)
++
++#define MTK_WED_RRO_MSDU_PG_RING_CFG(_n) (0xe44 + (_n) * 0x8)
++
++#define MTK_WED_RRO_MSDU_PG_RING2_CFG 0xe58
++#define MTK_WED_RRO_MSDU_PG_DRV_CLR BIT(26)
++#define MTK_WED_RRO_MSDU_PG_DRV_EN BIT(31)
++
++#define MTK_WED_RRO_MSDU_PG_CTRL0(_n) (0xe5c + (_n) * 0xc)
++#define MTK_WED_RRO_MSDU_PG_CTRL1(_n) (0xe60 + (_n) * 0xc)
++#define MTK_WED_RRO_MSDU_PG_CTRL2(_n) (0xe64 + (_n) * 0xc)
++
++#define MTK_WED_RRO_RX_D_RX(_n) (0xe80 + (_n) * 0x10)
++
++#define MTK_WED_RRO_RX_MAGIC_CNT BIT(13)
++
++#define MTK_WED_RRO_RX_D_CFG(_n) (0xea0 + (_n) * 0x4)
++#define MTK_WED_RRO_RX_D_DRV_CLR BIT(26)
++#define MTK_WED_RRO_RX_D_DRV_EN BIT(31)
++
++#define MTK_WED_RRO_PG_BM_RX_DMAM 0xeb0
++#define MTK_WED_RRO_PG_BM_RX_SDL0 GENMASK(13, 0)
++
++#define MTK_WED_RRO_PG_BM_BASE 0xeb4
++#define MTK_WED_RRO_PG_BM_INIT_PTR 0xeb8
++#define MTK_WED_RRO_PG_BM_SW_TAIL_IDX GENMASK(15, 0)
++#define MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX BIT(16)
++
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX 0xeec
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN BIT(0)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR BIT(1)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG GENMASK(6, 2)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN BIT(8)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR BIT(9)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG GENMASK(14, 10)
++
++#define MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG 0xef4
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN BIT(0)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR BIT(1)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG GENMASK(6, 2)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN BIT(8)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR BIT(9)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG GENMASK(14, 10)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN BIT(16)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17)
++#define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18)
++
++#define MTK_WED_RX_IND_CMD_CNT0 0xf20
++#define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31)
++
++#define MTK_WED_RX_IND_CMD_CNT(_n) (0xf20 + (_n) * 0x4)
++#define MTK_WED_IND_CMD_MAGIC_CNT_FAIL_CNT GENMASK(15, 0)
++
++#define MTK_WED_RX_ADDR_ELEM_CNT(_n) (0xf48 + (_n) * 0x4)
++#define MTK_WED_ADDR_ELEM_SIG_FAIL_CNT GENMASK(15, 0)
++#define MTK_WED_ADDR_ELEM_FIRST_SIG_FAIL_CNT GENMASK(31, 16)
++#define MTK_WED_ADDR_ELEM_ACKSN_CNT GENMASK(27, 0)
++
++#define MTK_WED_RX_MSDU_PG_CNT(_n) (0xf5c + (_n) * 0x4)
++
++#define MTK_WED_RX_PN_CHK_CNT 0xf70
++#define MTK_WED_PN_CHK_FAIL_CNT GENMASK(15, 0)
++
+ #define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
+ #define MTK_WED_PCIE_INT_MASK 0x0
+
++#define MTK_WED_PCIE_BASE 0x11280000
++#define MTK_WED_PCIE_BASE0 0x11300000
++#define MTK_WED_PCIE_BASE1 0x11310000
++#define MTK_WED_PCIE_BASE2 0x11290000
+ #endif
+--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+@@ -91,6 +91,8 @@ enum mtk_wed_dummy_cr_idx {
+ #define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
+ #define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin"
+ #define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin"
++#define MT7988_FIRMWARE_WO0 "mediatek/mt7988_wo_0.bin"
++#define MT7988_FIRMWARE_WO1 "mediatek/mt7988_wo_1.bin"
+
+ #define MTK_WO_MCU_CFG_LS_BASE 0
+ #define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000)
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -139,6 +139,8 @@ struct mtk_wed_device {
+ u32 wpdma_rx;
+
+ bool wcid_512;
++ bool hw_rro;
++ bool msi;
+
+ u16 token_start;
+ unsigned int nbuf;
+@@ -212,10 +214,12 @@ mtk_wed_device_attach(struct mtk_wed_dev
+ return ret;
+ }
+
+-static inline bool
+-mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
++static inline bool mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
+ {
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
++ if (dev->version == 3)
++ return dev->wlan.hw_rro;
++
+ return dev->version != 1;
+ #else
+ return false;
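The hunk above gates RX-offload capability on the hw_rro flag for version 3 hardware, while v2 keeps it unconditional and v1 never has it. A minimal userspace sketch of that version dispatch, using a simplified stand-in struct (names and layout here are illustrative assumptions, not the driver's types):

#include <stdbool.h>
#include <stdio.h>

struct wed_dev_model {
	int version;   /* 1, 2 or 3 */
	bool hw_rro;   /* models the wlan.hw_rro flag on v3 hardware */
};

static bool model_get_rx_capa(const struct wed_dev_model *dev)
{
	if (dev->version == 3)
		return dev->hw_rro;    /* v3: RX offload only with hw RRO */
	return dev->version != 1;      /* v2: yes, v1: no */
}

int main(void)
{
	struct wed_dev_model v3_no_rro = { .version = 3, .hw_rro = false };
	struct wed_dev_model v2 = { .version = 2, .hw_rro = false };

	printf("v3 without hw_rro: %d\n", model_get_rx_capa(&v3_no_rro)); /* 0 */
	printf("v2:                %d\n", model_get_rx_capa(&v2));        /* 1 */
	return 0;
}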
diff --git a/target/linux/generic/backport-6.1/752-15-v6.7-net-ethernet-mtk_wed-refactor-mtk_wed_check_wfdma_rx.patch b/target/linux/generic/backport-6.1/752-15-v6.7-net-ethernet-mtk_wed-refactor-mtk_wed_check_wfdma_rx.patch
new file mode 100644
index 0000000000..e91ae69d08
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-15-v6.7-net-ethernet-mtk_wed-refactor-mtk_wed_check_wfdma_rx.patch
@@ -0,0 +1,95 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:14 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: refactor mtk_wed_check_wfdma_rx_fill
+ routine
+
+Refactor mtk_wed_check_wfdma_rx_fill() so it can be reused when adding HW
+receive offload support for the MT7988 SoC.
+
+Co-developed-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -585,22 +585,15 @@ mtk_wed_set_512_support(struct mtk_wed_d
+ }
+ }
+
+-#define MTK_WFMDA_RX_DMA_EN BIT(2)
+-static void
+-mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
++static int
++mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev,
++ struct mtk_wed_ring *ring)
+ {
+- u32 val;
+ int i;
+
+- if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
+- return; /* queue is not configured by mt76 */
+-
+ for (i = 0; i < 3; i++) {
+- u32 cur_idx;
++ u32 cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX);
+
+- cur_idx = wed_r32(dev,
+- MTK_WED_WPDMA_RING_RX_DATA(idx) +
+- MTK_WED_RING_OFS_CPU_IDX);
+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
+ break;
+
+@@ -609,12 +602,10 @@ mtk_wed_check_wfdma_rx_fill(struct mtk_w
+
+ if (i == 3) {
+ dev_err(dev->hw->dev, "rx dma enable failed\n");
+- return;
++ return -ETIMEDOUT;
+ }
+
+- val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
+- MTK_WFMDA_RX_DMA_EN;
+- wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
++ return 0;
+ }
+
+ static void
+@@ -1545,6 +1536,7 @@ mtk_wed_configure_irq(struct mtk_wed_dev
+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+ }
+
++#define MTK_WFMDA_RX_DMA_EN BIT(2)
+ static void
+ mtk_wed_dma_enable(struct mtk_wed_device *dev)
+ {
+@@ -1632,8 +1624,26 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
+ }
+
+- for (i = 0; i < MTK_WED_RX_QUEUES; i++)
+- mtk_wed_check_wfdma_rx_fill(dev, i);
++ for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
++ struct mtk_wed_ring *ring = &dev->rx_ring[i];
++ u32 val;
++
++ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
++ continue; /* queue is not configured by mt76 */
++
++ if (mtk_wed_check_wfdma_rx_fill(dev, ring)) {
++ dev_err(dev->hw->dev,
++ "rx_ring(%d) dma enable failed\n", i);
++ continue;
++ }
++
++ val = wifi_r32(dev,
++ dev->wlan.wpdma_rx_glo -
++ dev->wlan.phy_base) | MTK_WFMDA_RX_DMA_EN;
++ wifi_w32(dev,
++ dev->wlan.wpdma_rx_glo - dev->wlan.phy_base,
++ val);
++ }
+ }
+
+ static void
diff --git a/target/linux/generic/backport-6.1/752-16-v6.7-net-ethernet-mtk_wed-introduce-partial-AMSDU-offload.patch b/target/linux/generic/backport-6.1/752-16-v6.7-net-ethernet-mtk_wed-introduce-partial-AMSDU-offload.patch
new file mode 100644
index 0000000000..6534d73d8e
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-16-v6.7-net-ethernet-mtk_wed-introduce-partial-AMSDU-offload.patch
@@ -0,0 +1,465 @@
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:15 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce partial AMSDU offload
+ support for MT7988
+
+Introduce partial AMSDU offload support for the MT7988 SoC in order to
+merge, in hardware, packets belonging to the same AMSDU before passing
+them to the WLAN NIC.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
+@@ -438,7 +438,8 @@ int mtk_foe_entry_set_pppoe(struct mtk_e
+ }
+
+ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+- int wdma_idx, int txq, int bss, int wcid)
++ int wdma_idx, int txq, int bss, int wcid,
++ bool amsdu_en)
+ {
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+@@ -450,6 +451,7 @@ int mtk_foe_entry_set_wdma(struct mtk_et
+ MTK_FOE_IB2_WDMA_WINFO_V2;
+ l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
+ FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
++ l2->amsdu = FIELD_PREP(MTK_FOE_WINFO_AMSDU_EN, amsdu_en);
+ break;
+ case 2:
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
+--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
++++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
+@@ -88,13 +88,13 @@ enum {
+ #define MTK_FOE_WINFO_BSS_V3 GENMASK(23, 16)
+ #define MTK_FOE_WINFO_WCID_V3 GENMASK(15, 0)
+
+-#define MTK_FOE_WINFO_PAO_USR_INFO GENMASK(15, 0)
+-#define MTK_FOE_WINFO_PAO_TID GENMASK(19, 16)
+-#define MTK_FOE_WINFO_PAO_IS_FIXEDRATE BIT(20)
+-#define MTK_FOE_WINFO_PAO_IS_PRIOR BIT(21)
+-#define MTK_FOE_WINFO_PAO_IS_SP BIT(22)
+-#define MTK_FOE_WINFO_PAO_HF BIT(23)
+-#define MTK_FOE_WINFO_PAO_AMSDU_EN BIT(24)
++#define MTK_FOE_WINFO_AMSDU_USR_INFO GENMASK(15, 0)
++#define MTK_FOE_WINFO_AMSDU_TID GENMASK(19, 16)
++#define MTK_FOE_WINFO_AMSDU_IS_FIXEDRATE BIT(20)
++#define MTK_FOE_WINFO_AMSDU_IS_PRIOR BIT(21)
++#define MTK_FOE_WINFO_AMSDU_IS_SP BIT(22)
++#define MTK_FOE_WINFO_AMSDU_HF BIT(23)
++#define MTK_FOE_WINFO_AMSDU_EN BIT(24)
+
+ enum {
+ MTK_FOE_STATE_INVALID,
+@@ -123,7 +123,7 @@ struct mtk_foe_mac_info {
+
+ /* netsys_v3 */
+ u32 w3info;
+- u32 wpao;
++ u32 amsdu;
+ };
+
+ /* software-only entry type */
+@@ -394,7 +394,8 @@ int mtk_foe_entry_set_vlan(struct mtk_et
+ int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid);
+ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+- int wdma_idx, int txq, int bss, int wcid);
++ int wdma_idx, int txq, int bss, int wcid,
++ bool amsdu_en);
+ int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ unsigned int queue);
+ int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
++++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+@@ -111,6 +111,7 @@ mtk_flow_get_wdma_info(struct net_device
+ info->queue = path->mtk_wdma.queue;
+ info->bss = path->mtk_wdma.bss;
+ info->wcid = path->mtk_wdma.wcid;
++ info->amsdu = path->mtk_wdma.amsdu;
+
+ return 0;
+ }
+@@ -192,7 +193,7 @@ mtk_flow_set_output_device(struct mtk_et
+
+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+ mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
+- info.bss, info.wcid);
++ info.bss, info.wcid, info.amsdu);
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ switch (info.wdma_idx) {
+ case 0:
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -29,6 +29,8 @@
+ #define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
+ #define MTK_WED_RX_RING_SIZE 1536
+ #define MTK_WED_RX_PG_BM_CNT 8192
++#define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4)
++#define MTK_WED_AMSDU_NPAGES 32
+
+ #define MTK_WED_TX_RING_SIZE 2048
+ #define MTK_WED_WDMA_RING_SIZE 1024
+@@ -172,6 +174,23 @@ mtk_wdma_rx_reset(struct mtk_wed_device
+ return ret;
+ }
+
++static u32
++mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
++{
++ return !!(wed_r32(dev, reg) & mask);
++}
++
++static int
++mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
++{
++ int sleep = 15000;
++ int timeout = 100 * sleep;
++ u32 val;
++
++ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
++ timeout, false, dev, reg, mask);
++}
++
+ static void
+ mtk_wdma_tx_reset(struct mtk_wed_device *dev)
+ {
+@@ -335,6 +354,118 @@ out:
+ }
+
+ static int
++mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev)
++{
++ struct mtk_wed_hw *hw = dev->hw;
++ struct mtk_wed_amsdu *wed_amsdu;
++ int i;
++
++ if (!mtk_wed_is_v3_or_greater(hw))
++ return 0;
++
++ wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES,
++ sizeof(*wed_amsdu), GFP_KERNEL);
++ if (!wed_amsdu)
++ return -ENOMEM;
++
++ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
++ void *ptr;
++
++ /* each segment is 64K */
++ ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
++ __GFP_ZERO | __GFP_COMP |
++ GFP_DMA32,
++ get_order(MTK_WED_AMSDU_BUF_SIZE));
++ if (!ptr)
++ goto error;
++
++ wed_amsdu[i].txd = ptr;
++ wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr,
++ MTK_WED_AMSDU_BUF_SIZE,
++ DMA_TO_DEVICE);
++ if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy))
++ goto error;
++ }
++ dev->hw->wed_amsdu = wed_amsdu;
++
++ return 0;
++
++error:
++ for (i--; i >= 0; i--)
++ dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy,
++ MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
++ return -ENOMEM;
++}
++
++static void
++mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev)
++{
++ struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
++ int i;
++
++ if (!wed_amsdu)
++ return;
++
++ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
++ dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy,
++ MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
++ free_pages((unsigned long)wed_amsdu[i].txd,
++ get_order(MTK_WED_AMSDU_BUF_SIZE));
++ }
++}
++
++static int
++mtk_wed_amsdu_init(struct mtk_wed_device *dev)
++{
++ struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
++ int i, ret;
++
++ if (!wed_amsdu)
++ return 0;
++
++ for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++)
++ wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i),
++ wed_amsdu[i].txd_phy);
++
++ /* init all sta parameter */
++ wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL |
++ MTK_WED_AMSDU_STA_WTBL_HDRT_MODE |
++ FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN,
++ dev->wlan.amsdu_max_len >> 8) |
++ FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM,
++ dev->wlan.amsdu_max_subframes));
++
++ wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT);
++
++ ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO,
++ MTK_WED_AMSDU_STA_INFO_DO_INIT);
++ if (ret) {
++ dev_err(dev->hw->dev, "amsdu initialization failed\n");
++ return ret;
++ }
++
++ /* init partial amsdu offload txd src */
++ wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG,
++ FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index));
++
++ /* init qmem */
++ wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET);
++ ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29));
++ if (ret) {
++ pr_info("%s: amsdu qmem initialization failed\n", __func__);
++ return ret;
++ }
++
++ /* eagle E1 PCIE1 tx ring 22 flow control issue */
++ if (dev->wlan.id == 0x7991)
++ wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING);
++
++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
++
++ return 0;
++}
++
++static int
+ mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
+ {
+ u32 desc_size = dev->hw->soc->tx_ring_desc_size;
+@@ -708,6 +839,7 @@ __mtk_wed_detach(struct mtk_wed_device *
+
+ mtk_wdma_rx_reset(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
++ mtk_wed_amsdu_free_buffer(dev);
+ mtk_wed_free_tx_buffer(dev);
+ mtk_wed_free_tx_rings(dev);
+
+@@ -1128,23 +1260,6 @@ mtk_wed_ring_reset(struct mtk_wed_ring *
+ }
+ }
+
+-static u32
+-mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+-{
+- return !!(wed_r32(dev, reg) & mask);
+-}
+-
+-static int
+-mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+-{
+- int sleep = 15000;
+- int timeout = 100 * sleep;
+- u32 val;
+-
+- return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+- timeout, false, dev, reg, mask);
+-}
+-
+ static int
+ mtk_wed_rx_reset(struct mtk_wed_device *dev)
+ {
+@@ -1691,6 +1806,7 @@ mtk_wed_start(struct mtk_wed_device *dev
+ }
+
+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
++ mtk_wed_amsdu_init(dev);
+
+ mtk_wed_dma_enable(dev);
+ dev->running = true;
+@@ -1747,6 +1863,10 @@ mtk_wed_attach(struct mtk_wed_device *de
+ if (ret)
+ goto out;
+
++ ret = mtk_wed_amsdu_buffer_alloc(dev);
++ if (ret)
++ goto out;
++
+ if (mtk_wed_get_rx_capa(dev)) {
+ ret = mtk_wed_rro_alloc(dev);
+ if (ret)
+--- a/drivers/net/ethernet/mediatek/mtk_wed.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed.h
+@@ -25,6 +25,11 @@ struct mtk_wed_soc_data {
+ u32 wdma_desc_size;
+ };
+
++struct mtk_wed_amsdu {
++ void *txd;
++ dma_addr_t txd_phy;
++};
++
+ struct mtk_wed_hw {
+ const struct mtk_wed_soc_data *soc;
+ struct device_node *node;
+@@ -38,6 +43,7 @@ struct mtk_wed_hw {
+ struct dentry *debugfs_dir;
+ struct mtk_wed_device *wed_dev;
+ struct mtk_wed_wo *wed_wo;
++ struct mtk_wed_amsdu *wed_amsdu;
+ u32 pcie_base;
+ u32 debugfs_reg;
+ u32 num_flows;
+@@ -52,6 +58,7 @@ struct mtk_wdma_info {
+ u8 queue;
+ u16 wcid;
+ u8 bss;
++ u8 amsdu;
+ };
+
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -672,6 +672,82 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
+ #define MTK_WED_PCIE_INT_MASK 0x0
+
++#define MTK_WED_AMSDU_FIFO 0x1800
++#define MTK_WED_AMSDU_IS_PRIOR0_RING BIT(10)
++
++#define MTK_WED_AMSDU_STA_INFO 0x01810
++#define MTK_WED_AMSDU_STA_INFO_DO_INIT BIT(0)
++#define MTK_WED_AMSDU_STA_INFO_SET_INIT BIT(1)
++
++#define MTK_WED_AMSDU_STA_INFO_INIT 0x01814
++#define MTK_WED_AMSDU_STA_WTBL_HDRT_MODE BIT(0)
++#define MTK_WED_AMSDU_STA_RMVL BIT(1)
++#define MTK_WED_AMSDU_STA_MAX_AMSDU_LEN GENMASK(7, 2)
++#define MTK_WED_AMSDU_STA_MAX_AMSDU_NUM GENMASK(11, 8)
++
++#define MTK_WED_AMSDU_HIFTXD_BASE_L(_n) (0x1980 + (_n) * 0x4)
++
++#define MTK_WED_AMSDU_PSE 0x1910
++#define MTK_WED_AMSDU_PSE_RESET BIT(16)
++
++#define MTK_WED_AMSDU_HIFTXD_CFG 0x1968
++#define MTK_WED_AMSDU_HIFTXD_SRC GENMASK(16, 15)
++
++#define MTK_WED_MON_AMSDU_FIFO_DMAD 0x1a34
++
++#define MTK_WED_MON_AMSDU_ENG_DMAD(_n) (0x1a80 + (_n) * 0x50)
++#define MTK_WED_MON_AMSDU_ENG_QFPL(_n) (0x1a84 + (_n) * 0x50)
++#define MTK_WED_MON_AMSDU_ENG_QENI(_n) (0x1a88 + (_n) * 0x50)
++#define MTK_WED_MON_AMSDU_ENG_QENO(_n) (0x1a8c + (_n) * 0x50)
++#define MTK_WED_MON_AMSDU_ENG_MERG(_n) (0x1a90 + (_n) * 0x50)
++
++#define MTK_WED_MON_AMSDU_ENG_CNT8(_n) (0x1a94 + (_n) * 0x50)
++#define MTK_WED_AMSDU_ENG_MAX_QGPP_CNT GENMASK(10, 0)
++#define MTK_WED_AMSDU_ENG_MAX_PL_CNT GENMASK(27, 16)
++
++#define MTK_WED_MON_AMSDU_ENG_CNT9(_n) (0x1a98 + (_n) * 0x50)
++#define MTK_WED_AMSDU_ENG_CUR_ENTRY GENMASK(10, 0)
++#define MTK_WED_AMSDU_ENG_MAX_BUF_MERGED GENMASK(20, 16)
++#define MTK_WED_AMSDU_ENG_MAX_MSDU_MERGED GENMASK(28, 24)
++
++#define MTK_WED_MON_AMSDU_QMEM_STS1 0x1e04
++
++#define MTK_WED_MON_AMSDU_QMEM_CNT(_n) (0x1e0c + (_n) * 0x4)
++#define MTK_WED_AMSDU_QMEM_FQ_CNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_SP_QCNT GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID0_QCNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID1_QCNT GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID2_QCNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID3_QCNT GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID4_QCNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID5_QCNT GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID6_QCNT GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID7_QCNT GENMASK(11, 0)
++
++#define MTK_WED_MON_AMSDU_QMEM_PTR(_n) (0x1e20 + (_n) * 0x4)
++#define MTK_WED_AMSDU_QMEM_FQ_HEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_SP_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID0_QHEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID1_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID2_QHEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID3_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID4_QHEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID5_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID6_QHEAD GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID7_QHEAD GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_FQ_TAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_SP_QTAIL GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID0_QTAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID1_QTAIL GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID2_QTAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID3_QTAIL GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID4_QTAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID5_QTAIL GENMASK(11, 0)
++#define MTK_WED_AMSDU_QMEM_TID6_QTAIL GENMASK(27, 16)
++#define MTK_WED_AMSDU_QMEM_TID7_QTAIL GENMASK(11, 0)
++
++#define MTK_WED_MON_AMSDU_HIFTXD_FETCH_MSDU(_n) (0x1ec4 + (_n) * 0x4)
++
+ #define MTK_WED_PCIE_BASE 0x11280000
+ #define MTK_WED_PCIE_BASE0 0x11300000
+ #define MTK_WED_PCIE_BASE1 0x11310000
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -928,6 +928,7 @@ struct net_device_path {
+ u8 queue;
+ u16 wcid;
+ u8 bss;
++ u8 amsdu;
+ } mtk_wdma;
+ };
+ };
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -129,6 +129,7 @@ struct mtk_wed_device {
+ enum mtk_wed_bus_tye bus_type;
+ void __iomem *base;
+ u32 phy_base;
++ u32 id;
+
+ u32 wpdma_phys;
+ u32 wpdma_int;
+@@ -147,10 +148,12 @@ struct mtk_wed_device {
+ unsigned int rx_nbuf;
+ unsigned int rx_npkt;
+ unsigned int rx_size;
++ unsigned int amsdu_max_len;
+
+ u8 tx_tbit[MTK_WED_TX_QUEUES];
+ u8 rx_tbit[MTK_WED_RX_QUEUES];
+ u8 txfree_tbit;
++ u8 amsdu_max_subframes;
+
+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+ int (*offload_enable)(struct mtk_wed_device *wed);
+@@ -224,6 +227,15 @@ static inline bool mtk_wed_get_rx_capa(s
+ #else
+ return false;
+ #endif
++}
++
++static inline bool mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev)
++{
++#ifdef CONFIG_NET_MEDIATEK_SOC_WED
++ return dev->version == 3;
++#else
++ return false;
++#endif
+ }
+
+ #ifdef CONFIG_NET_MEDIATEK_SOC_WED
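mtk_wed_amsdu_buffer_alloc() above allocates 32 large TXD buffers and, on the first failure, unwinds the mappings it already created in reverse order. A minimal userspace sketch of that allocate-or-unwind pattern using plain heap allocations (sizes and names are assumptions; the real code uses page allocations and DMA mappings):

#include <stdio.h>
#include <stdlib.h>

#define NPAGES   32            /* models MTK_WED_AMSDU_NPAGES */
#define BUF_SIZE (64 * 1024)   /* models MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4) */

struct amsdu_buf {
	void *txd;
};

static int amsdu_buffers_alloc(struct amsdu_buf *bufs)
{
	int i;

	for (i = 0; i < NPAGES; i++) {
		bufs[i].txd = calloc(1, BUF_SIZE);
		if (!bufs[i].txd)
			goto error;
	}
	return 0;

error:
	for (i--; i >= 0; i--)   /* release what was already allocated */
		free(bufs[i].txd);
	return -1;
}

int main(void)
{
	struct amsdu_buf bufs[NPAGES] = { { 0 } };
	int i;

	if (amsdu_buffers_alloc(bufs))
		return 1;
	printf("allocated %d buffers of %d bytes each\n", NPAGES, BUF_SIZE);
	for (i = 0; i < NPAGES; i++)
		free(bufs[i].txd);
	return 0;
}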
diff --git a/target/linux/generic/backport-6.1/752-17-v6.7-net-ethernet-mtk_wed-introduce-hw_rro-support-for-MT.patch b/target/linux/generic/backport-6.1/752-17-v6.7-net-ethernet-mtk_wed-introduce-hw_rro-support-for-MT.patch
new file mode 100644
index 0000000000..0cf4c18875
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-17-v6.7-net-ethernet-mtk_wed-introduce-hw_rro-support-for-MT.patch
@@ -0,0 +1,483 @@
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:16 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: introduce hw_rro support for MT7988
+
+The MT7988 SoC supports 802.11 receive reordering offload in hardware,
+while the MT7986 SoC implements it through the firmware running on the MCU.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -26,7 +26,7 @@
+ #define MTK_WED_BUF_SIZE 2048
+ #define MTK_WED_PAGE_BUF_SIZE 128
+ #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
+-#define MTK_WED_RX_PAGE_BUF_PER_PAGE (PAGE_SIZE / 128)
++#define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE)
+ #define MTK_WED_RX_RING_SIZE 1536
+ #define MTK_WED_RX_PG_BM_CNT 8192
+ #define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4)
+@@ -596,6 +596,68 @@ free_pagelist:
+ }
+
+ static int
++mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
++{
++ int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE;
++ struct mtk_wed_buf *page_list;
++ struct mtk_wed_bm_desc *desc;
++ dma_addr_t desc_phys;
++ int i, page_idx = 0;
++
++ if (!dev->wlan.hw_rro)
++ return 0;
++
++ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
++ if (!page_list)
++ return -ENOMEM;
++
++ dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
++ dev->hw_rro.pages = page_list;
++ desc = dma_alloc_coherent(dev->hw->dev,
++ dev->wlan.rx_nbuf * sizeof(*desc),
++ &desc_phys, GFP_KERNEL);
++ if (!desc)
++ return -ENOMEM;
++
++ dev->hw_rro.desc = desc;
++ dev->hw_rro.desc_phys = desc_phys;
++
++ for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
++ dma_addr_t page_phys, buf_phys;
++ struct page *page;
++ int s;
++
++ page = __dev_alloc_page(GFP_KERNEL);
++ if (!page)
++ return -ENOMEM;
++
++ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
++ DMA_BIDIRECTIONAL);
++ if (dma_mapping_error(dev->hw->dev, page_phys)) {
++ __free_page(page);
++ return -ENOMEM;
++ }
++
++ page_list[page_idx].p = page;
++ page_list[page_idx++].phy_addr = page_phys;
++ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
++ DMA_BIDIRECTIONAL);
++
++ buf_phys = page_phys;
++ for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
++ desc->buf0 = cpu_to_le32(buf_phys);
++ buf_phys += MTK_WED_PAGE_BUF_SIZE;
++ desc++;
++ }
++
++ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
++ DMA_BIDIRECTIONAL);
++ }
++
++ return 0;
++}
++
++static int
+ mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
+ {
+ struct mtk_wed_bm_desc *desc;
+@@ -612,7 +674,42 @@ mtk_wed_rx_buffer_alloc(struct mtk_wed_d
+ dev->rx_buf_ring.desc_phys = desc_phys;
+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
+
+- return 0;
++ return mtk_wed_hwrro_buffer_alloc(dev);
++}
++
++static void
++mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev)
++{
++ struct mtk_wed_buf *page_list = dev->hw_rro.pages;
++ struct mtk_wed_bm_desc *desc = dev->hw_rro.desc;
++ int i, page_idx = 0;
++
++ if (!dev->wlan.hw_rro)
++ return;
++
++ if (!page_list)
++ return;
++
++ if (!desc)
++ goto free_pagelist;
++
++ for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
++ dma_addr_t buf_addr = page_list[page_idx].phy_addr;
++ void *page = page_list[page_idx++].p;
++
++ if (!page)
++ break;
++
++ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
++ DMA_BIDIRECTIONAL);
++ __free_page(page);
++ }
++
++ dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc),
++ desc, dev->hw_rro.desc_phys);
++
++free_pagelist:
++ kfree(page_list);
+ }
+
+ static void
+@@ -626,6 +723,28 @@ mtk_wed_free_rx_buffer(struct mtk_wed_de
+ dev->wlan.release_rx_buf(dev);
+ dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
+ desc, dev->rx_buf_ring.desc_phys);
++
++ mtk_wed_hwrro_free_buffer(dev);
++}
++
++static void
++mtk_wed_hwrro_init(struct mtk_wed_device *dev)
++{
++ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
++ return;
++
++ wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
++ FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));
++
++ wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys);
++
++ wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
++ MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
++ FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
++ MTK_WED_RX_PG_BM_CNT));
++
++ /* enable rx_page_bm to fetch dmad */
++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
+ }
+
+ static void
+@@ -639,6 +758,8 @@ mtk_wed_rx_buffer_hw_init(struct mtk_wed
+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
++
++ mtk_wed_hwrro_init(dev);
+ }
+
+ static void
+@@ -934,6 +1055,8 @@ mtk_wed_bus_init(struct mtk_wed_device *
+ static void
+ mtk_wed_set_wpdma(struct mtk_wed_device *dev)
+ {
++ int i;
++
+ if (mtk_wed_is_v1(dev->hw)) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ return;
+@@ -951,6 +1074,15 @@ mtk_wed_set_wpdma(struct mtk_wed_device
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+ wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);
++
++ if (!dev->wlan.hw_rro)
++ return;
++
++ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
++ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1), dev->wlan.wpdma_rx_rro[1]);
++ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
++ dev->wlan.wpdma_rx_pg + i * 0x10);
+ }
+
+ static void
+@@ -1762,6 +1894,165 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ }
+
+ static void
++mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset)
++{
++ int i;
++
++ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
++ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
++
++ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
++ return;
++
++ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_CLR);
++
++ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX,
++ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR |
++ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG,
++ dev->wlan.rro_rx_tbit[0]) |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG,
++ dev->wlan.rro_rx_tbit[1]));
++
++ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG,
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN |
++ MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG,
++ dev->wlan.rx_pg_tbit[0]) |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG,
++ dev->wlan.rx_pg_tbit[1]) |
++ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG,
++ dev->wlan.rx_pg_tbit[2]));
++
++ /* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after
++ * WM FWDL completed, otherwise RRO_MSDU_PG ring may broken
++ */
++ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_EN);
++
++ for (i = 0; i < MTK_WED_RX_QUEUES; i++) {
++ struct mtk_wed_ring *ring = &dev->rx_rro_ring[i];
++
++ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
++ continue;
++
++ if (mtk_wed_check_wfdma_rx_fill(dev, ring))
++ dev_err(dev->hw->dev,
++ "rx_rro_ring(%d) initialization failed\n", i);
++ }
++
++ for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) {
++ struct mtk_wed_ring *ring = &dev->rx_page_ring[i];
++
++ if (!(ring->flags & MTK_WED_RING_CONFIGURED))
++ continue;
++
++ if (mtk_wed_check_wfdma_rx_fill(dev, ring))
++ dev_err(dev->hw->dev,
++ "rx_page_ring(%d) initialization failed\n", i);
++ }
++}
++
++static void
++mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx,
++ void __iomem *regs)
++{
++ struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx];
++
++ ring->wpdma = regs;
++ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE,
++ readl(regs));
++ wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT,
++ readl(regs + MTK_WED_RING_OFS_COUNT));
++ ring->flags |= MTK_WED_RING_CONFIGURED;
++}
++
++static void
++mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs)
++{
++ struct mtk_wed_ring *ring = &dev->rx_page_ring[idx];
++
++ ring->wpdma = regs;
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE,
++ readl(regs));
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT,
++ readl(regs + MTK_WED_RING_OFS_COUNT));
++ ring->flags |= MTK_WED_RING_CONFIGURED;
++}
++
++static int
++mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
++{
++ struct mtk_wed_ring *ring = &dev->ind_cmd_ring;
++ u32 val = readl(regs + MTK_WED_RING_OFS_COUNT);
++ int i, count = 0;
++
++ ring->wpdma = regs;
++ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE,
++ readl(regs) & 0xfffffff0);
++
++ wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT,
++ readl(regs + MTK_WED_RING_OFS_COUNT));
++
++ /* ack sn cr */
++ wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base +
++ dev->wlan.ind_cmd.ack_sn_addr);
++ wed_w32(dev, MTK_WED_RRO_CFG1,
++ FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ,
++ dev->wlan.ind_cmd.win_size) |
++ FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID,
++ dev->wlan.ind_cmd.particular_sid));
++
++ /* particular session addr element */
++ wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0,
++ dev->wlan.ind_cmd.particular_se_phys);
++
++ for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) {
++ wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA,
++ dev->wlan.ind_cmd.addr_elem_phys[i] >> 4);
++ wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG,
++ MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f));
++
++ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
++ while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100)
++ val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG);
++ if (count >= 100)
++ dev_err(dev->hw->dev,
++ "write ba session base failed\n");
++ }
++
++ /* pn check init */
++ for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) {
++ wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M,
++ MTK_WED_PN_CHECK_IS_FIRST);
++
++ wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR |
++ FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i));
++
++ count = 0;
++ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
++ while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100)
++ val = wed_r32(dev, MTK_WED_PN_CHECK_CFG);
++ if (count >= 100)
++ dev_err(dev->hw->dev,
++ "session(%d) initialization failed\n", i);
++ }
++
++ wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN);
++ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
++
++ return 0;
++}
++
++static void
+ mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+ {
+ int i;
+@@ -2215,6 +2506,10 @@ void mtk_wed_add_hw(struct device_node *
+ .detach = mtk_wed_detach,
+ .ppe_check = mtk_wed_ppe_check,
+ .setup_tc = mtk_wed_setup_tc,
++ .start_hw_rro = mtk_wed_start_hw_rro,
++ .rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
++ .msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
++ .ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
+ };
+ struct device_node *eth_np = eth->dev->of_node;
+ struct platform_device *pdev;
+--- a/include/linux/soc/mediatek/mtk_wed.h
++++ b/include/linux/soc/mediatek/mtk_wed.h
+@@ -10,6 +10,7 @@
+
+ #define MTK_WED_TX_QUEUES 2
+ #define MTK_WED_RX_QUEUES 2
++#define MTK_WED_RX_PAGE_QUEUES 3
+
+ #define WED_WO_STA_REC 0x6
+
+@@ -99,6 +100,9 @@ struct mtk_wed_device {
+ struct mtk_wed_ring txfree_ring;
+ struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
+ struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
++ struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
++ struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
++ struct mtk_wed_ring ind_cmd_ring;
+
+ struct {
+ int size;
+@@ -120,6 +124,13 @@ struct mtk_wed_device {
+ dma_addr_t fdbk_phys;
+ } rro;
+
++ struct {
++ int size;
++ struct mtk_wed_buf *pages;
++ struct mtk_wed_bm_desc *desc;
++ dma_addr_t desc_phys;
++ } hw_rro;
++
+ /* filled by driver: */
+ struct {
+ union {
+@@ -138,6 +149,8 @@ struct mtk_wed_device {
+ u32 wpdma_txfree;
+ u32 wpdma_rx_glo;
+ u32 wpdma_rx;
++ u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
++ u32 wpdma_rx_pg;
+
+ bool wcid_512;
+ bool hw_rro;
+@@ -152,9 +165,20 @@ struct mtk_wed_device {
+
+ u8 tx_tbit[MTK_WED_TX_QUEUES];
+ u8 rx_tbit[MTK_WED_RX_QUEUES];
++ u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
++ u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
+ u8 txfree_tbit;
+ u8 amsdu_max_subframes;
+
++ struct {
++ u8 se_group_nums;
++ u16 win_size;
++ u16 particular_sid;
++ u32 ack_sn_addr;
++ dma_addr_t particular_se_phys;
++ dma_addr_t addr_elem_phys[1024];
++ } ind_cmd;
++
+ u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
+ int (*offload_enable)(struct mtk_wed_device *wed);
+ void (*offload_disable)(struct mtk_wed_device *wed);
+@@ -193,6 +217,14 @@ struct mtk_wed_ops {
+ void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
+ int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
+ enum tc_setup_type type, void *type_data);
++ void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask,
++ bool reset);
++ void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
++ void __iomem *regs);
++ void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
++ void __iomem *regs);
++ int (*ind_rx_ring_setup)(struct mtk_wed_device *dev,
++ void __iomem *regs);
+ };
+
+ extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+@@ -264,6 +296,15 @@ static inline bool mtk_wed_is_amsdu_supp
+ #define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
+ #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
+ (_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
++#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \
++ (_dev)->ops->start_hw_rro(_dev, _mask, _reset)
++#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
++ (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
++#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
++ (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
++#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
++ (_dev)->ops->ind_rx_ring_setup(_dev, _regs)
++
+ #else
+ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
+ {
+@@ -283,6 +324,10 @@ static inline bool mtk_wed_device_active
+ #define mtk_wed_device_stop(_dev) do {} while (0)
+ #define mtk_wed_device_dma_reset(_dev) do {} while (0)
+ #define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
++#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0)
++#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
++#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
++#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
+ #endif
+
+ #endif
diff --git a/target/linux/generic/backport-6.1/752-18-v6.7-net-ethernet-mtk_wed-debugfs-move-wed_v2-specific-re.patch b/target/linux/generic/backport-6.1/752-18-v6.7-net-ethernet-mtk_wed-debugfs-move-wed_v2-specific-re.patch
new file mode 100644
index 0000000000..5ea43a4445
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-18-v6.7-net-ethernet-mtk_wed-debugfs-move-wed_v2-specific-re.patch
@@ -0,0 +1,78 @@
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+Date: Mon, 18 Sep 2023 12:29:17 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: debugfs: move wed_v2 specific regs
+ out of regs array
+
+Move the WED 2.0 specific debugfs entries out of the regs array. This is a
+preliminary patch to introduce WED 3.0 debugfs info.
+
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -151,7 +151,7 @@ DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
+ static int
+ wed_rxinfo_show(struct seq_file *s, void *data)
+ {
+- static const struct reg_dump regs[] = {
++ static const struct reg_dump regs_common[] = {
+ DUMP_STR("WPDMA RX"),
+ DUMP_WPDMA_RX_RING(0),
+ DUMP_WPDMA_RX_RING(1),
+@@ -169,7 +169,7 @@ wed_rxinfo_show(struct seq_file *s, void
+ DUMP_WED_RING(WED_RING_RX_DATA(0)),
+ DUMP_WED_RING(WED_RING_RX_DATA(1)),
+
+- DUMP_STR("WED RRO"),
++ DUMP_STR("WED WO RRO"),
+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
+ DUMP_WED(WED_RROQM_MID_MIB),
+ DUMP_WED(WED_RROQM_MOD_MIB),
+@@ -180,17 +180,6 @@ wed_rxinfo_show(struct seq_file *s, void
+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
+
+- DUMP_STR("WED Route QM"),
+- DUMP_WED(WED_RTQM_R2H_MIB(0)),
+- DUMP_WED(WED_RTQM_R2Q_MIB(0)),
+- DUMP_WED(WED_RTQM_Q2H_MIB(0)),
+- DUMP_WED(WED_RTQM_R2H_MIB(1)),
+- DUMP_WED(WED_RTQM_R2Q_MIB(1)),
+- DUMP_WED(WED_RTQM_Q2H_MIB(1)),
+- DUMP_WED(WED_RTQM_Q2N_MIB),
+- DUMP_WED(WED_RTQM_Q2B_MIB),
+- DUMP_WED(WED_RTQM_PFDBK_MIB),
+-
+ DUMP_STR("WED WDMA TX"),
+ DUMP_WED(WED_WDMA_TX_MIB),
+ DUMP_WED_RING(WED_WDMA_RING_TX),
+@@ -211,11 +200,25 @@ wed_rxinfo_show(struct seq_file *s, void
+ DUMP_WED(WED_RX_BM_INTF),
+ DUMP_WED(WED_RX_BM_ERR_STS),
+ };
++ static const struct reg_dump regs_wed_v2[] = {
++ DUMP_STR("WED Route QM"),
++ DUMP_WED(WED_RTQM_R2H_MIB(0)),
++ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
++ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
++ DUMP_WED(WED_RTQM_R2H_MIB(1)),
++ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
++ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
++ DUMP_WED(WED_RTQM_Q2N_MIB),
++ DUMP_WED(WED_RTQM_Q2B_MIB),
++ DUMP_WED(WED_RTQM_PFDBK_MIB),
++ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+- if (dev)
+- dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++ if (dev) {
++ dump_wed_regs(s, dev, regs_common, ARRAY_SIZE(regs_common));
++ dump_wed_regs(s, dev, regs_wed_v2, ARRAY_SIZE(regs_wed_v2));
++ }
+
+ return 0;
+ }
diff --git a/target/linux/generic/backport-6.1/752-19-v6.7-net-ethernet-mtk_wed-debugfs-add-WED-3.0-debugfs-ent.patch b/target/linux/generic/backport-6.1/752-19-v6.7-net-ethernet-mtk_wed-debugfs-add-WED-3.0-debugfs-ent.patch
new file mode 100644
index 0000000000..f491d2fd80
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-19-v6.7-net-ethernet-mtk_wed-debugfs-add-WED-3.0-debugfs-ent.patch
@@ -0,0 +1,432 @@
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:18 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: debugfs: add WED 3.0 debugfs entries
+
+Introduce WED3.0 debugfs entries useful for debugging.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
+@@ -11,6 +11,7 @@ struct reg_dump {
+ u16 offset;
+ u8 type;
+ u8 base;
++ u32 mask;
+ };
+
+ enum {
+@@ -25,6 +26,8 @@ enum {
+
+ #define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
+ #define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
++#define DUMP_REG_MASK(_reg, _mask) \
++ { #_mask, MTK_##_reg, DUMP_TYPE_WED, 0, MTK_##_mask }
+ #define DUMP_RING(_prefix, _base, ...) \
+ { _prefix " BASE", _base, __VA_ARGS__ }, \
+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
+@@ -32,6 +35,7 @@ enum {
+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
+
+ #define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
++#define DUMP_WED_MASK(_reg, _mask) DUMP_REG_MASK(_reg, _mask)
+ #define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
+
+ #define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
+@@ -212,12 +216,58 @@ wed_rxinfo_show(struct seq_file *s, void
+ DUMP_WED(WED_RTQM_Q2B_MIB),
+ DUMP_WED(WED_RTQM_PFDBK_MIB),
+ };
++ static const struct reg_dump regs_wed_v3[] = {
++ DUMP_STR("WED RX RRO DATA"),
++ DUMP_WED_RING(WED_RRO_RX_D_RX(0)),
++ DUMP_WED_RING(WED_RRO_RX_D_RX(1)),
++
++ DUMP_STR("WED RX MSDU PAGE"),
++ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(0)),
++ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(1)),
++ DUMP_WED_RING(WED_RRO_MSDU_PG_CTRL0(2)),
++
++ DUMP_STR("WED RX IND CMD"),
++ DUMP_WED(WED_IND_CMD_RX_CTRL1),
++ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL2, WED_IND_CMD_MAX_CNT),
++ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_PROC_IDX),
++ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_DMA_IDX),
++ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0, WED_IND_CMD_MAGIC_CNT),
++ DUMP_WED_MASK(RRO_IND_CMD_SIGNATURE, RRO_IND_CMD_MAGIC_CNT),
++ DUMP_WED_MASK(WED_IND_CMD_RX_CTRL0,
++ WED_IND_CMD_PREFETCH_FREE_CNT),
++ DUMP_WED_MASK(WED_RRO_CFG1, WED_RRO_CFG1_PARTICL_SE_ID),
++
++ DUMP_STR("WED ADDR ELEM"),
++ DUMP_WED(WED_ADDR_ELEM_CFG0),
++ DUMP_WED_MASK(WED_ADDR_ELEM_CFG1,
++ WED_ADDR_ELEM_PREFETCH_FREE_CNT),
++
++ DUMP_STR("WED Route QM"),
++ DUMP_WED(WED_RTQM_ENQ_I2Q_DMAD_CNT),
++ DUMP_WED(WED_RTQM_ENQ_I2N_DMAD_CNT),
++ DUMP_WED(WED_RTQM_ENQ_I2Q_PKT_CNT),
++ DUMP_WED(WED_RTQM_ENQ_I2N_PKT_CNT),
++ DUMP_WED(WED_RTQM_ENQ_USED_ENTRY_CNT),
++ DUMP_WED(WED_RTQM_ENQ_ERR_CNT),
++
++ DUMP_WED(WED_RTQM_DEQ_DMAD_CNT),
++ DUMP_WED(WED_RTQM_DEQ_Q2I_DMAD_CNT),
++ DUMP_WED(WED_RTQM_DEQ_PKT_CNT),
++ DUMP_WED(WED_RTQM_DEQ_Q2I_PKT_CNT),
++ DUMP_WED(WED_RTQM_DEQ_USED_PFDBK_CNT),
++ DUMP_WED(WED_RTQM_DEQ_ERR_CNT),
++ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (dev) {
+ dump_wed_regs(s, dev, regs_common, ARRAY_SIZE(regs_common));
+- dump_wed_regs(s, dev, regs_wed_v2, ARRAY_SIZE(regs_wed_v2));
++ if (mtk_wed_is_v2(hw))
++ dump_wed_regs(s, dev,
++ regs_wed_v2, ARRAY_SIZE(regs_wed_v2));
++ else
++ dump_wed_regs(s, dev,
++ regs_wed_v3, ARRAY_SIZE(regs_wed_v3));
+ }
+
+ return 0;
+@@ -225,6 +275,314 @@ wed_rxinfo_show(struct seq_file *s, void
+ DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
+
+ static int
++wed_amsdu_show(struct seq_file *s, void *data)
++{
++ static const struct reg_dump regs[] = {
++ DUMP_STR("WED AMDSU INFO"),
++ DUMP_WED(WED_MON_AMSDU_FIFO_DMAD),
++
++ DUMP_STR("WED AMDSU ENG0 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(0)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(0)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(0)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(0)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(0)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(0),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(0),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG1 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(1)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(1)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(1)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(1)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(1)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(1),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(1),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG2 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(2)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(2)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(2)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(2)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(2)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(2),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(2),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG3 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(3)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(3)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(3)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(3)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(3)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(3),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(3),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG4 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(4)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(4)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(4)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(4)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(4)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(4),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG5 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(5)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(5)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(5)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(5)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(5)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(5),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(5),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG6 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(6)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(6)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(6)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(6)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(6)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(6),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(6),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG7 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(7)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(7)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(7)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(7)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(7)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(7),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(7),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(4),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED AMDSU ENG8 INFO"),
++ DUMP_WED(WED_MON_AMSDU_ENG_DMAD(8)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QFPL(8)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENI(8)),
++ DUMP_WED(WED_MON_AMSDU_ENG_QENO(8)),
++ DUMP_WED(WED_MON_AMSDU_ENG_MERG(8)),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8),
++ WED_AMSDU_ENG_MAX_PL_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT8(8),
++ WED_AMSDU_ENG_MAX_QGPP_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
++ WED_AMSDU_ENG_CUR_ENTRY),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
++ WED_AMSDU_ENG_MAX_BUF_MERGED),
++ DUMP_WED_MASK(WED_MON_AMSDU_ENG_CNT9(8),
++ WED_AMSDU_ENG_MAX_MSDU_MERGED),
++
++ DUMP_STR("WED QMEM INFO"),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_FQ_CNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(0), WED_AMSDU_QMEM_SP_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID0_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(1), WED_AMSDU_QMEM_TID1_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID2_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(2), WED_AMSDU_QMEM_TID3_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID4_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(3), WED_AMSDU_QMEM_TID5_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID6_QCNT),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_CNT(4), WED_AMSDU_QMEM_TID7_QCNT),
++
++ DUMP_STR("WED QMEM HEAD INFO"),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_FQ_HEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(0), WED_AMSDU_QMEM_SP_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID0_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(1), WED_AMSDU_QMEM_TID1_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID2_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(2), WED_AMSDU_QMEM_TID3_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID4_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(3), WED_AMSDU_QMEM_TID5_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID6_QHEAD),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(4), WED_AMSDU_QMEM_TID7_QHEAD),
++
++ DUMP_STR("WED QMEM TAIL INFO"),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_FQ_TAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(5), WED_AMSDU_QMEM_SP_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID0_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(6), WED_AMSDU_QMEM_TID1_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID2_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(7), WED_AMSDU_QMEM_TID3_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID4_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(8), WED_AMSDU_QMEM_TID5_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID6_QTAIL),
++ DUMP_WED_MASK(WED_MON_AMSDU_QMEM_PTR(9), WED_AMSDU_QMEM_TID7_QTAIL),
++
++ DUMP_STR("WED HIFTXD MSDU INFO"),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(1)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(2)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(3)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(4)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(5)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(6)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(7)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(8)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(9)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(10)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(11)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(12)),
++ DUMP_WED(WED_MON_AMSDU_HIFTXD_FETCH_MSDU(13)),
++ };
++ struct mtk_wed_hw *hw = s->private;
++ struct mtk_wed_device *dev = hw->wed_dev;
++
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++
++ return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(wed_amsdu);
++
++static int
++wed_rtqm_show(struct seq_file *s, void *data)
++{
++ static const struct reg_dump regs[] = {
++ DUMP_STR("WED Route QM IGRS0(N2H + Recycle)"),
++ DUMP_WED(WED_RTQM_IGRS0_I2HW_DMAD_CNT),
++ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS0_I2H_DMAD_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS0_I2HW_PKT_CNT),
++ DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS0_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS0_FDROP_CNT),
++
++ DUMP_STR("WED Route QM IGRS1(Legacy)"),
++ DUMP_WED(WED_RTQM_IGRS1_I2HW_DMAD_CNT),
++ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS1_I2H_DMAD_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS1_I2HW_PKT_CNT),
++ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS1_I2H_PKT_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS1_FDROP_CNT),
++
++ DUMP_STR("WED Route QM IGRS2(RRO3.0)"),
++ DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT),
++ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS2_I2H_DMAD_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS2_I2HW_PKT_CNT),
++ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS2_I2H_PKT_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS2_FDROP_CNT),
++
++ DUMP_STR("WED Route QM IGRS3(DEBUG)"),
++ DUMP_WED(WED_RTQM_IGRS2_I2HW_DMAD_CNT),
++ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS3_I2H_DMAD_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS3_I2HW_PKT_CNT),
++ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(0)),
++ DUMP_WED(WED_RTQM_IGRS3_I2H_PKT_CNT(1)),
++ DUMP_WED(WED_RTQM_IGRS3_FDROP_CNT),
++ };
++ struct mtk_wed_hw *hw = s->private;
++ struct mtk_wed_device *dev = hw->wed_dev;
++
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++
++ return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(wed_rtqm);
++
++static int
++wed_rro_show(struct seq_file *s, void *data)
++{
++ static const struct reg_dump regs[] = {
++ DUMP_STR("RRO/IND CMD CNT"),
++ DUMP_WED(WED_RX_IND_CMD_CNT(1)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(2)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(3)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(4)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(5)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(6)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(7)),
++ DUMP_WED(WED_RX_IND_CMD_CNT(8)),
++ DUMP_WED_MASK(WED_RX_IND_CMD_CNT(9),
++ WED_IND_CMD_MAGIC_CNT_FAIL_CNT),
++
++ DUMP_WED(WED_RX_ADDR_ELEM_CNT(0)),
++ DUMP_WED_MASK(WED_RX_ADDR_ELEM_CNT(1),
++ WED_ADDR_ELEM_SIG_FAIL_CNT),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(1)),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(2)),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(3)),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(4)),
++ DUMP_WED(WED_RX_MSDU_PG_CNT(5)),
++ DUMP_WED_MASK(WED_RX_PN_CHK_CNT,
++ WED_PN_CHK_FAIL_CNT),
++ };
++ struct mtk_wed_hw *hw = s->private;
++ struct mtk_wed_device *dev = hw->wed_dev;
++
++ if (dev)
++ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
++
++ return 0;
++}
++DEFINE_SHOW_ATTRIBUTE(wed_rro);
++
++static int
+ mtk_wed_reg_set(void *data, u64 val)
+ {
+ struct mtk_wed_hw *hw = data;
+@@ -266,7 +624,16 @@ void mtk_wed_hw_add_debugfs(struct mtk_w
+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+- if (!mtk_wed_is_v1(hw))
++ if (!mtk_wed_is_v1(hw)) {
+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
+ &wed_rxinfo_fops);
++ if (mtk_wed_is_v3_or_greater(hw)) {
++ debugfs_create_file_unsafe("amsdu", 0400, dir, hw,
++ &wed_amsdu_fops);
++ debugfs_create_file_unsafe("rtqm", 0400, dir, hw,
++ &wed_rtqm_fops);
++ debugfs_create_file_unsafe("rro", 0400, dir, hw,
++ &wed_rro_fops);
++ }
++ }
+ }
diff --git a/target/linux/generic/backport-6.1/752-20-v6.7-net-ethernet-mtk_wed-add-wed-3.0-reset-support.patch b/target/linux/generic/backport-6.1/752-20-v6.7-net-ethernet-mtk_wed-add-wed-3.0-reset-support.patch
new file mode 100644
index 0000000000..aaaabf05e8
--- /dev/null
+++ b/target/linux/generic/backport-6.1/752-20-v6.7-net-ethernet-mtk_wed-add-wed-3.0-reset-support.patch
@@ -0,0 +1,587 @@
+From: Sujuan Chen <sujuan.chen@mediatek.com>
+Date: Mon, 18 Sep 2023 12:29:19 +0200
+Subject: [PATCH] net: ethernet: mtk_wed: add wed 3.0 reset support
+
+Introduce support for resetting Wireless Ethernet Dispatcher 3.0
+available on MT7988 SoC.
+
+Co-developed-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Signed-off-by: Sujuan Chen <sujuan.chen@mediatek.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+---
+
+--- a/drivers/net/ethernet/mediatek/mtk_wed.c
++++ b/drivers/net/ethernet/mediatek/mtk_wed.c
+@@ -148,6 +148,90 @@ mtk_wdma_read_reset(struct mtk_wed_devic
+ return wdma_r32(dev, MTK_WDMA_GLO_CFG);
+ }
+
++static void
++mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev)
++{
++ u32 status;
++
++ if (!mtk_wed_is_v3_or_greater(dev->hw))
++ return;
++
++ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
++ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
++ dev_err(dev->hw->dev, "rx reset failed\n");
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
++ dev_err(dev->hw->dev, "rx reset failed\n");
++
++ wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
++ wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
++ dev_err(dev->hw->dev, "rx reset failed\n");
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
++ dev_err(dev->hw->dev, "rx reset failed\n");
++
++ /* prefetch FIFO */
++ wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
++ MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
++ MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
++ wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG,
++ MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR |
++ MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR);
++
++ /* core FIFO */
++ wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
++ wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG,
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR |
++ MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR);
++
++ /* writeback FIFO */
++ wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
++ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++ wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
++ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++
++ wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0),
++ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1),
++ MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR);
++
++ /* prefetch ring status */
++ wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
++ MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
++ MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR);
++
++ /* writeback ring status */
++ wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
++ MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
++ MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR);
++}
++
+ static int
+ mtk_wdma_rx_reset(struct mtk_wed_device *dev)
+ {
+@@ -160,6 +244,7 @@ mtk_wdma_rx_reset(struct mtk_wed_device
+ if (ret)
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
++ mtk_wdma_v3_rx_reset(dev);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+@@ -192,6 +277,84 @@ mtk_wed_poll_busy(struct mtk_wed_device
+ }
+
+ static void
++mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev)
++{
++ u32 status;
++
++ if (!mtk_wed_is_v3_or_greater(dev->hw))
++ return;
++
++ wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN);
++ wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG))
++ dev_err(dev->hw->dev, "tx reset failed\n");
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG))
++ dev_err(dev->hw->dev, "tx reset failed\n");
++
++ wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN);
++ wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG))
++ dev_err(dev->hw->dev, "tx reset failed\n");
++
++ if (read_poll_timeout(wdma_r32, status,
++ !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY),
++ 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG))
++ dev_err(dev->hw->dev, "tx reset failed\n");
++
++ /* prefetch FIFO */
++ wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
++ MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
++ MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
++ wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG,
++ MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR |
++ MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR);
++
++ /* core FIFO */
++ wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
++ wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG,
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR |
++ MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR);
++
++ /* writeback FIFO */
++ wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
++ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++ wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
++ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++
++ wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0),
++ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1),
++ MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR);
++
++ /* prefetch ring status */
++ wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG,
++ MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG,
++ MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR);
++
++ /* writeback ring status */
++ wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG,
++ MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
++ wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG,
++ MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR);
++}
++
++static void
+ mtk_wdma_tx_reset(struct mtk_wed_device *dev)
+ {
+ u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
+@@ -202,6 +365,7 @@ mtk_wdma_tx_reset(struct mtk_wed_device
+ !(status & mask), 0, 10000))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
++ mtk_wdma_v3_tx_reset(dev);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+@@ -1405,13 +1569,33 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ if (ret)
+ return ret;
+
++ if (dev->wlan.hw_rro) {
++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
++ mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS,
++ MTK_WED_RX_IND_CMD_BUSY);
++ mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG);
++ }
++
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
++ if (!ret && mtk_wed_is_v3_or_greater(dev->hw))
++ ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_BUSY);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
+ } else {
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ /* 1.a. disable prefetch HW */
++ wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_EN);
++ mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
++ MTK_WED_WPDMA_RX_D_PREF_BUSY);
++ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
++ MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL);
++ }
++
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+@@ -1439,23 +1623,52 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+ }
+
++ if (dev->wlan.hw_rro) {
++ /* disable rro msdu page drv */
++ wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_EN);
++
++ /* disable rro data drv */
++ wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);
++
++ /* rro msdu page drv reset */
++ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_CLR);
++ mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_CLR);
++
++ /* rro data drv reset */
++ wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2),
++ MTK_WED_RRO_RX_D_DRV_CLR);
++ mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2),
++ MTK_WED_RRO_RX_D_DRV_CLR);
++ }
++
+ /* reset route qm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
+- if (ret)
++ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+- else
+- wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+- MTK_WED_RTQM_Q_RST);
++ } else if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ wed_set(dev, MTK_WED_RTQM_RST, BIT(0));
++ wed_clr(dev, MTK_WED_RTQM_RST, BIT(0));
++ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
++ } else {
++ wed_set(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
++ }
+
+ /* reset tx wdma */
+ mtk_wdma_tx_reset(dev);
+
+ /* reset tx wdma drv */
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
+- mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+- MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
++ if (mtk_wed_is_v3_or_greater(dev->hw))
++ mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS,
++ MTK_WED_WPDMA_STATUS_TX_DRV);
++ else
++ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
++ MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
+
+ /* reset wed rx dma */
+@@ -1476,6 +1689,14 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ MTK_WED_CTRL_WED_RX_BM_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+
++ if (dev->wlan.hw_rro) {
++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
++ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
++ MTK_WED_CTRL_WED_RX_PG_BM_BUSY);
++ wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
++ wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
++ }
++
+ /* wo change to enable state */
+ val = MTK_WED_WO_STATE_ENABLE;
+ ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+@@ -1493,6 +1714,7 @@ mtk_wed_rx_reset(struct mtk_wed_device *
+ false);
+ }
+ mtk_wed_free_rx_buffer(dev);
++ mtk_wed_hwrro_free_buffer(dev);
+
+ return 0;
+ }
+@@ -1526,15 +1748,41 @@ mtk_wed_reset_dma(struct mtk_wed_device
+
+ /* 2. reset WDMA rx DMA */
+ busy = !!mtk_wdma_rx_reset(dev);
+- wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE |
++ wed_r32(dev, MTK_WED_WDMA_GLO_CFG);
++ val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN;
++ wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val);
++ } else {
++ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
++ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
++ }
++
+ if (!busy)
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
++ if (!busy && mtk_wed_is_v3_or_greater(dev->hw))
++ busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_BUSY);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
+ } else {
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ /* 1.a. disable prefetch HW */
++ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_EN);
++ mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_BUSY);
++ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG,
++ MTK_WED_WDMA_RX_PREF_DDONE2_EN);
++
++ /* 2. Reset dma index */
++ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
++ MTK_WED_WDMA_RESET_IDX_RX_ALL);
++ }
++
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
+@@ -1550,8 +1798,13 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ for (i = 0; i < 100; i++) {
+- val = wed_r32(dev, MTK_WED_TX_BM_INTF);
+- if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
++ if (mtk_wed_is_v1(dev->hw))
++ val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP,
++ wed_r32(dev, MTK_WED_TX_BM_INTF));
++ else
++ val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP,
++ wed_r32(dev, MTK_WED_TX_TKID_INTF));
++ if (val == 0x40)
+ break;
+ }
+
+@@ -1573,6 +1826,8 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
++ if (mtk_wed_is_v3_or_greater(dev->hw))
++ wed_w32(dev, MTK_WED_RX1_CTRL2, 0);
+ } else {
+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
+ MTK_WED_WPDMA_RESET_IDX_TX |
+@@ -1589,7 +1844,14 @@ mtk_wed_reset_dma(struct mtk_wed_device
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+- mtk_wed_rx_reset(dev);
++ if (mtk_wed_is_v3_or_greater(dev->hw)) {
++ /* reset amsdu engine */
++ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
++ mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU);
++ }
++
++ if (mtk_wed_get_rx_capa(dev))
++ mtk_wed_rx_reset(dev);
+ }
+
+ static int
+@@ -1841,6 +2103,7 @@ mtk_wed_dma_enable(struct mtk_wed_device
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4);
+
+ wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN);
++ wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN);
+ }
+
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+@@ -1904,6 +2167,12 @@ mtk_wed_start_hw_rro(struct mtk_wed_devi
+ if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
+ return;
+
++ if (reset) {
++ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
++ MTK_WED_RRO_MSDU_PG_DRV_EN);
++ return;
++ }
++
+ wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR);
+ wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
+ MTK_WED_RRO_MSDU_PG_DRV_CLR);
+--- a/drivers/net/ethernet/mediatek/mtk_wed_regs.h
++++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
+@@ -28,6 +28,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RESET 0x008
+ #define MTK_WED_RESET_TX_BM BIT(0)
+ #define MTK_WED_RESET_RX_BM BIT(1)
++#define MTK_WED_RESET_RX_PG_BM BIT(2)
++#define MTK_WED_RESET_RRO_RX_TO_PG BIT(3)
+ #define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
+ #define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
+ #define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
+@@ -106,6 +108,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_STATUS 0x060
+ #define MTK_WED_STATUS_TX GENMASK(15, 8)
+
++#define MTK_WED_WPDMA_STATUS 0x068
++#define MTK_WED_WPDMA_STATUS_TX_DRV GENMASK(15, 8)
++
+ #define MTK_WED_TX_BM_CTRL 0x080
+ #define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+ #define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+@@ -140,6 +145,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+ #define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+
++#define MTK_WED_TX_TKID_INTF 0x0dc
++#define MTK_WED_TX_TKID_INTF_TKFIFO_FDEP GENMASK(25, 16)
++
+ #define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3 GENMASK(7, 0)
+ #define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3 GENMASK(23, 16)
+
+@@ -190,6 +198,7 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
+
+ #define MTK_WED_SCR0 0x3c0
++#define MTK_WED_RX1_CTRL2 0x418
+ #define MTK_WED_WPDMA_INT_TRIGGER 0x504
+ #define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
+ #define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
+@@ -303,6 +312,7 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
+ #define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
++#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL BIT(20)
+ #define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
+
+ #define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
+@@ -313,6 +323,7 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WPDMA_RX_D_PREF_CFG 0x7b4
+ #define MTK_WED_WPDMA_RX_D_PREF_EN BIT(0)
++#define MTK_WED_WPDMA_RX_D_PREF_BUSY BIT(1)
+ #define MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE GENMASK(12, 8)
+ #define MTK_WED_WPDMA_RX_D_PREF_LOW_THRES GENMASK(21, 16)
+
+@@ -334,11 +345,13 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WDMA_RX_PREF_CFG 0x950
+ #define MTK_WED_WDMA_RX_PREF_EN BIT(0)
++#define MTK_WED_WDMA_RX_PREF_BUSY BIT(1)
+ #define MTK_WED_WDMA_RX_PREF_BURST_SIZE GENMASK(12, 8)
+ #define MTK_WED_WDMA_RX_PREF_LOW_THRES GENMASK(21, 16)
+ #define MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR BIT(24)
+ #define MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR BIT(25)
+ #define MTK_WED_WDMA_RX_PREF_DDONE2_EN BIT(26)
++#define MTK_WED_WDMA_RX_PREF_DDONE2_BUSY BIT(27)
+
+ #define MTK_WED_WDMA_RX_PREF_FIFO_CFG 0x95C
+ #define MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR BIT(0)
+@@ -367,6 +380,7 @@ struct mtk_wdma_desc {
+
+ #define MTK_WED_WDMA_RESET_IDX 0xa08
+ #define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
++#define MTK_WED_WDMA_RESET_IDX_RX_ALL BIT(20)
+ #define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
+
+ #define MTK_WED_WDMA_INT_CLR 0xa24
+@@ -437,21 +451,62 @@ struct mtk_wdma_desc {
+ #define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
+ #define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
+
++#define MTK_WDMA_XDMA_TX_FIFO_CFG 0x238
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR BIT(0)
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR BIT(4)
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR BIT(8)
++#define MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR BIT(12)
++
++#define MTK_WDMA_XDMA_RX_FIFO_CFG 0x23c
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR BIT(0)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR BIT(4)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR BIT(8)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR BIT(12)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR BIT(15)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR BIT(18)
++#define MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR BIT(21)
++
+ #define MTK_WDMA_INT_GRP1 0x250
+ #define MTK_WDMA_INT_GRP2 0x254
+
+ #define MTK_WDMA_PREF_TX_CFG 0x2d0
+ #define MTK_WDMA_PREF_TX_CFG_PREF_EN BIT(0)
++#define MTK_WDMA_PREF_TX_CFG_PREF_BUSY BIT(1)
+
+ #define MTK_WDMA_PREF_RX_CFG 0x2dc
+ #define MTK_WDMA_PREF_RX_CFG_PREF_EN BIT(0)
++#define MTK_WDMA_PREF_RX_CFG_PREF_BUSY BIT(1)
++
++#define MTK_WDMA_PREF_RX_FIFO_CFG 0x2e0
++#define MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR BIT(0)
++#define MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR BIT(16)
++
++#define MTK_WDMA_PREF_TX_FIFO_CFG 0x2d4
++#define MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR BIT(0)
++#define MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR BIT(16)
++
++#define MTK_WDMA_PREF_SIDX_CFG 0x2e4
++#define MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
++#define MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
+
+ #define MTK_WDMA_WRBK_TX_CFG 0x300
++#define MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY BIT(0)
+ #define MTK_WDMA_WRBK_TX_CFG_WRBK_EN BIT(30)
+
++#define MTK_WDMA_WRBK_TX_FIFO_CFG(_n) (0x304 + (_n) * 0x4)
++#define MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR BIT(0)
++
+ #define MTK_WDMA_WRBK_RX_CFG 0x344
++#define MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY BIT(0)
+ #define MTK_WDMA_WRBK_RX_CFG_WRBK_EN BIT(30)
+
++#define MTK_WDMA_WRBK_RX_FIFO_CFG(_n) (0x348 + (_n) * 0x4)
++#define MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR BIT(0)
++
++#define MTK_WDMA_WRBK_SIDX_CFG 0x388
++#define MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR GENMASK(3, 0)
++#define MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR GENMASK(5, 4)
++
+ #define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
+ #define MTK_PCIE_MIRROR_MAP_EN BIT(0)
+ #define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
+@@ -465,6 +520,8 @@ struct mtk_wdma_desc {
+ #define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
+ #define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
+
++#define MTK_WED_RTQM_RST 0xb04
++
+ #define MTK_WED_RTQM_IGRS0_I2HW_DMAD_CNT 0xb1c
+ #define MTK_WED_RTQM_IGRS0_I2H_DMAD_CNT(_n) (0xb20 + (_n) * 0x4)
+ #define MTK_WED_RTQM_IGRS0_I2HW_PKT_CNT 0xb28
+@@ -653,6 +710,9 @@ struct mtk_wdma_desc {
+ #define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR BIT(17)
+ #define MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG GENMASK(22, 18)
+
++#define MTK_WED_RRO_RX_HW_STS 0xf00
++#define MTK_WED_RX_IND_CMD_BUSY GENMASK(31, 0)
++
+ #define MTK_WED_RX_IND_CMD_CNT0 0xf20
+ #define MTK_WED_RX_IND_CMD_DBG_CNT_EN BIT(31)
+
diff --git a/target/linux/generic/backport-6.1/794-v6.2-net-core-Allow-live-renaming-when-an-interface-is-up.patch b/target/linux/generic/backport-6.1/794-v6.2-net-core-Allow-live-renaming-when-an-interface-is-up.patch
index c15604717b..a9e3c71d54 100644
--- a/target/linux/generic/backport-6.1/794-v6.2-net-core-Allow-live-renaming-when-an-interface-is-up.patch
+++ b/target/linux/generic/backport-6.1/794-v6.2-net-core-Allow-live-renaming-when-an-interface-is-up.patch
@@ -46,7 +46,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -1667,7 +1667,6 @@ struct net_device_ops {
+@@ -1668,7 +1668,6 @@ struct net_device_ops {
* @IFF_FAILOVER: device is a failover master device
* @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device
* @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device
@@ -54,7 +54,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
* @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with
* skb_headlen(skb) == 0 (data starts from frag0)
* @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN
-@@ -1703,7 +1702,7 @@ enum netdev_priv_flags {
+@@ -1704,7 +1703,7 @@ enum netdev_priv_flags {
IFF_FAILOVER = 1<<27,
IFF_FAILOVER_SLAVE = 1<<28,
IFF_L3MDEV_RX_HANDLER = 1<<29,
@@ -63,7 +63,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
IFF_CHANGE_PROTO_DOWN = BIT_ULL(32),
};
-@@ -1738,7 +1737,6 @@ enum netdev_priv_flags {
+@@ -1739,7 +1738,6 @@ enum netdev_priv_flags {
#define IFF_FAILOVER IFF_FAILOVER
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
diff --git a/target/linux/generic/hack-5.15/721-net-add-packet-mangeling.patch b/target/linux/generic/hack-5.15/721-net-add-packet-mangeling.patch
index ac9a161b5f..262a58036e 100644
--- a/target/linux/generic/hack-5.15/721-net-add-packet-mangeling.patch
+++ b/target/linux/generic/hack-5.15/721-net-add-packet-mangeling.patch
@@ -19,7 +19,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -1681,6 +1681,10 @@ enum netdev_priv_flags {
+@@ -1682,6 +1682,10 @@ enum netdev_priv_flags {
IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
};
@@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
#define IFF_EBRIDGE IFF_EBRIDGE
#define IFF_BONDING IFF_BONDING
-@@ -1712,6 +1716,7 @@ enum netdev_priv_flags {
+@@ -1713,6 +1717,7 @@ enum netdev_priv_flags {
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
#define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR
@@ -38,7 +38,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
/* Specifies the type of the struct net_device::ml_priv pointer */
enum netdev_ml_priv_type {
-@@ -2012,6 +2017,7 @@ struct net_device {
+@@ -2013,6 +2018,7 @@ struct net_device {
/* Read-mostly cache-line for fast-path access */
unsigned int flags;
unsigned int priv_flags;
@@ -46,7 +46,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
const struct net_device_ops *netdev_ops;
int ifindex;
unsigned short gflags;
-@@ -2072,6 +2078,11 @@ struct net_device {
+@@ -2073,6 +2079,11 @@ struct net_device {
const struct tlsdev_ops *tlsdev_ops;
#endif
@@ -58,7 +58,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
const struct header_ops *header_ops;
unsigned char operstate;
-@@ -2143,6 +2154,10 @@ struct net_device {
+@@ -2144,6 +2155,10 @@ struct net_device {
struct mctp_dev __rcu *mctp_ptr;
#endif
diff --git a/target/linux/generic/hack-6.1/721-net-add-packet-mangeling.patch b/target/linux/generic/hack-6.1/721-net-add-packet-mangeling.patch
index 8f940797c5..9ce8f82af0 100644
--- a/target/linux/generic/hack-6.1/721-net-add-packet-mangeling.patch
+++ b/target/linux/generic/hack-6.1/721-net-add-packet-mangeling.patch
@@ -19,7 +19,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -1706,6 +1706,7 @@ enum netdev_priv_flags {
+@@ -1707,6 +1707,7 @@ enum netdev_priv_flags {
/* was IFF_LIVE_RENAME_OK */
IFF_TX_SKB_NO_LINEAR = BIT_ULL(31),
IFF_CHANGE_PROTO_DOWN = BIT_ULL(32),
@@ -27,7 +27,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
};
#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
-@@ -1739,6 +1740,7 @@ enum netdev_priv_flags {
+@@ -1740,6 +1741,7 @@ enum netdev_priv_flags {
#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE
#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
#define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR
@@ -35,7 +35,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
/* Specifies the type of the struct net_device::ml_priv pointer */
enum netdev_ml_priv_type {
-@@ -2107,6 +2109,11 @@ struct net_device {
+@@ -2108,6 +2110,11 @@ struct net_device {
const struct tlsdev_ops *tlsdev_ops;
#endif
@@ -47,7 +47,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
const struct header_ops *header_ops;
unsigned char operstate;
-@@ -2182,6 +2189,10 @@ struct net_device {
+@@ -2183,6 +2190,10 @@ struct net_device {
struct mctp_dev __rcu *mctp_ptr;
#endif
diff --git a/target/linux/generic/pending-5.15/680-NET-skip-GRO-for-foreign-MAC-addresses.patch b/target/linux/generic/pending-5.15/680-NET-skip-GRO-for-foreign-MAC-addresses.patch
index 46856e1552..89b0c9f53c 100644
--- a/target/linux/generic/pending-5.15/680-NET-skip-GRO-for-foreign-MAC-addresses.patch
+++ b/target/linux/generic/pending-5.15/680-NET-skip-GRO-for-foreign-MAC-addresses.patch
@@ -11,7 +11,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2098,6 +2098,8 @@ struct net_device {
+@@ -2099,6 +2099,8 @@ struct net_device {
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;
diff --git a/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch b/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch
index 67cff4d22b..ca42728784 100644
--- a/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch
+++ b/target/linux/generic/pending-5.15/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch
@@ -10,7 +10,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -3099,8 +3099,8 @@ static irqreturn_t mtk_handle_irq_rx(int
+@@ -3100,8 +3100,8 @@ static irqreturn_t mtk_handle_irq_rx(int
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
@@ -20,7 +20,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
}
return IRQ_HANDLED;
-@@ -3112,8 +3112,8 @@ static irqreturn_t mtk_handle_irq_tx(int
+@@ -3113,8 +3113,8 @@ static irqreturn_t mtk_handle_irq_tx(int
eth->tx_events++;
if (likely(napi_schedule_prep(&eth->tx_napi))) {
@@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
}
return IRQ_HANDLED;
-@@ -4887,6 +4887,8 @@ static int mtk_probe(struct platform_dev
+@@ -4888,6 +4888,8 @@ static int mtk_probe(struct platform_dev
* for NAPI to work
*/
init_dummy_netdev(&eth->dummy_dev);
diff --git a/target/linux/generic/pending-5.15/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch b/target/linux/generic/pending-5.15/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch
index 56edb63234..c9c9d13c95 100644
--- a/target/linux/generic/pending-5.15/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch
+++ b/target/linux/generic/pending-5.15/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch
@@ -16,7 +16,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1516,12 +1516,28 @@ static void mtk_wake_queue(struct mtk_et
+@@ -1517,12 +1517,28 @@ static void mtk_wake_queue(struct mtk_et
}
}
@@ -45,7 +45,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
bool gso = false;
int tx_num;
-@@ -1543,6 +1559,18 @@ static netdev_tx_t mtk_start_xmit(struct
+@@ -1544,6 +1560,18 @@ static netdev_tx_t mtk_start_xmit(struct
return NETDEV_TX_BUSY;
}
@@ -64,7 +64,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
/* TSO: fill MSS info in tcp checksum field */
if (skb_is_gso(skb)) {
if (skb_cow_head(skb, 0)) {
-@@ -1558,8 +1586,14 @@ static netdev_tx_t mtk_start_xmit(struct
+@@ -1559,8 +1587,14 @@ static netdev_tx_t mtk_start_xmit(struct
}
}
diff --git a/target/linux/generic/pending-5.15/732-03-net-ethernet-mtk_eth_soc-fix-remaining-throughput-re.patch b/target/linux/generic/pending-5.15/732-03-net-ethernet-mtk_eth_soc-fix-remaining-throughput-re.patch
index cc6c9e91bf..82a76b569e 100644
--- a/target/linux/generic/pending-5.15/732-03-net-ethernet-mtk_eth_soc-fix-remaining-throughput-re.patch
+++ b/target/linux/generic/pending-5.15/732-03-net-ethernet-mtk_eth_soc-fix-remaining-throughput-re.patch
@@ -22,7 +22,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -723,6 +723,7 @@ static void mtk_mac_link_up(struct phyli
+@@ -724,6 +724,7 @@ static void mtk_mac_link_up(struct phyli
MAC_MCR_FORCE_RX_FC);
/* Configure speed */
@@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
switch (speed) {
case SPEED_2500:
case SPEED_1000:
-@@ -3292,6 +3293,9 @@ found:
+@@ -3293,6 +3294,9 @@ found:
if (dp->index >= MTK_QDMA_NUM_QUEUES)
return NOTIFY_DONE;
diff --git a/target/linux/generic/pending-5.15/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch b/target/linux/generic/pending-5.15/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch
index 104ce00b7e..ed0a544228 100644
--- a/target/linux/generic/pending-5.15/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch
+++ b/target/linux/generic/pending-5.15/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch
@@ -20,7 +20,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
-@@ -781,7 +782,9 @@ void __mtk_ppe_check_skb(struct mtk_ppe
+@@ -835,7 +836,9 @@ void __mtk_ppe_check_skb(struct mtk_ppe
skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
goto out;
diff --git a/target/linux/generic/pending-5.15/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch b/target/linux/generic/pending-5.15/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch
index b85b1364e1..29f565d312 100644
--- a/target/linux/generic/pending-5.15/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch
+++ b/target/linux/generic/pending-5.15/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch
@@ -249,7 +249,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
-
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -431,6 +431,30 @@ static void mtk_setup_bridge_switch(stru
+@@ -432,6 +432,30 @@ static void mtk_setup_bridge_switch(stru
MTK_GSW_CFG);
}
@@ -280,7 +280,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
-@@ -439,12 +463,20 @@ static struct phylink_pcs *mtk_mac_selec
+@@ -440,12 +464,20 @@ static struct phylink_pcs *mtk_mac_selec
struct mtk_eth *eth = mac->hw;
unsigned int sid;
@@ -307,7 +307,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
}
return NULL;
-@@ -500,7 +532,22 @@ static void mtk_mac_config(struct phylin
+@@ -501,7 +533,22 @@ static void mtk_mac_config(struct phylin
goto init_err;
}
break;
@@ -330,7 +330,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
break;
default:
goto err_phy;
-@@ -555,8 +602,6 @@ static void mtk_mac_config(struct phylin
+@@ -556,8 +603,6 @@ static void mtk_mac_config(struct phylin
val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
@@ -339,7 +339,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
}
/* SGMII */
-@@ -573,21 +618,40 @@ static void mtk_mac_config(struct phylin
+@@ -574,21 +619,40 @@ static void mtk_mac_config(struct phylin
/* Save the syscfg0 value for mac_finish */
mac->syscfg0 = val;
@@ -387,7 +387,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
return;
err_phy:
-@@ -633,10 +697,13 @@ static void mtk_mac_link_down(struct phy
+@@ -634,10 +698,13 @@ static void mtk_mac_link_down(struct phy
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
@@ -404,7 +404,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
}
static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
-@@ -708,13 +775,11 @@ static void mtk_set_queue_speed(struct m
+@@ -709,13 +776,11 @@ static void mtk_set_queue_speed(struct m
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
}
@@ -422,7 +422,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
u32 mcr;
mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
-@@ -748,6 +813,55 @@ static void mtk_mac_link_up(struct phyli
+@@ -749,6 +814,55 @@ static void mtk_mac_link_up(struct phyli
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
@@ -478,7 +478,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
static const struct phylink_mac_ops mtk_phylink_ops = {
.validate = phylink_generic_validate,
.mac_select_pcs = mtk_mac_select_pcs,
-@@ -4562,8 +4676,21 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4563,8 +4677,21 @@ static int mtk_add_mac(struct mtk_eth *e
phy_interface_zero(mac->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
mac->phylink_config.supported_interfaces);
@@ -500,7 +500,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
phy_mode, &mtk_phylink_ops);
-@@ -4756,6 +4883,13 @@ static int mtk_probe(struct platform_dev
+@@ -4757,6 +4884,13 @@ static int mtk_probe(struct platform_dev
if (err)
return err;
diff --git a/target/linux/generic/pending-5.15/760-net-core-add-optional-threading-for-backlog-processi.patch b/target/linux/generic/pending-5.15/760-net-core-add-optional-threading-for-backlog-processi.patch
index 62daef91b1..685a11f22d 100644
--- a/target/linux/generic/pending-5.15/760-net-core-add-optional-threading-for-backlog-processi.patch
+++ b/target/linux/generic/pending-5.15/760-net-core-add-optional-threading-for-backlog-processi.patch
@@ -20,7 +20,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
/**
* napi_disable - prevent NAPI from scheduling
-@@ -3363,6 +3364,7 @@ struct softnet_data {
+@@ -3364,6 +3365,7 @@ struct softnet_data {
unsigned int processed;
unsigned int time_squeeze;
unsigned int received_rps;
diff --git a/target/linux/generic/pending-6.1/680-NET-skip-GRO-for-foreign-MAC-addresses.patch b/target/linux/generic/pending-6.1/680-NET-skip-GRO-for-foreign-MAC-addresses.patch
index c6aae3b6cf..094d63dd89 100644
--- a/target/linux/generic/pending-6.1/680-NET-skip-GRO-for-foreign-MAC-addresses.patch
+++ b/target/linux/generic/pending-6.1/680-NET-skip-GRO-for-foreign-MAC-addresses.patch
@@ -11,7 +11,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2133,6 +2133,8 @@ struct net_device {
+@@ -2134,6 +2134,8 @@ struct net_device {
struct netdev_hw_addr_list mc;
struct netdev_hw_addr_list dev_addrs;
diff --git a/target/linux/generic/pending-6.1/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch b/target/linux/generic/pending-6.1/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch
index 842fef3a9c..be28fdc803 100644
--- a/target/linux/generic/pending-6.1/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch
+++ b/target/linux/generic/pending-6.1/702-net-ethernet-mtk_eth_soc-enable-threaded-NAPI.patch
@@ -10,7 +10,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -4941,6 +4941,8 @@ static int mtk_probe(struct platform_dev
+@@ -4942,6 +4942,8 @@ static int mtk_probe(struct platform_dev
* for NAPI to work
*/
init_dummy_netdev(&eth->dummy_dev);
diff --git a/target/linux/generic/pending-6.1/731-net-permit-ieee80211_ptr-even-with-no-CFG82111-suppo.patch b/target/linux/generic/pending-6.1/731-net-permit-ieee80211_ptr-even-with-no-CFG82111-suppo.patch
index 2c1ec55d09..df422e3a08 100644
--- a/target/linux/generic/pending-6.1/731-net-permit-ieee80211_ptr-even-with-no-CFG82111-suppo.patch
+++ b/target/linux/generic/pending-6.1/731-net-permit-ieee80211_ptr-even-with-no-CFG82111-suppo.patch
@@ -17,7 +17,7 @@ Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2168,7 +2168,7 @@ struct net_device {
+@@ -2169,7 +2169,7 @@ struct net_device {
#if IS_ENABLED(CONFIG_AX25)
void *ax25_ptr;
#endif
diff --git a/target/linux/generic/pending-6.1/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch b/target/linux/generic/pending-6.1/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch
index a1cc109050..fe8841dd3e 100644
--- a/target/linux/generic/pending-6.1/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch
+++ b/target/linux/generic/pending-6.1/732-01-net-ethernet-mtk_eth_soc-work-around-issue-with-send.patch
@@ -16,7 +16,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -1562,12 +1562,28 @@ static void mtk_wake_queue(struct mtk_et
+@@ -1563,12 +1563,28 @@ static void mtk_wake_queue(struct mtk_et
}
}
@@ -45,7 +45,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
bool gso = false;
int tx_num;
-@@ -1589,6 +1605,18 @@ static netdev_tx_t mtk_start_xmit(struct
+@@ -1590,6 +1606,18 @@ static netdev_tx_t mtk_start_xmit(struct
return NETDEV_TX_BUSY;
}
@@ -64,7 +64,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
/* TSO: fill MSS info in tcp checksum field */
if (skb_is_gso(skb)) {
if (skb_cow_head(skb, 0)) {
-@@ -1604,8 +1632,14 @@ static netdev_tx_t mtk_start_xmit(struct
+@@ -1605,8 +1633,14 @@ static netdev_tx_t mtk_start_xmit(struct
}
}
diff --git a/target/linux/generic/pending-6.1/732-03-net-ethernet-mtk_eth_soc-fix-remaining-throughput-re.patch b/target/linux/generic/pending-6.1/732-03-net-ethernet-mtk_eth_soc-fix-remaining-throughput-re.patch
index 29cbcad699..757d2edb2c 100644
--- a/target/linux/generic/pending-6.1/732-03-net-ethernet-mtk_eth_soc-fix-remaining-throughput-re.patch
+++ b/target/linux/generic/pending-6.1/732-03-net-ethernet-mtk_eth_soc-fix-remaining-throughput-re.patch
@@ -22,7 +22,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -766,6 +766,7 @@ static void mtk_mac_link_up(struct phyli
+@@ -767,6 +767,7 @@ static void mtk_mac_link_up(struct phyli
MAC_MCR_FORCE_RX_FC);
/* Configure speed */
@@ -30,7 +30,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
switch (speed) {
case SPEED_2500:
case SPEED_1000:
-@@ -3348,6 +3349,9 @@ found:
+@@ -3349,6 +3350,9 @@ found:
if (dp->index >= MTK_QDMA_NUM_QUEUES)
return NOTIFY_DONE;
diff --git a/target/linux/generic/pending-6.1/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch b/target/linux/generic/pending-6.1/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch
index 72d5743402..61042c1ad0 100644
--- a/target/linux/generic/pending-6.1/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch
+++ b/target/linux/generic/pending-6.1/734-net-ethernet-mtk_eth_soc-ppe-fix-L2-offloading-with-.patch
@@ -20,7 +20,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_ppe.h"
-@@ -775,7 +776,9 @@ void __mtk_ppe_check_skb(struct mtk_ppe
+@@ -829,7 +830,9 @@ void __mtk_ppe_check_skb(struct mtk_ppe
skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
goto out;
diff --git a/target/linux/generic/pending-6.1/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch b/target/linux/generic/pending-6.1/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch
index bb760c1a69..23daa29998 100644
--- a/target/linux/generic/pending-6.1/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch
+++ b/target/linux/generic/pending-6.1/737-net-ethernet-mtk_eth_soc-add-paths-and-SerDes-modes-.patch
@@ -249,7 +249,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
-
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -474,6 +474,30 @@ static void mtk_setup_bridge_switch(stru
+@@ -475,6 +475,30 @@ static void mtk_setup_bridge_switch(stru
MTK_GSW_CFG);
}
@@ -280,7 +280,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
phy_interface_t interface)
{
-@@ -482,12 +506,20 @@ static struct phylink_pcs *mtk_mac_selec
+@@ -483,12 +507,20 @@ static struct phylink_pcs *mtk_mac_selec
struct mtk_eth *eth = mac->hw;
unsigned int sid;
@@ -307,7 +307,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
}
return NULL;
-@@ -543,7 +575,22 @@ static void mtk_mac_config(struct phylin
+@@ -544,7 +576,22 @@ static void mtk_mac_config(struct phylin
goto init_err;
}
break;
@@ -330,7 +330,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
break;
default:
goto err_phy;
-@@ -598,8 +645,6 @@ static void mtk_mac_config(struct phylin
+@@ -599,8 +646,6 @@ static void mtk_mac_config(struct phylin
val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
@@ -339,7 +339,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
}
/* SGMII */
-@@ -616,21 +661,40 @@ static void mtk_mac_config(struct phylin
+@@ -617,21 +662,40 @@ static void mtk_mac_config(struct phylin
/* Save the syscfg0 value for mac_finish */
mac->syscfg0 = val;
@@ -387,7 +387,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
return;
err_phy:
-@@ -676,10 +740,13 @@ static void mtk_mac_link_down(struct phy
+@@ -677,10 +741,13 @@ static void mtk_mac_link_down(struct phy
{
struct mtk_mac *mac = container_of(config, struct mtk_mac,
phylink_config);
@@ -404,7 +404,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
}
static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
-@@ -751,13 +818,11 @@ static void mtk_set_queue_speed(struct m
+@@ -752,13 +819,11 @@ static void mtk_set_queue_speed(struct m
mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
}
@@ -422,7 +422,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
u32 mcr;
mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
-@@ -791,6 +856,55 @@ static void mtk_mac_link_up(struct phyli
+@@ -792,6 +857,55 @@ static void mtk_mac_link_up(struct phyli
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
}
@@ -478,7 +478,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
static const struct phylink_mac_ops mtk_phylink_ops = {
.validate = phylink_generic_validate,
.mac_select_pcs = mtk_mac_select_pcs,
-@@ -4616,8 +4730,21 @@ static int mtk_add_mac(struct mtk_eth *e
+@@ -4617,8 +4731,21 @@ static int mtk_add_mac(struct mtk_eth *e
phy_interface_zero(mac->phylink_config.supported_interfaces);
__set_bit(PHY_INTERFACE_MODE_INTERNAL,
mac->phylink_config.supported_interfaces);
@@ -500,7 +500,7 @@ Signed-off-by: Daniel Golle <daniel@makrotopia.org>
phylink = phylink_create(&mac->phylink_config,
of_fwnode_handle(mac->of_node),
phy_mode, &mtk_phylink_ops);
-@@ -4810,6 +4937,13 @@ static int mtk_probe(struct platform_dev
+@@ -4811,6 +4938,13 @@ static int mtk_probe(struct platform_dev
if (err)
return err;
diff --git a/target/linux/generic/pending-6.1/760-net-core-add-optional-threading-for-backlog-processi.patch b/target/linux/generic/pending-6.1/760-net-core-add-optional-threading-for-backlog-processi.patch
index 0bfc2412b4..e50a3ef699 100644
--- a/target/linux/generic/pending-6.1/760-net-core-add-optional-threading-for-backlog-processi.patch
+++ b/target/linux/generic/pending-6.1/760-net-core-add-optional-threading-for-backlog-processi.patch
@@ -20,7 +20,7 @@ Signed-off-by: Felix Fietkau <nbd@nbd.name>
/**
* napi_disable - prevent NAPI from scheduling
-@@ -3128,6 +3129,7 @@ struct softnet_data {
+@@ -3129,6 +3130,7 @@ struct softnet_data {
unsigned int processed;
unsigned int time_squeeze;
unsigned int received_rps;
diff --git a/target/linux/mediatek/patches-5.15/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch b/target/linux/mediatek/patches-5.15/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch
index 4c144a7b1c..3f73277aa6 100644
--- a/target/linux/mediatek/patches-5.15/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch
+++ b/target/linux/mediatek/patches-5.15/703-v5.17-net-ethernet-mtk_eth_soc-implement-Clause-45-MDIO-ac.patch
@@ -20,7 +20,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -323,13 +323,35 @@ static int _mtk_mdio_write(struct mtk_et
+@@ -324,13 +324,35 @@ static int _mtk_mdio_write(struct mtk_et
if (ret < 0)
return ret;
@@ -63,7 +63,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
-@@ -346,12 +368,33 @@ static int _mtk_mdio_read(struct mtk_eth
+@@ -347,12 +369,33 @@ static int _mtk_mdio_read(struct mtk_eth
if (ret < 0)
return ret;
@@ -103,7 +103,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
ret = mtk_mdio_busy_wait(eth);
if (ret < 0)
-@@ -898,6 +941,7 @@ static int mtk_mdio_init(struct mtk_eth
+@@ -899,6 +942,7 @@ static int mtk_mdio_init(struct mtk_eth
eth->mii_bus->name = "mdio";
eth->mii_bus->read = mtk_mdio_read;
eth->mii_bus->write = mtk_mdio_write;
diff --git a/target/linux/mediatek/patches-5.15/940-net-ethernet-mtk_wed-rename-mtk_wed_get_memory_regio.patch b/target/linux/mediatek/patches-5.15/940-net-ethernet-mtk_wed-rename-mtk_wed_get_memory_regio.patch
index 2fe565f3b2..30be53518a 100644
--- a/target/linux/mediatek/patches-5.15/940-net-ethernet-mtk_wed-rename-mtk_wed_get_memory_regio.patch
+++ b/target/linux/mediatek/patches-5.15/940-net-ethernet-mtk_wed-rename-mtk_wed_get_memory_regio.patch
@@ -15,30 +15,23 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
-@@ -215,8 +215,8 @@ int mtk_wed_mcu_msg_update(struct mtk_we
+@@ -234,8 +234,8 @@ int mtk_wed_mcu_msg_update(struct mtk_we
}
static int
--mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
+-mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index,
- struct mtk_wed_wo_memory_region *region)
-+mtk_wed_get_reserved_memory_region(struct mtk_wed_wo *wo,
++mtk_wed_get_reserved_memory_region(struct mtk_wed_hw *hw, int index,
+ struct mtk_wed_wo_memory_region *region)
{
struct reserved_mem *rmem;
struct device_node *np;
-@@ -311,13 +311,13 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+@@ -321,7 +321,7 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ if (index < 0)
+ continue;
- /* load firmware region metadata */
- for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
-- ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
-+ ret = mtk_wed_get_reserved_memory_region(wo, &mem_region[i]);
+- ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]);
++ ret = mtk_wed_get_reserved_memory_region(wo->hw, index, &mem_region[i]);
if (ret)
return ret;
}
-
- wo->boot.name = "wo-boot";
-- ret = mtk_wed_get_memory_region(wo, &wo->boot);
-+ ret = mtk_wed_get_reserved_memory_region(wo, &wo->boot);
- if (ret)
- return ret;
-
diff --git a/target/linux/mediatek/patches-5.15/942-net-ethernet-mtk_wed-move-cpuboot-in-a-dedicated-dts.patch b/target/linux/mediatek/patches-5.15/942-net-ethernet-mtk_wed-move-cpuboot-in-a-dedicated-dts.patch
index 9de4ffa433..b4bea2087b 100644
--- a/target/linux/mediatek/patches-5.15/942-net-ethernet-mtk_wed-move-cpuboot-in-a-dedicated-dts.patch
+++ b/target/linux/mediatek/patches-5.15/942-net-ethernet-mtk_wed-move-cpuboot-in-a-dedicated-dts.patch
@@ -23,15 +23,15 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
-@@ -18,12 +18,23 @@
+@@ -34,12 +34,23 @@ static struct mtk_wed_wo_memory_region m
static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
{
-- return readl(wo->boot.addr + reg);
+- return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ u32 val;
+
+ if (!wo->boot_regmap)
-+ return readl(wo->boot.addr + reg);
++ return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+
+ if (regmap_read(wo->boot_regmap, reg, &val))
+ val = ~0;
@@ -41,49 +41,41 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
-- writel(val, wo->boot.addr + reg);
+- writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ if (wo->boot_regmap)
+ regmap_write(wo->boot_regmap, reg, val);
+ else
-+ writel(val, wo->boot.addr + reg);
++ writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
}
static struct sk_buff *
-@@ -316,10 +327,21 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
- return ret;
- }
+@@ -313,6 +324,9 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ u32 val, boot_cr;
+ int ret, i;
-- wo->boot.name = "wo-boot";
-- ret = mtk_wed_get_reserved_memory_region(wo, &wo->boot);
-- if (ret)
-- return ret;
+ wo->boot_regmap = syscon_regmap_lookup_by_phandle(wo->hw->node,
+ "mediatek,wo-cpuboot");
-+ if (IS_ERR(wo->boot_regmap)) {
-+ if (wo->boot_regmap != ERR_PTR(-ENODEV))
-+ return PTR_ERR(wo->boot_regmap);
+
-+ /* For backward compatibility, we need to check if cpu_boot
-+ * is defined through reserved memory property.
-+ */
-+ wo->boot_regmap = NULL;
-+ wo->boot.name = "wo-boot";
-+ ret = mtk_wed_get_reserved_memory_region(wo, &wo->boot);
-+ if (ret)
-+ return ret;
-+ }
+ /* load firmware region metadata */
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+ int index = of_property_match_string(wo->hw->node,
+@@ -321,6 +335,9 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ if (index < 0)
+ continue;
- /* set dummy cr */
- wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
++ if (index == MTK_WED_WO_REGION_BOOT && !IS_ERR(wo->boot_regmap))
++ continue;
++
+ ret = mtk_wed_get_reserved_memory_region(wo->hw, index, &mem_region[i]);
+ if (ret)
+ return ret;
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
-@@ -228,7 +228,8 @@ struct mtk_wed_wo_queue {
-
+@@ -231,6 +231,7 @@ struct mtk_wed_wo_queue {
struct mtk_wed_wo {
struct mtk_wed_hw *hw;
-- struct mtk_wed_wo_memory_region boot;
-+ struct mtk_wed_wo_memory_region boot; /* backward compatibility */
-+ struct regmap *boot_regmap;
++ struct regmap *boot_regmap;
struct mtk_wed_wo_queue q_tx;
struct mtk_wed_wo_queue q_rx;
+
diff --git a/target/linux/mediatek/patches-5.15/943-net-ethernet-mtk_wed-move-ilm-a-dedicated-dts-node.patch b/target/linux/mediatek/patches-5.15/943-net-ethernet-mtk_wed-move-ilm-a-dedicated-dts-node.patch
index 7b6c5d1e27..b4ba5b0d2d 100644
--- a/target/linux/mediatek/patches-5.15/943-net-ethernet-mtk_wed-move-ilm-a-dedicated-dts-node.patch
+++ b/target/linux/mediatek/patches-5.15/943-net-ethernet-mtk_wed-move-ilm-a-dedicated-dts-node.patch
@@ -20,72 +20,67 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
-@@ -300,6 +300,52 @@ next:
+@@ -316,6 +316,39 @@ next:
}
static int
-+mtk_wed_mcu_load_memory_regions(struct mtk_wed_wo *wo,
-+ struct mtk_wed_wo_memory_region *region)
++mtk_wed_mcu_load_ilm(struct mtk_wed_wo *wo)
+{
++ struct mtk_wed_wo_memory_region *ilm_region;
++ struct resource res;
+ struct device_node *np;
+ int ret;
+
-+ /* firmware EMI memory region */
-+ ret = mtk_wed_get_reserved_memory_region(wo,
-+ &region[MTK_WED_WO_REGION_EMI]);
-+ if (ret)
-+ return ret;
-+
-+ /* firmware DATA memory region */
-+ ret = mtk_wed_get_reserved_memory_region(wo,
-+ &region[MTK_WED_WO_REGION_DATA]);
-+ if (ret)
-+ return ret;
-+
+ np = of_parse_phandle(wo->hw->node, "mediatek,wo-ilm", 0);
-+ if (np) {
-+ struct mtk_wed_wo_memory_region *ilm_region;
-+ struct resource res;
++ if (!np)
++ return 0;
++
++ ret = of_address_to_resource(np, 0, &res);
++ of_node_put(np);
+
-+ ret = of_address_to_resource(np, 0, &res);
-+ of_node_put(np);
++ if (ret < 0)
++ return ret;
+
-+ if (ret < 0)
-+ return ret;
++ ilm_region = &mem_region[MTK_WED_WO_REGION_ILM];
++ ilm_region->phy_addr = res.start;
++ ilm_region->size = resource_size(&res);
++ ilm_region->addr = devm_ioremap(wo->hw->dev, res.start,
++ resource_size(&res));
+
-+ ilm_region = &region[MTK_WED_WO_REGION_ILM];
-+ ilm_region->phy_addr = res.start;
-+ ilm_region->size = resource_size(&res);
-+ ilm_region->addr = devm_ioremap(wo->hw->dev, res.start,
-+ resource_size(&res));
++ if (!IS_ERR(ilm_region->addr))
++ return 0;
+
-+ return IS_ERR(ilm_region->addr) ? PTR_ERR(ilm_region->addr) : 0;
-+ }
++ ret = PTR_ERR(ilm_region->addr);
++ ilm_region->addr = NULL;
+
-+ /* For backward compatibility, we need to check if ILM
-+ * node is defined through reserved memory property.
-+ */
-+ return mtk_wed_get_reserved_memory_region(wo,
-+ &region[MTK_WED_WO_REGION_ILM]);
++ return ret;
+}
+
+static int
mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
{
- static struct mtk_wed_wo_memory_region mem_region[] = {
-@@ -320,12 +366,9 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ const struct mtk_wed_fw_trailer *trailer;
+@@ -324,14 +357,20 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
u32 val, boot_cr;
int ret, i;
-- /* load firmware region metadata */
-- for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
-- ret = mtk_wed_get_reserved_memory_region(wo, &mem_region[i]);
-- if (ret)
-- return ret;
-- }
-+ ret = mtk_wed_mcu_load_memory_regions(wo, mem_region);
-+ if (ret)
-+ return ret;
-
++ mtk_wed_mcu_load_ilm(wo);
wo->boot_regmap = syscon_regmap_lookup_by_phandle(wo->hw->node,
"mediatek,wo-cpuboot");
+
+ /* load firmware region metadata */
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+- int index = of_property_match_string(wo->hw->node,
+- "memory-region-names",
+- mem_region[i].name);
++ int index;
++
++ if (mem_region[i].addr)
++ continue;
++
++ index = of_property_match_string(wo->hw->node,
++ "memory-region-names",
++ mem_region[i].name);
+ if (index < 0)
+ continue;
+
diff --git a/target/linux/mediatek/patches-5.15/944-net-ethernet-mtk_wed-move-dlm-a-dedicated-dts-node.patch b/target/linux/mediatek/patches-5.15/944-net-ethernet-mtk_wed-move-dlm-a-dedicated-dts-node.patch
index eed97b4769..c92fcd43ce 100644
--- a/target/linux/mediatek/patches-5.15/944-net-ethernet-mtk_wed-move-dlm-a-dedicated-dts-node.patch
+++ b/target/linux/mediatek/patches-5.15/944-net-ethernet-mtk_wed-move-dlm-a-dedicated-dts-node.patch
@@ -22,7 +22,7 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -821,6 +821,24 @@ mtk_wed_rro_alloc(struct mtk_wed_device
+@@ -1320,6 +1320,24 @@ mtk_wed_rro_alloc(struct mtk_wed_device
struct device_node *np;
int index;
@@ -47,7 +47,7 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
index = of_property_match_string(dev->hw->node, "memory-region-names",
"wo-dlm");
if (index < 0)
-@@ -837,6 +855,7 @@ mtk_wed_rro_alloc(struct mtk_wed_device
+@@ -1336,6 +1354,7 @@ mtk_wed_rro_alloc(struct mtk_wed_device
return -ENODEV;
dev->rro.miod_phys = rmem->base;
diff --git a/target/linux/mediatek/patches-6.1/940-net-ethernet-mtk_wed-rename-mtk_wed_get_memory_regio.patch b/target/linux/mediatek/patches-6.1/940-net-ethernet-mtk_wed-rename-mtk_wed_get_memory_regio.patch
index 2fe565f3b2..30be53518a 100644
--- a/target/linux/mediatek/patches-6.1/940-net-ethernet-mtk_wed-rename-mtk_wed_get_memory_regio.patch
+++ b/target/linux/mediatek/patches-6.1/940-net-ethernet-mtk_wed-rename-mtk_wed_get_memory_regio.patch
@@ -15,30 +15,23 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
-@@ -215,8 +215,8 @@ int mtk_wed_mcu_msg_update(struct mtk_we
+@@ -234,8 +234,8 @@ int mtk_wed_mcu_msg_update(struct mtk_we
}
static int
--mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
+-mtk_wed_get_memory_region(struct mtk_wed_hw *hw, int index,
- struct mtk_wed_wo_memory_region *region)
-+mtk_wed_get_reserved_memory_region(struct mtk_wed_wo *wo,
++mtk_wed_get_reserved_memory_region(struct mtk_wed_hw *hw, int index,
+ struct mtk_wed_wo_memory_region *region)
{
struct reserved_mem *rmem;
struct device_node *np;
-@@ -311,13 +311,13 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+@@ -321,7 +321,7 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ if (index < 0)
+ continue;
- /* load firmware region metadata */
- for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
-- ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
-+ ret = mtk_wed_get_reserved_memory_region(wo, &mem_region[i]);
+- ret = mtk_wed_get_memory_region(wo->hw, index, &mem_region[i]);
++ ret = mtk_wed_get_reserved_memory_region(wo->hw, index, &mem_region[i]);
if (ret)
return ret;
}
-
- wo->boot.name = "wo-boot";
-- ret = mtk_wed_get_memory_region(wo, &wo->boot);
-+ ret = mtk_wed_get_reserved_memory_region(wo, &wo->boot);
- if (ret)
- return ret;
-
diff --git a/target/linux/mediatek/patches-6.1/942-net-ethernet-mtk_wed-move-cpuboot-in-a-dedicated-dts.patch b/target/linux/mediatek/patches-6.1/942-net-ethernet-mtk_wed-move-cpuboot-in-a-dedicated-dts.patch
index 9de4ffa433..b4bea2087b 100644
--- a/target/linux/mediatek/patches-6.1/942-net-ethernet-mtk_wed-move-cpuboot-in-a-dedicated-dts.patch
+++ b/target/linux/mediatek/patches-6.1/942-net-ethernet-mtk_wed-move-cpuboot-in-a-dedicated-dts.patch
@@ -23,15 +23,15 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
-@@ -18,12 +18,23 @@
+@@ -34,12 +34,23 @@ static struct mtk_wed_wo_memory_region m
static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
{
-- return readl(wo->boot.addr + reg);
+- return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ u32 val;
+
+ if (!wo->boot_regmap)
-+ return readl(wo->boot.addr + reg);
++ return readl(mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+
+ if (regmap_read(wo->boot_regmap, reg, &val))
+ val = ~0;
@@ -41,49 +41,41 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
{
-- writel(val, wo->boot.addr + reg);
+- writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
+ if (wo->boot_regmap)
+ regmap_write(wo->boot_regmap, reg, val);
+ else
-+ writel(val, wo->boot.addr + reg);
++ writel(val, mem_region[MTK_WED_WO_REGION_BOOT].addr + reg);
}
static struct sk_buff *
-@@ -316,10 +327,21 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
- return ret;
- }
+@@ -313,6 +324,9 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ u32 val, boot_cr;
+ int ret, i;
-- wo->boot.name = "wo-boot";
-- ret = mtk_wed_get_reserved_memory_region(wo, &wo->boot);
-- if (ret)
-- return ret;
+ wo->boot_regmap = syscon_regmap_lookup_by_phandle(wo->hw->node,
+ "mediatek,wo-cpuboot");
-+ if (IS_ERR(wo->boot_regmap)) {
-+ if (wo->boot_regmap != ERR_PTR(-ENODEV))
-+ return PTR_ERR(wo->boot_regmap);
+
-+ /* For backward compatibility, we need to check if cpu_boot
-+ * is defined through reserved memory property.
-+ */
-+ wo->boot_regmap = NULL;
-+ wo->boot.name = "wo-boot";
-+ ret = mtk_wed_get_reserved_memory_region(wo, &wo->boot);
-+ if (ret)
-+ return ret;
-+ }
+ /* load firmware region metadata */
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+ int index = of_property_match_string(wo->hw->node,
+@@ -321,6 +335,9 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ if (index < 0)
+ continue;
- /* set dummy cr */
- wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
++ if (index == MTK_WED_WO_REGION_BOOT && !IS_ERR(wo->boot_regmap))
++ continue;
++
+ ret = mtk_wed_get_reserved_memory_region(wo->hw, index, &mem_region[i]);
+ if (ret)
+ return ret;
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.h
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
-@@ -228,7 +228,8 @@ struct mtk_wed_wo_queue {
-
+@@ -231,6 +231,7 @@ struct mtk_wed_wo_queue {
struct mtk_wed_wo {
struct mtk_wed_hw *hw;
-- struct mtk_wed_wo_memory_region boot;
-+ struct mtk_wed_wo_memory_region boot; /* backward compatibility */
-+ struct regmap *boot_regmap;
++ struct regmap *boot_regmap;
struct mtk_wed_wo_queue q_tx;
struct mtk_wed_wo_queue q_rx;
+
diff --git a/target/linux/mediatek/patches-6.1/943-net-ethernet-mtk_wed-move-ilm-a-dedicated-dts-node.patch b/target/linux/mediatek/patches-6.1/943-net-ethernet-mtk_wed-move-ilm-a-dedicated-dts-node.patch
index 7b6c5d1e27..b4ba5b0d2d 100644
--- a/target/linux/mediatek/patches-6.1/943-net-ethernet-mtk_wed-move-ilm-a-dedicated-dts-node.patch
+++ b/target/linux/mediatek/patches-6.1/943-net-ethernet-mtk_wed-move-ilm-a-dedicated-dts-node.patch
@@ -20,72 +20,67 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
--- a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
-@@ -300,6 +300,52 @@ next:
+@@ -316,6 +316,39 @@ next:
}
static int
-+mtk_wed_mcu_load_memory_regions(struct mtk_wed_wo *wo,
-+ struct mtk_wed_wo_memory_region *region)
++mtk_wed_mcu_load_ilm(struct mtk_wed_wo *wo)
+{
++ struct mtk_wed_wo_memory_region *ilm_region;
++ struct resource res;
+ struct device_node *np;
+ int ret;
+
-+ /* firmware EMI memory region */
-+ ret = mtk_wed_get_reserved_memory_region(wo,
-+ &region[MTK_WED_WO_REGION_EMI]);
-+ if (ret)
-+ return ret;
-+
-+ /* firmware DATA memory region */
-+ ret = mtk_wed_get_reserved_memory_region(wo,
-+ &region[MTK_WED_WO_REGION_DATA]);
-+ if (ret)
-+ return ret;
-+
+ np = of_parse_phandle(wo->hw->node, "mediatek,wo-ilm", 0);
-+ if (np) {
-+ struct mtk_wed_wo_memory_region *ilm_region;
-+ struct resource res;
++ if (!np)
++ return 0;
++
++ ret = of_address_to_resource(np, 0, &res);
++ of_node_put(np);
+
-+ ret = of_address_to_resource(np, 0, &res);
-+ of_node_put(np);
++ if (ret < 0)
++ return ret;
+
-+ if (ret < 0)
-+ return ret;
++ ilm_region = &mem_region[MTK_WED_WO_REGION_ILM];
++ ilm_region->phy_addr = res.start;
++ ilm_region->size = resource_size(&res);
++ ilm_region->addr = devm_ioremap(wo->hw->dev, res.start,
++ resource_size(&res));
+
-+ ilm_region = &region[MTK_WED_WO_REGION_ILM];
-+ ilm_region->phy_addr = res.start;
-+ ilm_region->size = resource_size(&res);
-+ ilm_region->addr = devm_ioremap(wo->hw->dev, res.start,
-+ resource_size(&res));
++ if (!IS_ERR(ilm_region->addr))
++ return 0;
+
-+ return IS_ERR(ilm_region->addr) ? PTR_ERR(ilm_region->addr) : 0;
-+ }
++ ret = PTR_ERR(ilm_region->addr);
++ ilm_region->addr = NULL;
+
-+ /* For backward compatibility, we need to check if ILM
-+ * node is defined through reserved memory property.
-+ */
-+ return mtk_wed_get_reserved_memory_region(wo,
-+ &region[MTK_WED_WO_REGION_ILM]);
++ return ret;
+}
+
+static int
mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
{
- static struct mtk_wed_wo_memory_region mem_region[] = {
-@@ -320,12 +366,9 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
+ const struct mtk_wed_fw_trailer *trailer;
+@@ -324,14 +357,20 @@ mtk_wed_mcu_load_firmware(struct mtk_wed
u32 val, boot_cr;
int ret, i;
-- /* load firmware region metadata */
-- for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
-- ret = mtk_wed_get_reserved_memory_region(wo, &mem_region[i]);
-- if (ret)
-- return ret;
-- }
-+ ret = mtk_wed_mcu_load_memory_regions(wo, mem_region);
-+ if (ret)
-+ return ret;
-
++ mtk_wed_mcu_load_ilm(wo);
wo->boot_regmap = syscon_regmap_lookup_by_phandle(wo->hw->node,
"mediatek,wo-cpuboot");
+
+ /* load firmware region metadata */
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+- int index = of_property_match_string(wo->hw->node,
+- "memory-region-names",
+- mem_region[i].name);
++ int index;
++
++ if (mem_region[i].addr)
++ continue;
++
++ index = of_property_match_string(wo->hw->node,
++ "memory-region-names",
++ mem_region[i].name);
+ if (index < 0)
+ continue;
+
diff --git a/target/linux/mediatek/patches-6.1/944-net-ethernet-mtk_wed-move-dlm-a-dedicated-dts-node.patch b/target/linux/mediatek/patches-6.1/944-net-ethernet-mtk_wed-move-dlm-a-dedicated-dts-node.patch
index 209580d64a..c92fcd43ce 100644
--- a/target/linux/mediatek/patches-6.1/944-net-ethernet-mtk_wed-move-dlm-a-dedicated-dts-node.patch
+++ b/target/linux/mediatek/patches-6.1/944-net-ethernet-mtk_wed-move-dlm-a-dedicated-dts-node.patch
@@ -22,7 +22,7 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
--- a/drivers/net/ethernet/mediatek/mtk_wed.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
-@@ -814,6 +814,24 @@ mtk_wed_rro_alloc(struct mtk_wed_device
+@@ -1320,6 +1320,24 @@ mtk_wed_rro_alloc(struct mtk_wed_device
struct device_node *np;
int index;
@@ -47,7 +47,7 @@ Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
index = of_property_match_string(dev->hw->node, "memory-region-names",
"wo-dlm");
if (index < 0)
-@@ -830,6 +848,7 @@ mtk_wed_rro_alloc(struct mtk_wed_device
+@@ -1336,6 +1354,7 @@ mtk_wed_rro_alloc(struct mtk_wed_device
return -ENODEV;
dev->rro.miod_phys = rmem->base;