From a69839d4327d053b18d8e1b0e7ddeee78db78f4f Mon Sep 17 00:00:00 2001 From: Long Li Date: Thu, 3 Nov 2022 12:16:19 -0700 Subject: net: mana: Add support for auxiliary device In preparation for supporting MANA RDMA driver, add support for auxiliary device in the Ethernet driver. The RDMA device is modeled as an auxiliary device to the Ethernet device. Reviewed-by: Dexuan Cui Signed-off-by: Long Li Link: https://lore.kernel.org/r/1667502990-2559-2-git-send-email-longli@linuxonhyperv.com Acked-by: Haiyang Zhang Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/microsoft/Kconfig | 1 + drivers/net/ethernet/microsoft/mana/gdma.h | 2 + .../net/ethernet/microsoft/mana/mana_auxiliary.h | 10 +++ drivers/net/ethernet/microsoft/mana/mana_en.c | 83 +++++++++++++++++++++- 4 files changed, 95 insertions(+), 1 deletion(-) create mode 100644 drivers/net/ethernet/microsoft/mana/mana_auxiliary.h (limited to 'drivers/net/ethernet/microsoft') diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig index fe4e7a7d9c0b..090e6b983243 100644 --- a/drivers/net/ethernet/microsoft/Kconfig +++ b/drivers/net/ethernet/microsoft/Kconfig @@ -19,6 +19,7 @@ config MICROSOFT_MANA tristate "Microsoft Azure Network Adapter (MANA) support" depends on PCI_MSI && X86_64 depends on PCI_HYPERV + select AUXILIARY_BUS help This driver supports Microsoft Azure Network Adapter (MANA). So far, the driver is only supported on X86_64. diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h index 4a6efe6ada08..f321a2616d03 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma.h +++ b/drivers/net/ethernet/microsoft/mana/gdma.h @@ -204,6 +204,8 @@ struct gdma_dev { /* GDMA driver specific pointer */ void *driver_data; + + struct auxiliary_device *adev; }; #define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE diff --git a/drivers/net/ethernet/microsoft/mana/mana_auxiliary.h b/drivers/net/ethernet/microsoft/mana/mana_auxiliary.h new file mode 100644 index 000000000000..373d59756846 --- /dev/null +++ b/drivers/net/ethernet/microsoft/mana/mana_auxiliary.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2022, Microsoft Corporation. 
*/ + +#include "mana.h" +#include + +struct mana_adev { + struct auxiliary_device adev; + struct gdma_dev *mdev; +}; diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 9259a74eca40..8751e475d1ba 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -13,6 +13,19 @@ #include #include "mana.h" +#include "mana_auxiliary.h" + +static DEFINE_IDA(mana_adev_ida); + +static int mana_adev_idx_alloc(void) +{ + return ida_alloc(&mana_adev_ida, GFP_KERNEL); +} + +static void mana_adev_idx_free(int idx) +{ + ida_free(&mana_adev_ida, idx); +} /* Microsoft Azure Network Adapter (MANA) functions */ @@ -2106,6 +2119,69 @@ free_net: return err; } +static void adev_release(struct device *dev) +{ + struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev); + + kfree(madev); +} + +static void remove_adev(struct gdma_dev *gd) +{ + struct auxiliary_device *adev = gd->adev; + int id = adev->id; + + auxiliary_device_delete(adev); + auxiliary_device_uninit(adev); + + mana_adev_idx_free(id); + gd->adev = NULL; +} + +static int add_adev(struct gdma_dev *gd) +{ + struct auxiliary_device *adev; + struct mana_adev *madev; + int ret; + + madev = kzalloc(sizeof(*madev), GFP_KERNEL); + if (!madev) + return -ENOMEM; + + adev = &madev->adev; + ret = mana_adev_idx_alloc(); + if (ret < 0) + goto idx_fail; + adev->id = ret; + + adev->name = "rdma"; + adev->dev.parent = gd->gdma_context->dev; + adev->dev.release = adev_release; + madev->mdev = gd; + + ret = auxiliary_device_init(adev); + if (ret) + goto init_fail; + + ret = auxiliary_device_add(adev); + if (ret) + goto add_fail; + + gd->adev = adev; + return 0; + +add_fail: + auxiliary_device_uninit(adev); + +init_fail: + mana_adev_idx_free(adev->id); + +idx_fail: + kfree(madev); + + return ret; +} + int mana_probe(struct gdma_dev *gd, bool resuming) { struct gdma_context *gc = gd->gdma_context; @@ -2173,6 +2249,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming) break; } } + + err = add_adev(gd); out: if (err) mana_remove(gd, false); @@ -2189,6 +2267,10 @@ void mana_remove(struct gdma_dev *gd, bool suspending) int err; int i; + /* adev currently doesn't support suspending, always remove it */ + if (gd->adev) + remove_adev(gd); + for (i = 0; i < ac->num_ports; i++) { ndev = ac->ports[i]; if (!ndev) { @@ -2221,7 +2303,6 @@ void mana_remove(struct gdma_dev *gd, bool suspending) } mana_destroy_eq(ac); - out: mana_gd_deregister_device(gd); -- cgit v1.2.3 From f3dc096246091048677c45cfc0e24ad512927b52 Mon Sep 17 00:00:00 2001 From: Long Li Date: Thu, 3 Nov 2022 12:16:20 -0700 Subject: net: mana: Record the physical address for doorbell page region For supporting RDMA device with multiple user contexts with their individual doorbell pages, record the start address of doorbell page region for use by the RDMA driver to allocate user context doorbell IDs. 
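For illustration (not part of the patch): the intended consumer of phys_db_page_base is the RDMA driver's mmap path. A minimal sketch, assuming doorbell pages are laid out contiguously at db_page_size strides and indexed by the doorbell ID assigned to a user context; the helper name is hypothetical.

/* Hypothetical helper: physical address of a user context's doorbell
 * page, derived from the base recorded by this patch. Assumes a
 * contiguous layout indexed by doorbell ID.
 */
static phys_addr_t mana_user_doorbell_pa(struct gdma_context *gc,
					 u32 doorbell_id)
{
	return gc->phys_db_page_base +
	       (phys_addr_t)gc->db_page_size * doorbell_id;
}

The RDMA driver would hand such an address to something like rdma_user_mmap_io() so each user context can map and ring its own doorbell page directly.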
Reviewed-by: Dexuan Cui Signed-off-by: Long Li Link: https://lore.kernel.org/r/1667502990-2559-3-git-send-email-longli@linuxonhyperv.com Acked-by: Haiyang Zhang Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/microsoft/mana/gdma.h | 2 ++ drivers/net/ethernet/microsoft/mana/gdma_main.c | 4 ++++ 2 files changed, 6 insertions(+) (limited to 'drivers/net/ethernet/microsoft') diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h index f321a2616d03..72eaec2470c0 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma.h +++ b/drivers/net/ethernet/microsoft/mana/gdma.h @@ -351,9 +351,11 @@ struct gdma_context { u32 test_event_eq_id; bool is_pf; + phys_addr_t bar0_pa; void __iomem *bar0_va; void __iomem *shm_base; void __iomem *db_page_base; + phys_addr_t phys_db_page_base; u32 db_page_size; /* Shared memory chanenl (used to bootstrap HWC) */ diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index a6f99b4344d9..9a698526e2a3 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -44,6 +44,9 @@ static void mana_gd_init_vf_regs(struct pci_dev *pdev) gc->db_page_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET); + gc->phys_db_page_base = gc->bar0_pa + + mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET); + gc->shm_base = gc->bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET); } @@ -1377,6 +1380,7 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mutex_init(&gc->eq_test_event_mutex); pci_set_drvdata(pdev, gc); + gc->bar0_pa = pci_resource_start(pdev, 0); bar0_va = pci_iomap(pdev, bar, 0); if (!bar0_va) -- cgit v1.2.3 From b5c1c9855be3b5b978fde975a63df3cabc273faa Mon Sep 17 00:00:00 2001 From: Long Li Date: Thu, 3 Nov 2022 12:16:21 -0700 Subject: net: mana: Handle vport sharing between devices For outgoing packets, the PF requires the VF to configure the vport with corresponding protection domain and doorbell ID for the kernel or user context. The vport can't be shared between different contexts. Implement the logic to exclusively take over the vport by either the Ethernet device or RDMA device. 
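To make the exclusive-use contract concrete, here is a minimal sketch (not part of the patch) of how a second consumer, such as the RDMA driver creating a RAW QP, would claim and release the vport with the function pair added below; the function and its pd_id/doorbell_id parameters are hypothetical stand-ins for the caller's own context.

/* Sketch only: exclusive vport ownership around a RAW QP's lifetime. */
static int example_raw_qp_vport(struct mana_port_context *apc,
				u32 pd_id, u32 doorbell_id)
{
	int err;

	/* Fails with -EBUSY if another user (e.g. the Ethernet driver)
	 * already owns the vport.
	 */
	err = mana_cfg_vport(apc, pd_id, doorbell_id);
	if (err)
		return err;

	/* ... create and use the RAW QP on this vport ... */

	mana_uncfg_vport(apc);	/* drop the use count for other users */
	return 0;
}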
Reviewed-by: Dexuan Cui Signed-off-by: Long Li Link: https://lore.kernel.org/r/1667502990-2559-4-git-send-email-longli@linuxonhyperv.com Acked-by: Haiyang Zhang Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/microsoft/mana/mana.h | 7 ++++ drivers/net/ethernet/microsoft/mana/mana_en.c | 53 ++++++++++++++++++++++++++- 2 files changed, 58 insertions(+), 2 deletions(-) (limited to 'drivers/net/ethernet/microsoft') diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h index d58be64374c8..2883a08dbfb5 100644 --- a/drivers/net/ethernet/microsoft/mana/mana.h +++ b/drivers/net/ethernet/microsoft/mana/mana.h @@ -380,6 +380,10 @@ struct mana_port_context { mana_handle_t port_handle; mana_handle_t pf_filter_handle; + /* Mutex for sharing access to vport_use_count */ + struct mutex vport_mutex; + int vport_use_count; + u16 port_idx; bool port_is_up; @@ -631,4 +635,7 @@ struct mana_tx_package { struct gdma_posted_wqe_info wqe_info; }; +int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, + u32 doorbell_pg_id); +void mana_uncfg_vport(struct mana_port_context *apc); #endif /* _MANA_H */ diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 8751e475d1ba..efe14a343fd1 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -646,13 +646,48 @@ static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index, return 0; } -static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, - u32 doorbell_pg_id) +void mana_uncfg_vport(struct mana_port_context *apc) +{ + mutex_lock(&apc->vport_mutex); + apc->vport_use_count--; + WARN_ON(apc->vport_use_count < 0); + mutex_unlock(&apc->vport_mutex); +} +EXPORT_SYMBOL_NS(mana_uncfg_vport, NET_MANA); + +int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, + u32 doorbell_pg_id) { struct mana_config_vport_resp resp = {}; struct mana_config_vport_req req = {}; int err; + /* This function is used to program the Ethernet port in the hardware + * table. It can be called from the Ethernet driver or the RDMA driver. + * + * For Ethernet usage, the hardware supports only one active user on a + * physical port. The driver checks on the port usage before programming + * the hardware when creating the RAW QP (RDMA driver) or exposing the + * device to kernel NET layer (Ethernet driver). + * + * Because the RDMA driver doesn't know in advance which QP type the + * user will create, it exposes the device with all its ports. The user + * may not be able to create RAW QP on a port if this port is already + * in used by the Ethernet driver from the kernel. + * + * This physical port limitation only applies to the RAW QP. For RC QP, + * the hardware doesn't have this limitation. The user can create RC + * QPs on a physical port up to the hardware limits independent of the + * Ethernet usage on the same port. 
+	 */
+	mutex_lock(&apc->vport_mutex);
+	if (apc->vport_use_count > 0) {
+		mutex_unlock(&apc->vport_mutex);
+		return -EBUSY;
+	}
+	apc->vport_use_count++;
+	mutex_unlock(&apc->vport_mutex);
+
 	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX, sizeof(req), sizeof(resp));
 	req.vport = apc->port_handle;
@@ -679,9 +714,16 @@ static int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
 
 	apc->tx_shortform_allowed = resp.short_form_allowed;
 	apc->tx_vp_offset = resp.tx_vport_offset;
+
+	netdev_info(apc->ndev, "Configured vPort %llu PD %u DB %u\n",
+		    apc->port_handle, protection_dom_id, doorbell_pg_id);
 out:
+	if (err)
+		mana_uncfg_vport(apc);
+
 	return err;
 }
+EXPORT_SYMBOL_NS(mana_cfg_vport, NET_MANA);
 
 static int mana_cfg_vport_steering(struct mana_port_context *apc,
 				   enum TRI_STATE rx,
@@ -742,6 +784,9 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc,
 			   resp.hdr.status);
 		err = -EPROTO;
 	}
+
+	netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
+		    apc->port_handle, num_entries);
 out:
 	kfree(req);
 	return err;
@@ -1804,6 +1849,7 @@ static void mana_destroy_vport(struct mana_port_context *apc)
 	}
 
 	mana_destroy_txq(apc);
+	mana_uncfg_vport(apc);
 
 	if (gd->gdma_context->is_pf)
 		mana_pf_deregister_hw_vport(apc);
@@ -2076,6 +2122,9 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
 	apc->pf_filter_handle = INVALID_MANA_HANDLE;
 	apc->port_idx = port_idx;
 
+	mutex_init(&apc->vport_mutex);
+	apc->vport_use_count = 0;
+
 	ndev->netdev_ops = &mana_devops;
 	ndev->ethtool_ops = &mana_ethtool_ops;
 	ndev->mtu = ETH_DATA_LEN;
-- cgit v1.2.3

From 6fe254160bd033a1e62dbad9b734183b31144678 Mon Sep 17 00:00:00 2001
From: Ajay Sharma
Date: Thu, 3 Nov 2022 12:16:22 -0700
Subject: net: mana: Set the DMA device max segment size

MANA hardware doesn't have any restrictions on the DMA segment size; set it
to the max allowed value.

Signed-off-by: Ajay Sharma
Reviewed-by: Dexuan Cui
Signed-off-by: Long Li
Link: https://lore.kernel.org/r/1667502990-2559-5-git-send-email-longli@linuxonhyperv.com
Acked-by: Haiyang Zhang
Signed-off-by: Leon Romanovsky
---
 drivers/net/ethernet/microsoft/mana/gdma_main.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'drivers/net/ethernet/microsoft')

diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 9a698526e2a3..62089e112c05 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -1373,6 +1373,12 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto release_region;
 
+	err = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
+	if (err) {
+		dev_err(&pdev->dev, "Failed to set dma device segment size\n");
+		goto release_region;
+	}
+
 	err = -ENOMEM;
 	gc = vzalloc(sizeof(*gc));
 	if (!gc)
-- cgit v1.2.3

From 4c0ff7a106e16ab63e0b597557255c012f179578 Mon Sep 17 00:00:00 2001
From: Long Li
Date: Thu, 3 Nov 2022 12:16:23 -0700
Subject: net: mana: Export Work Queue functions for use by RDMA driver

The RDMA device may need to create Ethernet device queues for use by Queue
Pairs of type RAW. This allows a user-mode context to access the Ethernet
hardware queues. Export the supporting functions for use by the RDMA
driver.
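As a rough sketch of the exported flow (illustrative only: the example_ name is hypothetical, and the GDMA regions are assumed to have been created beforehand for the user-supplied queue memory), an RDMA driver could build a RAW send queue like this:

/* Sketch: create a RAW-mode send WQ object from pre-created GDMA
 * regions; tear down with mana_destroy_wq_obj(apc, GDMA_SQ, *wq_obj).
 */
static int example_create_raw_sq(struct mana_port_context *apc,
				 u64 wq_region, u32 wq_size,
				 u64 cq_region, u32 cq_size,
				 u32 eq_id, mana_handle_t *wq_obj)
{
	struct mana_obj_spec wq_spec = {};
	struct mana_obj_spec cq_spec = {};

	wq_spec.gdma_region = wq_region;
	wq_spec.queue_size = wq_size;

	cq_spec.gdma_region = cq_region;
	cq_spec.queue_size = cq_size;
	cq_spec.attached_eq = eq_id;

	return mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
				  &wq_spec, &cq_spec, wq_obj);
}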
Reviewed-by: Dexuan Cui Signed-off-by: Long Li Link: https://lore.kernel.org/r/1667502990-2559-6-git-send-email-longli@linuxonhyperv.com Acked-by: Haiyang Zhang Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/microsoft/mana/gdma_main.c | 1 + drivers/net/ethernet/microsoft/mana/mana.h | 9 +++++++++ drivers/net/ethernet/microsoft/mana/mana_en.c | 16 +++++++++------- 3 files changed, 19 insertions(+), 7 deletions(-) (limited to 'drivers/net/ethernet/microsoft') diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 62089e112c05..f0e22954d5c0 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -152,6 +152,7 @@ int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req, return mana_hwc_send_request(hwc, req_len, req, resp_len, resp); } +EXPORT_SYMBOL_NS(mana_gd_send_request, NET_MANA); int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length, struct gdma_mem_info *gmi) diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h index 2883a08dbfb5..6e9e86fb4c02 100644 --- a/drivers/net/ethernet/microsoft/mana/mana.h +++ b/drivers/net/ethernet/microsoft/mana/mana.h @@ -635,6 +635,15 @@ struct mana_tx_package { struct gdma_posted_wqe_info wqe_info; }; +int mana_create_wq_obj(struct mana_port_context *apc, + mana_handle_t vport, + u32 wq_type, struct mana_obj_spec *wq_spec, + struct mana_obj_spec *cq_spec, + mana_handle_t *wq_obj); + +void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, + mana_handle_t wq_obj); + int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, u32 doorbell_pg_id); void mana_uncfg_vport(struct mana_port_context *apc); diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index efe14a343fd1..6ad4bc8cbc99 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -792,11 +792,11 @@ out: return err; } -static int mana_create_wq_obj(struct mana_port_context *apc, - mana_handle_t vport, - u32 wq_type, struct mana_obj_spec *wq_spec, - struct mana_obj_spec *cq_spec, - mana_handle_t *wq_obj) +int mana_create_wq_obj(struct mana_port_context *apc, + mana_handle_t vport, + u32 wq_type, struct mana_obj_spec *wq_spec, + struct mana_obj_spec *cq_spec, + mana_handle_t *wq_obj) { struct mana_create_wqobj_resp resp = {}; struct mana_create_wqobj_req req = {}; @@ -845,9 +845,10 @@ static int mana_create_wq_obj(struct mana_port_context *apc, out: return err; } +EXPORT_SYMBOL_NS(mana_create_wq_obj, NET_MANA); -static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, - mana_handle_t wq_obj) +void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, + mana_handle_t wq_obj) { struct mana_destroy_wqobj_resp resp = {}; struct mana_destroy_wqobj_req req = {}; @@ -872,6 +873,7 @@ static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err, resp.hdr.status); } +EXPORT_SYMBOL_NS(mana_destroy_wq_obj, NET_MANA); static void mana_destroy_eq(struct mana_context *ac) { -- cgit v1.2.3 From d44089e555ffe63a49cc6e94d0c03d933e413059 Mon Sep 17 00:00:00 2001 From: Long Li Date: Thu, 3 Nov 2022 12:16:24 -0700 Subject: net: mana: Record port number in netdev The port number is useful for user-mode application to identify this net device based on port index. 
Set to the correct value in ndev. Reviewed-by: Dexuan Cui Signed-off-by: Long Li Link: https://lore.kernel.org/r/1667502990-2559-7-git-send-email-longli@linuxonhyperv.com Acked-by: Haiyang Zhang Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/microsoft/mana/mana_en.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers/net/ethernet/microsoft') diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 6ad4bc8cbc99..b6303a43fa7c 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -2133,6 +2133,7 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, ndev->max_mtu = ndev->mtu; ndev->min_mtu = ndev->mtu; ndev->needed_headroom = MANA_HEADROOM; + ndev->dev_port = port_idx; SET_NETDEV_DEV(ndev, gc->dev); netif_carrier_off(ndev); -- cgit v1.2.3 From fd325cd648f15eb9a8b32a68de3bafc72bcfe753 Mon Sep 17 00:00:00 2001 From: Long Li Date: Thu, 3 Nov 2022 12:16:25 -0700 Subject: net: mana: Move header files to a common location In preparation to add MANA RDMA driver, move all the required header files to a common location for use by both Ethernet and RDMA drivers. Reviewed-by: Dexuan Cui Signed-off-by: Long Li Link: https://lore.kernel.org/r/1667502990-2559-8-git-send-email-longli@linuxonhyperv.com Acked-by: Haiyang Zhang Signed-off-by: Leon Romanovsky --- drivers/net/ethernet/microsoft/mana/gdma.h | 689 --------------------- drivers/net/ethernet/microsoft/mana/gdma_main.c | 2 +- drivers/net/ethernet/microsoft/mana/hw_channel.c | 4 +- drivers/net/ethernet/microsoft/mana/hw_channel.h | 195 ------ drivers/net/ethernet/microsoft/mana/mana.h | 650 ------------------- .../net/ethernet/microsoft/mana/mana_auxiliary.h | 10 - drivers/net/ethernet/microsoft/mana/mana_bpf.c | 2 +- drivers/net/ethernet/microsoft/mana/mana_en.c | 4 +- drivers/net/ethernet/microsoft/mana/mana_ethtool.c | 2 +- drivers/net/ethernet/microsoft/mana/shm_channel.c | 2 +- drivers/net/ethernet/microsoft/mana/shm_channel.h | 21 - 11 files changed, 8 insertions(+), 1573 deletions(-) delete mode 100644 drivers/net/ethernet/microsoft/mana/gdma.h delete mode 100644 drivers/net/ethernet/microsoft/mana/hw_channel.h delete mode 100644 drivers/net/ethernet/microsoft/mana/mana.h delete mode 100644 drivers/net/ethernet/microsoft/mana/mana_auxiliary.h delete mode 100644 drivers/net/ethernet/microsoft/mana/shm_channel.h (limited to 'drivers/net/ethernet/microsoft') diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h deleted file mode 100644 index 72eaec2470c0..000000000000 --- a/drivers/net/ethernet/microsoft/mana/gdma.h +++ /dev/null @@ -1,689 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* Copyright (c) 2021, Microsoft Corporation. */ - -#ifndef _GDMA_H -#define _GDMA_H - -#include -#include - -#include "shm_channel.h" - -/* Structures labeled with "HW DATA" are exchanged with the hardware. All of - * them are naturally aligned and hence don't need __packed. 
- */ - -enum gdma_request_type { - GDMA_VERIFY_VF_DRIVER_VERSION = 1, - GDMA_QUERY_MAX_RESOURCES = 2, - GDMA_LIST_DEVICES = 3, - GDMA_REGISTER_DEVICE = 4, - GDMA_DEREGISTER_DEVICE = 5, - GDMA_GENERATE_TEST_EQE = 10, - GDMA_CREATE_QUEUE = 12, - GDMA_DISABLE_QUEUE = 13, - GDMA_CREATE_DMA_REGION = 25, - GDMA_DMA_REGION_ADD_PAGES = 26, - GDMA_DESTROY_DMA_REGION = 27, -}; - -enum gdma_queue_type { - GDMA_INVALID_QUEUE, - GDMA_SQ, - GDMA_RQ, - GDMA_CQ, - GDMA_EQ, -}; - -enum gdma_work_request_flags { - GDMA_WR_NONE = 0, - GDMA_WR_OOB_IN_SGL = BIT(0), - GDMA_WR_PAD_BY_SGE0 = BIT(1), -}; - -enum gdma_eqe_type { - GDMA_EQE_COMPLETION = 3, - GDMA_EQE_TEST_EVENT = 64, - GDMA_EQE_HWC_INIT_EQ_ID_DB = 129, - GDMA_EQE_HWC_INIT_DATA = 130, - GDMA_EQE_HWC_INIT_DONE = 131, -}; - -enum { - GDMA_DEVICE_NONE = 0, - GDMA_DEVICE_HWC = 1, - GDMA_DEVICE_MANA = 2, -}; - -struct gdma_resource { - /* Protect the bitmap */ - spinlock_t lock; - - /* The bitmap size in bits. */ - u32 size; - - /* The bitmap tracks the resources. */ - unsigned long *map; -}; - -union gdma_doorbell_entry { - u64 as_uint64; - - struct { - u64 id : 24; - u64 reserved : 8; - u64 tail_ptr : 31; - u64 arm : 1; - } cq; - - struct { - u64 id : 24; - u64 wqe_cnt : 8; - u64 tail_ptr : 32; - } rq; - - struct { - u64 id : 24; - u64 reserved : 8; - u64 tail_ptr : 32; - } sq; - - struct { - u64 id : 16; - u64 reserved : 16; - u64 tail_ptr : 31; - u64 arm : 1; - } eq; -}; /* HW DATA */ - -struct gdma_msg_hdr { - u32 hdr_type; - u32 msg_type; - u16 msg_version; - u16 hwc_msg_id; - u32 msg_size; -}; /* HW DATA */ - -struct gdma_dev_id { - union { - struct { - u16 type; - u16 instance; - }; - - u32 as_uint32; - }; -}; /* HW DATA */ - -struct gdma_req_hdr { - struct gdma_msg_hdr req; - struct gdma_msg_hdr resp; /* The expected response */ - struct gdma_dev_id dev_id; - u32 activity_id; -}; /* HW DATA */ - -struct gdma_resp_hdr { - struct gdma_msg_hdr response; - struct gdma_dev_id dev_id; - u32 activity_id; - u32 status; - u32 reserved; -}; /* HW DATA */ - -struct gdma_general_req { - struct gdma_req_hdr hdr; -}; /* HW DATA */ - -#define GDMA_MESSAGE_V1 1 - -struct gdma_general_resp { - struct gdma_resp_hdr hdr; -}; /* HW DATA */ - -#define GDMA_STANDARD_HEADER_TYPE 0 - -static inline void mana_gd_init_req_hdr(struct gdma_req_hdr *hdr, u32 code, - u32 req_size, u32 resp_size) -{ - hdr->req.hdr_type = GDMA_STANDARD_HEADER_TYPE; - hdr->req.msg_type = code; - hdr->req.msg_version = GDMA_MESSAGE_V1; - hdr->req.msg_size = req_size; - - hdr->resp.hdr_type = GDMA_STANDARD_HEADER_TYPE; - hdr->resp.msg_type = code; - hdr->resp.msg_version = GDMA_MESSAGE_V1; - hdr->resp.msg_size = resp_size; -} - -/* The 16-byte struct is part of the GDMA work queue entry (WQE). 
*/ -struct gdma_sge { - u64 address; - u32 mem_key; - u32 size; -}; /* HW DATA */ - -struct gdma_wqe_request { - struct gdma_sge *sgl; - u32 num_sge; - - u32 inline_oob_size; - const void *inline_oob_data; - - u32 flags; - u32 client_data_unit; -}; - -enum gdma_page_type { - GDMA_PAGE_TYPE_4K, -}; - -#define GDMA_INVALID_DMA_REGION 0 - -struct gdma_mem_info { - struct device *dev; - - dma_addr_t dma_handle; - void *virt_addr; - u64 length; - - /* Allocated by the PF driver */ - u64 gdma_region; -}; - -#define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8 - -struct gdma_dev { - struct gdma_context *gdma_context; - - struct gdma_dev_id dev_id; - - u32 pdid; - u32 doorbell; - u32 gpa_mkey; - - /* GDMA driver specific pointer */ - void *driver_data; - - struct auxiliary_device *adev; -}; - -#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE - -#define GDMA_CQE_SIZE 64 -#define GDMA_EQE_SIZE 16 -#define GDMA_MAX_SQE_SIZE 512 -#define GDMA_MAX_RQE_SIZE 256 - -#define GDMA_COMP_DATA_SIZE 0x3C - -#define GDMA_EVENT_DATA_SIZE 0xC - -/* The WQE size must be a multiple of the Basic Unit, which is 32 bytes. */ -#define GDMA_WQE_BU_SIZE 32 - -#define INVALID_PDID UINT_MAX -#define INVALID_DOORBELL UINT_MAX -#define INVALID_MEM_KEY UINT_MAX -#define INVALID_QUEUE_ID UINT_MAX -#define INVALID_PCI_MSIX_INDEX UINT_MAX - -struct gdma_comp { - u32 cqe_data[GDMA_COMP_DATA_SIZE / 4]; - u32 wq_num; - bool is_sq; -}; - -struct gdma_event { - u32 details[GDMA_EVENT_DATA_SIZE / 4]; - u8 type; -}; - -struct gdma_queue; - -struct mana_eq { - struct gdma_queue *eq; -}; - -typedef void gdma_eq_callback(void *context, struct gdma_queue *q, - struct gdma_event *e); - -typedef void gdma_cq_callback(void *context, struct gdma_queue *q); - -/* The 'head' is the producer index. For SQ/RQ, when the driver posts a WQE - * (Note: the WQE size must be a multiple of the 32-byte Basic Unit), the - * driver increases the 'head' in BUs rather than in bytes, and notifies - * the HW of the updated head. For EQ/CQ, the driver uses the 'head' to track - * the HW head, and increases the 'head' by 1 for every processed EQE/CQE. - * - * The 'tail' is the consumer index for SQ/RQ. After the CQE of the SQ/RQ is - * processed, the driver increases the 'tail' to indicate that WQEs have - * been consumed by the HW, so the driver can post new WQEs into the SQ/RQ. - * - * The driver doesn't use the 'tail' for EQ/CQ, because the driver ensures - * that the EQ/CQ is big enough so they can't overflow, and the driver uses - * the owner bits mechanism to detect if the queue has become empty. - */ -struct gdma_queue { - struct gdma_dev *gdma_dev; - - enum gdma_queue_type type; - u32 id; - - struct gdma_mem_info mem_info; - - void *queue_mem_ptr; - u32 queue_size; - - bool monitor_avl_buf; - - u32 head; - u32 tail; - - /* Extra fields specific to EQ/CQ. */ - union { - struct { - bool disable_needed; - - gdma_eq_callback *callback; - void *context; - - unsigned int msix_index; - - u32 log2_throttle_limit; - } eq; - - struct { - gdma_cq_callback *callback; - void *context; - - struct gdma_queue *parent; /* For CQ/EQ relationship */ - } cq; - }; -}; - -struct gdma_queue_spec { - enum gdma_queue_type type; - bool monitor_avl_buf; - unsigned int queue_size; - - /* Extra fields specific to EQ/CQ. 
*/ - union { - struct { - gdma_eq_callback *callback; - void *context; - - unsigned long log2_throttle_limit; - } eq; - - struct { - gdma_cq_callback *callback; - void *context; - - struct gdma_queue *parent_eq; - - } cq; - }; -}; - -struct gdma_irq_context { - void (*handler)(void *arg); - void *arg; -}; - -struct gdma_context { - struct device *dev; - - /* Per-vPort max number of queues */ - unsigned int max_num_queues; - unsigned int max_num_msix; - unsigned int num_msix_usable; - struct gdma_resource msix_resource; - struct gdma_irq_context *irq_contexts; - - /* This maps a CQ index to the queue structure. */ - unsigned int max_num_cqs; - struct gdma_queue **cq_table; - - /* Protect eq_test_event and test_event_eq_id */ - struct mutex eq_test_event_mutex; - struct completion eq_test_event; - u32 test_event_eq_id; - - bool is_pf; - phys_addr_t bar0_pa; - void __iomem *bar0_va; - void __iomem *shm_base; - void __iomem *db_page_base; - phys_addr_t phys_db_page_base; - u32 db_page_size; - - /* Shared memory chanenl (used to bootstrap HWC) */ - struct shm_channel shm_channel; - - /* Hardware communication channel (HWC) */ - struct gdma_dev hwc; - - /* Azure network adapter */ - struct gdma_dev mana; -}; - -#define MAX_NUM_GDMA_DEVICES 4 - -static inline bool mana_gd_is_mana(struct gdma_dev *gd) -{ - return gd->dev_id.type == GDMA_DEVICE_MANA; -} - -static inline bool mana_gd_is_hwc(struct gdma_dev *gd) -{ - return gd->dev_id.type == GDMA_DEVICE_HWC; -} - -u8 *mana_gd_get_wqe_ptr(const struct gdma_queue *wq, u32 wqe_offset); -u32 mana_gd_wq_avail_space(struct gdma_queue *wq); - -int mana_gd_test_eq(struct gdma_context *gc, struct gdma_queue *eq); - -int mana_gd_create_hwc_queue(struct gdma_dev *gd, - const struct gdma_queue_spec *spec, - struct gdma_queue **queue_ptr); - -int mana_gd_create_mana_eq(struct gdma_dev *gd, - const struct gdma_queue_spec *spec, - struct gdma_queue **queue_ptr); - -int mana_gd_create_mana_wq_cq(struct gdma_dev *gd, - const struct gdma_queue_spec *spec, - struct gdma_queue **queue_ptr); - -void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue); - -int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe); - -void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit); - -struct gdma_wqe { - u32 reserved :24; - u32 last_vbytes :8; - - union { - u32 flags; - - struct { - u32 num_sge :8; - u32 inline_oob_size_div4:3; - u32 client_oob_in_sgl :1; - u32 reserved1 :4; - u32 client_data_unit :14; - u32 reserved2 :2; - }; - }; -}; /* HW DATA */ - -#define INLINE_OOB_SMALL_SIZE 8 -#define INLINE_OOB_LARGE_SIZE 24 - -#define MAX_TX_WQE_SIZE 512 -#define MAX_RX_WQE_SIZE 256 - -struct gdma_cqe { - u32 cqe_data[GDMA_COMP_DATA_SIZE / 4]; - - union { - u32 as_uint32; - - struct { - u32 wq_num : 24; - u32 is_sq : 1; - u32 reserved : 4; - u32 owner_bits : 3; - }; - } cqe_info; -}; /* HW DATA */ - -#define GDMA_CQE_OWNER_BITS 3 - -#define GDMA_CQE_OWNER_MASK ((1 << GDMA_CQE_OWNER_BITS) - 1) - -#define SET_ARM_BIT 1 - -#define GDMA_EQE_OWNER_BITS 3 - -union gdma_eqe_info { - u32 as_uint32; - - struct { - u32 type : 8; - u32 reserved1 : 8; - u32 client_id : 2; - u32 reserved2 : 11; - u32 owner_bits : 3; - }; -}; /* HW DATA */ - -#define GDMA_EQE_OWNER_MASK ((1 << GDMA_EQE_OWNER_BITS) - 1) -#define INITIALIZED_OWNER_BIT(log2_num_entries) (1UL << (log2_num_entries)) - -struct gdma_eqe { - u32 details[GDMA_EVENT_DATA_SIZE / 4]; - u32 eqe_info; -}; /* HW DATA */ - -#define GDMA_REG_DB_PAGE_OFFSET 8 -#define GDMA_REG_DB_PAGE_SIZE 0x10 -#define 
GDMA_REG_SHM_OFFSET 0x18 - -#define GDMA_PF_REG_DB_PAGE_SIZE 0xD0 -#define GDMA_PF_REG_DB_PAGE_OFF 0xC8 -#define GDMA_PF_REG_SHM_OFF 0x70 - -#define GDMA_SRIOV_REG_CFG_BASE_OFF 0x108 - -#define MANA_PF_DEVICE_ID 0x00B9 -#define MANA_VF_DEVICE_ID 0x00BA - -struct gdma_posted_wqe_info { - u32 wqe_size_in_bu; -}; - -/* GDMA_GENERATE_TEST_EQE */ -struct gdma_generate_test_event_req { - struct gdma_req_hdr hdr; - u32 queue_index; -}; /* HW DATA */ - -/* GDMA_VERIFY_VF_DRIVER_VERSION */ -enum { - GDMA_PROTOCOL_V1 = 1, - GDMA_PROTOCOL_FIRST = GDMA_PROTOCOL_V1, - GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1, -}; - -#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0) - -#define GDMA_DRV_CAP_FLAGS1 GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT - -#define GDMA_DRV_CAP_FLAGS2 0 - -#define GDMA_DRV_CAP_FLAGS3 0 - -#define GDMA_DRV_CAP_FLAGS4 0 - -struct gdma_verify_ver_req { - struct gdma_req_hdr hdr; - - /* Mandatory fields required for protocol establishment */ - u64 protocol_ver_min; - u64 protocol_ver_max; - - /* Gdma Driver Capability Flags */ - u64 gd_drv_cap_flags1; - u64 gd_drv_cap_flags2; - u64 gd_drv_cap_flags3; - u64 gd_drv_cap_flags4; - - /* Advisory fields */ - u64 drv_ver; - u32 os_type; /* Linux = 0x10; Windows = 0x20; Other = 0x30 */ - u32 reserved; - u32 os_ver_major; - u32 os_ver_minor; - u32 os_ver_build; - u32 os_ver_platform; - u64 reserved_2; - u8 os_ver_str1[128]; - u8 os_ver_str2[128]; - u8 os_ver_str3[128]; - u8 os_ver_str4[128]; -}; /* HW DATA */ - -struct gdma_verify_ver_resp { - struct gdma_resp_hdr hdr; - u64 gdma_protocol_ver; - u64 pf_cap_flags1; - u64 pf_cap_flags2; - u64 pf_cap_flags3; - u64 pf_cap_flags4; -}; /* HW DATA */ - -/* GDMA_QUERY_MAX_RESOURCES */ -struct gdma_query_max_resources_resp { - struct gdma_resp_hdr hdr; - u32 status; - u32 max_sq; - u32 max_rq; - u32 max_cq; - u32 max_eq; - u32 max_db; - u32 max_mst; - u32 max_cq_mod_ctx; - u32 max_mod_cq; - u32 max_msix; -}; /* HW DATA */ - -/* GDMA_LIST_DEVICES */ -struct gdma_list_devices_resp { - struct gdma_resp_hdr hdr; - u32 num_of_devs; - u32 reserved; - struct gdma_dev_id devs[64]; -}; /* HW DATA */ - -/* GDMA_REGISTER_DEVICE */ -struct gdma_register_device_resp { - struct gdma_resp_hdr hdr; - u32 pdid; - u32 gpa_mkey; - u32 db_id; -}; /* HW DATA */ - -/* GDMA_CREATE_QUEUE */ -struct gdma_create_queue_req { - struct gdma_req_hdr hdr; - u32 type; - u32 reserved1; - u32 pdid; - u32 doolbell_id; - u64 gdma_region; - u32 reserved2; - u32 queue_size; - u32 log2_throttle_limit; - u32 eq_pci_msix_index; - u32 cq_mod_ctx_id; - u32 cq_parent_eq_id; - u8 rq_drop_on_overrun; - u8 rq_err_on_wqe_overflow; - u8 rq_chain_rec_wqes; - u8 sq_hw_db; - u32 reserved3; -}; /* HW DATA */ - -struct gdma_create_queue_resp { - struct gdma_resp_hdr hdr; - u32 queue_index; -}; /* HW DATA */ - -/* GDMA_DISABLE_QUEUE */ -struct gdma_disable_queue_req { - struct gdma_req_hdr hdr; - u32 type; - u32 queue_index; - u32 alloc_res_id_on_creation; -}; /* HW DATA */ - -/* GDMA_CREATE_DMA_REGION */ -struct gdma_create_dma_region_req { - struct gdma_req_hdr hdr; - - /* The total size of the DMA region */ - u64 length; - - /* The offset in the first page */ - u32 offset_in_page; - - /* enum gdma_page_type */ - u32 gdma_page_type; - - /* The total number of pages */ - u32 page_count; - - /* If page_addr_list_len is smaller than page_count, - * the remaining page addresses will be added via the - * message GDMA_DMA_REGION_ADD_PAGES. 
- */ - u32 page_addr_list_len; - u64 page_addr_list[]; -}; /* HW DATA */ - -struct gdma_create_dma_region_resp { - struct gdma_resp_hdr hdr; - u64 gdma_region; -}; /* HW DATA */ - -/* GDMA_DMA_REGION_ADD_PAGES */ -struct gdma_dma_region_add_pages_req { - struct gdma_req_hdr hdr; - - u64 gdma_region; - - u32 page_addr_list_len; - u32 reserved3; - - u64 page_addr_list[]; -}; /* HW DATA */ - -/* GDMA_DESTROY_DMA_REGION */ -struct gdma_destroy_dma_region_req { - struct gdma_req_hdr hdr; - - u64 gdma_region; -}; /* HW DATA */ - -int mana_gd_verify_vf_version(struct pci_dev *pdev); - -int mana_gd_register_device(struct gdma_dev *gd); -int mana_gd_deregister_device(struct gdma_dev *gd); - -int mana_gd_post_work_request(struct gdma_queue *wq, - const struct gdma_wqe_request *wqe_req, - struct gdma_posted_wqe_info *wqe_info); - -int mana_gd_post_and_ring(struct gdma_queue *queue, - const struct gdma_wqe_request *wqe, - struct gdma_posted_wqe_info *wqe_info); - -int mana_gd_alloc_res_map(u32 res_avail, struct gdma_resource *r); -void mana_gd_free_res_map(struct gdma_resource *r); - -void mana_gd_wq_ring_doorbell(struct gdma_context *gc, - struct gdma_queue *queue); - -int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length, - struct gdma_mem_info *gmi); - -void mana_gd_free_memory(struct gdma_mem_info *gmi); - -int mana_gd_send_request(struct gdma_context *gc, u32 req_len, const void *req, - u32 resp_len, void *resp); -#endif /* _GDMA_H */ diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index f0e22954d5c0..69795bc679e7 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -6,7 +6,7 @@ #include #include -#include "mana.h" +#include static u32 mana_gd_r32(struct gdma_context *g, u64 offset) { diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index 543a5d5c304f..76829ab43d40 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright (c) 2021, Microsoft Corporation. */ -#include "gdma.h" -#include "hw_channel.h" +#include +#include static int mana_hwc_get_msg_index(struct hw_channel_context *hwc, u16 *msg_id) { diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.h b/drivers/net/ethernet/microsoft/mana/hw_channel.h deleted file mode 100644 index 6a757a6e2732..000000000000 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.h +++ /dev/null @@ -1,195 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* Copyright (c) 2021, Microsoft Corporation. */ - -#ifndef _HW_CHANNEL_H -#define _HW_CHANNEL_H - -#define DEFAULT_LOG2_THROTTLING_FOR_ERROR_EQ 4 - -#define HW_CHANNEL_MAX_REQUEST_SIZE 0x1000 -#define HW_CHANNEL_MAX_RESPONSE_SIZE 0x1000 - -#define HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH 1 - -#define HWC_INIT_DATA_CQID 1 -#define HWC_INIT_DATA_RQID 2 -#define HWC_INIT_DATA_SQID 3 -#define HWC_INIT_DATA_QUEUE_DEPTH 4 -#define HWC_INIT_DATA_MAX_REQUEST 5 -#define HWC_INIT_DATA_MAX_RESPONSE 6 -#define HWC_INIT_DATA_MAX_NUM_CQS 7 -#define HWC_INIT_DATA_PDID 8 -#define HWC_INIT_DATA_GPA_MKEY 9 -#define HWC_INIT_DATA_PF_DEST_RQ_ID 10 -#define HWC_INIT_DATA_PF_DEST_CQ_ID 11 - -/* Structures labeled with "HW DATA" are exchanged with the hardware. All of - * them are naturally aligned and hence don't need __packed. 
- */ - -union hwc_init_eq_id_db { - u32 as_uint32; - - struct { - u32 eq_id : 16; - u32 doorbell : 16; - }; -}; /* HW DATA */ - -union hwc_init_type_data { - u32 as_uint32; - - struct { - u32 value : 24; - u32 type : 8; - }; -}; /* HW DATA */ - -struct hwc_rx_oob { - u32 type : 6; - u32 eom : 1; - u32 som : 1; - u32 vendor_err : 8; - u32 reserved1 : 16; - - u32 src_virt_wq : 24; - u32 src_vfid : 8; - - u32 reserved2; - - union { - u32 wqe_addr_low; - u32 wqe_offset; - }; - - u32 wqe_addr_high; - - u32 client_data_unit : 14; - u32 reserved3 : 18; - - u32 tx_oob_data_size; - - u32 chunk_offset : 21; - u32 reserved4 : 11; -}; /* HW DATA */ - -struct hwc_tx_oob { - u32 reserved1; - - u32 reserved2; - - u32 vrq_id : 24; - u32 dest_vfid : 8; - - u32 vrcq_id : 24; - u32 reserved3 : 8; - - u32 vscq_id : 24; - u32 loopback : 1; - u32 lso_override: 1; - u32 dest_pf : 1; - u32 reserved4 : 5; - - u32 vsq_id : 24; - u32 reserved5 : 8; -}; /* HW DATA */ - -struct hwc_work_request { - void *buf_va; - void *buf_sge_addr; - u32 buf_len; - u32 msg_size; - - struct gdma_wqe_request wqe_req; - struct hwc_tx_oob tx_oob; - - struct gdma_sge sge; -}; - -/* hwc_dma_buf represents the array of in-flight WQEs. - * mem_info as know as the GDMA mapped memory is partitioned and used by - * in-flight WQEs. - * The number of WQEs is determined by the number of in-flight messages. - */ -struct hwc_dma_buf { - struct gdma_mem_info mem_info; - - u32 gpa_mkey; - - u32 num_reqs; - struct hwc_work_request reqs[]; -}; - -typedef void hwc_rx_event_handler_t(void *ctx, u32 gdma_rxq_id, - const struct hwc_rx_oob *rx_oob); - -typedef void hwc_tx_event_handler_t(void *ctx, u32 gdma_txq_id, - const struct hwc_rx_oob *rx_oob); - -struct hwc_cq { - struct hw_channel_context *hwc; - - struct gdma_queue *gdma_cq; - struct gdma_queue *gdma_eq; - struct gdma_comp *comp_buf; - u16 queue_depth; - - hwc_rx_event_handler_t *rx_event_handler; - void *rx_event_ctx; - - hwc_tx_event_handler_t *tx_event_handler; - void *tx_event_ctx; -}; - -struct hwc_wq { - struct hw_channel_context *hwc; - - struct gdma_queue *gdma_wq; - struct hwc_dma_buf *msg_buf; - u16 queue_depth; - - struct hwc_cq *hwc_cq; -}; - -struct hwc_caller_ctx { - struct completion comp_event; - void *output_buf; - u32 output_buflen; - - u32 error; /* Linux error code */ - u32 status_code; -}; - -struct hw_channel_context { - struct gdma_dev *gdma_dev; - struct device *dev; - - u16 num_inflight_msg; - u32 max_req_msg_size; - - u16 hwc_init_q_depth_max; - u32 hwc_init_max_req_msg_size; - u32 hwc_init_max_resp_msg_size; - - struct completion hwc_init_eqe_comp; - - struct hwc_wq *rxq; - struct hwc_wq *txq; - struct hwc_cq *cq; - - struct semaphore sema; - struct gdma_resource inflight_msg_res; - - u32 pf_dest_vrq_id; - u32 pf_dest_vrcq_id; - - struct hwc_caller_ctx *caller_ctx; -}; - -int mana_hwc_create_channel(struct gdma_context *gc); -void mana_hwc_destroy_channel(struct gdma_context *gc); - -int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len, - const void *req, u32 resp_len, void *resp); - -#endif /* _HW_CHANNEL_H */ diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h deleted file mode 100644 index 6e9e86fb4c02..000000000000 --- a/drivers/net/ethernet/microsoft/mana/mana.h +++ /dev/null @@ -1,650 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* Copyright (c) 2021, Microsoft Corporation. 
*/ - -#ifndef _MANA_H -#define _MANA_H - -#include "gdma.h" -#include "hw_channel.h" - -/* Microsoft Azure Network Adapter (MANA)'s definitions - * - * Structures labeled with "HW DATA" are exchanged with the hardware. All of - * them are naturally aligned and hence don't need __packed. - */ - -/* MANA protocol version */ -#define MANA_MAJOR_VERSION 0 -#define MANA_MINOR_VERSION 1 -#define MANA_MICRO_VERSION 1 - -typedef u64 mana_handle_t; -#define INVALID_MANA_HANDLE ((mana_handle_t)-1) - -enum TRI_STATE { - TRI_STATE_UNKNOWN = -1, - TRI_STATE_FALSE = 0, - TRI_STATE_TRUE = 1 -}; - -/* Number of entries for hardware indirection table must be in power of 2 */ -#define MANA_INDIRECT_TABLE_SIZE 64 -#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1) - -/* The Toeplitz hash key's length in bytes: should be multiple of 8 */ -#define MANA_HASH_KEY_SIZE 40 - -#define COMP_ENTRY_SIZE 64 - -#define ADAPTER_MTU_SIZE 1500 -#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14) - -#define RX_BUFFERS_PER_QUEUE 512 - -#define MAX_SEND_BUFFERS_PER_QUEUE 256 - -#define EQ_SIZE (8 * PAGE_SIZE) -#define LOG2_EQ_THROTTLE 3 - -#define MAX_PORTS_IN_MANA_DEV 256 - -struct mana_stats_rx { - u64 packets; - u64 bytes; - u64 xdp_drop; - u64 xdp_tx; - u64 xdp_redirect; - struct u64_stats_sync syncp; -}; - -struct mana_stats_tx { - u64 packets; - u64 bytes; - u64 xdp_xmit; - struct u64_stats_sync syncp; -}; - -struct mana_txq { - struct gdma_queue *gdma_sq; - - union { - u32 gdma_txq_id; - struct { - u32 reserved1 : 10; - u32 vsq_frame : 14; - u32 reserved2 : 8; - }; - }; - - u16 vp_offset; - - struct net_device *ndev; - - /* The SKBs are sent to the HW and we are waiting for the CQEs. */ - struct sk_buff_head pending_skbs; - struct netdev_queue *net_txq; - - atomic_t pending_sends; - - struct mana_stats_tx stats; -}; - -/* skb data and frags dma mappings */ -struct mana_skb_head { - dma_addr_t dma_handle[MAX_SKB_FRAGS + 1]; - - u32 size[MAX_SKB_FRAGS + 1]; -}; - -#define MANA_HEADROOM sizeof(struct mana_skb_head) - -enum mana_tx_pkt_format { - MANA_SHORT_PKT_FMT = 0, - MANA_LONG_PKT_FMT = 1, -}; - -struct mana_tx_short_oob { - u32 pkt_fmt : 2; - u32 is_outer_ipv4 : 1; - u32 is_outer_ipv6 : 1; - u32 comp_iphdr_csum : 1; - u32 comp_tcp_csum : 1; - u32 comp_udp_csum : 1; - u32 supress_txcqe_gen : 1; - u32 vcq_num : 24; - - u32 trans_off : 10; /* Transport header offset */ - u32 vsq_frame : 14; - u32 short_vp_offset : 8; -}; /* HW DATA */ - -struct mana_tx_long_oob { - u32 is_encap : 1; - u32 inner_is_ipv6 : 1; - u32 inner_tcp_opt : 1; - u32 inject_vlan_pri_tag : 1; - u32 reserved1 : 12; - u32 pcp : 3; /* 802.1Q */ - u32 dei : 1; /* 802.1Q */ - u32 vlan_id : 12; /* 802.1Q */ - - u32 inner_frame_offset : 10; - u32 inner_ip_rel_offset : 6; - u32 long_vp_offset : 12; - u32 reserved2 : 4; - - u32 reserved3; - u32 reserved4; -}; /* HW DATA */ - -struct mana_tx_oob { - struct mana_tx_short_oob s_oob; - struct mana_tx_long_oob l_oob; -}; /* HW DATA */ - -enum mana_cq_type { - MANA_CQ_TYPE_RX, - MANA_CQ_TYPE_TX, -}; - -enum mana_cqe_type { - CQE_INVALID = 0, - CQE_RX_OKAY = 1, - CQE_RX_COALESCED_4 = 2, - CQE_RX_OBJECT_FENCE = 3, - CQE_RX_TRUNCATED = 4, - - CQE_TX_OKAY = 32, - CQE_TX_SA_DROP = 33, - CQE_TX_MTU_DROP = 34, - CQE_TX_INVALID_OOB = 35, - CQE_TX_INVALID_ETH_TYPE = 36, - CQE_TX_HDR_PROCESSING_ERROR = 37, - CQE_TX_VF_DISABLED = 38, - CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39, - CQE_TX_VPORT_DISABLED = 40, - CQE_TX_VLAN_TAGGING_VIOLATION = 41, -}; - -#define MANA_CQE_COMPLETION 1 - -struct mana_cqe_header { - u32 cqe_type : 
6; - u32 client_type : 2; - u32 vendor_err : 24; -}; /* HW DATA */ - -/* NDIS HASH Types */ -#define NDIS_HASH_IPV4 BIT(0) -#define NDIS_HASH_TCP_IPV4 BIT(1) -#define NDIS_HASH_UDP_IPV4 BIT(2) -#define NDIS_HASH_IPV6 BIT(3) -#define NDIS_HASH_TCP_IPV6 BIT(4) -#define NDIS_HASH_UDP_IPV6 BIT(5) -#define NDIS_HASH_IPV6_EX BIT(6) -#define NDIS_HASH_TCP_IPV6_EX BIT(7) -#define NDIS_HASH_UDP_IPV6_EX BIT(8) - -#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX) -#define MANA_HASH_L4 \ - (NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \ - NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX) - -struct mana_rxcomp_perpkt_info { - u32 pkt_len : 16; - u32 reserved1 : 16; - u32 reserved2; - u32 pkt_hash; -}; /* HW DATA */ - -#define MANA_RXCOMP_OOB_NUM_PPI 4 - -/* Receive completion OOB */ -struct mana_rxcomp_oob { - struct mana_cqe_header cqe_hdr; - - u32 rx_vlan_id : 12; - u32 rx_vlantag_present : 1; - u32 rx_outer_iphdr_csum_succeed : 1; - u32 rx_outer_iphdr_csum_fail : 1; - u32 reserved1 : 1; - u32 rx_hashtype : 9; - u32 rx_iphdr_csum_succeed : 1; - u32 rx_iphdr_csum_fail : 1; - u32 rx_tcp_csum_succeed : 1; - u32 rx_tcp_csum_fail : 1; - u32 rx_udp_csum_succeed : 1; - u32 rx_udp_csum_fail : 1; - u32 reserved2 : 1; - - struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI]; - - u32 rx_wqe_offset; -}; /* HW DATA */ - -struct mana_tx_comp_oob { - struct mana_cqe_header cqe_hdr; - - u32 tx_data_offset; - - u32 tx_sgl_offset : 5; - u32 tx_wqe_offset : 27; - - u32 reserved[12]; -}; /* HW DATA */ - -struct mana_rxq; - -#define CQE_POLLING_BUFFER 512 - -struct mana_cq { - struct gdma_queue *gdma_cq; - - /* Cache the CQ id (used to verify if each CQE comes to the right CQ. */ - u32 gdma_id; - - /* Type of the CQ: TX or RX */ - enum mana_cq_type type; - - /* Pointer to the mana_rxq that is pushing RX CQEs to the queue. - * Only and must be non-NULL if type is MANA_CQ_TYPE_RX. - */ - struct mana_rxq *rxq; - - /* Pointer to the mana_txq that is pushing TX CQEs to the queue. - * Only and must be non-NULL if type is MANA_CQ_TYPE_TX. - */ - struct mana_txq *txq; - - /* Buffer which the CQ handler can copy the CQE's into. */ - struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER]; - - /* NAPI data */ - struct napi_struct napi; - int work_done; - int budget; -}; - -#define GDMA_MAX_RQE_SGES 15 - -struct mana_recv_buf_oob { - /* A valid GDMA work request representing the data buffer. */ - struct gdma_wqe_request wqe_req; - - void *buf_va; - dma_addr_t buf_dma_addr; - - /* SGL of the buffer going to be sent has part of the work request. */ - u32 num_sge; - struct gdma_sge sgl[GDMA_MAX_RQE_SGES]; - - /* Required to store the result of mana_gd_post_work_request. - * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the - * work queue when the WQE is consumed. 
- */ - struct gdma_posted_wqe_info wqe_inf; -}; - -struct mana_rxq { - struct gdma_queue *gdma_rq; - /* Cache the gdma receive queue id */ - u32 gdma_id; - - /* Index of RQ in the vPort, not gdma receive queue id */ - u32 rxq_idx; - - u32 datasize; - - mana_handle_t rxobj; - - struct mana_cq rx_cq; - - struct completion fence_event; - - struct net_device *ndev; - - /* Total number of receive buffers to be allocated */ - u32 num_rx_buf; - - u32 buf_index; - - struct mana_stats_rx stats; - - struct bpf_prog __rcu *bpf_prog; - struct xdp_rxq_info xdp_rxq; - struct page *xdp_save_page; - bool xdp_flush; - int xdp_rc; /* XDP redirect return code */ - - /* MUST BE THE LAST MEMBER: - * Each receive buffer has an associated mana_recv_buf_oob. - */ - struct mana_recv_buf_oob rx_oobs[]; -}; - -struct mana_tx_qp { - struct mana_txq txq; - - struct mana_cq tx_cq; - - mana_handle_t tx_object; -}; - -struct mana_ethtool_stats { - u64 stop_queue; - u64 wake_queue; -}; - -struct mana_context { - struct gdma_dev *gdma_dev; - - u16 num_ports; - - struct mana_eq *eqs; - - struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; -}; - -struct mana_port_context { - struct mana_context *ac; - struct net_device *ndev; - - u8 mac_addr[ETH_ALEN]; - - enum TRI_STATE rss_state; - - mana_handle_t default_rxobj; - bool tx_shortform_allowed; - u16 tx_vp_offset; - - struct mana_tx_qp *tx_qp; - - /* Indirection Table for RX & TX. The values are queue indexes */ - u32 indir_table[MANA_INDIRECT_TABLE_SIZE]; - - /* Indirection table containing RxObject Handles */ - mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE]; - - /* Hash key used by the NIC */ - u8 hashkey[MANA_HASH_KEY_SIZE]; - - /* This points to an array of num_queues of RQ pointers. */ - struct mana_rxq **rxqs; - - struct bpf_prog *bpf_prog; - - /* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. 
*/ - unsigned int max_queues; - unsigned int num_queues; - - mana_handle_t port_handle; - mana_handle_t pf_filter_handle; - - /* Mutex for sharing access to vport_use_count */ - struct mutex vport_mutex; - int vport_use_count; - - u16 port_idx; - - bool port_is_up; - bool port_st_save; /* Saved port state */ - - struct mana_ethtool_stats eth_stats; -}; - -int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev); -int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx, - bool update_hash, bool update_tab); - -int mana_alloc_queues(struct net_device *ndev); -int mana_attach(struct net_device *ndev); -int mana_detach(struct net_device *ndev, bool from_close); - -int mana_probe(struct gdma_dev *gd, bool resuming); -void mana_remove(struct gdma_dev *gd, bool suspending); - -void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev); -int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames, - u32 flags); -u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq, - struct xdp_buff *xdp, void *buf_va, uint pkt_len); -struct bpf_prog *mana_xdp_get(struct mana_port_context *apc); -void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog); -int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf); - -extern const struct ethtool_ops mana_ethtool_ops; - -struct mana_obj_spec { - u32 queue_index; - u64 gdma_region; - u32 queue_size; - u32 attached_eq; - u32 modr_ctx_id; -}; - -enum mana_command_code { - MANA_QUERY_DEV_CONFIG = 0x20001, - MANA_QUERY_GF_STAT = 0x20002, - MANA_CONFIG_VPORT_TX = 0x20003, - MANA_CREATE_WQ_OBJ = 0x20004, - MANA_DESTROY_WQ_OBJ = 0x20005, - MANA_FENCE_RQ = 0x20006, - MANA_CONFIG_VPORT_RX = 0x20007, - MANA_QUERY_VPORT_CONFIG = 0x20008, - - /* Privileged commands for the PF mode */ - MANA_REGISTER_FILTER = 0x28000, - MANA_DEREGISTER_FILTER = 0x28001, - MANA_REGISTER_HW_PORT = 0x28003, - MANA_DEREGISTER_HW_PORT = 0x28004, -}; - -/* Query Device Configuration */ -struct mana_query_device_cfg_req { - struct gdma_req_hdr hdr; - - /* MANA Nic Driver Capability flags */ - u64 mn_drv_cap_flags1; - u64 mn_drv_cap_flags2; - u64 mn_drv_cap_flags3; - u64 mn_drv_cap_flags4; - - u32 proto_major_ver; - u32 proto_minor_ver; - u32 proto_micro_ver; - - u32 reserved; -}; /* HW DATA */ - -struct mana_query_device_cfg_resp { - struct gdma_resp_hdr hdr; - - u64 pf_cap_flags1; - u64 pf_cap_flags2; - u64 pf_cap_flags3; - u64 pf_cap_flags4; - - u16 max_num_vports; - u16 reserved; - u32 max_num_eqs; -}; /* HW DATA */ - -/* Query vPort Configuration */ -struct mana_query_vport_cfg_req { - struct gdma_req_hdr hdr; - u32 vport_index; -}; /* HW DATA */ - -struct mana_query_vport_cfg_resp { - struct gdma_resp_hdr hdr; - u32 max_num_sq; - u32 max_num_rq; - u32 num_indirection_ent; - u32 reserved1; - u8 mac_addr[6]; - u8 reserved2[2]; - mana_handle_t vport; -}; /* HW DATA */ - -/* Configure vPort */ -struct mana_config_vport_req { - struct gdma_req_hdr hdr; - mana_handle_t vport; - u32 pdid; - u32 doorbell_pageid; -}; /* HW DATA */ - -struct mana_config_vport_resp { - struct gdma_resp_hdr hdr; - u16 tx_vport_offset; - u8 short_form_allowed; - u8 reserved; -}; /* HW DATA */ - -/* Create WQ Object */ -struct mana_create_wqobj_req { - struct gdma_req_hdr hdr; - mana_handle_t vport; - u32 wq_type; - u32 reserved; - u64 wq_gdma_region; - u64 cq_gdma_region; - u32 wq_size; - u32 cq_size; - u32 cq_moderation_ctx_id; - u32 cq_parent_qid; -}; /* HW DATA */ - -struct mana_create_wqobj_resp { - struct gdma_resp_hdr hdr; - u32 wq_id; - u32 cq_id; - 
mana_handle_t wq_obj; -}; /* HW DATA */ - -/* Destroy WQ Object */ -struct mana_destroy_wqobj_req { - struct gdma_req_hdr hdr; - u32 wq_type; - u32 reserved; - mana_handle_t wq_obj_handle; -}; /* HW DATA */ - -struct mana_destroy_wqobj_resp { - struct gdma_resp_hdr hdr; -}; /* HW DATA */ - -/* Fence RQ */ -struct mana_fence_rq_req { - struct gdma_req_hdr hdr; - mana_handle_t wq_obj_handle; -}; /* HW DATA */ - -struct mana_fence_rq_resp { - struct gdma_resp_hdr hdr; -}; /* HW DATA */ - -/* Configure vPort Rx Steering */ -struct mana_cfg_rx_steer_req { - struct gdma_req_hdr hdr; - mana_handle_t vport; - u16 num_indir_entries; - u16 indir_tab_offset; - u32 rx_enable; - u32 rss_enable; - u8 update_default_rxobj; - u8 update_hashkey; - u8 update_indir_tab; - u8 reserved; - mana_handle_t default_rxobj; - u8 hashkey[MANA_HASH_KEY_SIZE]; -}; /* HW DATA */ - -struct mana_cfg_rx_steer_resp { - struct gdma_resp_hdr hdr; -}; /* HW DATA */ - -/* Register HW vPort */ -struct mana_register_hw_vport_req { - struct gdma_req_hdr hdr; - u16 attached_gfid; - u8 is_pf_default_vport; - u8 reserved1; - u8 allow_all_ether_types; - u8 reserved2; - u8 reserved3; - u8 reserved4; -}; /* HW DATA */ - -struct mana_register_hw_vport_resp { - struct gdma_resp_hdr hdr; - mana_handle_t hw_vport_handle; -}; /* HW DATA */ - -/* Deregister HW vPort */ -struct mana_deregister_hw_vport_req { - struct gdma_req_hdr hdr; - mana_handle_t hw_vport_handle; -}; /* HW DATA */ - -struct mana_deregister_hw_vport_resp { - struct gdma_resp_hdr hdr; -}; /* HW DATA */ - -/* Register filter */ -struct mana_register_filter_req { - struct gdma_req_hdr hdr; - mana_handle_t vport; - u8 mac_addr[6]; - u8 reserved1; - u8 reserved2; - u8 reserved3; - u8 reserved4; - u16 reserved5; - u32 reserved6; - u32 reserved7; - u32 reserved8; -}; /* HW DATA */ - -struct mana_register_filter_resp { - struct gdma_resp_hdr hdr; - mana_handle_t filter_handle; -}; /* HW DATA */ - -/* Deregister filter */ -struct mana_deregister_filter_req { - struct gdma_req_hdr hdr; - mana_handle_t filter_handle; -}; /* HW DATA */ - -struct mana_deregister_filter_resp { - struct gdma_resp_hdr hdr; -}; /* HW DATA */ - -#define MANA_MAX_NUM_QUEUES 64 - -#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1) - -struct mana_tx_package { - struct gdma_wqe_request wqe_req; - struct gdma_sge sgl_array[5]; - struct gdma_sge *sgl_ptr; - - struct mana_tx_oob tx_oob; - - struct gdma_posted_wqe_info wqe_info; -}; - -int mana_create_wq_obj(struct mana_port_context *apc, - mana_handle_t vport, - u32 wq_type, struct mana_obj_spec *wq_spec, - struct mana_obj_spec *cq_spec, - mana_handle_t *wq_obj); - -void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, - mana_handle_t wq_obj); - -int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id, - u32 doorbell_pg_id); -void mana_uncfg_vport(struct mana_port_context *apc); -#endif /* _MANA_H */ diff --git a/drivers/net/ethernet/microsoft/mana/mana_auxiliary.h b/drivers/net/ethernet/microsoft/mana/mana_auxiliary.h deleted file mode 100644 index 373d59756846..000000000000 --- a/drivers/net/ethernet/microsoft/mana/mana_auxiliary.h +++ /dev/null @@ -1,10 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* Copyright (c) 2022, Microsoft Corporation. 
*/
-
-#include "mana.h"
-#include <linux/auxiliary_bus.h>
-
-struct mana_adev {
-	struct auxiliary_device adev;
-	struct gdma_dev *mdev;
-};
diff --git a/drivers/net/ethernet/microsoft/mana/mana_bpf.c b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
index 421fd39ff3a8..3caea631229c 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_bpf.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_bpf.c
@@ -8,7 +8,7 @@
 #include
 #include
 
-#include "mana.h"
+#include <net/mana/mana.h>
 
 void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev)
 {
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index b6303a43fa7c..ffa2a0e2c213 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -12,8 +12,8 @@
 #include
 #include
 
-#include "mana.h"
-#include "mana_auxiliary.h"
+#include <net/mana/mana.h>
+#include <net/mana/mana_auxiliary.h>
 
 static DEFINE_IDA(mana_adev_ida);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index c530db76880f..6f98de6d7440 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -5,7 +5,7 @@
 #include
 #include
 
-#include "mana.h"
+#include <net/mana/mana.h>
 
 static const struct {
 	char name[ETH_GSTRING_LEN];
diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c
index da255da62176..5553af9c8085 100644
--- a/drivers/net/ethernet/microsoft/mana/shm_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c
@@ -6,7 +6,7 @@
 #include
 #include
 
-#include "shm_channel.h"
+#include <net/mana/shm_channel.h>
 
 #define PAGE_FRAME_L48_WIDTH_BYTES 6
 #define PAGE_FRAME_L48_WIDTH_BITS (PAGE_FRAME_L48_WIDTH_BYTES * 8)
diff --git a/drivers/net/ethernet/microsoft/mana/shm_channel.h b/drivers/net/ethernet/microsoft/mana/shm_channel.h
deleted file mode 100644
index 5199b41497ff..000000000000
--- a/drivers/net/ethernet/microsoft/mana/shm_channel.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright (c) 2021, Microsoft Corporation. */
-
-#ifndef _SHM_CHANNEL_H
-#define _SHM_CHANNEL_H
-
-struct shm_channel {
-	struct device *dev;
-	void __iomem *base;
-};
-
-void mana_smc_init(struct shm_channel *sc, struct device *dev,
-		   void __iomem *base);
-
-int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr,
-		       u64 cq_addr, u64 rq_addr, u64 sq_addr,
-		       u32 eq_msix_index);
-
-int mana_smc_teardown_hwc(struct shm_channel *sc, bool reset_vf);
-
-#endif /* _SHM_CHANNEL_H */
-- cgit v1.2.3

From aa56549792fb348892fbbae67f6f0c71bb750b65 Mon Sep 17 00:00:00 2001
From: Long Li
Date: Thu, 3 Nov 2022 12:16:26 -0700
Subject: net: mana: Define max values for SGL entries

The maximum number of SGL entries should be computed from the maximum WQE
size for the intended queue type and the corresponding OOB data size. This
guarantees the hardware queue can successfully queue requests up to the
queue depth exposed to the upper layer.
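The hunk below replaces the hard-coded limit of 30 with MAX_TX_WQE_SGL_ENTRIES. The defining hunk lives in the headers relocated by an earlier patch and falls outside this filtered view; a definition consistent with the commit message and the existing GDMA constants would be:

/* Assumed shape of the new macros (not shown in this view). With
 * GDMA_MAX_SQE_SIZE = 512, sizeof(struct gdma_sge) = 16 and
 * INLINE_OOB_SMALL_SIZE = 8, the TX limit evaluates to the previous
 * hard-coded 30; the RX variant yields 15, matching GDMA_MAX_RQE_SGES.
 */
#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
			sizeof(struct gdma_sge))

#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))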
From aa56549792fb348892fbbae67f6f0c71bb750b65 Mon Sep 17 00:00:00 2001
From: Long Li
Date: Thu, 3 Nov 2022 12:16:26 -0700
Subject: net: mana: Define max values for SGL entries

The maximum number of SGL entries should be computed from the maximum
WQE size for the intended queue type and the corresponding OOB data
size. This guarantees the hardware queue can successfully queue
requests up to the queue depth exposed to the upper layer.

Reviewed-by: Dexuan Cui
Signed-off-by: Long Li
Link: https://lore.kernel.org/r/1667502990-2559-9-git-send-email-longli@linuxonhyperv.com
Acked-by: Haiyang Zhang
Signed-off-by: Leon Romanovsky
---
 drivers/net/ethernet/microsoft/mana/mana_en.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/net/ethernet/microsoft')

diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index ffa2a0e2c213..f6bcd0cc6cda 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -189,7 +189,7 @@ int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	pkg.wqe_req.client_data_unit = 0;
 
 	pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
-	WARN_ON_ONCE(pkg.wqe_req.num_sge > 30);
+	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
 
 	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
 		pkg.wqe_req.sgl = pkg.sgl_array;
-- cgit v1.2.3
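The new bound replaces the hard-coded 30. The arithmetic the commit message describes, dividing the WQE space left over after the out-of-band (OOB) data by the size of one scatter-gather entry, can be sketched as below; the EX_* sizes are illustrative assumptions, not the actual GDMA definitions added by this patch.

/* Hedged sketch of deriving a max-SGE bound; EX_MAX_SQE_SIZE,
 * EX_INLINE_OOB_SIZE and EX_SGE_SIZE are stand-ins, not the real
 * GDMA constants.
 */
#define EX_MAX_SQE_SIZE		512	/* assumed max TX WQE size, bytes */
#define EX_INLINE_OOB_SIZE	8	/* assumed inline OOB bytes in a WQE */
#define EX_SGE_SIZE		16	/* assumed bytes per SG entry */

/* Largest SGL that fits in one WQE next to the inline OOB data:
 * (512 - 8) / 16 = 31 entries with these assumed numbers.
 */
#define EX_MAX_TX_WQE_SGL_ENTRIES \
	((EX_MAX_SQE_SIZE - EX_INLINE_OOB_SIZE) / EX_SGE_SIZE)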
From de372f2a9ca7ada2698ecac7df8f02407cd98fa0 Mon Sep 17 00:00:00 2001
From: Ajay Sharma
Date: Thu, 3 Nov 2022 12:16:27 -0700
Subject: net: mana: Define and process GDMA response code GDMA_STATUS_MORE_ENTRIES

When doing memory registration, the PF may respond with
GDMA_STATUS_MORE_ENTRIES to indicate that a follow-up request is
needed. This is not an error and should be processed as a normal
response.

Signed-off-by: Ajay Sharma
Reviewed-by: Dexuan Cui
Signed-off-by: Long Li
Link: https://lore.kernel.org/r/1667502990-2559-10-git-send-email-longli@linuxonhyperv.com
Acked-by: Haiyang Zhang
Signed-off-by: Leon Romanovsky
---
 drivers/net/ethernet/microsoft/mana/hw_channel.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/net/ethernet/microsoft')

diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index 76829ab43d40..9d1507eba5b9 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -836,7 +836,7 @@ int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
 		goto out;
 	}
 
-	if (ctx->status_code) {
+	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
 		dev_err(hwc->dev, "HWC: Failed hw_channel req: 0x%x\n",
 			ctx->status_code);
 		err = -EPROTO;
-- cgit v1.2.3
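With the HWC no longer treating this status as a failure, a caller can drive a multi-part exchange to completion. A hypothetical caller loop might look like the sketch below; everything except GDMA_STATUS_MORE_ENTRIES is an assumed name for illustration, not code from this patch.

/* Hypothetical sketch of a chunked memory-registration exchange.
 * example_send_mr_chunk() and struct example_mr_ctx are assumptions;
 * only GDMA_STATUS_MORE_ENTRIES comes from this patch.
 */
struct example_mr_ctx;	/* opaque per-registration state (assumed) */

u32 example_send_mr_chunk(struct example_mr_ctx *ctx, int chunk);

static int example_register_mr(struct example_mr_ctx *ctx, int nchunks)
{
	u32 status;
	int i;

	for (i = 0; i < nchunks; i++) {
		status = example_send_mr_chunk(ctx, i);

		/* The PF asking for the next chunk is expected, not a fault */
		if (status == GDMA_STATUS_MORE_ENTRIES)
			continue;

		if (status)	/* any other non-zero status is a real error */
			return -EPROTO;
	}

	return 0;
}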
From 28c66cfa45388af1126985d1114e0ed762eb2abd Mon Sep 17 00:00:00 2001
From: Ajay Sharma
Date: Thu, 3 Nov 2022 12:16:29 -0700
Subject: net: mana: Define data structures for protection domain and memory registration

The MANA hardware supports protection domains and memory registration
for use in an RDMA environment. Add those definitions and expose them
for use by the RDMA driver.

Signed-off-by: Ajay Sharma
Signed-off-by: Long Li
Link: https://lore.kernel.org/r/1667502990-2559-12-git-send-email-longli@linuxonhyperv.com
Reviewed-by: Dexuan Cui
Acked-by: Haiyang Zhang
Signed-off-by: Leon Romanovsky
---
 drivers/net/ethernet/microsoft/mana/gdma_main.c | 27 ++++++++++++++++---------
 drivers/net/ethernet/microsoft/mana/mana_en.c   | 18 +++++++++--------
 2 files changed, 27 insertions(+), 18 deletions(-)

(limited to 'drivers/net/ethernet/microsoft')

diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 69795bc679e7..46a7d1e6ece9 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -198,7 +198,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
 	req.type = queue->type;
 	req.pdid = queue->gdma_dev->pdid;
 	req.doolbell_id = queue->gdma_dev->doorbell;
-	req.gdma_region = queue->mem_info.gdma_region;
+	req.gdma_region = queue->mem_info.dma_region_handle;
 	req.queue_size = queue->queue_size;
 	req.log2_throttle_limit = queue->eq.log2_throttle_limit;
 	req.eq_pci_msix_index = queue->eq.msix_index;
@@ -212,7 +212,7 @@ static int mana_gd_create_hw_eq(struct gdma_context *gc,
 	queue->id = resp.queue_index;
 	queue->eq.disable_needed = true;
 
-	queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+	queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
 
 	return 0;
 }
@@ -671,24 +671,30 @@ free_q:
 	return err;
 }
 
-static void mana_gd_destroy_dma_region(struct gdma_context *gc, u64 gdma_region)
+int mana_gd_destroy_dma_region(struct gdma_context *gc,
+			       gdma_obj_handle_t dma_region_handle)
 {
 	struct gdma_destroy_dma_region_req req = {};
 	struct gdma_general_resp resp = {};
 	int err;
 
-	if (gdma_region == GDMA_INVALID_DMA_REGION)
-		return;
+	if (dma_region_handle == GDMA_INVALID_DMA_REGION)
+		return 0;
 
 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
 			     sizeof(resp));
-	req.gdma_region = gdma_region;
+	req.dma_region_handle = dma_region_handle;
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
-	if (err || resp.hdr.status)
+	if (err || resp.hdr.status) {
 		dev_err(gc->dev, "Failed to destroy DMA region: %d, 0x%x\n",
 			err, resp.hdr.status);
+		return -EPROTO;
+	}
+
+	return 0;
 }
+EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA);
 
 static int mana_gd_create_dma_region(struct gdma_dev *gd,
 				     struct gdma_mem_info *gmi)
@@ -733,14 +739,15 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd,
 	if (err)
 		goto out;
 
-	if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
+	if (resp.hdr.status ||
+	    resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
 		dev_err(gc->dev, "Failed to create DMA region: 0x%x\n",
 			resp.hdr.status);
 		err = -EPROTO;
 		goto out;
 	}
 
-	gmi->gdma_region = resp.gdma_region;
+	gmi->dma_region_handle = resp.dma_region_handle;
 out:
 	kfree(req);
 	return err;
@@ -863,7 +870,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
 		return;
 	}
 
-	mana_gd_destroy_dma_region(gc, gmi->gdma_region);
+	mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
 	mana_gd_free_memory(gmi);
 	kfree(queue);
 }
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index f6bcd0cc6cda..1c59502d34b5 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1523,10 +1523,10 @@ static int mana_create_txq(struct mana_port_context *apc,
 	memset(&wq_spec, 0, sizeof(wq_spec));
 	memset(&cq_spec, 0, sizeof(cq_spec));
 
-	wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
+	wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
 	wq_spec.queue_size = txq->gdma_sq->queue_size;
 
-	cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
 	cq_spec.queue_size = cq->gdma_cq->queue_size;
 	cq_spec.modr_ctx_id = 0;
 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -1541,8 +1541,10 @@ static int mana_create_txq(struct mana_port_context *apc,
 	txq->gdma_sq->id = wq_spec.queue_index;
 	cq->gdma_cq->id = cq_spec.queue_index;
 
-	txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
-	cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+	txq->gdma_sq->mem_info.dma_region_handle =
+		GDMA_INVALID_DMA_REGION;
+	cq->gdma_cq->mem_info.dma_region_handle =
+		GDMA_INVALID_DMA_REGION;
 
 	txq->gdma_txq_id = txq->gdma_sq->id;
 
@@ -1753,10 +1755,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	memset(&wq_spec, 0, sizeof(wq_spec));
 	memset(&cq_spec, 0, sizeof(cq_spec));
 
-	wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
+	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
 
-	cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
 	cq_spec.queue_size = cq->gdma_cq->queue_size;
 	cq_spec.modr_ctx_id = 0;
 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -1769,8 +1771,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
 	rxq->gdma_rq->id = wq_spec.queue_index;
 	cq->gdma_cq->id = cq_spec.queue_index;
 
-	rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
-	cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
+	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
 
 	rxq->gdma_id = rxq->gdma_rq->id;
 	cq->gdma_id = cq->gdma_cq->id;
-- cgit v1.2.3
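Because mana_gd_destroy_dma_region() is exported in the NET_MANA symbol namespace, a consuming module must import that namespace before it can link against the symbol. A minimal sketch follows, assuming a hypothetical teardown helper; only MODULE_IMPORT_NS() and the exported function itself are established API here.

#include <linux/module.h>
#include <net/mana/gdma.h>

/* Required to resolve symbols exported with EXPORT_SYMBOL_NS(..., NET_MANA) */
MODULE_IMPORT_NS(NET_MANA);

static void example_teardown_region(struct gdma_context *gc,
				    gdma_obj_handle_t handle)
{
	/* Safe to call with GDMA_INVALID_DMA_REGION: per the patch above,
	 * the helper returns 0 without issuing a destroy request.
	 */
	if (mana_gd_destroy_dma_region(gc, handle))
		pr_warn("example: failed to destroy DMA region\n");
}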