diff options
author | Horatiu Vultur <horatiu.vultur@microchip.com> | 2023-04-22 16:23:44 +0200 |
---|---|---|
committer | Jakub Kicinski <kuba@kernel.org> | 2023-04-24 18:58:04 -0700 |
commit | 700f11eb2cbea349bda2599b4b676b49d43b4175 (patch) | |
tree | 62fcef2e01e3dee4946f51350fa56dfec90ee312 /drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c | |
parent | ee3392ed16b064594a14ce5886e412efb05ed17b (diff) | |
download | linux-stable-700f11eb2cbea349bda2599b4b676b49d43b4175.tar.gz linux-stable-700f11eb2cbea349bda2599b4b676b49d43b4175.tar.bz2 linux-stable-700f11eb2cbea349bda2599b4b676b49d43b4175.zip |
lan966x: Don't use xdp_frame when action is XDP_TX
When the action of an xdp program was XDP_TX, lan966x was creating
an xdp_frame and using it to send the frame back. But it is also
possible to send the frame back without needing an xdp_frame, because
it is possible to send it back using the page.
And then, once the frame is transmitted, it is possible to use
page_pool_recycle_direct directly, as lan966x is using page pools.
This saves some CPU usage on this path, which results in a higher
number of transmitted frames. Below are the statistics:
Frame size: Improvement:
64 ~8%
256 ~11%
512 ~8%
1000 ~0%
1500 ~0%
Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://lore.kernel.org/r/20230422142344.3630602-1-horatiu.vultur@microchip.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Diffstat (limited to 'drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c')
-rw-r--r-- | drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c | 35 |
1 files changed, 23 insertions, 12 deletions
diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c index 2ed76bb61a73..bd72fbc2220f 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c @@ -390,6 +390,7 @@ static void lan966x_fdma_stop_netdev(struct lan966x *lan966x) static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight) { struct lan966x_tx *tx = &lan966x->tx; + struct lan966x_rx *rx = &lan966x->rx; struct lan966x_tx_dcb_buf *dcb_buf; struct xdp_frame_bulk bq; struct lan966x_db *db; @@ -432,7 +433,8 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight) if (dcb_buf->xdp_ndo) xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq); else - xdp_return_frame_rx_napi(dcb_buf->data.xdpf); + page_pool_recycle_direct(rx->page_pool, + dcb_buf->data.page); } clear = true; @@ -699,15 +701,14 @@ static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use) tx->last_in_use = next_to_use; } -int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, - struct xdp_frame *xdpf, - struct page *page, - bool dma_map) +int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, void *ptr, u32 len) { struct lan966x *lan966x = port->lan966x; struct lan966x_tx_dcb_buf *next_dcb_buf; struct lan966x_tx *tx = &lan966x->tx; + struct xdp_frame *xdpf; dma_addr_t dma_addr; + struct page *page; int next_to_use; __be32 *ifh; int ret = 0; @@ -722,8 +723,13 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, goto out; } + /* Get the next buffer */ + next_dcb_buf = &tx->dcbs_buf[next_to_use]; + /* Generate new IFH */ - if (dma_map) { + if (!len) { + xdpf = ptr; + if (xdpf->headroom < IFH_LEN_BYTES) { ret = NETDEV_TX_OK; goto out; @@ -743,11 +749,16 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, goto out; } + next_dcb_buf->data.xdpf = xdpf; + next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES; + /* Setup next dcb */ lan966x_fdma_tx_setup_dcb(tx, next_to_use, 
xdpf->len + IFH_LEN_BYTES, dma_addr); } else { + page = ptr; + ifh = page_address(page) + XDP_PACKET_HEADROOM; memset(ifh, 0x0, sizeof(__be32) * IFH_LEN); lan966x_ifh_set_bypass(ifh, 1); @@ -756,21 +767,21 @@ int lan966x_fdma_xmit_xdpf(struct lan966x_port *port, dma_addr = page_pool_get_dma_addr(page); dma_sync_single_for_device(lan966x->dev, dma_addr + XDP_PACKET_HEADROOM, - xdpf->len + IFH_LEN_BYTES, + len + IFH_LEN_BYTES, DMA_TO_DEVICE); + next_dcb_buf->data.page = page; + next_dcb_buf->len = len + IFH_LEN_BYTES; + /* Setup next dcb */ lan966x_fdma_tx_setup_dcb(tx, next_to_use, - xdpf->len + IFH_LEN_BYTES, + len + IFH_LEN_BYTES, dma_addr + XDP_PACKET_HEADROOM); } /* Fill up the buffer */ - next_dcb_buf = &tx->dcbs_buf[next_to_use]; next_dcb_buf->use_skb = false; - next_dcb_buf->data.xdpf = xdpf; - next_dcb_buf->xdp_ndo = dma_map; - next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES; + next_dcb_buf->xdp_ndo = !len; next_dcb_buf->dma_addr = dma_addr; next_dcb_buf->used = true; next_dcb_buf->ptp = false; |