author		Maciej Fijalkowski <maciej.fijalkowski@intel.com>	2024-10-07 14:24:57 +0200
committer	Daniel Borkmann <daniel@iogearbox.net>	2024-10-14 17:23:45 +0200
commit		1d10b2bed2d4b2003f174da739d8163b7f7957cf (patch)
tree		931b6b5405b4d07ad28cdfea02fd465f0e8f042a /net/xdp
parent		6e126872191df946a6fe01b79273119d32d96711 (diff)
xsk: Wrap duplicated code to function
Both allocation paths have exactly the same code responsible for getting
and initializing an xskb. Pull it out into a common function.
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20241007122458.282590-6-maciej.fijalkowski@intel.com
Diffstat (limited to 'net/xdp')
-rw-r--r--	net/xdp/xsk_buff_pool.c	34
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
index e946ba4a5ccf..ae71da7d2cd6 100644
--- a/net/xdp/xsk_buff_pool.c
+++ b/net/xdp/xsk_buff_pool.c
@@ -503,6 +503,22 @@ static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
 	return *addr < pool->addrs_cnt;
 }
 
+static struct xdp_buff_xsk *xp_get_xskb(struct xsk_buff_pool *pool, u64 addr)
+{
+	struct xdp_buff_xsk *xskb;
+
+	if (pool->unaligned) {
+		xskb = pool->free_heads[--pool->free_heads_cnt];
+		xp_init_xskb_addr(xskb, pool, addr);
+		if (pool->dma_pages)
+			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
+	} else {
+		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
+	}
+
+	return xskb;
+}
+
 static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
 {
 	struct xdp_buff_xsk *xskb;
@@ -528,14 +544,7 @@ static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
 		break;
 	}
 
-	if (pool->unaligned) {
-		xskb = pool->free_heads[--pool->free_heads_cnt];
-		xp_init_xskb_addr(xskb, pool, addr);
-		if (pool->dma_pages)
-			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
-	} else {
-		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
-	}
+	xskb = xp_get_xskb(pool, addr);
 
 	xskq_cons_release(pool->fq);
 	return xskb;
@@ -593,14 +602,7 @@ static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xd
 			continue;
 		}
 
-		if (pool->unaligned) {
-			xskb = pool->free_heads[--pool->free_heads_cnt];
-			xp_init_xskb_addr(xskb, pool, addr);
-			if (pool->dma_pages)
-				xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
-		} else {
-			xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
-		}
+		xskb = xp_get_xskb(pool, addr);
 
 		*xdp = &xskb->xdp;
 		xdp++;
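
For readers outside the kernel tree, the sketch below models the same refactor in plain user-space C: two allocation paths that previously would have open-coded the unaligned/aligned branch now share one helper, as xp_get_xskb() does above. This is a minimal sketch, not kernel code; struct pool, pool_get_buf(), alloc_one(), alloc_batch(), and the fixed 4 KiB chunk size are hypothetical stand-ins, and the real helper additionally initializes per-buffer DMA state via xp_init_xskb_dma().

	/* Hypothetical user-space model of the xp_get_xskb() refactor. */
	#include <stdio.h>

	struct buf {
		unsigned long addr;
	};

	struct pool {
		int unaligned;			/* unaligned chunk mode? */
		struct buf *heads;		/* backing array, indexed in aligned mode */
		struct buf **free_heads;	/* free stack, popped in unaligned mode */
		unsigned int free_heads_cnt;
	};

	/* The common helper: pick a buffer for 'addr' by either popping the
	 * free stack (unaligned mode) or indexing the heads array (aligned
	 * mode), then record the address -- the branch both callers used to
	 * duplicate. */
	static struct buf *pool_get_buf(struct pool *pool, unsigned long addr)
	{
		struct buf *b;

		if (pool->unaligned)
			b = pool->free_heads[--pool->free_heads_cnt];
		else
			b = &pool->heads[addr / 4096];	/* 4 KiB chunks assumed */

		b->addr = addr;
		return b;
	}

	/* Two callers, analogous to __xp_alloc() and xp_alloc_new_from_fq():
	 * both now call pool_get_buf() instead of open-coding the branch. */
	static struct buf *alloc_one(struct pool *pool, unsigned long addr)
	{
		return pool_get_buf(pool, addr);
	}

	static unsigned int alloc_batch(struct pool *pool, const unsigned long *addrs,
					unsigned int n, struct buf **out)
	{
		unsigned int i;

		for (i = 0; i < n; i++)
			out[i] = pool_get_buf(pool, addrs[i]);
		return n;
	}

	int main(void)
	{
		struct buf heads[4] = {0};
		struct pool pool = { .unaligned = 0, .heads = heads };
		unsigned long addrs[2] = { 0, 4096 };
		struct buf *batch[2];

		alloc_one(&pool, 8192);
		alloc_batch(&pool, addrs, 2, batch);
		printf("one: %lu, batch: %lu %lu\n",
		       heads[2].addr, batch[0]->addr, batch[1]->addr);
		return 0;
	}

Compiled and run, the sketch prints "one: 8192, batch: 0 4096". The design point carries over directly: once the branch lives in one place, a later change (for example, the DMA initialization the kernel helper performs) only has to be made once.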