author    Alexander Lobakin <aleksander.lobakin@intel.com>    2024-03-29 17:55:06 +0100
committer Jakub Kicinski <kuba@kernel.org>    2024-04-02 18:13:49 -0700
commit    4a96a4e807c390a9d91b450ebe04eeb2e0ecc076 (patch)
tree      7cd6a0a9228500d95ff2b37b70c1109da474fdae /include
parent    8db2509faa331865903a81a92f15c449e821b1d7 (diff)
page_pool: check for PP direct cache locality later
Since we have pool->p.napi (Jakub) and pool->cpuid (Lorenzo) to check whether it's safe to use direct recycling, we can use both globally for each page instead of relying solely on the @allow_direct argument. Let's assume that @allow_direct means "I'm sure it's local, don't waste time rechecking this" and, when it's false, try the mentioned fields to still recycle the page directly. If neither check passes, we'll lose some CPU cycles, but then it surely won't be the hotpath. On the other hand, paths where it is possible to use the direct cache, but not possible to safely set @allow_direct, will benefit from this move.

The whole propagation of @napi_safe through a dozen skb freeing functions can now go away, which saves us some stack space.

Signed-off-by: Alexander Lobakin <aleksander.lobakin@intel.com>
Link: https://lore.kernel.org/r/20240329165507.3240110-2-aleksander.lobakin@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
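The locality check the message describes can be sketched in a few lines of C. This is only an illustration of the idea, not the code the patch actually adds to net/core (the diff shown on this page is limited to include/): the helper name pp_recycle_is_local() is hypothetical, while pool->cpuid, pool->p.napi and napi->list_owner are the fields the commit message refers to.

    /* Minimal sketch of the recycling-locality check described above --
     * NOT the actual net/core/page_pool.c code from this patch. The
     * helper name is made up for illustration.
     */
    static bool pp_recycle_is_local(const struct page_pool *pool)
    {
    	unsigned int cpuid = smp_processor_id();
    	const struct napi_struct *napi;

    	/* Pool bound to a single CPU (pool->cpuid, Lorenzo): freeing
    	 * on that CPU is always local.
    	 */
    	if (READ_ONCE(pool->cpuid) == cpuid)
    		return true;

    	/* Pool with an associated NAPI instance (pool->p.napi, Jakub):
    	 * local if that NAPI is currently owned by this CPU.
    	 */
    	napi = READ_ONCE(pool->p.napi);
    	return napi && READ_ONCE(napi->list_owner) == cpuid;
    }

With such a check inside the page_pool core, napi_pp_put_page() no longer needs the caller to assert locality, which is why the @napi_safe parameter can be dropped from the skbuff.h helpers in the diff below.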
Diffstat (limited to 'include')
-rw-r--r--  include/linux/skbuff.h  12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index b7f1ecdaec38..03ea36a82cdd 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3510,25 +3510,25 @@ int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
unsigned int headroom);
int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
struct bpf_prog *prog);
-bool napi_pp_put_page(struct page *page, bool napi_safe);
+bool napi_pp_put_page(struct page *page);
static inline void
-skb_page_unref(const struct sk_buff *skb, struct page *page, bool napi_safe)
+skb_page_unref(const struct sk_buff *skb, struct page *page)
{
#ifdef CONFIG_PAGE_POOL
- if (skb->pp_recycle && napi_pp_put_page(page, napi_safe))
+ if (skb->pp_recycle && napi_pp_put_page(page))
return;
#endif
put_page(page);
}
static inline void
-napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe)
+napi_frag_unref(skb_frag_t *frag, bool recycle)
{
struct page *page = skb_frag_page(frag);
#ifdef CONFIG_PAGE_POOL
- if (recycle && napi_pp_put_page(page, napi_safe))
+ if (recycle && napi_pp_put_page(page))
return;
#endif
put_page(page);
@@ -3544,7 +3544,7 @@ napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe)
*/
static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
{
- napi_frag_unref(frag, recycle, false);
+ napi_frag_unref(frag, recycle);
}
/**