author     Yajun Deng <yajun.deng@linux.dev>       2021-09-14 11:49:35 +0800
committer  David S. Miller <davem@davemloft.net>   2021-09-14 14:28:58 +0100
commit     32e3573f739209e612ea5c98291251b00cc6f70e (patch)
tree       cc3eb4a806d0a9d01c7be97e9fde666fafcbc797 /net/core/skbuff.c
parent     b9bbc4c1debc837ba56872fb3b2499ba6459ca8b (diff)
skbuff: inline page_frag_alloc_align()
__alloc_frag_align() is short and is only called by two functions, so inline
page_frag_alloc_align() into both callers and drop the helper to reduce the
overhead of the extra call.

Reported-by: kernel test robot <oliver.sang@intel.com>
Signed-off-by: Yajun Deng <yajun.deng@linux.dev>
Signed-off-by: David S. Miller <davem@davemloft.net>
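For readability, this is how the NAPI-path allocator reads after the change,
reconstructed from the hunk below rather than quoted verbatim from the file:

void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
{
	/* The per-CPU NAPI cache is now fetched directly in the exported
	 * function; the former __alloc_frag_align() wrapper is gone.
	 */
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	fragsz = SKB_DATA_ALIGN(fragsz);

	return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
}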
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r--   net/core/skbuff.c   19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2170bea2c7de..7c2ab27fcbf9 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -134,34 +134,31 @@ struct napi_alloc_cache {
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 
-static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
-				unsigned int align_mask)
+void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 {
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
-	return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
-}
-
-void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
-{
 	fragsz = SKB_DATA_ALIGN(fragsz);
 
-	return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+	return page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
 }
 EXPORT_SYMBOL(__napi_alloc_frag_align);
 
 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
 {
-	struct page_frag_cache *nc;
 	void *data;
 
 	fragsz = SKB_DATA_ALIGN(fragsz);
 	if (in_hardirq() || irqs_disabled()) {
-		nc = this_cpu_ptr(&netdev_alloc_cache);
+		struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache);
+
 		data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
 	} else {
+		struct napi_alloc_cache *nc;
+
 		local_bh_disable();
-		data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
+		nc = this_cpu_ptr(&napi_alloc_cache);
+		data = page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask);
 		local_bh_enable();
 	}
 	return data;
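
Usage note: the two exported functions above back the usual frag allocators
(netdev_alloc_frag()/napi_alloc_frag() and their _align variants declared in
include/linux/skbuff.h), so existing callers are unaffected by this change.
Below is a minimal, hypothetical driver-style sketch of that call path,
assuming those wrappers and the standard build_skb()/skb_free_frag() pairing;
the function name and sizing are illustrative only and not part of this patch.

#include <linux/skbuff.h>

/* Hypothetical RX buffer helper, for illustration only. */
static struct sk_buff *example_rx_build_skb(unsigned int len)
{
	unsigned int truesize = SKB_DATA_ALIGN(len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;
	void *data;

	/* Ends up in __netdev_alloc_frag_align(); when IRQs are disabled or
	 * we are in hard IRQ context, the netdev_alloc_cache branch shown in
	 * the diff above is taken, otherwise the NAPI cache is used under
	 * local_bh_disable().
	 */
	data = netdev_alloc_frag(truesize);
	if (!data)
		return NULL;

	skb = build_skb(data, truesize);
	if (!skb)
		skb_free_frag(data);	/* release the page fragment on failure */

	return skb;
}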