 mm/slab.c |  8 +++++++-
 mm/slob.c |  2 ++
 mm/slub.c | 24 +++++++++++++++---------
 3 files changed, 24 insertions(+), 10 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index d2cd304fd8af..1a88fded7f19 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2746,7 +2746,7 @@ static int cache_grow(struct kmem_cache *cachep,
* Be lazy and only check for valid flags here, keeping it out of the
* critical path in kmem_cache_alloc().
*/
- BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+ BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
local_flags = (flags & GFP_LEVEL_MASK);
/* Take the l3 list lock to change the colour_next on this node */
@@ -3392,6 +3392,9 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
+ if (unlikely((flags & __GFP_ZERO) && ptr))
+ memset(ptr, 0, obj_size(cachep));
+
return ptr;
}
@@ -3443,6 +3446,9 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
prefetchw(objp);
+ if (unlikely((flags & __GFP_ZERO) && objp))
+ memset(objp, 0, obj_size(cachep));
+
return objp;
}
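
With the two hunks above, SLAB honors __GFP_ZERO on both the node-aware
and the generic allocation paths, zeroing obj_size(cachep) bytes after
the debug checks. A minimal caller-side sketch of what this enables
('my_cache' and 'struct my_obj' are hypothetical names, not part of
this patch):

	/* The caller asks the allocator for zeroed memory, so no
	 * separate memset() after kmem_cache_alloc() is needed.
	 */
	struct my_obj *obj = kmem_cache_alloc(my_cache,
					      GFP_KERNEL | __GFP_ZERO);
	if (!obj)
		return -ENOMEM;
	/* all obj_size(my_cache) bytes of *obj read as zero here */
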
diff --git a/mm/slob.c b/mm/slob.c
index 41d32c3c0be4..b3a45588fc46 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -334,6 +334,8 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
BUG_ON(!b);
spin_unlock_irqrestore(&slob_lock, flags);
}
+ if (unlikely((gfp & __GFP_ZERO) && b))
+ memset(b, 0, size);
return b;
}
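
Note that SLOB clears 'size' bytes, i.e. the caller-requested size.
With all three allocators honoring the flag, a zeroing wrapper can be
expressed in terms of __GFP_ZERO alone; a hedged sketch (illustration
only, not the kernel's actual kzalloc() implementation at this point
in the series):

	/* Sketch: OR in __GFP_ZERO instead of allocating and then
	 * calling memset() on the result.
	 */
	static inline void *kzalloc_sketch(size_t size, gfp_t flags)
	{
		return kmalloc(size, flags | __GFP_ZERO);
	}
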
diff --git a/mm/slub.c b/mm/slub.c
index 548d78df81e1..479eb5c01917 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1077,7 +1077,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
void *last;
void *p;
- BUG_ON(flags & ~(GFP_DMA | GFP_LEVEL_MASK));
+ BUG_ON(flags & ~(GFP_DMA | __GFP_ZERO | GFP_LEVEL_MASK));
if (flags & __GFP_WAIT)
local_irq_enable();
@@ -1540,7 +1540,7 @@ debug:
* Otherwise we can simply pick the next object from the lockless free list.
*/
static void __always_inline *slab_alloc(struct kmem_cache *s,
- gfp_t gfpflags, int node, void *addr)
+ gfp_t gfpflags, int node, void *addr, int length)
{
struct page *page;
void **object;
@@ -1558,19 +1558,25 @@ static void __always_inline *slab_alloc(struct kmem_cache *s,
page->lockless_freelist = object[page->offset];
}
local_irq_restore(flags);
+
+ if (unlikely((gfpflags & __GFP_ZERO) && object))
+ memset(object, 0, length);
+
return object;
}
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
- return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
+ return slab_alloc(s, gfpflags, -1,
+ __builtin_return_address(0), s->objsize);
}
EXPORT_SYMBOL(kmem_cache_alloc);
#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
- return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
+ return slab_alloc(s, gfpflags, node,
+ __builtin_return_address(0), s->objsize);
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif
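
The new 'length' argument bounds the memset() in slab_alloc(): the
cache entry points above pass s->objsize, while the kmalloc paths
below pass the caller-requested size, so kmalloc() zeroes only the
bytes that were asked for even when the backing kmalloc slab object
is larger. A short illustration of the two conventions (the sizes
are hypothetical):

	void *a = kmem_cache_alloc(s, GFP_KERNEL | __GFP_ZERO);
	/* s->objsize bytes of 'a' are zeroed */

	void *b = __kmalloc(100, GFP_KERNEL | __GFP_ZERO);
	/* 100 bytes of 'b' are zeroed, even if the backing
	 * kmalloc object is e.g. 128 bytes
	 */
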
@@ -2318,7 +2324,7 @@ void *__kmalloc(size_t size, gfp_t flags)
if (ZERO_OR_NULL_PTR(s))
return s;
- return slab_alloc(s, flags, -1, __builtin_return_address(0));
+ return slab_alloc(s, flags, -1, __builtin_return_address(0), size);
}
EXPORT_SYMBOL(__kmalloc);
@@ -2330,7 +2336,7 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
if (ZERO_OR_NULL_PTR(s))
return s;
- return slab_alloc(s, flags, node, __builtin_return_address(0));
+ return slab_alloc(s, flags, node, __builtin_return_address(0), size);
}
EXPORT_SYMBOL(__kmalloc_node);
#endif
@@ -2643,7 +2649,7 @@ void *kmem_cache_zalloc(struct kmem_cache *s, gfp_t flags)
{
void *x;
- x = slab_alloc(s, flags, -1, __builtin_return_address(0));
+ x = slab_alloc(s, flags, -1, __builtin_return_address(0), 0);
if (x)
memset(x, 0, s->objsize);
return x;
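
kmem_cache_zalloc() passes 0 as the new length argument, so even if a
caller also sets __GFP_ZERO the flag-driven memset() degenerates to a
zero-length clear, and the explicit memset() over s->objsize remains
the one that clears the object. Functionally the result is equivalent
to this sketch (illustration only; the patch deliberately keeps the
explicit memset() instead):

	static inline void *kmem_cache_zalloc_sketch(struct kmem_cache *s,
						     gfp_t flags)
	{
		return kmem_cache_alloc(s, flags | __GFP_ZERO);
	}
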
@@ -2693,7 +2699,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
if (ZERO_OR_NULL_PTR(s))
return s;
- return slab_alloc(s, gfpflags, -1, caller);
+ return slab_alloc(s, gfpflags, -1, caller, size);
}
void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
@@ -2704,7 +2710,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
if (ZERO_OR_NULL_PTR(s))
return s;
- return slab_alloc(s, gfpflags, node, caller);
+ return slab_alloc(s, gfpflags, node, caller, size);
}
#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)