author:    Jesper Dangaard Brouer <brouer@redhat.com>  2016-03-15 14:53:38 -0700
committer: Linus Torvalds <torvalds@linux-foundation.org>  2016-03-15 16:55:16 -0700
commit:    fab9963a69dbd71304357dbfe4ec5345f14cebdd (patch)
tree:      369f7f82b5f17d426ccaddc4295d6568f7647068 /mm/slab.c
parent:    11c7aec2a9b4e685bbf6a15148e7841b3525fc0c (diff)
download:  linux-stable-fab9963a69dbd71304357dbfe4ec5345f14cebdd.tar.gz
           linux-stable-fab9963a69dbd71304357dbfe4ec5345f14cebdd.tar.bz2
           linux-stable-fab9963a69dbd71304357dbfe4ec5345f14cebdd.zip
mm: fault-inject take over bootstrap kmem_cache check
Remove the SLAB-specific function slab_should_failslab() by moving the
fault-injection check for the bootstrap slab into the shared function
should_failslab() (used by both SLAB and SLUB).
This is a step towards sharing the alloc hooks between SLUB and SLAB.
The bootstrap slab "kmem_cache" is the cache the allocator itself uses
to allocate struct kmem_cache objects.
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 12 ++----------
1 file changed, 2 insertions(+), 10 deletions(-)
```diff
diff --git a/mm/slab.c b/mm/slab.c
index 621fbcb35a36..95f344d79453 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2926,14 +2926,6 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
 #endif
 
-static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
-{
-	if (unlikely(cachep == kmem_cache))
-		return false;
-
-	return should_failslab(cachep->object_size, flags, cachep->flags);
-}
-
 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	void *objp;
@@ -3155,7 +3147,7 @@ slab_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 
 	lockdep_trace_alloc(flags);
 
-	if (slab_should_failslab(cachep, flags))
+	if (should_failslab(cachep, flags))
 		return NULL;
 
 	cachep = memcg_kmem_get_cache(cachep, flags);
@@ -3243,7 +3235,7 @@ slab_alloc(struct kmem_cache *cachep, gfp_t flags, unsigned long caller)
 
 	lockdep_trace_alloc(flags);
 
-	if (slab_should_failslab(cachep, flags))
+	if (should_failslab(cachep, flags))
 		return NULL;
 
 	cachep = memcg_kmem_get_cache(cachep, flags);
```
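The diff above is limited to the mm/slab.c side. The counterpart of the change is that the generic should_failslab() (in mm/failslab.c) now receives the kmem_cache pointer directly and performs the bootstrap-cache test itself. Below is a minimal sketch of what that shared function looks like after the move; the failslab attribute names (ignore_gfp_reclaim, cache_filter) reflect kernels of this era and are illustrative rather than a verbatim quote of the committed code.

```c
/*
 * Sketch of the shared fault-injection check in mm/failslab.c after
 * this change: should_failslab() takes the kmem_cache pointer, so the
 * bootstrap-cache test lives here instead of in SLAB-only code.
 */
bool should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
	/*
	 * The bootstrap cache allocates struct kmem_cache itself;
	 * fault injection must never fail those allocations.
	 */
	if (unlikely(s == kmem_cache))
		return false;

	/* __GFP_NOFAIL allocations are not allowed to fail at all. */
	if (gfpflags & __GFP_NOFAIL)
		return false;

	/* Optionally leave reclaim-capable allocations alone. */
	if (failslab.ignore_gfp_reclaim && (gfpflags & __GFP_RECLAIM))
		return false;

	/* Optionally inject only into caches marked SLAB_FAILSLAB. */
	if (failslab.cache_filter && !(s->flags & SLAB_FAILSLAB))
		return false;

	return should_fail(&failslab.attr, s->object_size);
}
```

With this shape, SLAB's slab_alloc()/slab_alloc_node() and SLUB's pre-allocation hook can call the same function, which is the step towards shared alloc hooks that the commit message refers to.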