Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  |  35
1 file changed, 12 insertions, 23 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 272e809404d5..e0819fa96559 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1031,12 +1031,12 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
}
/*
- * Construct gfp mask to allocate from a specific node but do not invoke reclaim
- * or warn about failures.
+ * Construct gfp mask to allocate from a specific node but do not direct reclaim
+ * or warn about failures. kswapd may still wake to reclaim in the background.
*/
static inline gfp_t gfp_exact_node(gfp_t flags)
{
- return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_WAIT;
+ return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_DIRECT_RECLAIM;
}
#endif
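Note: the masking above forces __GFP_THISNODE and __GFP_NOWARN on and strips only the direct-reclaim bit, so any kswapd-reclaim bit the caller passed survives. As a rough user-space illustration of the bit manipulation only (the flag values below are made-up placeholders, not the real constants from include/linux/gfp.h):

#include <stdio.h>

/* Placeholder bit values for illustration only -- the real gfp flags
 * are defined in include/linux/gfp.h with different values. */
#define __GFP_THISNODE       0x1u
#define __GFP_NOWARN         0x2u
#define __GFP_DIRECT_RECLAIM 0x4u
#define __GFP_KSWAPD_RECLAIM 0x8u
typedef unsigned int gfp_t;

/* Mirror of the patched helper: pin the allocation to one node,
 * suppress failure warnings, and clear only direct reclaim so the
 * background kswapd hint (if any) is left intact. */
static inline gfp_t gfp_exact_node(gfp_t flags)
{
	return (flags | __GFP_THISNODE | __GFP_NOWARN) & ~__GFP_DIRECT_RECLAIM;
}

int main(void)
{
	gfp_t gfp = __GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM;
	printf("before: %#x  after: %#x\n", gfp, gfp_exact_node(gfp));
	return 0;
}

Running the sketch just shows the direct-reclaim bit being cleared while the node and no-warn bits are forced on.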
@@ -1889,21 +1889,10 @@ static void slab_destroy(struct kmem_cache *cachep, struct page *page)
freelist = page->freelist;
slab_destroy_debugcheck(cachep, page);
- if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
- struct rcu_head *head;
-
- /*
- * RCU free overloads the RCU head over the LRU.
- * slab_page has been overloeaded over the LRU,
- * however it is not used from now on so that
- * we can use it safely.
- */
- head = (void *)&page->rcu_head;
- call_rcu(head, kmem_rcu_free);
-
- } else {
+ if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
+ call_rcu(&page->rcu_head, kmem_rcu_free);
+ else
kmem_freepages(cachep, page);
- }
/*
* From now on, we don't use freelist
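The shorter branch works because struct page carries a proper rcu_head member, so slab_destroy() can hand it to call_rcu() directly instead of casting through the LRU fields. For readers unfamiliar with the pattern, here is a short kernel-style sketch of the same deferred-free idiom using a hypothetical struct foo (illustrative only, not part of this patch):

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object freed under RCU, mirroring how slab_destroy()
 * now passes page->rcu_head straight to call_rcu(). */
struct foo {
	int data;
	struct rcu_head rcu;	/* embedded callback head, like page->rcu_head */
};

static void foo_rcu_free(struct rcu_head *head)
{
	/* Recover the enclosing object from the embedded rcu_head. */
	struct foo *f = container_of(head, struct foo, rcu);

	kfree(f);
}

static void foo_release(struct foo *f)
{
	/* Defer the actual free until a grace period has elapsed,
	 * so concurrent RCU readers can finish with the object. */
	call_rcu(&f->rcu, foo_rcu_free);
}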
@@ -2633,7 +2622,7 @@ static int cache_grow(struct kmem_cache *cachep,
offset *= cachep->colour_off;
- if (local_flags & __GFP_WAIT)
+ if (gfpflags_allow_blocking(local_flags))
local_irq_enable();
/*
@@ -2663,7 +2652,7 @@ static int cache_grow(struct kmem_cache *cachep,
cache_init_objs(cachep, page);
- if (local_flags & __GFP_WAIT)
+ if (gfpflags_allow_blocking(local_flags))
local_irq_disable();
check_irq_off();
spin_lock(&n->list_lock);
@@ -2677,7 +2666,7 @@ static int cache_grow(struct kmem_cache *cachep,
opps1:
kmem_freepages(cachep, page);
failed:
- if (local_flags & __GFP_WAIT)
+ if (gfpflags_allow_blocking(local_flags))
local_irq_disable();
return 0;
}
@@ -2869,7 +2858,7 @@ force_grow:
static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
gfp_t flags)
{
- might_sleep_if(flags & __GFP_WAIT);
+ might_sleep_if(gfpflags_allow_blocking(flags));
#if DEBUG
kmem_flagcheck(cachep, flags);
#endif
@@ -3057,11 +3046,11 @@ retry:
*/
struct page *page;
- if (local_flags & __GFP_WAIT)
+ if (gfpflags_allow_blocking(local_flags))
local_irq_enable();
kmem_flagcheck(cache, flags);
page = kmem_getpages(cache, local_flags, numa_mem_id());
- if (local_flags & __GFP_WAIT)
+ if (gfpflags_allow_blocking(local_flags))
local_irq_disable();
if (page) {
/*
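All of the open-coded __GFP_WAIT tests in cache_grow(), cache_alloc_debugcheck_before() and the fallback allocation path are converted to gfpflags_allow_blocking(). The helper is introduced in include/linux/gfp.h by the same series and, reproduced here from memory (so verify against the header), reduces to a test of the direct-reclaim bit:

/* Approximate shape of the gfp.h helper: blocking is allowed exactly
 * when the caller permits direct reclaim. */
static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
{
	return !!(gfp_flags & __GFP_DIRECT_RECLAIM);
}

With that definition, the irq-enable/disable dance around kmem_getpages() behaves the same for callers that could previously sleep via __GFP_WAIT, while purely atomic callers keep interrupts disabled throughout.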