author    David Rientjes <rientjes@google.com>    2016-12-12 16:41:44 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-12-12 18:55:07 -0800
commit    bf00bd3458041c4643a13d80fb349d29cb66eb63 (patch)
tree      743f4b6e25be9e4f5fbfb4d4c3472cd83b299cad /mm/slab.c
parent    f728b0a5d72ae99c446f933912914a61254c03b6 (diff)
mm, slab: maintain total slab count instead of active count
Rather than tracking the number of active slabs for each node, track the
total number of slabs. This is a minor improvement that avoids active
slab tracking when a slab goes from free to partial or from partial to
free.

For slab debugging, this also removes the explicit free count, since it
can easily be inferred from the difference between the number of total
objects and the number of active objects.

Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1612042020110.115755@chino.kir.corp.google.com
Signed-off-by: David Rientjes <rientjes@google.com>
Suggested-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
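The accounting this patch relies on is easy to see in isolation. Below is a
minimal userspace sketch (not kernel code) of how the active counts are
derived from the three per-node counters kept after this change; the struct
fields mirror kmem_cache_node, while objs_per_slab is a hypothetical
stand-in for cachep->num.

#include <stdio.h>

/*
 * Minimal userspace sketch of the per-node accounting after this patch.
 * The fields mirror struct kmem_cache_node; objs_per_slab is a stand-in
 * for cachep->num. Illustrative only, not kernel code.
 */
struct node_stats {
	unsigned long total_slabs;	/* all slabs on the node */
	unsigned long free_slabs;	/* slabs with no allocated objects */
	unsigned long free_objects;	/* free objects across all slabs */
};

int main(void)
{
	struct node_stats n = {
		.total_slabs = 10,
		.free_slabs = 3,
		.free_objects = 37,
	};
	unsigned long objs_per_slab = 8;	/* stand-in for cachep->num */

	/* Everything get_slabinfo() reports can be derived on demand: */
	unsigned long active_slabs = n.total_slabs - n.free_slabs;
	unsigned long num_objs = n.total_slabs * objs_per_slab;
	unsigned long active_objs = num_objs - n.free_objects;

	/* Same shape as the pr_warn() in slab_out_of_memory() below. */
	printf("slabs: %lu/%lu, objs: %lu/%lu\n",
	       active_slabs, n.total_slabs, active_objs, num_objs);
	return 0;
}

With these three counters, no active_slabs counter has to be maintained on
the free/partial transitions: only total_slabs changes when a slab is
created or destroyed, and only free_slabs changes when a slab moves on or
off the slabs_free list.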
Diffstat (limited to 'mm/slab.c')
-rw-r--r--    mm/slab.c    70
1 file changed, 29 insertions(+), 41 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index e06da6ceaf73..87b29e76cafd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -227,7 +227,7 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
INIT_LIST_HEAD(&parent->slabs_full);
INIT_LIST_HEAD(&parent->slabs_partial);
INIT_LIST_HEAD(&parent->slabs_free);
- parent->active_slabs = 0;
+ parent->total_slabs = 0;
parent->free_slabs = 0;
parent->shared = NULL;
parent->alien = NULL;
@@ -1381,20 +1381,18 @@ slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
cachep->name, cachep->size, cachep->gfporder);
for_each_kmem_cache_node(cachep, node, n) {
- unsigned long active_objs = 0, free_objs = 0;
- unsigned long active_slabs, num_slabs;
+ unsigned long total_slabs, free_slabs, free_objs;
spin_lock_irqsave(&n->list_lock, flags);
- active_slabs = n->active_slabs;
- num_slabs = active_slabs + n->free_slabs;
-
- active_objs += (num_slabs * cachep->num) - n->free_objects;
- free_objs += n->free_objects;
+ total_slabs = n->total_slabs;
+ free_slabs = n->free_slabs;
+ free_objs = n->free_objects;
spin_unlock_irqrestore(&n->list_lock, flags);
- pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
- node, active_slabs, num_slabs, active_objs,
- num_slabs * cachep->num, free_objs);
+ pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
+ node, total_slabs - free_slabs, total_slabs,
+ (total_slabs * cachep->num) - free_objs,
+ total_slabs * cachep->num);
}
#endif
}
@@ -2307,6 +2305,7 @@ static int drain_freelist(struct kmem_cache *cache,
page = list_entry(p, struct page, lru);
list_del(&page->lru);
n->free_slabs--;
+ n->total_slabs--;
/*
* Safe to drop the lock. The slab is no longer linked
* to the cache.
@@ -2741,13 +2740,12 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
n = get_node(cachep, page_to_nid(page));
spin_lock(&n->list_lock);
+ n->total_slabs++;
if (!page->active) {
list_add_tail(&page->lru, &(n->slabs_free));
n->free_slabs++;
- } else {
+ } else
fixup_slab_list(cachep, n, page, &list);
- n->active_slabs++;
- }
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num - page->active;
@@ -2874,7 +2872,7 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
/* Try to find non-pfmemalloc slab if needed */
static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
- struct page *page, bool *page_is_free, bool pfmemalloc)
+ struct page *page, bool pfmemalloc)
{
if (!page)
return NULL;
@@ -2893,10 +2891,9 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
/* Move pfmemalloc slab to the end of list to speed up next search */
list_del(&page->lru);
- if (*page_is_free) {
- WARN_ON(page->active);
+ if (!page->active) {
list_add_tail(&page->lru, &n->slabs_free);
- *page_is_free = false;
+ n->free_slabs++;
} else
list_add_tail(&page->lru, &n->slabs_partial);
@@ -2908,7 +2905,7 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
n->free_touched = 1;
list_for_each_entry(page, &n->slabs_free, lru) {
if (!PageSlabPfmemalloc(page)) {
- *page_is_free = true;
+ n->free_slabs--;
return page;
}
}
@@ -2919,26 +2916,19 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
{
struct page *page;
- bool page_is_free = false;
assert_spin_locked(&n->list_lock);
- page = list_first_entry_or_null(&n->slabs_partial,
- struct page, lru);
+ page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
if (!page) {
n->free_touched = 1;
- page = list_first_entry_or_null(&n->slabs_free,
- struct page, lru);
+ page = list_first_entry_or_null(&n->slabs_free, struct page,
+ lru);
if (page)
- page_is_free = true;
+ n->free_slabs--;
}
if (sk_memalloc_socks())
- page = get_valid_first_slab(n, page, &page_is_free, pfmemalloc);
-
- if (page && page_is_free) {
- n->active_slabs++;
- n->free_slabs--;
- }
+ page = get_valid_first_slab(n, page, pfmemalloc);
return page;
}
@@ -3441,7 +3431,6 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
if (page->active == 0) {
list_add(&page->lru, &n->slabs_free);
n->free_slabs++;
- n->active_slabs--;
} else {
/* Unconditionally move a slab to the end of the
* partial list on free - maximum time for the
@@ -3457,6 +3446,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
page = list_last_entry(&n->slabs_free, struct page, lru);
list_move(&page->lru, list);
n->free_slabs--;
+ n->total_slabs--;
}
}
@@ -4109,8 +4099,8 @@ out:
void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
{
unsigned long active_objs, num_objs, active_slabs;
- unsigned long num_slabs = 0, free_objs = 0, shared_avail = 0;
- unsigned long num_slabs_free = 0;
+ unsigned long total_slabs = 0, free_objs = 0, shared_avail = 0;
+ unsigned long free_slabs = 0;
int node;
struct kmem_cache_node *n;
@@ -4118,9 +4108,8 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
check_irq_on();
spin_lock_irq(&n->list_lock);
- num_slabs += n->active_slabs + n->free_slabs;
- num_slabs_free += n->free_slabs;
-
+ total_slabs += n->total_slabs;
+ free_slabs += n->free_slabs;
free_objs += n->free_objects;
if (n->shared)
@@ -4128,15 +4117,14 @@ void get_slabinfo(struct kmem_cache *cachep, struct slabinfo *sinfo)
spin_unlock_irq(&n->list_lock);
}
- num_objs = num_slabs * cachep->num;
- active_slabs = num_slabs - num_slabs_free;
-
+ num_objs = total_slabs * cachep->num;
+ active_slabs = total_slabs - free_slabs;
active_objs = num_objs - free_objs;
sinfo->active_objs = active_objs;
sinfo->num_objs = num_objs;
sinfo->active_slabs = active_slabs;
- sinfo->num_slabs = num_slabs;
+ sinfo->num_slabs = total_slabs;
sinfo->shared_avail = shared_avail;
sinfo->limit = cachep->limit;
sinfo->batchcount = cachep->batchcount;