diff options
author | Wanpeng Li <liwanp@linux.vnet.ibm.com> | 2013-07-04 08:33:22 +0800 |
---|---|---|
committer | Pekka Enberg <penberg@kernel.org> | 2013-07-07 18:37:46 +0300 |
commit | 0fa8103be4c20f893486c533e4c6dfbc5ccddeb4 (patch) | |
tree | 890b6eb373acf23232c6f05fdd5fb90ea511c83b /mm/slab.c | |
parent | 069e2b351de67e7a837b15b3d26c65c19b790cc3 (diff) | |
download | linux-stable-0fa8103be4c20f893486c533e4c6dfbc5ccddeb4.tar.gz linux-stable-0fa8103be4c20f893486c533e4c6dfbc5ccddeb4.tar.bz2 linux-stable-0fa8103be4c20f893486c533e4c6dfbc5ccddeb4.zip |
mm/slab: Fix drain freelist excessively
The drain_freelist is called to drain slabs_free lists for cache reap,
cache shrink, memory hotplug callback etc. The tofree parameter should
be the number of slabs to free instead of the number of slab objects to
free.
This patch fixes the callers that pass # of objects. Make sure they
pass # of slabs.
Acked-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 12 |
1 files changed, 9 insertions, 3 deletions
diff --git a/mm/slab.c b/mm/slab.c index 273a5ac2ade3..c9b4da9a1fe5 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1180,6 +1180,12 @@ static int init_cache_node_node(int node) return 0; } +static inline int slabs_tofree(struct kmem_cache *cachep, + struct kmem_cache_node *n) +{ + return (n->free_objects + cachep->num - 1) / cachep->num; +} + static void __cpuinit cpuup_canceled(long cpu) { struct kmem_cache *cachep; @@ -1241,7 +1247,7 @@ free_array_cache: n = cachep->node[node]; if (!n) continue; - drain_freelist(cachep, n, n->free_objects); + drain_freelist(cachep, n, slabs_tofree(cachep, n)); } } @@ -1408,7 +1414,7 @@ static int __meminit drain_cache_node_node(int node) if (!n) continue; - drain_freelist(cachep, n, n->free_objects); + drain_freelist(cachep, n, slabs_tofree(cachep, n)); if (!list_empty(&n->slabs_full) || !list_empty(&n->slabs_partial)) { @@ -2534,7 +2540,7 @@ static int __cache_shrink(struct kmem_cache *cachep) if (!n) continue; - drain_freelist(cachep, n, n->free_objects); + drain_freelist(cachep, n, slabs_tofree(cachep, n)); ret += !list_empty(&n->slabs_full) || !list_empty(&n->slabs_partial); |