author     Christoph Lameter <cl@linux.com>      2011-06-01 12:25:51 -0500
committer  Pekka Enberg <penberg@kernel.org>     2011-07-02 13:26:54 +0300
commit  61728d1efc927eccfa64c50ede4998a8765805c3 (patch)
tree    8aeaa3bf585d297400386decf94df2ad842aaecc /mm
parent  5cc6eee8a8c1aefe9c86fe7345a2aa1c4ca70dc6 (diff)
slub: Pass kmem_cache struct to lock and freeze slab
We need more information about the slab for the cmpxchg implementation.

Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
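
Background sketch (not part of this commit): the reason the kmem_cache pointer has to reach lock_and_freeze_slab() is that a cmpxchg-based freeze operates on per-cache state rather than on the page bit lock alone. The fragment below only illustrates that direction; cmpxchg_double_slab() and the page counters/frozen fields are assumptions modelled on later patches in this series and do not exist in the tree at this point.

/*
 * Sketch only -- NOT code from this patch.  A lockless freeze would
 * replace the slab_trylock() bit lock with an atomic update of the
 * page's freelist and counters, which is why the kmem_cache pointer
 * is needed.  cmpxchg_double_slab() and the counters/frozen fields
 * are assumed here.
 */
static inline int lock_and_freeze_slab(struct kmem_cache *s,
		struct kmem_cache_node *n, struct page *page)
{
	void *freelist = page->freelist;
	unsigned long counters = page->counters;
	struct page new;

	new.counters = counters;
	new.frozen = 1;			/* claim the slab for a cpu */

	if (!cmpxchg_double_slab(s, page,
			freelist, counters,
			freelist, new.counters,
			"lock and freeze"))
		return 0;		/* lost the race, try the next slab */

	remove_partial(n, page);
	return 1;
}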
Diffstat (limited to 'mm')
-rw-r--r--    mm/slub.c    15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index e39be0928a22..5cf98ff09360 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1457,8 +1457,8 @@ static inline void remove_partial(struct kmem_cache_node *n,
*
* Must hold list_lock.
*/
-static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
- struct page *page)
+static inline int lock_and_freeze_slab(struct kmem_cache *s,
+ struct kmem_cache_node *n, struct page *page)
{
if (slab_trylock(page)) {
remove_partial(n, page);
@@ -1470,7 +1470,8 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
/*
* Try to allocate a partial slab from a specific node.
*/
-static struct page *get_partial_node(struct kmem_cache_node *n)
+static struct page *get_partial_node(struct kmem_cache *s,
+ struct kmem_cache_node *n)
{
struct page *page;
@@ -1485,7 +1486,7 @@ static struct page *get_partial_node(struct kmem_cache_node *n)
spin_lock(&n->list_lock);
list_for_each_entry(page, &n->partial, lru)
- if (lock_and_freeze_slab(n, page))
+ if (lock_and_freeze_slab(s, n, page))
goto out;
page = NULL;
out:
@@ -1536,7 +1537,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
n->nr_partial > s->min_partial) {
- page = get_partial_node(n);
+ page = get_partial_node(s, n);
if (page) {
put_mems_allowed();
return page;
@@ -1556,7 +1557,7 @@ static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
struct page *page;
int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;
- page = get_partial_node(get_node(s, searchnode));
+ page = get_partial_node(s, get_node(s, searchnode));
if (page || node != NUMA_NO_NODE)
return page;
@@ -2081,7 +2082,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
{
void *prior;
void **object = (void *)x;
- unsigned long flags;
+ unsigned long uninitialized_var(flags);
local_irq_save(flags);
slab_lock(page);
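
For reference, the uninitialized_var() annotation in the last hunk does not initialize anything; it only silences a false-positive "may be used uninitialized" warning from some gcc versions once the flag handling becomes conditional in the cmpxchg implementation. In kernels of this era the macro was defined in include/linux/compiler-gcc.h essentially as shown below; the usage line is the one from the hunk above.

/*
 * Self-assignment marks the variable as "set" for gcc without
 * generating any code; it does not give flags a defined value, so
 * the warning being suppressed must genuinely be a false positive.
 */
#define uninitialized_var(x) x = x

/* As used above; expands to: unsigned long flags = flags; */
unsigned long uninitialized_var(flags);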