author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2016-08-23 14:53:19 +0200
committer Thomas Gleixner <tglx@linutronix.de>	2016-09-06 18:30:20 +0200
commit    6731d4f12315aed5f7eefc52dac30428e382d7d0 (patch)
tree      66abd6f0c2aa4191a11e076b7090f29e257cc7b3 /mm/slab.c
parent    e6d4989a9ad1ccc343f29578a461612ed80fc6c5 (diff)
slab: Convert to hotplug state machine
Install the callbacks via the state machine.
Signed-off-by: Richard Weinberger <richard@nod.at>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: linux-mm@kvack.org
Cc: rt@linutronix.de
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Link: http://lkml.kernel.org/r/20160823125319.abeapfjapf2kfezp@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
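
For context: the hotplug state machine replaces notifier callbacks that switch on CPU_* actions with per-state startup/teardown callbacks. Below is a minimal sketch of the dynamic-state registration pattern this patch adopts in cpucache_init(); the example_* names are illustrative, only the cpuhp API calls are real.

    #include <linux/cpu.h>
    #include <linux/cpuhotplug.h>

    static int example_online_cpu(unsigned int cpu)
    {
            /* Startup callback: runs on each CPU as it comes online, and
             * once for every CPU already online when the state is set up. */
            return 0;
    }

    static int example_offline_cpu(unsigned int cpu)
    {
            /* Teardown callback: runs as each CPU goes offline. */
            return 0;
    }

    static int __init example_init(void)
    {
            int ret;

            /* CPUHP_AP_ONLINE_DYN allocates a dynamic state slot;
             * a negative return value signals failure. */
            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                    example_online_cpu, example_offline_cpu);
            return ret < 0 ? ret : 0;
    }

Because cpuhp_setup_state() invokes the startup callback for every CPU that is already online, the for_each_online_cpu() loop in cpucache_init() becomes redundant, which is why the last hunk of the diff can drop it.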
Diffstat (limited to 'mm/slab.c')
 mm/slab.c | 114 ++++++++++++++++++++++++-------------------------------
 1 file changed, 51 insertions(+), 63 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index b67271024135..090fb26b3a39 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -886,6 +886,7 @@ static int init_cache_node(struct kmem_cache *cachep, int node, gfp_t gfp)
 	return 0;
 }
 
+#if (defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)) || defined(CONFIG_SMP)
 /*
  * Allocates and initializes node for a node on each slab cache, used for
  * either memory or cpu hotplug. If memory is being hot-added, the kmem_cache_node
@@ -908,6 +909,7 @@ static int init_cache_node_node(int node)
 
 	return 0;
 }
+#endif
 
 static int setup_kmem_cache_node(struct kmem_cache *cachep,
 				int node, gfp_t gfp, bool force_change)
@@ -975,6 +977,8 @@ fail:
 	return ret;
 }
 
+#ifdef CONFIG_SMP
+
 static void cpuup_canceled(long cpu)
 {
 	struct kmem_cache *cachep;
@@ -1075,65 +1079,54 @@ bad:
 	return -ENOMEM;
 }
 
-static int cpuup_callback(struct notifier_block *nfb,
-				unsigned long action, void *hcpu)
+int slab_prepare_cpu(unsigned int cpu)
 {
-	long cpu = (long)hcpu;
-	int err = 0;
+	int err;
 
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-		mutex_lock(&slab_mutex);
-		err = cpuup_prepare(cpu);
-		mutex_unlock(&slab_mutex);
-		break;
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		start_cpu_timer(cpu);
-		break;
-#ifdef CONFIG_HOTPLUG_CPU
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		/*
-		 * Shutdown cache reaper. Note that the slab_mutex is
-		 * held so that if cache_reap() is invoked it cannot do
-		 * anything expensive but will only modify reap_work
-		 * and reschedule the timer.
-		 */
-		cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
-		/* Now the cache_reaper is guaranteed to be not running. */
-		per_cpu(slab_reap_work, cpu).work.func = NULL;
-		break;
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-		start_cpu_timer(cpu);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		/*
-		 * Even if all the cpus of a node are down, we don't free the
-		 * kmem_cache_node of any cache. This to avoid a race between
-		 * cpu_down, and a kmalloc allocation from another cpu for
-		 * memory from the node of the cpu going down. The node
-		 * structure is usually allocated from kmem_cache_create() and
-		 * gets destroyed at kmem_cache_destroy().
-		 */
-		/* fall through */
+	mutex_lock(&slab_mutex);
+	err = cpuup_prepare(cpu);
+	mutex_unlock(&slab_mutex);
+	return err;
+}
+
+/*
+ * This is called for a failed online attempt and for a successful
+ * offline.
+ *
+ * Even if all the cpus of a node are down, we don't free the
+ * kmem_list3 of any cache. This to avoid a race between cpu_down, and
+ * a kmalloc allocation from another cpu for memory from the node of
+ * the cpu going down. The list3 structure is usually allocated from
+ * kmem_cache_create() and gets destroyed at kmem_cache_destroy().
+ */
+int slab_dead_cpu(unsigned int cpu)
+{
+	mutex_lock(&slab_mutex);
+	cpuup_canceled(cpu);
+	mutex_unlock(&slab_mutex);
+	return 0;
+}
 #endif
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		mutex_lock(&slab_mutex);
-		cpuup_canceled(cpu);
-		mutex_unlock(&slab_mutex);
-		break;
-	}
-	return notifier_from_errno(err);
+
+static int slab_online_cpu(unsigned int cpu)
+{
+	start_cpu_timer(cpu);
+	return 0;
 }
 
-static struct notifier_block cpucache_notifier = {
-	&cpuup_callback, NULL, 0
-};
+static int slab_offline_cpu(unsigned int cpu)
+{
+	/*
+	 * Shutdown cache reaper. Note that the slab_mutex is held so
+	 * that if cache_reap() is invoked it cannot do anything
+	 * expensive but will only modify reap_work and reschedule the
+	 * timer.
+	 */
+	cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
+	/* Now the cache_reaper is guaranteed to be not running. */
+	per_cpu(slab_reap_work, cpu).work.func = NULL;
+	return 0;
+}
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
 /*
@@ -1336,12 +1329,6 @@ void __init kmem_cache_init_late(void)
 	/* Done! */
 	slab_state = FULL;
 
-	/*
-	 * Register a cpu startup notifier callback that initializes
-	 * cpu_cache_get for all new cpus
-	 */
-	register_cpu_notifier(&cpucache_notifier);
-
 #ifdef CONFIG_NUMA
 	/*
 	 * Register a memory hotplug callback that initializes and frees
@@ -1358,13 +1345,14 @@ void __init kmem_cache_init_late(void)
 
 static int __init cpucache_init(void)
 {
-	int cpu;
+	int ret;
 
 	/*
 	 * Register the timers that return unneeded pages to the page allocator
 	 */
-	for_each_online_cpu(cpu)
-		start_cpu_timer(cpu);
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SLAB online",
+				slab_online_cpu, slab_offline_cpu);
+	WARN_ON(ret < 0);
 
 	/* Done! */
 	slab_state = FULL;
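
Note that slab_prepare_cpu() and slab_dead_cpu() gain no caller within mm/slab.c; since this view is limited to mm/slab.c, their registration is not shown. In the full commit they are wired up as a static hotplug state, roughly along the lines sketched below. The CPUHP_SLAB_PREPARE name matches the rest of this series; the exact cpuhp_step field layout (.startup vs .startup.single) varies between kernel versions, so treat this as an outline rather than the literal hunk.

    /* include/linux/cpuhotplug.h: a static state in the PREPARE
     * section, i.e. ordered before CPUHP_BRINGUP_CPU */
    enum cpuhp_state {
            ...
            CPUHP_SLAB_PREPARE,
            ...
    };

    /* kernel/cpu.c: state-table entry. The startup callback runs on the
     * control CPU before the new CPU is brought up; the teardown
     * callback runs after the outgoing CPU is dead, matching the
     * comment above slab_dead_cpu(). */
    [CPUHP_SLAB_PREPARE] = {
            .name			= "slab:prepare",
            .startup.single		= slab_prepare_cpu,
            .teardown.single	= slab_dead_cpu,
    },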