author		Rusty Russell <rusty@rustcorp.com.au>	2009-01-01 10:12:29 +1030
committer	Rusty Russell <rusty@rustcorp.com.au>	2009-01-01 10:12:29 +1030
commit		174596a0b9f21e8844d70566a6bb29bf48a87750 (patch)
tree		e16bc37554afe1996c464cadb09764290b4ff3be /mm
parent		41c7bb9588904eb060a95bcad47bd3804a1ece25 (diff)
cpumask: convert mm/
Impact: Use new API
Convert kernel mm functions to use struct cpumask.
We skip include/linux/percpu.h and mm/allocpercpu.c, which are in flux.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Mike Travis <travis@sgi.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
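
For readers new to the API this patch targets, here is a minimal sketch of the core conversion pattern (illustrative only, not code from this commit; the function and variable names are made up). An on-stack cpumask_t, which is always NR_CPUS bits wide, becomes a cpumask_var_t that must be explicitly allocated and freed, so an allocation failure path is needed:

/*
 * Illustrative sketch only -- not from this patch. Shows the general
 * cpumask_t -> cpumask_var_t pattern applied throughout mm/ here.
 */
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/sched.h>

static int example_bind_to_online_cpus(struct task_struct *tsk)
{
	cpumask_var_t mask;	/* was: cpumask_t mask; (NR_CPUS bits on stack) */

	/* cpumask_var_t may be heap-allocated, so allocation can fail. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(mask, cpu_online_mask);	/* was: mask = cpu_online_map; */
	set_cpus_allowed_ptr(tsk, mask);	/* already takes a pointer, no & */

	free_cpumask_var(mask);
	return 0;
}

On configs with CONFIG_CPUMASK_OFFSTACK=n, alloc_cpumask_var() is a static inline that always succeeds and the mask stays on the stack, so the pattern costs nothing on small systems.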
Diffstat (limited to 'mm')
-rw-r--r--	mm/pdflush.c	16
-rw-r--r--	mm/slab.c	2
-rw-r--r--	mm/slub.c	20
-rw-r--r--	mm/vmscan.c	2
-rw-r--r--	mm/vmstat.c	4
5 files changed, 28 insertions(+), 16 deletions(-)
diff --git a/mm/pdflush.c b/mm/pdflush.c
index a0a14c4d5072..15de509b68fd 100644
--- a/mm/pdflush.c
+++ b/mm/pdflush.c
@@ -172,7 +172,16 @@ static int __pdflush(struct pdflush_work *my_work)
 static int pdflush(void *dummy)
 {
 	struct pdflush_work my_work;
-	cpumask_t cpus_allowed;
+	cpumask_var_t cpus_allowed;
+
+	/*
+	 * Since the caller doesn't even check kthread_run() worked, let's not
+	 * freak out too much if this fails.
+	 */
+	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+		printk(KERN_WARNING "pdflush failed to allocate cpumask\n");
+		return 0;
+	}
 
 	/*
 	 * pdflush can spend a lot of time doing encryption via dm-crypt.  We
@@ -187,8 +196,9 @@ static int pdflush(void *dummy)
 	 * This is needed as pdflush's are dynamically created and destroyed.
 	 * The boottime pdflush's are easily placed w/o these 2 lines.
 	 */
-	cpuset_cpus_allowed(current, &cpus_allowed);
-	set_cpus_allowed_ptr(current, &cpus_allowed);
+	cpuset_cpus_allowed(current, cpus_allowed);
+	set_cpus_allowed_ptr(current, cpus_allowed);
+	free_cpumask_var(cpus_allowed);
 
 	return __pdflush(&my_work);
 }
diff --git a/mm/slab.c b/mm/slab.c
index f97e564bdf11..ddc41f337d58 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2157,7 +2157,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 
 	/*
 	 * We use cache_chain_mutex to ensure a consistent view of
-	 * cpu_online_map as well. Please see cpuup_callback
+	 * cpu_online_mask as well. Please see cpuup_callback
 	 */
 	get_online_cpus();
 	mutex_lock(&cache_chain_mutex);
diff --git a/mm/slub.c b/mm/slub.c
index 0d861c3154b6..f0e2892fe403 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1970,7 +1970,7 @@ static DEFINE_PER_CPU(struct kmem_cache_cpu,
 				kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
 
 static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
-static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
+static DECLARE_BITMAP(kmem_cach_cpu_free_init_once, CONFIG_NR_CPUS);
 
 static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
 							int cpu, gfp_t flags)
@@ -2045,13 +2045,13 @@ static void init_alloc_cpu_cpu(int cpu)
 {
 	int i;
 
-	if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
+	if (cpumask_test_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once)))
 		return;
 
 	for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
 		free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
 
-	cpu_set(cpu, kmem_cach_cpu_free_init_once);
+	cpumask_set_cpu(cpu, to_cpumask(kmem_cach_cpu_free_init_once));
 }
 
 static void __init init_alloc_cpu(void)
@@ -3451,7 +3451,7 @@ struct location {
 	long max_time;
 	long min_pid;
 	long max_pid;
-	cpumask_t cpus;
+	DECLARE_BITMAP(cpus, NR_CPUS);
 	nodemask_t nodes;
 };
 
@@ -3526,7 +3526,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 		if (track->pid > l->max_pid)
 			l->max_pid = track->pid;
 
-		cpu_set(track->cpu, l->cpus);
+		cpumask_set_cpu(track->cpu,
+				to_cpumask(l->cpus));
 	}
 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
 	return 1;
@@ -3556,8 +3557,8 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
 	l->max_time = age;
 	l->min_pid = track->pid;
 	l->max_pid = track->pid;
-	cpus_clear(l->cpus);
-	cpu_set(track->cpu, l->cpus);
+	cpumask_clear(to_cpumask(l->cpus));
+	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
 	nodes_clear(l->nodes);
 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
 	return 1;
@@ -3638,11 +3639,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
 			len += sprintf(buf + len, " pid=%ld",
 				l->min_pid);
 
-		if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
+		if (num_online_cpus() > 1 &&
+				!cpumask_empty(to_cpumask(l->cpus)) &&
 				len < PAGE_SIZE - 60) {
 			len += sprintf(buf + len, " cpus=");
 			len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
-					&l->cpus);
+					to_cpumask(l->cpus));
 		}
 
 		if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 240f062f71f1..d196f46c8808 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1902,7 +1902,7 @@ static int kswapd(void *p)
 	};
 	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-	if (!cpus_empty(*cpumask))
+	if (!cpumask_empty(cpumask))
 		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index c3ccfda23adc..91149746bb8d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -20,7 +20,7 @@
 DEFINE_PER_CPU(struct vm_event_state, vm_event_states) = {{0}};
 EXPORT_PER_CPU_SYMBOL(vm_event_states);
 
-static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
+static void sum_vm_events(unsigned long *ret, const struct cpumask *cpumask)
 {
 	int cpu;
 	int i;
@@ -43,7 +43,7 @@ static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
 void all_vm_events(unsigned long *ret)
 {
 	get_online_cpus();
-	sum_vm_events(ret, &cpu_online_map);
+	sum_vm_events(ret, cpu_online_mask);
 	put_online_cpus();
 }
 EXPORT_SYMBOL_GPL(all_vm_events);
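
The slub.c hunks above show the second conversion pattern: a file-scope cpumask_t (or one embedded in a struct) becomes a raw bitmap declared with DECLARE_BITMAP() and is wrapped with to_cpumask() at each use, since there is no convenient point at which to allocate a cpumask_var_t. A minimal sketch of that pattern, with made-up identifiers:

/*
 * Illustrative sketch only -- identifiers are invented, not from the
 * patch. Pattern: replace a static cpumask_t with a bitmap and convert
 * it to a struct cpumask * via to_cpumask() at each call site.
 */
#include <linux/cpumask.h>

/* was: static cpumask_t initialized_cpus = CPU_MASK_NONE; */
static DECLARE_BITMAP(initialized_cpus, NR_CPUS);

static void example_init_once(int cpu)
{
	/* was: if (cpu_isset(cpu, initialized_cpus)) */
	if (cpumask_test_cpu(cpu, to_cpumask(initialized_cpus)))
		return;

	/* ... one-time per-cpu setup would go here ... */

	/* was: cpu_set(cpu, initialized_cpus); */
	cpumask_set_cpu(cpu, to_cpumask(initialized_cpus));
}

Unlike cpumask_var_t, a DECLARE_BITMAP() mask is always NR_CPUS bits of static storage; a plausible reason to prefer it in these spots is that the slub masks are touched during early boot and in atomic contexts, where an allocation failure path would be awkward.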