author		Mike Travis <travis@sgi.com>	2008-04-04 18:11:10 -0700
committer	Ingo Molnar <mingo@elte.hu>	2008-04-19 19:44:59 +0200
commit		c5f59f0833df945eef7ff35f3dc6ba61c5f293dd (patch)
tree		32c1a94847d0154051c79011212d401462723d55 /mm
parent		b53e921ba1cff8453dc9a87a84052fa12d5b30bd (diff)
nodemask: use new node_to_cpumask_ptr function
* Use the new node_to_cpumask_ptr macro, which creates a pointer to the
  cpumask for a given node. Its definition comes from the mm patch
  asm-generic-add-node_to_cpumask_ptr-macro.patch; see the sketch below.
* Use the new set_cpus_allowed_ptr function, which takes a pointer to
  the cpumask rather than copying it by value; see the sketch below.
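For reference, the generic fallback for the macro (introduced by the
dependent mm patch) looks roughly like the sketch below; this is a
paraphrase, not the authoritative definition. The point is that callers
get a const cpumask_t * rather than an on-stack copy, and architectures
that keep a node_to_cpumask_map array can point v straight into the map
so no copy is made at all:

	/*
	 * Sketch of the generic fallback (per the dependent mm patch):
	 * declare a hidden local cpumask plus a const pointer to it.
	 */
	#define node_to_cpumask_ptr(v, node)				\
			cpumask_t _##v = node_to_cpumask(node);		\
			const cpumask_t *v = &_##v

	/* Refresh the hidden local for the next node in a loop. */
	#define node_to_cpumask_ptr_next(v, node)			\
			_##v = node_to_cpumask(node)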
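Similarly, set_cpus_allowed_ptr() takes a const cpumask_t * where
set_cpus_allowed() took a cpumask_t by value. A minimal before/after
sketch of the conversion pattern applied throughout this patch
(variable names illustrative; the real changes are in the hunks below):

	/*
	 * Before: a full cpumask_t (NR_CPUS bits) lives on the stack
	 * and is copied again when passed by value.
	 */
	cpumask_t mask;
	mask = node_to_cpumask(nid);
	if (!cpus_empty(mask))
		set_cpus_allowed(tsk, mask);

	/* After: only a pointer is declared and passed. */
	node_to_cpumask_ptr(mask, nid);
	if (!cpus_empty(*mask))
		set_cpus_allowed_ptr(tsk, mask);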
Depends on:
[mm-patch]: asm-generic-add-node_to_cpumask_ptr-macro.patch
[sched-devel]: sched: add new set_cpus_allowed_ptr function
[x86/latest]: x86: add cpus_scnprintf function
Cc: Greg Kroah-Hartman <gregkh@suse.de>
Cc: Greg Banks <gnb@melbourne.sgi.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm')
 mm/page_alloc.c |  6 +++---
 mm/slab.c       |  5 ++---
 mm/vmscan.c     | 18 ++++++++----------
 3 files changed, 13 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 402a504f1228..32e796af12a1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2029,6 +2029,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	int n, val;
 	int min_val = INT_MAX;
 	int best_node = -1;
+	node_to_cpumask_ptr(tmp, 0);
 
 	/* Use the local node if we haven't already */
 	if (!node_isset(node, *used_node_mask)) {
@@ -2037,7 +2038,6 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 	}
 
 	for_each_node_state(n, N_HIGH_MEMORY) {
-		cpumask_t tmp;
 
 		/* Don't want a node to appear more than once */
 		if (node_isset(n, *used_node_mask))
@@ -2050,8 +2050,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
 		val += (n < node);
 
 		/* Give preference to headless and unused nodes */
-		tmp = node_to_cpumask(n);
-		if (!cpus_empty(tmp))
+		node_to_cpumask_ptr_next(tmp, n);
+		if (!cpus_empty(*tmp))
 			val += PENALTY_FOR_NODE_WITH_CPUS;
 
 		/* Slight preference for less loaded node */
diff --git a/mm/slab.c b/mm/slab.c
index 04b308c3bc54..03927cb5ec9e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1160,14 +1160,13 @@ static void __cpuinit cpuup_canceled(long cpu)
 	struct kmem_cache *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
+	node_to_cpumask_ptr(mask, node);
 
 	list_for_each_entry(cachep, &cache_chain, next) {
 		struct array_cache *nc;
 		struct array_cache *shared;
 		struct array_cache **alien;
-		cpumask_t mask;
 
-		mask = node_to_cpumask(node);
 		/* cpu is dead; no one can alloc from it. */
 		nc = cachep->array[cpu];
 		cachep->array[cpu] = NULL;
@@ -1183,7 +1182,7 @@ static void __cpuinit cpuup_canceled(long cpu)
 		if (nc)
 			free_block(cachep, nc->entry, nc->avail, node);
 
-		if (!cpus_empty(mask)) {
+		if (!cpus_empty(*mask)) {
 			spin_unlock_irq(&l3->list_lock);
 			goto free_array_cache;
 		}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4046434046e6..f80a5b7c057f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1647,11 +1647,10 @@ static int kswapd(void *p)
 	struct reclaim_state reclaim_state = {
 		.reclaimed_slab = 0,
 	};
-	cpumask_t cpumask;
+	node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-	cpumask = node_to_cpumask(pgdat->node_id);
-	if (!cpus_empty(cpumask))
-		set_cpus_allowed(tsk, cpumask);
+	if (!cpus_empty(*cpumask))
+		set_cpus_allowed_ptr(tsk, cpumask);
 	current->reclaim_state = &reclaim_state;
 
 	/*
@@ -1880,17 +1879,16 @@ out:
 static int __devinit cpu_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
-	pg_data_t *pgdat;
-	cpumask_t mask;
 	int nid;
 
 	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_node_state(nid, N_HIGH_MEMORY) {
-			pgdat = NODE_DATA(nid);
-			mask = node_to_cpumask(pgdat->node_id);
-			if (any_online_cpu(mask) != NR_CPUS)
+			pg_data_t *pgdat = NODE_DATA(nid);
+			node_to_cpumask_ptr(mask, pgdat->node_id);
+
+			if (any_online_cpu(*mask) < nr_cpu_ids)
 				/* One of our CPUs online: restore mask */
-				set_cpus_allowed(pgdat->kswapd, mask);
+				set_cpus_allowed_ptr(pgdat->kswapd, mask);
 		}
 	}
 	return NOTIFY_OK;