Diffstat (limited to 'mm/vmstat.c')
-rw-r--r--  mm/vmstat.c  61
1 files changed, 30 insertions, 31 deletions
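
For orientation, the pageset changes in this diff collapse the two per-CPU page lists (hot and cold, pcp[0]/pcp[1]) into a single list, pcp. Below is a minimal sketch of the data structures involved, not part of this patch, with field names taken from the diff; the authoritative definitions live in include/linux/mmzone.h and may differ in detail:

/* Illustrative sketch only -- not part of this patch. */
struct per_cpu_pages {
	int count;		/* number of pages on the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	struct list_head list;	/* the pages themselves */
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;	/* previously: struct per_cpu_pages pcp[2]; 0 = hot, 1 = cold */
#ifdef CONFIG_NUMA
	s8 expire;			/* counts down; remote pageset is drained when it reaches 0 */
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};
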
diff --git a/mm/vmstat.c b/mm/vmstat.c
index e8d846f57774..422d960ffcd8 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -21,21 +21,14 @@ EXPORT_PER_CPU_SYMBOL(vm_event_states);
static void sum_vm_events(unsigned long *ret, cpumask_t *cpumask)
{
- int cpu = 0;
+ int cpu;
int i;
memset(ret, 0, NR_VM_EVENT_ITEMS * sizeof(unsigned long));
- cpu = first_cpu(*cpumask);
- while (cpu < NR_CPUS) {
+ for_each_cpu_mask(cpu, *cpumask) {
struct vm_event_state *this = &per_cpu(vm_event_states, cpu);
- cpu = next_cpu(cpu, *cpumask);
-
- if (cpu < NR_CPUS)
- prefetch(&per_cpu(vm_event_states, cpu));
-
-
for (i = 0; i < NR_VM_EVENT_ITEMS; i++)
ret[i] += this->event[i];
}
@@ -284,6 +277,10 @@ EXPORT_SYMBOL(dec_zone_page_state);
/*
* Update the zone counters for one cpu.
*
+ * The cpu specified must be either the current cpu or a processor that
+ * is not online. If it is the current cpu then the execution thread must
+ * be pinned to the current cpu.
+ *
* Note that refresh_cpu_vm_stats strives to only access
* node local memory. The per cpu pagesets on remote zones are placed
* in the memory local to the processor using that pageset. So the
@@ -299,7 +296,7 @@ void refresh_cpu_vm_stats(int cpu)
{
struct zone *zone;
int i;
- unsigned long flags;
+ int global_diff[NR_VM_ZONE_STAT_ITEMS] = { 0, };
for_each_zone(zone) {
struct per_cpu_pageset *p;
@@ -311,15 +308,19 @@ void refresh_cpu_vm_stats(int cpu)
for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
if (p->vm_stat_diff[i]) {
+ unsigned long flags;
+ int v;
+
local_irq_save(flags);
- zone_page_state_add(p->vm_stat_diff[i],
- zone, i);
+ v = p->vm_stat_diff[i];
p->vm_stat_diff[i] = 0;
+ local_irq_restore(flags);
+ atomic_long_add(v, &zone->vm_stat[i]);
+ global_diff[i] += v;
#ifdef CONFIG_NUMA
/* 3 seconds idle till flush */
p->expire = 3;
#endif
- local_irq_restore(flags);
}
#ifdef CONFIG_NUMA
/*
@@ -329,7 +330,7 @@ void refresh_cpu_vm_stats(int cpu)
* Check if there are pages remaining in this pageset
* if not then there is nothing to expire.
*/
- if (!p->expire || (!p->pcp[0].count && !p->pcp[1].count))
+ if (!p->expire || !p->pcp.count)
continue;
/*
@@ -344,13 +345,14 @@ void refresh_cpu_vm_stats(int cpu)
if (p->expire)
continue;
- if (p->pcp[0].count)
- drain_zone_pages(zone, p->pcp + 0);
-
- if (p->pcp[1].count)
- drain_zone_pages(zone, p->pcp + 1);
+ if (p->pcp.count)
+ drain_zone_pages(zone, &p->pcp);
#endif
}
+
+ for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
+ if (global_diff[i])
+ atomic_long_add(global_diff[i], &vm_stat[i]);
}
#endif
@@ -681,20 +683,17 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
"\n pagesets");
for_each_online_cpu(i) {
struct per_cpu_pageset *pageset;
- int j;
pageset = zone_pcp(zone, i);
- for (j = 0; j < ARRAY_SIZE(pageset->pcp); j++) {
- seq_printf(m,
- "\n cpu: %i pcp: %i"
- "\n count: %i"
- "\n high: %i"
- "\n batch: %i",
- i, j,
- pageset->pcp[j].count,
- pageset->pcp[j].high,
- pageset->pcp[j].batch);
- }
+ seq_printf(m,
+ "\n cpu: %i"
+ "\n count: %i"
+ "\n high: %i"
+ "\n batch: %i",
+ i,
+ pageset->pcp.count,
+ pageset->pcp.high,
+ pageset->pcp.batch);
#ifdef CONFIG_SMP
seq_printf(m, "\n vm stats threshold: %d",
pageset->stat_threshold);