author		Wei Yang <richard.weiyang@gmail.com>	2018-12-28 00:38:58 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 12:11:51 -0800
commit		d9367bd06faa1beb2951899272f925bdf877d28b (patch)
tree		3b6bd6d9c6779ffd8ee020d4b01b033efbacc1e1 /mm
parent		d53ce042277a94eadf9a8a31fc41fac54c67dec5 (diff)
mm, page_alloc: enable pcpu_drain with zone capability
drain_all_pages() is documented to drain per-cpu pages for a given zone (if
non-NULL).  The current implementation doesn't match the description though:
it drains all pcp pages for all zones that happen to have cached pages on the
same cpu as the given zone.  This leads to premature pcp cache draining for
zones that are of no interest to the caller - e.g. compaction, hwpoison or
memory offline.  This forces the page allocator to take locks, with potential
lock contention as a result.

There is no real reason for this sub-optimal implementation.  Replace the
per-cpu work item with a dedicated structure which contains a pointer to the
zone and pass it over to the worker.  This gets the zone information all the
way down to the worker function so it can do the right job.

[akpm@linux-foundation.org: avoid 80-col tricks]
[mhocko@suse.com: refactor the whole changelog]
Link: http://lkml.kernel.org/r/20181212142550.61686-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
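The patch boils down to the standard kernel idiom for passing extra context
to a work callback: embed the struct work_struct inside a larger struct,
queue the embedded member, and recover the enclosing struct in the callback
with container_of().  Below is a minimal, userspace-compilable sketch of that
idiom; work_struct and zone here are simplified stand-ins, not the kernel
types, and the names merely mirror the patch for readability:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work_struct { int pending; };	/* stand-in, not the kernel type */
	struct zone { const char *name; };	/* stand-in, not the kernel type */

	struct pcpu_drain {
		struct zone *zone;		/* extra context the worker needs */
		struct work_struct work;	/* the member that gets queued */
	};

	/* The callback receives only the embedded work_struct... */
	static void drain_local_pages_wq(struct work_struct *work)
	{
		/* ...and climbs back to the enclosing pcpu_drain for the zone. */
		struct pcpu_drain *drain =
			container_of(work, struct pcpu_drain, work);

		printf("draining zone: %s\n",
		       drain->zone ? drain->zone->name : "all");
	}

	int main(void)
	{
		struct zone normal = { .name = "Normal" };
		struct pcpu_drain drain = { .zone = &normal };

		drain_local_pages_wq(&drain.work);  /* "draining zone: Normal" */
		return 0;
	}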
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	22
1 file changed, 16 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2cd1f9bb1b52..75865e1325b5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -97,8 +97,12 @@ int _node_numa_mem_[MAX_NUMNODES];
 #endif
 
 /* work_structs for global per-cpu drains */
+struct pcpu_drain {
+	struct zone *zone;
+	struct work_struct work;
+};
 DEFINE_MUTEX(pcpu_drain_mutex);
-DEFINE_PER_CPU(struct work_struct, pcpu_drain);
+DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);
 
 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
 volatile unsigned long latent_entropy __latent_entropy;
@@ -2658,6 +2662,10 @@ void drain_local_pages(struct zone *zone)
 
 static void drain_local_pages_wq(struct work_struct *work)
 {
+	struct pcpu_drain *drain;
+
+	drain = container_of(work, struct pcpu_drain, work);
+
 	/*
 	 * drain_all_pages doesn't use proper cpu hotplug protection so
 	 * we can race with cpu offline when the WQ can move this from
@@ -2666,7 +2674,7 @@ static void drain_local_pages_wq(struct work_struct *work)
 	 * a different one.
 	 */
 	preempt_disable();
-	drain_local_pages(NULL);
+	drain_local_pages(drain->zone);
 	preempt_enable();
 }
 
@@ -2737,12 +2745,14 @@ void drain_all_pages(struct zone *zone)
 	}
 
 	for_each_cpu(cpu, &cpus_with_pcps) {
-		struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
-		INIT_WORK(work, drain_local_pages_wq);
-		queue_work_on(cpu, mm_percpu_wq, work);
+		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);
+
+		drain->zone = zone;
+		INIT_WORK(&drain->work, drain_local_pages_wq);
+		queue_work_on(cpu, mm_percpu_wq, &drain->work);
 	}
 	for_each_cpu(cpu, &cpus_with_pcps)
-		flush_work(per_cpu_ptr(&pcpu_drain, cpu));
+		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);
 
 	mutex_unlock(&pcpu_drain_mutex);
 }