author	Thomas Gleixner <tglx@linutronix.de>	2017-07-10 15:50:06 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-10 16:32:33 -0700
commit	a47fed5b5b014f5a13878b90ef2c3a7dc294189f (patch)
tree	5b1585724f028ec27c7ec701c30105feecbe5735 /mm
parent	24c79d8e0a46bbd010ca9e0dc988a23981bcd423 (diff)
mm: swap: provide lru_add_drain_all_cpuslocked()
The rework of the cpu hotplug locking unearthed potential deadlocks with
the memory hotplug locking code.

The solution for these is to rework the memory hotplug locking code as
well and take the cpu hotplug lock before the memory hotplug lock in
mem_hotplug_begin(), but this will cause a recursive locking of the cpu
hotplug lock when the memory hotplug code calls lru_add_drain_all().

Split out the inner workings of lru_add_drain_all() into
lru_add_drain_all_cpuslocked() so this function can be invoked from the
memory hotplug code with the cpu hotplug lock held.

Link: http://lkml.kernel.org/r/20170704093421.419329357@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reported-by: Andrey Ryabinin <aryabinin@virtuozzo.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
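A minimal usage sketch, not part of the patch itself: example_mem_hotplug_path()
is a hypothetical caller, and the memory hotplug lock steps are elided as
comments. Code that already holds the cpu hotplug lock calls the new
_cpuslocked variant directly, while every other caller keeps using
lru_add_drain_all(), which now merely wraps it in get_online_cpus()/
put_online_cpus().

/*
 * Hypothetical caller, for illustration only: a memory hotplug path
 * that takes the cpu hotplug lock first (as mem_hotplug_begin() is
 * being reworked to do) must not call lru_add_drain_all(), which
 * would acquire that lock again; it calls the _cpuslocked variant
 * instead, with the lock already held.
 */
static void example_mem_hotplug_path(void)
{
	get_online_cpus();              /* cpu hotplug lock first ... */
	/* ... memory hotplug lock would be taken here ... */
	lru_add_drain_all_cpuslocked(); /* lock already held, no recursion */
	/* ... memory hotplug lock released here ... */
	put_online_cpus();
}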
Diffstat (limited to 'mm')
-rw-r--r--	mm/swap.c	11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 4f44dbd7f780..60b1d2a75852 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -688,7 +688,7 @@ static void lru_add_drain_per_cpu(struct work_struct *dummy)
 
 static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
-void lru_add_drain_all(void)
+void lru_add_drain_all_cpuslocked(void)
 {
 	static DEFINE_MUTEX(lock);
 	static struct cpumask has_work;
@@ -702,7 +702,6 @@ void lru_add_drain_all(void)
 		return;
 
 	mutex_lock(&lock);
-	get_online_cpus();
 	cpumask_clear(&has_work);
 
 	for_each_online_cpu(cpu) {
@@ -722,10 +721,16 @@ void lru_add_drain_all(void)
 	for_each_cpu(cpu, &has_work)
 		flush_work(&per_cpu(lru_add_drain_work, cpu));
 
-	put_online_cpus();
 	mutex_unlock(&lock);
 }
 
+void lru_add_drain_all(void)
+{
+	get_online_cpus();
+	lru_add_drain_all_cpuslocked();
+	put_online_cpus();
+}
+
 /**
  * release_pages - batched put_page()
  * @pages: array of pages to release