author    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>  2019-09-19 13:37:04 -0400
committer Ingo Molnar <mingo@kernel.org>  2019-09-25 17:42:31 +0200
commit    c6d68c1c4a4d6611fc0f8145d764226571d737ca
tree      f808d18297778aa54b35e5cd6bf838b2636d2f8d /kernel/sched
parent    19a4ff534bb09686f53800564cb977bad2177c00
sched/membarrier: Skip IPIs when mm->mm_users == 1
If there is only a single mm_user for the mm, the private expedited
membarrier command can skip the IPIs, because only a single thread
is using the mm.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Kirill Tkhai <tkhai@yandex.ru>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King - ARM Linux admin <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lkml.kernel.org/r/20190919173705.2181-7-mathieu.desnoyers@efficios.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
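For context, the userspace-visible call path this patch optimizes is the
membarrier(2) syscall with the private expedited command. The sketch below
is illustrative only and is not part of the commit: it assumes a kernel
with membarrier support and uses a raw syscall(2) wrapper, since glibc
provides no membarrier wrapper.

#define _GNU_SOURCE
#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

/* Raw wrapper; membarrier(2) takes (cmd, flags) on kernels of this era. */
static int membarrier(int cmd, int flags)
{
	return syscall(__NR_membarrier, cmd, flags);
}

int main(void)
{
	/* A process must register before using the private expedited command. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0) < 0) {
		perror("membarrier register");
		return 1;
	}
	/*
	 * Issue the barrier. With this patch, a single-threaded process
	 * (mm->mm_users == 1) returns immediately instead of scanning
	 * CPUs and sending IPIs, just as a single-CPU system already did.
	 */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0) < 0) {
		perror("membarrier");
		return 1;
	}
	return 0;
}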
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/membarrier.c  |  9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c
index 070cf433bb9a..fced54ad0f3d 100644
--- a/kernel/sched/membarrier.c
+++ b/kernel/sched/membarrier.c
@@ -145,20 +145,21 @@ static int membarrier_private_expedited(int flags)
 	int cpu;
 	bool fallback = false;
 	cpumask_var_t tmpmask;
+	struct mm_struct *mm = current->mm;
 
 	if (flags & MEMBARRIER_FLAG_SYNC_CORE) {
 		if (!IS_ENABLED(CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE))
 			return -EINVAL;
-		if (!(atomic_read(&current->mm->membarrier_state) &
+		if (!(atomic_read(&mm->membarrier_state) &
 		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY))
 			return -EPERM;
 	} else {
-		if (!(atomic_read(&current->mm->membarrier_state) &
+		if (!(atomic_read(&mm->membarrier_state) &
 		      MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY))
 			return -EPERM;
 	}
 
-	if (num_online_cpus() == 1)
+	if (atomic_read(&mm->mm_users) == 1 || num_online_cpus() == 1)
 		return 0;
 
 	/*
@@ -194,7 +195,7 @@ static int membarrier_private_expedited(int flags)
 			continue;
 		rcu_read_lock();
 		p = rcu_dereference(cpu_rq(cpu)->curr);
-		if (p && p->mm == current->mm) {
+		if (p && p->mm == mm) {
 			if (!fallback)
 				__cpumask_set_cpu(cpu, tmpmask);
 			else
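The new check mirrors the existing num_online_cpus() == 1 fast path:
membarrier's guarantee only concerns other threads running the same mm, and
with mm->mm_users == 1 there are none, so the command can return 0 without
touching any remote CPU. Hoisting current->mm into the local mm variable
also lets the CPU-scan loop compare against mm directly instead of
re-reading current->mm on every iteration.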