author    Peter Zijlstra <peterz@infradead.org>  2021-12-25 01:04:57 +0100
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2022-02-16 12:56:11 +0100
commit    85008bde411d5be480d3926ec3fb26b93d46001f
tree      148e6f3f179b155dd902b5649a6f73b509cd6dee
parent    ec903b6daa586dedff31c27d6b7d8d84cbd11131
sched: Avoid double preemption in __cond_resched_*lock*()
[ Upstream commit 7e406d1ff39b8ee574036418a5043c86723170cf ]

For PREEMPT/DYNAMIC_PREEMPT the *_unlock() will already trigger a
preemption, so there is no point in calling preempt_schedule_common()
*again*.

Use _cond_resched() instead, since it is a NOP for the preemptible
configs while it provides a preemption point for the others.

Reported-by: xuhaifeng <xuhaifeng@oppo.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/YcGnvDEYBwOiV0cR@hirez.programming.kicks-ass.net
Signed-off-by: Sasha Levin <sashal@kernel.org>
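Why _cond_resched() is the right call here: below is a minimal sketch of
its per-config behaviour. This is an illustration, not verbatim kernel
source; in particular the PREEMPT_DYNAMIC static-call plumbing is omitted.

    /*
     * Simplified sketch of _cond_resched() per configuration
     * (PREEMPT_DYNAMIC's static-call machinery omitted).
     */
    #ifdef CONFIG_PREEMPTION
    /*
     * Fully preemptible kernel: the *_unlock() already preempted if
     * needed, so this is a NOP that reports "did not reschedule".
     */
    static inline int _cond_resched(void)
    {
            return 0;
    }
    #else
    /* Otherwise this is the explicit, voluntary preemption point. */
    static inline int _cond_resched(void)
    {
            if (should_resched(0)) {
                    preempt_schedule_common();
                    return 1;       /* rescheduled */
            }
            return 0;
    }
    #endif

With that, "if (!_cond_resched()) cpu_relax();" does the right thing in
both worlds: preemptible configs fall through to cpu_relax() (the unlock
was already the preemption point), while the other configs reschedule
here if needed and only cpu_relax() when no reschedule was pending.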
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0d12ec7be301..c2dec6ce9809 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8199,9 +8199,7 @@ int __cond_resched_lock(spinlock_t *lock)
 
         if (spin_needbreak(lock) || resched) {
                 spin_unlock(lock);
-                if (resched)
-                        preempt_schedule_common();
-                else
+                if (!_cond_resched())
                         cpu_relax();
                 ret = 1;
                 spin_lock(lock);
@@ -8219,9 +8217,7 @@ int __cond_resched_rwlock_read(rwlock_t *lock)
 
         if (rwlock_needbreak(lock) || resched) {
                 read_unlock(lock);
-                if (resched)
-                        preempt_schedule_common();
-                else
+                if (!_cond_resched())
                         cpu_relax();
                 ret = 1;
                 read_lock(lock);
@@ -8239,9 +8235,7 @@ int __cond_resched_rwlock_write(rwlock_t *lock)
 
         if (rwlock_needbreak(lock) || resched) {
                 write_unlock(lock);
-                if (resched)
-                        preempt_schedule_common();
-                else
+                if (!_cond_resched())
                         cpu_relax();
                 ret = 1;
                 write_lock(lock);
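For reference, here is __cond_resched_lock() as it should read after this
patch, reconstructed from the first hunk; the local declarations and the
lockdep assert are assumed from the mainline tree of this era rather than
shown in the diff above.

    int __cond_resched_lock(spinlock_t *lock)
    {
            int resched = should_resched(PREEMPT_LOCK_OFFSET);
            int ret = 0;

            lockdep_assert_held(lock);

            if (spin_needbreak(lock) || resched) {
                    spin_unlock(lock);
                    if (!_cond_resched())   /* NOP on preemptible configs */
                            cpu_relax();
                    ret = 1;
                    spin_lock(lock);
            }
            return ret;
    }

The rwlock variants are identical apart from using rwlock_needbreak() with
read_unlock()/read_lock() and write_unlock()/write_lock() respectively.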