author     Peter Zijlstra <peterz@infradead.org>            2021-06-30 17:35:18 +0200
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2021-09-22 11:47:48 +0200
commit     b7a041073f4e72083dcd688a003a59bf9d9c9124 (patch)
tree       6573768d0f44197457d15a974c80ef8c8b28d1f5
parent     777631932f4a5185c18b3df1b47f365dbaa70252 (diff)
locking/mutex: Fix HANDOFF condition
[ Upstream commit 048661a1f963e9517630f080687d48af79ed784c ]

Yanfei reported that setting HANDOFF should not depend on recomputing
@first, only on @first state. Which would then give:

	if (ww_ctx || !first)
		first = __mutex_waiter_is_first(lock, &waiter);
	if (first)
		__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);

But because 'ww_ctx || !first' is basically 'always' and the test for
first is relatively cheap, omit that first branch entirely.

Reported-by: Yanfei Xu <yanfei.xu@windriver.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Waiman Long <longman@redhat.com>
Reviewed-by: Yanfei Xu <yanfei.xu@windriver.com>
Link: https://lore.kernel.org/r/20210630154114.896786297@infradead.org
Signed-off-by: Sasha Levin <sashal@kernel.org>
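As an aside (not part of the patch), the decision the final form implements can be modelled in a few lines of plain userspace C. The toy_mutex, toy_waiter, toy_waiter_is_first() and TOY_FLAG_HANDOFF names below are invented for illustration and are not the kernel's types or API; only the loop-body logic mirrors the patched code: on every wakeup, recompute whether this waiter is at the head of the wait list and set the handoff flag purely on that state.

	/*
	 * Toy model of the handoff decision; not the kernel implementation.
	 * All names here are made up for illustration.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define TOY_FLAG_HANDOFF 0x02

	struct toy_waiter {
		struct toy_waiter *next;
		const char *name;
	};

	struct toy_mutex {
		unsigned long flags;           /* low bits used as flag bits */
		struct toy_waiter *wait_list;  /* head of the list == first waiter */
	};

	/* Stand-in for __mutex_waiter_is_first(): is @w at the head of the list? */
	static bool toy_waiter_is_first(struct toy_mutex *lock, struct toy_waiter *w)
	{
		return lock->wait_list == w;
	}

	/*
	 * One pass of the sleep loop after the patch: always recompute @first
	 * from the current list position, then set HANDOFF only on that state.
	 */
	static void toy_loop_iteration(struct toy_mutex *lock, struct toy_waiter *w)
	{
		bool first = toy_waiter_is_first(lock, w);

		if (first)
			lock->flags |= TOY_FLAG_HANDOFF;
	}

	int main(void)
	{
		struct toy_waiter a = { .next = NULL, .name = "A" };
		struct toy_waiter b = { .next = NULL, .name = "B" };
		struct toy_mutex lock = { .flags = 0, .wait_list = &a };

		a.next = &b;

		/* B is not first: no handoff is requested on its behalf. */
		toy_loop_iteration(&lock, &b);
		printf("after B: flags=%#lx\n", lock.flags);

		/*
		 * A ww_mutex-style reorder: B is moved to the head, so on its
		 * next wakeup the recomputed @first is true and HANDOFF is set.
		 * The old cached-@first version relied on the 'ww_ctx || !first'
		 * branch to catch exactly this case.
		 */
		lock.wait_list = &b;
		b.next = &a;
		a.next = NULL;

		toy_loop_iteration(&lock, &b);
		printf("after reorder: flags=%#lx\n", lock.flags);

		return 0;
	}

Running the sketch prints flags=0 after the first pass and flags=0x2 after the reorder, which is the situation the deleted "ww_mutex needs to always recheck its position" branch existed to handle; recomputing @first unconditionally covers it for free.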
-rw-r--r--  kernel/locking/mutex.c  15
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 354151fef06a..fbc62d360419 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -911,7 +911,6 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
 {
 	struct mutex_waiter waiter;
-	bool first = false;
 	struct ww_mutex *ww;
 	int ret;
 
@@ -986,6 +985,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 	set_current_state(state);
 	for (;;) {
+		bool first;
+
 		/*
 		 * Once we hold wait_lock, we're serialized against
 		 * mutex_unlock() handing the lock off to us, do a trylock
@@ -1014,15 +1015,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		spin_unlock(&lock->wait_lock);
 		schedule_preempt_disabled();
 
-		/*
-		 * ww_mutex needs to always recheck its position since its waiter
-		 * list is not FIFO ordered.
-		 */
-		if (ww_ctx || !first) {
-			first = __mutex_waiter_is_first(lock, &waiter);
-			if (first)
-				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
-		}
+		first = __mutex_waiter_is_first(lock, &waiter);
+		if (first)
+			__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
 
 		set_current_state(state);
 		/*