author    Thomas Gleixner <tglx@linutronix.de>    2021-08-25 12:33:14 +0200
committer Peter Zijlstra <peterz@infradead.org>   2021-08-25 15:42:33 +0200
commit    37e8abff2bebbf9947d6b784f5c75ed48a717089 (patch)
tree      8c86061bf1bc62bf51bd51dffe7feb419031ab2a /kernel/locking
parent    c3123c431447da99db160264506de9897c003513 (diff)
locking/rtmutex: Dequeue waiter on ww_mutex deadlock
The rt_mutex based ww_mutex variant queues the new waiter first in the
lock's rbtree before evaluating the ww_mutex specific conditions, which
might decide that the waiter should back out. This check and conditional
exit happen before the waiter is enqueued into the PI chain.

The failure handling at the call site assumes that the waiter, if it is
the topmost waiter on the lock, is queued in the PI chain, and then
proceeds to adjust the unmodified PI chain, which results in RB tree
corruption. Dequeue the waiter from the lock waiter list in the ww_mutex
error exit path to prevent this.

Fixes: add461325ec5 ("locking/rtmutex: Extend the rtmutex core to support ww_mutex")
Reported-by: Sebastian Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20210825102454.042280541@linutronix.de
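The bug is an instance of the generic "undo partially committed state on
the error exit" pattern. A minimal, hypothetical userspace sketch of that
pattern follows; it is not the kernel code: a circular doubly linked list
stands in for the lock's rbtree, ww_check_fails() stands in for
__ww_mutex_add_waiter(), and all names are invented for illustration.

    /* Userspace model of the bug pattern, not kernel code. */
    #include <errno.h>
    #include <stdio.h>

    struct waiter {
            struct waiter *next, *prev;
    };

    struct lock {
            struct waiter waiters;          /* circular list; stands in for the rbtree */
    };

    static void lock_init(struct lock *l)
    {
            l->waiters.next = l->waiters.prev = &l->waiters;
    }

    static void enqueue(struct lock *l, struct waiter *w)
    {
            w->next = l->waiters.next;
            w->prev = &l->waiters;
            l->waiters.next->prev = w;
            l->waiters.next = w;
    }

    static void dequeue(struct waiter *w)
    {
            w->prev->next = w->next;
            w->next->prev = w->prev;
            w->next = w->prev = NULL;
    }

    /* Stands in for __ww_mutex_add_waiter() detecting a ww_mutex deadlock. */
    static int ww_check_fails(void)
    {
            return 1;                       /* always "deadlock" in this demo */
    }

    static int block_on_lock(struct lock *l, struct waiter *w)
    {
            enqueue(l, w);                  /* waiter is queued first ... */
            if (ww_check_fails()) {
                    dequeue(w);             /* ... so the error exit must undo it */
                    return -EDEADLK;
            }
            return 0;
    }

    int main(void)
    {
            struct lock l;
            int res;

            lock_init(&l);
            {
                    struct waiter w;        /* scoped, like an on-stack rt_mutex_waiter */

                    res = block_on_lock(&l, &w);
            }
            printf("res=%d, list empty=%d\n", res,
                   l.waiters.next == &l.waiters);
            return 0;
    }

Without the dequeue() in the error branch, l.waiters.next would keep
pointing at the dead stack frame of w, the userspace analogue of the
RB tree corruption described above.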
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/rtmutex.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b3c09611ef6a..c8fe74ef8db9 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1082,8 +1082,13 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 		/* Check whether the waiter should back out immediately */
 		rtm = container_of(lock, struct rt_mutex, rtmutex);
 		res = __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
-		if (res)
+		if (res) {
+			raw_spin_lock(&task->pi_lock);
+			rt_mutex_dequeue(lock, waiter);
+			task->pi_blocked_on = NULL;
+			raw_spin_unlock(&task->pi_lock);
 			return res;
+		}
 	}
 
 	if (!owner)
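Note on the fix's locking: task_blocks_on_rt_mutex() runs with the lock's
wait_lock held by the caller, and earlier in the function the waiter was
enqueued and task->pi_blocked_on set under the task's pi_lock. The error
exit mirrors that setup, undoing both under the same pi_lock, so a failed
ww_mutex waiter leaves the task and the lock's waiter list exactly as they
were before the call.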