author     Linus Torvalds <torvalds@linux-foundation.org>  2022-12-12 15:14:53 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-12-12 15:14:53 -0800
commit     617fe4fa82b2fe5bcb99f97f223f408603bfa5a0 (patch)
tree       5e2ed50799b8005c086e70ef24376fb215970b15 /kernel
parent     2f60f830842d6c6960a0c038695f7bb4f0ef896d (diff)
parent     90d758896787048fa3d4209309d4800f3920e66f (diff)
Merge tag 'locking-core-2022-12-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
 "Two changes in this cycle:

   - a micro-optimization in static_key_slow_inc_cpuslocked()

   - fix futex death-notification wakeup bug"

* tag 'locking-core-2022-12-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  futex: Resend potentially swallowed owner death notification
  jump_label: Use atomic_try_cmpxchg() in static_key_slow_inc_cpuslocked()
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/futex/core.c   26
-rw-r--r--  kernel/jump_label.c    8
2 files changed, 19 insertions, 15 deletions
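
For context on the futex fix below: the "owner death notification" is what user space observes through POSIX robust mutexes. A minimal, self-contained illustration (standard pthreads API, not part of this patch) of the wakeup path the fix keeps from being swallowed:

/*
 * Illustration only, not part of this patch: how the kernel's owner-death
 * handling surfaces in user space through a POSIX robust mutex. If the
 * owning thread dies without unlocking, the next lock attempt must return
 * EOWNERDEAD instead of blocking forever -- which relies on the wakeup
 * that handle_futex_death() sends and that this fix keeps from being lost.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t m;

static void *die_while_holding(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&m);
	return NULL;			/* thread exits without unlocking */
}

int main(void)
{
	pthread_mutexattr_t attr;
	pthread_t t;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
	pthread_mutex_init(&m, &attr);

	pthread_create(&t, NULL, die_while_holding, NULL);
	pthread_join(t, NULL);

	if (pthread_mutex_lock(&m) == EOWNERDEAD) {
		puts("previous owner died; recovering");
		pthread_mutex_consistent(&m);	/* declare the state repaired */
	}
	pthread_mutex_unlock(&m);
	return 0;
}

Build with -pthread; after the owning thread exits, the surviving locker should see EOWNERDEAD rather than hang.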
diff --git a/kernel/futex/core.c b/kernel/futex/core.c
index b22ef1efe751..514e4582b863 100644
--- a/kernel/futex/core.c
+++ b/kernel/futex/core.c
@@ -638,6 +638,7 @@ static int handle_futex_death(u32 __user *uaddr, struct task_struct *curr,
bool pi, bool pending_op)
{
u32 uval, nval, mval;
+ pid_t owner;
int err;
/* Futex address must be 32bit aligned */
@@ -659,6 +660,10 @@ retry:
* 2. A woken up waiter is killed before it can acquire the
* futex in user space.
*
+ * In the second case, the wake up notification could be generated
+ * by the unlock path in user space after setting the futex value
+ * to zero or by the kernel after setting the OWNER_DIED bit below.
+ *
* In both cases the TID validation below prevents a wakeup of
* potential waiters which can cause these waiters to block
* forever.
@@ -667,24 +672,27 @@ retry:
*
* 1) task->robust_list->list_op_pending != NULL
* @pending_op == true
- * 2) User space futex value == 0
+ * 2) The owner part of user space futex value == 0
* 3) Regular futex: @pi == false
*
* If these conditions are met, it is safe to attempt waking up a
* potential waiter without touching the user space futex value and
- * trying to set the OWNER_DIED bit. The user space futex value is
- * uncontended and the rest of the user space mutex state is
- * consistent, so a woken waiter will just take over the
- * uncontended futex. Setting the OWNER_DIED bit would create
- * inconsistent state and malfunction of the user space owner died
- * handling.
+ * trying to set the OWNER_DIED bit. If the futex value is zero,
+ * the rest of the user space mutex state is consistent, so a woken
+ * waiter will just take over the uncontended futex. Setting the
+ * OWNER_DIED bit would create inconsistent state and malfunction
+ * of the user space owner died handling. Otherwise, the OWNER_DIED
+ * bit is already set, and the woken waiter is expected to deal with
+ * this.
*/
- if (pending_op && !pi && !uval) {
+ owner = uval & FUTEX_TID_MASK;
+
+ if (pending_op && !pi && !owner) {
futex_wake(uaddr, 1, 1, FUTEX_BITSET_MATCH_ANY);
return 0;
}
- if ((uval & FUTEX_TID_MASK) != task_pid_vnr(curr))
+ if (owner != task_pid_vnr(curr))
return 0;
/*
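
The rewritten comments above rely on the user-space futex word layout from <linux/futex.h>: the owner TID occupies FUTEX_TID_MASK, and the kernel sets FUTEX_OWNER_DIED when the owner exits. A rough waiter-side sketch (hypothetical helper, not the glibc implementation) of the take-over behaviour the comment describes:

/*
 * Sketch only -- a hypothetical waiter-side helper, not the glibc code.
 * It shows the futex word layout the kernel comment above relies on:
 * a zero owner field means the futex is uncontended (the pending_op fast
 * path just wakes a waiter), while FUTEX_OWNER_DIED set by the kernel
 * tells the woken waiter to take over and run recovery itself.
 */
#include <linux/futex.h>	/* FUTEX_TID_MASK, FUTEX_OWNER_DIED */
#include <stdatomic.h>
#include <stdint.h>

/* Returns 1 if acquired cleanly, -1 if acquired after the owner died,
 * 0 if the futex is still held by a live owner. */
static int robust_takeover(_Atomic uint32_t *futex, uint32_t my_tid)
{
	uint32_t val = atomic_load(futex);

	for (;;) {
		int owner_died = !!(val & FUTEX_OWNER_DIED);

		if ((val & FUTEX_TID_MASK) != 0 && !owner_died)
			return 0;		/* live owner, keep waiting */

		/* Owner field is zero or the owner died: claim the lock. */
		if (atomic_compare_exchange_weak(futex, &val, my_tid))
			return owner_died ? -1 : 1;
		/* CAS failure reloaded val; re-evaluate. */
	}
}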
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 714ac4c3b556..4d6c6f5f60db 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -115,8 +115,6 @@ EXPORT_SYMBOL_GPL(static_key_count);
void static_key_slow_inc_cpuslocked(struct static_key *key)
{
- int v, v1;
-
STATIC_KEY_CHECK_USE(key);
lockdep_assert_cpus_held();
@@ -132,11 +130,9 @@ void static_key_slow_inc_cpuslocked(struct static_key *key)
* so it counts as "enabled" in jump_label_update(). Note that
* atomic_inc_unless_negative() checks >= 0, so roll our own.
*/
- for (v = atomic_read(&key->enabled); v > 0; v = v1) {
- v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
- if (likely(v1 == v))
+ for (int v = atomic_read(&key->enabled); v > 0; )
+ if (likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)))
return;
- }
jump_label_lock();
if (atomic_read(&key->enabled) == 0) {
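
The jump_label hunk above replaces atomic_cmpxchg() plus a manual equality check with atomic_try_cmpxchg(), which reports success directly and, on failure, writes the freshly observed value back into its 'old' argument. The same idiom in C11 atomics (illustration with a made-up counter name, not the kernel code):

/*
 * Illustration of the try-cmpxchg idiom adopted above, written with C11
 * atomics instead of the kernel's atomic_t API. compare_exchange reports
 * success directly and updates 'v' with the current value on failure, so
 * the loop needs no separate re-read and no second comparison.
 */
#include <stdatomic.h>
#include <stdbool.h>

/* Hypothetical counter mirroring key->enabled: increment only while > 0. */
static bool inc_if_positive(atomic_int *enabled)
{
	for (int v = atomic_load(enabled); v > 0; )
		if (atomic_compare_exchange_weak(enabled, &v, v + 1))
			return true;
	return false;	/* zero or negative: caller falls back to the slow path */
}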