author     Linus Torvalds <torvalds@linux-foundation.org>   2021-05-23 06:30:08 -1000
committer  Linus Torvalds <torvalds@linux-foundation.org>   2021-05-23 06:30:08 -1000
commit     0898678c742ee913691d7f4a1606309825eee33b (patch)
tree       295cbe24bea6550d208dee4862d32d1354b9cc97 /kernel
parent     f73d2a429334dbd30bc9a7e5ef5e07a676d3d499 (diff)
parent     3a010c493271f04578b133de977e0e5dd2848cea (diff)
Merge tag 'locking-urgent-2021-05-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fixes from Thomas Gleixner:
 "Two locking fixes:

   - Invoke the lockdep tracepoints in the correct place so the
     ordering is correct again

   - Don't leave the mutex WAITER bit stale when the last waiter is
     dropping out early due to a signal as that forces all subsequent
     lock operations needlessly into the slowpath until it's cleaned
     up again"

* tag 'locking-urgent-2021-05-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/mutex: clear MUTEX_FLAGS if wait_list is empty due to signal
  locking/lockdep: Correct calling tracepoints
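The WAITERS flag lives in the low bits of the mutex owner word, and the lock fast path is a single cmpxchg that only succeeds when that word is completely clear. The sketch below is plain userspace C, not kernel code; toy_mutex, toy_lock_fastpath and FLAG_WAITERS are illustrative stand-ins. It shows why a flag bit left behind by a signalled waiter keeps defeating the fast path:

/*
 * Simplified illustration (not kernel code): the owner word packs the
 * owner pointer with low flag bits such as WAITERS.  The lock fast path
 * is a single compare-and-swap from 0 to the owner value, so any stale
 * flag bit left in the word makes that cmpxchg fail and pushes every
 * subsequent lock/unlock into the slowpath.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_WAITERS 0x1UL          /* illustrative stand-in for MUTEX_FLAG_WAITERS */

struct toy_mutex {
    atomic_uintptr_t owner;          /* task pointer | flag bits */
};

static bool toy_lock_fastpath(struct toy_mutex *m, uintptr_t me)
{
    uintptr_t expected = 0;          /* only succeeds if the owner word is entirely clear */
    return atomic_compare_exchange_strong(&m->owner, &expected, me);
}

int main(void)
{
    struct toy_mutex m = { .owner = 0 };
    uintptr_t me = 0x1000;           /* pretend task pointer, aligned so flag bits are free */

    printf("clean word:  fastpath %s\n", toy_lock_fastpath(&m, me) ? "works" : "fails");

    /* Simulate the bug: the last waiter left on a signal but FLAG_WAITERS stayed set. */
    atomic_store(&m.owner, FLAG_WAITERS);
    printf("stale flag:  fastpath %s\n", toy_lock_fastpath(&m, me) ? "works" : "fails");
    return 0;
}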
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/locking/lockdep.c       4
-rw-r--r--   kernel/locking/mutex-debug.c   4
-rw-r--r--   kernel/locking/mutex-debug.h   2
-rw-r--r--   kernel/locking/mutex.c        18
-rw-r--r--   kernel/locking/mutex.h         4
5 files changed, 19 insertions, 13 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 48d736aa03b2..7641bd407239 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -5736,7 +5736,7 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
-	trace_lock_acquired(lock, ip);
+	trace_lock_contended(lock, ip);
 
 	if (unlikely(!lock_stat || !lockdep_enabled()))
 		return;
@@ -5754,7 +5754,7 @@ void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
-	trace_lock_contended(lock, ip);
+	trace_lock_acquired(lock, ip);
 
 	if (unlikely(!lock_stat || !lockdep_enabled()))
 		return;
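For context, lock_contended() is called when a task starts blocking on a contended lock and lock_acquired() when it finally gets it; with the calls swapped, tracers saw the "acquired" event at the start of the wait and the "contended" event at the end. A minimal ordering check in plain C (illustrative only, not the kernel tracepoint API) shows the expectation the fix restores:

/*
 * Illustrative sketch (not the kernel tracepoint API): a consumer that
 * relies on the contended -> acquired ordering per lock.  With the
 * tracepoints swapped, "acquired" showed up where "contended" was
 * expected and checks like this one broke.
 */
#include <stdio.h>

enum lock_event { LOCK_CONTENDED, LOCK_ACQUIRED };

static const char *check_pair(enum lock_event first, enum lock_event second)
{
    /* For a contended acquisition we expect contended first, acquired second. */
    return (first == LOCK_CONTENDED && second == LOCK_ACQUIRED) ? "ok" : "out of order";
}

int main(void)
{
    /* Fixed ordering: lock_contended() fires first, then lock_acquired(). */
    printf("fixed:  %s\n", check_pair(LOCK_CONTENDED, LOCK_ACQUIRED));
    /* Buggy ordering caused by the swapped tracepoint calls. */
    printf("buggy:  %s\n", check_pair(LOCK_ACQUIRED, LOCK_CONTENDED));
    return 0;
}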
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index a7276aaf2abc..db9301591e3f 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -57,7 +57,7 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 	task->blocked_on = waiter;
 }
 
-void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 			 struct task_struct *task)
 {
 	DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
@@ -65,7 +65,7 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 	DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
 	task->blocked_on = NULL;
 
-	list_del_init(&waiter->list);
+	INIT_LIST_HEAD(&waiter->list);
 	waiter->task = NULL;
 }
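The switch from list_del_init() to INIT_LIST_HEAD() works because the actual unlinking now happens in the new __mutex_remove_waiter() in mutex.c (see the mutex.c hunks below), and list_del_init() is simply list_del() followed by re-initialising the node. A standalone sketch of that equivalence, using a toy list that mirrors (but does not include) <linux/list.h>:

/*
 * Minimal standalone sketch: list_del_init() == list_del() + INIT_LIST_HEAD().
 * After the patch the list_del() half lives in __mutex_remove_waiter() and
 * the debug helper only does the INIT_LIST_HEAD() half.
 */
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }

static void list_add(struct list_head *n, struct list_head *head)
{
    n->next = head->next; n->prev = head;
    head->next->prev = n; head->next = n;
}

static void list_del(struct list_head *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
}

static bool list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
    struct list_head wait_list, waiter;

    INIT_LIST_HEAD(&wait_list);
    list_add(&waiter, &wait_list);

    list_del(&waiter);            /* done by __mutex_remove_waiter() after the patch */
    INIT_LIST_HEAD(&waiter);      /* done by debug_mutex_remove_waiter(); together == list_del_init() */

    printf("wait_list empty: %d, waiter self-linked: %d\n",
           list_empty(&wait_list), waiter.next == &waiter);
    return 0;
}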
diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
index 1edd3f45a4ec..53e631e1d76d 100644
--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -22,7 +22,7 @@ extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
 extern void debug_mutex_add_waiter(struct mutex *lock,
 				   struct mutex_waiter *waiter,
 				   struct task_struct *task);
-extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
+extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 				struct task_struct *task);
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index cb6b112ce155..013e1b08a1bf 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -194,7 +194,7 @@ static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_wait
  * Add @waiter to a given location in the lock wait_list and set the
  * FLAG_WAITERS flag if it's the first waiter.
  */
-static void __sched
+static void
 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 		   struct list_head *list)
 {
@@ -205,6 +205,16 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 		__mutex_set_flag(lock, MUTEX_FLAG_WAITERS);
 }
 
+static void
+__mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
+{
+	list_del(&waiter->list);
+	if (likely(list_empty(&lock->wait_list)))
+		__mutex_clear_flag(lock, MUTEX_FLAGS);
+
+	debug_mutex_remove_waiter(lock, waiter, current);
+}
+
 /*
  * Give up ownership to a specific task, when @task = NULL, this is equivalent
  * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
@@ -1061,9 +1071,7 @@ acquired:
 			__ww_mutex_check_waiters(lock, ww_ctx);
 	}
 
-	mutex_remove_waiter(lock, &waiter, current);
-	if (likely(list_empty(&lock->wait_list)))
-		__mutex_clear_flag(lock, MUTEX_FLAGS);
+	__mutex_remove_waiter(lock, &waiter);
 
 	debug_mutex_free_waiter(&waiter);
@@ -1080,7 +1088,7 @@ skip_wait:
 err:
 	__set_current_state(TASK_RUNNING);
-	mutex_remove_waiter(lock, &waiter, current);
+	__mutex_remove_waiter(lock, &waiter);
 err_early_kill:
 	spin_unlock(&lock->wait_lock);
 	debug_mutex_free_waiter(&waiter);
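Taken together, both exits from the slowpath, the normal acquisition path and the signal-interrupted err path, now go through __mutex_remove_waiter(), which clears the flag bits as soon as the wait list drains. A simplified sketch of that consolidated behaviour in plain C, with a waiter count standing in for the wait list; names are illustrative, not kernel code:

/*
 * Sketch of the consolidated semantics (simplified, not the kernel code):
 * both the "got the lock" path and the "interrupted by a signal" path now
 * funnel through one helper that clears the flag bits once the wait list
 * is empty, so a signalled last waiter no longer leaves WAITERS behind.
 */
#include <stdio.h>

#define MUTEX_FLAG_WAITERS 0x1u

struct toy_mutex {
    unsigned int flags;     /* stand-in for the flag bits packed into lock->owner */
    int          nr_waiters;
};

static void toy_add_waiter(struct toy_mutex *m)
{
    if (m->nr_waiters++ == 0)
        m->flags |= MUTEX_FLAG_WAITERS;
}

/* Counterpart of the new __mutex_remove_waiter(): unlink and, if that was
 * the last waiter, drop all flag bits in one place. */
static void toy_remove_waiter(struct toy_mutex *m)
{
    if (--m->nr_waiters == 0)
        m->flags = 0;
}

int main(void)
{
    struct toy_mutex m = { 0, 0 };

    toy_add_waiter(&m);
    /* The waiter is signalled before it ever gets the lock (the err: path). */
    toy_remove_waiter(&m);

    printf("flags after signalled waiter left: %#x %s\n",
           m.flags, m.flags ? "(stale, fast path disabled)" : "(clean, fast path works)");
    return 0;
}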
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 1c2287d3fa71..f0c710b1d192 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -10,12 +10,10 @@
  * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
  */
 
-#define mutex_remove_waiter(lock, waiter, task) \
-		__list_del((waiter)->list.prev, (waiter)->list.next)
-
 #define debug_mutex_wake_waiter(lock, waiter)		do { } while (0)
 #define debug_mutex_free_waiter(waiter)			do { } while (0)
 #define debug_mutex_add_waiter(lock, waiter, ti)	do { } while (0)
+#define debug_mutex_remove_waiter(lock, waiter, ti)	do { } while (0)
 #define debug_mutex_unlock(lock)			do { } while (0)
 #define debug_mutex_init(lock, name, key)		do { } while (0)