author		Waiman Long <longman@redhat.com>	2020-11-20 23:14:14 -0500
committer	Peter Zijlstra <peterz@infradead.org>	2020-12-09 17:08:48 +0100
commit		1a728dff855a318bb58bcc1259b1826a7ad9f0bd (patch)
tree		fc773e33ed49257132e2123517323574c41b8e48 /kernel/locking/rwsem.c
parent		2f06f702925b512a95b95dca3855549c047eef58 (diff)
locking/rwsem: Enable reader optimistic lock stealing
If the optimistic spinning queue is empty and the rwsem does not have the handoff or write-lock bits set, it is actually not necessary to call rwsem_optimistic_spin() to spin on it. Instead, it can steal the lock directly as its reader bias is in the count already. If it is the first reader in this state, it will try to wake up other readers in the wait queue.

With this patch applied, the following were the lock event counts after rebooting a 2-socket system and a "make -j96" kernel rebuild:

  rwsem_opt_rlock=4437
  rwsem_rlock=29
  rwsem_rlock_steal=19

So lock stealing represents about 0.4% of all the read locks acquired in the slow path.

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Link: https://lkml.kernel.org/r/20201121041416.12285-4-longman@redhat.com
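As a rough illustration, the condition described above boils down to: the reader bias is already in the count, no writer holds the lock, no handoff is pending, and nobody is on the optimistic spinning queue. Below is a minimal, self-contained C sketch of that decision; the flag values, the toy_rwsem struct and the reader_can_steal() name are illustrative stand-ins, not the kernel's RWSEM_WRITER_LOCKED / RWSEM_FLAG_HANDOFF definitions or its osq_is_locked() helper.

#include <stdbool.h>

/* Illustrative stand-ins for the rwsem count bits (values are made up). */
#define WRITER_LOCKED	0x1UL	/* a writer currently owns the lock     */
#define FLAG_HANDOFF	0x4UL	/* a waiter has requested lock handoff  */

struct toy_rwsem {
	unsigned long count;	/* this reader's bias is already added   */
	bool osq_locked;	/* stand-in for osq_is_locked(&sem->osq) */
};

/*
 * Reader optimistic lock stealing: keep the already-added reader bias
 * instead of spinning, provided no writer owns the lock, no handoff is
 * pending and the optimistic spinning queue is empty.
 */
static bool reader_can_steal(const struct toy_rwsem *sem)
{
	if (sem->count & (WRITER_LOCKED | FLAG_HANDOFF))
		return false;	/* writer owns it or handoff requested  */
	if (sem->osq_locked)
		return false;	/* someone is optimistically spinning   */
	return true;		/* steal: the read lock is already held */
}

In the patch itself, the first such reader (rcnt == 1) additionally jumps to the new wake_readers: label so that readers queued behind it are woken as well; see the last two hunks of the diff below.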
Diffstat (limited to 'kernel/locking/rwsem.c')
-rw-r--r--	kernel/locking/rwsem.c	28
1 file changed, 28 insertions, 0 deletions
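The rwsem_opt_rlock / rwsem_rlock / rwsem_rlock_steal figures quoted in the commit message come from the kernel's lock event counters (CONFIG_LOCK_EVENT_COUNTS=y). A small userspace sketch for reading them follows; it assumes the per-event files are exposed under /sys/kernel/debug/lockevent/, which requires debugfs to be mounted and root privileges.

#include <stdio.h>

/* Assumed location of the per-event counter files (debugfs). */
#define LOCKEVENT_DIR "/sys/kernel/debug/lockevent/"

static long read_lockevent(const char *name)
{
	char path[256];
	long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), LOCKEVENT_DIR "%s", name);
	f = fopen(path, "r");
	if (!f)
		return -1;	/* missing file, no debugfs, or no permission */
	if (fscanf(f, "%ld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* The three counters quoted in the commit message above. */
	printf("rwsem_opt_rlock   = %ld\n", read_lockevent("rwsem_opt_rlock"));
	printf("rwsem_rlock       = %ld\n", read_lockevent("rwsem_rlock"));
	printf("rwsem_rlock_steal = %ld\n", read_lockevent("rwsem_rlock_steal"));
	return 0;
}

With the numbers from the message, stealing accounts for 19 / (4437 + 29 + 19), roughly 0.4% of the read locks taken in the slow path.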
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index c055f4b28b23..ba5e239d08e7 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -976,6 +976,12 @@ static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
 	}
 	return false;
 }
+
+static inline bool rwsem_no_spinners(struct rw_semaphore *sem)
+{
+	return !osq_is_locked(&sem->osq);
+}
+
 #else
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
 					   unsigned long nonspinnable)
@@ -996,6 +1002,11 @@ static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
 	return false;
 }
 
+static inline bool rwsem_no_spinners(struct rw_semaphore *sem)
+{
+	return false;
+}
+
 static inline int
 rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
 {
@@ -1027,6 +1038,22 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, int state)
 		goto queue;
 
 	/*
+	 * Reader optimistic lock stealing
+	 *
+	 * We can take the read lock directly without doing
+	 * rwsem_optimistic_spin() if the conditions are right.
+	 * Also wake up other readers if it is the first reader.
+	 */
+	if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF)) &&
+	    rwsem_no_spinners(sem)) {
+		rwsem_set_reader_owned(sem);
+		lockevent_inc(rwsem_rlock_steal);
+		if (rcnt == 1)
+			goto wake_readers;
+		return sem;
+	}
+
+	/*
 	 * Save the current read-owner of rwsem, if available, and the
 	 * reader nonspinnable bit.
 	 */
@@ -1048,6 +1075,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, int state)
 	 * Wake up other readers in the wait list if the front
 	 * waiter is a reader.
 	 */
+wake_readers:
 	if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) {
 		raw_spin_lock_irq(&sem->wait_lock);
 		if (!list_empty(&sem->wait_list))