summaryrefslogtreecommitdiffstats
path: root/kernel/locking/rwsem.c
diff options
context:
space:
mode:
authorWaiman Long <longman@redhat.com>2019-05-20 16:59:05 -0400
committerIngo Molnar <mingo@kernel.org>2019-06-17 12:27:59 +0200
commit3f6d517a3ece6e6ced7abcbe798ff332ac5ca586 (patch)
tree43206e2cc7d73f4092731691329c45a9ead7d48d /kernel/locking/rwsem.c
parent6cef7ff6e43cbdb9fa8eb91eb9a6b25d45ae11e3 (diff)
downloadlinux-stable-3f6d517a3ece6e6ced7abcbe798ff332ac5ca586.tar.gz
linux-stable-3f6d517a3ece6e6ced7abcbe798ff332ac5ca586.tar.bz2
linux-stable-3f6d517a3ece6e6ced7abcbe798ff332ac5ca586.zip
locking/rwsem: Make rwsem_spin_on_owner() return owner state
This patch modifies rwsem_spin_on_owner() to return four possible values to better reflect the state of lock holder which enables us to make a better decision of what to do next. Signed-off-by: Waiman Long <longman@redhat.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tim Chen <tim.c.chen@linux.intel.com> Cc: Will Deacon <will.deacon@arm.com> Cc: huang ying <huang.ying.caritas@gmail.com> Link: https://lkml.kernel.org/r/20190520205918.22251-7-longman@redhat.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking/rwsem.c')
-rw-r--r--kernel/locking/rwsem.c65
1 files changed, 47 insertions, 18 deletions
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index f56329240ef1..8d0f2acfe13d 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -414,17 +414,54 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
}
/*
- * Return true only if we can still spin on the owner field of the rwsem.
+ * The rwsem_spin_on_owner() function returns the following 4 values
+ * depending on the lock owner state.
+ * OWNER_NULL : owner is currently NULL
+ * OWNER_WRITER: when owner changes and is a writer
+ * OWNER_READER: when owner changes and the new owner may be a reader.
+ * OWNER_NONSPINNABLE:
+ * when optimistic spinning has to stop because either the
+ * owner stops running, is unknown, or its timeslice has
+ * been used up.
*/
-static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
+enum owner_state {
+ OWNER_NULL = 1 << 0,
+ OWNER_WRITER = 1 << 1,
+ OWNER_READER = 1 << 2,
+ OWNER_NONSPINNABLE = 1 << 3,
+};
+#define OWNER_SPINNABLE (OWNER_NULL | OWNER_WRITER)
+
+static inline enum owner_state rwsem_owner_state(unsigned long owner)
{
- struct task_struct *owner = READ_ONCE(sem->owner);
+ if (!owner)
+ return OWNER_NULL;
- if (!is_rwsem_owner_spinnable(owner))
- return false;
+ if (owner & RWSEM_ANONYMOUSLY_OWNED)
+ return OWNER_NONSPINNABLE;
+
+ if (owner & RWSEM_READER_OWNED)
+ return OWNER_READER;
+
+ return OWNER_WRITER;
+}
+
+static noinline enum owner_state rwsem_spin_on_owner(struct rw_semaphore *sem)
+{
+ struct task_struct *tmp, *owner = READ_ONCE(sem->owner);
+ enum owner_state state = rwsem_owner_state((unsigned long)owner);
+
+ if (state != OWNER_WRITER)
+ return state;
rcu_read_lock();
- while (owner && (READ_ONCE(sem->owner) == owner)) {
+ for (;;) {
+ tmp = READ_ONCE(sem->owner);
+ if (tmp != owner) {
+ state = rwsem_owner_state((unsigned long)tmp);
+ break;
+ }
+
/*
* Ensure we emit the owner->on_cpu, dereference _after_
* checking sem->owner still matches owner, if that fails,
@@ -433,24 +470,16 @@ static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
*/
barrier();
- /*
- * abort spinning when need_resched or owner is not running or
- * owner's cpu is preempted.
- */
if (need_resched() || !owner_on_cpu(owner)) {
- rcu_read_unlock();
- return false;
+ state = OWNER_NONSPINNABLE;
+ break;
}
cpu_relax();
}
rcu_read_unlock();
- /*
- * If there is a new owner or the owner is not set, we continue
- * spinning.
- */
- return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
+ return state;
}
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
@@ -473,7 +502,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
* 2) readers own the lock as we can't determine if they are
* actively running or not.
*/
- while (rwsem_spin_on_owner(sem)) {
+ while (rwsem_spin_on_owner(sem) & OWNER_SPINNABLE) {
/*
* Try to acquire the lock
*/