author    Nicholas Piggin <npiggin@gmail.com>    2020-07-24 23:14:21 +1000
committer Michael Ellerman <mpe@ellerman.id.au>  2020-07-27 00:01:29 +1000
commit    20c0e8269e9d515e677670902c7e1cc0209d6ad9 (patch)
tree      1de7272a98830301f6d21a1b8cdab7d28d0e83c1 /include/asm-generic/qspinlock.h
parent    aa65ff6b18e0366db1790609956a4ac7308c5668 (diff)
powerpc/pseries: Implement paravirt qspinlocks for SPLPAR
This implements the generic paravirt qspinlocks using H_PROD and H_CONFER to kick and wait.

This uses an un-directed yield to any CPU rather than the directed yield to a pre-empted lock holder that paravirtualised simple spinlocks use, which requires no kick hcall. This is something that could be investigated and improved in future.

Performance results can be found in the commit which added queued spinlocks.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200724131423.1362108-5-npiggin@gmail.com
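
As context for the mechanism named above, here is a minimal sketch (not the verbatim powerpc code from this series) of how the generic paravirt qspinlock slowpath can be backed by those hcalls on a shared-processor LPAR. The slowpath expects the architecture to provide pv_kick() and pv_wait() hooks; H_PROD, H_CONFER, plpar_hcall_norets() and get_hard_smp_processor_id() are existing pseries interfaces, while the hook bodies below are simplified assumptions.

/*
 * Sketch only: simplified pv_kick()/pv_wait() hooks for the generic
 * paravirt qspinlock slowpath, backed by pseries hcalls.
 */
#include <linux/types.h>	/* u8, u32 */
#include <linux/compiler.h>	/* READ_ONCE() */
#include <asm/hvcall.h>		/* H_PROD, H_CONFER, plpar_hcall_norets() */
#include <asm/smp.h>		/* get_hard_smp_processor_id() */

/* "Kick": prod a CPU that went to sleep waiting for the lock. */
static inline void pv_kick(int cpu)
{
	plpar_hcall_norets(H_PROD, get_hard_smp_processor_id(cpu));
}

/* "Wait": if the byte still holds the expected value, give our cycles away. */
static inline void pv_wait(u8 *ptr, u8 val)
{
	if (READ_ONCE(*ptr) != val)
		return;
	/* Un-directed yield: confer to any CPU, not a specific pre-empted holder. */
	plpar_hcall_norets(H_CONFER, -1, 0);
}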
Diffstat (limited to 'include/asm-generic/qspinlock.h')
-rw-r--r--  include/asm-generic/qspinlock.h  |  2 ++
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index fb0a814d4395..38ca14e79a86 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -69,6 +69,7 @@ static __always_inline int queued_spin_trylock(struct qspinlock *lock)
 
 extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
 
+#ifndef queued_spin_lock
 /**
  * queued_spin_lock - acquire a queued spinlock
  * @lock: Pointer to queued spinlock structure
@@ -82,6 +83,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
 
 	queued_spin_lock_slowpath(lock, val);
 }
+#endif
 
 #ifndef queued_spin_unlock
 /**
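
The #ifndef/#endif pair added above lets an architecture mask out the generic locking fast path. A minimal sketch of the pattern it enables follows; the header location, the extern declaration placement and the slowpath routing comment are illustrative assumptions rather than the exact powerpc implementation: the arch defines its own queued_spin_lock(), defines the macro of the same name, and only then includes asm-generic/qspinlock.h, so the guarded generic definition is skipped.

/* Hypothetical arch/<arch>/include/asm/qspinlock.h excerpt -- illustration only. */
#include <linux/atomic.h>
#include <asm-generic/qspinlock_types.h>	/* struct qspinlock, _Q_LOCKED_VAL */

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	/* Same uncontended fast path as the generic version... */
	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	/* ...but the arch can route the contended case to a native or paravirt slowpath. */
	queued_spin_lock_slowpath(lock, val);
}
#define queued_spin_lock queued_spin_lock	/* hide the guarded generic definition */

#include <asm-generic/qspinlock.h>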