path: root/include/linux/local_lock_internal.h
author    Thomas Gleixner <tglx@linutronix.de>        2021-08-15 23:29:28 +0200
committer Ingo Molnar <mingo@kernel.org>              2021-08-17 19:08:49 +0200
commit    026659b9774e4c586baeb457557fcfc4e0ad144b (patch)
tree      5355b9097ba53475d11d1c2de92555d043c9b664 /include/linux/local_lock_internal.h
parent    31552385f8e9d0869117014bf8e55ba0497e3ec8 (diff)
locking/local_lock: Add PREEMPT_RT support
On PREEMPT_RT enabled kernels local_lock maps to a per CPU 'sleeping'
spinlock which protects the critical section while staying preemptible.
CPU locality is established by disabling migration.

Provide the necessary types and macros to substitute the non-RT variant.

Co-developed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211306.023630962@linutronix.de
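For context, a minimal usage sketch of the local_lock API this patch extends
(not part of the commit; the struct, variable, and function names below are
hypothetical):

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	/* Hypothetical per-CPU data guarded by a local_lock. */
	struct my_pcpu_data {
		local_lock_t	lock;
		unsigned long	counter;
	};

	static DEFINE_PER_CPU(struct my_pcpu_data, my_data) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void my_counter_inc(void)
	{
		unsigned long flags;

		/*
		 * On !PREEMPT_RT this disables interrupts; on PREEMPT_RT
		 * it takes the per-CPU spinlock with migration disabled,
		 * so the section stays preemptible but CPU-local.
		 */
		local_lock_irqsave(&my_data.lock, flags);
		this_cpu_inc(my_data.counter);
		local_unlock_irqrestore(&my_data.lock, flags);
	}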
Diffstat (limited to 'include/linux/local_lock_internal.h')
-rw-r--r--  include/linux/local_lock_internal.h | 44
1 file changed, 44 insertions(+), 0 deletions(-)
diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
index 3f02b818625e..975e33b793a7 100644
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -6,6 +6,8 @@
#include <linux/percpu-defs.h>
#include <linux/lockdep.h>

+#ifndef CONFIG_PREEMPT_RT
+
typedef struct {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
@@ -95,3 +97,45 @@ do { \
		local_lock_release(this_cpu_ptr(lock));	\
		local_irq_restore(flags);			\
	} while (0)
+
+#else /* !CONFIG_PREEMPT_RT */
+
+/*
+ * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
+ * critical section while staying preemptible.
+ */
+typedef spinlock_t local_lock_t;
+
+#define INIT_LOCAL_LOCK(lockname)	__LOCAL_SPIN_LOCK_UNLOCKED((lockname))
+
+#define __local_lock_init(l)					\
+	do {							\
+		local_spin_lock_init((l));			\
+	} while (0)
+
+#define __local_lock(__lock)					\
+	do {							\
+		migrate_disable();				\
+		spin_lock(this_cpu_ptr((__lock)));		\
+	} while (0)
+
+#define __local_lock_irq(lock)			__local_lock(lock)
+
+#define __local_lock_irqsave(lock, flags)			\
+	do {							\
+		typecheck(unsigned long, flags);		\
+		flags = 0;					\
+		__local_lock(lock);				\
+	} while (0)
+
+#define __local_unlock(__lock)					\
+	do {							\
+		spin_unlock(this_cpu_ptr((__lock)));		\
+		migrate_enable();				\
+	} while (0)
+
+#define __local_unlock_irq(lock)		__local_unlock(lock)
+
+#define __local_unlock_irqrestore(lock, flags)	__local_unlock(lock)
+
+#endif /* CONFIG_PREEMPT_RT */
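To make the semantic difference concrete, a sketch (again with hypothetical
names) of what a caller sees under each configuration; note that on
PREEMPT_RT the _irqsave variant does not disable interrupts at all, it
merely typechecks and zeroes 'flags':

	#include <linux/local_lock.h>
	#include <linux/percpu.h>

	/* Hypothetical bare per-CPU local_lock. */
	static DEFINE_PER_CPU(local_lock_t, scratch_lock) =
		INIT_LOCAL_LOCK(scratch_lock);

	static void scratch_use(void)
	{
		unsigned long flags;

		local_lock_irqsave(&scratch_lock, flags);
		/*
		 * !PREEMPT_RT: interrupts are disabled here and 'flags'
		 * holds the previous IRQ state.
		 *
		 * PREEMPT_RT: 'flags' was set to 0 by the macro, interrupts
		 * stay enabled, the rtmutex-based per-CPU spinlock is held,
		 * and only migration to another CPU is prevented, so this
		 * section remains preemptible.
		 */
		local_unlock_irqrestore(&scratch_lock, flags);
	}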