author     Rusty Russell <rusty@rustcorp.com.au>  2012-05-08 13:29:45 +0930
committer  Al Viro <viro@zeniv.linux.org.uk>      2012-05-29 23:28:41 -0400
commit     9dd6fa03ab31bb57cee4623a689d058d222fbe68 (patch)
tree       3f55c3527ecae7a053b0305d0a8f45e8328a416c /include/linux
parent     ea022dfb3c2a4680483b00eb2fecc9fc4f6091d1 (diff)
download   linux-stable-9dd6fa03ab31bb57cee4623a689d058d222fbe68.tar.gz
           linux-stable-9dd6fa03ab31bb57cee4623a689d058d222fbe68.tar.bz2
           linux-stable-9dd6fa03ab31bb57cee4623a689d058d222fbe68.zip
lglock: remove online variants of lock
Optimizing the slow paths adds a lot of complexity. If you need to grab every lock often, you have other problems.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Nick Piggin <npiggin@kernel.dk>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/lglock.h | 58
1 file changed, 2 insertions(+), 56 deletions(-)
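
As a rough illustration (not part of the patch; "example_lock" is a made-up name), a brlock caller looks the same after this change; only the write side changes, going straight to the plain global lock instead of the removed _online variant:

/* Sketch only: assumes a brlock named "example_lock". */
#include <linux/export.h>	/* EXPORT_SYMBOL used inside DEFINE_LGLOCK */
#include <linux/lglock.h>

DEFINE_BRLOCK(example_lock);	/* per-CPU arch_spinlock_t plus the generated lock functions */

static void example_setup(void)
{
	br_lock_init(example_lock);	/* expands to example_lock_lock_init() */
}

static void example_read_side(void)
{
	br_read_lock(example_lock);	/* takes only this CPU's spinlock */
	/* ... read-mostly critical section ... */
	br_read_unlock(example_lock);
}

static void example_write_side(void)
{
	br_write_lock(example_lock);	/* now example_lock_global_lock(): every CPU's spinlock */
	/* ... exclusive critical section ... */
	br_write_unlock(example_lock);
}
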
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 87f402ccec55..0fdd821e77b7 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -28,8 +28,8 @@
#define br_lock_init(name) name##_lock_init()
#define br_read_lock(name) name##_local_lock()
#define br_read_unlock(name) name##_local_unlock()
-#define br_write_lock(name) name##_global_lock_online()
-#define br_write_unlock(name) name##_global_unlock_online()
+#define br_write_lock(name) name##_global_lock()
+#define br_write_unlock(name) name##_global_unlock()
#define DECLARE_BRLOCK(name) DECLARE_LGLOCK(name)
#define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name)
@@ -42,8 +42,6 @@
#define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu)
#define lg_global_lock(name) name##_global_lock()
#define lg_global_unlock(name) name##_global_unlock()
-#define lg_global_lock_online(name) name##_global_lock_online()
-#define lg_global_unlock_online(name) name##_global_unlock_online()
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define LOCKDEP_INIT_MAP lockdep_init_map
@@ -68,36 +66,13 @@
extern void name##_local_unlock_cpu(int cpu); \
extern void name##_global_lock(void); \
extern void name##_global_unlock(void); \
- extern void name##_global_lock_online(void); \
- extern void name##_global_unlock_online(void); \
#define DEFINE_LGLOCK(name) \
\
DEFINE_SPINLOCK(name##_cpu_lock); \
- cpumask_t name##_cpus __read_mostly; \
DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \
DEFINE_LGLOCK_LOCKDEP(name); \
\
- static int \
- name##_lg_cpu_callback(struct notifier_block *nb, \
- unsigned long action, void *hcpu) \
- { \
- switch (action & ~CPU_TASKS_FROZEN) { \
- case CPU_UP_PREPARE: \
- spin_lock(&name##_cpu_lock); \
- cpu_set((unsigned long)hcpu, name##_cpus); \
- spin_unlock(&name##_cpu_lock); \
- break; \
- case CPU_UP_CANCELED: case CPU_DEAD: \
- spin_lock(&name##_cpu_lock); \
- cpu_clear((unsigned long)hcpu, name##_cpus); \
- spin_unlock(&name##_cpu_lock); \
- } \
- return NOTIFY_OK; \
- } \
- static struct notifier_block name##_lg_cpu_notifier = { \
- .notifier_call = name##_lg_cpu_callback, \
- }; \
void name##_lock_init(void) { \
int i; \
LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
@@ -106,11 +81,6 @@
lock = &per_cpu(name##_lock, i); \
*lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
} \
- register_hotcpu_notifier(&name##_lg_cpu_notifier); \
- get_online_cpus(); \
- for_each_online_cpu(i) \
- cpu_set(i, name##_cpus); \
- put_online_cpus(); \
} \
EXPORT_SYMBOL(name##_lock_init); \
\
@@ -150,30 +120,6 @@
} \
EXPORT_SYMBOL(name##_local_unlock_cpu); \
\
- void name##_global_lock_online(void) { \
- int i; \
- spin_lock(&name##_cpu_lock); \
- rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
- for_each_cpu(i, &name##_cpus) { \
- arch_spinlock_t *lock; \
- lock = &per_cpu(name##_lock, i); \
- arch_spin_lock(lock); \
- } \
- } \
- EXPORT_SYMBOL(name##_global_lock_online); \
- \
- void name##_global_unlock_online(void) { \
- int i; \
- rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
- for_each_cpu(i, &name##_cpus) { \
- arch_spinlock_t *lock; \
- lock = &per_cpu(name##_lock, i); \
- arch_spin_unlock(lock); \
- } \
- spin_unlock(&name##_cpu_lock); \
- } \
- EXPORT_SYMBOL(name##_global_unlock_online); \
- \
void name##_global_lock(void) { \
int i; \
preempt_disable(); \