path: root/kernel/up.c
author     David Daney <david.daney@cavium.com>            2013-09-11 14:23:24 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-09-11 15:58:23 -0700
commit     fa688207c9db48b64ab6538abc3fcdf26110b9ec (patch)
tree       47fff6ebaa5b0b7d3feca64010051899e29db475 /kernel/up.c
parent     c14c338cb05c700a260480c197cfd6da8f8b7d2e (diff)
smp: quit unconditionally enabling irq in on_each_cpu_mask and on_each_cpu_cond
As in commit f21afc25f9ed ("smp.h: Use local_irq_{save,restore}() in !SMP version of on_each_cpu()"), we don't want to enable irqs if they are not already enabled.  There are currently no known problematic callers of these functions, but since it is a known failure pattern, we preemptively fix them.

Since they are not trivial functions, make them non-inline by moving them to up.c.  This also means we don't have to fix #include dependencies for preempt_{disable,enable}.

Signed-off-by: David Daney <david.daney@cavium.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
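To make the failure pattern concrete, here is a hedged, illustrative C sketch (not part of the patch; the caller and helper names are hypothetical) of why the UP stubs must use local_irq_save()/local_irq_restore() rather than unconditionally enabling irqs:

        /* Hypothetical caller, for illustration only -- not in this patch. */
        static void do_flush(void *info)
        {
                /* ... per-CPU work ... */
        }

        static void hypothetical_caller(const struct cpumask *mask)
        {
                unsigned long flags;

                local_irq_save(flags);          /* caller's critical section: irqs off */

                /*
                 * If on_each_cpu_mask() blindly wrapped func() in
                 * local_irq_disable()/local_irq_enable(), irqs would be ON
                 * when it returned, silently breaking this critical section.
                 * With local_irq_save()/local_irq_restore() the prior
                 * (disabled) state is preserved.
                 */
                on_each_cpu_mask(mask, do_flush, NULL, true);

                /* caller still relies on irqs being off here */

                local_irq_restore(flags);
        }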
Diffstat (limited to 'kernel/up.c')
-rw-r--r--  kernel/up.c  39
1 file changed, 39 insertions, 0 deletions
diff --git a/kernel/up.c b/kernel/up.c
index c54c75e9faf7..144e57255234 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -19,3 +19,42 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
+
+/*
+ * Note we still need to test the mask even for UP
+ * because we actually can get an empty mask from
+ * code that on SMP might call us without the local
+ * CPU in the mask.
+ */
+void on_each_cpu_mask(const struct cpumask *mask,
+ smp_call_func_t func, void *info, bool wait)
+{
+ unsigned long flags;
+
+ if (cpumask_test_cpu(0, mask)) {
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ }
+}
+EXPORT_SYMBOL(on_each_cpu_mask);
+
+/*
+ * Preemption is disabled here to make sure the cond_func is called under the
+ * same conditions in UP and SMP.
+ */
+void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+ smp_call_func_t func, void *info, bool wait,
+ gfp_t gfp_flags)
+{
+ unsigned long flags;
+
+ preempt_disable();
+ if (cond_func(0, info)) {
+ local_irq_save(flags);
+ func(info);
+ local_irq_restore(flags);
+ }
+ preempt_enable();
+}
+EXPORT_SYMBOL(on_each_cpu_cond);
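
As a usage note on the mask test in on_each_cpu_mask() above: even on UP, a caller can legitimately hand over a mask that excludes CPU 0, in which case func() must run zero times. A hedged sketch, with hypothetical names (not from the patch):

        /* Hedged usage sketch; names are hypothetical, not from the patch. */
        static void remote_work(void *info)
        {
                /* ... */
        }

        static void run_on_others(void)
        {
                cpumask_var_t mask;

                if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                        return;

                cpumask_copy(mask, cpu_online_mask);
                /* Exclude ourselves; on UP this leaves the mask empty. */
                cpumask_clear_cpu(get_cpu(), mask);

                /* With an empty mask, func() is correctly never called. */
                on_each_cpu_mask(mask, remote_work, NULL, true);
                put_cpu();

                free_cpumask_var(mask);
        }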
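And for on_each_cpu_cond(): since cond_func is evaluated with preemption disabled on both UP and SMP (which is exactly what the preempt_disable() in the stub above guarantees), it must not sleep. A hedged sketch; the tracker structure and all names are hypothetical:

        /* Hedged sketch; the tracker structure and names are hypothetical. */
        struct pending_tracker {
                bool pending[NR_CPUS];
        };

        static bool cpu_has_pending(int cpu, void *info)
        {
                struct pending_tracker *t = info;

                /* Runs with preemption off on UP and SMP alike: no sleeping. */
                return t->pending[cpu];
        }

        static void clear_pending(void *info)
        {
                /* ... per-CPU work ... */
        }

        static void flush_pending(struct pending_tracker *t)
        {
                /* func() runs only on CPUs for which cond_func returned true. */
                on_each_cpu_cond(cpu_has_pending, clear_pending, t, true, GFP_ATOMIC);
        }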