Diffstat (limited to 'kernel')
-rw-r--r--	kernel/smp.c	29
1 file changed, 19 insertions, 10 deletions
diff --git a/kernel/smp.c b/kernel/smp.c
index 17c6e5860231..2fe66f7c617a 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -194,6 +194,7 @@ void generic_smp_call_function_interrupt(void)
 	 */
 	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
 		int refs;
+		void (*func) (void *info);
 
 		/*
 		 * Since we walk the list without any locks, we might
@@ -213,24 +214,32 @@ void generic_smp_call_function_interrupt(void)
 		if (atomic_read(&data->refs) == 0)
 			continue;
 
-		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
-			continue;
-
+		func = data->csd.func;		/* for later warn */
 		data->csd.func(data->csd.info);
 
+		/*
+		 * If the cpu mask is not still set then it enabled interrupts,
+		 * we took another smp interrupt, and executed the function
+		 * twice on this cpu.  In theory that copy decremented refs.
+		 */
+		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask)) {
+			WARN(1, "%pS enabled interrupts and double executed\n",
+			     func);
+			continue;
+		}
+
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
 
-		if (!refs) {
-			WARN_ON(!cpumask_empty(data->cpumask));
-
-			raw_spin_lock(&call_function.lock);
-			list_del_rcu(&data->csd.list);
-			raw_spin_unlock(&call_function.lock);
-		}
 		if (refs)
 			continue;
 
+		WARN_ON(!cpumask_empty(data->cpumask));
+
+		raw_spin_lock(&call_function.lock);
+		list_del_rcu(&data->csd.list);
+		raw_spin_unlock(&call_function.lock);
+
 		csd_unlock(&data->csd);
 	}
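
For context, a minimal sketch (not part of this commit) of the kind of callback the new WARN is meant to flag: a function handed to smp_call_function() runs from the call-function IPI with interrupts disabled, and if it re-enables them the same CPU can take a nested call-function IPI and execute the still-pending entry a second time. The callback name below is hypothetical.

	#include <linux/smp.h>
	#include <linux/irqflags.h>

	/* Hypothetical example: a cross-CPU callback that wrongly enables interrupts. */
	static void bad_remote_func(void *info)
	{
		local_irq_enable();	/* wrong: lets a nested call-function IPI run this entry again */
		/* ... do the remote work ... */
		local_irq_disable();
	}

	/*
	 * Queued with e.g. smp_call_function(bad_remote_func, NULL, 1), the entry
	 * can execute twice on the interrupted CPU; the second run clears the
	 * cpumask bit and drops refs, so the first run now warns instead of
	 * decrementing refs a second time.
	 */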