author		Thomas Gleixner <tglx@linutronix.de>	2021-03-09 09:42:10 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2021-03-17 16:33:57 +0100
commit		eb2dafbba8b824ee77f166629babd470dd0b1c0a (patch)
tree		c3a84b3625d79c8b129e05c252be3f7d3ab376f8 /include/linux/interrupt.h
parent		697d8c63c4a2991a22a896a5e6adcdbb28fefe56 (diff)
tasklets: Prevent tasklet_unlock_spin_wait() deadlock on RT
tasklet_unlock_spin_wait() spin waits for the TASKLET_STATE_RUN bit in the
tasklet state to be cleared. This works on !RT nicely because the
corresponding execution can only happen on a different CPU.

On RT softirq processing is preemptible, therefore a task preempting the
softirq processing thread can spin forever.

Prevent this by invoking local_bh_disable()/enable() inside the loop. In case
that the softirq processing thread was preempted by the current task, current
will block on the local lock which yields the CPU to the preempted softirq
processing thread. If the tasklet is processed on a different CPU then the
local_bh_disable()/enable() pair is just a waste of processor cycles.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210309084241.988908275@linutronix.de
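The patch moves tasklet_unlock_spin_wait() out of line into kernel/softirq.c.
A minimal sketch of the RT-aware wait loop described above; the committed body
may differ in detail:

void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * On RT, block on the softirq local lock: if
			 * current preempted the softirq thread on this
			 * CPU, this yields the CPU back to it. If the
			 * tasklet runs on another CPU, the pair is just
			 * wasted cycles.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}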
Diffstat (limited to 'include/linux/interrupt.h')
-rw-r--r--	include/linux/interrupt.h | 12 ++----------
1 file changed, 2 insertions(+), 10 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index b50be4fbbc98..352db93c2eed 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -658,7 +658,7 @@ enum
 	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
 };
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
@@ -666,16 +666,8 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
 
 void tasklet_unlock(struct tasklet_struct *t);
 void tasklet_unlock_wait(struct tasklet_struct *t);
+void tasklet_unlock_spin_wait(struct tasklet_struct *t);
 
-/*
- * Do not use in new code. Waiting for tasklets from atomic contexts is
- * error prone and should be avoided.
- */
-static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
-{
-	while (test_bit(TASKLET_STATE_RUN, &t->state))
-		cpu_relax();
-}
 #else
 static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
 static inline void tasklet_unlock(struct tasklet_struct *t) { }
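For context, the spin-wait variant exists so tasklets can be waited on from
atomic contexts; the companion helper tasklet_disable_in_atomic() introduced
in the same series is the intended caller. A sketch of that caller, assuming
the shape it has elsewhere in this header:

static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	/* Spin wait is safe here even on RT after this patch. */
	tasklet_unlock_spin_wait(t);
	smp_mb();
}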