From cbe16f35bee6880becca6f20d2ebf6b457148552 Mon Sep 17 00:00:00 2001
From: Barry Song
Date: Wed, 3 Mar 2021 11:49:15 +1300
Subject: genirq: Add IRQF_NO_AUTOEN for request_irq/nmi()

Many drivers don't want interrupts enabled automatically via
request_irq(). So they handle this in one of the following two ways:

(1)	irq_set_status_flags(irq, IRQ_NOAUTOEN);
	request_irq(dev, irq...);

(2)	request_irq(dev, irq...);
	disable_irq(irq);

The code in the second way is silly and unsafe: in the small time gap
between request_irq() and disable_irq(), interrupts can still come in.

The code in the first way is safe, though it's suboptimal.

Add a new IRQF_NO_AUTOEN flag which can be handed in by drivers to
request_irq() and request_nmi(). It prevents the automatic enabling of
the requested interrupt/nmi in the same safe way as #1 above. With that
the various usage sites of #1 and #2 above can be simplified and
corrected.

Signed-off-by: Barry Song
Signed-off-by: Thomas Gleixner
Signed-off-by: Ingo Molnar
Cc: dmitry.torokhov@gmail.com
Link: https://lore.kernel.org/r/20210302224916.13980-2-song.bao.hua@hisilicon.com
---
 include/linux/interrupt.h | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 967e25767153..76f1161a441a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -61,6 +61,9 @@
  *                interrupt handler after suspending interrupts. For system
  *                wakeup devices users need to implement wakeup detection in
  *                their interrupt handlers.
+ * IRQF_NO_AUTOEN - Don't enable IRQ or NMI automatically when users request it.
+ *                Users will enable it explicitly by enable_irq() or enable_nmi()
+ *                later.
  */
 #define IRQF_SHARED		0x00000080
 #define IRQF_PROBE_SHARED	0x00000100
@@ -74,6 +77,7 @@
 #define IRQF_NO_THREAD		0x00010000
 #define IRQF_EARLY_RESUME	0x00020000
 #define IRQF_COND_SUSPEND	0x00040000
+#define IRQF_NO_AUTOEN		0x00080000
 
 #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
--
cgit v1.2.3

From 3a0ade0c521a542f8a25e96ce8ea0dfaa532ac75 Mon Sep 17 00:00:00 2001
From: Davidlohr Bueso
Date: Sat, 6 Mar 2021 13:36:58 -0800
Subject: tasklet: Remove tasklet_kill_immediate

Ever since RCU was converted to softirq, tasklet_kill_immediate() has
had no users.

Signed-off-by: Davidlohr Bueso
Signed-off-by: Thomas Gleixner
Acked-by: Paul E. McKenney
Link: https://lore.kernel.org/r/20210306213658.12862-1-dave@stgolabs.net
---
 include/linux/interrupt.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 76f1161a441a..2b98156ec707 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -716,7 +716,6 @@ static inline void tasklet_enable(struct tasklet_struct *t)
 }
 
 extern void tasklet_kill(struct tasklet_struct *t);
-extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
 extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
 extern void tasklet_setup(struct tasklet_struct *t,
--
cgit v1.2.3

From d2da74d1278a1b51ef18beafa9da770f0db1c617 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 9 Mar 2021 09:42:04 +0100
Subject: tasklets: Replace barrier() with cpu_relax() in tasklet_unlock_wait()

A barrier() in a tight loop which waits for something to happen on a
remote CPU is a pointless exercise. Replace it with cpu_relax(), which
allows HT siblings to make progress.
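
For illustration: barrier() is only a compiler barrier, whereas
cpu_relax() additionally emits an architecture-specific spin-wait hint
(the PAUSE instruction on x86) that lets an SMT sibling make use of the
core's execution resources while we wait:

	/* before: compiler barrier only, the CPU spins at full tilt */
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }

	/* after: also tells the CPU this is a spin-wait loop */
	while (test_bit(TASKLET_STATE_RUN, &t->state))
		cpu_relax();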

Signed-off-by: Thomas Gleixner
Tested-by: Sebastian Andrzej Siewior
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210309084241.249343366@linutronix.de
---
 include/linux/interrupt.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 2b98156ec707..d689fd738152 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -672,7 +672,8 @@ static inline void tasklet_unlock(struct tasklet_struct *t)
 
 static inline void tasklet_unlock_wait(struct tasklet_struct *t)
 {
-	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
+	while (test_bit(TASKLET_STATE_RUN, &t->state))
+		cpu_relax();
 }
 #else
 #define tasklet_trylock(t) 1
--
cgit v1.2.3

From 6951547a1399c8f56468ed93bea8f769b891aec3 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 9 Mar 2021 09:42:05 +0100
Subject: tasklets: Use static inlines for stub implementations

Inlines exist for a reason.

Signed-off-by: Thomas Gleixner
Tested-by: Sebastian Andrzej Siewior
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210309084241.407702697@linutronix.de
---
 include/linux/interrupt.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index d689fd738152..0a4ce25c1464 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -676,9 +676,9 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
 	cpu_relax();
 }
 #else
-#define tasklet_trylock(t) 1
-#define tasklet_unlock_wait(t) do { } while (0)
-#define tasklet_unlock(t) do { } while (0)
+static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
+static inline void tasklet_unlock(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
 #endif
 
 extern void __tasklet_schedule(struct tasklet_struct *t);
--
cgit v1.2.3

From ca5f625118955fc544c3cb3dee7055d33ecadafb Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 9 Mar 2021 09:42:06 +0100
Subject: tasklets: Provide tasklet_disable_in_atomic()

Replacing the spin wait loops in tasklet_unlock_wait() with
wait_var_event() is not possible as a handful of tasklet_disable()
invocations are happening in atomic context. All other invocations are
in teardown paths which can sleep.

Provide tasklet_disable_in_atomic() and tasklet_unlock_spin_wait() to
convert the few atomic use cases over, which allows tasklet_disable()
and tasklet_unlock_wait() to be changed in a later step.

Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210309084241.563164193@linutronix.de
---
 include/linux/interrupt.h | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0a4ce25c1464..3c8a29176258 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -675,10 +675,21 @@ static inline void tasklet_unlock_wait(struct tasklet_struct *t)
 	while (test_bit(TASKLET_STATE_RUN, &t->state))
 		cpu_relax();
 }
+
+/*
+ * Do not use in new code. Waiting for tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
+{
+	while (test_bit(TASKLET_STATE_RUN, &t->state))
+		cpu_relax();
+}
 #else
 static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
 static inline void tasklet_unlock(struct tasklet_struct *t) { }
 static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
+static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
 #endif
 
 extern void __tasklet_schedule(struct tasklet_struct *t);
@@ -703,6 +714,17 @@ static inline void tasklet_disable_nosync(struct tasklet_struct *t)
 	smp_mb__after_atomic();
 }
 
+/*
+ * Do not use in new code. Disabling tasklets from atomic contexts is
+ * error prone and should be avoided.
+ */
+static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
+{
+	tasklet_disable_nosync(t);
+	tasklet_unlock_spin_wait(t);
+	smp_mb();
+}
+
 static inline void tasklet_disable(struct tasklet_struct *t)
 {
 	tasklet_disable_nosync(t);
--
cgit v1.2.3

From b0cd02c2a9494dbf0a1cc7dc7a3b8b400c158d37 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 9 Mar 2021 09:42:07 +0100
Subject: tasklets: Use spin wait in tasklet_disable() temporarily

To ease the transition, use spin waiting in tasklet_disable() until all
usage sites from atomic context have been cleaned up.

Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210309084241.685352806@linutronix.de
---
 include/linux/interrupt.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 3c8a29176258..b7f00121f124 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -728,7 +728,8 @@ static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
 static inline void tasklet_disable(struct tasklet_struct *t)
 {
 	tasklet_disable_nosync(t);
-	tasklet_unlock_wait(t);
+	/* Spin wait until all atomic users are converted */
+	tasklet_unlock_spin_wait(t);
 	smp_mb();
 }
--
cgit v1.2.3

From da044747401fc16202e223c9da970ed4e84fd84d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 9 Mar 2021 09:42:08 +0100
Subject: tasklets: Replace spin wait in tasklet_unlock_wait()

tasklet_unlock_wait() spin waits for TASKLET_STATE_RUN to be cleared.
This wastes CPU cycles in a tight loop, which is especially painful in
a guest when the CPU running the tasklet is scheduled out.

tasklet_unlock_wait() is invoked from tasklet_kill(), which is used in
teardown paths and not performance critical at all. Replace the spin
wait with wait_var_event().

There are no users of tasklet_unlock_wait() which are invoked from
atomic contexts. The usage in tasklet_disable() has been temporarily
replaced with the spin waiting variant; it will be converted to the
sleep wait variant later, once the atomic users are fixed up.
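
The corresponding out-of-line implementations land in kernel/softirq.c
(outside the include/linux scope of this log); they look roughly like:

	void tasklet_unlock(struct tasklet_struct *t)
	{
		/* Pairs with the acquire in tasklet_trylock() */
		smp_mb__before_atomic();
		clear_bit(TASKLET_STATE_RUN, &t->state);
		smp_mb__after_atomic();
		/* Wake up a possible tasklet_unlock_wait() sleeper */
		wake_up_var(&t->state);
	}

	void tasklet_unlock_wait(struct tasklet_struct *t)
	{
		/* Sleeps instead of burning cycles in a spin loop */
		wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
	}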

Signed-off-by: Peter Zijlstra
Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210309084241.783936921@linutronix.de
---
 include/linux/interrupt.h | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index b7f00121f124..b50be4fbbc98 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -664,17 +664,8 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
-static inline void tasklet_unlock(struct tasklet_struct *t)
-{
-	smp_mb__before_atomic();
-	clear_bit(TASKLET_STATE_RUN, &(t)->state);
-}
-
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
-	while (test_bit(TASKLET_STATE_RUN, &t->state))
-		cpu_relax();
-}
+void tasklet_unlock(struct tasklet_struct *t);
+void tasklet_unlock_wait(struct tasklet_struct *t);
 
 /*
  * Do not use in new code. Waiting for tasklets from atomic contexts is
--
cgit v1.2.3

From eb2dafbba8b824ee77f166629babd470dd0b1c0a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 9 Mar 2021 09:42:10 +0100
Subject: tasklets: Prevent tasklet_unlock_spin_wait() deadlock on RT

tasklet_unlock_spin_wait() spin waits for the TASKLET_STATE_RUN bit in
the tasklet state to be cleared. This works nicely on !RT because the
corresponding execution can only happen on a different CPU. On RT,
softirq processing is preemptible, therefore a task preempting the
softirq processing thread can spin forever.

Prevent this by invoking local_bh_disable()/enable() inside the loop.
If the softirq processing thread was preempted by the current task,
current will block on the local lock, which yields the CPU to the
preempted softirq processing thread. If the tasklet is processed on a
different CPU, then the local_bh_disable()/enable() pair is just a
waste of processor cycles.

Signed-off-by: Thomas Gleixner
Tested-by: Sebastian Andrzej Siewior
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210309084241.988908275@linutronix.de
---
 include/linux/interrupt.h | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index b50be4fbbc98..352db93c2eed 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -658,7 +658,7 @@ enum
 	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
 };
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
@@ -666,16 +666,8 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
 
 void tasklet_unlock(struct tasklet_struct *t);
 void tasklet_unlock_wait(struct tasklet_struct *t);
+void tasklet_unlock_spin_wait(struct tasklet_struct *t);
 
-/*
- * Do not use in new code. Waiting for tasklets from atomic contexts is
- * error prone and should be avoided.
- */
-static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
-{
-	while (test_bit(TASKLET_STATE_RUN, &t->state))
-		cpu_relax();
-}
 #else
 static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
 static inline void tasklet_unlock(struct tasklet_struct *t) { }
--
cgit v1.2.3

From 6fd4e861250b5c89ad460a9f265caeb1bbbfc323 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 9 Mar 2021 09:42:17 +0100
Subject: tasklets: Switch tasklet_disable() to the sleep wait variant

 -- NOT FOR IMMEDIATE MERGING --

Now that all users of tasklet_disable() are invoked from sleepable
context, convert it to use tasklet_unlock_wait(), which might sleep.

Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210309084242.726452321@linutronix.de
---
 include/linux/interrupt.h | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 352db93c2eed..4777850a6dc7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -711,8 +711,7 @@ static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
 static inline void tasklet_disable(struct tasklet_struct *t)
 {
 	tasklet_disable_nosync(t);
-	/* Spin wait until all atomic users are converted */
-	tasklet_unlock_spin_wait(t);
+	tasklet_unlock_wait(t);
 	smp_mb();
 }
--
cgit v1.2.3

From 728b478d2d358480b333b42d0e10e0fecb20114c Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 9 Mar 2021 09:55:53 +0100
Subject: softirq: Add RT specific softirq accounting

RT requires the softirq processing and local bottom half disabled
regions to be preemptible. Using the normal preempt count based
serialization is therefore not possible because this implicitly
disables preemption.

RT kernels use a per CPU local lock to serialize bottom halves. As
local_bh_disable() can nest, the lock can only be acquired on the
outermost invocation of local_bh_disable() and released when the nest
count becomes zero. Tasks which hold the local lock can be preempted,
so it's required to keep track of the nest count per task.

Add an RT-only counter to task struct and adjust the relevant macros in
preempt.h.

Signed-off-by: Thomas Gleixner
Tested-by: Sebastian Andrzej Siewior
Tested-by: Paul E. McKenney
Reviewed-by: Frederic Weisbecker
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210309085726.983627589@linutronix.de
---
 include/linux/hardirq.h | 1 +
 include/linux/preempt.h | 6 +++++-
 include/linux/sched.h   | 3 +++
 3 files changed, 9 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 7c9d6a2d7e90..69bc86ea382c 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -6,6 +6,7 @@
 #include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/ftrace_irq.h>
+#include <linux/sched.h>
 #include <linux/vtime.h>
 #include <asm/hardirq.h>
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 69cc8b64aa3a..9881eac0698f 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -79,7 +79,11 @@
 #define nmi_count()	(preempt_count() & NMI_MASK)
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
-#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+#ifdef CONFIG_PREEMPT_RT
+# define softirq_count()	(current->softirq_disable_cnt & SOFTIRQ_MASK)
+#else
+# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+#endif
 #define irq_count()	(nmi_count() | hardirq_count() | softirq_count())
 
 /*
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ef00bb22164c..743a613c9cf3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1044,6 +1044,9 @@ struct task_struct {
 	int				softirq_context;
 	int				irq_config;
 #endif
+#ifdef CONFIG_PREEMPT_RT
+	int				softirq_disable_cnt;
+#endif
 
 #ifdef CONFIG_LOCKDEP
 # define MAX_LOCK_DEPTH			48UL
--
cgit v1.2.3

From 8b1c04acad082dec76f3f8f7e1fa13493d6cbb79 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 9 Mar 2021 09:55:56 +0100
Subject: softirq: Make softirq control and processing RT aware

Provide a local lock based serialization for soft interrupts on RT,
which allows the local_bh_disable() protected sections and the
servicing of soft interrupts to be preemptible.

Provide the necessary inline helpers which allow reusing the bulk of
the softirq processing code.

Signed-off-by: Thomas Gleixner
Tested-by: Sebastian Andrzej Siewior
Tested-by: Paul E. McKenney
Reviewed-by: Frederic Weisbecker
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210309085727.426370483@linutronix.de
---
 include/linux/bottom_half.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index a19519f4241d..e4dd613a070e 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -4,7 +4,7 @@
 
 #include <linux/preempt.h>
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_PREEMPT_RT) || defined(CONFIG_TRACE_IRQFLAGS)
 extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
 #else
 static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
--
cgit v1.2.3

From 47c218dcae6587fb5bce30f1656b13e22391c8e3 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 9 Mar 2021 09:55:57 +0100
Subject: tick/sched: Prevent false positive softirq pending warnings on RT

On RT, a task which has soft interrupts disabled can block on a lock
and schedule out to idle while soft interrupts are pending. This
triggers the warning in the NOHZ idle code, which complains about going
idle with pending soft interrupts. But as the task is blocked, soft
interrupt processing is temporarily blocked as well, which means that
such a warning is a false positive.

To prevent that, check the per-CPU state which indicates that a
scheduled out task has soft interrupts disabled.
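
On RT, that per-CPU state boils down to the nest count of the per CPU
bottom half lock; a rough sketch of the helper (the real implementation
lives in kernel/softirq.c, outside the include/linux scope of this log,
with softirq_ctrl being the per CPU control structure from the previous
patch):

	bool local_bh_blocked(void)
	{
		/* Non-zero when a (possibly scheduled out) task holds the BH lock */
		return __this_cpu_read(softirq_ctrl.cnt) != 0;
	}

The NOHZ idle check then gains a !local_bh_blocked() clause before
warning about local_softirq_pending().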

Signed-off-by: Thomas Gleixner
Tested-by: Sebastian Andrzej Siewior
Tested-by: Paul E. McKenney
Reviewed-by: Frederic Weisbecker
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210309085727.527563866@linutronix.de
---
 include/linux/bottom_half.h | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h
index e4dd613a070e..eed86eb0a1de 100644
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
@@ -32,4 +32,10 @@ static inline void local_bh_enable(void)
 	__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
 }
 
+#ifdef CONFIG_PREEMPT_RT
+extern bool local_bh_blocked(void);
+#else
+static inline bool local_bh_blocked(void) { return false; }
+#endif
+
 #endif /* _LINUX_BH_H */
--
cgit v1.2.3

From ba9e6cab49c1465c2c322dcb03d771d5cbecb692 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 9 Mar 2021 09:55:58 +0100
Subject: rcu: Prevent false positive softirq warning on RT

On RT enabled kernels, soft interrupt disabled sections can
legitimately be preempted, or can schedule out when blocking on a lock,
so the RCU preempt check warning has to be disabled for RT kernels.

Signed-off-by: Thomas Gleixner
Tested-by: Sebastian Andrzej Siewior
Tested-by: Paul E. McKenney
Reviewed-by: Paul E. McKenney
Reviewed-by: Frederic Weisbecker
Acked-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/r/20210309085727.626304079@linutronix.de
---
 include/linux/rcupdate.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'include/linux')

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index bd04f722714f..6d855ef091ba 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -334,7 +334,8 @@ static inline void rcu_preempt_sleep_check(void) { }
 #define rcu_sleep_check()						\
 	do {								\
 		rcu_preempt_sleep_check();				\
-		RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map),	\
+		if (!IS_ENABLED(CONFIG_PREEMPT_RT))			\
+			RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map), \
 				 "Illegal context switch in RCU-bh read-side critical section"); \
 		RCU_LOCKDEP_WARN(lock_is_held(&rcu_sched_lock_map),	\
 				 "Illegal context switch in RCU-sched read-side critical section"); \
--
cgit v1.2.3

From a359f757965aafd0f58570de95dc6bc06cf12a9c Mon Sep 17 00:00:00 2001
From: Ingo Molnar
Date: Mon, 22 Mar 2021 04:21:30 +0100
Subject: irq: Fix typos in comments

Fix ~36 single-word typos in the IRQ, irqchip and irqdomain code
comments.

Signed-off-by: Ingo Molnar
Cc: Thomas Gleixner
Cc: Marc Zyngier
Cc: Borislav Petkov
Cc: Peter Zijlstra
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar
---
 include/linux/irq.h     | 4 ++--
 include/linux/irqdesc.h | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/irq.h b/include/linux/irq.h
index 2efde6a79b7e..bee82809107c 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -116,7 +116,7 @@ enum {
  * IRQ_SET_MASK_NOCPY	- OK, chip did update irq_common_data.affinity
  * IRQ_SET_MASK_OK_DONE	- Same as IRQ_SET_MASK_OK for core. Special code to
  *			  support stacked irqchips, which indicates skipping
- *			  all descendent irqchips.
+ *			  all descendant irqchips.
  */
 enum {
 	IRQ_SET_MASK_OK = 0,
@@ -302,7 +302,7 @@ static inline bool irqd_is_level_type(struct irq_data *d)
 
 /*
  * Must only be called of irqchip.irq_set_affinity() or low level
- * hieararchy domain allocation functions.
+ * hierarchy domain allocation functions.
  */
 static inline void irqd_set_single_target(struct irq_data *d)
 {
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 891b323266df..df4651250785 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -32,7 +32,7 @@ struct pt_regs;
  * @last_unhandled:	aging timer for unhandled count
  * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @threads_handled:	stats field for deferred spurious detection of threaded handlers
- * @threads_handled_last: comparator field for deferred spurious detection of theraded handlers
+ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
  * @lock:		locking for SMP
  * @affinity_hint:	hint to user space for preferred irq affinity
  * @affinity_notify:	context for notification of affinity changes
--
cgit v1.2.3

From 4a35d6a03744ded782c9301f5f5d78ad68ce680f Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Wed, 7 Apr 2021 13:17:10 +0100
Subject: irqdomain: Get rid of irq_create_identity_mapping()

The sole user of irq_create_identity_mapping() having been converted,
get rid of the unused helper.

Signed-off-by: Marc Zyngier
---
 include/linux/irqdomain.h | 6 ------
 1 file changed, 6 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 33cacc8af26d..d2c61de208a8 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -419,12 +419,6 @@ extern int irq_create_strict_mappings(struct irq_domain *domain,
 					unsigned int irq_base,
 					irq_hw_number_t hwirq_base, int count);
 
-static inline int irq_create_identity_mapping(struct irq_domain *host,
-					      irq_hw_number_t hwirq)
-{
-	return irq_create_strict_mappings(host, hwirq, hwirq, 1);
-}
-
 extern const struct irq_domain_ops irq_domain_simple_ops;
 
 /* stock xlate functions */
--
cgit v1.2.3

From 46135d6f878ab00261d4a2082d620bfb41019aab Mon Sep 17 00:00:00 2001
From: Lorenzo Pieralisi
Date: Wed, 17 Mar 2021 10:07:19 +0000
Subject: irqchip/gic-v4.1: Disable vSGI upon (GIC CPUIF < v4.1) detection

GIC CPU interface versions predating GIC v4.1 were not built to
accommodate vINTID within the vSGI range; as reported in the GIC
specifications (8.2 "Changes to the CPU interface"), it is CONSTRAINED
UNPREDICTABLE to deliver a vSGI to a PE with ID_AA64PFR0_EL1.GIC <
b0011.

Check the GIC CPUIF version by reading SYS_ID_AA64PFR0_EL1. Disable
vSGIs if a CPUIF version < 4.1 is detected, to prevent using vSGIs on
systems where they may misbehave.

Signed-off-by: Lorenzo Pieralisi
Cc: Marc Zyngier
Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20210317100719.3331-2-lorenzo.pieralisi@arm.com
---
 include/linux/irqchip/arm-gic-v4.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'include/linux')

diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index 943c3411ca10..2c63375bbd43 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -145,4 +145,6 @@ int its_init_v4(struct irq_domain *domain,
 		const struct irq_domain_ops *vpe_ops,
 		const struct irq_domain_ops *sgi_ops);
 
+bool gic_cpuif_has_vsgi(void);
+
 #endif
--
cgit v1.2.3

From 1a0b05e435544cd53cd3936bdab425d88784b71a Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 2 Apr 2021 16:02:37 +0100
Subject: irqdomain: Get rid of irq_create_strict_mappings()

No user of this helper is left, remove it.

Signed-off-by: Marc Zyngier
---
 include/linux/irqdomain.h | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'include/linux')

diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index d2c61de208a8..7a1dd7b969b6 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -415,9 +415,6 @@ static inline unsigned int irq_linear_revmap(struct irq_domain *domain,
 extern unsigned int irq_find_mapping(struct irq_domain *host,
 				     irq_hw_number_t hwirq);
 extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
-extern int irq_create_strict_mappings(struct irq_domain *domain,
-				      unsigned int irq_base,
-				      irq_hw_number_t hwirq_base, int count);
 
 extern const struct irq_domain_ops irq_domain_simple_ops;
--
cgit v1.2.3
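
For reference, a minimal driver-side sketch of the IRQF_NO_AUTOEN
pattern introduced by the first patch in this log (driver name, IRQ
wiring and probe flow are hypothetical):

	#include <linux/interrupt.h>
	#include <linux/platform_device.h>

	static irqreturn_t foo_irq_handler(int irq, void *dev_id)
	{
		/* handle the interrupt */
		return IRQ_HANDLED;
	}

	static int foo_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);
		int ret;

		if (irq < 0)
			return irq;

		/* IRQ stays disabled; no window where an early interrupt can fire */
		ret = request_irq(irq, foo_irq_handler, IRQF_NO_AUTOEN, "foo", pdev);
		if (ret)
			return ret;

		/* ... complete device setup ... */

		enable_irq(irq);	/* enable explicitly once the device is ready */
		return 0;
	}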