Diffstat (limited to 'include/linux/interrupt.h')
-rw-r--r-- | include/linux/interrupt.h | 85
1 file changed, 65 insertions, 20 deletions
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 55e0d4253e49..59b72ca1c5d1 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,8 @@
 #include <linux/smp.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
@@ -55,7 +57,8 @@
  *                Used by threaded interrupts which need to keep the
  *                irq line disabled until the threaded handler has been run.
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
- *
+ * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
+ * IRQF_NO_THREAD - Interrupt cannot be threaded
  */
 #define IRQF_DISABLED		0x00000020
 #define IRQF_SAMPLE_RANDOM	0x00000040
@@ -67,22 +70,10 @@
 #define IRQF_IRQPOLL		0x00001000
 #define IRQF_ONESHOT		0x00002000
 #define IRQF_NO_SUSPEND		0x00004000
+#define IRQF_FORCE_RESUME	0x00008000
+#define IRQF_NO_THREAD		0x00010000
 
-#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND)
-
-/*
- * Bits used by threaded handlers:
- * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
- * IRQTF_DIED - handler thread died
- * IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
- * IRQTF_AFFINITY - irq thread is requested to adjust affinity
- */
-enum {
-	IRQTF_RUNTHREAD,
-	IRQTF_DIED,
-	IRQTF_WARNED,
-	IRQTF_AFFINITY,
-};
+#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
 /*
  * These values can be returned by request_any_context_irq() and
@@ -110,6 +101,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @thread_fn:	interupt handler function for threaded interrupts
  * @thread:	thread pointer for threaded interrupts
  * @thread_flags:	flags related to @thread
+ * @thread_mask:	bitmask for keeping track of @thread activity
  */
 struct irqaction {
 	irq_handler_t handler;
@@ -120,6 +112,7 @@ struct irqaction {
 	irq_handler_t thread_fn;
 	struct task_struct *thread;
 	unsigned long thread_flags;
+	unsigned long thread_mask;
 	const char *name;
 	struct proc_dir_entry *dir;
 } ____cacheline_internodealigned_in_smp;
@@ -240,6 +233,35 @@ extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
+/**
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq:		Interrupt to which notification applies
+ * @kref:		Reference count, for internal use
+ * @work:		Work item, for internal use
+ * @notify:		Function to be called on change.  This will be
+ *			called in process context.
+ * @release:		Function to be called on release.  This will be
+ *			called in process context.  Once registered, the
+ *			structure must only be freed when this function is
+ *			called or later.
+ */
+struct irq_affinity_notify {
+	unsigned int irq;
+	struct kref kref;
+	struct work_struct work;
+	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+	void (*release)(struct kref *ref);
+};
+
+extern int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+
+static inline void irq_run_affinity_notifiers(void)
+{
+	flush_scheduled_work();
+}
+
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
@@ -255,7 +277,7 @@ static inline int irq_can_set_affinity(unsigned int irq) { return 0; }
 static inline int irq_select_affinity(unsigned int irq) { return 0; }
 
 static inline int irq_set_affinity_hint(unsigned int irq,
-					const struct cpumask *m)
+					const struct cpumask *m)
 {
 	return -EINVAL;
 }
@@ -314,16 +336,24 @@ static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long
 }
 
 /* IRQ wakeup (PM) control: */
-extern int set_irq_wake(unsigned int irq, unsigned int on);
+extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
+
+#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT
+/* Please do not use: Use the replacement functions instead */
+static inline int set_irq_wake(unsigned int irq, unsigned int on)
+{
+	return irq_set_irq_wake(irq, on);
+}
+#endif
 
 static inline int enable_irq_wake(unsigned int irq)
 {
-	return set_irq_wake(irq, 1);
+	return irq_set_irq_wake(irq, 1);
 }
 
 static inline int disable_irq_wake(unsigned int irq)
 {
-	return set_irq_wake(irq, 0);
+	return irq_set_irq_wake(irq, 0);
 }
 
 #else /* !CONFIG_GENERIC_HARDIRQS */
@@ -353,6 +383,13 @@ static inline int disable_irq_wake(unsigned int irq)
 {
 }
 #endif /* CONFIG_GENERIC_HARDIRQS */
+
+#ifdef CONFIG_IRQ_FORCED_THREADING
+extern bool force_irqthreads;
+#else
+#define force_irqthreads	(0)
+#endif
+
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
 #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
@@ -426,6 +463,13 @@ extern void raise_softirq(unsigned int nr);
  */
 DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
 
+DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+
+static inline struct task_struct *this_cpu_ksoftirqd(void)
+{
+	return this_cpu_read(ksoftirqd);
+}
+
 /* Try to send a softirq to a remote cpu.  If this cannot be done, the
  * work will be queued to the local cpu.
  */
@@ -645,6 +689,7 @@ static inline void init_irq_proc(void)
 
 struct seq_file;
 int show_interrupts(struct seq_file *p, void *v);
+int arch_show_interrupts(struct seq_file *p, int prec);
 
 extern int early_irq_init(void);
 extern int arch_probe_nr_irqs(void);
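The hunks above introduce the affinity-notification interface: struct irq_affinity_notify, irq_set_affinity_notifier() and irq_run_affinity_notifiers(). Below is a minimal driver-side sketch of how that interface could be consumed; it is not part of this patch, and the names my_dev, my_irq_notify, my_irq_release and the register/unregister helpers are hypothetical, chosen only to illustrate the callbacks documented in the kerneldoc comment.

/* Hypothetical driver-side sketch of the irq_affinity_notify interface
 * added above; my_dev, my_irq_notify and my_irq_release are illustrative
 * names, not part of this patch. */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kref.h>

struct my_dev {
	unsigned int irq;
	struct irq_affinity_notify affinity_notify;
};

/* Called in process context after the affinity of @irq has changed. */
static void my_irq_notify(struct irq_affinity_notify *notify,
			  const cpumask_t *mask)
{
	struct my_dev *dev = container_of(notify, struct my_dev,
					  affinity_notify);

	pr_info("irq %u affinity changed\n", dev->irq);
	/* e.g. rebalance per-CPU resources against @mask here */
}

/* Called in process context once the core drops its last reference;
 * only after this may the containing structure be freed. */
static void my_irq_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);
	struct my_dev *dev = container_of(notify, struct my_dev,
					  affinity_notify);

	pr_info("irq %u notifier released\n", dev->irq);
}

static int my_dev_register_notifier(struct my_dev *dev)
{
	dev->affinity_notify.notify = my_irq_notify;
	dev->affinity_notify.release = my_irq_release;

	return irq_set_affinity_notifier(dev->irq, &dev->affinity_notify);
}

static void my_dev_unregister_notifier(struct my_dev *dev)
{
	irq_set_affinity_notifier(dev->irq, NULL);
	irq_run_affinity_notifiers();	/* flush any pending notify work */
}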
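The new IRQF_NO_THREAD flag (which IRQF_TIMER now implies) excludes an interrupt from forced threading when force_irqthreads is enabled. A short, hypothetical request_irq() user follows; my_lowlevel_isr, my_setup_lowlevel_irq and the "my-lowlevel" name are made up for illustration.

#include <linux/interrupt.h>

/* Hypothetical handler that must stay in hard-irq context even when
 * handlers are force-threaded (CONFIG_IRQ_FORCED_THREADING). */
static irqreturn_t my_lowlevel_isr(int irq, void *dev_id)
{
	/* keep the hard-irq work minimal */
	return IRQ_HANDLED;
}

static int my_setup_lowlevel_irq(unsigned int irq, void *dev_id)
{
	/* IRQF_NO_THREAD tells the core this line must not be threaded */
	return request_irq(irq, my_lowlevel_isr, IRQF_NO_THREAD,
			   "my-lowlevel", dev_id);
}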