path: root/arch/powerpc/platforms/powernv/opal-irqchip.c
author	Nicholas Piggin <npiggin@gmail.com>	2018-05-11 03:20:05 +1000
committer	Michael Ellerman <mpe@ellerman.id.au>	2018-06-03 20:40:30 +1000
commit	56c0b48b1e443efa5d6f4d60513302c934e55b17 (patch)
tree	95ae0d148361c2a1eac119173585d3475ddba936 /arch/powerpc/platforms/powernv/opal-irqchip.c
parent	ee03b9b4479d1302d01cebedda3518dc967697b7 (diff)
powerpc/powernv: process all OPAL event interrupts with kopald
Using irq_work for processing OPAL event interrupts is not necessary. irq_work is typically used to schedule work from NMI context; a softirq may be more appropriate. However, OPAL events are not particularly performance or latency critical, so they can all be invoked by kopald.

This patch removes the irq_work queueing and instead wakes up kopald when there is an event to be processed. kopald processes interrupts individually, enabling irqs and calling cond_resched between each one to minimise latencies.

Event handlers themselves should still use threaded handlers, workqueues, etc. as necessary to avoid high interrupts-off latencies within any single interrupt.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
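As a rough illustration of the wake-the-poller pattern the commit message describes, here is a minimal userspace sketch using pthreads: the interrupt side publishes a pending-event mask and wakes a dedicated poller thread, which then drains the events one at a time and can yield between them. All names here (pending_events, wake_poller, poller_main) are invented for the sketch; this is an analogue, not the kernel's kopald code, and sched_yield() merely stands in for cond_resched().

/* Userspace analogue of "wake the poller" -- illustrative only. */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t pending_events;
static pthread_mutex_t poller_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t poller_wake = PTHREAD_COND_INITIALIZER;

/* "Interrupt" side: record the events, then wake the poller thread. */
static void wake_poller(uint64_t events)
{
	atomic_fetch_or(&pending_events, events);
	pthread_mutex_lock(&poller_lock);
	pthread_cond_signal(&poller_wake);
	pthread_mutex_unlock(&poller_lock);
}

/* Poller side: sleep until woken, claim the pending mask, then handle
 * each event individually, yielding in between (the kernel calls
 * cond_resched() at this point to keep scheduling latencies down). */
static void *poller_main(void *arg)
{
	uint64_t e;

	(void)arg;
	pthread_mutex_lock(&poller_lock);
	while (atomic_load(&pending_events) == 0)
		pthread_cond_wait(&poller_wake, &poller_lock);
	pthread_mutex_unlock(&poller_lock);

	e = atomic_exchange(&pending_events, 0);
	for (int bit = 0; bit < 64; bit++) {
		if (!(e & (1ULL << bit)))
			continue;
		printf("handling event %d\n", bit);
		sched_yield();	/* stand-in for cond_resched() */
	}
	return NULL;	/* kopald loops forever; one pass is enough here */
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, poller_main, NULL);
	wake_poller((1ULL << 5) | (1ULL << 17));
	pthread_join(tid, NULL);
	return 0;
}

In the patch below, the corresponding wakeup is opal_wake_poller(), and the drain loop runs in opal_handle_events(), which additionally disables local irqs around each generic_handle_irq() call.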
Diffstat (limited to 'arch/powerpc/platforms/powernv/opal-irqchip.c')
-rw-r--r--	arch/powerpc/platforms/powernv/opal-irqchip.c	87
1 file changed, 39 insertions, 48 deletions
diff --git a/arch/powerpc/platforms/powernv/opal-irqchip.c b/arch/powerpc/platforms/powernv/opal-irqchip.c
index 05ffe05f0fdc..605c7e5d52c2 100644
--- a/arch/powerpc/platforms/powernv/opal-irqchip.c
+++ b/arch/powerpc/platforms/powernv/opal-irqchip.c
@@ -22,7 +22,6 @@
 #include <linux/kthread.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
-#include <linux/irq_work.h>
 
 #include <asm/machdep.h>
 #include <asm/opal.h>
@@ -38,37 +37,47 @@ struct opal_event_irqchip {
 	unsigned long mask;
 };
 static struct opal_event_irqchip opal_event_irqchip;
-
+static u64 last_outstanding_events;
 static unsigned int opal_irq_count;
 static unsigned int *opal_irqs;
 
-static void opal_handle_irq_work(struct irq_work *work);
-static u64 last_outstanding_events;
-static struct irq_work opal_event_irq_work = {
-	.func = opal_handle_irq_work,
-};
-
-void opal_handle_events(uint64_t events)
+void opal_handle_events(void)
 {
-	int virq, hwirq = 0;
-	u64 mask = opal_event_irqchip.mask;
+	__be64 events = 0;
+	u64 e;
+
+	e = READ_ONCE(last_outstanding_events) & opal_event_irqchip.mask;
+again:
+	while (e) {
+		int virq, hwirq;
+
+		hwirq = fls64(e) - 1;
+		e &= ~BIT_ULL(hwirq);
+
+		local_irq_disable();
+		virq = irq_find_mapping(opal_event_irqchip.domain, hwirq);
+		if (virq) {
+			irq_enter();
+			generic_handle_irq(virq);
+			irq_exit();
+		}
+		local_irq_enable();
 
-	if (!in_irq() && (events & mask)) {
-		last_outstanding_events = events;
-		irq_work_queue(&opal_event_irq_work);
-		return;
+		cond_resched();
 	}
+	last_outstanding_events = 0;
+	if (opal_poll_events(&events) != OPAL_SUCCESS)
+		return;
+	e = be64_to_cpu(events) & opal_event_irqchip.mask;
+	if (e)
+		goto again;
+}
 
-	while (events & mask) {
-		hwirq = fls64(events) - 1;
-		if (BIT_ULL(hwirq) & mask) {
-			virq = irq_find_mapping(opal_event_irqchip.domain,
-						hwirq);
-			if (virq)
-				generic_handle_irq(virq);
-		}
-		events &= ~BIT_ULL(hwirq);
-	}
+bool opal_have_pending_events(void)
+{
+	if (last_outstanding_events & opal_event_irqchip.mask)
+		return true;
+	return false;
 }
 
 static void opal_event_mask(struct irq_data *d)
@@ -78,24 +87,9 @@ static void opal_event_mask(struct irq_data *d)
 
 static void opal_event_unmask(struct irq_data *d)
 {
-	__be64 events;
-
 	set_bit(d->hwirq, &opal_event_irqchip.mask);
-
-	opal_poll_events(&events);
-	last_outstanding_events = be64_to_cpu(events);
-
-	/*
-	 * We can't just handle the events now with opal_handle_events().
-	 * If we did we would deadlock when opal_event_unmask() is called from
-	 * handle_level_irq() with the irq descriptor lock held, because
-	 * calling opal_handle_events() would call generic_handle_irq() and
-	 * then handle_level_irq() which would try to take the descriptor lock
-	 * again. Instead queue the events for later.
-	 */
-	if (last_outstanding_events & opal_event_irqchip.mask)
-		/* Need to retrigger the interrupt */
-		irq_work_queue(&opal_event_irq_work);
+	if (opal_have_pending_events())
+		opal_wake_poller();
 }
 
 static int opal_event_set_type(struct irq_data *d, unsigned int flow_type)
@@ -136,16 +130,13 @@ static irqreturn_t opal_interrupt(int irq, void *data)
 	__be64 events;
 
 	opal_handle_interrupt(virq_to_hw(irq), &events);
-	opal_handle_events(be64_to_cpu(events));
+	last_outstanding_events = be64_to_cpu(events);
+	if (opal_have_pending_events())
+		opal_wake_poller();
 
 	return IRQ_HANDLED;
 }
 
-static void opal_handle_irq_work(struct irq_work *work)
-{
-	opal_handle_events(last_outstanding_events);
-}
-
 static int opal_event_match(struct irq_domain *h, struct device_node *node,
 			    enum irq_domain_bus_token bus_token)
 {
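One detail worth noting in the new opal_handle_events() above: it drains the event mask highest bit first, finding the top set bit with fls64(e) - 1 and clearing it with BIT_ULL(). Here is a small userspace check of that idiom; fls64() and BIT_ULL() are kernel helpers, so the definitions below are stand-ins for illustration only.

/* Userspace check of the highest-bit-first drain idiom. */
#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))

/* fls64(): 1-based index of the most significant set bit; 0 if none. */
static int fls64(uint64_t x)
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	uint64_t e = BIT_ULL(38) | BIT_ULL(9) | BIT_ULL(0);

	while (e) {
		int hwirq = fls64(e) - 1;

		e &= ~BIT_ULL(hwirq);
		printf("hwirq %d\n", hwirq);	/* prints 38, then 9, then 0 */
	}
	return 0;
}

The kernel loop walks the mask the same way, but disables local irqs around each generic_handle_irq() call and calls cond_resched() between events, per the commit message.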