author     Thomas Gleixner <tglx@linutronix.de>  2020-12-10 20:26:02 +0100
committer  Thomas Gleixner <tglx@linutronix.de>  2020-12-15 16:19:35 +0100
commit     1ca1b4e2c0cbc88ce3939910ac36dca51d326fe4 (patch)
tree       374287b4be327b2c5f9fccf7c5ac865a7c322f9d
parent     67473b8194bc3ecc42d60a4f5dc1ed479f28ed6e (diff)
xen/events: Use immediate affinity setting
There is absolutely no reason to mimic the x86 deferred affinity
setting. This mechanism is required to handle the hardware induced
issues of IO/APIC and MSI and is not in use when the interrupts are
remapped.

XEN does not need this and can simply change the affinity from the
calling context. The core code invokes this with the interrupt
descriptor lock held so it is fully serialized against any other
operation.

Mark the interrupts with IRQ_MOVE_PCNTXT to disable the deferred
affinity setting. The conditional mask/unmask operation is already
handled in xen_rebind_evtchn_to_cpu().

This makes XEN on x86 use the same mechanics as on e.g. ARM64 where
deferred affinity setting is not required and not implemented and the
code path in the ack functions is compiled out.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Link: https://lore.kernel.org/r/20201210194045.157601122@linutronix.de
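To see why IRQ_MOVE_PCNTXT is sufficient here: the generic core takes
the immediate path whenever the interrupt may be moved from process
context, and only otherwise parks the request for the ack path. A
condensed sketch of the dispatch in irq_set_affinity_locked() from
kernel/irq/manage.c (chip checks, affinity notifiers and error paths
stripped; not the verbatim source):

	/*
	 * Condensed sketch: how the core picks immediate vs. deferred
	 * affinity setting. Runs with the irq descriptor lock held.
	 */
	int irq_set_affinity_locked(struct irq_data *data,
				    const struct cpumask *mask, bool force)
	{
		struct irq_desc *desc = irq_data_to_desc(data);

		if (irq_can_move_pcntxt(data)) {
			/* IRQ_MOVE_PCNTXT set: move right here. */
			return irq_try_set_affinity(data, mask, force);
		}

		/*
		 * Otherwise park the request; it is replayed from the
		 * interrupt ack path via irq_move_masked_irq().
		 */
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
		return 0;
	}

With the flag set, the Xen chip's set_affinity_irq() callback runs
synchronously from the caller, fully serialized by the descriptor
lock, and the replay logic in the ack functions below becomes dead
code.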
-rw-r--r--  drivers/xen/events/events_base.c | 35 +++++++++--------------------------
1 file changed, 9 insertions(+), 26 deletions(-)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 9cade1994785..eaba42a89c8f 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -628,6 +628,11 @@ static void xen_irq_init(unsigned irq)
 	info->refcnt = -1;
 
 	set_info_for_irq(irq, info);
+	/*
+	 * Interrupt affinity setting can be immediate. No point
+	 * in delaying it until an interrupt is handled.
+	 */
+	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
 
 	INIT_LIST_HEAD(&info->eoi_list);
 	list_add_tail(&info->list, &xen_irq_list_head);
@@ -739,18 +744,7 @@ static void eoi_pirq(struct irq_data *data)
 	if (!VALID_EVTCHN(evtchn))
 		return;
 
-	if (unlikely(irqd_is_setaffinity_pending(data)) &&
-	    likely(!irqd_irq_disabled(data))) {
-		int masked = test_and_set_mask(evtchn);
-
-		clear_evtchn(evtchn);
-
-		irq_move_masked_irq(data);
-
-		if (!masked)
-			unmask_evtchn(evtchn);
-	} else
-		clear_evtchn(evtchn);
+	clear_evtchn(evtchn);
 
 	if (pirq_needs_eoi(data->irq)) {
 		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
@@ -1641,7 +1635,6 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
 	mutex_unlock(&irq_mapping_update_lock);
 
 	bind_evtchn_to_cpu(evtchn, info->cpu);
-	/* This will be deferred until interrupt is processed */
 	irq_set_affinity(irq, cpumask_of(info->cpu));
 
 	/* Unmask the event channel. */
@@ -1688,8 +1681,9 @@ static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
 			    bool force)
 {
 	unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
-	int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
+	int ret;
 
+	ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
 	if (!ret)
 		irq_data_update_effective_affinity(data, cpumask_of(tcpu));
 
@@ -1719,18 +1713,7 @@ static void ack_dynirq(struct irq_data *data)
 	if (!VALID_EVTCHN(evtchn))
 		return;
 
-	if (unlikely(irqd_is_setaffinity_pending(data)) &&
-	    likely(!irqd_irq_disabled(data))) {
-		int masked = test_and_set_mask(evtchn);
-
-		clear_evtchn(evtchn);
-
-		irq_move_masked_irq(data);
-
-		if (!masked)
-			unmask_evtchn(evtchn);
-	} else
-		clear_evtchn(evtchn);
+	clear_evtchn(evtchn);
 }
 
 static void mask_ack_dynirq(struct irq_data *data)
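For reference, the conditional mask/unmask that the deleted ack-path
code duplicated is handled inside xen_rebind_evtchn_to_cpu(). A
simplified sketch of that function as it stood around this change
(guard clauses condensed; not the verbatim source):

	static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
	{
		struct evtchn_bind_vcpu bind_vcpu;
		int masked;

		if (!VALID_EVTCHN(evtchn) || !xen_support_evtchn_rebind())
			return -1;

		/* Send future instances of this interrupt to the target vcpu. */
		bind_vcpu.port = evtchn;
		bind_vcpu.vcpu = xen_vcpu_nr(tcpu);

		/*
		 * Mask the event channel while rebinding so it cannot be
		 * delivered on an unexpected vCPU mid-operation.
		 */
		masked = test_and_set_mask(evtchn);

		/*
		 * A failure usually just means a virq or IPI channel,
		 * which does not need to be rebound.
		 */
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
			bind_evtchn_to_cpu(evtchn, tcpu);

		if (!masked)
			unmask_evtchn(evtchn);

		return 0;
	}

Because the mask/unmask happens here, under the same serialization as
the rest of the affinity change, the ack functions can be reduced to a
plain clear_evtchn().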