| author | Michael Kelley <mikelley@microsoft.com> | 2021-03-02 13:38:18 -0800 |
|---|---|---|
| committer | Wei Liu <wei.liu@kernel.org> | 2021-03-08 17:33:00 +0000 |
| commit | d608715d4771cf2d63de07a5d7b026b6f52a70a5 (patch) | |
| tree | 73a567821ae3e494a4df3c3f08deffb6b3d8afdc /drivers/hv | |
| parent | 946f4b8680b8ad177f6489e023a1d95e82d502e2 (diff) | |
Drivers: hv: vmbus: Move handling of VMbus interrupts
VMbus interrupts are most naturally modelled as per-cpu IRQs. But
because x86/x64 doesn't have per-cpu IRQs, the core VMbus interrupt
handling machinery is done in code under arch/x86 and Linux IRQs are
not used. Adding support for ARM64 means adding equivalent code
using per-cpu IRQs under arch/arm64.
A better model is to treat per-cpu IRQs as the normal path (which it is
for modern architectures), and the x86/x64 path as the exception. Do this
by incorporating standard Linux per-cpu IRQ allocation into the main VMbus
driver, and bypassing it in the x86/x64 exception case. For x86/x64,
special case code is retained under arch/x86, but no VMbus interrupt
handling code is needed under arch/arm64.
No functional change.
Signed-off-by: Michael Kelley <mikelley@microsoft.com>
Reviewed-by: Boqun Feng <boqun.feng@gmail.com>
Link: https://lore.kernel.org/r/1614721102-2241-7-git-send-email-mikelley@microsoft.com
Signed-off-by: Wei Liu <wei.liu@kernel.org>
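In outline, the patch turns the interrupt attach decision into a runtime check inside the VMbus driver itself. The following condensed sketch shows that decision; the identifiers vmbus_irq, vmbus_evt, vmbus_isr(), vmbus_percpu_isr() and hv_setup_vmbus_handler() come from the diff below, while the wrapper name vmbus_attach_interrupt() is invented here purely for illustration, and the surrounding SynIC setup and error paths are omitted:

    /*
     * Sketch only: choose the interrupt attach path at init time.
     * vmbus_irq == -1  -> x86/x64: arch code wires the hypervisor
     *                     callback vector directly to vmbus_isr().
     * vmbus_irq != -1  -> ARM64 and similar: use a standard Linux
     *                     per-cpu IRQ for the same handler body.
     */
    static long __percpu *vmbus_evt;   /* per-cpu cookie for request_percpu_irq() */

    static int vmbus_attach_interrupt(void)    /* hypothetical helper name */
    {
            int ret = 0;

            if (vmbus_irq == -1) {
                    hv_setup_vmbus_handler(vmbus_isr);
            } else {
                    vmbus_evt = alloc_percpu(long);
                    ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
                                             "Hyper-V VMbus", vmbus_evt);
                    if (ret)
                            free_percpu(vmbus_evt);
            }
            return ret;
    }

On x86/x64 the IRQ value parsed from ACPI is deliberately discarded (vmbus_irq is forced to -1 when HYPERVISOR_CALLBACK_VECTOR is defined), so only the arch-specific callback-vector path is taken; on ARM64 the same vmbus_isr() logic runs, just entered through the kernel's per-cpu IRQ layer.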
Diffstat (limited to 'drivers/hv')
-rw-r--r-- | drivers/hv/hv.c | 8
-rw-r--r-- | drivers/hv/vmbus_drv.c | 63
2 files changed, 63 insertions, 8 deletions
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index afe7a62b447f..917b29e873c5 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -16,6 +16,7 @@
 #include <linux/version.h>
 #include <linux/random.h>
 #include <linux/clockchips.h>
+#include <linux/interrupt.h>
 #include <clocksource/hyperv_timer.h>
 #include <asm/mshyperv.h>
 #include "hyperv_vmbus.h"
@@ -214,10 +215,12 @@ void hv_synic_enable_regs(unsigned int cpu)
 	hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
 
 	/* Setup the shared SINT. */
+	if (vmbus_irq != -1)
+		enable_percpu_irq(vmbus_irq, 0);
 	shared_sint.as_uint64 = hv_get_register(HV_REGISTER_SINT0 +
 					VMBUS_MESSAGE_SINT);
 
-	shared_sint.vector = hv_get_vector();
+	shared_sint.vector = vmbus_interrupt;
 	shared_sint.masked = false;
 
 	/*
@@ -285,6 +288,9 @@ void hv_synic_disable_regs(unsigned int cpu)
 	sctrl.as_uint64 = hv_get_register(HV_REGISTER_SCONTROL);
 	sctrl.enable = 0;
 	hv_set_register(HV_REGISTER_SCONTROL, sctrl.as_uint64);
+
+	if (vmbus_irq != -1)
+		disable_percpu_irq(vmbus_irq);
 }
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 7524d71a0f84..51c40d5e3c8a 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -48,8 +48,10 @@ static int hyperv_cpuhp_online;
 
 static void *hv_panic_page;
 
+static long __percpu *vmbus_evt;
+
 /* Values parsed from ACPI DSDT */
-static int vmbus_irq;
+int vmbus_irq;
 int vmbus_interrupt;
 
 /*
@@ -1381,7 +1383,13 @@ static void vmbus_isr(void)
 		tasklet_schedule(&hv_cpu->msg_dpc);
 	}
 
-	add_interrupt_randomness(hv_get_vector(), 0);
+	add_interrupt_randomness(vmbus_interrupt, 0);
+}
+
+static irqreturn_t vmbus_percpu_isr(int irq, void *dev_id)
+{
+	vmbus_isr();
+	return IRQ_HANDLED;
 }
 
 /*
@@ -1496,9 +1504,28 @@ static int vmbus_bus_init(void)
 	if (ret)
 		return ret;
 
-	ret = hv_setup_vmbus_irq(vmbus_irq, vmbus_isr);
-	if (ret)
-		goto err_setup;
+	/*
+	 * VMbus interrupts are best modeled as per-cpu interrupts. If
+	 * on an architecture with support for per-cpu IRQs (e.g. ARM64),
+	 * allocate a per-cpu IRQ using standard Linux kernel functionality.
+	 * If not on such an architecture (e.g., x86/x64), then rely on
+	 * code in the arch-specific portion of the code tree to connect
+	 * the VMbus interrupt handler.
+	 */
+
+	if (vmbus_irq == -1) {
+		hv_setup_vmbus_handler(vmbus_isr);
+	} else {
+		vmbus_evt = alloc_percpu(long);
+		ret = request_percpu_irq(vmbus_irq, vmbus_percpu_isr,
+				"Hyper-V VMbus", vmbus_evt);
+		if (ret) {
+			pr_err("Can't request Hyper-V VMbus IRQ %d, Err %d",
+				vmbus_irq, ret);
+			free_percpu(vmbus_evt);
+			goto err_setup;
+		}
+	}
 
 	ret = hv_synic_alloc();
 	if (ret)
@@ -1559,7 +1586,12 @@ err_connect:
 err_cpuhp:
 	hv_synic_free();
 err_alloc:
-	hv_remove_vmbus_irq();
+	if (vmbus_irq == -1) {
+		hv_remove_vmbus_handler();
+	} else {
+		free_percpu_irq(vmbus_irq, vmbus_evt);
+		free_percpu(vmbus_evt);
+	}
 err_setup:
 	bus_unregister(&hv_bus);
 	unregister_sysctl_table(hv_ctl_table_hdr);
@@ -2677,6 +2709,18 @@ static int __init hv_acpi_init(void)
 		ret = -ETIMEDOUT;
 		goto cleanup;
 	}
+
+	/*
+	 * If we're on an architecture with a hardcoded hypervisor
+	 * vector (i.e. x86/x64), override the VMbus interrupt found
+	 * in the ACPI tables. Ensure vmbus_irq is not set since the
+	 * normal Linux IRQ mechanism is not used in this case.
+	 */
+#ifdef HYPERVISOR_CALLBACK_VECTOR
+	vmbus_interrupt = HYPERVISOR_CALLBACK_VECTOR;
+	vmbus_irq = -1;
+#endif
+
 	hv_debug_init();
 
 	ret = vmbus_bus_init();
@@ -2707,7 +2751,12 @@ static void __exit vmbus_exit(void)
 	vmbus_connection.conn_state = DISCONNECTED;
 	hv_stimer_global_cleanup();
 	vmbus_disconnect();
-	hv_remove_vmbus_irq();
+	if (vmbus_irq == -1) {
+		hv_remove_vmbus_handler();
+	} else {
+		free_percpu_irq(vmbus_irq, vmbus_evt);
+		free_percpu(vmbus_evt);
+	}
 	for_each_online_cpu(cpu) {
 		struct hv_per_cpu_context *hv_cpu
 			= per_cpu_ptr(hv_context.cpu_context, cpu);
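For context on why the enable/disable calls land in hv_synic_enable_regs()/hv_synic_disable_regs() rather than at request time: request_percpu_irq() only registers the handler for all CPUs; delivery must then be enabled separately on each CPU, from code running on that CPU, which is what the SynIC enable/disable paths do during CPU online/offline. Below is a generic, self-contained sketch of that per-cpu IRQ lifecycle; the demo_* names are hypothetical and are not part of this patch:

    #include <linux/interrupt.h>
    #include <linux/percpu.h>

    static long __percpu *demo_evt;     /* hypothetical per-cpu dev_id cookie */

    static irqreturn_t demo_handler(int irq, void *dev_id)
    {
            /* Runs on whichever CPU received its instance of the IRQ. */
            return IRQ_HANDLED;
    }

    static int demo_request(int irq)
    {
            demo_evt = alloc_percpu(long);
            if (!demo_evt)
                    return -ENOMEM;
            /* Registers the handler for all CPUs but does not enable delivery. */
            return request_percpu_irq(irq, demo_handler, "demo", demo_evt);
    }

    /* Called on each CPU as it comes online (e.g. from a hotplug callback). */
    static void demo_cpu_online(int irq)
    {
            enable_percpu_irq(irq, 0);
    }

    /* Called on each CPU before it goes offline. */
    static void demo_cpu_offline(int irq)
    {
            disable_percpu_irq(irq);
    }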