commit bb598c1b8c9bf56981927dcb8c0dc34b8ff95342
tree 69fe6d3bcdbf0acb76e42b144d8af5a0234ccdcb
parent eb2ca35f1814dad3ca547261eedfbbd0d65a0efc
parent e76d21c40bd6c67fd4e2c1540d77e113df962b4d
author David S. Miller <davem@davemloft.net> 2016-11-15 10:54:36 -0500
committer David S. Miller <davem@davemloft.net> 2016-11-15 10:54:36 -0500
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Several cases of bug fixes in 'net' overlapping other changes in
the 'net-next' tree.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.c | 41
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio.h | 14
-rw-r--r--  virt/kvm/arm/vgic/vgic.c      | 12
-rw-r--r--  virt/kvm/eventfd.c            | 22
-rw-r--r--  virt/kvm/kvm_main.c           |  6
5 files changed, 71 insertions(+), 24 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index e18b30ddcdce..ebe1b9fa3c4d 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -453,17 +453,33 @@ struct vgic_io_device *kvm_to_vgic_iodev(const struct kvm_io_device *dev)
 	return container_of(dev, struct vgic_io_device, dev);
 }
 
-static bool check_region(const struct vgic_register_region *region,
+static bool check_region(const struct kvm *kvm,
+			 const struct vgic_register_region *region,
 			 gpa_t addr, int len)
 {
-	if ((region->access_flags & VGIC_ACCESS_8bit) && len == 1)
-		return true;
-	if ((region->access_flags & VGIC_ACCESS_32bit) &&
-	    len == sizeof(u32) && !(addr & 3))
-		return true;
-	if ((region->access_flags & VGIC_ACCESS_64bit) &&
-	    len == sizeof(u64) && !(addr & 7))
-		return true;
+	int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS;
+
+	switch (len) {
+	case sizeof(u8):
+		flags = VGIC_ACCESS_8bit;
+		break;
+	case sizeof(u32):
+		flags = VGIC_ACCESS_32bit;
+		break;
+	case sizeof(u64):
+		flags = VGIC_ACCESS_64bit;
+		break;
+	default:
+		return false;
+	}
+
+	if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) {
+		if (!region->bits_per_irq)
+			return true;
+
+		/* Do we access a non-allocated IRQ? */
+		return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs;
+	}
 
 	return false;
 }
@@ -477,7 +493,7 @@ static int dispatch_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
 
 	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
 				       addr - iodev->base_addr);
-	if (!region || !check_region(region, addr, len)) {
+	if (!region || !check_region(vcpu->kvm, region, addr, len)) {
 		memset(val, 0, len);
 		return 0;
 	}
@@ -510,10 +526,7 @@ static int dispatch_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
 
 	region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions,
 				       addr - iodev->base_addr);
-	if (!region)
-		return 0;
-
-	if (!check_region(region, addr, len))
+	if (!region || !check_region(vcpu->kvm, region, addr, len))
 		return 0;
 
 	switch (iodev->iodev_type) {
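The rewritten check_region() folds the width check, the alignment check, and a
new bounds check on the addressed INT ID into a single path. A rough userspace
model of the width/alignment part follows; the flag macros and the
check_access() helper are simplified stand-ins for illustration, not the
kernel's actual definitions, and the IRQ-bounds test is only described in the
comment:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's access-width flags and
 * alignment helper; illustrative only, not the KVM definitions. */
#define VGIC_ACCESS_8bit   (1 << 0)
#define VGIC_ACCESS_32bit  (1 << 1)
#define VGIC_ACCESS_64bit  (1 << 2)
#define IS_ALIGNED(x, a)   (((x) & ((a) - 1)) == 0)

/* Model of the patched logic: map the access length to a width flag,
 * then require both a permitted width and natural alignment. The real
 * check_region() additionally rejects offsets that would index an IRQ
 * beyond nr_spis + VGIC_NR_PRIVATE_IRQS. */
static bool check_access(unsigned int access_flags, uint64_t addr, int len)
{
	int flags;

	switch (len) {
	case 1:
		flags = VGIC_ACCESS_8bit;
		break;
	case 4:
		flags = VGIC_ACCESS_32bit;
		break;
	case 8:
		flags = VGIC_ACCESS_64bit;
		break;
	default:
		return false;	/* e.g. 2-byte accesses are rejected */
	}

	return (access_flags & flags) && IS_ALIGNED(addr, len);
}

int main(void)
{
	/* A 32-bit-only register: an aligned word access passes ... */
	printf("%d\n", check_access(VGIC_ACCESS_32bit, 0x10, 4));	/* 1 */
	/* ... while misaligned or byte-sized accesses are rejected. */
	printf("%d\n", check_access(VGIC_ACCESS_32bit, 0x11, 4));	/* 0 */
	printf("%d\n", check_access(VGIC_ACCESS_32bit, 0x10, 1));	/* 0 */
	return 0;
}

The switch form also makes the rejected widths explicit, where the old chain
of ifs rejected them only by falling through.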
diff --git a/virt/kvm/arm/vgic/vgic-mmio.h b/virt/kvm/arm/vgic/vgic-mmio.h
index 4c34d39d44a0..84961b4e4422 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.h
+++ b/virt/kvm/arm/vgic/vgic-mmio.h
@@ -50,15 +50,15 @@ extern struct kvm_io_device_ops kvm_io_gic_ops;
 #define VGIC_ADDR_IRQ_MASK(bits) (((bits) * 1024 / 8) - 1)
 
 /*
- * (addr & mask) gives us the byte offset for the INT ID, so we want to
- * divide this with 'bytes per irq' to get the INT ID, which is given
- * by '(bits) / 8'.  But we do this with fixed-point-arithmetic and
- * take advantage of the fact that division by a fraction equals
- * multiplication with the inverted fraction, and scale up both the
- * numerator and denominator with 8 to support at most 64 bits per IRQ:
+ * (addr & mask) gives us the _byte_ offset for the INT ID.
+ * We multiply this by 8 to get the _bit_ offset, then divide this by
+ * the number of bits to learn the actual INT ID.
+ * But instead of a division (which requires a "long long div" implementation),
+ * we shift by the binary logarithm of <bits>.
+ * This assumes that <bits> is a power of two.
  */
 #define VGIC_ADDR_TO_INTID(addr, bits)  (((addr) & VGIC_ADDR_IRQ_MASK(bits)) * \
-					64 / (bits) / 8)
+					8 >> ilog2(bits))
 
 /*
  * Some VGIC registers store per-IRQ information, with a different number
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 2893d5ba523a..6440b56ec90e 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -273,6 +273,18 @@ retry:
 		 * no more work for us to do.
 		 */
 		spin_unlock(&irq->irq_lock);
+
+		/*
+		 * We have to kick the VCPU here, because we could be
+		 * queueing an edge-triggered interrupt for which we
+		 * get no EOI maintenance interrupt. In that case,
+		 * while the IRQ is already on the VCPU's AP list, the
+		 * VCPU could have EOI'ed the original interrupt and
+		 * won't see this one until it exits for some other
+		 * reason.
+		 */
+		if (vcpu)
+			kvm_vcpu_kick(vcpu);
 		return false;
 	}
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index f397e9b20370..a29786dd9522 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -42,6 +42,7 @@
 
 #ifdef CONFIG_HAVE_KVM_IRQFD
 
+static struct workqueue_struct *irqfd_cleanup_wq;
 
 static void
 irqfd_inject(struct work_struct *work)
@@ -167,7 +168,7 @@ irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
 
 	list_del_init(&irqfd->list);
 
-	schedule_work(&irqfd->shutdown);
+	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
 }
 
 int __attribute__((weak)) kvm_arch_set_irq_inatomic(
@@ -554,7 +555,7 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
 	 * so that we guarantee there will not be any more interrupts on this
 	 * gsi once this deassign function returns.
 	 */
-	flush_work(&irqfd->shutdown);
+	flush_workqueue(irqfd_cleanup_wq);
 
 	return 0;
 }
@@ -591,7 +592,7 @@ kvm_irqfd_release(struct kvm *kvm)
 	 * Block until we know all outstanding shutdown jobs have completed
 	 * since we do not take a kvm* reference.
 	 */
-	flush_work(&irqfd->shutdown);
+	flush_workqueue(irqfd_cleanup_wq);
 
 }
 
@@ -621,8 +622,23 @@ void kvm_irq_routing_update(struct kvm *kvm)
 	spin_unlock_irq(&kvm->irqfds.lock);
 }
 
+/*
+ * create a host-wide workqueue for issuing deferred shutdown requests
+ * aggregated from all vm* instances. We need our own isolated
+ * queue to ease flushing work items when a VM exits.
+ */
+int kvm_irqfd_init(void)
+{
+	irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
+	if (!irqfd_cleanup_wq)
+		return -ENOMEM;
+
+	return 0;
+}
+
 void kvm_irqfd_exit(void)
 {
+	destroy_workqueue(irqfd_cleanup_wq);
 }
 #endif
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2907b7b78654..5c360347a1e9 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3844,7 +3844,12 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 	 * kvm_arch_init makes sure there's at most one caller
 	 * for architectures that support multiple implementations,
 	 * like intel and amd on x86.
+	 * kvm_arch_init must be called before kvm_irqfd_init to avoid creating
+	 * conflicts in case kvm is already set up for another implementation.
 	 */
+	r = kvm_irqfd_init();
+	if (r)
+		goto out_irqfd;
 
 	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
 		r = -ENOMEM;
@@ -3926,6 +3931,7 @@ out_free_0a:
 	free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
 	kvm_irqfd_exit();
+out_irqfd:
 	kvm_arch_exit();
 out_fail:
 	return r;
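The VGIC_ADDR_TO_INTID change replaces the fixed-point '* 64 / (bits) / 8' with
a multiply-by-8 and a right shift, which is valid because bits_per_irq is
always a power of two. A small self-contained check of the arithmetic; ilog2
is open-coded here as a stand-in for the kernel helper, and the exhaustive
loop demonstrates that the old and new forms agree:

#include <assert.h>
#include <stdint.h>

/* Mirror of VGIC_ADDR_IRQ_MASK from vgic-mmio.h; ilog2 is open-coded
 * below because the kernel helper is not available in userspace. */
#define VGIC_ADDR_IRQ_MASK(bits)	(((bits) * 1024 / 8) - 1)

static unsigned int ilog2_u(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

/* Old form: fixed-point division by 'bits/8' bytes per IRQ. */
static uint64_t intid_old(uint64_t addr, unsigned int bits)
{
	return (addr & VGIC_ADDR_IRQ_MASK(bits)) * 64 / bits / 8;
}

/* New form: byte offset * 8 = bit offset, then shift by log2(bits). */
static uint64_t intid_new(uint64_t addr, unsigned int bits)
{
	return ((addr & VGIC_ADDR_IRQ_MASK(bits)) * 8) >> ilog2_u(bits);
}

int main(void)
{
	unsigned int bits;
	uint64_t addr;

	/* e.g. 2 bits per IRQ: byte 3 holds INT IDs 12-15, so the first
	 * INT ID at byte offset 3 is 3 * 8 / 2 = 12. */
	assert(intid_new(3, 2) == 12);

	/* The two forms agree for every power-of-two width. */
	for (bits = 1; bits <= 64; bits <<= 1)
		for (addr = 0; addr < 256; addr++)
			assert(intid_old(addr, bits) == intid_new(addr, bits));
	return 0;
}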
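The eventfd.c portion brings back a dedicated "kvm-irqfd-cleanup" workqueue so
that, per the comment in the patch, teardown paths can flush every pending
irqfd shutdown at once with flush_workqueue() rather than flushing individual
work items on the shared system queue. A minimal toy module sketching the same
pattern; this is not KVM code, and the wq_demo_* names are invented for
illustration:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_cleanup_wq;

static void demo_cleanup_fn(struct work_struct *work)
{
	pr_info("deferred cleanup ran\n");
}

static DECLARE_WORK(demo_cleanup_work, demo_cleanup_fn);

static int __init wq_demo_init(void)
{
	/* A private queue lets a later flush_workqueue() wait only for
	 * our own items, whereas schedule_work() would mix them into
	 * the shared system workqueue. */
	demo_cleanup_wq = alloc_workqueue("wq-demo-cleanup", 0, 0);
	if (!demo_cleanup_wq)
		return -ENOMEM;

	queue_work(demo_cleanup_wq, &demo_cleanup_work);
	flush_workqueue(demo_cleanup_wq);	/* all queued items done */
	return 0;
}

static void __exit wq_demo_exit(void)
{
	destroy_workqueue(demo_cleanup_wq);
}

module_init(wq_demo_init);
module_exit(wq_demo_exit);
MODULE_LICENSE("GPL");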