Diffstat (limited to 'virt')
 virt/kvm/arm/vgic/vgic-its.c | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 59ce2fb49821..5a11f4d3972c 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -309,21 +309,24 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
 	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	struct vgic_irq *irq;
 	u32 *intids;
-	int irq_count = dist->lpi_list_count, i = 0;
+	int irq_count, i = 0;
 
 	/*
-	 * We use the current value of the list length, which may change
-	 * after the kmalloc. We don't care, because the guest shouldn't
-	 * change anything while the command handling is still running,
-	 * and in the worst case we would miss a new IRQ, which one wouldn't
-	 * expect to be covered by this command anyway.
+	 * There is an obvious race between allocating the array and LPIs
+	 * being mapped/unmapped. If we ended up here as a result of a
+	 * command, we're safe (locks are held, preventing another
+	 * command). If coming from another path (such as enabling LPIs),
+	 * we must be careful not to overrun the array.
 	 */
+	irq_count = READ_ONCE(dist->lpi_list_count);
 	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
 	if (!intids)
 		return -ENOMEM;
 
 	spin_lock(&dist->lpi_list_lock);
 	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
+		if (i == irq_count)
+			break;
 		/* We don't need to "get" the IRQ, as we hold the list lock. */
 		if (irq->target_vcpu != vcpu)
 			continue;
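
For reference, below is a minimal standalone sketch of the snapshot-and-bound pattern this patch applies: read the list length exactly once, size the array from that snapshot, then refuse to write past it even if the list grows before the lock is taken. Everything here is a hypothetical userspace illustration, not kernel code: struct item, item_list, item_count, and copy_item_ids() are invented names, and __atomic_load_n() stands in for the kernel's READ_ONCE().

#include <stdlib.h>

struct item {
	unsigned int id;
	struct item *next;
};

static struct item *item_list;	/* walked under a lock in real code */
static int item_count;		/* may grow concurrently */

static int copy_item_ids(unsigned int **ids_ptr)
{
	struct item *it;
	unsigned int *ids;
	int count, i = 0;

	/*
	 * Snapshot the length exactly once. The list may still grow
	 * before the walk below, but the array is sized from this
	 * snapshot, so the loop must never write past count entries.
	 */
	count = __atomic_load_n(&item_count, __ATOMIC_RELAXED);

	ids = malloc(count * sizeof(ids[0]));
	if (!ids)
		return -1;

	/* A real implementation would hold the list lock across this walk. */
	for (it = item_list; it; it = it->next) {
		if (i == count)		/* array full: stop, as the patch does */
			break;
		ids[i++] = it->id;
	}

	*ids_ptr = ids;
	return i;	/* number of IDs actually copied */
}

int main(void)
{
	struct item a = { 10, NULL }, b = { 20, &a };
	unsigned int *ids;
	int n;

	item_list = &b;
	item_count = 2;

	n = copy_item_ids(&ids);	/* n == 2, ids = {20, 10} */
	free(ids);
	return n == 2 ? 0 : 1;
}

The one-time load matters because a plain read of the counter could legally be re-read by the compiler between the allocation and the loop bound; the explicit bound check then covers the remaining window where the list grows after the snapshot.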