author    Paolo Bonzini <pbonzini@redhat.com>  2016-06-13 14:48:25 +0200
committer Paolo Bonzini <pbonzini@redhat.com>  2016-06-16 00:05:00 +0200
commit    6c7caebc26c5f0b618f0ef6b851e9f5f27c3812f (patch)
tree      0d804b1b15e565ca3286f82a6cd219e9e3d00828 /virt
parent    682a8108872f78560c891cf30c7d08aa01dac943 (diff)
download  linux-6c7caebc26c5f0b618f0ef6b851e9f5f27c3812f.tar.gz
          linux-6c7caebc26c5f0b618f0ef6b851e9f5f27c3812f.tar.bz2
          linux-6c7caebc26c5f0b618f0ef6b851e9f5f27c3812f.zip
KVM: introduce kvm->created_vcpus
The race between creating the irqchip and the first VCPU is currently
fixed by checking the presence of an irqchip before updating
kvm->online_vcpus, and undoing the whole VCPU creation if someone
created the irqchip in the meanwhile.

Instead, introduce a new field in struct kvm that will count VCPUs
under a mutex, without the atomic access and memory ordering that we
need elsewhere to protect the vcpus array.  This also plugs the race
and is more easily applicable in all similar circumstances.

Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
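Note: the new counter also needs a declaration in struct kvm, which lives in include/linux/kvm_host.h and is therefore outside this diffstat (limited to 'virt'). The fragment below is only a minimal sketch of what that declaration could look like, assuming it sits next to online_vcpus and documents the locking rule described above; it is not the hunk from this series.

	struct kvm {
		...
		struct mutex lock;
		...
		/*
		 * Sketch: created_vcpus is only read and written under
		 * kvm->lock and is bumped at the very start of
		 * KVM_CREATE_VCPU; online_vcpus is still accessed
		 * atomically, and only incremented once the vcpu is
		 * fully installed in the vcpus array.
		 */
		int created_vcpus;
		atomic_t online_vcpus;
		...
	};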
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/kvm_main.c  23
1 file changed, 17 insertions(+), 6 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 02e98f3131bd..15b757ae64e1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2346,9 +2346,20 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 	if (id >= KVM_MAX_VCPU_ID)
 		return -EINVAL;
 
+	mutex_lock(&kvm->lock);
+	if (kvm->created_vcpus == KVM_MAX_VCPUS) {
+		mutex_unlock(&kvm->lock);
+		return -EINVAL;
+	}
+
+	kvm->created_vcpus++;
+	mutex_unlock(&kvm->lock);
+
 	vcpu = kvm_arch_vcpu_create(kvm, id);
-	if (IS_ERR(vcpu))
-		return PTR_ERR(vcpu);
+	if (IS_ERR(vcpu)) {
+		r = PTR_ERR(vcpu);
+		goto vcpu_decrement;
+	}
 
 	preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
@@ -2361,10 +2372,6 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
 		r = -EINVAL;
 		goto unlock_vcpu_destroy;
 	}
-	if (atomic_read(&kvm->online_vcpus) == KVM_MAX_VCPUS) {
-		r = -EINVAL;
-		goto unlock_vcpu_destroy;
-	}
 	if (kvm_get_vcpu_by_id(kvm, id)) {
 		r = -EEXIST;
 		goto unlock_vcpu_destroy;
@@ -2397,6 +2404,10 @@ unlock_vcpu_destroy:
 	mutex_unlock(&kvm->lock);
 vcpu_destroy:
 	kvm_arch_vcpu_destroy(vcpu);
+vcpu_decrement:
+	mutex_lock(&kvm->lock);
+	kvm->created_vcpus--;
+	mutex_unlock(&kvm->lock);
 	return r;
 }
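With the counter in place, any path that must not race with VCPU creation can simply inspect kvm->created_vcpus while holding kvm->lock. The arch-specific irqchip code is not part of this diff; the fragment below is only a sketch, under the assumption that the KVM_CREATE_IRQCHIP handler runs with kvm->lock available, of how such a check could be written.

	/* Sketch only, not the arch code from this series. */
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		/* A VCPU has been (or is being) created: refuse the irqchip. */
		r = -EINVAL;
	} else {
		/*
		 * created_vcpus cannot grow while kvm->lock is held, so the
		 * irqchip can be set up here without racing against
		 * kvm_vm_ioctl_create_vcpu().
		 */
		r = 0; /* ... create the irqchip ... */
	}
	mutex_unlock(&kvm->lock);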