author     Marcelo Tosatti <mtosatti@redhat.com>   2009-12-23 14:35:26 -0200
committer  Marcelo Tosatti <mtosatti@redhat.com>   2010-03-01 12:35:45 -0300
commit     79fac95ecfa3969aab8119d37ccd7226165f933a (patch)
tree       4103169c7fd2b7fb1ba95dae2db6b3f365476b9d /virt
parent     f656ce0185cabbbb0cf96877306879661297c7ad (diff)
KVM: convert slots_lock to a mutex
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/coalesced_mmio.c  14
-rw-r--r--  virt/kvm/eventfd.c         10
-rw-r--r--  virt/kvm/ioapic.c           4
-rw-r--r--  virt/kvm/kvm_main.c        10
4 files changed, 19 insertions(+), 19 deletions(-)
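
The patch is a mechanical substitution of the write-side rw_semaphore API on kvm->slots_lock with the corresponding struct mutex API: every hunk below changes only down_write()/up_write() pairs (plus the init_rwsem() in kvm_create_vm()), so the lock degenerates to plain mutual exclusion, which a mutex expresses directly. A minimal sketch of that API mapping follows; struct foo and the foo_* helpers are hypothetical stand-ins for struct kvm and its callers, not code from the patch.

#include <linux/mutex.h>
#include <linux/rwsem.h>

/* Hypothetical container standing in for struct kvm; both lock
 * flavors are shown side by side purely for illustration. */
struct foo {
	struct rw_semaphore slots_rwsem;	/* before: reader/writer semaphore */
	struct mutex        slots_mutex;	/* after:  plain sleeping mutex */
};

static void foo_init(struct foo *f)
{
	init_rwsem(&f->slots_rwsem);	/* before: init_rwsem() */
	mutex_init(&f->slots_mutex);	/* after:  mutex_init() */
}

/* Before the patch: updates serialized on the rwsem write side. */
static void foo_update_old(struct foo *f)
{
	down_write(&f->slots_rwsem);
	/* ... modify slot state ... */
	up_write(&f->slots_rwsem);
}

/* After the patch: the same critical section under the mutex. */
static void foo_update_new(struct foo *f)
{
	mutex_lock(&f->slots_mutex);
	/* ... modify slot state ... */
	mutex_unlock(&f->slots_mutex);
}

Note that the diff touches no down_read()/up_read() callers, consistent with the read side having already moved off slots_lock; the comment updates in kvm_main.c ("Caller must hold slots_lock") likewise drop the write/read distinction.
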
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index a736a93ca7b7..5de6594260cb 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -110,9 +110,9 @@ int kvm_coalesced_mmio_init(struct kvm *kvm)
dev->kvm = kvm;
kvm->coalesced_mmio_dev = dev;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
if (ret < 0)
goto out_free_dev;
@@ -140,16 +140,16 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
if (dev == NULL)
return -EINVAL;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return -ENOBUFS;
}
dev->zone[dev->nb_zones] = *zone;
dev->nb_zones++;
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return 0;
}
@@ -163,7 +163,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
if (dev == NULL)
return -EINVAL;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
i = dev->nb_zones;
while(i) {
@@ -181,7 +181,7 @@ int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
i--;
}
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return 0;
}
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 315a586ec4d5..486c604365d9 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -508,7 +508,7 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
else
p->wildcard = true;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
/* Verify that there isnt a match already */
if (ioeventfd_check_collision(kvm, p)) {
@@ -524,12 +524,12 @@ kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
list_add_tail(&p->list, &kvm->ioeventfds);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return 0;
unlock_fail:
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
fail:
kfree(p);
@@ -551,7 +551,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
if (IS_ERR(eventfd))
return PTR_ERR(eventfd);
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);
@@ -571,7 +571,7 @@ kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
break;
}
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
eventfd_ctx_put(eventfd);
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index f326a6f301cc..f01392f51e86 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -372,9 +372,9 @@ int kvm_ioapic_init(struct kvm *kvm)
kvm_ioapic_reset(ioapic);
kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
ioapic->kvm = kvm;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
if (ret < 0)
kfree(ioapic);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 659bc12ad16a..2b7cd6c0d9ca 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -429,7 +429,7 @@ static struct kvm *kvm_create_vm(void)
kvm_eventfd_init(kvm);
mutex_init(&kvm->lock);
mutex_init(&kvm->irq_lock);
- init_rwsem(&kvm->slots_lock);
+ mutex_init(&kvm->slots_lock);
atomic_set(&kvm->users_count, 1);
spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
@@ -763,9 +763,9 @@ int kvm_set_memory_region(struct kvm *kvm,
{
int r;
- down_write(&kvm->slots_lock);
+ mutex_lock(&kvm->slots_lock);
r = __kvm_set_memory_region(kvm, mem, user_alloc);
- up_write(&kvm->slots_lock);
+ mutex_unlock(&kvm->slots_lock);
return r;
}
EXPORT_SYMBOL_GPL(kvm_set_memory_region);
@@ -1997,7 +1997,7 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
return -EOPNOTSUPP;
}
-/* Caller must have write lock on slots_lock. */
+/* Caller must hold slots_lock. */
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev)
{
@@ -2019,7 +2019,7 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx,
return 0;
}
-/* Caller must have write lock on slots_lock. */
+/* Caller must hold slots_lock. */
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
struct kvm_io_device *dev)
{