author		Christoffer Dall <christoffer.dall@linaro.org>	2016-08-16 16:48:20 +0200
committer	Christoffer Dall <christoffer.dall@linaro.org>	2016-09-08 12:53:00 +0200
commit		ba7b9169b52038ce017782479c6a24cd37f34ae5 (patch)
tree		8557f62c3312667d99e429285f0ecb26d7e9ba12
parent		714848026531043ae06ca7bcd4a852c8bb8348c7 (diff)
KVM: arm/arm64: Factor out vgic_attr_regs_access functionality

As we are about to deal with multiple data types and situations where the
vgic should not be initialized when doing userspace accesses on the register
attributes, factor out the functionality of vgic_attr_regs_access into
smaller bits which can be reused by a new function later.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
-rw-r--r--	virt/kvm/arm/vgic/vgic-kvm-device.c	100
1 file changed, 73 insertions(+), 27 deletions(-)
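For context, this code path is driven from user space through KVM_GET_DEVICE_ATTR / KVM_SET_DEVICE_ATTR on the VGIC device fd: the 64-bit attr field packs the target CPU id and the register offset that parse_vgic_v2_attr() in the patch below decodes. The following user-space sketch is illustrative only and not part of the patch; read_vgic_dist_reg() is a hypothetical helper, "vgic_fd" is assumed to be the fd returned by KVM_CREATE_DEVICE for KVM_DEV_TYPE_ARM_VGIC_V2 on an arm/arm64 host, and error handling is omitted.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper (not from the patch): read one GICv2 distributor
 * register for a given VCPU through the device-attribute interface.
 */
static int read_vgic_dist_reg(int vgic_fd, uint32_t cpuid, uint32_t offset,
			      uint32_t *val)
{
	struct kvm_device_attr attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
		/* CPU id lives in bits [63:32], register offset in bits [31:0] */
		.attr  = ((uint64_t)cpuid << KVM_DEV_ARM_VGIC_CPUID_SHIFT) |
			 (offset & KVM_DEV_ARM_VGIC_OFFSET_MASK),
		/* user-space pointer the kernel reads from or writes to */
		.addr  = (uint64_t)(uintptr_t)val,
	};

	return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
}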
diff --git a/virt/kvm/arm/vgic/vgic-kvm-device.c b/virt/kvm/arm/vgic/vgic-kvm-device.c
index 1813f93b5cde..19fa331f2d11 100644
--- a/virt/kvm/arm/vgic/vgic-kvm-device.c
+++ b/virt/kvm/arm/vgic/vgic-kvm-device.c
@@ -233,6 +233,67 @@ int kvm_register_vgic_device(unsigned long type)
 	return ret;
 }
 
+struct vgic_reg_attr {
+	struct kvm_vcpu *vcpu;
+	gpa_t addr;
+};
+
+static int parse_vgic_v2_attr(struct kvm_device *dev,
+			      struct kvm_device_attr *attr,
+			      struct vgic_reg_attr *reg_attr)
+{
+	int cpuid;
+
+	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
+		 KVM_DEV_ARM_VGIC_CPUID_SHIFT;
+
+	if (cpuid >= atomic_read(&dev->kvm->online_vcpus))
+		return -EINVAL;
+
+	reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid);
+	reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+
+	return 0;
+}
+
+/* unlocks vcpus from @vcpu_lock_idx and smaller */
+static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
+{
+	struct kvm_vcpu *tmp_vcpu;
+
+	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
+		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
+		mutex_unlock(&tmp_vcpu->mutex);
+	}
+}
+
+static void unlock_all_vcpus(struct kvm *kvm)
+{
+	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
+}
+
+/* Returns true if all vcpus were locked, false otherwise */
+static bool lock_all_vcpus(struct kvm *kvm)
+{
+	struct kvm_vcpu *tmp_vcpu;
+	int c;
+
+	/*
+	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
+	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
+	 * that no other VCPUs are run and fiddle with the vgic state while we
+	 * access it.
+	 */
+	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
+		if (!mutex_trylock(&tmp_vcpu->mutex)) {
+			unlock_vcpus(kvm, c - 1);
+			return false;
+		}
+	}
+
+	return true;
+}
+
 /** vgic_attr_regs_access: allows user space to read/write VGIC registers
  *
  * @dev: kvm device handle
@@ -245,15 +306,17 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 				 struct kvm_device_attr *attr,
 				 u32 *reg, bool is_write)
 {
+	struct vgic_reg_attr reg_attr;
 	gpa_t addr;
-	int cpuid, ret, c;
-	struct kvm_vcpu *vcpu, *tmp_vcpu;
-	int vcpu_lock_idx = -1;
+	struct kvm_vcpu *vcpu;
+	int ret;
 
-	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
-		KVM_DEV_ARM_VGIC_CPUID_SHIFT;
-	vcpu = kvm_get_vcpu(dev->kvm, cpuid);
-	addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
+	ret = parse_vgic_v2_attr(dev, attr, &reg_attr);
+	if (ret)
+		return ret;
+
+	vcpu = reg_attr.vcpu;
+	addr = reg_attr.addr;
 
 	mutex_lock(&dev->kvm->lock);
@@ -261,24 +324,11 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 	if (ret)
 		goto out;
 
-	if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) {
-		ret = -EINVAL;
+	if (!lock_all_vcpus(dev->kvm)) {
+		ret = -EBUSY;
 		goto out;
 	}
 
-	/*
-	 * Any time a vcpu is run, vcpu_load is called which tries to grab the
-	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
-	 * that no other VCPUs are run and fiddle with the vgic state while we
-	 * access it.
-	 */
-	ret = -EBUSY;
-	kvm_for_each_vcpu(c, tmp_vcpu, dev->kvm) {
-		if (!mutex_trylock(&tmp_vcpu->mutex))
-			goto out;
-		vcpu_lock_idx = c;
-	}
-
 	switch (attr->group) {
 	case KVM_DEV_ARM_VGIC_GRP_CPU_REGS:
 		ret = vgic_v2_cpuif_uaccess(vcpu, is_write, addr, reg);
@@ -291,12 +341,8 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 		break;
 	}
 
+	unlock_all_vcpus(dev->kvm);
 out:
-	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
-		tmp_vcpu = kvm_get_vcpu(dev->kvm, vcpu_lock_idx);
-		mutex_unlock(&tmp_vcpu->mutex);
-	}
-
 	mutex_unlock(&dev->kvm->lock);
 
 	return ret;
 }
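To illustrate the stated intent of the refactor (helpers that "can be reused by a new function later"), here is a rough, hypothetical sketch of what such a reuse could look like: parse the attribute without touching vgic state, then take every vcpu->mutex before performing the actual access. The function name and the elided access step are illustrative only and are not part of this patch.

/*
 * Hypothetical later user of the factored-out helpers (illustrative only):
 * reuses parse_vgic_v2_attr(), lock_all_vcpus() and unlock_all_vcpus()
 * around a register access of a different width.
 */
static int example_vgic_attr_regs_access(struct kvm_device *dev,
					 struct kvm_device_attr *attr,
					 u64 *reg, bool is_write)
{
	struct vgic_reg_attr reg_attr;
	int ret;

	ret = parse_vgic_v2_attr(dev, attr, &reg_attr);
	if (ret)
		return ret;

	mutex_lock(&dev->kvm->lock);

	if (!lock_all_vcpus(dev->kvm)) {
		ret = -EBUSY;
		goto out;
	}

	/* ... access the register via reg_attr.vcpu / reg_attr.addr ... */

	unlock_all_vcpus(dev->kvm);
out:
	mutex_unlock(&dev->kvm->lock);
	return ret;
}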