author	Oliver Upton <oupton@google.com>	2021-08-18 20:21:30 +0000
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2021-09-22 12:28:03 +0200
commit	53921242cf995a6c9867154696ba9f07b6bd7957 (patch)
tree	4c3cb2874c362c24331ab2682a16cecd5666ada9
parent	b9b89da56af761fbb12da26ca5eb0c068446ec16 (diff)
KVM: arm64: Fix read-side race on updates to vcpu reset state
[ Upstream commit 6654f9dfcb88fea3b9affc180dc3c04333d0f306 ]

KVM correctly serializes writes to a vCPU's reset state, however since
we do not take the KVM lock on the read side it is entirely possible to
read state from two different reset requests.

Cure the race for now by taking the KVM lock when reading the
reset_state structure.

Fixes: 358b28f09f0a ("arm/arm64: KVM: Allow a VCPU to fully reset itself")
Signed-off-by: Oliver Upton <oupton@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210818202133.1106786-2-oupton@google.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
-rw-r--r--	arch/arm64/kvm/reset.c	16
1 file changed, 10 insertions(+), 6 deletions(-)
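The race being fixed is a torn read: kvm_reset_vcpu() previously read
vcpu->arch.reset_state field by field (pc, be, r0) without holding
kvm->lock, so a concurrent writer (e.g. the PSCI CPU_ON path) could
update the structure in between and the vCPU could come up with the PC
from one reset request and the r0 or endianness from another. Below is
a minimal userspace sketch of the snapshot-under-lock pattern the patch
applies; it is illustrative only (the struct, function names and values
are made up, and a pthread mutex stands in for kvm->lock), not kernel
code.

/*
 * Userspace sketch of the pattern: snapshot the shared reset state
 * under the lock, clear the request flag while still holding it, then
 * act on the private copy outside the lock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct reset_state {
	unsigned long pc;
	unsigned long r0;
	bool be;
	bool reset;
};

static struct reset_state shared_state;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Writer side: fill in a complete reset request under the lock. */
static void request_reset(unsigned long pc, unsigned long r0, bool be)
{
	pthread_mutex_lock(&state_lock);
	shared_state.pc = pc;
	shared_state.r0 = r0;
	shared_state.be = be;
	shared_state.reset = true;
	pthread_mutex_unlock(&state_lock);
}

/* Reader side: take one coherent snapshot instead of re-reading fields. */
static void handle_reset(void)
{
	struct reset_state snap;

	pthread_mutex_lock(&state_lock);
	snap = shared_state;		/* all fields from one request */
	shared_state.reset = false;	/* consume the request */
	pthread_mutex_unlock(&state_lock);

	if (snap.reset)
		printf("reset to pc=%#lx r0=%#lx be=%d\n",
		       snap.pc, snap.r0, snap.be);
}

int main(void)
{
	request_reset(0x80080000, 42, false);
	handle_reset();
	return 0;
}

Taking the snapshot and clearing the reset flag in the same critical
section means each request is consumed exactly once, while the actual
register programming still happens outside the lock, which mirrors what
the hunks below do.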
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 6058a80ec9ec..204c62debf06 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -263,10 +263,16 @@ static bool vcpu_allowed_register_width(struct kvm_vcpu *vcpu)
  */
 int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_reset_state reset_state;
 	int ret;
 	bool loaded;
 	u32 pstate;
 
+	mutex_lock(&vcpu->kvm->lock);
+	reset_state = vcpu->arch.reset_state;
+	WRITE_ONCE(vcpu->arch.reset_state.reset, false);
+	mutex_unlock(&vcpu->kvm->lock);
+
 	/* Reset PMU outside of the non-preemptible section */
 	kvm_pmu_vcpu_reset(vcpu);
 
@@ -325,8 +331,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 	 * Additional reset state handling that PSCI may have imposed on us.
 	 * Must be done after all the sys_reg reset.
 	 */
-	if (vcpu->arch.reset_state.reset) {
-		unsigned long target_pc = vcpu->arch.reset_state.pc;
+	if (reset_state.reset) {
+		unsigned long target_pc = reset_state.pc;
 
 		/* Gracefully handle Thumb2 entry point */
 		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
@@ -335,13 +341,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		}
 
 		/* Propagate caller endianness */
-		if (vcpu->arch.reset_state.be)
+		if (reset_state.be)
 			kvm_vcpu_set_be(vcpu);
 
 		*vcpu_pc(vcpu) = target_pc;
-		vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
-
-		vcpu->arch.reset_state.reset = false;
+		vcpu_set_reg(vcpu, 0, reset_state.r0);
 	}
 
 	/* Reset timer */