author | Paolo Bonzini <pbonzini@redhat.com> | 2020-06-11 14:02:32 -0400
committer | Paolo Bonzini <pbonzini@redhat.com> | 2020-06-11 14:02:32 -0400
commit | 49b3deaad3452217d62dbd78da8df24eb0c7e169 (patch)
tree | 5880be1125857c9be5d8adf9efe855b3595a9698 /arch/arm64/kvm/aarch32.c
parent | e0135a104c52ccc977bc04a972bc889e0298b068 (diff)
parent | 15c99816ed9396c548eed2e84f30c14caccad1f4 (diff)
Merge tag 'kvmarm-fixes-5.8-1' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
KVM/arm64 fixes for Linux 5.8, take #1
* 32bit VM fixes:
- Fix embarrassing mapping issue between AArch32 CSSELR and AArch64 ACTLR
- Add ACTLR2 support for AArch32
- Get rid of the useless ACTLR_EL1 save/restore
- Fix CP14/15 accesses for AArch32 guests on BE hosts
- Ensure that we don't lose any state when injecting a 32bit exception when running on a VHE host (this is the fix implemented by the aarch32.c diff below)
* 64bit VM fixes:
- Fix PtrAuth host saving happening in preemptible contexts
- Optimize PtrAuth lazy enable
- Drop vcpu to cpu context pointer
- Fix sparse warnings for HYP per-CPU accesses
Diffstat (limited to 'arch/arm64/kvm/aarch32.c')
-rw-r--r-- | arch/arm64/kvm/aarch32.c | 28
1 file changed, 28 insertions, 0 deletions
```diff
diff --git a/arch/arm64/kvm/aarch32.c b/arch/arm64/kvm/aarch32.c
index 0a356aa91aa1..40a62a99fbf8 100644
--- a/arch/arm64/kvm/aarch32.c
+++ b/arch/arm64/kvm/aarch32.c
@@ -33,6 +33,26 @@ static const u8 return_offsets[8][2] = {
 	[7] = { 4, 4 },		/* FIQ, unused */
 };
 
+static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
+{
+	preempt_disable();
+	if (vcpu->arch.sysregs_loaded_on_cpu) {
+		kvm_arch_vcpu_put(vcpu);
+		return true;
+	}
+
+	preempt_enable();
+	return false;
+}
+
+static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
+{
+	if (loaded) {
+		kvm_arch_vcpu_load(vcpu, smp_processor_id());
+		preempt_enable();
+	}
+}
+
 /*
  * When an exception is taken, most CPSR fields are left unchanged in the
  * handler. However, some are explicitly overridden (e.g. M[4:0]).
@@ -155,7 +175,10 @@ static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 
 void kvm_inject_undef32(struct kvm_vcpu *vcpu)
 {
+	bool loaded = pre_fault_synchronize(vcpu);
+
 	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
+	post_fault_synchronize(vcpu, loaded);
 }
 
 /*
@@ -168,6 +191,9 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 	u32 vect_offset;
 	u32 *far, *fsr;
 	bool is_lpae;
+	bool loaded;
+
+	loaded = pre_fault_synchronize(vcpu);
 
 	if (is_pabt) {
 		vect_offset = 12;
@@ -191,6 +217,8 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 		/* no need to shuffle FS[4] into DFSR[10] as its 0 */
 		*fsr = DFSR_FSC_EXTABT_nLPAE;
 	}
+
+	post_fault_synchronize(vcpu, loaded);
 }
 
 void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
```
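The new helper pair is the whole fix: on a VHE host the guest's system registers may be live in hardware while the vcpu thread runs in the kernel (flagged by vcpu->arch.sysregs_loaded_on_cpu), so updating the in-memory vcpu context during exception injection would be silently overwritten by the next save of the hardware state. To make the pairing easier to follow, here are the two helpers again; the code is taken from the hunk above, and only the comments (plus the includes, which aarch32.c pulls in indirectly) are added here:

```c
#include <linux/kvm_host.h>	/* struct kvm_vcpu, kvm_arch_vcpu_put()/load() */
#include <linux/preempt.h>	/* preempt_disable()/preempt_enable() */
#include <linux/smp.h>		/* smp_processor_id() */

static bool pre_fault_synchronize(struct kvm_vcpu *vcpu)
{
	/* Pin the thread to this CPU while we inspect per-CPU register state. */
	preempt_disable();
	if (vcpu->arch.sysregs_loaded_on_cpu) {
		/*
		 * VHE host: the guest sysregs are live in hardware. Save
		 * them back into the vcpu context so the injection code
		 * manipulates up-to-date values. Preemption stays disabled
		 * until post_fault_synchronize() reloads the registers.
		 */
		kvm_arch_vcpu_put(vcpu);
		return true;
	}

	/* Nothing was loaded on the CPU; the bracket degenerates to a no-op. */
	preempt_enable();
	return false;
}

static void post_fault_synchronize(struct kvm_vcpu *vcpu, bool loaded)
{
	if (loaded) {
		/* Put the now-updated sysregs back onto this CPU... */
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
		/* ...and only then allow the thread to migrate again. */
		preempt_enable();
	}
}
```

The asymmetric preempt_disable()/preempt_enable() pairing is the subtle part: whenever state had to be flushed, preemption is held across the entire injection, so the thread cannot migrate to another CPU between kvm_arch_vcpu_put() and kvm_arch_vcpu_load().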