author    Marc Zyngier <marc.zyngier@arm.com>    2018-02-06 17:56:07 +0000
committer Ben Hutchings <ben@decadent.org.uk>   2018-06-16 22:22:06 +0100
commit    cd53f44e7910881891e667e0d0a9cf3af69ac766 (patch)
tree      378f821314c2d9c40aaef216b4df7d6c02973507 /arch/arm64
parent    09afbc21c9d34ffd283c4fd0b55239f5aafae54f (diff)
arm64: KVM: Increment PC after handling an SMC trap
commit f5115e8869e1dfafac0e414b4f1664f3a84a4683 upstream.

When handling an SMC trap, the "preferred return address" is set to that
of the SMC, and not the next PC (which is a departure from the behaviour
of an SMC that isn't trapped).

Increment PC in the handler, as the guest is otherwise forever stuck...

Fixes: acfb3b883f6d ("arm64: KVM: Fix SMCCC handling of unimplemented SMC/HVC calls")
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
[bwh: Backported to 3.16: adjust context]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
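The guest gets stuck because the trap leaves the preferred return address pointing at the SMC itself, so resuming the vCPU re-executes the same SMC and re-traps indefinitely. A minimal sketch of the PC adjustment the fix relies on, assuming only that the step size follows the trapped instruction's encoded length; this is an illustration, not the kernel's kvm_skip_instr() implementation:

#include <stdbool.h>
#include <stdint.h>

/*
 * Illustration only: step the guest PC past the trapped instruction so
 * the vCPU does not resume at the same SMC and trap forever.
 */
static void skip_trapped_instr(uint64_t *pc, bool il_is32bit)
{
	/* A64, A32 and 32-bit Thumb encodings are 4 bytes; 16-bit Thumb is 2. */
	*pc += il_is32bit ? 4 : 2;
}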
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/kvm/handle_exit.c | 9 +++++++++
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 096824bedab6..fcc86311db73 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -43,7 +43,16 @@ static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
+	/*
+	 * "If an SMC instruction executed at Non-secure EL1 is
+	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
+	 * Trap exception, not a Secure Monitor Call exception [...]"
+	 *
+	 * We need to advance the PC after the trap, as it would
+	 * otherwise return to the same address...
+	 */
 	*vcpu_reg(vcpu, 0) = ~0UL;
+	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 	return 1;
 }
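For reference, handle_smc() as it reads with the patch applied, reconstructed from the hunk above. The second argument to kvm_skip_instr() comes from the ESR_EL2.IL bit, which reports whether the trapped instruction used a 32-bit encoding:

static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/*
	 * "If an SMC instruction executed at Non-secure EL1 is
	 * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
	 * Trap exception, not a Secure Monitor Call exception [...]"
	 *
	 * We need to advance the PC after the trap, as it would
	 * otherwise return to the same address...
	 */
	*vcpu_reg(vcpu, 0) = ~0UL;	/* return -1 in x0: unimplemented call, per acfb3b883f6d */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 1;
}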