From 688c50aa72f64ca21767486e5eef876ec23e418c Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Wed, 4 Jan 2017 16:10:28 +0100
Subject: KVM: arm/arm64: Move timer save/restore out of the hyp code

As we are about to be lazy with saving and restoring the timer
registers, we prepare by moving all possible timer configuration logic
out of the hyp code. All virtual timer registers can be programmed from
EL1, and since the arch timer is always a level-triggered interrupt, we
can safely do this with interrupts disabled in the host kernel on the
way to the guest without taking vtimer interrupts in the host kernel
(yet).

The downside is that the cntvoff register can only be programmed from
hyp mode, so we jump into hyp mode and back to program it. This is also
safe, because the host kernel doesn't use the virtual timer in the KVM
code. It may add a small performance penalty, but only until following
commits move this operation to vcpu load/put.

Signed-off-by: Christoffer Dall
Reviewed-by: Marc Zyngier
---
 arch/arm64/kvm/hyp/switch.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arm64/kvm')

diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 945e79c641c4..4994f4bdaca5 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -298,7 +298,7 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 	__activate_vm(vcpu);
 
 	__vgic_restore_state(vcpu);
-	__timer_restore_state(vcpu);
+	__timer_enable_traps(vcpu);
 
 	/*
 	 * We must restore the 32-bit state before the sysregs, thanks
@@ -368,7 +368,7 @@ again:
 
 	__sysreg_save_guest_state(guest_ctxt);
 	__sysreg32_save_state(vcpu);
-	__timer_save_state(vcpu);
+	__timer_disable_traps(vcpu);
 	__vgic_save_state(vcpu);
 
 	__deactivate_traps(vcpu);
@@ -436,7 +436,7 @@ void __hyp_text __noreturn __hyp_panic(void)
 
 		vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
 		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
-		__timer_save_state(vcpu);
+		__timer_disable_traps(vcpu);
 		__deactivate_traps(vcpu);
 		__deactivate_vm(vcpu);
 		__sysreg_restore_host_state(host_ctxt);
--
cgit v1.2.3
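To make the cntvoff detail above concrete: CNTVOFF_EL2 is writable only
at EL2, so the host performs a one-off hyp call for that single register
while all other timer programming stays at EL1. The sketch below shows
the shape of that call; it follows the helpers this series introduces,
but the exact signatures are illustrative rather than quoted from the
kernel sources (the value is split into 32-bit halves because
kvm_call_hyp() historically takes 32-bit arguments on 32-bit arm):

/* Hyp side: runs at EL2, where CNTVOFF_EL2 is accessible (sketch). */
void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high)
{
	u64 cntvoff = (u64)cntvoff_high << 32 | cntvoff_low;

	write_sysreg(cntvoff, cntvoff_el2);
}

/* Host side: EL1, with interrupts disabled on the way into the guest. */
static void set_cntvoff(u64 cntvoff)
{
	u32 low = lower_32_bits(cntvoff);
	u32 high = upper_32_bits(cntvoff);

	kvm_call_hyp(__kvm_timer_set_cntvoff, low, high);
}

Keeping the hyp call this narrow is what makes the rest of the timer
state safe to handle lazily from the host later in the series.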
From c1b135af8387bc6c6d43ed61a65647c68f231ea1 Mon Sep 17 00:00:00 2001
From: Christoffer Dall
Date: Fri, 16 Jun 2017 23:12:06 -0700
Subject: KVM: arm/arm64: Use kvm_arm_timer_set/get_reg for guest register traps

When trapping on a guest access to one of the timer registers, we were
messing with the internals of the timer state from the sysregs handling
code, and that logic was about to receive more complexity when
optimizing the timer handling code.

Therefore, since we already have timer register access functions (to
access registers from userspace), reuse those for the timer register
traps from a VM and let the timer code maintain its own consistency.

Signed-off-by: Christoffer Dall
Acked-by: Marc Zyngier
---
 arch/arm64/kvm/sys_regs.c | 41 ++++++++++++++---------------------------
 1 file changed, 14 insertions(+), 27 deletions(-)

(limited to 'arch/arm64/kvm')

diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2e070d3baf9f..bb0e41b3154e 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -841,13 +841,16 @@ static bool access_cntp_tval(struct kvm_vcpu *vcpu,
 		struct sys_reg_params *p,
 		const struct sys_reg_desc *r)
 {
-	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
 	u64 now = kvm_phys_timer_read();
+	u64 cval;
 
-	if (p->is_write)
-		ptimer->cnt_cval = p->regval + now;
-	else
-		p->regval = ptimer->cnt_cval - now;
+	if (p->is_write) {
+		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
+				      p->regval + now);
+	} else {
+		cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
+		p->regval = cval - now;
+	}
 
 	return true;
 }
@@ -856,24 +859,10 @@ static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
 		struct sys_reg_params *p,
 		const struct sys_reg_desc *r)
 {
-	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
-
-	if (p->is_write) {
-		/* ISTATUS bit is read-only */
-		ptimer->cnt_ctl = p->regval & ~ARCH_TIMER_CTRL_IT_STAT;
-	} else {
-		u64 now = kvm_phys_timer_read();
-
-		p->regval = ptimer->cnt_ctl;
-		/*
-		 * Set ISTATUS bit if it's expired.
-		 * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is
-		 * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit
-		 * regardless of ENABLE bit for our implementation convenience.
-		 */
-		if (ptimer->cnt_cval <= now)
-			p->regval |= ARCH_TIMER_CTRL_IT_STAT;
-	}
+	if (p->is_write)
+		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
+	else
+		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);
 
 	return true;
 }
@@ -882,12 +871,10 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu,
 		struct sys_reg_params *p,
 		const struct sys_reg_desc *r)
 {
-	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
-
 	if (p->is_write)
-		ptimer->cnt_cval = p->regval;
+		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
 	else
-		p->regval = ptimer->cnt_cval;
+		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
 
 	return true;
 }
--
cgit v1.2.3
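With the trap handlers above funnelled through kvm_arm_timer_get_reg()
and kvm_arm_timer_set_reg(), the timer code becomes the single owner of
cnt_ctl/cnt_cval consistency, including the ISTATUS computation that
used to live in sys_regs.c. Roughly, the read path dispatches on the
register ID; the following is a simplified sketch, assuming the
accessors gain KVM_REG_ARM_PTIMER_* cases as in this series, and the
read_timer_ctl() helper name is illustrative of where the ISTATUS logic
lands, not a quote of the sources:

/* Sketch of the read side; the real function also covers the vtimer. */
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid)
{
	struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);

	switch (regid) {
	case KVM_REG_ARM_PTIMER_CTL:
		/* read_timer_ctl() folds the computed ISTATUS bit in */
		return read_timer_ctl(ptimer);
	case KVM_REG_ARM_PTIMER_CVAL:
		return ptimer->cnt_cval;
	case KVM_REG_ARM_PTIMER_CNT:
		return kvm_phys_timer_read();
	}
	return (u64)-1;
}

The payoff is that a trap from the guest and a KVM_GET_ONE_REG call
from userspace now take exactly the same path through the timer code.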
From 74a64a981662ab34289b3c90f6f964aa38ec1d9f Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Sun, 29 Oct 2017 02:18:09 +0000
Subject: KVM: arm/arm64: Unify 32bit fault injection

Both the arm and arm64 ports are capable of injecting faults, yet have
completely divergent implementations, leading to different bugs and
reduced maintainability.

Let's elect the arm64 version as the canonical one and move it into
aarch32.c, which is common to both architectures.

Reviewed-by: Christoffer Dall
Signed-off-by: Marc Zyngier
Signed-off-by: Christoffer Dall
---
 arch/arm64/kvm/inject_fault.c | 74 ++-----------------------------------------
 1 file changed, 3 insertions(+), 71 deletions(-)

(limited to 'arch/arm64/kvm')

diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index da6a8cfa54a0..8ecbcb40e317 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -33,74 +33,6 @@
 #define LOWER_EL_AArch64_VECTOR		0x400
 #define LOWER_EL_AArch32_VECTOR		0x600
 
-static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
-{
-	unsigned long cpsr;
-	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
-	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
-	u32 return_offset = (is_thumb) ? 4 : 0;
-	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
-
-	cpsr = mode | COMPAT_PSR_I_BIT;
-
-	if (sctlr & (1 << 30))
-		cpsr |= COMPAT_PSR_T_BIT;
-	if (sctlr & (1 << 25))
-		cpsr |= COMPAT_PSR_E_BIT;
-
-	*vcpu_cpsr(vcpu) = cpsr;
-
-	/* Note: These now point to the banked copies */
-	*vcpu_spsr(vcpu) = new_spsr_value;
-	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
-
-	/* Branch to exception vector */
-	if (sctlr & (1 << 13))
-		vect_offset += 0xffff0000;
-	else /* always have security exceptions */
-		vect_offset += vcpu_cp15(vcpu, c12_VBAR);
-
-	*vcpu_pc(vcpu) = vect_offset;
-}
-
-static void inject_undef32(struct kvm_vcpu *vcpu)
-{
-	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
-}
-
-/*
- * Modelled after TakeDataAbortException() and TakePrefetchAbortException
- * pseudocode.
- */
-static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
-			 unsigned long addr)
-{
-	u32 vect_offset;
-	u32 *far, *fsr;
-	bool is_lpae;
-
-	if (is_pabt) {
-		vect_offset = 12;
-		far = &vcpu_cp15(vcpu, c6_IFAR);
-		fsr = &vcpu_cp15(vcpu, c5_IFSR);
-	} else { /* !iabt */
-		vect_offset = 16;
-		far = &vcpu_cp15(vcpu, c6_DFAR);
-		fsr = &vcpu_cp15(vcpu, c5_DFSR);
-	}
-
-	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
-
-	*far = addr;
-
-	/* Give the guest an IMPLEMENTATION DEFINED exception */
-	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
-	if (is_lpae)
-		*fsr = 1 << 9 | 0x34;
-	else
-		*fsr = 0x14;
-}
-
 enum exception_type {
 	except_type_sync	= 0,
 	except_type_irq		= 0x80,
@@ -197,7 +129,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
-		inject_abt32(vcpu, false, addr);
+		kvm_inject_dabt32(vcpu, addr);
 	else
 		inject_abt64(vcpu, false, addr);
 }
@@ -213,7 +145,7 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
-		inject_abt32(vcpu, true, addr);
+		kvm_inject_pabt32(vcpu, addr);
 	else
 		inject_abt64(vcpu, true, addr);
 }
@@ -227,7 +159,7 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
-		inject_undef32(vcpu);
+		kvm_inject_undef32(vcpu);
 	else
 		inject_undef64(vcpu);
 }
--
cgit v1.2.3
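After this change the 32-bit injection logic deleted from
inject_fault.c above exists exactly once, in virt/kvm/arm/aarch32.c,
shared by the arm and arm64 builds. The entry points the arm64 callers
switch to are thin wrappers around the moved code; in outline (a
sketch, with the wrapper bodies essentially the prepare_fault32() and
inject_abt32() code removed above):

/* virt/kvm/arm/aarch32.c -- shared entry points (sketch) */
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
}

void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, false, addr);
}

void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, true, addr);
}

Keeping the wrappers trivial means each architecture only needs the
kvm_inject_*32() declarations, while the CPSR and vector-offset details
stay private to aarch32.c.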