Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/Kconfig                     |  3
-rw-r--r--  arch/arm64/kvm/arm.c                       | 47
-rw-r--r--  arch/arm64/kvm/handle_exit.c               |  2
-rw-r--r--  arch/arm64/kvm/hyp/Makefile                |  3
-rw-r--r--  arch/arm64/kvm/hyp/entry.S                 | 15
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S             | 96
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/debug-sr.h  | 60
-rw-r--r--  arch/arm64/kvm/hyp/include/hyp/switch.h    | 64
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/switch.c           |  9
-rw-r--r--  arch/arm64/kvm/hyp/nvhe/tlb.c              |  7
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v3-sr.c            | 16
-rw-r--r--  arch/arm64/kvm/hyp/vhe/switch.c            |  9
-rw-r--r--  arch/arm64/kvm/hypercalls.c                | 33
-rw-r--r--  arch/arm64/kvm/mmu.c                       | 31
-rw-r--r--  arch/arm64/kvm/pmu-emul.c                  | 26
-rw-r--r--  arch/arm64/kvm/psci.c                      | 74
-rw-r--r--  arch/arm64/kvm/pvtime.c                    | 29
-rw-r--r--  arch/arm64/kvm/reset.c                     |  4
-rw-r--r--  arch/arm64/kvm/sys_regs.c                  | 19
-rw-r--r--  arch/arm64/kvm/trace_arm.h                 | 16
-rw-r--r--  arch/arm64/kvm/trace_handle_exit.h         |  6
-rw-r--r--  arch/arm64/kvm/vgic/vgic-mmio-v3.c         |  4
22 files changed, 330 insertions(+), 243 deletions(-)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 318c8f2df245..043756db8f6e 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -57,9 +57,6 @@ config KVM_ARM_PMU
 	  Adds support for a virtual Performance Monitoring Unit (PMU) in
 	  virtual machines.
 
-config KVM_INDIRECT_VECTORS
-	def_bool HARDEN_BRANCH_PREDICTOR || RANDOMIZE_BASE
-
 endif # KVM
 
 endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 691d21e4c717..acf9a993dfb6 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -206,6 +206,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		 */
 		r = 1;
 		break;
+	case KVM_CAP_STEAL_TIME:
+		r = kvm_arm_pvtime_supported();
+		break;
 	default:
 		r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
 		break;
@@ -1256,6 +1259,40 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 }
 
+static int kvm_map_vectors(void)
+{
+	/*
+	 * SV2  = ARM64_SPECTRE_V2
+	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
+	 *
+	 * !SV2 + !HEL2 -> use direct vectors
+	 *  SV2 + !HEL2 -> use hardened vectors in place
+	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
+	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
+	 */
+	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+	}
+
+	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+
+		/*
+		 * Always allocate a spare vector slot, as we don't
+		 * know yet which CPUs have a BP hardening slot that
+		 * we can reuse.
+		 */
+		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+		return create_hyp_exec_mappings(vect_pa, size,
+						&__kvm_bp_vect_base);
+	}
+
+	return 0;
+}
+
 static void cpu_init_hyp_mode(void)
 {
 	phys_addr_t pgd_ptr;
@@ -1292,7 +1329,7 @@ static void cpu_init_hyp_mode(void)
 	 * at EL2.
 	 */
 	if (this_cpu_has_cap(ARM64_SSBS) &&
-	    arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+	    arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) {
 		kvm_call_hyp_nvhe(__kvm_enable_ssbs);
 	}
 }
@@ -1549,10 +1586,6 @@ static int init_hyp_mode(void)
 		}
 	}
 
-	err = hyp_map_aux_data();
-	if (err)
-		kvm_err("Cannot map host auxiliary data: %d\n", err);
-
 	return 0;
 
 out_err:
@@ -1640,6 +1673,10 @@ int kvm_arch_init(void *opaque)
 		return -ENODEV;
 	}
 
+	if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE))
+		kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n" \
+			"Only trusted guests should be used on this system.\n");
+
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
 		if (ret < 0) {
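Note: the KVM_CAP_STEAL_TIME hunk above only advertises the capability; a userspace VMM discovers it through the standard KVM_CHECK_EXTENSION ioctl. A minimal userspace sketch (error handling omitted, fd plumbing illustrative):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int kvm_fd = open("/dev/kvm", O_RDWR);
        int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

        /* Returns 1 when kvm_arm_pvtime_supported() is true, else 0. */
        int steal = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_STEAL_TIME);

        printf("KVM_CAP_STEAL_TIME: %d\n", steal);
        return 0;
    }
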
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index fe6c7d79309d..5d690d60ccad 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -128,7 +128,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu)
 	switch (ESR_ELx_EC(esr)) {
 	case ESR_ELx_EC_WATCHPT_LOW:
 		run->debug.arch.far = vcpu->arch.fault.far_el2;
-		/* fall through */
+		fallthrough;
 	case ESR_ELx_EC_SOFTSTP_LOW:
 	case ESR_ELx_EC_BREAKPT_LOW:
 	case ESR_ELx_EC_BKPT32:
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index f54f0e89a71c..d898f0da5802 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -10,5 +10,4 @@ subdir-ccflags-y := -I$(incdir)			\
 		    -DDISABLE_BRANCH_PROFILING	\
 		    $(DISABLE_STACKLEAK_PLUGIN)
 
-obj-$(CONFIG_KVM) += vhe/ nvhe/
-obj-$(CONFIG_KVM_INDIRECT_VECTORS) += smccc_wa.o
+obj-$(CONFIG_KVM) += vhe/ nvhe/ smccc_wa.o
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index ee32a7743389..76e7eaf4675e 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -196,20 +196,23 @@ alternative_endif
 	// This is our single instruction exception window. A pending
 	// SError is guaranteed to occur at the earliest when we unmask
 	// it, and at the latest just after the ISB.
-	.global	abort_guest_exit_start
 abort_guest_exit_start:
 
 	isb
 
-	.global	abort_guest_exit_end
 abort_guest_exit_end:
 	msr	daifset, #4	// Mask aborts
+	ret
+
+	_kvm_extable	abort_guest_exit_start, 9997f
+	_kvm_extable	abort_guest_exit_end, 9997f
+9997:
+	msr	daifset, #4	// Mask aborts
+	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 
-	// If the exception took place, restore the EL1 exception
-	// context so that we can report some information.
-	// Merge the exception code with the SError pending bit.
-	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
+	// restore the EL1 exception context so that we can report some
+	// information. Merge the exception code with the SError pending bit.
 	msr	elr_el2, x2
 	msr	esr_el2, x3
 	msr	spsr_el2, x4
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 689fccbc9de7..7ea277b82967 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -15,6 +15,30 @@
 #include <asm/kvm_mmu.h>
 #include <asm/mmu.h>
 
+.macro save_caller_saved_regs_vect
+	/* x0 and x1 were saved in the vector entry */
+	stp x2, x3,   [sp, #-16]!
+	stp x4, x5,   [sp, #-16]!
+	stp x6, x7,   [sp, #-16]!
+	stp x8, x9,   [sp, #-16]!
+	stp x10, x11, [sp, #-16]!
+	stp x12, x13, [sp, #-16]!
+	stp x14, x15, [sp, #-16]!
+	stp x16, x17, [sp, #-16]!
+.endm
+
+.macro restore_caller_saved_regs_vect
+	ldp x16, x17, [sp], #16
+	ldp x14, x15, [sp], #16
+	ldp x12, x13, [sp], #16
+	ldp x10, x11, [sp], #16
+	ldp x8, x9,   [sp], #16
+	ldp x6, x7,   [sp], #16
+	ldp x4, x5,   [sp], #16
+	ldp x2, x3,   [sp], #16
+	ldp x0, x1,   [sp], #16
+.endm
+
 .text
 
 .macro do_el2_call
@@ -92,35 +116,6 @@ el1_hvc_guest:
 			  ARM_SMCCC_ARCH_WORKAROUND_2)
 	cbnz	w1, el1_trap
 
-#ifdef CONFIG_ARM64_SSBD
-alternative_cb	arm64_enable_wa2_handling
-	b	wa2_end
-alternative_cb_end
-	get_vcpu_ptr	x2, x0
-	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-	// Sanitize the argument and update the guest flags
-	ldr	x1, [sp, #8]		// Guest's x1
-	clz	w1, w1			// Murphy's device:
-	lsr	w1, w1, #5		// w1 = !!w1 without using
-	eor	w1, w1, #1		// the flags...
-	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
-	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-	/* Check that we actually need to perform the call */
-	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
-	cbz	x0, wa2_end
-
-	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
-	smc	#0
-
-	/* Don't leak data from the SMC call */
-	mov	x3, xzr
-wa2_end:
-	mov	x2, xzr
-	mov	x1, xzr
-#endif
-
 wa_epilogue:
 	mov	x0, xzr
 	add	sp, sp, #16
@@ -143,13 +138,19 @@ el1_error:
 	b	__guest_exit
 
 el2_sync:
-	/* Check for illegal exception return, otherwise panic */
+	/* Check for illegal exception return */
 	mrs	x0, spsr_el2
+	tbnz	x0, #20, 1f
 
-	/* if this was something else, then panic! */
-	tst	x0, #PSR_IL_BIT
-	b.eq	__hyp_panic
+	save_caller_saved_regs_vect
+	stp	x29, x30, [sp, #-16]!
+	bl	kvm_unexpected_el2_exception
+	ldp	x29, x30, [sp], #16
+	restore_caller_saved_regs_vect
 
+	eret
+
+1:
 	/* Let's attempt a recovery from the illegal exception return */
 	get_vcpu_ptr	x1, x0
 	mov	x0, #ARM_EXCEPTION_IL
@@ -157,27 +158,14 @@ el2_sync:
 
 el2_error:
-	ldp	x0, x1, [sp], #16
+	save_caller_saved_regs_vect
+	stp	x29, x30, [sp, #-16]!
+
+	bl	kvm_unexpected_el2_exception
+
+	ldp	x29, x30, [sp], #16
+	restore_caller_saved_regs_vect
 
-	/*
-	 * Only two possibilities:
-	 * 1) Either we come from the exit path, having just unmasked
-	 *    PSTATE.A: change the return code to an EL2 fault, and
-	 *    carry on, as we're already in a sane state to handle it.
-	 * 2) Or we come from anywhere else, and that's a bug: we panic.
-	 *
-	 * For (1), x0 contains the original return code and x1 doesn't
-	 * contain anything meaningful at that stage. We can reuse them
-	 * as temp registers.
-	 * For (2), who cares?
-	 */
-	mrs	x0, elr_el2
-	adr	x1, abort_guest_exit_start
-	cmp	x0, x1
-	adr	x1, abort_guest_exit_end
-	ccmp	x0, x1, #4, ne
-	b.ne	__hyp_panic
-	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 	eret
 	sb
@@ -271,7 +259,6 @@ SYM_CODE_START(__kvm_hyp_vector)
 	valid_vect	el1_error	// Error 32-bit EL1
 SYM_CODE_END(__kvm_hyp_vector)
 
-#ifdef CONFIG_KVM_INDIRECT_VECTORS
 .macro hyp_ventry
 	.align 7
 1:	esb
@@ -321,4 +308,3 @@ SYM_CODE_START(__bp_harden_hyp_vecs)
 1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
 	.org 1b
 SYM_CODE_END(__bp_harden_hyp_vecs)
-#endif
diff --git a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
index 0297dc63988c..5e28ea6aa097 100644
--- a/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/debug-sr.h
@@ -21,70 +21,70 @@
 #define save_debug(ptr,reg,nr)					\
 	switch (nr) {						\
 	case 15:	ptr[15] = read_debug(reg, 15);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 14:	ptr[14] = read_debug(reg, 14);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 13:	ptr[13] = read_debug(reg, 13);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 12:	ptr[12] = read_debug(reg, 12);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 11:	ptr[11] = read_debug(reg, 11);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 10:	ptr[10] = read_debug(reg, 10);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 9:		ptr[9] = read_debug(reg, 9);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 8:		ptr[8] = read_debug(reg, 8);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 7:		ptr[7] = read_debug(reg, 7);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 6:		ptr[6] = read_debug(reg, 6);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 5:		ptr[5] = read_debug(reg, 5);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 4:		ptr[4] = read_debug(reg, 4);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 3:		ptr[3] = read_debug(reg, 3);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 2:		ptr[2] = read_debug(reg, 2);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 1:		ptr[1] = read_debug(reg, 1);		\
-			/* Fall through */			\
+			fallthrough;				\
 	default:	ptr[0] = read_debug(reg, 0);		\
 	}
 
 #define restore_debug(ptr,reg,nr)				\
 	switch (nr) {						\
 	case 15:	write_debug(ptr[15], reg, 15);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 14:	write_debug(ptr[14], reg, 14);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 13:	write_debug(ptr[13], reg, 13);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 12:	write_debug(ptr[12], reg, 12);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 11:	write_debug(ptr[11], reg, 11);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 10:	write_debug(ptr[10], reg, 10);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 9:		write_debug(ptr[9], reg, 9);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 8:		write_debug(ptr[8], reg, 8);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 7:		write_debug(ptr[7], reg, 7);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 6:		write_debug(ptr[6], reg, 6);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 5:		write_debug(ptr[5], reg, 5);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 4:		write_debug(ptr[4], reg, 4);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 3:		write_debug(ptr[3], reg, 3);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 2:		write_debug(ptr[2], reg, 2);		\
-			/* Fall through */			\
+			fallthrough;				\
 	case 1:		write_debug(ptr[1], reg, 1);		\
-			/* Fall through */			\
+			fallthrough;				\
 	default:	write_debug(ptr[0], reg, 0);		\
 	}
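Note: these hunks swap the comment-based annotation for the kernel's `fallthrough;` pseudo-keyword, which compilers (not just static checkers) can verify against -Wimplicit-fallthrough. A standalone sketch of the idiom, with an approximation of the kernel's definition from <linux/compiler_attributes.h>:

    /* Standalone approximation of the kernel's fallthrough definition. */
    #if defined(__has_attribute) && __has_attribute(__fallthrough__)
    # define fallthrough __attribute__((__fallthrough__))
    #else
    # define fallthrough do {} while (0)  /* fallthrough */
    #endif

    static int accumulate(int n)
    {
        int score = 0;

        switch (n) {
        case 2:
            score += 10;
            fallthrough;    /* deliberate: case 2 also takes case 1's points */
        case 1:
            score += 1;
            break;
        default:
            break;
        }
        return score;
    }
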
diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h
index 426ef65601dd..d0f07e8cc3ff 100644
--- a/arch/arm64/kvm/hyp/include/hyp/switch.h
+++ b/arch/arm64/kvm/hyp/include/hyp/switch.h
@@ -17,6 +17,7 @@
 
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
+#include <asm/extable.h>
 #include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
@@ -29,6 +30,9 @@
 
 extern const char __hyp_panic_string[];
 
+extern struct exception_table_entry __start___kvm_ex_table;
+extern struct exception_table_entry __stop___kvm_ex_table;
+
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static inline bool update_fp_enabled(struct kvm_vcpu *vcpu)
 {
@@ -142,10 +146,10 @@ static inline bool __translate_far_to_hpfar(u64 far, u64 *hpfar)
 	 * saved the guest context yet, and we may return early...
 	 */
 	par = read_sysreg(par_el1);
-	asm volatile("at s1e1r, %0" : : "r" (far));
-	isb();
-
-	tmp = read_sysreg(par_el1);
+	if (!__kvm_at("s1e1r", far))
+		tmp = read_sysreg(par_el1);
+	else
+		tmp = SYS_PAR_EL1_F; /* back to the guest */
 	write_sysreg(par, par_el1);
 
 	if (unlikely(tmp & SYS_PAR_EL1_F))
@@ -445,7 +449,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
 			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
 			kvm_vcpu_dabt_isvalid(vcpu) &&
 			!kvm_vcpu_abt_issea(vcpu) &&
-			!kvm_vcpu_dabt_iss1tw(vcpu);
+			!kvm_vcpu_abt_iss1tw(vcpu);
 
 		if (valid) {
 			int ret = __vgic_v2_perform_cpuif_access(vcpu);
@@ -475,37 +479,31 @@ exit:
 	return false;
 }
 
-static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
+static inline void __kvm_unexpected_el2_exception(void)
 {
-	if (!cpus_have_final_cap(ARM64_SSBD))
-		return false;
-
-	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
-}
+	unsigned long addr, fixup;
+	struct kvm_cpu_context *host_ctxt;
+	struct exception_table_entry *entry, *end;
+	unsigned long elr_el2 = read_sysreg(elr_el2);
+
+	entry = hyp_symbol_addr(__start___kvm_ex_table);
+	end = hyp_symbol_addr(__stop___kvm_ex_table);
+	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
+
+	while (entry < end) {
+		addr = (unsigned long)&entry->insn + entry->insn;
+		fixup = (unsigned long)&entry->fixup + entry->fixup;
 
-static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
-	/*
-	 * The host runs with the workaround always present. If the
-	 * guest wants it disabled, so be it...
-	 */
-	if (__needs_ssbd_off(vcpu) &&
-	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
-		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
-#endif
-}
+		if (addr != elr_el2) {
+			entry++;
+			continue;
+		}
 
-static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_ARM64_SSBD
-	/*
-	 * If the guest has disabled the workaround, bring it back on.
-	 */
-	if (__needs_ssbd_off(vcpu) &&
-	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
-		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
-#endif
+		write_sysreg(fixup, elr_el2);
+		return;
+	}
+
+	hyp_panic(host_ctxt);
 }
 
 #endif /* __ARM64_KVM_HYP_SWITCH_H__ */
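Note: the fixup search above walks entries that store 32-bit offsets relative to the entries' own fields, so the table stays position-independent at EL2. A self-contained model of that relative-offset encoding (plain userspace C, not kernel code; the names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the layout of the kernel's relative exception table entry. */
    struct exception_table_entry {
        int32_t insn;   /* offset from &entry->insn to the faulting instruction */
        int32_t fixup;  /* offset from &entry->fixup to the fixup address */
    };

    static uintptr_t resolve(const int32_t *field)
    {
        /* Same arithmetic as the hyp code: field address plus stored offset. */
        return (uintptr_t)field + *field;
    }

    int main(void)
    {
        static char fault_site, fixup_site;
        struct exception_table_entry e = {
            .insn  = (int32_t)(&fault_site - (char *)&e.insn),
            .fixup = (int32_t)(&fixup_site - (char *)&e.fixup),
        };

        printf("match: %d\n",
               resolve(&e.insn) == (uintptr_t)&fault_site &&
               resolve(&e.fixup) == (uintptr_t)&fixup_site);
        return 0;
    }
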
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 341be2f2f312..8d3dd4f47924 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -202,8 +202,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
 	__debug_switch_to_guest(vcpu);
 
-	__set_guest_arch_workaround_state(vcpu);
-
 	do {
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu, host_ctxt);
@@ -211,8 +209,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));
 
-	__set_host_arch_workaround_state(vcpu);
-
 	__sysreg_save_state_nvhe(guest_ctxt);
 	__sysreg32_save_state(vcpu);
 	__timer_disable_traps(vcpu);
@@ -270,3 +266,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 	      read_sysreg(hpfar_el2), par, vcpu);
 	unreachable();
 }
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+	return __kvm_unexpected_el2_exception();
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 69eae608d670..b15d65a42042 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -31,7 +31,14 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 		isb();
 	}
 
+	/*
+	 * __load_guest_stage2() includes an ISB only when the AT
+	 * workaround is applied. Take care of the opposite condition,
+	 * ensuring that we always have an ISB, but not two ISBs back
+	 * to back.
+	 */
 	__load_guest_stage2(mmu);
+	asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
 static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 5a0073511efb..452f4cacd674 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -340,10 +340,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 	case 7:
 		cpu_if->vgic_ap0r[3] = __vgic_v3_read_ap0rn(3);
 		cpu_if->vgic_ap0r[2] = __vgic_v3_read_ap0rn(2);
-		/* Fall through */
+		fallthrough;
 	case 6:
 		cpu_if->vgic_ap0r[1] = __vgic_v3_read_ap0rn(1);
-		/* Fall through */
+		fallthrough;
 	default:
 		cpu_if->vgic_ap0r[0] = __vgic_v3_read_ap0rn(0);
 	}
@@ -352,10 +352,10 @@ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if)
 	case 7:
 		cpu_if->vgic_ap1r[3] = __vgic_v3_read_ap1rn(3);
 		cpu_if->vgic_ap1r[2] = __vgic_v3_read_ap1rn(2);
-		/* Fall through */
+		fallthrough;
 	case 6:
 		cpu_if->vgic_ap1r[1] = __vgic_v3_read_ap1rn(1);
-		/* Fall through */
+		fallthrough;
 	default:
 		cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
 	}
@@ -373,10 +373,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
 	case 7:
 		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[3], 3);
 		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[2], 2);
-		/* Fall through */
+		fallthrough;
 	case 6:
 		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[1], 1);
-		/* Fall through */
+		fallthrough;
 	default:
 		__vgic_v3_write_ap0rn(cpu_if->vgic_ap0r[0], 0);
 	}
@@ -385,10 +385,10 @@ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if)
 	case 7:
 		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[3], 3);
 		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[2], 2);
-		/* Fall through */
+		fallthrough;
 	case 6:
 		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[1], 1);
-		/* Fall through */
+		fallthrough;
 	default:
 		__vgic_v3_write_ap1rn(cpu_if->vgic_ap1r[0], 0);
 	}
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index c52d714e0d75..ecf67e678203 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -131,8 +131,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	sysreg_restore_guest_state_vhe(guest_ctxt);
 	__debug_switch_to_guest(vcpu);
 
-	__set_guest_arch_workaround_state(vcpu);
-
 	do {
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu, host_ctxt);
@@ -140,8 +138,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));
 
-	__set_host_arch_workaround_state(vcpu);
-
 	sysreg_save_guest_state_vhe(guest_ctxt);
 
 	__deactivate_traps(vcpu);
@@ -217,3 +213,8 @@ void __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 	__hyp_call_panic(spsr, elr, par, host_ctxt);
 	unreachable();
 }
+
+asmlinkage void kvm_unexpected_el2_exception(void)
+{
+	return __kvm_unexpected_el2_exception();
+}
diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c
index 550dfa3e53cd..9824025ccc5c 100644
--- a/arch/arm64/kvm/hypercalls.c
+++ b/arch/arm64/kvm/hypercalls.c
@@ -24,27 +24,36 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 		feature = smccc_get_arg1(vcpu);
 		switch (feature) {
 		case ARM_SMCCC_ARCH_WORKAROUND_1:
-			switch (kvm_arm_harden_branch_predictor()) {
-			case KVM_BP_HARDEN_UNKNOWN:
+			switch (arm64_get_spectre_v2_state()) {
+			case SPECTRE_VULNERABLE:
 				break;
-			case KVM_BP_HARDEN_WA_NEEDED:
+			case SPECTRE_MITIGATED:
 				val = SMCCC_RET_SUCCESS;
 				break;
-			case KVM_BP_HARDEN_NOT_REQUIRED:
+			case SPECTRE_UNAFFECTED:
 				val = SMCCC_RET_NOT_REQUIRED;
 				break;
 			}
 			break;
 		case ARM_SMCCC_ARCH_WORKAROUND_2:
-			switch (kvm_arm_have_ssbd()) {
-			case KVM_SSBD_FORCE_DISABLE:
-			case KVM_SSBD_UNKNOWN:
+			switch (arm64_get_spectre_v4_state()) {
+			case SPECTRE_VULNERABLE:
 				break;
-			case KVM_SSBD_KERNEL:
-				val = SMCCC_RET_SUCCESS;
-				break;
-			case KVM_SSBD_FORCE_ENABLE:
-			case KVM_SSBD_MITIGATED:
+			case SPECTRE_MITIGATED:
+				/*
+				 * SSBS everywhere: Indicate no firmware
+				 * support, as the SSBS support will be
+				 * indicated to the guest and the default is
+				 * safe.
+				 *
+				 * Otherwise, expose a permanent mitigation
+				 * to the guest, and hide SSBS so that the
+				 * guest stays protected.
+				 */
+				if (cpus_have_final_cap(ARM64_SSBS))
+					break;
+				fallthrough;
+			case SPECTRE_UNAFFECTED:
 				val = SMCCC_RET_NOT_REQUIRED;
 				break;
 			}
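Note: the hypercall handler above implements the hypervisor side of SMCCC workaround discovery. A guest probes it through SMCCC_ARCH_FEATURES; a hedged guest-side sketch using the kernel's SMCCC helpers (assumes a 1.1 conduit has been set up):

    #include <linux/arm-smccc.h>

    /* Hypothetical guest-side probe for the Spectre-v2 workaround call. */
    static int probe_wa1(void)
    {
        struct arm_smccc_res res;

        arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                             ARM_SMCCC_ARCH_WORKAROUND_1, &res);

        if ((int)res.a0 == SMCCC_RET_SUCCESS)
            return 1;   /* mitigation call available */
        if ((int)res.a0 == SMCCC_RET_NOT_REQUIRED)
            return 0;   /* CPU unaffected, no call needed */
        return -1;      /* not supported: treat as vulnerable */
    }

The three return values map directly onto the SPECTRE_MITIGATED, SPECTRE_UNAFFECTED, and SPECTRE_VULNERABLE cases in the hunk above.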
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 0121ef2c7c8d..3d26b47a1343 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -343,7 +343,8 @@ static void unmap_stage2_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
  * destroying the VM), otherwise another faulting VCPU may come in and mess
  * with things behind our backs.
  */
-static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size,
+				 bool may_block)
 {
 	struct kvm *kvm = mmu->kvm;
 	pgd_t *pgd;
@@ -369,11 +370,16 @@ static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
 		 * If the range is too large, release the kvm->mmu_lock
 		 * to prevent starvation and lockup detector warnings.
 		 */
-		if (next != end)
+		if (may_block && next != end)
 			cond_resched_lock(&kvm->mmu_lock);
 	} while (pgd++, addr = next, addr != end);
 }
 
+static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
+{
+	__unmap_stage2_range(mmu, start, size, true);
+}
+
 static void stage2_flush_ptes(struct kvm_s2_mmu *mmu, pmd_t *pmd,
 			      phys_addr_t addr, phys_addr_t end)
 {
@@ -1843,7 +1849,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
 
 	write_fault = kvm_is_write_fault(vcpu);
-	exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
+	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
 	VM_BUG_ON(write_fault && exec_fault);
 
 	if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
@@ -1871,6 +1877,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	    !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
 		force_pte = true;
 		vma_pagesize = PAGE_SIZE;
+		vma_shift = PAGE_SHIFT;
 	}
 
 	/*
@@ -1964,7 +1971,12 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	    (fault_status == FSC_PERM &&
 	     stage2_is_exec(mmu, fault_ipa, vma_pagesize));
 
-	if (vma_pagesize == PUD_SIZE) {
+	/*
+	 * If PUD_SIZE == PMD_SIZE, there is no real PUD level, and
+	 * all we have is a 2-level page table. Trying to map a PUD in
+	 * this case would be fatally wrong.
+	 */
+	if (PUD_SIZE != PMD_SIZE && vma_pagesize == PUD_SIZE) {
 		pud_t new_pud = kvm_pfn_pud(pfn, mem_type);
 
 		new_pud = kvm_pud_mkhuge(new_pud);
@@ -2119,7 +2131,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
+	if (kvm_vcpu_abt_iss1tw(vcpu)) {
 		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 		ret = 1;
 		goto out_unlock;
@@ -2208,18 +2220,21 @@ static int handle_hva_to_gpa(struct kvm *kvm,
 
 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
 {
-	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
+	unsigned flags = *(unsigned *)data;
+	bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
+
+	__unmap_stage2_range(&kvm->arch.mmu, gpa, size, may_block);
 	return 0;
 }
 
 int kvm_unmap_hva_range(struct kvm *kvm,
-			unsigned long start, unsigned long end)
+			unsigned long start, unsigned long end, unsigned flags)
 {
 	if (!kvm->arch.mmu.pgd)
 		return 0;
 
 	trace_kvm_unmap_hva_range(start, end);
-	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
 	return 0;
 }
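Note: the may_block plumbing mirrors the mmu_notifier contract: the unmap callback may only sleep (here by yielding the mmu_lock via cond_resched_lock()) when the caller flagged the range MMU_NOTIFIER_RANGE_BLOCKABLE. A hypothetical helper (not from this patch) condensing that decision, using the same stage-2 walker names as the file above:

    /* Tear down [addr, end), yielding the lock only when permitted. */
    static void walk_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end,
                           bool may_block)
    {
        phys_addr_t next;

        do {
            next = stage2_pgd_addr_end(kvm, addr, end);

            /* ... tear down the [addr, next) slice here ... */

            if (may_block && next != end)
                cond_resched_lock(&kvm->mmu_lock);
        } while (addr = next, addr != end);
    }
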
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index f0d0312c0a55..81916e360b1e 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -269,6 +269,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
 		kvm_pmu_release_perf_event(&pmu->pmc[i]);
+	irq_work_sync(&vcpu->arch.pmu.overflow_work);
 }
 
 u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
@@ -434,6 +435,22 @@ void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
 }
 
 /**
+ * When perf interrupt is an NMI, we cannot safely notify the vcpu corresponding
+ * to the event.
+ * This is why we need a callback to do it once outside of the NMI context.
+ */
+static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_pmu *pmu;
+
+	pmu = container_of(work, struct kvm_pmu, overflow_work);
+	vcpu = kvm_pmc_to_vcpu(pmu->pmc);
+
+	kvm_vcpu_kick(vcpu);
+}
+
+/**
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
 static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
@@ -465,7 +482,11 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 
 	if (kvm_pmu_overflow_status(vcpu)) {
 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
-		kvm_vcpu_kick(vcpu);
+
+		if (!in_nmi())
+			kvm_vcpu_kick(vcpu);
+		else
+			irq_work_queue(&vcpu->arch.pmu.overflow_work);
 	}
 
 	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
@@ -764,6 +785,9 @@ static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
 			return ret;
 	}
 
+	init_irq_work(&vcpu->arch.pmu.overflow_work,
+		      kvm_pmu_perf_overflow_notify_vcpu);
+
 	vcpu->arch.pmu.created = true;
 	return 0;
 }
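Note: the deferral above uses the generic irq_work facility, which is explicitly safe to queue from NMI context; the callback then runs in ordinary IRQ context where a vcpu kick is permitted. A minimal sketch of the same pattern (the `my_*` names are illustrative, not from this patch):

    #include <linux/init.h>
    #include <linux/irq_work.h>
    #include <linux/preempt.h>
    #include <linux/printk.h>

    static struct irq_work my_deferred_work;

    /* Runs later in IRQ context, where wakeups and kicks are allowed. */
    static void my_deferred_fn(struct irq_work *work)
    {
        pr_info("deferred out of NMI context\n");
    }

    static void my_overflow_handler(void)
    {
        if (in_nmi())
            irq_work_queue(&my_deferred_work);  /* NMI-safe enqueue */
        else
            my_deferred_fn(&my_deferred_work);  /* safe to run directly */
    }

    static int __init my_init(void)
    {
        init_irq_work(&my_deferred_work, my_deferred_fn);
        return 0;
    }

The matching irq_work_sync() in kvm_pmu_vcpu_destroy() is what guarantees no callback is still in flight when the vcpu is torn down.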
diff --git a/arch/arm64/kvm/psci.c b/arch/arm64/kvm/psci.c
index 83415e96b589..db4056ecccfd 100644
--- a/arch/arm64/kvm/psci.c
+++ b/arch/arm64/kvm/psci.c
@@ -425,27 +425,30 @@ static int get_kernel_wa_level(u64 regid)
 {
 	switch (regid) {
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
-		switch (kvm_arm_harden_branch_predictor()) {
-		case KVM_BP_HARDEN_UNKNOWN:
+		switch (arm64_get_spectre_v2_state()) {
+		case SPECTRE_VULNERABLE:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
-		case KVM_BP_HARDEN_WA_NEEDED:
+		case SPECTRE_MITIGATED:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
-		case KVM_BP_HARDEN_NOT_REQUIRED:
+		case SPECTRE_UNAFFECTED:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
 		}
 		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
-		switch (kvm_arm_have_ssbd()) {
-		case KVM_SSBD_FORCE_DISABLE:
-			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
-		case KVM_SSBD_KERNEL:
-			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
-		case KVM_SSBD_FORCE_ENABLE:
-		case KVM_SSBD_MITIGATED:
+		switch (arm64_get_spectre_v4_state()) {
+		case SPECTRE_MITIGATED:
+			/*
+			 * As for the hypercall discovery, we pretend we
+			 * don't have any FW mitigation if SSBS is there at
+			 * all times.
+			 */
+			if (cpus_have_final_cap(ARM64_SSBS))
+				return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
+			fallthrough;
+		case SPECTRE_UNAFFECTED:
 			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
-		case KVM_SSBD_UNKNOWN:
-		default:
-			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
+		case SPECTRE_VULNERABLE:
+			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
 		}
 	}
 
@@ -462,14 +465,8 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 		val = kvm_psci_version(vcpu, vcpu->kvm);
 		break;
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
-		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-		break;
 	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
 		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-
-		if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
-		    kvm_arm_get_vcpu_workaround_2_flag(vcpu))
-			val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
 		break;
 	default:
 		return -ENOENT;
@@ -527,34 +524,35 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
 			return -EINVAL;
 
-		wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;
-
-		if (get_kernel_wa_level(reg->id) < wa_level)
-			return -EINVAL;
-
 		/* The enabled bit must not be set unless the level is AVAIL. */
-		if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
-		    wa_level != val)
+		if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
+		    (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
 			return -EINVAL;
 
-		/* Are we finished or do we need to check the enable bit ? */
-		if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
-			return 0;
-
 		/*
-		 * If this kernel supports the workaround to be switched on
-		 * or off, make sure it matches the requested setting.
+		 * Map all the possible incoming states to the only two we
+		 * really want to deal with.
 		 */
-		switch (wa_level) {
-		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
-			kvm_arm_set_vcpu_workaround_2_flag(vcpu,
-			    val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
+		switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
+			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
 			break;
+		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
 		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
-			kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
+			wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
 			break;
+		default:
+			return -EINVAL;
 		}
 
+		/*
+		 * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
+		 * other way around.
+		 */
+		if (get_kernel_wa_level(reg->id) < wa_level)
+			return -EINVAL;
+
 		return 0;
 	default:
 		return -ENOENT;
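Note: these firmware pseudo-registers exist so a VMM can snapshot and restore mitigation state across migration. A minimal userspace sketch of reading one with KVM_GET_ONE_REG (error handling omitted; the register IDs and mask come from the arm64 uapi headers):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Read the WORKAROUND_2 firmware pseudo-register from a vCPU fd. */
    uint64_t read_wa2_level(int vcpu_fd)
    {
        uint64_t val = 0;
        struct kvm_one_reg reg = {
            .id   = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2,
            .addr = (uint64_t)(unsigned long)&val,
        };

        ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        return val & KVM_REG_FEATURE_LEVEL_MASK;
    }

With this patch the ENABLED bit is no longer reported on reads; setting it is only accepted together with the AVAIL level, as the set_fw_reg hunk enforces.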
diff --git a/arch/arm64/kvm/pvtime.c b/arch/arm64/kvm/pvtime.c
index f7b52ce1557e..920ac43077ad 100644
--- a/arch/arm64/kvm/pvtime.c
+++ b/arch/arm64/kvm/pvtime.c
@@ -13,25 +13,22 @@
 void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-	u64 steal;
-	__le64 steal_le;
-	u64 offset;
-	int idx;
 	u64 base = vcpu->arch.steal.base;
+	u64 last_steal = vcpu->arch.steal.last_steal;
+	u64 offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
+	u64 steal = 0;
+	int idx;
 
 	if (base == GPA_INVALID)
 		return;
 
-	/* Let's do the local bookkeeping */
-	steal = vcpu->arch.steal.steal;
-	steal += current->sched_info.run_delay - vcpu->arch.steal.last_steal;
-	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
-	vcpu->arch.steal.steal = steal;
-
-	steal_le = cpu_to_le64(steal);
 	idx = srcu_read_lock(&kvm->srcu);
-	offset = offsetof(struct pvclock_vcpu_stolen_time, stolen_time);
-	kvm_put_guest(kvm, base + offset, steal_le, u64);
+	if (!kvm_get_guest(kvm, base + offset, steal)) {
+		steal = le64_to_cpu(steal);
+		vcpu->arch.steal.last_steal = READ_ONCE(current->sched_info.run_delay);
+		steal += vcpu->arch.steal.last_steal - last_steal;
+		kvm_put_guest(kvm, base + offset, cpu_to_le64(steal));
+	}
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
@@ -43,7 +40,8 @@ long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
 	switch (feature) {
 	case ARM_SMCCC_HV_PV_TIME_FEATURES:
 	case ARM_SMCCC_HV_PV_TIME_ST:
-		val = SMCCC_RET_SUCCESS;
+		if (vcpu->arch.steal.base != GPA_INVALID)
+			val = SMCCC_RET_SUCCESS;
 		break;
 	}
 
@@ -64,7 +62,6 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
 	 * Start counting stolen time from the time the guest requests
 	 * the feature enabled.
 	 */
-	vcpu->arch.steal.steal = 0;
 	vcpu->arch.steal.last_steal = current->sched_info.run_delay;
 
 	idx = srcu_read_lock(&kvm->srcu);
@@ -74,7 +71,7 @@ gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
 	return base;
 }
 
-static bool kvm_arm_pvtime_supported(void)
+bool kvm_arm_pvtime_supported(void)
 {
 	return !!sched_info_on();
 }
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index ee33875c5c2a..f6e8b4a75cbb 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -319,10 +319,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
 		vcpu->arch.reset_state.reset = false;
 	}
 
-	/* Default workaround setup is enabled (if supported) */
-	if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
-		vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-
 	/* Reset timer */
 	ret = kvm_timer_vcpu_reset(vcpu);
 out:
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 077293b5115f..9ca270603980 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1131,6 +1131,11 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
 		if (!vcpu_has_sve(vcpu))
 			val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
 		val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
+		if (!(val & (0xfUL << ID_AA64PFR0_CSV2_SHIFT)) &&
+		    arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
+			val |= (1UL << ID_AA64PFR0_CSV2_SHIFT);
+	} else if (id == SYS_ID_AA64PFR1_EL1) {
+		val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
 	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
 		val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
 			 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
@@ -1382,6 +1387,13 @@ static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 	return true;
 }
 
+static bool access_mte_regs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			    const struct sys_reg_desc *r)
+{
+	kvm_inject_undefined(vcpu);
+	return false;
+}
+
 /* sys_reg_desc initialiser for known cpufeature ID registers */
 #define ID_SANITISED(name) {			\
 	SYS_DESC(SYS_##name),			\
@@ -1547,6 +1559,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
 	{ SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
 	{ SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
+
+	{ SYS_DESC(SYS_RGSR_EL1), access_mte_regs },
+	{ SYS_DESC(SYS_GCR_EL1), access_mte_regs },
+
 	{ SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
 	{ SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
 	{ SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
@@ -1571,6 +1587,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
 	{ SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
 
+	{ SYS_DESC(SYS_TFSR_EL1), access_mte_regs },
+	{ SYS_DESC(SYS_TFSRE0_EL1), access_mte_regs },
+
 	{ SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
 	{ SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
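Note: the sys_regs.c hunks shape what the guest sees in its ID registers: CSV2 is advertised when the host is genuinely unaffected by Spectre-v2, and the MTE field is masked out until MTE is virtualisable. A standalone illustration of that 4-bit-field arithmetic (shift values mirror ID_AA64PFR0_CSV2_SHIFT and ID_AA64PFR1_MTE_SHIFT at the time of this patch):

    #include <stdint.h>
    #include <stdio.h>

    #define CSV2_SHIFT 56   /* ID_AA64PFR0_EL1.CSV2 */
    #define MTE_SHIFT   8   /* ID_AA64PFR1_EL1.MTE */

    int main(void)
    {
        uint64_t pfr0 = 0, pfr1 = 0xf00;

        /* Advertise CSV2=1 when the field is clear but the host is safe... */
        if (!(pfr0 & (0xfULL << CSV2_SHIFT)))
            pfr0 |= 1ULL << CSV2_SHIFT;

        /* ...and hide MTE entirely by clearing its 4-bit field. */
        pfr1 &= ~(0xfULL << MTE_SHIFT);

        printf("pfr0=%#llx pfr1=%#llx\n",
               (unsigned long long)pfr0, (unsigned long long)pfr1);
        return 0;
    }
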
diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h
index 4691053c5ee4..ff0444352bba 100644
--- a/arch/arm64/kvm/trace_arm.h
+++ b/arch/arm64/kvm/trace_arm.h
@@ -23,7 +23,7 @@ TRACE_EVENT(kvm_entry,
 		__entry->vcpu_pc	= vcpu_pc;
 	),
 
-	TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+	TP_printk("PC: 0x%016lx", __entry->vcpu_pc)
 );
 
 TRACE_EVENT(kvm_exit,
@@ -42,7 +42,7 @@ TRACE_EVENT(kvm_exit,
 		__entry->vcpu_pc	= vcpu_pc;
 	),
 
-	TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%08lx",
+	TP_printk("%s: HSR_EC: 0x%04x (%s), PC: 0x%016lx",
 		  __print_symbolic(__entry->ret, kvm_arm_exception_type),
 		  __entry->esr_ec,
 		  __print_symbolic(__entry->esr_ec, kvm_arm_exception_class),
@@ -69,7 +69,7 @@ TRACE_EVENT(kvm_guest_fault,
 		__entry->ipa		= ipa;
 	),
 
-	TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
+	TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#016lx",
 		  __entry->ipa, __entry->hsr,
 		  __entry->hxfar, __entry->vcpu_pc)
 );
@@ -131,7 +131,7 @@ TRACE_EVENT(kvm_mmio_emulate,
 		__entry->cpsr		= cpsr;
 	),
 
-	TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
+	TP_printk("Emulate MMIO at: 0x%016lx (instr: %08lx, cpsr: %08lx)",
 		  __entry->vcpu_pc, __entry->instr, __entry->cpsr)
 );
 
@@ -149,7 +149,7 @@ TRACE_EVENT(kvm_unmap_hva_range,
 		__entry->end		= end;
 	),
 
-	TP_printk("mmu notifier unmap range: %#08lx -- %#08lx",
+	TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
 		  __entry->start, __entry->end)
 );
 
@@ -165,7 +165,7 @@ TRACE_EVENT(kvm_set_spte_hva,
 		__entry->hva		= hva;
 	),
 
-	TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva)
+	TP_printk("mmu notifier set pte hva: %#016lx", __entry->hva)
 );
 
 TRACE_EVENT(kvm_age_hva,
@@ -182,7 +182,7 @@ TRACE_EVENT(kvm_age_hva,
 		__entry->end		= end;
 	),
 
-	TP_printk("mmu notifier age hva: %#08lx -- %#08lx",
+	TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
 		  __entry->start, __entry->end)
 );
 
@@ -198,7 +198,7 @@ TRACE_EVENT(kvm_test_age_hva,
 		__entry->hva		= hva;
 	),
 
-	TP_printk("mmu notifier test age hva: %#08lx", __entry->hva)
+	TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
 );
 
 TRACE_EVENT(kvm_set_way_flush,
diff --git a/arch/arm64/kvm/trace_handle_exit.h b/arch/arm64/kvm/trace_handle_exit.h
index 2c56d1e0f5bd..8d78acc4fba7 100644
--- a/arch/arm64/kvm/trace_handle_exit.h
+++ b/arch/arm64/kvm/trace_handle_exit.h
@@ -22,7 +22,7 @@ TRACE_EVENT(kvm_wfx_arm64,
 		__entry->is_wfe  = is_wfe;
 	),
 
-	TP_printk("guest executed wf%c at: 0x%08lx",
+	TP_printk("guest executed wf%c at: 0x%016lx",
 		  __entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
 );
 
@@ -42,7 +42,7 @@ TRACE_EVENT(kvm_hvc_arm64,
 		__entry->imm = imm;
 	),
 
-	TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx)",
+	TP_printk("HVC at 0x%016lx (r0: 0x%016lx, imm: 0x%lx)",
 		  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
@@ -135,7 +135,7 @@ TRACE_EVENT(trap_reg,
 		__entry->write_value = write_value;
 	),
 
-	TP_printk("%s %s reg %d (0x%08llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
+	TP_printk("%s %s reg %d (0x%016llx)", __entry->fn, __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
 );
 
 TRACE_EVENT(kvm_handle_sys_reg,
diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
index 5c786b915cd3..52d6f24f65dc 100644
--- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c
@@ -1001,8 +1001,8 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 
 		/*
-		 * An access targetting Group0 SGIs can only generate
-		 * those, while an access targetting Group1 SGIs can
+		 * An access targeting Group0 SGIs can only generate
+		 * those, while an access targeting Group1 SGIs can
 		 * generate interrupts of either group.
 		 */
 		if (!irq->group || allow_group1) {