From 35a2491a624af1fa7ab6990639f5246cd5f12592 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 1 Feb 2016 17:54:35 +0000 Subject: arm/arm64: KVM: Add hook for C-based stage2 init As we're about to move the stage2 init to C code, introduce some C hooks that will later be populated with arch-specific implementations. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 4 ++++ arch/arm/kvm/arm.c | 1 + 2 files changed, 5 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index f9f27792d8ed..f1e86f1eb2e5 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -220,6 +220,10 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr); } +static inline void __cpu_init_stage2(void) +{ +} + static inline int kvm_arch_dev_ioctl_check_extension(long ext) { return 0; diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index dda1959f0dde..6b76e0152e58 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -985,6 +985,7 @@ static void cpu_init_hyp_mode(void *dummy) vector_ptr = (unsigned long)__kvm_hyp_vector; __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr); + __cpu_init_stage2(); kvm_arm_init_debug(); } -- cgit v1.2.3 From 1a61ae7af4d65ee311a737d550da6cf92a3aea4c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 2 Jan 2016 13:57:18 +0000 Subject: ARM: KVM: Move the HYP code to its own section In order to be able to spread the HYP code into multiple compilation units, adopt a layout similar to that of arm64: - the HYP text is emitted in its own section (.hyp.text) - two linker-generated symbols are used to identify the boundaries of that section No functional change. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 6 ++++-- arch/arm/include/asm/virt.h | 4 ++++ arch/arm/kernel/vmlinux.lds.S | 6 ++++++ arch/arm/kvm/interrupts.S | 13 +++++-------- 4 files changed, 19 insertions(+), 10 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 194c91b610ff..fa2fd253974f 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -19,6 +19,8 @@ #ifndef __ARM_KVM_ASM_H__ #define __ARM_KVM_ASM_H__ +#include + /* 0 is reserved as an invalid value. 
*/ #define c0_MPIDR 1 /* MultiProcessor ID Register */ #define c0_CSSELR 2 /* Cache Size Selection Register */ @@ -91,8 +93,8 @@ extern char __kvm_hyp_exit_end[]; extern char __kvm_hyp_vector[]; -extern char __kvm_hyp_code_start[]; -extern char __kvm_hyp_code_end[]; +#define __kvm_hyp_code_start __hyp_text_start +#define __kvm_hyp_code_end __hyp_text_end extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 4371f45c5784..5fdbfea6defb 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -74,6 +74,10 @@ static inline bool is_hyp_mode_mismatched(void) { return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH); } + +/* The section containing the hypervisor text */ +extern char __hyp_text_start[]; +extern char __hyp_text_end[]; #endif #endif /* __ASSEMBLY__ */ diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 8b60fde5ce48..b4139cbbbdd9 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -18,6 +18,11 @@ *(.proc.info.init) \ VMLINUX_SYMBOL(__proc_info_end) = .; +#define HYPERVISOR_TEXT \ + VMLINUX_SYMBOL(__hyp_text_start) = .; \ + *(.hyp.text) \ + VMLINUX_SYMBOL(__hyp_text_end) = .; + #define IDMAP_TEXT \ ALIGN_FUNCTION(); \ VMLINUX_SYMBOL(__idmap_text_start) = .; \ @@ -108,6 +113,7 @@ SECTIONS TEXT_TEXT SCHED_TEXT LOCK_TEXT + HYPERVISOR_TEXT KPROBES_TEXT *(.gnu.warning) *(.glue_7) diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 900ef6dd8f72..9d9cb71df449 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -28,9 +28,7 @@ #include "interrupts_head.S" .text - -__kvm_hyp_code_start: - .globl __kvm_hyp_code_start + .pushsection .hyp.text, "ax" /******************************************************************** * Flush per-VMID TLBs @@ -314,8 +312,6 @@ THUMB( orr r2, r2, #PSR_T_BIT ) eret .endm - .text - .align 5 __kvm_hyp_vector: .globl __kvm_hyp_vector @@ -511,10 +507,9 @@ hyp_fiq: .ltorg -__kvm_hyp_code_end: - .globl __kvm_hyp_code_end + .popsection - .section ".rodata" + .pushsection ".rodata" und_die_str: .ascii "unexpected undefined exception in Hyp mode at: %#08x\n" @@ -524,3 +519,5 @@ dabt_die_str: .ascii "unexpected data abort in Hyp mode at: %#08x\n" svc_die_str: .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x\n" + + .popsection -- cgit v1.2.3 From 42428525a9eefea9dda68de684381ce9f3dc4266 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 2 Jan 2016 14:04:48 +0000 Subject: ARM: KVM: Remove __kvm_hyp_code_start/__kvm_hyp_code_end Now that we've unified the way we refer to the HYP text between arm and arm64, drop __kvm_hyp_code_start/end, and just use the __hyp_text_start/end symbols. 
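Editor's note (illustration only, not part of the original commit): with the aliases gone, callers use the linker-generated section boundaries directly; create_hyp_mappings() in the diff below is the real in-tree consumer. A minimal sketch of the kind of helper this enables, where hyp_text_size() is a hypothetical name introduced here for illustration:

	/* Sketch only: boundaries of .hyp.text, published by the linker script. */
	extern char __hyp_text_start[];
	extern char __hyp_text_end[];

	/* Hypothetical helper: size of the HYP text, e.g. for mapping sanity checks. */
	static inline unsigned long hyp_text_size(void)
	{
		return (unsigned long)(__hyp_text_end - __hyp_text_start);
	}
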
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 3 --- arch/arm/kvm/arm.c | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index fa2fd253974f..4841225d10ea 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -93,9 +93,6 @@ extern char __kvm_hyp_exit_end[]; extern char __kvm_hyp_vector[]; -#define __kvm_hyp_code_start __hyp_text_start -#define __kvm_hyp_code_end __hyp_text_end - extern void __kvm_flush_vm_context(void); extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 6b76e0152e58..fcf6c130c986 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -1075,7 +1075,7 @@ static int init_hyp_mode(void) /* * Map the Hyp-code called directly from the host */ - err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end); + err = create_hyp_mappings(__hyp_text_start, __hyp_text_end); if (err) { kvm_err("Cannot map world-switch code\n"); goto out_free_mappings; -- cgit v1.2.3 From 0ca5565df8ef7534c0d85ec87e6c74f8ebe86e88 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 3 Jan 2016 11:01:49 +0000 Subject: ARM: KVM: Move VFP registers to a CPU context structure In order to turn the WS code into something that looks a bit more like the arm64 version, move the VFP registers into a CPU context container for both the host and the guest. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 11 +++++++---- arch/arm/kernel/asm-offsets.c | 5 +++-- arch/arm/kvm/coproc.c | 20 ++++++++++---------- arch/arm/kvm/interrupts.S | 10 ++++++---- 4 files changed, 26 insertions(+), 20 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index f1e86f1eb2e5..b64ac8e4adaa 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -88,9 +88,15 @@ struct kvm_vcpu_fault_info { u32 hyp_pc; /* PC when exception was taken from Hyp mode */ }; -typedef struct vfp_hard_struct kvm_cpu_context_t; +struct kvm_cpu_context { + struct vfp_hard_struct vfp; +}; + +typedef struct kvm_cpu_context kvm_cpu_context_t; struct kvm_vcpu_arch { + struct kvm_cpu_context ctxt; + struct kvm_regs regs; int target; /* Processor target */ @@ -111,9 +117,6 @@ struct kvm_vcpu_arch { /* Exception Information */ struct kvm_vcpu_fault_info fault; - /* Floating point registers (VFP and Advanced SIMD/NEON) */ - struct vfp_hard_struct vfp_guest; - /* Host FP context */ kvm_cpu_context_t *host_cpu_context; diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 871b8267d211..346bfca29720 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -173,8 +173,9 @@ int main(void) DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); - DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest)); - DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.host_cpu_context)); + DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt)); + DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); + DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); DEFINE(VCPU_USR_REGS, 
offsetof(struct kvm_vcpu, arch.regs.usr_regs)); DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index f3d88dc388bc..1a643f38031d 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -901,7 +901,7 @@ static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) if (vfpid < num_fp_regs()) { if (KVM_REG_SIZE(id) != 8) return -ENOENT; - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid], + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid], id); } @@ -911,13 +911,13 @@ static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) switch (vfpid) { case KVM_REG_ARM_VFP_FPEXC: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id); + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id); case KVM_REG_ARM_VFP_FPSCR: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id); + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id); case KVM_REG_ARM_VFP_FPINST: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id); + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id); case KVM_REG_ARM_VFP_FPINST2: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id); + return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id); case KVM_REG_ARM_VFP_MVFR0: val = fmrx(MVFR0); return reg_to_user(uaddr, &val, id); @@ -945,7 +945,7 @@ static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) if (vfpid < num_fp_regs()) { if (KVM_REG_SIZE(id) != 8) return -ENOENT; - return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid], + return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid], uaddr, id); } @@ -955,13 +955,13 @@ static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) switch (vfpid) { case KVM_REG_ARM_VFP_FPEXC: - return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id); + return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id); case KVM_REG_ARM_VFP_FPSCR: - return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id); + return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id); case KVM_REG_ARM_VFP_FPINST: - return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id); + return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id); case KVM_REG_ARM_VFP_FPINST2: - return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id); + return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id); /* These are invariant. 
*/ case KVM_REG_ARM_VFP_MVFR0: if (reg_from_user(&val, uaddr, id)) diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 9d9cb71df449..7bfb28936914 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -172,10 +172,11 @@ __kvm_vcpu_return: #ifdef CONFIG_VFPv3 @ Switch VFP/NEON hardware state to the host's - add r7, vcpu, #VCPU_VFP_GUEST + add r7, vcpu, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) store_vfp_state r7 - add r7, vcpu, #VCPU_VFP_HOST + add r7, vcpu, #VCPU_HOST_CTXT ldr r7, [r7] + add r7, r7, #CPU_CTXT_VFP restore_vfp_state r7 after_vfp_restore: @@ -482,10 +483,11 @@ switch_to_guest_vfp: set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11)) @ Switch VFP/NEON hardware state to the guest's - add r7, r0, #VCPU_VFP_HOST + add r7, r0, #VCPU_HOST_CTXT ldr r7, [r7] + add r7, r7, #CPU_CTXT_VFP store_vfp_state r7 - add r7, r0, #VCPU_VFP_GUEST + add r7, r0, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) restore_vfp_state r7 pop {r3-r7} -- cgit v1.2.3 From fb32a52a1d4487f3ac5b7ccb659d0beb11ec504f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 3 Jan 2016 11:26:01 +0000 Subject: ARM: KVM: Move CP15 array into the CPU context structure Continuing our rework of the CPU context, we now move the CP15 array into the CPU context structure. As this causes quite a bit of churn, we introduce the vcpu_cp15() macro that abstracts the location of the actual array. This will probably help next time we have to revisit that code. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 2 +- arch/arm/include/asm/kvm_host.h | 6 +++--- arch/arm/include/asm/kvm_mmu.h | 2 +- arch/arm/kernel/asm-offsets.c | 2 +- arch/arm/kvm/coproc.c | 32 ++++++++++++++++--------------- arch/arm/kvm/coproc.h | 16 ++++++++-------- arch/arm/kvm/emulate.c | 22 +++++++++++----------- arch/arm/kvm/interrupts_head.S | 3 ++- 8 files changed, 43 insertions(+), 42 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 3095df091ff8..32bb52a489d0 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -192,7 +192,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu) static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) { - return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK; + return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK; } static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index b64ac8e4adaa..4203701cc7f4 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -90,6 +90,7 @@ struct kvm_vcpu_fault_info { struct kvm_cpu_context { struct vfp_hard_struct vfp; + u32 cp15[NR_CP15_REGS]; }; typedef struct kvm_cpu_context kvm_cpu_context_t; @@ -102,9 +103,6 @@ struct kvm_vcpu_arch { int target; /* Processor target */ DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); - /* System control coprocessor (cp15) */ - u32 cp15[NR_CP15_REGS]; - /* The CPU type we expose to the VM */ u32 midr; @@ -161,6 +159,8 @@ struct kvm_vcpu_stat { u64 exits; }; +#define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r] + int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init); unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index a520b7987a29..da44be9db4fa 100644 --- a/arch/arm/include/asm/kvm_mmu.h 
+++ b/arch/arm/include/asm/kvm_mmu.h @@ -179,7 +179,7 @@ struct kvm; static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) { - return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101; + return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101; } static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 346bfca29720..43f8b01072c1 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -172,10 +172,10 @@ int main(void) #ifdef CONFIG_KVM_ARM_HOST DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); - DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); + DEFINE(CPU_CTXT_CP15, offsetof(struct kvm_cpu_context, cp15)); DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 1a643f38031d..e3e86c4cfed2 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c @@ -54,8 +54,8 @@ static inline void vcpu_cp15_reg64_set(struct kvm_vcpu *vcpu, const struct coproc_reg *r, u64 val) { - vcpu->arch.cp15[r->reg] = val & 0xffffffff; - vcpu->arch.cp15[r->reg + 1] = val >> 32; + vcpu_cp15(vcpu, r->reg) = val & 0xffffffff; + vcpu_cp15(vcpu, r->reg + 1) = val >> 32; } static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, @@ -63,9 +63,9 @@ static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu, { u64 val; - val = vcpu->arch.cp15[r->reg + 1]; + val = vcpu_cp15(vcpu, r->reg + 1); val = val << 32; - val = val | vcpu->arch.cp15[r->reg]; + val = val | vcpu_cp15(vcpu, r->reg); return val; } @@ -104,7 +104,7 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) * vcpu_id, but we read the 'U' bit from the underlying * hardware directly. 
*/ - vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) | + vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) | ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) | (vcpu->vcpu_id & 3)); } @@ -117,7 +117,7 @@ static bool access_actlr(struct kvm_vcpu *vcpu, if (p->is_write) return ignore_write(vcpu, p); - *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; + *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR); return true; } @@ -139,7 +139,7 @@ static bool access_l2ctlr(struct kvm_vcpu *vcpu, if (p->is_write) return ignore_write(vcpu, p); - *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; + *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR); return true; } @@ -156,7 +156,7 @@ static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) ncores = min(ncores, 3U); l2ctlr |= (ncores & 3) << 24; - vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; + vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr; } static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) @@ -171,7 +171,7 @@ static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) else actlr &= ~(1U << 6); - vcpu->arch.cp15[c1_ACTLR] = actlr; + vcpu_cp15(vcpu, c1_ACTLR) = actlr; } /* @@ -218,9 +218,9 @@ bool access_vm_reg(struct kvm_vcpu *vcpu, BUG_ON(!p->is_write); - vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1); + vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1); if (p->is_64bit) - vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2); + vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2); kvm_toggle_cache(vcpu, was_enabled); return true; @@ -1030,7 +1030,7 @@ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) val = vcpu_cp15_reg64_get(vcpu, r); ret = reg_to_user(uaddr, &val, reg->id); } else if (KVM_REG_SIZE(reg->id) == 4) { - ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); + ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id); } return ret; @@ -1060,7 +1060,7 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) if (!ret) vcpu_cp15_reg64_set(vcpu, r, val); } else if (KVM_REG_SIZE(reg->id) == 4) { - ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); + ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id); } return ret; @@ -1248,7 +1248,7 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) const struct coproc_reg *table; /* Catch someone adding a register without putting in reset entry. */ - memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15)); + memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15)); /* Generic chip reset first (so target could override). */ reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); @@ -1257,6 +1257,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) reset_coproc_regs(vcpu, table, num); for (num = 1; num < NR_CP15_REGS; num++) - if (vcpu->arch.cp15[num] == 0x42424242) - panic("Didn't reset vcpu->arch.cp15[%zi]", num); + if (vcpu_cp15(vcpu, num) == 0x42424242) + panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); } diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index 88d24a3a9778..27351323871d 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h @@ -47,7 +47,7 @@ struct coproc_reg { /* Initialization for vcpu. */ void (*reset)(struct kvm_vcpu *, const struct coproc_reg *); - /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */ + /* Index into vcpu_cp15(vcpu, ...), or 0 if we don't need to save it. 
*/ unsigned long reg; /* Value (usually reset value) */ @@ -104,25 +104,25 @@ static inline void reset_unknown(struct kvm_vcpu *vcpu, const struct coproc_reg *r) { BUG_ON(!r->reg); - BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); - vcpu->arch.cp15[r->reg] = 0xdecafbad; + BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); + vcpu_cp15(vcpu, r->reg) = 0xdecafbad; } static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r) { BUG_ON(!r->reg); - BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); - vcpu->arch.cp15[r->reg] = r->val; + BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); + vcpu_cp15(vcpu, r->reg) = r->val; } static inline void reset_unknown64(struct kvm_vcpu *vcpu, const struct coproc_reg *r) { BUG_ON(!r->reg); - BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15)); + BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15)); - vcpu->arch.cp15[r->reg] = 0xdecafbad; - vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee; + vcpu_cp15(vcpu, r->reg) = 0xdecafbad; + vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee; } static inline int cmp_reg(const struct coproc_reg *i1, diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c index dc99159857b4..ee161b1c66da 100644 --- a/arch/arm/kvm/emulate.c +++ b/arch/arm/kvm/emulate.c @@ -266,8 +266,8 @@ void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) static u32 exc_vector_base(struct kvm_vcpu *vcpu) { - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; - u32 vbar = vcpu->arch.cp15[c12_VBAR]; + u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); + u32 vbar = vcpu_cp15(vcpu, c12_VBAR); if (sctlr & SCTLR_V) return 0xffff0000; @@ -282,7 +282,7 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu) static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode) { unsigned long cpsr = *vcpu_cpsr(vcpu); - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; + u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR); *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode; @@ -357,22 +357,22 @@ static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) if (is_pabt) { /* Set IFAR and IFSR */ - vcpu->arch.cp15[c6_IFAR] = addr; - is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); + vcpu_cp15(vcpu, c6_IFAR) = addr; + is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); /* Always give debug fault for now - should give guest a clue */ if (is_lpae) - vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22; + vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22; else - vcpu->arch.cp15[c5_IFSR] = 2; + vcpu_cp15(vcpu, c5_IFSR) = 2; } else { /* !iabt */ /* Set DFAR and DFSR */ - vcpu->arch.cp15[c6_DFAR] = addr; - is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); + vcpu_cp15(vcpu, c6_DFAR) = addr; + is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31); /* Always give debug fault for now - should give guest a clue */ if (is_lpae) - vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22; + vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22; else - vcpu->arch.cp15[c5_DFSR] = 2; + vcpu_cp15(vcpu, c5_DFSR) = 2; } } diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index 51a59504bef4..b9d953158877 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S @@ -4,7 +4,8 @@ #define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) #define VCPU_USR_SP (VCPU_USR_REG(13)) #define VCPU_USR_LR (VCPU_USR_REG(14)) -#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4)) +#define VCPU_CP15_BASE (VCPU_GUEST_CTXT + CPU_CTXT_CP15) +#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15_BASE + (_cp15_reg_idx * 4)) /* * Many of these macros need to access the VCPU structure, which is always -- cgit v1.2.3 From 
c2a8dab507ca6f8990c12372052efc830f51dd3f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 3 Jan 2016 11:26:01 +0000 Subject: ARM: KVM: Move GP registers into the CPU context structure Continuing our rework of the CPU context, we now move the GP registers into the CPU context structure. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 8 ++++---- arch/arm/include/asm/kvm_host.h | 3 +-- arch/arm/kernel/asm-offsets.c | 18 +++++++++--------- arch/arm/kvm/emulate.c | 12 ++++++------ arch/arm/kvm/guest.c | 4 ++-- arch/arm/kvm/interrupts_head.S | 11 +++++++++++ arch/arm/kvm/reset.c | 2 +- 7 files changed, 34 insertions(+), 24 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 32bb52a489d0..f710616ccadc 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -68,12 +68,12 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu) { - return &vcpu->arch.regs.usr_regs.ARM_pc; + return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc; } static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu) { - return &vcpu->arch.regs.usr_regs.ARM_cpsr; + return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr; } static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) @@ -83,13 +83,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) static inline bool mode_has_spsr(struct kvm_vcpu *vcpu) { - unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; + unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK; return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE); } static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) { - unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; + unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK; return cpsr_mode > USR_MODE;; } diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 4203701cc7f4..02932ba8a653 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -89,6 +89,7 @@ struct kvm_vcpu_fault_info { }; struct kvm_cpu_context { + struct kvm_regs gp_regs; struct vfp_hard_struct vfp; u32 cp15[NR_CP15_REGS]; }; @@ -98,8 +99,6 @@ typedef struct kvm_cpu_context kvm_cpu_context_t; struct kvm_vcpu_arch { struct kvm_cpu_context ctxt; - struct kvm_regs regs; - int target; /* Processor target */ DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 43f8b01072c1..2f3e0b064066 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -176,15 +176,15 @@ int main(void) DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); DEFINE(CPU_CTXT_CP15, offsetof(struct kvm_cpu_context, cp15)); - DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); - DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); - DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); - DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs)); - DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs)); - DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs)); - DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); - DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, 
arch.regs.usr_regs.ARM_pc)); - DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); + DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); + DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs)); + DEFINE(GP_REGS_SVC, offsetof(struct kvm_regs, svc_regs)); + DEFINE(GP_REGS_ABT, offsetof(struct kvm_regs, abt_regs)); + DEFINE(GP_REGS_UND, offsetof(struct kvm_regs, und_regs)); + DEFINE(GP_REGS_IRQ, offsetof(struct kvm_regs, irq_regs)); + DEFINE(GP_REGS_FIQ, offsetof(struct kvm_regs, fiq_regs)); + DEFINE(GP_REGS_PC, offsetof(struct kvm_regs, usr_regs.ARM_pc)); + DEFINE(GP_REGS_CPSR, offsetof(struct kvm_regs, usr_regs.ARM_cpsr)); DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr)); DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr)); diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c index ee161b1c66da..a494def3f195 100644 --- a/arch/arm/kvm/emulate.c +++ b/arch/arm/kvm/emulate.c @@ -112,7 +112,7 @@ static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = { */ unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) { - unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs; + unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs; unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; switch (mode) { @@ -147,15 +147,15 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu) unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK; switch (mode) { case SVC_MODE: - return &vcpu->arch.regs.KVM_ARM_SVC_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr; case ABT_MODE: - return &vcpu->arch.regs.KVM_ARM_ABT_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr; case UND_MODE: - return &vcpu->arch.regs.KVM_ARM_UND_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr; case IRQ_MODE: - return &vcpu->arch.regs.KVM_ARM_IRQ_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr; case FIQ_MODE: - return &vcpu->arch.regs.KVM_ARM_FIQ_spsr; + return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr; default: BUG(); } diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 5fa69d7bae58..86e26fbd5ba3 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -55,7 +55,7 @@ static u64 core_reg_offset_from_id(u64 id) static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { u32 __user *uaddr = (u32 __user *)(long)reg->addr; - struct kvm_regs *regs = &vcpu->arch.regs; + struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs; u64 off; if (KVM_REG_SIZE(reg->id) != 4) @@ -72,7 +72,7 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { u32 __user *uaddr = (u32 __user *)(long)reg->addr; - struct kvm_regs *regs = &vcpu->arch.regs; + struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs; u64 off, val; if (KVM_REG_SIZE(reg->id) != 4) diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S index b9d953158877..e0943cb80ab3 100644 --- a/arch/arm/kvm/interrupts_head.S +++ b/arch/arm/kvm/interrupts_head.S @@ -1,6 +1,17 @@ #include #include +/* Compat macros, until we get rid of this file entirely */ +#define VCPU_GP_REGS (VCPU_GUEST_CTXT + CPU_CTXT_GP_REGS) +#define VCPU_USR_REGS (VCPU_GP_REGS + GP_REGS_USR) +#define VCPU_SVC_REGS (VCPU_GP_REGS + GP_REGS_SVC) +#define VCPU_ABT_REGS (VCPU_GP_REGS + GP_REGS_ABT) +#define VCPU_UND_REGS (VCPU_GP_REGS + GP_REGS_UND) +#define VCPU_IRQ_REGS (VCPU_GP_REGS + GP_REGS_IRQ) +#define 
VCPU_FIQ_REGS (VCPU_GP_REGS + GP_REGS_FIQ) +#define VCPU_PC (VCPU_GP_REGS + GP_REGS_PC) +#define VCPU_CPSR (VCPU_GP_REGS + GP_REGS_CPSR) + #define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) #define VCPU_USR_SP (VCPU_USR_REG(13)) #define VCPU_USR_LR (VCPU_USR_REG(14)) diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index eeb85858d6bb..0048b5a62a50 100644 --- a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c @@ -71,7 +71,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) } /* Reset core registers */ - memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs)); + memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs)); /* Reset CP15 registers */ kvm_reset_coprocs(vcpu); -- cgit v1.2.3 From 08dcbfda0774d5550447835f20a647b7e4c94481 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 21 Oct 2015 10:09:49 +0100 Subject: ARM: KVM: Add a HYP-specific header file In order to expose the various HYP services that are private to the hypervisor, add a new hyp.h file. So far, it only contains mundane things such as section annotation and VA manipulation. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/hyp.h | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 arch/arm/kvm/hyp/hyp.h (limited to 'arch/arm') diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h new file mode 100644 index 000000000000..c72387073b09 --- /dev/null +++ b/arch/arm/kvm/hyp/hyp.h @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __ARM_KVM_HYP_H__ +#define __ARM_KVM_HYP_H__ + +#include +#include +#include + +#define __hyp_text __section(.hyp.text) notrace + +#define kern_hyp_va(v) (v) +#define hyp_kern_va(v) (v) + +#endif /* __ARM_KVM_HYP_H__ */ -- cgit v1.2.3 From 3c29568768dfe6965ca51e1a78f9f31ebc0c500a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 2 Jan 2016 15:07:13 +0000 Subject: ARM: KVM: Add system register accessor macros In order to move system register (CP15, mostly) access to C code, add a few macros to facilitate this, and minimize the difference between 32 and 64bit CP15 registers. This will get heavily used in the following patches. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/hyp.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index c72387073b09..727089f0ddb6 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -27,4 +27,19 @@ #define kern_hyp_va(v) (v) #define hyp_kern_va(v) (v) +#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \ + "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 +#define __ACCESS_CP15_64(Op1, CRm) \ + "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 + +#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) +#define write_sysreg(v, ...) 
__write_sysreg(v, __VA_ARGS__) + +#define __read_sysreg(r, w, c, t) ({ \ + t __val; \ + asm volatile(r " " c : "=r" (__val)); \ + __val; \ +}) +#define read_sysreg(...) __read_sysreg(__VA_ARGS__) + #endif /* __ARM_KVM_HYP_H__ */ -- cgit v1.2.3 From 1d58d2cbf723704e070d560507787b9912b63839 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 2 Jan 2016 15:09:54 +0000 Subject: ARM: KVM: Add TLB invalidation code Convert the TLB invalidation code to C, hooking it into the build system whilst we're at it. Signed-off-by: Marc Zyngier --- arch/arm/kvm/Makefile | 1 + arch/arm/kvm/hyp/Makefile | 5 ++++ arch/arm/kvm/hyp/hyp.h | 5 ++++ arch/arm/kvm/hyp/tlb.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 81 insertions(+) create mode 100644 arch/arm/kvm/hyp/Makefile create mode 100644 arch/arm/kvm/hyp/tlb.c (limited to 'arch/arm') diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile index c5eef02c52ba..eb1bf4309c13 100644 --- a/arch/arm/kvm/Makefile +++ b/arch/arm/kvm/Makefile @@ -17,6 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) KVM := ../../../virt/kvm kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o +obj-$(CONFIG_KVM_ARM_HOST) += hyp/ obj-y += kvm-arm.o init.o interrupts.o obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile new file mode 100644 index 000000000000..36c760df2360 --- /dev/null +++ b/arch/arm/kvm/hyp/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for Kernel-based Virtual Machine module, HYP part +# + +obj-$(CONFIG_KVM_ARM_HOST) += tlb.o diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 727089f0ddb6..5808bbd38c5f 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -42,4 +42,9 @@ }) #define read_sysreg(...) __read_sysreg(__VA_ARGS__) +#define VTTBR __ACCESS_CP15_64(6, c2) +#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) +#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) +#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) + #endif /* __ARM_KVM_HYP_H__ */ diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c new file mode 100644 index 000000000000..aaa44bbac766 --- /dev/null +++ b/arch/arm/kvm/hyp/tlb.c @@ -0,0 +1,70 @@ +/* + * Original code: + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall + * + * Mostly rewritten in C by Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include "hyp.h" + +/** + * Flush per-VMID TLBs + * + * __kvm_tlb_flush_vmid(struct kvm *kvm); + * + * We rely on the hardware to broadcast the TLB invalidation to all CPUs + * inside the inner-shareable domain (which is the case for all v7 + * implementations). If we come across a non-IS SMP implementation, we'll + * have to use an IPI based mechanism. Until then, we stick to the simple + * hardware assisted version. 
+ * + * As v7 does not support flushing per IPA, just nuke the whole TLB + * instead, ignoring the ipa value. + */ +static void __hyp_text __tlb_flush_vmid(struct kvm *kvm) +{ + dsb(ishst); + + /* Switch to requested VMID */ + kvm = kern_hyp_va(kvm); + write_sysreg(kvm->arch.vttbr, VTTBR); + isb(); + + write_sysreg(0, TLBIALLIS); + dsb(ish); + isb(); + + write_sysreg(0, VTTBR); +} + +__alias(__tlb_flush_vmid) void __weak __kvm_tlb_flush_vmid(struct kvm *kvm); + +static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) +{ + __tlb_flush_vmid(kvm); +} + +__alias(__tlb_flush_vmid_ipa) void __weak __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, + phys_addr_t ipa); + +static void __hyp_text __tlb_flush_vm_context(void) +{ + write_sysreg(0, TLBIALLNSNHIS); + write_sysreg(0, ICIALLUIS); + dsb(ish); +} + +__alias(__tlb_flush_vm_context) void __weak __kvm_flush_vm_context(void); -- cgit v1.2.3 From c7ce6c63a05f83998996fdebc4867b007a571f82 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sun, 3 Jan 2016 12:55:01 +0000 Subject: ARM: KVM: Add CP15 save/restore code Convert the CP15 save/restore code to C. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/cp15-sr.c | 84 ++++++++++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/hyp/hyp.h | 28 ++++++++++++++++ 3 files changed, 113 insertions(+) create mode 100644 arch/arm/kvm/hyp/cp15-sr.c (limited to 'arch/arm') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 36c760df2360..9f96fcbbcd8d 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -3,3 +3,4 @@ # obj-$(CONFIG_KVM_ARM_HOST) += tlb.o +obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c new file mode 100644 index 000000000000..732abbc34bd0 --- /dev/null +++ b/arch/arm/kvm/hyp/cp15-sr.c @@ -0,0 +1,84 @@ +/* + * Original code: + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall + * + * Mostly rewritten in C by Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "hyp.h" + +static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx) +{ + return (u64 *)(ctxt->cp15 + idx); +} + +void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt) +{ + ctxt->cp15[c0_MPIDR] = read_sysreg(VMPIDR); + ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR); + ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR); + ctxt->cp15[c1_CPACR] = read_sysreg(CPACR); + *cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0); + *cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1); + ctxt->cp15[c2_TTBCR] = read_sysreg(TTBCR); + ctxt->cp15[c3_DACR] = read_sysreg(DACR); + ctxt->cp15[c5_DFSR] = read_sysreg(DFSR); + ctxt->cp15[c5_IFSR] = read_sysreg(IFSR); + ctxt->cp15[c5_ADFSR] = read_sysreg(ADFSR); + ctxt->cp15[c5_AIFSR] = read_sysreg(AIFSR); + ctxt->cp15[c6_DFAR] = read_sysreg(DFAR); + ctxt->cp15[c6_IFAR] = read_sysreg(IFAR); + *cp15_64(ctxt, c7_PAR) = read_sysreg(PAR); + ctxt->cp15[c10_PRRR] = read_sysreg(PRRR); + ctxt->cp15[c10_NMRR] = read_sysreg(NMRR); + ctxt->cp15[c10_AMAIR0] = read_sysreg(AMAIR0); + ctxt->cp15[c10_AMAIR1] = read_sysreg(AMAIR1); + ctxt->cp15[c12_VBAR] = read_sysreg(VBAR); + ctxt->cp15[c13_CID] = read_sysreg(CID); + ctxt->cp15[c13_TID_URW] = read_sysreg(TID_URW); + ctxt->cp15[c13_TID_URO] = read_sysreg(TID_URO); + ctxt->cp15[c13_TID_PRIV] = read_sysreg(TID_PRIV); + ctxt->cp15[c14_CNTKCTL] = read_sysreg(CNTKCTL); +} + +void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt) +{ + write_sysreg(ctxt->cp15[c0_MPIDR], VMPIDR); + write_sysreg(ctxt->cp15[c0_CSSELR], CSSELR); + write_sysreg(ctxt->cp15[c1_SCTLR], SCTLR); + write_sysreg(ctxt->cp15[c1_CPACR], CPACR); + write_sysreg(*cp15_64(ctxt, c2_TTBR0), TTBR0); + write_sysreg(*cp15_64(ctxt, c2_TTBR1), TTBR1); + write_sysreg(ctxt->cp15[c2_TTBCR], TTBCR); + write_sysreg(ctxt->cp15[c3_DACR], DACR); + write_sysreg(ctxt->cp15[c5_DFSR], DFSR); + write_sysreg(ctxt->cp15[c5_IFSR], IFSR); + write_sysreg(ctxt->cp15[c5_ADFSR], ADFSR); + write_sysreg(ctxt->cp15[c5_AIFSR], AIFSR); + write_sysreg(ctxt->cp15[c6_DFAR], DFAR); + write_sysreg(ctxt->cp15[c6_IFAR], IFAR); + write_sysreg(*cp15_64(ctxt, c7_PAR), PAR); + write_sysreg(ctxt->cp15[c10_PRRR], PRRR); + write_sysreg(ctxt->cp15[c10_NMRR], NMRR); + write_sysreg(ctxt->cp15[c10_AMAIR0], AMAIR0); + write_sysreg(ctxt->cp15[c10_AMAIR1], AMAIR1); + write_sysreg(ctxt->cp15[c12_VBAR], VBAR); + write_sysreg(ctxt->cp15[c13_CID], CID); + write_sysreg(ctxt->cp15[c13_TID_URW], TID_URW); + write_sysreg(ctxt->cp15[c13_TID_URO], TID_URO); + write_sysreg(ctxt->cp15[c13_TID_PRIV], TID_PRIV); + write_sysreg(ctxt->cp15[c14_CNTKCTL], CNTKCTL); +} diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 5808bbd38c5f..ab2cb828d60a 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -42,9 +42,37 @@ }) #define read_sysreg(...) 
__read_sysreg(__VA_ARGS__) +#define TTBR0 __ACCESS_CP15_64(0, c2) +#define TTBR1 __ACCESS_CP15_64(1, c2) #define VTTBR __ACCESS_CP15_64(6, c2) +#define PAR __ACCESS_CP15_64(0, c7) +#define CSSELR __ACCESS_CP15(c0, 2, c0, 0) +#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) +#define SCTLR __ACCESS_CP15(c1, 0, c0, 0) +#define CPACR __ACCESS_CP15(c1, 0, c0, 2) +#define TTBCR __ACCESS_CP15(c2, 0, c0, 2) +#define DACR __ACCESS_CP15(c3, 0, c0, 0) +#define DFSR __ACCESS_CP15(c5, 0, c0, 0) +#define IFSR __ACCESS_CP15(c5, 0, c0, 1) +#define ADFSR __ACCESS_CP15(c5, 0, c1, 0) +#define AIFSR __ACCESS_CP15(c5, 0, c1, 1) +#define DFAR __ACCESS_CP15(c6, 0, c0, 0) +#define IFAR __ACCESS_CP15(c6, 0, c0, 2) #define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) #define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) #define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) +#define PRRR __ACCESS_CP15(c10, 0, c2, 0) +#define NMRR __ACCESS_CP15(c10, 0, c2, 1) +#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0) +#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1) +#define VBAR __ACCESS_CP15(c12, 0, c0, 0) +#define CID __ACCESS_CP15(c13, 0, c0, 1) +#define TID_URW __ACCESS_CP15(c13, 0, c0, 2) +#define TID_URO __ACCESS_CP15(c13, 0, c0, 3) +#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4) +#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0) + +void __sysreg_save_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_state(struct kvm_cpu_context *ctxt); #endif /* __ARM_KVM_HYP_H__ */ -- cgit v1.2.3 From e59bff9bf302bf1332c6421b39ba2e82b84e63a6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 4 Jan 2016 08:54:50 +0000 Subject: ARM: KVM: Add timer save/restore This patch shouldn't exist, as we should be able to reuse the arm64 version for free. I'll get there eventually, but in the meantime I need a timer ticking. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/hyp.h | 8 +++++ arch/arm/kvm/hyp/timer-sr.c | 71 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+) create mode 100644 arch/arm/kvm/hyp/timer-sr.c (limited to 'arch/arm') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 9f96fcbbcd8d..9241ae845252 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -4,3 +4,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index ab2cb828d60a..4924418aee4f 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -46,6 +46,9 @@ #define TTBR1 __ACCESS_CP15_64(1, c2) #define VTTBR __ACCESS_CP15_64(6, c2) #define PAR __ACCESS_CP15_64(0, c7) +#define CNTV_CVAL __ACCESS_CP15_64(3, c14) +#define CNTVOFF __ACCESS_CP15_64(4, c14) + #define CSSELR __ACCESS_CP15(c0, 2, c0, 0) #define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) #define SCTLR __ACCESS_CP15(c1, 0, c0, 0) @@ -71,6 +74,11 @@ #define TID_URO __ACCESS_CP15(c13, 0, c0, 3) #define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4) #define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0) +#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1) +#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0) + +void __timer_save_state(struct kvm_vcpu *vcpu); +void __timer_restore_state(struct kvm_vcpu *vcpu); void __sysreg_save_state(struct kvm_cpu_context *ctxt); void __sysreg_restore_state(struct kvm_cpu_context *ctxt); diff --git a/arch/arm/kvm/hyp/timer-sr.c b/arch/arm/kvm/hyp/timer-sr.c new file mode 100644 index 000000000000..d7535fd0784e --- /dev/null +++ b/arch/arm/kvm/hyp/timer-sr.c @@ -0,0 +1,71 @@ +/* + 
* Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +#include + +#include "hyp.h" + +/* vcpu is already in the HYP VA space */ +void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + u64 val; + + if (kvm->arch.timer.enabled) { + timer->cntv_ctl = read_sysreg(CNTV_CTL); + timer->cntv_cval = read_sysreg(CNTV_CVAL); + } + + /* Disable the virtual timer */ + write_sysreg(0, CNTV_CTL); + + /* Allow physical timer/counter access for the host */ + val = read_sysreg(CNTHCTL); + val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; + write_sysreg(val, CNTHCTL); + + /* Clear cntvoff for the host */ + write_sysreg(0, CNTVOFF); +} + +void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + u64 val; + + /* + * Disallow physical timer access for the guest + * Physical counter access is allowed + */ + val = read_sysreg(CNTHCTL); + val &= ~CNTHCTL_EL1PCEN; + val |= CNTHCTL_EL1PCTEN; + write_sysreg(val, CNTHCTL); + + if (kvm->arch.timer.enabled) { + write_sysreg(kvm->arch.timer.cntvoff, CNTVOFF); + write_sysreg(timer->cntv_cval, CNTV_CVAL); + isb(); + write_sysreg(timer->cntv_ctl, CNTV_CTL); + } +} -- cgit v1.2.3 From c0c2cdbffef2369a94998fc6d85af25eded92b60 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 4 Jan 2016 09:06:11 +0000 Subject: ARM: KVM: Add vgic v2 save/restore This patch shouldn't exist, as we should be able to reuse the arm64 version for free. I'll get there eventually, but in the meantime I need an interrupt controller. 
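Editor's note (illustration only, not part of the original commit): the pair added here is meant to bracket the world switch. A minimal sketch of the intended call pattern, under the assumption that the caller already runs in HYP; __switch_sketch() is a hypothetical stand-in for the C world-switch entry point that only appears later in the series:

	static void __hyp_text __switch_sketch(struct kvm_vcpu *vcpu)
	{
		/* On guest entry: load the guest's virtual interrupt state */
		__vgic_v2_restore_state(vcpu);

		/* ... run the guest, handle the exit ... */

		/* On guest exit: capture the guest's virtual interrupt state */
		__vgic_v2_save_state(vcpu);
	}
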
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/hyp.h | 3 ++ arch/arm/kvm/hyp/vgic-v2-sr.c | 84 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 88 insertions(+) create mode 100644 arch/arm/kvm/hyp/vgic-v2-sr.c (limited to 'arch/arm') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 9241ae845252..d8acbb691249 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 4924418aee4f..7eb1c21d2d21 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -80,6 +80,9 @@ void __timer_save_state(struct kvm_vcpu *vcpu); void __timer_restore_state(struct kvm_vcpu *vcpu); +void __vgic_v2_save_state(struct kvm_vcpu *vcpu); +void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); + void __sysreg_save_state(struct kvm_cpu_context *ctxt); void __sysreg_restore_state(struct kvm_cpu_context *ctxt); diff --git a/arch/arm/kvm/hyp/vgic-v2-sr.c b/arch/arm/kvm/hyp/vgic-v2-sr.c new file mode 100644 index 000000000000..e71761238cfc --- /dev/null +++ b/arch/arm/kvm/hyp/vgic-v2-sr.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2012-2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include +#include + +#include + +#include "hyp.h" + +/* vcpu is already in the HYP VA space */ +void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; + struct vgic_dist *vgic = &kvm->arch.vgic; + void __iomem *base = kern_hyp_va(vgic->vctrl_base); + u32 eisr0, eisr1, elrsr0, elrsr1; + int i, nr_lr; + + if (!base) + return; + + nr_lr = vcpu->arch.vgic_cpu.nr_lr; + cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR); + cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR); + eisr0 = readl_relaxed(base + GICH_EISR0); + elrsr0 = readl_relaxed(base + GICH_ELRSR0); + if (unlikely(nr_lr > 32)) { + eisr1 = readl_relaxed(base + GICH_EISR1); + elrsr1 = readl_relaxed(base + GICH_ELRSR1); + } else { + eisr1 = elrsr1 = 0; + } +#ifdef CONFIG_CPU_BIG_ENDIAN + cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1; + cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1; +#else + cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0; + cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0; +#endif + cpu_if->vgic_apr = readl_relaxed(base + GICH_APR); + + writel_relaxed(0, base + GICH_HCR); + + for (i = 0; i < nr_lr; i++) + cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); +} + +/* vcpu is already in the HYP VA space */ +void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; + struct vgic_dist *vgic = &kvm->arch.vgic; + void __iomem *base = kern_hyp_va(vgic->vctrl_base); + int i, nr_lr; + + if (!base) + return; + + writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR); + writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR); + writel_relaxed(cpu_if->vgic_apr, base + GICH_APR); + + nr_lr = vcpu->arch.vgic_cpu.nr_lr; + for (i = 0; i < nr_lr; i++) + writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4)); +} -- cgit v1.2.3 From 59cbcdb5d83b49d1d2e161f3468f850f9fa4b968 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 4 Jan 2016 15:41:51 +0000 Subject: ARM: KVM: Add VFP save/restore This is almost a copy/paste of the existing version, with a couple of subtle differences: - Only write to FPEXC once on the save path - Add an isb when enabling VFP access The patch also defines a few sysreg accessors and a __vfp_enabled predicate that tests the VFP trapping state. 
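Editor's note (illustration only, not part of the original commit): __vfp_enabled() reads HCPTR to tell whether VFP accesses were left untrapped, i.e. whether the guest actually touched VFP. A minimal sketch of the lazy switch it supports on guest exit; host_ctxt and guest_ctxt are hypothetical locals pointing at the host's and guest's struct kvm_cpu_context:

	/* Sketch: only shuffle VFP state if the guest really used it. */
	if (__vfp_enabled()) {
		__vfp_save_state(&guest_ctxt->vfp);
		__vfp_restore_state(&host_ctxt->vfp);
	}
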
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/hyp.h | 13 +++++++++ arch/arm/kvm/hyp/vfp.S | 68 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+) create mode 100644 arch/arm/kvm/hyp/vfp.S (limited to 'arch/arm') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index d8acbb691249..5a45f4c21f83 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += vfp.o diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 7eb1c21d2d21..dce0f7305cf5 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -21,6 +21,7 @@ #include #include #include +#include #define __hyp_text __section(.hyp.text) notrace @@ -31,6 +32,8 @@ "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 #define __ACCESS_CP15_64(Op1, CRm) \ "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 +#define __ACCESS_VFP(CRn) \ + "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32 #define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) #define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__) @@ -53,6 +56,7 @@ #define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) #define SCTLR __ACCESS_CP15(c1, 0, c0, 0) #define CPACR __ACCESS_CP15(c1, 0, c0, 2) +#define HCPTR __ACCESS_CP15(c1, 4, c1, 2) #define TTBCR __ACCESS_CP15(c2, 0, c0, 2) #define DACR __ACCESS_CP15(c3, 0, c0, 0) #define DFSR __ACCESS_CP15(c5, 0, c0, 0) @@ -77,6 +81,8 @@ #define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1) #define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0) +#define VFP_FPEXC __ACCESS_VFP(FPEXC) + void __timer_save_state(struct kvm_vcpu *vcpu); void __timer_restore_state(struct kvm_vcpu *vcpu); @@ -86,4 +92,11 @@ void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); void __sysreg_save_state(struct kvm_cpu_context *ctxt); void __sysreg_restore_state(struct kvm_cpu_context *ctxt); +void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp); +void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp); +static inline bool __vfp_enabled(void) +{ + return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10))); +} + #endif /* __ARM_KVM_HYP_H__ */ diff --git a/arch/arm/kvm/hyp/vfp.S b/arch/arm/kvm/hyp/vfp.S new file mode 100644 index 000000000000..7c297e87eb8b --- /dev/null +++ b/arch/arm/kvm/hyp/vfp.S @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include + + .text + .pushsection .hyp.text, "ax" + +/* void __vfp_save_state(struct vfp_hard_struct *vfp); */ +ENTRY(__vfp_save_state) + push {r4, r5} + VFPFMRX r1, FPEXC + + @ Make sure *really* VFP is enabled so we can touch the registers. 
+	orr	r5, r1, #FPEXC_EN
+	tst	r5, #FPEXC_EX		@ Check for VFP Subarchitecture
+	bic	r5, r5, #FPEXC_EX	@ FPEXC_EX disable
+	VFPFMXR	FPEXC, r5
+	isb
+
+	VFPFMRX	r2, FPSCR
+	beq	1f
+
+	@ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
+	@ we only need to save them if FPEXC_EX is set.
+	VFPFMRX	r3, FPINST
+	tst	r5, #FPEXC_FP2V
+	VFPFMRX	r4, FPINST2, ne	@ vmrsne
+1:
+	VFPFSTMIA	r0, r5		@ Save VFP registers
+	stm	r0, {r1-r4}		@ Save FPEXC, FPSCR, FPINST, FPINST2
+	pop	{r4, r5}
+	bx	lr
+ENDPROC(__vfp_save_state)
+
+/* void __vfp_restore_state(struct vfp_hard_struct *vfp);
+ * Assume FPEXC_EN is on and FPEXC_EX is off */
+ENTRY(__vfp_restore_state)
+	VFPFLDMIA	r0, r1		@ Load VFP registers
+	ldm	r0, {r0-r3}		@ Load FPEXC, FPSCR, FPINST, FPINST2
+
+	VFPFMXR	FPSCR, r1
+	tst	r0, #FPEXC_EX		@ Check for VFP Subarchitecture
+	beq	1f
+	VFPFMXR	FPINST, r2
+	tst	r0, #FPEXC_FP2V
+	VFPFMXR	FPINST2, r3, ne
+1:
+	VFPFMXR	FPEXC, r0		@ FPEXC	(last, in case !EN)
+	bx	lr
+ENDPROC(__vfp_restore_state)
+
+	.popsection
--
cgit v1.2.3


From 33280b4cd1dc0bc7df8d6d3bd1b64c377c9e44d9 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 5 Jan 2016 18:38:09 +0000
Subject: ARM: KVM: Add banked registers save/restore

Banked registers are one of the many perks of the 32bit architecture,
and the world switch needs to cope with them.

This requires some "special" accessors, as these are not accessed
using a standard coprocessor instruction.

Reviewed-by: Christoffer Dall
Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/hyp/Makefile    |  1 +
 arch/arm/kvm/hyp/banked-sr.c | 77 ++++++++++++++++++++++++++++++++++++++++++++
 arch/arm/kvm/hyp/hyp.h       | 11 +++++++
 3 files changed, 89 insertions(+)
 create mode 100644 arch/arm/kvm/hyp/banked-sr.c
(limited to 'arch/arm')

diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
index 5a45f4c21f83..173bd1dd77e7 100644
--- a/arch/arm/kvm/hyp/Makefile
+++ b/arch/arm/kvm/hyp/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
+obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c
new file mode 100644
index 000000000000..d02dc804f611
--- /dev/null
+++ b/arch/arm/kvm/hyp/banked-sr.c
@@ -0,0 +1,77 @@
+/*
+ * Original code:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall
+ *
+ * Mostly rewritten in C by Marc Zyngier
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see .
+ */ + +#include "hyp.h" + +__asm__(".arch_extension virt"); + +void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt) +{ + ctxt->gp_regs.usr_regs.ARM_sp = read_special(SP_usr); + ctxt->gp_regs.usr_regs.ARM_pc = read_special(ELR_hyp); + ctxt->gp_regs.usr_regs.ARM_cpsr = read_special(SPSR); + ctxt->gp_regs.KVM_ARM_SVC_sp = read_special(SP_svc); + ctxt->gp_regs.KVM_ARM_SVC_lr = read_special(LR_svc); + ctxt->gp_regs.KVM_ARM_SVC_spsr = read_special(SPSR_svc); + ctxt->gp_regs.KVM_ARM_ABT_sp = read_special(SP_abt); + ctxt->gp_regs.KVM_ARM_ABT_lr = read_special(LR_abt); + ctxt->gp_regs.KVM_ARM_ABT_spsr = read_special(SPSR_abt); + ctxt->gp_regs.KVM_ARM_UND_sp = read_special(SP_und); + ctxt->gp_regs.KVM_ARM_UND_lr = read_special(LR_und); + ctxt->gp_regs.KVM_ARM_UND_spsr = read_special(SPSR_und); + ctxt->gp_regs.KVM_ARM_IRQ_sp = read_special(SP_irq); + ctxt->gp_regs.KVM_ARM_IRQ_lr = read_special(LR_irq); + ctxt->gp_regs.KVM_ARM_IRQ_spsr = read_special(SPSR_irq); + ctxt->gp_regs.KVM_ARM_FIQ_r8 = read_special(R8_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_r9 = read_special(R9_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_r10 = read_special(R10_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_fp = read_special(R11_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_ip = read_special(R12_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_sp = read_special(SP_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_lr = read_special(LR_fiq); + ctxt->gp_regs.KVM_ARM_FIQ_spsr = read_special(SPSR_fiq); +} + +void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt) +{ + write_special(ctxt->gp_regs.usr_regs.ARM_sp, SP_usr); + write_special(ctxt->gp_regs.usr_regs.ARM_pc, ELR_hyp); + write_special(ctxt->gp_regs.usr_regs.ARM_cpsr, SPSR_cxsf); + write_special(ctxt->gp_regs.KVM_ARM_SVC_sp, SP_svc); + write_special(ctxt->gp_regs.KVM_ARM_SVC_lr, LR_svc); + write_special(ctxt->gp_regs.KVM_ARM_SVC_spsr, SPSR_svc); + write_special(ctxt->gp_regs.KVM_ARM_ABT_sp, SP_abt); + write_special(ctxt->gp_regs.KVM_ARM_ABT_lr, LR_abt); + write_special(ctxt->gp_regs.KVM_ARM_ABT_spsr, SPSR_abt); + write_special(ctxt->gp_regs.KVM_ARM_UND_sp, SP_und); + write_special(ctxt->gp_regs.KVM_ARM_UND_lr, LR_und); + write_special(ctxt->gp_regs.KVM_ARM_UND_spsr, SPSR_und); + write_special(ctxt->gp_regs.KVM_ARM_IRQ_sp, SP_irq); + write_special(ctxt->gp_regs.KVM_ARM_IRQ_lr, LR_irq); + write_special(ctxt->gp_regs.KVM_ARM_IRQ_spsr, SPSR_irq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_r8, R8_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_r9, R9_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_r10, R10_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_fp, R11_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_ip, R12_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_sp, SP_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_lr, LR_fiq); + write_special(ctxt->gp_regs.KVM_ARM_FIQ_spsr, SPSR_fiq); +} diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index dce0f7305cf5..278eb1fa5231 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -45,6 +45,14 @@ }) #define read_sysreg(...) 
__read_sysreg(__VA_ARGS__) +#define write_special(v, r) \ + asm volatile("msr " __stringify(r) ", %0" : : "r" (v)) +#define read_special(r) ({ \ + u32 __val; \ + asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \ + __val; \ +}) + #define TTBR0 __ACCESS_CP15_64(0, c2) #define TTBR1 __ACCESS_CP15_64(1, c2) #define VTTBR __ACCESS_CP15_64(6, c2) @@ -99,4 +107,7 @@ static inline bool __vfp_enabled(void) return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10))); } +void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt); +void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); + #endif /* __ARM_KVM_HYP_H__ */ -- cgit v1.2.3 From 89ef2b21ed2173e01995371261a9f9789bc1e47a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:40:51 +0000 Subject: ARM: KVM: Add guest entry code Add the very minimal piece of code that is now required to jump into the guest (and return from it). This code is only concerned with save/restoring the USR registers (r0-r12+lr for the guest, r4-r12+lr for the host), as everything else is dealt with in C (VFP is another matter though). Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/entry.S | 70 +++++++++++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/hyp/hyp.h | 2 ++ 3 files changed, 73 insertions(+) create mode 100644 arch/arm/kvm/hyp/entry.S (limited to 'arch/arm') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 173bd1dd77e7..c77969008665 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -8,3 +8,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += entry.o diff --git a/arch/arm/kvm/hyp/entry.S b/arch/arm/kvm/hyp/entry.S new file mode 100644 index 000000000000..32f79b090040 --- /dev/null +++ b/arch/arm/kvm/hyp/entry.S @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2016 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+*/ + +#include +#include +#include + + .arch_extension virt + + .text + .pushsection .hyp.text, "ax" + +#define USR_REGS_OFFSET (CPU_CTXT_GP_REGS + GP_REGS_USR) + +/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */ +ENTRY(__guest_enter) + @ Save host registers + add r1, r1, #(USR_REGS_OFFSET + S_R4) + stm r1!, {r4-r12} + str lr, [r1, #4] @ Skip SP_usr (already saved) + + @ Restore guest registers + add r0, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0) + ldr lr, [r0, #S_LR] + ldm r0, {r0-r12} + + clrex + eret +ENDPROC(__guest_enter) + +ENTRY(__guest_exit) + /* + * return convention: + * guest r0, r1, r2 saved on the stack + * r0: vcpu pointer + * r1: exception code + */ + + add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3) + stm r2!, {r3-r12} + str lr, [r2, #4] + add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0) + pop {r3, r4, r5} @ r0, r1, r2 + stm r2, {r3-r5} + + ldr r0, [r0, #VCPU_HOST_CTXT] + add r0, r0, #(USR_REGS_OFFSET + S_R4) + ldm r0!, {r4-r12} + ldr lr, [r0, #4] + + mov r0, r1 + bx lr +ENDPROC(__guest_exit) + + .popsection + diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 278eb1fa5231..b3f6ed233564 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -110,4 +110,6 @@ static inline bool __vfp_enabled(void) void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt); void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); +int asmlinkage __guest_enter(struct kvm_vcpu *vcpu, + struct kvm_cpu_context *host); #endif /* __ARM_KVM_HYP_H__ */ -- cgit v1.2.3 From 96e5e670cc681703c00eaf442c8796da6aa25ca0 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 6 Jan 2016 13:53:51 +0000 Subject: ARM: KVM: Add VFP lazy save/restore handler Similar to the arm64 version, add the code that deals with VFP traps, re-enabling VFP, save/restoring the registers and resuming the guest. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/entry.S | 31 +++++++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/kvm/hyp/entry.S b/arch/arm/kvm/hyp/entry.S index 32f79b090040..21c238871c9e 100644 --- a/arch/arm/kvm/hyp/entry.S +++ b/arch/arm/kvm/hyp/entry.S @@ -66,5 +66,36 @@ ENTRY(__guest_exit) bx lr ENDPROC(__guest_exit) +/* + * If VFPv3 support is not available, then we will not switch the VFP + * registers; however cp10 and cp11 accesses will still trap and fallback + * to the regular coprocessor emulation code, which currently will + * inject an undefined exception to the guest. + */ +#ifdef CONFIG_VFPv3 +ENTRY(__vfp_guest_restore) + push {r3, r4, lr} + + @ NEON/VFP used. Turn on VFP access. + mrc p15, 4, r1, c1, c1, 2 @ HCPTR + bic r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11)) + mcr p15, 4, r1, c1, c1, 2 @ HCPTR + isb + + @ Switch VFP/NEON hardware state to the guest's + mov r4, r0 + ldr r0, [r0, #VCPU_HOST_CTXT] + add r0, r0, #CPU_CTXT_VFP + bl __vfp_save_state + add r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) + bl __vfp_restore_state + + pop {r3, r4, lr} + pop {r0, r1, r2} + clrex + eret +ENDPROC(__vfp_guest_restore) +#endif + .popsection -- cgit v1.2.3 From 9dddc2dfa5da95fa380635d400f80767077ef936 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:42:49 +0000 Subject: ARM: KVM: Add the new world switch implementation The new world switch implementation is modeled after the arm64 one, calling the various save/restore functions in turn, and having as little state as possible. 
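For orientation before the diff (an editorial sketch, not part of the patch): the entry half of the sequence is shown below; __guest_run() in the diff mirrors each call in reverse order on the way back out. All names are the functions introduced earlier in this series.

	/* Condensed sketch of the __guest_run() entry path, not the literal code. */
	__sysreg_save_state(host_ctxt);		/* host cp15 state */
	__banked_save_state(host_ctxt);		/* host banked registers */
	__activate_traps(vcpu, &fpexc);		/* HCR/HSTR/HCPTR/HDCR, FPEXC.EN */
	__activate_vm(vcpu);			/* VTTBR and VPIDR */
	__vgic_restore_state(vcpu);
	__timer_restore_state(vcpu);
	__sysreg_restore_state(guest_ctxt);
	__banked_restore_state(guest_ctxt);
	exit_code = __guest_enter(vcpu, host_ctxt);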
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/hyp.h | 7 +++ arch/arm/kvm/hyp/switch.c | 140 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 148 insertions(+) create mode 100644 arch/arm/kvm/hyp/switch.c (limited to 'arch/arm') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index c77969008665..cfab402d7695 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o +obj-$(CONFIG_KVM_ARM_HOST) += switch.o diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index b3f6ed233564..ef582c9ad96d 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -60,11 +60,16 @@ #define CNTV_CVAL __ACCESS_CP15_64(3, c14) #define CNTVOFF __ACCESS_CP15_64(4, c14) +#define MIDR __ACCESS_CP15(c0, 0, c0, 0) #define CSSELR __ACCESS_CP15(c0, 2, c0, 0) +#define VPIDR __ACCESS_CP15(c0, 4, c0, 0) #define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) #define SCTLR __ACCESS_CP15(c1, 0, c0, 0) #define CPACR __ACCESS_CP15(c1, 0, c0, 2) +#define HCR __ACCESS_CP15(c1, 4, c1, 0) +#define HDCR __ACCESS_CP15(c1, 4, c1, 1) #define HCPTR __ACCESS_CP15(c1, 4, c1, 2) +#define HSTR __ACCESS_CP15(c1, 4, c1, 3) #define TTBCR __ACCESS_CP15(c2, 0, c0, 2) #define DACR __ACCESS_CP15(c3, 0, c0, 0) #define DFSR __ACCESS_CP15(c5, 0, c0, 0) @@ -73,6 +78,7 @@ #define AIFSR __ACCESS_CP15(c5, 0, c1, 1) #define DFAR __ACCESS_CP15(c6, 0, c0, 0) #define IFAR __ACCESS_CP15(c6, 0, c0, 2) +#define HDFAR __ACCESS_CP15(c6, 4, c0, 0) #define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) #define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) #define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) @@ -85,6 +91,7 @@ #define TID_URW __ACCESS_CP15(c13, 0, c0, 2) #define TID_URO __ACCESS_CP15(c13, 0, c0, 3) #define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4) +#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2) #define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0) #define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1) #define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0) diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c new file mode 100644 index 000000000000..a1f3c1cf8f74 --- /dev/null +++ b/arch/arm/kvm/hyp/switch.c @@ -0,0 +1,140 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include "hyp.h" + +__asm__(".arch_extension virt"); + +/* + * Activate the traps, saving the host's fpexc register before + * overwriting it. We'll restore it on VM exit. + */ +static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host) +{ + u32 val; + + /* + * We are about to set HCPTR.TCP10/11 to trap all floating point + * register accesses to HYP, however, the ARM ARM clearly states that + * traps are only taken to HYP if the operation would not otherwise + * trap to SVC. 
Therefore, always make sure that for 32-bit guests, + * we set FPEXC.EN to prevent traps to SVC, when setting the TCP bits. + */ + val = read_sysreg(VFP_FPEXC); + *fpexc_host = val; + if (!(val & FPEXC_EN)) { + write_sysreg(val | FPEXC_EN, VFP_FPEXC); + isb(); + } + + write_sysreg(vcpu->arch.hcr | vcpu->arch.irq_lines, HCR); + /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */ + write_sysreg(HSTR_T(15), HSTR); + write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR); + val = read_sysreg(HDCR); + write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR); +} + +static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu) +{ + u32 val; + + write_sysreg(0, HCR); + write_sysreg(0, HSTR); + val = read_sysreg(HDCR); + write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR); + write_sysreg(0, HCPTR); +} + +static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = kern_hyp_va(vcpu->kvm); + write_sysreg(kvm->arch.vttbr, VTTBR); + write_sysreg(vcpu->arch.midr, VPIDR); +} + +static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu) +{ + write_sysreg(0, VTTBR); + write_sysreg(read_sysreg(MIDR), VPIDR); +} + +static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu) +{ + __vgic_v2_save_state(vcpu); +} + +static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu) +{ + __vgic_v2_restore_state(vcpu); +} + +static int __hyp_text __guest_run(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *host_ctxt; + struct kvm_cpu_context *guest_ctxt; + bool fp_enabled; + u64 exit_code; + u32 fpexc; + + vcpu = kern_hyp_va(vcpu); + write_sysreg(vcpu, HTPIDR); + + host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); + guest_ctxt = &vcpu->arch.ctxt; + + __sysreg_save_state(host_ctxt); + __banked_save_state(host_ctxt); + + __activate_traps(vcpu, &fpexc); + __activate_vm(vcpu); + + __vgic_restore_state(vcpu); + __timer_restore_state(vcpu); + + __sysreg_restore_state(guest_ctxt); + __banked_restore_state(guest_ctxt); + + /* Jump in the fire! */ + exit_code = __guest_enter(vcpu, host_ctxt); + /* And we're baaack! */ + + fp_enabled = __vfp_enabled(); + + __banked_save_state(guest_ctxt); + __sysreg_save_state(guest_ctxt); + __timer_save_state(vcpu); + __vgic_save_state(vcpu); + + __deactivate_traps(vcpu); + __deactivate_vm(vcpu); + + __banked_restore_state(host_ctxt); + __sysreg_restore_state(host_ctxt); + + if (fp_enabled) { + __vfp_save_state(&guest_ctxt->vfp); + __vfp_restore_state(&host_ctxt->vfp); + } + + write_sysreg(fpexc, VFP_FPEXC); + + return exit_code; +} + +__alias(__guest_run) int __weak __kvm_vcpu_run(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From 97e964371377ebf3958701b082597c2162e3df18 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 13 Jan 2016 19:02:51 +0000 Subject: ARM: KVM: Add populating of fault data structure On guest exit, we must take care of populating our fault data structure so that the host code can handle it. This includes resolving the IPA for permission faults, which can result in restarting the guest. 
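A short editorial aside on the IPA arithmetic used in this patch: HPFAR bits [31:4] hold IPA[39:12], so converting between the two is a pure shift. The helper below is hypothetical (it does not appear in the patch) and only illustrates the encoding, using the kernel's GENMASK() from <linux/bits.h>:

	/* Hypothetical helper: recover the faulting page's IPA from HPFAR. */
	static inline phys_addr_t hpfar_to_ipa(u32 hpfar)
	{
		return ((phys_addr_t)hpfar & GENMASK(31, 4)) << 8;
	}

The same encoding explains the PAR fixup in the diff below: PAR[39:12] is moved down into HPFAR[31:4] with ((tmp >> 12) & ((1UL << 28) - 1)) << 4.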
Reviewed-by: Christoffer Dall
Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/hyp/hyp.h    |  4 ++++
 arch/arm/kvm/hyp/switch.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 58 insertions(+)
(limited to 'arch/arm')

diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h
index ef582c9ad96d..8b1156b691ff 100644
--- a/arch/arm/kvm/hyp/hyp.h
+++ b/arch/arm/kvm/hyp/hyp.h
@@ -76,10 +76,14 @@
 #define IFSR __ACCESS_CP15(c5, 0, c0, 1)
 #define ADFSR __ACCESS_CP15(c5, 0, c1, 0)
 #define AIFSR __ACCESS_CP15(c5, 0, c1, 1)
+#define HSR __ACCESS_CP15(c5, 4, c2, 0)
 #define DFAR __ACCESS_CP15(c6, 0, c0, 0)
 #define IFAR __ACCESS_CP15(c6, 0, c0, 2)
 #define HDFAR __ACCESS_CP15(c6, 4, c0, 0)
+#define HIFAR __ACCESS_CP15(c6, 4, c0, 2)
+#define HPFAR __ACCESS_CP15(c6, 4, c0, 4)
 #define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
+#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
 #define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
 #define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
 #define PRRR __ACCESS_CP15(c10, 0, c2, 0)
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index a1f3c1cf8f74..0dd0ba33b8a7 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -84,6 +84,56 @@ static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
 	__vgic_v2_restore_state(vcpu);
 }
 
+static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
+{
+	u32 hsr = read_sysreg(HSR);
+	u8 ec = hsr >> HSR_EC_SHIFT;
+	u32 hpfar, far;
+
+	vcpu->arch.fault.hsr = hsr;
+
+	if (ec == HSR_EC_IABT)
+		far = read_sysreg(HIFAR);
+	else if (ec == HSR_EC_DABT)
+		far = read_sysreg(HDFAR);
+	else
+		return true;
+
+	/*
+	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
+	 *
+	 * Abort on the stage 2 translation for a memory access from a
+	 * Non-secure PL1 or PL0 mode:
+	 *
+	 * For any Access flag fault or Translation fault, and also for any
+	 * Permission fault on the stage 2 translation of a memory access
+	 * made as part of a translation table walk for a stage 1 translation,
+	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
+	 * is UNKNOWN.
+	 */
+	if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
+		u64 par, tmp;
+
+		par = read_sysreg(PAR);
+		write_sysreg(far, ATS1CPR);
+		isb();
+
+		tmp = read_sysreg(PAR);
+		write_sysreg(par, PAR);
+
+		if (unlikely(tmp & 1))
+			return false; /* Translation failed, back to guest */
+
+		hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4;
+	} else {
+		hpfar = read_sysreg(HPFAR);
+	}
+
+	vcpu->arch.fault.hxfar = far;
+	vcpu->arch.fault.hpfar = hpfar;
+	return true;
+}
+
 static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
@@ -111,9 +161,13 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 	__banked_restore_state(guest_ctxt);
 
 	/* Jump in the fire! */
+again:
 	exit_code = __guest_enter(vcpu, host_ctxt);
 	/* And we're baaack! */
 
+	if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
+		goto again;
+
 	fp_enabled = __vfp_enabled();
 
 	__banked_save_state(guest_ctxt);
--
cgit v1.2.3


From bafc6c2a22553193d1d1ce83783b7faa0924158e Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Tue, 5 Jan 2016 18:43:18 +0000
Subject: ARM: KVM: Add HYP mode entry code

This part is almost entirely borrowed from the existing code, just
slightly simplifying the HYP function call (as we now save SPSR_hyp
in the world switch).
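For orientation (an editorial sketch, not part of the patch), the Hyp-ABI documented in the new file boils down to the host issuing an HVC #0 with a function pointer and up to three arguments; a representative call site would look like this:

	/* Host-side view of the ABI: r0 = HYP function, r1-r3 = arguments. */
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);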
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/hyp-entry.S | 157 +++++++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/hyp/hyp.h | 2 + 3 files changed, 160 insertions(+) create mode 100644 arch/arm/kvm/hyp/hyp-entry.S (limited to 'arch/arm') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index cfab402d7695..a7d3a7e0b702 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -9,4 +9,5 @@ obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o +obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o obj-$(CONFIG_KVM_ARM_HOST) += switch.o diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S new file mode 100644 index 000000000000..1b4aa02fd364 --- /dev/null +++ b/arch/arm/kvm/hyp/hyp-entry.S @@ -0,0 +1,157 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + */ + +#include +#include +#include + + .arch_extension virt + + .text + .pushsection .hyp.text, "ax" + +.macro load_vcpu reg + mrc p15, 4, \reg, c13, c0, 2 @ HTPIDR +.endm + +/******************************************************************** + * Hypervisor exception vector and handlers + * + * + * The KVM/ARM Hypervisor ABI is defined as follows: + * + * Entry to Hyp mode from the host kernel will happen _only_ when an HVC + * instruction is issued since all traps are disabled when running the host + * kernel as per the Hyp-mode initialization at boot time. + * + * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc + * below) when the HVC instruction is called from SVC mode (i.e. a guest or the + * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC + * instructions are called from within Hyp-mode. + * + * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): + * Switching to Hyp mode is done through a simple HVC #0 instruction. The + * exception vector code will check that the HVC comes from VMID==0. + * - r0 contains a pointer to a HYP function + * - r1, r2, and r3 contain arguments to the above function. + * - The HYP function will be called with its arguments in r0, r1 and r2. + * On HYP function return, we return directly to SVC. + * + * Note that the above is used to execute code in Hyp-mode from a host-kernel + * point of view, and is a different concept from performing a world-switch and + * executing guest code SVC mode (with a VMID != 0). + */ + + .align 5 +__hyp_vector: + .global __hyp_vector +__kvm_hyp_vector: + .weak __kvm_hyp_vector + + @ Hyp-mode exception vector + W(b) hyp_reset + W(b) hyp_undef + W(b) hyp_svc + W(b) hyp_pabt + W(b) hyp_dabt + W(b) hyp_hvc + W(b) hyp_irq + W(b) hyp_fiq + +.macro invalid_vector label, cause + .align +\label: b . 
+.endm + + invalid_vector hyp_reset + invalid_vector hyp_undef + invalid_vector hyp_svc + invalid_vector hyp_pabt + invalid_vector hyp_dabt + invalid_vector hyp_fiq + +hyp_hvc: + /* + * Getting here is either because of a trap from a guest, + * or from executing HVC from the host kernel, which means + * "do something in Hyp mode". + */ + push {r0, r1, r2} + + @ Check syndrome register + mrc p15, 4, r1, c5, c2, 0 @ HSR + lsr r0, r1, #HSR_EC_SHIFT + cmp r0, #HSR_EC_HVC + bne guest_trap @ Not HVC instr. + + /* + * Let's check if the HVC came from VMID 0 and allow simple + * switch to Hyp mode + */ + mrrc p15, 6, r0, r2, c2 + lsr r2, r2, #16 + and r2, r2, #0xff + cmp r2, #0 + bne guest_trap @ Guest called HVC + + /* + * Getting here means host called HVC, we shift parameters and branch + * to Hyp function. + */ + pop {r0, r1, r2} + + /* Check for __hyp_get_vectors */ + cmp r0, #-1 + mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR + beq 1f + + push {lr} + + mov lr, r0 + mov r0, r1 + mov r1, r2 + mov r2, r3 + +THUMB( orr lr, #1) + blx lr @ Call the HYP function + + pop {lr} +1: eret + +guest_trap: + load_vcpu r0 @ Load VCPU pointer to r0 + +#ifdef CONFIG_VFPv3 + @ Check for a VFP access + lsr r1, r1, #HSR_EC_SHIFT + cmp r1, #HSR_EC_CP_0_13 + beq __vfp_guest_restore +#endif + + mov r1, #ARM_EXCEPTION_HVC + b __guest_exit + +hyp_irq: + push {r0, r1, r2} + mov r1, #ARM_EXCEPTION_IRQ + load_vcpu r0 @ Load VCPU pointer to r0 + b __guest_exit + + .ltorg + + .popsection diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 8b1156b691ff..8b9c2eb5a9dc 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -123,4 +123,6 @@ void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); int asmlinkage __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host); +int asmlinkage __hyp_do_panic(const char *, int, u32); + #endif /* __ARM_KVM_HYP_H__ */ -- cgit v1.2.3 From c36b6db5f3e4c1bd21659aee8e67226352d254ae Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 6 Jan 2016 09:12:42 +0000 Subject: ARM: KVM: Add panic handling code Instead of spinning forever, let's "properly" handle any unexpected exception ("properly" meaning "print a spat on the console and die"). This has proved useful quite a few times... Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/hyp-entry.S | 28 +++++++++++++++++++++------- arch/arm/kvm/hyp/switch.c | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 7 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S index 1b4aa02fd364..54a8d67ad980 100644 --- a/arch/arm/kvm/hyp/hyp-entry.S +++ b/arch/arm/kvm/hyp/hyp-entry.S @@ -75,15 +75,29 @@ __kvm_hyp_vector: .macro invalid_vector label, cause .align -\label: b . 
+\label:	mov	r0, #\cause
+	b	__hyp_panic
 .endm
 
-	invalid_vector	hyp_reset
-	invalid_vector	hyp_undef
-	invalid_vector	hyp_svc
-	invalid_vector	hyp_pabt
-	invalid_vector	hyp_dabt
-	invalid_vector	hyp_fiq
+	invalid_vector	hyp_reset	ARM_EXCEPTION_RESET
+	invalid_vector	hyp_undef	ARM_EXCEPTION_UNDEFINED
+	invalid_vector	hyp_svc		ARM_EXCEPTION_SOFTWARE
+	invalid_vector	hyp_pabt	ARM_EXCEPTION_PREF_ABORT
+	invalid_vector	hyp_dabt	ARM_EXCEPTION_DATA_ABORT
+	invalid_vector	hyp_fiq		ARM_EXCEPTION_FIQ
+
+ENTRY(__hyp_do_panic)
+	mrs	lr, cpsr
+	bic	lr, lr, #MODE_MASK
+	orr	lr, lr, #SVC_MODE
+THUMB(	orr	lr, lr, #PSR_T_BIT	)
+	msr	spsr_cxsf, lr
+	ldr	lr, =panic
+	msr	ELR_hyp, lr
+	ldr	lr, =kvm_call_hyp
+	clrex
+	eret
+ENDPROC(__hyp_do_panic)
 
 hyp_hvc:
 	/*
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
index 0dd0ba33b8a7..abbe90b5f2ff 100644
--- a/arch/arm/kvm/hyp/switch.c
+++ b/arch/arm/kvm/hyp/switch.c
@@ -192,3 +192,41 @@ again:
 }
 
 __alias(__guest_run) int __weak __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+static const char * const __hyp_panic_string[] = {
+	[ARM_EXCEPTION_RESET]      = "\nHYP panic: RST   PC:%08x CPSR:%08x",
+	[ARM_EXCEPTION_UNDEFINED]  = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
+	[ARM_EXCEPTION_SOFTWARE]   = "\nHYP panic: SVC   PC:%08x CPSR:%08x",
+	[ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x",
+	[ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x",
+	[ARM_EXCEPTION_IRQ]        = "\nHYP panic: IRQ   PC:%08x CPSR:%08x",
+	[ARM_EXCEPTION_FIQ]        = "\nHYP panic: FIQ   PC:%08x CPSR:%08x",
+	[ARM_EXCEPTION_HVC]        = "\nHYP panic: HVC   PC:%08x CPSR:%08x",
+};
+
+void __hyp_text __noreturn __hyp_panic(int cause)
+{
+	u32 elr = read_special(ELR_hyp);
+	u32 val;
+
+	if (cause == ARM_EXCEPTION_DATA_ABORT)
+		val = read_sysreg(HDFAR);
+	else
+		val = read_special(SPSR);
+
+	if (read_sysreg(VTTBR)) {
+		struct kvm_vcpu *vcpu;
+		struct kvm_cpu_context *host_ctxt;
+
+		vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
+		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+		__deactivate_traps(vcpu);
+		__deactivate_vm(vcpu);
+		__sysreg_restore_state(host_ctxt);
+	}
+
+	/* Call panic for real */
+	__hyp_do_panic(__hyp_panic_string[cause], elr, val);
+
+	unreachable();
+}
--
cgit v1.2.3


From b57cd6f6407d420d522ab71b9c0dd11993e49ba1 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Wed, 6 Jan 2016 12:10:58 +0000
Subject: ARM: KVM: Change kvm_call_hyp return type to unsigned long

Having u64 as the kvm_call_hyp return type is problematic, as it
forces all kinds of tricks for the return values from HYP to be
promoted to 64bit (LE has the LSBs in r0, and BE has them in r1).

Since the only user of the return value is perfectly happy with
a 32bit value, let's make kvm_call_hyp return an unsigned long,
which is 32bit on ARM.

This solves yet another headache.
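An editorial illustration of the convention at play (assuming the AAPCS rule that a 64-bit result is returned in the r0/r1 register pair):

	/*
	 * With a u64 return type, a 32-bit value coming back from HYP sits
	 * in r0 on LE but must be shuffled into place on BE8. An unsigned
	 * long (32 bits on ARM) always comes back in r0, so no fixup is
	 * needed at the call site:
	 */
	unsigned long ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);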
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 2 +- arch/arm/kvm/interrupts.S | 10 ++-------- 2 files changed, 3 insertions(+), 9 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 02932ba8a653..c62d71751f7a 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -165,7 +165,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); -u64 kvm_call_hyp(void *hypfn, ...); +unsigned long kvm_call_hyp(void *hypfn, ...); void force_vm_exit(const cpumask_t *mask); #define KVM_ARCH_WANT_MMU_NOTIFIER diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 7bfb28936914..01eb169f38f6 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -207,20 +207,14 @@ after_vfp_restore: restore_host_regs clrex @ Clear exclusive monitor -#ifndef CONFIG_CPU_ENDIAN_BE8 mov r0, r1 @ Return the return code - mov r1, #0 @ Clear upper bits in return value -#else - @ r1 already has return code - mov r0, #0 @ Clear upper bits in return value -#endif /* CONFIG_CPU_ENDIAN_BE8 */ bx lr @ return to IOCTL /******************************************************************** * Call function in Hyp mode * * - * u64 kvm_call_hyp(void *hypfn, ...); + * unsigned long kvm_call_hyp(void *hypfn, ...); * * This is not really a variadic function in the classic C-way and care must * be taken when calling this to ensure parameters are passed in registers @@ -231,7 +225,7 @@ after_vfp_restore: * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the * function pointer can be passed). The function being called must be mapped * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are - * passed in r0 and r1. + * passed in r0 (strictly 32bit). * * A function pointer with a value of 0xffffffff has a special meaning, * and is used to implement __hyp_get_vectors in the same way as in -- cgit v1.2.3 From b98e2e728eed3091edbce64cfcc447a482b7726c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:45:17 +0000 Subject: ARM: KVM: Remove the old world switch As we now have a full reimplementation of the world switch, it is time to kiss the old stuff goodbye. I'm not sure we'll miss it. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/interrupts.S | 469 +---------------------------- arch/arm/kvm/interrupts_head.S | 660 ----------------------------------------- 2 files changed, 1 insertion(+), 1128 deletions(-) delete mode 100644 arch/arm/kvm/interrupts_head.S (limited to 'arch/arm') diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index 01eb169f38f6..b1bd316f14c0 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S @@ -17,198 +17,8 @@ */ #include -#include -#include -#include -#include -#include -#include -#include -#include -#include "interrupts_head.S" .text - .pushsection .hyp.text, "ax" - -/******************************************************************** - * Flush per-VMID TLBs - * - * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); - * - * We rely on the hardware to broadcast the TLB invalidation to all CPUs - * inside the inner-shareable domain (which is the case for all v7 - * implementations). 
If we come across a non-IS SMP implementation, we'll - * have to use an IPI based mechanism. Until then, we stick to the simple - * hardware assisted version. - * - * As v7 does not support flushing per IPA, just nuke the whole TLB - * instead, ignoring the ipa value. - */ -ENTRY(__kvm_tlb_flush_vmid_ipa) - push {r2, r3} - - dsb ishst - add r0, r0, #KVM_VTTBR - ldrd r2, r3, [r0] - mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR - isb - mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored) - dsb ish - isb - mov r2, #0 - mov r3, #0 - mcrr p15, 6, r2, r3, c2 @ Back to VMID #0 - isb @ Not necessary if followed by eret - - pop {r2, r3} - bx lr -ENDPROC(__kvm_tlb_flush_vmid_ipa) - -/** - * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs - * - * Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing address - * parameter - */ - -ENTRY(__kvm_tlb_flush_vmid) - b __kvm_tlb_flush_vmid_ipa -ENDPROC(__kvm_tlb_flush_vmid) - -/******************************************************************** - * Flush TLBs and instruction caches of all CPUs inside the inner-shareable - * domain, for all VMIDs - * - * void __kvm_flush_vm_context(void); - */ -ENTRY(__kvm_flush_vm_context) - mov r0, #0 @ rn parameter for c15 flushes is SBZ - - /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */ - mcr p15, 4, r0, c8, c3, 4 - /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */ - mcr p15, 0, r0, c7, c1, 0 - dsb ish - isb @ Not necessary if followed by eret - - bx lr -ENDPROC(__kvm_flush_vm_context) - - -/******************************************************************** - * Hypervisor world-switch code - * - * - * int __kvm_vcpu_run(struct kvm_vcpu *vcpu) - */ -ENTRY(__kvm_vcpu_run) - @ Save the vcpu pointer - mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR - - save_host_regs - - restore_vgic_state - restore_timer_state - - @ Store hardware CP15 state and load guest state - read_cp15_state store_to_vcpu = 0 - write_cp15_state read_from_vcpu = 1 - - @ If the host kernel has not been configured with VFPv3 support, - @ then it is safer if we deny guests from using it as well. 
-#ifdef CONFIG_VFPv3 - @ Set FPEXC_EN so the guest doesn't trap floating point instructions - VFPFMRX r2, FPEXC @ VMRS - push {r2} - orr r2, r2, #FPEXC_EN - VFPFMXR FPEXC, r2 @ VMSR -#endif - - @ Configure Hyp-role - configure_hyp_role vmentry - - @ Trap coprocessor CRx accesses - set_hstr vmentry - set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) - set_hdcr vmentry - - @ Write configured ID register into MIDR alias - ldr r1, [vcpu, #VCPU_MIDR] - mcr p15, 4, r1, c0, c0, 0 - - @ Write guest view of MPIDR into VMPIDR - ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)] - mcr p15, 4, r1, c0, c0, 5 - - @ Set up guest memory translation - ldr r1, [vcpu, #VCPU_KVM] - add r1, r1, #KVM_VTTBR - ldrd r2, r3, [r1] - mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR - - @ We're all done, just restore the GPRs and go to the guest - restore_guest_regs - clrex @ Clear exclusive monitor - eret - -__kvm_vcpu_return: - /* - * return convention: - * guest r0, r1, r2 saved on the stack - * r0: vcpu pointer - * r1: exception code - */ - save_guest_regs - - @ Set VMID == 0 - mov r2, #0 - mov r3, #0 - mcrr p15, 6, r2, r3, c2 @ Write VTTBR - - @ Don't trap coprocessor accesses for host kernel - set_hstr vmexit - set_hdcr vmexit - set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore - -#ifdef CONFIG_VFPv3 - @ Switch VFP/NEON hardware state to the host's - add r7, vcpu, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) - store_vfp_state r7 - add r7, vcpu, #VCPU_HOST_CTXT - ldr r7, [r7] - add r7, r7, #CPU_CTXT_VFP - restore_vfp_state r7 - -after_vfp_restore: - @ Restore FPEXC_EN which we clobbered on entry - pop {r2} - VFPFMXR FPEXC, r2 -#else -after_vfp_restore: -#endif - - @ Reset Hyp-role - configure_hyp_role vmexit - - @ Let host read hardware MIDR - mrc p15, 0, r2, c0, c0, 0 - mcr p15, 4, r2, c0, c0, 0 - - @ Back to hardware MPIDR - mrc p15, 0, r2, c0, c0, 5 - mcr p15, 4, r2, c0, c0, 5 - - @ Store guest CP15 state and restore host state - read_cp15_state store_to_vcpu = 1 - write_cp15_state read_from_vcpu = 0 - - save_timer_state - save_vgic_state - - restore_host_regs - clrex @ Clear exclusive monitor - mov r0, r1 @ Return the return code - bx lr @ return to IOCTL /******************************************************************** * Call function in Hyp mode @@ -239,281 +49,4 @@ after_vfp_restore: ENTRY(kvm_call_hyp) hvc #0 bx lr - -/******************************************************************** - * Hypervisor exception vector and handlers - * - * - * The KVM/ARM Hypervisor ABI is defined as follows: - * - * Entry to Hyp mode from the host kernel will happen _only_ when an HVC - * instruction is issued since all traps are disabled when running the host - * kernel as per the Hyp-mode initialization at boot time. - * - * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc - * below) when the HVC instruction is called from SVC mode (i.e. a guest or the - * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC - * instructions are called from within Hyp-mode. - * - * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): - * Switching to Hyp mode is done through a simple HVC #0 instruction. The - * exception vector code will check that the HVC comes from VMID==0 and if - * so will push the necessary state (SPSR, lr_usr) on the Hyp stack. - * - r0 contains a pointer to a HYP function - * - r1, r2, and r3 contain arguments to the above function. - * - The HYP function will be called with its arguments in r0, r1 and r2. 
- * On HYP function return, we return directly to SVC. - * - * Note that the above is used to execute code in Hyp-mode from a host-kernel - * point of view, and is a different concept from performing a world-switch and - * executing guest code SVC mode (with a VMID != 0). - */ - -/* Handle undef, svc, pabt, or dabt by crashing with a user notice */ -.macro bad_exception exception_code, panic_str - push {r0-r2} - mrrc p15, 6, r0, r1, c2 @ Read VTTBR - lsr r1, r1, #16 - ands r1, r1, #0xff - beq 99f - - load_vcpu @ Load VCPU pointer - .if \exception_code == ARM_EXCEPTION_DATA_ABORT - mrc p15, 4, r2, c5, c2, 0 @ HSR - mrc p15, 4, r1, c6, c0, 0 @ HDFAR - str r2, [vcpu, #VCPU_HSR] - str r1, [vcpu, #VCPU_HxFAR] - .endif - .if \exception_code == ARM_EXCEPTION_PREF_ABORT - mrc p15, 4, r2, c5, c2, 0 @ HSR - mrc p15, 4, r1, c6, c0, 2 @ HIFAR - str r2, [vcpu, #VCPU_HSR] - str r1, [vcpu, #VCPU_HxFAR] - .endif - mov r1, #\exception_code - b __kvm_vcpu_return - - @ We were in the host already. Let's craft a panic-ing return to SVC. -99: mrs r2, cpsr - bic r2, r2, #MODE_MASK - orr r2, r2, #SVC_MODE -THUMB( orr r2, r2, #PSR_T_BIT ) - msr spsr_cxsf, r2 - mrs r1, ELR_hyp - ldr r2, =panic - msr ELR_hyp, r2 - ldr r0, =\panic_str - clrex @ Clear exclusive monitor - eret -.endm - - .align 5 -__kvm_hyp_vector: - .globl __kvm_hyp_vector - - @ Hyp-mode exception vector - W(b) hyp_reset - W(b) hyp_undef - W(b) hyp_svc - W(b) hyp_pabt - W(b) hyp_dabt - W(b) hyp_hvc - W(b) hyp_irq - W(b) hyp_fiq - - .align -hyp_reset: - b hyp_reset - - .align -hyp_undef: - bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str - - .align -hyp_svc: - bad_exception ARM_EXCEPTION_HVC, svc_die_str - - .align -hyp_pabt: - bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str - - .align -hyp_dabt: - bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str - - .align -hyp_hvc: - /* - * Getting here is either becuase of a trap from a guest or from calling - * HVC from the host kernel, which means "switch to Hyp mode". - */ - push {r0, r1, r2} - - @ Check syndrome register - mrc p15, 4, r1, c5, c2, 0 @ HSR - lsr r0, r1, #HSR_EC_SHIFT - cmp r0, #HSR_EC_HVC - bne guest_trap @ Not HVC instr. - - /* - * Let's check if the HVC came from VMID 0 and allow simple - * switch to Hyp mode - */ - mrrc p15, 6, r0, r2, c2 - lsr r2, r2, #16 - and r2, r2, #0xff - cmp r2, #0 - bne guest_trap @ Guest called HVC - - /* - * Getting here means host called HVC, we shift parameters and branch - * to Hyp function. 
- */ - pop {r0, r1, r2} - - /* Check for __hyp_get_vectors */ - cmp r0, #-1 - mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR - beq 1f - - push {lr} - mrs lr, SPSR - push {lr} - - mov lr, r0 - mov r0, r1 - mov r1, r2 - mov r2, r3 - -THUMB( orr lr, #1) - blx lr @ Call the HYP function - - pop {lr} - msr SPSR_csxf, lr - pop {lr} -1: eret - -guest_trap: - load_vcpu @ Load VCPU pointer to r0 - str r1, [vcpu, #VCPU_HSR] - - @ Check if we need the fault information - lsr r1, r1, #HSR_EC_SHIFT -#ifdef CONFIG_VFPv3 - cmp r1, #HSR_EC_CP_0_13 - beq switch_to_guest_vfp -#endif - cmp r1, #HSR_EC_IABT - mrceq p15, 4, r2, c6, c0, 2 @ HIFAR - beq 2f - cmp r1, #HSR_EC_DABT - bne 1f - mrc p15, 4, r2, c6, c0, 0 @ HDFAR - -2: str r2, [vcpu, #VCPU_HxFAR] - - /* - * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode: - * - * Abort on the stage 2 translation for a memory access from a - * Non-secure PL1 or PL0 mode: - * - * For any Access flag fault or Translation fault, and also for any - * Permission fault on the stage 2 translation of a memory access - * made as part of a translation table walk for a stage 1 translation, - * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR - * is UNKNOWN. - */ - - /* Check for permission fault, and S1PTW */ - mrc p15, 4, r1, c5, c2, 0 @ HSR - and r0, r1, #HSR_FSC_TYPE - cmp r0, #FSC_PERM - tsteq r1, #(1 << 7) @ S1PTW - mrcne p15, 4, r2, c6, c0, 4 @ HPFAR - bne 3f - - /* Preserve PAR */ - mrrc p15, 0, r0, r1, c7 @ PAR - push {r0, r1} - - /* Resolve IPA using the xFAR */ - mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR - isb - mrrc p15, 0, r0, r1, c7 @ PAR - tst r0, #1 - bne 4f @ Failed translation - ubfx r2, r0, #12, #20 - lsl r2, r2, #4 - orr r2, r2, r1, lsl #24 - - /* Restore PAR */ - pop {r0, r1} - mcrr p15, 0, r0, r1, c7 @ PAR - -3: load_vcpu @ Load VCPU pointer to r0 - str r2, [r0, #VCPU_HPFAR] - -1: mov r1, #ARM_EXCEPTION_HVC - b __kvm_vcpu_return - -4: pop {r0, r1} @ Failed translation, return to guest - mcrr p15, 0, r0, r1, c7 @ PAR - clrex - pop {r0, r1, r2} - eret - -/* - * If VFPv3 support is not available, then we will not switch the VFP - * registers; however cp10 and cp11 accesses will still trap and fallback - * to the regular coprocessor emulation code, which currently will - * inject an undefined exception to the guest. - */ -#ifdef CONFIG_VFPv3 -switch_to_guest_vfp: - push {r3-r7} - - @ NEON/VFP used. Turn on VFP access. 
- set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11)) - - @ Switch VFP/NEON hardware state to the guest's - add r7, r0, #VCPU_HOST_CTXT - ldr r7, [r7] - add r7, r7, #CPU_CTXT_VFP - store_vfp_state r7 - add r7, r0, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP) - restore_vfp_state r7 - - pop {r3-r7} - pop {r0-r2} - clrex - eret -#endif - - .align -hyp_irq: - push {r0, r1, r2} - mov r1, #ARM_EXCEPTION_IRQ - load_vcpu @ Load VCPU pointer to r0 - b __kvm_vcpu_return - - .align -hyp_fiq: - b hyp_fiq - - .ltorg - - .popsection - - .pushsection ".rodata" - -und_die_str: - .ascii "unexpected undefined exception in Hyp mode at: %#08x\n" -pabt_die_str: - .ascii "unexpected prefetch abort in Hyp mode at: %#08x\n" -dabt_die_str: - .ascii "unexpected data abort in Hyp mode at: %#08x\n" -svc_die_str: - .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x\n" - - .popsection +ENDPROC(kvm_call_hyp) diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S deleted file mode 100644 index e0943cb80ab3..000000000000 --- a/arch/arm/kvm/interrupts_head.S +++ /dev/null @@ -1,660 +0,0 @@ -#include -#include - -/* Compat macro, until we get rid of this file entierely */ -#define VCPU_GP_REGS (VCPU_GUEST_CTXT + CPU_CTXT_GP_REGS) -#define VCPU_USR_REGS (VCPU_GP_REGS + GP_REGS_USR) -#define VCPU_SVC_REGS (VCPU_GP_REGS + GP_REGS_SVC) -#define VCPU_ABT_REGS (VCPU_GP_REGS + GP_REGS_ABT) -#define VCPU_UND_REGS (VCPU_GP_REGS + GP_REGS_UND) -#define VCPU_IRQ_REGS (VCPU_GP_REGS + GP_REGS_IRQ) -#define VCPU_FIQ_REGS (VCPU_GP_REGS + GP_REGS_FIQ) -#define VCPU_PC (VCPU_GP_REGS + GP_REGS_PC) -#define VCPU_CPSR (VCPU_GP_REGS + GP_REGS_CPSR) - -#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) -#define VCPU_USR_SP (VCPU_USR_REG(13)) -#define VCPU_USR_LR (VCPU_USR_REG(14)) -#define VCPU_CP15_BASE (VCPU_GUEST_CTXT + CPU_CTXT_CP15) -#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15_BASE + (_cp15_reg_idx * 4)) - -/* - * Many of these macros need to access the VCPU structure, which is always - * held in r0. These macros should never clobber r1, as it is used to hold the - * exception code on the return path (except of course the macro that switches - * all the registers before the final jump to the VM). - */ -vcpu .req r0 @ vcpu pointer always in r0 - -/* Clobbers {r2-r6} */ -.macro store_vfp_state vfp_base - @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions - VFPFMRX r2, FPEXC - @ Make sure VFP is enabled so we can touch the registers. - orr r6, r2, #FPEXC_EN - VFPFMXR FPEXC, r6 - - VFPFMRX r3, FPSCR - tst r2, #FPEXC_EX @ Check for VFP Subarchitecture - beq 1f - @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are upredictable, so - @ we only need to save them if FPEXC_EX is set. 
- VFPFMRX r4, FPINST - tst r2, #FPEXC_FP2V - VFPFMRX r5, FPINST2, ne @ vmrsne - bic r6, r2, #FPEXC_EX @ FPEXC_EX disable - VFPFMXR FPEXC, r6 -1: - VFPFSTMIA \vfp_base, r6 @ Save VFP registers - stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2 -.endm - -/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */ -.macro restore_vfp_state vfp_base - VFPFLDMIA \vfp_base, r6 @ Load VFP registers - ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2 - - VFPFMXR FPSCR, r3 - tst r2, #FPEXC_EX @ Check for VFP Subarchitecture - beq 1f - VFPFMXR FPINST, r4 - tst r2, #FPEXC_FP2V - VFPFMXR FPINST2, r5, ne -1: - VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN) -.endm - -/* These are simply for the macros to work - value don't have meaning */ -.equ usr, 0 -.equ svc, 1 -.equ abt, 2 -.equ und, 3 -.equ irq, 4 -.equ fiq, 5 - -.macro push_host_regs_mode mode - mrs r2, SP_\mode - mrs r3, LR_\mode - mrs r4, SPSR_\mode - push {r2, r3, r4} -.endm - -/* - * Store all host persistent registers on the stack. - * Clobbers all registers, in all modes, except r0 and r1. - */ -.macro save_host_regs - /* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */ - mrs r2, ELR_hyp - push {r2} - - /* usr regs */ - push {r4-r12} @ r0-r3 are always clobbered - mrs r2, SP_usr - mov r3, lr - push {r2, r3} - - push_host_regs_mode svc - push_host_regs_mode abt - push_host_regs_mode und - push_host_regs_mode irq - - /* fiq regs */ - mrs r2, r8_fiq - mrs r3, r9_fiq - mrs r4, r10_fiq - mrs r5, r11_fiq - mrs r6, r12_fiq - mrs r7, SP_fiq - mrs r8, LR_fiq - mrs r9, SPSR_fiq - push {r2-r9} -.endm - -.macro pop_host_regs_mode mode - pop {r2, r3, r4} - msr SP_\mode, r2 - msr LR_\mode, r3 - msr SPSR_\mode, r4 -.endm - -/* - * Restore all host registers from the stack. - * Clobbers all registers, in all modes, except r0 and r1. - */ -.macro restore_host_regs - pop {r2-r9} - msr r8_fiq, r2 - msr r9_fiq, r3 - msr r10_fiq, r4 - msr r11_fiq, r5 - msr r12_fiq, r6 - msr SP_fiq, r7 - msr LR_fiq, r8 - msr SPSR_fiq, r9 - - pop_host_regs_mode irq - pop_host_regs_mode und - pop_host_regs_mode abt - pop_host_regs_mode svc - - pop {r2, r3} - msr SP_usr, r2 - mov lr, r3 - pop {r4-r12} - - pop {r2} - msr ELR_hyp, r2 -.endm - -/* - * Restore SP, LR and SPSR for a given mode. offset is the offset of - * this mode's registers from the VCPU base. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r1, r2, r3, r4. - */ -.macro restore_guest_regs_mode mode, offset - add r1, vcpu, \offset - ldm r1, {r2, r3, r4} - msr SP_\mode, r2 - msr LR_\mode, r3 - msr SPSR_\mode, r4 -.endm - -/* - * Restore all guest registers from the vcpu struct. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers *all* registers. - */ -.macro restore_guest_regs - restore_guest_regs_mode svc, #VCPU_SVC_REGS - restore_guest_regs_mode abt, #VCPU_ABT_REGS - restore_guest_regs_mode und, #VCPU_UND_REGS - restore_guest_regs_mode irq, #VCPU_IRQ_REGS - - add r1, vcpu, #VCPU_FIQ_REGS - ldm r1, {r2-r9} - msr r8_fiq, r2 - msr r9_fiq, r3 - msr r10_fiq, r4 - msr r11_fiq, r5 - msr r12_fiq, r6 - msr SP_fiq, r7 - msr LR_fiq, r8 - msr SPSR_fiq, r9 - - @ Load return state - ldr r2, [vcpu, #VCPU_PC] - ldr r3, [vcpu, #VCPU_CPSR] - msr ELR_hyp, r2 - msr SPSR_cxsf, r3 - - @ Load user registers - ldr r2, [vcpu, #VCPU_USR_SP] - ldr r3, [vcpu, #VCPU_USR_LR] - msr SP_usr, r2 - mov lr, r3 - add vcpu, vcpu, #(VCPU_USR_REGS) - ldm vcpu, {r0-r12} -.endm - -/* - * Save SP, LR and SPSR for a given mode. offset is the offset of - * this mode's registers from the VCPU base. 
- * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2, r3, r4, r5. - */ -.macro save_guest_regs_mode mode, offset - add r2, vcpu, \offset - mrs r3, SP_\mode - mrs r4, LR_\mode - mrs r5, SPSR_\mode - stm r2, {r3, r4, r5} -.endm - -/* - * Save all guest registers to the vcpu struct - * Expects guest's r0, r1, r2 on the stack. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2, r3, r4, r5. - */ -.macro save_guest_regs - @ Store usr registers - add r2, vcpu, #VCPU_USR_REG(3) - stm r2, {r3-r12} - add r2, vcpu, #VCPU_USR_REG(0) - pop {r3, r4, r5} @ r0, r1, r2 - stm r2, {r3, r4, r5} - mrs r2, SP_usr - mov r3, lr - str r2, [vcpu, #VCPU_USR_SP] - str r3, [vcpu, #VCPU_USR_LR] - - @ Store return state - mrs r2, ELR_hyp - mrs r3, spsr - str r2, [vcpu, #VCPU_PC] - str r3, [vcpu, #VCPU_CPSR] - - @ Store other guest registers - save_guest_regs_mode svc, #VCPU_SVC_REGS - save_guest_regs_mode abt, #VCPU_ABT_REGS - save_guest_regs_mode und, #VCPU_UND_REGS - save_guest_regs_mode irq, #VCPU_IRQ_REGS -.endm - -/* Reads cp15 registers from hardware and stores them in memory - * @store_to_vcpu: If 0, registers are written in-order to the stack, - * otherwise to the VCPU struct pointed to by vcpup - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2 - r12 - */ -.macro read_cp15_state store_to_vcpu - mrc p15, 0, r2, c1, c0, 0 @ SCTLR - mrc p15, 0, r3, c1, c0, 2 @ CPACR - mrc p15, 0, r4, c2, c0, 2 @ TTBCR - mrc p15, 0, r5, c3, c0, 0 @ DACR - mrrc p15, 0, r6, r7, c2 @ TTBR 0 - mrrc p15, 1, r8, r9, c2 @ TTBR 1 - mrc p15, 0, r10, c10, c2, 0 @ PRRR - mrc p15, 0, r11, c10, c2, 1 @ NMRR - mrc p15, 2, r12, c0, c0, 0 @ CSSELR - - .if \store_to_vcpu == 0 - push {r2-r12} @ Push CP15 registers - .else - str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] - str r3, [vcpu, #CP15_OFFSET(c1_CPACR)] - str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] - str r5, [vcpu, #CP15_OFFSET(c3_DACR)] - add r2, vcpu, #CP15_OFFSET(c2_TTBR0) - strd r6, r7, [r2] - add r2, vcpu, #CP15_OFFSET(c2_TTBR1) - strd r8, r9, [r2] - str r10, [vcpu, #CP15_OFFSET(c10_PRRR)] - str r11, [vcpu, #CP15_OFFSET(c10_NMRR)] - str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] - .endif - - mrc p15, 0, r2, c13, c0, 1 @ CID - mrc p15, 0, r3, c13, c0, 2 @ TID_URW - mrc p15, 0, r4, c13, c0, 3 @ TID_URO - mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV - mrc p15, 0, r6, c5, c0, 0 @ DFSR - mrc p15, 0, r7, c5, c0, 1 @ IFSR - mrc p15, 0, r8, c5, c1, 0 @ ADFSR - mrc p15, 0, r9, c5, c1, 1 @ AIFSR - mrc p15, 0, r10, c6, c0, 0 @ DFAR - mrc p15, 0, r11, c6, c0, 2 @ IFAR - mrc p15, 0, r12, c12, c0, 0 @ VBAR - - .if \store_to_vcpu == 0 - push {r2-r12} @ Push CP15 registers - .else - str r2, [vcpu, #CP15_OFFSET(c13_CID)] - str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] - str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)] - str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] - str r6, [vcpu, #CP15_OFFSET(c5_DFSR)] - str r7, [vcpu, #CP15_OFFSET(c5_IFSR)] - str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] - str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] - str r10, [vcpu, #CP15_OFFSET(c6_DFAR)] - str r11, [vcpu, #CP15_OFFSET(c6_IFAR)] - str r12, [vcpu, #CP15_OFFSET(c12_VBAR)] - .endif - - mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL - mrrc p15, 0, r4, r5, c7 @ PAR - mrc p15, 0, r6, c10, c3, 0 @ AMAIR0 - mrc p15, 0, r7, c10, c3, 1 @ AMAIR1 - - .if \store_to_vcpu == 0 - push {r2,r4-r7} - .else - str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] - add r12, vcpu, #CP15_OFFSET(c7_PAR) - strd r4, r5, [r12] - str r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)] - str r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)] - .endif -.endm - -/* - * Reads cp15 registers from memory and writes them to 
hardware - * @read_from_vcpu: If 0, registers are read in-order from the stack, - * otherwise from the VCPU struct pointed to by vcpup - * - * Assumes vcpu pointer in vcpu reg - */ -.macro write_cp15_state read_from_vcpu - .if \read_from_vcpu == 0 - pop {r2,r4-r7} - .else - ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)] - add r12, vcpu, #CP15_OFFSET(c7_PAR) - ldrd r4, r5, [r12] - ldr r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)] - ldr r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)] - .endif - - mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL - mcrr p15, 0, r4, r5, c7 @ PAR - mcr p15, 0, r6, c10, c3, 0 @ AMAIR0 - mcr p15, 0, r7, c10, c3, 1 @ AMAIR1 - - .if \read_from_vcpu == 0 - pop {r2-r12} - .else - ldr r2, [vcpu, #CP15_OFFSET(c13_CID)] - ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] - ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)] - ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] - ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)] - ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)] - ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] - ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] - ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)] - ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)] - ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)] - .endif - - mcr p15, 0, r2, c13, c0, 1 @ CID - mcr p15, 0, r3, c13, c0, 2 @ TID_URW - mcr p15, 0, r4, c13, c0, 3 @ TID_URO - mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV - mcr p15, 0, r6, c5, c0, 0 @ DFSR - mcr p15, 0, r7, c5, c0, 1 @ IFSR - mcr p15, 0, r8, c5, c1, 0 @ ADFSR - mcr p15, 0, r9, c5, c1, 1 @ AIFSR - mcr p15, 0, r10, c6, c0, 0 @ DFAR - mcr p15, 0, r11, c6, c0, 2 @ IFAR - mcr p15, 0, r12, c12, c0, 0 @ VBAR - - .if \read_from_vcpu == 0 - pop {r2-r12} - .else - ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] - ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)] - ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] - ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)] - add r12, vcpu, #CP15_OFFSET(c2_TTBR0) - ldrd r6, r7, [r12] - add r12, vcpu, #CP15_OFFSET(c2_TTBR1) - ldrd r8, r9, [r12] - ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)] - ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)] - ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] - .endif - - mcr p15, 0, r2, c1, c0, 0 @ SCTLR - mcr p15, 0, r3, c1, c0, 2 @ CPACR - mcr p15, 0, r4, c2, c0, 2 @ TTBCR - mcr p15, 0, r5, c3, c0, 0 @ DACR - mcrr p15, 0, r6, r7, c2 @ TTBR 0 - mcrr p15, 1, r8, r9, c2 @ TTBR 1 - mcr p15, 0, r10, c10, c2, 0 @ PRRR - mcr p15, 0, r11, c10, c2, 1 @ NMRR - mcr p15, 2, r12, c0, c0, 0 @ CSSELR -.endm - -/* - * Save the VGIC CPU state into memory - * - * Assumes vcpu pointer in vcpu reg - */ -.macro save_vgic_state - /* Get VGIC VCTRL base into r2 */ - ldr r2, [vcpu, #VCPU_KVM] - ldr r2, [r2, #KVM_VGIC_VCTRL] - cmp r2, #0 - beq 2f - - /* Compute the address of struct vgic_cpu */ - add r11, vcpu, #VCPU_VGIC_CPU - - /* Save all interesting registers */ - ldr r4, [r2, #GICH_VMCR] - ldr r5, [r2, #GICH_MISR] - ldr r6, [r2, #GICH_EISR0] - ldr r7, [r2, #GICH_EISR1] - ldr r8, [r2, #GICH_ELRSR0] - ldr r9, [r2, #GICH_ELRSR1] - ldr r10, [r2, #GICH_APR] -ARM_BE8(rev r4, r4 ) -ARM_BE8(rev r5, r5 ) -ARM_BE8(rev r6, r6 ) -ARM_BE8(rev r7, r7 ) -ARM_BE8(rev r8, r8 ) -ARM_BE8(rev r9, r9 ) -ARM_BE8(rev r10, r10 ) - - str r4, [r11, #VGIC_V2_CPU_VMCR] - str r5, [r11, #VGIC_V2_CPU_MISR] -#ifdef CONFIG_CPU_ENDIAN_BE8 - str r6, [r11, #(VGIC_V2_CPU_EISR + 4)] - str r7, [r11, #VGIC_V2_CPU_EISR] - str r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)] - str r9, [r11, #VGIC_V2_CPU_ELRSR] -#else - str r6, [r11, #VGIC_V2_CPU_EISR] - str r7, [r11, #(VGIC_V2_CPU_EISR + 4)] - str r8, [r11, #VGIC_V2_CPU_ELRSR] - str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)] -#endif - str r10, [r11, #VGIC_V2_CPU_APR] - - /* Clear GICH_HCR */ - mov r5, 
#0 - str r5, [r2, #GICH_HCR] - - /* Save list registers */ - add r2, r2, #GICH_LR0 - add r3, r11, #VGIC_V2_CPU_LR - ldr r4, [r11, #VGIC_CPU_NR_LR] -1: ldr r6, [r2], #4 -ARM_BE8(rev r6, r6 ) - str r6, [r3], #4 - subs r4, r4, #1 - bne 1b -2: -.endm - -/* - * Restore the VGIC CPU state from memory - * - * Assumes vcpu pointer in vcpu reg - */ -.macro restore_vgic_state - /* Get VGIC VCTRL base into r2 */ - ldr r2, [vcpu, #VCPU_KVM] - ldr r2, [r2, #KVM_VGIC_VCTRL] - cmp r2, #0 - beq 2f - - /* Compute the address of struct vgic_cpu */ - add r11, vcpu, #VCPU_VGIC_CPU - - /* We only restore a minimal set of registers */ - ldr r3, [r11, #VGIC_V2_CPU_HCR] - ldr r4, [r11, #VGIC_V2_CPU_VMCR] - ldr r8, [r11, #VGIC_V2_CPU_APR] -ARM_BE8(rev r3, r3 ) -ARM_BE8(rev r4, r4 ) -ARM_BE8(rev r8, r8 ) - - str r3, [r2, #GICH_HCR] - str r4, [r2, #GICH_VMCR] - str r8, [r2, #GICH_APR] - - /* Restore list registers */ - add r2, r2, #GICH_LR0 - add r3, r11, #VGIC_V2_CPU_LR - ldr r4, [r11, #VGIC_CPU_NR_LR] -1: ldr r6, [r3], #4 -ARM_BE8(rev r6, r6 ) - str r6, [r2], #4 - subs r4, r4, #1 - bne 1b -2: -.endm - -#define CNTHCTL_PL1PCTEN (1 << 0) -#define CNTHCTL_PL1PCEN (1 << 1) - -/* - * Save the timer state onto the VCPU and allow physical timer/counter access - * for the host. - * - * Assumes vcpu pointer in vcpu reg - * Clobbers r2-r5 - */ -.macro save_timer_state - ldr r4, [vcpu, #VCPU_KVM] - ldr r2, [r4, #KVM_TIMER_ENABLED] - cmp r2, #0 - beq 1f - - mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL - str r2, [vcpu, #VCPU_TIMER_CNTV_CTL] - - isb - - mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL - ldr r4, =VCPU_TIMER_CNTV_CVAL - add r5, vcpu, r4 - strd r2, r3, [r5] - - @ Ensure host CNTVCT == CNTPCT - mov r2, #0 - mcrr p15, 4, r2, r2, c14 @ CNTVOFF - -1: - mov r2, #0 @ Clear ENABLE - mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL - - @ Allow physical timer/counter access for the host - mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL - orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN) - mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL -.endm - -/* - * Load the timer state from the VCPU and deny physical timer/counter access - * for the host. - * - * Assumes vcpu pointer in vcpu reg - * Clobbers r2-r5 - */ -.macro restore_timer_state - @ Disallow physical timer access for the guest - @ Physical counter access is allowed - mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL - orr r2, r2, #CNTHCTL_PL1PCTEN - bic r2, r2, #CNTHCTL_PL1PCEN - mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL - - ldr r4, [vcpu, #VCPU_KVM] - ldr r2, [r4, #KVM_TIMER_ENABLED] - cmp r2, #0 - beq 1f - - ldr r2, [r4, #KVM_TIMER_CNTVOFF] - ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)] - mcrr p15, 4, rr_lo_hi(r2, r3), c14 @ CNTVOFF - - ldr r4, =VCPU_TIMER_CNTV_CVAL - add r5, vcpu, r4 - ldrd r2, r3, [r5] - mcrr p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL - isb - - ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL] - and r2, r2, #3 - mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL -1: -.endm - -.equ vmentry, 0 -.equ vmexit, 1 - -/* Configures the HSTR (Hyp System Trap Register) on entry/return - * (hardware reset value is 0) */ -.macro set_hstr operation - mrc p15, 4, r2, c1, c1, 3 - ldr r3, =HSTR_T(15) - .if \operation == vmentry - orr r2, r2, r3 @ Trap CR{15} - .else - bic r2, r2, r3 @ Don't trap any CRx accesses - .endif - mcr p15, 4, r2, c1, c1, 3 -.endm - -/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return - * (hardware reset value is 0). Keep previous value in r2. - * An ISB is emited on vmexit/vmtrap, but executed on vmexit only if - * VFP wasn't already enabled (always executed on vmtrap). 
- * If a label is specified with vmexit, it is branched to if VFP wasn't - * enabled. - */ -.macro set_hcptr operation, mask, label = none - mrc p15, 4, r2, c1, c1, 2 - ldr r3, =\mask - .if \operation == vmentry - orr r3, r2, r3 @ Trap coproc-accesses defined in mask - .else - bic r3, r2, r3 @ Don't trap defined coproc-accesses - .endif - mcr p15, 4, r3, c1, c1, 2 - .if \operation != vmentry - .if \operation == vmexit - tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11)) - beq 1f - .endif - isb - .if \label != none - b \label - .endif -1: - .endif -.endm - -/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return - * (hardware reset value is 0) */ -.macro set_hdcr operation - mrc p15, 4, r2, c1, c1, 1 - ldr r3, =(HDCR_TPM|HDCR_TPMCR) - .if \operation == vmentry - orr r2, r2, r3 @ Trap some perfmon accesses - .else - bic r2, r2, r3 @ Don't trap any perfmon accesses - .endif - mcr p15, 4, r2, c1, c1, 1 -.endm - -/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */ -.macro configure_hyp_role operation - .if \operation == vmentry - ldr r2, [vcpu, #VCPU_HCR] - ldr r3, [vcpu, #VCPU_IRQ_LINES] - orr r2, r2, r3 - .else - mov r2, #0 - .endif - mcr p15, 4, r2, c1, c1, 0 @ HCR -.endm - -.macro load_vcpu - mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR -.endm -- cgit v1.2.3 From d4c7688c51e57be20ca5f3dffa4c8771888a42fc Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 1 Feb 2016 19:56:31 +0000 Subject: ARM: KVM: Switch to C-based stage2 init As we now have hooks to setup VTCR from C code, let's drop the original VTCR setup and reimplement it as part of the HYP code. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 2 ++ arch/arm/include/asm/kvm_host.h | 1 + arch/arm/kvm/hyp/Makefile | 1 + arch/arm/kvm/hyp/hyp.h | 2 ++ arch/arm/kvm/hyp/s2-setup.c | 34 ++++++++++++++++++++++++++++++++++ arch/arm/kvm/init.S | 8 -------- 6 files changed, 40 insertions(+), 8 deletions(-) create mode 100644 arch/arm/kvm/hyp/s2-setup.c (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 4841225d10ea..3283a2f63254 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -98,6 +98,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); + +extern void __init_stage2_translation(void); #endif #endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index c62d71751f7a..0fe41aaf2171 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -224,6 +224,7 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr, static inline void __cpu_init_stage2(void) { + kvm_call_hyp(__init_stage2_translation); } static inline int kvm_arch_dev_ioctl_check_extension(long ext) diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index a7d3a7e0b702..7152369504a6 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o obj-$(CONFIG_KVM_ARM_HOST) += switch.o +obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h index 8b9c2eb5a9dc..ff6de6a3af2d 100644 --- a/arch/arm/kvm/hyp/hyp.h +++ b/arch/arm/kvm/hyp/hyp.h @@ -71,6 +71,8 @@ #define HCPTR __ACCESS_CP15(c1, 4, c1, 2) #define HSTR 
__ACCESS_CP15(c1, 4, c1, 3) #define TTBCR __ACCESS_CP15(c2, 0, c0, 2) +#define HTCR __ACCESS_CP15(c2, 4, c0, 2) +#define VTCR __ACCESS_CP15(c2, 4, c1, 2) #define DACR __ACCESS_CP15(c3, 0, c0, 0) #define DFSR __ACCESS_CP15(c5, 0, c0, 0) #define IFSR __ACCESS_CP15(c5, 0, c0, 1) diff --git a/arch/arm/kvm/hyp/s2-setup.c b/arch/arm/kvm/hyp/s2-setup.c new file mode 100644 index 000000000000..f5f49c53be28 --- /dev/null +++ b/arch/arm/kvm/hyp/s2-setup.c @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2016 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#include +#include +#include + +#include "hyp.h" + +void __hyp_text __init_stage2_translation(void) +{ + u64 val; + + val = read_sysreg(VTCR) & ~VTCR_MASK; + + val |= read_sysreg(HTCR) & VTCR_HTCR_SH; + val |= KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S; + + write_sysreg(val, VTCR); +} diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S index 3988e72d16ff..1f9ae17476f9 100644 --- a/arch/arm/kvm/init.S +++ b/arch/arm/kvm/init.S @@ -84,14 +84,6 @@ __do_hyp_init: orr r0, r0, r1 mcr p15, 4, r0, c2, c0, 2 @ HTCR - mrc p15, 4, r1, c2, c1, 2 @ VTCR - ldr r2, =VTCR_MASK - bic r1, r1, r2 - bic r0, r0, #(~VTCR_HTCR_SH) @ clear non-reusable HTCR bits - orr r1, r0, r1 - orr r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S) - mcr p15, 4, r1, c2, c1, 2 @ VTCR - @ Use the same memory attributes for hyp. accesses as the kernel @ (copy MAIRx ro HMAIRx). mrc p15, 0, r0, c10, c2, 0 -- cgit v1.2.3 From fa85e25dad0f3f4e7ff2c58a914dcfe53210f680 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 6 Jan 2016 09:32:23 +0000 Subject: ARM: KVM: Remove __weak attributes Now that the old code is long gone, we can remove all the weak attributes, as there is only one version of the code. 
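The alias pattern this relies on is easy to demonstrate outside the kernel. Below is a standalone C sketch (hypothetical names; only the attribute usage mirrors the kernel code): while two implementations coexist, the alias is additionally marked weak so that the other, strong symbol wins at link time; once the old implementation is gone, the plain strong alias is all that is needed.

#include <stdio.h>

/* Internal implementation; plays the role of __guest_run() in switch.c. */
static int impl_run(int x)
{
	return x + 1;
}

/* Publish the same code under the exported name, as switch.c does with
 * __alias(__guest_run) for __kvm_vcpu_run(). While an old assembly
 * version still existed, this declaration also carried
 * __attribute__((weak)) so the strong assembly symbol won at link time;
 * dropping "weak" makes this the single, strong definition. */
int api_run(int x) __attribute__((alias("impl_run")));

int main(void)
{
	printf("%d\n", api_run(41));	/* prints 42 */
	return 0;
}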
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/hyp-entry.S | 4 +--- arch/arm/kvm/hyp/switch.c | 2 +- arch/arm/kvm/hyp/tlb.c | 6 +++--- 3 files changed, 5 insertions(+), 7 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S index 54a8d67ad980..78091383a5d9 100644 --- a/arch/arm/kvm/hyp/hyp-entry.S +++ b/arch/arm/kvm/hyp/hyp-entry.S @@ -58,10 +58,8 @@ */ .align 5 -__hyp_vector: - .global __hyp_vector __kvm_hyp_vector: - .weak __kvm_hyp_vector + .global __kvm_hyp_vector @ Hyp-mode exception vector W(b) hyp_reset diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c index abbe90b5f2ff..f11ede159080 100644 --- a/arch/arm/kvm/hyp/switch.c +++ b/arch/arm/kvm/hyp/switch.c @@ -191,7 +191,7 @@ again: return exit_code; } -__alias(__guest_run) int __weak __kvm_vcpu_run(struct kvm_vcpu *vcpu); +__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu); static const char * const __hyp_panic_string[] = { [ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x", diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c index aaa44bbac766..82958b8f6a74 100644 --- a/arch/arm/kvm/hyp/tlb.c +++ b/arch/arm/kvm/hyp/tlb.c @@ -50,14 +50,14 @@ static void __hyp_text __tlb_flush_vmid(struct kvm *kvm) write_sysreg(0, VTTBR); } -__alias(__tlb_flush_vmid) void __weak __kvm_tlb_flush_vmid(struct kvm *kvm); +__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm); static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { __tlb_flush_vmid(kvm); } -__alias(__tlb_flush_vmid_ipa) void __weak __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, +__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); static void __hyp_text __tlb_flush_vm_context(void) @@ -67,4 +67,4 @@ static void __hyp_text __tlb_flush_vm_context(void) dsb(ish); } -__alias(__tlb_flush_vm_context) void __weak __kvm_flush_vm_context(void); +__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void); -- cgit v1.2.3 From 4448932fb09a44d73f820afd8fa145a24b3b3995 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 22:53:33 +0000 Subject: ARM: KVM: Turn CP15 defines to an enum Just like on arm64, having the CP15 registers expressed as a set of #defines has been very conflict-prone. Let's turn it into an enum, which should make it more manageable. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 33 --------------------------------- arch/arm/include/asm/kvm_host.h | 39 +++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/guest.c | 1 - 3 files changed, 39 insertions(+), 34 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 3283a2f63254..083825f12c93 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -21,39 +21,6 @@ #include -/* 0 is reserved as an invalid value. */ -#define c0_MPIDR 1 /* MultiProcessor ID Register */ -#define c0_CSSELR 2 /* Cache Size Selection Register */ -#define c1_SCTLR 3 /* System Control Register */ -#define c1_ACTLR 4 /* Auxiliary Control Register */ -#define c1_CPACR 5 /* Coprocessor Access Control */ -#define c2_TTBR0 6 /* Translation Table Base Register 0 */ -#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */ -#define c2_TTBR1 8 /* Translation Table Base Register 1 */ -#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */ -#define c2_TTBCR 10 /* Translation Table Base Control R. 
*/ -#define c3_DACR 11 /* Domain Access Control Register */ -#define c5_DFSR 12 /* Data Fault Status Register */ -#define c5_IFSR 13 /* Instruction Fault Status Register */ -#define c5_ADFSR 14 /* Auxilary Data Fault Status R */ -#define c5_AIFSR 15 /* Auxilary Instrunction Fault Status R */ -#define c6_DFAR 16 /* Data Fault Address Register */ -#define c6_IFAR 17 /* Instruction Fault Address Register */ -#define c7_PAR 18 /* Physical Address Register */ -#define c7_PAR_high 19 /* PAR top 32 bits */ -#define c9_L2CTLR 20 /* Cortex A15/A7 L2 Control Register */ -#define c10_PRRR 21 /* Primary Region Remap Register */ -#define c10_NMRR 22 /* Normal Memory Remap Register */ -#define c12_VBAR 23 /* Vector Base Address Register */ -#define c13_CID 24 /* Context ID Register */ -#define c13_TID_URW 25 /* Thread ID, User R/W */ -#define c13_TID_URO 26 /* Thread ID, User R/O */ -#define c13_TID_PRIV 27 /* Thread ID, Privileged */ -#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */ -#define c10_AMAIR0 29 /* Auxilary Memory Attribute Indirection Reg0 */ -#define c10_AMAIR1 30 /* Auxilary Memory Attribute Indirection Reg1 */ -#define NR_CP15_REGS 31 /* Number of regs (incl. invalid) */ - #define ARM_EXCEPTION_RESET 0 #define ARM_EXCEPTION_UNDEFINED 1 #define ARM_EXCEPTION_SOFTWARE 2 diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 0fe41aaf2171..daf6a71071da 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -88,6 +88,45 @@ struct kvm_vcpu_fault_info { u32 hyp_pc; /* PC when exception was taken from Hyp mode */ }; +/* + * 0 is reserved as an invalid value. + * Order should be kept in sync with the save/restore code. + */ +enum vcpu_sysreg { + __INVALID_SYSREG__, + c0_MPIDR, /* MultiProcessor ID Register */ + c0_CSSELR, /* Cache Size Selection Register */ + c1_SCTLR, /* System Control Register */ + c1_ACTLR, /* Auxiliary Control Register */ + c1_CPACR, /* Coprocessor Access Control */ + c2_TTBR0, /* Translation Table Base Register 0 */ + c2_TTBR0_high, /* TTBR0 top 32 bits */ + c2_TTBR1, /* Translation Table Base Register 1 */ + c2_TTBR1_high, /* TTBR1 top 32 bits */ + c2_TTBCR, /* Translation Table Base Control R. */ + c3_DACR, /* Domain Access Control Register */ + c5_DFSR, /* Data Fault Status Register */ + c5_IFSR, /* Instruction Fault Status Register */ + c5_ADFSR, /* Auxilary Data Fault Status R */ + c5_AIFSR, /* Auxilary Instrunction Fault Status R */ + c6_DFAR, /* Data Fault Address Register */ + c6_IFAR, /* Instruction Fault Address Register */ + c7_PAR, /* Physical Address Register */ + c7_PAR_high, /* PAR top 32 bits */ + c9_L2CTLR, /* Cortex A15/A7 L2 Control Register */ + c10_PRRR, /* Primary Region Remap Register */ + c10_NMRR, /* Normal Memory Remap Register */ + c12_VBAR, /* Vector Base Address Register */ + c13_CID, /* Context ID Register */ + c13_TID_URW, /* Thread ID, User R/W */ + c13_TID_URO, /* Thread ID, User R/O */ + c13_TID_PRIV, /* Thread ID, Privileged */ + c14_CNTKCTL, /* Timer Control Register (PL1) */ + c10_AMAIR0, /* Auxilary Memory Attribute Indirection Reg0 */ + c10_AMAIR1, /* Auxilary Memory Attribute Indirection Reg1 */ + NR_CP15_REGS /* Number of regs (incl. 
invalid) */ +}; + struct kvm_cpu_context { struct kvm_regs gp_regs; struct vfp_hard_struct vfp; diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c index 86e26fbd5ba3..12cbb6824443 100644 --- a/arch/arm/kvm/guest.c +++ b/arch/arm/kvm/guest.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include -- cgit v1.2.3 From ff3a01d1e029847abb2d0735843ac6f7ae1385ea Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:54:07 +0000 Subject: ARM: KVM: Cleanup asm-offsets.c Since we don't have much assembler left, most of the KVM stuff in asm-offsets.c is now superfluous. Let's get rid of it. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kernel/asm-offsets.c | 30 ------------------------------ 1 file changed, 30 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 2f3e0b064066..1f24c32e11fe 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -170,42 +170,12 @@ int main(void) DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE); BLANK(); #ifdef CONFIG_KVM_ARM_HOST - DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); - DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt)); DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context)); DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); - DEFINE(CPU_CTXT_CP15, offsetof(struct kvm_cpu_context, cp15)); DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs)); - DEFINE(GP_REGS_SVC, offsetof(struct kvm_regs, svc_regs)); - DEFINE(GP_REGS_ABT, offsetof(struct kvm_regs, abt_regs)); - DEFINE(GP_REGS_UND, offsetof(struct kvm_regs, und_regs)); - DEFINE(GP_REGS_IRQ, offsetof(struct kvm_regs, irq_regs)); - DEFINE(GP_REGS_FIQ, offsetof(struct kvm_regs, fiq_regs)); - DEFINE(GP_REGS_PC, offsetof(struct kvm_regs, usr_regs.ARM_pc)); - DEFINE(GP_REGS_CPSR, offsetof(struct kvm_regs, usr_regs.ARM_cpsr)); - DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr)); - DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); - DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr)); - DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar)); - DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar)); DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); - DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu)); - DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr)); - DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr)); - DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr)); - DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr)); - DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr)); - DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr)); - DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr)); - DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr)); - DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl)); - DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval)); - DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff)); - DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled)); - DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base)); - DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); #endif BLANK(); #ifdef 
CONFIG_VDSO -- cgit v1.2.3 From 311b5b363cd28aa778de083178f147f32622e331 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:57:36 +0000 Subject: ARM: KVM: Remove unused hyp_pc field This field was never populated, and the panic code already does something similar. Delete the related code. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 5 ----- arch/arm/include/asm/kvm_host.h | 1 - arch/arm/kernel/asm-offsets.c | 1 - arch/arm/kvm/handle_exit.c | 5 ----- 4 files changed, 12 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index f710616ccadc..8a8c6ded9ca7 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -108,11 +108,6 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu) return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8; } -static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu) -{ - return vcpu->arch.fault.hyp_pc; -} - static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu) { return kvm_vcpu_get_hsr(vcpu) & HSR_ISV; diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index daf6a71071da..19e9aba85463 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -85,7 +85,6 @@ struct kvm_vcpu_fault_info { u32 hsr; /* Hyp Syndrome Register */ u32 hxfar; /* Hyp Data/Inst. Fault Address Register */ u32 hpfar; /* Hyp IPA Fault Address Register */ - u32 hyp_pc; /* PC when exception was taken from Hyp mode */ }; /* diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 1f24c32e11fe..27d05813ff09 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -175,7 +175,6 @@ int main(void) DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp)); DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs)); DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs)); - DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc)); #endif BLANK(); #ifdef CONFIG_VDSO diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 3ede90d8b20b..5377d7539a40 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -147,11 +147,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, switch (exception_index) { case ARM_EXCEPTION_IRQ: return 1; - case ARM_EXCEPTION_UNDEFINED: - kvm_err("Undefined exception in Hyp mode at: %#08lx\n", - kvm_vcpu_get_hyp_pc(vcpu)); - BUG(); - panic("KVM: Hypervisor undefined exception!\n"); case ARM_EXCEPTION_DATA_ABORT: case ARM_EXCEPTION_PREF_ABORT: case ARM_EXCEPTION_HVC: -- cgit v1.2.3 From f9e515eeb1833e8bf621948b43ee7c6236c7a167 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 18:58:28 +0000 Subject: ARM: KVM: Remove handling of ARM_EXCEPTION_DATA/PREF_ABORT These are now handled as a panic, so there is little point in keeping them around. 
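With the dead case labels gone, the exit dispatch collapses to a small switch whose default arm is the error path. A simplified standalone sketch of that shape (hypothetical enum values and error handling; not the actual handle_exit()):

#include <stdio.h>
#include <stdlib.h>

enum arm_exception { EXC_IRQ, EXC_HVC, EXC_DATA_ABORT, EXC_PREF_ABORT };

static int handle_exit_sketch(enum arm_exception idx)
{
	switch (idx) {
	case EXC_IRQ:
		return 1;	/* nothing to do, resume the guest */
	case EXC_HVC:
		return 1;	/* would decode the trap and emulate */
	default:
		/* stands in for the kernel's error path */
		fprintf(stderr, "Unsupported exception type: %d\n", idx);
		exit(1);
	}
}

int main(void)
{
	return handle_exit_sketch(EXC_IRQ) == 1 ? 0 : 1;
}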
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/handle_exit.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 5377d7539a40..3f1ef0dbc899 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -147,8 +147,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, switch (exception_index) { case ARM_EXCEPTION_IRQ: return 1; - case ARM_EXCEPTION_DATA_ABORT: - case ARM_EXCEPTION_PREF_ABORT: case ARM_EXCEPTION_HVC: /* * See ARM ARM B1.14.1: "Hyp traps on instructions -- cgit v1.2.3 From 402f352876ba0df574533e59d72fc3e9871f791a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 5 Jan 2016 22:55:10 +0000 Subject: ARM: KVM: Remove __kvm_hyp_exit/__kvm_hyp_exit_end I have no idea what these were for - probably a leftover from an early implementation. Good bye! Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 083825f12c93..15d58b42d5a1 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -55,9 +55,6 @@ struct kvm_vcpu; extern char __kvm_hyp_init[]; extern char __kvm_hyp_init_end[]; -extern char __kvm_hyp_exit[]; -extern char __kvm_hyp_exit_end[]; - extern char __kvm_hyp_vector[]; extern void __kvm_flush_vm_context(void); -- cgit v1.2.3 From 57c841f131ef295b583365d2fddd6b0d16e82c10 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 29 Jan 2016 15:01:28 +0000 Subject: arm/arm64: KVM: Handle out-of-RAM cache maintenance as a NOP So far, our handling of cache maintenance by VA has been pretty simple: Either the access is in the guest RAM and generates an S2 fault, which results in the page being mapped RW, or we go down the io_mem_abort() path, and nuke the guest. The first one is fine, but the second one is extremely weird. Treating the CM as an I/O is wrong, and nothing in the ARM ARM indicates that we should generate a fault for something that cannot end up in the cache anyway (even if the guest maps it, it will keep on faulting at stage-2 for emulation). So let's just skip this instruction, and let the guest get away with it. Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_emulate.h | 5 +++++ arch/arm/kvm/mmu.c | 16 ++++++++++++++++ 2 files changed, 21 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 8a8c6ded9ca7..ee5328fc4b06 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h @@ -138,6 +138,11 @@ static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu) return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW; } +static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu) +{ + return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM); +} + /* Get Access Size from a data abort */ static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu) { diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index aba61fd3697a..c3eb10ea0971 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -1430,6 +1430,22 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) goto out_unlock; } + /* + * Check for a cache maintenance operation. Since we + * ended-up here, we know it is outside of any memory + * slot.
But we can't find out if that is for a device, + * or if the guest is just being stupid. The only thing + * we know for sure is that this range cannot be cached. + * + * So let's assume that the guest is just being + * cautious, and skip the instruction. + */ + if (kvm_vcpu_dabt_is_cm(vcpu)) { + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + ret = 1; + goto out_unlock; + } + /* * The IPA is reported as [MAX:12], so we need to * complement it with the bottom 12 bits from the -- cgit v1.2.3 From 82deae0fc8ba256c1061dd4de42f0ef6cb9f954f Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 9 Jun 2014 19:47:09 +0100 Subject: arm/arm64: Add new is_kernel_in_hyp_mode predicate With the ARMv8.1 VHE extension, it will be possible to run the kernel at EL2 (aka HYP mode). In order for the kernel to easily find out where it is running, add a new predicate that returns whether or not the kernel is in HYP mode. For completeness, the 32bit code also gets such a predicate (always returning false) so that code common to both architectures (timers, KVM) can use it transparently. Acked-by: Christoffer Dall Acked-by: Catalin Marinas Signed-off-by: Marc Zyngier --- arch/arm/include/asm/virt.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch/arm') diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 5fdbfea6defb..d4ceaf5f299b 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -75,6 +75,11 @@ static inline bool is_hyp_mode_mismatched(void) return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH); } +static inline bool is_kernel_in_hyp_mode(void) +{ + return false; +} + /* The section containing the hypervisor text */ extern char __hyp_text_start[]; extern char __hyp_text_end[]; -- cgit v1.2.3 From 1e947bad0b63b351cbdd9ad55ea5bf7e31c76036 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 29 Jan 2015 11:59:54 +0000 Subject: arm64: KVM: Skip HYP setup when already running in HYP With the kernel running at EL2, there is no point trying to configure page tables for HYP, as the kernel is already mapped. Take this opportunity to refactor the whole init a bit, allowing the various parts of the hypervisor bringup to be split across multiple functions.
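The control flow this refactor converges on is easiest to see with the helper bodies stubbed out. A plain C sketch of the ordering and the error unwinding (assumed behaviour only; the real helpers are in the diff below):

#include <stdbool.h>
#include <stdio.h>

static bool is_kernel_in_hyp_mode(void) { return false; } /* 32bit: always false */
static int init_common_resources(void)  { return 0; }
static int init_vhe_mode(void)          { return 0; }
static int init_hyp_mode(void)          { return 0; }
static int init_subsystems(void)        { return 0; }
static void teardown_hyp_mode(void)         { }
static void teardown_common_resources(void) { }

static int kvm_arch_init_sketch(void)
{
	int err = init_common_resources();
	if (err)
		return err;

	if (is_kernel_in_hyp_mode())
		err = init_vhe_mode();	/* kernel already runs at EL2 */
	else
		err = init_hyp_mode();	/* classic split-mode bringup */
	if (err)
		goto out_err;

	err = init_subsystems();	/* vgic, timer, perf, coproc */
	if (err)
		goto out_hyp;

	return 0;

out_hyp:
	teardown_hyp_mode();
out_err:
	teardown_common_resources();
	return err;
}

int main(void)
{
	printf("init: %d\n", kvm_arch_init_sketch());
	return 0;
}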
Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/arm.c | 173 +++++++++++++++++++++++++++++++++++------------------ arch/arm/kvm/mmu.c | 7 +++ 2 files changed, 121 insertions(+), 59 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index fcf6c130c986..686350d05174 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -967,6 +967,11 @@ long kvm_arch_vm_ioctl(struct file *filp, } } +static void cpu_init_stage2(void *dummy) +{ + __cpu_init_stage2(); +} + static void cpu_init_hyp_mode(void *dummy) { phys_addr_t boot_pgd_ptr; @@ -1036,6 +1041,82 @@ static inline void hyp_cpu_pm_init(void) } #endif +static void teardown_common_resources(void) +{ + free_percpu(kvm_host_cpu_state); +} + +static int init_common_resources(void) +{ + kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t); + if (!kvm_host_cpu_state) { + kvm_err("Cannot allocate host CPU state\n"); + return -ENOMEM; + } + + return 0; +} + +static int init_subsystems(void) +{ + int err; + + /* + * Init HYP view of VGIC + */ + err = kvm_vgic_hyp_init(); + switch (err) { + case 0: + vgic_present = true; + break; + case -ENODEV: + case -ENXIO: + vgic_present = false; + break; + default: + return err; + } + + /* + * Init HYP architected timer support + */ + err = kvm_timer_hyp_init(); + if (err) + return err; + + kvm_perf_init(); + kvm_coproc_table_init(); + + return 0; +} + +static void teardown_hyp_mode(void) +{ + int cpu; + + if (is_kernel_in_hyp_mode()) + return; + + free_hyp_pgds(); + for_each_possible_cpu(cpu) + free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); +} + +static int init_vhe_mode(void) +{ + /* + * Execute the init code on each CPU. + */ + on_each_cpu(cpu_init_stage2, NULL, 1); + + /* set size of VMID supported by CPU */ + kvm_vmid_bits = kvm_get_vmid_bits(); + kvm_info("%d-bit VMID\n", kvm_vmid_bits); + + kvm_info("VHE mode initialized successfully\n"); + return 0; +} + /** * Inits Hyp-mode on all online CPUs */ @@ -1066,7 +1147,7 @@ static int init_hyp_mode(void) stack_page = __get_free_page(GFP_KERNEL); if (!stack_page) { err = -ENOMEM; - goto out_free_stack_pages; + goto out_err; } per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; @@ -1078,13 +1159,13 @@ static int init_hyp_mode(void) err = create_hyp_mappings(__hyp_text_start, __hyp_text_end); if (err) { kvm_err("Cannot map world-switch code\n"); - goto out_free_mappings; + goto out_err; } err = create_hyp_mappings(__start_rodata, __end_rodata); if (err) { kvm_err("Cannot map rodata section\n"); - goto out_free_mappings; + goto out_err; } /* @@ -1096,20 +1177,10 @@ static int init_hyp_mode(void) if (err) { kvm_err("Cannot map hyp stack\n"); - goto out_free_mappings; + goto out_err; } } - /* - * Map the host CPU structures - */ - kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t); - if (!kvm_host_cpu_state) { - err = -ENOMEM; - kvm_err("Cannot allocate host CPU state\n"); - goto out_free_mappings; - } - for_each_possible_cpu(cpu) { kvm_cpu_context_t *cpu_ctxt; @@ -1118,7 +1189,7 @@ static int init_hyp_mode(void) if (err) { kvm_err("Cannot map host CPU state: %d\n", err); - goto out_free_context; + goto out_err; } } @@ -1127,34 +1198,22 @@ static int init_hyp_mode(void) */ on_each_cpu(cpu_init_hyp_mode, NULL, 1); - /* - * Init HYP view of VGIC - */ - err = kvm_vgic_hyp_init(); - switch (err) { - case 0: - vgic_present = true; - break; - case -ENODEV: - case -ENXIO: - vgic_present = false; - break; - default: - goto out_free_context; - } - - /* - * Init HYP architected timer support - */ - err = 
kvm_timer_hyp_init(); - if (err) - goto out_free_context; - #ifndef CONFIG_HOTPLUG_CPU free_boot_hyp_pgd(); #endif - kvm_perf_init(); + cpu_notifier_register_begin(); + + err = __register_cpu_notifier(&hyp_init_cpu_nb); + + cpu_notifier_register_done(); + + if (err) { + kvm_err("Cannot register HYP init CPU notifier (%d)\n", err); + goto out_err; + } + + hyp_cpu_pm_init(); /* set size of VMID supported by CPU */ kvm_vmid_bits = kvm_get_vmid_bits(); @@ -1163,14 +1222,9 @@ static int init_hyp_mode(void) kvm_info("Hyp mode initialized successfully\n"); return 0; -out_free_context: - free_percpu(kvm_host_cpu_state); -out_free_mappings: - free_hyp_pgds(); -out_free_stack_pages: - for_each_possible_cpu(cpu) - free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); + out_err: + teardown_hyp_mode(); kvm_err("error initializing Hyp mode: %d\n", err); return err; } @@ -1214,26 +1268,27 @@ int kvm_arch_init(void *opaque) } } - cpu_notifier_register_begin(); - - err = init_hyp_mode(); + err = init_common_resources(); if (err) - goto out_err; + return err; - err = __register_cpu_notifier(&hyp_init_cpu_nb); - if (err) { - kvm_err("Cannot register HYP init CPU notifier (%d)\n", err); + if (is_kernel_in_hyp_mode()) + err = init_vhe_mode(); + else + err = init_hyp_mode(); + if (err) goto out_err; - } - - cpu_notifier_register_done(); - hyp_cpu_pm_init(); + err = init_subsystems(); + if (err) + goto out_hyp; - kvm_coproc_table_init(); return 0; + +out_hyp: + teardown_hyp_mode(); out_err: - cpu_notifier_register_done(); + teardown_common_resources(); return err; } diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index c3eb10ea0971..58dbd5c439df 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "trace.h" @@ -598,6 +599,9 @@ int create_hyp_mappings(void *from, void *to) unsigned long start = KERN_TO_HYP((unsigned long)from); unsigned long end = KERN_TO_HYP((unsigned long)to); + if (is_kernel_in_hyp_mode()) + return 0; + start = start & PAGE_MASK; end = PAGE_ALIGN(end); @@ -630,6 +634,9 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr) unsigned long start = KERN_TO_HYP((unsigned long)from); unsigned long end = KERN_TO_HYP((unsigned long)to); + if (is_kernel_in_hyp_mode()) + return 0; + /* Check for a valid kernel IO mapping */ if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1)) return -EINVAL; -- cgit v1.2.3 From f1c9cad7c508f59fedd9f77eb36e5859e11ce5ab Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 28 Jan 2016 14:31:37 +0000 Subject: ARM: KVM: Move kvm/hyp/hyp.h to include/asm/kvm_hyp.h In order to be able to use the code located in virt/kvm/arm/hyp, we need to make the global hyp.h file accessible from include/asm, similar to what we did for arm64. 
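The header being moved is built around one preprocessor idiom: a register name expands to a comma-separated tuple (read mnemonic, write mnemonic, operand encoding, type), and read_sysreg()/write_sysreg() forward that tuple through __VA_ARGS__ into worker macros that take it apart. A portable sketch of the idiom, with printf() standing in for the real mrc/mcr inline assembly and a made-up encoding string:

#include <stdio.h>

/* A fake "register" expands to a four-field tuple, mirroring
 * __ACCESS_CP15(CRn, Op1, CRm, Op2): read mnemonic, write mnemonic,
 * operand encoding, type. */
#define FAKE_CP15(enc)	"mrc", "mcr", enc, unsigned

#define __read_sysreg(r, w, c, t)	printf("%s %s -> %s\n", r, c, #t)
#define read_sysreg(...)		__read_sysreg(__VA_ARGS__)

#define __write_sysreg(v, r, w, c, t)	printf("%s %s <- %d\n", w, c, (int)(t)(v))
#define write_sysreg(v, ...)		__write_sysreg(v, __VA_ARGS__)

#define SCTLR	FAKE_CP15("p15, 0, %0, c1, c0, 0")

int main(void)
{
	read_sysreg(SCTLR);	/* the tuple splats into __read_sysreg() */
	write_sysreg(5, SCTLR);
	return 0;
}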
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_hyp.h | 130 +++++++++++++++++++++++++++++++++++++++++ arch/arm/kvm/hyp/banked-sr.c | 2 +- arch/arm/kvm/hyp/cp15-sr.c | 2 +- arch/arm/kvm/hyp/hyp.h | 130 ----------------------------------------- arch/arm/kvm/hyp/s2-setup.c | 3 +- arch/arm/kvm/hyp/switch.c | 2 +- arch/arm/kvm/hyp/timer-sr.c | 4 +- arch/arm/kvm/hyp/tlb.c | 2 +- arch/arm/kvm/hyp/vgic-v2-sr.c | 4 +- 9 files changed, 137 insertions(+), 142 deletions(-) create mode 100644 arch/arm/include/asm/kvm_hyp.h delete mode 100644 arch/arm/kvm/hyp/hyp.h (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h new file mode 100644 index 000000000000..ff6de6a3af2d --- /dev/null +++ b/arch/arm/include/asm/kvm_hyp.h @@ -0,0 +1,130 @@ +/* + * Copyright (C) 2015 - ARM Ltd + * Author: Marc Zyngier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef __ARM_KVM_HYP_H__ +#define __ARM_KVM_HYP_H__ + +#include +#include +#include +#include + +#define __hyp_text __section(.hyp.text) notrace + +#define kern_hyp_va(v) (v) +#define hyp_kern_va(v) (v) + +#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \ + "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 +#define __ACCESS_CP15_64(Op1, CRm) \ + "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 +#define __ACCESS_VFP(CRn) \ + "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32 + +#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) +#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__) + +#define __read_sysreg(r, w, c, t) ({ \ + t __val; \ + asm volatile(r " " c : "=r" (__val)); \ + __val; \ +}) +#define read_sysreg(...) 
__read_sysreg(__VA_ARGS__) + +#define write_special(v, r) \ + asm volatile("msr " __stringify(r) ", %0" : : "r" (v)) +#define read_special(r) ({ \ + u32 __val; \ + asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \ + __val; \ +}) + +#define TTBR0 __ACCESS_CP15_64(0, c2) +#define TTBR1 __ACCESS_CP15_64(1, c2) +#define VTTBR __ACCESS_CP15_64(6, c2) +#define PAR __ACCESS_CP15_64(0, c7) +#define CNTV_CVAL __ACCESS_CP15_64(3, c14) +#define CNTVOFF __ACCESS_CP15_64(4, c14) + +#define MIDR __ACCESS_CP15(c0, 0, c0, 0) +#define CSSELR __ACCESS_CP15(c0, 2, c0, 0) +#define VPIDR __ACCESS_CP15(c0, 4, c0, 0) +#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) +#define SCTLR __ACCESS_CP15(c1, 0, c0, 0) +#define CPACR __ACCESS_CP15(c1, 0, c0, 2) +#define HCR __ACCESS_CP15(c1, 4, c1, 0) +#define HDCR __ACCESS_CP15(c1, 4, c1, 1) +#define HCPTR __ACCESS_CP15(c1, 4, c1, 2) +#define HSTR __ACCESS_CP15(c1, 4, c1, 3) +#define TTBCR __ACCESS_CP15(c2, 0, c0, 2) +#define HTCR __ACCESS_CP15(c2, 4, c0, 2) +#define VTCR __ACCESS_CP15(c2, 4, c1, 2) +#define DACR __ACCESS_CP15(c3, 0, c0, 0) +#define DFSR __ACCESS_CP15(c5, 0, c0, 0) +#define IFSR __ACCESS_CP15(c5, 0, c0, 1) +#define ADFSR __ACCESS_CP15(c5, 0, c1, 0) +#define AIFSR __ACCESS_CP15(c5, 0, c1, 1) +#define HSR __ACCESS_CP15(c5, 4, c2, 0) +#define DFAR __ACCESS_CP15(c6, 0, c0, 0) +#define IFAR __ACCESS_CP15(c6, 0, c0, 2) +#define HDFAR __ACCESS_CP15(c6, 4, c0, 0) +#define HIFAR __ACCESS_CP15(c6, 4, c0, 2) +#define HPFAR __ACCESS_CP15(c6, 4, c0, 4) +#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) +#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0) +#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) +#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) +#define PRRR __ACCESS_CP15(c10, 0, c2, 0) +#define NMRR __ACCESS_CP15(c10, 0, c2, 1) +#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0) +#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1) +#define VBAR __ACCESS_CP15(c12, 0, c0, 0) +#define CID __ACCESS_CP15(c13, 0, c0, 1) +#define TID_URW __ACCESS_CP15(c13, 0, c0, 2) +#define TID_URO __ACCESS_CP15(c13, 0, c0, 3) +#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4) +#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2) +#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0) +#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1) +#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0) + +#define VFP_FPEXC __ACCESS_VFP(FPEXC) + +void __timer_save_state(struct kvm_vcpu *vcpu); +void __timer_restore_state(struct kvm_vcpu *vcpu); + +void __vgic_v2_save_state(struct kvm_vcpu *vcpu); +void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); + +void __sysreg_save_state(struct kvm_cpu_context *ctxt); +void __sysreg_restore_state(struct kvm_cpu_context *ctxt); + +void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp); +void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp); +static inline bool __vfp_enabled(void) +{ + return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10))); +} + +void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt); +void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); + +int asmlinkage __guest_enter(struct kvm_vcpu *vcpu, + struct kvm_cpu_context *host); +int asmlinkage __hyp_do_panic(const char *, int, u32); + +#endif /* __ARM_KVM_HYP_H__ */ diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c index d02dc804f611..111bda8cdebd 100644 --- a/arch/arm/kvm/hyp/banked-sr.c +++ b/arch/arm/kvm/hyp/banked-sr.c @@ -18,7 +18,7 @@ * along with this program. If not, see . 
*/ -#include "hyp.h" +#include __asm__(".arch_extension virt"); diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c index 732abbc34bd0..c4782812714c 100644 --- a/arch/arm/kvm/hyp/cp15-sr.c +++ b/arch/arm/kvm/hyp/cp15-sr.c @@ -18,7 +18,7 @@ * along with this program. If not, see . */ -#include "hyp.h" +#include static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx) { diff --git a/arch/arm/kvm/hyp/hyp.h b/arch/arm/kvm/hyp/hyp.h deleted file mode 100644 index ff6de6a3af2d..000000000000 --- a/arch/arm/kvm/hyp/hyp.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright (C) 2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef __ARM_KVM_HYP_H__ -#define __ARM_KVM_HYP_H__ - -#include -#include -#include -#include - -#define __hyp_text __section(.hyp.text) notrace - -#define kern_hyp_va(v) (v) -#define hyp_kern_va(v) (v) - -#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \ - "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32 -#define __ACCESS_CP15_64(Op1, CRm) \ - "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64 -#define __ACCESS_VFP(CRn) \ - "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32 - -#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v))) -#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__) - -#define __read_sysreg(r, w, c, t) ({ \ - t __val; \ - asm volatile(r " " c : "=r" (__val)); \ - __val; \ -}) -#define read_sysreg(...) 
__read_sysreg(__VA_ARGS__) - -#define write_special(v, r) \ - asm volatile("msr " __stringify(r) ", %0" : : "r" (v)) -#define read_special(r) ({ \ - u32 __val; \ - asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \ - __val; \ -}) - -#define TTBR0 __ACCESS_CP15_64(0, c2) -#define TTBR1 __ACCESS_CP15_64(1, c2) -#define VTTBR __ACCESS_CP15_64(6, c2) -#define PAR __ACCESS_CP15_64(0, c7) -#define CNTV_CVAL __ACCESS_CP15_64(3, c14) -#define CNTVOFF __ACCESS_CP15_64(4, c14) - -#define MIDR __ACCESS_CP15(c0, 0, c0, 0) -#define CSSELR __ACCESS_CP15(c0, 2, c0, 0) -#define VPIDR __ACCESS_CP15(c0, 4, c0, 0) -#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5) -#define SCTLR __ACCESS_CP15(c1, 0, c0, 0) -#define CPACR __ACCESS_CP15(c1, 0, c0, 2) -#define HCR __ACCESS_CP15(c1, 4, c1, 0) -#define HDCR __ACCESS_CP15(c1, 4, c1, 1) -#define HCPTR __ACCESS_CP15(c1, 4, c1, 2) -#define HSTR __ACCESS_CP15(c1, 4, c1, 3) -#define TTBCR __ACCESS_CP15(c2, 0, c0, 2) -#define HTCR __ACCESS_CP15(c2, 4, c0, 2) -#define VTCR __ACCESS_CP15(c2, 4, c1, 2) -#define DACR __ACCESS_CP15(c3, 0, c0, 0) -#define DFSR __ACCESS_CP15(c5, 0, c0, 0) -#define IFSR __ACCESS_CP15(c5, 0, c0, 1) -#define ADFSR __ACCESS_CP15(c5, 0, c1, 0) -#define AIFSR __ACCESS_CP15(c5, 0, c1, 1) -#define HSR __ACCESS_CP15(c5, 4, c2, 0) -#define DFAR __ACCESS_CP15(c6, 0, c0, 0) -#define IFAR __ACCESS_CP15(c6, 0, c0, 2) -#define HDFAR __ACCESS_CP15(c6, 4, c0, 0) -#define HIFAR __ACCESS_CP15(c6, 4, c0, 2) -#define HPFAR __ACCESS_CP15(c6, 4, c0, 4) -#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0) -#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0) -#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0) -#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4) -#define PRRR __ACCESS_CP15(c10, 0, c2, 0) -#define NMRR __ACCESS_CP15(c10, 0, c2, 1) -#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0) -#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1) -#define VBAR __ACCESS_CP15(c12, 0, c0, 0) -#define CID __ACCESS_CP15(c13, 0, c0, 1) -#define TID_URW __ACCESS_CP15(c13, 0, c0, 2) -#define TID_URO __ACCESS_CP15(c13, 0, c0, 3) -#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4) -#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2) -#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0) -#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1) -#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0) - -#define VFP_FPEXC __ACCESS_VFP(FPEXC) - -void __timer_save_state(struct kvm_vcpu *vcpu); -void __timer_restore_state(struct kvm_vcpu *vcpu); - -void __vgic_v2_save_state(struct kvm_vcpu *vcpu); -void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); - -void __sysreg_save_state(struct kvm_cpu_context *ctxt); -void __sysreg_restore_state(struct kvm_cpu_context *ctxt); - -void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp); -void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp); -static inline bool __vfp_enabled(void) -{ - return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10))); -} - -void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt); -void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt); - -int asmlinkage __guest_enter(struct kvm_vcpu *vcpu, - struct kvm_cpu_context *host); -int asmlinkage __hyp_do_panic(const char *, int, u32); - -#endif /* __ARM_KVM_HYP_H__ */ diff --git a/arch/arm/kvm/hyp/s2-setup.c b/arch/arm/kvm/hyp/s2-setup.c index f5f49c53be28..7be39af2ed6c 100644 --- a/arch/arm/kvm/hyp/s2-setup.c +++ b/arch/arm/kvm/hyp/s2-setup.c @@ -18,8 +18,7 @@ #include #include #include - -#include "hyp.h" +#include void __hyp_text __init_stage2_translation(void) { diff --git a/arch/arm/kvm/hyp/switch.c 
b/arch/arm/kvm/hyp/switch.c index f11ede159080..b13caa90cd44 100644 --- a/arch/arm/kvm/hyp/switch.c +++ b/arch/arm/kvm/hyp/switch.c @@ -16,7 +16,7 @@ */ #include -#include "hyp.h" +#include __asm__(".arch_extension virt"); diff --git a/arch/arm/kvm/hyp/timer-sr.c b/arch/arm/kvm/hyp/timer-sr.c index d7535fd0784e..2bb0c926e01c 100644 --- a/arch/arm/kvm/hyp/timer-sr.c +++ b/arch/arm/kvm/hyp/timer-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include /* vcpu is already in the HYP VA space */ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c index 82958b8f6a74..a2636001e616 100644 --- a/arch/arm/kvm/hyp/tlb.c +++ b/arch/arm/kvm/hyp/tlb.c @@ -18,7 +18,7 @@ * along with this program. If not, see . */ -#include "hyp.h" +#include /** * Flush per-VMID TLBs diff --git a/arch/arm/kvm/hyp/vgic-v2-sr.c b/arch/arm/kvm/hyp/vgic-v2-sr.c index e71761238cfc..9514a7d90d71 100644 --- a/arch/arm/kvm/hyp/vgic-v2-sr.c +++ b/arch/arm/kvm/hyp/vgic-v2-sr.c @@ -19,9 +19,7 @@ #include #include -#include - -#include "hyp.h" +#include /* vcpu is already in the HYP VA space */ void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From b5fa5d3e628bd301b89937ee4f7814297d8e2e31 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 28 Jan 2016 14:33:05 +0000 Subject: ARM: KVM: Use common version of vgic-v2-sr.c No need to keep our own private version, the common one is strictly identical. Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/hyp/Makefile | 5 ++- arch/arm/kvm/hyp/vgic-v2-sr.c | 82 ------------------------------------------- 2 files changed, 4 insertions(+), 83 deletions(-) delete mode 100644 arch/arm/kvm/hyp/vgic-v2-sr.c (limited to 'arch/arm') diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 7152369504a6..8f735d970ef1 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -2,10 +2,13 @@ # Makefile for Kernel-based Virtual Machine module, HYP part # +KVM=../../../../virt/kvm + +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o + obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o -obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o diff --git a/arch/arm/kvm/hyp/vgic-v2-sr.c b/arch/arm/kvm/hyp/vgic-v2-sr.c deleted file mode 100644 index 9514a7d90d71..000000000000 --- a/arch/arm/kvm/hyp/vgic-v2-sr.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2012-2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include - -#include - -/* vcpu is already in the HYP VA space */ -void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; - struct vgic_dist *vgic = &kvm->arch.vgic; - void __iomem *base = kern_hyp_va(vgic->vctrl_base); - u32 eisr0, eisr1, elrsr0, elrsr1; - int i, nr_lr; - - if (!base) - return; - - nr_lr = vcpu->arch.vgic_cpu.nr_lr; - cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR); - cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR); - eisr0 = readl_relaxed(base + GICH_EISR0); - elrsr0 = readl_relaxed(base + GICH_ELRSR0); - if (unlikely(nr_lr > 32)) { - eisr1 = readl_relaxed(base + GICH_EISR1); - elrsr1 = readl_relaxed(base + GICH_ELRSR1); - } else { - eisr1 = elrsr1 = 0; - } -#ifdef CONFIG_CPU_BIG_ENDIAN - cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1; - cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1; -#else - cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0; - cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0; -#endif - cpu_if->vgic_apr = readl_relaxed(base + GICH_APR); - - writel_relaxed(0, base + GICH_HCR); - - for (i = 0; i < nr_lr; i++) - cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4)); -} - -/* vcpu is already in the HYP VA space */ -void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2; - struct vgic_dist *vgic = &kvm->arch.vgic; - void __iomem *base = kern_hyp_va(vgic->vctrl_base); - int i, nr_lr; - - if (!base) - return; - - writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR); - writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR); - writel_relaxed(cpu_if->vgic_apr, base + GICH_APR); - - nr_lr = vcpu->arch.vgic_cpu.nr_lr; - for (i = 0; i < nr_lr; i++) - writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4)); -} -- cgit v1.2.3 From 68130cb5db09cb8a285a59f70ac72d2bfa8685fd Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Thu, 28 Jan 2016 14:48:42 +0000 Subject: ARM: KVM: Use common version of timer-sr.c Using the common HYP timer code is a bit more tricky, since we use system register names. Nothing a set of macros cannot work around... 
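The trick is two macro expansions back to back: the shared code says read_sysreg_el0(cntv_ctl), token-pasting turns that into cntv_ctl_el0, and a per-architecture #define remaps the pasted name onto the 32bit register. A portable sketch of the double expansion (a string stands in for the real accessor):

#include <stdio.h>

#define read_sysreg(r)		puts("mrc " r)		/* 32bit accessor */
#define read_sysreg_el0(r)	read_sysreg(r##_el0)	/* arm64 spelling */

/* The remap step: the pasted name resolves to the 32bit register. */
#define cntv_ctl_el0		"p15, 0, %0, c14, c3, 1" /* CNTV_CTL */

int main(void)
{
	read_sysreg_el0(cntv_ctl);	/* two expansions later: reads CNTV_CTL */
	return 0;
}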
Acked-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_hyp.h | 9 ++++++ arch/arm/kvm/hyp/Makefile | 2 +- arch/arm/kvm/hyp/timer-sr.c | 69 ------------------------------------------ 3 files changed, 10 insertions(+), 70 deletions(-) delete mode 100644 arch/arm/kvm/hyp/timer-sr.c (limited to 'arch/arm') diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h index ff6de6a3af2d..f0e860761380 100644 --- a/arch/arm/include/asm/kvm_hyp.h +++ b/arch/arm/include/asm/kvm_hyp.h @@ -104,6 +104,15 @@ #define VFP_FPEXC __ACCESS_VFP(FPEXC) +/* AArch64 compatibility macros, only for the timer so far */ +#define read_sysreg_el0(r) read_sysreg(r##_el0) +#define write_sysreg_el0(v, r) write_sysreg(v, r##_el0) + +#define cntv_ctl_el0 CNTV_CTL +#define cntv_cval_el0 CNTV_CVAL +#define cntvoff_el2 CNTVOFF +#define cnthctl_el2 CNTHCTL + void __timer_save_state(struct kvm_vcpu *vcpu); void __timer_restore_state(struct kvm_vcpu *vcpu); diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile index 8f735d970ef1..8dfa5f7f9290 100644 --- a/arch/arm/kvm/hyp/Makefile +++ b/arch/arm/kvm/hyp/Makefile @@ -5,10 +5,10 @@ KVM=../../../../virt/kvm obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o +obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += tlb.o obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o -obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o obj-$(CONFIG_KVM_ARM_HOST) += vfp.o obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o obj-$(CONFIG_KVM_ARM_HOST) += entry.o diff --git a/arch/arm/kvm/hyp/timer-sr.c b/arch/arm/kvm/hyp/timer-sr.c deleted file mode 100644 index 2bb0c926e01c..000000000000 --- a/arch/arm/kvm/hyp/timer-sr.c +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Copyright (C) 2012-2015 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . 
- */ - -#include -#include -#include - -#include - -/* vcpu is already in the HYP VA space */ -void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - u64 val; - - if (kvm->arch.timer.enabled) { - timer->cntv_ctl = read_sysreg(CNTV_CTL); - timer->cntv_cval = read_sysreg(CNTV_CVAL); - } - - /* Disable the virtual timer */ - write_sysreg(0, CNTV_CTL); - - /* Allow physical timer/counter access for the host */ - val = read_sysreg(CNTHCTL); - val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; - write_sysreg(val, CNTHCTL); - - /* Clear cntvoff for the host */ - write_sysreg(0, CNTVOFF); -} - -void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) -{ - struct kvm *kvm = kern_hyp_va(vcpu->kvm); - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - u64 val; - - /* - * Disallow physical timer access for the guest - * Physical counter access is allowed - */ - val = read_sysreg(CNTHCTL); - val &= ~CNTHCTL_EL1PCEN; - val |= CNTHCTL_EL1PCTEN; - write_sysreg(val, CNTHCTL); - - if (kvm->arch.timer.enabled) { - write_sysreg(kvm->arch.timer.cntvoff, CNTVOFF); - write_sysreg(timer->cntv_cval, CNTV_CVAL); - isb(); - write_sysreg(timer->cntv_ctl, CNTV_CTL); - } -} -- cgit v1.2.3 From b02386eb7dac7555a208d81aef2a0e5c6f0f8085 Mon Sep 17 00:00:00 2001 From: Shannon Zhao Date: Fri, 26 Feb 2016 19:29:19 +0800 Subject: arm64: KVM: Add PMU overflow interrupt routing When calling perf_event_create_kernel_counter to create a perf_event, assign an overflow handler. Then when the perf event overflows, set the corresponding bit of the guest PMOVSSET register. If this counter is enabled and its interrupt is enabled as well, kick the vcpu to sync the interrupt. On VM entry, if a counter has overflowed and the interrupt level has changed, inject the interrupt with the corresponding level. On VM exit, sync the interrupt level as well if it has been changed. Signed-off-by: Shannon Zhao Reviewed-by: Marc Zyngier Reviewed-by: Andrew Jones Reviewed-by: Christoffer Dall Signed-off-by: Marc Zyngier --- arch/arm/kvm/arm.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch/arm') diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 686350d05174..c5e959187abd 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -28,6 +28,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include "trace.h" @@ -577,6 +578,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) * non-preemptible context. */ preempt_disable(); + kvm_pmu_flush_hwstate(vcpu); kvm_timer_flush_hwstate(vcpu); kvm_vgic_flush_hwstate(vcpu); @@ -593,6 +595,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) || vcpu->arch.power_off || vcpu->arch.pause) { local_irq_enable(); + kvm_pmu_sync_hwstate(vcpu); kvm_timer_sync_hwstate(vcpu); kvm_vgic_sync_hwstate(vcpu); preempt_enable(); @@ -642,10 +645,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); /* - * We must sync the timer state before the vgic state so that - * the vgic can properly sample the updated state of the + * We must sync the PMU and timer state before the vgic state so + * that the vgic can properly sample the updated state of the * interrupt line.
From 5f0a714a2b63c25ffba5d832773f3ca4f0d02e21 Mon Sep 17 00:00:00 2001
From: Shannon Zhao
Date: Fri, 11 Sep 2015 15:18:05 +0800
Subject: arm64: KVM: Free perf event of PMU when destroying vcpu

When KVM frees a VCPU, it needs to free the PMU's perf_event as well.

Signed-off-by: Shannon Zhao
Reviewed-by: Marc Zyngier
Reviewed-by: Andrew Jones
Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/arm.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index c5e959187abd..9d133df2da53 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -266,6 +266,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
        kvm_mmu_free_memory_caches(vcpu);
        kvm_timer_vcpu_terminate(vcpu);
        kvm_vgic_vcpu_destroy(vcpu);
+       kvm_pmu_vcpu_destroy(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
 }
 
--
cgit v1.2.3
From f577f6c2a6a5ccabe98061f256a1e2ff468d5e93 Mon Sep 17 00:00:00 2001
From: Shannon Zhao
Date: Mon, 11 Jan 2016 20:56:17 +0800
Subject: arm64: KVM: Introduce per-vcpu kvm device controls

In some cases userspace needs to get/set attributes specific to a
single vcpu, and so needs something other than the ONE_REG interface.
Let's copy the KVM_DEVICE approach, and define the corresponding
ioctls for the vcpu file descriptor.

Signed-off-by: Shannon Zhao
Reviewed-by: Andrew Jones
Acked-by: Peter Maydell
Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/arm.c | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9d133df2da53..166232356291 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -828,11 +828,51 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr)
+{
+       int ret = -ENXIO;
+
+       switch (attr->group) {
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr)
+{
+       int ret = -ENXIO;
+
+       switch (attr->group) {
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr)
+{
+       int ret = -ENXIO;
+
+       switch (attr->group) {
+       default:
+               break;
+       }
+
+       return ret;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
 {
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
+       struct kvm_device_attr attr;
 
        switch (ioctl) {
        case KVM_ARM_VCPU_INIT: {
@@ -875,6 +915,21 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                        return -E2BIG;
                return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
        }
+       case KVM_SET_DEVICE_ATTR: {
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       return -EFAULT;
+               return kvm_arm_vcpu_set_attr(vcpu, &attr);
+       }
+       case KVM_GET_DEVICE_ATTR: {
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       return -EFAULT;
+               return kvm_arm_vcpu_get_attr(vcpu, &attr);
+       }
+       case KVM_HAS_DEVICE_ATTR: {
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       return -EFAULT;
+               return kvm_arm_vcpu_has_attr(vcpu, &attr);
+       }
        default:
                return -EINVAL;
        }
--
cgit v1.2.3
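From userspace, the three new ioctls follow the probe-then-access pattern of the existing KVM device API, only now against a vcpu file descriptor. A sketch, assuming vcpu_fd was obtained via KVM_CREATE_VCPU; the group/attr values come from a specific control group (such as the PMUv3 group introduced two patches below):

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /*
     * Probe for an attribute, then set it. KVM_HAS_DEVICE_ATTR failing
     * with ENXIO means "not supported on this vcpu".
     */
    static int vcpu_set_attr(int vcpu_fd, __u32 group, __u64 attr, void *data)
    {
            struct kvm_device_attr da;

            memset(&da, 0, sizeof(da));
            da.group = group;
            da.attr = attr;
            da.addr = (__u64)(unsigned long)data;

            if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &da) < 0)
                    return -1;

            return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &da);
    }

At this point in the series every group still falls through the default: case and returns -ENXIO; the next patches route the default into arch-specific handlers and then populate them.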
From bb0c70bcca6ba3c84afc2da7426f3b923bbe6825 Mon Sep 17 00:00:00 2001
From: Shannon Zhao
Date: Mon, 11 Jan 2016 21:35:32 +0800
Subject: arm64: KVM: Add a new vcpu device control group for PMUv3

To configure the virtual PMUv3 overflow interrupt number, we use the
vcpu kvm_device ioctl, encapsulating the KVM_ARM_VCPU_PMU_V3_IRQ
attribute within the KVM_ARM_VCPU_PMU_V3_CTRL group. After configuring
the PMUv3, call the vcpu ioctl with attribute KVM_ARM_VCPU_PMU_V3_INIT
to initialize the PMUv3.

Signed-off-by: Shannon Zhao
Acked-by: Peter Maydell
Reviewed-by: Andrew Jones
Reviewed-by: Christoffer Dall
Signed-off-by: Marc Zyngier
---
 arch/arm/include/asm/kvm_host.h | 15 +++++++++++++++
 arch/arm/kvm/arm.c              |  3 +++
 2 files changed, 18 insertions(+)

(limited to 'arch/arm')

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 19e9aba85463..385070180c25 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -287,5 +287,20 @@ static inline void kvm_arm_init_debug(void) {}
 static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+static inline int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+                                            struct kvm_device_attr *attr)
+{
+       return -ENXIO;
+}
+static inline int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+                                            struct kvm_device_attr *attr)
+{
+       return -ENXIO;
+}
+static inline int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+                                            struct kvm_device_attr *attr)
+{
+       return -ENXIO;
+}
 
 #endif /* __ARM_KVM_HOST_H__ */
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 166232356291..75c7fed5d14c 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -835,6 +835,7 @@ static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
 
        switch (attr->group) {
        default:
+               ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
                break;
        }
 
@@ -848,6 +849,7 @@ static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
 
        switch (attr->group) {
        default:
+               ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
                break;
        }
 
@@ -861,6 +863,7 @@ static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
 
        switch (attr->group) {
        default:
+               ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
                break;
        }
 
--
cgit v1.2.3
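Putting the two attributes together, the expected userspace order is IRQ first, then INIT. A hedged sketch, assuming the KVM_ARM_VCPU_PMU_V3_* constants from the arm64 uapi headers; the interrupt number is passed as a pointer to an int, and picking a suitable PPI number is left to the VMM:

    #include <linux/kvm.h>
    #include <string.h>
    #include <sys/ioctl.h>

    static int vcpu_pmu_setup(int vcpu_fd, int irq)
    {
            struct kvm_device_attr attr;

            /* Step 1: tell KVM which interrupt the virtual PMU overflows on. */
            memset(&attr, 0, sizeof(attr));
            attr.group = KVM_ARM_VCPU_PMU_V3_CTRL;
            attr.attr  = KVM_ARM_VCPU_PMU_V3_IRQ;
            attr.addr  = (__u64)(unsigned long)&irq;
            if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
                    return -1;

            /* Step 2: initialize the PMU; no payload needed. */
            memset(&attr, 0, sizeof(attr));
            attr.group = KVM_ARM_VCPU_PMU_V3_CTRL;
            attr.attr  = KVM_ARM_VCPU_PMU_V3_INIT;
            return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
    }

On 32-bit ARM the kvm_host.h stubs above keep this returning -ENXIO, since the PMUv3 emulation is an arm64-only feature.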
From 504bfce18a76c9fb6ad5a5f894750f4fca6cde39 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 21 Jan 2016 15:37:03 +0000
Subject: ARM: KVM: Properly sort the invariant table

Not having the invariant table properly sorted is an oddity, and may
get in the way of future optimisations. Let's fix it.

Reviewed-by: Christoffer Dall
Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/coproc.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index e3e86c4cfed2..9aa462ec9c56 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -645,6 +645,9 @@ static struct coproc_reg invariant_cp15[] = {
        { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
        { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },
 
+       { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
+       { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
+
        { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
        { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
@@ -660,9 +663,6 @@ static struct coproc_reg invariant_cp15[] = {
        { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
        { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
-
-       { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
-       { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
 };
 
 /*
--
cgit v1.2.3
From b613f59dd2628937860f37dbfbe315d9edcb1668 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 21 Jan 2016 15:34:35 +0000
Subject: ARM: KVM: Enforce sorting of all CP tables

Since we're obviously terrible at sorting the CP tables, make sure
we're going to do it properly (or fail to boot). arm64 has had the
same mechanism for a while, and nobody ever broke it...

Reviewed-by: Christoffer Dall
Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/coproc.c | 25 +++++++++++++++++--------
 1 file changed, 17 insertions(+), 8 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 9aa462ec9c56..40d6db1ca4a8 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -381,17 +381,26 @@ static const struct coproc_reg cp15_regs[] = {
        { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
 };
 
+static int check_reg_table(const struct coproc_reg *table, unsigned int n)
+{
+       unsigned int i;
+
+       for (i = 1; i < n; i++) {
+               if (cmp_reg(&table[i-1], &table[i]) >= 0) {
+                       kvm_err("reg table %p out of order (%d)\n", table, i - 1);
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
 /* Target specific emulation tables */
 static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
 
 void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
 {
-       unsigned int i;
-
-       for (i = 1; i < table->num; i++)
-               BUG_ON(cmp_reg(&table->table[i-1],
-                              &table->table[i]) >= 0);
-
+       BUG_ON(check_reg_table(table->table, table->num));
        target_tables[table->target] = table;
 }
 
@@ -1210,8 +1219,8 @@ void kvm_coproc_table_init(void)
        unsigned int i;
 
        /* Make sure tables are unique and in order. */
-       for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
-               BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);
+       BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
+       BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));
 
        /* We abuse the reset function to overwrite the table itself. */
        for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
--
cgit v1.2.3
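Because check_reg_table() flags cmp_reg() results >= 0 rather than > 0, it rejects duplicate entries as well as misordered ones, which is exactly what "unique and in order" requires. A standalone illustration of the invariant; struct reg and the comparator are simplified stand-ins for coproc_reg and cmp_reg:

    #include <stdio.h>

    struct reg { int crn, crm, op1, op2; };

    static int cmp(const struct reg *a, const struct reg *b)
    {
            if (a->crn != b->crn) return a->crn - b->crn;
            if (a->crm != b->crm) return a->crm - b->crm;
            if (a->op1 != b->op1) return a->op1 - b->op1;
            return a->op2 - b->op2;
    }

    /* Returns the first offending index, or -1 if strictly sorted. */
    static int check(const struct reg *t, unsigned int n)
    {
            unsigned int i;

            for (i = 1; i < n; i++)
                    if (cmp(&t[i - 1], &t[i]) >= 0)
                            return (int)(i - 1);
            return -1;
    }

    int main(void)
    {
            struct reg t[] = { {0,0,0,1}, {0,0,1,1}, {0,0,1,1} };  /* duplicate */

            printf("out of order at %d\n", check(t, 3));  /* prints 1 */
            return 0;
    }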
From f1d67d4ac79aef6de709d7a21b35851685a1d3ee Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 21 Jan 2016 17:04:52 +0000
Subject: ARM: KVM: Rename struct coproc_reg::is_64 to is_64bit

As we're going to play some tricks on the struct coproc_reg, make sure
its 64bit indicator field matches that of coproc_params.

Acked-by: Christoffer Dall
Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/coproc.c | 4 ++--
 arch/arm/kvm/coproc.h | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 40d6db1ca4a8..bb0690271dd2 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -423,7 +423,7 @@ static const struct coproc_reg *find_reg(const struct coproc_params *params,
        for (i = 0; i < num; i++) {
                const struct coproc_reg *r = &table[i];
 
-               if (params->is_64bit != r->is_64)
+               if (params->is_64bit != r->is_64bit)
                        continue;
@@ -1105,7 +1105,7 @@ static int write_demux_regids(u64 __user *uindices)
 static u64 cp15_to_index(const struct coproc_reg *reg)
 {
        u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
-       if (reg->is_64) {
+       if (reg->is_64bit) {
                val |= KVM_REG_SIZE_U64;
                val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
                /*
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 27351323871d..eef1759c2b65 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -37,7 +37,7 @@ struct coproc_reg {
        unsigned long Op1;
        unsigned long Op2;
 
-       bool is_64;
+       bool is_64bit;
 
        /* Trapped access from guest, if non-NULL. */
        bool (*access)(struct kvm_vcpu *,
@@ -141,7 +141,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
                return i1->Op1 - i2->Op1;
        if (i1->Op2 != i2->Op2)
                return i1->Op2 - i2->Op2;
-       return i2->is_64 - i1->is_64;
+       return i2->is_64bit - i1->is_64bit;
 }
 
 
@@ -150,8 +150,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
 #define CRm64(_x)      .CRn = _x, .CRm = 0
 #define Op1(_x)        .Op1 = _x
 #define Op2(_x)        .Op2 = _x
-#define is64   .is_64 = true
-#define is32   .is_64 = false
+#define is64   .is_64bit = true
+#define is32   .is_64bit = false
 
 bool access_vm_reg(struct kvm_vcpu *vcpu,
                   const struct coproc_params *p,
--
cgit v1.2.3
From d06a5440a02cf8ff67b1cd4ee75a30b1b1c66cff Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 21 Jan 2016 17:34:22 +0000
Subject: ARM: KVM: Switch the CP reg search to be a binary search

Doing a linear search is a bit silly when we can do a binary search.
Not that we trap so many things that it has become a burden yet, but
it makes sense to align it with the arm64 code.

Reviewed-by: Christoffer Dall
Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/coproc.c | 41 +++++++++++++++++++++++------------------
 1 file changed, 23 insertions(+), 18 deletions(-)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index bb0690271dd2..1bb2b79c01ff 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -16,6 +16,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
  */
+
+#include
 #include
 #include
 #include
@@ -414,29 +416,32 @@ static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
        return table->table;
 }
 
+#define reg_to_match_value(x)                                          \
+       ({                                                              \
+               unsigned long val;                                      \
+               val  = (x)->CRn << 11;                                  \
+               val |= (x)->CRm << 7;                                   \
+               val |= (x)->Op1 << 4;                                   \
+               val |= (x)->Op2 << 1;                                   \
+               val |= !(x)->is_64bit;                                  \
+               val;                                                    \
+        })
+
+static int match_reg(const void *key, const void *elt)
+{
+       const unsigned long pval = (unsigned long)key;
+       const struct coproc_reg *r = elt;
+
+       return pval - reg_to_match_value(r);
+}
+
 static const struct coproc_reg *find_reg(const struct coproc_params *params,
                                         const struct coproc_reg table[],
                                         unsigned int num)
 {
-       unsigned int i;
-
-       for (i = 0; i < num; i++) {
-               const struct coproc_reg *r = &table[i];
+       unsigned long pval = reg_to_match_value(params);
 
-               if (params->is_64bit != r->is_64bit)
-                       continue;
-               if (params->CRn != r->CRn)
-                       continue;
-               if (params->CRm != r->CRm)
-                       continue;
-               if (params->Op1 != r->Op1)
-                       continue;
-               if (params->Op2 != r->Op2)
-                       continue;
-
-               return r;
-       }
-       return NULL;
+       return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
 }
 
 static int emulate_cp15(struct kvm_vcpu *vcpu,
--
cgit v1.2.3
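The trick above is to fold all five match fields into a single integer whose numeric order agrees with the table's sort order, so a stock bsearch() comparator suffices; the key travels through the "key pointer" argument by value and is never dereferenced. A standalone sketch using the same shifts; names are simplified, and the values stay small enough for the int-returning comparator to be safe:

    #include <stdio.h>
    #include <stdlib.h>

    struct reg { unsigned crn, crm, op1, op2, is64; };

    /* Same packing as reg_to_match_value: CRn@11, CRm@7, Op1@4, Op2@1, !is64@0. */
    static unsigned long key_of(const struct reg *r)
    {
            return ((unsigned long)r->crn << 11) | (r->crm << 7) |
                   (r->op1 << 4) | (r->op2 << 1) | !r->is64;
    }

    static int match(const void *key, const void *elt)
    {
            /* "key" carries the packed value itself, not an address. */
            return (int)((unsigned long)key - key_of(elt));
    }

    int main(void)
    {
            /* Must be sorted by key_of(), the order cmp_reg() enforces. */
            struct reg table[] = { {0,0,0,1,0}, {0,1,0,0,0}, {1,0,0,0,1} };
            struct reg want = {0,1,0,0,0};
            const struct reg *hit;

            hit = bsearch((void *)key_of(&want), table, 3,
                          sizeof(table[0]), match);
            printf("%s\n", hit ? "found" : "not found");
            return 0;
    }

Note how the previous two patches set this up: the enforced sort order is what makes bsearch() legal, and the is_64bit rename lets the one reg_to_match_value() macro serve both coproc_params and coproc_reg.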
From 9b4a3004439d5be680faf41f4267968ca11bb9f6 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Fri, 29 Jan 2016 19:04:48 +0000
Subject: KVM: arm/arm64: timer: Add active state caching

Programming the active state in the (re)distributor can be an
expensive operation, so it makes some sense to try and reduce the
number of accesses as much as possible.

So far, we program the active state on each VM entry, but there is
some opportunity to do less.

An obvious solution is to cache the active state in memory, and only
program it in the HW when conditions change. But because the HW can
also change things under our feet (the active state can transition
from 1 to 0 when the guest does an EOI), some precautions have to be
taken, which amount to only caching an "inactive" state, and always
programming it otherwise.

With this in place, we observe a reduction of around 700 cycles on a
2GHz GICv2 platform for a NULL hypercall.

Reviewed-by: Christoffer Dall
Signed-off-by: Marc Zyngier
---
 arch/arm/kvm/arm.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/arm')

diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 75c7fed5d14c..9ca653e34d8c 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -322,6 +322,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        vcpu->cpu = -1;
 
        kvm_arm_set_running_vcpu(NULL);
+       kvm_timer_vcpu_put(vcpu);
 }
 
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
--
cgit v1.2.3
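The caching rule boils down to an asymmetry: a cached "inactive" can be trusted, because only the hypervisor sets the active bit, while a cached "active" cannot, because the guest's EOI can clear it in hardware behind our back. A sketch of that rule; every name here is hypothetical, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    struct timer_ctx {
            bool active_cleared_last;  /* last value we programmed was "inactive" */
    };

    static void gic_set_active(bool level)
    {
            /* stands in for the expensive (re)distributor access */
            printf("GIC active <- %d\n", level);
    }

    static void timer_set_active(struct timer_ctx *t, bool level)
    {
            /* Only a cached "inactive" may skip the hardware access. */
            if (!level && t->active_cleared_last)
                    return;

            gic_set_active(level);
            t->active_cleared_last = !level;
    }

    int main(void)
    {
            struct timer_ctx t = { false };

            timer_set_active(&t, false);  /* programmed, "inactive" cached */
            timer_set_active(&t, false);  /* cache hit, no GIC access */
            timer_set_active(&t, true);   /* active is always programmed */
            return 0;
    }

The kvm_timer_vcpu_put() hook added here gives the timer code a point at which to invalidate that cache when the vcpu stops running on a CPU, since the state it left in that CPU's (re)distributor can no longer be assumed.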