path: root/arch/arm64/kvm/arm.c
author	Will Deacon <will@kernel.org>	2020-09-28 11:45:24 +0100
committer	Will Deacon <will@kernel.org>	2020-09-29 16:08:17 +0100
commit	9ef2b48be9bba70ded9372fc81e678e707306060 (patch)
tree	78ee8e1e33c1baf46d920331edfa1a7980316b78 /arch/arm64/kvm/arm.c
parent	31c84d6c9cde673b3866972552ec52e4c522b4fa (diff)
KVM: arm64: Allow patching EL2 vectors even when KASLR is not enabled
Patching the EL2 exception vectors is integral to the Spectre-v2 workaround,
where it can be necessary to execute CPU-specific sequences to nobble the
branch predictor before running the hypervisor text proper.

Remove the dependency on CONFIG_RANDOMIZE_BASE and allow the EL2 vectors to
be patched even when KASLR is not enabled.

Fixes: 7a132017e7a5 ("KVM: arm64: Replace CONFIG_KVM_INDIRECT_VECTORS with CONFIG_RANDOMIZE_BASE")
Reported-by: kernel test robot <lkp@intel.com>
Link: https://lore.kernel.org/r/202009221053.Jv1XsQUZ%lkp@intel.com
Signed-off-by: Will Deacon <will@kernel.org>
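[Editor's note] For reference, the four-way selection matrix that the new
kvm_map_vectors() encodes (see the hunk below) can be illustrated with a
minimal standalone sketch. This is plain userspace C, not kernel code:
pick_vectors() and its boolean parameters are hypothetical stand-ins for the
ARM64_SPECTRE_V2 and ARM64_HARDEN_EL2_VECTORS capability checks.

	/*
	 * Standalone illustration of the vector-selection matrix from
	 * kvm_map_vectors(). The two flags are plain booleans here,
	 * purely for demonstration.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	static const char *pick_vectors(bool spectre_v2, bool harden_el2)
	{
		if (!spectre_v2 && !harden_el2)
			return "direct vectors";
		if (spectre_v2 && !harden_el2)
			return "hardened vectors in place";
		if (!spectre_v2 && harden_el2)
			return "spare vector slot + exec mapping";
		return "hardened vectors + exec mapping";
	}

	int main(void)
	{
		for (int sv2 = 0; sv2 <= 1; sv2++)
			for (int hel2 = 0; hel2 <= 1; hel2++)
				printf("SV2=%d HEL2=%d -> %s\n", sv2, hel2,
				       pick_vectors(sv2, hel2));
		return 0;
	}

Compiled and run, this prints one line per combination, matching the comment
block in the diff.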
Diffstat (limited to 'arch/arm64/kvm/arm.c')
-rw-r--r--	arch/arm64/kvm/arm.c	| 34
1 file changed, 34 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 9b90a701462c..13e559ac7235 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1256,6 +1256,40 @@ long kvm_arch_vm_ioctl(struct file *filp,
 	}
 }
 
+static int kvm_map_vectors(void)
+{
+	/*
+	 * SV2  = ARM64_SPECTRE_V2
+	 * HEL2 = ARM64_HARDEN_EL2_VECTORS
+	 *
+	 * !SV2 + !HEL2 -> use direct vectors
+	 *  SV2 + !HEL2 -> use hardened vectors in place
+	 * !SV2 +  HEL2 -> allocate one vector slot and use exec mapping
+	 *  SV2 +  HEL2 -> use hardened vectors and use exec mapping
+	 */
+	if (cpus_have_const_cap(ARM64_SPECTRE_V2)) {
+		__kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs);
+		__kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+	}
+
+	if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+		phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs);
+		unsigned long size = __BP_HARDEN_HYP_VECS_SZ;
+
+		/*
+		 * Always allocate a spare vector slot, as we don't
+		 * know yet which CPUs have a BP hardening slot that
+		 * we can reuse.
+		 */
+		__kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+		BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+		return create_hyp_exec_mappings(vect_pa, size,
+						&__kvm_bp_vect_base);
+	}
+
+	return 0;
+}
+
 static void cpu_init_hyp_mode(void)
 {
 	phys_addr_t pgd_ptr;
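[Editor's note] A word on the spare-slot allocation in the hunk above:
atomic_inc_return() increments arm64_el2_vector_last_slot and returns the new
value, so each caller receives a unique slot index, and BUG_ON() catches
overflow of the fixed slot array. The following userspace analogue uses C11
atomics; NR_SLOTS is a made-up stand-in for BP_HARDEN_EL2_SLOTS, and the
counter starts at -1 so the first allocation yields slot 0, mirroring the
kernel's initialiser.

	/*
	 * Userspace sketch of atomic slot allocation: fetch-add hands
	 * out unique indices; the assert plays the role of BUG_ON().
	 */
	#include <assert.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define NR_SLOTS 4	/* stand-in for BP_HARDEN_EL2_SLOTS */

	static atomic_int last_slot = -1;

	static int alloc_vector_slot(void)
	{
		/* atomic_fetch_add() + 1 == the kernel's atomic_inc_return() */
		int slot = atomic_fetch_add(&last_slot, 1) + 1;

		assert(slot < NR_SLOTS);	/* kernel uses BUG_ON() here */
		return slot;
	}

	int main(void)
	{
		printf("allocated slot %d\n", alloc_vector_slot());
		printf("allocated slot %d\n", alloc_vector_slot());
		return 0;
	}

Because the increment is atomic, this stays race-free even if several CPUs
(or threads, in the sketch) allocate slots concurrently.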