author    Marc Zyngier <marc.zyngier@arm.com>    2016-06-30 18:40:40 +0100
committer Christoffer Dall <christoffer.dall@linaro.org>    2016-07-03 23:41:27 +0200
commit    fd81e6bf3928c14f90a033df164c375d4ce0fd85 (patch)
tree      9bf9ac123d247ee0bcc9702b683031a65d21b541 /arch/arm64
parent    d53d9bc65289dc50b42587313466594e4d611f0f (diff)
arm64: KVM: Refactor kern_hyp_va to deal with multiple offsets
As we move towards a selectable HYP VA range, it is obvious that we
don't want to test a variable to find out if we need to use the bottom
VA range, the top VA range, or use the address as is (for VHE).

Instead, we can expand our current helper to generate the right mask or
nop with code patching. We default to using the top VA space, with
alternatives to switch to the bottom one or to nop out the instructions.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
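The three post-patching behaviours can be modelled in plain C. This is only an
illustrative sketch, not the kernel code: hyp_va_mode and model_kern_hyp_va()
are made-up names, and the real helper (in the diff below) selects one sequence
once at boot via instruction patching instead of branching at runtime.

/*
 * Illustrative model of the patched kern_hyp_va() sequences.
 * Names and the runtime switch are hypothetical; the kernel patches
 * "and"/"nop" instructions in place rather than testing a variable.
 */
enum hyp_va_mode { HYP_VA_TOP, HYP_VA_BOTTOM, HYP_VA_VHE };

static unsigned long model_kern_hyp_va(unsigned long va, enum hyp_va_mode mode,
					unsigned long high_mask,
					unsigned long low_mask)
{
	switch (mode) {
	case HYP_VA_TOP:	/* "and hi; nop": top of the HYP VA range */
		return va & high_mask;
	case HYP_VA_BOTTOM:	/* "and hi; and lo": bottom of the range */
		return (va & high_mask) & low_mask;
	case HYP_VA_VHE:	/* "nop; nop": kernel VA used as is */
	default:
		return va;
	}
}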
Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h | 11
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 42
2 files changed, 39 insertions, 14 deletions
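The new comment block in the kvm_mmu.h hunk below notes that the "low mask"
sequence only works because HYP_PAGE_OFFSET_LOW_MASK is a strict subset of
HYP_PAGE_OFFSET_HIGH_MASK, so applying the high mask first changes nothing.
A minimal stand-alone check of that identity (the mask values here are
illustrative, not the kernel's):

#include <assert.h>

int main(void)
{
	/* Illustrative masks: "low" keeps one bit fewer than "high". */
	unsigned long high = 0x0000ffffffffffffUL;
	unsigned long low  = 0x00007fffffffffffUL;
	unsigned long va   = 0xffff800012345678UL;

	assert((low & high) == low);			/* low is a subset of high */
	assert(((va & high) & low) == (va & low));	/* so the extra "and" is harmless */
	return 0;
}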
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index 1d81f9abd172..cff510574fae 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -25,17 +25,6 @@
#define __hyp_text __section(.hyp.text) notrace
-static inline unsigned long __kern_hyp_va(unsigned long v)
-{
- asm volatile(ALTERNATIVE("and %0, %0, %1",
- "nop",
- ARM64_HAS_VIRT_HOST_EXTN)
- : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
- return v;
-}
-
-#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
-
#define read_sysreg_elx(r,nvh,vh) \
({ \
u64 reg; \
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 5e543231f615..2970537161d2 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -90,13 +90,33 @@
/*
* Convert a kernel VA into a HYP VA.
* reg: VA to be converted.
+ *
+ * This generates the following sequences:
+ * - High mask:
+ * and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
+ * nop
+ * - Low mask:
+ * and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
+ * and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
+ * - VHE:
+ * nop
+ * nop
+ *
+ * The "low mask" version works because the mask is a strict subset of
+ * the "high mask", hence performing the first mask for nothing.
+ * Should be completely invisible on any viable CPU.
*/
.macro kern_hyp_va reg
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- and \reg, \reg, #HYP_PAGE_OFFSET_MASK
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ and \reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
alternative_else
nop
alternative_endif
+alternative_if_not ARM64_HYP_OFFSET_LOW
+ nop
+alternative_else
+ and \reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
+alternative_endif
.endm
#else
@@ -107,7 +127,23 @@ alternative_endif
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
-#define KERN_TO_HYP(kva) ((unsigned long)kva & HYP_PAGE_OFFSET_MASK)
+static inline unsigned long __kern_hyp_va(unsigned long v)
+{
+ asm volatile(ALTERNATIVE("and %0, %0, %1",
+ "nop",
+ ARM64_HAS_VIRT_HOST_EXTN)
+ : "+r" (v)
+ : "i" (HYP_PAGE_OFFSET_HIGH_MASK));
+ asm volatile(ALTERNATIVE("nop",
+ "and %0, %0, %1",
+ ARM64_HYP_OFFSET_LOW)
+ : "+r" (v)
+ : "i" (HYP_PAGE_OFFSET_LOW_MASK));
+ return v;
+}
+
+#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
+#define KERN_TO_HYP(v) kern_hyp_va(v)
/*
* We currently only support a 40bit IPA.