author		Marc Zyngier <maz@kernel.org>	2021-08-06 12:31:07 +0100
committer	Marc Zyngier <maz@kernel.org>	2021-08-20 09:11:28 +0100
commit		4efc0ede4f31d7ec25c3dee0c8f07f93735cee6d (patch)
tree		5e1e2b80b0d02559275e66dcfbd10fd2d75081d0 /arch/arm64/kvm/hyp
parent		923a547d71b967c808a596968cf8022102f8b5b2 (diff)
KVM: arm64: Unify stage-2 programming behind __load_stage2()
The protected mode relies on a separate helper to load the S2 context.
Move over to the __load_guest_stage2() helper instead, and rename it to
__load_stage2() to present a unified interface.

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Jade Alglave <jade.alglave@arm.com>
Cc: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210806113109.2475-5-will@kernel.org
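For reference, the unified helper itself is not part of the hunks below (it lives
outside arch/arm64/kvm/hyp, next to the old __load_guest_stage2()). The sketch
below is only an approximation of the interface the updated call sites assume:
the VTCR is now taken from the kvm_arch pointer instead of being passed
explicitly, and the ISB is conditional on the speculative-AT workaround, as the
comments in the tlb.c hunks rely on.

static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	/* Program the stage-2 translation control and base registers. */
	write_sysreg(arch->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * Only emit an ISB when the speculative AT workaround is applied;
	 * callers that need an unconditional ISB add their own.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}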
Diffstat (limited to 'arch/arm64/kvm/hyp')
-rw-r--r--	arch/arm64/kvm/hyp/include/nvhe/mem_protect.h	2
-rw-r--r--	arch/arm64/kvm/hyp/nvhe/mem_protect.c	2
-rw-r--r--	arch/arm64/kvm/hyp/nvhe/switch.c	2
-rw-r--r--	arch/arm64/kvm/hyp/nvhe/tlb.c	4
-rw-r--r--	arch/arm64/kvm/hyp/vhe/switch.c	6
-rw-r--r--	arch/arm64/kvm/hyp/vhe/tlb.c	4
6 files changed, 10 insertions, 10 deletions
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 9c227d87c36d..8901dc95d7de 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
@@ -29,7 +29,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
static __always_inline void __load_host_stage2(void)
{
if (static_branch_likely(&kvm_protected_mode_initialized))
- __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+ __load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
else
write_sysreg(0, vttbr_el2);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index d938ce95d3bd..36aea13c9e5a 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -126,7 +126,7 @@ int __pkvm_prot_finalize(void)
kvm_flush_dcache_to_poc(params, sizeof(*params));
write_sysreg(params->hcr_el2, hcr_el2);
- __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+ __load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
/*
* Make sure to have an ISB before the TLB maintenance below but only
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index e50a49082923..3e7ad32b3f0d 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -215,7 +215,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__sysreg_restore_state_nvhe(guest_ctxt);
mmu = kern_hyp_va(vcpu->arch.hw_mmu);
- __load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
+ __load_stage2(mmu, kern_hyp_va(mmu->arch));
__activate_traps(vcpu);
__hyp_vgic_restore_state(vcpu);
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 76229407d8f0..d296d617f589 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -34,12 +34,12 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
}
/*
- * __load_guest_stage2() includes an ISB only when the AT
+ * __load_stage2() includes an ISB only when the AT
* workaround is applied. Take care of the opposite condition,
* ensuring that we always have an ISB, but not two ISBs back
* to back.
*/
- __load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
+ __load_stage2(mmu, kern_hyp_va(mmu->arch));
asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
}
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 0cb7523a501a..709f6438283e 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -124,11 +124,11 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
*
* We have already configured the guest's stage 1 translation in
* kvm_vcpu_load_sysregs_vhe above. We must now call
- * __load_guest_stage2 before __activate_traps, because
- * __load_guest_stage2 configures stage 2 translation, and
+ * __load_stage2 before __activate_traps, because
+ * __load_stage2 configures stage 2 translation, and
* __activate_traps clear HCR_EL2.TGE (among other things).
*/
- __load_guest_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
+ __load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
__activate_traps(vcpu);
__kvm_adjust_pc(vcpu);
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index 5e9fb3989e0b..24cef9b87f9e 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -50,10 +50,10 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
*
* ARM erratum 1165522 requires some special handling (again),
* as we need to make sure both stages of translation are in
- * place before clearing TGE. __load_guest_stage2() already
+ * place before clearing TGE. __load_stage2() already
* has an ISB in order to deal with this.
*/
- __load_guest_stage2(mmu, mmu->arch);
+ __load_stage2(mmu, mmu->arch);
val = read_sysreg(hcr_el2);
val &= ~HCR_TGE;
write_sysreg(val, hcr_el2);