author    Linus Torvalds <torvalds@linux-foundation.org>  2018-01-30 13:57:43 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-01-30 13:57:43 -0800
commit    0aebc6a440b942df6221a7765f077f02217e0114 (patch)
tree      b5004b4194cc7e69c76465287f34263bf0b5aab5 /virt
parent    72906f38934a49faf4d2d38ea9ae32adcf7d5d0c (diff)
parent    ec89ab50a03a33a4a648869e868b1964354fb2d1 (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:
 "The main theme of this pull request is security covering variants 2
  and 3 for arm64. I expect to send additional patches next week
  covering an improved firmware interface (requires firmware changes)
  for variant 2 and a way for KPTI to be disabled on unaffected CPUs
  (Cavium's ThunderX doesn't work properly with KPTI enabled because of
  a hardware erratum).

  Summary:

   - Security mitigations:
      - variant 2: invalidate the branch predictor with a call to
        secure firmware
      - variant 3: implement KPTI for arm64

   - 52-bit physical address support for arm64 (ARMv8.2)

   - arm64 support for RAS (firmware first only) and SDEI (software
     delegated exception interface; allows firmware to inject a RAS
     error into the OS)

   - perf support for the ARM DynamIQ Shared Unit PMU

   - CPUID and HWCAP bits updated for new floating point multiplication
     instructions in ARMv8.4

   - remove some virtual memory layout printks during boot

   - fix initial page table creation to cope with larger than 32M
     kernel images when 16K pages are enabled"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (104 commits)
  arm64: Fix TTBR + PAN + 52-bit PA logic in cpu_do_switch_mm
  arm64: Turn on KPTI only on CPUs that need it
  arm64: Branch predictor hardening for Cavium ThunderX2
  arm64: Run enable method for errata work arounds on late CPUs
  arm64: Move BP hardening to check_and_switch_context
  arm64: mm: ignore memory above supported physical address size
  arm64: kpti: Fix the interaction between ASID switching and software PAN
  KVM: arm64: Emulate RAS error registers and set HCR_EL2's TERR & TEA
  KVM: arm64: Handle RAS SErrors from EL2 on guest exit
  KVM: arm64: Handle RAS SErrors from EL1 on guest exit
  KVM: arm64: Save ESR_EL2 on guest SError
  KVM: arm64: Save/Restore guest DISR_EL1
  KVM: arm64: Set an impdef ESR for Virtual-SError using VSESR_EL2.
  KVM: arm/arm64: mask/unmask daif around VHE guests
  arm64: kernel: Prepare for a DISR user
  arm64: Unconditionally enable IESB on exception entry/return for firmware-first
  arm64: kernel: Survive corrected RAS errors notified by SError
  arm64: cpufeature: Detect CPU RAS Extentions
  arm64: sysreg: Move to use definitions for all the SCTLR bits
  arm64: cpufeature: __this_cpu_has_cap() shouldn't stop early
  ...
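For the variant 2 mitigation described above, the branch predictor is invalidated by trapping to secure firmware, which runs its own invalidation sequence before returning. As of this merge the trap used is a plain PSCI version query (the "improved firmware interface" Catalin mentions became a dedicated SMCCC workaround call in a later series). A minimal sketch of such a firmware call, assuming the standard arm_smccc_smc() helper and the PSCI_0_2_FN_PSCI_VERSION constant:

#include <linux/arm-smccc.h>
#include <uapi/linux/psci.h>

/* Sketch only: any trap to EL3 gives firmware the chance to run its
 * branch-predictor invalidation hook before returning to the kernel. */
static void bp_hardening_smc(void)
{
	struct arm_smccc_res res;

	arm_smccc_smc(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0, 0, 0, 0, 0, &res);
}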
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/arm.c  35
-rw-r--r--  virt/kvm/arm/mmu.c  12
2 files changed, 24 insertions(+), 23 deletions(-)
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 2e43f9d42bd5..08464b2fba1d 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -53,8 +53,8 @@
__asm__(".arch_extension virt");
#endif
+DEFINE_PER_CPU(kvm_cpu_context_t, kvm_host_cpu_state);
static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-static kvm_cpu_context_t __percpu *kvm_host_cpu_state;
/* Per-CPU variable containing the currently running vcpu. */
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
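This hunk (together with the accessor updates below) converts kvm_host_cpu_state from a dynamic alloc_percpu() allocation to a static DEFINE_PER_CPU definition, removing an allocation that could fail at init time and giving the symbol a fixed address. A minimal sketch of the generic per-CPU API difference, with illustrative variable names:

#include <linux/percpu.h>

/* old style: dynamically allocated, accessed through the pointer */
static kvm_cpu_context_t __percpu *dyn_state;	/* from alloc_percpu() */

/* new style: statically defined, accessed through its address */
static DEFINE_PER_CPU(kvm_cpu_context_t, static_state);

static void percpu_access_example(int cpu)
{
	kvm_cpu_context_t *a = per_cpu_ptr(dyn_state, cpu);	/* old */
	kvm_cpu_context_t *b = per_cpu_ptr(&static_state, cpu);	/* new */

	(void)a; (void)b;
}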
@@ -354,7 +354,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
vcpu->cpu = cpu;
- vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
+ vcpu->arch.host_cpu_context = this_cpu_ptr(&kvm_host_cpu_state);
kvm_arm_set_running_vcpu(vcpu);
kvm_vgic_load(vcpu);
@@ -509,7 +509,7 @@ static void update_vttbr(struct kvm *kvm)
pgd_phys = virt_to_phys(kvm->arch.pgd);
BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
- kvm->arch.vttbr = pgd_phys | vmid;
+ kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
spin_unlock(&kvm_vmid_lock);
}
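kvm_phys_to_vttbr() is needed because, with ARMv8.2-LPA, a 52-bit physical address no longer fits the (V)TTBR base-address field directly: PA bits [51:48] are carried in register bits [5:2] instead. A C rendering of the encoding, assuming the TTBR_BADDR_MASK_52 mask this series introduces:

/* sketch: pa >> 46 moves PA bits [51:48] down into bits [5:2] */
static inline u64 sketch_phys_to_ttbr(u64 pa)
{
	return (pa | (pa >> 46)) & TTBR_BADDR_MASK_52;
}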
@@ -704,9 +704,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
trace_kvm_entry(*vcpu_pc(vcpu));
guest_enter_irqoff();
+ if (has_vhe())
+ kvm_arm_vhe_guest_enter();
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
+ if (has_vhe())
+ kvm_arm_vhe_guest_exit();
vcpu->mode = OUTSIDE_GUEST_MODE;
vcpu->stat.exits++;
/*
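kvm_arm_vhe_guest_enter()/_exit() come from the "KVM: arm/arm64: mask/unmask daif around VHE guests" commit in the shortlog: on VHE systems the host kernel itself runs at EL2, so PSTATE.DAIF is masked around the world switch to ensure no interrupt or SError is taken while guest state is half loaded. As merged they are roughly the following (reproduced from memory, treat as a sketch):

static inline void kvm_arm_vhe_guest_enter(void)
{
	local_daif_mask();	/* mask Debug, SError, IRQ and FIQ */
}

static inline void kvm_arm_vhe_guest_exit(void)
{
	/* restore Debug, SError and FIQ; IRQs stay masked until the
	 * normal exit path re-enables them */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);
}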
@@ -759,6 +763,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
guest_exit();
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
+ /* Exit types that need handling before we can be preempted */
+ handle_exit_early(vcpu, run, ret);
+
preempt_enable();
ret = handle_exit(vcpu, run, ret);
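handle_exit_early() is the hook added by the RAS series for exit types that must be processed before the vcpu thread can be preempted; in particular, a guest SError has to be consumed on this CPU before the task can migrate. Its shape in the corresponding handle_exit.c change is roughly this (names as recalled, treat as a sketch):

void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
		       int exception_index)
{
	if (ARM_EXCEPTION_CODE(exception_index) == ARM_EXCEPTION_EL1_SERROR)
		kvm_handle_guest_serror(vcpu, kvm_vcpu_get_hsr(vcpu));
}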
@@ -1158,7 +1165,7 @@ static void cpu_init_hyp_mode(void *dummy)
pgd_ptr = kvm_mmu_get_httbr();
stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
hyp_stack_ptr = stack_page + PAGE_SIZE;
- vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
+ vector_ptr = (unsigned long)kvm_get_hyp_vector();
__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
__cpu_init_stage2();
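kvm_get_hyp_vector() replaces the fixed __kvm_hyp_vector so that CPUs with a branch-predictor-hardening hook are pointed at a hardened, per-slot copy of the hyp vectors instead. A simplified sketch of the selection logic (symbol and field names as recalled from this series; the real version also handles the non-VHE linear-map alias):

static inline void *sketch_get_hyp_vector(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();
	void *vect = kvm_ksym_ref(__kvm_hyp_vector);

	/* CPUs needing variant 2 hardening use the 2K vector slot that
	 * was patched with their firmware-call sequence */
	if (data->fn)
		vect = __bp_harden_hyp_vecs_start +
		       data->hyp_vectors_slot * SZ_2K;

	return vect;
}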
@@ -1272,19 +1279,8 @@ static inline void hyp_cpu_pm_exit(void)
}
#endif
-static void teardown_common_resources(void)
-{
- free_percpu(kvm_host_cpu_state);
-}
-
static int init_common_resources(void)
{
- kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
- if (!kvm_host_cpu_state) {
- kvm_err("Cannot allocate host CPU state\n");
- return -ENOMEM;
- }
-
/* set size of VMID supported by CPU */
kvm_vmid_bits = kvm_get_vmid_bits();
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
@@ -1403,6 +1399,12 @@ static int init_hyp_mode(void)
goto out_err;
}
+ err = kvm_map_vectors();
+ if (err) {
+ kvm_err("Cannot map vectors\n");
+ goto out_err;
+ }
+
/*
* Map the Hyp stack pages
*/
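kvm_map_vectors() is what makes those hardened vector slots reachable at EL2: the __bp_harden_hyp_vecs section must be mapped executable in the hyp page tables before kvm_get_hyp_vector() can hand out pointers into it. Something like the following, under the same caveats as above:

static inline int sketch_map_vectors(void)
{
	return create_hyp_mappings(kvm_ksym_ref(__bp_harden_hyp_vecs_start),
				   kvm_ksym_ref(__bp_harden_hyp_vecs_end),
				   PAGE_HYP_EXEC);
}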
@@ -1420,7 +1422,7 @@ static int init_hyp_mode(void)
for_each_possible_cpu(cpu) {
kvm_cpu_context_t *cpu_ctxt;
- cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
+ cpu_ctxt = per_cpu_ptr(&kvm_host_cpu_state, cpu);
err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
if (err) {
@@ -1544,7 +1546,6 @@ out_hyp:
if (!in_hyp_mode)
teardown_hyp_mode();
out_err:
- teardown_common_resources();
return err;
}
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 9dea96380339..f8eaf86b740a 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -621,7 +621,7 @@ static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
return 0;
}
-static int __create_hyp_mappings(pgd_t *pgdp,
+static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
unsigned long start, unsigned long end,
unsigned long pfn, pgprot_t prot)
{
@@ -634,7 +634,7 @@ static int __create_hyp_mappings(pgd_t *pgdp,
addr = start & PAGE_MASK;
end = PAGE_ALIGN(end);
do {
- pgd = pgdp + pgd_index(addr);
+ pgd = pgdp + ((addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1));
if (pgd_none(*pgd)) {
pud = pud_alloc_one(NULL, addr);
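Open-coding the index is the point of this change: pgd_index() bakes in PTRS_PER_PGD, which is derived from the kernel's VA bits, but the idmap pgd may now be larger than that (see the last hunk in this file). With 4K pages and 48-bit VAs, for example, PGDIR_SHIFT is 39 and PTRS_PER_PGD is 512; passing ptrs_per_pgd explicitly lets the same walker index an extended idmap pgd. As a standalone helper (hypothetical name):

static unsigned long hyp_pgd_index(unsigned long addr,
				   unsigned long ptrs_per_pgd)
{
	return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
}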
@@ -697,8 +697,8 @@ int create_hyp_mappings(void *from, void *to, pgprot_t prot)
int err;
phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
- err = __create_hyp_mappings(hyp_pgd, virt_addr,
- virt_addr + PAGE_SIZE,
+ err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
+ virt_addr, virt_addr + PAGE_SIZE,
__phys_to_pfn(phys_addr),
prot);
if (err)
@@ -729,7 +729,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
return -EINVAL;
- return __create_hyp_mappings(hyp_pgd, start, end,
+ return __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD, start, end,
__phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
}
@@ -1735,7 +1735,7 @@ static int kvm_map_idmap_text(pgd_t *pgd)
int err;
/* Create the idmap in the boot page tables */
- err = __create_hyp_mappings(pgd,
+ err = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
hyp_idmap_start, hyp_idmap_end,
__phys_to_pfn(hyp_idmap_start),
PAGE_HYP_EXEC);
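__kvm_idmap_ptrs_per_pgd() accounts for the idmap mapping physical addresses 1:1: with 52-bit PA support the idmap can need more address bits than the kernel's VA space covers, in which case the idmap pgd is extended with extra root-level entries. A sketch of the idea (hypothetical helper, not the kernel's exact definition):

static unsigned long sketch_idmap_ptrs_per_pgd(void)
{
	/* assumed: number of VA bits needed to cover the idmap'd PAs */
	unsigned long idmap_va_bits = 48;

	if (idmap_va_bits > VA_BITS)
		return PTRS_PER_PGD << (idmap_va_bits - VA_BITS);
	return PTRS_PER_PGD;
}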